Results 81 - 90 of 92 for GetShape (0.12 sec)

  1. tensorflow/compiler/mlir/lite/transforms/prepare_quantize_helper.h

            aqtype.getZeroPoints(), new_out_quant_dim, /*narrow_range=*/true);
        auto new_tensor_type = RankedTensorType::getChecked(
            transpose_op.getLoc(), transpose_op.getType().getShape(), new_qtype);
        rewriter.setInsertionPointAfter(transpose_op);
        auto new_q_op = rewriter.create<quantfork::QuantizeCastOp>(
            transpose_op.getLoc(), new_tensor_type, q_op.getArg());
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Fri May 03 18:01:23 UTC 2024
    - 28K bytes
    - Viewed (0)
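The snippet above rebuilds a quantized result type that keeps the transpose output's shape but swaps in a newly built quantized element type. A minimal sketch of that shape-preserving pattern, using plain RankedTensorType::get rather than getChecked (the helper name is hypothetical):

    #include "mlir/IR/BuiltinTypes.h"

    // Hypothetical helper: keep an existing ranked tensor's shape but replace
    // its element type, e.g. with a freshly built quantized type.
    mlir::RankedTensorType WithNewElementType(mlir::RankedTensorType old_type,
                                              mlir::Type new_element_type) {
      return mlir::RankedTensorType::get(old_type.getShape(), new_element_type);
    }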
  2. tensorflow/compiler/mlir/lite/ir/tfl_ops.td

          ".getShape()[" # dim # " ] == " # size>]>;
    
    // Returns true if the n-th operand is ranked and has a dimension length <=
    // size at the rank dim.
    class TFL_OperandDimIsAtMost<int n, int dim, int size> : And<[
      TFL_OperandIsRankedAndHasDimPred<n, dim>,
      CPred<"$_op.getOperand(" # n # ").getType().cast<ShapedType>()"
          ".getShape()[" # dim # " ] <= " # size>]>;
    
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Jun 06 19:09:08 UTC 2024
    - 186K bytes
    - Viewed (0)
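These TableGen predicates expand into C++ checks on an operand's ShapedType. Roughly, TFL_OperandDimIsAtMost corresponds to a check like the sketch below; the function name and the explicit dynamic-dimension guard are assumptions for illustration, not the generated code:

    #include "mlir/IR/BuiltinTypes.h"
    #include "mlir/IR/Operation.h"

    // Sketch: operand n is ranked, has a dim-th dimension, and that dimension
    // is statically known and <= size.
    bool OperandDimIsAtMost(mlir::Operation *op, unsigned n, int dim,
                            int64_t size) {
      auto type = mlir::dyn_cast<mlir::ShapedType>(op->getOperand(n).getType());
      return type && type.hasRank() && dim < type.getRank() &&
             !type.isDynamicDim(dim) && type.getShape()[dim] <= size;
    }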
  3. tensorflow/compiler/mlir/tensorflow/transforms/stack_ops_decomposition.cc

      if (!elem_type.has_value()) {
        return stack.emitOpError("cannot infer element shape of stack");
      }
      OpBuilder builder(stack);
      Value buffer;
      if (failed(cutil::CreateInitBufferValue(
              elem_type->getShape(), stack.getMaxSize(), stack,
              elem_type->getElementType(), builder, &buffer))) {
        return failure();
      }
      auto size_var_type = GetSizeVarType(builder);
      auto var_type = RankedTensorType::get(
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 23.8K bytes
    - Viewed (0)
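Here the inferred element shape feeds a buffer whose type prepends the stack's max size to the element dimensions. A small sketch of that type construction (the helper name is hypothetical; the real cutil utility also creates and initializes the buffer value):

    #include "llvm/ADT/SmallVector.h"
    #include "mlir/IR/BuiltinTypes.h"

    // Sketch: buffer type of shape [max_size, elem_shape...] with the stack
    // element's element type.
    mlir::RankedTensorType GetBufferType(mlir::RankedTensorType elem_type,
                                         int64_t max_size) {
      llvm::SmallVector<int64_t, 8> dims;
      dims.push_back(max_size);
      dims.append(elem_type.getShape().begin(), elem_type.getShape().end());
      return mlir::RankedTensorType::get(dims, elem_type.getElementType());
    }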
  4. tensorflow/compiler/mlir/tensorflow/c/c_api_unified_experimental_mlir.cc

      }
      elt_type = TensorFlowRefType::get(elt_type);
      if (RankedTensorType tensor_type = mlir::dyn_cast<RankedTensorType>(type)) {
        *output_type = RankedTensorType::get(tensor_type.getShape(), elt_type);
      }
      *output_type = UnrankedTensorType::get(elt_type);
      return absl::OkStatus();
    }
    
    Status MlirAbstractOp::Create(ArrayRef<Value> operands,
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 28.2K bytes
    - Viewed (0)
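The excerpt branches on whether the incoming type is ranked before rebuilding it with the new ref element type. A hedged sketch of that pattern, returning early in the ranked case so the ranked result type is preserved (the function name is made up for illustration):

    #include "absl/status/status.h"
    #include "mlir/IR/BuiltinTypes.h"

    // Sketch: preserve the rank/shape of `type` while swapping in `elt_type`.
    absl::Status RebuildTensorType(mlir::Type type, mlir::Type elt_type,
                                   mlir::Type *output_type) {
      if (auto tensor_type = mlir::dyn_cast<mlir::RankedTensorType>(type)) {
        *output_type =
            mlir::RankedTensorType::get(tensor_type.getShape(), elt_type);
        return absl::OkStatus();
      }
      *output_type = mlir::UnrankedTensorType::get(elt_type);
      return absl::OkStatus();
    }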
  5. tensorflow/compiler/mlir/tf2xla/internal/passes/extract_outside_compilation.cc

      if (!ranked_type)
        return context_op->emitOpError()
               << "A map_outside_compilation op's input and output types must be "
                  "ranked tensors.";
      ArrayRef<int64_t> in_shape = ranked_type.getShape();
      if (in_shape.empty() || in_shape[0] < 0) {
        return context_op->emitOpError()
               << "A map_outside_compilation op's input and output shapes must "
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue Apr 30 21:25:12 UTC 2024
    - 68.3K bytes
    - Viewed (0)
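The check rejects inputs whose leading dimension is missing or dynamic; MLIR encodes dynamic dimensions as negative sentinels, so in_shape[0] < 0 means the first dimension is unknown. A minimal sketch of that validation (the name is hypothetical):

    #include "llvm/ADT/ArrayRef.h"
    #include "mlir/IR/BuiltinTypes.h"

    // Sketch: true iff the type has at least one dimension and the leading
    // dimension is statically known (non-negative).
    bool HasStaticLeadingDim(mlir::RankedTensorType ranked_type) {
      llvm::ArrayRef<int64_t> in_shape = ranked_type.getShape();
      return !in_shape.empty() && in_shape[0] >= 0;
    }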
  6. tensorflow/compiler/mlir/lite/flatbuffer_export.cc

        llvm::ArrayRef<int64_t> shape_ref =
            mlir::cast<TensorType>(tensor_attr.getType()).getShape();
        if (mlir::failed(check_shape(shape_ref))) return std::nullopt;
    
        shape = std::vector<int32_t>(shape_ref.begin(), shape_ref.end());
      } else if (type.hasRank()) {
        llvm::ArrayRef<int64_t> shape_ref = type.getShape();
        if (mlir::failed(check_shape(shape_ref))) return std::nullopt;
    
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed Jun 12 21:41:49 UTC 2024
    - 164.5K bytes
    - Viewed (0)
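The exporter narrows MLIR's int64_t dimensions to the 32-bit dimension vector used for TFLite tensor shapes, after check_shape has vetted them. A hedged sketch of just the narrowing step, with the check elided:

    #include <cstdint>
    #include <vector>
    #include "llvm/ADT/ArrayRef.h"

    // Sketch: copy an MLIR shape into the int32_t dims used by the TFLite
    // schema; assumes the dims were already validated to fit in int32_t.
    std::vector<int32_t> ToTfliteShape(llvm::ArrayRef<int64_t> shape_ref) {
      return std::vector<int32_t>(shape_ref.begin(), shape_ref.end());
    }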
  7. tensorflow/compiler/mlir/lite/stablehlo/transforms/compose_uniform_quantized_type_pass.cc

        auto output_uniform_quantized_tensor_type = RankedTensorType::getChecked(
            rewriter.getUnknownLoc(),
            /*shape=*/
            mlir::cast<TensorType>(conv_output_value.getType()).getShape(),
            output_uniform_quantized_type);
    
        SmallVector<Type> new_conv_output_types = {
            output_uniform_quantized_tensor_type};
        auto new_conv_op_with_output_type =
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 64.6K bytes
    - Viewed (0)
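Here getChecked is used instead of get: if the shape/element-type combination fails verification, a diagnostic is emitted at the given location and a null type comes back rather than an assertion firing. A sketch of that usage (the wrapper name is hypothetical):

    #include "mlir/IR/BuiltinTypes.h"
    #include "mlir/IR/Location.h"
    #include "mlir/IR/Value.h"

    // Sketch: build the quantized result type with the convolution output's
    // shape, reporting any verification failure at `loc`.
    mlir::RankedTensorType MakeQuantizedResultType(mlir::Location loc,
                                                   mlir::Value conv_output,
                                                   mlir::Type quantized_elem_type) {
      auto shape =
          mlir::cast<mlir::TensorType>(conv_output.getType()).getShape();
      return mlir::RankedTensorType::getChecked(loc, shape, quantized_elem_type);
    }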
  8. tensorflow/c/tf_tensor.cc

      return static_cast<TF_DataType>(t->tensor->Type());
    }
    
    void TF_SetShape(TF_Tensor* t, const int64_t* dims, int num_dims) {
      tensorflow::down_cast<tensorflow::TensorInterface*>(t->tensor)->SetShape(
          dims, num_dims);
    }
    
    int TF_NumDims(const TF_Tensor* t) { return t->tensor->NumDims(); }
    
    int64_t TF_Dim(const TF_Tensor* t, int dim_index) {
      return t->tensor->Dim(dim_index);
    }
    
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Sun Apr 14 21:57:32 UTC 2024
    - 11.5K bytes
    - Viewed (0)
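These are the C API entry points for reading and mutating a tensor's shape. A small usage sketch, assuming an already-created TF_Tensor* whose buffer is compatible with the new 2x3 shape:

    #include "tensorflow/c/tf_tensor.h"

    // Sketch: set a 2x3 shape on an existing tensor, then read it back
    // through the C API accessors.
    void SetAndReadShape(TF_Tensor* t) {
      const int64_t dims[2] = {2, 3};
      TF_SetShape(t, dims, /*num_dims=*/2);
      int rank = TF_NumDims(t);           // now 2
      int64_t first_dim = TF_Dim(t, 0);   // now 2
      (void)rank;
      (void)first_dim;
    }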
  9. tensorflow/c/tf_tensor_internal.h

      size_t ByteSize() const override;
      void* Data() const override;
      bool IsAligned() const override;
      bool CanMove() const override;
      std::string SummarizeValue() const override;
    
      void SetShape(const int64_t* dims, int num_dims);
      Status ToTensor(tensorflow::Tensor* dst) const;
      Status BitcastFrom(const TensorInterface& from, DataType type,
                         const int64_t* new_dims, int num_new_dims);
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Aug 24 20:38:55 UTC 2023
    - 4.6K bytes
    - Viewed (0)
  10. tensorflow/compiler/jit/shape_inference.cc

            }
    
            shape_inference::ShapeHandle handle;
            TF_RETURN_IF_ERROR(
                context->MakeShapeFromPartialTensorShape(arg_shape.shape, &handle));
            TF_RETURN_IF_ERROR(shape_refiner->SetShape(n, 0, handle));
          }
        }
    
        // Sometimes we have VariableShape nodes in while loop (after Enter nodes).
        // They won't be constant-folded because TensorFlow constant folding does
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Fri May 31 00:41:19 UTC 2024
    - 13K bytes
    - Viewed (0)
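The snippet converts a possibly partial argument shape into a ShapeHandle and records it on the node's output 0 in the shape refiner. A hedged sketch of that pair of calls with the surrounding loop and plumbing elided (the function name is hypothetical):

    #include "tensorflow/core/common_runtime/shape_refiner.h"
    #include "tensorflow/core/framework/shape_inference.h"
    #include "tensorflow/core/framework/tensor_shape.h"
    #include "tensorflow/core/platform/errors.h"

    // Sketch: seed the refiner with a known (possibly partial) shape for the
    // first output of node `n`, using that node's inference context.
    tensorflow::Status SeedOutputShape(
        tensorflow::shape_inference::InferenceContext* context,
        tensorflow::ShapeRefiner* shape_refiner, const tensorflow::Node* n,
        const tensorflow::PartialTensorShape& shape) {
      tensorflow::shape_inference::ShapeHandle handle;
      TF_RETURN_IF_ERROR(context->MakeShapeFromPartialTensorShape(shape, &handle));
      TF_RETURN_IF_ERROR(shape_refiner->SetShape(n, 0, handle));
      return absl::OkStatus();
    }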