Results 1 - 10 of 56 for ShapedType (0.12 sec)

  1. tensorflow/compiler/mlir/lite/utils/utils.td

      "TFL::IsTransposeTrivial($0.getType().cast<ShapedType>().getShape(), $1)">>;
    
    // Constraint that checks if the reshape op is equivalent to a transpose op.
    // This is true if the reshape op is a trivial reshape op, meaning no change in
    // the order of non-identity dimensions.
    def IsReshapeEquivalentToTranspose : Constraint<CPred<
      "TFL::IsReshapeEquivalentToTranspose("
        "$0.getType().cast<ShapedType>(),"
        "$1.getType().cast<ShapedType>())">>;
    - Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue Apr 30 00:40:15 UTC 2024
    - 4.8K bytes
    - Viewed (0)
  2. tensorflow/compiler/mlir/tensorflow/ir/tf_ops_tensor_helper.cc

    LogicalResult VerifyTypesCompatibility(Operation::operand_type_range types,
                                           bool mask_one_dim, Operation *op) {
      int64_t common_rank = ShapedType::kDynamic;
      llvm::SmallVector<int64_t, 4> common_dims;
      int64_t dim_to_mask = ShapedType::kDynamic;
    
      // Initialize common_rank with rank of the first ranked type and verify that
      // following ranked types have the same rank.
    - Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 6.7K bytes
    - Viewed (0)
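
    The snippet above uses ShapedType::kDynamic as the sentinel for an unknown rank or dimension size. A minimal sketch of the same convention; MergeDims is a hypothetical helper, not code from the file:

    #include "llvm/ADT/ArrayRef.h"
    #include "llvm/ADT/STLExtras.h"
    #include "llvm/ADT/SmallVector.h"
    #include "mlir/IR/BuiltinTypes.h"

    // Hypothetical helper (not from the file above): merge two dimension
    // lists, keeping a concrete size when the left side knows it and falling
    // back to the right side, which may itself be ShapedType::kDynamic.
    llvm::SmallVector<int64_t, 4> MergeDims(llvm::ArrayRef<int64_t> lhs,
                                            llvm::ArrayRef<int64_t> rhs) {
      llvm::SmallVector<int64_t, 4> merged;
      for (auto [l, r] : llvm::zip(lhs, rhs))
        merged.push_back(mlir::ShapedType::isDynamic(l) ? r : l);
      return merged;
    }
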
  3. tensorflow/compiler/mlir/quantization/tensorflow/passes/prepare_lifting.td

      CPred<"quant::ReshapableTo1DTensor($0.getType().cast<ShapedType>())">,
      "Checks if the value dims are all ones except the right most dim">;
    
    def ReshapeTo1DTensor : NativeCodeCall<
      "quant::ReshapeTo1DTensor($_builder, $_loc, $0)">;
    
    def HasEqualShape : Constraint<CPred<
      "$0.getType().cast<ShapedType>().hasRank() && "
      "$1.getType().cast<ShapedType>().hasRank() && "
    - Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed Feb 14 03:24:59 UTC 2024
    - 8.4K bytes
    - Viewed (0)
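
    The HasEqualShape constraint above is cut off by the excerpt. A plain C++ rendering of the CPred; the final shape comparison is an assumption about how the constraint finishes:

    #include "mlir/IR/BuiltinTypes.h"

    // Plain C++ rendering of the HasEqualShape CPred; the excerpt cuts off
    // before the final comparison, so the shape-equality check here is an
    // assumption about how the constraint finishes.
    bool HasEqualShape(mlir::Type lhs, mlir::Type rhs) {
      auto lhs_ty = mlir::cast<mlir::ShapedType>(lhs);
      auto rhs_ty = mlir::cast<mlir::ShapedType>(rhs);
      return lhs_ty.hasRank() && rhs_ty.hasRank() &&
             lhs_ty.getShape() == rhs_ty.getShape();
    }
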
  4. tensorflow/compiler/mlir/tensorflow/ir/tf_arith_ops_folder.h

    OpFoldResult IdentityArithmeticOpFolder(OpT arithmetic_op,
                                            ArrayRef<Attribute> operands) {
      auto lhs_type = mlir::cast<ShapedType>(arithmetic_op.getX().getType());
      auto rhs_type = mlir::cast<ShapedType>(arithmetic_op.getY().getType());
      auto result_type =
          mlir::cast<ShapedType>(arithmetic_op.getResult().getType());
    
      // We can fold the arithmetic operation only if we can prove that we will not
    - Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 5.3K bytes
    - Viewed (0)
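
    The folder above uses mlir::cast<ShapedType>, which assumes the conversion succeeds; mlir::dyn_cast<ShapedType> instead yields a null handle when the type is not shaped. An illustration only; RankOrMinusOne is a hypothetical helper:

    #include "mlir/IR/BuiltinTypes.h"

    // Illustration only: mlir::cast<> (used in the folder above) assumes the
    // conversion succeeds, while mlir::dyn_cast<> returns a null ShapedType
    // that tests false when the type is not shaped.
    int64_t RankOrMinusOne(mlir::Type type) {
      if (auto shaped = mlir::dyn_cast<mlir::ShapedType>(type))
        return shaped.hasRank() ? shaped.getRank() : -1;
      return -1;  // not a shaped type at all (e.g. a scalar index or f32)
    }
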
  5. tensorflow/compiler/mlir/quantization/common/attrs_and_constraints.cc

    using ::mlir::stablehlo::DotGeneralOp;
    
    bool HasStaticShape(Value value) {
      auto shaped_type = mlir::dyn_cast<ShapedType>(value.getType());
      if (!shaped_type) return false;
    
      return shaped_type.hasStaticShape();
    }
    
    bool HasStaticShapeAtDims(Value value, const ArrayRef<int> dims) {
      auto shaped_type = mlir::dyn_cast<ShapedType>(value.getType());
      if (!shaped_type || !shaped_type.hasRank()) return false;
    
      for (auto dim : dims) {
    - Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 6.8K bytes
    - Viewed (0)
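
    HasStaticShapeAtDims above guards with dyn_cast and hasRank before touching any dimension. A sketch of the same pattern for a single dimension; HasStaticDim is a hypothetical companion, not code from the file:

    #include "mlir/IR/BuiltinTypes.h"
    #include "mlir/IR/Value.h"

    // Hypothetical companion to HasStaticShapeAtDims above: true only when
    // `value` has a ranked shaped type whose dimension `dim` is in range and
    // static.
    bool HasStaticDim(mlir::Value value, int64_t dim) {
      auto shaped_type = mlir::dyn_cast<mlir::ShapedType>(value.getType());
      if (!shaped_type || !shaped_type.hasRank()) return false;
      if (dim < 0 || dim >= shaped_type.getRank()) return false;
      return !shaped_type.isDynamicDim(dim);
    }
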
  6. tensorflow/compiler/mlir/lite/stablehlo/transforms/composite_utils.cc

      if (auto bool_attr = mlir::dyn_cast_or_null<BoolAttr>(attr)) {
        return bool_attr.getValue();
      }
      return std::nullopt;
    }
    
    ShapedType GetNhwcReturnTypeFromNchw(Operation* old_op) {
      auto composite_result_shape =
          mlir::cast<ShapedType>(old_op->getResults().front().getType()).getShape();
      std::array<int64_t, 4> output_shape;
      // NHWC <- NCHW
      output_shape[0] = composite_result_shape[0];
    - Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed May 29 18:33:05 UTC 2024
    - 3.4K bytes
    - Viewed (0)
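
    GetNhwcReturnTypeFromNchw reads the 4-D NCHW result shape and reorders it into NHWC. A minimal sketch of just that permutation, assuming exactly four dimensions; NchwToNhwc is a hypothetical helper:

    #include <array>
    #include <cstdint>

    #include "llvm/ADT/ArrayRef.h"

    // Minimal sketch of the NCHW -> NHWC permutation performed above on the
    // composite result shape; assumes exactly four dimensions.
    std::array<int64_t, 4> NchwToNhwc(llvm::ArrayRef<int64_t> nchw) {
      return {nchw[0],   // N stays first
              nchw[2],   // H
              nchw[3],   // W
              nchw[1]};  // C moves last
    }
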
  7. tensorflow/compiler/mlir/lite/transforms/decompose_hybrid_quantization.cc

        Operation *op = srcop.getOperation();
        bool allTypesFp = true;
        bool allTypesQuantizedOrInt = true;
        for (auto operand : op->getOperands()) {
          ShapedType type = mlir::dyn_cast<ShapedType>(operand.getType());
          if (!type) continue;
          allTypesFp &= !mlir::isa<quant::QuantizedType>(type.getElementType());
          allTypesQuantizedOrInt &=
    - Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 5.8K bytes
    - Viewed (0)
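
    The loop above classifies operands by element type rather than by the shaped type itself. A sketch of that test in isolation; IsQuantizedTensor is hypothetical, and the quant header path varies across MLIR versions:

    #include "mlir/Dialect/Quant/QuantTypes.h"  // path differs across MLIR versions
    #include "mlir/IR/BuiltinTypes.h"

    // Sketch of the per-operand test above, in isolation: a type counts as a
    // quantized tensor if it is shaped and its element type is a
    // quant::QuantizedType.
    bool IsQuantizedTensor(mlir::Type type) {
      auto shaped = mlir::dyn_cast<mlir::ShapedType>(type);
      return shaped &&
             mlir::isa<mlir::quant::QuantizedType>(shaped.getElementType());
    }
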
  8. tensorflow/compiler/mlir/lite/utils/constant_utils.cc

      }
    }
    
    // Returns a Constant op with a splat vector value.
    absl::StatusOr<arith::ConstantOp> CreateConstOpWithVectorValue(
        PatternRewriter* rewriter, Location loc, ShapedType shaped_type,
        int value) {
      ShapedType dense_type = RankedTensorType::get(shaped_type.getShape(),
                                                    shaped_type.getElementType());
      auto attr = CreateTypedAttr(dense_type, value);
    
    - Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 6.5K bytes
    - Viewed (0)
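
    CreateConstOpWithVectorValue rebuilds a RankedTensorType from the incoming ShapedType's shape and element type before materializing the attribute. A sketch of that step; MakeFloatSplat is hypothetical, CreateTypedAttr itself is not shown in the excerpt, and a ranked f32-element type is assumed:

    #include "mlir/IR/BuiltinAttributes.h"
    #include "mlir/IR/BuiltinTypes.h"

    // Sketch only (CreateTypedAttr itself is not shown in the excerpt):
    // rebuild a ranked tensor type from the incoming shape/element type and
    // wrap a splat value in a DenseElementsAttr. Assumes `shaped_type` is
    // ranked and has an f32 element type.
    mlir::DenseElementsAttr MakeFloatSplat(mlir::ShapedType shaped_type,
                                           float value) {
      auto dense_type = mlir::RankedTensorType::get(shaped_type.getShape(),
                                                    shaped_type.getElementType());
      return mlir::DenseElementsAttr::get(dense_type, value);
    }
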
  9. tensorflow/compiler/mlir/lite/utils/constant_utils.h

    absl::StatusOr<arith::ConstantOp> CreateConstOpWithSingleValue(
        PatternRewriter* rewriter, Location loc, ShapedType shaped_type, int value);
    
    // Returns a Constant op with a splat vector value.
    absl::StatusOr<arith::ConstantOp> CreateConstOpWithVectorValue(
        PatternRewriter* rewriter, Location loc, ShapedType shaped_type, int value);
    
    }  // namespace TFL
    }  // namespace mlir
    - Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue Feb 27 06:24:28 UTC 2024
    - 1.7K bytes
    - Viewed (0)
  10. tensorflow/compiler/mlir/tensorflow/utils/export_utils.h

    //   bool hasRank()
    //   ArrayRef<int64_t> getShape()
    // This includes mlir::TF::ShapeAttr and mlir::ShapedType.
    template <typename ShapeContainerT>
    void SetTensorShapeProto(ShapeContainerT shape, TensorShapeProto* proto) {
      if (shape.hasRank()) {
        for (int64_t dim : shape.getShape()) {
          proto->add_dim()->set_size(mlir::ShapedType::isDynamic(dim) ? -1 : dim);
        }
      } else {
        proto->set_unknown_rank(true);
      }
    }
    
    - Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Fri Apr 26 09:37:10 UTC 2024
    - 3.9K bytes
    - Viewed (0)
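
    SetTensorShapeProto above is duck-typed over anything exposing hasRank()/getShape(), so an mlir::ShapedType can be passed directly and dynamic dimensions come back as -1. A hypothetical call site, assuming the listed header is included and the function lives in the tensorflow namespace:

    #include "mlir/IR/BuiltinTypes.h"
    #include "mlir/IR/Value.h"
    #include "tensorflow/compiler/mlir/tensorflow/utils/export_utils.h"
    #include "tensorflow/core/framework/tensor_shape.pb.h"

    // Hypothetical call site: any container exposing hasRank()/getShape()
    // works, so an mlir::ShapedType can be passed directly; dynamic
    // dimensions come back as -1 in the proto. Assumes SetTensorShapeProto
    // lives in the tensorflow namespace as the header suggests.
    tensorflow::TensorShapeProto ShapeProtoFromValue(mlir::Value value) {
      tensorflow::TensorShapeProto proto;
      tensorflow::SetTensorShapeProto(
          mlir::cast<mlir::ShapedType>(value.getType()), &proto);
      return proto;
    }
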