Results 11 - 20 of 92 for GetShape (0.15 sec)

  1. tensorflow/compiler/mlir/lite/stablehlo/transforms/legalize_hlo_conversions/util.cc

                                       ConversionPatternRewriter& rewriter) {
      if (index_vector_dim == indices_type.getRank()) {
        llvm::SmallVector<int64_t, 4> new_start_indices_shape(
            indices_type.getShape().begin(), indices_type.getShape().end());
        new_start_indices_shape.push_back(1);
        indices_type = RankedTensorType::get(new_start_indices_shape,
                                             indices_type.getElementType());
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 10.1K bytes
    - Viewed (0)
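
    The snippet above appends a trailing unit dimension to the start-indices type when index_vector_dim equals the tensor's rank. A minimal plain-C++ sketch of just that shape arithmetic (no MLIR types; names are illustrative, not the actual pass):

      #include <cstdint>
      #include <iostream>
      #include <vector>

      // Mirrors the shape logic above: when index_vector_dim equals the rank
      // of the indices tensor, the start indices gain an implicit trailing
      // dimension of size 1.
      std::vector<int64_t> ExpandStartIndicesShape(std::vector<int64_t> shape,
                                                   int64_t index_vector_dim) {
        if (index_vector_dim == static_cast<int64_t>(shape.size()))
          shape.push_back(1);
        return shape;
      }

      int main() {
        for (int64_t d : ExpandStartIndicesShape({4, 7}, /*index_vector_dim=*/2))
          std::cout << d << ' ';  // prints: 4 7 1
      }
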
  2. tensorflow/compiler/mlir/lite/experimental/tac/transforms/device_transform_patterns.cc

      auto x_type = mlir::dyn_cast<RankedTensorType>(x.getType());
      auto y_type = mlir::dyn_cast<RankedTensorType>(y.getType());
      if (!x_type || !y_type) return failure();
      if (x_type.getShape() != y_type.getShape()) return failure();
    
      auto result_type = squared_diff_op.getType();
      if (!result_type) return failure();
    
      auto sub_op =
          rewriter.create<TF::SubOp>(squared_diff_op.getLoc(), result_type, x, y);
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 25.4K bytes
    - Viewed (0)
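
    For context, the rewrite above begins lowering SquaredDifference into elementwise ops; mathematically SquaredDifference(x, y) = (x - y) * (x - y), which is why matching static shapes are required. A hedged plain-C++ sketch of the elementwise computation (illustrative only, not the TAC pass itself):

      #include <cstddef>
      #include <vector>

      // Elementwise (x - y)^2 over two equally shaped, flattened tensors.
      std::vector<float> SquaredDifference(const std::vector<float>& x,
                                           const std::vector<float>& y) {
        std::vector<float> out(x.size());
        for (std::size_t i = 0; i < x.size(); ++i) {
          const float d = x[i] - y[i];
          out[i] = d * d;
        }
        return out;
      }
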
  3. tensorflow/compiler/mlir/lite/ir/tfl_canonicalize.td

    def GetSqueezedPermutation: NativeCodeCall<"GetSqueezedPermutation($0, $1)">;
    
    // Check to see if the tensor dimensions can be squeezed by eliminating 1s.
    def CanSqueezeTensor : Constraint<CPred<
      "GetShape($0).getNumElements() > GetSqueezedShape($0).getNumElements()">>;
    
    
    // Pattern to convert TFL_TransposeOp with rank>6 to rank<=6 if there are
    // redundant dimensions in the tensor. For example, [2x1x3] == [2x3] and 1 is
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed Dec 13 20:41:03 UTC 2023
    - 2.7K bytes
    - Viewed (0)
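
    A plain-C++ sketch of the idea behind the CanSqueezeTensor constraint as read from this snippet, assuming GetSqueezedShape simply drops size-1 dimensions (the helper names below are illustrative, not the TFL implementation):

      #include <cstdint>
      #include <vector>

      // Drop redundant unit dimensions, keeping at least one dimension.
      std::vector<int64_t> SqueezedShapeSketch(const std::vector<int64_t>& shape) {
        std::vector<int64_t> out;
        for (int64_t d : shape)
          if (d != 1) out.push_back(d);
        if (out.empty()) out.push_back(1);
        return out;
      }

      // The constraint fires only when squeezing removes at least one
      // dimension, e.g. {2, 1, 3} -> {2, 3}, matching the [2x1x3] == [2x3]
      // example above.
      bool CanSqueezeSketch(const std::vector<int64_t>& shape) {
        return SqueezedShapeSketch(shape).size() < shape.size();
      }
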
  4. tensorflow/compiler/mlir/lite/ir/tfl_ops.cc

      new_values.reserve(num_elements);
      const auto result_shape = type.getShape();
      std::vector<int64_t> current_index(type.getRank(), 0);
      // Create the new shape with ones padded to the left.
      const std::vector<int64_t> lhs_new_shape =
          GetPaddedShape(lhs.getType().getShape(), type.getRank());
      const std::vector<int64_t> rhs_new_shape =
          GetPaddedShape(rhs.getType().getShape(), type.getRank());
    
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 169.2K bytes
    - Viewed (0)
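
    A hedged reading of what GetPaddedShape does here: left-pad a shape with 1s until it reaches the result rank, the usual broadcasting alignment. A plain-C++ sketch (illustrative, not the tfl_ops.cc helper):

      #include <algorithm>
      #include <cstddef>
      #include <cstdint>
      #include <vector>

      // Left-pad `shape` with 1s up to `target_rank`; assumes
      // shape.size() <= target_rank.
      std::vector<int64_t> GetPaddedShapeSketch(const std::vector<int64_t>& shape,
                                                std::size_t target_rank) {
        std::vector<int64_t> padded(target_rank, 1);
        std::copy(shape.begin(), shape.end(),
                  padded.begin() + (target_rank - shape.size()));
        return padded;
      }
      // GetPaddedShapeSketch({4, 5}, 4) -> {1, 1, 4, 5}
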
  5. tensorflow/compiler/mlir/tensorflow/transforms/batchmatmul_to_einsum.cc

        auto rhs_type = mlir::dyn_cast<RankedTensorType>(input_rhs.getType());
    
        if (!lhs_type || !rhs_type) return failure();
    
        auto lhs_shape = lhs_type.getShape();
        auto rhs_shape = rhs_type.getShape();
    
        // Ensure that input ranks are at least 2.
        const int dims_a = lhs_shape.size();
        const int dims_b = rhs_shape.size();
        if (dims_a < 2 || dims_b < 2) {
          return failure();
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 3.8K bytes
    - Viewed (0)
  6. tensorflow/compiler/mlir/tensorflow/transforms/fold_broadcast.cc

        // Get the unbroadcasted shapes in the operand order.
        std::array<llvm::ArrayRef<int64_t>, 2> operand_shapes;
        operand_shapes[i] = broadcast_arg_type.getShape();
        operand_shapes[1 - i] = argument_type.getShape();
    
        // Check that the input of the broadcast and the other operand are
        // broadcast compatible.
        llvm::SmallVector<int64_t, 4> broadcasted_shape;
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 7.9K bytes
    - Viewed (0)
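
    The "broadcast compatible" check the comment refers to is the standard numpy-style rule: after right-aligning the two shapes, each dimension pair must be equal or contain a 1. A plain-C++ sketch of that rule for static dimensions (illustrative only):

      #include <algorithm>
      #include <cstddef>
      #include <cstdint>
      #include <optional>
      #include <utility>
      #include <vector>

      // Returns the broadcasted shape of `a` and `b`, or std::nullopt if the
      // two shapes are not broadcast compatible.
      std::optional<std::vector<int64_t>> BroadcastShapes(std::vector<int64_t> a,
                                                          std::vector<int64_t> b) {
        if (a.size() < b.size()) std::swap(a, b);
        b.insert(b.begin(), a.size() - b.size(), 1);  // right-align the shapes
        std::vector<int64_t> out(a.size());
        for (std::size_t i = 0; i < a.size(); ++i) {
          if (a[i] != b[i] && a[i] != 1 && b[i] != 1) return std::nullopt;
          out[i] = std::max(a[i], b[i]);
        }
        return out;
      }
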
  7. tensorflow/compiler/mlir/lite/utils/utils.td

    def CreateNoneValue : NativeCodeCall<
      "$_builder.create<TFL::NoValueOp>($0.getLoc(), $_builder.getUnitAttr())">;
    
    // Returns the shape of a ranked tensor.
    // If called without a ranked tensor, it will fail.
    def GetShape: NativeCodeCall<"GetShape($0)">;
    
    // Constraint that values in list attribute are all ones.
    def IsAllOnesConstant : Constraint<CPred<"TFL::IsAllOnesConstant($0)">>;
    
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue Apr 30 00:40:15 UTC 2024
    - 4.8K bytes
    - Viewed (0)
  8. tensorflow/compiler/mlir/quantization/tensorflow/utils/tf_to_xla_attribute_utils_test.cc

      ShapedType packed_shape_type =
          mlir::dyn_cast<ShapedType>(packed_value.getType());
      llvm::SmallVector<int64_t> packed_shape(packed_shape_type.getShape().begin(),
                                              packed_shape_type.getShape().end());
      EXPECT_THAT(packed_shape, testing::ElementsAreArray(expected_packed_shape));
      llvm::SmallVector<int8_t> packed_value_vector(
          packed_value_attr.getValues<int8_t>());
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 3.5K bytes
    - Viewed (0)
  9. tensorflow/compiler/mlir/tensorflow/transforms/tensor_array_ops_decomposition.cc

              // TensorArrayScatter `value`.
              auto t = scatter.getValue().getType().dyn_cast<RankedTensorType>();
              if (!t || t.getShape().empty()) return std::nullopt;
              return RankedTensorType::get(t.getShape().drop_front(),
                                           t.getElementType());
            } else if (auto gather =
                           llvm::dyn_cast<TF::TensorArrayGatherV3Op>(user)) {
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Mar 02 20:41:19 UTC 2023
    - 40.2K bytes
    - Viewed (0)
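
    The drop_front above derives the tensor array's per-element type from the scattered value: the leading dimension indexes the elements being scattered, so the element shape is everything after it. A plain-C++ sketch of just that shape step (names illustrative):

      #include <cstdint>
      #include <optional>
      #include <vector>

      // Per-element shape of a TensorArrayScatter value: drop the leading
      // (element-count) dimension; fail if the value has no dimensions at all.
      std::optional<std::vector<int64_t>> ElementShapeFromScatterValue(
          const std::vector<int64_t>& value_shape) {
        if (value_shape.empty()) return std::nullopt;
        return std::vector<int64_t>(value_shape.begin() + 1, value_shape.end());
      }
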
  10. tensorflow/compiler/mlir/lite/stablehlo/transforms/composite_avg_pool.cc

      TorchAvgPoolData data;
    
      auto op_type = mlir::cast<RankedTensorType>(op.getOperand(0).getType());
    
      data.n = op_type.getShape()[0];
      data.c = op_type.getShape()[1];
      data.h_in = op_type.getShape()[2];
      data.w_in = op_type.getShape()[3];
    
      std::vector<int32_t> kernel_size;
      GetI32VectorFromDenseI64CompositeAttr(composite_attrs, "kernel_size",
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue May 28 23:16:05 UTC 2024
    - 9.2K bytes
    - Viewed (0)
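
    The indexing above assumes the pooling operand is a ranked 4-D tensor in [N, C, H, W] order, the layout the Torch average-pool composite encodes. A trivial plain-C++ sketch of that unpacking (struct and names illustrative):

      #include <array>
      #include <cstdint>

      struct PoolDims {
        int64_t n, c, h_in, w_in;
      };

      // Unpack an NCHW shape into named fields, mirroring data.n/.c/.h_in/.w_in.
      PoolDims UnpackNCHW(const std::array<int64_t, 4>& shape) {
        return {shape[0], shape[1], shape[2], shape[3]};
      }
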