Results 31 - 40 of 128 for get_shape (0.29 sec)

  1. tensorflow/compiler/mlir/tensorflow/transforms/unroll_batch_matmul.cc

      RankedTensorType tensorType = mlir::cast<RankedTensorType>(value.getType());
      Type element_type = tensorType.getElementType();
    
      int rank = tensorType.getShape().size();
      int num_rows = tensorType.getShape()[rank - 2];
      int num_cols = tensorType.getShape()[rank - 1];
    
      std::vector<Value> sliced;
    
      if (batch_size == 1) {
        // Batch size is 1, no splitting is required
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 11.6K bytes
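    A minimal standalone sketch of the same getShape() calls against a hand-built type (not code from the file above; it assumes the MLIR headers and libraries are available to build against):

      #include "mlir/IR/Builders.h"
      #include "mlir/IR/BuiltinTypes.h"
      #include "mlir/IR/MLIRContext.h"
      #include <cstdio>

      int main() {
        mlir::MLIRContext context;
        mlir::Builder builder(&context);
        // A hypothetical batch of 4 matrices, each 2x3.
        auto type = mlir::RankedTensorType::get({4, 2, 3}, builder.getF32Type());
        int rank = type.getShape().size();             // 3
        int64_t num_rows = type.getShape()[rank - 2];  // 2
        int64_t num_cols = type.getShape()[rank - 1];  // 3
        std::printf("rank=%d rows=%ld cols=%ld\n", rank, (long)num_rows, (long)num_cols);
        return 0;
      }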
  2. tensorflow/compiler/mlir/tensorflow/ir/tf_ops_n_z.cc

          if (input_ty_i.hasStaticShape() &&
              input_ty_i.getShape() != input_ty_0.getShape()) {
            return op.emitOpError()
                   << "inputs[" << i << "] has shape [" << input_ty_i.getShape()
                   << "] different than the shape of inputs[0]: "
                   << input_ty_0.getShape();
          }
        }
    
        if (op.getDimensionsToReduce().size() > input_ty_0.getRank()) {
    - Last Modified: Thu May 09 22:07:10 UTC 2024
    - 170.8K bytes
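    For reference, a standalone sketch of the equality check used above: two ranked types agree exactly when both have static shapes and their getShape() ArrayRefs compare element-wise equal (assumes the MLIR headers; not the op verifier itself):

      #include "mlir/IR/BuiltinTypes.h"

      // Returns true only when both shapes are fully static and identical.
      bool SameStaticShape(mlir::RankedTensorType a, mlir::RankedTensorType b) {
        return a.hasStaticShape() && b.hasStaticShape() &&
               a.getShape() == b.getShape();  // ArrayRef<int64_t> comparison
      }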
  3. tensorflow/compiler/mlir/tensorflow/transforms/layout_optimization.cc

      if (!type.hasRank()) return type;
    
      auto shape = type.getShape();
      SmallVector<int64_t, 4> new_shape(shape.size());
    
      for (int i = 0; i < permutation.size(); ++i) {
        int64_t index = permutation[i];
        assert(index < shape.size());
        new_shape[index] = shape[i];
      }
    
      return type.clone(new_shape);
    }
    
    // Move Transpose operations that permute `op` operands after the `op`.
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 19.3K bytes
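    The loop above scatters dimension i of the old shape to position permutation[i] of the new shape. A plain C++ sketch of just that step (illustrative names, no MLIR dependency):

      #include <cassert>
      #include <cstdint>
      #include <iostream>
      #include <vector>

      std::vector<int64_t> PermuteShape(const std::vector<int64_t>& shape,
                                        const std::vector<int64_t>& permutation) {
        std::vector<int64_t> new_shape(shape.size());
        for (size_t i = 0; i < permutation.size(); ++i) {
          int64_t index = permutation[i];
          assert(static_cast<size_t>(index) < shape.size());
          new_shape[index] = shape[i];  // scatter, not gather
        }
        return new_shape;
      }

      int main() {
        // Scattering an NHWC shape [1, 2, 3, 4] with permutation [0, 2, 3, 1]
        // yields the NCHW shape [1, 4, 2, 3].
        for (int64_t d : PermuteShape({1, 2, 3, 4}, {0, 2, 3, 1}))
          std::cout << d << ' ';
        std::cout << '\n';
        return 0;
      }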
  4. tensorflow/compiler/mlir/lite/stablehlo/transforms/legalize_hlo_conversions/util.cc

                                       ConversionPatternRewriter& rewriter) {
      if (index_vector_dim == indices_type.getRank()) {
        llvm::SmallVector<int64_t, 4> new_start_indices_shape(
            indices_type.getShape().begin(), indices_type.getShape().end());
        new_start_indices_shape.push_back(1);
        indices_type = RankedTensorType::get(new_start_indices_shape,
                                             indices_type.getElementType());
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 10.1K bytes
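    A standalone sketch of the shape-extension pattern above: copy getShape() into a SmallVector, append a trailing unit dimension, and rebuild the ranked type (assumes the MLIR headers; not the conversion pattern itself):

      #include "llvm/ADT/SmallVector.h"
      #include "mlir/IR/BuiltinTypes.h"

      // e.g. tensor<5x3xi32> -> tensor<5x3x1xi32>
      mlir::RankedTensorType AppendUnitDim(mlir::RankedTensorType type) {
        llvm::SmallVector<int64_t, 4> new_shape(type.getShape().begin(),
                                                type.getShape().end());
        new_shape.push_back(1);
        return mlir::RankedTensorType::get(new_shape, type.getElementType());
      }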
  5. tensorflow/compiler/mlir/lite/ir/tfl_canonicalize.td

    def GetSqueezedPermutation: NativeCodeCall<"GetSqueezedPermutation($0, $1)">;
    
    // Check to see if the tensor dimensions can be Squeezed by eliminating 1s'
    def CanSqueezeTensor : Constraint<CPred<
      "GetShape($0).getNumElements() > GetSqueezedShape($0).getNumElements()">>;
    
    
    // Pattern to convert TFL_TransposeOp with rank>6 to rank<=6 if there are
    // redundant dimensions in the tensor. For example- [2x1x3] == [2x3] and 1 is
    - Last Modified: Wed Dec 13 20:41:03 UTC 2023
    - 2.7K bytes
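    The constraint above compares the original and squeezed shapes; squeezing helps only when dropping size-1 dimensions actually shortens the shape. A plain C++ sketch of that "squeeze" idea (the names are illustrative, not the TFL helpers):

      #include <cstdint>
      #include <vector>

      // Drop redundant unit dimensions, e.g. [2, 1, 3] -> [2, 3].
      std::vector<int64_t> SqueezedShape(const std::vector<int64_t>& shape) {
        std::vector<int64_t> squeezed;
        for (int64_t dim : shape)
          if (dim != 1) squeezed.push_back(dim);
        return squeezed;
      }

      // The tensor can be squeezed only if at least one unit dimension goes away.
      bool CanSqueeze(const std::vector<int64_t>& shape) {
        return SqueezedShape(shape).size() < shape.size();
      }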
  6. tensorflow/compiler/mlir/tensorflow/transforms/batchmatmul_to_einsum.cc

        auto rhs_type = mlir::dyn_cast<RankedTensorType>(input_rhs.getType());
    
        if (!lhs_type || !rhs_type) return failure();
    
        auto lhs_shape = lhs_type.getShape();
        auto rhs_shape = rhs_type.getShape();
    
        // Ensure that input ranks are at least 2.
        const int dims_a = lhs_shape.size();
        const int dims_b = rhs_shape.size();
        if (dims_a < 2 || dims_b < 2) {
          return failure();
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 3.8K bytes
  7. tensorflow/compiler/mlir/tensorflow/transforms/fold_broadcast.cc

        // Get the unbroadcasted shapes in the operand order.
        std::array<llvm::ArrayRef<int64_t>, 2> operand_shapes;
        operand_shapes[i] = broadcast_arg_type.getShape();
        operand_shapes[1 - i] = argument_type.getShape();
    
        // Check that the input of the broadcast and the other operand is broadcast
        // compatible.
        llvm::SmallVector<int64_t, 4> broadcasted_shape;
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 7.9K bytes
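    The compatibility check referred to above follows the usual broadcasting rule: align the two shapes from the trailing dimension, and each pair of sizes must be equal or one of them must be 1. A plain C++ sketch (illustrative, not the MLIR helper; dynamic dimensions are ignored here):

      #include <algorithm>
      #include <cstdint>
      #include <vector>

      bool BroadcastCompatible(const std::vector<int64_t>& a,
                               const std::vector<int64_t>& b) {
        size_t n = std::max(a.size(), b.size());
        for (size_t i = 1; i <= n; ++i) {
          // Missing leading dimensions are treated as 1.
          int64_t da = i <= a.size() ? a[a.size() - i] : 1;
          int64_t db = i <= b.size() ? b[b.size() - i] : 1;
          if (da != db && da != 1 && db != 1) return false;
        }
        return true;
      }

      // e.g. [4, 1, 3] and [2, 3] are compatible; [4, 2] and [3] are not.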
  8. tensorflow/compiler/mlir/lite/utils/utils.td

    def CreateNoneValue : NativeCodeCall<
      "$_builder.create<TFL::NoValueOp>($0.getLoc(), $_builder.getUnitAttr())">;
    
    // Returns shape of a ranked tensor.
    // if called without a ranked tensor it will fail.
    def GetShape: NativeCodeCall<"GetShape($0)">;
    
    // Constraint that values in list attribute are all ones.
    def IsAllOnesConstant : Constraint<CPred<"TFL::IsAllOnesConstant($0)">>;
    
    - Last Modified: Tue Apr 30 00:40:15 UTC 2024
    - 4.8K bytes
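    A minimal, hypothetical sketch of what a GetShape-style helper behind that NativeCodeCall could look like: read the ranked tensor type's shape and wrap it in a constant attribute. The name GetShapeAttr and the exact return type are assumptions for illustration, not the real TFL helper:

      #include "mlir/IR/Builders.h"
      #include "mlir/IR/BuiltinTypes.h"
      #include "mlir/IR/Value.h"

      mlir::DenseIntElementsAttr GetShapeAttr(mlir::Value value,
                                              mlir::Builder& builder) {
        // Asserts (fails) if the value is not a ranked tensor, mirroring the
        // comment in utils.td above.
        auto type = mlir::cast<mlir::RankedTensorType>(value.getType());
        return builder.getI64TensorAttr(type.getShape());
      }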
  9. tensorflow/compiler/mlir/quantization/tensorflow/utils/tf_to_xla_attribute_utils_test.cc

      ShapedType packed_shape_type =
          mlir::dyn_cast<ShapedType>(packed_value.getType());
      llvm::SmallVector<int64_t> packed_shape(packed_shape_type.getShape().begin(),
                                              packed_shape_type.getShape().end());
      EXPECT_THAT(packed_shape, testing::ElementsAreArray(expected_packed_shape));
      llvm::SmallVector<int8_t> packed_value_vector(
          packed_value_attr.getValues<int8_t>());
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 3.5K bytes
  10. tensorflow/compiler/mlir/tensorflow/transforms/tensor_array_ops_decomposition.cc

              // TensorArrayScatter `value`.
              auto t = scatter.getValue().getType().dyn_cast<RankedTensorType>();
              if (!t || t.getShape().empty()) return std::nullopt;
              return RankedTensorType::get(t.getShape().drop_front(),
                                           t.getElementType());
            } else if (auto gather =
                           llvm::dyn_cast<TF::TensorArrayGatherV3Op>(user)) {
    - Last Modified: Thu Mar 02 20:41:19 UTC 2023
    - 40.2K bytes
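    A minimal sketch of the drop_front() pattern above: build the per-element type of a batched value by dropping the leading dimension from its shape (assumes the MLIR headers; not the decomposition pass itself):

      #include "mlir/IR/BuiltinTypes.h"

      // e.g. tensor<8x4x4xf32> -> tensor<4x4xf32>
      mlir::RankedTensorType ElementTypeOf(mlir::RankedTensorType batched) {
        return mlir::RankedTensorType::get(batched.getShape().drop_front(),
                                           batched.getElementType());
      }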