Results 21 - 30 of 110 for get_shape (0.17 sec)

  1. tensorflow/compiler/mlir/tensorflow/transforms/lower_tf.cc

        if (!input_ty.hasRank() || input_ty.getRank() != 4) {
          return failure();
        }
    
        int64_t batch_cst = input_ty.getShape()[0];
        int64_t channels_cst = input_ty.getShape()[3];
    
        int64_t in_y_cst = input_ty.getShape()[1];
        int64_t in_x_cst = input_ty.getShape()[2];
        int64_t in_spatial_cst =
            in_y_cst < 0 || in_x_cst < 0 ? -1 : in_y_cst * in_x_cst;
    
    Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 74.9K bytes - Viewed (0)
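
A note on the idiom in the lower_tf.cc excerpt above: once the rank check passes, `getShape()` is indexed directly and a dynamic spatial size is propagated as -1. The following is a minimal, self-contained sketch of that pattern, not taken from the file above; it assumes an MLIR installation and builds a hypothetical 4-D NHWC type on the spot.

    // shape_indexing_demo.cc -- hypothetical standalone sketch; requires MLIR headers and libraries.
    #include "llvm/Support/raw_ostream.h"
    #include "mlir/IR/Builders.h"
    #include "mlir/IR/BuiltinTypes.h"
    #include "mlir/IR/MLIRContext.h"

    int main() {
      mlir::MLIRContext context;
      mlir::Builder builder(&context);

      // A 4-D NHWC tensor type with a dynamic spatial dimension: tensor<1x?x8x3xf32>.
      auto input_ty = mlir::RankedTensorType::get(
          {1, mlir::ShapedType::kDynamic, 8, 3}, builder.getF32Type());

      // Same guard as the excerpt: only proceed for ranked 4-D types.
      if (!input_ty.hasRank() || input_ty.getRank() != 4) return 1;

      int64_t batch = input_ty.getShape()[0];
      int64_t in_y = input_ty.getShape()[1];
      int64_t in_x = input_ty.getShape()[2];
      int64_t channels = input_ty.getShape()[3];

      // Dynamic dimensions are negative, so the product is only formed when both are static.
      int64_t in_spatial = (in_y < 0 || in_x < 0) ? -1 : in_y * in_x;

      llvm::outs() << "batch=" << batch << " spatial=" << in_spatial
                   << " channels=" << channels << "\n";
      return 0;
    }
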
  2. tensorflow/compiler/mlir/tensorflow/transforms/collection_ops_util.cc

                               Location loc) {
      auto buffer_type = mlir::cast<RankedTensorType>(buffer.getType());
      if (buffer_type.getShape().size() == 1) return index;
      // Create a concat of index and trailing zeros.
      llvm::SmallVector<int64_t, 8> zeros(buffer_type.getShape().size() - 1, 0);
      auto zeros_tensor = GetR1Const(zeros, builder, loc);
      return builder.create<TF::ConcatV2Op>(
          loc,
    Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 14.5K bytes - Viewed (0)
  3. tensorflow/compiler/mlir/quantization/common/ir/UniformSupport.cc

      if (auto tensor_type = dyn_cast<RankedTensorType>(input_type))
        return RankedTensorType::get(tensor_type.getShape(), elemental_type);
      if (auto tensor_type = dyn_cast<UnrankedTensorType>(input_type))
        return UnrankedTensorType::get(elemental_type);
      if (auto vector_type = dyn_cast<VectorType>(input_type))
        return VectorType::get(vector_type.getShape(), elemental_type);
    
      // If the expressed types match, just use the new elemental type.
    Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed May 08 02:10:16 UTC 2024 - 4.3K bytes - Viewed (0)
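
The UniformSupport.cc excerpt above rebuilds a container type around a new element type. Below is a rough sketch of the same dyn_cast chain with a scalar fallback and a small usage; the helper name and the fallback behaviour are assumptions, not the quantization library's API.

    // element_type_swap_demo.cc -- hypothetical sketch; requires MLIR headers and libraries.
    #include "llvm/Support/raw_ostream.h"
    #include "mlir/IR/Builders.h"
    #include "mlir/IR/BuiltinTypes.h"
    #include "mlir/IR/MLIRContext.h"

    // Keep the container (ranked tensor, unranked tensor, or vector) and its shape,
    // but swap in a new element type; fall back to the element type itself for scalars.
    static mlir::Type CloneWithElementType(mlir::Type input_type, mlir::Type elemental_type) {
      if (auto tensor_type = mlir::dyn_cast<mlir::RankedTensorType>(input_type))
        return mlir::RankedTensorType::get(tensor_type.getShape(), elemental_type);
      if (mlir::isa<mlir::UnrankedTensorType>(input_type))
        return mlir::UnrankedTensorType::get(elemental_type);
      if (auto vector_type = mlir::dyn_cast<mlir::VectorType>(input_type))
        return mlir::VectorType::get(vector_type.getShape(), elemental_type);
      return elemental_type;
    }

    int main() {
      mlir::MLIRContext context;
      mlir::Builder builder(&context);
      auto src = mlir::RankedTensorType::get({2, 3}, builder.getF32Type());
      mlir::Type dst = CloneWithElementType(src, builder.getIntegerType(8));
      llvm::outs() << dst << "\n";  // expected: tensor<2x3xi8>
      return 0;
    }
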
  4. tensorflow/compiler/mlir/tensorflow/transforms/shape_inference.cc

          SmallVector<int64_t> new_shape(shape.begin(), shape.end());
          // If a dimension of the input type is dynamic, update the
          // bounds of that dimension with the new type if needed.
          for (int i = 0; i < input_ty.getShape().size(); i++) {
            if (hlo::isDynamicDimSize(input_ty.getShape()[i])) {
              new_bounds[i] = new_shape[i];
              new_shape[i] = ShapedType::kDynamic;
            }
          }
    Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Sat Jun 08 07:28:49 UTC 2024 - 134.1K bytes - Viewed (0)
  5. tensorflow/compiler/mlir/lite/stablehlo/transforms/optimize.cc

      // Extract Z from %dot.
      ArrayRef<int64_t> shape_z =
          dot.getType().getShape().drop_front(shape_b.size() + shape_y2.size());
    
      // Check %after shape.
      if (reshape_after.getType().getShape() !=
          ArrayRef<int64_t>(llvm::to_vector(
              llvm::concat<const int64_t>(shape_b, shape_y1, shape_z)))) {
        return failure();
      }
    
      rewriter.replaceOpWithNewOp<mhlo::DotGeneralOp>(
    Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 26.9K bytes - Viewed (0)
  6. tensorflow/compiler/mlir/tensorflow/transforms/unroll_batch_matmul.cc

      RankedTensorType tensorType = mlir::cast<RankedTensorType>(value.getType());
      Type element_type = tensorType.getElementType();
    
      int rank = tensorType.getShape().size();
      int num_rows = tensorType.getShape()[rank - 2];
      int num_cols = tensorType.getShape()[rank - 1];
    
      std::vector<Value> sliced;
    
      if (batch_size == 1) {
        // Batch size is 1, no splitting is required
    Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 11.6K bytes - Viewed (0)
  7. tensorflow/compiler/mlir/tensorflow/ir/tf_ops_n_z.cc

          if (input_ty_i.hasStaticShape() &&
              input_ty_i.getShape() != input_ty_0.getShape()) {
            return op.emitOpError()
                   << "inputs[" << i << "] has shape [" << input_ty_i.getShape()
                   << "] different than the shape of inputs[0]: "
                   << input_ty_0.getShape();
          }
        }
    
        if (op.getDimensionsToReduce().size() > input_ty_0.getRank()) {
    Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu May 09 22:07:10 UTC 2024 - 170.8K bytes - Viewed (0)
  8. tensorflow/compiler/mlir/tensorflow/transforms/layout_optimization.cc

      if (!type.hasRank()) return type;
    
      auto shape = type.getShape();
      SmallVector<int64_t, 4> new_shape(shape.size());
    
      for (int i = 0; i < permutation.size(); ++i) {
        int64_t index = permutation[i];
        assert(index < shape.size());
        new_shape[index] = shape[i];
      }
    
      return type.clone(new_shape);
    }
    
    // Move Transpose operations that permute `op` operands after the `op`.
    Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 19.3K bytes - Viewed (0)
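
The layout_optimization.cc excerpt above scatters each old dimension to the position given by the permutation and clones the type with the permuted shape. Here is a hedged sketch of that shape shuffle with an NHWC-to-NCHW usage; the helper name is an assumption, and the type is rebuilt with RankedTensorType::get rather than clone.

    // shape_permutation_demo.cc -- hypothetical sketch; requires MLIR headers and libraries.
    #include <cassert>
    #include "llvm/ADT/ArrayRef.h"
    #include "llvm/ADT/SmallVector.h"
    #include "llvm/Support/raw_ostream.h"
    #include "mlir/IR/Builders.h"
    #include "mlir/IR/BuiltinTypes.h"
    #include "mlir/IR/MLIRContext.h"

    // Scatter dimension i of the old shape to position permutation[i], as in the excerpt.
    static mlir::RankedTensorType PermuteShape(mlir::RankedTensorType type,
                                               llvm::ArrayRef<int64_t> permutation) {
      auto shape = type.getShape();
      llvm::SmallVector<int64_t, 4> new_shape(shape.size());
      for (size_t i = 0; i < permutation.size(); ++i) {
        assert(permutation[i] < static_cast<int64_t>(shape.size()));
        new_shape[permutation[i]] = shape[i];
      }
      return mlir::RankedTensorType::get(new_shape, type.getElementType());
    }

    int main() {
      mlir::MLIRContext context;
      mlir::Builder builder(&context);
      // NHWC tensor<1x224x224x3xf32>; the scatter permutation {0, 2, 3, 1} yields NCHW.
      auto nhwc = mlir::RankedTensorType::get({1, 224, 224, 3}, builder.getF32Type());
      llvm::outs() << PermuteShape(nhwc, {0, 2, 3, 1}) << "\n";  // tensor<1x3x224x224xf32>
      return 0;
    }
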
  9. tensorflow/compiler/mlir/lite/stablehlo/transforms/legalize_hlo_conversions/util.cc

                                       ConversionPatternRewriter& rewriter) {
      if (index_vector_dim == indices_type.getRank()) {
        llvm::SmallVector<int64_t, 4> new_start_indices_shape(
            indices_type.getShape().begin(), indices_type.getShape().end());
        new_start_indices_shape.push_back(1);
        indices_type = RankedTensorType::get(new_start_indices_shape,
                                             indices_type.getElementType());
    Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 10.1K bytes - Viewed (0)
  10. tensorflow/compiler/mlir/lite/ir/tfl_canonicalize.td

    def GetSqueezedPermutation: NativeCodeCall<"GetSqueezedPermutation($0, $1)">;
    
    // Check whether the tensor dimensions can be squeezed by eliminating size-1 dims.
    def CanSqueezeTensor : Constraint<CPred<
      "GetShape($0).getNumElements() > GetSqueezedShape($0).getNumElements()">>;
    
    
    // Pattern to convert TFL_TransposeOp with rank>6 to rank<=6 if there are
    // redundant dimensions in the tensor. For example- [2x1x3] == [2x3] and 1 is
    Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Dec 13 20:41:03 UTC 2023 - 2.7K bytes - Viewed (0)
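
The tfl_canonicalize.td entry above gates the transpose rewrite on whether squeezing size-1 dimensions actually shrinks the shape. The real GetShape/GetSqueezedShape helpers live in TFLite's C++ sources and are not shown here; the sketch below only illustrates the squeezing idea, under the assumption that dropping unit dimensions is all the constraint needs to convey.

    // squeeze_unit_dims_demo.cc -- hypothetical sketch; requires MLIR headers and libraries.
    // Not the TFLite GetSqueezedShape/CanSqueezeTensor implementation, only an illustration.
    #include "llvm/ADT/SmallVector.h"
    #include "llvm/Support/raw_ostream.h"
    #include "mlir/IR/Builders.h"
    #include "mlir/IR/BuiltinTypes.h"
    #include "mlir/IR/MLIRContext.h"

    // Drop size-1 dimensions: a 2x1x3 tensor squeezes to 2x3 without reordering data.
    static mlir::RankedTensorType SqueezeUnitDims(mlir::RankedTensorType type) {
      llvm::SmallVector<int64_t, 8> squeezed;
      for (int64_t dim : type.getShape())
        if (dim != 1) squeezed.push_back(dim);
      return mlir::RankedTensorType::get(squeezed, type.getElementType());
    }

    int main() {
      mlir::MLIRContext context;
      mlir::Builder builder(&context);
      auto type = mlir::RankedTensorType::get({2, 1, 3}, builder.getF32Type());
      // The rank drops from 3 to 2, which is the kind of shrinkage the
      // CanSqueezeTensor constraint above tests for before rewriting.
      llvm::outs() << type << " -> " << SqueezeUnitDims(type) << "\n";
      return 0;
    }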