- Sort: Score
- Results per page: 10
- Languages: All
Results 1 - 10 of 118 for getShape (0.14 sec)
-
tensorflow/compiler/mlir/lite/transforms/optimize.cc
// only when height = width. if (params_type.getShape().size() != 4 || indices_type.getShape().size() != 2) return failure(); if (params_type.getShape()[1] != 1) return failure(); if (params_type.getShape()[0] != params_type.getShape()[2]) return failure(); if (result_type.getShape()[0] != params_type.getShape()[0] * 2) return failure();
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Apr 30 00:40:15 UTC 2024 - 102.3K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/utils/utils.h
if (!shaped_type || !shaped_type.hasStaticShape()) { return false; } if (operand_num == 0) { shape = shaped_type.getShape(); } else { if (shape != shaped_type.getShape()) { return false; } } ++operand_num; } return true; } // Utility function to map final permutation to initial permutation
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Apr 30 00:40:15 UTC 2024 - 11.6K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/passes/convert_tf_xla_op_to_tf_op.cc
dot_dimension_numbers.ParseFromString(dot_dimension_numbers_str.str()); SmallVector<Value> input_arguments = {lhs, rhs}; const int lhs_rank = mlir::cast<ShapedType>(lhs.getType()).getShape().size(); const int rhs_rank = mlir::cast<ShapedType>(rhs.getType()).getShape().size(); const std::string einsum_equation = CreateEinsumEquation(dot_dimension_numbers, lhs_rank, rhs_rank);
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 13.2K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/transforms/legalize_tf.cc
mlir::cast<ShapedType>(lhs.getType()).getShape(), mlir::cast<ShapedType>(rhs.getType()).getShape(), symbolic_broadcast_shape)) { return failure(); } // Calculates the broadcast shape using BroadcastArgs op. Value lhs_shape = GetShape(lhs, op->getLoc(), rewriter); Value rhs_shape = GetShape(rhs, op->getLoc(), rewriter); auto broadcast_shape =
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Mon May 20 20:06:54 UTC 2024 - 45.2K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/transforms/collection_ops_util.cc
Location loc) { auto buffer_type = mlir::cast<RankedTensorType>(buffer.getType()); if (buffer_type.getShape().size() == 1) return index; // Create a concat of index and trailing zeros. llvm::SmallVector<int64_t, 8> zeros(buffer_type.getShape().size() - 1, 0); auto zeros_tensor = GetR1Const(zeros, builder, loc); return builder.create<TF::ConcatV2Op>( loc,
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 14.5K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/common/ir/UniformSupport.cc
if (auto tensor_type = dyn_cast<RankedTensorType>(input_type)) return RankedTensorType::get(tensor_type.getShape(), elemental_type); if (auto tensor_type = dyn_cast<UnrankedTensorType>(input_type)) return UnrankedTensorType::get(elemental_type); if (auto vector_type = dyn_cast<VectorType>(input_type)) return VectorType::get(vector_type.getShape(), elemental_type); // If the expressed types match, just use the new elemental type.
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed May 08 02:10:16 UTC 2024 - 4.3K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/stablehlo/transforms/optimize.cc
// Extract Z from %dot. ArrayRef<int64_t> shape_z = dot.getType().getShape().drop_front(shape_b.size() + shape_y2.size()); // Check %after shape. if (reshape_after.getType().getShape() != ArrayRef<int64_t>(llvm::to_vector( llvm::concat<const int64_t>(shape_b, shape_y1, shape_z)))) { return failure(); } rewriter.replaceOpWithNewOp<mhlo::DotGeneralOp>(
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 26.9K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/ir/tf_ops_a_m.cc
} for (auto idx = 0; idx < output_type.getShape().size(); idx++) { if (input_type.getShape()[idx] != output_type.getShape()[idx]) { op.emitOpError() << "the " << idx << "th dim of output tensor is " << output_type.getShape()[idx] << ". It is not equal to the one in input tensor, which is " << input_type.getShape()[idx]; return failure();
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 146.7K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/transforms/unroll_batch_matmul.cc
RankedTensorType tensorType = mlir::cast<RankedTensorType>(value.getType()); Type element_type = tensorType.getElementType(); int rank = tensorType.getShape().size(); int num_rows = tensorType.getShape()[rank - 2]; int num_cols = tensorType.getShape()[rank - 1]; std::vector<Value> sliced; if (batch_size == 1) { // Batch size is 1, no splitting is required
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 11.6K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/ir/tf_ops_n_z.cc
if (input_ty_i.hasStaticShape() && input_ty_i.getShape() != input_ty_0.getShape()) { return op.emitOpError() << "inputs[" << i << "] has shape [" << input_ty_i.getShape() << "] different than the shape of inputs[0]: " << input_ty_0.getShape(); } } if (op.getDimensionsToReduce().size() > input_ty_0.getRank()) {
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu May 09 22:07:10 UTC 2024 - 170.8K bytes - Viewed (0)