- Sort: Score
- Results per page: 10
- Languages: All
Results 21 - 30 of 64 for _output_shapes (0.33 sec)
-
tensorflow/compiler/mlir/tensorflow/utils/convert_tensor_test.cc
PartialTensorShape output_shape = ConvertTypeToTensorShape(mlir::UnrankedTensorType::get(b.getF32Type())); EXPECT_TRUE(output_shape.IsIdenticalTo(PartialTensorShape())); } TEST(ConvertTypeToTensorTypeTest, NonFullyDefinedRankedTensorType) { mlir::MLIRContext context; RegisterDialects(context); mlir::Builder b(&context); PartialTensorShape output_shape = ConvertTypeToTensorShape(
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 10.4K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/transforms/tf_data_optimization.td
def FuseMapAndBatch : Pat< (TF_BatchDatasetV2Op (TF_MapDatasetOp $input_dataset, $other_arguments, $f, $output_types, $output_shapes, $use_inter_op_parallelism, $preserve_cardinality, $force_synchronous, $map_dataset_metadata), $batch_size, $drop_remainder, $parallel_copy, $batch_output_types,
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Jun 11 23:24:08 UTC 2024 - 1.7K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/utils/lstm_utils.cc
return failure(); // Build the lstm op. SmallVector<int64_t, 3> output_shape; if (time_majored) { output_shape = {time, batch, n_output}; } else { output_shape = {batch, time, n_output}; } auto result_type = mlir::RankedTensorType::get( output_shape, mlir::cast<RankedTensorType>(final_inputs.getType()).getElementType());
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 36.2K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/transforms/post_quantize.cc
SmallVector<int64_t, 4> output_shape; for (int i = 0; i < num_dimensions; ++i) { perm.push_back(perm_tensor.getValues<IntegerAttr>()[i].getInt()); output_shape.push_back(input_shape[perm[i]]); // Check that the derived output shape matches the static shape. assert(!output_type.hasStaticShape() || output_type.getShape()[i] == output_shape[i]); }
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 17.1K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/utils/utils.h
inline DenseElementsAttr GetShape(Value output_val, bool truncate = false) { auto output_shape = output_val.getType().dyn_cast<ShapedType>().getShape(); SmallVector<int32_t> shape; shape.reserve(output_shape.size()); bool needs_truncation = true; for (size_t dim_idx = 0; dim_idx < output_shape.size(); ++dim_idx) { int64_t dim = output_shape[dim_idx]; if (truncate && needs_truncation && dim == 1) { continue;
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Apr 30 00:40:15 UTC 2024 - 11.6K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/passes/convert_tf_xla_op_to_tf_op.cc
// dimensions. SmallVector<int64_t> output_shape(output_tensor_rank); for (int i = 0; i < output_tensor_rank; i++) { if (collapsed_dims.contains(i)) { // The collapsed dimension's size should have been 1, so it restores the // dimension with size 1. output_shape[i] = 1; } else { output_shape[i] = *shape_itr; shape_itr++; } }
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 13.2K bytes - Viewed (0) -
tensorflow/compiler/jit/encapsulate_util.cc
std::vector<PartialTensorShape> output_shapes; std::transform(iter.second.begin(), iter.second.end(), std::back_inserter(output_shapes), [](const InferredShape& inferred_shape) { return inferred_shape.shape; }); Node* n = node_name_index[iter.first]; n->AddAttr(kXlaInferredShapesAttrName, output_shapes); } return absl::OkStatus();
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Mar 12 06:33:33 UTC 2024 - 15.5K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/utils/tf_to_xla_attribute_utils.cc
SmallVector<int64_t> output_shape(input_shape.getShape().begin(), input_shape.getShape().end()); for (int i : spatial_dims) { output_shape[i] += padding_values[2 * i] + padding_values[2 * i + 1]; } return builder.create<TF::PadV2Op>( loc, RankedTensorType::get(output_shape, builder.getI8Type()), input, temp_padding,
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri May 17 17:58:54 UTC 2024 - 13.3K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/utils/xla_sharding_util.cc
const mlir::TensorType cluster_func_output_type, const xla::OpSharding& output_sharding, mlir::Type* tiled_logical_computation_type) { const auto output_shape = cluster_func_output_type.getShape(); auto new_output_shape = llvm::to_vector<4>(output_shape); auto dimension_to_splits_map = GetDimensionIndicesAndNumSplitsFromSharding(output_sharding); if (!dimension_to_splits_map.ok()) {
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed May 22 21:28:13 UTC 2024 - 34K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/tests/fuse-tftext.mlir
%17 = "tf.If"(%2, %2, %13, %13) {_lower_using_switch_merge = true, _read_only_resource_inputs = [], device = "", else_branch = @WhitespaceTokenize_RaggedConcat_assert_equal_1_Assert_AssertGuard_false_3210, is_stateless = false, output_shapes = [#tf_type.shape<>], then_branch = @WhitespaceTokenize_RaggedConcat_assert_equal_1_Assert_AssertGuard_true_3200} : (tensor<i1>, tensor<i1>, tensor<i64>, tensor<i64>) -> tensor<i1>
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu May 02 09:41:17 UTC 2024 - 460.3K bytes - Viewed (0)