- Sort Score
- Results per page: 10
- Languages All
Results 51 - 60 of 88 for input_shapes_ (0.29 sec)
-
tensorflow/compiler/mlir/tensorflow/ir/tf_ops_n_z.cc
assert(input_shape.size() == stride.size()); for (int i = 0, e = input_shape.size(); i < e; ++i) { if (ShapedType::isDynamic(input_shape[i])) continue; int64_t dim_i = input_shape[i]; int64_t begin_i = begin[i]; int64_t end_i = end[i]; int64_t stride_i = stride[i]; // [0]: mask for begin, [1]: mask for end int64_t masks[] = {begin_mask & (1 << i), end_mask & (1 << i)};
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu May 09 22:07:10 UTC 2024 - 170.8K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/passes/quantized_function_library_uniform_quantized.mlir
%i32_min_filled = "tf.Fill" (%input_shape, %i32_min) : (tensor<*xi32>, tensor<i32>) -> tensor<*xi32> %i32_max_filled = "tf.Fill" (%input_shape, %i32_max) : (tensor<*xi32>, tensor<i32>) -> tensor<*xi32> %i32_act_max_f32_filled = "tf.Fill" (%input_shape, %i32_act_max_f32) : (tensor<*xi32>, tensor<i32>) -> tensor<*xi32>
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Aug 29 01:13:58 UTC 2023 - 19.3K bytes - Viewed (0) -
tensorflow/compiler/jit/tests/device_compiler_test_helper.h
JitCompilationListener* listener() const { return listener_; } // Returns a test graph that will split into two XLA clusters (due to a node // with _XlaCompile = false). GraphDef GetTestGraph(const PartialTensorShape& input_shape); // Runs the graph using specified batch size both with and without XLA JIT // compilation. Returns an error if the results between the two do not match. Status ExecuteWithBatch(const GraphDef& graph, int batch);
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri Feb 09 08:24:16 UTC 2024 - 3.6K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/tf_to_tfl_flatbuffer.h
const GraphImportConfig& specs, absl::string_view debug_info_file, absl::string_view input_arrays, absl::string_view input_dtypes, absl::string_view input_shapes, absl::string_view output_arrays, absl::string_view control_output_arrays, llvm::SourceMgr* source_mgr, mlir::MLIRContext* context); // Load Saved model (either v1 or v2) into MLIR.
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Apr 24 08:30:24 UTC 2024 - 4.7K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/utils/lstm_utils_test.cc
bool cifg) { SmallVector<int64_t, 2> input_shape{1, 2}; SmallVector<int64_t, 2> weight_shape{3, 12}; SmallVector<int64_t, 1> bias_shape{2}; SmallVector<int64_t, 2> projection_shape{1, 2}; SmallVector<int64_t, 1> layer_norm_scale{4}; SmallVector<int64_t, 2> output_shape{1, 2}; auto input_type = RankedTensorType::get(input_shape, builder->getF32Type());
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 10K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/transforms/dilated_conv.h
expand_op.setOperand(0, stb_op.getInput()); // Calculate the shape for expand. auto input_shape = mlir::cast<ShapedType>(stb_op.getInput().getType()).getShape(); SmallVector<int64_t, 4> expand_shape(input_shape.begin(), input_shape.end()); expand_shape.insert(expand_shape.begin() + expand_axis, 1); auto expand_result_type = RankedTensorType::get(
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 20K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/tests/unfreeze_constants.mlir
} func.func private @__inference_main(%arg0: tensor<1x5x5x1024xf32> {tf._user_specified_name = "input_tensor"}) -> tensor<1x5x5x1024xf32> attributes {tf._construction_context = "kEagerRuntime", tf._input_shapes = [#tf_type.shape<1x5x5x1024>], tf._noinline = true, tf._original_func_name = "__inference_main_540"} { %cst_0 = "tf.Const"() {value = dense<0> : tensor<i32>} : () -> tensor<i32>
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Mon Oct 30 06:52:55 UTC 2023 - 17.2K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/transforms/optimize.cc
// Ex. input_shape = [1 x 1 x 1 x 1 x 2 x 1] => 4 // returns 0 if the input shape is not static. int GetNumLeadingOnes(ShapedType input_type) { if (!input_type.hasStaticShape()) return 0; auto input_shape = input_type.getShape(); int num_leading_broadcast_dims = 0; for (int i = 0; i < input_shape.size(); ++i) { if (input_shape[i] == 1) { ++num_leading_broadcast_dims;
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Apr 30 00:40:15 UTC 2024 - 102.3K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/translate/tf_mlir_translate_cl.cc
"'' if a single data type is skipped. The data type from " "the import graph is used if it is skipped."), llvm::cl::init("")); // NOLINTNEXTLINE opt<std::string> input_shapes( "tf-input-shapes", llvm::cl::desc( "Input tensor shapes. Shapes for different tensors are separated by " "':', and dimension sizes for the same tensor are separated by ','"),
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Aug 10 20:59:50 UTC 2023 - 5.5K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/tests/quantize_xla.mlir
func.func private @conv(%input: tensor<1x3x4x3xf32> {tf._user_specified_name = "input_tensor"}) -> tensor<*xf32> attributes {tf._construction_context = "kEagerRuntime", tf._input_shapes = [#tf_type.shape<1x3x4x3>]} { %weight = arith.constant dense_resource<__elided__> : tensor<2x3x3x2xf32> %bias = arith.constant dense<[7.11401462, 7.05456924]> : tensor<2xf32>
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed May 08 19:32:28 UTC 2024 - 11.4K bytes - Viewed (0)