- Sort by: Score
- Results per page: 10
- Languages All
Results 11 - 16 of 16 for _input_shapes (0.13 sec)
-
tensorflow/compiler/mlir/tensorflow/utils/convert_type.h
// Converts an TensorFlow shape to the one used in MLIR. void ConvertToMlirShape(const TensorShape& input_shape, llvm::SmallVectorImpl<int64_t>* shape); // Converts an TensorFlow shape proto to the one used in MLIR. Status ConvertToMlirShape(const TensorShapeProto& input_shape, llvm::SmallVectorImpl<int64_t>* shape);
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri Apr 26 09:37:10 UTC 2024 - 2.2K bytes - Viewed (0) -
tensorflow/compiler/jit/tests/device_compiler_test_helper.cc
{{"dtype", DT_FLOAT}, {"shape", input_shape}}); *graph.add_node() = MakeNode("b", "Placeholder", {}, {{"dtype", DT_FLOAT}, {"shape", input_shape}}); *graph.add_node() = MakeNode("c", "Placeholder", {}, {{"dtype", DT_FLOAT}, {"shape", input_shape}}); *graph.add_node() = MakeNode("m", "TestFn", {"a", "b", "c"}, {}); return graph; }
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri Feb 09 08:24:16 UTC 2024 - 6.2K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/translate/tf_mlir_translate_registration.cc
enable_soft_placement, set_original_tf_func_name}; auto module_or = tensorflow::GraphdefToMlirTranslateFunction( input, input_arrays, input_dtypes, input_shapes, output_arrays, control_output_arrays, options, context); if (!module_or.status().ok()) return nullptr; return std::move(module_or).value(); } static TranslateToMLIRRegistration GraphdefToMlirTranslate(
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Jun 12 22:19:26 UTC 2024 - 7.8K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/transforms/optimize_batch_matmul.cc
bmm_op->getLoc(), permuation_tensor_type, DenseElementsAttr::get(permuation_tensor_type, permute)); auto input_shape = input_type.getShape(); llvm::SmallVector<int64_t, 4> permuted_shape(input_shape.begin(), input_shape.end()); // Swaps z dimension and x dimension to get permuted shape. std::iter_swap(permuted_shape.begin() + input_rank - 1,
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 9.6K bytes - Viewed (0) -
tensorflow/compiler/jit/tests/device_compiler_test_helper.h
JitCompilationListener* listener() const { return listener_; } // Returns a test graph that will split into two XLA clusters (due to a node // with _XlaCompile = false). GraphDef GetTestGraph(const PartialTensorShape& input_shape); // Runs the graph using specified batch size both with and without XLA JIT // compilation. Returns an error if the results between the two do not match. Status ExecuteWithBatch(const GraphDef& graph, int batch);
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri Feb 09 08:24:16 UTC 2024 - 3.6K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/tf_to_tfl_flatbuffer.h
const GraphImportConfig& specs, absl::string_view debug_info_file, absl::string_view input_arrays, absl::string_view input_dtypes, absl::string_view input_shapes, absl::string_view output_arrays, absl::string_view control_output_arrays, llvm::SourceMgr* source_mgr, mlir::MLIRContext* context); // Load Saved model (either v1 or v2) into MLIR.
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Apr 24 08:30:24 UTC 2024 - 4.7K bytes - Viewed (0)