Results 1 - 10 of 13 for unconditionally_use_set_output_shapes
tensorflow/compiler/mlir/tensorflow/translate/tf_mlir_translate.cc
  specs.upgrade_legacy = import_options.upgrade_legacy;
  specs.enable_shape_inference = import_options.enable_shape_inference;
  specs.unconditionally_use_set_output_shapes =
      import_options.unconditionally_use_set_output_shapes;
  specs.xla_compile_device_type = import_options.xla_compile_device_type;
  specs.enable_soft_placement = import_options.enable_soft_placement;
Last Modified: Tue May 07 11:51:44 UTC 2024 - 14.1K bytes
tensorflow/compiler/mlir/tensorflow/translate/tf_mlir_translate_registration.cc
      debug_info_file, xla_compile_device_type, prune_unused_nodes,
      convert_legacy_fed_inputs, graph_as_function, upgrade_legacy,
      enable_shape_inference, unconditionally_use_set_output_shapes,
      enable_soft_placement, set_original_tf_func_name};
  auto module_or = tensorflow::GraphdefToMlirTranslateFunction(
      input, input_arrays, input_dtypes, input_shapes, output_arrays,
Last Modified: Wed Jun 12 22:19:26 UTC 2024 - 7.8K bytes
tensorflow/compiler/mlir/tensorflow/translate/mlir_import_options.h
  // If true, functionalize the input graph before importing it into MLIR.
  bool upgrade_legacy = false;
  // Whether to unconditionally use the shape set via _output_shapes on import.
  bool unconditionally_use_set_output_shapes = false;
  // Apply default attributes from the op definition to the loaded op.
  bool add_default_attributes = true;
  // If set, promote tf.VarHandleOp to resource arguments for all functions.
Last Modified: Mon Mar 20 13:19:26 UTC 2023 - 2.5K bytes
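These fields are the import-side switch. A minimal sketch of how a caller might opt in, using only fields that appear in the excerpts on this page (the import call itself is omitted):

  // Sketch only: ask the importer to trust _output_shapes unconditionally.
  tensorflow::MLIRImportOptions import_options;
  import_options.upgrade_legacy = true;  // functionalize legacy control flow
  import_options.unconditionally_use_set_output_shapes = true;
  // add_default_attributes already defaults to true per the header above.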
tensorflow/compiler/mlir/tensorflow/translate/tf_mlir_translate.h
  std::string xla_compile_device_type;
  bool prune_unused_nodes;
  bool convert_legacy_fed_inputs;
  bool graph_as_function;
  bool upgrade_legacy;
  bool enable_shape_inference;
  bool unconditionally_use_set_output_shapes;
  bool enable_soft_placement;
  bool set_original_tf_func_name = false;
};
// TODO(antiagainst): Directly manipulating files in library functions is not
Last Modified: Wed May 01 11:17:36 UTC 2024 - 5.9K bytes
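The excerpt cuts off the struct's name. Assuming it is the options bundle consumed by GraphdefToMlirTranslateFunction (called GraphdefToMlirOptions below purely as a placeholder), filling it would look roughly like this:

  // Hypothetical sketch; the struct name is an assumption, the field names
  // are taken verbatim from the header excerpt above.
  GraphdefToMlirOptions options{};  // value-init zeroes the unset booleans
  options.prune_unused_nodes = true;
  options.upgrade_legacy = true;
  options.enable_shape_inference = false;
  options.unconditionally_use_set_output_shapes = true;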
tensorflow/compiler/mlir/tensorflow/translate/tf_mlir_translate_cl.h
  extern llvm::cl::opt<bool> upgrade_legacy;
  // TODO(jpienaar): Temporary flag, flip default and remove.
  extern llvm::cl::opt<bool> enable_shape_inference;
  extern llvm::cl::opt<bool> unconditionally_use_set_output_shapes;
  extern llvm::cl::opt<bool> enable_soft_placement;
  extern llvm::cl::opt<bool> set_original_tf_func_name;
  // Export options.
  extern llvm::cl::opt<bool> export_entry_func_to_flib;
Last Modified: Thu Aug 10 20:59:50 UTC 2023 - 2.3K bytes
tensorflow/compiler/mlir/lite/python/graphdef_to_tfl_flatbuffer.cc
  specs.prune_unused_nodes = true;
  specs.convert_legacy_fed_inputs = true;
  specs.graph_as_function = false;
  specs.upgrade_legacy = true;
  specs.unconditionally_use_set_output_shapes = true;
  internal::WarningUnusedFlags(model_flags, toco_flags);
  // Register all custom ops, including user-specified custom ops.
  TF_RETURN_IF_ERROR(internal::RegisterAllCustomOps(toco_flags));
Last Modified: Mon Mar 11 19:29:56 UTC 2024 - 5.1K bytes
tensorflow/compiler/mlir/lite/tf_to_tfl_flatbuffer.cc
      input_filename, tags, exported_names, context,
      /*unconditionally_use_set_output_shapes=*/true);
  if (!module_or.status().ok()) return module_or.status();
  return std::move(module_or).value();
} else if (saved_model_version == 1) {
  MLIRImportOptions options;
  options.upgrade_legacy = specs.upgrade_legacy;
  options.unconditionally_use_set_output_shapes = true;
  options.lift_variables = enable_variable_lifting;
Last Modified: Fri May 03 18:01:23 UTC 2024 - 23.8K bytes
tensorflow/compiler/mlir/tensorflow/translate/import_model.h
  absl::StatusOr<mlir::OwningOpRef<mlir::ModuleOp>> ConvertSavedModelV1ToMlirLite(
      SavedModelMLIRImportInput& input,
      std::optional<absl::Span<const std::string>> exported_names,
      mlir::MLIRContext* context,
      bool unconditionally_use_set_output_shapes = false);
  // Serialize a MLIR module to a string.
  std::string MlirModuleToString(mlir::ModuleOp module,
                                 mlir::OpPrintingFlags flags);
Last Modified: Wed May 01 11:17:36 UTC 2024 - 6.8K bytes
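Given this declaration and the call pattern in tf_to_tfl_flatbuffer.cc above, a caller that wants the importer to honor _output_shapes might look roughly like the following; input preparation is elided, and `input` is assumed to be an already-prepared SavedModelMLIRImportInput:

  // Sketch, assuming `input` has been set up elsewhere.
  mlir::MLIRContext context;
  absl::StatusOr<mlir::OwningOpRef<mlir::ModuleOp>> module_or =
      tensorflow::ConvertSavedModelV1ToMlirLite(
          input, /*exported_names=*/std::nullopt, &context,
          /*unconditionally_use_set_output_shapes=*/true);
  if (!module_or.ok()) return module_or.status();
  mlir::OwningOpRef<mlir::ModuleOp> module = std::move(module_or).value();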
tensorflow/compiler/mlir/tensorflow/translate/tf_mlir_translate_cl.cc
  opt<bool> enable_shape_inference(
      "tf-enable-shape-inference-on-import",
      llvm::cl::desc("Enable shape inference on import (temporary)"),
      llvm::cl::init(false));
  // NOLINTNEXTLINE
  opt<bool> unconditionally_use_set_output_shapes(
      "tf-enable-unconditionally-use-set-output-shapes-on-import",
      llvm::cl::desc("Enable using the _output_shapes unconditionally on import "
                     "(temporary)"),
      llvm::cl::init(false));
Last Modified: Thu Aug 10 20:59:50 UTC 2023 - 5.5K bytes
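This is the switch exposed on the standalone translator's command line. An invocation might look like the line below; the -graphdef-to-mlir translation name and the file names are assumptions, only the flag itself comes from the excerpt:

  tf-mlir-translate -graphdef-to-mlir \
      -tf-enable-unconditionally-use-set-output-shapes-on-import \
      graph.pbtxt -o graph.mlir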
tensorflow/compiler/mlir/tensorflow/translate/mlir_roundtrip_flags.cc
  ss << "\nrestrict_functionalization_to_compiled_nodes: "
     << restrict_functionalization_to_compiled_nodes;
  ss << "\nenable_shape_inference: " << enable_shape_inference;
  ss << "\nunconditionally_use_set_output_shapes: "
     << unconditionally_use_set_output_shapes;
  ss << "\nxla_compile_device_type: " << xla_compile_device_type;
  return ss.str();
}
Status ParseOutputArrayInfo(absl::string_view array_names,
Last Modified: Wed May 01 11:17:36 UTC 2024 - 10.4K bytes