- Sort Score
- Results per page: 10
- Languages All
Results 11 - 20 of 22 for SavedModelBundle (0.24 sec)
-
tensorflow/compiler/mlir/python/mlir.cc
bool include_variables_in_initializers, bool upgrade_legacy, bool show_debug_info, TF_Status* status) { // Load the saved model into a SavedModelBundle. std::unordered_set<string> tag_set = absl::StrSplit(tags, ',', absl::SkipEmpty()); tensorflow::SavedModelBundle bundle; auto load_status = tensorflow::LoadSavedModel({}, {}, saved_model_path, tag_set, &bundle); if (!load_status.ok()) {
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri May 03 18:16:49 UTC 2024 - 19.3K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/stablehlo/odml_to_stablehlo.cc
namespace odml { absl::StatusOr<OwningOpRef<mlir::ModuleOp>> ImportSavedModelOrMLIR( const std::string& input_path, MLIRContext* context, llvm::SourceMgr* source_mgr, std::unique_ptr<tensorflow::SavedModelBundle>* saved_model_bundle) { if (absl::EndsWith(input_path, ".mlir")) { auto file_or_err = llvm::MemoryBuffer::getFileOrSTDIN(input_path.c_str()); if (std::error_code error = file_or_err.getError()) {
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri May 03 18:16:49 UTC 2024 - 14.1K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow_to_stablehlo/tf_to_stablehlo.cc
#include "tensorflow/core/public/session.h" #include "tsl/platform/errors.h" #include "tsl/platform/statusor.h" namespace mlir { namespace { // Extract the mlir TF module and optionally a ::tensorflow::SavedModelBundle // from a saved model or from an mlir file. absl::StatusOr<quant::stablehlo::ImportedMlirModuleOp> ImportSavedModelOrTfMlir( absl::string_view input_path, MLIRContext* context,
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue May 21 22:58:42 UTC 2024 - 5.5K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/tf_to_tfl_flatbuffer.cc
const absl::string_view saved_model_dir, const std::unordered_set<std::string>& saved_model_tags, QuantizationConfig* quantization_config, const PyFunctionLibrary* quantization_py_function_lib, const SavedModelBundle* saved_model_bundle, mlir::PassManager& pass_manager, mlir::StatusScopedDiagnosticHandler& status_handler, ModuleOp& module) { // TODO: b/194747383 - We need to valid that indeed the "main" func is // presented.
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri May 03 18:01:23 UTC 2024 - 23.8K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/translate/tf_mlir_translate.h
const std::unordered_set<std::string>& tags, absl::Span<std::string> exported_names, mlir::MLIRContext* context, MLIRImportOptions options, std::unique_ptr<tensorflow::SavedModelBundle>* saved_model_bundle = nullptr); // Converts a TensorFlow V1 SavedModel stored in the directory with the given // `saved_model_dir` into a MLIR module. Creates MLIR entities into the
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed May 01 11:17:36 UTC 2024 - 5.9K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/python/quantize_model.cc
absl::flat_hash_map<std::string, std::string> &function_aliases) { // Convert the SavedModelBundle to an MLIR module. MLIRImportOptions import_options; import_options.upgrade_legacy = true; import_options.lift_variables = false; import_options.include_variables_in_initializers = true; auto bundle = std::make_unique<SavedModelBundle>(); // TODO: b/213406917 - Add support for the object graph based saved model.
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri May 17 03:36:50 UTC 2024 - 23.8K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/translate/import_model.h
// Given a V1 SavedModel, returns a MLIR module containing the functions, // expressed with tf_executor dialect. absl::StatusOr<mlir::OwningOpRef<mlir::ModuleOp>> ConvertSavedModelV1ToMlir( const SavedModelBundle& saved_model, absl::Span<std::string> exported_names, mlir::MLIRContext* context, MLIRImportOptions options = {}); // Given a V1 SavedModel, returns a MLIR module containing the functions,
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed May 01 11:17:36 UTC 2024 - 6.8K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/python/saved_model_to_tfl_flatbuffer.cc
std::vector<std::string> custom_opdefs(toco_flags.custom_opdefs().begin(), toco_flags.custom_opdefs().end()); auto bundle = std::make_unique<tensorflow::SavedModelBundle>(); TF_ASSIGN_OR_RETURN( auto module, ImportSavedModel( model_flags.saved_model_dir(), model_flags.saved_model_version(), tags, absl::MakeSpan(custom_opdefs), exported_names, specs,
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Sun May 12 12:39:37 UTC 2024 - 11K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/tf_tfl_translate.cc
llvm::errs() << "You must specify `emit-select-tf-ops=true` when passing " "`select-user-tf-ops` flag."; return kTrFailure; } std::unique_ptr<tensorflow::SavedModelBundle> bundle; // TODO(b/147435528): We need to test the e2e behavior once the graph freezing // inside mlir is done. if ((import_saved_model_object_graph || import_saved_model_signature_defs) &&
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri May 03 18:01:23 UTC 2024 - 14K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/python/tf_tfl_flatbuffer_helpers.cc
mlir::OwningOpRef<mlir::ModuleOp> module, const mlir::TFL::PassConfig& pass_config, const std::unordered_set<std::string>& saved_model_tags, std::string* result, SavedModelBundle* saved_model_bundle, const PyFunctionLibrary* quantization_py_function_lib) { if (toco_flags.has_dump_graphviz_dir()) { TF_RETURN_IF_ERROR(DumpOpGraphToFile( module.get(),
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Sun May 12 12:39:37 UTC 2024 - 17.3K bytes - Viewed (0)