- Sort by: Score
- Results per page: 10
- Languages All
Results 1 - 10 of 11 for SavedModelBundle (0.28 sec)
-
tensorflow/cc/saved_model/loader.h
/// RAM. struct SavedModelBundle : public SavedModelBundleInterface { /// A TensorFlow Session does not Close itself on destruction. To avoid /// resource leaks, we explicitly call Close on Sessions that we create. ~SavedModelBundle() override { if (session) { session->Close().IgnoreError(); } } SavedModelBundle() = default;
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Dec 05 18:28:37 UTC 2023 - 6K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/quantization/stablehlo/quantization.cc
return signature_def_map; } // Retrieves the function name -> function alias mapping from the // `SavedModelBundle`. // TODO: b/314124142 - Remove the need for this function. absl::flat_hash_map<std::string, std::string> GetFunctionAliases( const SavedModelBundle& saved_model_bundle) { const protobuf::Map<std::string, std::string>& function_aliases =
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Sun Apr 14 10:49:12 UTC 2024 - 7.3K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/stablehlo/cc/saved_model_import.h
// Represents a pair of `mlir::ModuleOp` and `tensorflow::SavedModelBundle`. The // SavedModelBundle complements the imported ModuleOp by providing access to // `tensorflow::Session` which may be useful when reading values from resources // (e.g. `TF::VarHandleOp`s). using ImportedMlirModuleOp = std::pair<OwningOpRef<ModuleOp>, std::unique_ptr<::tensorflow::SavedModelBundle>>;
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Apr 24 12:49:45 UTC 2024 - 4.2K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/quantization/stablehlo/quantization_test.cc
WhenPyFunctionLibIsNullptrReturnsInvalidArgumentError) { const absl::StatusOr<std::string> tmp_saved_model_dir = CreateTmpDir(); ASSERT_THAT(tmp_saved_model_dir, IsOk()); // Dummy SavedModelBundle to pass a non-nullptr argument. SavedModelBundle bundle{}; QuantizationConfig config; const absl::StatusOr<mlir::ModuleOp> quantized_module_op = RunQuantization( /*saved_model_bundle=*/&bundle, *tmp_saved_model_dir,
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Mon Mar 11 19:29:56 UTC 2024 - 3.1K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/tf_to_tfl_flatbuffer.h
absl::Span<const std::string> extra_tf_opdefs, absl::Span<std::string> exported_names, const GraphImportConfig& specs, bool enable_variable_lifting, mlir::MLIRContext* context, std::unique_ptr<tensorflow::SavedModelBundle>* saved_model_bundle); Status ConvertTFExecutorToStablehloFlatbuffer( mlir::PassManager& pass_manager, mlir::ModuleOp module, bool export_to_mlir, mlir::StatusScopedDiagnosticHandler& statusHandler,
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Apr 24 08:30:24 UTC 2024 - 4.7K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/stablehlo/cc/saved_model_import.cc
#include "tsl/platform/errors.h" #include "tsl/platform/statusor.h" namespace mlir::quant::stablehlo { using ::stablehlo::quantization::QuantizationConfig; using ::tensorflow::MLIRImportOptions; using ::tensorflow::SavedModelBundle; using ::tensorflow::SavedModelSignatureDefsToMlirImport; using ::tensorflow::quantization::PreprocessAndFreezeGraph; absl::StatusOr<ImportedMlirModuleOp> SavedModelToMlirModuleOp(
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Apr 24 12:49:45 UTC 2024 - 6.4K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/quantization/stablehlo/quantization.h
// XlaCallModuleOp. Returns a non-OK status if quantization fails, or any of // `saved_model_bundle` or `quantization_py_function_lib` is a nullptr. absl::StatusOr<mlir::ModuleOp> RunQuantization( const SavedModelBundle* saved_model_bundle, absl::string_view saved_model_dir, const std::unordered_set<std::string>& saved_model_tags, const stablehlo::quantization::QuantizationConfig& quantization_config,
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Mar 19 02:44:03 UTC 2024 - 2.7K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/python/tf_tfl_flatbuffer_helpers.h
mlir::OwningOpRef<mlir::ModuleOp> module, const mlir::TFL::PassConfig& pass_config, const std::unordered_set<std::string>& saved_model_tags, string* result, SavedModelBundle* saved_model_bundle, const quantization::PyFunctionLibrary* quantization_py_function_lib); // Give a warning for any unused flags that have been specified. void WarningUnusedFlags(const toco::ModelFlags& model_flags,
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Sun May 12 12:39:37 UTC 2024 - 3.2K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow_to_stablehlo/tf_to_stablehlo.cc
#include "tensorflow/core/public/session.h" #include "tsl/platform/errors.h" #include "tsl/platform/statusor.h" namespace mlir { namespace { // Extract the mlir TF module and optionally a ::tensorflow::SavedModelBundle // from a saved model or from an mlir file. absl::StatusOr<quant::stablehlo::ImportedMlirModuleOp> ImportSavedModelOrTfMlir( absl::string_view input_path, MLIRContext* context,
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue May 21 22:58:42 UTC 2024 - 5.5K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/translate/tf_mlir_translate.h
const std::unordered_set<std::string>& tags, absl::Span<std::string> exported_names, mlir::MLIRContext* context, MLIRImportOptions options, std::unique_ptr<tensorflow::SavedModelBundle>* saved_model_bundle = nullptr); // Converts a TensorFlow V1 SavedModel stored in the directory with the given // `saved_model_dir` into a MLIR module. Creates MLIR entities into the
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed May 01 11:17:36 UTC 2024 - 5.9K bytes - Viewed (0)