Results 1 - 10 of 22 for SavedModelBundle (0.23 sec)

  1. tensorflow/cc/saved_model/loader.h

    /// RAM.
    struct SavedModelBundle : public SavedModelBundleInterface {
      /// A TensorFlow Session does not Close itself on destruction. To avoid
      /// resource leaks, we explicitly call Close on Sessions that we create.
      ~SavedModelBundle() override {
        if (session) {
          session->Close().IgnoreError();
        }
      }
    
      SavedModelBundle() = default;
    
    - Last Modified: Tue Dec 05 18:28:37 UTC 2023
    - 6K bytes
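
    The excerpt above shows that SavedModelBundle owns the Session it wraps and
    closes it in its destructor. A minimal loading sketch, assuming a model
    exported under the standard `serve` tag; the export_dir value is a
    placeholder, not a path from this repository:

    #include <string>
    #include "tensorflow/cc/saved_model/loader.h"
    #include "tensorflow/cc/saved_model/tag_constants.h"

    // Sketch: load a SavedModel into a bundle. When `bundle` goes out of
    // scope, its destructor closes the owned Session (see the excerpt above),
    // so no explicit Close() call is needed here.
    tensorflow::Status LoadBundle(const std::string& export_dir) {
      tensorflow::SavedModelBundle bundle;
      tensorflow::SessionOptions session_options;
      tensorflow::RunOptions run_options;
      tensorflow::Status status = tensorflow::LoadSavedModel(
          session_options, run_options, export_dir,
          {tensorflow::kSavedModelTagServe}, &bundle);
      // On success, bundle.session and bundle.meta_graph_def are populated.
      return status;
    }
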
  2. tensorflow/compiler/mlir/lite/quantization/stablehlo/quantization.cc

      return signature_def_map;
    }
    
    // Retrieves the function name -> function alias mapping from the
    // `SavedModelBundle`.
    // TODO: b/314124142 - Remove the need for this function.
    absl::flat_hash_map<std::string, std::string> GetFunctionAliases(
        const SavedModelBundle& saved_model_bundle) {
      const protobuf::Map<std::string, std::string>& function_aliases =
    - Last Modified: Sun Apr 14 10:49:12 UTC 2024
    - 7.3K bytes
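
    The map being copied above normally comes from the bundle's MetaGraphDef.
    A hedged sketch of reading it, assuming the aliases live in
    meta_info_def().function_aliases() (the snippet is cut off before the
    source of the map is visible):

    #include <string>
    #include "absl/container/flat_hash_map.h"
    #include "tensorflow/cc/saved_model/loader.h"

    // Sketch only: copy the function-name -> alias map out of a loaded bundle.
    // meta_info_def().function_aliases() is an assumption; the truncated
    // snippet above does not show which field quantization.cc actually reads.
    absl::flat_hash_map<std::string, std::string> CopyFunctionAliases(
        const tensorflow::SavedModelBundle& bundle) {
      absl::flat_hash_map<std::string, std::string> aliases;
      for (const auto& entry :
           bundle.meta_graph_def.meta_info_def().function_aliases()) {
        aliases[entry.first] = entry.second;
      }
      return aliases;
    }
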
  3. tensorflow/compiler/mlir/quantization/stablehlo/cc/saved_model_import.h

    // Represents a pair of `mlir::ModuleOp` and `tensorflow::SavedModelBundle`. The
    // SavedModelBundle complements the imported ModuleOp by providing access to
    // `tensorflow::Session` which may be useful when reading values from resources
    // (e.g. `TF::VarHandleOp`s).
    using ImportedMlirModuleOp =
        std::pair<OwningOpRef<ModuleOp>,
                  std::unique_ptr<::tensorflow::SavedModelBundle>>;
    
    - Last Modified: Wed Apr 24 12:49:45 UTC 2024
    - 4.2K bytes
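
    Since ImportedMlirModuleOp bundles ownership of both the module and the
    SavedModelBundle, a consumer typically moves both halves out of the pair so
    the Session stays reachable while the module is processed. A small sketch
    with a hypothetical function name:

    #include <memory>
    #include <utility>
    #include "tensorflow/compiler/mlir/quantization/stablehlo/cc/saved_model_import.h"

    // Hypothetical consumer (the function name is illustrative, not from the
    // repository): keep the bundle alive for as long as the imported module is
    // in use, so resource reads via bundle->session remain valid.
    void ConsumeImportedModule(
        mlir::quant::stablehlo::ImportedMlirModuleOp imported) {
      mlir::OwningOpRef<mlir::ModuleOp> module_ref = std::move(imported.first);
      std::unique_ptr<tensorflow::SavedModelBundle> bundle =
          std::move(imported.second);
      mlir::ModuleOp module = module_ref.get();
      (void)module;  // Process the module here while `bundle` is still alive.
    }
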
  4. tensorflow/compiler/mlir/lite/quantization/stablehlo/quantization_test.cc

         WhenPyFunctionLibIsNullptrReturnsInvalidArgumentError) {
      const absl::StatusOr<std::string> tmp_saved_model_dir = CreateTmpDir();
      ASSERT_THAT(tmp_saved_model_dir, IsOk());
    
      // Dummy SavedModelBundle to pass a non-nullptr argument.
      SavedModelBundle bundle{};
      QuantizationConfig config;
      const absl::StatusOr<mlir::ModuleOp> quantized_module_op = RunQuantization(
          /*saved_model_bundle=*/&bundle, *tmp_saved_model_dir,
    - Last Modified: Mon Mar 11 19:29:56 UTC 2024
    - 3.1K bytes
  5. tensorflow/compiler/mlir/tensorflow/translate/tf_mlir_translate.cc

        MLIRImportOptions options,
        std::unique_ptr<tensorflow::SavedModelBundle>* saved_model_bundle) {
      // Create a local bundle if none is provided.
      std::unique_ptr<tensorflow::SavedModelBundle> bundle;
      if (saved_model_bundle == nullptr) {
        bundle = std::make_unique<tensorflow::SavedModelBundle>();
      } else if (*saved_model_bundle == nullptr) {
    - Last Modified: Tue May 07 11:51:44 UTC 2024
    - 14.1K bytes
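
    The snippet cuts off inside the fallback logic. As a generic illustration
    of the pattern (the caller may pass no slot at all, an empty slot, or a
    pre-populated bundle), with hypothetical names:

    #include <memory>
    #include "tensorflow/cc/saved_model/loader.h"

    // Sketch of the out-parameter pattern in the excerpt above (names are
    // illustrative): with no slot, a local bundle owns the SavedModel for the
    // duration of the import; with an empty slot, allocate into it so the
    // caller keeps the bundle afterwards.
    tensorflow::SavedModelBundle* ResolveBundle(
        std::unique_ptr<tensorflow::SavedModelBundle>* saved_model_bundle,
        std::unique_ptr<tensorflow::SavedModelBundle>& local_bundle) {
      if (saved_model_bundle == nullptr) {
        local_bundle = std::make_unique<tensorflow::SavedModelBundle>();
        return local_bundle.get();
      }
      if (*saved_model_bundle == nullptr) {
        *saved_model_bundle = std::make_unique<tensorflow::SavedModelBundle>();
      }
      return saved_model_bundle->get();
    }
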
  6. tensorflow/compiler/mlir/lite/tf_to_tfl_flatbuffer.h

        absl::Span<const std::string> extra_tf_opdefs,
        absl::Span<std::string> exported_names, const GraphImportConfig& specs,
        bool enable_variable_lifting, mlir::MLIRContext* context,
        std::unique_ptr<tensorflow::SavedModelBundle>* saved_model_bundle);
    
    Status ConvertTFExecutorToStablehloFlatbuffer(
        mlir::PassManager& pass_manager, mlir::ModuleOp module, bool export_to_mlir,
        mlir::StatusScopedDiagnosticHandler& statusHandler,
    - Last Modified: Wed Apr 24 08:30:24 UTC 2024
    - 4.7K bytes
  7. tensorflow/compiler/mlir/quantization/stablehlo/cc/saved_model_import.cc

    #include "tsl/platform/errors.h"
    #include "tsl/platform/statusor.h"
    
    namespace mlir::quant::stablehlo {
    
    using ::stablehlo::quantization::QuantizationConfig;
    using ::tensorflow::MLIRImportOptions;
    using ::tensorflow::SavedModelBundle;
    using ::tensorflow::SavedModelSignatureDefsToMlirImport;
    using ::tensorflow::quantization::PreprocessAndFreezeGraph;
    
    absl::StatusOr<ImportedMlirModuleOp> SavedModelToMlirModuleOp(
    - Last Modified: Wed Apr 24 12:49:45 UTC 2024
    - 6.4K bytes
  8. tensorflow/compiler/mlir/lite/quantization/stablehlo/quantization.h

    // XlaCallModuleOp. Returns a non-OK status if quantization fails, or any of
    // `saved_model_bundle` or `quantization_py_function_lib` is a nullptr.
    absl::StatusOr<mlir::ModuleOp> RunQuantization(
        const SavedModelBundle* saved_model_bundle,
        absl::string_view saved_model_dir,
        const std::unordered_set<std::string>& saved_model_tags,
        const stablehlo::quantization::QuantizationConfig& quantization_config,
    - Last Modified: Tue Mar 19 02:44:03 UTC 2024
    - 2.7K bytes
  9. tensorflow/cc/saved_model/loader.cc

                          const RunOptions& run_options, const string& export_dir,
                          const std::unordered_set<string>& tags,
                          SavedModelBundle* const bundle) {
      return LoadSavedModelGeneric<SavedModelBundle>(session_options, run_options,
                                                     export_dir, tags, bundle);
    }
    
    Status RestoreSession(const RunOptions& run_options,
    - Last Modified: Tue Apr 02 04:36:00 UTC 2024
    - 23K bytes
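
    Once LoadSavedModel returns OK, inference runs through the session held by
    the bundle. A minimal sketch; the feed and fetch tensor names below are
    placeholders, and the real names come from the model's SignatureDefs in
    bundle.meta_graph_def:

    #include <string>
    #include <utility>
    #include <vector>
    #include "tensorflow/cc/saved_model/loader.h"
    #include "tensorflow/core/framework/tensor.h"

    // Sketch: run one step on a loaded bundle. The tensor names are
    // placeholders; look them up in the model's SignatureDefs.
    tensorflow::Status RunOnce(tensorflow::SavedModelBundle& bundle,
                               const tensorflow::Tensor& input,
                               std::vector<tensorflow::Tensor>* outputs) {
      const std::vector<std::pair<std::string, tensorflow::Tensor>> feeds = {
          {"serving_default_input:0", input}};
      const std::vector<std::string> fetches = {"StatefulPartitionedCall:0"};
      return bundle.session->Run(feeds, fetches, /*target_node_names=*/{},
                                 outputs);
    }
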
  10. tensorflow/compiler/mlir/lite/python/tf_tfl_flatbuffer_helpers.h

        mlir::OwningOpRef<mlir::ModuleOp> module,
        const mlir::TFL::PassConfig& pass_config,
        const std::unordered_set<std::string>& saved_model_tags, string* result,
        SavedModelBundle* saved_model_bundle,
        const quantization::PyFunctionLibrary* quantization_py_function_lib);
    
    // Give a warning for any unused flags that have been specified.
    void WarningUnusedFlags(const toco::ModelFlags& model_flags,
    - Last Modified: Sun May 12 12:39:37 UTC 2024
    - 3.2K bytes