Results 1 - 10 of 14 for saved_model_path (0.25 sec)

  1. tensorflow/compiler/mlir/python/mlir.h

    //
    // Args:
    //   saved_model_path: File path from which to load the SavedModel.
    //   exported_names_str: Comma-separated list of names to export.
    //                       Empty means "export all".
    //
    // Returns:
    //   A string of textual MLIR representing the raw imported SavedModel.
    std::string ExperimentalConvertSavedModelToMlir(
        const std::string &saved_model_path, const std::string &exported_names_str,
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Fri Apr 14 23:44:01 UTC 2023
    - 5.6K bytes
    - Viewed (0)
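The header above documents only the C++ entry point. As a rough illustration of how it is usually reached from Python, here is a minimal sketch assuming the tf.mlir.experimental.convert_saved_model wrapper is present in your TensorFlow build; if your build lacks it, treat the call (and its show_debug_info parameter) as hypothetical.

    # Minimal sketch of importing a SavedModel as textual MLIR from Python.
    # Assumption: tf.mlir.experimental.convert_saved_model wraps the C++
    # ExperimentalConvertSavedModelToMlir shown above; verify it exists in
    # your TensorFlow version before relying on it.
    import tensorflow as tf

    saved_model_path = "/tmp/test.saved_model"  # file path from which to load the SavedModel
    mlir_text = tf.mlir.experimental.convert_saved_model(
        saved_model_path,
        exported_names="",       # empty string means "export all", per the header comment
        show_debug_info=False,   # assumed parameter name
    )
    print(mlir_text[:200])       # textual MLIR of the raw imported SavedModel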
  2. tensorflow/compiler/mlir/quantization/stablehlo/cc/saved_model_import.h

        absl::string_view saved_model_path,
        const std::unordered_set<std::string>& tags,
        const std::vector<std::string>& signature_keys,
        MLIRContext& ctx ABSL_ATTRIBUTE_LIFETIME_BOUND);
    
    // Gets the function aliases from the SavedModel.
    absl::StatusOr<absl::flat_hash_map<FunctionName, FunctionAlias>>
    GetFunctionAliases(absl::string_view saved_model_path,
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed Apr 24 12:49:45 UTC 2024
    - 4.2K bytes
    - Viewed (0)
  3. tensorflow/compiler/mlir/quantization/tensorflow/python/quantize_model.h

    absl::StatusOr<ExportedModel> QuantizeQatModel(
        absl::string_view saved_model_path,
        const std::vector<std::string>& signature_keys,
        const std::unordered_set<std::string>& tags,
        const QuantizationOptions& quantization_options);
    
    // Applies post-training dynamic-range quantization to the model.
    absl::StatusOr<ExportedModel> QuantizeDynamicRangePtq(
        absl::string_view saved_model_path,
        const std::vector<std::string>& signature_keys,
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Mar 28 15:31:08 UTC 2024
    - 3.3K bytes
    - Viewed (0)
  4. tensorflow/compiler/mlir/quantization/stablehlo/cc/saved_model_import.cc

    }
    
    absl::StatusOr<absl::flat_hash_map<FunctionName, FunctionAlias>>
    GetFunctionAliases(absl::string_view saved_model_path,
                       const std::unordered_set<std::string>& tags) {
      tensorflow::MetaGraphDef meta_graph;
      TF_RETURN_IF_ERROR(tensorflow::ReadMetaGraphDefFromSavedModel(
          saved_model_path, tags, &meta_graph));
    
      absl::flat_hash_map<FunctionName, FunctionAlias> function_aliases(
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed Apr 24 12:49:45 UTC 2024
    - 6.4K bytes
    - Viewed (0)
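GetFunctionAliases above reads the function-alias map out of the SavedModel's MetaGraphDef. For context, here is a short Python sketch of where those aliases come from and how to inspect them; the proto field path (meta_info_def.function_aliases) reflects my reading of the MetaGraphDef proto and should be treated as an assumption.

    # Sketch: write function aliases via SaveOptions, then read them back from
    # saved_model.pb -- the same map GetFunctionAliases consumes in C++.
    # Assumption: MetaGraphDef.meta_info_def.function_aliases is the map field
    # (function name -> alias) populated by SaveOptions(function_aliases=...).
    import os

    import tensorflow as tf
    from tensorflow.core.protobuf import saved_model_pb2


    class Square(tf.Module):

        @tf.function(input_signature=[tf.TensorSpec([None], tf.float32)])
        def square(self, x):
            return x * x


    saved_model_path = "/tmp/aliased.saved_model"
    model = Square()
    tf.saved_model.save(
        model,
        saved_model_path,
        options=tf.saved_model.SaveOptions(function_aliases={"square_fn": model.square}),
    )

    # Inspect the aliases directly from the serialized MetaGraphDef.
    proto = saved_model_pb2.SavedModel()
    with open(os.path.join(saved_model_path, "saved_model.pb"), "rb") as f:
        proto.ParseFromString(f.read())
    print(dict(proto.meta_graphs[0].meta_info_def.function_aliases))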
  5. tensorflow/cc/saved_model/experimental/public/saved_model_api.h

    };
    
    inline std::unique_ptr<SavedModelAPI> SavedModelAPI::Load(
        const std::string& saved_model_path, const Runtime& runtime, Status* status,
        const std::unordered_set<std::string>* tags) {
      TF_SavedModel* saved_model = nullptr;
    
      if (tags == nullptr) {
        saved_model =
            TF_LoadSavedModel(saved_model_path.c_str(), runtime.GetTFEContext(),
                              status->GetTFStatus());
      } else {
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed Nov 04 00:45:47 UTC 2020
    - 6.4K bytes
    - Viewed (0)
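SavedModelAPI::Load above takes an optional tag set, with a null pointer meaning "use the default tags". The snippet below is not that API; it is the public Python load path, shown only because it makes the same distinction and may help in reading the C++ snippet.

    # Python analogue of the load-with-optional-tags flow (public tf.saved_model
    # API, not the experimental C++ SavedModelAPI shown above).
    import tensorflow as tf

    saved_model_path = "/tmp/test.saved_model"

    # Default tag set -- comparable to passing tags == nullptr above.
    model = tf.saved_model.load(saved_model_path)

    # Explicit tag set, e.g. the standard serving tag.
    model_with_tags = tf.saved_model.load(saved_model_path, tags=[tf.saved_model.SERVING])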
  6. tensorflow/compiler/mlir/lite/tests/debuginfo/saved_model_error.py

        # save the model
        test_model = TestModule()
        saved_model_path = '/tmp/test.saved_model'
        save_options = tf.saved_model.SaveOptions(save_debug_info=True)
        tf.saved_model.save(test_model, saved_model_path, options=save_options)
    
        # load the model and convert
        converter = tf.lite.TFLiteConverter.from_saved_model(saved_model_path)
        converter.convert()
    
    # pylint: disable=line-too-long
    
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue Sep 28 21:37:05 UTC 2021
    - 2.8K bytes
    - Viewed (0)
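The snippet above elides the TestModule definition. A self-contained version of the same save-with-debug-info and convert flow, with a hypothetical stand-in module, could look like this:

    # Self-contained sketch of the flow in saved_model_error.py; TestModule here
    # is a hypothetical stand-in for the module defined in that test.
    import tensorflow as tf


    class TestModule(tf.Module):

        @tf.function(input_signature=[tf.TensorSpec([None], tf.float32)])
        def __call__(self, x):
            return tf.nn.relu(x)


    # Save the model, keeping debug info so converter errors map back to source.
    # The explicit signature is added here so the converter finds a serving signature.
    test_model = TestModule()
    saved_model_path = '/tmp/test.saved_model'
    save_options = tf.saved_model.SaveOptions(save_debug_info=True)
    tf.saved_model.save(
        test_model, saved_model_path,
        signatures=test_model.__call__.get_concrete_function(),
        options=save_options)

    # Load the model and convert it to TFLite.
    converter = tf.lite.TFLiteConverter.from_saved_model(saved_model_path)
    tflite_flatbuffer = converter.convert()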
  7. tensorflow/compiler/mlir/quantization/tensorflow/python/pywrap_function_lib.cc

                               exported_model, src_saved_model_path, tags,
                               signature_def_map);
      }
    
      std::optional<bool> RunCalibration(
          const absl::string_view saved_model_path,
          const std::vector<std::string>& signature_keys,
          const std::unordered_set<std::string>& tags,
          const bool force_graph_mode_calibration,
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue Apr 09 06:33:29 UTC 2024
    - 5K bytes
    - Viewed (0)
  8. tensorflow/cc/saved_model/metrics.h

    // Returns "/tensorflow/core/saved_model/write/path" cell, which contains
    // the saved_model_path of the SM when it is exported.
    monitoring::GaugeCell<std::string>& SavedModelWritePath();
    
    // Returns "/tensorflow/core/saved_model/write/path_and_fingerprint" cell, which
    // contains the path (saved_model_path) and fingerprint (concatenation of
    // graph_def_program_hash, signature_def_hash, saved_object_graph_hash,
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Jan 18 23:43:59 UTC 2024
    - 6.7K bytes
    - Viewed (0)
  9. tensorflow/compiler/mlir/quantization/tensorflow/python/pywrap_function_lib.pyi

          tags: set[str],
          serialized_signature_def_map: dict[str, bytes],
      ) -> Optional[bool]: ...
      # LINT.ThenChange()
    
      # LINT.IfChange(run_calibration)
      def run_calibration(
          self,
          saved_model_path: str,
          signature_keys: list[str],
          tags: set[str],
          force_graph_mode_calibration: bool,
          # Value type: RepresentativeDatasetFile.
          representative_dataset_file_map_serialized: dict[str, bytes],
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue Apr 09 06:33:29 UTC 2024
    - 1.6K bytes
    - Viewed (0)
  10. tensorflow/compiler/mlir/quantization/tensorflow/python/py_function_lib.h

              signature_def_map) const = 0;
      // LINT.ThenChange(
      //     pywrap_function_lib.pyi:save_exported_model,
      //     py_function_lib.py:save_exported_model,
      // )
    
      // Runs calibration on a model saved at `saved_model_path`. `exported_model`
      // should be the corresponding exported model resulting from the
      // pre-calibration step. `signature_keys` is a set of keys that identify a
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue Apr 09 06:33:29 UTC 2024
    - 5.2K bytes
    - Viewed (0)
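The comment above describes calibrating a model saved at `saved_model_path` inside the internal quantization pipeline. The sketch below is not that hook; it is the public TFLite post-training quantization path, included only because it exercises the same idea of feeding representative inputs to calibrate a SavedModel. The input shape and dataset generator are illustrative assumptions.

    # Public TFLite post-training quantization analogue of calibrating a model
    # at `saved_model_path` with representative inputs (not the internal
    # py_function_lib RunCalibration hook).
    import numpy as np
    import tensorflow as tf

    saved_model_path = '/tmp/test.saved_model'


    def representative_dataset():
        # Shape must match the model's input signature; [1, 4] is a placeholder.
        for _ in range(100):
            yield [np.random.rand(1, 4).astype(np.float32)]


    converter = tf.lite.TFLiteConverter.from_saved_model(saved_model_path)
    converter.optimizations = [tf.lite.Optimize.DEFAULT]
    converter.representative_dataset = representative_dataset
    quantized_flatbuffer = converter.convert()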