Results 1 - 10 of 10 for saved_model_path (0.21 sec)

  1. tensorflow/compiler/mlir/quantization/stablehlo/python/integration_test/quantize_model_test_base.py

            tags
        ).meta_info_def.function_aliases
    
      def _create_matmul_model(
          self,
          input_shape: Sequence[int],
          weight_shape: Sequence[int],
          saved_model_path: str,
          bias_fn: Optional[ops.Operation] = None,
          activation_fn: Optional[ops.Operation] = None,
      ) -> module.Module:
        class MatmulModel(module.Module):
          """A simple model with a single matmul.
    - Last Modified: Tue May 14 06:31:57 UTC 2024
    - 18.2K bytes
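
    The snippet above shows only the signature of the `_create_matmul_model` test helper. As a rough, hypothetical sketch of what such a helper builds, here is a minimal tf.Module with a single matmul exported to a SavedModel directory; the class body, shapes, and output path are illustrative and not taken from the test base class.

    # Minimal sketch (not the actual test helper): a tf.Module wrapping a single
    # matmul, exported as a SavedModel. Shapes, names, and path are illustrative.
    import tensorflow as tf


    class MatmulModel(tf.Module):
      """A toy model with a single matmul, for illustration only."""

      def __init__(self, weight_shape):
        super().__init__()
        self.filters = tf.Variable(
            tf.random.uniform(weight_shape, minval=-1.0, maxval=1.0),
            name='filters')

      @tf.function(input_signature=[tf.TensorSpec([1, 4], tf.float32)])
      def matmul(self, input_tensor):
        return {'output': tf.linalg.matmul(input_tensor, self.filters)}


    model = MatmulModel(weight_shape=[4, 3])
    tf.saved_model.save(model, '/tmp/matmul_model', signatures=model.matmul)
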
  2. tensorflow/cc/saved_model/metrics.cc

    // Gauge that contains the path (saved_model_path) of the newly written
    // SavedModel.
    auto* saved_model_write_path = monitoring::Gauge<std::string, 0>::New(
        "/tensorflow/core/saved_model/write/path",
        "The path (saved_model_path) of the exported SavedModel.");
    
    // Gauge that contains the path (saved_model_path) and the singleprint
    // (concatenation of graph_def_program_hash, signature_def_hash,
    - Last Modified: Thu Jan 18 23:43:59 UTC 2024
    - 13.4K bytes
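
    The gauge above records, on the C++ side, the path of a newly written SavedModel. As a loose Python-side illustration of the same monitoring primitive, a string gauge can be created and set as below; this uses the Python `monitoring` helpers rather than the C++ gauge registered in metrics.cc, and the metric name is made up.

    # Illustrative only: a Python string gauge analogous to the C++
    # monitoring::Gauge<std::string, 0> above. The metric name is hypothetical,
    # not the real "/tensorflow/core/saved_model/write/path" gauge.
    from tensorflow.python.eager import monitoring

    example_write_path_gauge = monitoring.StringGauge(
        '/example/saved_model/write/path',
        'The path (saved_model_path) of the exported SavedModel.')

    # Record a path, much as the C++ code records the export directory.
    example_write_path_gauge.get_cell().set('/tmp/exported_saved_model')
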
  3. tensorflow/compiler/mlir/quantization/tensorflow/python/quantize_model.cc

          function_aliases = GetFunctionAliases(saved_model_path, tags);
      if (!function_aliases.ok()) {
        return absl::InternalError(absl::StrCat(
            "Failed to get function alias: ", function_aliases.status().message()));
      }
    
      absl::StatusOr<mlir::OwningOpRef<mlir::ModuleOp>> module =
          ImportAndPreprocessSavedModel(
              saved_model_path, signature_keys, tags, context.get(),
    - Last Modified: Fri May 17 03:36:50 UTC 2024
    - 23.8K bytes
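
    GetFunctionAliases reads the function_aliases map from the SavedModel's meta_info_def, the same field the test base class in result 1 inspects. Those aliases are attached when the model is saved; below is a minimal sketch of doing so with the public API, where the module, function, and alias names are illustrative.

    # Sketch: attach a function alias at save time so that tooling (such as the
    # quantizer above) can later find it via meta_info_def.function_aliases.
    import tensorflow as tf


    class Model(tf.Module):

      @tf.function(input_signature=[tf.TensorSpec([None, 4], tf.float32)])
      def preprocess(self, x):
        return x * 2.0


    model = Model()
    tf.saved_model.save(
        model,
        '/tmp/aliased_model',
        signatures=model.preprocess,
        options=tf.saved_model.SaveOptions(
            function_aliases={'preprocess_alias': model.preprocess}))
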
  4. tensorflow/compiler/mlir/python/mlir.cc

    }
    
    std::string ExperimentalConvertSavedModelToMlir(
        const std::string& saved_model_path, const std::string& exported_names_str,
        bool show_debug_info, TF_Status* status) {
      // Load the saved model into a SavedModelV2Bundle.
    
      tensorflow::SavedModelV2Bundle bundle;
      auto load_status =
          tensorflow::SavedModelV2Bundle::Load(saved_model_path, &bundle);
      if (!load_status.ok()) {
    - Last Modified: Fri May 03 18:16:49 UTC 2024
    - 19.3K bytes
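
    ExperimentalConvertSavedModelToMlir loads the directory as a SavedModelV2Bundle and returns the module as MLIR text. A sketch of driving it from Python follows; the binding name tf.mlir.experimental.convert_saved_model and its exact signature vary by TensorFlow version, so both should be treated as assumptions.

    # Assumption: the Python binding is exposed as
    # tf.mlir.experimental.convert_saved_model in this TensorFlow build.
    import tensorflow as tf

    mlir_text = tf.mlir.experimental.convert_saved_model(
        '/tmp/my_saved_model',   # saved_model_path (placeholder)
        'serving_default',       # exported names, comma-separated
        False)                   # show_debug_info
    print(mlir_text[:200])
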
  5. tensorflow/compiler/mlir/quantization/tensorflow/python/py_function_lib.py

            func=functools.partial(
                _run_calibration,
                saved_model_path,
                signature_keys,
                tags,
                force_graph_mode_calibration,
                dataset_file_map,
            ),
            error_msg=(
                f'Failed to run calibration on model "{saved_model_path}",'
                f' signature_keys: {signature_keys}, tags: {tags}.'
            ),
        )
    
    - Last Modified: Fri May 31 05:32:11 UTC 2024
    - 27.4K bytes
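
    The calibration code wraps _run_calibration in a functools.partial together with a formatted error message, presumably so a shared runner can execute it and report failures uniformly. Here is a minimal sketch of that pattern, where run_with_error_message and run_calibration are hypothetical stand-ins for the library's internals.

    # Sketch of the "partial + error message" pattern from py_function_lib.py.
    import functools


    def run_with_error_message(func, error_msg):
      try:
        return func()
      except Exception as e:  # Re-raise with the caller-supplied context.
        raise RuntimeError(error_msg) from e


    def run_calibration(saved_model_path, signature_keys, tags):
      print(f'Calibrating {saved_model_path} for {signature_keys} ({tags})')


    saved_model_path = '/tmp/my_saved_model'
    signature_keys = ['serving_default']
    tags = {'serve'}

    run_with_error_message(
        func=functools.partial(
            run_calibration, saved_model_path, signature_keys, tags),
        error_msg=(
            f'Failed to run calibration on model "{saved_model_path}",'
            f' signature_keys: {signature_keys}, tags: {tags}.'))
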
  6. tensorflow/compiler/mlir/quantization/tensorflow/python/quantize_model.py

    _DYNAMIC_RANGE_DEFAULT_MIN_NUM_ELEMENTS_FOR_WEIGHTS = 1024
    
    
    def _is_qat_saved_model(saved_model_path: str):
      """Checks if the SavedModel is QAT-enabled by looking for 'FakeQuant' ops."""
      saved_model_proto = saved_model_loader.parse_saved_model(saved_model_path)
      for meta_graph in saved_model_proto.meta_graphs:
        if any(
            node.op.startswith('FakeQuant') for node in meta_graph.graph_def.node
    - Last Modified: Fri May 17 03:36:50 UTC 2024
    - 34.2K bytes
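
    _is_qat_saved_model decides whether a SavedModel came from quantization-aware training by scanning its GraphDef nodes for FakeQuant* ops. Below is a standalone sketch of the same check; the loader import path is an assumption and may differ between TensorFlow versions.

    # Sketch: detect a QAT SavedModel by looking for FakeQuant* ops, mirroring
    # _is_qat_saved_model above.
    from tensorflow.python.saved_model import loader_impl


    def is_qat_saved_model(saved_model_path: str) -> bool:
      saved_model_proto = loader_impl.parse_saved_model(saved_model_path)
      for meta_graph in saved_model_proto.meta_graphs:
        # Main graph nodes.
        if any(node.op.startswith('FakeQuant')
               for node in meta_graph.graph_def.node):
          return True
        # Also scan the function library, since QAT ops may live in functions.
        if any(node.op.startswith('FakeQuant')
               for func in meta_graph.graph_def.library.function
               for node in func.node_def):
          return True
      return False
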
  7. tensorflow/compiler/mlir/quantization/tensorflow/python/save_model.py

    _SignatureDefMap = Mapping[str, meta_graph_pb2.SignatureDef]
    
    
    def get_signatures_from_saved_model(
        saved_model_path: str,
        signature_keys: Optional[Sequence[str]] = None,
        tags: Optional[Collection[str]] = None,
    ) -> Dict[str, meta_graph_pb2.SignatureDef]:
      """Gets a map from signature keys to their SignatureDef.
    
      Args:
        saved_model_path: Path to the saved model.
    - Last Modified: Thu Apr 25 01:09:50 UTC 2024
    - 12.3K bytes
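
    get_signatures_from_saved_model maps signature keys to their SignatureDef. With the public API the same information can be reached either through the loaded object's .signatures or by parsing the SavedModel proto directly; a short sketch of both follows, with the path and tag set as placeholders.

    # Sketch: two ways to inspect the signatures of a SavedModel.
    import tensorflow as tf
    from tensorflow.python.saved_model import loader_impl

    saved_model_path = '/tmp/my_saved_model'  # Placeholder path.

    # 1) Via the loaded object: signature keys map to concrete functions.
    loaded = tf.saved_model.load(saved_model_path, tags=['serve'])
    print(list(loaded.signatures.keys()))  # e.g. ['serving_default']

    # 2) Via the proto: signature keys map to SignatureDef messages, which is
    #    what save_model.py returns.
    saved_model_proto = loader_impl.parse_saved_model(saved_model_path)
    for meta_graph in saved_model_proto.meta_graphs:
      if set(meta_graph.meta_info_def.tags) == {'serve'}:
        print(list(meta_graph.signature_def.keys()))
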
  8. tensorflow/compiler/mlir/quantization/tensorflow/python/pywrap_quantize_model.cc

          the function matching the signature key.
    
          Raises `StatusNotOk` exception when the run was unsuccessful.
          )pbdoc",
          py::arg("saved_model_path"), py::arg("dst_saved_model_path"),
          py::arg("quantization_options_serialized"), py::kw_only(),
          py::arg("signature_keys"), py::arg("signature_def_map_serialized"),
          py::arg("py_function_library"),
    - Last Modified: Tue Apr 09 06:33:29 UTC 2024
    - 12K bytes
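
    The binding above takes the source and destination paths positionally and, after py::kw_only(), the remaining arguments as keyword-only. Since the snippet does not show the bound function's name, the sketch below is a toy Python signature that only illustrates that calling convention, not the actual pywrap_quantize_model API.

    # Toy illustration of the calling convention implied by py::kw_only():
    # positional paths and serialized options, then keyword-only arguments.
    def quantize_like_binding(saved_model_path, dst_saved_model_path,
                              quantization_options_serialized, *,
                              signature_keys, signature_def_map_serialized,
                              py_function_library):
      print(f'{saved_model_path} -> {dst_saved_model_path}')


    quantize_like_binding(
        '/tmp/model', '/tmp/model_quantized', b'',
        signature_keys=['serving_default'],
        signature_def_map_serialized={},
        py_function_library=None)
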
  9. tensorflow/c/experimental/saved_model/internal/saved_model_api_test.cc

    #include "tensorflow/core/platform/tstring.h"
    
    namespace {
    
    using tensorflow::tstring;
    
    constexpr char kTestData[] = "cc/saved_model/testdata";
    const char* kServeTag[] = {"serve"};
    
    std::string SavedModelPath(tensorflow::StringPiece saved_model_dir) {
      return tensorflow::io::JoinPath(tensorflow::testing::TensorFlowSrcRoot(),
                                      kTestData, saved_model_dir);
    }
    
    - Last Modified: Tue Apr 23 08:08:45 UTC 2024
    - 21.3K bytes
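
    The SavedModelPath helper simply joins the TensorFlow source root, the cc/saved_model/testdata directory, and the model directory name. An equivalent sketch in Python, with the source root and model directory as placeholders:

    # Sketch of the SavedModelPath test helper using os.path.join.
    import os

    TEST_DATA = 'cc/saved_model/testdata'
    TENSORFLOW_SRC_ROOT = '/path/to/tensorflow'  # Placeholder for the source root.


    def saved_model_path(saved_model_dir: str) -> str:
      return os.path.join(TENSORFLOW_SRC_ROOT, TEST_DATA, saved_model_dir)


    print(saved_model_path('some_model'))  # Placeholder model directory.
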
  10. tensorflow/cc/saved_model/fingerprinting_utils.cc

      // Set the saved_model_checksum.
      TF_ASSIGN_OR_RETURN(uint64_t saved_model_hash,
                          HashFields(chunk_metadata.message(), reader, chunks_info,
                                     {}, &saved_model));
      saved_model_hash = FingerprintCat64(
          saved_model_hash, Fingerprint64(SerializeProto(saved_model)));
      fingerprint_def.set_saved_model_checksum(saved_model_hash);
    
      // Fill saved_model with only relevant chunk(s).
    - Last Modified: Tue Feb 20 22:19:55 UTC 2024
    - 20.2K bytes
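
    The fingerprinting code computes saved_model_checksum by hashing the chunked proto fields and folding in the hash of the serialized SavedModel. The resulting fingerprint of an already-written SavedModel can be read back from Python, assuming a TensorFlow version that exposes the experimental fingerprint reader (roughly 2.12 and later); the call and attribute names below should be treated as assumptions.

    # Assumption: tf.saved_model.experimental.read_fingerprint is available in
    # this TensorFlow version. The path is a placeholder.
    import tensorflow as tf

    fingerprint = tf.saved_model.experimental.read_fingerprint('/tmp/my_saved_model')
    print(fingerprint.saved_model_checksum)
    print(fingerprint.graph_def_program_hash)
    print(fingerprint.signature_def_hash)
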