- Sort Score
- Results per page: 10
- Languages All
Results 1 - 10 of 65 for quantization_options (0.56 sec)
-
tensorflow/compiler/mlir/quantization/stablehlo/quantization_options.proto
syntax = "proto3"; package stablehlo.quantization; option cc_enable_arenas = true; // Defines various options to specify and control the behavior of the // StableHLO quantizer. // NEXT ID: 2 message QuantizationOptions { QuantizationMethod quantization_method = 1; } // NEXT ID: 3 message QuantizationMethod { // Quantization Method can be either preset or custom. oneof quantization_method {
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Jun 22 02:20:05 UTC 2023 - 3.6K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/quantization_options.proto
// None, the default configuration is "do not quantize the model". // 2) A set of supported operations. // 3) Unit wise quantization precision. // 4) Target hardware name. // NEXT ID: 18 message QuantizationOptions { // The default quantization configuration for the model. If the below // unit-wise configuration does not exist, we use this quantization // configuration for the entire model. For each method, default configuration
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Mar 19 06:31:19 UTC 2024 - 9.2K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/python/quantize_model.py
Args: quantization_options: An instance of QuantizationOptions. """ if quantization_options.op_set == quant_opts_pb2.OpSet.OP_SET_UNSPECIFIED: quantization_options.op_set = quant_opts_pb2.OpSet.XLA if not quantization_options.tags: quantization_options.tags.append(tag_constants.SERVING) if not quantization_options.signature_keys:
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri May 17 03:36:50 UTC 2024 - 34.2K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/python/quantize_model.cc
ImportAndPreprocessSavedModel( saved_model_path, {quantization_options.signature_keys().begin(), quantization_options.signature_keys().end()}, {quantization_options.tags().begin(), quantization_options.tags().end()}, context.get(), /*is_inliner_run=*/true, /*run_tf_to_stablehlo=*/is_stablehlo,
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri May 17 03:36:50 UTC 2024 - 23.8K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/quantize_passes.cc
quantization_options.quantization_method().preset_method(), quantization_options.op_set())); pm.addPass(mlir::quant::CreateQuantizeCompositeFunctionsPass( quantization_options.quantization_method().preset_method(), quantization_options.op_set(), quantization_options.enable_per_channel_quantization(), quantization_options.min_num_elements_for_weights(),
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri May 10 04:07:09 UTC 2024 - 9.9K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/quantize_passes.h
void AddQuantizePtqDynamicRangePasses( mlir::OpPassManager &pm, const QuantizationOptions &quantization_options, std::optional<const absl::string_view> mlir_dump_file_prefix = std::nullopt); void AddQuantizeWeightOnlyPasses( mlir::OpPassManager &pm, const QuantizationOptions &quantization_options, std::optional<const absl::string_view> mlir_dump_file_prefix = std::nullopt);
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Mon Jan 08 10:03:23 UTC 2024 - 2.3K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/stablehlo/quantize_passes.cc
#include "tensorflow/compiler/mlir/quantization/stablehlo/quantization_options.pb.h" #include "tensorflow/compiler/mlir/quantization/stablehlo/utils/fill_quantization_options.h" namespace stablehlo { namespace quantization { void AddQuantizationPasses(mlir::PassManager& pass_manager, const QuantizationOptions& quantization_options) { QuantizationOptions quantization_options_ = quantization_options;
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Mar 05 08:32:43 UTC 2024 - 2.3K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/python/pywrap_quantize_model.cc
std::unordered_set<std::string> tags; tags.insert(quantization_options.tags().begin(), quantization_options.tags().end()); const absl::StatusOr<ExportedModel> exported_model = QuantizeQatModel( src_saved_model_path, signature_keys, tags, quantization_options); if (!exported_model.ok()) return exported_model.status();
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Apr 09 06:33:29 UTC 2024 - 12K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/python/quantize_model.h
const QuantizationOptions& quantization_options); // Applies post-training dynamic-range quantization to the model. absl::StatusOr<ExportedModel> QuantizeDynamicRangePtq( absl::string_view saved_model_path, const std::vector<std::string>& signature_keys, const std::unordered_set<std::string>& tags, const QuantizationOptions& quantization_options);
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Mar 28 15:31:08 UTC 2024 - 3.3K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/python/integration_test/quantize_model_test.py
'Do not specify both the `representative_dataset` argument and the' ' `representative_datasets` field in `QuantizationOptions`', ): quantize_model.quantize( self._input_saved_model_path, self._output_saved_model_path, quantization_options=quantization_options, representative_dataset=representative_dataset, ) converted_model = quantize_model.quantize(
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri May 17 03:36:50 UTC 2024 - 235.6K bytes - Viewed (0)