Search Options

Results per page
Sort
Preferred Languages
Advanced

Results 1 - 4 of 4 for quantize_saved_model (0.23 sec)

  1. tensorflow/compiler/mlir/quantization/stablehlo/python/integration_test/quantize_model_test.py

            pipeline_config=qc.PipelineConfig(
                merge_fusion_with_dequantize=merge_fusion_with_dequantize
            ),
        )
        quantization.quantize_saved_model(
            self._input_saved_model_path,
            self._output_saved_model_path,
            config,
        )
    
        expected_outputs = model.matmul(input_data)
    
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue May 14 06:31:57 UTC 2024
    - 51.4K bytes
    - Viewed (0)
  2. tensorflow/compiler/mlir/quantization/stablehlo/python/quantization.py

      for spec in quantization_specs.specs:
        if spec.method.HasField(method):
          return True
      return False
    
    
    # TODO: b/310594193 - Export API to pip package.
    def quantize_saved_model(
        src_saved_model_path: str,
        dst_saved_model_path: str,
        config: qc.QuantizationConfig,
    ) -> None:
      """Quantizes a saved model.
    
      Args:
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Mon Apr 15 12:53:33 UTC 2024
    - 4.6K bytes
    - Viewed (0)
  3. tensorflow/compiler/mlir/quantization/tensorflow/python/quantize_model.py

          == _PresetMethod.METHOD_STATIC_RANGE_INT8
      ):
        # Check and populate calibration options.
        _populate_calibration_options(quantization_options)
    
    
    @tf_export.tf_export('quantization.experimental.quantize_saved_model')
    def quantize(
        saved_model_path: str,
        output_directory: Optional[str] = None,
        quantization_options: Optional[_QuantizationOptions] = None,
        representative_dataset: Optional[
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Fri May 17 03:36:50 UTC 2024
    - 34.2K bytes
    - Viewed (0)
  4. tensorflow/compiler/mlir/quantization/tensorflow/python/representative_dataset.py

      quantization_options = tf.quantization.experimental.QuantizationOptions(
          signature_keys=['serving_default'],
          representative_datasets=dataset_file_map,
      )
      tf.quantization.experimental.quantize_saved_model(
          '/tmp/input_model',
          '/tmp/output_model',
          quantization_options=quantization_options,
      )
      ```
      """
    
      def __init__(
          self,
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Fri Mar 22 22:55:22 UTC 2024
    - 14.2K bytes
    - Viewed (0)
Back to top