Results 1 - 4 of 4 for freeze_all_variables (0.28 sec)

  1. tensorflow/compiler/mlir/quantization/tensorflow/python/quantize_model.cc

      absl::StatusOr<ExportedModel> exported_model = ModuleOpToExportedModel(
          *cloned_module_ref, context, kTfQuantPtqPreCalibrationStepName,
          /*unfreeze_constants=*/!quantization_options.freeze_all_variables(),
          function_aliases);
      if (!exported_model.status().ok()) {
        return absl::InternalError(
            absl::StrCat("Failed to export calibration model: ",
  2. tensorflow/compiler/mlir/quantization/tensorflow/python/quantize_model.py

        quantization_options.signature_keys.append(
            signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY
        )
    
      if not quantization_options.HasField('freeze_all_variables'):
        quantization_options.freeze_all_variables = True
    
      if quantization_options.enable_legacy_weight_only:
        raise ValueError(
            'Legacy weight-only is deprecated. Use weight-only quantization method.'
        )
    
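    The defaulting logic above hinges on explicit field presence: HasField only
    returns True once the optional bool has been assigned. A minimal sketch of that
    behavior, assuming the module generated from quantization_options.proto is
    importable as quant_opts_pb2 (the alias used in the test in result 4 below):

        from tensorflow.compiler.mlir.quantization.tensorflow import (
            quantization_options_pb2 as quant_opts_pb2,
        )

        opts = quant_opts_pb2.QuantizationOptions()

        # Unset: HasField() is False, so quantize_model.py fills in the
        # default of True.
        assert not opts.HasField('freeze_all_variables')

        # Once assigned (even to False), the field counts as present and the
        # defaulting branch above is skipped.
        opts.freeze_all_variables = False
        assert opts.HasField('freeze_all_variables')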
  3. tensorflow/compiler/mlir/quantization/tensorflow/quantization_options.proto

      // Setting this to `false` is an experimental feature and quantization may
      // fail. To quantize models larger than 2 GiB, this should be set to `false`.
      // If not set, it defaults to `true`.
      optional bool freeze_all_variables = 9;
    
      // Enables channel-wise quantization. By default, channel-wise quantization is
      // not applied regardless of the op support. Currently, it is supported for
  4. tensorflow/compiler/mlir/quantization/tensorflow/python/integration_test/quantize_model_test.py

                preset_method=_PresetMethod.METHOD_STATIC_RANGE_INT8
            ),
            tags=tags,
            signature_keys=['serving_default'],
            op_set=quant_opts_pb2.TF,
            freeze_all_variables=False,
        )
    
        converted_model = quantize_model.quantize(
            self._input_saved_model_path,
            self._output_saved_model_path,
            quantization_options,
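    Taken together, the results trace the flag from its proto definition (result 3)
    through the Python defaulting logic (result 2) to the export step in C++
    (result 1). A minimal end-to-end sketch, assuming the import aliases used in the
    test above; the SavedModel paths and the choice of static-range int8 are
    illustrative, not taken from the results:

        from tensorflow.compiler.mlir.quantization.tensorflow import (
            quantization_options_pb2 as quant_opts_pb2,
        )
        from tensorflow.compiler.mlir.quantization.tensorflow.python import (
            quantize_model,
        )

        # Leave variables unfrozen, e.g. for a SavedModel larger than 2 GiB
        # (see quantization_options.proto in result 3).
        quantization_options = quant_opts_pb2.QuantizationOptions(
            quantization_method=quant_opts_pb2.QuantizationMethod(
                preset_method=quant_opts_pb2.QuantizationMethod.METHOD_STATIC_RANGE_INT8
            ),
            signature_keys=['serving_default'],
            op_set=quant_opts_pb2.TF,
            freeze_all_variables=False,
        )

        converted_model = quantize_model.quantize(
            '/tmp/input_saved_model',   # hypothetical input path
            '/tmp/output_saved_model',  # hypothetical output path
            quantization_options,
        )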