Search Options

Results per page
Sort
Preferred Languages
Advanced

Results 1 - 5 of 5 for enable_legacy_weight_only (0.18 sec)

  1. tensorflow/compiler/mlir/quantization/tensorflow/quantize_passes.cc

          quantization_options.op_set(),
          quantization_options.enable_per_channel_quantization(),
          quantization_options.min_num_elements_for_weights(),
          quantization_options.enable_legacy_weight_only(), mlir_dump_file_prefix));
      pm.addPass(mlir::createSymbolDCEPass());
      pm.addPass(mlir::TF::CreateTFShapeInferencePass());
    
      // TODO: b/264637396 - Deprecate TF opset
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Fri May 10 04:07:09 UTC 2024
    - 9.9K bytes
    - Viewed (0)
  2. tensorflow/compiler/mlir/quantization/tensorflow/passes/quantize_composite_functions.cc

          const bool enable_per_channel_quantization,
          const int min_num_elements_for_weights,
          const bool enable_legacy_weight_only,
          std::optional<const std::string> mlir_dump_file_name)
          : enable_legacy_weight_only_(enable_legacy_weight_only),
            min_num_elements_for_weights_(min_num_elements_for_weights),
            mlir_dump_file_name_(std::move(mlir_dump_file_name)) {
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 54.5K bytes
    - Viewed (0)
  3. tensorflow/compiler/mlir/quantization/tensorflow/quantization_options.proto

      // Produces legacy weight-only graph where the qconst op(containing quantized
      // values) is followed by a dequantization op. This flag will be deprecated.
      bool enable_legacy_weight_only = 13;
    
      // If set to true, it forces calibration in graph model instead of eager mode
      // when the context is in eager mode. This will be forcibly set to true when
      // using DebuggerOptions.
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue Mar 19 06:31:19 UTC 2024
    - 9.2K bytes
    - Viewed (0)
  4. tensorflow/compiler/mlir/quantization/tensorflow/passes/passes.h

            quantization_method,
        tensorflow::quantization::OpSet target_opset,
        bool enable_per_channel_quantization, int min_num_elements_for_weights,
        bool enable_legacy_weight_only = false,
        std::optional<const absl::string_view> mlir_dump_file_prefix =
            std::nullopt);
    
    // Converts dequantize-(quantizable) call-quantize pattern to a single call op
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Fri May 10 04:07:09 UTC 2024
    - 12.3K bytes
    - Viewed (0)
  5. tensorflow/compiler/mlir/quantization/tensorflow/python/quantize_model.py

        )
    
      if not quantization_options.HasField('freeze_all_variables'):
        quantization_options.freeze_all_variables = True
    
      if quantization_options.enable_legacy_weight_only:
        raise ValueError(
            'Legacy weight-only is deprecated. Use weight-only quantization method.'
        )
    
      # Converter assumes options are specified. So set SRQ explicitly.
      if (
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Fri May 17 03:36:50 UTC 2024
    - 34.2K bytes
    - Viewed (0)
Back to top