Results 1 - 4 of 4 for post_training_quantize_ (0.44 sec)

  1. tensorflow/compiler/mlir/lite/transforms/prepare_quantize.cc

                              "inference type specification";
          signalPassFailure();
          return;
        }
        quant_specs_.post_training_quantization = post_training_quantize_;
        quant_specs_.legacy_float_scale = legacy_float_scale_;
        quant_specs_.disable_set_input_nodes_quantization_params =
            disable_set_input_nodes_quantization_params_;
      }
    
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 17.6K bytes
  2. tensorflow/compiler/mlir/lite/transforms/passes.td

          Option<"activation_number_of_bits_", "activation-number-of-bits", "int", "8",
                 "number of bits for inference type. Only used in tests">,
          Option<"post_training_quantize_", "post-training-quantize", "bool", "false",
                 "enable post training quantization. Only used in tests">,
          Option<"legacy_float_scale_", "legacy-float-scale", "bool", "false",

    - Last Modified: Wed Apr 24 20:30:06 UTC 2024
    - 22.6K bytes
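These TableGen Option<...> entries are what surface as command-line options on the generated pass; the prepare_quantize.cc snippet in result 1 then copies their values into `quant_specs_`. As a rough illustration, here is a minimal hand-written MLIR pass sketch with equivalent options. The pass name, the structure, and the empty runOnOperation body are hypothetical; only the option strings, defaults, and descriptions come from the passes.td excerpt, and the real pass is generated from the .td file rather than written this way.

    #include "mlir/Dialect/Func/IR/FuncOps.h"
    #include "mlir/Pass/Pass.h"
    #include "llvm/Support/CommandLine.h"

    namespace {
    // Hypothetical stand-in for the TableGen-generated pass.
    struct PrepareQuantizeSketch
        : public mlir::PassWrapper<PrepareQuantizeSketch,
                                   mlir::OperationPass<mlir::func::FuncOp>> {
      // Mirrors Option<"post_training_quantize_", "post-training-quantize", ...>.
      Option<bool> post_training_quantize_{
          *this, "post-training-quantize",
          llvm::cl::desc("enable post training quantization. Only used in tests"),
          llvm::cl::init(false)};

      // Mirrors Option<"activation_number_of_bits_", "activation-number-of-bits", ...>.
      Option<int> activation_number_of_bits_{
          *this, "activation-number-of-bits",
          llvm::cl::desc("number of bits for inference type. Only used in tests"),
          llvm::cl::init(8)};

      void runOnOperation() override {
        // A real pass would copy these options into QuantizationSpecs here, as
        // the prepare_quantize.cc snippet in result 1 does.
      }
    };
    }  // namespace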
  3. tensorflow/compiler/mlir/lite/python/tf_tfl_flatbuffer_helpers.cc

      }
    
      // Some extra flags related to post-training quantization. If post-training
      // quantization is enabled, `inference_type` and `inference_input_type` are
      // not used by MLIR passes.
      if (toco_flags.post_training_quantize()) {
        quant_specs->weight_quantization = true;
        quant_specs->disable_per_channel =
            toco_flags.disable_per_channel_quantization();
        if (toco_flags.quantize_to_float16()) {

    - Last Modified: Sun May 12 12:39:37 UTC 2024
    - 17.3K bytes
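The branch above is driven by getters on the `toco_flags` proto (post_training_quantize, disable_per_channel_quantization, quantize_to_float16). Below is a minimal sketch of building such a message so this branch is taken; the setter names follow standard protobuf codegen for the getters shown in the snippet, while the header path and the helper name are assumptions, not taken from this file.

    // Assumed header path for the generated TocoFlags proto.
    #include "tensorflow/lite/toco/toco_flags.pb.h"

    // Hypothetical helper: builds a TocoFlags message that takes the
    // post-training-quantization branch shown above.
    toco::TocoFlags MakePostTrainingQuantizeFlags() {
      toco::TocoFlags toco_flags;
      // Enables the branch above: weight quantization is turned on and
      // inference_type / inference_input_type are ignored by the MLIR passes.
      toco_flags.set_post_training_quantize(true);
      // Copied into quant_specs->disable_per_channel by the snippet above;
      // false keeps per-channel weight quantization enabled.
      toco_flags.set_disable_per_channel_quantization(false);
      // Setting this would quantize weights to float16 instead of int8.
      toco_flags.set_quantize_to_float16(false);
      return toco_flags;
    }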
  4. tensorflow/compiler/mlir/lite/tf_to_tfl_flatbuffer.cc

      // When the toco flag for post-training quantization is specified while
      // converting to StableHLO, we automatically enable dynamic range quantization.
    
      if (toco_flags.post_training_quantize()) {
        const auto status = quantization::PreprocessAndFreezeGraph(
            module, module.getContext(), session);
        if (!status.ok()) {
          return status_handler.Combine(

    - Last Modified: Fri May 03 18:01:23 UTC 2024
    - 23.8K bytes
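Taken together, results 3 and 4 describe the converter-side behavior of the flag: post-training quantization switches on weight quantization and honors the per-channel setting, and on the StableHLO path it additionally enables dynamic range quantization and preprocesses and freezes the graph first. The dependency-free sketch below only restates that control flow; the struct and function names, the convert_to_stablehlo flag, and the dynamic_range_quantization field are invented for illustration, while the other field names mirror the snippets.

    #include <iostream>

    // Hypothetical, dependency-free stand-ins for TocoFlags and the MLIR
    // QuantizationSpecs; only the field names shared with the snippets above
    // are real, everything else is invented for illustration.
    struct FlagsSketch {
      bool post_training_quantize = false;
      bool disable_per_channel_quantization = false;
      bool convert_to_stablehlo = false;  // assumed name for the StableHLO path
    };

    struct QuantSpecsSketch {
      bool weight_quantization = false;
      bool disable_per_channel = false;
      bool dynamic_range_quantization = false;  // assumed field name
    };

    // Restates the control flow of results 3 and 4: post-training quantization
    // enables weight quantization, and the StableHLO path additionally enables
    // dynamic range quantization.
    QuantSpecsSketch MapFlags(const FlagsSketch& flags) {
      QuantSpecsSketch specs;
      if (flags.post_training_quantize) {
        specs.weight_quantization = true;
        specs.disable_per_channel = flags.disable_per_channel_quantization;
        if (flags.convert_to_stablehlo) specs.dynamic_range_quantization = true;
      }
      return specs;
    }

    int main() {
      FlagsSketch flags;
      flags.post_training_quantize = true;
      const QuantSpecsSketch specs = MapFlags(flags);
      std::cout << "weight_quantization=" << specs.weight_quantization << "\n";
      return 0;
    }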