Search Options

Results per page
Sort
Preferred Languages
Advanced

Results 11 - 15 of 15 for legacy_float_scale_ (0.17 sec)

  1. tensorflow/compiler/mlir/quantization/tensorflow/passes/prepare_quantize_drq.cc

                            QuantizationUnit quant_op) const {
        auto [quantized_op, weight_idx] = quant_op;
        const bool is_narrow_range = true;
        const bool is_legacy_float = quant_specs_.legacy_float_scale;
        const bool is_signed = quant_specs_.IsSignedInferenceType();
        const int bit_width = quant_specs_.GetQuantizationTypeWidth();
    
        std::unique_ptr<OpQuantSpec> spec = GetTFOpQuantSpec(quantized_op);
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 11.5K bytes
    - Viewed (0)
  2. tensorflow/compiler/mlir/quantization/common/quantization_lib/quantization_config.h

      int64_t minimum_elements_for_weights = 1024;
    
      // Whether to calculate scales in float to keep quantized values the same with
      // old TOCO quantizer.
      bool legacy_float_scale = false;
    
      // Whether to perform per-tensor quantization. Currently, this option is only
      // valid when the quantization parameters need to be created by scanning the
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed Mar 13 10:16:19 UTC 2024
    - 10.8K bytes
    - Viewed (0)
  3. tensorflow/compiler/mlir/lite/transforms/passes.h

    // the binary size.
    std::unique_ptr<OperationPass<func::FuncOp>> CreateQuantizePass(
        bool verify_numeric = false, bool whole_model_verify = false,
        bool legacy_float_scale = false,
        const absl::flat_hash_set<std::string>& ops_blocklist = {},
        const absl::flat_hash_set<std::string>& nodes_blocklist = {});
    
    // Creates an instance of the TensorFlow Lite dialect PrepareQuantize pass.
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Mar 07 21:29:34 UTC 2024
    - 10.9K bytes
    - Viewed (0)
  4. tensorflow/compiler/mlir/lite/python/converter_python_api.cc

          input_model_buffer, input_type, output_type, inference_tensor_type,
          /*operator_names=*/{}, disable_per_channel, fully_quantize, output_model,
          enable_numeric_verify, enable_whole_model_verify,
          /*legacy_float_scale=*/true, denylisted_ops, denylisted_nodes,
          enable_variable_quantization, disable_per_channel_for_dense_layers,
          debug_options);
      if (status != kTfLiteOk) {
        error_reporter->exception();
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed Jun 12 23:15:24 UTC 2024
    - 19.2K bytes
    - Viewed (0)
  5. tensorflow/compiler/mlir/lite/transforms/prepare_quantize_dynamic_range.cc

      bool quantizeOpAsInt8(PatternRewriter& rewriter, arith::ConstantOp op,
                            std::pair<Operation*, int> quant_op) const {
        bool is_narrow_range = true;
        bool is_legacy_float = quant_specs_.legacy_float_scale;
        bool is_signed = quant_specs_.IsSignedInferenceType();
        int bit_width = quant_specs_.GetQuantizationTypeWidth();
    
        Operation* quantize_op = quant_op.first;
        int quantize_operand_num = quant_op.second;
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 20.8K bytes
    - Viewed (0)
Back to top