Search Options

Results per page
Sort
Preferred Languages
Advanced

Results 1 - 10 of 25 for legacy_float_scale_ (0.37 sec)

  1. tensorflow/compiler/mlir/lite/transforms/quantize.cc

    };
    
    class QuantizeConstPattern : public OpRewritePattern<QuantizeOp> {
     public:
      explicit QuantizeConstPattern(MLIRContext* context, bool legacy_float_scale)
          : OpRewritePattern<QuantizeOp>(context),
            legacy_float_scale_(legacy_float_scale) {}
      LogicalResult matchAndRewrite(QuantizeOp op,
                                    PatternRewriter& rewriter) const override {
        DenseFPElementsAttr attr;
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed Apr 24 20:30:06 UTC 2024
    - 13.3K bytes
    - Viewed (0)
  2. tensorflow/compiler/mlir/quantization/common/quantization_lib/quantization_driver.h

            op_quant_spec_getter_(op_quant_spec_getter),
            op_quant_scale_spec_getter_(op_quant_scale_spec_getter),
            infer_tensor_range_(infer_tensor_range),
            legacy_float_scale_(legacy_float_scale),
            is_qdq_conversion_(is_qdq_conversion) {}
    
      // The entry point of the quantization parameters propagation.
      void Run();
    
      // Sets up the states for all the op results in the function.
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed Mar 20 11:42:17 UTC 2024
    - 16.8K bytes
    - Viewed (0)
  3. tensorflow/compiler/mlir/quantization/common/quantization_lib/quantization_driver.cc

            /*narrow_range=*/true, legacy_float_scale_);
      } else {
        // per-tensor quantization weight
        final_type = GetUniformQuantizedTypeForWeight(
            attr, /*symmetric=*/is_weight && is_signed_,
            /*num_bits=*/8, is_signed_,
            /*narrow_range=*/is_weight, legacy_float_scale_);
      }
      if (const auto quant_type = mlir::dyn_cast_or_null<QuantizedType>(final_type);
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 38.1K bytes
    - Viewed (0)
  4. tensorflow/compiler/mlir/lite/transforms/prepare_quantize.cc

                              "inference type specification";
          signalPassFailure();
          return;
        }
        quant_specs_.post_training_quantization = post_training_quantize_;
        quant_specs_.legacy_float_scale = legacy_float_scale_;
        quant_specs_.disable_set_input_nodes_quantization_params =
            disable_set_input_nodes_quantization_params_;
      }
    
      if (quant_specs_.post_training_quantization) {
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 17.6K bytes
    - Viewed (0)
  5. tensorflow/compiler/mlir/lite/transforms/passes.td

          Option<"post_training_quantize_", "post-training-quantize", "bool", "false",
                 "enable post training quantization. Only used in tests">,
          Option<"legacy_float_scale_", "legacy-float-scale", "bool", "false",
                 "calculate quantization scales in float instead of double">,
          Option<"disable_per_channel_", "disable-per-channel", "bool", "false",
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed Apr 24 20:30:06 UTC 2024
    - 22.6K bytes
    - Viewed (0)
  6. tensorflow/compiler/mlir/lite/quantization/lite/quantize_weights.cc

      quant_specs.weight_only_quantization = weight_only_quantization;
      quant_specs.minimum_elements_for_weights = minimum_elements_for_weights;
      quant_specs.disable_per_channel = disable_per_channel;
      quant_specs.legacy_float_scale = legacy_float_scale;
      quant_specs.ops_blocklist = denylisted_mlir_op_names;
      for (const auto& entry : custom_op_map) {
        quant_specs.custom_map[entry.first].quantizable_input_indices =
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed Jun 12 23:15:24 UTC 2024
    - 9.5K bytes
    - Viewed (0)
  7. tensorflow/compiler/mlir/quantization/common/quantization_lib/quantization_driver_test.cc

      QuantizationDriver quantization_driver(
          main_fn, /*is_signed=*/true, /*bit_width=*/8,
          /*disable_per_channel=*/false, op_quant_spec_getter,
          GetDefaultQuantScaleSpec,
          /*infer_tensor_range=*/true, /*legacy_float_scale=*/false,
          /*is_qdq_conversion=*/false);
    
      quantization_driver.Initialize();
    
      int64_t num_constant_op = 0;
      main_fn.walk([&](arith::ConstantOp cst) { ++num_constant_op; });
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 7.9K bytes
    - Viewed (0)
  8. tensorflow/compiler/mlir/lite/quantization/lite/quantize_model.cc

          disable_per_channel_for_dense_layers;
      quant_specs.verify_numeric = verify_numeric;
      quant_specs.whole_model_verify = whole_model_verify;
      quant_specs.legacy_float_scale = legacy_float_scale;
      quant_specs.ops_blocklist = denylisted_mlir_op_names;
      quant_specs.nodes_blocklist = denylisted_nodes;
      quant_specs.enable_mlir_variable_quantization = enable_variable_quantization;
    
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed Jun 12 23:15:24 UTC 2024
    - 6.3K bytes
    - Viewed (0)
  9. tensorflow/compiler/mlir/lite/quantization/lite/quantize_model.h

    //
    // When `verify_numeric` is true, the model will have it's original float ops
    // and NumericVerify ops to compare output values from the quantized and float
    // ops.
    //
    // When `legacy_float_scale` is true, the quantizer will use float scale instead
    // of double, and call TOCO's quantization routines to maintain bit-exactness of
    // the values with the TOCO quantizer.
    TfLiteStatus QuantizeModel(
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed Jun 12 23:15:24 UTC 2024
    - 2.8K bytes
    - Viewed (0)
  10. tensorflow/compiler/mlir/lite/transforms/prepare_quantize_helper.h

                 const int adjusted_quant_dim,
                 const bool legacy_float_scale) -> quant::QuantParams {
        if (auto qtype = mlir::dyn_cast_or_null<UniformQuantizedType>(
                quant::GetUniformQuantizedTypeForBias(
                    quant_params, legacy_float_scale, adjusted_quant_dim))) {
          return quant::UniformQuantizedType::get(
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Fri May 03 18:01:23 UTC 2024
    - 28K bytes
    - Viewed (0)
Back to top