Results 1 - 10 of 28 for quant_specs_ (0.64 sec)

  1. tensorflow/compiler/mlir/quantization/tensorflow/passes/prepare_quantize.cc

        quant_specs_ = other.quant_specs_;
        enable_post_training_quantize_ = other.enable_post_training_quantize_;
        enable_per_channel_quantization_ = !quant_specs_.disable_per_channel;
      }
    
      explicit PrepareQuantizePass(const QuantizationSpecs& quant_specs)
          : quant_specs_(quant_specs) {
        enable_post_training_quantize_ = quant_specs.post_training_quantization;
      }
    
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 17.2K bytes
    - Viewed (0)
  2. tensorflow/compiler/mlir/lite/transforms/prepare_quantize_dynamic_range.cc

      explicit PrepareDynamicRangeQuantizePass() {
        quant_specs_.inference_type = tensorflow::DT_QINT8;
        quant_specs_.weight_quantization = true;
        quant_specs_.enable_mlir_dynamic_range_quantizer = true;
      }
    
      // Constructor used by manually creating the pass.
      explicit PrepareDynamicRangeQuantizePass(
          const quant::QuantizationSpecs& quant_specs)
          : quant_specs_(quant_specs) {
        enable_dynamic_range_per_channel_quantization_ =
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 20.8K bytes
    - Viewed (0)
  3. tensorflow/compiler/mlir/lite/transforms/prepare_quantize.cc

                                        is_signed, quant_specs_.legacy_float_scale,
                                        ctx);
    
      if (quant_specs_.post_training_quantization) {
        patterns_2.add<ConvertLstmStatsToQDQs<LSTMOp>>(ctx, quant_specs_);
        patterns_2.add<ConvertLstmStatsToQDQs<UnidirectionalSequenceLSTMOp>>(
            ctx, quant_specs_);
        patterns_2.add<ConvertSvdfStatsToQDQs>(ctx, quant_specs_);
      }
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 17.6K bytes
    - Viewed (0)
  4. tensorflow/compiler/mlir/quantization/tensorflow/passes/prepare_quantize_drq.cc

        quant_specs_.inference_type = tensorflow::DT_QINT8;
      }
    
      // Constructor used by manually creating the pass.
      explicit PrepareQuantizeDRQPass(const QuantizationSpecs& quant_specs,
                                      OpSet op_set)
          : quant_specs_(quant_specs), op_set_(op_set) {
        enable_per_channel_quantization_ = !quant_specs_.disable_per_channel;
      }
    
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 11.5K bytes
    - Viewed (0)
  5. tensorflow/compiler/mlir/quantization/tensorflow/passes/quantize.cc

      explicit QuantizePass() {
        quant_specs_.inference_type = tensorflow::DT_QINT8;
      }
    
      // Constructor used by manually creating the pass.
      explicit QuantizePass(const QuantizationSpecs& quant_specs,
                            OpSet target_opset)
          : quant_specs_(quant_specs) {
        weight_quantization_ = quant_specs.weight_quantization;
        target_opset_ = target_opset;
      }
    
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Fri Mar 22 05:52:39 UTC 2024
    - 23.6K bytes
    - Viewed (0)
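
Results 1 - 5 above all share one idiom: each pass keeps a QuantizationSpecs member named quant_specs_ and offers two constructors, a default one that hard-codes the spec fields and an explicit one ("used by manually creating the pass") that copies a caller-supplied spec and derives the pass's feature flags from it. The stand-alone sketch below distills that idiom; the QuantizationSpecs struct here is a simplified stand-in for the real TensorFlow type, and PreparePassSketch is a hypothetical name, not an actual pass.

    #include <iostream>

    // Simplified stand-in for TensorFlow's QuantizationSpecs; the real
    // struct has many more fields than the three used here.
    struct QuantizationSpecs {
      bool post_training_quantization = false;
      bool disable_per_channel = false;
      bool weight_quantization = false;
    };

    class PreparePassSketch {
     public:
      // Default constructor: hard-code the spec fields, as the
      // dynamic-range passes in results 2 and 4 do.
      PreparePassSketch() { quant_specs_.weight_quantization = true; }

      // Explicit constructor: copy the caller's spec and derive the
      // feature flags from it, as in results 1, 4, and 5.
      explicit PreparePassSketch(const QuantizationSpecs& quant_specs)
          : quant_specs_(quant_specs),
            enable_post_training_quantize_(
                quant_specs.post_training_quantization),
            enable_per_channel_quantization_(
                !quant_specs.disable_per_channel) {}

      void Describe() const {
        std::cout << "post-training: " << enable_post_training_quantize_
                  << ", per-channel: " << enable_per_channel_quantization_
                  << '\n';
      }

     private:
      QuantizationSpecs quant_specs_;
      bool enable_post_training_quantize_ = false;
      bool enable_per_channel_quantization_ = true;
    };

    int main() {
      QuantizationSpecs specs;
      specs.post_training_quantization = true;
      PreparePassSketch pass(specs);
      pass.Describe();  // prints "post-training: 1, per-channel: 1"
    }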
  6. tensorflow/compiler/mlir/lite/transforms/prepare_quantize_helper.h

      explicit ConvertOpStatsToQDQs(MLIRContext* context,
                                    const quant::QuantizationSpecs& quant_specs,
                                    PatternBenefit benefit = 1)
          : OpRewritePattern<SourceOp>(context, benefit),
            quant_specs_(quant_specs) {}
    
     protected:
      quant::QuantizationSpecs quant_specs_;
    
      LogicalResult processInputs(
          SourceOp op, const operator_property::OpVariant& op_variant,
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Fri May 03 18:01:23 UTC 2024
    - 28K bytes
    - Viewed (0)
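
Result 6 shows the complementary idiom for rewrite patterns rather than passes: the pattern is constructed once with the specs, stores them in a protected quant_specs_ member, and consults them each time it fires. A minimal sketch under the same assumptions as above; the Op type and the sketch class are hypothetical stand-ins for the MLIR machinery, not the real OpRewritePattern API.

    #include <iostream>
    #include <string>

    struct QuantizationSpecs {
      bool legacy_float_scale = false;  // simplified stand-in field
    };

    struct Op { std::string name; };  // stand-in for an MLIR op handle

    class ConvertStatsToQDQsSketch {
     public:
      explicit ConvertStatsToQDQsSketch(const QuantizationSpecs& quant_specs)
          : quant_specs_(quant_specs) {}

      // Each application of the pattern can consult the stored specs.
      bool matchAndRewrite(const Op& op) const {
        std::cout << "rewriting " << op.name << ", legacy scales: "
                  << quant_specs_.legacy_float_scale << '\n';
        return true;
      }

     protected:
      QuantizationSpecs quant_specs_;  // as in prepare_quantize_helper.h
    };

    int main() {
      QuantizationSpecs specs;
      specs.legacy_float_scale = true;
      ConvertStatsToQDQsSketch pattern(specs);
      pattern.matchAndRewrite(Op{"tfl.lstm"});
    }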
  7. tensorflow/compiler/mlir/lite/transforms/quantize.cc

        quant_specs.inference_type = tensorflow::DT_HALF;
      }
    
      const quant::QuantPassSpec quant_params = {
          {quant_specs.verify_numeric, error_tolerance_,
           quant_specs.whole_model_verify, enable_log_if_failed_},
          quant_specs};
    
      populateWithGenerated(patterns);
    
      if (quant_specs.weight_quantization || quant_specs.use_fake_quant_num_bits ||
          quant_specs.qdq_conversion_mode ==
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed Apr 24 20:30:06 UTC 2024
    - 13.3K bytes
    - Viewed (0)
  8. tensorflow/compiler/mlir/lite/python/tf_tfl_flatbuffer_helpers.cc

          quant_specs->inference_type = DT_HALF;
          quant_specs->inference_input_type = DT_HALF;
        } else {
          quant_specs->inference_type = DT_QINT8;
          quant_specs->inference_input_type = DT_QINT8;
        }
      } else {
        // These flags are incompatible with post_training_quantize() as only
        // QAT models can provide required ranges.
        quant_specs->disable_infer_tensor_range =
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Sun May 12 12:39:37 UTC 2024
    - 17.3K bytes
    - Viewed (0)
  9. tensorflow/compiler/mlir/lite/quantization/lite/quantize_model.cc

      }
      quant::QuantizationSpecs quant_specs;
      quant_specs.inference_type = tflite::TflTypeToTfType(inference_type);
      quant_specs.post_training_quantization = true;
      quant_specs.disable_per_channel = disable_per_channel;
      quant_specs.disable_per_channel_for_dense_layers =
          disable_per_channel_for_dense_layers;
      quant_specs.verify_numeric = verify_numeric;
      quant_specs.whole_model_verify = whole_model_verify;
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed Jun 12 23:15:24 UTC 2024
    - 6.3K bytes
    - Viewed (0)
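
Result 9 is the call-site view: before the passes run, quantize_model.cc fills in a fresh QuantizationSpecs from the caller's options. Below is a sketch of that configuration step, using only the field names visible in the snippet; the struct definition and the helper function are illustrative stand-ins, not TensorFlow's actual API.

    // Illustrative only: field names match the snippet above, but the
    // struct and helper are simplified stand-ins.
    struct QuantizationSpecs {
      bool post_training_quantization = false;
      bool disable_per_channel = false;
      bool disable_per_channel_for_dense_layers = false;
      bool verify_numeric = false;
      bool whole_model_verify = false;
    };

    QuantizationSpecs MakePostTrainingSpecs(
        bool disable_per_channel, bool disable_per_channel_for_dense_layers,
        bool verify_numeric, bool whole_model_verify) {
      QuantizationSpecs quant_specs;
      quant_specs.post_training_quantization = true;
      quant_specs.disable_per_channel = disable_per_channel;
      quant_specs.disable_per_channel_for_dense_layers =
          disable_per_channel_for_dense_layers;
      quant_specs.verify_numeric = verify_numeric;
      quant_specs.whole_model_verify = whole_model_verify;
      return quant_specs;
    }

    int main() {
      QuantizationSpecs specs = MakePostTrainingSpecs(
          /*disable_per_channel=*/false,
          /*disable_per_channel_for_dense_layers=*/false,
          /*verify_numeric=*/true, /*whole_model_verify=*/false);
      (void)specs;
    }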
  10. tensorflow/compiler/mlir/lite/python/saved_model_to_tfl_flatbuffer.cc

        pass_config.quant_specs.qdq_conversion_mode =
            mlir::quant::QDQConversionMode::kQDQStatic;
      } else if (toco_flags.qdq_conversion_mode() == "DYNAMIC") {
        pass_config.quant_specs.qdq_conversion_mode =
            mlir::quant::QDQConversionMode::kQDQDynamic;
        // Need to set this or else the ops will still use floating point kernels
        pass_config.quant_specs.inference_type = tensorflow::DT_QINT8;
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Sun May 12 12:39:37 UTC 2024
    - 11K bytes
    - Viewed (0)