Results 31 - 40 of 82 for dequantize (0.18 sec)

  1. tensorflow/compiler/mlir/lite/transforms/prepare_quantize_dynamic_range.cc

      quant::QuantizationSpecs quant_specs_;
    };
    
    #include "tensorflow/compiler/mlir/lite/utils/generated_op_quant_spec_getters.inc"
    
    // If the weight is applicable to dynamic range quantization, insert Quantize
    // and Dequantize ops with either per-axis or per-tensor scale.
    class PrepareDynamicRangeQuantizableOp
        : public OpRewritePattern<arith::ConstantOp> {
     public:
      explicit PrepareDynamicRangeQuantizableOp(
     - Last Modified: Thu Apr 25 16:01:03 UTC 2024
     - 20.8K bytes
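
     The comment in the snippet above distinguishes per-axis from per-tensor scales. Below is a minimal NumPy sketch of that distinction for symmetric int8 weight quantization; it only illustrates the idea, it is not code from the TFLite pass, and the helper name quantize_symmetric is made up for the example.

     import numpy as np

     def quantize_symmetric(w, axis=None):
         # axis=None -> one scale for the whole tensor (per-tensor);
         # otherwise one scale per slice along `axis` (per-axis / per-channel).
         if axis is None:
             max_abs = np.max(np.abs(w))
         else:
             reduce_axes = tuple(i for i in range(w.ndim) if i != axis)
             max_abs = np.max(np.abs(w), axis=reduce_axes, keepdims=True)
         scale = max_abs / 127.0
         w_int8 = np.clip(np.round(w / scale), -127, 127).astype(np.int8)
         return w_int8, scale  # consumers dequantize as w_int8 * scale

     w = np.random.randn(4, 8).astype(np.float32)
     q_t, s_t = quantize_symmetric(w)            # per-tensor scale
     q_a, s_a = quantize_symmetric(w, axis=0)    # per-axis scales, one per row
     print(np.abs(w - q_t * s_t).max(), np.abs(w - q_a * s_a).max())

     The per-axis reconstruction error is usually smaller, which is why per-channel scales are preferred for weights when the kernel supports them.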
  2. tensorflow/compiler/mlir/quantization/tensorflow/passes/prepare_quantize_drq.cc

      llvm::cl::desc("Whether to enable per-channel quantized weights.")};
    };
    
    // If the weight is applicable to dynamic range quantization, insert Quantize
    // and Dequantize ops with per-tensor scale.
    class PrepareDRQQuantizableOp : public OpRewritePattern<arith::ConstantOp> {
     public:
      explicit PrepareDRQQuantizableOp(MLIRContext* context,
     - Last Modified: Thu Apr 25 16:01:03 UTC 2024
     - 11.5K bytes
  3. tensorflow/compiler/mlir/lite/transforms/legalize_tf.cc

      // If `value` is produced by tf.Dequantize op, then return the Dequantize op's
      // input. Otherwise return `value`.
      auto get_real_input_value = [](Value value) -> Value {
        Operation* defining_op = value.getDefiningOp();
        if (auto dequantize = dyn_cast_or_null<TF::DequantizeOp>(defining_op)) {
          return dequantize.getInput();
        } else if (auto dequantize =
     - Last Modified: Mon May 20 20:06:54 UTC 2024
     - 45.2K bytes
  4. tensorflow/compiler/mlir/quantization/tensorflow/passes/prepare_lifting.cc

            per_axis_type.getStorageTypeMin(), per_axis_type.getStorageTypeMax());
      }
    
      auto quantize = builder.create<quantfork::QuantizeCastOp>(
          q_op.getLoc(), new_value_type.clone(new_qtype), new_value);
      auto dequantize = builder.create<quantfork::DequantizeCastOp>(
          dq_op.getLoc(), new_value_type, quantize.getResult());
      return dequantize.getResult();
    }
    
     - Last Modified: Fri May 17 17:58:54 UTC 2024
     - 13.3K bytes
  5. tensorflow/compiler/mlir/lite/transforms/passes.td

      ];
    }
    def DecomposeHybridQuantizationPass : Pass<"tfl-decompose-hybrid-quantization", "mlir::func::FuncOp"> {
      let summary = "Decomposes hybrid quantization to explicit quantize / dequantize";
      let description = [{
          Decomposes (with explicit quantize/dequantize ops) selected math
          operations which exist in the model with hybrid quantization
          (some arguments/results left in floating point).
      }];
     - Last Modified: Wed Apr 24 20:30:06 UTC 2024
     - 22.6K bytes
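
     As a rough illustration of what "decomposing hybrid quantization" means here, the NumPy sketch below rewrites a hybrid op (int8 weights, float activations) so that the Dequantize step is explicit and the arithmetic itself runs in float. fully_connected_hybrid and its parameters are hypothetical stand-ins, not TensorFlow/TFLite APIs.

     import numpy as np

     def fully_connected_hybrid(x_f32, w_int8, w_scale):
         # Before decomposition this would be a single "hybrid" kernel taking
         # int8 weights directly. After decomposition, the dequantize is a
         # separate, explicit step followed by an ordinary float matmul.
         w_f32 = w_int8.astype(np.float32) * w_scale   # explicit Dequantize
         return x_f32 @ w_f32.T                        # plain float op

     x = np.random.randn(2, 8).astype(np.float32)
     w_int8 = np.random.randint(-127, 128, size=(4, 8), dtype=np.int8)
     print(fully_connected_hybrid(x, w_int8, w_scale=0.05).shape)  # (2, 4)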
  6. tensorflow/compiler/mlir/lite/transforms/dense_to_sparse.cc

          auto* inst = value.getDefiningOp();
          if (!inst) {
            continue;
          }
    
          // There could be a Dequantize op after the weight tensor in cases like
          // fp16 post-training quantization. We need to get the weight from the
          // input of the Dequantize op.
          if (isa<DequantizeOp>(inst)) {
            op = inst;
            value = inst->getOperand(0);
            inst = value.getDefiningOp();
            if (!inst) {
     - Last Modified: Thu Apr 25 16:01:03 UTC 2024
     - 16.1K bytes
  7. tensorflow/compiler/mlir/quantization/stablehlo/passes/quantization_patterns.cc

      }
    
      void rewrite(quantfork::DequantizeCastOp op,
                   PatternRewriter& rewriter) const final {
        // Rewrite the floating-point ops to the quantized version, by fusing
        // preceding dequantize ops and succeeding quantize ops.
        for (Operation* op_with_region : op.getResult().getUsers()) {
          // Collect all the quantized inputs and "clone" the matched op by these
          // inputs.
          SmallVector<Value, 4> inputs;
     - Last Modified: Fri May 03 06:04:36 UTC 2024
     - 41.7K bytes
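
     The rewrite described in the snippet above fuses a preceding dequantize and a succeeding quantize into the op itself. The NumPy sketch below shows that idea on an elementwise add, assuming both inputs and the output share one scale and zero point; the function names are invented for the example and are not from this pass.

     import numpy as np

     def dq(x_q, scale, zp):
         return (x_q.astype(np.float32) - zp) * scale

     def q(x_f, scale, zp):
         return np.clip(np.round(x_f / scale) + zp, -128, 127).astype(np.int8)

     def add_unfused(a_q, b_q, scale, zp):
         # pattern before the rewrite: dequantize -> float add -> quantize
         return q(dq(a_q, scale, zp) + dq(b_q, scale, zp), scale, zp)

     def add_fused(a_q, b_q, scale, zp):
         # pattern after the rewrite: stay in integers; this matches the
         # unfused result when inputs and output share scale and zero point
         acc = a_q.astype(np.int32) + b_q.astype(np.int32) - zp
         return np.clip(acc, -128, 127).astype(np.int8)

     a = np.array([10, -20, 100], dtype=np.int8)
     b = np.array([5, 7, 50], dtype=np.int8)
     print(add_unfused(a, b, 0.1, 0), add_fused(a, b, 0.1, 0))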
  8. tensorflow/compiler/mlir/quantization/tensorflow/ops/tf_quantize_op.cc

          func_name, rewriter, quant_type, val_to_dequantize, result_type,
          LogicsForUniformDequanization);
    
      return dequant_op;
    }
    }  // namespace
    
    // Generate quantize and dequantize functions with uniform quantization.
    std::optional<TF::PartitionedCallOp> ApplyUniformQuantization(
        PatternRewriter& rewriter, TF::ConstOp op,
        tensorflow::quantization::QuantizationComponentSpec& weight_spec) {
     - Last Modified: Thu Apr 25 16:01:03 UTC 2024
     - 11K bytes
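
     For reference, the sketch below works through uniform (affine) quantization in NumPy: derive a scale and zero point from a tensor's range, quantize, then dequantize. The int8 range and asymmetric scheme are assumptions made for the example, not parameters read from this TensorFlow pass.

     import numpy as np

     def uniform_qparams(x, qmin=-128, qmax=127):
         lo, hi = min(x.min(), 0.0), max(x.max(), 0.0)   # keep 0.0 exactly representable
         scale = (hi - lo) / (qmax - qmin)
         zero_point = int(np.round(qmin - lo / scale))
         return scale, zero_point

     x = np.array([-0.8, -0.1, 0.0, 0.4, 1.2], dtype=np.float32)
     scale, zp = uniform_qparams(x)
     x_q = np.clip(np.round(x / scale) + zp, -128, 127).astype(np.int8)   # quantize
     x_dq = (x_q.astype(np.float32) - zp) * scale                         # dequantize
     print(scale, zp, np.abs(x - x_dq).max())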
  9. tensorflow/compiler/mlir/quantization/tensorflow/calibrator/calibration_algorithm.py

        find the quant_min and quant_max that best describe this distribution. To do
        this, we quantize hist_mids using quant_min and quant_max and dequantize
        them again. Then the difference between hist_mids and dequantized hist_mids
        equates to quantization error when using quant_min and quant_max.
    
    
        Args:
     - Last Modified: Mon Mar 11 19:29:56 UTC 2024
     - 14.7K bytes
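
     The docstring above describes measuring quantization error by round-tripping the histogram bin midpoints through a candidate (quant_min, quant_max). A small NumPy sketch of that measure, with made-up histogram data and a hypothetical helper name:

     import numpy as np

     def quantization_error(hist_mids, hist_counts, quant_min, quant_max, num_bits=8):
         scale = (quant_max - quant_min) / (2**num_bits - 1)
         q = np.clip(np.round((hist_mids - quant_min) / scale), 0, 2**num_bits - 1)
         dequantized = q * scale + quant_min
         # weight each bin's squared round-trip error by how many samples fell in it
         return np.sum(hist_counts * (hist_mids - dequantized) ** 2)

     hist_mids = np.linspace(-3.0, 3.0, 64)
     hist_counts = np.exp(-hist_mids**2)          # a toy bell-shaped histogram
     print(quantization_error(hist_mids, hist_counts, quant_min=-2.0, quant_max=2.0))

     Sweeping candidate ranges and keeping the one with the smallest error is the usual way such a calibration search is driven.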
  10. tensorflow/compiler/mlir/quantization/common/quantization_lib/quantization_config.h

      // weights but will dequantize them back at runtime which is useful for
      // memory bound case without kernel support available in lower precisions.
      // Used in MLIR dynamic range quantizer.
      bool weight_only_quantization = false;
    
      // The minimum number of elements in a weights array required to apply
      // quantization. This is especially useful not to quantize small tensors as
     - Last Modified: Wed Mar 13 10:16:19 UTC 2024
     - 10.8K bytes
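
     The weight_only_quantization flag described above keeps weights in low precision in memory but dequantizes them back before the float computation, which helps memory-bound models. A minimal sketch of that trade-off; WeightOnlyDense is an illustrative class, not part of this header:

     import numpy as np

     class WeightOnlyDense:
         def __init__(self, w_f32):
             # store weights as int8 plus a single scale to cut memory traffic
             self.scale = np.max(np.abs(w_f32)) / 127.0
             self.w_int8 = np.round(w_f32 / self.scale).astype(np.int8)

         def __call__(self, x_f32):
             # dequantize at runtime; the matmul itself still runs in float
             w = self.w_int8.astype(np.float32) * self.scale
             return x_f32 @ w.T

     layer = WeightOnlyDense(np.random.randn(16, 32).astype(np.float32))
     print(layer(np.random.randn(4, 32).astype(np.float32)).shape)  # (4, 16)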