Results 61 - 70 of 203 for dequantize (0.25 sec)

  1. tensorflow/compiler/mlir/quantization/stablehlo/passes/quantization_patterns.h

        if (failed(candidate_ops) || candidate_ops->empty()) return failure();
    
        // Rewrite the floating-point ops to the quantized version, by fusing
    // preceding dequantize ops and succeeding quantize ops.
        for (Operation* candidate_op : *candidate_ops) {
      // If it is a requantize op, we shouldn't rewrite this op.
          if (isa<QuantizeOpT, DequantizeOpT>(candidate_op)) {
            return failure();
          }
    
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 10.9K bytes
    - Viewed (0)
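
The fusion described in this excerpt collapses a dequantize -> float op -> quantize sandwich into a single quantized op. A numeric sketch of that sandwich (not the MLIR pattern itself), with a made-up scale and zero point for illustration:

    #include <algorithm>
    #include <cmath>
    #include <cstdint>
    #include <cstdio>

    constexpr float kScale = 0.05f;     // made-up quantization parameters
    constexpr int32_t kZeroPoint = 0;

    float Dequantize(int8_t q) { return kScale * static_cast<float>(q - kZeroPoint); }

    int8_t Quantize(float f) {
      int32_t q = static_cast<int32_t>(std::lround(f / kScale)) + kZeroPoint;
      return static_cast<int8_t>(std::clamp(q, -128, 127));
    }

    int main() {
      int8_t q_in = 40;
      // The float op here is a doubling; the fused quantized op must
      // produce the same int8 result without the float round trip.
      int8_t q_out = Quantize(2.0f * Dequantize(q_in));
      std::printf("%d -> %d\n", q_in, q_out);  // prints: 40 -> 80
    }
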
  2. tensorflow/compiler/mlir/lite/experimental/tac/transforms/device_transform.h

    RewritePatternSet GetHardwareRewritePatterns(MLIRContext* context,
                                                 const std::string& hardware);
    
    // Converts quantized ops to float; this essentially inserts a dequantize &
    // quantize pair around the op.
    void ConvertQuantizedOpToFloat(func::FuncOp func, OpBuilder* builder);
    
    // This will optimize the quantized ops -> float graph.
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue Jun 07 18:43:51 UTC 2022
    - 2K bytes
    - Viewed (0)
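
The dq/q pair that ConvertQuantizedOpToFloat inserts is exact at the op's boundaries: quantizing a freshly dequantized value with the same parameters reproduces the original quantized value, so only the wrapped op itself runs at float precision. A minimal sketch, assuming a zero point of 0 and a made-up scale:

    #include <cmath>
    #include <cstdint>

    constexpr float kScale = 0.05f;  // made-up scale; zero point assumed 0

    int8_t RoundTrip(int8_t q) {
      float f = kScale * q;                                 // inserted dequantize
      return static_cast<int8_t>(std::lround(f / kScale));  // inserted quantize: == q
    }
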
  3. tensorflow/compiler/mlir/lite/transforms/prepare_quantize_dynamic_range.cc

      quant::QuantizationSpecs quant_specs_;
    };
    
    #include "tensorflow/compiler/mlir/lite/utils/generated_op_quant_spec_getters.inc"
    
    // If the weight is applicable to dynamic range quantization, insert Quantize
    // and Dequantize ops with either per-axis or per-tensor scale.
    class PrepareDynamicRangeQuantizableOp
        : public OpRewritePattern<arith::ConstantOp> {
     public:
      explicit PrepareDynamicRangeQuantizableOp(
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 20.8K bytes
    - Viewed (0)
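
In this excerpt, "per-tensor" means one scale derived from the whole weight tensor, while "per-axis" means one scale per output channel. A minimal sketch of both choices for symmetric int8 weights, using the standard max|w| / 127 rule; an illustration, not the pass's own code:

    #include <algorithm>
    #include <cmath>
    #include <vector>

    // One scale for the whole tensor: int8 symmetric range [-127, 127].
    float PerTensorScale(const std::vector<float>& weights) {
      float max_abs = 0.0f;
      for (float w : weights) max_abs = std::max(max_abs, std::fabs(w));
      return max_abs / 127.0f;
    }

    // One scale per output channel ("per-axis"): usually tighter, since
    // each channel only has to cover its own value range.
    std::vector<float> PerAxisScales(const std::vector<std::vector<float>>& channels) {
      std::vector<float> scales;
      scales.reserve(channels.size());
      for (const auto& channel : channels) scales.push_back(PerTensorScale(channel));
      return scales;
    }
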
  4. tensorflow/compiler/mlir/lite/tf_tfl_translate_cl.cc

    // going forward.
    // NOLINTNEXTLINE
    llvm::cl::list<std::string> custom_opdefs(
        "tf-custom-opdefs", llvm::cl::desc("List of custom opdefs when importing "
                                           "graphdef"));
    
    // A Quantize and Dequantize op pair can optionally be emitted before and after
    // the quantized model as adaptors to receive and produce floating-point
    // data for the quantized model. Set this to `false` if the model input is
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue Mar 05 20:53:17 UTC 2024
    - 7.9K bytes
    - Viewed (0)
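
The comment cut off above describes a boolean flag. Declaring one with llvm::cl follows the same shape as the custom_opdefs list in the excerpt; the flag name below is hypothetical, not the one actually defined in tf_tfl_translate_cl.cc:

    #include "llvm/Support/CommandLine.h"

    // Hypothetical flag name, for illustration only.
    // NOLINTNEXTLINE
    llvm::cl::opt<bool> emit_quant_adaptors(
        "emit-quant-adaptors",
        llvm::cl::desc("Emit Quantize/Dequantize adaptor ops before and after "
                       "the quantized model"),
        llvm::cl::init(true));
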
  5. tensorflow/compiler/mlir/quantization/stablehlo/tests/components/post_calibration_component.mlir

    // CHECK-NO-UNPACK: %[[DEQUANTIZE:.+]] = stablehlo.uniform_dequantize %[[QUANTIZE_1]] : (tensor<1x3x!quant.uniform<i8:f32, {{.*}}>>) -> tensor<1x3xf32>
    // CHECK-NO-UNPACK: return %[[DEQUANTIZE]] : tensor<1x3xf32>
    
    // -----
    
    // Tests that a simple dot_general without CustomAggregators is not quantized.
    
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 01:09:50 UTC 2024
    - 6.7K bytes
    - Viewed (0)
  6. tensorflow/compiler/mlir/quantization/tensorflow/passes/prepare_quantize_drq.cc

          llvm::cl::desc("Whether enable per-channel quantized weights.")};
    };
    
    // If the weight is applicable to dynamic range quantization, insert Quantize
    // and Dequantize ops with per-tensor scale.
    class PrepareDRQQuantizableOp : public OpRewritePattern<arith::ConstantOp> {
     public:
      explicit PrepareDRQQuantizableOp(MLIRContext* context,
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 11.5K bytes
    - Viewed (0)
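
This pass and the one in result 3 both hang their logic on an OpRewritePattern over arith::ConstantOp. A minimal skeleton of that shape with the quantization logic omitted; a sketch, not PrepareDRQQuantizableOp itself:

    #include "mlir/Dialect/Arith/IR/Arith.h"
    #include "mlir/IR/PatternMatch.h"

    namespace {

    class ExampleWeightPattern
        : public mlir::OpRewritePattern<mlir::arith::ConstantOp> {
     public:
      using OpRewritePattern::OpRewritePattern;

      mlir::LogicalResult matchAndRewrite(
          mlir::arith::ConstantOp op,
          mlir::PatternRewriter& rewriter) const override {
        // Decide here whether `op` is a quantizable weight and, if so,
        // insert the Quantize/Dequantize pair. Returning failure() leaves
        // the op untouched and lets the driver try other patterns.
        return mlir::failure();
      }
    };

    }  // namespace
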
  7. tensorflow/cc/framework/fuzzing/op_fuzzing.bzl

    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Mon Nov 07 19:14:57 UTC 2022
    - 4.3K bytes
    - Viewed (0)
  8. tensorflow/compiler/mlir/lite/tests/prepare-tf.mlir

    ^bb0(%arg0: tensor<1x2xf32>):
      %cst_0 = arith.constant dense<[1, 0]> : tensor<2xi32>
      %0 = "tfl.quantize"(%arg0){qtype = tensor<1x2x!quant.uniform<u8:f32, 1.0>>}: (tensor<1x2xf32>) -> (tensor<1x2x!quant.uniform<u8:f32, 1.0>>)
      %1 = "tfl.dequantize"(%0): (tensor<1x2x!quant.uniform<u8:f32, 1.0>>) -> (tensor<1x2xf32>)
      %2 = "tf.Transpose"(%1, %cst_0): (tensor<1x2xf32>, tensor<2xi32>) -> tensor<2x1xf32>
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed May 29 07:26:59 UTC 2024
    - 59.8K bytes
    - Viewed (0)
  9. tensorflow/compiler/mlir/lite/transforms/legalize_tf.cc

  // If `value` is produced by a tf.Dequantize op, then return the Dequantize op's
      // input. Otherwise return `value`.
      auto get_real_input_value = [](Value value) -> Value {
        Operation* defining_op = value.getDefiningOp();
        if (auto dequantize = dyn_cast_or_null<TF::DequantizeOp>(defining_op)) {
          return dequantize.getInput();
        } else if (auto dequantize =
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Mon May 20 20:06:54 UTC 2024
    - 45.2K bytes
    - Viewed (0)
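
The excerpt's lambda is cut off mid else-if; the missing branch presumably looks through a second dequantize op type, and is omitted below rather than guessed. A sketch of the single-branch version of the same look-through idiom:

    // dyn_cast_or_null tolerates a null defining op, which is what
    // value.getDefiningOp() returns when `value` is a block argument.
    auto get_real_input_value = [](Value value) -> Value {
      Operation* defining_op = value.getDefiningOp();
      if (auto dequantize = dyn_cast_or_null<TF::DequantizeOp>(defining_op)) {
        return dequantize.getInput();
      }
      return value;
    };
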
  10. tensorflow/compiler/mlir/lite/transforms/quantize_variables.cc

          if (!read_variable_op) continue;
          // Add dequantize.
          builder.setInsertionPointAfter(read_variable_op);
          auto new_read_variable_op =
              builder.create<ReadVariableOp>(read_variable_op.getLoc(), ref_qtype,
                                             read_variable_op.getResourceId());
          auto new_dq_op = builder.create<DequantizeOp>(
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 8.5K bytes
    - Viewed (0)