
Results 21 - 30 of 87 for requantize (0.15 sec)

  1. tensorflow/compiler/mlir/quantization/tensorflow/passes/propagate_quantize_type.cc

        auto op_before_dequantize = original_dequantize_op.getOperand(0);
    
        // Create a new dequantize op that is propagated.
        rewriter.setInsertionPointAfter(user_op);
        TF::PartitionedCallOp new_dequantize_op =
            cast<TF::PartitionedCallOp>(rewriter.clone(*original_dequantize_op));
    
        // Skip the original dequant op and connect the op before dequantize to the
        // user op.
        user_op->setOperand(user_idx, op_before_dequantize);
    
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 7K bytes
    - Viewed (0)
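    A hedged sketch of the rewrite shown above: the user op is rewired to consume the value before the dequantize, and a clone of the dequantize is re-created after the user op. The toy Op type below is illustrative, not the real MLIR/TensorFlow API.

        #include <cstddef>
        #include <cstdio>
        #include <vector>

        // Toy stand-in for an IR op: a name plus operand pointers.
        struct Op {
          const char* name;
          std::vector<Op*> operands;
        };

        // Mirrors the idea in propagate_quantize_type.cc: skip the original
        // dequantize by wiring its input directly into the user op, then
        // clone the dequantize so it consumes the user op's result instead.
        Op* PropagateDequantize(Op* dequantize, Op* user, std::size_t user_idx) {
          Op* op_before_dequantize = dequantize->operands[0];
          user->operands[user_idx] = op_before_dequantize;
          return new Op{dequantize->name, {user}};
        }

        int main() {
          Op weight{"const_i8", {}};
          Op dequant{"dequantize", {&weight}};
          Op identity{"identity", {&dequant}};
          Op* propagated = PropagateDequantize(&dequant, &identity, 0);
          std::printf("identity consumes %s; %s consumes identity\n",
                      identity.operands[0]->name, propagated->name);
          delete propagated;
        }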
  2. tensorflow/compiler/mlir/quantization/tensorflow/utils/fake_quant_utils.h

        // Finally, use the quantization parameter to create the quantize and
        // dequantize ops, and insert them between the tf.FakeQuantWithMinMaxVarsOp
        // and its users.
        auto quantize = rewriter.create<quantfork::QuantizeCastOp>(
            tf_op.getLoc(), qtype.getValue(), input);
        auto dequantize = rewriter.create<quantfork::DequantizeCastOp>(
            tf_op.getLoc(), res_type, quantize.getResult());
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 6.3K bytes
    - Viewed (0)
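    The QuantizeCastOp/DequantizeCastOp pair created above simulates a round trip through the quantized representation. A minimal numeric sketch of that round trip, assuming uniform affine quantization with made-up scale and zero-point values (not taken from the snippet):

        #include <algorithm>
        #include <cmath>
        #include <cstdint>
        #include <cstdio>

        // Quantize: q = clamp(round(x / scale) + zero_point, -128, 127).
        int8_t Quantize(float x, float scale, int zero_point) {
          int q = static_cast<int>(std::lround(x / scale)) + zero_point;
          return static_cast<int8_t>(std::clamp(q, -128, 127));
        }

        // Dequantize: x' = (q - zero_point) * scale.
        float Dequantize(int8_t q, float scale, int zero_point) {
          return static_cast<float>(q - zero_point) * scale;
        }

        int main() {
          const float scale = 0.05f;  // example parameters only
          const int zero_point = 0;
          const float x = 1.23f;
          int8_t q = Quantize(x, scale, zero_point);
          // The pair reproduces x up to quantization error: 1.23 -> 25 -> 1.25.
          std::printf("x=%f q=%d x'=%f\n", x, q, Dequantize(q, scale, zero_point));
        }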
  3. tensorflow/compiler/mlir/lite/utils/fake_quant_utils.cc

    // and tfl.dequantize pairs before the tf.FakeQuant* ops are folded.
    LogicalResult ConvertFakeQuantOps(func::FuncOp func, MLIRContext* ctx,
                                      bool use_fake_quant_num_bits) {
      OpBuilder builder(func);
      if (failed(UnwrapTFCustomOps(func, builder))) {
        return failure();
      }
    
      // Insert the tfl.quantize/tfl.dequantize ops after the tf.FakeQuant* ops to
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Sat Jun 03 00:14:05 UTC 2023
    - 4.3K bytes
    - Viewed (0)
  4. tensorflow/compiler/mlir/lite/transforms/post_quantize_patterns.td

    include "mlir/IR/OpBase.td"
    include "mlir/IR/PatternBase.td"
    include "mlir/Dialect/Func/IR/FuncOps.td"
    include "tensorflow/compiler/mlir/lite/ir/tfl_ops.td"
    
    // Both Quantize and Dequantize ops have side effects, so we have to define
    // patterns to remove dead ones after the quantization rewrite.
    def : Pat<(TFL_QuantizeOp:$op $in, $qt), (replaceWithValue $in), [(HasNoUseOf:$op)]>;
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed Mar 16 23:20:46 UTC 2022
    - 1.2K bytes
    - Viewed (0)
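    The Pat above rewrites a tfl.quantize whose result is never used into its own input, which deletes the dead op. As a plain C++ sketch of the same cleanup over a hypothetical op list (not the real MLIR pattern driver):

        #include <algorithm>
        #include <string>
        #include <vector>

        // Hypothetical stand-in for an op plus a use count.
        struct QuantOp {
          std::string name;
          int num_uses;
        };

        // Mirrors the HasNoUseOf constraint: quantize/dequantize ops whose
        // results are never consumed are erased after the quantization rewrite.
        void RemoveDeadQuantOps(std::vector<QuantOp>& ops) {
          ops.erase(std::remove_if(ops.begin(), ops.end(),
                                   [](const QuantOp& op) { return op.num_uses == 0; }),
                    ops.end());
        }

        int main() {
          std::vector<QuantOp> ops = {{"tfl.quantize", 0}, {"tfl.dequantize", 2}};
          RemoveDeadQuantOps(ops);  // only the still-used dequantize remains
        }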
  5. tensorflow/compiler/mlir/lite/quantization/ir/Passes.td

    }
    
    def QuantConvertSimulatedQuant
        : Pass<"quant-convert-simulated-quantization", "func::FuncOp"> {
      let summary = "Converts training-time simulated quantization ops to "
                    "corresponding quantize/dequantize casts";
      let constructor = "mlir::quantfork::createConvertSimulatedQuantPass()";
    }
    
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Fri Jul 29 18:55:28 UTC 2022
    - 1.3K bytes
    - Viewed (0)
  6. tensorflow/compiler/mlir/lite/experimental/tac/transforms/device_transform.h

    RewritePatternSet GetHardwareRewritePatterns(MLIRContext* context,
                                                 const std::string& hardware);
    
    // Converts quantized ops to float; this essentially inserts a dequantize &
    // quantize pair around the op.
    void ConvertQuantizedOpToFloat(func::FuncOp func, OpBuilder* builder);
    
    // This will optimize the quantized ops -> float graph.
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue Jun 07 18:43:51 UTC 2022
    - 2K bytes
    - Viewed (0)
  7. tensorflow/compiler/mlir/quantization/common/uniform_quantized_types.cc

    // `tfl.quantize` or `tfl.dequantize` ops. ui8, i8 and i16 are supported.
    bool IsSupportedByTfliteQuantizeOrDequantizeOps(IntegerType storage_type) {
      if (storage_type.getWidth() == 8 ||
          (storage_type.isSigned() && storage_type.getWidth() == 16)) {
        return true;
      }
      LLVM_DEBUG(llvm::dbgs()
                 << "Uniform quantize / dequantize op only supports ui8, i8 or "
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 8.4K bytes
    - Viewed (0)
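    The check above accepts 8-bit storage of either signedness plus signed 16-bit, i.e. the ui8/i8/i16 support named in the comment. A dependency-free restatement of the same predicate, with a toy struct in place of mlir::IntegerType:

        #include <cassert>

        // Toy stand-in for mlir::IntegerType: bit width and signedness only.
        struct StorageType {
          unsigned width;
          bool is_signed;
        };

        // Same logic as IsSupportedByTfliteQuantizeOrDequantizeOps.
        bool IsSupported(StorageType t) {
          return t.width == 8 || (t.is_signed && t.width == 16);
        }

        int main() {
          assert(IsSupported({8, false}));    // ui8
          assert(IsSupported({8, true}));     // i8
          assert(IsSupported({16, true}));    // i16
          assert(!IsSupported({16, false}));  // ui16 is rejected
        }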
  8. tensorflow/compiler/mlir/quantization/common/quantization_lib/quantization.td

        left as is for weight-only, which means the weight is dequantized at runtime.
    
        For example, if the kernel does not support dynamic range quantization the
        graph will be converted into the following IR:
    
        %q_w = "tfl.pseudo_qconst"() {
             qtype = tensor<64x3x3x3x!quant.uniform<i8<-127:127>:f32, 1.000000e+00>>
        %w = "tfl.dequantize"(%q_w) :
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue Mar 05 07:39:40 UTC 2024
    - 8.3K bytes
    - Viewed (0)
  9. tensorflow/compiler/mlir/quantization/tensorflow/tests/propagate_quantize_type.mlir

    // CHECK: %[[IDENTITY:.*]] = "tf.Identity"(%cst_0) : (tensor<200x100x300xi8>) -> tensor<200x100x300xi8>
    // CHECK: %[[DEQUANTIZED:.*]] = "tf.PartitionedCall"(%[[IDENTITY]]) <{config = "", config_proto = "", executor_type = "", f = @composite_dequantize_uniform}> : (tensor<200x100x300xi8>) -> tensor<200x100x300xf32>
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Mon Oct 30 06:52:55 UTC 2023
    - 6.6K bytes
    - Viewed (0)
  10. tensorflow/compiler/aot/quantize.h

    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed Sep 06 19:12:29 UTC 2023
    - 1.4K bytes
    - Viewed (0)