- Sort Score
- Result 10 results
- Languages All
Results 61 - 70 of 200 for requantize (0.39 sec)
-
tensorflow/compiler/mlir/lite/utils/fake_quant_utils.cc
// and tfl.dequantize pairs before tf.FakeQuant* being folded. LogicalResult ConvertFakeQuantOps(func::FuncOp func, MLIRContext* ctx, bool use_fake_quant_num_bits) { OpBuilder builder(func); if (failed(UnwrapTFCustomOps(func, builder))) { return failure(); } // Insert the tfl.quantize/tfl.dequantize ops after the tf.FakeQuant* ops to
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Sat Jun 03 00:14:05 UTC 2023 - 4.3K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/transforms/passes.td
]; } def DecomposeHybridQuantizationPass : Pass<"tfl-decompose-hybrid-quantization", "mlir::func::FuncOp"> { let summary = "Decomposes hybrid quantization to explicit quantize / dequantize"; let description = [{ Decomposes (with explicit quantize/dequantize ops) selected math operations which exist in the model with hybrid quantization (some arguments/results left in floating point). }];
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Apr 24 20:30:06 UTC 2024 - 22.6K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/transforms/post_quantize_patterns.td
include "mlir/IR/OpBase.td" include "mlir/IR/PatternBase.td" include "mlir/Dialect/Func/IR/FuncOps.td" include "tensorflow/compiler/mlir/lite/ir/tfl_ops.td" // Both Quantize and Dequantize ops have side effects, so we have to define // patterns to remove dead ones after the quantization rewrite. def : Pat<(TFL_QuantizeOp:$op $in, $qt), (replaceWithValue $in), [(HasNoUseOf:$op)]>;
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Mar 16 23:20:46 UTC 2022 - 1.2K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/passes/prepare_quantize_drq.cc
llvm::cl::desc("Whether enable per-channel quantized weights.")}; }; // If the weight is applicable to dynamic range quantization, insert Quantize // and Dequantize ops with per-tensor scale. class PrepareDRQQuantizableOp : public OpRewritePattern<arith::ConstantOp> { public: explicit PrepareDRQQuantizableOp(MLIRContext* context,
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 11.5K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/quantization/ir/Passes.td
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri Jul 29 18:55:28 UTC 2022 - 1.3K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/tests/quantize_weights.mlir
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Mon Oct 30 06:52:55 UTC 2023 - 42K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/transforms/prepare_quantize_dynamic_range.cc
quant::QuantizationSpecs quant_specs_; }; #include "tensorflow/compiler/mlir/lite/utils/generated_op_quant_spec_getters.inc" // If the weight is applicable to dynamic range quantization, insert Quantize // and Dequantize ops with either per-axis or per-tensor scale. class PrepareDynamicRangeQuantizableOp : public OpRewritePattern<arith::ConstantOp> { public: explicit PrepareDynamicRangeQuantizableOp(
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 20.8K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/experimental/tac/transforms/device_transform.h
RewritePatternSet GetHardwareRewritePatterns(MLIRContext* context, const std::string& hardware); // Convert quantized ops to float, this will essentially insert dequantize & // quantize pair around the op. void ConvertQuantizedOpToFloat(func::FuncOp func, OpBuilder* builder); // This will optimize the quantized ops -> float graph.
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Jun 07 18:43:51 UTC 2022 - 2K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/common/uniform_quantized_types.cc
// `tfl.quantize` or `tfl.dequantize` ops. ui8, i8 and i16 are supported. bool IsSupportedByTfliteQuantizeOrDequantizeOps(IntegerType storage_type) { if (storage_type.getWidth() == 8 || (storage_type.isSigned() && storage_type.getWidth() == 16)) { return true; } LLVM_DEBUG(llvm::dbgs() << "Uniform quantize / dequantize op only supports ui8, i8 or "
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 8.4K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/tests/decompose-hybrid-quantization.mlir
// CHECK-DAG: %[[VAL2:.+]] = "tfl.dequantize"(%[[VAL0]]) // CHECK-DAG: %[[VAL3:.+]] = "tfl.dequantize"(%[[VAL1]]) // CHECK-DAG: %[[VAL4:.+]] = "tfl.conv_2d"(%arg0, %[[VAL2]], %[[VAL3]]) <{dilation_h_factor = 1 : i32, dilation_w_factor = 1 : i32, fused_activation_function = "NONE", padding = "SAME", stride_h = 1 : i32, stride_w = 1 : i32}>
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu May 02 09:41:17 UTC 2024 - 13.1K bytes - Viewed (0)