- Sort Score
- Result 10 results
- Languages All
Results 11 - 20 of 200 for requantize (0.18 sec)
-
tensorflow/compiler/mlir/tf2xla/tests/legalize-tf-quant.mlir
// CHECK: %[[REQUANTIZE:.*]] = mhlo.uniform_quantize %[[CONVERT_2]] : (tensor<4x!quant.uniform<i8:f32, 1.000000e+00:3>>) -> tensor<4x!quant.uniform<i8:f32, 2.000000e+00:5>> // CHECK: %[[CONVERT_3:.*]] = mhlo.bitcast_convert %[[REQUANTIZE]] : (tensor<4x!quant.uniform<i8:f32, 2.000000e+00:5>>) -> tensor<4xi8>
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu May 09 01:25:29 UTC 2024 - 37.3K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/stablehlo/passes/merge_fusion_with_dequantize.cc
auto func_op = dyn_cast_or_null<func::FuncOp>(symbol_table.lookup(func_name)); if (!func_op) return failure(); // The quantized fusion should have requantize and return ops at the end. auto return_op = dyn_cast_or_null<func::ReturnOp>( func_op.getRegion().getBlocks().front().getTerminator()); if (!return_op) return failure();
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 5.9K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/common/quantization_lib/quantization_utils.h
CustomMap custom_map = quant_params_.quant_spec.custom_map; // Rewrite the floating-point ops to the quantized version, by fusing // preceding dequantize ops and succeeding quantize ops. for (Operation* quantizing_op : quantizing_ops) { // If it is requantize op, we shouldn't rewrite this op. if (llvm::isa<QuantizeOpT, DequantizeOpT>(quantizing_op)) { return failure(); }
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Apr 24 20:30:06 UTC 2024 - 41.7K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/stablehlo/passes/quantization_patterns.h
if (failed(candidate_ops) || candidate_ops->empty()) return failure(); // Rewrite the floating-point ops to the quantized version, by fusing // preceding dequantize ops and succeeding quantize ops. for (Operation* candidate_op : *candidate_ops) { // If it is requantize op, we shouldn't rewrite this op. if (isa<QuantizeOpT, DequantizeOpT>(candidate_op)) { return failure(); }
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 10.9K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/stablehlo/passes/prepare_quantize.cc
auto func_op_quant_scale_spec = GetStableHloQuantConstraints; for (auto func_op : module_op.getOps<func::FuncOp>()) { // The function might contain more stats ops than required, and it will // introduce requantize if the calibration stats have conflicts. This tries // to remove all the redundant stats ops. RemoveRedundantStatsOps(func_op, func_op_quant_spec, func_op_quant_scale_spec);
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Apr 03 05:11:03 UTC 2024 - 8.1K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/common/quantization_lib/quantization_driver.cc
// TODO: b/323478683 - Make the attribute being part of op definition. quantize->setAttr(kVolatileOpAttrName, builder_.getUnitAttr()); // `original_result` has a use to `quantize`, so this will replace that use // by the result of `dequantize`. Remember to reset that use afterwards. value.replaceAllUsesWith(dequantize); quantize.getOperation()->replaceUsesOfWith(dequantize, value); }
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 38.1K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/passes/quantize.cc
TFDynamicRangeQuantization>(ctx, quant_params) {} }; // Removes quantize-dequantize pairs that are not used in the quantization. // The benefit of this pattern is set to lower value than other patterns, so // that the other patterns can work on quantize/dequantize ops first. class RemoveUnusedQdqPattern : public OpRewritePattern<quantfork::DequantizeCastOp> { public:
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri Mar 22 05:52:39 UTC 2024 - 23.6K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/stablehlo/passes/quantization_patterns.cc
} void rewrite(quantfork::DequantizeCastOp op, PatternRewriter& rewriter) const final { // Rewrite the floating-point ops to the quantized version, by fusing // preceding dequantize ops and succeeding quantize ops. for (Operation* op_with_region : op.getResult().getUsers()) { // Collect all the quantized inputs and "clone" the matched op by these // inputs. SmallVector<Value, 4> inputs;
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri May 03 06:04:36 UTC 2024 - 41.7K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/transforms/prepare_quantize.cc
// ranges. bool SetInputNodesQuantizationParams(func::FuncOp func); // The function might contain more stats ops than required, and it will // introduce requantize if the calibration stats have conflicts. This method // tries to remove all the redundant stats ops. bool RemoveRedundantStats(func::FuncOp func);
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 17.6K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/passes/prepare_quantize.cc
// Whether the func contains Quantize ops. This is used to determine whether // to use the quantization parameters from the fixed output range property. bool ContainsQuantizeOps(func::FuncOp func); QuantizationSpecs quant_specs_; Option<bool> enable_post_training_quantize_{ *this, "post-training-quantize", llvm::cl::init(false),
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 17.2K bytes - Viewed (0)