Results 1 - 10 of 10 for QuantizePass (0.79 sec)
tensorflow/compiler/mlir/lite/transforms/quantize.cc
struct QuantizePass : public impl::QuantizePassBase<QuantizePass> {
 public:
  MLIR_DEFINE_EXPLICIT_INTERNAL_INLINE_TYPE_ID(QuantizePass)

  // Constructor used by the PassRegistration and only used by test.
  explicit QuantizePass() { quant_specs.inference_type = tensorflow::DT_QINT8; }

  // Constructor used by manually creating the pass.
  explicit QuantizePass(const quant::QuantizationSpecs& quant_specs)
Last Modified: Wed Apr 24 20:30:06 UTC 2024 - 13.3K bytes
tensorflow/compiler/mlir/quantization/stablehlo/passes/quantize.cc
                      quantfork::QuantizeCastOp>(ctx) {}
};

class QuantizePass : public impl::QuantizePassBase<QuantizePass> {
 public:
  MLIR_DEFINE_EXPLICIT_INTERNAL_INLINE_TYPE_ID(QuantizePass)

  using impl::QuantizePassBase<QuantizePass>::QuantizePassBase;

  explicit QuantizePass(const bool enable_per_channel_quantized_weight) {
Last Modified: Mon Apr 22 07:08:19 UTC 2024 - 5K bytes
tensorflow/compiler/mlir/quantization/tensorflow/passes/quantize.cc
  }
};

// Applies quantization on the model in TF dialect.
class QuantizePass
    : public PassWrapper<QuantizePass, OperationPass<func::FuncOp>> {
 public:
  MLIR_DEFINE_EXPLICIT_INTERNAL_INLINE_TYPE_ID(QuantizePass)

  // Constructor used by the PassRegistration and only used by test.
  explicit QuantizePass() { quant_specs_.inference_type = tensorflow::DT_QINT8; }
Last Modified: Fri Mar 22 05:52:39 UTC 2024 - 23.6K bytes
tensorflow/compiler/mlir/quantization/stablehlo/passes/quantize_composite_functions.cc
  }

 private:
  void runOnOperation() override;
};

void QuantizeCompositeFunctionsPass::runOnOperation() {
  MLIRContext& ctx = getContext();
  PassManager pm(&ctx);
  // Intermediate output from QuantizePass will have quantized ops
  // (XlaCallModuleOps) with quantized input and output types, which are not
  // allowed in the TF dialect.
  pm.enableVerifier(false);
  PrepareQuantizePassOptions options;
Last Modified: Fri May 03 02:59:01 UTC 2024 - 4.6K bytes
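The pattern in this snippet is an outer pass that drives an inner PassManager with verification switched off while the intermediate IR is deliberately invalid. A minimal sketch of that shape follows; the pass name is hypothetical and the canonicalizer stands in for the real Prepare/Quantize sequence:

#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/Pass/Pass.h"
#include "mlir/Pass/PassManager.h"
#include "mlir/Transforms/Passes.h"

// Hypothetical driver pass; mirrors the shape of
// QuantizeCompositeFunctionsPass::runOnOperation() above.
struct InnerPipelineDriverPass
    : mlir::PassWrapper<InnerPipelineDriverPass,
                        mlir::OperationPass<mlir::ModuleOp>> {
  MLIR_DEFINE_EXPLICIT_INTERNAL_INLINE_TYPE_ID(InnerPipelineDriverPass)

  void runOnOperation() override {
    mlir::PassManager pm(&getContext());
    // Intermediate IR between the inner passes may not verify, so the
    // verifier is disabled for the whole inner pipeline.
    pm.enableVerifier(false);
    // Stand-in for the real Prepare/Quantize sequence.
    pm.addNestedPass<mlir::func::FuncOp>(mlir::createCanonicalizerPass());
    if (mlir::failed(pm.run(getOperation()))) signalPassFailure();
  }
};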
tensorflow/compiler/mlir/quantization/common/quantization_lib/quantization_config.h
  bool use_fake_quant_num_bits = false;

  // Names of ops to block from quantization. Used in QuantizePass.
  // For dynamic range quantization, ops in blocklist are quantized in weight-
  // only manner.
  absl::flat_hash_set<std::string> ops_blocklist;

  // Names of locations to block from quantization. Used in QuantizePass.
  absl::flat_hash_set<std::string> nodes_blocklist;
Last Modified: Wed Mar 13 10:16:19 UTC 2024 - 10.8K bytes
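Since the header shows ops_blocklist and nodes_blocklist as plain absl::flat_hash_set<std::string> members, excluding ops from quantization is just a matter of filling them in before constructing the pass. A minimal sketch, assuming the struct is reachable as mlir::quant::QuantizationSpecs; the op and node names are made up:

#include <string>
#include "absl/container/flat_hash_set.h"
#include "tensorflow/compiler/mlir/quantization/common/quantization_lib/quantization_config.h"

// Sketch: block two op kinds and one named location from quantization.
// Under dynamic-range quantization, blocked ops fall back to weight-only
// handling, per the comment in the header above.
mlir::quant::QuantizationSpecs BuildSpecsWithBlocklists() {
  mlir::quant::QuantizationSpecs specs;
  specs.inference_type = tensorflow::DT_QINT8;        // mirrors the default ctor above
  specs.ops_blocklist = {"tfl.softmax", "tfl.tanh"};  // hypothetical op names
  specs.nodes_blocklist = {"model/block_1/conv"};     // hypothetical location name
  return specs;
}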
tensorflow/compiler/mlir/lite/tf_tfl_passes.cc
  // TODO(b/265081639): When added PrepareQuantizeVariablesPass before adding
  // PrepareQuantizePass, an error occurs in certain model. It could fix it by
  // roll-back to run PrepareQuantizeVariablesPass, QuantizePass,
  // PostQuantizePass as suggested in cl/479634700. Need to figure out the
  // fundamental reason of the error, and (if needed) fix it without this
  // rollback.
  if (quant_specs.enable_mlir_variable_quantization) {
Last Modified: Thu Jun 06 18:45:51 UTC 2024 - 25.5K bytes
tensorflow/compiler/mlir/quantization/stablehlo/passes/passes.td
  let dependentDialects = [
    "mlir::stablehlo::StablehloDialect",
    "mlir::quant::QuantizationDialect",
    "mlir::quantfork::QuantizationForkDialect",
    "mlir::arith::ArithDialect",
  ];
}

def QuantizePass : Pass<"stablehlo-quantize", "mlir::ModuleOp"> {
  let summary = "Applies static-range quantization on ops by converting quantfork.qcast, quantfork.dcast, and float op into uniform quantized ops .";
  let options = [
Last Modified: Tue May 14 06:31:57 UTC 2024 - 10.3K bytes
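Because this .td entry registers the pass under the flag stablehlo-quantize anchored on mlir::ModuleOp, it can be scheduled by its textual name once registered. A minimal sketch using the standard MLIR pipeline parser; the wrapper function is hypothetical:

#include "mlir/Pass/PassManager.h"
#include "mlir/Pass/PassRegistry.h"

// Sketch: append the pass to a module-level pipeline by its registered flag,
// assuming the pass has been registered with the global registry.
mlir::LogicalResult AddStablehloQuantize(mlir::PassManager& pm) {
  return mlir::parsePassPipeline("stablehlo-quantize", pm);
}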
tensorflow/compiler/mlir/quantization/tensorflow/passes/quantize_composite_functions.cc
    return true;
  }
  return false;
}

// Unwraps quantization parameters of PartitionedCall ops with quantized
// input/outputs that are created from QuantizePass.
class QuantizeFunctionPattern
    : public mlir::OpRewritePattern<TF::PartitionedCallOp> {
 public:
  explicit QuantizeFunctionPattern(MLIRContext* context,
Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 54.5K bytes
tensorflow/compiler/mlir/lite/transforms/passes.td
Option<"use_fake_quant_num_bits_", "use-fake-quant-num-bits", "bool", "false", "Use quantization calculated from fake quant attributes.">, ]; } def QuantizePass : Pass<"tfl-quantize", "mlir::func::FuncOp"> { let summary = "Apply quantization on models in TensorFlow Lite dialect."; let constructor = "CreateDefaultQuantizePass()";
Last Modified: Wed Apr 24 20:30:06 UTC 2024 - 22.6K bytes
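The constructor string in this .td entry means the pass is created through CreateDefaultQuantizePass() and, given its func.func anchor, scheduled as a nested pass. A sketch, assuming the factory is declared in tensorflow/compiler/mlir/lite/transforms/passes.h under mlir::TFL like the rest of this directory:

#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/Pass/PassManager.h"
#include "tensorflow/compiler/mlir/lite/transforms/passes.h"

// Sketch: schedule tfl-quantize on every function in the module.
void AddTflQuantize(mlir::PassManager& pm) {
  pm.addNestedPass<mlir::func::FuncOp>(mlir::TFL::CreateDefaultQuantizePass());
}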
tensorflow/compiler/mlir/lite/transforms/prepare_tf.cc
                    ConvertTFDilatedConvOp<TF::DepthwiseConv2dNativeOp>>(ctx);
  patterns.add<RemoveIdentity>(ctx);
  TFL::populateWithGenerated(patterns);
  // TODO(fengliuai): Implement similar rule in the QuantizePass if the constant
  // folding hook of tfl.transpose and tfl.reshape are implemented.
  patterns.add<ReorderFakeQuantPattern<TF::ReshapeOp>,
               ReorderFakeQuantPattern<TF::TransposeOp>>(ctx);
Last Modified: Tue May 28 21:49:50 UTC 2024 - 64.6K bytes
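A RewritePatternSet populated as in this snippet is typically handed to MLIR's greedy rewrite driver at the end of runOnOperation(). A minimal sketch of that final step; the function wrapper is hypothetical:

#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/IR/PatternMatch.h"
#include "mlir/Transforms/GreedyPatternRewriteDriver.h"

// Sketch: apply a populated pattern set greedily to a function body.
void ApplyPreparePatterns(mlir::func::FuncOp func,
                          mlir::RewritePatternSet patterns) {
  // The result is deliberately ignored here; real passes usually call
  // signalPassFailure() when this fails.
  (void)mlir::applyPatternsAndFoldGreedily(func, std::move(patterns));
}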