- Sort by: Score
- Results per page: 10
- Languages: All
Results 1 - 9 of 9 for enable_per_channel_quantization_ (0.68 sec)
-
tensorflow/compiler/mlir/quantization/tensorflow/passes/preprocess_op.cc
bool enable_per_channel_quantization) { op_set_ = op_set; quantization_method_ = quantization_method; enable_per_channel_quantization_ = enable_per_channel_quantization; } PreprocessOpPass(const PreprocessOpPass& other) { op_set_ = other.op_set_; quantization_method_ = other.quantization_method_; enable_per_channel_quantization_ = other.enable_per_channel_quantization_; }
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 11.4K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/passes/prepare_quantize_drq.cc
OpSet op_set, bool enable_per_channel_quantization) : OpRewritePattern<arith::ConstantOp>(context), quant_specs_(quant_specs), op_set_(op_set), enable_per_channel_quantization_(enable_per_channel_quantization) {} LogicalResult matchAndRewrite(arith::ConstantOp op,
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 11.5K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/passes/prepare_quantize.cc
QuantMethod quantization_method) : quant_specs_(quant_specs) { quant_specs_.inference_type = tensorflow::DT_QINT8; enable_per_channel_quantization_ = !quant_specs_.disable_per_channel; enable_post_training_quantize_ = (quantization_method == tensorflow::quantization::QuantizationMethod:: METHOD_STATIC_RANGE_INT8);
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 17.2K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/utils/tf_to_uniform_attribute_utils.cc
LogicalResult FillAttributesForUniformQuantizedDotOp( PatternRewriter& rewriter, Operation* op, llvm::StringMap<Attribute>& identifier_to_attr, QuantMethod quantization_method, bool enable_per_channel_quantization) { NamedAttrList attrs; if (quantization_method == tensorflow::quantization::QuantizationMethod::METHOD_DYNAMIC_RANGE_INT8) { // Fill quantization related attributes for Hybrid op.
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 18.7K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/utils/tf_to_uniform_attribute_utils_test.cc
tensorflow::quantization::QuantizationMethod::METHOD_STATIC_RANGE_INT8; auto res = FillAttributesForUniformQuantizedAddOp( test_peer.rewriter_, op, identifier_to_attr, quantization_method, /*enable_per_channel_quantization=*/false); ASSERT_TRUE(succeeded(res)); ASSERT_EQ(2147483647, op.getLhsQuantizationMaxValAttr().getInt()); ASSERT_EQ(-2147483648, op.getLhsQuantizationMinValAttr().getInt());
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Mon Sep 11 00:47:05 UTC 2023 - 11.4K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/quantize_passes.cc
// applicable pm.addPass(mlir::quant::CreateQuantizeCompositeFunctionsPass( quantization_options.quantization_method().preset_method(), quantization_options.op_set(), quantization_options.enable_per_channel_quantization(), quantization_options.min_num_elements_for_weights(), quantization_options.enable_legacy_weight_only(), mlir_dump_file_prefix)); pm.addPass(mlir::createSymbolDCEPass());
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri May 10 04:07:09 UTC 2024 - 9.9K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/passes/passes.h
std::unique_ptr<OperationPass<ModuleOp>> CreateQuantizeCompositeFunctionsPass( tensorflow::quantization::QuantizationMethod::PresetMethod quantization_method, tensorflow::quantization::OpSet target_opset, bool enable_per_channel_quantization, int min_num_elements_for_weights, bool enable_legacy_weight_only = false, std::optional<const absl::string_view> mlir_dump_file_prefix = std::nullopt);
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri May 10 04:07:09 UTC 2024 - 12.3K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/python/quantize_model.py
), _DYNAMIC_RANGE_DEFAULT_MIN_NUM_ELEMENTS_FOR_WEIGHTS, ) if not quantization_options.HasField('enable_per_channel_quantization'): quantization_options.enable_per_channel_quantization = False if quantization_options.enable_per_channel_quantization and not ( ( quantization_options.op_set == quant_opts_pb2.OpSet.UNIFORM_QUANTIZED
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri May 17 03:36:50 UTC 2024 - 34.2K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/python/quantize_model.cc
// TODO: b/331302857 - Remove `enable_per_channel_quantized_weight` usage. quantization_config.mutable_static_range_ptq_preset() ->set_enable_per_channel_quantized_weight( quantization_options.enable_per_channel_quantization()); // When targeting server TPUs quantized types should be unpacked into // integer ops. quantization_config.mutable_pipeline_config()->set_unpack_quantized_types( true);
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri May 17 03:36:50 UTC 2024 - 23.8K bytes - Viewed (0)