- Sort by: Score
- Results per page: 10
- Languages: All
Results 11 - 14 of 14 for enable_per_channel_quantization_ (0.37 sec)
-
tensorflow/compiler/mlir/quantization/tensorflow/passes/passes.h
std::unique_ptr<OperationPass<ModuleOp>> CreateQuantizeCompositeFunctionsPass( tensorflow::quantization::QuantizationMethod::PresetMethod quantization_method, tensorflow::quantization::OpSet target_opset, bool enable_per_channel_quantization, int min_num_elements_for_weights, bool enable_legacy_weight_only = false, std::optional<const absl::string_view> mlir_dump_file_prefix = std::nullopt);
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri May 10 04:07:09 UTC 2024 - 12.3K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/python/quantize_model.py
), _DYNAMIC_RANGE_DEFAULT_MIN_NUM_ELEMENTS_FOR_WEIGHTS, ) if not quantization_options.HasField('enable_per_channel_quantization'): quantization_options.enable_per_channel_quantization = False if quantization_options.enable_per_channel_quantization and not ( ( quantization_options.op_set == quant_opts_pb2.OpSet.UNIFORM_QUANTIZED
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri May 17 03:36:50 UTC 2024 - 34.2K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/quantization_options.proto
// not applied regardless of the op support. Currently, it is supported for // XLA opset for SRQ on weight tensors (not activation), // and Uniform Quantized opset . optional bool enable_per_channel_quantization = 10; // Enables two inputs of an operation to be both tensors. // Currently supports MatMul and BatchMatMul ops for XLA. // TODO(b/263528090): Check the condition when this feature is beneficial.
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Mar 19 06:31:19 UTC 2024 - 9.2K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/python/quantize_model.cc
// TODO: b/331302857 - Remove `enable_per_channel_quantized_weight` usage. quantization_config.mutable_static_range_ptq_preset() ->set_enable_per_channel_quantized_weight( quantization_options.enable_per_channel_quantization()); // When targeting server TPUs quantized types should be unpacked into // integer ops. quantization_config.mutable_pipeline_config()->set_unpack_quantized_types( true);
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri May 17 03:36:50 UTC 2024 - 23.8K bytes - Viewed (0)