- Sort: Score
- Results per page: 10
- Languages: All
Results 1 - 10 of 14 for min_num_elements_for_weights (0.3 sec)
-
tensorflow/compiler/mlir/quantization/tensorflow/passes/lift_quantizable_spots_as_functions_drq.cc
explicit LiftQuantizableSpotsAsFunctionsDRQPass( const QuantMethod quantization_method, const OpSet target_opset, const int min_num_elements_for_weights) { quantization_method_ = quantization_method; target_opset_ = target_opset; min_num_elements_for_weights_ = min_num_elements_for_weights; } LiftQuantizableSpotsAsFunctionsDRQPass( const LiftQuantizableSpotsAsFunctionsDRQPass& other) {
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 8.5K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/quantize_passes.cc
quantization_options.quantization_method().preset_method(), quantization_options.op_set(), quantization_options.enable_per_channel_quantization(), quantization_options.min_num_elements_for_weights(), quantization_options.enable_legacy_weight_only(), mlir_dump_file_prefix)); pm.addPass(mlir::createSymbolDCEPass()); pm.addPass(mlir::TF::CreateTFShapeInferencePass());
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri May 10 04:07:09 UTC 2024 - 9.9K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/python/quantize_model.py
# TODO(b/242805842): Find good minimum_elements_for_weights number for server. # please also update default value in tflite converter: # tensorflow/compiler/mlir/lite/tf_to_tfl_flatbuffer.cc;l=201 if quantization_options.min_num_elements_for_weights == 0: quantization_options.min_num_elements_for_weights = ( _DYNAMIC_RANGE_DEFAULT_MIN_NUM_ELEMENTS_FOR_WEIGHTS )
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri May 17 03:36:50 UTC 2024 - 34.2K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/passes/passes.h
std::unique_ptr<OperationPass<ModuleOp>> CreateLiftQuantizableSpotsAsFunctionsDRQPass( tensorflow::quantization::QuantizationMethod::PresetMethod quantization_method, tensorflow::quantization::OpSet op_set, int min_num_elements_for_weights); // Replaces tf.CustomAggregator ops with quant.Stats ops for finalizing the // calibration procedure. std::unique_ptr<OperationPass<func::FuncOp>> CreateConvertCustomAggregationOpToQuantStatsPass();
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri May 10 04:07:09 UTC 2024 - 12.3K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/passes/quantize_composite_functions.cc
const bool enable_per_channel_quantization, const int min_num_elements_for_weights, const bool enable_legacy_weight_only, std::optional<const std::string> mlir_dump_file_name) : enable_legacy_weight_only_(enable_legacy_weight_only), min_num_elements_for_weights_(min_num_elements_for_weights), mlir_dump_file_name_(std::move(mlir_dump_file_name)) {
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 54.5K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/passes/lift_quantizable_spots_as_functions.cc
mlir::cast<ShapedType>(call_op.getOperand(0).getType()) .getNumElements(); if (num_elements < quant_options_.min_num_elements_for_weights()) { return absl::InternalError( "The params of Gather have fewer number of elements than " "the `min_num_elements_for_weights`."); } } // Disable quantization if the quantization method is NO_QUANTIZE.
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri May 10 04:07:09 UTC 2024 - 16.4K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/passes/quantize_weights.cc
if (!hasUsageFromQuantizableOp(op)) return failure(); // Check if the weight size is big enough. int num_elements_threshold = quant_options_.min_num_elements_for_weights(); int num_elements = cast<ShapedType>(op.getType()).getNumElements(); if (num_elements < num_elements_threshold) { op->emitRemark("Quantization is skipped because the op has ")
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Mar 05 07:39:40 UTC 2024 - 11.3K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/quantization_options.proto
// supported for Post-training Dynamic Range Quantization. By default, it is // set to 1024. To disable this, set the value to -1 explicitly. int64 min_num_elements_for_weights = 8; // When set to `true`, freezes all variables in the model into constants. // When set to `false` the model's large constants are converted to variables.
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Mar 19 06:31:19 UTC 2024 - 9.2K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/python/integration_test/quantize_model_test.py
), tags=tags, signature_keys=['serving_default'], op_set=quant_opts_pb2.XLA, # Gather op is opt-outed if the size is smaller than the threshold. min_num_elements_for_weights=1024 if expect_quantized_gather else 8192, ) data_gen = self._create_data_generator( input_key='input_tensor', shape=[6], minval=0, maxval=10,
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri May 17 03:36:50 UTC 2024 - 235.6K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/quantization/lite/quantize_weights.cc
quant_specs.inference_type = tflite::TflTypeToTfType(inference_type); quant_specs.weight_quantization = true; quant_specs.weight_only_quantization = weight_only_quantization; quant_specs.minimum_elements_for_weights = minimum_elements_for_weights; quant_specs.disable_per_channel = disable_per_channel; quant_specs.legacy_float_scale = legacy_float_scale; quant_specs.ops_blocklist = denylisted_mlir_op_names;
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Jun 12 23:15:24 UTC 2024 - 9.5K bytes - Viewed (0)