- Sort: Score
- Results per page: 10
- Languages: All
Results 1 - 3 of 3 for enable_float16_quantization_ (0.25 sec)
-
tensorflow/compiler/mlir/lite/transforms/quantize.cc
ParseCustomOpSpecs(enable_custom_op_weight_only_, quant::CustomOpUpdateOptions::kWeightOnly, quant_specs.custom_map); } if (enable_float16_quantization_) { quant_specs.inference_type = tensorflow::DT_HALF; } const quant::QuantPassSpec quant_params = { {quant_specs.verify_numeric, error_tolerance_,
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Apr 24 20:30:06 UTC 2024 - 13.3K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/transforms/passes.td
Option<"min_elements_for_weights_", "min-elements-for-weights", "int64_t", "1024", "The minimum number of elements in a weights array required to apply quantization.">, Option<"enable_float16_quantization_", "enable-float16-quantization", "bool", "false", "Whether apply float16 quantization. If false, int8 quantization is applied.">, Option<"enable_custom_op_quantization_",
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Apr 24 20:30:06 UTC 2024 - 22.6K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/transforms/prepare_quantize_dynamic_range.cc
stats_op.erase(); }); } void PrepareDynamicRangeQuantizePass::runOnOperation() { func::FuncOp func = getOperation(); MLIRContext* ctx = func.getContext(); if (enable_float16_quantization_) { quant_specs_.inference_type = tensorflow::DT_HALF; } quant_specs_.disable_per_channel = !enable_dynamic_range_per_channel_quantization_;
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 20.8K bytes - Viewed (0)