- Sort Score
- Results per page 10
- Languages All
Results 1 - 5 of 5 for error_tolerance_ (0.22 sec)
-
tensorflow/compiler/mlir/lite/transforms/quantize.cc
} if (enable_float16_quantization_) { quant_specs.inference_type = tensorflow::DT_HALF; } const quant::QuantPassSpec quant_params = { {quant_specs.verify_numeric, error_tolerance_, quant_specs.whole_model_verify, enable_log_if_failed_}, quant_specs}; populateWithGenerated(patterns); if (quant_specs.weight_quantization || quant_specs.use_fake_quant_num_bits ||
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Apr 24 20:30:06 UTC 2024 - 13.3K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/transforms/passes.td
]; let options = [ Option<"enable_numeric_verify_", "numeric-verify", "bool", "false", "Whether verify numericals at runtime.">, Option<"error_tolerance_", "error-tolerance", "float", "5.0f", "Error tolerance for numeric verify. Valid when `-numeric-verify` is set.">, Option<"enable_whole_model_verify_", "whole-model-verify",
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Apr 24 20:30:06 UTC 2024 - 22.6K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/stablehlo/passes/bridge/convert_tf_quant_to_mhlo_int_test.cc
})mlir"; TF_ASSERT_OK_AND_ASSIGN(auto arg0, CreateRandomF32Literal({10})); // error_tolerance is set to be slightly > scale because different rounding // implementations for UniformQuantize in TF kernel and the lowering passes // may cause +/-1 differences. ExecuteAndCompareResultsWithTfKernel( kProgram, {&arg0}, /*tf_program=*/std::nullopt, /*error_tolerance=*/0.35); }
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Apr 03 01:03:21 UTC 2024 - 35.8K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/common/quantization_lib/quantization_utils.h
bool verify_numeric = false; // Tolerance level from the quantized value for verification. If the tolerance // is very small(<0.1), only the stats of the diff is displayed. float error_tolerance = 5.0f; // Whether to verify numerical correctness layer by layer or by whole model bool whole_model_verify = false; // Whether to enable log for failures bool log_if_failed_flag = false; };
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Apr 24 20:30:06 UTC 2024 - 41.7K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/passes/quantize.cc
auto func = getOperation(); auto* ctx = func.getContext(); quant_specs_.weight_quantization = weight_quantization_; const QuantPassSpec quant_params = { {quant_specs_.verify_numeric, /*error_tolerance=*/5.0f, quant_specs_.whole_model_verify, /*enable_log_if_failed=*/false}, quant_specs_}; if (quant_specs_.weight_quantization) { patterns.add<TFDynamicRangeQuantization>(ctx, quant_params);
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri Mar 22 05:52:39 UTC 2024 - 23.6K bytes - Viewed (0)