- Sort by: Score
- Results per page: 10
- Languages: All
Results 1 - 10 of 15 for legacy_float_scale_ (0.49 sec)
-
tensorflow/compiler/mlir/lite/transforms/quantize.cc
}; class QuantizeConstPattern : public OpRewritePattern<QuantizeOp> { public: explicit QuantizeConstPattern(MLIRContext* context, bool legacy_float_scale) : OpRewritePattern<QuantizeOp>(context), legacy_float_scale_(legacy_float_scale) {} LogicalResult matchAndRewrite(QuantizeOp op, PatternRewriter& rewriter) const override { DenseFPElementsAttr attr;
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Apr 24 20:30:06 UTC 2024 - 13.3K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/common/quantization_lib/quantization_driver.h
op_quant_spec_getter_(op_quant_spec_getter), op_quant_scale_spec_getter_(op_quant_scale_spec_getter), infer_tensor_range_(infer_tensor_range), legacy_float_scale_(legacy_float_scale), is_qdq_conversion_(is_qdq_conversion) {} // The entry point of the quantization parameters propagation. void Run(); // Sets up the states for all the op results in the function.
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Mar 20 11:42:17 UTC 2024 - 16.8K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/common/quantization_lib/quantization_driver.cc
/*narrow_range=*/true, legacy_float_scale_); } else { // per-tensor quantization weight final_type = GetUniformQuantizedTypeForWeight( attr, /*symmetric=*/is_weight && is_signed_, /*num_bits=*/8, is_signed_, /*narrow_range=*/is_weight, legacy_float_scale_); } if (const auto quant_type = mlir::dyn_cast_or_null<QuantizedType>(final_type);
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 38.1K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/transforms/prepare_quantize.cc
"inference type specification"; signalPassFailure(); return; } quant_specs_.post_training_quantization = post_training_quantize_; quant_specs_.legacy_float_scale = legacy_float_scale_; quant_specs_.disable_set_input_nodes_quantization_params = disable_set_input_nodes_quantization_params_; } if (quant_specs_.post_training_quantization) {
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 17.6K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/transforms/passes.td
Option<"post_training_quantize_", "post-training-quantize", "bool", "false", "enable post training quantization. Only used in tests">, Option<"legacy_float_scale_", "legacy-float-scale", "bool", "false", "calculate quantization scales in float instead of double">, Option<"disable_per_channel_", "disable-per-channel", "bool", "false",
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Apr 24 20:30:06 UTC 2024 - 22.6K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/transforms/prepare_quantize_helper.h
const int adjusted_quant_dim, const bool legacy_float_scale) -> quant::QuantParams { if (auto qtype = mlir::dyn_cast_or_null<UniformQuantizedType>( quant::GetUniformQuantizedTypeForBias( quant_params, legacy_float_scale, adjusted_quant_dim))) { return quant::UniformQuantizedType::get(
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri May 03 18:01:23 UTC 2024 - 28K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/common/quantization_lib/quantization_utils.cc
is_signed, legacy_float_scale, use_fake_quant_num_bits); if (auto ele_type = dyn_cast_or_null<TensorType>(type)) return ele_type.getElementType(); return {}; } quant::QuantizedType GetUniformQuantizedTypeForBias( const std::vector<quant::QuantizedType>& op_types, const int adjusted_quant_dim, const bool legacy_float_scale) { if (op_types.empty()) return {};
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed May 08 02:10:16 UTC 2024 - 43.2K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/common/quantization_lib/quantization_utils.h
ConvertStatsToQDQs(int num_bits, bool narrow_range, bool is_signed, bool legacy_float_scale, MLIRContext* context) : OpRewritePattern<quantfork::StatisticsOp>(context), num_bits(num_bits), narrow_range(narrow_range), is_signed(is_signed), legacy_float_scale(legacy_float_scale) {} LogicalResult matchAndRewrite(quantfork::StatisticsOp op,
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Apr 24 20:30:06 UTC 2024 - 41.7K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/stablehlo/passes/insert_weight_param.cc
weight_type = dyn_cast<quant::QuantizedType>( quant::GetUniformQuantizedTypeForWeight( attr, /*symmetric=*/true, /*num_bits=*/8, /*is_signed=*/true, /*narrow_range=*/true, /*legacy_float_scale=*/false)); } else { int quantization_dimension = GetQuantizationDimension( weight_only_ptq, cast<TF::XlaCallModuleOp>(quantizable_op));
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu May 09 05:56:10 UTC 2024 - 10.2K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/passes/prepare_quantize.cc
// Currently, only activation stats are imported, so narrow_range = false. patterns.add<PrepareQuantStats>(bit_width, false, true, /*legacy_float_scale=*/false, ctx); if (failed(applyPatternsAndFoldGreedily(func, std::move(patterns)))) { signalPassFailure(); } SanityCheckAndAdjustment(func);
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 17.2K bytes - Viewed (0)