- Sort Score
- Results: 10 results
- Languages All
Results 1 - 10 of 10 for OpQuantSpec (0.27 sec)
-
tensorflow/compiler/mlir/quantization/tensorflow/passes/prepare_quantize_drq.cc
// Check whether dynamic range quantization can be applied. for (auto& use : value.getUses()) { Operation* user = use.getOwner(); int operand_num = use.getOperandNumber(); std::unique_ptr<OpQuantSpec> spec = GetTFOpQuantSpec(user); if (quant_specs_.inference_type == tensorflow::DT_QINT8 && spec->quantizable_operands.contains(operand_num)) { quantizable_ops.insert({user, operand_num});
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 11.5K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/stablehlo/ops/stablehlo_op_quant_spec_test.cc
const FailureOr<TF::XlaCallModuleOp> xla_call_module_op = FindFirstOpFromMainFunc<TF::XlaCallModuleOp>(*module_op); ASSERT_TRUE(succeeded(xla_call_module_op)); const std::unique_ptr<OpQuantSpec> op_quant_spec = GetStableHloOpQuantSpec(*xla_call_module_op); ASSERT_THAT(op_quant_spec, NotNull()); EXPECT_THAT(op_quant_spec->coeff_op_quant_dim, IsEmpty()); }
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 04 07:19:09 UTC 2024 - 14.8K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/utils/tf_to_uniform_attribute_utils.cc
// Per-channel activation is not supported attrs.push_back(rewriter.getNamedAttr("lhs_quantization_axis", rewriter.getI64IntegerAttr(-1))); } std::unique_ptr<OpQuantSpec> spec = GetUniformOpQuantSpec(op); absl::flat_hash_set<int> operands = spec->quantizable_operands; int quant_dim = -1; if (enable_per_channel_quantization && operands.size() == 1) {
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 18.7K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/transforms/prepare_quantize_helper.h
template <typename LstmOp> std::unique_ptr<quant::OpQuantSpec> GetLstmOpQuantSpec(LstmOp op) { operator_property::OpVariant lstm_variant; operator_property::OperatorProperty lstm_property; if (failed(GetLstmProperty(op, &lstm_variant, &lstm_property))) { return nullptr; } auto spec = std::make_unique<quant::OpQuantSpec>(); for (const auto& enumerated_inputs : lstm_property.inputs) {
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri May 03 18:01:23 UTC 2024 - 28K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/passes/preprocess_op.cc
PatternRewriter& rewriter, StringRef function_name) const { std::unique_ptr<OpQuantSpec> spec = GetTFOpQuantSpec(op); const absl::flat_hash_set<int> operands = spec->quantizable_operands; if (operands.size() != 1) return failure(); int weight_operand_idx = *operands.begin();
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 11.4K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/common/quantization_lib/quantization_driver.cc
InitializeStateForValue(op, index, value, /*as_result=*/true, states_, value_to_state_, operand_states_, result_states_); } std::unique_ptr<OpQuantSpec> QuantizationDriver::GetQuantSpec(Operation* op) { return op_quant_spec_getter_(op); } std::unique_ptr<OpQuantScaleSpec> QuantizationDriver::GetQuantScaleSpec( Operation* op) {
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 38.1K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/common/quantization_lib/quantization_utils.h
// the indices of biases can be found in the `biases_params`. absl::flat_hash_set<int> quantizable_operands; }; // A function signature for getting the particular OpQuantSpec for the provided // op. using OpQuantSpecGetter = std::function<std::unique_ptr<OpQuantSpec>(Operation*)>; // Quantization scale spec of an op. The information defined in the MLIR // interfaces FixedOutputRangeInterface and SameOperandsAndResultsScale should
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Apr 24 20:30:06 UTC 2024 - 41.7K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/passes/lift_quantizable_spots_as_functions.cc
"The unit-wise quantization method has been set to " "METHOD_NO_QUANTIZE."); } is_unitwise_quantization_enabled = true; } std::unique_ptr<OpQuantSpec> spec = GetTFOpQuantSpec(call_op); for (auto iter : spec->coeff_op_quant_dim) { Operation* preceding_op = call_op.getOperand(iter.first).getDefiningOp();
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri May 10 04:07:09 UTC 2024 - 16.4K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/transforms/prepare_quantize.cc
SanityCheckAndAdjustment(func); // Bind the getter with the fixed configuration parameter for the correct // quantization settings of the ops. std::function<std::unique_ptr<quant::OpQuantSpec>(Operation*)> op_quant_spec_getter = std::bind(GetOpQuantSpec, std::placeholders::_1, quant_specs_.disable_per_channel_for_dense_layers);
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 17.6K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/common/quantization_lib/quantization_driver.h
// different ops. bool IsWeight(Operation* cst) { return llvm::is_contained(weights_, cst); } // Returns all the related quantization constraints of the op. std::unique_ptr<OpQuantSpec> GetQuantSpec(Operation* op); std::unique_ptr<OpQuantScaleSpec> GetQuantScaleSpec(Operation* op); // Returns whether quantization parameters have been propagated to the results // of this op.
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Mar 20 11:42:17 UTC 2024 - 16.8K bytes - Viewed (0)