- Sort Score
- Results per page: 10
- Languages All
Results 1 - 10 of 21 for OpQuantSpec (0.17 sec)
-
tensorflow/compiler/mlir/quantization/tensorflow/ops/uniform_op_quant_spec.cc
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_ops.h" namespace mlir::quant { std::unique_ptr<OpQuantSpec> GetUniformOpQuantSpec(Operation* op) { auto spec = std::make_unique<OpQuantSpec>(); if (isa<TF::UniformQuantizedConvolutionHybridOp>(op) || isa<TF::UniformQuantizedConvolutionOp>(op)) { spec->coeff_op_quant_dim[1] = 3;
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Sun Mar 24 07:44:40 UTC 2024 - 1.6K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/stablehlo/ops/stablehlo_op_quant_spec.cc
// is set for `spec`. // TODO: b/323478683 - Duplicate tracking of config will be eliminated. // `OpQuantSpec` will be deprecated and `Method` will be used instead. void PopulateCoeffOpQuantDimIfPerChannelQuantized( TF::XlaCallModuleOp xla_call_module_op, OpQuantSpec& spec) { absl::StatusOr<Method> method = GetQuantizationMethod(xla_call_module_op); if (method.ok() && method->has_static_range_ptq()) {
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu May 09 05:56:10 UTC 2024 - 7.8K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/quantization/tools/op_quant_spec_getters_gen.cc
// name. std::vector<Record *> defs = records.getAllDerivedDefinitions("Op"); llvm::sort(defs, LessRecord()); OUT(0) << "static std::unique_ptr<quant::OpQuantSpec> " "GetOpQuantSpec(mlir::Operation *op, bool " "disable_per_channel_for_dense_layers = false) {\n"; // TODO(b/176258587): Move to OpTrait if this should be generalized.
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Feb 15 11:18:44 UTC 2024 - 4.9K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/ops/uniform_op_quant_spec.h
namespace mlir { namespace quant { // Returns the spec for the given operation that can be used for both of // dynamic and static range quantization. std::unique_ptr<OpQuantSpec> GetUniformOpQuantSpec(Operation* op); } // namespace quant } // namespace mlir
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Mar 05 07:39:40 UTC 2024 - 1.4K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/ops/tf_op_quant_spec.cc
return cur_spec; } return std::nullopt; } // TODO(b/228928859): Improve the getter function to match attributes rather // than function name. std::unique_ptr<OpQuantSpec> GetTFOpQuantSpec(Operation* op) { auto spec = std::make_unique<OpQuantSpec>(); if (auto call_op = dyn_cast<TF::PartitionedCallOp>(op)) { StringRef function_name = mlir::cast<FlatSymbolRefAttr>(call_op.getFAttr()).getValue();
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 6.3K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/common/quantization_lib/quantization_driver_test.cc
} } )mlir"; // TOOD: b/323478683 - Directly use types rather than creating a `unique_ptr`. std::unique_ptr<quant::OpQuantSpec> GetOpQuantSpec( const mlir::Operation* op, bool disable_per_channel_for_dense_layers = false) { auto spec = std::make_unique<quant::OpQuantSpec>(); spec->coeff_op_quant_dim[1] = 3; spec->biases_params[2] = {{0, 1}, quant::GetUniformQuantizedTypeForBias};
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 7.9K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/stablehlo/ops/stablehlo_op_quant_spec.h
#include "tensorflow/compiler/mlir/quantization/tensorflow/quantization_options.pb.h" namespace mlir::quant::stablehlo { // Returns StableHLO quantization specs for an op. std::unique_ptr<OpQuantSpec> GetStableHloOpQuantSpec(Operation* op); // Returns quantization constraints (ex: fixed output, same scale) given // a StableHLO op. std::unique_ptr<OpQuantScaleSpec> GetStableHloQuantConstraints(Operation* op);
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Apr 03 05:11:03 UTC 2024 - 1.7K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/ops/tf_op_quant_spec.h
GetWeightComponentSpec( const tensorflow::quantization::QuantizationOptions& quantization_options); // Returns the spec for the given operation that can be used for both of // dynamic and static range quantization. std::unique_ptr<OpQuantSpec> GetTFOpQuantSpec(Operation* op); // Returns quantization scale specs (fixed output, same scale) for a TF op. std::unique_ptr<OpQuantScaleSpec> GetTfQuantScaleSpec(Operation* op); } // namespace quant
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Mar 05 07:39:40 UTC 2024 - 2.5K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/passes/prepare_quantize_drq.cc
// Check whether dynamic range quantization can be applied. for (auto& use : value.getUses()) { Operation* user = use.getOwner(); int operand_num = use.getOperandNumber(); std::unique_ptr<OpQuantSpec> spec = GetTFOpQuantSpec(user); if (quant_specs_.inference_type == tensorflow::DT_QINT8 && spec->quantizable_operands.contains(operand_num)) { quantizable_ops.insert({user, operand_num});
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 11.5K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/stablehlo/ops/stablehlo_op_quant_spec_test.cc
const FailureOr<TF::XlaCallModuleOp> xla_call_module_op = FindFirstOpFromMainFunc<TF::XlaCallModuleOp>(*module_op); ASSERT_TRUE(succeeded(xla_call_module_op)); const std::unique_ptr<OpQuantSpec> op_quant_spec = GetStableHloOpQuantSpec(*xla_call_module_op); ASSERT_THAT(op_quant_spec, NotNull()); EXPECT_THAT(op_quant_spec->coeff_op_quant_dim, IsEmpty()); }
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 04 07:19:09 UTC 2024 - 14.8K bytes - Viewed (0)