- Sort Score
- Results per page: 10
- Languages All
Results 1 - 5 of 5 for QuantizationUnits (1.04 sec)
-
tensorflow/compiler/mlir/quantization/stablehlo/passes/quantize_weight.cc
// Marks users that are applicable for quantization where the criteria for // determining quantizable ops differs by the inference type. QuantizationUnits GetQuantizableOps(ConstantOp op) const { // Non-float tensors do not need quantization. QuantizationUnits quantizable_ops; const ShapedType type = mlir::dyn_cast<ShapedType>(op.getType()); if (!type || !type.getElementType().isF32()) return quantizable_ops;
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 9.9K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/passes/prepare_quantize_drq.cc
// The prepare-quantize-drq Pass. // namespace mlir { namespace quant { namespace { using QuantizationUnit = std::pair<Operation*, int>; using QuantizationUnits = llvm::SetVector<QuantizationUnit>; using ::tensorflow::quantization::OpSet; // Applies prepare quantization on the model in TF dialect for dynamic range // quantization case. class PrepareQuantizeDRQPass
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 11.5K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/transforms/prepare_quantize_dynamic_range.cc
// A boolean attribute used to describe whether input activations need to be // asymmetrically quantized. constexpr char kAsymmetricQuantizeInputsAttr[] = "asymmetric_quantize_inputs"; using QuantizationUnits = llvm::SetVector<std::pair<Operation*, int>>; // Applies prepare dynamic range quantization on the model in TFL dialect. // This pass runs before the quantization pass and apply preprocess if // applicable.
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 20.8K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/stablehlo/cc/report.h
namespace mlir::quant::stablehlo { // A class that manages information about `QuantizableUnit`s post-quantization, // internally in the form of `QuantizationUnits`. It is used to collect // quantization summary from a quantized `ModuleOp` and emit it in a human- and // machine-readable format. class QuantizationReport { public: QuantizationReport() = default;
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 10:10:34 UTC 2024 - 2.7K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/passes/preprocess_op.cc
// namespace mlir { namespace quant { namespace { using QuantMethod = ::tensorflow::quantization::QuantizationMethod::PresetMethod; using QuantizationUnit = std::pair<Operation*, int>; using QuantizationUnits = llvm::SetVector<QuantizationUnit>; using ::tensorflow::quantization::OpSet; // Preprocesses ops to allow multi-axis quantization, prior to quantization
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 11.4K bytes - Viewed (0)