Results 31 - 40 of 83 for Quantize
tensorflow/compiler/mlir/quantization/common/ir/UniformSupport.h
  assert(scales_.size() == zero_points_.size());
}

// Quantize an Attribute by the quantization parameters. Return nullptr if
// the conversion fails or the input array isn't an ElementsAttr.
ElementsAttr convert(Attribute real_value);

private:
// Quantize a DenseFPElementsAttr by the quantization parameters.
DenseElementsAttr convert(DenseFPElementsAttr attr);
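The convert methods above apply uniform affine quantization element by element. A minimal sketch of that mapping, assuming plain std::vector buffers in place of MLIR attributes (QuantizeToInt8 is a hypothetical name, not the TensorFlow API):

#include <algorithm>
#include <cmath>
#include <cstdint>
#include <vector>

// q = clamp(round(x / scale) + zero_point, -128, 127), per element.
std::vector<int8_t> QuantizeToInt8(const std::vector<float>& values,
                                   float scale, int32_t zero_point) {
  std::vector<int8_t> out;
  out.reserve(values.size());
  for (float x : values) {
    int32_t q = static_cast<int32_t>(std::lround(x / scale)) + zero_point;
    out.push_back(static_cast<int8_t>(std::clamp(q, -128, 127)));
  }
  return out;
}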
tensorflow/compiler/mlir/quantization/tensorflow/passes/quantized_function_library_uniform_quantized.mlir
  func.return %dot_out : tensor<*x!tf_type.qint32>
}

// Quantize initial input at the start of the graph. Output is qint8.
func.func @quantize_i8(%input : tensor<*xf32>, %input_scale : tensor<*xf32>,
                       %input_zp : tensor<*xi32>) -> tensor<*x!tf_type.qint8> {
  %quantize = "tf.UniformQuantize"(%input, %input_scale, %input_zp) {
    Tin = "tfdtype$DT_FLOAT", Tout = "tfdtype$DT_QINT8",
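tf.UniformQuantize maps float inputs into the qint8 domain using the supplied scale and zero point. A companion sketch of the inverse, dequantizing, direction under the same toy-buffer assumption (DequantizeFromInt8 is a hypothetical name):

#include <cstdint>
#include <vector>

// x' = (q - zero_point) * scale, per element.
std::vector<float> DequantizeFromInt8(const std::vector<int8_t>& values,
                                      float scale, int32_t zero_point) {
  std::vector<float> out;
  out.reserve(values.size());
  for (int8_t q : values) {
    out.push_back(static_cast<float>(static_cast<int32_t>(q) - zero_point) *
                  scale);
  }
  return out;
}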
tensorflow/compiler/mlir/quantization/stablehlo/passes/quantization_patterns.h
      // The input of the quantize op has already been quantized, i.e.
      // rescale.
      return failure();
    }
    Operation* operand_op = operand.getDefiningOp();
    if (operand_op == nullptr) {
      // When `QuantizeOpT`'s operand does not have a defining op, it means it
      // is a `BlockArgument`. The pattern does not match if there is no op to
      // quantize.
      return failure();
    }
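The guard above rejects a match when the operand has no defining op, i.e. the value enters the region as a block argument. A simplified sketch of that check with toy stand-ins for the MLIR types (Operation, Value, and MatchQuantizeOperand here are illustrative, not the real classes):

struct Operation {};  // Toy stand-in for mlir::Operation.

struct Value {
  Operation* defining_op = nullptr;  // Null when the value is a block argument.
  Operation* getDefiningOp() const { return defining_op; }
};

// Returns false (no match) when the operand is a block argument: there is
// no producing op available to quantize.
bool MatchQuantizeOperand(const Value& operand) {
  Operation* operand_op = operand.getDefiningOp();
  if (operand_op == nullptr) return false;
  return true;
}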
tensorflow/compiler/mlir/quantization/common/uniform_quantized_types_test.cc
  EXPECT_FALSE(IsOpFullyQuantized(*add_op_itr));
}

TEST_F(IsOpFullyQuantizedTest, FalseIfOpPartiallyQuantized) {
  constexpr absl::string_view kQuantizeOp = R"mlir(
    func.func @quantize(%arg0: tensor<2xf32>) -> tensor<2x!quant.uniform<i8:f32, 1.000000e+00:0>> {
      %0 = stablehlo.uniform_quantize %arg0 : (tensor<2xf32>) -> tensor<2x!quant.uniform<i8:f32, 1.000000e+00:0>>
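The test expects the predicate to return false when an op mixes float and quantized types, as the uniform_quantize above does (f32 operand, quantized result). A toy sketch of that property, assuming simplified type tags instead of MLIR types:

#include <vector>

enum class TypeKind { kFloat, kQuantized };  // Toy stand-in for MLIR types.

struct ToyOp {
  std::vector<TypeKind> operand_types;
  std::vector<TypeKind> result_types;
};

// Fully quantized only if every operand and result type is quantized; a
// float operand feeding a quantized result makes it partially quantized.
bool IsFullyQuantized(const ToyOp& op) {
  for (TypeKind t : op.operand_types)
    if (t != TypeKind::kQuantized) return false;
  for (TypeKind t : op.result_types)
    if (t != TypeKind::kQuantized) return false;
  return true;
}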
tensorflow/compiler/mlir/quantization/common/quantization_lib/quantization_driver.cc
  // of `quantized_type`.
  if (new_value_type == nullptr) return;
  auto quantize = builder_.create<quantfork::QuantizeCastOp>(loc, new_value_type, value);
  auto dequantize = builder_.create<quantfork::DequantizeCastOp>(
      loc, expressed_type, quantize.getResult());
  // This attribute is set to distinguish the quantize ops being added by the
  // quantization pass. These ops can be removed without losing original
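A quantize/dequantize (Q-DQ) pair like the one built above is numerically a round trip: quantize, then immediately dequantize. A self-contained sketch of what that computes (the constants are arbitrary example values); the residual error stays within half a quantization step for in-range inputs:

#include <cmath>
#include <cstdint>
#include <cstdio>

int main() {
  const float scale = 0.05f;
  const int32_t zero_point = 0;
  const float x = 1.234f;
  // Quantize, then immediately dequantize, as a Q-DQ pair does.
  const int32_t q = static_cast<int32_t>(std::lround(x / scale)) + zero_point;
  const float x_prime = static_cast<float>(q - zero_point) * scale;
  // Prints an error of at most scale / 2 for in-range inputs.
  std::printf("x=%.4f q=%d x'=%.4f error=%.4f\n", x, q, x_prime, x - x_prime);
  return 0;
}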
tensorflow/compiler/mlir/quantization/tensorflow/passes/prepare_quantize.cc
  // Whether the func contains Quantize ops. This is used to determine whether
  // to use the quantization parameters from the fixed output range property.
  bool ContainsQuantizeOps(func::FuncOp func);

  QuantizationSpecs quant_specs_;

  Option<bool> enable_post_training_quantize_{
      *this, "post-training-quantize", llvm::cl::init(false),
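ContainsQuantizeOps presumably walks the function body looking for quantize ops. A toy sketch of that scan over op names (both the function shape and the op name checked are assumptions for illustration, not the real MLIR walk):

#include <string>
#include <vector>

// Reports whether any op in the (toy) function body is a quantize op.
bool ContainsQuantizeOps(const std::vector<std::string>& op_names) {
  for (const std::string& name : op_names) {
    if (name == "quantfork.qcast") return true;  // Assumed quantize op name.
  }
  return false;
}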
tensorflow/compiler/mlir/quantization/stablehlo/passes/quantize_weight.cc
  // 1. Collect quantizable ops.
  QuantizationUnits quantizable_ops = GetQuantizableOps(op);
  if (quantizable_ops.empty()) {
    return failure();
  }

  // 2. Quantize collected ops.
  if (!QuantizeOps(rewriter, op, quantizable_ops)) {
    return failure();
  }

  // 3. Complete the Q-DQ pair for each inference type.
  if (!ConvertToFloat16Constant(rewriter, op)) {
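The three numbered steps read as a short pipeline: collect, quantize, finish the Q-DQ pairs. A hedged outline of that control flow with stub placeholders for the real rewriter logic (all names below are toy stand-ins, not the pass's API):

#include <vector>

struct ToyOp {};
using QuantizationUnits = std::vector<ToyOp*>;

// Stubs standing in for the real collection/rewriting logic.
QuantizationUnits GetQuantizableOps(ToyOp* op) { return {op}; }
bool QuantizeOps(const QuantizationUnits&) { return true; }
bool ConvertToFloat16Constant(ToyOp*) { return true; }

bool QuantizeWeight(ToyOp* op) {
  // 1. Collect quantizable ops; nothing to do if the set is empty.
  QuantizationUnits quantizable_ops = GetQuantizableOps(op);
  if (quantizable_ops.empty()) return false;
  // 2. Quantize the collected ops.
  if (!QuantizeOps(quantizable_ops)) return false;
  // 3. Complete the Q-DQ pair for each inference type.
  return ConvertToFloat16Constant(op);
}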
tensorflow/compiler/mlir/lite/transforms/prepare_quantize.cc
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/lib/monitoring/counter.h"

//===----------------------------------------------------------------------===//
// The prepare-quantize Pass.
//
namespace mlir {
namespace TFL {
namespace {

#define GEN_PASS_DEF_PREPAREQUANTIZEPASS
#include "tensorflow/compiler/mlir/lite/transforms/passes.h.inc"
tensorflow/compiler/mlir/lite/transforms/prepare_quantize_dynamic_range.cc
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/platform/types.h"
// NOLINTNEXTLINE
//===----------------------------------------------------------------------===//
// The prepare-dynamic-range-quantize Pass.
//
namespace mlir {
namespace TFL {
namespace {

#define GEN_PASS_DEF_PREPAREDYNAMICRANGEQUANTIZEPASS
#include "tensorflow/compiler/mlir/lite/transforms/passes.h.inc"
tensorflow/compiler/mlir/quantization/stablehlo/quantization_config.proto
  // quantization.
  // * Non-convolution ops: All
  //
  // Default value: true
  bool enable_per_channel_quantized_weight = 2 [deprecated = true];

  // Whether to quantize all quantizable ops or only compute-heavy ops.
  bool enable_full_int_quantization = 3;
}

// Applies int8 per-channel weight-only post-training quantization for all
// dot_general and convolution ops.
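Per-channel weight quantization assigns each output channel its own scale. A sketch of how such scales could be derived for symmetric int8, assuming max-abs calibration per channel (PerChannelScales is a hypothetical helper, not part of this proto or its implementation):

#include <algorithm>
#include <cmath>
#include <vector>

// One scale per output channel: map [-max_abs, max_abs] onto [-127, 127].
std::vector<float> PerChannelScales(
    const std::vector<std::vector<float>>& channels) {
  std::vector<float> scales;
  scales.reserve(channels.size());
  for (const std::vector<float>& w : channels) {
    float max_abs = 0.0f;
    for (float v : w) max_abs = std::max(max_abs, std::fabs(v));
    scales.push_back(max_abs > 0.0f ? max_abs / 127.0f : 1.0f);
  }
  return scales;
}

Per-channel scales keep a large-magnitude channel from forcing a coarse step size onto every other channel, which is why the config above defaults to enabling it for convolution weights.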