- Sort by: Score
- Results per page: 10
- Languages: All
Results 1–10 of 206 for "Scales" (0.99 sec)
-
tensorflow/compiler/mlir/quantization/stablehlo/passes/bridge/legalize_tf_quant_test.cc
func.func @main(%arg0 : tensor<1xf32>) -> tensor<1xf32> { %scales = "tf.Const"() { value = dense<1.0> : tensor<f32> } : () -> tensor<f32> %zps = "tf.Const"() { value = dense<3> : tensor<i32> } : () -> tensor<i32> %0 = "tf.UniformQuantize"(%arg0, %scales, %zps) { quantization_axis = -1 : i64, quantization_min_val = -128 : i64, quantization_max_val = 127 : i64
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Feb 29 18:43:55 UTC 2024 - 7.2K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/stablehlo/tests/bridge/verify-quant-legalization.mlir
func.func @illegal_tf_uniform_quantize(%arg0 : tensor<1xf32>) -> tensor<1xf32> { %scales = "tf.Const"() { value = dense<1.0> : tensor<f32> } : () -> tensor<f32> %zps = "tf.Const"() { value = dense<3> : tensor<i32> } : () -> tensor<i32> // expected-error@+1 {{'tf.UniformQuantize' op is illegal as it is a UQ op or contains uq/qint types}} %0 = "tf.UniformQuantize"(%arg0, %scales, %zps) {
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri Aug 18 18:54:14 UTC 2023 - 3K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/tests/shape_inference_with_shape_specialization.mlir
// CHECK-NEXT: return %[[UDQ]] : tensor<1xf32> func.func @main(%arg0 : tensor<?xf32>) -> tensor<?xf32> { %scales = "tf.Const"() { value = dense<1.0> : tensor<f32> } : () -> tensor<f32> %zps = "tf.Const"() { value = dense<3> : tensor<i32> } : () -> tensor<i32> %0 = "tf.UniformQuantize"(%arg0, %scales, %zps) { quantization_axis = -1 : i64, quantization_min_val = -128 : i64, quantization_max_val = 127 : i64
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Apr 24 12:49:45 UTC 2024 - 2.8K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/stablehlo/tests/bridge/convert-tf-quant-types.mlir
{ %scales = "tf.Const"() { value = dense<1.0> : tensor<f32> } : () -> tensor<f32> %zps = "tf.Const"() { value = dense<3> : tensor<i32> } : () -> tensor<i32> // CHECK: %[[qint:.*]] = "tf.UniformQuantize" // CHECK: %[[int:.*]] = "tf.Cast"(%[[qint]]) <{Truncate = false}> : (tensor<1x!tf_type.qint8>) -> tensor<1xi8> %0 = "tf.UniformQuantize"(%arg0, %scales, %zps) {
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Mon Oct 30 06:52:55 UTC 2023 - 25.9K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/common/quantization_lib/quantization_utils.cc
const ArrayRef<double> scales = qtype.getScales(); // Broadcasting hasn't been implemented yet. if (static_cast<int64_t>(scales.size()) != factor_values.getNumElements()) return {}; SmallVector<double, 4> new_scales; new_scales.reserve(scales.size()); auto scales_iter = scales.begin(); for (const auto& f : factor_values) {
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed May 08 02:10:16 UTC 2024 - 43.2K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/common/uniform_quantized_types_test.cc
/*scale=*/1.0, /*zero_point=*/0); EXPECT_TRUE(quantized_type.getExpressedType().isF32()); } TEST_F(CreateI8F32UniformQuantizedTypeTest, SignedQuantizedTypeSucceeds) { const UniformQuantizedType quantized_type = CreateI8F32UniformQuantizedType(UnknownLoc::get(&ctx_), ctx_, /*scale=*/1.0, /*zero_point=*/0);
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 28.8K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow_to_stablehlo/tests/test_tf_to_stablehlo.mlir
func.func @main(%arg0 : tensor<?xf32>) -> tensor<?xf32> { %scales = "tf.Const"() { value = dense<1.0> : tensor<f32> } : () -> tensor<f32> %zps = "tf.Const"() { value = dense<3> : tensor<i32> } : () -> tensor<i32> %0 = "tf.UniformQuantize"(%arg0, %scales, %zps) { quantization_axis = -1 : i64, quantization_min_val = -128 : i64, quantization_max_val = 127 : i64
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue May 21 22:58:42 UTC 2024 - 1.5K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/common/ir/FakeQuantSupport.cc
} double scale; int64_t nudgedZeroPoint; getNudgedScaleAndZeroPoint(qmin, qmax, rmin, rmax, scale, nudgedZeroPoint); scales.push_back(scale); zeroPoints.push_back(nudgedZeroPoint); } unsigned flags = isSigned ? quant::QuantizationFlags::Signed : 0; return quant::UniformQuantizedPerAxisType::getChecked( loc, flags, storageType, expressedType, scales, zeroPoints,
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Mar 21 11:52:27 UTC 2024 - 7.7K bytes - Viewed (0) -
tensorflow/compiler/mlir/tf2xla/tests/legalize-tf-quant.mlir
// CHECK: return %[[DEQUANTIZE]] : tensor<2xf32> %0 = "tf.UniformQuantize"(%arg0, %scales, %zps) { quantization_axis = -1 : i64, quantization_min_val = -128 : i64, quantization_max_val = 127 : i64 } : (tensor<2xf32>, tensor<f32>, tensor<i32>) -> tensor<2x!tf_type.qint8> %1 = "tf.UniformDequantize"(%0, %scales, %zps) { quantization_axis = -1 : i64, quantization_min_val = -128 : i64, quantization_max_val = 127 : i64
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu May 09 01:25:29 UTC 2024 - 37.3K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/common/uniform_quantized_types.h
UniformQuantizedPerAxisType CreateI8F32UniformQuantizedPerAxisType( Location loc, MLIRContext& context, ArrayRef<double> scales, ArrayRef<int64_t> zero_points, int quantization_dimension, bool narrow_range = false); // Creates a `UniformQuantizedPerAxisType` with the given `scales` and // `zero_points` values. The produced type has f32 as its expressed type and
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 5.7K bytes - Viewed (0)