- Sort by: Score
- Results per page: 10
- Languages: All
Results 1 - 10 of 22 for zero_point (0.18 sec)
-
tensorflow/compiler/mlir/quantization/common/uniform_quantized_types_test.cc
/*scale=*/1.0, /*zero_point=*/0); EXPECT_TRUE(quantized_type.getExpressedType().isF32()); } TEST_F(CreateI8F32UniformQuantizedTypeTest, SignedQuantizedTypeSucceeds) { const UniformQuantizedType quantized_type = CreateI8F32UniformQuantizedType(UnknownLoc::get(&ctx_), ctx_, /*scale=*/1.0, /*zero_point=*/0); EXPECT_TRUE(quantized_type.isSigned()); }
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 28.8K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/common/ir/UniformSupport.h
UniformQuantizedValueConverter(double scale, double zero_point, double clamp_min, double clamp_max, uint32_t storage_bit_width, bool is_signed) : scale_(scale), zero_point_(zero_point), clamp_min_(clamp_min), clamp_max_(clamp_max), scale_double_(scale), zero_point_double_(zero_point), clamp_min_double_(clamp_min),
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed May 08 02:10:16 UTC 2024 - 9.8K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/tests/mlir2flatbuffer/lstm_quantized.mlir
// CHECK-NEXT: zero_point: [ 0 ] // CHECK-NEXT: }, // CHECK-NEXT: has_rank: true // CHECK-NEXT: }, { // CHECK-NEXT: shape: [ 2048, 528 ], // CHECK-NEXT: type: INT8, // CHECK-NEXT: buffer: 3, // CHECK-NEXT: name: "arg2", // CHECK-NEXT: quantization: { // CHECK-NEXT: scale: [ 0.031926 ], // CHECK-NEXT: zero_point: [ 0 ] // CHECK-NEXT: },
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Jul 14 16:41:28 UTC 2022 - 15.4K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/tests/mlir2flatbuffer/quantization.mlir
// CHECK-NEXT: zero_point: [ 151 ] // CHECK-NEXT: }, // CHECK-NEXT: has_rank: true // CHECK-NEXT: }, { // CHECK-NEXT: shape: [ 32 ], // CHECK-NEXT: type: INT32, // CHECK-NEXT: buffer: 5, // CHECK-NEXT: name: "tfl.pseudo_qconst1", // CHECK-NEXT: quantization: { // CHECK-NEXT: scale: [ 0.000171 ], // CHECK-NEXT: zero_point: [ 0 ] // CHECK-NEXT: },
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Jul 14 16:41:28 UTC 2022 - 11.9K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/calibrator/calibration_algorithm.py
minbound = 0 scale = (quant_max - quant_min) / maxbound zero_point = -quant_min / scale # Limit the range of zero_point and scale in case (quant_max - quant_min) # is unusually small. if abs(zero_point) > 9e9: zero_point = 9e9 if abs(scale) < 1e-9: scale = 1e-9 zero_point = round(zero_point) quantized_hist_mids = np.clip(
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Mon Mar 11 19:29:56 UTC 2024 - 14.7K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/common/quantization_lib/quantization_utils.cc
return scale * rate; }; const auto& recalculate_zero_point = [&](int64_t zero_point) -> int64_t { return qmax - std::round((storage_type_max - zero_point) / rate); }; if (auto q_type = dyn_cast<UniformQuantizedType>(type)) { const double scale = recalculate_scale(q_type.getScale()); const double zero_point = recalculate_zero_point(q_type.getZeroPoint());
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed May 08 02:10:16 UTC 2024 - 43.2K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/tests/end2end/fake_quant_per_channel_4bit.pbtxt
# CHECK: scale: [ 0.093635 ], # CHECK: zero_point: [ 22 ] # CHECK: } # CHECK: }, { # CHECK: shape: [ 1, 6, 31 ], # CHECK: type: INT8, # CHECK: buffer: 6, # CHECK: name: "output", # CHECK: quantization: { # CHECK: scale: [ 0.093635 ], # CHECK: zero_point: [ 22 ] # CHECK: } # CHECK: } ], # CHECK: inputs: [ 0 ],
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu May 02 09:41:17 UTC 2024 - 18.1K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/tests/flatbuffer2mlir/test_schema.fbs
// t[:, 1, :, :] will have scale[1]=2.0, zero_point[0]=2 // t[:, 2, :, :] will have scale[2]=3.0, zero_point[0]=3 quantized_dimension:int; } // Sparse tensors. // We use a modification of the TACO format. // Reference: http://tensor-compiler.org/kjolstad-oopsla17-tensor-compiler.pdf // // To encode a conceptual n-dimensional dense tensor with dims (d0, ..., dn-1),
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Mon Apr 19 19:46:06 UTC 2021 - 26.1K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/tests/end2end/fake_quant_per_channel.pbtxt
# CHECK: scale: [ 0.093635 ], # CHECK: zero_point: [ 22 ] # CHECK: } # CHECK: }, { # CHECK: shape: [ 1, 6, 31 ], # CHECK: type: INT8, # CHECK: buffer: 6, # CHECK: name: "output", # CHECK: quantization: { # CHECK: scale: [ 0.093635 ], # CHECK: zero_point: [ 22 ] # CHECK: } # CHECK: } ], # CHECK: inputs: [ 0 ],
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu May 02 09:41:17 UTC 2024 - 18.1K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/schema/schema_v3b.fbs
// t[:, 1, :, :] will have scale[1]=2.0, zero_point[0]=2 // t[:, 2, :, :] will have scale[2]=3.0, zero_point[0]=3 quantized_dimension:int; } // Sparse tensors. // We use a modification of the TACO format. // Reference: http://tensor-compiler.org/kjolstad-oopsla17-tensor-compiler.pdf // // To encode a conceptual n-dimensional dense tensor with dims (d0, ..., dn-1),
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue May 28 14:28:27 UTC 2024 - 30K bytes - Viewed (0)