- Sort by: Score
- Results per page: 10
- Languages All
Results 1 - 4 of 4 for IsI8F32UniformQuantizedPerAxisType (0.64 sec)
-
tensorflow/compiler/mlir/quantization/common/uniform_quantized_types.h
bool IsI8F32UniformQuantizedType(Type type); // Returns true iff `type` is a uniform quantized per-axis (per-channel) type // whose storage type is 8-bit integer and expressed type is f32. bool IsI8F32UniformQuantizedPerAxisType(Type type); // Returns true iff `type` is a uniform quantized type whose storage type is // 32-bit integer and expressed type is f32. bool IsI32F32UniformQuantizedType(Type type);
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 5.7K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/common/uniform_quantized_types.cc
LLVM_DEBUG(llvm::dbgs() << "Expected an f32 expressed type. Got: " << quantized_type << ".\n"); return false; } return true; } bool IsI8F32UniformQuantizedPerAxisType(const Type type) { const UniformQuantizedPerAxisType quantized_per_axis_type = mlir::dyn_cast_or_null<UniformQuantizedPerAxisType>(type); if (!quantized_per_axis_type) { LLVM_DEBUG(llvm::dbgs()
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 8.4K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/common/uniform_quantized_types_test.cc
builder_.getF32Type(), /*scales=*/{1.0}, /*zeroPoints=*/{0}, /*quantizedDimension=*/0, /*storageTypeMin=*/-128, /*storageTypeMax=*/127); EXPECT_TRUE(IsI8F32UniformQuantizedPerAxisType(qi8_per_axis_type)); EXPECT_FALSE(IsI8F32UniformQuantizedType(qi8_per_axis_type)); } TEST_F(IsI8F32UniformQuantizedTypeTest, UniformQuantizedPerAxisTypeSucceeds) {
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 28.8K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/stablehlo/transforms/uniform_quantized_stablehlo_to_tfl_pass.cc
using ::mlir::quant::GetElementType; using ::mlir::quant::IsDotGeneralFullyConnected; using ::mlir::quant::IsI32F32UniformQuantizedPerAxisType; using ::mlir::quant::IsI32F32UniformQuantizedType; using ::mlir::quant::IsI8F32UniformQuantizedPerAxisType; using ::mlir::quant::IsI8F32UniformQuantizedType; using ::mlir::quant::IsOpFullyQuantized; using ::mlir::quant::IsQuantizedTensorType; using ::mlir::quant::IsSupportedByTfliteQuantizeOrDequantizeOps;
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Mon Apr 22 09:00:19 UTC 2024 - 99.8K bytes - Viewed (0)