- Sort: Score
- Results per page: 10
- Languages: All
Results 1 - 10 of 16 for getExpressedType (0.3 sec)
-
tensorflow/compiler/mlir/lite/quantization/ir/QuantizeUtils.h
class QuantizedType; class UniformQuantizedType; } // namespace quant namespace quantfork { class UniformQuantizedValueConverter; /// Converts an attribute from a type based on /// quantizedElementType.getExpressedType() to one based on /// quantizedElementType.getStorageType(), where quantizedElementType is as from /// QuantizedType::getQuantizedElementType(). /// Returns nullptr if the conversion is not supported. On success, stores the
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri Jul 29 18:55:28 UTC 2022 - 3.1K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/common/quantization_lib/quantization_utils.cc
// dimension sizes are same. for (const auto op_type : op_types) { if (!op_type) return {}; if (expressed_type && expressed_type != op_type.getExpressedType()) { return {}; } expressed_type = op_type.getExpressedType(); if (const auto type = dyn_cast<quant::UniformQuantizedPerAxisType>(op_type)) { if (axis_size != 1 && axis_size != type.getScales().size()) return {};
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed May 08 02:10:16 UTC 2024 - 43.2K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/passes/prepare_lifting.cc
double new_scale = multiplier_array.front() * uniform_type.getScale(); new_qtype = UniformQuantizedType::get( uniform_type.getFlags(), uniform_type.getStorageType(), uniform_type.getExpressedType(), new_scale, uniform_type.getZeroPoint(), uniform_type.getStorageTypeMin(), uniform_type.getStorageTypeMax()); } else { auto new_scales =
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri May 17 17:58:54 UTC 2024 - 13.3K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/common/ir/UniformSupport.h
static_cast<double>(uniform_type.getStorageTypeMax()), uniform_type.getStorageTypeIntegralWidth(), uniform_type.isSigned()) { assert(isa<FloatType>(uniform_type.getExpressedType())); assert(uniform_type.getStorageType().isSignlessInteger()); } UniformQuantizedValueConverter(double scale, double zero_point, double clamp_min, double clamp_max,
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed May 08 02:10:16 UTC 2024 - 9.8K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/common/uniform_quantized_types_test.cc
const UniformQuantizedType quantized_type = CreateI8F32UniformQuantizedType(UnknownLoc::get(&ctx_), ctx_, /*scale=*/1.0, /*zero_point=*/0); EXPECT_TRUE(quantized_type.getExpressedType().isF32()); } TEST_F(CreateI8F32UniformQuantizedTypeTest, SignedQuantizedTypeSucceeds) { const UniformQuantizedType quantized_type = CreateI8F32UniformQuantizedType(UnknownLoc::get(&ctx_), ctx_,
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 28.8K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/quantization/ir/QuantizeUtils.cc
return convertPrimitiveValueAttr(realValue, quantizedElementType, converter, outConvertedType); } /// Convert an attribute from a type based on /// quantizedElementType.getExpressedType() to one based on /// quantizedElementType.getStorageType(). /// Returns nullptr if the conversion is not supported. /// On success, stores the converted type in outConvertedType. Attribute mlir::quantfork::quantizeAttr(
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 6.1K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/common/ir/UniformSupport.cc
if (auto vector_type = dyn_cast<VectorType>(input_type)) return VectorType::get(vector_type.getShape(), elemental_type); // If the expressed types match, just use the new elemental type. if (elemental_type.getExpressedType() == expressed_type) { return elemental_type; } // Unsupported. return nullptr; } ElementsAttr UniformQuantizedPerAxisValueConverter::convert( Attribute real_value) {
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed May 08 02:10:16 UTC 2024 - 4.3K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/transforms/prepare_quantize_helper.h
qtype = quantfork::fakeQuantAttrsToType( op.getLoc(), tensor_property.number_of_bits, calibrated_type.getMin(), calibrated_type.getMax(), /*narrowRange=*/false, calibrated_type.getExpressedType(), /*isSigned=*/this->quant_specs_.IsSignedInferenceType()); if (this->quant_specs_.legacy_float_scale) { qtype = mlir::cast<UniformQuantizedType>(
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri May 03 18:01:23 UTC 2024 - 28K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/common/uniform_quantized_types.cc
const Type storage_type = quantized_type.getStorageType(); return storage_type.isInteger(/*width=*/32); } bool IsExpressedTypeF32(const QuantizedType quantized_type) { const Type expressed_type = quantized_type.getExpressedType(); return mlir::isa<Float32Type>(expressed_type); } bool IsI8F32UniformQuantizedType(const Type type) { const UniformQuantizedType quantized_type = mlir::dyn_cast_or_null<UniformQuantizedType>(type);
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 8.4K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/common/quantization_lib/quantization_driver.cc
changed |= SetOperandParams( op, bias_index, UniformQuantizedType::getChecked( bias_op->getLoc(), params.getFlags(), params.getStorageType(), params.getExpressedType(), new_bias_scale, 0, params.getStorageTypeMin(), params.getStorageTypeMax())); arith::ConstantOp filter_op = DuplicateConstantOpIfNeeded(
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 38.1K bytes - Viewed (0)