- Sort: Score
- Results per page: 10
- Languages: All
Results 1 - 10 of 30 for getF32 (0.82 sec)
-
tensorflow/compiler/mlir/quantization/common/uniform_quantized_types.cc
return UniformQuantizedType::getChecked( loc, /*flags=*/QuantizationFlags::Signed, /*storageType=*/IntegerType::get(&context, /*width=*/8), /*expressedType=*/FloatType::getF32(&context), scale, zero_point, /*storageTypeMin=*/llvm::minIntN(8) + (narrow_range ? 1 : 0), /*storageTypeMax=*/llvm::maxIntN(8)); } UniformQuantizedType CreateI32F32UniformQuantizedType(
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 8.4K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/stablehlo/utils/tf_type_utils_test.cc
{2, 2}, quant::UniformQuantizedType::get( quant::QuantizationFlags::FlagValue::Signed, IntegerType::get(context.get(), 8), FloatType::getF32(context.get()), 3.0, 2, -128, 127)); auto dense_attr = GetDenseAttrFromTensorProtoAttr(GetQint8Tensor(), result_tensor_type); ASSERT_TRUE(succeeded(dense_attr));
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 9K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/experimental/tac/tflite_import_export.cc
cost = target_hardware->GetOpCost(op); } mlir::StringAttr device_identifier = mlir::StringAttr::get(module.getContext(), device); auto float_type = mlir::FloatType::getF32(module.getContext()); auto float_attr = mlir::FloatAttr::get(float_type, static_cast<float>(cost)); device_costs.push_back({device_identifier, float_attr}); } op->setAttr("per_device_costs",
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Jun 08 01:19:25 UTC 2023 - 4.6K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/utils/convert_tensor_test.cc
mlir::FloatType::getBF16(&context))); ASSERT_NO_FATAL_FAILURE(VerifyConversion<float>( {1.0, -1.0}, DT_FLOAT, mlir::FloatType::getF32(&context))); ASSERT_NO_FATAL_FAILURE(VerifyConversion<double>( {1.0, -1.0}, DT_DOUBLE, mlir::FloatType::getF64(&context))); ASSERT_NO_FATAL_FAILURE(VerifyConversion<tsl::float8_e5m2>(
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 10.4K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/quantization/device_target.cc
namespace quant { constexpr int k8Bits = 8; constexpr int k32Bits = 32; constexpr unsigned kSigned = QuantizationFlags::Signed; DeviceTarget::DeviceTarget(MLIRContext* ctx) : ctx_(ctx) { f32_ = FloatType::getF32(ctx_); i8_ = IntegerType::get(ctx_, k8Bits); i8_min_ = QuantizedType::getDefaultMinimumForInteger(kSigned, k8Bits); i8_max_ = QuantizedType::getDefaultMaximumForInteger(kSigned, k8Bits);
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri Mar 08 10:41:08 UTC 2024 - 7.3K bytes - Viewed (0) -
tensorflow/compiler/mlir/python/mlir_wrapper/mlir_wrapper.pyi
class FloatType(Type): def __init__(self, *args, **kwargs) -> None: ... def getBF16(self) -> FloatType: ... def getF16(self) -> FloatType: ... def getF32(self) -> FloatType: ... def getF64(self) -> FloatType: ... class FuncOp: def __init__(self, *args, **kwargs) -> None: ... def create(self, arg0: str, arg1: FunctionType) -> FuncOp: ...
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Mon Oct 09 17:10:09 UTC 2023 - 6.7K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/experimental/tac/transforms/device_transform.cc
return APInt(/*numBits=*/32, real_int); }; auto dequant_values = mlir::cast<DenseIntOrFPElementsAttr>(input_values) .mapValues(FloatType::getF32(rewriter.getContext()), llvm::function_ref<DequantizeFuncType>(dequantize_func)); rewriter.replaceOpWithNewOp<TFL::ConstOp>(dequant_op, dequant_op.getType(),
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 8.4K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/transforms/lower_tf.cc
Type tensor_type_f32; if (tensor_type.hasRank()) { tensor_type_f32 = tensorflow::GetTypeFromTFTensorShape( tensor_type.getShape(), FloatType::getF32(context)); } else { tensor_type_f32 = UnrankedTensorType::get(FloatType::getF32(context)); } // Add cast to f32 to conform with element type of result. operand = rewriter.create<CastOp>(op.getLoc(), tensor_type_f32, operand);
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 74.9K bytes - Viewed (0) -
tests/test_ambiguous_params.py
Registered: Mon Jun 17 08:32:26 UTC 2024 - Last Modified: Tue Dec 12 00:22:47 UTC 2023 - 2.1K bytes - Viewed (0) -
tensorflow/compiler/mlir/tf2xla/transforms/legalize_tf.cc
// accumulation over the given input type. Type GetSumAccumulationType(Type input_type) { MLIRContext *ctx = input_type.getContext(); if (input_type.isBF16() || input_type.isF16()) return FloatType::getF32(ctx); if (input_type.isSignlessInteger(8) || input_type.isSignlessInteger(16)) return IntegerType::get(ctx, 32); return input_type; }
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Jun 11 20:00:43 UTC 2024 - 291.8K bytes - Viewed (0)