- Sort Score
- Results per page: 10
- Languages All
Results 1 - 2 of 2 for AnyQuantizedType (0.25 sec)
-
tensorflow/compiler/mlir/lite/quantization/device_target.cc
i32_max_ = QuantizedType::getDefaultMaximumForInteger(kSigned, k32Bits); any_ = AnyQuantizedType(); qi8_ = AnyQuantizedType::get(kSigned, i8_, f32_, i8_min_, i8_max_); qi8n_ = AnyQuantizedType::get(kSigned, i8_, f32_, i8_min_ + 1, i8_max_); qi32_ = AnyQuantizedType::get(kSigned, i32_, f32_, i32_min_, i32_max_); assert(qi8n_ == qi8n_); } std::optional<KernelSpec> DeviceTarget::GetKernelSpec(
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri Mar 08 10:41:08 UTC 2024 - 7.3K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/quantization/device_target.h
ScaleDecomposeFn GetDecomposeFn(quantfork::QuantizeRegionOp op) const; // converts specification to signature: // - UniformedQuantizedType -> AnyQuantizedType // - AnyQuantizedType (int) -> AnyQuantizedType // - Float -> {} static void AppendToSignature(Type spec, KernelSpecs::Signature* signature); protected: // Adds the kernel spec with the custom scale function for the kernel.
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri Mar 08 10:41:08 UTC 2024 - 7.1K bytes - Viewed (0)