- Sort: Score
- Results per page: 10
- Languages: All
Results 71 - 80 of 294 for Quantized (0.19 sec)
-
tensorflow/compiler/mlir/quantization/stablehlo/cc/pass_pipeline.h
// exported as a TF SavedModel. void AddCallModuleSerializationPasses(OpPassManager& pm); // Passes for unpacking quantized ops to int valued StableHLO ops. This is // useful when uniform quantized types are suboptimal for the hardware. It goes // through a StableHLO <-> MHLO roundtrip to utilize the MHLOQuantToInt pass. void AddStablehloQuantToIntPasses(OpPassManager& pm);
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Mon Apr 15 12:53:33 UTC 2024 - 3.6K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/passes/quantized_function_library_uniform_quantized_drq.mlir
// limitations under the License. // Quantization as a function library with Uniform Quantized Ops for Dynamic // PTQ // // Internal functions should be marked as private. They will be inlined and // deleted in `InsertQuantizedFunctionsPass`. // // For Uniform Quantized op case, attributes are generated during quantize // composite pass. Therefore, attr_map is set to an empty string. module {
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Dec 01 12:06:54 UTC 2022 - 3.9K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/common/tfl_pass_config.h
bool reduce_type_precision = false; // Whether to consider this model a quantized model with quantize/dequantize // ops and to convert kernels to quantized kernels wherever appropriate. quant::QDQConversionMode qdq_conversion_mode = quant::QDQConversionMode::kQDQNone; // When set to true, StableHLO Quantizer is run. The full configuration for // the quantizer is at `TocoFlags::quantization_config`.
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed May 08 19:05:30 UTC 2024 - 6.5K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/utils/const_tensor_utils.cc
if (!mlir::isa<mlir::IntegerType>(raw_elem_type)) { return absl::InvalidArgumentError( "Quantized tensors must be stored as integers"); } storage_type = mlir::cast<mlir::IntegerType>(raw_elem_type); } // TFlite uses narrow-range [u]int8 for constant buffers of quantized weights. // Since we don't know which ones are weights, we represent this optimization
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue May 07 23:04:40 UTC 2024 - 16.6K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/quantization/ir/Passes.h
std::unique_ptr<OperationPass<func::FuncOp>> createConvertSimulatedQuantPass(); /// Creates a pass that converts constants followed by a qbarrier to a /// constant whose value is quantized. This is typically one of the last /// passes done when lowering to express actual quantized arithmetic in a /// low level representation. Because it modifies the constant, it is /// destructive and cannot be undone.
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri Jul 29 18:55:28 UTC 2022 - 2.3K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/translate/tf_mlir_translate_cl.cc
"float and quantized types"), llvm::cl::init("")); // NOLINTNEXTLINE opt<std::string> min_values( "tf-input-min-values", llvm::cl::desc( "Sets the lower bound of the input data. Separated by ','; Each entry " "in the list should match an entry in -tf-input-arrays. This is " "used when -tf-inference-type is a quantized type."), llvm::cl::Optional, llvm::cl::init(""));
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Aug 10 20:59:50 UTC 2023 - 5.5K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/stablehlo/passes/merge_fusion_with_dequantize.cc
SymbolTable symbol_table(call_op->getParentOfType<ModuleOp>()); auto func_op = dyn_cast_or_null<func::FuncOp>(symbol_table.lookup(func_name)); if (!func_op) return failure(); // The quantized fusion should have requantize and return ops at the end. auto return_op = dyn_cast_or_null<func::ReturnOp>( func_op.getRegion().getBlocks().front().getTerminator()); if (!return_op) return failure();
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 5.9K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/passes/quantized_function_library.mlir
%mul = "tf.Mul"(%cast, %scale) : (tensor<*xf32>, tensor<*xf32>) -> tensor<*xf32> func.return %mul : tensor<*xf32> } // Requantizes and clips to the range of quantized type if there is no specific activation. func.func private @internal_requantize_no_activation_fn(%accumulation : tensor<*xi32>, %input_scale : tensor<*xf32>, %input_zp : tensor<*xi32>,
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Mon Jan 08 01:16:10 UTC 2024 - 30.6K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/quantization/ir/Passes.td
let summary = "Converts constants followed by qbarrier to actual quantized " "values"; let constructor = "mlir::quantfork::createConvertConstPass()"; } def QuantConvertSimulatedQuant : Pass<"quant-convert-simulated-quantization", "func::FuncOp"> { let summary = "Converts training-time simulated quantization ops to " "corresponding quantize/dequantize casts";
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri Jul 29 18:55:28 UTC 2022 - 1.3K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/ops/tf_quantize_op.h
// of "apply<Name of the Quantization Algorithm>Quantization". // After applying the function, quantize/dequantize functions are created // where the body of each function contains a specific quantization algorithm. // The input of the quantize function has one operand of // IsValueWithQuantizablePrecision and the output is a tensor with supported // quantized precision (like int8). For the dequantize function, it is the other way // around.
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Sun Mar 24 07:44:40 UTC 2024 - 1.9K bytes - Viewed (0)