- Sort Score
- Results per page: 10
- Languages All
Results 121 - 130 of 200 for dequantize (0.27 sec)
-
tensorflow/compiler/mlir/tfr/passes/decompose_patterns.td
include "mlir/IR/PatternBase.td" include "mlir/Dialect/Arith/IR/ArithOps.td" include "mlir/Dialect/Func/IR/FuncOps.td" include "tensorflow/compiler/mlir/tfr/ir/tfr_ops.td" class Quantize<string value> : NativeCodeCall<"TFR::Quantize(" # value # ", $0, $1, $_builder)">; class HasStringAttr<string value> : AttrConstraint< CPred<"$_self.cast<StringAttr>().getValue() == \"" # value # "\"">>; def QuantActRangeNonePattern :
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Sep 29 21:02:21 UTC 2022 - 2.4K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/quantization/quantization_context.h
struct RequantizeState { // Sometimes, we have to "requantize" the quantization result to satisfy all // the constraints. The "requantize" can happen either on the input or output // of the quantization result. enum RequantizePosition { NO_REQUANTIZE, ON_INPUT, ON_OUTPUT } pos = NO_REQUANTIZE; // Quantization parameters will be used to add the requantize ops. QuantParams params; };
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Mon Apr 08 01:38:03 UTC 2024 - 9.9K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/ir/tf_generated_ops.td
let summary = "Quantizes then dequantizes a tensor."; let description = [{ This is almost identical to QuantizeAndDequantizeV2, except that it returns a gradient of 1 for inputs that are within the quantization range, or 0 otherwise. }]; let arguments = (ins Arg<TF_FloatTensor, [{Tensor to quantize and then dequantize.}]>:$input,
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Jun 11 23:24:08 UTC 2024 - 793K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/python/integration_test/quantize_model_test.py
): quantize_model.quantize( self._input_saved_model_path, self._output_saved_model_path, quantization_options=quantization_options, representative_dataset=representative_dataset, ) converted_model = quantize_model.quantize( self._input_saved_model_path, self._output_saved_model_path,
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri May 17 03:36:50 UTC 2024 - 235.6K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/stablehlo/quantization_options.proto
METHOD_UNSPECIFIED = 0; // go/do-include-enum-unspecified // Apply default weight-only quantization. Weights are quantized during // conversion, then dequantized during inference. // Activation: f32, Weight: qi8, Bias: f32 WEIGHT_ONLY = 1; // Apply default dynamic range quantization. Quantized tensor value's // ranges are determined during graph runtime.
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Jun 22 02:20:05 UTC 2023 - 3.6K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/quantization/numerical_utils.cc
int32_t qmax) { auto quantize = [scale, zero_point](float f) { return zero_point + static_cast<int32_t>(std::round(f / scale)); }; if (rmin.has_value() && rmax.has_value()) { return {std::max(qmin, quantize(rmin.value())), std::min(qmax, quantize(rmax.value()))}; } else if (rmin.has_value()) { return {std::max(qmin, quantize(rmin.value())), qmax}; } else if (rmax.has_value()) {
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Oct 17 19:57:04 UTC 2023 - 3.3K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/tests/quantize_drq.mlir
// RUN: tf-quant-opt %s -split-input-file -quant-lift-quantizable-spots-as-functions -quant-prepare-quantize-drq -quant-quantize='weight-quantization=true' -verify-each=false | FileCheck %s // ----- module { func.func @matmul(%arg0: tensor<1x2x2x3xf32>) -> (tensor<*xf32>) { %cst_0 = "tf.Const"() {value = dense<0.000000e+00> : tensor<2x1024xf32>} : () -> tensor<2x1024xf32>
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed May 08 19:32:28 UTC 2024 - 1.6K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/passes/quantize_weights.cc
// This is the argument used to refer to the pass in // the textual format (on the commandline for example). return "quant-quantize-weights"; } StringRef getDescription() const final { // This is a brief description of the pass. return "Quantize weights used by quantizable ops."; } void getDependentDialects(DialectRegistry& registry) const override {
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Mar 05 07:39:40 UTC 2024 - 11.3K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/quantization/lite/tfl_to_std.cc
namespace mlir { namespace TFL { void ConvertTFLQuantOpsToMlirQuantOps(func::FuncOp func) { OpBuilder b(func); func.walk([&](Operation* op) { b.setInsertionPoint(op); if (auto dq = llvm::dyn_cast<DequantizeOp>(op)) { auto dcast = b.create<quantfork::DequantizeCastOp>( dq.getLoc(), dq.getOutput().getType(), dq.getInput()); dq.getOutput().replaceAllUsesWith(dcast); dq.erase();
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Mon Apr 22 02:50:01 UTC 2024 - 3.5K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/utils/convert_type.h
// Returns element type from attribute Type 'type_attr'. mlir::Type GetShapeStrippedType(mlir::TypeAttr type_attr); // Returns true if 'val' is not from Quantize op or // from Quantize Op with same quant type as 'qtype_attr' bool NotFromQuantOpOrSameQuantType(mlir::Value val, mlir::TypeAttr qtype_attr); } // namespace tflite
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri May 03 18:01:23 UTC 2024 - 2.1K bytes - Viewed (0)