Results 31 - 40 of 277 for quantize (0.17 sec)
tensorflow/compiler/mlir/quantization/stablehlo/passes/quantization_patterns.h
namespace mlir::quant::stablehlo {

// Checks whether an op is connected with a quantized composite function. If
// not, the same-scale op will not be quantized. This decision is based on the
// current assumption that the performance gain of the same-scale op itself
// could not beat the overhead of the quantize and dequantize routines that
// need to be added around that op. When the assumption changes, this policy
// might change as well.
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 10.9K bytes - Viewed (0) -
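The overhead the comment weighs is that of the per-tensor quantize and dequantize routines wrapped around a same-scale op. A minimal standalone sketch of what such affine routines compute (names here are illustrative, not the pass's actual helpers):

    #include <algorithm>
    #include <cmath>
    #include <cstdint>

    // Hypothetical per-tensor affine quantization parameters.
    struct QuantParams {
      float scale;
      int32_t zero_point;
    };

    // q = clamp(round(x / scale) + zero_point) into the int8 range.
    int8_t Quantize(float x, QuantParams p) {
      int32_t q = static_cast<int32_t>(std::lround(x / p.scale)) + p.zero_point;
      return static_cast<int8_t>(std::clamp(q, -128, 127));
    }

    // x ~= (q - zero_point) * scale.
    float Dequantize(int8_t q, QuantParams p) {
      return (static_cast<int32_t>(q) - p.zero_point) * p.scale;
    }

For a cheap elementwise op, a Dequantize/Quantize pair on either side can cost more than the op itself, which is exactly the trade-off the policy above encodes.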
tensorflow/compiler/mlir/lite/tests/flatbuffer2mlir/quantization.mlir
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu May 02 09:41:17 UTC 2024 - 4.3K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/tests/quantize_drq.mlir
// RUN: tf-quant-opt %s -split-input-file -quant-lift-quantizable-spots-as-functions -quant-prepare-quantize-drq -quant-quantize='weight-quantization=true' -verify-each=false | FileCheck %s

// -----

module {
  func.func @matmul(%arg0: tensor<1x2x2x3xf32>) -> (tensor<*xf32>) {
    %cst_0 = "tf.Const"() {value = dense<0.000000e+00> : tensor<2x1024xf32>} : () -> tensor<2x1024xf32>
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed May 08 19:32:28 UTC 2024 - 1.6K bytes - Viewed (0) -
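The test above drives weight-only (dynamic-range) quantization, where constants such as %cst_0 are quantized ahead of time. A common scheme for the weights, sketched here as an assumption rather than what -quant-quantize actually emits, is symmetric per-tensor int8 with the scale taken from the largest-magnitude weight:

    #include <algorithm>
    #include <cmath>
    #include <cstdint>
    #include <vector>

    // Symmetric int8 weight quantization: zero_point is 0 and the scale maps
    // the largest-magnitude weight onto the edge of the int8 range.
    std::vector<int8_t> QuantizeWeights(const std::vector<float>& w,
                                        float& scale) {
      float max_abs = 0.0f;
      for (float v : w) max_abs = std::max(max_abs, std::fabs(v));
      scale = max_abs > 0.0f ? max_abs / 127.0f : 1.0f;
      std::vector<int8_t> q(w.size());
      for (size_t i = 0; i < w.size(); ++i) {
        long r = std::lround(w[i] / scale);
        q[i] = static_cast<int8_t>(std::clamp<long>(r, -127, 127));
      }
      return q;
    }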
tensorflow/compiler/mlir/quantization/common/quantization_lib/quantization_utils.h
      return;
    }
  } else {
    result = quantized_op->getResult(i);
  }
  for (auto user : result.getUsers()) {
    // Skip the Requantize op and set the user to the following dequantize
    // op. This happens when the quantizer tries to match the scale conflict
    // with QuantizeOp - QuantizeOp(requant) - DequantizeOp triples. The
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Apr 24 20:30:06 UTC 2024 - 41.7K bytes - Viewed (0) -
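The Requantize op that the comment skips rescales an already-quantized value from one (scale, zero_point) pair to another; that is how a scale conflict between a producer and its consumer is resolved. A float-domain sketch of the rescaling (a real kernel would do this in fixed point):

    #include <algorithm>
    #include <cmath>
    #include <cstdint>

    // Rescale q from (in_scale, in_zp) to (out_scale, out_zp). This has the
    // same effect as a dequantize followed by a quantize at the new params.
    int8_t Requantize(int8_t q, float in_scale, int32_t in_zp,
                      float out_scale, int32_t out_zp) {
      float real = (static_cast<int32_t>(q) - in_zp) * in_scale;
      int32_t r = static_cast<int32_t>(std::lround(real / out_scale)) + out_zp;
      return static_cast<int8_t>(std::clamp(r, -128, 127));
    }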
tensorflow/compiler/mlir/quantization/tensorflow/utils/fake_quant_utils.h
// Finally, use the quantization parameter to create the quantize and
// dequantize ops, and insert them between the tf.FakeQuantWithMinMaxVarsOp
// and its users.
auto quantize = rewriter.create<quantfork::QuantizeCastOp>(
    tf_op.getLoc(), qtype.getValue(), input);
auto dequantize = rewriter.create<quantfork::DequantizeCastOp>(
    tf_op.getLoc(), res_type, quantize.getResult());
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 6.3K bytes - Viewed (0) -
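The qtype handed to QuantizeCastOp above is derived from the FakeQuant op's min/max range. A sketch of the usual derivation, in which the zero point is nudged to an exact integer so that 0.0 stays representable (illustrative, assuming rmin <= 0 <= rmax and rmin < rmax; not the exact helper in fake_quant_utils.h):

    #include <cmath>
    #include <cstdint>

    struct NudgedParams {
      float scale;
      int32_t zero_point;
    };

    // Derive int8 parameters from a float [rmin, rmax] range, nudging the
    // zero point to an integer as tf.FakeQuantWithMinMaxVars-style ops do.
    NudgedParams NudgeRange(float rmin, float rmax) {
      const int32_t qmin = -128, qmax = 127;
      float scale = (rmax - rmin) / static_cast<float>(qmax - qmin);
      float zp_from_min = qmin - rmin / scale;
      int32_t zp;
      if (zp_from_min < qmin) zp = qmin;
      else if (zp_from_min > qmax) zp = qmax;
      else zp = static_cast<int32_t>(std::lround(zp_from_min));
      return {scale, zp};
    }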
tensorflow/compiler/mlir/quantization/common/quantization_lib/quantization_driver.cc
// TODO: b/323478683 - Make the attribute part of the op definition.
quantize->setAttr(kVolatileOpAttrName, builder_.getUnitAttr());

// `original_result` has a use to `quantize`, so this will replace that use
// with the result of `dequantize`. Remember to reset that use afterwards.
value.replaceAllUsesWith(dequantize);
quantize.getOperation()->replaceUsesOfWith(dequantize, value);
}
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 38.1K bytes - Viewed (0) -
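The two calls above are easy to misread: replaceAllUsesWith rewires every use of value, including the one feeding quantize itself, which would create a quantize -> dequantize -> quantize cycle; the follow-up replaceUsesOfWith restores quantize's operand. A toy use-list model (deliberately not MLIR's real classes) that shows why both calls are needed:

    #include <cassert>
    #include <string>
    #include <vector>

    struct Value;

    // Toy op: reads one operand Value, produces one result Value.
    struct Op {
      std::string name;
      Value* operand = nullptr;
      Value* result = nullptr;
    };

    struct Value {
      std::string name;
      std::vector<Op*> users;  // ops whose operand is this value
    };

    // Rewire every user of `from` to read `to` (a toy replaceAllUsesWith).
    void ReplaceAllUsesWith(Value& from, Value& to) {
      for (Op* op : from.users) {
        op->operand = &to;
        to.users.push_back(op);
      }
      from.users.clear();
    }

    int main() {
      Value x{"x"}, q_out{"q_out"}, dq_out{"dq_out"};
      Op quantize{"quantize", &x, &q_out};
      Op dequantize{"dequantize", &q_out, &dq_out};
      Op consumer{"consumer", &x, nullptr};
      x.users = {&quantize, &consumer};

      ReplaceAllUsesWith(x, dq_out);        // consumer now reads dq_out...
      assert(quantize.operand == &dq_out);  // ...but so does quantize: a cycle.

      // Mirrors quantize->replaceUsesOfWith(dequantize, value): restore the
      // original input of the quantize op.
      quantize.operand = &x;
      assert(consumer.operand == &dq_out && quantize.operand == &x);
    }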
tensorflow/compiler/mlir/quantization/tensorflow/passes/prepare_quantize.cc
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 17.2K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/stablehlo/quantization_config.proto
  // previous quantized layer (please note that this is different from
  // DEBUGGER_TYPE_FLOAT_PER_LAYER). Each layer in the debugging model
  // has a DumpTensor, and it is used to save the entire value of outputs from
  // both the quantized and unquantized layer.
  DEBUGGER_TYPE_INT_PER_LAYER = 2;

  // DEBUGGER_TYPE_FLOAT_PER_LAYER creates a debugging model with both
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri May 17 03:36:50 UTC 2024 - 14.3K bytes - Viewed (0) -
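DumpTensor saves each layer's outputs from both the quantized and the unquantized model, which is what lets a user localize quantization error layer by layer. A sketch of the kind of comparison those per-layer dumps enable (the metric and names are illustrative, not part of the proto):

    #include <cstddef>
    #include <vector>

    // Mean squared error between a layer's float output and the dequantized
    // output of its quantized counterpart, as captured by per-layer dumps.
    double LayerMSE(const std::vector<float>& float_out,
                    const std::vector<float>& quant_out) {
      double sum = 0.0;
      for (size_t i = 0; i < float_out.size(); ++i) {
        double d = float_out[i] - quant_out[i];
        sum += d * d;
      }
      return float_out.empty() ? 0.0 : sum / float_out.size();
    }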
tensorflow/compiler/mlir/quantization/common/ir/UniformSupport.h
    assert(scales_.size() == zero_points_.size());
  }

  // Quantize an Attribute by the quantization parameters. Return nullptr if
  // the conversion fails or the input array isn't an ElementsAttr.
  ElementsAttr convert(Attribute real_value);

 private:
  // Quantize a DenseFPElementsAttr by the quantization parameters.
  DenseElementsAttr convert(DenseFPElementsAttr attr);
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed May 08 02:10:16 UTC 2024 - 9.8K bytes - Viewed (0) -
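The asserted invariant scales_.size() == zero_points_.size() suggests the converter also handles per-axis parameters: one (scale, zero_point) pair per slice along the quantized dimension. A standalone sketch of per-axis conversion over a row-major 2-D array (illustrative; the real class converts ElementsAttr instances):

    #include <algorithm>
    #include <cassert>
    #include <cmath>
    #include <cstdint>
    #include <vector>

    // Quantize a row-major [rows x cols] array with one (scale, zero_point)
    // pair per row. The parallel vectors mirror scales_/zero_points_.
    std::vector<int8_t> ConvertPerAxis(const std::vector<float>& vals,
                                       size_t rows, size_t cols,
                                       const std::vector<float>& scales,
                                       const std::vector<int32_t>& zps) {
      assert(scales.size() == zps.size() && scales.size() == rows);
      std::vector<int8_t> out(vals.size());
      for (size_t r = 0; r < rows; ++r) {
        for (size_t c = 0; c < cols; ++c) {
          int32_t q = static_cast<int32_t>(
                          std::lround(vals[r * cols + c] / scales[r])) +
                      zps[r];
          out[r * cols + c] = static_cast<int8_t>(std::clamp(q, -128, 127));
        }
      }
      return out;
    }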
tensorflow/compiler/mlir/lite/transforms/prepare_quantize_dynamic_range.cc
// The prepare-dynamic-range-quantize Pass.
//
namespace mlir {
namespace TFL {
namespace {

#define GEN_PASS_DEF_PREPAREDYNAMICRANGEQUANTIZEPASS
#include "tensorflow/compiler/mlir/lite/transforms/passes.h.inc"

// A boolean attribute used to describe whether input activations need to be
// asymmetrically quantized.
constexpr char kAsymmetricQuantizeInputsAttr[] = "asymmetric_quantize_inputs";
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 20.8K bytes - Viewed (0)
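When asymmetric_quantize_inputs is set, input activations are quantized at run time from their observed range instead of ahead of time. A sketch of that dynamic step (illustrative names; the actual lowering lives in the TFLite kernels, not in this pass):

    #include <algorithm>
    #include <cassert>
    #include <cmath>
    #include <cstdint>
    #include <vector>

    // Dynamically quantize a non-empty activation buffer: observe min/max at
    // run time, derive asymmetric int8 parameters, then quantize.
    void DynamicQuantize(const std::vector<float>& act,
                         std::vector<int8_t>& out,
                         float& scale, int32_t& zero_point) {
      assert(!act.empty());
      auto [lo, hi] = std::minmax_element(act.begin(), act.end());
      float rmin = std::min(*lo, 0.0f);  // keep 0.0 representable
      float rmax = std::max(*hi, 0.0f);
      scale = rmax > rmin ? (rmax - rmin) / 255.0f : 1.0f;
      zero_point = static_cast<int32_t>(std::lround(-128.0f - rmin / scale));
      zero_point = std::clamp(zero_point, -128, 127);
      out.resize(act.size());
      for (size_t i = 0; i < act.size(); ++i) {
        int32_t q = static_cast<int32_t>(std::lround(act[i] / scale)) +
                    zero_point;
        out[i] = static_cast<int8_t>(std::clamp(q, -128, 127));
      }
    }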