Results 1 - 10 of 205 for unquantized (0.19 sec)
tensorflow/compiler/mlir/quantization/stablehlo/quantization_config.proto
// quantized and unquantized layers. The unquantized layer's input comes from
// the previous unquantized layer (note that this differs from
// DEBUGGER_TYPE_INT_PER_LAYER). Each layer in the debugging model has a
// DumpTensor, which is used to save the entire value of the outputs from
// both the quantized and unquantized layers.
DEBUGGER_TYPE_FLOAT_PER_LAYER = 3;
}
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri May 17 03:36:50 UTC 2024 - 14.3K bytes - Viewed (0)
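To make the FLOAT_PER_LAYER comparison concrete, below is a minimal NumPy sketch of the per-layer check this debugger enables; per_layer_error and the toy tensors are illustrative assumptions, not part of the proto or of any TensorFlow API.

import numpy as np

def dequantize(q, scale, zero_point):
    # Map int8 values back to float: x ~= scale * (q - zero_point).
    return scale * (q.astype(np.float32) - zero_point)

def per_layer_error(float_out, quant_out, scale, zero_point):
    # Compare an unquantized layer's output with the dequantized output of
    # its quantized counterpart, which is the comparison the two saved
    # DumpTensor values make possible.
    return float(np.mean(np.abs(float_out - dequantize(quant_out, scale, zero_point))))

x = np.random.randn(4, 8).astype(np.float32)
scale, zp = 0.05, 0
q = np.clip(np.round(x / scale) + zp, -128, 127).astype(np.int8)
print(per_layer_error(x, q, scale, zp))  # small mean absolute error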
tensorflow/compiler/mlir/quantization/tensorflow/passes/add_dump_tensor_op.cc
Value result = op->getResult(0);
rewriter.setInsertionPointAfterValue(result);
// In whole-model mode, we first need to set file_name to
// unquantized_tensor_data.pb, as it is used by the unquantized dump model.
// After the unquantized dump model is saved, the file name is changed to
// quantized_tensor_data.pb.
// Since this process doesn't happen in per-layer mode, we set file_name to
// quantized_tensor_data.pb here.
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri Mar 22 22:55:22 UTC 2024 - 13K bytes - Viewed (0)
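As a reading aid, here is a small Python sketch of the naming rule the comment describes; the helper and its arguments are hypothetical, not the actual pass logic.

def dump_tensor_file_name(debugger_type: str, unquantized_model_saved: bool) -> str:
    # Mirrors the comment above: whole-model mode starts with
    # unquantized_tensor_data.pb and switches the name after the
    # unquantized dump model is saved; per-layer modes use the
    # quantized name directly.
    if debugger_type == "DEBUGGER_TYPE_WHOLE_MODEL" and not unquantized_model_saved:
        return "unquantized_tensor_data.pb"
    return "quantized_tensor_data.pb"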
tensorflow/compiler/mlir/quantization/tensorflow/python/quantize_model.cc
"values. Parts of the graph are not quantized. " << status; } // Saves the current model to the `unquantized_dump_model_path` if the // debugger type is `DEBUGGER_TYPE_WHOLE_MODEL`. This is required // because in whole-model debugging mode the `DumpTensor` ops for the // unquantized tensors are only inserted in the unquantized model
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri May 17 03:36:50 UTC 2024 - 23.8K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/transforms/decompose_hybrid_quantization.cc
// (e.g. matmul) has both quantized and unquantized inputs by dequantizing
// the quantized inputs, performing the operation in the expressed type, then
// requantizing if a quantized output is required.
//
// The motivation behind these changes is to support dialects that assume only
// float or quantized computation and do not support a mixture of these types on
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 5.8K bytes - Viewed (0)
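The dequantize-compute-requantize decomposition translates naturally into NumPy. The sketch below is a toy instance under symmetric-int8 assumptions; the function names are mine, not the pass's.

import numpy as np

def dequant(q, scale, zp):
    return scale * (q.astype(np.float32) - zp)

def requant(x, scale, zp):
    return np.clip(np.round(x / scale) + zp, -128, 127).astype(np.int8)

def hybrid_matmul(x_f32, w_q, w_scale, w_zp, out_scale=None, out_zp=0):
    # Dequantize the quantized input, perform the op in the expressed
    # (float) type, then requantize only if a quantized output is required.
    y = x_f32 @ dequant(w_q, w_scale, w_zp)
    if out_scale is None:
        return y  # float output: no requantization needed
    return requant(y, out_scale, out_zp)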
tensorflow/compiler/mlir/quantization/stablehlo/passes/quantization_patterns.cc
//
// 1. Replaces a quantized `TF::XlaCallModuleOp` with a `func::CallOp`.
// 2. Quantizes the callee function.
//
// This pattern assumes its input is invalid IR, where even if a
// `TF::XlaCallModuleOp` is quantized the callee remains unquantized. Step (2)
// not only replaces the input and output tensor types with quantized ones, but
// also rewrites the body with a quantized equivalent.
//
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri May 03 06:04:36 UTC 2024 - 41.7K bytes - Viewed (0)
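A toy illustration of the two rewrite steps, using plain dicts as a stand-in IR; the real pass works on MLIR C++ APIs, so everything below is an assumption-laden sketch of the idea only.

def quantize_callee(func):
    # Step 2: rewrite input/output tensor types to quantized ones; the
    # real pass also rewrites the body with quantized equivalents.
    to_q = lambda t: t.replace("f32", "!quant.uniform<i8:f32>")
    func["arg_types"] = [to_q(t) for t in func["arg_types"]]
    func["result_types"] = [to_q(t) for t in func["result_types"]]

def rewrite(module, call_op):
    call_op["op"] = "func.call"  # Step 1: XlaCallModuleOp -> func::CallOp
    quantize_callee(module["funcs"][call_op["callee"]])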
tensorflow/compiler/mlir/quantization/stablehlo/cc/report_test.cc
const QuantizationResults& results = report.GetQuantizationResults();
// The quantized call op without the _quantization_method attribute is not
// captured as a `QuantizationResult`.
ASSERT_THAT(results.results(), IsEmpty());
}

TEST_F(QuantizationReportTest, InitializeWithModuleOpWithInvalidCalleeName) {
  // A quantized dot_general op but the callee function has an invalid name. It
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 10:10:34 UTC 2024 - 18.5K bytes - Viewed (0)
tensorflow/compiler/mlir/quantization/tensorflow/python/integration_test/quantize_model_test.py
          dilations=[1, 1, 1, 1],
          padding='SAME',
          data_format='NHWC',
      )
      return {'output': out}

# TODO(b/280208261): Add unit tests for comparing unquantized and
# quantized results
@test_util.run_all_in_graph_and_eager_modes
class QuantizationOptionsTest(quantize_model_test_base.QuantizedModelTest):
  """Test cases regarding the use of QuantizationOptions proto.
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri May 17 03:36:50 UTC 2024 - 235.6K bytes - Viewed (0)
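A sketch of the comparison that TODO asks for: run the same input through the unquantized and quantized models and bound the deviation. float_fn and quantized_fn are hypothetical callables, and the tolerance is an arbitrary placeholder.

import numpy as np

def assert_quantized_close(float_fn, quantized_fn, x, atol=1e-1):
    # Quantization error should stay within the chosen tolerance.
    y_float = float_fn(x)
    y_quant = quantized_fn(x)
    np.testing.assert_allclose(y_quant, y_float, atol=atol)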
tensorflow/compiler/mlir/quantization/stablehlo/quantization_options.proto
METHOD_UNSPECIFIED = 0;  // go/do-include-enum-unspecified

// Apply default weight-only quantization. Weights are quantized during
// conversion, then dequantized during inference.
// Activation: f32, Weight: qi8, Bias: f32
WEIGHT_ONLY = 1;

// Apply default dynamic range quantization. Quantized tensor values'
// ranges are determined at graph runtime.
// Activation: f32, Weight: qi8, Bias: f32
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Jun 22 02:20:05 UTC 2023 - 3.6K bytes - Viewed (0)
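A minimal NumPy sketch of the WEIGHT_ONLY scheme as characterized above (symmetric per-tensor scale assumed; helper names are illustrative): the weight goes to qi8 at conversion time and is dequantized back to f32 at inference, so the matmul itself runs in float.

import numpy as np

def quantize_weight(w):
    # Conversion time: f32 weight -> qi8 plus a per-tensor scale.
    scale = float(np.max(np.abs(w))) / 127.0
    q = np.clip(np.round(w / scale), -127, 127).astype(np.int8)
    return q, scale

def weight_only_dense(x_f32, w_q, w_scale, bias_f32):
    # Inference time: dequantize the weight, compute in float.
    # Activation: f32, Weight: qi8, Bias: f32.
    return x_f32 @ (w_q.astype(np.float32) * w_scale) + bias_f32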
tensorflow/compiler/mlir/lite/stablehlo/tests/compose-uniform-quantized-type.mlir
// RUN: odml-to-stablehlo-opt --compose-uniform-quantized-type \
// RUN:   --split-input-file --verify-diagnostics %s | FileCheck %s

module {
// CHECK-LABEL: quantized_conv_op
// CHECK-SAME: %[[ARG:.*]]: tensor<1x3x3x4xf32>
func.func @quantized_conv_op(%arg0: tensor<1x3x3x4xf32>) -> tensor<1x3x3x4xf32> {
  %1 = stablehlo.constant dense<1.000000e+03> : tensor<1x1x1x1xf32>  // Input inverse scale.
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue May 14 17:10:32 UTC 2024 - 37K bytes - Viewed (0)
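The "input inverse scale" constant means quantization is expressed as a multiply by 1/scale rather than a divide. A NumPy sketch under that reading (the function is illustrative, not the pass's matcher):

import numpy as np

def quantize_with_inverse_scale(x, inverse_scale, zero_point=0):
    # q = round(x * (1/scale)) + zero_point, with 1/scale stored as a
    # float constant (1.000000e+03 in the test above).
    q = np.round(x * inverse_scale) + zero_point
    return np.clip(q, -128, 127).astype(np.int8)

x = np.random.uniform(-0.1, 0.1, size=(1, 3, 3, 4)).astype(np.float32)
q = quantize_with_inverse_scale(x, 1.0e3)  # input inverse scale = 1e+03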
tensorflow/compiler/mlir/lite/stablehlo/tests/uniform-quantized-stablehlo-to-tfl.mlir
// CHECK: return %[[CONV2D]] : tensor<1x3x2x2x!quant.uniform<i8:f32, 4.000000e+00>>

// -----

// Tests static range quantized dot_general with asymmetric quantized input.
func.func @dot_general_upstream_srq_asym_input(%arg0: tensor<1x2x3x4x!quant.uniform<i8:f32, 1.000000e+0:-100>>) -> tensor<1x2x3x5x!quant.uniform<i8:f32, 4.000000e+0>> {
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue May 14 17:10:32 UTC 2024 - 106.2K bytes - Viewed (0)
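For reference, the arithmetic behind an asymmetric static-range-quantized dot like the one in this test (input i8 with scale 1.0 and zero point -100), written as a NumPy sketch with illustrative names:

import numpy as np

def srq_dot(x_q, x_scale, x_zp, w_q, w_scale, w_zp, out_scale, out_zp=0):
    # Subtract zero points, accumulate in int32, then rescale into the
    # output quantized type.
    acc = (x_q.astype(np.int32) - x_zp) @ (w_q.astype(np.int32) - w_zp)
    y = acc.astype(np.float32) * (x_scale * w_scale) / out_scale + out_zp
    return np.clip(np.round(y), -128, 127).astype(np.int8)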