- Sort Score
- Results per page: 10
- Languages All
Results 1 - 10 of 15 for quantization (0.18 sec)
-
tensorflow/compiler/mlir/lite/quantization/lite/quantize_model_test.cc
EXPECT_THAT(output->quantization->zero_point, Eq(input1->quantization->zero_point)); EXPECT_THAT(output->quantization->scale, Eq(input2->quantization->scale)); EXPECT_THAT(output->quantization->zero_point, Eq(input2->quantization->zero_point)); } INSTANTIATE_TEST_SUITE_P(MinimumMaximumTestInst, QuantizeMinimumMaximumTest,
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Jun 12 23:15:24 UTC 2024 - 73.9K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/stablehlo/python/integration_test/quantize_model_test.py
from tensorflow.compiler.mlir.quantization.common.python import testing from tensorflow.compiler.mlir.quantization.stablehlo import quantization_config_pb2 as qc from tensorflow.compiler.mlir.quantization.stablehlo.python import quantization from tensorflow.compiler.mlir.quantization.stablehlo.python.integration_test import quantize_model_test_base
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue May 14 06:31:57 UTC 2024 - 51.4K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/BUILD
"//tensorflow/compiler/mlir/lite/quantization/ir:QuantOps", "//tensorflow/compiler/mlir/lite/quantization/lite:tfl_to_std", "//tensorflow/compiler/mlir/lite/schema:schema_fbs", "//tensorflow/compiler/mlir/quantization/common:uniform_quantized_types", "//tensorflow/compiler/mlir/quantization/common/ir:QuantOps", "//tensorflow/compiler/mlir/quantization/common/quantization_lib",
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Jun 12 21:41:49 UTC 2024 - 49.9K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/passes/quantize_composite_functions.cc
clEnumValN(tensorflow::quantization::QuantizationMethod:: METHOD_DYNAMIC_RANGE_INT8, "drq", "Post-training dynamic-range quantization"), clEnumValN(tensorflow::quantization::QuantizationMethod:: METHOD_STATIC_RANGE_WEIGHT_ONLY_INT8, "weight_only", "Post-training weight-only quantization"))};
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 54.5K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/stablehlo/transforms/compose_uniform_quantized_type_pass.cc
#include "stablehlo/dialect/StablehloOps.h" // from @stablehlo #include "tensorflow/compiler/mlir/lite/stablehlo/transforms/passes.h" #include "tensorflow/compiler/mlir/quantization/common/attrs_and_constraints.h" #include "tensorflow/compiler/mlir/quantization/common/uniform_quantized_types.h" #define DEBUG_TYPE "stablehlo-compose-uniform-quantized-type" namespace mlir { namespace odml { namespace {
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 64.6K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/python/integration_test/quantize_model_test_base.py
asset_file = os.path.join(asset_dir, 'vocab_file.txt') content = '\n'.join(['static', 'range', 'quantization']) file_io.write_string_to_file(filename=asset_file, file_content=content) # The resulting table looks like: # "static" -> 0 # "range" -> 1 # "quantization" -> 2 # default -> -1 init = lookup_ops.TextFileInitializer( filename=asset_file,
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Mar 21 08:51:46 UTC 2024 - 51.2K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/flatbuffer_import.cc
if (!tensor.quantization || tfl::IsQuantized(tensor)) return nullptr; // If the result isn't float and unquantizable, the min/max is ignored. if (!res.getType() .cast<mlir::ShapedType>() .getElementType() .isa<mlir::FloatType>()) { return nullptr; } auto mins = tensor.quantization->min; auto maxs = tensor.quantization->max;
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue May 21 18:21:50 UTC 2024 - 66.8K bytes - Viewed (0) -
tensorflow/BUILD
"//tensorflow/compiler/mlir/lite/quantization/lite:quantize_model", "//tensorflow/compiler/mlir/quantization/common/quantization_lib:quantization_config", "//tensorflow/compiler/mlir/lite/sparsity:sparsify_model", "//tensorflow/compiler/mlir/quantization/stablehlo/python:pywrap_quantization_lib_impl",
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri May 31 16:51:59 UTC 2024 - 53.5K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/stablehlo/tests/passes/quantize_composite_functions.mlir
module = "", platforms = [], version = 5 : i64, _entry_function = @composite_conv_fn, _original_entry_function = "composite_conv_fn", // Per-channel quantization at dimension 3 for input index 1. _quantization_method = "static_range_ptq {input_quantized_types {key: 1, value {dimension_specs {dimension: 3}}}}", _stablehlo_module_attrs = {},
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu May 09 05:56:10 UTC 2024 - 91.6K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/tests/prepare-quantize.mlir
// RUN: tf-opt %s -tfl-prepare-quantize="quantize-allowlist=quantize_float_placeholder_only,not_reset_input" | FileCheck %s // RUN: tf-opt %s -tfl-prepare-quantize="disable-set-input-nodes-quantization-params=true" | FileCheck --check-prefix=MixedPrecision %s // RUN: tf-opt %s -tfl-prepare-quantize="is-qdq-conversion=true" | FileCheck --check-prefix=QDQ %s // CHECK-LABEL: main // Uses `main` function to match the default target function of QuantSpecs and
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu May 02 09:41:17 UTC 2024 - 67.5K bytes - Viewed (0)