- Sort: Score
- Results per page: 10
- Languages: All
Results 1 - 9 of 9 for quantize_model_test (0.23 sec)
-
tensorflow/compiler/mlir/quantization/stablehlo/python/integration_test/quantize_model_test.py
# Tries to run all tests cases in both the graph mode (default in TF1) and the # eager mode (default in TF2) to ensure support for when TF2 is disabled. class StaticRangeQuantizationTest(quantize_model_test_base.QuantizedModelTest): @parameterized.parameters( testing.parameter_combinations([{ 'bias_fn': ( None, nn_ops.bias_add, ), 'activation_fn': (
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue May 14 06:31:57 UTC 2024 - 51.4K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/quantization/lite/quantize_model_test.cc
float scale = (float_max - float_min) / ((1 << bit_num) - 1); EXPECT_THAT(scale, FloatNear(quantized_quant_params.scale[0], eps)); } class QuantizeModelTest : public testing::Test { protected: QuantizeModelTest() { input_model_ = ReadModel(internal::kConvModelWith0Plus10Weights); readonly_model_ = input_model_->GetModel(); model_ = UnPackFlatBufferModel(*readonly_model_); }
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Jun 12 23:15:24 UTC 2024 - 73.9K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/python/integration_test/quantize_model_test.py
# TODO(b/280208261): Add unit tests for comparing unquantized and # quantized results @test_util.run_all_in_graph_and_eager_modes class QuantizationOptionsTest(quantize_model_test_base.QuantizedModelTest): """Test cases regarding the use of QuantizationOptions proto. Run all tests cases in both the graph mode (default in TF1) and the eager mode (default in TF2) to ensure support for when TF2 is disabled.
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri May 17 03:36:50 UTC 2024 - 235.6K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/stablehlo/python/BUILD
# "//tensorflow/python/types:core", # "@absl_py//absl/testing:parameterized", # ], # ) # # tf_py_strict_test( # name = "quantize_model_test", # srcs = ["integration_test/quantize_model_test.py"], # shard_count = 50, # Parallelize the test to avoid timeouts. # deps = [ # ":quantization", # ":quantize_model_test_base",
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue May 14 20:18:36 UTC 2024 - 7K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/quantization/lite/BUILD
"//tensorflow/lite:framework", "//tensorflow/lite/c:c_api_types", "@llvm-project//llvm:Support", ], ) tf_cc_test( name = "quantize_model_test", srcs = ["quantize_model_test.cc"], args = [ "--test_model_file=$(location //tensorflow/lite/tools/optimize:testdata/single_conv_weights_min_0_max_plus_10.bin)", ], data = [
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Jun 12 23:15:24 UTC 2024 - 8.3K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/python/pywrap_quantize_model_test.py
# ============================================================================== """Test cases for pywrap_quantize_model. These test cases are mostly for validation checks. Tests for functionalities are at `quantize_model_test.py`. """ from tensorflow.compiler.mlir.quantization.tensorflow.python import py_function_lib from tensorflow.compiler.mlir.quantization.tensorflow.python import pywrap_quantize_model
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri Jan 05 02:09:24 UTC 2024 - 2.7K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/python/BUILD
"//tensorflow/python/util:tf_export", "@absl_py//absl/logging", ], ) tf_py_strict_test( name = "quantize_model_test", size = "medium", srcs = ["integration_test/quantize_model_test.py"], shard_count = 50, # Parallelize the test to avoid timeouts. tags = [ "no_mac", # TODO(b/292100835): Reenable ], deps = [
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri May 17 03:36:50 UTC 2024 - 18.8K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/stablehlo/python/integration_test/quantize_model_test_base.py
from tensorflow.python.saved_model import loader_impl from tensorflow.python.saved_model import save as saved_model_save from tensorflow.python.types import core FUNC_ALIAS = 'some_alias' class QuantizedModelTest(test.TestCase, parameterized.TestCase): """Base test class for StableHLO quant tests.""" def setUp(self) -> None: super().setUp()
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue May 14 06:31:57 UTC 2024 - 18.2K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/python/integration_test/quantize_model_test_base.py
from tensorflow.python.trackable import autotrackable from tensorflow.python.types import core # Type aliases for supported attribute types. _AttrValType = Union[List[int], bool, str, None] class QuantizedModelTest(test.TestCase, parameterized.TestCase): """Base test class for TF-quant tests.""" def setUp(self) -> None: super().setUp() # Many test cases for quantization involve creating and saving the input
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Mar 21 08:51:46 UTC 2024 - 51.2K bytes - Viewed (0)