- Sort Score
- Result 10 results
- Languages All
Results 1 - 10 of 16 for qat (0.02 sec)
-
tensorflow/compiler/mlir/quantization/tensorflow/python/quantize_model.py
A SavedModel object with TF quantization applied. Raises: ValueError: when the model is QAT model. """ mode_str = 'weight-only quantization' # QAT weight-only is not supported yet. if _is_qat_saved_model(src_saved_model_path): raise ValueError( 'Models trained with quantization-aware training (QAT) are not ' 'supported for %s.' % mode_str ) logging.info(
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri May 17 03:36:50 UTC 2024 - 34.2K bytes - Viewed (0) -
releasenotes/notes/42203.yaml
apiVersion: release-notes/v2 kind: feature area: security releaseNotes: - |
Registered: Fri Jun 14 15:00:06 UTC 2024 - Last Modified: Tue Nov 29 20:11:11 UTC 2022 - 163 bytes - Viewed (0) -
security/pkg/nodeagent/sds/sdsservice.go
"fmt" "strconv" "sync" "sync/atomic" "time" cryptomb "github.com/envoyproxy/go-control-plane/contrib/envoy/extensions/private_key_providers/cryptomb/v3alpha" qat "github.com/envoyproxy/go-control-plane/contrib/envoy/extensions/private_key_providers/qat/v3alpha" core "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" tls "github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/tls/v3"
Registered: Fri Jun 14 15:00:06 UTC 2024 - Last Modified: Sat May 25 00:20:04 UTC 2024 - 10.9K bytes - Viewed (0) -
pilot/pkg/xds/sds.go
"strings" "time" xxhashv2 "github.com/cespare/xxhash/v2" cryptomb "github.com/envoyproxy/go-control-plane/contrib/envoy/extensions/private_key_providers/cryptomb/v3alpha" qat "github.com/envoyproxy/go-control-plane/contrib/envoy/extensions/private_key_providers/qat/v3alpha" core "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" envoytls "github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/tls/v3"
Registered: Fri Jun 14 15:00:06 UTC 2024 - Last Modified: Mon Apr 15 23:04:36 UTC 2024 - 15.8K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/common/ir/FakeQuantSupport.h
limitations under the License. ==============================================================================*/ // // This file defines support utilities for interoperating with FakeQuant* based // QAT (Quantization-Aware Training) computations, as implemented by TFLite. Note // that FakeQuant* operators mix multiple concerns specific to how TFLite // originally implemented quantization. As such, utilities here enforce
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Mar 21 11:52:27 UTC 2024 - 3.7K bytes - Viewed (0) -
pkg/config/validation/agent/validation_test.go
}, } }, ), isValid: true, }, { name: "private key provider with qat without poll_delay", in: modify(valid, func(c *meshconfig.ProxyConfig) { c.PrivateKeyProvider = &meshconfig.PrivateKeyProvider{ Provider: &meshconfig.PrivateKeyProvider_Qat{ Qat: &meshconfig.PrivateKeyProvider_QAT{}, }, } }, ), isValid: false, }, {
Registered: Fri Jun 14 15:00:06 UTC 2024 - Last Modified: Wed Apr 17 20:06:41 UTC 2024 - 39.1K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/common/quantization_lib/quantization_config.h
bool post_training_quantization = false; // Whether to allow dynamic range quantization. This is the easiest // quantization mode which doesn't require QAT or sample inputs. // This option only targets `DT_HALF` and `DT_QINT8` inference type. bool weight_quantization = false; // Whether to use the MLIR dynamic range quantizer instead of TOCO.
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Mar 13 10:16:19 UTC 2024 - 10.8K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/python/integration_test/quantize_model_test_base.py
) # Insert fake quant to simulate a QAT model. weight = array_ops.fake_quant_with_min_max_args( weight, min=-0.1, max=0.2, num_bits=8, narrow_range=False ) # shape: (2, 2) output_tensor = math_ops.matmul(matmul_input, weight) # Insert fake quant to simulate a QAT model. output_tensor = array_ops.fake_quant_with_min_max_args(
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Mar 21 08:51:46 UTC 2024 - 51.2K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/python/pywrap_quantize_model.cc
signature_def_map); return absl::OkStatus(); }, R"pbdoc( Quantizes a model that went through quantization-aware training (QAT) saved at `src_saved_model_path`. The resulting model will be saved to `dst_saved_model_path`. Returns an OK status when successful, otherwise raises `StatusNotOk` exception.
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Apr 09 06:33:29 UTC 2024 - 12K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/python/tf_tfl_flatbuffer_helpers.cc
} else { quant_specs->inference_type = DT_QINT8; quant_specs->inference_input_type = DT_QINT8; } } else { // These flags are incompatible with post_training_quantize() as only // QAT models can provide required ranges. quant_specs->disable_infer_tensor_range = toco_flags.disable_infer_tensor_range(); quant_specs->use_fake_quant_num_bits = toco_flags.use_fake_quant_num_bits(); }
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Sun May 12 12:39:37 UTC 2024 - 17.3K bytes - Viewed (0)