- Sort: Score
- Results per page: 10
- Languages: All
Results 1 - 9 of 9 for QuantizationOptions (0.27 sec)
-
tensorflow/compiler/mlir/quantization/tensorflow/python/pywrap_quantize_model.cc
namespace py = pybind11; namespace { using ::tensorflow::SignatureDef; using ::tensorflow::quantization::ExportedModel; using ::tensorflow::quantization::PyFunctionLibrary; using ::tensorflow::quantization::QuantizationOptions; using ::tensorflow::quantization::QuantizeDynamicRangePtq; using ::tensorflow::quantization::QuantizeQatModel; using ::tensorflow::quantization::QuantizeStaticRangePtq; using ::tensorflow::quantization::QuantizeWeightOnly;
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Apr 09 06:33:29 UTC 2024 - 12K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/python/quantize_model.cc
{asset_file_defs.begin(), asset_file_defs.end()}); } absl::StatusOr<ExportedModel> ExportCalibrationModel( mlir::ModuleOp module_op, mlir::MLIRContext *context, const QuantizationOptions &quantization_options, const absl::flat_hash_map<std::string, std::string> &function_aliases, absl::string_view calibration_data_dir) { // Clone ModuleOp and function aliases so changes in this pipeline won't
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri May 17 03:36:50 UTC 2024 - 23.8K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/python/quantize_model.py
from tensorflow.python.util import tf_export # Type aliases for quant_opts_pb2 messages. _QuantizationOptions = tf_export.tf_export( 'quantization.experimental.QuantizationOptions' )(quant_opts_pb2.QuantizationOptions) _QuantizationMethod = tf_export.tf_export( 'quantization.experimental.QuantizationMethod' )(quant_opts_pb2.QuantizationMethod) _QuantizationComponentSpec = tf_export.tf_export(
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri May 17 03:36:50 UTC 2024 - 34.2K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/quantize_passes.cc
#include "tensorflow/compiler/mlir/tensorflow/transforms/passes.h" #include "xla/mlir_hlo/mhlo/transforms/passes.h" namespace tensorflow { namespace quantization { namespace { using ::tensorflow::quantization::QuantizationOptions; void AddConvertTpuToCpuModelPasses(mlir::OpPassManager &pm) { pm.addPass(mlir::quant::CreateConvertTpuModelToCpuPass()); pm.addPass(mlir::createInlinerPass());
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri May 10 04:07:09 UTC 2024 - 9.9K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/passes/quantize_weights.cc
MLIR_DEFINE_EXPLICIT_INTERNAL_INLINE_TYPE_ID(QuantizeWeightsPass) explicit QuantizeWeightsPass() : test_mode_(true) { initializeForTest(); } explicit QuantizeWeightsPass( const tensorflow::quantization::QuantizationOptions& quant_options) : test_mode_(false), quant_options_(quant_options) {} QuantizeWeightsPass(const QuantizeWeightsPass& other) { test_mode_ = other.test_mode_; quant_options_ = other.quant_options_;
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Mar 05 07:39:40 UTC 2024 - 11.3K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/passes/lift_quantizable_spots_as_functions.cc
using ::tensorflow::quantization::OpSet; using ::tensorflow::quantization::QuantizationComponentSpec; using ::tensorflow::quantization::QuantizationMethod; using ::tensorflow::quantization::QuantizationOptions; using ::tensorflow::quantization::UnitWiseQuantizationSpec; class LiftQuantizableSpotsAsFunctionsPass : public PassWrapper<LiftQuantizableSpotsAsFunctionsPass, OperationPass<ModuleOp>> {
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri May 10 04:07:09 UTC 2024 - 16.4K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/python/representative_dataset.py
path_map={'serving_default': '/tmp/representative_dataset_path'} ).save({'serving_default': representative_dataset}) ) # Using in QuantizationOptions. quantization_options = tf.quantization.experimental.QuantizationOptions( signature_keys=['serving_default'], representative_datasets=dataset_file_map, ) tf.quantization.experimental.quantize_saved_model( '/tmp/input_model',
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri Mar 22 22:55:22 UTC 2024 - 14.2K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/passes/passes.h
// Lifts the quantizable spots as composite functions. std::unique_ptr<OperationPass<ModuleOp>> CreateLiftQuantizableSpotsAsFunctionsPass( const tensorflow::quantization::QuantizationOptions& quant_options); // Apply graph optimizations such as fusing and constant folding to prepare // lifting. std::unique_ptr<OperationPass<func::FuncOp>> CreatePrepareLiftingPass(
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri May 10 04:07:09 UTC 2024 - 12.3K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/tf_to_tfl_flatbuffer.cc
// The default minimum number of elements a weights array must have to be // quantized by this transformation. const int kWeightsMinNumElementsDefault = 1024; quantization::QuantizationOptions quantization_options; quantization_options.mutable_quantization_method()->set_preset_method( quantization::QuantizationMethod::METHOD_DYNAMIC_RANGE_INT8);
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri May 03 18:01:23 UTC 2024 - 23.8K bytes - Viewed (0)