- Sort Score
- Results per page: 10
- Languages All
Results 11 - 20 of 24 for QuantizationOptions (0.26 sec)
-
tensorflow/compiler/mlir/quantization/tensorflow/quantize_passes.cc
#include "tensorflow/compiler/mlir/tensorflow/transforms/passes.h" #include "xla/mlir_hlo/mhlo/transforms/passes.h" namespace tensorflow { namespace quantization { namespace { using ::tensorflow::quantization::QuantizationOptions; void AddConvertTpuToCpuModelPasses(mlir::OpPassManager &pm) { pm.addPass(mlir::quant::CreateConvertTpuModelToCpuPass()); pm.addPass(mlir::createInlinerPass());
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri May 10 04:07:09 UTC 2024 - 9.9K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/stablehlo/tests/fill_quantization_options_test.cc
const QuantizationComponentSpec expected_bias_component) { QuantizationOptions quantization_options; quantization_options.mutable_quantization_method() ->mutable_preset_quantization_method() ->set_preset_method(preset_quantization_options); QuantizationOptions filled_quantization_options = quant::stablehlo::FillPresetQuantizationOptions(quantization_options);
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Mar 05 09:05:02 UTC 2024 - 4.3K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/passes/quantize_weights.cc
MLIR_DEFINE_EXPLICIT_INTERNAL_INLINE_TYPE_ID(QuantizeWeightsPass) explicit QuantizeWeightsPass() : test_mode_(true) { initializeForTest(); } explicit QuantizeWeightsPass( const tensorflow::quantization::QuantizationOptions& quant_options) : test_mode_(false), quant_options_(quant_options) {} QuantizeWeightsPass(const QuantizeWeightsPass& other) { test_mode_ = other.test_mode_; quant_options_ = other.quant_options_;
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Mar 05 07:39:40 UTC 2024 - 11.3K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/passes/lift_quantizable_spots_as_functions.cc
using ::tensorflow::quantization::OpSet; using ::tensorflow::quantization::QuantizationComponentSpec; using ::tensorflow::quantization::QuantizationMethod; using ::tensorflow::quantization::QuantizationOptions; using ::tensorflow::quantization::UnitWiseQuantizationSpec; class LiftQuantizableSpotsAsFunctionsPass : public PassWrapper<LiftQuantizableSpotsAsFunctionsPass, OperationPass<ModuleOp>> {
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri May 10 04:07:09 UTC 2024 - 16.4K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/stablehlo/quantize_passes.h
namespace quantization { // Adds passes for quantization of individual quantizable components. // (i.e. activation, weight, bias) void AddQuantizationPasses(mlir::PassManager& pass_manager, const QuantizationOptions& quantization_options); } // namespace quantization } // namespace stablehlo
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Mar 22 12:03:14 UTC 2023 - 1.4K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/python/type_casters.h
: public internal::SerializedProtobufCaster< tensorflow::quantization::ExportedModel> {}; template <> struct type_caster<tensorflow::quantization::QuantizationOptions> : public internal::SerializedProtobufCaster< tensorflow::quantization::QuantizationOptions> {}; template <> struct type_caster<::stablehlo::quantization::CalibrationOptions> : public internal::SerializedProtobufCaster<
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Mon Mar 11 19:29:56 UTC 2024 - 6.6K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/ops/tf_op_quant_spec.h
// bf16 is quantizable. bool IsValueWithQuantizablePrecision(Value val); std::optional<tensorflow::quantization::QuantizationComponentSpec> GetWeightComponentSpec( const tensorflow::quantization::QuantizationOptions& quantization_options); // Returns the spec for the given operation that can be used for both of // dynamic and static range quantization. std::unique_ptr<OpQuantSpec> GetTFOpQuantSpec(Operation* op);
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Mar 05 07:39:40 UTC 2024 - 2.5K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/python/representative_dataset.py
path_map={'serving_default': '/tmp/representative_dataset_path'} ).save({'serving_default': representative_dataset}) ) # Using in QuantizationOptions. quantization_options = tf.quantization.experimental.QuantizationOptions( signature_keys=['serving_default'], representative_datasets=dataset_file_map, ) tf.quantization.experimental.quantize_saved_model( '/tmp/input_model',
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri Mar 22 22:55:22 UTC 2024 - 14.2K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/python/integration_test/concurrency_test.py
temp_path = self.create_tempdir().full_path saved_model_save.save( root, temp_path, signatures=root.add.get_concrete_function() ) quantization_options = quant_opts_pb2.QuantizationOptions( quantization_method=quant_opts_pb2.QuantizationMethod( preset_method=quant_opts_pb2.QuantizationMethod.PresetMethod.METHOD_STATIC_RANGE_INT8 ), tags={tag_constants.SERVING},
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Mon Sep 11 00:47:05 UTC 2023 - 3.6K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/stablehlo/quantization_options.proto
syntax = "proto3"; package stablehlo.quantization; option cc_enable_arenas = true; // Defines various options to specify and control the behavior of the // StableHLO quantizer. // NEXT ID: 2 message QuantizationOptions { QuantizationMethod quantization_method = 1; } // NEXT ID: 3 message QuantizationMethod { // Quantization Method can be either preset or custom. oneof quantization_method {
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Jun 22 02:20:05 UTC 2023 - 3.6K bytes - Viewed (0)