Results 1 - 9 of 9 for quant_specs_ (0.22 sec)
tensorflow/compiler/mlir/lite/quantization/lite/quantize_model.cc
    quant::QuantizationSpecs quant_specs;
    quant_specs.inference_type = tflite::TflTypeToTfType(inference_type);
    quant_specs.post_training_quantization = true;
    quant_specs.disable_per_channel = disable_per_channel;
    quant_specs.disable_per_channel_for_dense_layers = disable_per_channel_for_dense_layers;
    quant_specs.verify_numeric = verify_numeric;
    quant_specs.whole_model_verify = whole_model_verify;
Last Modified: Wed Jun 12 23:15:24 UTC 2024 - 6.3K bytes
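These are the same knobs the other converter entry points in this list set. As a rough sketch only (the helper name MakePostTrainingSpecs and its tflite::TensorType parameter are illustrative, not from this file, and header includes are omitted), a caller might bundle the post-training fields like this:

    // Hypothetical helper; the field assignments mirror the quantize_model.cc
    // snippet above, and the tflite::TensorType parameter type is an assumption.
    quant::QuantizationSpecs MakePostTrainingSpecs(tflite::TensorType inference_type,
                                                   bool disable_per_channel) {
      quant::QuantizationSpecs quant_specs;
      quant_specs.inference_type = tflite::TflTypeToTfType(inference_type);
      quant_specs.post_training_quantization = true;
      quant_specs.disable_per_channel = disable_per_channel;
      return quant_specs;
    }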
tensorflow/compiler/mlir/lite/quantization/lite/quantize_weights.cc
    quant::QuantizationSpecs quant_specs;
    quant_specs.inference_type = tflite::TflTypeToTfType(inference_type);
    quant_specs.weight_quantization = true;
    quant_specs.weight_only_quantization = weight_only_quantization;
    quant_specs.minimum_elements_for_weights = minimum_elements_for_weights;
    quant_specs.disable_per_channel = disable_per_channel;
    quant_specs.legacy_float_scale = legacy_float_scale;
Last Modified: Wed Jun 12 23:15:24 UTC 2024 - 9.5K bytes
tensorflow/compiler/mlir/quantization/common/quantization_lib/quantization_config.cc
    const tensorflow::DataType inference_type, QuantizationSpecs* quant_specs) {
      quant_specs->inference_type = inference_type;
      // If min/max are not specified, just return;
      if (node_mins.empty() || node_maxs.empty()) return false;
      // Otherwise make sure min/max has the same size as inputs.
      if (IsQuantizationType(inference_type)) {
Last Modified: Tue Mar 05 07:39:40 UTC 2024 - 5.9K bytes
tensorflow/compiler/mlir/quantization/tensorflow/ops/tf_quantize_op_test.cc
    QuantizationComponentSpec quant_spec;
    quant_spec.set_quantization_component(
        QuantizationComponentSpec::COMPONENT_WEIGHT);
    quant_spec.set_tensor_type(QuantizationComponentSpec::TENSORTYPE_INT_8);
    std::optional<TF::PartitionedCallOp> dequantize_op = ApplyUniformQuantization(
        pattern_rewriter, cast<TF::ConstOp>(value.getDefiningOp()), quant_spec);
    EXPECT_TRUE(dequantize_op.has_value());
Last Modified: Sun Dec 10 05:52:02 UTC 2023 - 3K bytes
tensorflow/compiler/mlir/lite/python/graphdef_to_tfl_flatbuffer.cc
    std::string* result) {
      using ::tflite::optimize::ReducedPrecisionSupport;
      mlir::MLIRContext context;
      GraphImportConfig specs;
      mlir::quant::QuantizationSpecs quant_specs;
      // Parse input arrays.
      std::vector<std::string> node_names;
      std::vector<std::string> node_dtypes;
      std::vector<std::optional<std::vector<int>>> node_shapes;
      std::vector<std::optional<double>> node_mins;
Last Modified: Mon Mar 11 19:29:56 UTC 2024 - 5.1K bytes
tensorflow/compiler/mlir/quantization/tensorflow/ops/tf_op_quant_spec_test.cc
    QuantizationOptions quant_options;
    QuantizationComponentSpec quant_spec;
    quant_spec.set_quantization_component(
        QuantizationComponentSpec::COMPONENT_WEIGHT);
    quant_spec.set_tensor_type(QuantizationComponentSpec::TENSORTYPE_INT_8);
    auto mutable_quant_method = quant_options.mutable_quantization_method();
    *mutable_quant_method->add_quantization_component_specs() = quant_spec;
    auto output = GetWeightComponentSpec(quant_options);
Last Modified: Tue Aug 22 18:28:40 UTC 2023 - 1.8K bytes
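Taken together with the tf_quantize_op_test.cc result above, a minimal sketch of registering a weight component in QuantizationOptions and reading it back; only the wrapping function MakeWeightOnlyOptions is invented here, the individual calls are the ones shown in the snippet:

    // Hypothetical wrapper around the calls shown in the test above.
    QuantizationOptions MakeWeightOnlyOptions() {
      QuantizationOptions quant_options;
      QuantizationComponentSpec quant_spec;
      quant_spec.set_quantization_component(
          QuantizationComponentSpec::COMPONENT_WEIGHT);
      quant_spec.set_tensor_type(QuantizationComponentSpec::TENSORTYPE_INT_8);
      *quant_options.mutable_quantization_method()
           ->add_quantization_component_specs() = quant_spec;
      return quant_options;
    }
    // The weight component can then be looked up again, as the test does:
    //   auto output = GetWeightComponentSpec(MakeWeightOnlyOptions());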
tensorflow/compiler/mlir/lite/python/jax_to_tfl_flatbuffer.cc
        model_flags, toco_flags, &quant_specs, &node_names, &node_dtypes,
        &node_shapes, &node_mins, &node_maxs));
    internal::WarningUnusedFlags(model_flags, toco_flags);
    // Register all custom ops, including user-specified custom ops.
    TF_RETURN_IF_ERROR(internal::RegisterAllCustomOps(toco_flags));
    mlir::TFL::PassConfig pass_config(quant_specs);
Last Modified: Mon Mar 11 19:29:56 UTC 2024 - 8K bytes
tensorflow/compiler/mlir/lite/common/tfl_pass_config.h
    namespace mlir {
    namespace TFL {
    // A config that controls which passes get run as part of the TFLite converter.
    struct PassConfig {
      explicit PassConfig(quant::QuantizationSpecs specs)
          : quant_specs(std::move(specs)) {}
      // If `emit_builtin_tflite_ops` is true, TF Lite legalization passes will be
      // added, which produces TF Lite ops.
      bool emit_builtin_tflite_ops = true;
Last Modified: Wed May 08 19:05:30 UTC 2024 - 6.5K bytes
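Combined with the jax_to_tfl_flatbuffer.cc result above, a small sketch of handing a QuantizationSpecs to this PassConfig; the spec values chosen here are placeholders:

    // Placeholder spec values; the by-value constructor shown above copies the
    // specs into PassConfig::quant_specs.
    quant::QuantizationSpecs quant_specs;
    quant_specs.post_training_quantization = true;  // placeholder choice
    mlir::TFL::PassConfig pass_config(quant_specs);
    pass_config.emit_builtin_tflite_ops = true;     // default shown above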
tensorflow/compiler/mlir/lite/python/tf_tfl_flatbuffer_helpers.h
    // input arrays.
    Status PopulateQuantizationSpecs(
        const toco::ModelFlags& model_flags, toco::TocoFlags& toco_flags,
        mlir::quant::QuantizationSpecs* quant_specs,
        std::vector<string>* node_names, std::vector<string>* node_dtypes,
        std::vector<std::optional<std::vector<int>>>* node_shapes,
        std::vector<std::optional<double>>* node_mins,
Last Modified: Sun May 12 12:39:37 UTC 2024 - 3.2K bytes
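The graphdef and jax entry points above declare exactly these out-parameters; a hedged sketch of a call site follows (model_flags and toco_flags are assumed to come from the caller, and the internal:: namespace is taken from the jax_to_tfl_flatbuffer.cc snippet):

    // Sketch of a call site for the declaration above; model_flags and
    // toco_flags are assumed to be supplied by the caller.
    mlir::quant::QuantizationSpecs quant_specs;
    std::vector<std::string> node_names, node_dtypes;
    std::vector<std::optional<std::vector<int>>> node_shapes;
    std::vector<std::optional<double>> node_mins, node_maxs;
    TF_RETURN_IF_ERROR(internal::PopulateQuantizationSpecs(
        model_flags, toco_flags, &quant_specs, &node_names, &node_dtypes,
        &node_shapes, &node_mins, &node_maxs));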