- Sort by: Score
- Results per page: 10
- Languages All
Results 31 - 38 of 38 for QuantizationSpecs (0.4 sec)
-
tensorflow/compiler/mlir/lite/python/jax_to_tfl_flatbuffer.cc
toco::TocoFlags& toco_flags, std::string* result) { mlir::MLIRContext context; mlir::quant::QuantizationSpecs quant_specs; // Parse input arrays. std::vector<std::string> node_names; std::vector<std::string> node_dtypes; std::vector<std::optional<std::vector<int>>> node_shapes;
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Mon Mar 11 19:29:56 UTC 2024 - 8K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/python/saved_model_to_tfl_flatbuffer.cc
const toco::ModelFlags& model_flags, toco::TocoFlags& toco_flags, std::string* result, const PyFunctionLibrary* quantization_py_function_lib) { mlir::MLIRContext context; mlir::quant::QuantizationSpecs quant_specs; // Parse input arrays. std::vector<string> node_names; std::vector<string> node_dtypes; std::vector<std::optional<std::vector<int>>> node_shapes;
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Sun May 12 12:39:37 UTC 2024 - 11K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/tf_tfl_passes.cc
constexpr mlir::StringRef kTFLiteDataLayout = "NHWC"; } // namespace void AddQuantizationPasses(const mlir::TFL::PassConfig& pass_config, mlir::OpPassManager& pass_manager) { const mlir::quant::QuantizationSpecs& quant_specs = pass_config.quant_specs; pass_manager.addNestedPass<mlir::func::FuncOp>( mlir::TFL::CreatePrepareQuantizePass(quant_specs)); if (quant_specs.default_ranges.first.has_value() ||
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Jun 06 18:45:51 UTC 2024 - 25.5K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/stablehlo/python/integration_test/quantize_model_test.py
# Disable quantization for the quantizable unit (lifted function) whose # function name starts with "composite_dot_general". specs=qc.QuantizationSpecs( specs=[ qc.QuantizationSpec( matcher=qc.MatcherSpec( function_name=qc.FunctionNameMatcherSpec(
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue May 14 06:31:57 UTC 2024 - 51.4K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/tf_tfl_translate.cc
// message. So we can just return here. if (!module.ok()) return kTrFailure; // Set the quantization specifications from the command line flags. mlir::quant::QuantizationSpecs quant_specs; if (mlir::quant::ParseInputNodeQuantSpecs( input_arrays, min_values, max_values, inference_type, &quant_specs)) { llvm::errs() << "Failed to get input quant spec."; return kTrFailure;
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri May 03 18:01:23 UTC 2024 - 14K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/python/tf_tfl_flatbuffer_helpers.cc
return RegisterCustomBuiltinOps(extra_tf_opdefs); } absl::Status PopulateQuantizationSpecs( const toco::ModelFlags& model_flags, toco::TocoFlags& toco_flags, mlir::quant::QuantizationSpecs* quant_specs, std::vector<std::string>* node_names, std::vector<std::string>* node_dtypes, std::vector<std::optional<std::vector<int>>>* node_shapes, std::vector<std::optional<double>>* node_mins,
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Sun May 12 12:39:37 UTC 2024 - 17.3K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/common/quantization_lib/quantization_utils.h
"not_quantizable"}; inline constexpr double kNearZeroTolerance = 1.0e-6; using QuantParams = QuantizedType; using QuantSpec = QuantizationSpecs; using SignedInteger = std::pair<unsigned, unsigned>; // bitwidth and sign using QuantParamsForResults = llvm::SmallVector<QuantizedType, 4>; using AccumulatorScaleFunc =
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Apr 24 20:30:06 UTC 2024 - 41.7K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/passes/quantize_composite_functions.cc
// quantized input and output types, which are not allowed in TF dialect. // This can be removed when the composite call supports quantized types. pm.enableVerifier(false); QuantizationSpecs quant_specs; quant_specs.inference_type = tensorflow::DT_QINT8; quant_specs.disable_per_channel = !enable_per_channel_quantization_; pm.addPass(CreatePreprocessOpPass(target_opset_, quantization_method_,
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 54.5K bytes - Viewed (0)