- Sort Score
- Results per page: 10
- Languages All
Results 1 - 2 of 2 for QuantizeWeights (0.12 sec)
-
tensorflow/compiler/mlir/lite/quantization/lite/quantize_weights_test.cc
namespace { tensorflow::string* g_test_model_dir = nullptr; } // namespace namespace tflite { namespace optimize { namespace { using mlir::lite::BufferType; using mlir::lite::CustomOpMap; using mlir::lite::QuantizeWeights; constexpr bool kUseUpdatedHybridSchemeDefault = true; std::unique_ptr<ModelT> CreateMutableModelFromFile(const Model* input_model) { auto copied_model = std::make_unique<ModelT>();
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Jun 12 23:15:24 UTC 2024 - 32.3K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/tf_to_tfl_flatbuffer.cc
default: return absl::InvalidArgumentError("Quantized type not supported"); break; } bool use_updated_hybrid_scheme = !quant_specs.disable_per_channel; if (::tflite::optimize::QuantizeWeights( &q_builder, input_model, quantized_type, use_updated_hybrid_scheme, ::tflite::optimize::QuantizerType::OLD_QUANTIZER) != kTfLiteOk) { return absl::InvalidArgumentError(
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri May 03 18:01:23 UTC 2024 - 23.8K bytes - Viewed (0)