- Sort Score
- Results 10 results
- Languages All
Results 1 - 3 of 3 for QuantizeWeights (0.27 sec)
-
tensorflow/compiler/mlir/lite/quantization/lite/quantize_weights.cc
result.size()); return kTfLiteOk; } TfLiteStatus QuantizeWeights(flatbuffers::FlatBufferBuilder* builder, const tflite::Model* input_model, int64_t weights_min_num_elements, bool use_hybrid_evaluation) { return QuantizeWeights( builder, input_model, /*inference_type=*/tflite::TensorType_INT8,
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Jun 12 23:15:24 UTC 2024 - 9.5K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/quantization/lite/quantize_weights.h
// Overloading methods to support old quantizer versions API TfLiteStatus QuantizeWeights(flatbuffers::FlatBufferBuilder* builder, const tflite::Model* input_model, int64_t weights_min_num_elements, bool use_hybrid_evaluation = true); TfLiteStatus QuantizeWeights(flatbuffers::FlatBufferBuilder* builder,
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Jun 12 23:15:24 UTC 2024 - 4.2K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/quantization/lite/quantize_weights_test.cc
namespace { tensorflow::string* g_test_model_dir = nullptr; } // namespace namespace tflite { namespace optimize { namespace { using mlir::lite::BufferType; using mlir::lite::CustomOpMap; using mlir::lite::QuantizeWeights; constexpr bool kUseUpdatedHybridSchemeDefault = true; std::unique_ptr<ModelT> CreateMutableModelFromFile(const Model* input_model) { auto copied_model = std::make_unique<ModelT>();
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Jun 12 23:15:24 UTC 2024 - 32.3K bytes - Viewed (0)