Results 1 - 10 of 23 for input_model (0.41 sec)
tensorflow/compiler/mlir/lite/quantization/lite/quantize_weights.cc
  StringRef op_name(tflite_op_name.data(), tflite_op_name.size());
  return op_name.lower();
}

std::unique_ptr<tflite::ModelT> CreateMutableModelFromFile(
    const tflite::Model* input_model) {
  auto copied_model = std::make_unique<tflite::ModelT>();
  input_model->UnPackTo(copied_model.get(), nullptr);
  return copied_model;
}

}  // namespace

// TODO(b/214314076): Support MLIR model as an input for the C++ dynamic range
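The helper above relies on the FlatBuffers object API: UnPackTo deep-copies the read-only tflite::Model view into a mutable tflite::ModelT. A minimal sketch of that pattern, assuming a valid TFLite flatbuffer already sits in `buffer` (a hypothetical argument) and that the generated schema header is available:

#include <memory>

#include "tensorflow/lite/schema/schema_generated.h"  // assumed generated header

std::unique_ptr<tflite::ModelT> MakeMutableCopy(const uint8_t* buffer) {
  // Reinterpret the raw bytes as the root Model table (zero-copy view).
  const tflite::Model* input_model = tflite::GetModel(buffer);
  auto copied_model = std::make_unique<tflite::ModelT>();
  // Deep-copy into the object-API type; nullptr means no custom resolver.
  input_model->UnPackTo(copied_model.get(), nullptr);
  return copied_model;  // Independent of `buffer`; safe to mutate.
}

The reverse direction uses the generated Pack function, e.g. fbb.Finish(tflite::Model::Pack(fbb, copied_model.get()), tflite::ModelIdentifier()).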
tensorflow/compiler/mlir/lite/sparsity/sparsify_model_test.cc
"tensorflow/lite/testdata/sparse_tensor.bin"); tflite::ModelT input_model; input_fbm->GetModel()->UnPackTo(&input_model); // Populate input metadata auto model_metadata_buffer = std::make_unique<tflite::BufferT>(); model_metadata_buffer->data = std::vector<uint8_t>(expected_value.begin(), expected_value.end()); input_model.buffers.push_back(std::move(model_metadata_buffer));
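The test pushes raw metadata bytes into the model's buffer table. In the TFLite schema a metadata entry then references that buffer by index; a hedged sketch of that companion step (the key name is hypothetical, field names come from the generated schema):

auto metadata = std::make_unique<tflite::MetadataT>();
metadata->name = "model_metadata";  // hypothetical metadata key
// Point at the buffer that was just appended above.
metadata->buffer = static_cast<uint32_t>(input_model.buffers.size() - 1);
input_model.metadata.push_back(std::move(metadata));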
tensorflow/compiler/mlir/lite/sparsity/sparsify_model.cc
namespace mlir {
namespace lite {

absl::Status SparsifyModel(const tflite::ModelT& input_model,
                           flatbuffers::FlatBufferBuilder* builder) {
  MLIRContext context;
  StatusScopedDiagnosticHandler statusHandler(&context,
                                              /*propagate=*/true);

  // Import input_model to a MLIR module
  flatbuffers::FlatBufferBuilder input_builder;
tensorflow/compiler/mlir/lite/quantization/lite/quantize_weights.h
    const tflite::Model* input_model,
    int64_t weights_min_num_elements,
    bool use_hybrid_evaluation = true);

TfLiteStatus QuantizeWeights(flatbuffers::FlatBufferBuilder* builder,
                             const tflite::Model* input_model,
                             BufferType quant_type = BufferType::QUANTIZED_INT8,
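Based only on the overload visible above, a hypothetical call site could look like the following; loading `model_buffer` and the enclosing namespace qualification are assumed, and error handling is elided:

flatbuffers::FlatBufferBuilder builder;
const tflite::Model* input_model = tflite::GetModel(model_buffer.data());
// Rely on the defaulted quant_type (BufferType::QUANTIZED_INT8).
if (QuantizeWeights(&builder, input_model) != kTfLiteOk) {
  // Handle quantization failure.
}
// builder.GetBufferPointer() / builder.GetSize() now hold the quantized model.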
tensorflow/compiler/mlir/lite/sparsity/sparsify_model.h
#include "tensorflow/compiler/mlir/lite/schema/schema_generated.h" namespace mlir { namespace lite { // Sparsify the `input_model` and write the result to a flatbuffer `builder`. absl::Status SparsifyModel(const tflite::ModelT& input_model, flatbuffers::FlatBufferBuilder* builder); } // namespace lite } // namespace mlir
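The header comment spells out the contract: sparsify `input_model`, write the result into `builder`. A minimal hedged sketch of a caller, assuming a tflite::ModelT has already been unpacked as in sparsify_model_test.cc above:

tflite::ModelT input_model;
// ... unpack a loaded flatbuffer into input_model ...
flatbuffers::FlatBufferBuilder builder;
absl::Status status = mlir::lite::SparsifyModel(input_model, &builder);
if (status.ok()) {
  // builder now holds the sparsified flatbuffer.
}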
tensorflow/compiler/mlir/lite/stablehlo/odml_to_stablehlo.cc
// * Options for full/partial conversion, Op exceptions list.
// * Option to serialize output to TFL flatbuffer format.

using llvm::cl::opt;

// NOLINTNEXTLINE
opt<std::string> input_model(llvm::cl::Positional,
                             llvm::cl::desc("<input model path>"),
                             llvm::cl::Required);
// NOLINTNEXTLINE
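The tool declares its input path as a required positional llvm::cl option. A self-contained sketch of the same pattern (the overview string and main body are illustrative, not from the tool):

#include "llvm/Support/CommandLine.h"

using llvm::cl::opt;

// Invoked as: tool <input model path>
opt<std::string> input_model(llvm::cl::Positional,
                             llvm::cl::desc("<input model path>"),
                             llvm::cl::Required);

int main(int argc, char** argv) {
  llvm::cl::ParseCommandLineOptions(argc, argv, "example ODML tool\n");
  // input_model now contains the first positional argument.
  return 0;
}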
tensorflow/compiler/mlir/quantization/tensorflow/python/quantize_model.py
quantization_options = tf.quantization.experimental.QuantizationOptions(
    signature_keys=['your_signature_key'],
)
tf.quantization.experimental.quantize_saved_model(
    '/tmp/input_model',
    '/tmp/output_model',
    quantization_options=quantization_options,
)

# When quantizing a model trained without QAT (Post-Training Quantization),
# a representative dataset is required.
tensorflow/compiler/mlir/lite/tf_to_tfl_flatbuffer.cc
flatbuffers::FlatBufferBuilder q_builder(/*initial_size=*/10240);
const uint8_t* buffer =
    reinterpret_cast<const uint8_t*>(translated_result.c_str());
const ::tflite::Model* input_model = ::tflite::GetModel(buffer);
::tflite::optimize::BufferType quantized_type;
switch (quant_specs.inference_type) {
  case DT_QINT8:
    quantized_type = ::tflite::optimize::BufferType::QUANTIZED_INT8;
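Reinterpreting a serialized string as a flatbuffer, as above, assumes the bytes really are a TFLite model. A hedged sketch of the verification step that can precede GetModel, using the generated VerifyModelBuffer helper:

const uint8_t* buf =
    reinterpret_cast<const uint8_t*>(translated_result.c_str());
flatbuffers::Verifier verifier(buf, translated_result.size());
if (!tflite::VerifyModelBuffer(verifier)) {
  // Not a valid TFLite flatbuffer; bail out before calling GetModel().
}
const ::tflite::Model* input_model = ::tflite::GetModel(buf);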
tensorflow/compiler/mlir/lite/quantization/lite/quantize_weights_test.cc
QuantizeWeightsTest() {}

void LoadBasicModel() {
  input_model_ = ReadTestModel();
  model_ = input_model_->GetModel();
}

void LoadSharedWeightsModel() {
  input_model_ = ReadSharedWeightsTestModel();
  model_ = input_model_->GetModel();
}

void LoadGatherTestModel() {
  input_model_ = ReadGatherTestModel();
  model_ = input_model_->GetModel();
}

void LoadCustomOpTestModel() {
tensorflow/compiler/mlir/quantization/tensorflow/python/representative_dataset.py
    signature_keys=['serving_default'],
    representative_datasets=dataset_file_map,
)
tf.quantization.experimental.quantize_saved_model(
    '/tmp/input_model',
    '/tmp/output_model',
    quantization_options=quantization_options,
)
```
"""

def __init__(
    self,
    path_map: Mapping[str, os.PathLike[str]],