- Sort by: Score
- Results per page: 10
- Languages All
Results 1 - 2 of 2 for output_node (0.1 sec)
-
tensorflow/compiler/mlir/lite/quantization/lite/quantize_weights_test.cc
const uint8_t* buffer = builder.GetBufferPointer(); const Model* output_model = GetModel(buffer); ASSERT_TRUE(output_model); // Nothing should change. ASSERT_EQ(output_model->subgraphs()->size(), model_->subgraphs()->size()); for (size_t subgraph_idx = 0; subgraph_idx < model_->subgraphs()->size(); subgraph_idx++) { const auto quantized_graph = output_model->subgraphs()->Get(subgraph_idx);
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Jun 12 23:15:24 UTC 2024 - 32.3K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/python/converter_python_api.cc
FromTocoDataTypeToTflitToTensorType(output_data_type); std::string output_model; const absl::string_view input_model_buffer(buf, length); auto status = mlir::lite::QuantizeModel( input_model_buffer, input_type, output_type, inference_tensor_type, /*operator_names=*/{}, disable_per_channel, fully_quantize, output_model, enable_numeric_verify, enable_whole_model_verify,
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Jun 12 23:15:24 UTC 2024 - 19.2K bytes - Viewed (0)