- Sort by: Score
- Results per page: 10
- Languages All
Results 11 - 20 of 548 for tflite (0.15 sec)
-
tensorflow/compiler/mlir/lite/utils/convert_type.h
// Convert the scalar type of a TFLite tensor to the corresponding // Tensorflow type tensorflow::DataType TflTypeToTfType(tflite::TensorType type); // Convert the Tensorflow scalar type to the corresponding TFLite type absl::StatusOr<tflite::TensorType> TfTypeToTflType(tensorflow::DataType type); // Returns element type from attribute Type 'type_attr'.
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri May 03 18:01:23 UTC 2024 - 2.1K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/sparsity/sparsify_model.cc
flatbuffers::Offset<tflite::Model> input_model_location = tflite::Model::Pack(input_builder, &input_model); tflite::FinishModelBuffer(input_builder, input_model_location); std::string serialized_model( reinterpret_cast<const char*>(input_builder.GetBufferPointer()), input_builder.GetSize()); OwningOpRef<mlir::ModuleOp> module = tflite::FlatBufferToMlir(
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Mon Jun 10 20:16:40 UTC 2024 - 4.3K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/python/converter_python_api.cc
} tflite::TensorType FromTocoDataTypeToTflitToTensorType(int inference_type) { switch (inference_type) { case toco::IODataType::QUANTIZED_INT16: return tflite::TensorType_INT16; case toco::IODataType::QUANTIZED_UINT8: return tflite::TensorType_UINT8; case toco::IODataType::UINT8: return tflite::TensorType_UINT8; case toco::IODataType::QUANTIZED_INT8:
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Jun 12 23:15:24 UTC 2024 - 19.2K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/flatbuffer_operator.cc
.Case("TANH", tflite::ActivationFunctionType_TANH) .Case("SIGN_BIT", tflite::ActivationFunctionType_SIGN_BIT); } static tflite::TensorType ConvertDerivedTFLiteTypeAttrForOptionWriter( tflite::TensorType type, flatbuffers::FlatBufferBuilder* builder) { if (type == tflite::TensorType_INT64) { return tflite::TensorType_INT64; } else if (type == tflite::TensorType_INT32) {
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue May 21 18:21:50 UTC 2024 - 38K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/experimental/tac/tac_filter.proto
syntax = "proto3"; package third_party.tensorflow.compiler.mlir.lite.experimental.tac; // A list of filters for TAC users to run ops/functions on ML hardwares. The // intuition is that, for ops/functions that can be run on ML hardware (e.g. // EdgeTPU) and TFLite CPU, TAC users give a hint that they're more performant // to run on TFLite CPU. These filters give the TAC users freedom to specify the // parts that they want to use other hardware to accelerate.
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri May 19 19:32:06 UTC 2023 - 1.8K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/python/converter_python_api_wrapper.cc
return tensorflow::PyoOrThrow( tflite::MlirSparsifyModel(input_contents_txt_raw.ptr())); }, py::arg("input_contents_txt_raw"), R"pbdoc( Returns a sparsified model. )pbdoc"); m.def( "RegisterCustomOpdefs", [](py::object custom_opdefs_txt_raw) { return tensorflow::PyoOrThrow( tflite::RegisterCustomOpdefs(custom_opdefs_txt_raw.ptr())); },
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri May 31 18:18:30 UTC 2024 - 5.6K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/transforms/analyze_variables.cc
// variables should be legalized to TFLite or not. const char kLegalizeTflVariables[] = "tfl._legalize_tfl_variables"; // Returns true if 'op' is TF op that accepts resource type, but is // supported by TFLite. bool IsSupportedTFLiteResourceOp(Operation* op) { return llvm::isa<TF::ReadVariableOp, TF::AssignVariableOp, TF::VarHandleOp, TF::LookupTableFindV2Op, TF::LookupTableImportV2Op, TF::LookupTableSizeV2Op>(op);
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 4.3K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/quantization/lite/quantize_model.cc
<< ", input_inference_type: " << tflite::EnumNameTensorType(input_type) << ", output_inference_type: " << tflite::EnumNameTensorType(output_type) << "\n"; mlir::Builder mlir_builder(&context); mlir::Type input_mlir_type = tflite::ConvertElementType(input_type, mlir_builder); mlir::Type output_mlir_type = tflite::ConvertElementType(output_type, mlir_builder);
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Jun 12 23:15:24 UTC 2024 - 6.3K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/utils/const_tensor_utils.h
#include "tensorflow/compiler/mlir/lite/schema/schema_generated.h" #include "tensorflow/core/framework/tensor.pb.h" #include "tensorflow/core/framework/tensor_shape.pb.h" namespace mlir { namespace TFL { bool IsQuantized(const tflite::TensorT& tensor); absl::StatusOr<mlir::quant::QuantizedType> GetQuantizedType( const tflite::TensorT& tensor, mlir::Builder builder,
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue May 07 23:04:40 UTC 2024 - 2.9K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/sparsity/sparsify_model_test.cc
// Load input model auto input_fbm = tflite::FlatBufferModel::BuildFromFile( "tensorflow/lite/testdata/sparse_tensor.bin"); tflite::ModelT input_model; input_fbm->GetModel()->UnPackTo(&input_model); // Populate input metadata auto model_metadata_buffer = std::make_unique<tflite::BufferT>(); model_metadata_buffer->data = std::vector<uint8_t>(expected_value.begin(), expected_value.end());
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Mon Jun 10 20:16:40 UTC 2024 - 2.9K bytes - Viewed (0)