- Sort Score
- Results per page: 10 results
- Languages All
Results 1 - 10 of 14 for BuiltinOperator (0.24 sec)
-
tensorflow/compiler/mlir/lite/schema/schema_conversion_utils.h
namespace tflite { int8_t ConvertBuiltinCodeToDeprecatedBuiltinCode( const BuiltinOperator builtin_code); // The following methods are for backward compatibility for the early version // three, which does not have an extended builtin code. flatbuffers::Offset<OperatorCode> CreateOperatorCode( flatbuffers::FlatBufferBuilder &_fbb, BuiltinOperator builtin_code = BuiltinOperator_ADD,
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue May 21 18:21:50 UTC 2024 - 1.7K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/schema/schema_utils.cc
// the `builtin_code` as the right value. BuiltinOperator GetBuiltinCode(const OperatorCode* op_code) { // Caller should guarantee that the given argument value is not a nullptr. TFLITE_DCHECK(op_code != nullptr); return std::max( op_code->builtin_code(), static_cast<BuiltinOperator>(op_code->deprecated_builtin_code())); } BuiltinOperator GetBuiltinCode(const OperatorCodeT* op_code) {
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue May 21 18:21:50 UTC 2024 - 2.6K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/schema/schema_utils.h
// problem. The new builtin operator will be assigned to the extended builtin // code field in the flatbuffer schema. Those methods helps to hide builtin code // details. BuiltinOperator GetBuiltinCode(const OperatorCode *op_code); BuiltinOperator GetBuiltinCode(const OperatorCodeT *op_code); } // namespace tflite
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue May 21 18:21:50 UTC 2024 - 1.3K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/schema/schema_conversion_utils.cc
#include "tensorflow/compiler/mlir/lite/schema/schema_conversion_utils.h" #include <algorithm> namespace tflite { int8_t ConvertBuiltinCodeToDeprecatedBuiltinCode( const BuiltinOperator builtin_code) { return (builtin_code < BuiltinOperator_PLACEHOLDER_FOR_GREATER_OP_CODES) ? static_cast<int8_t>(builtin_code) : static_cast<int8_t>(
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue May 21 18:21:50 UTC 2024 - 2.9K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/quantization/lite/quantize_weights.h
struct CustomOpInfo { std::vector<std::int32_t> quantizable_input_indices; bool is_weight_only = false; bool no_side_effect = true; }; using BuiltinOperatorSet = absl::flat_hash_set<tflite::BuiltinOperator>; // Map from custom op code to custom op quantization information. using CustomOpMap = std::unordered_map<std::string, CustomOpInfo>; // Applies dynamic range quantization for the given model where the input_model
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Jun 12 23:15:24 UTC 2024 - 4.2K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/quantization/lite/quantize_weights_test.cc
} return false; } // Returns the producer op code of the specified tensor_idx. bool GetProducerOpCode(const Model* model, uint32_t subgraph_idx, uint32_t tensor_idx, BuiltinOperator* op_code) { const auto subgraph = model->subgraphs()->Get(subgraph_idx); for (size_t op_idx = 0; op_idx < subgraph->operators()->size(); ++op_idx) { const auto op = subgraph->operators()->Get(op_idx);
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Jun 12 23:15:24 UTC 2024 - 32.3K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/schema/schema_v3b.fbs
// ones, but not by much. Moreover, while custom operators accept an opaque // object containing configuration parameters, builtins have a predetermined // set of acceptable options. // LINT.IfChange enum BuiltinOperator : int32 { ADD = 0, AVERAGE_POOL_2D = 1, CONCATENATION = 2, CONV_2D = 3, DEPTHWISE_CONV_2D = 4, DEPTH_TO_SPACE = 5, DEQUANTIZE = 6, EMBEDDING_LOOKUP = 7, FLOOR = 8,
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue May 28 14:28:27 UTC 2024 - 30K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/converter_gen.cc
return "LSTM"; } return name.upper(); } // Emits a function that returns built-in operator code for each TFLite op. // // The signature of the function is: // // std::optional<tflite::BuiltinOperator> // mlir::GetBuiltinOpCode(mlir::Operation* op); // // TODO(hinsu): Consider converting this to a static constant associative // container instead of a series of if conditions, if required.
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Dec 19 15:05:28 UTC 2023 - 23.7K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/flatbuffer_operator.h
std::string GetMlirOpNameFromOpCode(const ::tflite::OperatorCodeT &op_code); // Returns the builtin op code for the given MLIR operation on success; emits // error and returns std::nullopt on failure. std::optional<tflite::BuiltinOperator> GetBuiltinOpCode(Operation *mlir_op); // Packs the given MLIR operation into a TFLite FlatBuffer operator object. // Returns the FlatBuffer offset for the operator on success; emits error and
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu May 16 21:00:09 UTC 2024 - 11.2K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/schema/schema.fbs
// ones, but not by much. Moreover, while custom operators accept an opaque // object containing configuration parameters, builtins have a predetermined // set of acceptable options. // LINT.IfChange enum BuiltinOperator : int32 { ADD = 0, AVERAGE_POOL_2D = 1, CONCATENATION = 2, CONV_2D = 3, DEPTHWISE_CONV_2D = 4, DEPTH_TO_SPACE = 5, DEQUANTIZE = 6, EMBEDDING_LOOKUP = 7, FLOOR = 8,
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri May 03 18:01:23 UTC 2024 - 41.7K bytes - Viewed (0)