- Sort by: Score
- Results per page: 10
- Languages: All
Results 11 - 20 of 134 for lstm (0.07 sec)
-
tensorflow/compiler/mlir/lite/schema/schema.fbs
enum LSTMKernelType : byte { // Full LSTM kernel which supports peephole and projection. FULL = 0, // Basic LSTM kernels. Equivalent to TensorFlow BasicLSTMCell. BASIC = 1, } // LINT.ThenChange(//tensorflow/compiler/mlir/lite/ir/tfl_op_enums.td) // An implementation of TensorFlow LSTMCell and CoupledInputForgetGateLSTMCell table LSTMOptions { // Parameters for LSTM version 1 or above.
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri May 03 18:01:23 UTC 2024 - 41.7K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/transforms/guarantee_all_funcs_one_use.cc
// // The tf-shape-inference pass doesn't support functions that have more than // a single use. But some real code from frontends does end up creating code // like that. For example, the same LSTM cell function or loop body function // will be reused. // // This pass clones functions as needed to establish the invariant that all // functions have a single use. This can in principle cause exponential code
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Oct 05 23:50:19 UTC 2022 - 4.8K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/tests/split-merged-operands.mlir
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu May 02 09:41:17 UTC 2024 - 7.7K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/tests/mlir2flatbuffer/lstm_quantized.mlir
%cst = "tfl.no_value"() {value = unit} : () -> none
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Jul 14 16:41:28 UTC 2022 - 15.4K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/tests/mlir2flatbuffer/basic_lstm.mlir
// CHECK: { // CHECK-NEXT: version: 3, // CHECK-NEXT: operator_codes: [ { // CHECK-NEXT: deprecated_builtin_code: 16, // CHECK-NEXT: version: 2 // CHECK-NEXT: builtin_code: LSTM // CHECK-NEXT: } ], // CHECK-NEXT: subgraphs: [ { // CHECK-NEXT: tensors: [ { // CHECK-NEXT: shape: [ 1, 384 ], // CHECK-NEXT: buffer: 1, // CHECK-NEXT: name: "arg0",
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Jul 14 16:41:28 UTC 2022 - 4.4K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/converter_gen.cc
for (const auto *def : defs) { const int has_options = HasOptions(*def); if (has_options != builtin_options_id) { continue; } auto option_name = GetOperatorOptionName(*def); // Basic LSTM and LSTM ops share the same option to attribute converter. if (option_name == "BasicLSTMOptions") { continue; } os << formatv(" if(const auto *op = op_union.As{0}()) {{\n", option_name);
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Dec 19 15:05:28 UTC 2023 - 23.7K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/transforms/prepare_quantize_helper.h
#include "tensorflow/core/framework/types.pb.h" #include "tensorflow/lite/tools/optimize/operator_property.h" //===----------------------------------------------------------------------===// // The prepare-quantize Pass for LSTM. // namespace mlir { namespace TFL { constexpr double power_of_two_scale = 32768.0; // Same with the ordering of //tensorflow/compiler/mlir/lite/ir/tfl_ops.td constexpr const char* intermediate_attributes[] = {
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri May 03 18:01:23 UTC 2024 - 28K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/ir/tfl_op_enums.td
]> { let genSpecializedAttr = 0; let cppNamespace = "::mlir::TFL"; } def TFL_MirrorPaddingAttr : EnumAttr<TFL_Dialect, TFL_MirrorPaddingType, "mirror_pad_attr">; // LSTM Kernel Type attributes def TFL_LSTM_KT_FULL : I32EnumAttrCase<"FULL", 0>; def TFL_LSTM_KT_BASIC : I32EnumAttrCase<"BASIC", 1>; def TFL_LSTMKernelType : I32EnumAttr<"LSTMKernelType", "lstm_kernel_type", [
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Oct 20 00:05:24 UTC 2022 - 6.4K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/quantization/tools/op_quant_spec_getters_gen.cc
"GetOpQuantSpec(mlir::Operation *op, bool " "disable_per_channel_for_dense_layers = false) {\n"; // TODO(b/176258587): Move to OpTrait if this should be generalized. // Add special handling for LSTM. OUT(2) << "if (auto lstm_op = llvm::dyn_cast<TFL::LSTMOp>(op)) {\n"; OUT(4) << "return GetLstmOpQuantSpec<TFL::LSTMOp>(lstm_op);\n"; OUT(2) << "} else if (auto lstm_op = "
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Feb 15 11:18:44 UTC 2024 - 4.9K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/ir/tfl_ops.td
let summary = "Bidirectional sequence lstm operator"; let description = [{ Bidirectional lstm is essentially two lstms, one running forward & the other running backward. And the output is the concatenation of the two lstms. }]; let arguments = ( ins TFL_TensorOf<[F32, I8]>:$input, // Forward LSTM Weights
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Jun 06 19:09:08 UTC 2024 - 186K bytes - Viewed (0)