Results 21 - 30 of 150 for lstm (0.04 sec)

  1. tensorflow/compiler/mlir/lite/quantization/tools/op_quant_spec_getters_gen.cc

                "GetOpQuantSpec(mlir::Operation *op, bool "
                "disable_per_channel_for_dense_layers = false) {\n";
      // TODO(b/176258587): Move to OpTrait if this should be generalized.
      // Add special handling for LSTM.
      OUT(2) << "if (auto lstm_op = llvm::dyn_cast<TFL::LSTMOp>(op)) {\n";
      OUT(4) << "return GetLstmOpQuantSpec<TFL::LSTMOp>(lstm_op);\n";
      OUT(2) << "} else if (auto lstm_op = "
    - Last Modified: Thu Feb 15 11:18:44 UTC 2024
    - 4.9K bytes
  2. tensorflow/compiler/mlir/lite/ir/tfl_ops.td

      let summary = "Bidirectional sequence lstm operator";
    
      let description = [{
        A bidirectional LSTM is essentially two LSTMs, one running forward and
        the other running backward; the output is the concatenation of the
        two LSTMs' outputs.
      }];
    
      let arguments = (
        ins TFL_TensorOf<[F32, I8]>:$input,
    
        // Forward LSTM Weights
    - Last Modified: Thu Jun 06 19:09:08 UTC 2024
    - 186K bytes
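
    Note: the description above matches how bidirectional LSTMs are built at
    the framework level. A minimal Keras sketch (TensorFlow 2.x assumed; the
    shapes are illustrative):

        import tensorflow as tf

        # Two LSTMs, one per direction; the default merge_mode="concat"
        # concatenates the per-timestep outputs, as tfl_ops.td describes.
        inputs = tf.keras.Input(shape=(32, 16))   # (time, features)
        outputs = tf.keras.layers.Bidirectional(
            tf.keras.layers.LSTM(64, return_sequences=True),
            merge_mode="concat")(inputs)          # last dim: 64 + 64 = 128
        model = tf.keras.Model(inputs, outputs)
        print(model.output_shape)                 # (None, 32, 128)
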
  3. tensorflow/compiler/mlir/lite/tests/ops.mlir

      // expected-error @+1 {{'tfl.lstm' op failed to verify that either projection weight must be specified or both projection weight and projection bias must not be specified}}
    - Last Modified: Thu Jun 06 19:09:08 UTC 2024
    - 189.2K bytes
  4. tensorflow/compiler/mlir/tfr/README.md

            used inside the composition should have support for these platforms.
            These ops can also be composite ops.
    
    *   (Performance) The user defines a custom kernel for a regular structure
        (e.g. an LSTM), but it is hard to add the logic to fuse the individual
        ops to target this kernel in the inference graph.
    
        *   *Solution*: The user should define a new TF op, which corresponds to the
    - Last Modified: Tue Mar 29 18:32:13 UTC 2022
    - 6.2K bytes
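
    Note: a minimal sketch of the composite-op idea described above, using
    plain tf.function (the real TFR registration API lives under
    tensorflow/compiler/mlir/tfr/python and is not shown here; the function
    below is hypothetical):

        import tensorflow as tf

        # Express the custom structure as a composition of regular TF ops so
        # a converter can later pattern-match the whole function onto one
        # fused kernel.
        @tf.function
        def simple_gate(x, w, b):
          # A gate-like building block composed from primitive ops.
          return tf.sigmoid(tf.matmul(x, w) + b)

        x = tf.random.normal([2, 8])
        w = tf.random.normal([8, 4])
        b = tf.zeros([4])
        print(simple_gate(x, w, b).shape)  # (2, 4)
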
  5. tensorflow/compiler/mlir/lite/utils/lstm_utils_test.cc

                3);
    
      auto it = fused_lstm_func_.getBody().back().rbegin();
      EXPECT_EQ(it->getName().getStringRef(),
                mlir::func::ReturnOp::getOperationName());
      it++;  // tensor_cast
      it++;  // lstm
      EXPECT_EQ(it->getName().getStringRef(),
                mlir::TFL::LSTMOp::getOperationName());
      EXPECT_EQ(it->getNumOperands(), 24);
      EXPECT_EQ(it->getNumResults(), 1);
      // cifg = false, so input2input is not None.
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 10K bytes
  6. tensorflow/compiler/mlir/lite/tf_tfl_passes.cc

      // The conversion pipeline has to run in the following order:
      // 1) Saved-model-related optimizations, like decomposing resource ops.
      // 2) Convert composite functions like LSTMs/RNNs, along with proper
      //    function inlining & DCE.
      // 3) Run the lower-static-tensor-list pass.
    
      // This decomposes resource ops like ResourceGather into read-variable op
    - Last Modified: Thu Jun 06 18:45:51 UTC 2024
    - 25.5K bytes
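
    Note: step 2 of the pipeline above is what fuses a Keras LSTM into a
    single tfl.unidirectional_sequence_lstm op. A sketch of driving it
    through the public converter API (the model shape is illustrative):

        import tensorflow as tf

        # Converting a Keras LSTM exercises the composite-function
        # conversion step: the LSTM is matched and fused into one TFLite op.
        model = tf.keras.Sequential([
            tf.keras.Input(shape=(16, 8)),
            tf.keras.layers.LSTM(32),
        ])
        converter = tf.lite.TFLiteConverter.from_keras_model(model)
        tflite_model = converter.convert()
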
  7. tensorflow/compiler/mlir/lite/tests/canonicalize.mlir

      %arg20: tensor<1x2048xf32>
    ) -> tensor<1x640xf32> {
      %cst = "tfl.no_value"() {value = unit} : () -> none
      %zero = "tfl.pseudo_const"() {value = dense<0.0> : tensor<640xf32>} : () -> tensor<640xf32>
      %0 = "tfl.lstm"(%arg0, %arg1, %arg2, %arg3, %arg4, %arg5, %arg6, %arg7, %arg8, %cst, %cst, %cst, %arg9, %arg10, %arg11, %arg12, %arg13, %zero, %arg19, %arg20, %arg15, %arg16, %arg17, %arg18) ({}) {
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 20.6K bytes
  8. tensorflow/compiler/mlir/lite/tests/prepare-quantize-post-training-16bits.mlir

    // CHECK-DAG: %[[input_15:.*]] = "tfl.dequantize"({{.*}}) : (tensor<1x3x!quant.uniform<i16:f32, 3.0517578125E-5>>) -> tensor<1x3xf32>
    // CHECK: %[[lstm:.*]] = "tfl.unidirectional_sequence_lstm"(
    // CHECK-SAME: %[[input_0]],
    // CHECK-SAME: %[[input_1]], %[[input_2]], %[[input_3]], %[[input_4]],
    // CHECK-SAME: %[[input_5]], %[[input_6]], %[[input_7]], %[[input_8]],
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 26.1K bytes
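
    Note: the i16/i8 pattern these CHECK lines test corresponds to 16x8
    post-training quantization (16-bit activations, 8-bit weights). A sketch
    via the converter API (the model and calibration data are illustrative):

        import tensorflow as tf

        model = tf.keras.Sequential([
            tf.keras.Input(shape=(16, 8)),
            tf.keras.layers.LSTM(32),
        ])

        def representative_dataset():
          # Hypothetical calibration data matching the model's input shape.
          for _ in range(100):
            yield [tf.random.normal([1, 16, 8])]

        converter = tf.lite.TFLiteConverter.from_keras_model(model)
        converter.optimizations = [tf.lite.Optimize.DEFAULT]
        converter.representative_dataset = representative_dataset
        # 16-bit activations with 8-bit weights, the mode tested above.
        converter.target_spec.supported_ops = [
            tf.lite.OpsSet
            .EXPERIMENTAL_TFLITE_BUILTINS_ACTIVATIONS_INT16_WEIGHTS_INT8
        ]
        tflite_model = converter.convert()
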
  9. tensorflow/compiler/mlir/lite/transforms/post_quantize.cc

        rewriter.replaceOpWithNewOp<QConstOp>(op, TypeAttr::get(result_type),
                                              reshaped_elements);
        return success();
      }
    };
    
    // Removes operations with side effects (e.g. LSTM, SVDF) that have
    // dangling (unused) outputs.
    template <typename OpTy>
    struct PruneUnusedOpsWithSideEffect : public OpRewritePattern<OpTy> {
     public:
      explicit PruneUnusedOpsWithSideEffect(
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 17.1K bytes
  10. tensorflow/compiler/mlir/lite/tests/prepare-quantize-dynamic-range.mlir

    // LSTMOpQuantized-DAG: %[[dq3:.*]] = "tfl.dequantize"({{.*}}) : (tensor<20x!quant.uniform<i8<-127:127>:f32, 0.0078740157480314959>>) -> tensor<20xf32>
    // LSTMOpQuantized: %[[lstm:.*]] = "tfl.unidirectional_sequence_lstm"(%arg0, %[[dq1]], %[[dq1]], %[[dq1]], %[[dq1]], %[[dq1]], %[[dq1]], %[[dq1]], %[[dq1]], %[[dq3]], %[[dq3]], %[[dq3]], %cst_0, %cst_0, %cst_0, %cst_0, %[[dq1]], %0, %cst_1, %cst_1, %0, %0, %0, %0)
    
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 38.2K bytes
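
    Note: the tfl.dequantize -> lstm pattern checked above is what
    dynamic-range quantization produces: weights stored as int8 and
    dequantized at runtime. A sketch (the model is illustrative):

        import tensorflow as tf

        model = tf.keras.Sequential([
            tf.keras.Input(shape=(16, 8)),
            tf.keras.layers.LSTM(32),
        ])
        converter = tf.lite.TFLiteConverter.from_keras_model(model)
        # With no representative dataset, Optimize.DEFAULT yields
        # dynamic-range quantization of the weights.
        converter.optimizations = [tf.lite.Optimize.DEFAULT]
        tflite_model = converter.convert()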