Results 1 - 5 of 5 for 1x128x3xf32 (0.52 sec)

  1. tensorflow/compiler/mlir/lite/tests/optimize_batch_matmul.mlir

    // CHECK-NOT: "tfl.batch_matmul"
    func.func @Batchmatmul2Fullyconnected(%arg0: tensor<4x128x2xf32>) -> (tensor<4x128x1xf32>) {
      %0 = arith.constant dense<[[1.0], [2.0]]> : tensor<2x1xf32>
      %1 = "tfl.batch_matmul"(%arg0, %0) {adj_x = false, adj_y = false, asymmetric_quantize_inputs = false} : (tensor<4x128x2xf32>, tensor<2x1xf32>) -> tensor<4x128x1xf32>
      func.return %1 : tensor<4x128x1xf32>
      // CHECK-NEXT: %[[CONST_WEIGHT:.*]] = arith.constant
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 9K bytes
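
    The rewrite this test guards (a tfl.batch_matmul whose right-hand side is a
    constant becomes a tfl.fully_connected) is plain linear algebra. A minimal
    NumPy sketch of the equivalence, assuming random input data and the
    transposed [out, in] weight layout fully_connected conventionally uses:

      import numpy as np

      x = np.random.rand(4, 128, 2).astype(np.float32)   # tensor<4x128x2xf32> input
      w = np.array([[1.0], [2.0]], dtype=np.float32)      # the tensor<2x1xf32> constant

      batch_matmul = x @ w                                 # what tfl.batch_matmul computes
      # fully_connected applies weights of shape [out_channels, in_channels],
      # i.e. the transposed constant, to the innermost dimension.
      fully_connected = np.einsum('bti,oi->bto', x, w.T)

      assert batch_matmul.shape == (4, 128, 1)
      assert np.allclose(batch_matmul, fully_connected)
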
  2. tensorflow/compiler/mlir/lite/tests/lift_tflite_flex_ops.mlir

    func.func @TfBatchMatMulV2(%arg0: tensor<4x128x2xf32>, %arg1: tensor<2x1xf32>) -> tensor<4x128x1xf32> {
      %0 = "tfl.custom"(%arg0, %arg1) {
        custom_code = "FlexBatchMatMulV2",
        custom_option = #tfl<const_bytes : "0x0D42617463684D61744D756C56320038120D42617463684D61744D756C56321A001A002A070A0154120230012A0B0A0561646A5F78120228002A0B0A0561646A5F791202280032000002493B1414042801">
      } : (tensor<4x128x2xf32>, tensor<2x1xf32>) -> tensor<4x128x1xf32>
    
    - Last Modified: Mon Oct 30 06:52:55 UTC 2023
    - 6.1K bytes
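
    The custom_option blob above looks opaque, but decoding the quoted hex with
    plain Python (no TFLite dependency) shows the original TF op name and its
    adj_x/adj_y attributes embedded in it, which is the information that lifting
    the flex op back to a TF BatchMatMulV2 relies on:

      raw = bytes.fromhex(
          "0D42617463684D61744D756C56320038120D42617463684D61744D756C5632"
          "1A001A002A070A0154120230012A0B0A0561646A5F78120228002A0B0A05"
          "61646A5F791202280032000002493B1414042801"
      )
      # The op name and attribute keys are stored as readable bytes inside the blob.
      assert b"BatchMatMulV2" in raw
      assert b"adj_x" in raw and b"adj_y" in raw
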
  3. tensorflow/compiler/mlir/tensorflow/tests/layout_optimization_move_transposes_begin.mlir

    func.func @dont_move_transpose_different_ranks(%arg0:tensor<1x1x2x3xf32>, %arg1:tensor<2x3xf32>) -> tensor<1x2x1x3xf32> {
      %cst = "tf.Const"() {value = dense<[0, 2, 1, 3]> : tensor<4xi32>} : () -> tensor<4xi32>
      %0 = "tf.AddV2"(%arg0, %arg1) {device = ""} : (tensor<1x1x2x3xf32>, tensor<2x3xf32>) -> tensor<1x1x2x3xf32>
      %1 = "tf.Transpose"(%0, %cst) {device = ""} : (tensor<1x1x2x3xf32>, tensor<4xi32>) -> tensor<1x2x1x3xf32>
    
    - Last Modified: Mon Oct 30 06:52:55 UTC 2023
    - 6.3K bytes
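
    The test name says the transpose must stay put when the operands of the
    preceding op have different ranks. A NumPy stand-in for the shapes involved
    (random data, with np operations replacing tf.AddV2 and tf.Transpose) makes
    the rank mismatch concrete:

      import numpy as np

      a = np.random.rand(1, 1, 2, 3).astype(np.float32)   # rank-4 operand %arg0
      b = np.random.rand(2, 3).astype(np.float32)          # rank-2 operand %arg1

      added = a + b                                # broadcasting add -> (1, 1, 2, 3)
      out = np.transpose(added, (0, 2, 1, 3))      # perm from the tf.Const above
      assert out.shape == (1, 2, 1, 3)
      # A four-axis permutation has no direct counterpart on the rank-2 operand,
      # which is presumably why the pass leaves this pattern alone.
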
  4. tensorflow/compiler/mlir/lite/tests/quantize-dynamic-range-float16.mlir

      func.return %17 : tensor<1x2x3xf32>
    
      // CHECK: %[[NONE:.*]] = "tfl.no_value"() <{value}> : () -> none
      // CHECK: %[[DQ_1:.*]] = "tfl.dequantize"({{.*}}) : (tensor<1x1xf16>) -> tensor<1x1xf32>
      // CHECK: %[[DQ_2:.*]] = "tfl.dequantize"({{.*}}) : (tensor<1x1xf16>) -> tensor<1x1xf32>
      // CHECK: %[[DQ_3:.*]] = "tfl.dequantize"({{.*}}) : (tensor<1x1xf16>) -> tensor<1x1xf32>
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 4.6K bytes
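
    The checks above only assert that f16 weights are dequantized back to f32.
    A rough NumPy analogue of that float16 round trip, using a random 1x1 weight
    standing in for the tensors in the CHECK lines:

      import numpy as np

      w_f32 = np.random.rand(1, 1).astype(np.float32)   # original f32 weight
      w_f16 = w_f32.astype(np.float16)                    # stored form after float16 quantization
      w_deq = w_f16.astype(np.float32)                    # what tfl.dequantize reconstructs

      # Half precision keeps roughly three decimal digits, so the round trip is
      # close but not bit-exact.
      assert np.allclose(w_deq, w_f32, atol=1e-3)
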
  5. tensorflow/compiler/mlir/tensorflow/tests/batchmatmul_to_einsum.mlir

    func.func @test_batch_matmul_to_einsum(%arg0: tensor<1x2x3xf32>, %arg1: tensor<3x4xf32>) -> tensor<1x2x4xf32> {
      // CHECK-LABEL: test_batch_matmul_to_einsum
      // CHECK: "tf.Einsum"(%arg0, %arg1) <{equation = "...mk,...kn->...mn"}> : (tensor<1x2x3xf32>, tensor<3x4xf32>) -> tensor<1x2x4xf32>
      %0 = "tf.BatchMatMul"(%arg0, %arg1) {adj_x = false, adj_y = false} : (tensor<1x2x3xf32>, tensor<3x4xf32>) -> tensor<1x2x4xf32>
      func.return %0: tensor<1x2x4xf32>
    - Last Modified: Mon Oct 30 06:52:55 UTC 2023
    - 3K bytes
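
    The CHECK line above already spells out the target form. A quick NumPy sanity
    check with random data that the emitted einsum equation "...mk,...kn->...mn"
    is just a batched matrix multiply:

      import numpy as np

      a = np.random.rand(1, 2, 3).astype(np.float32)   # tensor<1x2x3xf32>
      b = np.random.rand(3, 4).astype(np.float32)       # tensor<3x4xf32>

      via_einsum = np.einsum('...mk,...kn->...mn', a, b)
      via_matmul = a @ b                                 # what tf.BatchMatMul computes
      assert via_einsum.shape == (1, 2, 4)
      assert np.allclose(via_einsum, via_matmul)
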