Results 101 - 110 of 113 for 2x5xf32 (0.12 sec)

  1. tensorflow/compiler/mlir/lite/tests/tfl_while_outline.mlir

        %9 = "tfl.fully_connected"(%8, %cst_10, %cst_0) {fused_activation_function = "NONE", keep_num_dims = false, weights_format = "DEFAULT"} : (tensor<4x5xf32>, tensor<8x5xf32>, tensor<8xf32>) -> tensor<4x8xf32>
        %10:4 = "tfl.split"(%cst_5, %9) {num_splits = 4 : i32} : (tensor<i32>, tensor<4x8xf32>) -> (tensor<4x2xf32>, tensor<4x2xf32>, tensor<4x2xf32>, tensor<4x2xf32>)
    - Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 13.5K bytes
    - Viewed (0)
  2. tensorflow/compiler/mlir/lite/experimental/tac/tests/target-annotation.mlir

      %2 = "tfl.relu"(%arg0) : (tensor<1xf32>) -> tensor<1xf32>
      // CHECK: tac.device = "CPU", tac.inference_type = "FLOAT"
      %3 = "tfl.pack"(%arg0, %arg1) {axis = 0 : i32, values_count = 2 : i32} : (tensor<1xf32>, tensor<1xf32>) -> tensor<2x1xf32>
      func.return
    }
    
    func.func @notAnnotateConst(%arg0: tensor<256x32x32x3xf32>) -> tensor<256x30x30x16xf32> {
      // CHECK-NOT: tac.device tac.inference_type
    - Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Fri May 19 19:32:06 UTC 2023
    - 6.2K bytes
    - Viewed (0)
  3. tensorflow/compiler/mlir/tensorflow/tests/einsum.mlir

    }
    
    func.func @einsum_matmul(%arg0: tensor<7x9xf32>, %arg1: tensor<9x5xf32>) -> tensor<7x5xf32> {
      %0 = "tf.Einsum"(%arg0, %arg1) {T = "tfdtype$DT_FLOAT", equation = "ae,ed->ad"}: (tensor<7x9xf32>, tensor<9x5xf32>) -> tensor<7x5xf32>
      func.return %0 : tensor<7x5xf32>
      // CHECK-LABEL: einsum_matmul
      // CHECK: %[[v0:.*]] = "tf.BatchMatMulV2"(%arg0, %arg1) <{adj_x = false, adj_y = false}> : (tensor<7x9xf32>, tensor<9x5xf32>) -> tensor<7x5xf32>
    - Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Fri Jan 05 18:35:42 UTC 2024
    - 25.9K bytes
    - Viewed (0)
  4. tensorflow/compiler/mlir/lite/experimental/tac/tests/raise-target-subgraphs.mlir

      func.return %3 : tensor<2x1xf32>
    }
    }
    
    // CHECK:   func @simpleTest(%[[VAL_0:.*]]: tensor<1xf32>, %[[VAL_1:.*]]: tensor<1xf32>, %[[VAL_2:.*]]: tensor<1xf32>, %[[VAL_3:.*]]: tensor<1xf32>) -> tensor<2x1xf32> {
    - Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 74.9K bytes
    - Viewed (0)
  5. tensorflow/compiler/mlir/lite/stablehlo/tests/legalize_hlo.mlir

    // CHECK:           %[[VAL_2:.*]] = "tf.Div"(%[[VAL_0]], %[[VAL_0]]) : (tensor<2xf32>, tensor<2xf32>) -> tensor<2xf32>
    // CHECK:           %[[VAL_3:.*]] = "tf.FloorDiv"(%[[VAL_0]], %[[VAL_0]]) : (tensor<2xf32>, tensor<2xf32>) -> tensor<2xf32>
    // CHECK:           return %[[VAL_3]] : tensor<2xf32>
    // CHECK:         }
    func.func @floordiv_f32(%arg0: tensor<2xf32>) -> tensor<2xf32> {
    - Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed May 29 07:26:59 UTC 2024
    - 340.2K bytes
    - Viewed (0)
  6. tensorflow/compiler/mlir/quantization/stablehlo/tests/bridge/convert-tf-quant-types.mlir

      // CHECK: return %[[output]] : tensor<6x3xi8>
      %0 = "tf.ConcatV2"(%arg0, %arg1, %axis) : (tensor<3x3xf32>, tensor<3x3xf32>, tensor<i64>) -> tensor<6x3xf32>
      %1 = "tf.UniformQuantize"(%0, %scales, %zps) {
        quantization_axis = -1 : i64, quantization_min_val = -128 : i64, quantization_max_val = 127 : i64
      } : (tensor<6x3xf32>, tensor<f32>, tensor<i32>) -> tensor<6x3x!tf_type.qint8>
      func.return %1 : tensor<6x3x!tf_type.qint8>
    }
    
    - Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Mon Oct 30 06:52:55 UTC 2023
    - 25.9K bytes
    - Viewed (0)
  7. tensorflow/compiler/mlir/tensorflow/transforms/tf_passes.td

        ```mlir
          %0 = "tf.Const"() {value = dense<[[42.0]]> : tensor<1x1xf32>} : () -> tensor<1x1xf32>
          %1 = "tf.Const"() {device = "", value = dense<[[42.0]]> : tensor<1x1xf32>} : () -> tensor<1x1xf32>
          %2 = "tf.Const"() {device = "baz", value = dense<[[42.0]]> : tensor<1x1xf32>} : () -> tensor<1x1xf32>
        ```
    
        then running this pass with 'default-device=foobar', we get:
    
        ```mlir
    - Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed Jun 12 21:18:05 UTC 2024
    - 99.6K bytes
    - Viewed (0)
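    The tf_passes.td excerpt above (and the matching tf_passes.md excerpt in result 8) is truncated just before the pass output. As a rough sketch, assuming the documented pass only fills in 'default-device' where the device attribute is empty or missing and leaves explicit devices such as "baz" untouched, the continuation would look like:

    ```mlir
      // Hypothetical continuation (not shown in the snippet): the default device
      // is applied to %0 and %1; %2 keeps its explicit "baz" device.
      %0 = "tf.Const"() {device = "foobar", value = dense<[[42.0]]> : tensor<1x1xf32>} : () -> tensor<1x1xf32>
      %1 = "tf.Const"() {device = "foobar", value = dense<[[42.0]]> : tensor<1x1xf32>} : () -> tensor<1x1xf32>
      %2 = "tf.Const"() {device = "baz", value = dense<[[42.0]]> : tensor<1x1xf32>} : () -> tensor<1x1xf32>
    ```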
  8. tensorflow/compiler/mlir/g3doc/_includes/tf_passes.md

    For example, if we have the code
    
    ```mlir
      %0 = "tf.Const"() {value = dense<[[42.0]]> : tensor<1x1xf32>} : () -> tensor<1x1xf32>
      %1 = "tf.Const"() {device = "", value = dense<[[42.0]]> : tensor<1x1xf32>} : () -> tensor<1x1xf32>
      %2 = "tf.Const"() {device = "baz", value = dense<[[42.0]]> : tensor<1x1xf32>} : () -> tensor<1x1xf32>
    ```
    
    then running this pass with 'default-device=foobar', we get:
    
    ```mlir
    - Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed Aug 02 02:26:39 UTC 2023
    - 96.4K bytes
    - Viewed (0)
  9. tensorflow/compiler/mlir/tensorflow/tests/mark_ops_for_outside_compilation.mlir

        %2:2 = "tf.RecvTPUEmbeddingActivations"() {_tpu_embedding_layer = "call1", config = "\0A\0B\0C\0D"} : () -> (tensor<2x2xf32>, tensor<4x4xf32>)
        "tf.SendTPUEmbeddingGradients"(%2#0, %2#1) {_tpu_embedding_layer = "call1", config = "\0A\0B\0C\0D", operandSegmentSizes = array<i32: 2, 0>} : (tensor<2x2xf32>, tensor<4x4xf32>) -> ()
        tf_device.return
    - Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed Apr 24 16:22:32 UTC 2024
    - 29.5K bytes
    - Viewed (0)
  10. tensorflow/compiler/mlir/lite/tests/ops.mlir

      func.return %24 : tensor<1x4xf32>
    }
    - Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Jun 06 19:09:08 UTC 2024
    - 189.2K bytes
    - Viewed (0)