Results 1 - 10 of 20 for 4x128xf32 (0.66 sec)

  1. tensorflow/compiler/mlir/tensorflow/tests/tpu_rewrite.mlir

        func.return %0 : tensor<4x128xf32>
      }
      func.func @_func(%arg0: tensor<4x128xf32>) -> tensor<4x128xf32> {
        func.return %arg0 : tensor<4x128xf32>
      }
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu May 02 22:03:30 UTC 2024
    - 172.9K bytes
    - Viewed (0)
  2. tensorflow/compiler/mlir/lite/stablehlo/tests/legalize_hlo.mlir

    // CHECK:           %[[VAL_5:.*]] = "tf.Select"(%[[VAL_3]], %[[VAL_0]], %[[VAL_4]]) : (tensor<?x?xi1>, tensor<4x8xf32>, tensor<4x8xf32>) -> tensor<4x8xf32>
    // CHECK:           return %[[VAL_5]] : tensor<4x8xf32>
    // CHECK:         }
    func.func @relu_grad(%arg0: tensor<4x8xf32>, %arg1: tensor<?x?xf32>) -> tensor<4x8xf32> {
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed May 29 07:26:59 UTC 2024
    - 340.2K bytes
    - Viewed (0)
  3. tensorflow/compiler/mlir/lite/tests/shape-inference.mlir

    func.func @testConv2dShapeInferenceDynamic(%arg0: tensor<1x?x?x128xf32>, %arg1: tensor<128x3x3x128xf32>, %arg2: tensor<128xf32>) -> tensor<1x?x?x128xf32> {
      // CHECK: "tfl.conv_2d"(%arg0, %arg1, %arg2) <{dilation_h_factor = 2 : i32, dilation_w_factor = 2 : i32, fused_activation_function = "NONE", padding = "VALID", stride_h = 1 : i32, stride_w = 1 : i32}> : (tensor<1x?x?x128xf32>, tensor<128x3x3x128xf32>, tensor<128xf32>) -> tensor<1x?x?x128xf32>
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 11.5K bytes
    - Viewed (0)
  4. tensorflow/compiler/mlir/lite/tests/optimize.mlir

      func.return %4 : tensor<8x128xf32>
    
    // CHECK-LABEL: SoftMaxWithNormalization
    // CHECK: %[[RESULT:.*]] = "tfl.softmax"(%arg0) <{beta = 1.000000e+00 : f32}> : (tensor<8x128xf32>) -> tensor<8x128xf32>
    // CHECK: return %[[RESULT]] : tensor<8x128xf32>
    }
    
    func.func @SoftMaxWithoutNormalization(%arg0: tensor<8x128xf32>) -> tensor<8x128xf32> {
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu May 16 20:31:41 UTC 2024
    - 284.1K bytes
    - Viewed (0)
  5. tensorflow/compiler/mlir/lite/tests/optimize_batch_matmul.mlir

      func.return %1 : tensor<4x128x1xf32>
    
      // CHECK: %[[TRANSPOSED_X:.*]] = "tfl.transpose"
      // CHECK-SAME: (tensor<4x2x128xf32>, tensor<3xi32>) -> tensor<4x128x2xf32>
      // CHECK-NEXT: %[[FC_RES:.*]] = "tfl.fully_connected"(%[[TRANSPOSED_X]]
      // CHECK-SAME: <{fused_activation_function = "NONE", keep_num_dims = true, weights_format = "DEFAULT"}> : (tensor<4x128x2xf32>, tensor<1x2xf32>, none) -> tensor<4x128x1xf32>
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 9K bytes
    - Viewed (0)
  6. tensorflow/compiler/mlir/lite/tests/tfl_while_outline.mlir

        %12 = "tfl.logistic"(%11) : (tensor<4x2xf32>) -> tensor<4x2xf32>
        %13 = tfl.mul %arg4, %12 {fused_activation_function = "NONE"} : tensor<4x2xf32>
        %14 = "tfl.relu"(%10#1) : (tensor<4x2xf32>) -> tensor<4x2xf32>
        %15 = "tfl.logistic"(%10#0) : (tensor<4x2xf32>) -> tensor<4x2xf32>
        %16 = tfl.mul %15, %14 {fused_activation_function = "NONE"} : tensor<4x2xf32>
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 13.5K bytes
    - Viewed (0)
  7. tensorflow/compiler/mlir/tensorflow/tests/canonicalize.mlir

    func.func @testAddV2IdentityBroadcastTensor(%arg0: tensor<4x1xf32>, %arg1: tensor<4x2xf32>) -> (tensor<4x2xf32>, tensor<4x2xf32>, tensor<4x2xf32>, tensor<4x2xf32>) {
      %0 = "tf.Const"() {value = dense<0.0> : tensor<1x2xf32>} : () -> tensor<1x2xf32>
    
      // Operand and identity shapes are broadcastable. However, we cannot fold
      // because the operand does not match the result shape.
      %1 = "tf.AddV2"(%arg0, %0) : (tensor<4x1xf32>, tensor<1x2xf32>) -> tensor<4x2xf32>
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu May 09 22:07:10 UTC 2024
    - 132.1K bytes
    - Viewed (0)
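    The comment in the canonicalize.mlir snippet above notes that a "tf.AddV2" with an all-zero identity operand cannot be folded when the broadcasted result shape differs from the operand shape. For contrast, here is a minimal sketch of the foldable case, where the operand already matches the result shape; the function name and CHECK line are hypothetical and not taken from the test file:

      func.func @testAddV2IdentityFoldable(%arg0: tensor<4x2xf32>) -> tensor<4x2xf32> {
        %0 = "tf.Const"() {value = dense<0.0> : tensor<4x2xf32>} : () -> tensor<4x2xf32>
        // Operand and result shapes match, so the canonicalizer can drop the
        // add and return the operand directly.
        // CHECK: return %arg0 : tensor<4x2xf32>
        %1 = "tf.AddV2"(%arg0, %0) : (tensor<4x2xf32>, tensor<4x2xf32>) -> tensor<4x2xf32>
        func.return %1 : tensor<4x2xf32>
      }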
  8. tensorflow/compiler/mlir/lite/tests/prepare-quantize-signed.mlir

      %w = arith.constant dense<127.0> : tensor<4x12xf32>
      %b = arith.constant dense<0.0> : tensor<4xf32>
      %fc = "tfl.fully_connected"(%arg0, %w, %b) {fused_activation_function = "NONE", keep_num_dims = false, weights_format = "DEFAULT"} : (tensor<1x224x224x3xf32>, tensor<4x12xf32>, tensor<4xf32>) -> tensor<1x112x112x4xf32>
      func.return %fc : tensor<1x112x112x4xf32>
    
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 18.4K bytes
    - Viewed (0)
  9. tensorflow/compiler/mlir/tf2xla/tests/legalize-tf.mlir

        // CHECK-NEXT: return %[[DIV]] : tensor<4x10xf32>
        %0 = "tf.Softsign"(%arg0) : (tensor<4x10xf32>) -> tensor<4x10xf32>
        func.return %0 : tensor<4x10xf32>
    }
    
    // -----
    
    // CHECK-LABEL: func @softsign_grad
    func.func @softsign_grad(%arg0: tensor<4x10xf32>, %arg1: tensor<4x10xf32>) -> tensor<4x10xf32> {
    
        // CHECK-NEXT: %[[ONE:.*]] = mhlo.constant dense<1.000000e+00> : tensor<f32>
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Mon May 06 18:46:23 UTC 2024
    - 335.5K bytes
    - Viewed (0)
  10. tensorflow/compiler/mlir/lite/experimental/tac/tests/get-alternative-subgraph.mlir

    // CHECK:           %[[VAL_14:.*]] = "tfl.reshape"(%[[VAL_13]], %[[VAL_8]]) {tac.device = "GPU", tac.inference_type = "FLOAT"} : (tensor<1x1x384x128xf32>, tensor<2xi32>) -> tensor<384x128xf32>
    // CHECK:           %[[VAL_15:.*]] = "tfl.reshape"(%[[VAL_14]], %[[VAL_5]]) {tac.device = "GPU", tac.inference_type = "FLOAT"} : (tensor<384x128xf32>, tensor<3xi32>) -> tensor<1x384x128xf32>
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 20.1K bytes
    - Viewed (0)