Results 1 - 8 of 8 for 1x8x8x128xf32 (0.23 sec)

  1. tensorflow/compiler/mlir/lite/tests/ops.mlir

    }
    
    // -----
    
    func.func @testMaxUnpooling2D(%arg0: tensor<1x8x8x128xf32>, %arg1: tensor<1x8x8x128xf32>) -> tensor<1x8x8x128xf32> {
      // custom op for "tfl.max_unpooling_2d"(%arg0, %arg1) {filter_h = 2 : i32, filter_w = 2 : i32, padding = "SAME", stride_h = 2 : i32, stride_w = 2 : i32} : (tensor<1x8x8x128xf32>, tensor<1x8x8x128xf32>) -> (tensor<1x8x8x128xf32>)
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Jun 06 19:09:08 UTC 2024
    - 189.2K bytes
  2. tensorflow/compiler/mlir/tensorflow/tests/fused_kernel_matcher.mlir

      // CHECK: %[[VAL_1:.*]] = "tf.Identity"(%[[VAL_0]]) : (tensor<*xf32>) -> tensor<*xf32>
      // CHECK: return %[[VAL_1]]
      %0 = "tf.Conv2D"(%arg2, %arg1) {data_format = "NHWC", dilations = [1, 1, 1, 1], explicit_paddings = [], padding = "SAME", strides = [1, 1, 1, 1], use_cudnn_on_gpu = true} : (tensor<8x32x32x3xf32>, tensor<1x1x3x128xf32>) -> tensor<*xf32>
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Mon Oct 30 06:52:55 UTC 2023
    - 13.2K bytes
  3. tensorflow/compiler/mlir/lite/tests/dilated-conv.mlir

      // CHECK-NEXT: [[RESULT:%.*]] = "tf.BiasAdd"([[SQUEEZE]], [[BIAS]]) : (tensor<1x128x128xf32>, tensor<128xf32>) -> tensor<1x128x128xf32>
      // CHECK-NEXT: return [[RESULT]] : tensor<1x128x128xf32>
    }
    
    func.func @testDilatedDepthWiseConvWithExpandSqueeze1(%arg0: tensor<1x128x128xf32>, %arg1: tensor<5x5x1x1xf32>, %arg2: tensor<128xf32>) -> tensor<1x128x128xf32> {
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Mon Oct 30 06:52:55 UTC 2023
    - 44.7K bytes
  4. tensorflow/compiler/mlir/lite/experimental/tac/tests/device-transform-gpu.mlir

    // CHECK:           [[VAL_7:%.*]] = "tfl.slice"([[VAL_1]], [[VAL_3]], [[VAL_5]]) : (tensor<1x8x8x1024xf32>, tensor<4xi32>, tensor<4xi32>) -> tensor<1x8x8x256xf32>
    // CHECK:           [[VAL_8:%.*]] = "tfl.slice"([[VAL_1]], [[VAL_4]], [[VAL_5]]) : (tensor<1x8x8x1024xf32>, tensor<4xi32>, tensor<4xi32>) -> tensor<1x8x8x256xf32>
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 15.6K bytes
  5. tensorflow/compiler/mlir/lite/experimental/tac/tests/raise-target-subgraphs.mlir

      %7 = "tfl.add"(%1, %6) {tac.device = "GPU", tac.inference_type = "FLOAT", fused_activation_function = "NONE"} : (tensor<1x128x128xf32>, tensor<1x128x128xf32>) -> tensor<1x128x128xf32>
      func.return %7 : tensor<1x128x128xf32>
    }
    
    // CHECK:   func @norm1(%[[VAL_0:.*]]: tensor<1x128x128xf32>) -> tensor<1x128x128xf32> {
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 74.9K bytes
  6. tensorflow/compiler/mlir/lite/experimental/tac/tests/get-alternative-subgraph.mlir

    func.func private @func_20_GPU_FLOAT(%arg0: tensor<128x128xf32>, %arg1: tensor<3xi32>) -> tensor<1x128x128xf32> attributes {tac.device = "GPU", tac.inference_type = "FLOAT", tac.interface_name = "func_20"} {
      %0 = "tfl.reshape"(%arg0, %arg1) {tac.device = "GPU", tac.inference_type = "FLOAT"} : (tensor<128x128xf32>, tensor<3xi32>) -> tensor<1x128x128xf32>
      func.return %0 : tensor<1x128x128xf32>
    }
    
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 20.1K bytes
  7. tensorflow/compiler/mlir/lite/stablehlo/tests/legalize_hlo.mlir

    // CHECK:           %[[VAL_2:.*]] = "tf.Conv2D"(%[[VAL_0]], %[[VAL_1]]) <{data_format = "NHWC", dilations = [1, 1, 1, 1], explicit_paddings = [], padding = "SAME", strides = [1, 1, 1, 1], use_cudnn_on_gpu = true}> : (tensor<1x8x8x207xf32>, tensor<3x3x207x16xf32>) -> tensor<1x8x8x16xf32>
    // CHECK:           return %[[VAL_2]] : tensor<1x8x8x16xf32>
    // CHECK:         }
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed May 29 07:26:59 UTC 2024
    - 340.2K bytes
  8. tensorflow/compiler/mlir/lite/tests/optimize.mlir

      %894 = "tfl.reshape"(%arg0, %cst_10) : (tensor<128xf32>, tensor<4xi32>) -> tensor<1x1x1x128xf32>
      %895 = "tfl.mul"(%894, %arg1) {fused_activation_function = "NONE"} : (tensor<1x1x1x128xf32>, tensor<1x512x512x128xf32>) -> tensor<1x512x512x128xf32>
      %896 = "tfl.mul"(%arg2, %894) {fused_activation_function = "NONE"} : (tensor<1x512x512x128xf32>, tensor<1x1x1x128xf32>) -> tensor<1x512x512x128xf32>
    
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu May 16 20:31:41 UTC 2024
    - 284.1K bytes