Results 11 - 17 of 17 for 256x32x32x3xf32 (0.28 sec)
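
For context, 256x32x32x3xf32 is MLIR's spelling of a ranked tensor type: in the NHWC convolution tests matched below it is a batch of 256 feature maps with 32x32 spatial dimensions and 3 channels of f32 elements. A minimal sketch of how the type appears in an op signature, adapted from the tf-ops.mlir hit in result 2 rather than copied verbatim from any one file:

    // Illustrative only: an NHWC input of the query type feeding a 3x3 Conv2D.
    %0 = "tf.Conv2D"(%arg0, %arg1) {padding = "SAME", strides = [1, 1, 1, 1]}
         : (tensor<256x32x32x3xf32>, tensor<3x3x3x16xf32>) -> tensor<256x32x32x16xf32>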

  1. tensorflow/compiler/mlir/lite/tests/optimize.mlir

        padding = "SAME",
        stride_h = 1 : i32,
        stride_w = 1 : i32
      } : (tensor<256x32x32x3xf32>, tensor<2x3x3x3xf32>, tensor<2xf32>) -> tensor<256x32x32x2xf32>
      %1 = "tfl.add"(%0, %cst) {fused_activation_function = "NONE"} : (tensor<256x32x32x2xf32>, tensor<1x1x1x2xf32>) -> tensor<256x32x32x2xf32>
      func.return %1 : tensor<256x32x32x2xf32>
    
      // CHECK-DAG: %cst = arith.constant dense<[2.000000e+00, 4.000000e+00]> : tensor<2xf32>
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu May 16 20:31:41 UTC 2024
    - 284.1K bytes
  2. tensorflow/compiler/mlir/tensorflow/tests/tf-ops.mlir

    // -----
    
    // CHECK-LABEL: func @testValidConv2D
    func.func @testValidConv2D(%arg0: tensor<256x32x32x3xf32>, %arg1: tensor<3x3x3x16xf32>) -> tensor<256x32x32x16xf32> {
      %0 = "tf.Conv2D"(%arg0, %arg1) {padding = "SAME", strides = [1, 1, 1, 1]} : (tensor<256x32x32x3xf32>, tensor<3x3x3x16xf32>) -> tensor<256x32x32x16xf32>
      func.return %0 : tensor<256x32x32x16xf32>
    }
    
    // -----
    
    // CHECK-LABEL: func @testValidDynamicConv2D
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Mon Oct 23 14:40:35 UTC 2023
    - 236.4K bytes
  3. tensorflow/compiler/mlir/lite/tests/ops.mlir

      func.return %0 : tensor<256x32x32x16xf32>
    }
    
    // -----
    
    func.func @testConv2D4DBias(tensor<256x32x32x3xf32>, tensor<16x3x3x3xf32>, tensor<1x1x1x16xf32>) -> tensor<256x32x32x16xf32> {
    ^bb0(%arg0: tensor<256x32x32x3xf32>, %arg1: tensor<16x3x3x3xf32>, %arg2: tensor<1x1x1x16xf32>):
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Jun 06 19:09:08 UTC 2024
    - 189.2K bytes
  4. tensorflow/compiler/mlir/lite/experimental/tac/tests/raise-target-subgraphs.mlir

    // CHECK:           return %[[VAL_5]] : tensor<256x30x30x16xf32>
    // CHECK:         }
    
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 74.9K bytes
  5. tensorflow/compiler/mlir/tensorflow/tests/shape_inference.mlir

      }
    
      // CHECK-LABEL: func @conv2d_unranked_filter
      func.func @conv2d_unranked_filter(%arg0: tensor<256x32x32x3xf32>, %arg1: tensor<*xf32>) -> tensor<*xf32> {
        // CHECK: "tf.Conv2D"
        // CHECK-SAME: -> tensor<256x?x?x?xf32>
        %0 = "tf.Conv2D"(%arg0, %arg1) {padding = "SAME", strides = [1, 1, 1, 1]} : (tensor<256x32x32x3xf32>, tensor<*xf32>) -> tensor<*xf32>
        func.return %0 : tensor<*xf32>
      }
    
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue Jan 23 17:24:10 UTC 2024
    - 167.4K bytes
  6. tensorflow/compiler/mlir/lite/tests/optimize_no_verify.mlir

    // TFLite runtime restrictions.
    // RUN: tf-opt %s -tfl-optimize | FileCheck %s
    
    // CHECK-LABEL: fuseScalarAddIntoConv2dHalf
    func.func @fuseScalarAddIntoConv2dHalf(%arg0: tensor<256x32x32x3xf16>, %arg1: tensor<16x3x3x3xf16>) -> tensor<256x8x7x16xf16> {
      %cst = arith.constant dense<1.5> : tensor<f16>
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 5.8K bytes
  7. tensorflow/compiler/mlir/tf2xla/tests/legalize-tf.mlir

    func.func @conv_explicit_paddings(%arg0: tensor<256x32x32x6xf32>, %arg1: tensor<3x3x3x16xf32>) -> tensor<256x9x7x16xf32> {
    
      // CHECK: mhlo.convolution(%arg0, %arg1)
      // CHECK-SAME{LITERAL}: pad = [[6, 0], [3, 3]]
    
      %0 = "tf.Conv2D"(%arg0, %arg1) {data_format = "NHWC", dilations = [1, 2, 3, 1], padding = "EXPLICIT", explicit_paddings = [0, 0, 6, 0, 3, 3, 0, 0], strides = [1, 4, 5, 1]} : (tensor<256x32x32x6xf32>, tensor<3x3x3x16xf32>) -> tensor<256x9x7x16xf32>
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Mon May 06 18:46:23 UTC 2024
    - 335.5K bytes