Results 1 - 10 of 16 for 5x5x3x8xf32 (0.17 sec)

  1. tensorflow/compiler/mlir/lite/tests/dilated-conv.mlir

      %2 = "tf.BatchToSpaceND"(%1, %cst, %cst_0) : (tensor<4x64x64x8xf32>, tensor<2xi32>, tensor<2x2xi32>) -> tensor<1x120x120x8xf32>
      func.return %2 : tensor<1x120x120x8xf32>
    
      // CHECK-LABEL: testDilatedConv
      // CHECK-SAME: ([[INPUT:%.*]]: tensor<1x128x128x3xf32>, [[FILTER:%.*]]: tensor<5x5x3x8xf32>)
    Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Mon Oct 30 06:52:55 UTC 2023 - 44.7K bytes - Viewed (0)
  2. tensorflow/compiler/mlir/lite/stablehlo/tests/tf-tfl-translate-serialize-stablehlo-conv.mlir

    module {
    func.func @main(%arg0: tensor<4x68x68x3xf32>, %arg1: tensor<5x5x3x8xf32>) -> tensor<4x64x64x8xf32> {
      %0 = "tf.Conv2D"(%arg0, %arg1) {padding = "VALID", strides = [1, 1, 1, 1]} : (tensor<4x68x68x3xf32>, tensor<5x5x3x8xf32>) -> tensor<4x64x64x8xf32>
      func.return %0 : tensor<4x64x64x8xf32>
    }
    Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Mon Feb 27 23:35:37 UTC 2023 - 425 bytes - Viewed (0)
  3. tensorflow/compiler/mlir/lite/tests/push-tpose-through-ewise.mlir

      func.return %1 : tensor<5x2x3x4xf32>
    }
    
    // CHECK: %cst = arith.constant dense<1.000000e+00> : tensor<5x2x3x4xf32>
    // CHECK: %0 = "tfl.transpose"(%arg0, %arg1) : (tensor<2x3x4x5xf32>, tensor<4xi32>) -> tensor<5x2x3x4xf32>
    // CHECK: %1 = tfl.add %0, %cst {fused_activation_function = "NONE"} : tensor<5x2x3x4xf32>
    // CHECK: return %1 : tensor<5x2x3x4xf32>
    
    // -----
    
    // CHECK-LABEL: doubleTposeInput
    Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu May 02 09:41:17 UTC 2024 - 8.9K bytes - Viewed (0)
  4. tensorflow/compiler/mlir/tensorflow/tests/layout_optimization_layout_assignment_to_nchw.mlir

           } : (tensor<1x32x32x3xf32>, tensor<4xi32>, tensor<1x32x32x8xf32>)
             -> tensor<1x1x3x8xf32>
    
      func.return %0 : tensor<1x1x3x8xf32>
    }
    
    // CHECK-LABEL: func @transposeConv2DBackpropInput
    func.func @transposeConv2DBackpropInput(
      %input_sizes: tensor<4xi32>,
      %filter: tensor<1x1x3x8xf32>,
      %out_backprop: tensor<1x32x32x8xf32>
    ) -> tensor<1x32x32x3xf32> {
    
    Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Mon Oct 30 06:52:55 UTC 2023 - 9K bytes - Viewed (0)
  5. tensorflow/compiler/mlir/quantization/stablehlo/tests/passes/fold_constant_transpose.mlir

    // CHECK-LABEL: transpose_simple_4d
    func.func @transpose_simple_4d() -> tensor<5x2x3x4xf32> {
      %0 = stablehlo.constant dense<1.000000e+0> : tensor<2x3x4x5xf32>
      %1 = stablehlo.transpose %0, dims = [3, 0, 1, 2] : (tensor<2x3x4x5xf32>) -> tensor<5x2x3x4xf32>
      return %1 : tensor<5x2x3x4xf32>
    }
    // CHECK-DAG: %[[CONST_0:.+]] = stablehlo.constant dense<1.000000e+00> : tensor<5x2x3x4xf32>
    // CHECK-NOT: transpose
    // CHECK: return %[[CONST_0]] : tensor<5x2x3x4xf32>
    Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Mar 12 08:06:02 UTC 2024 - 2.2K bytes - Viewed (0)
  6. tensorflow/compiler/mlir/tensorflow/tests/mlir2graphdef/fetch_feed_names.mlir

          %outputs_2, %control_3 = tf_executor.island wraps "tf.Conv2D"(%arg0, %outputs_0) {data_format = "NHWC", device = "", dilations = [1, 1, 1, 1], explicit_paddings = [], padding = "VALID", strides = [1, 1, 1, 1], use_cudnn_on_gpu = true} : (tensor<?x28x28x3xf32>, tensor<5x5x3x32xf32>) -> tensor<*xf32>
    Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri Mar 25 12:28:56 UTC 2022 - 3K bytes - Viewed (0)
  7. tensorflow/compiler/mlir/tensorflow/tests/layout_optimization_to_nchw.mlir

    // RUN: tf-opt %s -tf-layout-optimization=force-data-format=NCHW -verify-diagnostics | FileCheck %s --dump-input=always
    
    // CHECK-LABEL: func @transposeConv2D
    func.func @transposeConv2D(%arg0: tensor<1x3x32x32xf32>, %arg1: tensor<1x1x3x8xf32>) -> tensor<1x8x32x32xf32> {
    
      // Convert input: NCHW -> NHWC
      %0 = "tf.Const"() {value = dense<[0, 2, 3, 1]> : tensor<4xi32>} : () -> tensor<4xi32>
    Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Mar 24 05:47:26 UTC 2022 - 1.3K bytes - Viewed (0)
  8. tensorflow/compiler/mlir/quantization/stablehlo/tests/passes/nchw_convolution_to_nhwc.mlir

      %0 = stablehlo.constant() {value = dense<7.000000e+00> : tensor<8x3x3x8xf32>} : () -> tensor<8x3x3x8xf32>
      %2 = stablehlo.convolution(%arg0, %0) dim_numbers = [b, f, 0, 1]x[i, 0, 1, o]->[b, f, 0, 1], window = {pad = [[1, 1], [1, 1]]} {batch_group_count = 1 : i64, feature_group_count = 1 : i64} : (tensor<1x8x4x4xf32>, tensor<8x3x3x8xf32>) -> tensor<1x8x4x4xf32>
      return %2 : tensor<1x8x4x4xf32>
    }
    
    Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Mon Mar 25 23:00:47 UTC 2024 - 5.5K bytes - Viewed (0)
  9. tensorflow/compiler/mlir/tensorflow/tests/layout_optimization_layout_assignment_to_nhwc.mlir

    // dilations, etc...). This test only verifies that changing convolution data
    // layout will update all the attributes.
    
    // CHECK-LABEL: func @transposeConv2D
    func.func @transposeConv2D(%input: tensor<1x3x32x32xf32>, %filter: tensor<1x1x3x8xf32>) -> tensor<1x8x7x6xf32> {
    
      // CHECK: %[[ARG_PERM:.*]] = "tf.Const"() <{value = dense<[0, 2, 3, 1]> : tensor<4xi64>}>
      // CHECK: %[[ARG_TRANSPOSE:[0-9]*]] = "tf.Transpose"(%arg0, %[[ARG_PERM]])
    
    Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Mon Oct 30 06:52:55 UTC 2023 - 4.5K bytes - Viewed (0)
  10. tensorflow/compiler/mlir/tensorflow/tests/einsum.mlir

      // CHECK: return %[[v2]] : tensor<2x5x3xf32>
    }
    
    func.func @einsum_transposereduceddim(%arg0: tensor<2x5x7xf32>, %arg1: tensor<2x5x3x7xf32>) -> tensor<2x5x3xf32> {
      %0 = "tf.Einsum"(%arg0, %arg1) {T = "tfdtype$DT_FLOAT", equation = "bij,binj->bin"}: (tensor<2x5x7xf32>, tensor<2x5x3x7xf32>) -> tensor<2x5x3xf32>
      func.return %0 : tensor<2x5x3xf32>
      // CHECK-LABEL: einsum_transposereduceddim
    Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri Jan 05 18:35:42 UTC 2024 - 25.9K bytes - Viewed (0)