Results 1 - 10 of 11 for 2x5x1x7xf32 (0.24 sec)

  1. tensorflow/compiler/mlir/tensorflow/tests/einsum.mlir

      // CHECK: %[[v0:.*]] = "tf.Transpose"(%arg1, %[[cst]]) : (tensor<2x5x3x7xf32>, tensor<4xi32>) -> tensor<2x5x7x3xf32>
      // CHECK: %[[v1:.*]] = "tf.Reshape"(%arg0, %[[cst_1]]) : (tensor<2x5x7xf32>, tensor<4xi64>) -> tensor<2x5x1x7xf32>
      // CHECK: %[[v2:.*]] = "tf.BatchMatMulV2"(%[[v1]], %[[v0]]) <{adj_x = false, adj_y = false}> : (tensor<2x5x1x7xf32>, tensor<2x5x7x3xf32>) -> tensor<2x5x1x3xf32>
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Fri Jan 05 18:35:42 UTC 2024
    - 25.9K bytes
    - Viewed (0)
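    The CHECK lines above verify an einsum-to-BatchMatMulV2 decomposition (transpose one operand, reshape the other, then batch-multiply). As a minimal sketch only, here is the kind of input such a test might exercise; the equation "abc,abdc->abd" and the function name are assumptions inferred from the shapes in the CHECK lines, not taken from the file:

      func.func @einsum_example(%arg0: tensor<2x5x7xf32>, %arg1: tensor<2x5x3x7xf32>) -> tensor<2x5x3xf32> {
        // Contract the last axis of %arg0 with the last axis of %arg1; the pass is
        // expected to rewrite this as Transpose + Reshape + BatchMatMulV2 (+ Reshape).
        %0 = "tf.Einsum"(%arg0, %arg1) {equation = "abc,abdc->abd"} : (tensor<2x5x7xf32>, tensor<2x5x3x7xf32>) -> tensor<2x5x3xf32>
        func.return %0 : tensor<2x5x3xf32>
      }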
  2. tensorflow/compiler/mlir/quantization/stablehlo/tests/passes/quantize/quantize_op_with_region.mlir

        %12 = "quantfork.qcast"(%11) {volatile} : (tensor<2x3x1x3xf32>) -> tensor<2x3x1x3x!quant.uniform<i8:f32, 3.000000e-01:1>>
        %13 = "quantfork.dcast"(%12) : (tensor<2x3x1x3x!quant.uniform<i8:f32, 3.000000e-01:1>>) -> tensor<2x3x1x3xf32>
        return %13 : tensor<2x3x1x3xf32>
      }
    
      // CHECK: quantized_dot_general_fn_1
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 18 20:32:46 UTC 2024
    - 18.9K bytes
    - Viewed (0)
  3. tensorflow/compiler/mlir/quantization/stablehlo/tests/passes/convert_func_to_bfloat16.mlir

          stablehlo.return %2 : tensor<f32>
      }) {padding = dense<[[0, 0], [1, 1], [1, 1], [0, 0]]> : tensor<4x2xi64>, window_dimensions = array<i64: 1, 3, 3, 1>} : (tensor<2x3x1x3xf32>, tensor<f32>) -> tensor<2x3x1x3xf32>
      return %1 : tensor<2x3x1x3xf32>
    }
    
    // -----
    
    // CHECK-LABEL: @bitcast_convert_i32_f32(%arg0: tensor<1x256128xi32>) -> tensor<1x256128xbf16>
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Feb 08 22:40:14 UTC 2024
    - 6K bytes
    - Viewed (0)
  4. tensorflow/compiler/mlir/lite/tests/end2end/unroll_batch_matmul.pbtxt

    # CHECK:           %[[VAL_12:.*]] = "tfl.pack"(%[[VAL_10]], %[[VAL_11]]) <{axis = 0 : i32, values_count = 2 : i32}> : (tensor<5x7xf32>, tensor<5x7xf32>) -> tensor<2x5x7xf32>
    # CHECK:           return %[[VAL_12]] : tensor<2x5x7xf32>
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 2.6K bytes
    - Viewed (0)
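    The packed 2x5x7 value in the CHECK lines above is the tail end of unrolling a batched matmul into per-batch 2-D matmuls. The original test is a GraphDef (.pbtxt); purely as an illustration, an equivalent TF-dialect input would look roughly like the sketch below, where the 2x5x3 and 2x3x7 operand shapes are assumptions chosen only to be consistent with the 2x5x7 output:

      func.func @unroll_example(%lhs: tensor<2x5x3xf32>, %rhs: tensor<2x3x7xf32>) -> tensor<2x5x7xf32> {
        // One batched matmul over a batch of 2; unrolling splits it into two 5x3 * 3x7
        // matmuls and re-packs the 5x7 results along axis 0, as in the CHECK lines above.
        %0 = "tf.BatchMatMul"(%lhs, %rhs) {adj_x = false, adj_y = false} : (tensor<2x5x3xf32>, tensor<2x3x7xf32>) -> tensor<2x5x7xf32>
        func.return %0 : tensor<2x5x7xf32>
      }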
  5. tensorflow/compiler/mlir/quantization/stablehlo/tests/bridge/optimize.mlir

      return %2 : tensor<?x2x2x1xi8>
    }
    
    // -----
    
    // CHECK-LABEL: func @convolution_add_add_f32
    func.func @convolution_add_add_f32(
        %lhs: tensor<?x3x2x1xf32>, %rhs: tensor<2x1x1x1xf32>,
        %zp_offset: tensor<?x2x2x1xf32>, %bias: tensor<1xf32>
      ) -> tensor<?x2x2x1xf32> {
      // CHECK-DAG: %[[conv:.*]] = mhlo.convolution
      // CHECK-DAG: %[[combined:.*]] = chlo.broadcast_add %[[conv:.*]], %[[zp_offset:.*]]
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Sat Feb 24 02:26:47 UTC 2024
    - 10.7K bytes
    - Viewed (0)
  6. tensorflow/compiler/mlir/tf2xla/tests/legalize-tf.mlir

    // CHECK:           })
    // CHECK-SAME:        -> tensor<2x4x6x7xf32>
    // CHECK:           %[[RESULT:.*]] = mhlo.divide %[[DIVIDEND]], %[[DIVISOR]] : tensor<2x4x6x7xf32>
    // CHECK:           return %[[RESULT]] : tensor<2x4x6x7xf32>
    // CHECK:         }
    func.func @avgpool_same_padding(%arg0: tensor<2x12x21x7xf32>) -> tensor<2x4x6x7xf32> {
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Mon May 06 18:46:23 UTC 2024
    - 335.5K bytes
    - Viewed (0)
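    The DIVIDEND/DIVISOR CHECK lines above reflect how a SAME-padded tf.AvgPool is legalized: a windowed sum divided element-wise by the per-position window size, since padded positions do not count toward the average. A rough sketch of an op with the same signature follows; the ksize and strides values are assumptions chosen only to be consistent with the 2x12x21x7 -> 2x4x6x7 shapes, not taken from the file:

      func.func @avgpool_same_padding(%arg0: tensor<2x12x21x7xf32>) -> tensor<2x4x6x7xf32> {
        // SAME padding over an NHWC input; in the lowering the divisor varies near the
        // borders because padded positions are excluded from the count.
        %0 = "tf.AvgPool"(%arg0) {data_format = "NHWC", ksize = [1, 3, 4, 1], padding = "SAME", strides = [1, 3, 4, 1]} : (tensor<2x12x21x7xf32>) -> tensor<2x4x6x7xf32>
        func.return %0 : tensor<2x4x6x7xf32>
      }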
  7. tensorflow/compiler/mlir/lite/stablehlo/tests/composite-lowering.mlir

      return %0 : tensor<1x1x1x2xf32>
    }
    func.func private @XlaCallModule_aten.avg_pool2d.default.impl_5(%arg0: tensor<1x1x1x7xf32>) -> tensor<1x1x1x2xf32>
    
    // CHECK-LABEL: avg_pool2d_6
    // CHECK: %cst = arith.constant dense<[0, 2, 3, 1]> : tensor<4xi32>
    // CHECK: %0 = "tfl.transpose"(%arg0, %cst) : (tensor<1x1x1x7xf32>, tensor<4xi32>) -> tensor<1x1x7x1xf32>
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Jun 06 18:45:51 UTC 2024
    - 32.6K bytes
    - Viewed (0)
  8. tensorflow/compiler/mlir/tf2xla/tests/legalize-tf-with-tf2xla-hlo-importer.mlir

        func.return %result : tensor<10x24x24x64xf32>
      }
    
      // CHECK-LABEL: @max_pool_grad_same
      func.func @max_pool_grad_same(%orig_input: tensor<2x13x25x7xf32>, %orig_output: tensor<2x4x7x7xf32>, %grad: tensor<2x4x7x7xf32>) -> tensor<2x13x25x7xf32> {
        // CHECK: padding = dense<{{\[\[}}0, 0], [0, 1], [1, 1], [0, 0]]> : tensor<4x2xi64>
        %result = "tf.MaxPoolGrad"(%orig_input, %orig_output, %grad) {
          data_format = "NHWC",
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Sat Apr 06 15:32:52 UTC 2024
    - 38.6K bytes
    - Viewed (0)
  9. tensorflow/compiler/mlir/lite/tests/legalize-tf.mlir

    }
    
    func.func @gatherWithBatchDims(%arg0 : tensor<2x3x6xf32>, %arg1 : tensor<2x5xi32>) -> tensor<2x5x3x6xf32> {
      %0 = "tf.Const"() { value = dense<[1]> : tensor<1xi32> } : () -> tensor<1xi32>
      %1 = "tf.GatherV2"(%arg0, %arg1, %0) {batch_dims = 1 : i64} : (tensor<2x3x6xf32>, tensor<2x5xi32>, tensor<1xi32>) -> tensor<2x5x3x6xf32>
      func.return %1 : tensor<2x5x3x6xf32>
    
    // CHECK-LABEL:gatherWithBatchDims
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed Jun 05 01:54:33 UTC 2024
    - 153.4K bytes
    - Viewed (0)
  10. tensorflow/compiler/mlir/lite/stablehlo/tests/legalize_hlo.mlir

    // CHECK:           %[[VAL_12:.*]] = "tf.Reshape"(%[[VAL_10]], %[[VAL_11]]) : (tensor<3x5x4xf32>, tensor<4xi64>) -> tensor<3x5x1x4xf32>
    // CHECK:           return %[[VAL_12]] : tensor<3x5x1x4xf32>
    // CHECK:         }
    func.func @convert_dot_general(%arg0: tensor<3x2x6x5x1xf32>, %arg1: tensor<3x2x4x6xf32>) -> tensor<3x5x1x4xf32> {
      %0 = "mhlo.dot_general"(%arg0, %arg1) {
        dot_dimension_numbers = #mhlo.dot<
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed May 29 07:26:59 UTC 2024
    - 340.2K bytes
    - Viewed (0)