Results 1 - 10 of 31 for 1x3x3x3xf32 (0.15 sec)

  1. tensorflow/compiler/mlir/tensorflow/tests/layout_optimization_layout_assignment_to_nchw.mlir

             padding = "EXPLICIT",
             strides = [5, 6, 7, 8]
           } : (tensor<1x32x32x3xf32>, tensor<1x1x3x8xf32>) -> tensor<1x7x7x8xf32>
    
      func.return %0 : tensor<1x7x7x8xf32>
    }
    
    // CHECK-LABEL: func @transposeConv2DWithDefaultAttr
    func.func @transposeConv2DWithDefaultAttr(%input: tensor<1x32x32x3xf32>, %filter: tensor<1x1x3x8xf32>) -> tensor<?x?x?x?xf32>
    {
    
    - Last Modified: Mon Oct 30 06:52:55 UTC 2023
    - 9K bytes
  2. tensorflow/compiler/mlir/tensorflow/tests/layout_optimization_layout_assignment_to_nhwc.mlir

    // dilations, etc...). This test only verifies that changing convolution data
    // layout will update all the attributes.
    
    // CHECK-LABEL: func @transposeConv2D
    func.func @transposeConv2D(%input: tensor<1x3x32x32xf32>, %filter: tensor<1x1x3x8xf32>) -> tensor<1x8x7x6xf32> {
    
      // CHECK: %[[ARG_PERM:.*]] = "tf.Const"() <{value = dense<[0, 2, 3, 1]> : tensor<4xi64>}>
      // CHECK: %[[ARG_TRANSPOSE:[0-9]*]] = "tf.Transpose"(%arg0, %[[ARG_PERM]])
    
    - Last Modified: Mon Oct 30 06:52:55 UTC 2023
    - 4.5K bytes
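
    The permutation constant dense<[0, 2, 3, 1]> checked in result 2 is what maps an NCHW tensor to NHWC. A minimal NumPy sketch of that data-layout transpose (illustrative only; the pass rewrites the IR rather than moving data):

    import numpy as np

    # NCHW input shaped like tensor<1x3x32x32xf32> from the test.
    x_nchw = np.zeros((1, 3, 32, 32), dtype=np.float32)

    # Permutation [0, 2, 3, 1]: batch stays first, channels move last.
    x_nhwc = np.transpose(x_nchw, (0, 2, 3, 1))
    assert x_nhwc.shape == (1, 32, 32, 3)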
  3. tensorflow/compiler/mlir/quantization/tensorflow/tests/cast_bf16_ops_to_f32.mlir

    // CHECK: return %[[identity]] : tensor<1x3x2x2xf32>
    
    func.func @cast_bf16_avg_pool_to_fp32(%arg0: tensor<1x3x4x3xf32>) -> (tensor<1x3x2x2xf32>) {
      %cst = "tf.Const"() {device = "", value = dense<1.000000e+00> : tensor<2x3x3x2xbf16>} : () -> tensor<2x3x3x2xbf16>
      %0 = "tf.Cast"(%arg0) {Truncate = false, device = ""} : (tensor<1x3x4x3xf32>) -> tensor<1x3x4x3xbf16>
    - Last Modified: Mon Oct 30 06:52:55 UTC 2023
    - 8.4K bytes
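
    The pattern in result 3 casts an f32 argument down to bf16, runs the op, and casts back; the pass removes the round trip so the graph computes in f32. A small TensorFlow sketch of just that round trip (a generic illustration, not the test's exact graph):

    import tensorflow as tf

    x = tf.random.normal([1, 3, 4, 3], dtype=tf.float32)

    # Round trip that the pass eliminates: f32 -> bf16 -> f32.
    x_bf16 = tf.cast(x, tf.bfloat16)
    x_back = tf.cast(x_bf16, tf.float32)

    # bfloat16 keeps the f32 exponent range but only a 7-bit mantissa,
    # so the round trip changes precision, not range.
    print(tf.reduce_max(tf.abs(x - x_back)))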
  4. tensorflow/compiler/mlir/lite/experimental/tac/tests/target-annotation.mlir

    func.func @testConv(%arg0: tensor<256x32x32x3xf32>, %arg1: tensor<16x3x3x3xf32>, %arg2: tensor<16xf32>) -> tensor<256x30x30x16xf32> {
      // CHECK: tac.device = "GPU", tac.inference_type = "FLOAT"
      %0 = "tfl.conv_2d"(%arg0, %arg1, %arg2) {dilation_h_factor = 1 : i32, dilation_w_factor = 1 : i32, fused_activation_function = "NONE", padding = "VALID", stride_h = 1 : i32, stride_w = 1 : i32} : (tensor<256x32x32x3xf32>, tensor<16x3x3x3xf32>, tensor<16xf32>) -> tensor<256x30x30x16xf32>
    - Last Modified: Fri May 19 19:32:06 UTC 2023
    - 6.2K bytes
  5. tensorflow/compiler/mlir/lite/tests/get-arithmetic-count.mlir

      func.return %0 : tensor<256x32x32x16xf32>
    }
    
    func.func @testConv2DDynamicShape(tensor<?x32x32x3xf32>, tensor<16x3x3x3xf32>, tensor<16xf32>) -> tensor<?x32x32x16xf32> {
    ^bb0(%arg0: tensor<?x32x32x3xf32>, %arg1: tensor<16x3x3x3xf32>, %arg2: tensor<16xf32>):
      // CHECK: _arithmetic_count = -1 : i64
    - Last Modified: Wed Dec 14 04:58:17 UTC 2022
    - 7.7K bytes
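
    The _arithmetic_count = -1 check in result 5 reflects that the op count cannot be computed once a dimension is dynamic. A back-of-the-envelope sketch of the kind of estimate involved for the static 256x32x32x3 / 16x3x3x3 convolution in the same file (a generic MAC count, not necessarily the pass's exact formula):

    # Each output element of a conv needs kh * kw * cin multiply-adds.
    def conv2d_op_estimate(out_shape, kernel_hw, in_channels):
        # Mirror the test: an unknown dimension makes the count undefined.
        if any(d is None for d in out_shape):
            return -1
        n, h, w, cout = out_shape
        kh, kw = kernel_hw
        return 2 * n * h * w * cout * kh * kw * in_channels  # 2 ops per MAC

    print(conv2d_op_estimate((256, 32, 32, 16), (3, 3), 3))   # static case
    print(conv2d_op_estimate((None, 32, 32, 16), (3, 3), 3))  # dynamic -> -1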
  6. tensorflow/compiler/mlir/tensorflow/tests/layout_optimization_move_transposes_begin.mlir

    func.func @dont_move_transpose_different_ranks(%arg0:tensor<1x1x2x3xf32>, %arg1:tensor<2x3xf32>) -> tensor<1x2x1x3xf32> {
      %cst = "tf.Const"() {value = dense<[0, 2, 1, 3]> : tensor<4xi32>} : () -> tensor<4xi32>
      %0 = "tf.AddV2"(%arg0, %arg1) {device = ""} : (tensor<1x1x2x3xf32>, tensor<2x3xf32>) -> tensor<1x1x2x3xf32>
      %1 = "tf.Transpose"(%0, %cst) {device = ""} : (tensor<1x1x2x3xf32>, tensor<4xi32>) -> tensor<1x2x1x3xf32>
    
      func.return %1 : tensor<1x2x1x3xf32>
    
    - Last Modified: Mon Oct 30 06:52:55 UTC 2023
    - 6.3K bytes
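
    Result 6 keeps the transpose where it is because the tf.AddV2 operands have different ranks (4 and 2), so the permutation cannot simply be hoisted above the broadcasting add. The shapes involved, sketched in NumPy:

    import numpy as np

    a = np.zeros((1, 1, 2, 3), dtype=np.float32)
    b = np.zeros((2, 3), dtype=np.float32)

    # Broadcasting add: (1, 1, 2, 3) + (2, 3) -> (1, 1, 2, 3).
    s = a + b

    # Transpose with permutation [0, 2, 1, 3]: (1, 1, 2, 3) -> (1, 2, 1, 3).
    t = np.transpose(s, (0, 2, 1, 3))
    assert t.shape == (1, 2, 1, 3)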
  7. tensorflow/compiler/mlir/tensorflow/tests/layout_optimization_to_nchw.mlir

    // CHECK-LABEL: func @transposeConv2D
    func.func @transposeConv2D(%arg0: tensor<1x3x32x32xf32>, %arg1: tensor<1x1x3x8xf32>) -> tensor<1x8x32x32xf32> {
    
      // Convert input: NCHW -> NHWC
      %0 = "tf.Const"() {value = dense<[0, 2, 3, 1]> : tensor<4xi32>} : () -> tensor<4xi32>
      %1 = "tf.Transpose"(%arg0, %0) : (tensor<1x3x32x32xf32>, tensor<4xi32>) -> tensor<1x32x32x3xf32>
    
      // Compute in NHWC
      %2 = "tf.Conv2D"(%1, %arg1)
        {
    - Last Modified: Thu Mar 24 05:47:26 UTC 2022
    - 1.3K bytes
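
    Result 7 shows the transpose-compute-transpose sandwich the pass produces: the NCHW input is permuted to NHWC, the convolution runs in NHWC, and later in the file the result is permuted back. A hedged TensorFlow sketch of the same structure (the stride and padding values here are illustrative, not taken from the test):

    import tensorflow as tf

    x_nchw = tf.random.normal([1, 3, 32, 32])         # like tensor<1x3x32x32xf32>
    filt = tf.random.normal([1, 1, 3, 8])             # HWIO filter, 1x1x3x8

    x_nhwc = tf.transpose(x_nchw, perm=[0, 2, 3, 1])  # NCHW -> NHWC
    y_nhwc = tf.nn.conv2d(x_nhwc, filt, strides=1, padding="SAME",
                          data_format="NHWC")
    y_nchw = tf.transpose(y_nhwc, perm=[0, 3, 1, 2])  # NHWC -> NCHW

    assert y_nchw.shape == (1, 8, 32, 32)             # like tensor<1x8x32x32xf32>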
  8. tensorflow/compiler/mlir/quantization/stablehlo/tests/passes/quantize_composite_functions_weight_only.mlir

        return %1 : tensor<1x3x4x2xf32>
      }
    
      func.func private @composite_conv_fn(%arg0: tensor<1x3x4x3xf32>, %arg1: tensor<2x3x3x2xf32>) -> tensor<1x3x4x2xf32> attributes {_from_xla_call_module} {
    - Last Modified: Thu May 09 05:56:10 UTC 2024
    - 9.4K bytes
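
    Result 8 exercises weight-only quantization of a composite convolution: only the filter is stored in a low-bit form and is dequantized back to float before the convolution runs. A generic per-tensor int8 sketch of that idea (not the exact scheme or pass behavior of the StableHLO quantizer):

    import numpy as np

    w = np.random.randn(2, 3, 3, 2).astype(np.float32)   # filter like 2x3x3x2

    # Symmetric per-tensor int8 quantization of the weights only.
    scale = np.abs(w).max() / 127.0
    w_q = np.clip(np.round(w / scale), -127, 127).astype(np.int8)

    # At inference the weights are dequantized before the float conv.
    w_dq = w_q.astype(np.float32) * scale
    print("max abs weight error:", np.abs(w - w_dq).max())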
  9. tensorflow/compiler/mlir/quantization/stablehlo/tests/passes/optimize_graph.mlir

      func.return %dequant : tensor<1x3x4x2xf32>
    }
    
    // -----
    
    // CHECK-LABEL: @dont_merge_quantization_followed_by_quantization
    // CHECK-SAME: %[[ARG_0:.*]]: tensor<1x3x4x3xf32>
    func.func @dont_merge_quantization_followed_by_quantization(%arg0: tensor<1x3x4x3xf32>) -> tensor<1x3x4x3xf32> {
      // CHECK: %[[QUANT_ARG_0:.*]] = stablehlo.uniform_quantize %[[ARG_0]]
    - Last Modified: Thu Feb 08 22:40:14 UTC 2024
    - 2.6K bytes
  10. tensorflow/compiler/mlir/quantization/stablehlo/tests/passes/prepare_quantize/prepare_quantize_per_channel.mlir

        %2 = stablehlo.convolution(%1, %0)
          dim_numbers = [b, 0, 1, f]x[0, 1, i, o]->[b, 0, 1, f],
          window = {
            stride = [1, 1], pad = [[0, 0], [1, 1]],
            lhs_dilate = [1, 1],
            rhs_dilate = [1, 1]
          }
          {
            batch_group_count = 1 : i64,
            feature_group_count = 1 : i64
          } : (tensor<1x3x2x3xf32>, tensor<2x3x3x2xf32>)
          -> tensor<1x2x2x2xf32>
    - Last Modified: Tue Mar 26 07:48:15 UTC 2024
    - 8.6K bytes
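
    The result type in result 10 follows from the usual convolution shape arithmetic: with a 1x3x2x3 input in [b, 0, 1, f] layout, a 2x3x3x2 filter in [0, 1, i, o] layout, stride 1, and padding [[0, 0], [1, 1]], each spatial output dimension is (in + pad_lo + pad_hi - kernel) / stride + 1, giving 1x2x2x2. A small sketch of that calculation:

    def conv_out_dim(in_size, kernel, stride, pad_lo, pad_hi, dilation=1):
        # Effective kernel extent with dilation, then the standard formula.
        eff_k = (kernel - 1) * dilation + 1
        return (in_size + pad_lo + pad_hi - eff_k) // stride + 1

    h = conv_out_dim(3, 2, 1, 0, 0)   # spatial dim 0 of tensor<1x3x2x3xf32>
    w = conv_out_dim(2, 3, 1, 1, 1)   # spatial dim 1
    print((1, h, w, 2))               # (1, 2, 2, 2), i.e. tensor<1x2x2x2xf32>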