Results 1 - 10 of 152 for conv2 (0.04 sec)

  1. test/inline.go

    }
    
    // Ensure OCONVNOP is zero cost.
    func Conv(v uint64) uint64 { // ERROR "can inline Conv"
    	return conv2(conv2(conv2(v))) // ERROR "inlining call to (conv1|conv2)"
    }
    func conv2(v uint64) uint64 { // ERROR "can inline conv2"
    	return conv1(conv1(conv1(conv1(v)))) // ERROR "inlining call to conv1"
    }
    func conv1(v uint64) uint64 { // ERROR "can inline conv1"
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Thu Oct 19 23:33:25 UTC 2023
    - 11.7K bytes
  2. test/newinline.go

    }
    
    // Ensure OCONVNOP is zero cost.
    func Conv(v uint64) uint64 { // ERROR "can inline Conv"
    	return conv2(conv2(conv2(v))) // ERROR "inlining call to (conv1|conv2)"
    }
    func conv2(v uint64) uint64 { // ERROR "can inline conv2"
    	return conv1(conv1(conv1(conv1(v)))) // ERROR "inlining call to conv1"
    }
    func conv1(v uint64) uint64 { // ERROR "can inline conv1"
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Thu Nov 16 20:15:25 UTC 2023
    - 11.2K bytes
  3. tensorflow/compiler/mlir/tensorflow/tests/layout_optimization_to_nhwc.mlir

      %5 = "tf.Conv2D"(%4, %arg3)
            {
              data_format = "NCHW",
              dilations = [1, 1, 1, 1],
              explicit_paddings = [],
              padding = "VALID",
              strides = [1, 1, 2, 2]
            } : (tensor<?x3x230x230xf32>, tensor<7x7x3x64xf32>) -> tensor<?x64x112x112xf32>
    
      // CHECK: %[[CONV0:[0-9]*]] = "tf.Conv2D"
      // CHECK-SAME: %[[PAD]]
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Mon Oct 30 06:52:55 UTC 2023
    - 7.3K bytes
  4. tensorflow/compiler/mlir/lite/tests/prepare-quantize.mlir

      %conv2 = "tfl.conv_2d"(%4, %5, %cst) {dilation_h_factor = 1 : i32, dilation_w_factor = 1 : i32, fused_activation_function = "NONE", padding = "SAME", stride_h = 2 : i32, stride_w = 2 : i32} : (tensor<1x112x112x32xf32>, tensor<32x3x3x3xf32>, tensor<32xf32>) -> tensor<1x56x56x32xf32>
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 67.5K bytes
  5. tensorflow/compiler/mlir/lite/tests/prepare-quantize-signed.mlir

      %b2 = arith.constant dense<[1.0e-2, 2.1473647e1, -2.1473647e2]> : tensor<3xf32>
      %conv = "tfl.conv_2d"(%0, %w, %b) {
        dilation_h_factor = 1 : i32, dilation_w_factor = 1 : i32, fused_activation_function = "RELU",
        padding = "SAME", stride_h = 1 : i32, stride_w = 1 : i32
      } : (tensor<1x5x5x2xf32>, tensor<3x1x1x2xf32>, tensor<3xf32>) -> tensor<1x5x5x3xf32>
      %conv2 = "tfl.conv_2d"(%0, %w, %b2) {
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 18.4K bytes
  6. tensorflow/compiler/mlir/lite/tests/dilated-conv.mlir

      // CHECK-NEXT: [[CONV:%.*]] = "tf.Conv2D"([[INPUT]], [[FILTER]]) <{dilations = [1, 2, 2, 1], padding = "SAME", strides = [1, 1, 1, 1]}> : (tensor<1x128x128x3xf32>, tensor<5x5x3x8xf32>) -> tensor<1x128x128x8xf32>
      // CHECK-NEXT: [[RESULT:%.*]] = "tf.BiasAdd"([[CONV]], [[BIAS]]) : (tensor<1x128x128x8xf32>, tensor<8xf32>) -> tensor<1x128x128x8xf32>
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Mon Oct 30 06:52:55 UTC 2023
    - 44.7K bytes
  7. tensorflow/compiler/mlir/tensorflow/tests/optimize.mlir

    // CHECK-DAG: %[[cst:.*]] = "tf.Const{{.*}} dense<8.000000e+00> : tensor<3x3x3x16xf32>
    // CHECK-DAG: %[[cst_0:.*]] = "tf.Const{{.*}} dense<1.200000e+01> : tensor<16xf32>
    // CHECK-NEXT: %[[conv:.*]] = "tf.Conv2D"(%arg0, %[[cst]])
    // CHECK-NEXT: %[[bias:.*]] = "tf.AddV2"(%[[conv]], %[[cst_0]])
    // CHECK-NEXT: return %[[bias]] : tensor<256x8x7x16xf32>
    }
    
    // CHECK-LABEL: convaddv2mul
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Fri Jan 05 18:35:42 UTC 2024
    - 3.3K bytes
  8. tensorflow/compiler/mlir/quantization/tensorflow/tests/cast_bf16_ops_to_f32.mlir

    // CHECK: %[[cast:.*]] = "tf.Cast"(%[[cst]]) <{Truncate = false}> : (tensor<2x3x3x2xbf16>) -> tensor<2x3x3x2xf32>
    // CHECK: %[[conv:.*]] = "tf.Conv2D"(%arg0, %[[cast]])
    // CHECK: %[[identity:.*]] = "tf.IdentityN"(%[[conv]]) {device = ""} : (tensor<1x3x2x2xf32>) -> tensor<1x3x2x2xf32>
    // CHECK: return %[[identity]] : tensor<1x3x2x2xf32>
    
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Mon Oct 30 06:52:55 UTC 2023
    - 8.4K bytes
  9. tensorflow/compiler/mlir/lite/quantization/tensorflow/tests/tf_to_quant_4bit.mlir

    // CHECK: %[[DEQUANTIZE:.*]] = "quantfork.dcast"(%[[QUANTIZE]])
    // CHECK: %[[CONV:.*]] = "tf.Conv2D"(%arg0, %[[DEQUANTIZE]])
    // CHECK: return %[[CONV]]
    }
    
    // CHECK-LABEL: perChannelFakeQuantWithConv2D
    func.func @perChannelFakeQuantWithConv2D(tensor<256x32x32x3xf32>) -> (tensor<256x8x7x16xf32>) {
    ^bb0(%arg: tensor<256x32x32x3xf32>) :
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Mon Oct 30 06:52:55 UTC 2023
    - 9.4K bytes
  10. tensorflow/compiler/mlir/quantization/tensorflow/tests/prepare_lifting.mlir

    // CHECK: %[[CONV2D:.*]] = "tf.Conv2D"(%arg0, %[[CONST]]) <{data_format = "NHWC", dilations = [1, 1, 2, 1], explicit_paddings = [], padding = "SAME", strides = [1, 1, 2, 1], use_cudnn_on_gpu = true}> : (tensor<1x3x4x3xf32>, tensor<2x3x3x2xf32>) -> tensor<1x3x2x2xf32>
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed Feb 14 03:24:59 UTC 2024
    - 33.3K bytes
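
The Go excerpts in results 1 and 2 test that a no-op conversion (OCONVNOP, e.g. converting a uint64 to uint64) adds nothing to a function's inlining cost, so the deeply nested conversion chain still inlines all the way up into Conv. The sketch below is illustrative, not the test file itself: conv1's body is truncated in the excerpts and is filled in here with stand-in no-op conversions. Building it with go build -gcflags=-m makes the compiler print its inlining decisions ("can inline ...", "inlining call to ..."), which is what the test's ERROR directives check for.

    // inlinesketch.go -- a minimal sketch, not the actual test/inline.go.
    // Build with:  go build -gcflags=-m inlinesketch.go
    // to see the compiler report its inlining decisions.
    package main

    // Conv mirrors the excerpt: three calls to conv2, each itself
    // inlinable, so the whole chain flattens into Conv.
    func Conv(v uint64) uint64 {
    	return conv2(conv2(conv2(v)))
    }

    func conv2(v uint64) uint64 {
    	return conv1(conv1(conv1(conv1(v))))
    }

    // conv1's body is truncated in the search excerpt; nested uint64
    // conversions stand in for it here, since a conversion between
    // identical types is an OCONVNOP and costs nothing toward the
    // inlining budget.
    func conv1(v uint64) uint64 {
    	return uint64(uint64(uint64(uint64(v))))
    }

    func main() {
    	_ = Conv(42)
    }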
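
For the Conv2D shapes in results 3, 4, and 10, the output sizes follow TensorFlow's standard padding arithmetic: with padding "VALID" the output spatial size is floor((in - filter) / stride) + 1, and with padding "SAME" it is ceil(in / stride), independent of the filter size. The Go sketch below is illustrative only (it is not taken from any of the listed files) and just reproduces the spatial sizes shown in the excerpts.

    // convshapes.go -- a minimal sketch of Conv2D output-size arithmetic.
    package main

    import "fmt"

    // validOut: padding "VALID" -> out = floor((in - filter) / stride) + 1
    func validOut(in, filter, stride int) int {
    	return (in-filter)/stride + 1
    }

    // sameOut: padding "SAME" -> out = ceil(in / stride)
    // (independent of filter size and dilation)
    func sameOut(in, stride int) int {
    	return (in + stride - 1) / stride
    }

    func main() {
    	// Result 3: NCHW input ?x3x230x230, 7x7 filter, strides [1, 1, 2, 2], VALID
    	// -> spatial size 112, matching the ?x64x112x112 result type.
    	fmt.Println(validOut(230, 7, 2)) // 112

    	// Result 4: 1x112x112x32 input, stride 2, SAME -> 1x56x56x32.
    	fmt.Println(sameOut(112, 2)) // 56

    	// Result 10: NHWC input 1x3x4x3, strides [1, 1, 2, 1], SAME -> 1x3x2x2.
    	fmt.Println(sameOut(3, 1), sameOut(4, 2)) // 3 2
    }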