Results 81 - 90 of 178 for conv_2d (0.14 sec)

  1. tensorflow/compiler/mlir/tensorflow/tests/layout_optimization_layout_assignment_to_nhwc.mlir

      // CHECK: %[[ARG_PERM:.*]] = "tf.Const"() <{value = dense<[0, 2, 3, 1]> : tensor<4xi64>}>
      // CHECK: %[[ARG_TRANSPOSE:[0-9]*]] = "tf.Transpose"(%arg0, %[[ARG_PERM]])
    
      // CHECK: %[[CONV2D:[0-9]*]] = "tf.Conv2D"(%[[ARG_TRANSPOSE]], %arg1)
      // CHECK-SAME: data_format = "NHWC"
      // CHECK-SAME: dilations = [1, 3, 4, 2]
      // CHECK-SAME: explicit_paddings = [1, 2, 5, 6, 7, 8, 3, 4]
      // CHECK-SAME: padding = "EXPLICIT"
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Mon Oct 30 06:52:55 UTC 2023
    - 4.5K bytes
    - Viewed (0)
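
    The CHECK lines above verify that the layout pass inserts a tf.Transpose with permutation [0, 2, 3, 1] in front of tf.Conv2D when assigning the NHWC layout. A minimal sketch of what that permutation does, on a made-up NCHW tensor (shapes here are illustrative, not from the test):

      import tensorflow as tf

      # Hypothetical NCHW input: batch=1, channels=3, height=4, width=5.
      x_nchw = tf.random.normal([1, 3, 4, 5])

      # The permutation [0, 2, 3, 1] from the CHECK lines maps NCHW -> NHWC.
      x_nhwc = tf.transpose(x_nchw, perm=[0, 2, 3, 1])

      print(x_nhwc.shape)  # (1, 4, 5, 3): channels moved to the last axis
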
  2. tensorflow/compiler/jit/tests/keras_imagenet_main.pbtxt

          type: DT_INT32
        }
      }
      attr {
        key: "keep_dims"
        value {
          b: false
        }
      }
    }
    node {
      name: "conv1_1/Conv2D"
      op: "Conv2D"
      input: "conv1_pad_1/Pad"
      input: "conv1_1/Conv2D/Cast"
      device: "/job:localhost/replica:0/task:0/device:GPU:0"
      attr {
        key: "T"
        value {
          type: DT_HALF
        }
      }
      attr {
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu May 30 02:52:54 UTC 2019
    - 1.3M bytes
    - Viewed (0)
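
    The GraphDef node above runs Conv2D in half precision (attr "T" = DT_HALF) on a GPU device. A small sketch of how that attribute follows from the operand dtype, using tracing rather than execution so it also runs without a GPU (shapes are made up; the real model's tensors are not in the snippet):

      import tensorflow as tf

      @tf.function
      def conv(x, w):
          return tf.nn.conv2d(x, w, strides=[1, 2, 2, 1], padding="VALID")

      # Hypothetical float16 inputs standing in for "conv1_pad_1/Pad" and
      # "conv1_1/Conv2D/Cast" above.
      spec_x = tf.TensorSpec([1, 230, 230, 3], tf.float16)
      spec_w = tf.TensorSpec([7, 7, 3, 64], tf.float16)

      graph_def = conv.get_concrete_function(spec_x, spec_w).graph.as_graph_def()
      for node in graph_def.node:
          if node.op == "Conv2D":
              print(node.name, node.attr["T"])  # attr "T" is type: DT_HALF
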
  3. tensorflow/compiler/mlir/lite/stablehlo/transforms/fuse_convolution_pass.cc

            mul_op.getLoc(), conv_op.getType(), conv_op.getLhs(), new_filter,
            conv_op.getWindowStridesAttr(), conv_op.getPaddingAttr(),
            conv_op.getLhsDilationAttr(), conv_op.getRhsDilationAttr(),
            conv_op.getWindowReversalAttr(), conv_op.getDimensionNumbers(),
            conv_op.getFeatureGroupCount(), conv_op.getBatchGroupCount(),
            conv_op.getPrecisionConfigAttr());
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Feb 22 22:21:19 UTC 2024
    - 8.3K bytes
    - Viewed (0)
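
    The pass above rebuilds the convolution with a new_filter so that a following multiplication is folded into the weights. The identity it relies on is that a per-output-channel multiply commutes with the convolution; a quick numerical check with assumed shapes:

      import numpy as np
      import tensorflow as tf

      x = tf.random.normal([1, 8, 8, 3])          # NHWC input (assumed shape)
      w = tf.random.normal([3, 3, 3, 4])          # HWIO filter, 4 output channels
      scale = tf.constant([0.5, 1.0, 2.0, 4.0])   # per-output-channel multiplier

      # conv(x, w) * scale == conv(x, w * scale), which is why the multiply
      # can be fused into the new filter.
      y1 = tf.nn.conv2d(x, w, strides=1, padding="SAME") * scale
      y2 = tf.nn.conv2d(x, w * scale, strides=1, padding="SAME")
      print(np.allclose(y1.numpy(), y2.numpy(), atol=1e-4))  # True
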
  4. tensorflow/compiler/mlir/quantization/tensorflow/passes/quantized_function_library.mlir

          equation = "",
          attr_map = "equation:0"
        } : (tensor<*xi32>, tensor<*xi32>) -> tensor<*xi32>
    
        func.return %4 : tensor<*xi32>
      }
    
      for main_op in ["Conv2D", "DepthwiseConv2D", "MatMul", "Conv3D", "BatchMatMul", "Einsum"] {
        parameters[
          {"quantized_ops": ["${main_op}", "BiasAdd"], "act_func": "internal_requantize_no_activation_fn", "output_type": "i8"},
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Mon Jan 08 01:16:10 UTC 2024
    - 30.6K bytes
    - Viewed (0)
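
    The library above stamps out int8 ("output_type": "i8") variants of Conv2D, DepthwiseConv2D, MatMul, and the other listed ops. A rough sketch of the affine quantize/dequantize arithmetic such functions implement, with made-up scale and zero point (real values come from calibration):

      import numpy as np

      def quantize(x, scale, zero_point):
          # Affine quantization to signed 8-bit: q = round(x / scale) + zero_point.
          q = np.round(x / scale) + zero_point
          return np.clip(q, -128, 127).astype(np.int8)

      def dequantize(q, scale, zero_point):
          return (q.astype(np.float32) - zero_point) * scale

      x = np.array([-1.0, 0.0, 0.5, 1.0], dtype=np.float32)
      scale, zero_point = 1.0 / 128.0, 0
      q = quantize(x, scale, zero_point)
      print(q, dequantize(q, scale, zero_point))
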
  5. tensorflow/compiler/mlir/quantization/tensorflow/tests/cast_bf16_ops_to_f32.mlir

      %cst = "tf.Const"() {device = "", value = dense_resource<__elided__> : tensor<2x3x3x2xbf16>} : () -> tensor<2x3x3x2xbf16>
      %0 = "tf.Cast"(%arg0) {Truncate = false, device = ""} : (tensor<1x3x4x3xf32>) -> tensor<1x3x4x3xbf16>
      %1 = "tf.Conv2D"(%0, %cst) {data_format = "NHWC", device = "", dilations = [1, 1, 1, 1], explicit_paddings = [], padding = "SAME", strides = [1, 1, 2, 1], use_cudnn_on_gpu = true} : (tensor<1x3x4x3xbf16>, tensor<2x3x3x2xbf16>) -> tensor<1x3x2x2xbf16>
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Mon Oct 30 06:52:55 UTC 2023
    - 8.4K bytes
    - Viewed (0)
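
    The test input above casts an f32 tensor to bf16 and runs tf.Conv2D in bf16; the pass rewrites the graph to compute in f32 instead. The pattern being removed, written in eager TensorFlow with the shapes from the snippet (assumes a TensorFlow build with bf16 Conv2D kernels):

      import tensorflow as tf

      x = tf.random.normal([1, 3, 4, 3])                        # f32 input
      w = tf.cast(tf.random.normal([2, 3, 3, 2]), tf.bfloat16)  # bf16 filter

      # Cast -> bf16 Conv2D, as in the test; the pass changes this to run the
      # convolution in f32 and keep casts only at the boundaries.
      y = tf.nn.conv2d(tf.cast(x, tf.bfloat16), w,
                       strides=[1, 1, 2, 1], padding="SAME")
      print(y.shape, y.dtype)  # (1, 3, 2, 2) bfloat16
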
  6. tensorflow/compiler/mlir/quantization/tensorflow/tests/lift_quantizable_spots_as_functions_xla.mlir

    // -----
    
    func.func @conv_with_non_constant_filter(%arg0: tensor<1x3x4x3xf32>, %arg1: tensor<2x3x3x2xf32>) -> tensor<*xf32> {
      %cst = "tf.Const"() {value = dense<0.000000e+00> : tensor<2xf32>} : () -> tensor<2xf32>
      %0 = "tf.Conv2D"(%arg0, %arg1) {data_format = "NHWC", dilations = [1, 1, 2, 1], explicit_paddings = [], padding = "SAME", strides = [1, 1, 2, 1], use_cudnn_on_gpu = true} : (tensor<1x3x4x3xf32>, tensor<2x3x3x2xf32>) -> tensor<*xf32>
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Mon Oct 30 06:52:55 UTC 2023
    - 8.3K bytes
    - Viewed (0)
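
    The function above covers the case where the Conv2D filter is a function argument rather than a constant, so there is no weight tensor for the quantizer to lift. A minimal eager analogue with the shapes from the snippet (dilations omitted to keep the sketch portable):

      import tensorflow as tf

      def conv_with_non_constant_filter(x, w):
          # Both operands are runtime tensors; nothing here is a constant weight.
          return tf.nn.conv2d(x, w, strides=[1, 1, 2, 1], padding="SAME")

      x = tf.random.normal([1, 3, 4, 3])
      w = tf.random.normal([2, 3, 3, 2])
      print(conv_with_non_constant_filter(x, w).shape)
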
  7. tensorflow/compiler/mlir/quantization/tensorflow/tests/prepare_quantize_drq_per_channel.mlir

    // CHECK: return %0 : tensor<*xf32>
    }
    
    // -----
    
    module {
      func.func @conv2d(%arg0: tensor<1x3x4x512xf32>) -> (tensor<*xf32>) {
        %cst_0 = "tf.Const"() {value = dense<0.000000e+00> : tensor<2xf32>} : () -> tensor<2xf32>
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Mon Oct 30 06:52:55 UTC 2023
    - 6.8K bytes
    - Viewed (0)
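
    This test prepares Conv2D weights for per-channel dynamic-range quantization. A sketch of how per-output-channel int8 scales are typically derived (shapes and the narrow [-127, 127] range are assumptions, not taken from the test):

      import numpy as np

      # Hypothetical HWIO conv filter; per-channel quantization computes one
      # scale per output channel (the last axis of Conv2D weights).
      w = np.random.randn(2, 3, 3, 512).astype(np.float32)

      # Symmetric int8: scale_c = max(|w[..., c]|) / 127, zero point 0.
      scales = np.abs(w).reshape(-1, w.shape[-1]).max(axis=0) / 127.0
      q = np.clip(np.round(w / scales), -127, 127).astype(np.int8)

      print(scales.shape, q.dtype)  # (512,) int8
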
  8. tensorflow/compiler/mlir/tensorflow/tests/mlir2graphdef/fetch_feed_names.mlir

          %outputs_2, %control_3 = tf_executor.island(%control_1) wraps "tf.Const"() {value = dense<0.000000e+00> : tensor<5x5x32x16xf32>} : () -> tensor<5x5x32x16xf32>
          %outputs_4, %control_5 = tf_executor.island wraps "tf.Conv2D"(%outputs, %outputs_2) {data_format = "NHWC", device = "", dilations = [1, 1, 1, 1], explicit_paddings = [], padding = "VALID", strides = [1, 1, 1, 1], use_cudnn_on_gpu = true} : (tensor<*xf32>, tensor<5x5x32x16xf32>) -> tensor<*xf32>
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Fri Mar 25 12:28:56 UTC 2022
    - 3K bytes
    - Viewed (0)
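
    The exported graph above is exercised through its fetch and feed names. A small sketch of feeding and fetching a Conv2D by node name with the tf.compat.v1 Session API (graph and names here are made up; only the 5x5x32x16 filter shape is taken from the snippet):

      import numpy as np
      import tensorflow as tf

      g = tf.Graph()
      with g.as_default():
          x = tf.compat.v1.placeholder(tf.float32, [1, 28, 28, 32], name="input")
          w = tf.zeros([5, 5, 32, 16])
          tf.nn.conv2d(x, w, strides=[1, 1, 1, 1], padding="VALID", name="conv")

      with tf.compat.v1.Session(graph=g) as sess:
          out = sess.run("conv:0",
                         feed_dict={"input:0": np.zeros((1, 28, 28, 32), np.float32)})
          print(out.shape)  # (1, 24, 24, 16)
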
  9. tensorflow/compiler/mlir/lite/quantization/tensorflow/tests/fallback_to_flex_ops_default.mlir

      %cst_0 = "tf.Const"() {value = dense<-1.000000e+00> : tensor<f32>} : () -> tensor<f32>
      %cst_1 = "tf.Const"() {value = dense<1.000000e+00> : tensor<f32>} : () -> tensor<f32>
      %0 = "tf.Conv2D"(%arg0, %cst) {data_format = "NHWC", dilations = [1, 1, 1, 1], explicit_paddings = [], padding = "SAME", strides = [1, 1, 1, 1]} : (tensor<1x3x4x3xf32>, tensor<1x1x3x2xf32>) -> tensor<1x3x4x2xf32>
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 13.4K bytes
    - Viewed (0)
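
    The pass above marks TF ops that have no TFLite builtin so they fall back to Flex (Select TF) ops. At the converter level this corresponds to enabling SELECT_TF_OPS; a sketch with a toy Keras model standing in for one that actually needs the fallback:

      import tensorflow as tf

      model = tf.keras.Sequential([
          tf.keras.layers.Conv2D(2, 1, padding="same", input_shape=(3, 4, 3)),
      ])

      converter = tf.lite.TFLiteConverter.from_keras_model(model)
      # Allow TF ops to be embedded as Flex ops when no builtin kernel exists.
      converter.target_spec.supported_ops = [
          tf.lite.OpsSet.TFLITE_BUILTINS,
          tf.lite.OpsSet.SELECT_TF_OPS,
      ]
      tflite_model = converter.convert()
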
  10. tensorflow/compiler/mlir/tensorflow/transforms/fused_kernel_matcher.cc

    // Performs a fusion of the following pattern(s), if possible:
    //   Conv2D + BiasAdd + <Activation> -> _FusedConv2D
    class FuseConv2DBiasAdd
        : public FuseContractionWithBiasAdd<Conv2DOp, _FusedConv2DOp> {
     public:
      using FuseContractionWithBiasAdd<Conv2DOp,
                                       _FusedConv2DOp>::FuseContractionWithBiasAdd;
      // Verify that the Conv2D and BiasAdd data formats match. This is necessary
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 14.9K bytes
    - Viewed (0)
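
    The matcher above rewrites the Conv2D + BiasAdd + <Activation> chain into a single _FusedConv2D kernel. The unfused pattern it looks for, written out in eager TensorFlow (shapes are illustrative):

      import tensorflow as tf

      x = tf.random.normal([1, 8, 8, 3])
      w = tf.random.normal([3, 3, 3, 16])
      b = tf.random.normal([16])

      # Conv2D + BiasAdd + ReLU: the matcher collapses this chain into one
      # _FusedConv2D call when the data formats of Conv2D and BiasAdd agree.
      y = tf.nn.relu(tf.nn.bias_add(
              tf.nn.conv2d(x, w, strides=1, padding="SAME"), b))
      print(y.shape)  # (1, 8, 8, 16)
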