Results 1 - 10 of 101 for conv2 (0.04 sec)

  1. tensorflow/compiler/mlir/lite/tests/prepare-quantize.mlir

      %conv2 = "tfl.conv_2d"(%4, %5, %cst) {dilation_h_factor = 1 : i32, dilation_w_factor = 1 : i32, fused_activation_function = "NONE", padding = "SAME", stride_h = 2 : i32, stride_w = 2 : i32} : (tensor<1x112x112x32xf32>, tensor<32x3x3x3xf32>, tensor<32xf32>) -> tensor<1x56x56x32xf32>
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 67.5K bytes
    - Viewed (0)
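
    A quick sanity check on the result type in this excerpt (not part of the file): with "SAME" padding the output spatial size depends only on the input size and stride, so stride 2 maps 112 to 56, matching the tensor<1x56x56x32xf32> result type.

        import math

        # SAME padding: out = ceil(in / stride), independent of the kernel size.
        in_h, stride_h = 112, 2
        assert math.ceil(in_h / stride_h) == 56  # matches the 1x56x56x32 result type above
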
  2. tensorflow/compiler/mlir/lite/tests/prepare-quantize-signed.mlir

      %b2 = arith.constant dense<[1.0e-2, 2.1473647e1, -2.1473647e2]> : tensor<3xf32>
      %conv = "tfl.conv_2d"(%0, %w, %b) {
        dilation_h_factor = 1 : i32, dilation_w_factor = 1 : i32, fused_activation_function = "RELU",
        padding = "SAME", stride_h = 1 : i32, stride_w = 1 : i32
      } : (tensor<1x5x5x2xf32>, tensor<3x1x1x2xf32>, tensor<3xf32>) -> tensor<1x5x5x3xf32>
      %conv2 = "tfl.conv_2d"(%0, %w, %b2) {
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 18.4K bytes
    - Viewed (0)
  3. tensorflow/compiler/mlir/tensorflow/tests/optimize.mlir

    // CHECK-DAG: %[[cst:.*]] = "tf.Const{{.*}} dense<8.000000e+00> : tensor<3x3x3x16xf32>
    // CHECK-DAG: %[[cst_0:.*]] = "tf.Const{{.*}} dense<1.200000e+01> : tensor<16xf32>
    // CHECK-NEXT: %[[conv:.*]] = "tf.Conv2D"(%arg0, %[[cst]])
    // CHECK-NEXT: %[[bias:.*]] = "tf.AddV2"(%[[conv]], %[[cst_0]])
    // CHECK-NEXT: return %[[bias]] : tensor<256x8x7x16xf32>
    }
    
    // CHECK-LABEL: convaddv2mul
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Fri Jan 05 18:35:42 UTC 2024
    - 3.3K bytes
    - Viewed (0)
  4. tensorflow/compiler/mlir/quantization/tensorflow/tests/prepare_lifting.mlir

    // CHECK: %[[CONV2D:.*]] = "tf.Conv2D"(%arg0, %[[CONST]]) <{data_format = "NHWC", dilations = [1, 1, 2, 1], explicit_paddings = [], padding = "SAME", strides = [1, 1, 2, 1], use_cudnn_on_gpu = true}> : (tensor<1x3x4x3xf32>, tensor<2x3x3x2xf32>) -> tensor<1x3x2x2xf32>
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed Feb 14 03:24:59 UTC 2024
    - 33.3K bytes
    - Viewed (0)
  5. tensorflow/compiler/mlir/quantization/tensorflow/tests/add_dump_tensor_op.mlir

        %1 = "tf.PartitionedCall"(%arg0, %cst, %cst_0) {_tfl_quant_trait = "fully_quantizable", config = "", config_proto = "", executor_type = "", f = @composite_conv2d_with_bias_and_relu6_fn_1} : (tensor<1x2x2x3xf32>, tensor<2x2x3x2xf32>, tensor<2xf32>) -> tensor<*xf32> loc(callsite("test@conv"("Conv2D_1") at "QuantizationUnit(\12\08Conv2D_1\1a\04conv)"))
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Fri Mar 22 22:55:22 UTC 2024
    - 37.9K bytes
    - Viewed (0)
  6. tensorflow/compiler/mlir/quantization/tensorflow/tests/quantize_composite_functions_drq.mlir

        %conv = "tf.Conv2D"(%arg0, %arg1) {attr_map = "0:strides,1:use_cudnn_on_gpu,2:padding,3:explicit_paddings,4:dilations", data_format = "NHWC", device = "", dilations = [1, 2, 2, 1], explicit_paddings = [], padding = "SAME", strides = [1, 1, 2, 1], use_cudnn_on_gpu = true} : (tensor<1x2x2x3xf32>, tensor<2x3x3x2xf32>) -> tensor<*xf32>
        return %conv : tensor<*xf32>
      }
    
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Fri Jan 05 18:35:42 UTC 2024
    - 9.8K bytes
    - Viewed (0)
  7. tensorflow/compiler/mlir/quantization/tensorflow/tests/quantize.mlir

      %conv = "tf.Conv2D"(%dq_input, %dq_weight) {attr_map = "0:strides,1:use_cudnn_on_gpu,2:padding,3:explicit_paddings,4:dilations", data_format = "NHWC", device = "", dilations = [1, 1, 1, 1], explicit_paddings = [], padding = "VALID", strides = [1, 1, 2, 1], use_cudnn_on_gpu = true} : (tensor<1x3x4x3xf32>, tensor<2x3x3x2xf32>) -> tensor<*xf32>
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed May 08 19:32:28 UTC 2024
    - 6.4K bytes
    - Viewed (0)
  8. tensorflow/compiler/mlir/lite/tests/end2end/fake_quant_per_channel_4bit.pbtxt

        key: "narrow_range"
        value {
          b: true
        }
      }
      attr {
        key: "num_bits"
        value {
          i: 4
        }
      }
    }
    node {
      name: "BoxPredictor_4/ClassPredictor/Conv2D"
      op: "Conv2D"
      input: "input"
      input: "BoxPredictor_4/ClassPredictor/weights_quant/FakeQuantWithMinMaxVarsPerChannel"
      attr {
        key: "T"
        value {
          type: DT_FLOAT
        }
      }
      attr {
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 18.1K bytes
    - Viewed (0)
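
    The node above feeds Conv2D weights through a 4-bit, narrow-range, per-channel fake-quant. A minimal, self-contained sketch of the same pattern at the Python level (shapes and values are assumptions; only num_bits=4 and narrow_range=True come from the excerpt):

        import tensorflow as tf

        x = tf.random.normal([1, 32, 32, 3])      # NHWC activation (assumed shape)
        w = tf.random.normal([3, 3, 3, 16])       # HWIO weights (assumed shape)

        # Per-output-channel ranges for the weight fake-quant.
        w_min = tf.reduce_min(w, axis=[0, 1, 2])
        w_max = tf.reduce_max(w, axis=[0, 1, 2])
        w_fq = tf.quantization.fake_quant_with_min_max_vars_per_channel(
            w, w_min, w_max, num_bits=4, narrow_range=True)
        y = tf.nn.conv2d(x, w_fq, strides=[1, 1, 1, 1], padding='SAME')
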
  9. tensorflow/compiler/mlir/quantization/tensorflow/python/integration_test/quantize_model_test_base.py

            # One pure conv
            out = nn_ops.conv2d(
                out,
                self.conv_filters,
                strides=(1, 1, 2, 1),
                dilations=(1, 1, 1, 1),
                padding='SAME',
                data_format='NHWC',
            )
    
            # One fakequant attached conv
            if is_qat_model:
              out = array_ops.fake_quant_with_min_max_args(
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Mar 21 08:51:46 UTC 2024
    - 51.2K bytes
    - Viewed (0)
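
    The excerpt builds a plain conv and, when is_qat_model is set, attaches a fake-quant to it. A self-contained sketch of that pattern using the public TF API (shapes and quantization range are assumptions, not taken from the file):

        import tensorflow as tf

        x = tf.random.normal([1, 8, 8, 3])         # NHWC input (assumed shape)
        filters = tf.random.normal([3, 3, 3, 2])   # HWIO weights (assumed shape)

        # One pure conv, with the strides/dilations/padding used in the excerpt.
        out = tf.nn.conv2d(x, filters, strides=(1, 1, 2, 1),
                           dilations=(1, 1, 1, 1), padding='SAME', data_format='NHWC')

        # One fake-quant attached to the conv output, as a QAT model would insert.
        out = tf.quantization.fake_quant_with_min_max_args(out, min=-6.0, max=6.0, num_bits=8)
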
  10. tensorflow/compiler/mlir/quantization/common/quantization_lib/quantization.td

             (tensor<64x3x3x3x!quant.uniform<i8<-127:127>:f32, 1.000000e+00>>) ->
             tensor<64x3x3x3xf32>
        %conv = "tfl.conv_2d"(%input_act, %w, %bias)
    
        but if it is supported, it will be rewritten as:
    
        %q_w = "tfl.pseudo_qconst"() {
             qtype = tensor<64x3x3x3x!quant.uniform<i8<-127:127>:f32, 1.000000e+00>>
        %conv = "tfl.conv_2d"(%input_act, %q_w, %bias)
    
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue Mar 05 07:39:40 UTC 2024
    - 8.3K bytes
    - Viewed (0)
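
    The qtype i8<-127:127> in this excerpt denotes symmetric, narrow-range int8 weights. An illustrative NumPy sketch of that numeric scheme (the weight shape and per-tensor scale choice are assumptions, not from the file):

        import numpy as np

        w = np.random.randn(64, 3, 3, 3).astype(np.float32)            # assumed OHWI weight shape
        scale = np.abs(w).max() / 127.0                                 # symmetric per-tensor scale
        q_w = np.clip(np.round(w / scale), -127, 127).astype(np.int8)   # narrow range [-127, 127]
        w_dequant = q_w.astype(np.float32) * scale                      # what a dequantize op would recover
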