Results 11 - 20 of 153 for conv_2d (0.23 sec)

  1. tensorflow/compiler/mlir/lite/experimental/tac/tests/target-annotation.mlir

    func.func @testConv(%arg0: tensor<256x32x32x3xf32>, %arg1: tensor<16x3x3x3xf32>, %arg2: tensor<16xf32>) -> tensor<256x30x30x16xf32> {
      // CHECK: tac.device = "GPU", tac.inference_type = "FLOAT"
    - Last Modified: Fri May 19 19:32:06 UTC 2023
    - 6.2K bytes
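
    The CHECK line above verifies that the target-annotation pass in TAC tags the convolution with a device and an inference type. As a rough sketch (not the file's exact contents; the stride and padding attribute values here are assumed for illustration), the annotated op inside @testConv would look something like:

      %0 = "tfl.conv_2d"(%arg0, %arg1, %arg2) {
             dilation_h_factor = 1 : i32, dilation_w_factor = 1 : i32,
             fused_activation_function = "NONE", padding = "VALID",
             stride_h = 1 : i32, stride_w = 1 : i32,
             tac.device = "GPU", tac.inference_type = "FLOAT"
           } : (tensor<256x32x32x3xf32>, tensor<16x3x3x3xf32>, tensor<16xf32>) -> tensor<256x30x30x16xf32>
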
  2. tensorflow/compiler/mlir/lite/tests/default_quant_params.mlir

        %1 = "tfl.dequantize"(%arg1) : (tensor<32x3x3x3x!quant.uniform<u8<1:255>:f32, 1.0>>) -> tensor<32x3x3x3xf32>
        %2 = "tfl.dequantize"(%arg2) : (tensor<32x!quant.uniform<i32:f32, 1.0>>) -> tensor<32xf32>
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 8.8K bytes
  3. tensorflow/compiler/mlir/lite/tests/flatbuffer2mlir/empty_input_output_names.json

    // If input and output tensors don't have names, there shouldn't be an
    // `tf.entry_function` attribute created.
    // CHECK-NOT: tf.entry_function
    
    {
      "version": 3,
      "operator_codes": [
        {
          "builtin_code": "CONV_2D"
        }
      ],
      "subgraphs": [
        {
          "tensors": [
            {
              "shape": [
                256,
                32,
                32,
                3
              ],
              "quantization": {
    - Last Modified: Thu May 11 21:03:18 UTC 2023
    - 1.4K bytes
  4. tensorflow/compiler/mlir/lite/tests/mlir2flatbuffer/quantization.mlir

    // CHECK-NEXT:    version: 1,
    // CHECK-NEXT:    builtin_code: QUANTIZE
    // CHECK-NEXT:  }, {
    // CHECK-NEXT:    deprecated_builtin_code: 3,
    // CHECK-NEXT:    version: 1,
    // CHECK-NEXT:    builtin_code: CONV_2D
    // CHECK-NEXT:  }, {
    // CHECK-NEXT:    deprecated_builtin_code: 22,
    // CHECK-NEXT:    version: 1,
    // CHECK-NEXT:    builtin_code: RESHAPE
    // CHECK-NEXT:  }, {
    // CHECK-NEXT:    deprecated_builtin_code: 25,
    - Last Modified: Thu Jul 14 16:41:28 UTC 2022
    - 11.9K bytes
  5. tensorflow/compiler/mlir/lite/tests/prepare-quantize.mlir

      %conv2 = "tfl.conv_2d"(%4, %5, %cst) {dilation_h_factor = 1 : i32, dilation_w_factor = 1 : i32, fused_activation_function = "NONE", padding = "SAME", stride_h = 2 : i32, stride_w = 2 : i32} : (tensor<1x112x112x32xf32>, tensor<32x3x3x3xf32>, tensor<32xf32>) -> tensor<1x56x56x32xf32>
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 67.5K bytes
  6. tensorflow/compiler/mlir/lite/tests/quantize-numeric-verify.mlir

    // DEBUG: %[[act:.*]] = "tfl.dequantize"(%arg0) : (tensor<1x224x224x3x!quant.uniform<u8:f32, 7.812500e-03:128>>) -> tensor<1x224x224x3xf32>
    // DEBUG: %[[f_conv:.*]] = "tfl.conv_2d"(%[[act]], %[[wt]], %[[bias]])
    // DEBUG: %[[q_conv:.*]] = "tfl.conv_2d"
    // DEBUG: "tfl.NumericVerify"(%[[q_conv]], %[[f_conv]]) <{log_if_failed = true, tolerance = 5.000000e+00 : f32}>
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 15.1K bytes
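
    In numeric-verify (debug) mode, the quantization pass keeps a float copy of each quantized op and inserts a tfl.NumericVerify op that compares the two results at runtime against the given tolerance. The DEBUG lines above match IR of roughly this shape (the SSA names and elided operands are illustrative, not the file's exact contents):

      %act    = "tfl.dequantize"(%arg0) : (tensor<1x224x224x3x!quant.uniform<u8:f32, 7.812500e-03:128>>) -> tensor<1x224x224x3xf32>
      %f_conv = "tfl.conv_2d"(%act, %wt, %bias) ...        // float reference path
      %q_conv = "tfl.conv_2d"(%q_act, %q_wt, %q_bias) ...  // quantized path
      "tfl.NumericVerify"(%q_conv, %f_conv) <{log_if_failed = true, tolerance = 5.000000e+00 : f32}> ...
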
  7. tensorflow/compiler/mlir/lite/tests/end2end/BUILD

            ":test_utilities",
        ],
        driver = "@llvm-project//mlir:run_lit.sh",
        size_override = {
            "quant_stats.pbtxt": "medium",
        },
        tags_override = {
            "add.pbtxt": ["no_rocm"],
            "conv_2d.pbtxt": ["no_rocm"],
            "fake_quant_per_channel.pbtxt": ["no_rocm"],
        },
        test_file_exts = [
            "pbtxt",
        ],
    )
    
    # Bundle together all of the test utilities that are used by tests.
    filegroup(
    - Last Modified: Thu Jun 08 15:18:46 UTC 2023
    - 1.2K bytes
  8. tensorflow/compiler/mlir/quantization/common/quantization_lib/quantization_driver_test.cc

        }
        func.func private @composite_fn_1(%arg0: tensor<1x4x4x3xf32>, %arg1: tensor<3x1x1x3xf32>, %arg2: tensor<3xf32>) -> tensor<1x4x4x3xf32> attributes {tf_quant.composite_function} {
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 7.9K bytes
  9. tensorflow/compiler/mlir/quantization/common/quantization_lib/quantization.td

             tensor<64x3x3x3xf32>
        %conv = "tfl.conv_2d"(%input_act, %w, %bias)
    
        but if it is supported, it will be rewritten as:
    
        %q_w = "tfl.pseudo_qconst"() {
             qtype = tensor<64x3x3x3x!quant.uniform<i8<-127:127>:f32, 1.000000e+00>>
        %conv = "tfl.conv_2d"(%input_act, %q_w, %bias)
    
        Note that this is part of reaching feature parity with the old quantizer for
    - Last Modified: Tue Mar 05 07:39:40 UTC 2024
    - 8.3K bytes
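
    The excerpt describes the weight-quantization rewrite: when an op such as tfl.conv_2d supports per-axis (per-channel) quantized weights, the float weight constant feeding it is replaced by a quantized constant. Filling in the elided pieces with generic syntax (the constant form and concrete values are illustrative), the before/after pattern reads roughly:

      // before: float weights feed the convolution directly
      %w    = arith.constant dense<...> : tensor<64x3x3x3xf32>
      %conv = "tfl.conv_2d"(%input_act, %w, %bias)

      // after: weights are stored as a narrow-range, symmetric i8 quantized constant
      %q_w  = "tfl.pseudo_qconst"() {
                qtype = tensor<64x3x3x3x!quant.uniform<i8<-127:127>:f32, 1.000000e+00>>
              }
      %conv = "tfl.conv_2d"(%input_act, %q_w, %bias)
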
  10. tensorflow/compiler/mlir/lite/tests/get-arithmetic-count.mlir

    ^bb0(%arg0: tensor<256x32x32x3xf32>, %arg1: tensor<16x3x3x3xf32>, %arg2: tensor<16xf32>):
      // CHECK: _arithmetic_count = 230686720 : i64
    - Last Modified: Wed Dec 14 04:58:17 UTC 2022
    - 7.7K bytes
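
    The 230686720 figure is consistent with the usual conv_2d cost model of two ops (multiply and add) per MAC plus one add per output element for the bias, assuming the test uses SAME padding so the output is tensor<256x32x32x16xf32>:

      output elements : 256 * 32 * 32 * 16            = 4,194,304
      ops per element : 2 * (3 * 3 * 3) MACs + 1 bias = 55
      total           : 4,194,304 * 55                = 230,686,720
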