Results 1 - 10 of 16 for MAX_POOL_2D (0.12 sec)
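All of these hits refer to the same TensorFlow Lite operator: MAX_POOL_2D is the builtin operator code (value 17 in the flatbuffer schemas of results 8 and 9), and in the MLIR TFLite dialect it appears as the op "tfl.max_pool_2d". For orientation, here is a minimal sketch of the op modeled on the excerpts below; the function name and shapes are illustrative and do not come from any of the listed files:

    func.func @max_pool_example(%arg0: tensor<1x4x4x1xf32>) -> tensor<1x2x2x1xf32> {
      // A 2x2 window with stride 2 and VALID padding halves the spatial dims (4x4 -> 2x2)
      // and leaves the batch and channel dims untouched.
      %0 = "tfl.max_pool_2d"(%arg0) {filter_height = 2 : i32, filter_width = 2 : i32, fused_activation_function = "NONE", padding = "VALID", stride_h = 2 : i32, stride_w = 2 : i32} : (tensor<1x4x4x1xf32>) -> tensor<1x2x2x1xf32>
      func.return %0 : tensor<1x2x2x1xf32>
    }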

  1. tensorflow/compiler/mlir/lite/tests/quantize-numeric-verify.mlir

    // MODEL-DEBUG: %[[q_out1:.*]] = "tfl.average_pool_2d"(%[[q1]])
    // MODEL-DEBUG: "tfl.NumericVerify"(%[[q_out1]], %[[f_out1]])
    // MODEL-DEBUG: %[[f_out2:.*]] = "tfl.max_pool_2d"(%[[f_out1]])
    // MODEL-DEBUG: %[[q_out2:.*]] = "tfl.max_pool_2d"(%[[q_out1]])
    // MODEL-DEBUG: "tfl.NumericVerify"(%[[q_out2]], %[[f_out2]])
    }
    
    // MODEL-DEBUG-LABEL: CheckNumericVerifyWholeModelNoQuantizeOps
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 15.1K bytes
  2. tensorflow/compiler/mlir/lite/experimental/tac/tests/target-annotation.mlir

      func.return %0 : tensor<256x30x30x16xf32>
    }
    
    // -----
    
    func.func @testMaxPool(%arg0: tensor<256x32x32x3xf32>) -> tensor<256x30x30x16xf32> {
      // CHECK: tac.device = "GPU", tac.inference_type = "FLOAT"
      %0 = "tfl.max_pool_2d"(%arg0) {filter_height = 3 : i32, filter_width = 3 : i32, fused_activation_function = "NONE", padding = "VALID", stride_h = 1 : i32, stride_w = 1 : i32} : (tensor<256x32x32x3xf32>) -> tensor<256x30x30x16xf32>
    - Last Modified: Fri May 19 19:32:06 UTC 2023
    - 6.2K bytes
  3. tensorflow/compiler/mlir/lite/tests/get-arithmetic-count.mlir

      func.return %0 : tensor<10x10xf32>
    }
    
    func.func @testMaxPool2D(tensor<1x10x10x3xf32>) -> tensor<1x10x10x3xf32> {
    ^bb0(%arg0: tensor<1x10x10x3xf32>):
      // CHECK: _arithmetic_count = 2700 : i64
      %0 = "tfl.max_pool_2d"(%arg0) {filter_height = 3 : i32, filter_width = 3 : i32, fused_activation_function = "RELU6", padding = "SAME", stride_h = 1 : i32, stride_w = 1 : i32} : (tensor<1x10x10x3xf32>) -> tensor<1x10x10x3xf32>
    - Last Modified: Wed Dec 14 04:58:17 UTC 2022
    - 7.7K bytes
  4. tensorflow/compiler/mlir/lite/experimental/tac/hardwares/gpu_hardware.cc

          Create);
    
    // Currently used for these ops:
    // tfl.Abs / tfl.Average_pool_2d / tfl.Cos / tfl.div / tfl.exp / tfl.hardswish /
    // tfl.log / tfl.logistic / tfl.max_pool_2d / tfl.mirror_pad / tfl.maximum /
    // tfl.custom / tfl.mean / tfl.minimum / tfl.pad / tfl.pow / tfl.prelu /
    // tfl.relu / tfl.relu6 / tfl.rsqrt / tfl.sin / tfl.slice / tfl.softmax /
    - Last Modified: Tue Jun 06 03:08:33 UTC 2023
    - 7.8K bytes
  5. tensorflow/compiler/mlir/lite/stablehlo/tests/uniform-quantized-stablehlo-to-tfl.mlir

    }
    
    // CHECK-LABEL: reduce_window_not_4d
    // CHECK: stablehlo.reduce_window
    // CHECK-NOT: tfl.max_pool_2d
    
    // -----
    
    // Tests that a quantized `stablehlo.reduce_window` with max that takes multiple
    // inputs is not converted to `tfl.max_pool_2d`.
    
    func.func @reduce_window_not_binary(
      %arg0: tensor<3x2x9x10x3x!quant.uniform<i8:f32, 3.000000e-01:-5>>,
    - Last Modified: Tue May 14 17:10:32 UTC 2024
    - 106.2K bytes
  6. tensorflow/compiler/mlir/lite/tests/quantize.mlir

      %1 = "tfl.max_pool_2d"(%0) {filter_height = 1 : i32, filter_width = 1 : i32, fused_activation_function = "RELU6", padding = "SAME", stride_h = 1 : i32, stride_w = 1 : i32} : (tensor<1x6x6x16xf32>) -> tensor<1x1x1x16xf32>
      func.return %1 : tensor<1x1x1x16xf32>
    
    - Last Modified: Tue May 28 23:10:13 UTC 2024
    - 39.7K bytes
  7. tensorflow/compiler/mlir/lite/tests/ops.mlir

    func.func @testMaxPool2D(tensor<256x32x32x3xf32>) -> tensor<?xf32> {
    ^bb0(%arg0: tensor<256x32x32x3xf32>):
      // CHECK: "tfl.max_pool_2d"(%arg0) <{filter_height = 1 : i32, filter_width = 1 : i32, fused_activation_function = "RELU6", padding = "SAME", stride_h = 1 : i32, stride_w = 1 : i32}> : (tensor<256x32x32x3xf32>) -> tensor<?xf32>
    - Last Modified: Thu Jun 06 19:09:08 UTC 2024
    - 189.2K bytes
  8. tensorflow/compiler/mlir/lite/tests/flatbuffer2mlir/test_schema.fbs

      FLOOR = 8,
      FULLY_CONNECTED = 9,
      HASHTABLE_LOOKUP = 10,
      L2_NORMALIZATION = 11,
      L2_POOL_2D = 12,
      LOCAL_RESPONSE_NORMALIZATION = 13,
      LOGISTIC = 14,
      LSH_PROJECTION = 15,
      LSTM = 16,
      MAX_POOL_2D = 17,
      MUL = 18,
      RELU = 19,
      // NOTE(aselle): RELU_N1_TO_1 used to be called RELU1, but it was renamed
      // since different model developers use RELU1 in different ways. Never
      // create another op called RELU1.
    - Last Modified: Mon Apr 19 19:46:06 UTC 2021
    - 26.1K bytes
  9. tensorflow/compiler/mlir/lite/schema/schema_v3b.fbs

      FLOOR = 8,
      FULLY_CONNECTED = 9,
      HASHTABLE_LOOKUP = 10,
      L2_NORMALIZATION = 11,
      L2_POOL_2D = 12,
      LOCAL_RESPONSE_NORMALIZATION = 13,
      LOGISTIC = 14,
      LSH_PROJECTION = 15,
      LSTM = 16,
      MAX_POOL_2D = 17,
      MUL = 18,
      RELU = 19,
      // NOTE(aselle): RELU_N1_TO_1 used to be called RELU1, but it was renamed
      // since different model developers use RELU1 in different ways. Never
      // create another op called RELU1.
    - Last Modified: Tue May 28 14:28:27 UTC 2024
    - 30K bytes
  10. tensorflow/compiler/mlir/lite/tests/prepare-quantize.mlir

    func.func @NotRequantizeAlreadyQuantizedModel(%arg0: tensor<1x73x73x64x!quant.uniform<u8:f32, 1.0>>, %arg1: tensor<1x147x147x96x!quant.uniform<u8:f32, 2.0>>) -> tensor<1x73x73x160x!quant.uniform<u8:f32, 1.0>> {
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 67.5K bytes
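A note on the count in result 3: with SAME padding and stride 1, the 1x10x10x3 input yields a 1x10x10x3 output, i.e. 1 * 10 * 10 * 3 = 300 elements, and each element is the max of a 3x3 window, so 300 * 9 = 2700, matching the expected _arithmetic_count. (This reading is inferred from the excerpt; the exact counting convention is defined by the pass that get-arithmetic-count.mlir exercises.)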