Results 1 - 10 of 10 for 1x112x112x6xf32 (0.23 sec)

  1. tensorflow/compiler/mlir/lite/tests/get-arithmetic-count.mlir

      func.return %0 : tensor<?x32x32x16xf32>
    }
    
    func.func @testDepthwiseConv2D(tensor<1x112x112x3xf32>, tensor<1x3x3x32xf32>, tensor<32xf32>) -> tensor<1x112x112x32xf32> {
    ^bb0(%arg0: tensor<1x112x112x3xf32>, %arg1: tensor<1x3x3x32xf32>, %arg2: tensor<32xf32>):
      // CHECK: _arithmetic_count = 7626752 : i64
    - Last Modified: Wed Dec 14 04:58:17 UTC 2022
    - 7.7K bytes
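    The _arithmetic_count value in this snippet is consistent with a cost model of two ops (a multiply and an add) per 3x3 filter tap plus one bias add for each output element; the cost model is an assumption here, only the 7626752 figure comes from the test. A quick check of that arithmetic in Python:

    # Recompute the _arithmetic_count from the depthwise conv shapes above,
    # assuming (2 * Kh * Kw + 1) ops per output element.
    out_n, out_h, out_w, out_c = 1, 112, 112, 32   # tensor<1x112x112x32xf32>
    kernel_h, kernel_w = 3, 3                      # tensor<1x3x3x32xf32> filter

    output_elements = out_n * out_h * out_w * out_c              # 401408
    total_ops = output_elements * (2 * kernel_h * kernel_w + 1)  # 19 ops each
    print(total_ops)  # 7626752, matching the CHECK line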
  2. tensorflow/compiler/mlir/tensorflow/tests/layout_optimization_move_transposes_end.mlir

             exponential_avg_factor = 1.0 : f32,
             is_training = false
           }
            : (tensor<1x112x112x64xf32>, tensor<64xf32>, tensor<64xf32>, tensor<64xf32>, tensor<64xf32>)
           -> (tensor<1x112x112x64xf32>, tensor<64xf32>, tensor<64xf32>, tensor<64xf32>, tensor<64xf32>, tensor<64xf32>)
    
      func.return %2#0 : tensor<1x112x112x64xf32>
    }
    
    // CHECK-LABEL: func @fold_into_pad_with_extra_uses
    - Last Modified: Mon Oct 30 06:52:55 UTC 2023
    - 9.5K bytes
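    For context, with is_training = false a fused batch-norm op like the one in this snippet normalizes each channel with its stored moving statistics. A minimal NumPy sketch of that per-channel computation; only the 1x112x112x64 / 64-channel shapes come from the snippet, the parameter values and epsilon are illustrative assumptions:

    import numpy as np

    # Inference-mode batch norm over the channel axis of an NHWC tensor.
    x = np.random.randn(1, 112, 112, 64).astype(np.float32)
    gamma = np.ones(64, dtype=np.float32)      # scale
    beta = np.zeros(64, dtype=np.float32)      # offset
    mean = np.zeros(64, dtype=np.float32)      # moving mean
    variance = np.ones(64, dtype=np.float32)   # moving variance
    epsilon = 1e-3                             # assumed, not from the test

    y = gamma * (x - mean) / np.sqrt(variance + epsilon) + beta
    print(y.shape)  # (1, 112, 112, 64)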
  3. tensorflow/compiler/mlir/lite/stablehlo/tests/optimize_layout.mlir

    // CHECK-SAME:          %[[INPUT:.*]]: tensor<1x112x112x64xf32>,
    // CHECK-SAME:          %[[PAD_VAL:.*]]: tensor<f32>) -> tensor<1x64x114x114xf32> {
    // CHECK:           %[[PAD:.*]] = stablehlo.pad %[[INPUT]], %[[PAD_VAL]],
    // CHECK:               low = [0, 1, 1, 0], high = [0, 1, 1, 0], interior = [0, 0, 0, 0]
    // CHECK:               : (tensor<1x112x112x64xf32>, tensor<f32>) -> tensor<1x114x114x64xf32>
    - Last Modified: Thu Apr 25 21:59:06 UTC 2024
    - 2.8K bytes
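    The result shape of that pad, 1x114x114x64, follows from the stablehlo.pad rule padded_dim = low + dim + (dim - 1) * interior + high applied per dimension. A small check:

    # Recompute the stablehlo.pad result shape from the CHECK lines above.
    def padded_shape(shape, low, high, interior):
        return [l + d + (d - 1) * i + h
                for d, l, h, i in zip(shape, low, high, interior)]

    print(padded_shape([1, 112, 112, 64],        # tensor<1x112x112x64xf32> input
                       low=[0, 1, 1, 0],
                       high=[0, 1, 1, 0],
                       interior=[0, 0, 0, 0]))   # -> [1, 114, 114, 64]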
  4. tensorflow/compiler/mlir/lite/tests/default_quant_params.mlir

        %7 = "tfl.quantize"(%6) {qtype = tensor<1x112x112x32x!quant.uniform<u8:f32, 1.0>>} : (tensor<1x112x112x32xf32>) -> tensor<1x112x112x32x!quant.uniform<u8:f32, 1.0>>
        func.return %7 : tensor<1x112x112x32x!quant.uniform<u8:f32, 1.0>>
    
    // CHECK: %[[conv:.*]] = "tfl.conv_2d"(%arg0, %arg1, %arg2)
    // CHECK-SAME: -> tensor<1x112x112x32x!quant.uniform<u8:f32, 0.0078431372549019607:128>>
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 8.8K bytes
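    The quantization parameters in that CHECK line (scale 0.0078431372549019607, zero point 128) are what an unsigned 8-bit affine quantizer produces for a float range of [-1.0, 1.0]; the range is an assumption about the pass's defaults, only the scale and zero point come from the test. A sketch of the derivation:

    # Derive u8 affine quantization parameters from a float range,
    # assuming a default range of [-1.0, 1.0].
    qmin, qmax = 0, 255            # u8 storage range
    rmin, rmax = -1.0, 1.0         # assumed default float range

    scale = (rmax - rmin) / (qmax - qmin)    # 2 / 255
    zero_point = round(qmin - rmin / scale)  # 128

    print(scale)       # 0.00784313725490196...
    print(zero_point)  # 128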
  5. tensorflow/compiler/mlir/lite/experimental/tac/tests/target-annotation.mlir

      func.return %0 : tensor<1x112x112x32xf32>
    }
    
    // -----
    
    func.func @testAvgPool(%arg0: tensor<256x32x32x3xf32>) -> tensor<256x30x30x16xf32> {
      // CHECK: tac.device = "GPU", tac.inference_type = "FLOAT"
    - Last Modified: Fri May 19 19:32:06 UTC 2023
    - 6.2K bytes
  6. tensorflow/compiler/mlir/tensorflow/tests/tf_optimize.mlir

    // RUN: tf-opt %s -tf-optimize | FileCheck %s
    
    // CHECK-LABEL: @fuseMulIntoConv2d
    func.func @fuseMulIntoConv2d(%arg0: tensor<1x112x112x3xf32>) -> tensor<1x28x23x2xf32> {
      %cst0 = arith.constant dense<[[[[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]], [[7.0, 8.0], [9.0, 10.0], [11.0, 12.0]], [[13.0, 14.0], [15.0, 16.0], [17.0, 18.0]]]]> : tensor<1x3x3x2xf32>
      %cst2 = arith.constant dense<[1.0, 2.0]> : tensor<2xf32>
    - Last Modified: Mon Oct 30 06:52:55 UTC 2023
    - 9.5K bytes
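    The fuseMulIntoConv2d test exercises folding a per-channel multiply into the convolution that produces its operand: because convolution is linear in the filter, scaling the filter's output-channel axis (and the bias, if present) gives the same result. A NumPy sketch of that identity, reusing the 1x3x3x2 filter constant from the snippet; the Mul itself is cut off in the excerpt, so treating the [1.0, 2.0] constant as the multiplier is an assumption:

    import numpy as np

    # Fold a per-output-channel multiplier into a tf.Conv2D filter
    # (HWIO layout: the trailing axis is the output-channel axis).
    filt = np.arange(1.0, 19.0, dtype=np.float32).reshape(1, 3, 3, 2)  # %cst0
    mul = np.array([1.0, 2.0], dtype=np.float32)                       # assumed multiplier

    fused_filter = filt * mul  # broadcasts over the output-channel axis
    print(fused_filter.shape)  # (1, 3, 3, 2)
    # conv(x, filt) * mul == conv(x, fused_filter) for any input x.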
  7. tensorflow/compiler/mlir/lite/tests/mlir2flatbuffer/depthwise_conv2d.mlir

      func.return %3 : tensor<1x112x112x32xf32>
    - Last Modified: Thu Jul 14 16:41:28 UTC 2022
    - 9K bytes
  8. tensorflow/compiler/mlir/lite/tests/mlir2flatbuffer/depthwise_conv2d_v2.mlir

      func.return %3 : tensor<1x112x112x32xf32>
    - Last Modified: Thu Jul 14 16:41:28 UTC 2022
    - 9.1K bytes
  9. tensorflow/compiler/mlir/quantization/stablehlo/tests/passes/prepare_quantize/prepare_quantize.mlir

      func.return  %3, %6 : tensor<?xf32>, tensor<?xf32>
    }
    
    // -----
    
    // CHECK-LABEL: func @skip_nan_inf_constant
    // CHECK-SAME: (%[[ARG_0:.*]]: tensor<?x112x112x64xf32>) -> tensor<?x56x56x64xf32>
    func.func @skip_nan_inf_constant(%arg0: tensor<?x112x112x64xf32>) -> tensor<?x56x56x64xf32> {
      // CHECK-DAG: %[[cst0:.*]] = stablehlo.constant dense<0xFF800000> : tensor<f32>
    - Last Modified: Thu Feb 22 19:52:06 UTC 2024
    - 8.7K bytes
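    The hex literal in that CHECK-DAG line, 0xFF800000, is the IEEE-754 single-precision bit pattern for negative infinity, which is the case the skip_nan_inf_constant test name refers to. A quick decode:

    import struct

    # 0xFF800000 is the f32 bit pattern for -inf.
    bits = 0xFF800000
    value = struct.unpack('>f', bits.to_bytes(4, 'big'))[0]
    print(value)  # -inf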
  10. tensorflow/compiler/mlir/tensorflow/g3doc/space_to_depth.md

                   %filter: tensor<7x7x3x64xf32>) {
         %6 = "tf.Conv2D"(%input, %filter)  {strides = [1, 2, 2, 1]}:
                     (tensor<2x230x230x3xf32>, tensor<7x7x3x64xf32>) ->
          tensor<2x112x112x64xf32>
       }
    }
    
    // With this pass, the program will be transformed into:
    module {
       func @while_body {
         %input = "tf.IteratorGetNext"(...) {device = "/CPU:0"}
                   -> tensor<2x224x224x3xf32>
    - Last Modified: Sat Oct 24 02:51:43 UTC 2020
    - 8.3K bytes
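    The shapes in that doc fragment are internally consistent: a 7x7 filter applied with stride 2 to the 230x230 (padded) input yields floor((230 - 7) / 2) + 1 = 112 positions per spatial dimension, i.e. the tensor<2x112x112x64xf32> result. A small sanity check:

    # Conv output size for the space_to_depth doc example: 7x7 filter,
    # stride 2, 230x230 padded input -> 112x112 spatial output.
    def conv_out_dim(in_dim, kernel, stride):
        return (in_dim - kernel) // stride + 1

    print(conv_out_dim(230, 7, 2))  # 112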