Results 41 - 50 of 56 for 1x3x3x1xf32 (0.18 sec)

  1. tensorflow/compiler/mlir/quantization/tensorflow/tests/lift_quantizable_spots_as_functions.mlir

    func.func @float_depthwise_conv_no_bias(%arg0: tensor<1x3x4x3xf32>, %arg1: tensor<2x3x3x1xf32>) -> (tensor<*xf32>, tensor<*xf32>, tensor<*xf32>) {
      %0 = "tf.DepthwiseConv2dNative"(%arg0, %arg1) {
        data_format = "NHWC", device = "", dilations = [1, 1, 1, 1], explicit_paddings = [],
        padding = "SAME", strides = [1, 1, 2, 1]
      } : (tensor<1x3x4x3xf32>, tensor<2x3x3x1xf32>) -> tensor<*xf32>
    - Last Modified: Fri May 10 04:07:09 UTC 2024
    - 26.5K bytes
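    The excerpt above is cut off before the function body closes. A minimal, hypothetical sketch of how such a `tf.DepthwiseConv2dNative` call completes into a parseable function (the function name and single-result signature here are assumptions for illustration, not the original test):

    // Hypothetical completion sketch, not the original test body.
    func.func @depthwise_conv_sketch(%arg0: tensor<1x3x4x3xf32>, %arg1: tensor<2x3x3x1xf32>) -> tensor<*xf32> {
      %0 = "tf.DepthwiseConv2dNative"(%arg0, %arg1) {
        data_format = "NHWC", device = "", dilations = [1, 1, 1, 1], explicit_paddings = [],
        padding = "SAME", strides = [1, 1, 2, 1]
      } : (tensor<1x3x4x3xf32>, tensor<2x3x3x1xf32>) -> tensor<*xf32>
      func.return %0 : tensor<*xf32>
    }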
  2. tensorflow/compiler/mlir/tensorflow/tests/layout_optimization_layout_assignment_to_nhwc.mlir

    // dilations, etc...). This test only verifies that changing convolution data
    // layout will update all the attributes.
    
    // CHECK-LABEL: func @transposeConv2D
    func.func @transposeConv2D(%input: tensor<1x3x32x32xf32>, %filter: tensor<1x1x3x8xf32>) -> tensor<1x8x7x6xf32> {
    
      // CHECK: %[[ARG_PERM:.*]] = "tf.Const"() <{value = dense<[0, 2, 3, 1]> : tensor<4xi64>}>
      // CHECK: %[[ARG_TRANSPOSE:[0-9]*]] = "tf.Transpose"(%arg0, %[[ARG_PERM]])
    
    - Last Modified: Mon Oct 30 06:52:55 UTC 2023
    - 4.5K bytes
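    The CHECK lines above verify an NCHW-to-NHWC layout change done with tf.Transpose and the permutation [0, 2, 3, 1]. A minimal standalone sketch of that permutation, assuming an example input shape chosen here for illustration (not taken from the test):

    // NCHW (batch, channels, height, width) -> NHWC via the permutation the test checks.
    func.func @nchw_to_nhwc_sketch(%arg0: tensor<1x3x32x32xf32>) -> tensor<1x32x32x3xf32> {
      %perm = "tf.Const"() {value = dense<[0, 2, 3, 1]> : tensor<4xi64>} : () -> tensor<4xi64>
      %0 = "tf.Transpose"(%arg0, %perm) : (tensor<1x3x32x32xf32>, tensor<4xi64>) -> tensor<1x32x32x3xf32>
      func.return %0 : tensor<1x32x32x3xf32>
    }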
  3. tensorflow/compiler/mlir/lite/experimental/tac/tests/device-transform-gpu.mlir

    // CHECK:           %[[VAL_6:.*]] = "tfl.reshape"(%[[VAL_1]], %[[VAL_2]]) : (tensor<1xf32>, tensor<4xi32>) -> tensor<1x1x1x1xf32>
    // CHECK:           %[[VAL_7:.*]] = "tfl.concatenation"(%[[VAL_5]], %[[VAL_6]]) <{axis = 3 : i32, fused_activation_function = "NONE"}> : (tensor<1x1x1x1xf32>, tensor<1x1x1x1xf32>) -> tensor<1x1x1x2xf32>
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 15.6K bytes
  4. tensorflow/compiler/mlir/lite/experimental/tac/README.md

        %1 = "tfl.reshape"(%arg1, %cst) {tac.device = "GPU", tac.inference_type = "FLOAT"} : (tensor<1xf32>, tensor<4xi32>) -> tensor<1x1x1x1xf32>
        %2 = "tfl.concatenation"(%0, %1) {axis = 3 : i32, fused_activation_function = "NONE", tac.device = "GPU", tac.inference_type = "FLOAT"} : (tensor<1x1x1x1xf32>, tensor<1x1x1x1xf32>) -> tensor<1x1x1x2xf32>
    - Last Modified: Tue Mar 29 18:32:13 UTC 2022
    - 11.6K bytes
  5. tensorflow/compiler/mlir/lite/tests/flatbuffer2mlir/optional_input.json

    // CHECK: return %[[RES0]] : tensor<256x32x32x16xf32>
    
    {
      "version": 3,
      "operator_codes": [
        {
          "builtin_code": "CONV_2D"
        }
      ],
      "subgraphs": [
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 1.8K bytes
  6. tensorflow/compiler/mlir/lite/tests/flatbuffer2mlir/import_json.json

    // CHECK: return %[[RES0]] : tensor<256x32x32x16xf32>
    
    {
      "version": 3,
      "operator_codes": [
        {
          "builtin_code": "CONV_2D"
        }
      ],
      "subgraphs": [
        {
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 1.8K bytes
  7. tensorflow/compiler/mlir/lite/experimental/tac/tests/get-alternative-subgraph.mlir

    // CHECK:           %[[VAL_6:.*]] = "tfl.reshape"(%[[VAL_1]], %[[VAL_2]]) {tac.device = "GPU", tac.inference_type = "FLOAT"} : (tensor<1xf32>, tensor<4xi32>) -> tensor<1x1x1x1xf32>
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 20.1K bytes
  8. tensorflow/compiler/mlir/lite/tests/quantize-dynamic-range.mlir

    // PerTensor-LABEL: QuantizeMatmulWithActConst
    func.func @QuantizeMatmulWithActConst(%arg0: tensor<1x3x3x512xf32>) -> tensor<1x3x3x12xf32> {
      %w = arith.constant dense<127.0> : tensor<512x12xf32>
      %mm = "tfl.batch_matmul"(%arg0, %w) {adj_x = false, adj_y = false} : (tensor<1x3x3x512xf32>, tensor<512x12xf32>) -> tensor<1x3x3x12xf32>
      func.return %mm : tensor<1x3x3x12xf32>
    
    - Last Modified: Thu May 23 21:09:00 UTC 2024
    - 23.2K bytes
  9. tensorflow/compiler/mlir/tfr/tests/decompose.mlir

    func.func @attribute_cast(%arg0: tensor<1x4x4x1xf32>) -> tensor<1x2x2x1xf32> {
      %0 = "tfr.cast"(%arg0) : (tensor<1x4x4x1xf32>) -> !tfr.tensor
      %stride_i32 = arith.constant 2 : i32
      %1 = tfr.call @tf__my_max_pool(%0, %stride_i32, %stride_i32) : (!tfr.tensor, i32, i32) -> !tfr.tensor
      %2 = "tfr.cast"(%1) : (!tfr.tensor) -> tensor<1x2x2x1xf32>
      func.return %2 : tensor<1x2x2x1xf32>
    // CHECK: tf__max_pool
    }
    
    - Last Modified: Mon Oct 30 06:52:55 UTC 2023
    - 16.7K bytes
  10. tensorflow/compiler/mlir/tf2xla/tests/legalize-tf-quant.mlir

        rhs_quantization_min_val = -128 : i64,
        rhs_quantization_max_val = 127 : i64
      } : (tensor<1x2x2x3xf32>, tensor<2x3x3x2x!tf_type.qint8>, tensor<f32>, tensor<i32>) -> tensor<1x3x3x2xf32>
      func.return %0 : tensor<1x3x3x2xf32>
    }
    
    //===----------------------------------------------------------------------===//
    // tf.UniformQuantize and tf.UniformDequantize legalization
    - Last Modified: Thu May 09 01:25:29 UTC 2024
    - 37.3K bytes