Results 11 - 20 of 23 for 2x1x1x11xf32 (0.13 sec)

  1. tensorflow/compiler/mlir/lite/tests/quantize.mlir

      func.return %1 : tensor<1x1x1x16xf32>
    
    // CHECK: %[[avgp:.*]] = "tfl.average_pool_2d"(%arg0)
    // CHECK: %[[dq:.*]] = "tfl.dequantize"(%[[avgp]]) : (tensor<1x1x1x16x!quant.uniform<u8:f32, 7.812500e-03:128>>) -> tensor<1x1x1x16xf32>
    // CHECK: return %[[dq]] : tensor<1x1x1x16xf32>
    }
    
    // CHECK-LABEL: QuantizeReshape2D
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue May 28 23:10:13 UTC 2024
    - 39.7K bytes
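    The CHECK lines expect the quantize pass to run the average pool directly on the quantized argument and to dequantize only the final result. A minimal sketch (input shape and pool attributes are illustrative, not taken from the file) of IR matching those checks:

      func.func @quantized_average_pool(%arg0: tensor<1x6x6x16x!quant.uniform<u8:f32, 7.812500e-03:128>>) -> tensor<1x1x1x16xf32> {
        // Pooling the full 6x6 spatial extent collapses the output to 1x1x1x16.
        %0 = "tfl.average_pool_2d"(%arg0) {filter_height = 6 : i32, filter_width = 6 : i32, fused_activation_function = "NONE", padding = "VALID", stride_h = 1 : i32, stride_w = 1 : i32} : (tensor<1x6x6x16x!quant.uniform<u8:f32, 7.812500e-03:128>>) -> tensor<1x1x1x16x!quant.uniform<u8:f32, 7.812500e-03:128>>
        %1 = "tfl.dequantize"(%0) : (tensor<1x1x1x16x!quant.uniform<u8:f32, 7.812500e-03:128>>) -> tensor<1x1x1x16xf32>
        func.return %1 : tensor<1x1x1x16xf32>
      }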
  2. tensorflow/compiler/mlir/lite/experimental/tac/tests/device-transform-gpu.mlir

    // CHECK:           %[[VAL_6:.*]] = "tfl.reshape"(%[[VAL_1]], %[[VAL_2]]) : (tensor<1xf32>, tensor<4xi32>) -> tensor<1x1x1x1xf32>
    // CHECK:           %[[VAL_7:.*]] = "tfl.concatenation"(%[[VAL_5]], %[[VAL_6]]) <{axis = 3 : i32, fused_activation_function = "NONE"}> : (tensor<1x1x1x1xf32>, tensor<1x1x1x1xf32>) -> tensor<1x1x1x2xf32>
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 15.6K bytes
  3. tensorflow/compiler/mlir/lite/experimental/tac/README.md

        %1 = "tfl.reshape"(%arg1, %cst) {tac.device = "GPU", tac.inference_type = "FLOAT"} : (tensor<1xf32>, tensor<4xi32>) -> tensor<1x1x1x1xf32>
        %2 = "tfl.concatenation"(%0, %1) {axis = 3 : i32, fused_activation_function = "NONE", tac.device = "GPU", tac.inference_type = "FLOAT"} : (tensor<1x1x1x1xf32>, tensor<1x1x1x1xf32>) -> tensor<1x1x1x2xf32>
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue Mar 29 18:32:13 UTC 2022
    - 11.6K bytes
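    These README lines mirror the pattern checked in the previous result: the reshape/concatenation pair carries per-op tac.device and tac.inference_type annotations that the target-aware conversion acts on. A minimal sketch (function name and the shape constant are illustrative) of a complete function in that annotated form:

      func.func @simple_graph(%arg0: tensor<1xf32>, %arg1: tensor<1xf32>) -> tensor<1x1x1x2xf32> {
        %cst = arith.constant dense<[1, 1, 1, 1]> : tensor<4xi32>
        // Each op is tagged with the device and inference type chosen for it.
        %0 = "tfl.reshape"(%arg0, %cst) {tac.device = "GPU", tac.inference_type = "FLOAT"} : (tensor<1xf32>, tensor<4xi32>) -> tensor<1x1x1x1xf32>
        %1 = "tfl.reshape"(%arg1, %cst) {tac.device = "GPU", tac.inference_type = "FLOAT"} : (tensor<1xf32>, tensor<4xi32>) -> tensor<1x1x1x1xf32>
        %2 = "tfl.concatenation"(%0, %1) {axis = 3 : i32, fused_activation_function = "NONE", tac.device = "GPU", tac.inference_type = "FLOAT"} : (tensor<1x1x1x1xf32>, tensor<1x1x1x1xf32>) -> tensor<1x1x1x2xf32>
        func.return %2 : tensor<1x1x1x2xf32>
      }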
  4. tensorflow/compiler/mlir/lite/tests/quantize-dynamic-range.mlir

    // CustomOpNotWeightOnly-LABEL: QuantizeCustomOp
    func.func @QuantizeCustomOp(%arg0: tensor<1x1x1x1xf32>) -> tensor<*xf32> attributes {tf.entry_function = {inputs = "input", outputs = "custom_op"}} {
      %0 = "quantfork.stats"(%arg0) {layerStats = dense<[0.000000e+00, 2.550000e+02]> : tensor<2xf32>} : (tensor<1x1x1x1xf32>) -> tensor<1x1x1x1xf32>
      %w = arith.constant dense<127.0> : tensor<1024x1x1x1xf32>
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu May 23 21:09:00 UTC 2024
    - 23.2K bytes
  5. tensorflow/compiler/mlir/lite/tests/optimize.mlir

      func.return %1 : tensor<1x1x1x1x1xf32>
    
      // CHECK-DAG: %[[CST1:.*]] = "tfl.no_value"() <{value}> : () -> none
      // CHECK-DAG: %[[CST2:.*]] = arith.constant dense<2.000000e+00> : tensor<1x1x1x1x1xf32>
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu May 16 20:31:41 UTC 2024
    - 284.1K bytes
  6. tensorflow/compiler/mlir/tensorflow/tests/tf-ops.mlir

    ^bb0(%arg0: tensor<1x7x7x16xf32>):
      // expected-error @+1 {{requires attribute 'padding'}}
      %0 = "tf.AvgPool"(%arg0) {T = "tfdtype$DT_FLOAT", ksize = [1, 7, 7, 1], strides = [1, 1, 1, 1]} : (tensor<1x7x7x16xf32>) -> tensor<1x1x1x16xf32>
      func.return %0 : tensor<1x1x1x16xf32>
    }
    
    // -----
    
    func.func @testAvgPoolWrongPadding(tensor<1x7x7x16xf32>) -> tensor<1x1x1x16xf32> {
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Mon Oct 23 14:40:35 UTC 2023
    - 236.4K bytes
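    The excerpt is a negative test: tf.AvgPool is rejected because the required 'padding' attribute is missing. A hedged sketch (function name illustrative) of the same op written so it verifies:

      func.func @testAvgPoolValidPadding(%arg0: tensor<1x7x7x16xf32>) -> tensor<1x1x1x16xf32> {
        // Supplying padding = "VALID" satisfies the requirement flagged by the expected-error above;
        // a 7x7 window with unit strides on a 7x7 input yields the 1x1x1x16 result.
        %0 = "tf.AvgPool"(%arg0) {T = "tfdtype$DT_FLOAT", ksize = [1, 7, 7, 1], padding = "VALID", strides = [1, 1, 1, 1]} : (tensor<1x7x7x16xf32>) -> tensor<1x1x1x16xf32>
        func.return %0 : tensor<1x1x1x16xf32>
      }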
  7. tensorflow/compiler/mlir/lite/tests/prepare-quantize-signed.mlir

      %w = arith.constant dense<[[[[0.0]]], [[[127.0]]], [[[-127.0]]]]> : tensor<3x1x1x1xf32>
      %b = arith.constant dense<0.0> : tensor<3xf32>
      %conv = "tfl.conv_2d"(%arg0, %w, %b) {dilation_h_factor = 1 : i32, dilation_w_factor = 1 : i32, fused_activation_function = "RELU", padding = "SAME", stride_h = 1 : i32, stride_w = 1 : i32} : (tensor<1x5x5x1xf32>, tensor<3x1x1x1xf32>, tensor<3xf32>) -> tensor<1x5x5x3xf32>
      func.return %conv : tensor<1x5x5x3xf32>
    
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 18.4K bytes
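    Continuing from the %w constant in the excerpt, a hedged sketch (scales purely illustrative) of the per-channel signed type such a prepare pass assigns to the 3-output-channel filter, quantized along axis 0 with the narrow range [-127, 127]:

      %w_q = "tfl.quantize"(%w) {qtype = tensor<3x1x1x1x!quant.uniform<i8<-127:127>:f32:0, {1.0,1.0,1.0}>>} : (tensor<3x1x1x1xf32>) -> tensor<3x1x1x1x!quant.uniform<i8<-127:127>:f32:0, {1.0,1.0,1.0}>>
      %w_dq = "tfl.dequantize"(%w_q) : (tensor<3x1x1x1x!quant.uniform<i8<-127:127>:f32:0, {1.0,1.0,1.0}>>) -> tensor<3x1x1x1xf32>
      // %w_dq then feeds tfl.conv_2d in place of the original f32 weight.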
  8. tensorflow/compiler/mlir/lite/stablehlo/tests/composite-lowering.mlir

    // CHECK: %1 = "tfl.pad"(%0, %cst_0) : (tensor<1x1x8x1xf32>, tensor<4x2xi32>) -> tensor<1x1x11x1xf32>
    // CHECK: %2 = "tfl.average_pool_2d"(%1) <{filter_height = 1 : i32, filter_width = 3 : i32, fused_activation_function = "NONE", padding = "VALID", stride_h = 1 : i32, stride_w = 2 : i32}> : (tensor<1x1x11x1xf32>) -> tensor<1x1x5x1xf32>
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Jun 06 18:45:51 UTC 2024
    - 32.6K bytes
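    The two CHECK lines imply concrete shape arithmetic: the width dimension is padded from 8 to 11 (3 extra elements), and a 1x3 VALID pool with stride_w = 2 over width 11 yields (11 - 3) / 2 + 1 = 5, matching tensor<1x1x5x1xf32>. A hedged sketch of a paddings constant consistent with those shapes (the exact left/right split in the file may differ):

      // One [before, after] pair per dimension of the 1x1x8x1 input; 1 + 8 + 2 = 11 along the width axis.
      %cst_0 = arith.constant dense<[[0, 0], [0, 0], [1, 2], [0, 0]]> : tensor<4x2xi32>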
  9. tensorflow/compiler/mlir/lite/tests/prepare-quantize.mlir

        } : (tensor<1x6x6x16xf32>) -> tensor<1x1x1x16xf32>
      func.return %1 : tensor<1x1x1x16xf32>
    
    // CHECK: %0 = "tfl.dequantize"(%arg0)
    // CHECK: %1 = "tfl.average_pool_2d"(%0)
    // CHECK: %2 = "tfl.quantize"(%1)
    // CHECK: %3 = "tfl.dequantize"(%2)
    // CHECK: return %3 : tensor<1x1x1x16xf32>
    }
    
    // CHECK-LABEL: QuantizeMaximum
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 67.5K bytes
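    Compare with result 1: there the quantize pass runs the pool on quantized values directly, while these CHECK lines describe the earlier prepare step, which keeps the pool in f32 and brackets it with quantize/dequantize pairs for later folding. A minimal sketch (shapes taken from the excerpt, pool attributes and quantization parameters illustrative) of IR with that structure:

      func.func @prepared_average_pool(%arg0: tensor<1x6x6x16x!quant.uniform<u8:f32, 7.812500e-03:128>>) -> tensor<1x1x1x16xf32> {
        %0 = "tfl.dequantize"(%arg0) : (tensor<1x6x6x16x!quant.uniform<u8:f32, 7.812500e-03:128>>) -> tensor<1x6x6x16xf32>
        %1 = "tfl.average_pool_2d"(%0) {filter_height = 6 : i32, filter_width = 6 : i32, fused_activation_function = "NONE", padding = "VALID", stride_h = 1 : i32, stride_w = 1 : i32} : (tensor<1x6x6x16xf32>) -> tensor<1x1x1x16xf32>
        %2 = "tfl.quantize"(%1) {qtype = tensor<1x1x1x16x!quant.uniform<u8:f32, 7.812500e-03:128>>} : (tensor<1x1x1x16xf32>) -> tensor<1x1x1x16x!quant.uniform<u8:f32, 7.812500e-03:128>>
        %3 = "tfl.dequantize"(%2) : (tensor<1x1x1x16x!quant.uniform<u8:f32, 7.812500e-03:128>>) -> tensor<1x1x1x16xf32>
        func.return %3 : tensor<1x1x1x16xf32>
      }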
  10. tensorflow/compiler/mlir/tensorflow/tests/shape_inference.mlir

      func.func @simple_folding(%arg0: tensor<1x1x1x1xi32>, %arg1: tensor<1x1x1x1xf32>) -> tensor<?x?x?x?xf32> {
        // CHECK: %[[SHAPE:.*]] = "tf.Shape"
        // CHECK: %[[CONV:.*]] = "tf.Conv2DBackpropInput"(%[[SHAPE]]
        // CHECK-SAME: (tensor<4xi32>, tensor<1x1x1x1xf32>, tensor<1x1x1x1xf32>) -> tensor<1x1x1x1xf32>
        // CHECK: return %[[CONV]] : tensor<1x1x1x1xf32>
        %0 = "tf.Shape"(%arg0) : (tensor<1x1x1x1xi32>) -> tensor<4xi32>
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue Jan 23 17:24:10 UTC 2024
    - 167.4K bytes
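    The CHECK lines verify that shape inference folds tf.Shape of the statically shaped %arg0 and refines the Conv2DBackpropInput result from tensor<?x?x?x?xf32> to tensor<1x1x1x1xf32>. One way the body could continue after the %0 = "tf.Shape" line, written as an illustrative sketch (attributes assumed) rather than the file's actual text:

        // Pre-inference, the result type is still fully dynamic; the pass refines it to tensor<1x1x1x1xf32>.
        %1 = "tf.Conv2DBackpropInput"(%0, %arg1, %arg1) {padding = "VALID", strides = [1, 1, 1, 1]} : (tensor<4xi32>, tensor<1x1x1x1xf32>, tensor<1x1x1x1xf32>) -> tensor<?x?x?x?xf32>
        func.return %1 : tensor<?x?x?x?xf32>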