Results 21 - 30 of 41 for 2x1x2xf32 (5.18 sec)

  1. tensorflow/compiler/mlir/lite/tests/prepare-quantize-post-training-16bits.mlir

    // CHECK-LABEL: QuantizeReshapeOp
    func.func @QuantizeReshapeOp(%arg0: tensor<1x1x3xf32>) -> (tensor<1x3xf32>) {
      %1 = "quantfork.stats"(%arg0) {layerStats = dense<[-1.0, 1.0]> : tensor<2xf32>} : (tensor<1x1x3xf32>) -> tensor<1x1x3xf32>
      %2 = "tfl.pseudo_const"() {value = dense<[-1, 3]> : tensor<2xi32>} : () -> tensor<2xi32>
      %3 = "tfl.reshape"(%1, %2) : (tensor<1x1x3xf32>, tensor<2xi32>) -> tensor<1x3xf32>
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 26.1K bytes
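    The tfl.reshape in this hit takes its target shape from a constant containing a -1 entry, which is inferred from the element count. A minimal NumPy sketch of that shape behaviour, ignoring the quantization stats annotation and using placeholder values:

    import numpy as np

    # Stand-in for %arg0: tensor<1x1x3xf32>; values are arbitrary.
    x = np.arange(3, dtype=np.float32).reshape(1, 1, 3)

    # tfl.reshape takes the target shape as data; -1 means "infer this
    # dimension from the remaining element count", as in NumPy.
    y = x.reshape(-1, 3)
    assert y.shape == (1, 3)   # matches the declared tensor<1x3xf32> result
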
  2. tensorflow/compiler/mlir/lite/tests/const-fold.mlir

      %1 = "tfl.add"(%cst_2, %cst_1) {fused_activation_function = "NONE"} : (tensor<2x2x2xf32>, tensor<  2x2xf32>) -> tensor<2x2x2xf32>
      %2 = "tfl.add"(%cst_0, %cst_2) {fused_activation_function = "NONE"} : (tensor<    2xf32>, tensor<2x2x2xf32>) -> tensor<2x2x2xf32>
    
      func.return %0, %1, %2 : tensor<2x2xf32>, tensor<2x2x2xf32>, tensor<2x2x2xf32>
    
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 45.8K bytes
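    Both tfl.add lines in this excerpt pair a tensor<2x2x2xf32> operand with a lower-rank constant, so the constant fold has to apply NumPy-style broadcasting. A small sketch of the shape behaviour, with placeholder values rather than the file's actual constants:

    import numpy as np

    cst_0 = np.ones((2,), dtype=np.float32)        # tensor<2xf32>
    cst_1 = np.ones((2, 2), dtype=np.float32)      # tensor<2x2xf32>
    cst_2 = np.ones((2, 2, 2), dtype=np.float32)   # tensor<2x2x2xf32>

    # The smaller operand is broadcast against the 2x2x2 one in both adds.
    r1 = cst_2 + cst_1   # (2,2,2) + (2,2)   -> (2,2,2)
    r2 = cst_0 + cst_2   # (2,)    + (2,2,2) -> (2,2,2)
    assert r1.shape == r2.shape == (2, 2, 2)
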
  3. tensorflow/compiler/mlir/lite/tests/prepare-quantize-signed.mlir

    func.func @prepareAdd(%arg0: tensor<2x2xf32>) -> tensor<2x2xf32> {
      %cst = arith.constant dense<[[0.0, 1.0], [2.0, 255.0]]> : tensor<2x2xf32>
      %add = "tfl.add"(%arg0, %cst) {fused_activation_function="NONE"} : (tensor<2x2xf32>, tensor<2x2xf32>) -> tensor<2x2xf32>
      func.return %add : tensor<2x2xf32>
    
    // CHECK: %[[cst:.*]] = arith.constant dense<[{{\[}}0.000000e+00, 1.000000e+00], [2.000000e+00, 2.550000e+02]]>
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 18.4K bytes
  4. tensorflow/compiler/mlir/tf2xla/tests/legalize-tf-include-tf2xla-fallback.mlir

    // support dynamic shaped operands like the native lowering. Verify that
    // fallback lowering is preferred for static shaped operands when available.
    
    // CHECK-LABEL: batchmatmulv2
    func.func @batchmatmulv2(%arg0: tensor<1x4x2xf32>, %arg1: tensor<3x2x4xf32>) -> tensor<3x4x4xf32> {
      // NO_FALLBACK: mhlo.dynamic_broadcast_in_dim
      // NO_FALLBACK: mhlo.dot_general
    
      // SUPPORTED_FALLBACK_DEVICE: mhlo.reduce
    - Last Modified: Thu Nov 16 19:04:03 UTC 2023
    - 3.2K bytes
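    The batchmatmulv2 signature in this hit, tensor<1x4x2xf32> times tensor<3x2x4xf32> giving tensor<3x4x4xf32>, relies on broadcasting the size-1 batch dimension. A NumPy sketch of just that shape rule; the lowering details checked by the FileCheck prefixes are not reproduced here:

    import numpy as np

    lhs = np.ones((1, 4, 2), dtype=np.float32)   # tensor<1x4x2xf32>
    rhs = np.ones((3, 2, 4), dtype=np.float32)   # tensor<3x2x4xf32>

    # The size-1 batch dimension of lhs broadcasts against rhs's batch of 3,
    # and each 4x2 matrix multiplies a 2x4 matrix.
    out = np.matmul(lhs, rhs)
    assert out.shape == (3, 4, 4)                # tensor<3x4x4xf32>
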
  5. tensorflow/compiler/mlir/quantization/stablehlo/tests/passes/quantize/quantize_same_scale.mlir

        %5 = "quantfork.qcast"(%4) {volatile} : (tensor<3x4x2xf32>) -> tensor<3x4x2x!quant.uniform<i8:f32, 0.13170163023705575:-1>>
        %6 = "quantfork.dcast"(%5) : (tensor<3x4x2x!quant.uniform<i8:f32, 0.13170163023705575:-1>>) -> tensor<3x4x2xf32>
        %7 = "stablehlo.gather"(%6, %arg2) {
        dimension_numbers = #stablehlo.gather<
    - Last Modified: Tue May 14 17:10:32 UTC 2024
    - 35.4K bytes
  6. tensorflow/compiler/mlir/lite/tests/flatbuffer2mlir/vhlo.mlir

    // CHECK-NEXT:}
    
    func.func @transpose(%arg0: tensor<2x3x2xi32>) -> tensor<2x3x2xi32> {
      %0 = "vhlo.transpose_v1"(%arg0) <{permutation = #vhlo.tensor_v1<dense<[2, 1, 0]> : tensor<3xi64>>}> : (tensor<2x3x2xi32>) -> tensor<2x3x2xi32>
      return %0 : tensor<2x3x2xi32>
    }
    
    // CHECK:func.func private @transpose(%arg0: tensor<2x3x2xi32>) -> tensor<2x3x2xi32> {
    - Last Modified: Thu Mar 14 19:15:40 UTC 2024
    - 31.9K bytes
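    The vhlo.transpose_v1 in this hit uses permutation [2, 1, 0], which reverses the axes of the 2x3x2 input; because that shape reads the same backwards, the result type is again tensor<2x3x2xi32>. A NumPy sketch of the same permutation:

    import numpy as np

    x = np.arange(12, dtype=np.int32).reshape(2, 3, 2)   # tensor<2x3x2xi32>
    y = np.transpose(x, (2, 1, 0))                       # permutation [2, 1, 0]

    assert y.shape == (2, 3, 2)        # 2x3x2 reversed is still 2x3x2
    assert y[1, 2, 0] == x[0, 2, 1]    # element (i, j, k) maps to (k, j, i)
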
  7. tensorflow/compiler/mlir/lite/tests/optimize.mlir

    func.func @ReorderAddWithConstant(%arg0: tensor<2x2xf32>) -> tensor<2x2xf32> {
      %cst = arith.constant dense<1.0> : tensor<2x2xf32>
      %cst_1 = arith.constant dense<2.0> : tensor<2x2xf32>
      %0 = "tfl.add"(%arg0, %cst) {fused_activation_function = "NONE"} : (tensor<2x2xf32>, tensor<2x2xf32>) -> tensor<2x2xf32>
      %1 = "tfl.add"(%0, %cst_1) {fused_activation_function = "NONE"} : (tensor<2x2xf32>, tensor<2x2xf32>) -> tensor<2x2xf32>
    - Last Modified: Thu May 16 20:31:41 UTC 2024
    - 284.1K bytes
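    The excerpt stops before the CHECK lines, but the function name suggests the optimize pass reorders the chained adds so the two constants can be combined (here 1.0 + 2.0 into 3.0). A sketch of that numeric equivalence, under the assumption that this is what the pattern verifies:

    import numpy as np

    x = np.random.rand(2, 2).astype(np.float32)   # stands in for %arg0: tensor<2x2xf32>

    chained = (x + np.float32(1.0)) + np.float32(2.0)   # the two tfl.add ops above
    folded  = x + np.float32(3.0)                       # single add with merged constant

    # Equal up to float32 rounding; the intermediate rounding step can differ by 1 ulp.
    assert np.allclose(chained, folded)
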
  8. tensorflow/compiler/mlir/lite/tests/ops.mlir

    func.func @testMean(%arg0: tensor<2x2xf32>, %arg1 : tensor<1xi32>) -> tensor<1x2xf32> {
      // CHECK: "tfl.mean"(%arg0, %arg1) <{keep_dims = false}>
      %0 = "tfl.mean"(%arg0, %arg1) {keep_dims = false}: (tensor<2x2xf32>, tensor<1xi32>) -> tensor<1x2xf32>
      func.return %0 : tensor<1x2xf32>
    }
    
    // -----
    
    // CHECK-LABEL: testMean_true
    func.func @testMean_true(%arg0: tensor<2x2xf32>, %arg1 : tensor<1xi32>) -> tensor<1x2xf32> {
    - Last Modified: Thu Jun 06 19:09:08 UTC 2024
    - 189.2K bytes
  9. tensorflow/compiler/mlir/tf2xla/tests/legalize-tf-quant.mlir

    func.func @uniform_quantize_and_dequantize_per_axis(%arg0 : tensor<2x2xf32>) -> tensor<2x2xf32> {
      %scales = "tf.Const"() { value = dense<[1.0, 2.0]> : tensor<2xf32> } : () -> tensor<2xf32>
      %zps = "tf.Const"() { value = dense<[3, 4]> : tensor<2xi32> } : () -> tensor<2xi32>
    
      // CHECK: %[[QUANTIZE:.*]] = mhlo.uniform_quantize %arg0 : (tensor<2x2xf32>) -> tensor<2x2x!quant.uniform<i8:f32:0, {1.000000e+00:3,2.000000e+00:4}>>
    - Last Modified: Thu May 09 01:25:29 UTC 2024
    - 37.3K bytes
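    This hit quantizes per axis 0 with scales [1.0, 2.0] and zero points [3, 4]. A NumPy sketch of the usual affine quantize/dequantize round trip with those parameters; the exact rounding and saturation behaviour of the mhlo lowering is not shown in the excerpt, so treat this as illustrative:

    import numpy as np

    x = np.array([[1.0, -2.0], [3.0, 4.0]], dtype=np.float32)   # sample values, not from the test

    scales = np.array([1.0, 2.0], dtype=np.float32).reshape(2, 1)   # one scale per row (axis 0)
    zps    = np.array([3, 4], dtype=np.int32).reshape(2, 1)         # one zero point per row

    q  = np.clip(np.round(x / scales) + zps, -128, 127).astype(np.int8)   # quantize to i8
    dq = (q.astype(np.float32) - zps) * scales                            # dequantize
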
  10. tensorflow/compiler/mlir/lite/tests/prepare-quantize.mlir

    func.func @NotQuantizeConcatConstantOperand(%arg0: tensor<1x2xf32>) -> tensor<2x2xf32> {
      %0 = arith.constant dense<1.0> : tensor<1x2xf32>
      %1 = "tfl.concatenation"(%arg0, %0) {axis = 0 : i32, fused_activation_function = "NONE"} : (tensor<1x2xf32>, tensor<1x2xf32>) -> tensor<2x2xf32>
      func.return %1 : tensor<2x2xf32>
    
    // CHECK-NEXT: %[[cst:.*]] = arith.constant dense<1.000000e+00> : tensor<1x2xf32>
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 67.5K bytes
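    The NotQuantizeConcatConstantOperand hit concatenates the 1x2 argument with a 1x2 constant along axis 0 to produce the 2x2 result. The same shape behaviour in NumPy terms:

    import numpy as np

    arg0 = np.zeros((1, 2), dtype=np.float32)   # %arg0: tensor<1x2xf32>
    cst  = np.ones((1, 2), dtype=np.float32)    # the dense<1.000000e+00> constant

    out = np.concatenate([arg0, cst], axis=0)   # tfl.concatenation with axis = 0
    assert out.shape == (2, 2)                  # tensor<2x2xf32>
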