Results 31 - 37 of 37 for 2x4x2xf32 (0.23 sec)

  1. tensorflow/compiler/mlir/lite/tests/optimize.mlir

    func.func @ReorderAddWithConstant(%arg0: tensor<2x2xf32>) -> tensor<2x2xf32> {
      %cst = arith.constant dense<1.0> : tensor<2x2xf32>
      %cst_1 = arith.constant dense<2.0> : tensor<2x2xf32>
      %0 = "tfl.add"(%arg0, %cst) {fused_activation_function = "NONE"} : (tensor<2x2xf32>, tensor<2x2xf32>) -> tensor<2x2xf32>
      %1 = "tfl.add"(%0, %cst_1) {fused_activation_function = "NONE"} : (tensor<2x2xf32>, tensor<2x2xf32>) -> tensor<2x2xf32>
    - Last Modified: Thu May 16 20:31:41 UTC 2024
    - 284.1K bytes
  2. tensorflow/compiler/mlir/tensorflow/tests/tpu_sharding_identification.mlir

    }
    func.func @_func(%arg0: tensor<2x4xf32>, %arg1: tensor<4x2xf32>) -> tensor<2x2xf32> {
      %0 = "tf.MatMul"(%arg0, %arg1) {_XlaSharding = "\08\03\1A\02\02\01\22\02\00\01"} : (tensor<2x4xf32>, tensor<4x2xf32>) -> tensor<2x2xf32>
      %1 = "tf.Identity"(%0) : (tensor<2x2xf32>) -> tensor<2x2xf32>
      return %1 : tensor<2x2xf32>
    }
    
    // -----
    // The following op sharding is used in the following test case:
    - Last Modified: Tue Feb 20 19:07:52 UTC 2024
    - 47.5K bytes
  3. tensorflow/compiler/mlir/tensorflow/transforms/lower_tf.cc

    //     : (tensor<i32>, tensor<2x2xi32>, tensor<2xf32>, tensor<2x2x2xf32>)
    //     -> tensor<5x2xf32>
    //
    // is lowered to
    //
    //   %shape = "tf.Const"() {value = dense<[-1, 2]> : tensor<2xi64>}
    //   %inp0 = "tf.Reshape"(%arg0, %shape)
    //     : (tensor<2xf32>, tensor<2xi64>) -> tensor<1x2xf32>
    //   %inp1 = "tf.Reshape"(%arg1, %shape)
    //     : (tensor<2x2x2xf32>, tensor<2xi64>) -> tensor<4x2xf32>
    //   %items0 = "tf.Unpack"(%[[INP0]]) {axis = 0 : i64}
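    The comment above spells out the shape arithmetic of the lowering: reshaping each data operand with the constant shape [-1, 2] flattens it to a rank-2 tensor with two columns, so the tensor<2xf32> operand becomes 1x2 (one row) and the tensor<2x2x2xf32> operand becomes 4x2 (four rows); unpacking along axis 0 then yields the five rows of the tensor<5x2xf32> result. A minimal standalone sketch of just the reshape step (hypothetical function and value names, not quoted from the pass):

      func.func @reshape_to_rows(%arg0: tensor<2xf32>, %arg1: tensor<2x2x2xf32>) -> (tensor<1x2xf32>, tensor<4x2xf32>) {
        // -1 lets tf.Reshape infer the leading (row) dimension.
        %shape = "tf.Const"() {value = dense<[-1, 2]> : tensor<2xi64>} : () -> tensor<2xi64>
        %inp0 = "tf.Reshape"(%arg0, %shape) : (tensor<2xf32>, tensor<2xi64>) -> tensor<1x2xf32>
        %inp1 = "tf.Reshape"(%arg1, %shape) : (tensor<2x2x2xf32>, tensor<2xi64>) -> tensor<4x2xf32>
        func.return %inp0, %inp1 : tensor<1x2xf32>, tensor<4x2xf32>
      }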
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 74.9K bytes
  4. tensorflow/compiler/mlir/lite/tests/ops.mlir

    }
    
    // -----
    
    // CHECK-LABEL: topk_2
    func.func @topk_2(%arg0: tensor<3x4x8xf32>) -> (tensor<3x4x2xf32>, tensor<3x4x2xi32>) {
      %0 = arith.constant dense<2> : tensor<i32>
      %1:2 = "tfl.topk_v2"(%arg0, %0) : (tensor<3x4x8xf32>, tensor<i32>) -> (tensor<3x4x2xf32>, tensor<3x4x2xi32>)
      func.return %1#0, %1#1: tensor<3x4x2xf32>, tensor<3x4x2xi32>
    }
    
    // -----
    
    // CHECK-LABEL: topk_d
    - Last Modified: Thu Jun 06 19:09:08 UTC 2024
    - 189.2K bytes
  5. tensorflow/compiler/mlir/tensorflow/tests/canonicalize.mlir

    func.func @testReshapeNoOp(%arg0: tensor<2x4xf32>, %arg1: tensor<2xi32>) -> tensor<2x4xf32> {
      %0 = "tf.Reshape"(%arg0, %arg1) : (tensor<2x4xf32>, tensor<2xi32>) -> tensor<2x4xf32>
    
      // CHECK: return %arg0
      func.return %0 : tensor<2x4xf32>
    }
    
    // CHECK-LABEL: func @testBroadcastToNoOp
    func.func @testBroadcastToNoOp(%arg0: tensor<2x4xf32>, %arg1: tensor<2xi32>) -> tensor<2x4xf32> {
    - Last Modified: Thu May 09 22:07:10 UTC 2024
    - 132.1K bytes
  6. tensorflow/compiler/mlir/tensorflow/tests/tf-ops.mlir

    func.func @testPackedTPUPartitionedInputV2(tensor<2x4xf32>, tensor<2x4xf32>) -> tensor<4x4xf32> {
    ^bb0(%arg0: tensor<2x4xf32>, %arg1: tensor<2x4xf32>):
      // expected-error @+1 {{expected 1 inputs, got 2}}
      %0 = "tf.TPUPartitionedInputV2"(%arg0, %arg1) {partition_dims = [2, 1], is_packed = true} : (tensor<2x4xf32>, tensor<2x4xf32>) -> tensor<4x4xf32>
      func.return %0 : tensor<4x4xf32>
    }
    
    // -----
    
    - Last Modified: Mon Oct 23 14:40:35 UTC 2023
    - 236.4K bytes
  7. tensorflow/compiler/mlir/lite/transforms/optimize.cc

        return success();
      }
    };
    
    // Fuses Unpack with a subsequent Concatenation into a Reshape if the output
    // type has a static shape and the activation function is none. For example:
    //
    //   // %input: tensor<1x3x2xf32>
    //   %unpack:3 = "tfl.unpack"(%input) {axis = 1 : i32, num = 3 : i32}
    //   %res = "tfl.concatenation"(%unpack#0, %unpack#1, %unpack#2)
    //        {axis = -1 : i32, fused_activation_function = "NONE"}
    //
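    Following the comment's example, the three tensor<1x2xf32> slices produced by tfl.unpack along axis 1 are re-joined along the last axis, so the fused op simply collapses dims 1 and 2 of %input. A hedged sketch of what the fused form would look like (constant name and shapes inferred from the example, not quoted from the pass):

      %new_shape = arith.constant dense<[1, 6]> : tensor<2xi32>
      %res = "tfl.reshape"(%input, %new_shape) : (tensor<1x3x2xf32>, tensor<2xi32>) -> tensor<1x6xf32>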
    - Last Modified: Tue Apr 30 00:40:15 UTC 2024
    - 102.3K bytes