Results 31 - 40 of 169 for 1xi32 (0.03 sec)

  1. tensorflow/compiler/mlir/tensorflow/tests/layout_optimization_move_transposes_end.mlir

      %1 = "tf.Transpose"(%arg0, %0) : (tensor<1x4x4x8xf32>, tensor<4xi32>) -> tensor<1x8x4x4xf32>
      %2 = "tf.Transpose"(%1, %0) : (tensor<1x8x4x4xf32>, tensor<4xi32>) -> tensor<1x4x8x4xf32>
      %3 = "tf.Transpose"(%arg1, %0) : (tensor<1x4x4x8xf32>, tensor<4xi32>) -> tensor<1x8x4x4xf32>
      %4 = "tf.Transpose"(%3, %0) : (tensor<1x8x4x4xf32>, tensor<4xi32>) -> tensor<1x4x8x4xf32>
    - Last Modified: Mon Oct 30 06:52:55 UTC 2023
    - 9.5K bytes
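    The excerpt above feeds a permutation operand %0 that is not shown. For reference, a minimal self-contained sketch of the same pattern, with an assumed dense<[0, 3, 1, 2]> permutation constant chosen only to match the result shapes shown (illustrative, not taken from the file):

    // Sketch: the [0, 3, 1, 2] permutation is an assumption matching the shapes above.
    func.func @transpose_sketch(%arg0: tensor<1x4x4x8xf32>) -> tensor<1x8x4x4xf32> {
      %perm = "tf.Const"() {value = dense<[0, 3, 1, 2]> : tensor<4xi32>} : () -> tensor<4xi32>
      %0 = "tf.Transpose"(%arg0, %perm) : (tensor<1x4x4x8xf32>, tensor<4xi32>) -> tensor<1x8x4x4xf32>
      func.return %0 : tensor<1x8x4x4xf32>
    }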
  2. tensorflow/compiler/mlir/lite/experimental/tac/tests/e2e/simple-graph.mlir

    module {
    func.func @main(%arg0: tensor<1xf32>, %arg1: tensor<1xf32>, %arg2: tensor<1xf32>, %arg3: tensor<1xf32>) -> tensor<2x1xf32> attributes {tf.entry_function = {inputs = "input0,input1,input2,input3", outputs = "output"}} {
      %0 = "tfl.add"(%arg0, %arg1) {fused_activation_function = "RELU6"} : (tensor<1xf32>, tensor<1xf32>) -> tensor<1xf32>
      %1 = "tfl.mul"(%0, %arg2) {fused_activation_function = "RELU6"} : (tensor<1xf32>, tensor<1xf32>) -> tensor<1xf32>
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 1.6K bytes
  3. tensorflow/compiler/mlir/lite/experimental/tac/tests/target-annotation.mlir

    // -----
    
    func.func @testAddReluPack(%arg0: tensor<1xf32>, %arg1: tensor<1xf32>) {
       // CHECK: tac.device = "GPU", tac.inference_type = "FLOAT"
      %0 = "tfl.add"(%arg0, %arg1) {fused_activation_function = "RELU6"} : (tensor<1xf32>, tensor<1xf32>) -> tensor<1xf32>
       // CHECK: tac.device = "GPU", tac.inference_type = "FLOAT"
      %1 = "tfl.add"(%arg0, %0) {fused_activation_function = "RELU"} : (tensor<1xf32>, tensor<1xf32>) -> tensor<1xf32>
    - Last Modified: Fri May 19 19:32:06 UTC 2023
    - 6.2K bytes
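    The CHECK lines in this snippet look for tac.device and tac.inference_type annotations. A hedged sketch of what an annotated op could look like after the pass, with the attribute placement assumed from the CHECK patterns rather than copied from the file:

    func.func @annotated_add_sketch(%arg0: tensor<1xf32>, %arg1: tensor<1xf32>) -> tensor<1xf32> {
      // Sketch only: tac.device / tac.inference_type shown as op attributes,
      // as suggested by the CHECK lines; the pass's exact output may differ.
      %0 = "tfl.add"(%arg0, %arg1) {fused_activation_function = "RELU6", tac.device = "GPU", tac.inference_type = "FLOAT"} : (tensor<1xf32>, tensor<1xf32>) -> tensor<1xf32>
      func.return %0 : tensor<1xf32>
    }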
  4. tensorflow/compiler/mlir/lite/quantization/tensorflow/tests/fallback_to_flex_ops_legacy.mlir

    }
    
    // CHECK-LABEL: add
    func.func @add(%arg0: tensor<1xf32>, %arg1: tensor<1xf32>) -> tensor<1xf32> {
      %0 = "tf.AddV2"(%arg0, %arg1) : (tensor<1xf32>, tensor<1xf32>) -> tensor<1xf32>
      func.return %0: tensor<1xf32>
    // CHECK: %[[ADD_0:.*]] = "tf.AddV2"(%arg0, %arg1) : (tensor<1xf32>, tensor<1xf32>) -> tensor<1xf32>
    // CHECK: return %[[ADD_0]] : tensor<1xf32>
    }
    
    // CHECK-LABEL: softmax
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 5.8K bytes
  5. tensorflow/compiler/mlir/tensorflow/tests/mlir2graphdef/output-shapes-attr.mlir

    func.func private @simple_callee(%arg0: tensor<2xi32>) -> tensor<2xi32> {
      %graph = tf_executor.graph {
        tf_executor.fetch %arg0 : tensor<2xi32>
      }
      func.return %graph#0 : tensor<2xi32>
    }
    
    // Test with a TPUPartitionedCallOp that is not inlined.
    func.func @dont_inline_tpu_partitioned_call(%arg0: tensor<2xi32> {tf._user_specified_name = "inputs_0"}, %arg1: tensor<2xi32> {tf._user_specified_name = "inputs_1"}) -> tensor<2xi32> {
    - Last Modified: Fri Mar 25 12:28:56 UTC 2022
    - 3.4K bytes
  6. tensorflow/compiler/mlir/quantization/tensorflow/tests/convert_tpu_model_to_cpu.mlir

    // CHECK-NOT: tf.BatchFunction
    // CHECK: %[[ADD0:.*]] = "tf.AddV2"(%[[ARG0]], %[[ARG1]])
    // CHECK: return %[[ADD0]] : tensor<1xf32>
    
    func.func private @batched_func(%arg0: tensor<1xf32>, %arg1: tensor<1xf32>) -> tensor<1xf32> {
      %0 = "tf.Identity"(%arg0) : (tensor<1xf32>) -> tensor<1xf32>
      %1 = "tf.Identity"(%arg1) : (tensor<1xf32>) -> tensor<1xf32>
    - Last Modified: Mon Oct 30 06:52:55 UTC 2023
    - 4.3K bytes
  7. tensorflow/compiler/mlir/tf2xla/api/v1/testdata/prepare_to_library.mlir

    #loc3])
    module {
      func.func @host_func(%arg0: tensor<3360x?xi32> loc(fused[#loc1, #loc2, #loc3]), %arg1: tensor<3xi32> loc(fused[#loc1, #loc2, #loc3]), %arg2: tensor<i32> loc(fused[#loc1, #loc2, #loc3]), %arg3: tensor<2xi32> loc(fused[#loc1, #loc2, #loc3])) -> (tensor<1x1120x?xi32>, tensor<1x1120x?xi32>, tensor<1120x?xi32>, tensor<2xi32>) {
        %0 = "tf.Reshape"(%arg0, %arg1) {_xla_outside_compilation = "0"} : (tensor<3360x?xi32>, tensor<3xi32>) -> tensor<3x1120x?xi32> loc(#loc9)
        %1:3 = ...
    - Last Modified: Wed Jan 31 23:44:50 UTC 2024
    - 2.3K bytes
  8. tensorflow/compiler/mlir/lite/experimental/tac/tests/e2e/device-transform-nnapi.mlir

      }
    
      // CHECK-LABEL: pack
      func.func @pack(%arg0: tensor<1xf32>, %arg1: tensor<1xf32>) -> tensor<2x1xf32> {
        %0 = "tfl.pack"(%arg0, %arg1) {axis = 0 : i32, values_count = 2 : i32} : (tensor<1xf32>, tensor<1xf32>) -> tensor<2x1xf32>
        func.return %0 : tensor<2x1xf32>
        // CHECK: %[[VAL_0:.*]] = arith.constant dense<[2, 1]> : tensor<2xi32>
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 1.2K bytes
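    The CHECK line above introduces an arith.constant dense<[2, 1]> shape, which suggests the NNAPI transform rewrites tfl.pack through a reshape. One plausible equivalent, assuming a tfl.concatenation followed by tfl.reshape (an illustration under that assumption, not necessarily the pass's exact output):

    func.func @pack_rewrite_sketch(%arg0: tensor<1xf32>, %arg1: tensor<1xf32>) -> tensor<2x1xf32> {
      // Assumed rewrite: concatenate along axis 0, then reshape to [2, 1].
      %shape = arith.constant dense<[2, 1]> : tensor<2xi32>
      %0 = "tfl.concatenation"(%arg0, %arg1) {axis = 0 : i32, fused_activation_function = "NONE"} : (tensor<1xf32>, tensor<1xf32>) -> tensor<2xf32>
      %1 = "tfl.reshape"(%0, %shape) : (tensor<2xf32>, tensor<2xi32>) -> tensor<2x1xf32>
      func.return %1 : tensor<2x1xf32>
    }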
  9. tensorflow/compiler/mlir/lite/tests/push-tpose-through-ewise.mlir

      %perm = arith.constant dense<[3, 0, 1, 2]> : tensor<4xi32>
      %0 = "tfl.transpose"(%arg0, %perm) : (tensor<2x3x4x5xf32>, tensor<4xi32>) -> tensor<5x2x3x4xf32>
      %perm1 = arith.constant dense<[3, 0, 1, 2]> : tensor<4xi32>
      %1 = "tfl.transpose"(%arg1, %perm1) : (tensor<2x3x4x5xf32>, tensor<4xi32>) -> tensor<5x2x3x4xf32>
      %2 = tfl.add %0, %1 { fused_activation_function = "NONE" } : tensor<5x2x3x4xf32>
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 8.9K bytes
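    For contrast, a hedged sketch of the form this kind of rewrite pushes toward, assuming the elementwise add is performed in the original layout and a single transpose is moved past it (illustrative only; the file's CHECK expectations are not reproduced here):

    func.func @pushed_transpose_sketch(%arg0: tensor<2x3x4x5xf32>, %arg1: tensor<2x3x4x5xf32>) -> tensor<5x2x3x4xf32> {
      // Elementwise add first, then one transpose on the result.
      %perm = arith.constant dense<[3, 0, 1, 2]> : tensor<4xi32>
      %0 = tfl.add %arg0, %arg1 { fused_activation_function = "NONE" } : tensor<2x3x4x5xf32>
      %1 = "tfl.transpose"(%0, %perm) : (tensor<2x3x4x5xf32>, tensor<4xi32>) -> tensor<5x2x3x4xf32>
      func.return %1 : tensor<5x2x3x4xf32>
    }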
  10. tensorflow/compiler/mlir/tensorflow/tests/layout_optimization_to_nhwc.mlir

      // Reduce over spatial dimensions (NCHW)
      %1 = "tf.Const"() {value = dense<[2, 3]> : tensor<2xi32>} : () -> tensor<2xi32>
    
      // Transpose input: NHWC -> NCHW
      %2 = "tf.Const"() {value = dense<[0, 3, 1, 2]> : tensor<4xi32>} : () -> tensor<4xi32>
      %3 = "tf.Transpose"(%arg0, %2) : (tensor<?x224x224x3xf32>, tensor<4xi32>) -> tensor<?x3x224x224xf32>
    
      // Pad spatial dimensions.
    - Last Modified: Mon Oct 30 06:52:55 UTC 2023
    - 7.3K bytes
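    The snippet above builds the NHWC -> NCHW permutation [0, 3, 1, 2]. For reference, a minimal sketch of the inverse direction (NCHW -> NHWC) using the permutation [0, 2, 3, 1]; the shapes mirror the snippet and the constant is an assumption, not taken from the file:

    func.func @to_nhwc_sketch(%arg0: tensor<?x3x224x224xf32>) -> tensor<?x224x224x3xf32> {
      // [0, 2, 3, 1] undoes the [0, 3, 1, 2] NHWC -> NCHW permutation.
      %perm = "tf.Const"() {value = dense<[0, 2, 3, 1]> : tensor<4xi32>} : () -> tensor<4xi32>
      %0 = "tf.Transpose"(%arg0, %perm) : (tensor<?x3x224x224xf32>, tensor<4xi32>) -> tensor<?x224x224x3xf32>
      func.return %0 : tensor<?x224x224x3xf32>
    }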