Results 101 - 110 of 287 for 1xi32 (0.06 sec)

  1. tensorflow/compiler/mlir/lite/quantization/tensorflow/tests/fallback_to_flex_ops_default.mlir

    func.func @add(%arg0: tensor<1xf32>, %arg1: tensor<1xf32>) -> tensor<1xf32> {
      %0 = "tf.Add"(%arg0, %arg1) : (tensor<1xf32>, tensor<1xf32>) -> tensor<1xf32>
      func.return %0: tensor<1xf32>
    // CHECK: %[[CUSTOM_0:.*]] = "tfl.custom"(%arg0, %arg1) <{custom_code = "FlexAdd", custom_option = #tfl<const_bytes : "0x03416464001412034164641A001A002A070A015412023001320000021B171414042801">}> : (tensor<1xf32>, tensor<1xf32>) -> tensor<1xf32>
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 13.4K bytes
  2. tensorflow/compiler/mlir/lite/experimental/tac/tests/e2e/device-transform-nnapi.mlir

      }
    
      // CHECK-LABEL: pack
      func.func @pack(%arg0: tensor<1xf32>, %arg1: tensor<1xf32>) -> tensor<2x1xf32> {
        %0 = "tfl.pack"(%arg0, %arg1) {axis = 0 : i32, values_count = 2 : i32} : (tensor<1xf32>, tensor<1xf32>) -> tensor<2x1xf32>
        func.return %0 : tensor<2x1xf32>
        // CHECK: %[[VAL_0:.*]] = arith.constant dense<[2, 1]> : tensor<2xi32>
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 1.2K bytes
  3. tensorflow/compiler/mlir/tf2xla/api/v1/compile_tf_graph_test.cc

    #loc3])\0Amodule {\0A  func.func @host_func(%arg0: tensor<3360x?xi32> loc(fused[#loc1, #loc2, #loc3]), %arg1: tensor<3xi32> loc(fused[#loc1, #loc2, #loc3]), %arg2: tensor<i32> loc(fused[#loc1, #loc2, #loc3]), %arg3: tensor<2xi32> loc(fused[#loc1, #loc2, #loc3])) -> (tensor<1x1120x?xi32>, tensor<1x1120x?xi32>, tensor<1120x?xi32>, tensor<2xi32>) {\0A    %0 = \22tf.Reshape\22(%arg0, %arg1) {_xla_outside_compilation = \220\22} : (tensor<3360x?xi32>, tensor<3xi32>) -> tensor<3x1120x?xi32> loc(#loc9)\0A    %1:3 = ...
    - Last Modified: Sat Apr 13 08:08:57 UTC 2024
    - 11.7K bytes
  4. tensorflow/compiler/mlir/lite/tests/push-tpose-through-ewise.mlir

      %perm = arith.constant dense<[3, 0, 1, 2]> : tensor<4xi32>
      %0 = "tfl.transpose"(%arg0, %perm) : (tensor<2x3x4x5xf32>, tensor<4xi32>) -> tensor<5x2x3x4xf32>
      %perm1 = arith.constant dense<[3, 0, 1, 2]> : tensor<4xi32>
      %1 = "tfl.transpose"(%arg1, %perm1) : (tensor<2x3x4x5xf32>, tensor<4xi32>) -> tensor<5x2x3x4xf32>
      %2 = tfl.add %0, %1 { fused_activation_function = "NONE" } : tensor<5x2x3x4xf32>
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 8.9K bytes
  5. tensorflow/compiler/mlir/tensorflow/tests/layout_optimization_to_nhwc.mlir

      // Reduce over spatial dimensions (NCHW)
      %1 = "tf.Const"() {value = dense<[2, 3]> : tensor<2xi32>} : () -> tensor<2xi32>
    
      // Transpose input: NHWC -> NCHW
      %2 = "tf.Const"() {value = dense<[0, 3, 1, 2]> : tensor<4xi32>} : () -> tensor<4xi32>
      %3 = "tf.Transpose"(%arg0, %2) : (tensor<?x224x224x3xf32>, tensor<4xi32>) -> tensor<?x3x224x224xf32>
    
      // Pad spatial dimensions.
    - Last Modified: Mon Oct 30 06:52:55 UTC 2023
    - 7.3K bytes
  6. tensorflow/compiler/mlir/tensorflow/tests/fold-broadcast.mlir

      %cst = arith.constant dense<[3, 5, 7]> : tensor<3xi32>
      %0 = "tf.BroadcastTo"(%arg1, %cst) : (tensor<5xf32>, tensor<3xi32>) -> tensor<3x5x7xf32>
      %1 = "tf.Mul"(%arg0, %0) : (tensor<5x7xf32>, tensor<3x5x7xf32>) -> tensor<3x5x7xf32>
      func.return %1 : tensor<3x5x7xf32>
      // CHECK: %[[C0:.*]] = arith.constant dense<[3, 5, 7]> : tensor<3xi32>
    - Last Modified: Mon Oct 30 06:52:55 UTC 2023
    - 6.6K bytes
  7. tensorflow/compiler/mlir/lite/tests/post-quantize.mlir

    func.func @FoldTranspose(%arg0: tensor<1x10x20x3xf32>) -> tensor<1x20x40x16xf32> {
      %cst = arith.constant dense<[1, 20, 40, 16]> : tensor<4xi32>
      %cst_0 = arith.constant dense<[2, 0, 1, 3]> : tensor<4xi32>
      %0 = "tfl.pseudo_qconst"() {qtype = tensor<16x!quant.uniform<i32:f32, 1.8527095877721169E-10>>, value = dense<0> : tensor<16xi32>} : () -> tensor<16x!quant.uniform<i32:f32, 1.8527095877721169E-10>>
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 19.9K bytes
  8. tensorflow/compiler/mlir/lite/stablehlo/tests/legalize-stablehlo-vhlo.mlir

    func.func @op_with_tfl_control_flow() -> (tensor<1xf32>, !tfl.control) {
      // CHECK: vhlo.constant_v1
      %0 = stablehlo.constant dense<1.000000e+00> : tensor<1xf32>
      // CHECK-NEXT: tfl.control_node
      %outputs, %control = tfl.control_node {
        %1 = "tfl.neg"(%0) : (tensor<1xf32>) -> tensor<1xf32>
        "tfl.yield"(%1) : (tensor<1xf32>) -> ()
      }
      return %outputs, %control : tensor<1xf32>, !tfl.control
    }
    
    // -----
    
    - Last Modified: Thu Mar 07 22:39:35 UTC 2024
    - 5.7K bytes
  9. tensorflow/compiler/mlir/lite/tests/prepare-quantize-post-training-16bits.mlir

      %2 = "tfl.pseudo_const"() {value = dense<[-1, 3]> : tensor<2xi32>} : () -> tensor<2xi32>
      %3 = "tfl.reshape"(%1, %2) : (tensor<1x1x3xf32>, tensor<2xi32>) -> tensor<1x3xf32>
      %4 = "quantfork.stats"(%3) {layerStats = dense<[-1.0, 1.0]> : tensor<2xf32>} : (tensor<1x3xf32>) -> tensor<1x3xf32>
      func.return %4 : tensor<1x3xf32>
    
    // CHECK: %[[cst:.*]] = arith.constant dense<[-1, 3]> : tensor<2xi32>
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 26.1K bytes
  10. tensorflow/compiler/mlir/lite/stablehlo/tests/composite-lowering.mlir

      %14 = "mhlo.broadcast_in_dim"(%1) <{broadcast_dimensions = dense<> : tensor<0xi64>}> : (tensor<i32>) -> tensor<4xi32>
      %15 = mhlo.compare  LT, %13, %14,  SIGNED : (tensor<4xi32>, tensor<4xi32>) -> tensor<4xi1>
      %16 = "mhlo.broadcast_in_dim"(%0) <{broadcast_dimensions = dense<> : tensor<0xi64>}> : (tensor<i32>) -> tensor<4xi32>
      %17 = mhlo.add %13, %16 : tensor<4xi32>
    - Last Modified: Thu Jun 06 18:45:51 UTC 2024
    - 32.6K bytes
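
A note on the query term: in MLIR type syntax, "1xi32" is the trailing portion of a shaped type such as tensor<1xi32> (a single 32-bit integer) or tensor<2x1xi32>; for some of the files above the matching text lies outside the excerpt shown. The following is a minimal sketch, not taken from any of the listed files, adapted from the tfl.pack excerpt in result 2 with i32 operands substituted for f32 (assuming tfl.pack accepts i32 element types as it does f32):

    // Hypothetical example: pack two one-element i32 tensors into a 2x1xi32 tensor.
    func.func @pack_i32(%arg0: tensor<1xi32>, %arg1: tensor<1xi32>) -> tensor<2x1xi32> {
      %0 = "tfl.pack"(%arg0, %arg1) {axis = 0 : i32, values_count = 2 : i32} : (tensor<1xi32>, tensor<1xi32>) -> tensor<2x1xi32>
      func.return %0 : tensor<2x1xi32>
    }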