Results 11 - 20 of 52 for 2x1xi32 (0.11 sec)

  1. tensorflow/compiler/mlir/lite/stablehlo/tests/tfl_legalize_hlo.mlir

      // CHECK:  %5 = "tfl.arg_max"(%arg0, %cst) : (tensor<1x32x1xf32>, tensor<1xi32>) -> tensor<1x1xi32>
      // CHECK:  return %4, %5 : tensor<1x1xf32>, tensor<1x1xi32>
    }
    
    // CHECK-LABEL:   func @convert_pytorch_argmax
    func.func @convert_pytorch_argmax(%arg0: tensor<1x9xi32>) -> tensor<1xi32> {
      %0 = mhlo.constant dense<0> : tensor<i32>
    - Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 40.1K bytes
    - Viewed (0)
  2. tensorflow/compiler/mlir/tf2xla/tests/legalize-tf-with-tf2xla-hlo-importer.mlir

      // CHECK-LABEL: @xla_svd
      func.func @xla_svd(%arg0: tensor<1x1xf32>) -> (tensor<1xf32>, tensor<1x1xf32>, tensor<1x1xf32>) {
        // CHECK-NOT: XlaSvd
        %s, %u, %v = "tf.XlaSvd"(%arg0) {max_iter = 1, epsilon = 1.0E-09 : f32, precision_config = ""} : (tensor<1x1xf32>) -> (tensor<1xf32>, tensor<1x1xf32>, tensor<1x1xf32>)
        func.return %s, %u, %v : tensor<1xf32>, tensor<1x1xf32>, tensor<1x1xf32>
      }
    
      func.func @identity(%arg0: f32) -> f32 {
    - Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Sat Apr 06 15:32:52 UTC 2024
    - 38.6K bytes
    - Viewed (0)
  3. tensorflow/compiler/mlir/tensorflow/tests/decompose_resource_ops.mlir

        // CHECK:      %[[ONE:.*]] = "tf.Const"() <{value = dense<1> : tensor<i32>}>
        // CHECK:      %[[RES_READ_VAL:[0-9]*]] = "tf.ReadVariableOp"
        // CHECK-SAME: (tensor<*x!tf_type.resource<tensor<2x8xi32>>>) -> tensor<2x8xi32>
        // CHECK:      "tf.AddV2"(%[[RES_READ_VAL]], %[[ONE]])
        // CHECK-SAME: (tensor<2x8xi32>, tensor<i32>) -> tensor<2x8xi32>
        // CHECK:      "tf.AssignVariableOp"
    
    - Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed May 22 19:47:48 UTC 2024
    - 51.3K bytes
    - Viewed (0)
  4. tensorflow/compiler/mlir/tensorflow/tests/tpu_sharding_identification.mlir

    }
    func.func @_func(%arg0: tensor<2x4xf32>, %arg1: tensor<4x2xf32>) -> tensor<2x2xf32> {
      %0 = "tf.MatMul"(%arg0, %arg1) {_XlaSharding = "\08\03\1A\02\02\01\22\02\00\01"} : (tensor<2x4xf32>, tensor<4x2xf32>) -> tensor<2x2xf32>
      %1 = "tf.Identity"(%0) : (tensor<2x2xf32>) -> tensor<2x2xf32>
      return %1 : tensor<2x2xf32>
    }
    
    // -----
    // The following op sharding is used in the following test case:
    - Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue Feb 20 19:07:52 UTC 2024
    - 47.5K bytes
    - Viewed (0)
  5. tensorflow/compiler/mlir/lite/tests/prepare-tf.mlir

      %5 = "tf.Identity"(%4) {device = ""} : (tensor<2x4xf32>) -> tensor<2x4xf32>
      %6 = "tf.Identity"(%5) {device = ""} : (tensor<2x4xf32>) -> tensor<2x4xf32>
      func.return %6 : tensor<2x4xf32>
    
      // CHECK-LABEL: QuantDequantTranspose
      // CHECK-DAG: %[[CST:.*]] = "tf.Const"() <{value = dense<[1, 0]> : tensor<2xi32>}> : () -> tensor<?xi32>
    - Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed May 29 07:26:59 UTC 2024
    - 59.8K bytes
    - Viewed (0)
  6. tensorflow/compiler/mlir/lite/stablehlo/tests/call_xla_module_to_stablehlo.mlir

          %outputs_9, %control_10 = tf_executor.island(%control_4) wraps "tf.Identity"(%outputs_7) {device = ""} : (tensor<2x3xi32>) -> tensor<2x3xi32>
          tf_executor.fetch %outputs_9 : tensor<2x3xi32>
        }
        return %0 : tensor<2x3xi32>
      }
    }
    
    // CHECK: module attributes
    // CHECK-SAME:  tfl.metadata = {{{.*}}keep_stablehlo_constant = "true"{{.*}}}
    - Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed Jan 31 21:25:51 UTC 2024
    - 3.3K bytes
    - Viewed (0)
  7. tensorflow/compiler/mlir/lite/experimental/tac/tests/e2e/simple-graph.mlir

      %3 = "tfl.pack"(%1, %2) {axis = 0 : i32, values_count = 2 : i32} : (tensor<1xf32>, tensor<1xf32>) -> tensor<2x1xf32>
      func.return %3 : tensor<2x1xf32>
    }
    
    // CHECK: %[[CST:.*]] = arith.constant dense<1> : tensor<4xi32>
    - Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 1.6K bytes
    - Viewed (0)
  8. tensorflow/compiler/mlir/lite/tests/default_quant_params.mlir

    // CHECK-LABEL: hardcode_all
    func.func @hardcode_all(%arg0: tensor<2x2xf32>, %arg1: tensor<2x1xf32>) -> tensor<2x2xf32> {
      %0 = "tfl.add"(%arg0, %arg1) {fused_activation_function="NONE"}: (tensor<2x2xf32>, tensor<2x1xf32>) -> tensor<2x2xf32>
      func.return %0 : tensor<2x2xf32>
    
    // CHECK: %[[q0:.*]] = "tfl.quantize"(%arg1) <{qtype = tensor<2x1x!quant.uniform<u8:f32, 0.0078431372549019607:128>>}>
    - Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 8.8K bytes
    - Viewed (0)
  9. tensorflow/compiler/mlir/quantization/stablehlo/tests/passes/defer_activation_transpose.mlir

    func.func @add_with_activation_transpose_rank_two(%arg0: tensor<1x2xf32>) -> tensor<2x1xf32> {
      %0 = stablehlo.constant dense<2.000000e+00> : tensor<2x1xf32>
      %1 = stablehlo.transpose %arg0, dims = [1, 0] : (tensor<1x2xf32>) -> tensor<2x1xf32>
      %2 = stablehlo.add %1, %0 : tensor<2x1xf32>
      return %2 : tensor<2x1xf32>
    }
    // CHECK: %[[TRANSPOSE_0:.+]] = stablehlo.transpose
    - Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 18 20:32:46 UTC 2024
    - 14.6K bytes
    - Viewed (0)
  10. tensorflow/compiler/mlir/lite/tests/optimize_batch_matmul.mlir

      %3 = "tfl.batch_matmul"(%arg0, %2) {adj_x = false, adj_y = false, asymmetric_quantize_inputs = false} : (tensor<4x128x2xf32>, tensor<2x1xf32>) -> tensor<4x128x1xf32>
      func.return %3 : tensor<4x128x1xf32>
      // CHECK: %[[TRANSPOSED_X:.*]] = "tfl.transpose"
      // CHECK-SAME: (tensor<2x1xf32>, tensor<2xi32>) -> tensor<1x2xf32>
      // CHECK: %[[FC_RES:.*]] = "tfl.fully_connected"(%arg0, %[[TRANSPOSED_X]]
    - Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 9K bytes
    - Viewed (0)