Results 1 - 10 of 46 for 32x64xf32 (0.15 sec)

  1. tensorflow/compiler/mlir/tensorflow/tests/fused_kernel_matcher.mlir

    // CHECK-LABEL: matmulBiasAdd
    func.func @matmulBiasAdd(%arg0: tensor<64xf32>, %arg1: tensor<8x32xf32>, %arg2: tensor<32x64xf32>) -> (tensor<*xf32>) {
      // CHECK: %[[VAL_3:.*]] = "tf._FusedMatMul"(%arg1, %arg2, %arg0) <{epsilon = 0.000000e+00 : f32, fused_ops = ["BiasAdd"], transpose_a = false, transpose_b = false}> : (tensor<8x32xf32>, tensor<32x64xf32>, tensor<64xf32>) -> tensor<*xf32>
    - Last Modified: Mon Oct 30 06:52:55 UTC 2023
    - 13.2K bytes
  2. tensorflow/compiler/mlir/tf2xla/api/v1/compile_tf_graph_test.cc

          func.func @main() -> (tensor<32x64xf32> {mhlo.sharding = "\08\01\1A\01\01\22\01\00"}) {
            %cst = "tf.Const"() {value = dense<[524170, 523952]> : tensor<2xi32>} : () -> tensor<2xi32>
            %cst_0 = "tf.Const"() {value = dense<[32, 64]> : tensor<2xi32>} : () -> tensor<2xi32>
            %0 = "tf.StatelessRandomNormal"(%cst_0, %cst) : (tensor<2xi32>, tensor<2xi32>) -> tensor<32x64xf32>
            return %0 : tensor<32x64xf32>
          }
    - Last Modified: Sat Apr 13 08:08:57 UTC 2024
    - 11.7K bytes
  3. tensorflow/compiler/mlir/lite/tests/optimize.mlir

      %cst = arith.constant dense<[3, 4]> : tensor<2xi32>
      %cst_0 = arith.constant dense<1.000000e+00> : tensor<3x4xf32>
      %0 = "tfl.reshape"(%arg0, %cst) : (tensor<*xf32>, tensor<2xi32>) -> tensor<3x4xf32>
      %1 = "tfl.add"(%0, %cst_0) {fused_activation_function = "NONE"} : (tensor<3x4xf32>, tensor<3x4xf32>) -> tensor<3x4xf32>
      func.return %1 : tensor<3x4xf32>
    // CHECK-LABEL: ReshapeAddUnknownShape
    - Last Modified: Thu May 16 20:31:41 UTC 2024
    - 284.1K bytes
  4. tensorflow/compiler/mlir/tensorflow/tests/layout_optimization_move_transposes_begin.mlir

    func.func @move_transpose_handle_broadcast(%arg0:tensor<8x64xf32>, %arg1:tensor<8x64x64xf32>) -> tensor<512x64xf32> {
      %cst = "tf.Const"() {value = dense<3> : tensor<i32>} : () -> tensor<i32>
      %cst_1 = "tf.Const"() {value = dense<[2, 0, 1]> : tensor<3xi32>} : () -> tensor<3xi32>
      %cst_2 = "tf.Const"() {value = dense<[512, 64]> : tensor<2xi32>} : () -> tensor<2xi32>
      %0 = "tf.ExpandDims"(%arg0, %cst) {device = ""} : (tensor<8x64xf32>, tensor<i32>) -> tensor<8x64x1xf32>
    - Last Modified: Mon Oct 30 06:52:55 UTC 2023
    - 6.3K bytes
  5. tensorflow/compiler/mlir/tf2xla/tests/legalize-tf-BatchMatMulV2.mlir

    func.func @batchmatmulv2_basic(%arg0: tensor<1x4x2xf32>, %arg1: tensor<3x2x4xf32>) -> tensor<3x4x4xf32> {
    // CHECK-LABEL:   func @batchmatmulv2_basic
    // CHECK-SAME:        ([[LHS:%.*]]: tensor<1x4x2xf32>, [[RHS:%.*]]: tensor<3x2x4xf32>) -> tensor<3x4x4xf32>
    // CHECK:           [[LHSSHAPE:%.*]] = shape.shape_of [[LHS]] : tensor<1x4x2xf32>
    // CHECK:           [[RHSSHAPE:%.*]] = shape.shape_of [[RHS]] : tensor<3x2x4xf32>
    // CHECK:           [[CM2:%.*]] = arith.constant -2 : index
    - Last Modified: Sat Apr 06 15:32:52 UTC 2024
    - 5.5K bytes
  6. tensorflow/compiler/mlir/tf2xla/tests/legalize-tf-include-tf2xla-fallback.mlir

    // fallback lowering is preferred for static shaped operands when available.
    
    // CHECK-LABEL: batchmatmulv2
    func.func @batchmatmulv2(%arg0: tensor<1x4x2xf32>, %arg1: tensor<3x2x4xf32>) -> tensor<3x4x4xf32> {
      // NO_FALLBACK: mhlo.dynamic_broadcast_in_dim
      // NO_FALLBACK: mhlo.dot_general
    
      // SUPPORTED_FALLBACK_DEVICE: mhlo.reduce
      // SUPPORTED_FALLBACK_DEVICE: mhlo.dot_general
    - Last Modified: Thu Nov 16 19:04:03 UTC 2023
    - 3.2K bytes
  7. tensorflow/compiler/mlir/quantization/stablehlo/tests/passes/replace_stablehlo_ops_in_main_function_with_xla_call_module_ops.mlir

        %0 = stablehlo.constant dense<0.000000e+00> : tensor<1x64xf32>
        %1 = stablehlo.dot_general %arg0, %arg1, contracting_dims = [1] x [0], precision = [DEFAULT, DEFAULT] : (tensor<1x3xf32>, tensor<3x64xf32>) -> tensor<1x64xf32>
        %2 = stablehlo.add %1, %arg2 : tensor<1x64xf32>
        %3 = stablehlo.maximum %2, %0 : tensor<1x64xf32>
    - Last Modified: Thu Apr 25 01:09:50 UTC 2024
    - 39.8K bytes
  8. tensorflow/compiler/mlir/lite/tests/prepare-tf.mlir

      %4 = "tf.MatMul"(%arg0, %3) {device = "", transpose_a = false, transpose_b = false} : (tensor<2x3xf32>, tensor<3x4xf32>) -> tensor<2x4xf32>
      %5 = "tf.Identity"(%4) {device = ""} : (tensor<2x4xf32>) -> tensor<2x4xf32>
      %6 = "tf.Identity"(%5) {device = ""} : (tensor<2x4xf32>) -> tensor<2x4xf32>
      func.return %6 : tensor<2x4xf32>
    
    - Last Modified: Wed May 29 07:26:59 UTC 2024
    - 59.8K bytes
  9. tensorflow/compiler/mlir/lite/tests/legalize_jax_random.mlir

    func.func @tfl_wrapped_jax_random_normal(%arg0: tensor<2xui32>) -> tuple<tensor<3x4xf32>> {
      // This is a fake jax random normal body.
      %0 = stablehlo.constant dense<0.0> : tensor<12xf32>
      %1 = "stablehlo.reshape"(%0) : (tensor<12xf32>) -> tensor<3x4xf32>
      %2 = "stablehlo.tuple"(%1) : (tensor<3x4xf32>) -> tuple<tensor<3x4xf32>>
      func.return %2 : tuple<tensor<3x4xf32>>
    }
    
    
    // CHECK-LABEL:   func @tfl_wrapped_jax_random_uniform(
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 2K bytes
  10. tensorflow/compiler/mlir/tensorflow/tests/batchmatmul_to_einsum.mlir

    func.func @test_batch_matmul_to_einsum(%arg0: tensor<1x2x3xf32>, %arg1: tensor<3x4xf32>) -> tensor<1x2x4xf32> {
      // CHECK-LABEL: test_batch_matmul_to_einsum
      // CHECK: "tf.Einsum"(%arg0, %arg1) <{equation = "...mk,...kn->...mn"}> : (tensor<1x2x3xf32>, tensor<3x4xf32>) -> tensor<1x2x4xf32>
      %0 = "tf.BatchMatMul"(%arg0, %arg1) {adj_x = false, adj_y = false} : (tensor<1x2x3xf32>, tensor<3x4xf32>) -> tensor<1x2x4xf32>
      func.return %0: tensor<1x2x4xf32>
    }
    
    - Last Modified: Mon Oct 30 06:52:55 UTC 2023
    - 3K bytes