Results 1 - 10 of 23 for 3x4x6x2xf32 (0.15 sec)

  1. tensorflow/compiler/mlir/tensorflow/tests/einsum.mlir

      // CHECK: %[[v1:.*]] = "tf.BatchMatMulV2"(%arg0, %[[v0]]) <{adj_x = false, adj_y = false}> : (tensor<3x4x5xf32>, tensor<5x12xf32>) -> tensor<3x4x12xf32>
      // CHECK: %[[v2:.*]] = "tf.Reshape"(%[[v1]], %[[cst_1]]) : (tensor<3x4x12xf32>, tensor<4xi64>) -> tensor<3x4x6x2xf32>
      // CHECK: return %[[v2]] : tensor<3x4x6x2xf32>
    }
    
    func.func @einsum_reduceddim(%arg0: tensor<2x5x7xf32>, %arg1: tensor<2x5x7x3xf32>) -> tensor<2x5x3xf32> {
    - Last Modified: Fri Jan 05 18:35:42 UTC 2024
    - 25.9K bytes
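
    A minimal NumPy sketch (not from the file) of the lowering these CHECK lines describe. The einsum equation itself is not visible in the snippet, so the 'abc,cde->abde' equation and the (5, 6, 2) right-operand shape are assumptions consistent with the shapes shown.

      import numpy as np

      lhs = np.random.rand(3, 4, 5)
      rhs = np.random.rand(5, 6, 2)   # assumed original right-operand shape

      # Direct einsum (one equation consistent with the shapes above).
      direct = np.einsum('abc,cde->abde', lhs, rhs)

      # The lowering in the CHECK lines: flatten the right operand,
      # batch-matmul to (3, 4, 12), then reshape to (3, 4, 6, 2).
      lowered = (lhs @ rhs.reshape(5, 12)).reshape(3, 4, 6, 2)

      assert direct.shape == (3, 4, 6, 2)
      assert np.allclose(direct, lowered)
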
  2. tensorflow/compiler/mlir/quantization/stablehlo/tests/passes/insert_weight_param.mlir

      // CHECK: return %[[CALL]]
    
      func.func private @composite_dot_general_fn(%arg0: tensor<4x3x6x5xf32>, %arg1: tensor<4x3x5x2xf32>) -> tensor<4x3x6x2xf32> attributes {_from_xla_call_module} {
        %0 = stablehlo.dot_general %arg0, %arg1, batching_dims = [0, 1] x [0, 1], contracting_dims = [3] x [2] : (tensor<4x3x6x5xf32>, tensor<4x3x5x2xf32>) -> tensor<4x3x6x2xf32>
        return %0 : tensor<4x3x6x2xf32>
    - Last Modified: Thu May 09 05:56:10 UTC 2024
    - 22K bytes
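
    A minimal NumPy sketch (illustrative only) of the dot_general dimension numbers in the snippet: dims 0 and 1 are batch dims, and dim 3 of the left operand contracts against dim 2 of the right operand.

      import numpy as np

      lhs = np.random.rand(4, 3, 6, 5)
      rhs = np.random.rand(4, 3, 5, 2)

      # a, b are the batch dims; d is contracted; c and e are the remaining
      # free dims of lhs and rhs, giving the (4, 3, 6, 2) result.
      out = np.einsum('abcd,abde->abce', lhs, rhs)
      assert out.shape == (4, 3, 6, 2)

      # Because the contracted axes are already in matmul position, a plain
      # batched matmul computes the same thing.
      assert np.allclose(out, lhs @ rhs)
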
  3. tensorflow/compiler/mlir/lite/tests/prepare-tf.mlir

      // CHECK:  %[[MUL3:.*]] = "tf.Mul"(%[[MEAN]], %[[MUL1]]) : (tensor<2xf32>, tensor<2xf32>) -> tensor<2xf32>
      // CHECK:  %[[SUB:.*]] = "tf.Sub"(%arg2, %[[MUL3]]) : (tensor<2xf32>, tensor<2xf32>) -> tensor<2xf32>
      // CHECK:  %[[ADD0:.*]] = "tf.Add"(%[[MUL2]], %[[SUB]]) : (tensor<1x1x6x2xf32>, tensor<2xf32>) -> tensor<1x1x6x2xf32>
    - Last Modified: Wed May 29 07:26:59 UTC 2024
    - 59.8K bytes
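
    The Mul/Sub/Add pattern above is the usual constant-folded form of a fused batch norm. A small NumPy sketch of that algebra; the per-channel values and epsilon below are arbitrary, not taken from the file.

      import numpy as np

      # y = (x - mean) / sqrt(var + eps) * gamma + beta  folds into
      # y = x * scale + shift  with  scale = gamma / sqrt(var + eps)
      # and  shift = beta - mean * scale  (the Mul/Sub/Add in the snippet).
      x = np.random.rand(1, 1, 6, 2).astype(np.float32)
      gamma = np.array([1.5, 0.5], np.float32)
      beta = np.array([0.1, -0.2], np.float32)
      mean = np.array([0.3, 0.7], np.float32)
      var = np.array([1.0, 2.0], np.float32)
      eps = 1e-3

      reference = (x - mean) / np.sqrt(var + eps) * gamma + beta

      scale = gamma / np.sqrt(var + eps)
      shift = beta - mean * scale
      folded = x * scale + shift        # (2,) broadcasts over (1, 1, 6, 2)

      assert np.allclose(reference, folded, atol=1e-6)
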
  4. tensorflow/compiler/mlir/tensorflow/tests/unroll-batch-matmul.mlir

      // CHECK: return %[[MATMUL_PACKED]] : tensor<3x4x6xf32>
    }
    
    // -----
    
    func.func @batchMatMulUnbatchedRight(%arg0: tensor<3x4x5xf32>, %arg1: tensor<5x6xf32>) -> tensor<3x4x6xf32> {
      %0 = "tf.BatchMatMul"(%arg0, %arg1) : (tensor<3x4x5xf32>, tensor<5x6xf32>) -> tensor<3x4x6xf32>
      func.return %0 : tensor<3x4x6xf32>
    
      // CHECK-LABEL: batchMatMulUnbatchedRight
    - Last Modified: Wed Dec 06 18:42:28 UTC 2023
    - 63.7K bytes
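
    A short NumPy sketch (not from the file) of the "unbatched right" case: a rank-2 right operand shared across the batch is equivalent to one MatMul per batch slice, packed back together, which is the rewrite the MATMUL_PACKED check above refers to.

      import numpy as np

      lhs = np.random.rand(3, 4, 5)
      rhs = np.random.rand(5, 6)        # shared across the batch dimension

      batched = lhs @ rhs               # rhs broadcasts over the batch dim
      assert batched.shape == (3, 4, 6)

      # Unrolled form: one plain matmul per batch slice, then stack/pack.
      unrolled = np.stack([lhs[i] @ rhs for i in range(3)])
      assert np.allclose(batched, unrolled)
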
  5. tensorflow/compiler/mlir/tensorflow/tests/mlir2graphdef/unique_output_name.mlir

    module attributes {tf.versions = {producer = 946 : i32}, tf_saved_model.semantics, tfl.description = "MLIR Converted.", tfl.schema_version = 3 : i32}  {
    - Last Modified: Fri Mar 25 12:28:56 UTC 2022
    - 1.5K bytes
  6. tensorflow/compiler/mlir/tf2xla/tests/legalize-tf-BatchMatMulV2.mlir

    // CHECK:           [[LHSBCAST:%.*]] = "mhlo.dynamic_broadcast_in_dim"([[LHS]], [[LHSSHAPEEXTENTS]]) <{broadcast_dimensions = dense<[0, 1, 2]> : tensor<3xi64>}> : (tensor<1x4x2xf32>, tensor<3xindex>) -> tensor<3x4x2xf32>
    // CHECK:           [[RHSBCASTSHAPE:%.*]] = shape.concat [[BCASTHEAD]], [[RHSTAIL]]
    // CHECK:           [[RESULT:%.*]] = "mhlo.dot_general"([[LHSBCAST]], [[RHS]])
    // CHECK:           return [[RESULT]] : tensor<3x4x4xf32>
    - Last Modified: Sat Apr 06 15:32:52 UTC 2024
    - 5.5K bytes
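
    A NumPy sketch of the batch-dimension broadcast this legalization performs before the dot_general. The right-operand shape is not visible in the snippet; (3, 2, 4) is one shape consistent with the broadcast LHS (3, 4, 2) and the (3, 4, 4) result.

      import numpy as np

      lhs = np.random.rand(1, 4, 2)
      rhs = np.random.rand(3, 2, 4)     # assumed right-operand shape

      out = lhs @ rhs                   # batch dim 1 broadcasts against 3
      assert out.shape == (3, 4, 4)

      # Explicit form of the lowering: broadcast first, then batch matmul.
      lhs_bcast = np.broadcast_to(lhs, (3, 4, 2))
      assert np.allclose(out, lhs_bcast @ rhs)
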
  7. tensorflow/compiler/mlir/quantization/stablehlo/tests/pipelines/process_nchw_tensor.mlir

          stablehlo.return %6 : tensor<f32>
      }) {
        window_dimensions = array<i64: 1, 1, 2, 2>,
        window_strides = array<i64: 1, 1, 2, 2>
      } : (tensor<1x4x5x5xf32>, tensor<f32>) -> tensor<1x4x2x2xf32>
      return %4 : tensor<1x4x2x2xf32>
    }
    // CHECK-DAG: %[[WEIGHT_CONST:.+]] = stablehlo.constant {{.*}} : tensor<3x3x2x4xf32>
    // CHECK-DAG: %[[BIAS_CONST:.+]] = stablehlo.constant {{.*}} : tensor<1x5x5x4xf32>
    - Last Modified: Thu Apr 18 20:32:46 UTC 2024
    - 12.6K bytes
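
    A NumPy sketch of the reduce_window in the snippet: 2x2 windows with stride 2 over the trailing H and W dims of an NCHW tensor, so the 5x5 spatial extent shrinks to floor((5 - 2) / 2) + 1 = 2. The reduction body is not shown, so max is assumed here.

      import numpy as np

      x = np.random.rand(1, 4, 5, 5)
      out = np.empty((1, 4, 2, 2))
      for i in range(2):
          for j in range(2):
              window = x[:, :, 2 * i : 2 * i + 2, 2 * j : 2 * j + 2]
              out[:, :, i, j] = window.max(axis=(2, 3))
      assert out.shape == (1, 4, 2, 2)
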
  8. tensorflow/compiler/mlir/lite/tests/prepare-quantize-signed.mlir

      %w = arith.constant dense<[[[[-1.0, 1.0]]], [[[1.0, 2.0]]], [[[-2.0, 1.0]]]]> : tensor<3x1x1x2xf32>
      %b = arith.constant dense<[1.0e-2, 2.1473647e1, -2.1473647e2]> : tensor<3xf32>
      %transpose_conv = "tfl.transpose_conv"(%arg1, %w, %0, %b) {
        padding = "SAME", stride_h = 1 : i32, stride_w = 1 : i32, fused_activation_function = "NONE"
      } : (tensor<4xi32>, tensor<3x1x1x2xf32>, tensor<1x5x5x2xf32>, tensor<3xf32>) -> tensor<1x5x5x3xf32>
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 18.4K bytes
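
    With a 1x1 kernel, stride 1 and SAME padding, the transpose convolution above has no spatial coupling, so it reduces to a per-pixel channel matrix. A NumPy sketch under the assumption that the (3, 1, 1, 2) weight is laid out as (output channels, kernel H, kernel W, input channels); the values are illustrative.

      import numpy as np

      x = np.random.rand(1, 5, 5, 2)    # NHWC input
      w = np.random.rand(3, 1, 1, 2)    # assumed (out, kH, kW, in) layout

      # Each pixel's 2 input channels map to 3 output channels by the same
      # 3x2 matrix, giving the (1, 5, 5, 3) result in the snippet.
      out = np.einsum('nhwi,oi->nhwo', x, w.reshape(3, 2))
      assert out.shape == (1, 5, 5, 3)
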
  9. tensorflow/compiler/mlir/lite/tests/prepare-composite-functions-tf.mlir

      %1 = "tf.Const"() {value = dense<[2, 16, 2]> : tensor<3xi32>} : () -> tensor<3xi32>
      %2 = "tf.Const"() {value = dense<[2, 4, 4, 1]> : tensor<4xi32>} : () -> tensor<4xi32>
      %3 = "tf.Sub"(%0, %arg1) {device = ""} : (tensor<1x4x4x2xf32>, tensor<2x4x4x2xf32>) -> tensor<2x4x4x2xf32>
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 122.1K bytes
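
    The tf.Sub in the snippet relies on ordinary broadcasting: the size-1 leading dimension of the (1, 4, 4, 2) operand stretches against the (2, 4, 4, 2) operand, giving a (2, 4, 4, 2) result. A short NumPy check:

      import numpy as np

      a = np.random.rand(1, 4, 4, 2)
      b = np.random.rand(2, 4, 4, 2)
      assert (a - b).shape == (2, 4, 4, 2)
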
  10. tensorflow/compiler/mlir/lite/tests/quantize-numeric-verify.mlir

      %0 = "quantfork.stats"(%arg0) {
        layerStats = dense<[0.0, 1.0]> : tensor<2xf32>
      } : (tensor<?x5x5x2xf32>) -> tensor<?x5x5x2xf32>
      %1 = "tfl.pseudo_const"() {value = dense<1.000000e+00> : tensor<3x5x5x2xf32>} : () -> tensor<3x5x5x2xf32>
      %2 = "tfl.pseudo_const"() {value = dense<0.000000e+00> : tensor<3xf32>} : () -> tensor<3xf32>
      %3 = "tfl.conv_2d"(%0, %1, %2) {
        dilation_h_factor = 1 : i32, dilation_w_factor = 1 : i32,
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 15.1K bytes
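
    The quantfork.stats op records the observed float range [0.0, 1.0] for the input. A sketch of how such stats translate into affine quantization parameters and a round-trip error bound, assuming the standard signed 8-bit scheme; the pass's exact parameter choice is not visible in the snippet.

      import numpy as np

      mn, mx = 0.0, 1.0                 # layerStats from the snippet
      qmin, qmax = -128, 127            # assumed signed 8-bit range
      scale = (mx - mn) / (qmax - qmin)
      zero_point = int(round(qmin - mn / scale))

      x = np.random.rand(16).astype(np.float32)   # values inside [0, 1]
      q = np.clip(np.round(x / scale) + zero_point, qmin, qmax)
      dequantized = (q - zero_point) * scale

      # Round-trip error is bounded by half a quantization step.
      assert np.all(np.abs(dequantized - x) <= scale / 2 + 1e-7)
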