Results 1 - 10 of 13 for 1x64x32xf32 (0.16 sec)

  1. tensorflow/compiler/mlir/lite/stablehlo/tests/composite-lowering.mlir

      %44 = "mhlo.broadcast_in_dim"(%0) <{broadcast_dimensions = dense<[2, 3]> : tensor<2xi64>}> : (tensor<32x1xf32>) -> tensor<1x64x32x1xf32>
      %45 = mhlo.reshape %44 : (tensor<1x64x32x1xf32>) -> tensor<1x64x32xf32>
      %46 = "mhlo.broadcast_in_dim"(%45) <{broadcast_dimensions = dense<[0, 1, 2]> : tensor<3xi64>}> : (tensor<1x64x32xf32>) -> tensor<1x64x32x32xf32>
      %47 = mhlo.multiply %43, %46 : tensor<1x64x32x32xf32>
    - Last Modified: Thu Jun 06 18:45:51 UTC 2024
    - 32.6K bytes
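    Note: the chained ops above produce the searched-for 1x64x32xf32. As a minimal
    sketch (function name hypothetical, shapes copied from the excerpt),
    broadcast_in_dim maps input dimension i to output dimension broadcast_dimensions[i],
    so the 32x1 input lands in output dimensions 2 and 3 while the new leading
    dimensions are filled to sizes 1 and 64; the reshape then drops the trailing unit
    dimension:

      func.func @broadcast_then_reshape(%arg0: tensor<32x1xf32>) -> tensor<1x64x32xf32> {
        // Input dim 0 (size 32) -> output dim 2; input dim 1 (size 1) -> output dim 3.
        %0 = "mhlo.broadcast_in_dim"(%arg0) <{broadcast_dimensions = dense<[2, 3]> : tensor<2xi64>}> : (tensor<32x1xf32>) -> tensor<1x64x32x1xf32>
        // Dropping the trailing unit dimension gives the 1x64x32xf32 hit.
        %1 = mhlo.reshape %0 : (tensor<1x64x32x1xf32>) -> tensor<1x64x32xf32>
        func.return %1 : tensor<1x64x32xf32>
      }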
  2. tensorflow/compiler/mlir/lite/stablehlo/tests/compose-uniform-quantized-type.mlir

        %13 = stablehlo.subtract %10, %12 : tensor<1x4x3xf32>  // Precalculated zp_neg.
        %14 = stablehlo.broadcast_in_dim %4, dims = [0, 1, 2] : (tensor<1x1x3xf32>) -> tensor<1x4x3xf32>  // Optional
        %15 = stablehlo.multiply %13, %14 : tensor<1x4x3xf32>  // s1 * s2
        %16 = call @uniform_quantize_1(%15, %5, %6) : (tensor<1x4x3xf32>, tensor<1x1x1xf32>, tensor<1x1x1xi8>) -> tensor<1x4x3xi8>
    - Last Modified: Tue May 14 17:10:32 UTC 2024
    - 37K bytes
  3. tensorflow/compiler/mlir/lite/experimental/tac/tests/fold-constants-to-subgraph.mlir

      %0 = "tfl.slice"(%arg0, %arg1, %arg2) : (tensor<4x384x32xf32>, tensor<3xi32>, tensor<3xi32>) -> tensor<1x384x32xf32>
      func.return %0 : tensor<1x384x32xf32>
    }
    
    // PARTIAL:       func @simple_test(%[[VAL_0:.*]]: tensor<4x384x32xf32>, %[[VAL_1:.*]]: tensor<3xi32>, %[[VAL_2:.*]]: tensor<3xi32>) -> tensor<1x384x32xf32> attributes {tac.interface_name = "func1"} {
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 10.5K bytes
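    Note: the excerpt does not show what %arg1 and %arg2 contain. A self-contained
    sketch (constant values and function name are illustrative, mirroring the fuller
    pattern in result 5 below) shows how tfl.slice's operands work: begin gives the
    starting offset per dimension and size is also the result shape, so [1, 384, 32]
    carves a single batch out of the 4x384x32 input:

      func.func @slice_first_batch(%arg0: tensor<4x384x32xf32>) -> tensor<1x384x32xf32> {
        // begin = [0, 0, 0]: start at the origin in every dimension.
        %begin = "tfl.pseudo_const"() {value = dense<0> : tensor<3xi32>} : () -> tensor<3xi32>
        // size = [1, 384, 32]: the requested extent doubles as the result shape.
        %size = "tfl.pseudo_const"() {value = dense<[1, 384, 32]> : tensor<3xi32>} : () -> tensor<3xi32>
        %0 = "tfl.slice"(%arg0, %begin, %size) : (tensor<4x384x32xf32>, tensor<3xi32>, tensor<3xi32>) -> tensor<1x384x32xf32>
        func.return %0 : tensor<1x384x32xf32>
      }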
  4. tensorflow/compiler/mlir/tf2xla/tests/legalize-tf-BatchMatMulV2.mlir

    func.func @batchmatmulv2_basic(%arg0: tensor<1x4x2xf32>, %arg1: tensor<3x2x4xf32>) -> tensor<3x4x4xf32> {
    // CHECK-LABEL:   func @batchmatmulv2_basic
    // CHECK-SAME:        ([[LHS:%.*]]: tensor<1x4x2xf32>, [[RHS:%.*]]: tensor<3x2x4xf32>) -> tensor<3x4x4xf32>
    // CHECK:           [[LHSSHAPE:%.*]] = shape.shape_of [[LHS]] : tensor<1x4x2xf32>
    // CHECK:           [[RHSSHAPE:%.*]] = shape.shape_of [[RHS]] : tensor<3x2x4xf32>
    - Last Modified: Sat Apr 06 15:32:52 UTC 2024
    - 5.5K bytes
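    Note: the shapes in this test rely on batch broadcasting: the LHS batch dimension
    of 1 broadcasts against the RHS batch of 3, and the 4x2 and 2x4 matrices multiply
    to 4x4, giving tensor<3x4x4xf32>. A minimal sketch of such an input (not copied
    from the file; attributes trimmed to the two adjoint flags):

      func.func @batchmatmul_broadcast(%lhs: tensor<1x4x2xf32>, %rhs: tensor<3x2x4xf32>) -> tensor<3x4x4xf32> {
        // Batch dims broadcast 1 vs. 3 -> 3; matrix dims contract 4x2 * 2x4 -> 4x4.
        %0 = "tf.BatchMatMulV2"(%lhs, %rhs) {adj_x = false, adj_y = false} : (tensor<1x4x2xf32>, tensor<3x2x4xf32>) -> tensor<3x4x4xf32>
        func.return %0 : tensor<3x4x4xf32>
      }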
  5. tensorflow/compiler/mlir/lite/experimental/tac/tests/device-transform-gpu.mlir

    func.func @padSliceTo4D(%arg0: tensor<4x384x32xf32>) -> tensor<1x384x32xf32> {
      %0 = "tfl.pseudo_const"() {value = dense<0> : tensor<3xi32>} : () -> tensor<3xi32>
      %1 = "tfl.pseudo_const"() {value = dense<[1, 384, 32]> : tensor<3xi32>} : () -> tensor<3xi32>
      %2 = "tfl.slice"(%arg0, %0, %1) : (tensor<4x384x32xf32>, tensor<3xi32>, tensor<3xi32>) -> tensor<1x384x32xf32>
      func.return %2 : tensor<1x384x32xf32>
    }
    
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 15.6K bytes
  6. tensorflow/compiler/mlir/lite/tests/quantize-variables.mlir

      %43 = "quantfork.stats"(%42) {layerStats = dense<[-1.0, 1.0]> : tensor<2xf32>} : (tensor<1x4x3xf32>) -> tensor<1x4x3xf32>
      %44 = "tfl.strided_slice"(%43, %1, %2, %3) {begin_mask = 7 : i32, ellipsis_mask = 0 : i32, end_mask = 5 : i32, new_axis_mask = 0 : i32, shrink_axis_mask = 0 : i32, offset = false} : (tensor<1x4x3xf32>, tensor<3xi32>, tensor<3xi32>, tensor<3xi32>) -> tensor<1x2x3xf32>
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 20.3K bytes
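    Note: the mask attributes in the excerpt are per-dimension bitmasks. begin_mask = 7
    (0b111) tells strided_slice to ignore the begin values in all three dimensions, and
    end_mask = 5 (0b101) to ignore the end values in dimensions 0 and 2, so only the end
    bound on dimension 1 takes effect; that is how a 1x4x3 input becomes 1x2x3. A sketch
    with illustrative begin/end/stride constants (the excerpt does not show %1, %2 and %3,
    so these values are assumptions consistent with the result type):

      func.func @strided_slice_masks(%arg0: tensor<1x4x3xf32>) -> tensor<1x2x3xf32> {
        %begin = "tfl.pseudo_const"() {value = dense<0> : tensor<3xi32>} : () -> tensor<3xi32>
        %end = "tfl.pseudo_const"() {value = dense<[1, 2, 3]> : tensor<3xi32>} : () -> tensor<3xi32>
        %strides = "tfl.pseudo_const"() {value = dense<1> : tensor<3xi32>} : () -> tensor<3xi32>
        // Only dim 1's end bound (2) is honored; begin and the other end bounds are masked off.
        %0 = "tfl.strided_slice"(%arg0, %begin, %end, %strides) {begin_mask = 7 : i32, ellipsis_mask = 0 : i32, end_mask = 5 : i32, new_axis_mask = 0 : i32, shrink_axis_mask = 0 : i32, offset = false} : (tensor<1x4x3xf32>, tensor<3xi32>, tensor<3xi32>, tensor<3xi32>) -> tensor<1x2x3xf32>
        func.return %0 : tensor<1x2x3xf32>
      }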
  7. tensorflow/compiler/mlir/tf2xla/tests/legalize-tf-include-tf2xla-fallback.mlir

    // support dynamic shaped operands like the native lowering. Verify that
    // fallback lowering is preferred for static shaped operands when available.
    
    // CHECK-LABEL: batchmatmulv2
    func.func @batchmatmulv2(%arg0: tensor<1x4x2xf32>, %arg1: tensor<3x2x4xf32>) -> tensor<3x4x4xf32> {
      // NO_FALLBACK: mhlo.dynamic_broadcast_in_dim
      // NO_FALLBACK: mhlo.dot_general
    
      // SUPPORTED_FALLBACK_DEVICE: mhlo.reduce
    - Last Modified: Thu Nov 16 19:04:03 UTC 2023
    - 3.2K bytes
  8. tensorflow/compiler/mlir/tf2xla/transforms/verify_tfxla_legalization_test.cc

        func.func @main(%a : tensor<5x14x1xf32>, %b : tensor<1x14x32xf32>) -> tensor<?x?x?xf32> attributes {tf.entry_function = {control_outputs = "", inputs = "i,j", outputs = "identity_RetVal"}} {
          %c = "mhlo.einsum"(%a, %b) {einsum_config = "bji,bjk->bik"} : (tensor<5x14x1xf32>, tensor<1x14x32xf32>) -> tensor<?x?x?xf32>
          return %c : tensor<?x?x?xf32>
        }
      })";
    - Last Modified: Wed Sep 06 19:12:29 UTC 2023
    - 7.5K bytes
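    Note: in the einsum_config "bji,bjk->bik", b is the batch label, j is summed away,
    and i and k are the free labels of the two operands; the test deliberately leaves
    the result type dynamic. A small static-shape sketch of the same equation (shapes
    chosen purely for illustration, not taken from the test):

      func.func @einsum_bik(%a: tensor<2x14x3xf32>, %b: tensor<2x14x32xf32>) -> tensor<2x3x32xf32> {
        // b = 2 (batch), j = 14 (contracted), i = 3, k = 32.
        %0 = "mhlo.einsum"(%a, %b) {einsum_config = "bji,bjk->bik"} : (tensor<2x14x3xf32>, tensor<2x14x32xf32>) -> tensor<2x3x32xf32>
        func.return %0 : tensor<2x3x32xf32>
      }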
  9. tensorflow/compiler/mlir/tf2xla/api/v2/legalize_tf_test.cc

          %%arg0 = "tf.Const"() {value = dense<-3.0> : tensor<1x4x2xf32>} : () -> tensor<1x4x2xf32>
          %%arg1 = "tf.Const"() {value = dense<-3.0> : tensor<1x2x4xf32>} : () -> tensor<1x2x4xf32>
    
          %%1 = "tf.%s"(%%arg0, %%arg1) {T = f32, adj_x = false, adj_y = false, grad_x = false, grad_y = false, device = ""} : (tensor<1x4x2xf32>, tensor<1x2x4xf32>) -> tensor<1x4x4xf32>
    
          func.return %%1 : tensor<1x4x4xf32>
    - Last Modified: Thu Jun 13 23:59:33 UTC 2024
    - 16.1K bytes
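    Note: the doubled percent signs above are not MLIR syntax; the module lives inside a
    printf-style format string in the C++ test, so %% renders as a literal % and %s is
    replaced by the name of the op under test. With the placeholder filled in (using
    tf.BatchMatMulV2 as one illustrative substitution), the body reads:

      %arg0 = "tf.Const"() {value = dense<-3.0> : tensor<1x4x2xf32>} : () -> tensor<1x4x2xf32>
      %arg1 = "tf.Const"() {value = dense<-3.0> : tensor<1x2x4xf32>} : () -> tensor<1x2x4xf32>
      %1 = "tf.BatchMatMulV2"(%arg0, %arg1) {T = f32, adj_x = false, adj_y = false, grad_x = false, grad_y = false, device = ""} : (tensor<1x4x2xf32>, tensor<1x2x4xf32>) -> tensor<1x4x4xf32>
      func.return %1 : tensor<1x4x4xf32>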
  10. tensorflow/compiler/mlir/quantization/tensorflow/tests/quantize_composite_functions.mlir

    // CHECK: Number of dequantize layers added: 1
    }
    
    
    // -----
    
    module {
      func.func @float_einsum(%arg0: tensor<?x64x32xf32>, %arg1: tensor<32x2x16xf32>) -> (tensor<?x64x2x16xf32>) {
        %0 = "tf.Einsum"(%arg0, %arg1) {equation = "abc,cde->abde"} : (tensor<?x64x32xf32>, tensor<32x2x16xf32>) -> tensor<?x64x2x16xf32>
        func.return %0 : tensor<?x64x2x16xf32>
      }
    
    // CHECK-LABEL: func @float_einsum
    - Last Modified: Mon Nov 06 01:23:21 UTC 2023
    - 15.2K bytes
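    Note: in the equation "abc,cde->abde", c (size 32) is the only contracted label; a and
    b come from the first operand and d and e from the second, which is why ?x64x32 times
    32x2x16 yields ?x64x2x16. The same derivation with a concrete batch size (a purely
    illustrative static variant, not from the file):

      func.func @einsum_static(%arg0: tensor<8x64x32xf32>, %arg1: tensor<32x2x16xf32>) -> tensor<8x64x2x16xf32> {
        // a = 8, b = 64, c = 32 (contracted), d = 2, e = 16.
        %0 = "tf.Einsum"(%arg0, %arg1) {equation = "abc,cde->abde"} : (tensor<8x64x32xf32>, tensor<32x2x16xf32>) -> tensor<8x64x2x16xf32>
        func.return %0 : tensor<8x64x2x16xf32>
      }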