Results 21 - 29 of 29 for 4x4xf32 (0.48 sec)

  1. tensorflow/compiler/mlir/lite/stablehlo/tests/compose-uniform-quantized-type.mlir

        %9 = stablehlo.convert %2 : (tensor<2x3xi8>) -> tensor<2x3xf32>
        %10 = stablehlo.dot_general %8, %9, contracting_dims = [1] x [0] : (tensor<1x2xf32>, tensor<2x3xf32>) -> tensor<1x3xf32>
        %11 = stablehlo.convert %3 : (tensor<1x3xi32>) -> tensor<1x3xf32>
        %12 = stablehlo.subtract %10, %11 : tensor<1x3xf32>  // q1 * q2 - z1 * q2
        %13 = stablehlo.multiply %12, %4 : tensor<1x3xf32>  // s1 * s2
    - Last Modified: Tue May 14 17:10:32 UTC 2024
    - 37K bytes
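
    A rough NumPy sketch of the arithmetic the inline comments above describe (the integer matmul q1*q2 minus the zero-point term z1*q2, rescaled by s1*s2); the scales, zero point, and values below are illustrative rather than the test's, and the weight zero point is assumed to be 0:

      import numpy as np

      s1, z1 = 0.5, 3      # input scale / zero point (made up for illustration)
      s2 = 0.25            # weight scale (weight zero point assumed 0)
      q1 = np.random.randint(-8, 8, size=(1, 2)).astype(np.int32)   # quantized input
      q2 = np.random.randint(-8, 8, size=(2, 3)).astype(np.int32)   # quantized weight

      # What the matched pattern computes: (q1 @ q2 - z1 * (1 @ q2)) * (s1 * s2)
      acc = q1 @ q2 - z1 * (np.ones_like(q1) @ q2)
      result = acc.astype(np.float32) * (s1 * s2)

      # Reference: dequantize first, then matmul in float.
      reference = ((q1 - z1) * s1) @ (q2 * s2)
      assert np.allclose(result, reference)
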
  2. tensorflow/compiler/mlir/tfrt/tests/fuse_tpu_compile_and_execute_ops.mlir

      %1 = "tf.ReadVariableOp"(%0) {device = "/CPU:0"} : (tensor<!tf_type.resource<tensor<1x1xf32>>>) -> tensor<1x1xf32>
      %2:2 = "tf.Split"(%cst, %arg0) {device = "/CPU:0"} : (tensor<i32>,  tensor<1x4xf32>) -> (tensor<1x2xf32>, tensor<1x2xf32>)
      %3:2 = "tf.Split"(%cst, %2#0) {device = "/CPU:0"} : (tensor<i32>,  tensor<1x2xf32>) -> (tensor<1x1xf32>, tensor<1x1xf32>)
    - Last Modified: Mon Oct 30 06:52:55 UTC 2023
    - 13.8K bytes
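
    For the tf.Split chain above, a small NumPy sketch of the shape arithmetic (a 1x4 tensor split into two 1x2 halves, one of which is split again into 1x1 pieces); the split axis is inferred from the result shapes, since the value of the %cst operand is not shown in the snippet:

      import numpy as np

      x = np.arange(4, dtype=np.float32).reshape(1, 4)   # tensor<1x4xf32>

      a, b = np.split(x, 2, axis=1)      # two tensor<1x2xf32> halves
      c, d = np.split(a, 2, axis=1)      # two tensor<1x1xf32> pieces
      assert a.shape == (1, 2) and c.shape == (1, 1)
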
  3. tensorflow/compiler/mlir/quantization/tensorflow/tests/add_dump_tensor_op.mlir

        return %0 : tensor<2x2xf32>
      }
      func.func private @composite_matmul_fn_1(%arg0: tensor<2x2xf32>, %arg1: tensor<2x2xf32>) -> tensor<2x2xf32> attributes {tf_quant.composite_function} {
        %0 = "tf.MatMul"(%arg0, %arg1) {attr_map = "0:transpose_a,1:transpose_b", device = "", transpose_a = false, transpose_b = false} : (tensor<2x2xf32>, tensor<2x2xf32>) -> tensor<2x2xf32>
        return %0 : tensor<2x2xf32>
      }
    - Last Modified: Fri Mar 22 22:55:22 UTC 2024
    - 37.9K bytes
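
    The composite function above is just a wrapper around a plain tf.MatMul; a minimal NumPy equivalent with the transpose_a/transpose_b attributes spelled out (both false in the test) might look like:

      import numpy as np

      def matmul(a, b, transpose_a=False, transpose_b=False):
          # Mirrors the tf.MatMul attributes carried by the composite function.
          if transpose_a:
              a = a.T
          if transpose_b:
              b = b.T
          return a @ b

      a = np.ones((2, 2), dtype=np.float32)
      b = np.ones((2, 2), dtype=np.float32)
      print(matmul(a, b))    # 2x2 result, every entry 2.0
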
  4. tensorflow/compiler/mlir/lite/tests/flatbuffer2mlir/vhlo.mlir

    //CHECK-NEXT:}
    
    func.func @iota() -> tensor<3x4xf32> {
     %0 = "vhlo.iota_v1" () <{iota_dimension = #vhlo.integer_v1<0 : i64>}> : () -> tensor<3x4xf32>
     return %0 : tensor<3x4xf32>
    }
    
    //CHECK:func.func private @iota() -> tensor<3x4xf32> {
    //CHECK-NEXT: %0 = "vhlo.iota_v1"() <{iota_dimension = #vhlo.integer_v1<0 : i64>}> : () -> tensor<3x4xf32>
    //CHECK-NEXT: return %0 : tensor<3x4xf32>
    //CHECK-NEXT:}
    
    - Last Modified: Thu Mar 14 19:15:40 UTC 2024
    - 31.9K bytes
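
    A short NumPy sketch of what the iota above evaluates to: each element takes the value of its index along iota_dimension = 0, so the rows of the 3x4 result are the constants 0, 1, 2.

      import numpy as np

      def iota(shape, dim, dtype=np.float32):
          # Element value == its coordinate along `dim`, broadcast over the rest.
          idx = np.arange(shape[dim], dtype=dtype)
          idx = idx.reshape([-1 if i == dim else 1 for i in range(len(shape))])
          return np.broadcast_to(idx, shape).copy()

      print(iota((3, 4), dim=0))
      # [[0. 0. 0. 0.]
      #  [1. 1. 1. 1.]
      #  [2. 2. 2. 2.]]
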
  5. tensorflow/compiler/mlir/tf2xla/tests/legalize-tf-prefer-tf2xla.mlir

    // NOFALLBACK-LABEL: @xla_svd
    func.func @xla_svd(%arg0: tensor<1x1xf32>) -> (tensor<1xf32>, tensor<1x1xf32>, tensor<1x1xf32>) {
      // NOFALLBACK: XlaSvd
      %s, %u, %v = "tf.XlaSvd"(%arg0) {max_iter = 1, epsilon = 1.0E-09 : f32, precision_config = ""} : (tensor<1x1xf32>) -> (tensor<1xf32>, tensor<1x1xf32>, tensor<1x1xf32>)
      func.return %s, %u, %v : tensor<1xf32>, tensor<1x1xf32>, tensor<1x1xf32>
    }
    
    - Last Modified: Sat Apr 06 15:32:52 UTC 2024
    - 15.8K bytes
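
    The tf.XlaSvd call above factors a 1x1 matrix into singular values plus left and right singular vectors; as a rough analogy only (np.linalg.svd stands in for the XLA op, which is not what the test checks), the result shapes line up like this:

      import numpy as np

      x = np.array([[-2.0]], dtype=np.float32)    # tensor<1x1xf32>
      u, s, vh = np.linalg.svd(x)                 # s: (1,), u: (1, 1), v = vh.T: (1, 1)

      assert np.allclose(u @ np.diag(s) @ vh, x, atol=1e-6)
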
  6. tensorflow/compiler/mlir/lite/tests/quantize.mlir

    }
    
    // CHECK-LABEL: QuantizeConcat
    func.func @QuantizeConcat(tensor<1x2xf32>, tensor<1x2xf32>) -> tensor<2x2x!quant.uniform<u8:f32, 1.000000e-01:128>> {
    ^bb0(%arg0: tensor<1x2xf32>, %arg1: tensor<1x2xf32>):
      %0 = "tfl.concatenation"(%arg0, %arg1) {axis = 0 : i32, fused_activation_function = "NONE"} : (tensor<1x2xf32>, tensor<1x2xf32>) -> tensor<2x2xf32>
    - Last Modified: Tue May 28 23:10:13 UTC 2024
    - 39.7K bytes
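
    For the QuantizeConcat test above, a NumPy sketch of the concat plus the affine quantization implied by !quant.uniform<u8:f32, 1.000000e-01:128> (scale 0.1, zero point 128); round-to-nearest and the example values are assumptions for illustration:

      import numpy as np

      a = np.array([[0.1, -0.2]], dtype=np.float32)   # tensor<1x2xf32>
      b = np.array([[0.3,  0.4]], dtype=np.float32)

      # tfl.concatenation along axis 0: two 1x2 inputs -> one 2x2 output.
      c = np.concatenate([a, b], axis=0)

      # Affine quantization: q = clamp(round(x / scale) + zero_point, 0, 255).
      scale, zero_point = 0.1, 128
      q = np.clip(np.round(c / scale) + zero_point, 0, 255).astype(np.uint8)
      print(q)   # [[129 126]
                 #  [131 132]]
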
  7. tensorflow/compiler/mlir/tensorflow/tests/einsum.mlir

    }
    
    func.func @einsum_matmul(%arg0: tensor<7x9xf32>, %arg1: tensor<9x5xf32>) -> tensor<7x5xf32> {
      %0 = "tf.Einsum"(%arg0, %arg1) {T = "tfdtype$DT_FLOAT", equation = "ae,ed->ad"}: (tensor<7x9xf32>, tensor<9x5xf32>) -> tensor<7x5xf32>
      func.return %0 : tensor<7x5xf32>
      // CHECK-LABEL: einsum_matmul
      // CHECK: %[[v0:.*]] = "tf.BatchMatMulV2"(%arg0, %arg1) <{adj_x = false, adj_y = false}> : (tensor<7x9xf32>, tensor<9x5xf32>) -> tensor<7x5xf32>
    - Last Modified: Fri Jan 05 18:35:42 UTC 2024
    - 25.9K bytes
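
    The einsum above, "ae,ed->ad", contracts the shared e-axis and nothing else, which is exactly a matrix product and is why the pass can rewrite it to tf.BatchMatMulV2; a one-line NumPy check:

      import numpy as np

      a = np.random.rand(7, 9).astype(np.float32)
      b = np.random.rand(9, 5).astype(np.float32)
      assert np.allclose(np.einsum("ae,ed->ad", a, b), a @ b, atol=1e-5)
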
  8. tensorflow/compiler/mlir/tensorflow/tests/constant-fold.mlir

      %0 = "tf.Div"(%arg0, %cst) : (tensor<2x2xf32>, tensor<2x2xf32>) -> tensor<2x2xf32>
      func.return %0 : tensor<2x2xf32>
    
      // CHECK-LABEL: RemoveTrivialDiv
      // CHECK-NEXT: return %arg0 : tensor<2x2xf32>
    }
    
    func.func @RemoveTrivialRealDiv(%arg0: tensor<2x2xf32>, %arg1: tensor<2x2xf32>) -> tensor<2x2xf32> {
      %cst = arith.constant dense<1.0> : tensor<2x2xf32>
    - Last Modified: Wed Jan 31 23:22:24 UTC 2024
    - 36.7K bytes
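
    The fold above removes a division by a splat constant of ones: x / 1 is x elementwise, so the CHECK lines expect the op to be replaced by its first operand. In NumPy terms:

      import numpy as np

      x = np.random.rand(2, 2).astype(np.float32)
      ones = np.ones((2, 2), dtype=np.float32)    # the dense<1.0> constant

      # Division by exactly 1.0 is the identity, so the folder drops the op.
      assert np.array_equal(x / ones, x)
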
  9. tensorflow/compiler/mlir/lite/tests/const-fold.mlir

      %7 = "tfl.add"(%2, %1) {fused_activation_function = "NONE"} : (tensor<4xf32>, tensor<  f32>) -> tensor<4xf32>
      %8 = "tfl.add"(%2, %3) {fused_activation_function = "NONE"} : (tensor<4xf32>, tensor<4xf32>) -> tensor<4xf32>
      %9 = "tfl.add"(%2, %3) {fused_activation_function = "SIGN_BIT"} : (tensor<4xf32>, tensor<4xf32>) -> tensor<4xf32>
    
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 45.8K bytes
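
    For the tfl.add constant folds above, a NumPy sketch of the first two additions (a scalar broadcast onto a 4-vector, then an elementwise 4-vector add); the SIGN_BIT fused activation on the third op is deliberately left out rather than guessed at, and the values are illustrative:

      import numpy as np

      v = np.array([1.0, 2.0, 3.0, 4.0], dtype=np.float32)   # tensor<4xf32>
      s = np.float32(0.5)                                     # tensor<f32> scalar
      w = np.full(4, 0.5, dtype=np.float32)                   # tensor<4xf32>

      print(v + s)   # scalar broadcast: [1.5 2.5 3.5 4.5]
      print(v + w)   # elementwise add:  [1.5 2.5 3.5 4.5]
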