Results 1 - 10 of 36 for 32x12xf32 (0.36 sec)

  1. tensorflow/compiler/mlir/lite/tests/quantize-numeric-verify.mlir

      %4 = "tfl.dequantize"(%3) : (tensor<32x12x!quant.uniform<u8<1:255>:f32, 0.021826678373682216:151>>) -> tensor<32x12xf32>
      %5 = "tfl.fully_connected"(%2, %4, %cst) {fused_activation_function = "NONE", keep_num_dims = false, weights_format = "DEFAULT"} : (tensor<1x224x224x3xf32>, tensor<32x12xf32>, tensor<32xf32>) -> tensor<1x112x112x32xf32>
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 15.1K bytes
    - Viewed (0)
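    Note: the !quant.uniform<u8<1:255>:f32, 0.021826678373682216:151> type describes affine quantization with unsigned 8-bit storage (restricted here to [1, 255]), a scale of about 0.02183, and a zero point of 151. A minimal sketch of the dequantization arithmetic implied by tfl.dequantize, assuming the standard real = scale * (q - zero_point) formula:

      import numpy as np

      scale, zero_point = 0.021826678373682216, 151
      q = np.array([1, 151, 255], dtype=np.uint8)         # sample stored values in the restricted [1, 255] range
      real = scale * (q.astype(np.float32) - zero_point)  # affine dequantization: real = scale * (q - zero_point)
      print(real)                                         # approx [-3.274, 0.0, 2.270]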
  2. tensorflow/compiler/mlir/lite/tests/quantize.mlir

    func.func @NotQuantizeFloatConst() -> tensor<2x2xf32> {
      %0 = arith.constant dense<-0.1> : tensor<2x2xf32>
      %1 = "tfl.quantize"(%0) {qtype = tensor<2x2x!quant.uniform<u8:f32, 7.8431372549019615E-4:128>>} : (tensor<2x2xf32>) -> tensor<2x2x!quant.uniform<u8:f32, 7.8431372549019615E-4:128>>
      %2 = "tfl.dequantize"(%1) : (tensor<2x2x!quant.uniform<u8:f32, 7.8431372549019615E-4:128>>) -> tensor<2x2xf32>
      func.return %2 : tensor<2x2xf32>
    
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue May 28 23:10:13 UTC 2024
    - 39.7K bytes
    - Viewed (0)
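    Note: this test quantizes the constant -0.1 with scale 7.8431372549019615e-4 (about 0.2/255) and zero point 128, then immediately dequantizes it. A rough sketch of that round trip; the exact rounding and clamping behaviour below is an assumption, not taken from the pass:

      import numpy as np

      scale, zero_point = 7.8431372549019615e-4, 128
      x = np.float32(-0.1)
      q = np.clip(np.round(x / scale) + zero_point, 0, 255)   # quantize (rounding mode assumed)
      x_back = scale * (q - zero_point)                        # dequantize
      print(q, x_back)                                         # the recovered value is close to, but not exactly, -0.1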
  3. tensorflow/compiler/mlir/lite/tests/prepare-quantize.mlir

    // execute the production code path.
    func.func @main(%arg0: tensor<2x1xf32>, %arg1: tensor<2x3xf32>) -> (tensor<2x4xf32>) {
      %0 = "tfl.quantize"(%arg0) {qtype = tensor<2x1x!quant.uniform<i16:f32, 1.0>>} : (tensor<2x1xf32>) -> tensor<2x1x!quant.uniform<i16:f32, 1.0>>
      %1 = "tfl.dequantize"(%0) : (tensor<2x1x!quant.uniform<i16:f32, 1.0>>) -> (tensor<2x1xf32>)
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 67.5K bytes
    - Viewed (0)
  4. tensorflow/compiler/mlir/lite/tests/prepare-quantize-signed.mlir

    }
    
    // CHECK-LABEL: prepareAdd
    func.func @prepareAdd(%arg0: tensor<2x2xf32>) -> tensor<2x2xf32> {
      %cst = arith.constant dense<[[0.0, 1.0], [2.0, 255.0]]> : tensor<2x2xf32>
      %add = "tfl.add"(%arg0, %cst) {fused_activation_function="NONE"} : (tensor<2x2xf32>, tensor<2x2xf32>) -> tensor<2x2xf32>
      func.return %add : tensor<2x2xf32>
    
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 18.4K bytes
    - Viewed (0)
  5. tensorflow/compiler/mlir/lite/tests/prepare-composite-functions-tf.mlir

      %10:2 = "tf.Unpack"(%arg1) {axis = 2 : i64, device = ""} : (tensor<2x16x2xf32>) -> (tensor<2x16xf32>, tensor<2x16xf32>)
      %11 = "tf.Floor"(%10#0) {device = ""} : (tensor<2x16xf32>) -> tensor<2x16xf32>
      %12 = "tf.Maximum"(%0, %11) {device = ""} : (tensor<f32>, tensor<2x16xf32>) -> tensor<2x16xf32>
      %13 = "tf.Minimum"(%12, %4) {device = ""} : (tensor<2x16xf32>, tensor<f32>) -> tensor<2x16xf32>
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 122.1K bytes
    - Viewed (0)
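    Note: the excerpt floors one unpacked slice and then clamps it between the scalars %0 and %4, i.e. min(max(floor(x), lo), hi). A small NumPy sketch of that pattern with placeholder bounds (the real scalar values are not visible in the excerpt):

      import numpy as np

      x = np.random.randn(2, 16).astype(np.float32)
      lo, hi = np.float32(0.0), np.float32(15.0)       # placeholder bounds; the real scalars are defined earlier in the file
      y = np.minimum(np.maximum(np.floor(x), lo), hi)  # floor, then clamp to [lo, hi]
      print(y.shape)                                   # (2, 16)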
  6. tensorflow/compiler/mlir/lite/tests/optimize.mlir

    func.func @ReorderAddWithConstant(%arg0: tensor<2x2xf32>) -> tensor<2x2xf32> {
      %cst = arith.constant dense<1.0> : tensor<2x2xf32>
      %cst_1 = arith.constant dense<2.0> : tensor<2x2xf32>
      %0 = "tfl.add"(%arg0, %cst) {fused_activation_function = "NONE"} : (tensor<2x2xf32>, tensor<2x2xf32>) -> tensor<2x2xf32>
      %1 = "tfl.add"(%0, %cst_1) {fused_activation_function = "NONE"} : (tensor<2x2xf32>, tensor<2x2xf32>) -> tensor<2x2xf32>
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu May 16 20:31:41 UTC 2024
    - 284.1K bytes
    - Viewed (0)
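    Note: the two chained tfl.add ops both take constant right operands (1.0 and 2.0), so by associativity they can be combined into a single add of 3.0. A quick numeric check of that equivalence:

      import numpy as np

      x = np.random.randn(2, 2).astype(np.float32)
      chained = (x + 1.0) + 2.0        # two adds, as written in the test
      combined = x + 3.0               # a single add with the folded constant
      print(np.allclose(chained, combined))   # True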
  7. tensorflow/compiler/mlir/lite/tests/const-fold.mlir

    func.func @add_dense_dense_float_mixfng_1_n() -> tensor<2x2xf32> {
      %cst_0 = arith.constant dense<[[1.5, -2.5]]> : tensor<1x2xf32>
      %cst_1 = arith.constant dense<[[-3.], [4.]]> : tensor<2x1xf32>
    
      %0 = "tfl.add"(%cst_0, %cst_1) {fused_activation_function = "NONE"} : (tensor<1x2xf32>, tensor<2x1xf32>) -> tensor<2x2xf32>
    
      func.return %0 : tensor<2x2xf32>
    
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 45.8K bytes
    - Viewed (0)
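    Note: the add broadcasts a 1x2 constant against a 2x1 constant, so the whole 2x2 result can be computed at compile time by constant folding. The same arithmetic in NumPy:

      import numpy as np

      a = np.array([[1.5, -2.5]], dtype=np.float32)    # 1x2 constant
      b = np.array([[-3.0], [4.0]], dtype=np.float32)  # 2x1 constant
      print(a + b)                                     # [[-1.5 -5.5]
                                                       #  [ 5.5  1.5]]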
  8. tensorflow/compiler/mlir/tf2xla/api/v2/testdata/func_with_dead_ops.mlir

          %17 = "tf.Concat"(%cst_1, %16#5, %16#15, %16#25, %16#35) : (tensor<i32>, tensor<32x1024xf32>, tensor<32x1024xf32>, tensor<32x1024xf32>, tensor<32x1024xf32>) -> tensor<128x1024xf32>
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Mon May 13 23:22:50 UTC 2024
    - 15.3K bytes
    - Viewed (0)
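    Note: tf.Concat takes the concatenation axis as its first operand (%cst_1, whose value is not shown in the excerpt); four 32x1024 inputs producing a 128x1024 output implies concatenation along dimension 0, which is assumed below:

      import numpy as np

      parts = [np.zeros((32, 1024), dtype=np.float32) for _ in range(4)]
      out = np.concatenate(parts, axis=0)              # four 32x1024 slices stacked along dimension 0
      print(out.shape)                                 # (128, 1024)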
  9. tensorflow/compiler/mlir/lite/stablehlo/tests/legalize_hlo.mlir

    func.func @torch_index_select(%arg0: tensor<2x1xf32>, %arg1: tensor<2xi32>) -> tensor<2x1xf32> {
      %0 = "mhlo.torch_index_select"(%arg0, %arg1) {
        batch_dims = 0 : i64, dim = 0 : i64
      } : (tensor<2x1xf32>, tensor<2xi32>) -> tensor<2x1xf32>
      func.return %0 : tensor<2x1xf32>
    }
    
    // CHECK-LABEL:   func @lowered_cumsum(
    // CHECK-SAME:      %[[VAL_0:.*]]: tensor<4x12xf32>) -> tensor<4x12xf32> {
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed May 29 07:26:59 UTC 2024
    - 340.2K bytes
    - Viewed (0)
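    Note: with batch_dims = 0 and dim = 0, mhlo.torch_index_select gathers rows of %arg0 using the indices in %arg1, essentially a take along axis 0. A NumPy sketch of the same selection:

      import numpy as np

      arg0 = np.array([[1.0], [2.0]], dtype=np.float32)   # 2x1 input
      arg1 = np.array([1, 0], dtype=np.int32)             # indices into dimension 0
      print(np.take(arg0, arg1, axis=0))                  # [[2.] [1.]], still 2x1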
  10. tensorflow/compiler/mlir/tfrt/tests/ifrt/sink_variable_as_named_array.mlir

        %2 = "tf.ReadVariableOp"(%0) : (tensor<!tf_type.resource<tensor<3x1xf32>>>) -> tensor<3x1xf32>
        %3 = "tf.MatMul"(%arg0, %2) : (tensor<1x3xf32>, tensor<3x1xf32>) -> tensor<1x1xf32>
        %result = "tf.IfrtCall"(%arg0, %2) <{program_id = 6515870160938153680 : i64, variable_arg_indices = []}> : (tensor<1x3xf32>, tensor<3x1xf32>) -> (tensor<1x1xf32>)
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Jun 06 15:33:17 UTC 2024
    - 5.3K bytes
    - Viewed (0)
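    Note: the snippet reads a 3x1 value from a resource variable and multiplies the 1x3 input against it, producing a 1x1 result; the same operands are then handed to tf.IfrtCall. The shape arithmetic of that matmul:

      import numpy as np

      arg0 = np.random.randn(1, 3).astype(np.float32)   # the 1x3 input
      var = np.random.randn(3, 1).astype(np.float32)    # the 3x1 value read from the resource variable
      print((arg0 @ var).shape)                         # (1, 1)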