Results 21 - 30 of 40 for 256x1xf32 (0.14 sec)

  1. tensorflow/compiler/mlir/tensorflow/tests/fold-broadcast.mlir

    }
    
    // CHECK-LABEL: @broadcast_add_implicit_fold
    func.func @broadcast_add_implicit_fold(%arg0: tensor<5x1xf32>, %arg1: tensor<7xf32>) -> tensor<5x7xf32> {
      %cst = arith.constant dense<[5, 7]> : tensor<2xi32>
      %0 = "tf.BroadcastTo"(%arg1, %cst) : (tensor<7xf32>, tensor<2xi32>) -> tensor<5x7xf32>
      %1 = "tf.AddV2"(%arg0, %0) : (tensor<5x1xf32>, tensor<5x7xf32>) -> tensor<5x7xf32>
      func.return %1 : tensor<5x7xf32>
    - Last Modified: Mon Oct 30 06:52:55 UTC 2023
    - 6.6K bytes
  2. tensorflow/compiler/mlir/quantization/tensorflow/tests/prepare_lifting.mlir

      %cst_0 = "tf.Const"() {value = dense<0.400000e+00> : tensor<256xf32>} : () -> tensor<256xf32>
      %cst_1 = "tf.Const"() {value = dense<0.500000e+00> : tensor<256xf32>} : () -> tensor<256xf32>
      %w = "tf.AddV2"(%cst, %cst_1) : (tensor<48x48x3x1xf32>, tensor<256xf32>) -> tensor<48x48x3x256xf32>
    - Last Modified: Wed Feb 14 03:24:59 UTC 2024
    - 33.3K bytes
  3. tensorflow/compiler/mlir/tfrt/tests/fuse_tpu_compile_and_execute_ops.mlir

      %1 = "tf.ReadVariableOp"(%0) {device = "/CPU:0"} : (tensor<!tf_type.resource<tensor<2x1xf32>>>) -> tensor<2x1xf32>
      %2:2 = "tf.Split"(%cst, %arg0) {device = "/CPU:0"} : (tensor<i32>,  tensor<1x4xf32>) -> (tensor<1x2xf32>, tensor<1x2xf32>)
      %3 = "tf.TPUExecute"(%2#0, %1, %program#0) {_producer_name = "UNKNOWN", device = "/TPU:0"} : (tensor<1x2xf32>, tensor<2x1xf32>, tensor<3x!tf_type.string>) -> tensor<1x1xf32>
    - Last Modified: Mon Oct 30 06:52:55 UTC 2023
    - 13.8K bytes
  4. tensorflow/compiler/mlir/lite/tests/legalize-tensorlist.mlir

    }
    
    // -----
    
    // CHECK-LABEL: listPushBack
    func.func @listPushBack(%arg0: tensor<!tf_type.variant<tensor<?x1xf32>>>, %arg1: tensor<16x1xf32>) -> tensor<!tf_type.variant<tensor<?x1xf32>>>  {
      %0 = "tf.TensorListPushBack"(%arg0, %arg1) : (tensor<!tf_type.variant<tensor<?x1xf32>>>, tensor<16x1xf32>) -> tensor<!tf_type.variant<tensor<?x1xf32>>>
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 9.5K bytes
  5. tensorflow/compiler/mlir/lite/tests/lift_tflite_flex_ops.mlir

    func.func @TfBatchMatMulV2(%arg0: tensor<4x128x2xf32>, %arg1:  tensor<2x1xf32>) -> tensor<4x128x1xf32> {
      %0 = "tfl.custom"(%arg0, %arg1) {
        custom_code = "FlexBatchMatMulV2",
        custom_option = #tfl<const_bytes : "0x0D42617463684D61744D756C56320038120D42617463684D61744D756C56321A001A002A070A0154120230012A0B0A0561646A5F78120228002A0B0A0561646A5F791202280032000002493B1414042801">
      } : (tensor<4x128x2xf32>, tensor<2x1xf32>) -> tensor<4x128x1xf32>
    
    - Last Modified: Mon Oct 30 06:52:55 UTC 2023
    - 6.1K bytes
  6. tensorflow/compiler/mlir/lite/tests/prepare-composite-functions-tf.mlir

      %44 = "tf.AddV2"(%43, %37) {device = ""} : (tensor<2x16x1xf32>, tensor<2x16x1xf32>) -> tensor<2x16x1xf32>
      %45 = "tf.Mul"(%42, %35) {device = ""} : (tensor<2x16x1xf32>, tensor<2x16x1xf32>) -> tensor<2x16x1xf32>
      %46 = "tf.AddV2"(%45, %34) {device = ""} : (tensor<2x16x1xf32>, tensor<2x16x1xf32>) -> tensor<2x16x1xf32>
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 122.1K bytes
  7. tensorflow/compiler/mlir/tensorflow/tests/lower_tf.mlir

      // CHECK-DAG: %[[LOG_SOFTMAX_LOG:.*]] = "tf.Log"(%[[LOG_SOFTMAX_SUM]]) : (tensor<2x1xf32>) -> tensor<2x1xf32>
      // CHECK-DAG: %[[LOG_SOFTMAX:.*]] = "tf.Sub"(%[[LOG_SOFTMAX_SHIFTED]], %[[LOG_SOFTMAX_LOG]]) : (tensor<2x3xf32>, tensor<2x1xf32>) -> tensor<2x3xf32>
    
    
    - Last Modified: Fri Jan 05 18:35:42 UTC 2024
    - 92K bytes
  8. tensorflow/compiler/mlir/lite/tests/prepare-tf.mlir

      %2 = "tf.Transpose"(%1, %cst_0): (tensor<1x2xf32>, tensor<2xi32>) -> tensor<2x1xf32>
      func.return %2 : tensor<2x1xf32>
    
    // CHECK: %cst = arith.constant
    // CHECK: %[[trans:.*]] = "tf.Transpose"
    // CHECK-SAME: -> tensor<2x1xf32>
    // CHECK: %[[q:.*]] = "tfl.quantize"(%[[trans]]) <{qtype = tensor<2x1x!quant.uniform<u8:f32, 1.000000e+00>>}>
    - Last Modified: Wed May 29 07:26:59 UTC 2024
    - 59.8K bytes
  9. tensorflow/compiler/mlir/lite/experimental/tac/tests/target-annotation.mlir

      %2 = "tfl.relu"(%arg0) : (tensor<1xf32>) -> tensor<1xf32>
      // CHECK: tac.device = "CPU", tac.inference_type = "FLOAT"
      %3 = "tfl.pack"(%arg0, %arg1) {axis = 0 : i32, values_count = 2 : i32} : (tensor<1xf32>, tensor<1xf32>) -> tensor<2x1xf32>
      func.return
    }
    
    func.func @notAnnotateConst(%arg0: tensor<256x32x32x3xf32>) -> tensor<256x30x30x16xf32> {
      // CHECK-NOT: tac.device tac.inference_type
    - Last Modified: Fri May 19 19:32:06 UTC 2023
    - 6.2K bytes
  10. tensorflow/compiler/mlir/lite/tests/prepare-quantize.mlir

    // execute the production code path.
    func.func @main(%arg0: tensor<2x1xf32>, %arg1: tensor<2x3xf32>) -> (tensor<2x4xf32>) {
      %0 = "tfl.quantize"(%arg0) {qtype = tensor<2x1x!quant.uniform<i16:f32, 1.0>>} : (tensor<2x1xf32>) -> tensor<2x1x!quant.uniform<i16:f32, 1.0>>
      %1 = "tfl.dequantize"(%0) : (tensor<2x1x!quant.uniform<i16:f32, 1.0>>) -> (tensor<2x1xf32>)
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 67.5K bytes
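
The first hit (fold-broadcast.mlir) is representative of the pattern these tests exercise: an explicit tf.BroadcastTo feeding a broadcasting binary op. As a minimal sketch, assuming the fold simply drops the tf.BroadcastTo and lets tf.AddV2 broadcast implicitly, the folded form of @broadcast_add_implicit_fold would look roughly like the following; the authoritative CHECK lines are in the test file itself.

    // Hypothetical folded form: the explicit tf.BroadcastTo is removed and
    // tf.AddV2 broadcasts tensor<5x1xf32> against tensor<7xf32> to tensor<5x7xf32>.
    func.func @broadcast_add_implicit_fold(%arg0: tensor<5x1xf32>, %arg1: tensor<7xf32>) -> tensor<5x7xf32> {
      %0 = "tf.AddV2"(%arg0, %arg1) : (tensor<5x1xf32>, tensor<7xf32>) -> tensor<5x7xf32>
      func.return %0 : tensor<5x7xf32>
    }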