Results 1 - 10 of 116 for 1x5xf32

  1. tensorflow/compiler/mlir/tfr/tests/rewrite_quantized_io.mlir

      %arg1: tensor<1x5xf32>) -> (tensor<1x10x!quant.uniform<i8:f32, 0.2:42>>, tensor<1x5xf32>) {
      %0 = "tf.MyRequantize"(%arg0) : (tensor<1x10x!quant.uniform<i8:f32, 0.1:-128>>) -> tensor<1x10x!quant.uniform<i8:f32, 0.2:42>>
      %1 = "tf.Intermediate"(%arg1) : (tensor<1x5xf32>) -> tensor<1x5xf32>
      func.return %0, %1 : tensor<1x10x!quant.uniform<i8:f32, 0.2:42>>, tensor<1x5xf32>
    
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Mon Oct 30 06:52:55 UTC 2023
    - 2.3K bytes
  2. tensorflow/compiler/mlir/lite/tests/prepare-quantize-post-training.mlir

            tensor<1x1x5xf32>,
            tensor<2x5xf32>, tensor<2x5xf32>, tensor<2x5xf32>, tensor<2x5xf32>,
            tensor<2x4xf32>, tensor<2x4xf32>, tensor<2x4xf32>, tensor<2x4xf32>,
            tensor<2xf32>, tensor<2xf32>, tensor<2xf32>,
            tensor<2xf32>, tensor<2xf32>, tensor<2xf32>, tensor<2xf32>,
            tensor<4x2xf32>, tensor<4xf32>,
            tensor<1x4xf32>, tensor<1x2xf32>,
            none, none, none, none) -> tensor<*xf32>
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 52.6K bytes
  3. tensorflow/compiler/mlir/lite/tests/canonicalize.mlir

    func.func @RemoveRedundantPack(%arg0: tensor<2x5xf32>) -> (tensor<2x5xf32>, tensor<5xf32>) {
      %0:2 = "tfl.unpack"(%arg0) {axis = 0 : i32, num = 2 : i32} : (tensor<2x5xf32>) -> (tensor<5xf32>, tensor<5xf32>)
      %1 = "tfl.pack"(%0#0, %0#1) {axis = 0 : i32, values_count = 2 : i32} : (tensor<5xf32>, tensor<5xf32>) -> (tensor<2x5xf32>)
      func.return %1, %0#0: tensor<2x5xf32>, tensor<5xf32>
      // CHECK: %[[UNPACK:.*]]:2 = "tfl.unpack"
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 20.6K bytes
  4. tensorflow/compiler/mlir/lite/tests/prepare-quantize.mlir

      %8 = "tfl.concatenation"(%2, %0) {axis = -1 : i32, fused_activation_function = "NONE"} : (tensor<1x1xf32>, tensor<1x1xf32>) -> tensor<1x2xf32>
      %9 = "quantfork.stats"(%8) {layerStats = dense<[-0.488159984, 0.189515018]> : tensor<2xf32>} : (tensor<1x2xf32>) -> tensor<1x2xf32>
      %10 = "tfl.concatenation"(%9, %7) {axis = -1 : i32, fused_activation_function = "NONE"} : (tensor<1x2xf32>, tensor<1x2xf32>) -> tensor<1x4xf32>
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 67.5K bytes
  5. tensorflow/compiler/mlir/quantization/stablehlo/tests/passes/quantize/quantize_same_scale.mlir

        %5 = "quantfork.qcast"(%4) {volatile} : (tensor<1x3xf32>) -> tensor<1x3x!quant.uniform<i8:f32, 0.13170163023705575:-1>>
        %6 = "quantfork.dcast"(%5) : (tensor<1x3x!quant.uniform<i8:f32, 0.13170163023705575:-1>>) -> tensor<1x3xf32>
        %7 = stablehlo.reshape %6 : (tensor<1x3xf32>) -> tensor<3x1xf32>
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue May 14 17:10:32 UTC 2024
    - 35.4K bytes
  6. tensorflow/compiler/mlir/tfrt/tests/fuse_tpu_compile_and_execute_ops.mlir

      %1 = "tf.ReadVariableOp"(%0) {device = "/CPU:0"} : (tensor<!tf_type.resource<tensor<1x1xf32>>>) -> tensor<1x1xf32>
      %2:2 = "tf.Split"(%cst, %arg0) {device = "/CPU:0"} : (tensor<i32>,  tensor<1x4xf32>) -> (tensor<1x2xf32>, tensor<1x2xf32>)
      %3:2 = "tf.Split"(%cst, %2#0) {device = "/CPU:0"} : (tensor<i32>,  tensor<1x2xf32>) -> (tensor<1x1xf32>, tensor<1x1xf32>)
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Mon Oct 30 06:52:55 UTC 2023
    - 13.8K bytes
  7. tensorflow/compiler/mlir/tensorflow/tests/canonicalize.mlir

      %3 = "tf.AddV2"(%arg0, %0): (tensor<4x4xf32>, tensor<1xf32>) -> tensor<4x4xf32>
      %4 = "tf.Log"(%3) {device = "/job:localhost/replica:0/task:0/device:GPU:0"}: (tensor<4x4xf32>) -> tensor<4x4xf32>
    
      // CHECK: %[[ADD1:.*]] = "tf.AddV2"
      // CHECK: %[[LOG1:.*]] = "tf.Log"(%[[ADD1]])
      %5 = "tf.AddV2"(%4, %1): (tensor<4x4xf32>, tensor<1xf32>) -> tensor<4x4xf32>
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu May 09 22:07:10 UTC 2024
    - 132.1K bytes
  8. tensorflow/compiler/mlir/lite/stablehlo/tests/compose-uniform-quantized-type.mlir

        %8 = stablehlo.convert %7 : (tensor<1x2xi32>) -> tensor<1x2xf32>
        %9 = stablehlo.convert %2 : (tensor<2x3xi8>) -> tensor<2x3xf32>
        %10 = stablehlo.dot_general %8, %9, contracting_dims = [1] x [0] : (tensor<1x2xf32>, tensor<2x3xf32>) -> tensor<1x3xf32>
        %11 = stablehlo.convert %3 : (tensor<1x3xi32>) -> tensor<1x3xf32>
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue May 14 17:10:32 UTC 2024
    - 37K bytes
  9. tensorflow/compiler/mlir/quantization/stablehlo/tests/passes/quantize/quantize.mlir

    // CHECK: return
    
      func.func private @composite_dot_general_fn(%arg0: tensor<1x4xf32>, %arg1: tensor<4x3xf32>) -> tensor<1x3xf32> attributes {_from_xla_call_module} {
          %0 = stablehlo.dot_general %arg0, %arg1, contracting_dims = [1] x [0] : (tensor<1x4xf32>, tensor<4x3xf32>) -> tensor<1x3xf32>
          return %0 : tensor<1x3xf32>
      }
    }
    
    // -----
    
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 18 01:38:40 UTC 2024
    - 6.3K bytes
  10. tensorflow/compiler/mlir/lite/tests/optimize.mlir

      %1 = "tfl.reshape"(%0, %cst3) : (tensor<4x2xf32>, tensor<2xi32>) -> tensor<1x8xf32>
      %2 = "tfl.mul"(%0, %cst2) {fused_activation_function = "RELU6"} : (tensor<4x2xf32>, tensor<2xf32>) -> tensor<4x2xf32>
    
      func.return %1, %2 : tensor<1x8xf32>, tensor<4x2xf32>
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu May 16 20:31:41 UTC 2024
    - 284.1K bytes