Results 1 - 8 of 8 for 1x1x1x256xf32 (0.33 sec)

  1. tensorflow/compiler/mlir/lite/experimental/tac/tests/device-transform-gpu.mlir

      %0:4 = "tfl.split"(%arg0, %arg1) {num_splits = 4 : i32, tac.device = "CPU"} : (tensor<i32>, tensor<1x8x8x1024xf32>) -> (tensor<1x8x8x256xf32>, tensor<1x8x8x256xf32>, tensor<1x8x8x256xf32>, tensor<1x8x8x256xf32>)
      func.return %0#0, %0#1, %0#3 : tensor<1x8x8x256xf32>, tensor<1x8x8x256xf32>, tensor<1x8x8x256xf32>
    }
    
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 15.6K bytes
    - Viewed (0)
  2. tensorflow/compiler/mlir/lite/stablehlo/tests/composite-lowering.mlir

      return %0 : tensor<1x1x1x2xf32>
    }
    func.func private @XlaCallModule_aten.avg_pool2d.default.impl_5(%arg0: tensor<1x1x1x7xf32>) -> tensor<1x1x1x2xf32>
    
    // CHECK-LABEL: avg_pool2d_6
    // CHECK: %cst = arith.constant dense<[0, 2, 3, 1]> : tensor<4xi32>
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Jun 06 18:45:51 UTC 2024
    - 32.6K bytes
    - Viewed (0)
  3. tensorflow/compiler/mlir/lite/tests/flatbuffer2mlir/vhlo.mlir

    func.func @exp(%arg0: tensor<1x1x1x96xf32>) -> tensor<1x1x1x96xf32> {
      %0 = "vhlo.exponential_v1" (%arg0) : (tensor<1x1x1x96xf32>) -> tensor<1x1x1x96xf32>
      func.return %0 : tensor<1x1x1x96xf32>
    }
    
    //CHECK:func.func private @exp(%arg0: tensor<1x1x1x96xf32>) -> tensor<1x1x1x96xf32> {
    //CHECK-NEXT: %0 = "vhlo.exponential_v1"(%arg0) : (tensor<1x1x1x96xf32>) -> tensor<1x1x1x96xf32>
    //CHECK-NEXT: return %0 : tensor<1x1x1x96xf32>
    //CHECK-NEXT:}
    
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Mar 14 19:15:40 UTC 2024
    - 31.9K bytes
    - Viewed (1)
  4. tensorflow/compiler/mlir/quantization/tensorflow/tests/prepare_lifting.mlir

      %cst_0 = "tf.Const"() {value = dense<0.400000e+00> : tensor<1x1x1x2xf32>} : () -> tensor<1x1x1x2xf32>
      %cst_1 = "tf.Const"() {value = dense<0.200000e+00> : tensor<1x1x1x2xf32>} : () -> tensor<1x1x1x2xf32>
      %cst_2 = "tf.Const"() {value = dense<0.300000e+00> : tensor<1x1x1x2xf32>} : () -> tensor<1x1x1x2xf32>
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed Feb 14 03:24:59 UTC 2024
    - 33.3K bytes
    - Viewed (0)
  5. tensorflow/compiler/mlir/lite/tests/quantize.mlir

      func.return %1 : tensor<1x1x1x16xf32>
    
    // CHECK: %[[avgp:.*]] = "tfl.average_pool_2d"(%arg0)
    // CHECK: %[[dq:.*]] = "tfl.dequantize"(%[[avgp]]) : (tensor<1x1x1x16x!quant.uniform<u8:f32, 7.812500e-03:128>>) -> tensor<1x1x1x16xf32>
    // CHECK: return %[[dq]] : tensor<1x1x1x16xf32>
    }
    
    // CHECK-LABEL: QuantizeReshape2D
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue May 28 23:10:13 UTC 2024
    - 39.7K bytes
    - Viewed (0)
  6. tensorflow/compiler/mlir/lite/experimental/tac/README.md

        %3 = "tfl.reshape"(%2, %cst_0) {tac.device = "GPU", tac.inference_type = "FLOAT"} : (tensor<1x1x1x2xf32>, tensor<1xi32>) -> tensor<2xf32>
        %4 = "tfl.reshape"(%3, %cst_1) {tac.device = "GPU", tac.inference_type = "FLOAT"} : (tensor<2xf32>, tensor<2xi32>) -> tensor<2x1xf32>
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue Mar 29 18:32:13 UTC 2022
    - 11.6K bytes
    - Viewed (0)
  7. tensorflow/compiler/mlir/lite/experimental/tac/tests/get-alternative-subgraph.mlir

    // CHECK:           %[[VAL_8:.*]] = "tfl.reshape"(%[[VAL_7]], %[[VAL_3]]) {tac.device = "GPU", tac.inference_type = "FLOAT"} : (tensor<1x1x1x2xf32>, tensor<1xi32>) -> tensor<2xf32>
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 20.1K bytes
    - Viewed (0)
  8. tensorflow/compiler/mlir/lite/experimental/tac/tests/pick-subgraphs.mlir

        %5 = "tfl.reshape"(%4, %cst_0) {tac.device = "GPU", tac.inference_type = "FLOAT"} : (tensor<1x1x1x200xf32>, tensor<1xi32>) -> tensor<200xf32>
        %6 = "tfl.reshape"(%5, %cst_1) {tac.device = "GPU", tac.inference_type = "FLOAT"} : (tensor<200xf32>, tensor<2xi32>) -> tensor<2x100xf32>
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 24.3K bytes
    - Viewed (0)
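
Across the results above, the searched-for type shows up alongside a recurring TFLite pattern: a trailing "tfl.reshape" that collapses a 1x1x1xN activation into a rank-1 (or rank-2) tensor, as in results 6-8. The snippet below is a hypothetical, minimal sketch (not taken from any of the files listed) that applies that same pattern to the queried type tensor<1x1x1x256xf32>; the function name, the constant shape value, and the tac.* attributes are assumptions modeled on those excerpts.

    // Hypothetical sketch, modeled on results 6-8; not from any file above.
    func.func @reshape_1x1x1x256(%arg0: tensor<1x1x1x256xf32>) -> tensor<256xf32> {
      // Shape operand for tfl.reshape: collapse 1x1x1x256 into a rank-1 tensor of 256 elements.
      %cst = arith.constant dense<[256]> : tensor<1xi32>
      // The tac.device / tac.inference_type annotations mirror the excerpts (assumed here).
      %0 = "tfl.reshape"(%arg0, %cst) {tac.device = "GPU", tac.inference_type = "FLOAT"} : (tensor<1x1x1x256xf32>, tensor<1xi32>) -> tensor<256xf32>
      func.return %0 : tensor<256xf32>
    }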