Results 1 - 10 of 48 for u8 (0.04 sec)

  1. tensorflow/compiler/mlir/lite/tests/default_quant_params.mlir

      %1 = "tfl.quantize"(%arg1) {qtype = tensor<2x1x!quant.uniform<u8:f32, 1.0:128>>}: (tensor<2x1xf32>) -> tensor<2x1x!quant.uniform<u8:f32, 1.0:128>>
      %2 = "tfl.dequantize"(%0) : (tensor<2x2x!quant.uniform<u8:f32, 1.0:128>>) -> tensor<2x2xf32>
      %3 = "tfl.dequantize"(%1) : (tensor<2x1x!quant.uniform<u8:f32, 1.0:128>>) -> tensor<2x1xf32>
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 8.8K bytes
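    For context, a type such as tensor<2x1x!quant.uniform<u8:f32, 1.0:128>> stores f32 values as unsigned 8-bit integers with scale 1.0 and zero point 128; "tfl.quantize" and "tfl.dequantize" convert between the two representations. Below is a minimal Go sketch of that affine round-trip, assuming the usual mapping q = round(x/scale) + zero_point and x ≈ (q - zero_point) * scale; the helper names are illustrative, not taken from the indexed file.

      // Illustrative sketch of the affine u8 quantization that
      // !quant.uniform<u8:f32, scale:zero_point> denotes; not TensorFlow code.
      package main

      import (
          "fmt"
          "math"
      )

      // quantizeU8 maps a float to u8 storage: q = round(x/scale) + zeroPoint,
      // clamped to the u8 range [0, 255].
      func quantizeU8(x, scale float32, zeroPoint int32) uint8 {
          q := int32(math.Round(float64(x/scale))) + zeroPoint
          if q < 0 {
              q = 0
          }
          if q > 255 {
              q = 255
          }
          return uint8(q)
      }

      // dequantizeU8 recovers the approximate float: x ≈ (q - zeroPoint) * scale.
      func dequantizeU8(q uint8, scale float32, zeroPoint int32) float32 {
          return float32(int32(q)-zeroPoint) * scale
      }

      func main() {
          const scale, zeroPoint = 1.0, 128 // parameters from the excerpt: <u8:f32, 1.0:128>
          for _, x := range []float32{-3.5, 0, 42} {
              q := quantizeU8(x, scale, zeroPoint)
              fmt.Printf("x=%6.2f  q=%3d  back=%6.2f\n", x, q, dequantizeU8(q, scale, zeroPoint))
          }
      }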
  2. tensorflow/compiler/mlir/lite/tests/quantize.mlir

    ^bb0(%arg0: tensor<1x56x56x24x!quant.uniform<u8:f32, 0.27583434161017922:119>>, %arg1: tensor<1x56x56x24x!quant.uniform<u8:f32, 0.40149296779258581:136>>):
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue May 28 23:10:13 UTC 2024
    - 39.7K bytes
  3. tensorflow/compiler/mlir/lite/tests/post-quantize.mlir

      %0 = "tfl.quantize"(%arg0) {qtype = tensor<4x!quant.uniform<u8:f32, 1.0>>} : (tensor<4xf32>) -> tensor<4x!quant.uniform<u8:f32, 1.0>>
      %1:4 = "tfl.split"(%arg1, %0) {num_splits = 4 : i32} : (tensor<i32>, tensor<4x!quant.uniform<u8:f32, 1.0>>)
      -> (tensor<2x!quant.uniform<u8:f32, 1.0>>, tensor<2x!quant.uniform<u8:f32, 1.0>>,tensor<2x!quant.uniform<u8:f32, 1.0>>, tensor<2x!quant.uniform<u8:f32, 1.0>>)
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 19.9K bytes
  4. test/codegen/noextend.go

    // no signed byte load.
    
    func set16(x8 int8, u8 *uint8, y8 int8, z8 uint8) {
    	// Truncate not needed, load does sign/zero extend
    
    	// ppc64x:-"MOVBZ\tR\\d+,\\sR\\d+"
    	val16[0] = uint16(*u8)
    
    	// AND not needed due to size
    	// ppc64x:-"ANDCC"
    	sval16[1] = 255 & int16(x8+y8)
    
    	// ppc64x:-"ANDCC"
    	val16[1] = 255 & uint16(*u8+z8)
    
    }
    func shiftidx(u8 *uint8, x16 *int16, u16 *uint16) {
    
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Thu Dec 14 17:22:18 UTC 2023
    - 5.4K bytes
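    This is the only non-MLIR hit: a Go codegen test. Its ppc64x annotations assert that no register-to-register zero-extension (MOVBZ) and no ANDCC instruction is emitted for these stores; per the test's own comments, the byte load already zero-extends and the AND is not needed due to size. Below is a self-contained sketch of the same pattern; the val16/sval16 declarations are assumptions for illustration, not copied from test/codegen/noextend.go.

      // Self-contained variant of the pattern in the excerpt above. The global
      // array shapes are assumed for illustration; the real test file declares
      // its own val16/sval16.
      package main

      import "fmt"

      var (
          val16  [2]uint16
          sval16 [2]int16
      )

      // set16 mirrors the excerpt: stores of byte-sized values widened to 16 bits.
      // The original codegen test asserts (ppc64x) that no extra MOVBZ or ANDCC
      // instruction is generated for these assignments.
      func set16(x8 int8, u8 *uint8, y8 int8, z8 uint8) {
          val16[0] = uint16(*u8)          // widen a loaded byte
          sval16[1] = 255 & int16(x8+y8)  // masked signed sum
          val16[1] = 255 & uint16(*u8+z8) // masked unsigned sum
      }

      func main() {
          b := uint8(200)
          set16(-3, &b, 7, 9)
          fmt.Println(val16, sval16) // [200 209] [0 4]
      }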
  5. tensorflow/compiler/mlir/lite/tests/prepare-quantize.mlir

    func.func @QuantizeMaximum(tensor<1x6x6x16x!quant.uniform<u8:f32, 0.1>>, tensor<1x6x6x16x!quant.uniform<u8:f32, 0.1>>) -> tensor<1x6x6x16xf32> {
    ^bb0(%arg0: tensor<1x6x6x16x!quant.uniform<u8:f32, 0.1>>, %arg1: tensor<1x6x6x16x!quant.uniform<u8:f32, 0.1>>):
      %0 = "tfl.dequantize"(%arg0) : (tensor<1x6x6x16x!quant.uniform<u8:f32, 0.1>>) -> tensor<1x6x6x16xf32>
      %1 = "tfl.dequantize"(%arg1) : (tensor<1x6x6x16x!quant.uniform<u8:f32, 0.1>>) -> tensor<1x6x6x16xf32>
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 67.5K bytes
  6. tensorflow/compiler/mlir/lite/tests/flatbuffer2mlir/quantization.mlir

      %cst = "tfl.pseudo_qconst"() {qtype = tensor<1x2x!quant.uniform<u8:f32, 1.0>>, value = dense<-76> : tensor<1x2xi8>} : () -> tensor<1x2x!quant.uniform<u8:f32, 1.0>>
      %2 = "tfl.concatenation"(%1, %cst) {axis = 0 : i32, fused_activation_function = "NONE"} : (tensor<1x2x!quant.uniform<u8:f32, 1.0>>, tensor<1x2x!quant.uniform<u8:f32, 1.0>>) -> tensor<2x2x!quant.uniform<u8:f32, 1.0>>
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 4.3K bytes
  7. tensorflow/compiler/mlir/lite/tests/quantize-numeric-verify.mlir

      func.return %6 : tensor<1x112x112x32x!quant.uniform<u8:f32, 0.023528476789885875>>
    }
    
    // DEBUG-LABEL: QuantizeSplit
    func.func @QuantizeSplit(%arg: tensor<4x!quant.uniform<u8:f32, 1.0>>, %cst: tensor<i32>) -> (tensor<2x!quant.uniform<u8:f32, 1.0>>,tensor<2x!quant.uniform<u8:f32, 1.0>>) {
      %0 = "tfl.dequantize"(%arg) : (tensor<4x!quant.uniform<u8:f32, 1.0>>) -> tensor<4xf32>
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 15.1K bytes
  8. tensorflow/compiler/mlir/lite/tests/mlir2flatbuffer/quantization.mlir

      %5 = "tfl.reshape"(%4, %0) : (tensor<1x112x112x32x!quant.uniform<u8:f32, 0.023528476789885875>>, tensor<2xi32>) -> tensor<1x401408x!quant.uniform<u8:f32, 0.023528476789885875>>
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Jul 14 16:41:28 UTC 2022
    - 11.9K bytes
  9. tensorflow/compiler/mlir/lite/tests/flatbuffer2mlir/constants_offset.mlir

    }
    
    func.func @qu8() -> tensor<3x!quant.uniform<u8<1:255>:f32, 1.0>> {
      // CHECK-LABEL: @qu8
      // CHECK: <{qtype = tensor<3x!quant.uniform<u8<1:255>:f32, 1.000000e+00>>, value = dense<1> : tensor<3xi8>}> : () -> tensor<3x!quant.uniform<u8<1:255>:f32, 1.000000e+00>>
      %0 = "tfl.pseudo_qconst"() { qtype = tensor<3x!quant.uniform<u8<1:255>:f32, 1.0>>, value = dense<1> : tensor<3xi8>} : () -> tensor<3x!quant.uniform<u8<1:255>:f32, 1.0>>
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 12.1K bytes
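    Unlike the earlier hits, this one uses a restricted storage range: u8<1:255> limits the stored values to [1, 255] instead of the full [0, 255] (no zero point is given, i.e. it is 0). Below is a minimal Go sketch of quantization with an explicit storage range, under the same affine-mapping assumption as the sketch after result 1; names are illustrative.

      // Illustrative quantization with a restricted storage range, as in
      // tensor<3x!quant.uniform<u8<1:255>:f32, 1.0>>; not TensorFlow code.
      package main

      import (
          "fmt"
          "math"
      )

      // quantizeRestricted clamps the quantized value to [storageMin, storageMax]
      // rather than the full u8 range.
      func quantizeRestricted(x, scale float32, zeroPoint, storageMin, storageMax int32) uint8 {
          q := int32(math.Round(float64(x/scale))) + zeroPoint
          if q < storageMin {
              q = storageMin
          }
          if q > storageMax {
              q = storageMax
          }
          return uint8(q)
      }

      func main() {
          // With the range restricted to [1, 255], 0.0 maps to 1 rather than 0.
          fmt.Println(quantizeRestricted(0.0, 1.0, 0, 1, 255))  // 1
          fmt.Println(quantizeRestricted(42.0, 1.0, 0, 1, 255)) // 42
      }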
  10. tensorflow/compiler/mlir/lite/tests/end2end/quant_stats.pbtxt

    # MLIR-NEXT:    return %[[add]] : tensor<4x!quant.uniform<u8:f32, 0.0078431372549019607:128>>
    # MLIR-NEXT:  }
    
    # CHECK-LABEL: {
    # CHECK-NEXT:  version: 3,
    # CHECK-NEXT:  operator_codes: [ {
    # CHECK-NEXT:    version: 1
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 4K bytes