Results 1 - 6 of 6 for 1x1xf16 (0.36 sec)

  1. tensorflow/compiler/mlir/lite/tests/prepare-quantize-post-training-16bits.mlir

      %2 = "tfl.pseudo_const"() {value = dense<[[0.2]]> : tensor<1x1xf32>} : () -> tensor<1x1xf32>
      %3 = "tfl.pseudo_const"() {value = dense<[[0.3]]> : tensor<1x1xf32>} : () -> tensor<1x1xf32>
      %4 = "tfl.pseudo_const"() {value = dense<[[0.4]]> : tensor<1x1xf32>} : () -> tensor<1x1xf32>
      %5 = "tfl.pseudo_const"() {value = dense<[[0.5]]> : tensor<1x1xf32>} : () -> tensor<1x1xf32>
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 26.1K bytes
  2. tensorflow/compiler/mlir/lite/stablehlo/tests/compose-uniform-quantized-type.mlir

        %5 = stablehlo.constant dense<2.000000e+02> : tensor<1x1xf32>  // Output inverse scale.
        %6 = stablehlo.constant dense<2> : tensor<1x1xi8>  // Output zero point.
        %7 = call @uniform_quantize_0(%arg0, %0, %1) : (tensor<1x2xf32>, tensor<1x1xf32>, tensor<1x1xi8>) -> tensor<1x2xi8>
        %8 = stablehlo.convert %7 : (tensor<1x2xi8>) -> tensor<1x2xf32>
    - Last Modified: Tue May 14 17:10:32 UTC 2024
    - 37K bytes
  3. tensorflow/compiler/mlir/lite/tests/flatbuffer2mlir/constants_offset.mlir

      func.return %0: tensor<3x2xf16>
    }
    
    func.func @sparse_qu8() -> tensor<3x2x!quant.uniform<u8<1:255>:f32, 1.0>> {
      // CHECK-LABEL: @sparse_qu8
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 12.1K bytes
  4. tensorflow/compiler/mlir/lite/tests/flatbuffer2mlir/constants.mlir

      func.return %0: tensor<3x2xf16>
    }
    
    func.func @sparse_qu8() -> tensor<3x2x!quant.uniform<u8<1:255>:f32, 1.0>> {
      // CHECK-LABEL: @sparse_qu8
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 12.1K bytes
  5. tensorflow/compiler/mlir/lite/stablehlo/tests/tfl_legalize_hlo.mlir

      // CHECK:  %4 = "tfl.reduce_max"(%arg0, %cst) <{keep_dims = false}> : (tensor<1x32x1xf32>, tensor<1xi32>) -> tensor<1x1xf32>
      // CHECK:  %5 = "tfl.arg_max"(%arg0, %cst) : (tensor<1x32x1xf32>, tensor<1xi32>) -> tensor<1x1xi32>
      // CHECK:  return %4, %5 : tensor<1x1xf32>, tensor<1x1xi32>
    }
    
    // CHECK-LABEL:   func @convert_pytorch_argmax
    func.func @convert_pytorch_argmax(%arg0: tensor<1x9xi32>) -> tensor<1xi32> {
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 40.1K bytes
  6. tensorflow/compiler/mlir/lite/tests/prepare-quantize-dynamic-range.mlir

    // Float16-DAG: %[[b:.*]] = arith.constant dense<0.000000e+00> : tensor<16xf16>
    // Float16-DAG: %[[const:.*]] = "tfl.no_value"() <{value}> : () -> none
    // Float16-DAG: %[[dq_w:.*]] = "tfl.dequantize"(%[[w]]) : (tensor<3x3x3x8x16xf16>) -> tensor<3x3x3x8x16xf32>
    // Float16-DAG: %[[dq_b:.*]] = "tfl.dequantize"(%[[b]]) : (tensor<16xf16>) -> tensor<16xf32>
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 38.2K bytes