Search Options

Results per page
Sort
Preferred Languages
Advanced

Results 1 - 1 of 1 for QUANTIZED_INT8 (0.2 sec)

  1. tensorflow/compiler/mlir/lite/quantization/lite/quantize_weights.h

    #include "tensorflow/lite/c/c_api_types.h"
    #include "tensorflow/lite/model.h"
    
    namespace mlir {
    namespace lite {
    
    // Supported resulting types from quantization process.
    enum class BufferType { QUANTIZED_INT8, QUANTIZED_FLOAT16 };
    
    // Stores information about how to quantize a user-specified custom operation.
    // CustomOpInfo contains info of its corresponding CustomOp registered in the
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed Jun 12 23:15:24 UTC 2024
    - 4.2K bytes
    - Viewed (0)
Back to top