Results 1 - 2 of 2 for QUANTIZED_INT8 (0.17 sec)

  1. tensorflow/compiler/mlir/lite/experimental/tac/transforms/cost_model.cc

          return 0;
        } else if (to_inference_type == QUANTIZED_INT8 ||
                   to_inference_type == QUANTIZED_UINT8) {
          // QUANT path.
          return kQuantCost * total_element_count_transferred;
        }
      }
    
      if (from_inference_type == QUANTIZED_INT8 ||
          from_inference_type == QUANTIZED_UINT8) {
        // Dequant path.

    Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 7.3K bytes
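
The excerpt above is from the TAC (target-aware conversion) cost model, which charges a per-element cost whenever tensors must be quantized or dequantized while being transferred between subgraphs that run with different inference types. Below is a minimal, self-contained sketch of that pattern; the FLOAT32 branch, the constant values, and the kDequantCost name are illustrative assumptions, not verbatim pieces of cost_model.cc.

    // Sketch of the transfer-cost pattern shown in the excerpt. Constant
    // values, the FLOAT32 branch, and kDequantCost are assumptions.
    #include <cstdint>
    #include <cstdio>

    enum InferenceType { FLOAT32, QUANTIZED_INT8, QUANTIZED_UINT8 };

    constexpr float kQuantCost = 0.2f;    // assumed per-element quantize cost
    constexpr float kDequantCost = 0.2f;  // assumed per-element dequantize cost

    float TransferCost(InferenceType from_inference_type,
                       InferenceType to_inference_type,
                       int64_t total_element_count_transferred) {
      if (from_inference_type == FLOAT32) {
        if (to_inference_type == FLOAT32) {
          return 0;
        } else if (to_inference_type == QUANTIZED_INT8 ||
                   to_inference_type == QUANTIZED_UINT8) {
          // Quant path: float tensors are quantized before the transfer.
          return kQuantCost * total_element_count_transferred;
        }
      }
      if (from_inference_type == QUANTIZED_INT8 ||
          from_inference_type == QUANTIZED_UINT8) {
        // Dequant path: quantized tensors are dequantized on transfer.
        return kDequantCost * total_element_count_transferred;
      }
      return 0;
    }

    int main() {
      // e.g. moving 1024 float elements into a QUANTIZED_INT8 subgraph.
      std::printf("%f\n", TransferCost(FLOAT32, QUANTIZED_INT8, 1024));
      return 0;
    }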
  2. tensorflow/compiler/mlir/lite/quantization/lite/quantize_weights.h

    #include "tensorflow/lite/c/c_api_types.h"
    #include "tensorflow/lite/model.h"
    
    namespace mlir {
    namespace lite {
    
    // Supported resulting types from quantization process.
    enum class BufferType { QUANTIZED_INT8, QUANTIZED_FLOAT16 };
    
    // Stores information about how to quantize a user-specified custom operation.
    // CustomOpInfo contains info of its corresponding CustomOp registered in the

    Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Jun 12 23:15:24 UTC 2024 - 4.2K bytes
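
The header excerpt above declares BufferType, the set of result types the weight-quantization process supports (int8 or float16 weight buffers), inside namespace mlir::lite. As a hedged illustration only, the sketch below shows how a caller might branch on that enum; the helper function and its string labels are hypothetical and not part of quantize_weights.h.

    // Hypothetical helper, not part of quantize_weights.h: maps the BufferType
    // enum from the excerpt to a human-readable description of the weight
    // buffers that weight quantization would emit.
    #include <cstdio>

    enum class BufferType { QUANTIZED_INT8, QUANTIZED_FLOAT16 };

    const char* DescribeWeightBuffers(BufferType type) {
      switch (type) {
        case BufferType::QUANTIZED_INT8:
          return "8-bit integer weight buffers";
        case BufferType::QUANTIZED_FLOAT16:
          return "half-precision float weight buffers";
      }
      return "unknown buffer type";
    }

    int main() {
      std::printf("%s\n", DescribeWeightBuffers(BufferType::QUANTIZED_FLOAT16));
      return 0;
    }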