Search Options

Results per page
Sort
Preferred Languages
Advanced

Results 11 - 14 of 14 for QUANTIZED_INT8 (0.22 sec)

  1. tensorflow/compiler/mlir/lite/experimental/tac/hardwares/cpu_hardware.cc

    // NOTE: this efficiency factor is a rough heuristic, not a measured value;
    // it is just an estimation.
    constexpr float kQuantizedInferenceEfficiency = 0.3;
    
    // Returns the relative efficiency multiplier for the given inference type
    // on CPU: quantized int8/uint8 inference gets the discounted factor above,
    // while every other inference type is treated as baseline (1.0).
    inline float InferenceTypeEfficiency(InferenceType inference_type) {
      const bool is_quantized = (inference_type == QUANTIZED_INT8) ||
                                (inference_type == QUANTIZED_UINT8);
      return is_quantized ? kQuantizedInferenceEfficiency : 1.0;
    }
    
    // CPU hardware class which handles CPU capabilities in TFLite.
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue Jun 06 03:08:33 UTC 2023
    - 5.9K bytes
    - Viewed (0)
  2. tensorflow/compiler/mlir/lite/python/converter_python_api.cc

        case toco::IODataType::QUANTIZED_UINT8:
          return tflite::TensorType_UINT8;
        case toco::IODataType::UINT8:
          return tflite::TensorType_UINT8;
        case toco::IODataType::QUANTIZED_INT8:
          return tflite::TensorType_INT8;
        case toco::IODataType::INT8:
          return tflite::TensorType_INT8;
        default:
          return tflite::TensorType_FLOAT32;
      }
    }
    
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed Jun 12 23:15:24 UTC 2024
    - 19.2K bytes
    - Viewed (0)
  3. tensorflow/compiler/mlir/lite/python/tf_tfl_flatbuffer_helpers.cc

        case toco::IODataType::FLOAT16:
          return DT_HALF;
        case toco::IODataType::FLOAT64:
          return DT_DOUBLE;
        case toco::IODataType::QUANTIZED_UINT8:
          return DT_QUINT8;
        case toco::IODataType::QUANTIZED_INT8:
          return DT_QINT8;
        case toco::IODataType::QUANTIZED_INT16:
          return DT_QINT16;
        case toco::IODataType::INT8:
          return DT_INT8;
        case toco::IODataType::INT16:
          return DT_INT16;
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Sun May 12 12:39:37 UTC 2024
    - 17.3K bytes
    - Viewed (0)
  4. tensorflow/compiler/mlir/lite/tf_to_tfl_flatbuffer.cc

      ::tflite::optimize::BufferType quantized_type;
      switch (quant_specs.inference_type) {
        case DT_QINT8:
          quantized_type = ::tflite::optimize::BufferType::QUANTIZED_INT8;
          break;
        case DT_HALF:
          quantized_type = ::tflite::optimize::BufferType::QUANTIZED_FLOAT16;
          break;
        default:
          return absl::InvalidArgumentError("Quantized type not supported");
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Fri May 03 18:01:23 UTC 2024
    - 23.8K bytes
    - Viewed (0)
Back to top