Search Options

Results per page
Sort
Preferred Languages
Advanced

Results 1 - 9 of 9 for QUANTIZED_INT8 (0.51 sec)

  1. tensorflow/compiler/mlir/lite/experimental/tac/transforms/cost_model.cc

          return 0;
        } else if (to_inference_type == QUANTIZED_INT8 ||
                   to_inference_type == QUANTIZED_UINT8) {
          // QUANT path.
          return kQuantCost * total_element_count_transferred;
        }
      }
    
      if (from_inference_type == QUANTIZED_INT8 ||
          from_inference_type == QUANTIZED_UINT8) {
        // Dequant path.
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 7.3K bytes
    - Viewed (0)
  2. tensorflow/compiler/mlir/lite/experimental/tac/tests/get-alternative-subgraph.mlir

      %0 = "tfl.pseudo_qconst"() {qtype = tensor<128x!quant.uniform<i32:f32, 0.7>>, value = dense<0> : tensor<128xi32>} : () -> tensor<128x!quant.uniform<i32:f32, 0.7>>
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 20.1K bytes
    - Viewed (0)
  3. tensorflow/compiler/mlir/lite/experimental/tac/tests/pick-subgraphs.mlir

        %0 = tfl.add %arg0, %arg1 {fused_activation_function = "RELU6", tac.device = "CPU", tac.inference_type = "QUANTIZED_INT8"} : tensor<100x!quant.uniform<i8:f32, 2.000000e-01:-3>>
        %1 = tfl.mul %0, %arg2 {fused_activation_function = "RELU6", tac.device = "CPU", tac.inference_type = "QUANTIZED_INT8"} : tensor<100x!quant.uniform<i8:f32, 2.000000e-01:-3>>
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 24.3K bytes
    - Viewed (0)
  4. tensorflow/compiler/mlir/lite/quantization/lite/quantize_weights.h

    #include "tensorflow/lite/c/c_api_types.h"
    #include "tensorflow/lite/model.h"
    
    namespace mlir {
    namespace lite {
    
    // Supported resulting types from quantization process.
    enum class BufferType { QUANTIZED_INT8, QUANTIZED_FLOAT16 };
    
    // Stores information about how to quantize a user-specified custom operation.
    // CustomOpInfo contains info of its corresponding CustomOp registered in the
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed Jun 12 23:15:24 UTC 2024
    - 4.2K bytes
    - Viewed (0)
  5. tensorflow/compiler/mlir/lite/experimental/tac/tests/raise-target-subgraphs.mlir

      %2 = "tfl.mul"(%arg0, %0) {tac.device = "CPU", tac.inference_type = "QUANTIZED_INT8", fused_activation_function = "NONE"} : (tensor<1x!quant.uniform<i8:f32, 0.003:-128>>, tensor<1x!quant.uniform<i8:f32, 0.003:-128>>) -> tensor<1x!quant.uniform<i8:f32, 0.003:-128>>
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 74.9K bytes
    - Viewed (0)
  6. tensorflow/compiler/mlir/lite/experimental/tac/common/utils.cc

      if (float_type_observed) {
        if (int8_type_observed || uint8_type_observed) {
          return HYBRID;
        } else {
          return FLOAT;
        }
      }
    
      if (int8_type_observed) {
        return QUANTIZED_INT8;
      }
    
      if (uint8_type_observed) {
        return QUANTIZED_UINT8;
      }
    
      // Default to float inference.
      return FLOAT;
    }
    
    }  // namespace tac
    }  // namespace TFL
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed Mar 06 05:37:07 UTC 2024
    - 2.3K bytes
    - Viewed (0)
  7. tensorflow/compiler/mlir/lite/python/converter_python_api.cc

        case toco::IODataType::QUANTIZED_UINT8:
          return tflite::TensorType_UINT8;
        case toco::IODataType::UINT8:
          return tflite::TensorType_UINT8;
        case toco::IODataType::QUANTIZED_INT8:
          return tflite::TensorType_INT8;
        case toco::IODataType::INT8:
          return tflite::TensorType_INT8;
        default:
          return tflite::TensorType_FLOAT32;
      }
    }
    
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed Jun 12 23:15:24 UTC 2024
    - 19.2K bytes
    - Viewed (0)
  8. tensorflow/compiler/mlir/lite/python/tf_tfl_flatbuffer_helpers.cc

        case toco::IODataType::FLOAT16:
          return DT_HALF;
        case toco::IODataType::FLOAT64:
          return DT_DOUBLE;
        case toco::IODataType::QUANTIZED_UINT8:
          return DT_QUINT8;
        case toco::IODataType::QUANTIZED_INT8:
          return DT_QINT8;
        case toco::IODataType::QUANTIZED_INT16:
          return DT_QINT16;
        case toco::IODataType::INT8:
          return DT_INT8;
        case toco::IODataType::INT16:
          return DT_INT16;
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Sun May 12 12:39:37 UTC 2024
    - 17.3K bytes
    - Viewed (0)
  9. tensorflow/compiler/mlir/lite/tf_to_tfl_flatbuffer.cc

      ::tflite::optimize::BufferType quantized_type;
      switch (quant_specs.inference_type) {
        case DT_QINT8:
          quantized_type = ::tflite::optimize::BufferType::QUANTIZED_INT8;
          break;
        case DT_HALF:
          quantized_type = ::tflite::optimize::BufferType::QUANTIZED_FLOAT16;
          break;
        default:
          return absl::InvalidArgumentError("Quantized type not supported");
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Fri May 03 18:01:23 UTC 2024
    - 23.8K bytes
    - Viewed (0)
Back to top