Results 1 - 2 of 2 for GetQuantizedInferenceType

  1. tensorflow/compiler/mlir/lite/transforms/prepare_quantize.cc

      // Entry point of the pass: grab the function being processed.
      func::FuncOp func = getOperation();
      MLIRContext* ctx = func.getContext();
      // While this scope is active, TFL quantization ops are converted to
      // MLIR quant dialect ops (and converted back on destruction).
      ScopedTFLQuantOpsToMlirQuantOpsConverter converter(func);
      if (use_quantization_flags_) {
        // Derive the quantized inference type from the pass flags.
        quant_specs_.inference_type = GetQuantizedInferenceType(
            this->quantize_signed_, this->activation_number_of_bits_);
        if (quant_specs_.inference_type == tensorflow::DT_INVALID) {
          func.emitError() << "prepare-quantize pass failed: unsupported "
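    The excerpt above is cut off mid-statement by the search snippet. In an
    MLIR pass, an error like this is conventionally followed by
    signalPassFailure() so the surrounding pass manager aborts the pipeline.
    The sketch below shows that generic pattern; it is an illustration, not
    the actual continuation of prepare_quantize.cc, and the error message is
    shortened:

        if (quant_specs_.inference_type == tensorflow::DT_INVALID) {
          // Report the error on the function and mark the pass as failed so
          // the pass manager stops the pipeline.
          func.emitError() << "prepare-quantize pass failed: unsupported "
                              "inference type";  // Shortened for the sketch.
          signalPassFailure();
          return;
        }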
  2. tensorflow/compiler/mlir/lite/transforms/prepare_quantize_helper.h

        "effective_hidden_scale_intermediate"};
    
    // Calculates the minimum power of two that is not less than the value.
    double PowerOfTwoBound(double value);
    
    // Returns the quantized inference type for the given signedness and
    // activation bit width, or tensorflow::DT_INVALID if the combination is
    // unsupported (see the check in prepare_quantize.cc above).
    tensorflow::DataType GetQuantizedInferenceType(bool is_signed,
                                                   int activation_number_of_bits);
    
    // Returns the element type of LSTM's intermediate tensor designated by the
    // index.
    template <typename LstmOp>
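    Taken together with the DT_INVALID check in prepare_quantize.cc, the
    declaration suggests a mapping from (signedness, activation bit width) to
    a quantized tensorflow::DataType. Below is a minimal sketch of such a
    mapping, assuming the standard quantized types DT_QINT8, DT_QUINT8, and
    DT_QINT16 from tensorflow/core/framework/types.pb.h; the name
    GetQuantizedInferenceTypeSketch is hypothetical, and this is an
    illustration, not the actual TensorFlow implementation:

    #include "tensorflow/core/framework/types.pb.h"

    // Illustrative mapping only; the real GetQuantizedInferenceType lives in
    // the TensorFlow sources and may differ.
    tensorflow::DataType GetQuantizedInferenceTypeSketch(
        bool is_signed, int activation_number_of_bits) {
      // 8-bit activations: pick the signed or unsigned 8-bit quantized type.
      if (activation_number_of_bits == 8) {
        return is_signed ? tensorflow::DT_QINT8 : tensorflow::DT_QUINT8;
      }
      // 16-bit activations: only the signed variant is mapped in this sketch.
      if (is_signed && activation_number_of_bits == 16) {
        return tensorflow::DT_QINT16;
      }
      // Everything else is rejected; the caller in prepare_quantize.cc checks
      // for DT_INVALID and emits an error.
      return tensorflow::DT_INVALID;
    }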