Results 1 - 2 of 2 for i8_1 (0.03 sec)

  1. tensorflow/compiler/mlir/lite/quantization/device_target.cc

      i32_max_ = QuantizedType::getDefaultMaximumForInteger(kSigned, k32Bits);
      any_ = AnyQuantizedType();
      qi8_ = AnyQuantizedType::get(kSigned, i8_, f32_, i8_min_, i8_max_);
      qi8n_ = AnyQuantizedType::get(kSigned, i8_, f32_, i8_min_ + 1, i8_max_);
      qi32_ = AnyQuantizedType::get(kSigned, i32_, f32_, i32_min_, i32_max_);
      assert(qi8n_ == qi8n_);
    }
    
    std::optional<KernelSpec> DeviceTarget::GetKernelSpec(
    - Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Fri Mar 08 10:41:08 UTC 2024
    - 7.3K bytes
    - Viewed (0)
  2. tensorflow/compiler/mlir/lite/quantization/device_target.h

          QuantizedMultipliers* output_multipliers, QuantizedRanges* output_ranges);
    
      // A set of parameters are required to build the signatures.
      FloatType f32_;
      IntegerType i8_, i32_;
      int64_t i8_min_, i8_max_, i32_min_, i32_max_;
      quant::AnyQuantizedType any_, qi8_, qi8n_, qi32_;
    
     private:
      // Maps the kernel names to all the available kernels.
      llvm::StringMap<KernelSpecs> specs_;
    - Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Fri Mar 08 10:41:08 UTC 2024
    - 7.1K bytes
    - Viewed (0)
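
The snippet in result 1 is from the DeviceTarget constructor, where the quantized type signatures are built: qi8_ covers the full signed 8-bit storage range, while qi8n_ is the narrow-range variant that starts at i8_min_ + 1, giving a range symmetric around zero. The following is a minimal standalone sketch (not TensorFlow/MLIR code; the variable names are illustrative) of how those storage bounds work out:

    #include <cstdint>
    #include <iostream>

    int main() {
      const unsigned bits = 8;
      // Default signed range for an N-bit integer: [-2^(N-1), 2^(N-1) - 1].
      const int64_t i8_min = -(1LL << (bits - 1));     // -128
      const int64_t i8_max = (1LL << (bits - 1)) - 1;  //  127
      // qi8_ uses the full range; qi8n_ drops the lowest value (i8_min + 1),
      // so the narrow range [-127, 127] is symmetric around zero.
      std::cout << "qi8  storage range: [" << i8_min << ", " << i8_max << "]\n";
      std::cout << "qi8n storage range: [" << i8_min + 1 << ", " << i8_max << "]\n";
      return 0;
    }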
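
Result 2 shows the matching declarations in device_target.h, including specs_, the map from kernel names to all available kernel specs that GetKernelSpec queries. Below is a hedged sketch of that lookup pattern using simplified stand-in types (KernelSpec, Registry, and the signature field are assumptions, not the actual TensorFlow classes):

    #include <map>
    #include <optional>
    #include <string>
    #include <vector>

    // Simplified stand-in: the real KernelSpec carries quantization constraints.
    struct KernelSpec {
      std::string signature;
    };
    using KernelSpecs = std::vector<KernelSpec>;

    class Registry {
     public:
      void Register(const std::string& kernel, KernelSpec spec) {
        specs_[kernel].push_back(std::move(spec));
      }

      // Returns the first registered spec whose signature matches, if any.
      std::optional<KernelSpec> GetKernelSpec(const std::string& kernel,
                                              const std::string& signature) const {
        auto it = specs_.find(kernel);
        if (it == specs_.end()) return std::nullopt;
        for (const auto& spec : it->second) {
          if (spec.signature == signature) return spec;
        }
        return std::nullopt;
      }

     private:
      // Maps kernel names to all the available kernels (cf. llvm::StringMap<KernelSpecs>).
      std::map<std::string, KernelSpecs> specs_;
    };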