Search Options

Results per page
Sort
Preferred Languages
Advanced

Results 1 - 9 of 9 for QuantizationOptions (0.38 sec)

  1. tensorflow/compiler/mlir/quantization/tensorflow/quantize_passes.h

        mlir::OpPassManager &pm, const QuantizationOptions &quantization_options,
        std::optional<const absl::string_view> mlir_dump_file_prefix =
            std::nullopt);
    
    void AddQuantizePtqPreCalibrationPasses(
        mlir::OpPassManager &pm, const QuantizationOptions &quantization_options);
    
    void AddQuantizePtqPostCalibrationPasses(
        mlir::OpPassManager &pm, const QuantizationOptions &quantization_options,
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Mon Jan 08 10:03:23 UTC 2024
    - 2.3K bytes
    - Viewed (0)
  2. tensorflow/compiler/mlir/quantization/tensorflow/python/quantize_model.h

        const std::unordered_set<std::string>& tags,
        const QuantizationOptions& quantization_options);
    
    // Applies post-training dynamic-range quantization to the model.
    absl::StatusOr<ExportedModel> QuantizeDynamicRangePtq(
        absl::string_view saved_model_path,
        const std::vector<std::string>& signature_keys,
        const std::unordered_set<std::string>& tags,
        const QuantizationOptions& quantization_options);
    
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Mar 28 15:31:08 UTC 2024
    - 3.3K bytes
    - Viewed (0)
  3. tensorflow/compiler/mlir/quantization/stablehlo/utils/fill_quantization_options.cc

      spec->set_quantization_component(component);
      spec->set_bit_type(bit_type);
      spec->set_bit_width(bit_width);
    }
    
    ::stablehlo::quantization::QuantizationOptions FillPresetQuantizationOptions(
        ::stablehlo::quantization::QuantizationOptions quantization_options_) {
      CustomQuantizationMethod custom_method =
          quantization_options_.quantization_method().custom_quantization_method();
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue Mar 05 08:32:43 UTC 2024
    - 6.2K bytes
    - Viewed (0)
  4. tensorflow/compiler/mlir/quantization/stablehlo/quantize_passes.cc

    namespace stablehlo {
    namespace quantization {
    
    void AddQuantizationPasses(mlir::PassManager& pass_manager,
                               const QuantizationOptions& quantization_options) {
      QuantizationOptions quantization_options_ = quantization_options;
      if (quantization_options.quantization_method()
              .has_preset_quantization_method()) {
        quantization_options_ =
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue Mar 05 08:32:43 UTC 2024
    - 2.3K bytes
    - Viewed (0)
  5. tensorflow/compiler/mlir/quantization/stablehlo/tests/fill_quantization_options_test.cc

        const QuantizationComponentSpec expected_bias_component) {
      QuantizationOptions quantization_options;
      quantization_options.mutable_quantization_method()
          ->mutable_preset_quantization_method()
          ->set_preset_method(preset_quantization_options);
      QuantizationOptions filled_quantization_options =
          quant::stablehlo::FillPresetQuantizationOptions(quantization_options);
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue Mar 05 09:05:02 UTC 2024
    - 4.3K bytes
    - Viewed (0)
  6. tensorflow/compiler/mlir/quantization/tensorflow/python/type_casters.h

        : public internal::SerializedProtobufCaster<
              tensorflow::quantization::ExportedModel> {};
    
    template <>
    struct type_caster<tensorflow::quantization::QuantizationOptions>
        : public internal::SerializedProtobufCaster<
              tensorflow::quantization::QuantizationOptions> {};
    
    template <>
    struct type_caster<::stablehlo::quantization::CalibrationOptions>
        : public internal::SerializedProtobufCaster<
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Mon Mar 11 19:29:56 UTC 2024
    - 6.6K bytes
    - Viewed (0)
  7. tensorflow/compiler/mlir/quantization/tensorflow/ops/tf_op_quant_spec.h

    // bf16 is quantizable.
    bool IsValueWithQuantizablePrecision(Value val);
    
    std::optional<tensorflow::quantization::QuantizationComponentSpec>
    GetWeightComponentSpec(
        const tensorflow::quantization::QuantizationOptions& quantization_options);
    
    // Returns the spec for the given operation that can be used for both of
    // dynamic and static range quantization.
    std::unique_ptr<OpQuantSpec> GetTFOpQuantSpec(Operation* op);
    
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue Mar 05 07:39:40 UTC 2024
    - 2.5K bytes
    - Viewed (0)
  8. tensorflow/compiler/mlir/quantization/tensorflow/ops/tf_op_quant_spec.cc

        return true;
      return false;
    }
    
    std::optional<tensorflow::quantization::QuantizationComponentSpec>
    GetWeightComponentSpec(
        const tensorflow::quantization::QuantizationOptions& quantization_options) {
      for (auto& cur_spec : quantization_options.quantization_method()
                                .quantization_component_specs()) {
        if (cur_spec.quantization_component() ==
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 6.3K bytes
    - Viewed (0)
  9. tensorflow/compiler/mlir/quantization/tensorflow/quantization_options.proto

    // None, the default configuration is "do not quantize the model".
    // 2) A set of supported operations.
    // 3) Unit wise quantization precision.
    // 4) Target hardware name.
    // NEXT ID: 18
    message QuantizationOptions {
      // The default quantization configuration for the model. If the below
      // unit-wise configuration does not exist, we use this quantization
      // configuration for the entire model. For each method, default configuration
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue Mar 19 06:31:19 UTC 2024
    - 9.2K bytes
    - Viewed (0)
Back to top