Results 1 - 6 of 6 for QuantizationOptions (0.24 sec)

  1. tensorflow/compiler/mlir/quantization/tensorflow/python/quantize_model.cc

          {asset_file_defs.begin(), asset_file_defs.end()});
    }
    
    absl::StatusOr<ExportedModel> ExportCalibrationModel(
        mlir::ModuleOp module_op, mlir::MLIRContext *context,
        const QuantizationOptions &quantization_options,
        const absl::flat_hash_map<std::string, std::string> &function_aliases,
        absl::string_view calibration_data_dir) {
      // Clone ModuleOp and function aliases so changes in this pipeline won't
    - Last Modified: Fri May 17 03:36:50 UTC 2024
    - 23.8K bytes
  2. tensorflow/compiler/mlir/quantization/tensorflow/python/quantize_model.py

    from tensorflow.python.util import tf_export
    
    # Type aliases for quant_opts_pb2 messages.
    _QuantizationOptions = tf_export.tf_export(
        'quantization.experimental.QuantizationOptions'
    )(quant_opts_pb2.QuantizationOptions)
    
    _QuantizationMethod = tf_export.tf_export(
        'quantization.experimental.QuantizationMethod'
    )(quant_opts_pb2.QuantizationMethod)
    
    _QuantizationComponentSpec = tf_export.tf_export(
    - Last Modified: Fri May 17 03:36:50 UTC 2024
    - 34.2K bytes
  3. tensorflow/compiler/mlir/quantization/tensorflow/quantize_passes.cc

    #include "tensorflow/compiler/mlir/tensorflow/transforms/passes.h"
    #include "xla/mlir_hlo/mhlo/transforms/passes.h"
    
    namespace tensorflow {
    namespace quantization {
    namespace {
    
    using ::tensorflow::quantization::QuantizationOptions;
    
    void AddConvertTpuToCpuModelPasses(mlir::OpPassManager &pm) {
      pm.addPass(mlir::quant::CreateConvertTpuModelToCpuPass());
      pm.addPass(mlir::createInlinerPass());
    - Last Modified: Fri May 10 04:07:09 UTC 2024
    - 9.9K bytes
  4. tensorflow/compiler/mlir/quantization/tensorflow/passes/lift_quantizable_spots_as_functions.cc

    using ::tensorflow::quantization::OpSet;
    using ::tensorflow::quantization::QuantizationComponentSpec;
    using ::tensorflow::quantization::QuantizationMethod;
    using ::tensorflow::quantization::QuantizationOptions;
    using ::tensorflow::quantization::UnitWiseQuantizationSpec;
    
    class LiftQuantizableSpotsAsFunctionsPass
        : public PassWrapper<LiftQuantizableSpotsAsFunctionsPass,
                             OperationPass<ModuleOp>> {
    - Last Modified: Fri May 10 04:07:09 UTC 2024
    - 16.4K bytes
  5. tensorflow/compiler/mlir/quantization/tensorflow/passes/passes.h

    // Lifts the quantizable spots as composite functions.
    std::unique_ptr<OperationPass<ModuleOp>>
    CreateLiftQuantizableSpotsAsFunctionsPass(
        const tensorflow::quantization::QuantizationOptions& quant_options);
    
    // Apply graph optimizations such as fusing and constant folding to prepare
    // lifting.
    std::unique_ptr<OperationPass<func::FuncOp>> CreatePrepareLiftingPass(
    - Last Modified: Fri May 10 04:07:09 UTC 2024
    - 12.3K bytes
  6. tensorflow/compiler/mlir/lite/tf_to_tfl_flatbuffer.cc

      // The default minimum number of elements a weights array must have to be
      // quantized by this transformation.
      const int kWeightsMinNumElementsDefault = 1024;
    
          quantization::QuantizationOptions quantization_options;
    
          quantization_options.mutable_quantization_method()->set_preset_method(
              quantization::QuantizationMethod::METHOD_DYNAMIC_RANGE_INT8);
    - Last Modified: Fri May 03 18:01:23 UTC 2024
    - 23.8K bytes
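
The fragment in result 6 configures a QuantizationOptions proto directly in C++. As a minimal, self-contained sketch of the same idea, the helper below expands that fragment; the generated proto header path and the helper name are assumptions, while the message, field, and enum names are taken from the snippet in result 6.

    #include "tensorflow/compiler/mlir/quantization/tensorflow/quantization_options.pb.h"  // assumed header path for the QuantizationOptions proto

    // Hypothetical helper: builds a QuantizationOptions proto that selects
    // dynamic-range int8 quantization, mirroring the fragment in result 6.
    tensorflow::quantization::QuantizationOptions BuildDynamicRangeInt8Options() {
      tensorflow::quantization::QuantizationOptions quantization_options;
      quantization_options.mutable_quantization_method()->set_preset_method(
          tensorflow::quantization::QuantizationMethod::METHOD_DYNAMIC_RANGE_INT8);
      return quantization_options;
    }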
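
Results 3 and 5 show how such an options object reaches the passes: passes.h declares CreateLiftQuantizableSpotsAsFunctionsPass as taking the options by const reference, and quantize_passes.cc adds pass factories to an mlir::OpPassManager with pm.addPass. The wrapper below is a hedged sketch of that wiring; the function name and include set are assumptions, and the mlir::quant namespace for the factory is inferred from the mlir::quant::CreateConvertTpuModelToCpuPass call in result 3.

    #include "mlir/Pass/PassManager.h"
    #include "tensorflow/compiler/mlir/quantization/tensorflow/passes/passes.h"
    #include "tensorflow/compiler/mlir/quantization/tensorflow/quantization_options.pb.h"  // assumed header path

    // Hypothetical wrapper: registers the lifting pass on a module-level pass
    // manager, forwarding the caller's QuantizationOptions.
    void AddLiftQuantizableSpotsPass(
        mlir::OpPassManager &pm,
        const tensorflow::quantization::QuantizationOptions &quant_options) {
      // The factory returns an OperationPass<ModuleOp>, so it is added at the
      // module level of the pipeline (same pattern as in result 3).
      pm.addPass(
          mlir::quant::CreateLiftQuantizableSpotsAsFunctionsPass(quant_options));
    }

A caller could combine the two sketches by passing the proto returned from BuildDynamicRangeInt8Options() into AddLiftQuantizableSpotsPass before running the pass manager.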