Search Options

Results per page
Sort
Preferred Languages
Advanced

Results 21 - 30 of 38 for QuantizationSpecs (0.33 sec)

  1. tensorflow/compiler/mlir/quantization/stablehlo/passes/passes.h

        StringRef serialized_stablehlo_module);
    
    std::unique_ptr<OperationPass<ModuleOp>>
    CreateLiftQuantizableSpotsAsFunctionsPass(
        const ::stablehlo::quantization::QuantizationSpecs& quantization_specs);
    
    // Creates a pass that inserts CalibrationStatisticsSaverOp.
    std::unique_ptr<OperationPass<ModuleOp>>
    CreateInsertCalibrationStatisticsSaverPass(
        StringRef calibration_data_dir,
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue May 14 06:31:57 UTC 2024
    - 2.6K bytes
    - Viewed (0)
  2. tensorflow/compiler/mlir/quantization/stablehlo/cc/post_calibration.h

          ModuleOp module_op,
          const ::stablehlo::quantization::QuantizationConfig& config) override;
    
      void AddPasses(
          OpPassManager& pm,
          const ::stablehlo::quantization::QuantizationSpecs& specs,
          const ::stablehlo::quantization::PipelineConfig& pipeline_config) const;
    
     private:
      absl::Nonnull<MLIRContext*> ctx_;
    };
    
    }  // namespace mlir::quant::stablehlo
    
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Mon Apr 15 12:53:33 UTC 2024
    - 2.4K bytes
    - Viewed (0)
  3. tensorflow/compiler/mlir/lite/transforms/prepare_quantize_helper.h

     public:
      explicit ConvertOpStatsToQDQs(MLIRContext* context,
                                    const quant::QuantizationSpecs& quant_specs,
                                    PatternBenefit benefit = 1)
          : OpRewritePattern<SourceOp>(context, benefit),
            quant_specs_(quant_specs) {}
    
     protected:
      quant::QuantizationSpecs quant_specs_;
    
      LogicalResult processInputs(
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Fri May 03 18:01:23 UTC 2024
    - 28K bytes
    - Viewed (0)
  4. tensorflow/compiler/mlir/quantization/tensorflow/passes/quantize.cc

    // Creates an instance of the TensorFlow dialect Quantize pass.
    std::unique_ptr<OperationPass<func::FuncOp>> CreateQuantizePass() {
      QuantizationSpecs quant_specs;
      return std::make_unique<QuantizePass>(quant_specs, OpSet::TF);
    }
    
    std::unique_ptr<OperationPass<func::FuncOp>> CreateQuantizePass(
        QuantizationSpecs quant_specs, OpSet target_opset) {
      return std::make_unique<QuantizePass>(quant_specs, target_opset);
    }
    
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Fri Mar 22 05:52:39 UTC 2024
    - 23.6K bytes
    - Viewed (0)
  5. tensorflow/compiler/mlir/lite/python/tf_tfl_flatbuffer_helpers.h

    // Populate quantization specs (or not) given user-specified ranges for each
    // input array.
    Status PopulateQuantizationSpecs(
        const toco::ModelFlags& model_flags, toco::TocoFlags& toco_flags,
        mlir::quant::QuantizationSpecs* quant_specs,
        std::vector<string>* node_names, std::vector<string>* node_dtypes,
        std::vector<std::optional<std::vector<int>>>* node_shapes,
        std::vector<std::optional<double>>* node_mins,
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Sun May 12 12:39:37 UTC 2024
    - 3.2K bytes
    - Viewed (0)
  6. tensorflow/compiler/mlir/lite/transforms/prepare_quantize.cc

      // This is only used by test.
      explicit PrepareQuantizePass() : use_quantization_flags_(true) {}
    
      // Constructor used by manually creating the pass.
      explicit PrepareQuantizePass(const quant::QuantizationSpecs& quant_specs)
          : use_quantization_flags_(false), quant_specs_(quant_specs) {}
    
      void runOnOperation() override;
    
     private:
      // Set the quantization parameters of the input nodes. These parameters are
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 17.6K bytes
    - Viewed (0)
  7. tensorflow/compiler/mlir/lite/python/graphdef_to_tfl_flatbuffer.cc

        const GraphDebugInfo& debug_info, const GraphDef& input,
        std::string* result) {
      using ::tflite::optimize::ReducedPrecisionSupport;
      mlir::MLIRContext context;
      GraphImportConfig specs;
      mlir::quant::QuantizationSpecs quant_specs;
    
      // Parse input arrays.
      std::vector<std::string> node_names;
      std::vector<std::string> node_dtypes;
      std::vector<std::optional<std::vector<int>>> node_shapes;
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Mon Mar 11 19:29:56 UTC 2024
    - 5.1K bytes
    - Viewed (0)
  8. tensorflow/compiler/mlir/lite/quantization/lite/quantize_model.cc

      PassManager pm((*module)->getName(), OpPassManager::Nesting::Implicit);
      if (debug_options.has_value()) {
        // Add debugging instrumentation
        tensorflow::InitPassManager(pm, debug_options.value());
      }
      quant::QuantizationSpecs quant_specs;
      quant_specs.inference_type = tflite::TflTypeToTfType(inference_type);
      quant_specs.post_training_quantization = true;
      quant_specs.disable_per_channel = disable_per_channel;
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed Jun 12 23:15:24 UTC 2024
    - 6.3K bytes
    - Viewed (0)
  9. tensorflow/compiler/mlir/lite/quantization/lite/quantize_weights.cc

          serialized_model, &context, UnknownLoc::get(&context));
    
      // Apply quantization passes.
      PassManager pm((*module)->getName(), OpPassManager::Nesting::Implicit);
      quant::QuantizationSpecs quant_specs;
      quant_specs.inference_type = tflite::TflTypeToTfType(inference_type);
      quant_specs.weight_quantization = true;
      quant_specs.weight_only_quantization = weight_only_quantization;
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed Jun 12 23:15:24 UTC 2024
    - 9.5K bytes
    - Viewed (0)
  10. tensorflow/compiler/mlir/lite/tf_to_tfl_flatbuffer.cc

    // on the translated_result using quant_specs and saving the final output in
    // result.
    absl::Status ApplyDynamicRangeQuantizationFromOldQuantizer(
        const mlir::quant::QuantizationSpecs& quant_specs,
        std::string translated_result, std::string* result) {
      flatbuffers::FlatBufferBuilder q_builder(/*initial_size=*/10240);
      const uint8_t* buffer =
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Fri May 03 18:01:23 UTC 2024
    - 23.8K bytes
    - Viewed (0)
Back to top