Results 1 - 6 of 6 for debugger_config (0.27 sec)

  1. tensorflow/compiler/mlir/quantization/tensorflow/python/quantize_model.cc

      // both quantized and unquantized tensors and compare them offline.
      if (quantization_options.has_debugger_config() &&
          quantization_options.debugger_config().debugger_type() ==
              DebuggerConfig::DEBUGGER_TYPE_WHOLE_MODEL) {
        TF_ASSIGN_OR_RETURN(
            ExportedModel debugging_exported_model,
            ExportDebuggingModel(*module_ref, context.get(), quantization_options,
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Fri May 17 03:36:50 UTC 2024
    - 23.8K bytes
    - Viewed (0)
  2. tensorflow/compiler/mlir/quantization/tensorflow/python/quantize_model.py

      if quantization_options.HasField('debugger_config'):
        if not quantization_options.debugger_config.log_dir_path:
          quantization_options.debugger_config.log_dir_path = '/tmp/dumps'
    
        if (
            quantization_options.debugger_config.debugger_type
            == stablehlo_quant_config_pb2.DebuggerConfig.DebuggerType.DEBUGGER_TYPE_UNSPECIFIED
        ):
          raise ValueError(
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Fri May 17 03:36:50 UTC 2024
    - 34.2K bytes
    - Viewed (0)
  3. tensorflow/compiler/mlir/quantization/tensorflow/quantize_passes.cc

      if (quantization_options.has_debugger_config()) {
        pm.addPass(mlir::quant::CreateAddDumpTensorOpPass(
            quantization_options.debugger_config().debugger_type(),
            quantization_options.debugger_config().log_dir_path()));
      }
      pm.addNestedPass<mlir::func::FuncOp>(
          mlir::quant::CreateInsertCustomAggregationOpsPass(
              quantization_options.calibration_options()));
    }
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Fri May 10 04:07:09 UTC 2024
    - 9.9K bytes
    - Viewed (0)
  4. tensorflow/compiler/mlir/quantization/stablehlo/quantization_config.proto

      // Configures the graph transformation pipeline for quantization.
      PipelineConfig pipeline_config = 3;
    
      QuantizationSpecs specs = 4;
    
      // Configures the quantization debugger.
      DebuggerConfig debugger_config = 5;
    
      // Defines calibration options for quantization. This option is only used for
      // activation of static range quantization (SRQ). Quantization calibration
      // method is set to MIN_MAX by default.
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Fri May 17 03:36:50 UTC 2024
    - 14.3K bytes
    - Viewed (0)
  5. tensorflow/compiler/mlir/quantization/tensorflow/passes/add_dump_tensor_op.cc

          *this, "debugger_type",
          llvm::cl::init(DebuggerConfig::DEBUGGER_TYPE_UNSPECIFIED),
          llvm::cl::values(
              clEnumValN(DebuggerConfig::DEBUGGER_TYPE_WHOLE_MODEL, "whole_model",
                         "Whole model verify"),
              clEnumValN(DebuggerConfig::DEBUGGER_TYPE_INT_PER_LAYER,
                         "int_per_layer", "Int Per-layer verify"),
              clEnumValN(DebuggerConfig::DEBUGGER_TYPE_FLOAT_PER_LAYER,
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Fri Mar 22 22:55:22 UTC 2024
    - 13K bytes
    - Viewed (0)
  6. tensorflow/compiler/mlir/quantization/tensorflow/passes/passes.h

    // Create a pass that inserts dump tensor to quantizable layer's output.
    std::unique_ptr<OperationPass<ModuleOp>> CreateAddDumpTensorOpPass(
        ::stablehlo::quantization::DebuggerConfig::DebuggerType debugger_type,
        std::string log_dir_path);
    
    // Creates a pass that add QuantizationUnitLoc to quantizable layers.
    std::unique_ptr<OperationPass<func::FuncOp>> CreateAddQuantizationUnitLocPass();
    
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Fri May 10 04:07:09 UTC 2024
    - 12.3K bytes
    - Viewed (0)
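
The excerpts above show how the quantization debugger is driven from a populated debugger_config: quantize_model.py (result 2) defaults log_dir_path to '/tmp/dumps' and rejects DEBUGGER_TYPE_UNSPECIFIED, quantize_model.cc (result 1) exports a separate debugging model for DEBUGGER_TYPE_WHOLE_MODEL, and quantize_passes.cc (result 3) adds the dump-tensor pass with the configured type and log directory. Below is a minimal, hedged sketch of setting those fields before quantization; only the debugger_config field names and enum values come from the snippets, while the import paths and the QuantizationOptions message construction are assumptions.

    # Hedged sketch (not taken from the results above): enable whole-model
    # debugging before quantization. Import paths and the QuantizationOptions
    # message name are assumptions; debugger_config fields and enum values
    # appear in the snippets.
    from tensorflow.compiler.mlir.quantization.stablehlo import (
        quantization_config_pb2 as stablehlo_quant_config_pb2,  # assumed module path
    )
    from tensorflow.compiler.mlir.quantization.tensorflow import (
        quantization_options_pb2,  # assumed module path
    )

    quantization_options = quantization_options_pb2.QuantizationOptions()  # assumed message
    quantization_options.debugger_config.debugger_type = (
        stablehlo_quant_config_pb2.DebuggerConfig.DebuggerType.DEBUGGER_TYPE_WHOLE_MODEL
    )
    # Per result 2, an empty log_dir_path falls back to '/tmp/dumps' and
    # DEBUGGER_TYPE_UNSPECIFIED raises ValueError.
    quantization_options.debugger_config.log_dir_path = '/tmp/quant_debugger_dumps'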
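
Result 5 registers the three concrete debugger types as command-line option values (whole_model, int_per_layer, float_per_layer). The mapping below is an illustrative, hedged sketch of pairing those option names with the corresponding enum values; the helper function is not part of the codebase.

    # Hedged sketch: option names from add_dump_tensor_op.cc (result 5) mapped
    # to DebuggerType enum values. The helper is illustrative only.
    from tensorflow.compiler.mlir.quantization.stablehlo import (
        quantization_config_pb2 as stablehlo_quant_config_pb2,  # assumed module path
    )

    _DEBUGGER_TYPES = {
        'whole_model': stablehlo_quant_config_pb2.DebuggerConfig.DebuggerType.DEBUGGER_TYPE_WHOLE_MODEL,
        'int_per_layer': stablehlo_quant_config_pb2.DebuggerConfig.DebuggerType.DEBUGGER_TYPE_INT_PER_LAYER,
        'float_per_layer': stablehlo_quant_config_pb2.DebuggerConfig.DebuggerType.DEBUGGER_TYPE_FLOAT_PER_LAYER,
    }

    def debugger_type_from_name(name: str):
        """Returns the DebuggerType enum value for an option name."""
        return _DEBUGGER_TYPES[name]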