- Sort: Score
- Results: 10
- Languages: All
Results 1 - 10 of 10 for DebuggerConfig (0.2 sec)
-
tensorflow/compiler/mlir/quantization/tensorflow/passes/add_dump_tensor_op.cc
*this, "debugger_type", llvm::cl::init(DebuggerConfig::DEBUGGER_TYPE_UNSPECIFIED), llvm::cl::values( clEnumValN(DebuggerConfig::DEBUGGER_TYPE_WHOLE_MODEL, "whole_model", "Whole model verify"), clEnumValN(DebuggerConfig::DEBUGGER_TYPE_INT_PER_LAYER, "int_per_layer", "Int Per-layer verify"), clEnumValN(DebuggerConfig::DEBUGGER_TYPE_FLOAT_PER_LAYER,
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri Mar 22 22:55:22 UTC 2024 - 13K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/stablehlo/cc/pass_pipeline.cc
using ::stablehlo::quantization::DebuggerConfig; using ::stablehlo::quantization::PipelineConfig; using ::stablehlo::quantization::QuantizationSpecs; void AddPreCalibrationPasses(OpPassManager& pm, const CalibrationOptions& calibration_options, const QuantizationSpecs& quantization_specs, const DebuggerConfig& debugger_config) {
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri May 10 04:07:09 UTC 2024 - 8.1K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/stablehlo/cc/pass_pipeline.h
void AddPreCalibrationPasses( OpPassManager& pm, const ::stablehlo::quantization::CalibrationOptions& calibration_options, const ::stablehlo::quantization::QuantizationSpecs& specs, const ::stablehlo::quantization::DebuggerConfig& debugger_config); // Adds passes for static-range quantization post-calibration. Utilizes tensor // statistics collected from the calibration step and performs quantization. void AddPostCalibrationPasses(
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Mon Apr 15 12:53:33 UTC 2024 - 3.6K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/stablehlo/quantization_config.proto
} // Configuration for quantization debugger. // The debugging model assumes it should be run on CPU based server, since the // model contains TF::DumpTensorOp. // NEXT ID: 4 message DebuggerConfig { // Type of quantization debugger. Depending on the type, inputs and outputs // are wired differently. // NEXT ID: 4 enum DebuggerType { DEBUGGER_TYPE_UNSPECIFIED = 0;
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri May 17 03:36:50 UTC 2024 - 14.3K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/python/quantize_model.cc
using ::mlir::quant::stablehlo::WeightOnlyPtqComponent; using ::stablehlo::quantization::AddCalibrationStatistics; using ::stablehlo::quantization::ChangeToQuantizedFilename; using ::stablehlo::quantization::DebuggerConfig; using ::stablehlo::quantization::ExpandPresets; using ::stablehlo::quantization::IsCalibrationRequired; using ::stablehlo::quantization::PopulateDefaults; using ::stablehlo::quantization::QuantizationConfig;
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri May 17 03:36:50 UTC 2024 - 23.8K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/passes/passes.h
// Create a pass that inserts dump tensor to quantizable layer's output. std::unique_ptr<OperationPass<ModuleOp>> CreateAddDumpTensorOpPass( ::stablehlo::quantization::DebuggerConfig::DebuggerType debugger_type, std::string log_dir_path); // Creates a pass that add QuantizationUnitLoc to quantizable layers. std::unique_ptr<OperationPass<func::FuncOp>> CreateAddQuantizationUnitLocPass();
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri May 10 04:07:09 UTC 2024 - 12.3K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/quantization_options.proto
// method is set to MIN_MAX by default. stablehlo.quantization.CalibrationOptions calibration_options = 15; // Configuration related to quantization debugger. stablehlo.quantization.DebuggerConfig debugger_config = 16; reserved 3;
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Mar 19 06:31:19 UTC 2024 - 9.2K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/python/quantize_model.py
== stablehlo_quant_config_pb2.DebuggerConfig.DebuggerType.DEBUGGER_TYPE_UNSPECIFIED ): raise ValueError( 'Debugger is enabled but debugger type was not specified.' ) if ( quantization_options.debugger_config.debugger_type == stablehlo_quant_config_pb2.DebuggerConfig.DebuggerType.DEBUGGER_TYPE_WHOLE_MODEL
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri May 17 03:36:50 UTC 2024 - 34.2K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/python/integration_test/quantize_model_test.py
_PER_CHANNEL_QUANTIZED_OPS = ( 'UniformQuantizedConvolution', 'UniformQuantizedConvolutionHybrid', 'UniformQuantizedDotHybrid', ) _DebuggerConfig = stablehlo_quant_config_pb2.DebuggerConfig # Lists of ops whose channel dimension should be changed if per_channel # quantization is enabled. Respectively refers to (scale, zero_point). _SUFFIXES = ('/filter1', '/filter2') _PER_CHANNEL_OP_NAMES = (
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri May 17 03:36:50 UTC 2024 - 235.6K bytes - Viewed (0) -
RELEASE.md
* GPU * Support for NVIDIA GPUs with compute capability 8.9 (e.g. L4 & L40) has been added to TF binary distributions (Python wheels). * Replace `DebuggerOptions` of TensorFlow Quantizer, and migrate to `DebuggerConfig` of StableHLO Quantizer. * Add TensorFlow to StableHLO converter to TensorFlow pip package. * TensorRT support: this is the last release supporting TensorRT. It will be removed in the next release.
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Jun 11 23:24:08 UTC 2024 - 730.3K bytes - Viewed (0)