- Sort Score
- Results per page: 10
- Languages All
Results 1 - 10 of 10 for debugger_type (0.21 sec)
-
tensorflow/compiler/mlir/quantization/tensorflow/passes/add_dump_tensor_op.cc
explicit AddDumpTensorOpPass() = default; explicit AddDumpTensorOpPass(DebuggerType debugger_type, std::string log_dir_path) : log_dir_path_(std::move(log_dir_path)) { debugger_type_ = debugger_type; } AddDumpTensorOpPass(const AddDumpTensorOpPass &other) { debugger_type_ = other.debugger_type_; log_dir_path_ = other.log_dir_path_; }
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri Mar 22 22:55:22 UTC 2024 - 13K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/stablehlo/cc/pass_pipeline.cc
// formatted tensors. AddProcessNchwTensorPasses(pm); pm.addPass(CreateLiftQuantizableSpotsAsFunctionsPass(quantization_specs)); if (debugger_config.debugger_type() != DebuggerConfig::DEBUGGER_TYPE_UNSPECIFIED) { pm.addPass(CreateAddDumpTensorOpPass(debugger_config.debugger_type(), debugger_config.log_dir_path())); } pm.addNestedPass<func::FuncOp>(
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri May 10 04:07:09 UTC 2024 - 8.1K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/tests/add_dump_tensor_op_stablehlo.mlir
// RUN: tf-quant-opt %s -split-input-file -quant-add-dump-tensor-op='debugger_type=whole_model' | FileCheck --check-prefix=WholeModel %s // RUN: tf-quant-opt %s -split-input-file -quant-add-dump-tensor-op='debugger_type=int_per_layer' | FileCheck --check-prefix=IntPerLayer %s // RUN: tf-quant-opt %s -split-input-file -quant-add-dump-tensor-op='debugger_type=float_per_layer' | FileCheck --check-prefix=FloatPerLayer %s module {
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri Mar 22 22:55:22 UTC 2024 - 18K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/stablehlo/quantization_config.proto
// both the quantized and unquantized layer. DEBUGGER_TYPE_FLOAT_PER_LAYER = 3; } DebuggerType debugger_type = 1; // Path to save unquantized model with dump tensor ops attached. // Used when debugger_type is WHOLE_MODEL. string unquantized_dump_model_path = 2; // Path to save debugger related logs. Defaults to '/tmp/dumps'. string log_dir_path = 3;
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri May 17 03:36:50 UTC 2024 - 14.3K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/quantize_passes.cc
// TODO: b/295140328 - Add debugger support for weight only if (quantization_options.has_debugger_config()) { pm.addPass(mlir::quant::CreateAddDumpTensorOpPass( quantization_options.debugger_config().debugger_type(), quantization_options.debugger_config().log_dir_path())); } pm.addNestedPass<mlir::func::FuncOp>( mlir::quant::CreateInsertCustomAggregationOpsPass(
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri May 10 04:07:09 UTC 2024 - 9.9K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/tests/add_dump_tensor_op.mlir
// RUN: tf-quant-opt %s -split-input-file -quant-add-dump-tensor-op='debugger_type=whole_model' | FileCheck --check-prefix=WholeModel %s // RUN: tf-quant-opt %s -split-input-file -quant-add-dump-tensor-op='debugger_type=int_per_layer' | FileCheck --check-prefix=IntPerLayer %s // RUN: tf-quant-opt %s -split-input-file -quant-add-dump-tensor-op='debugger_type=float_per_layer' | FileCheck --check-prefix=FloatPerLayer %s module {
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri Mar 22 22:55:22 UTC 2024 - 37.9K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/passes/passes.h
// Create a pass that inserts dump tensor to quantizable layer's output. std::unique_ptr<OperationPass<ModuleOp>> CreateAddDumpTensorOpPass( ::stablehlo::quantization::DebuggerConfig::DebuggerType debugger_type, std::string log_dir_path); // Creates a pass that add QuantizationUnitLoc to quantizable layers. std::unique_ptr<OperationPass<func::FuncOp>> CreateAddQuantizationUnitLocPass();
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri May 10 04:07:09 UTC 2024 - 12.3K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/python/quantize_model.py
quantization_options.debugger_config.debugger_type == stablehlo_quant_config_pb2.DebuggerConfig.DebuggerType.DEBUGGER_TYPE_UNSPECIFIED ): raise ValueError( 'Debugger is enabled but debugger type was not specified.' ) if ( quantization_options.debugger_config.debugger_type == stablehlo_quant_config_pb2.DebuggerConfig.DebuggerType.DEBUGGER_TYPE_WHOLE_MODEL
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri May 17 03:36:50 UTC 2024 - 34.2K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/python/integration_test/quantize_model_test.py
preset_method=_PresetMethod.METHOD_STATIC_RANGE_INT8 ), op_set=target_opset, debugger_config=_DebuggerConfig( debugger_type=debugger_type, log_dir_path=log_dir_path, ), tags=tags, signature_keys=['serving_default'], ) converted_model = quantize_model.quantize(
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri May 17 03:36:50 UTC 2024 - 235.6K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/python/quantize_model.cc
// both quantized and unquantized tensors and compare them offline. if (quantization_options.has_debugger_config() && quantization_options.debugger_config().debugger_type() == DebuggerConfig::DEBUGGER_TYPE_WHOLE_MODEL) { TF_ASSIGN_OR_RETURN( ExportedModel debugging_exported_model, ExportDebuggingModel(*module_ref, context.get(), quantization_options,
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri May 17 03:36:50 UTC 2024 - 23.8K bytes - Viewed (0)