- Sort Score
- Results per page: 10
- Languages All
Results 1 - 10 of 11 for DumpTensor (0.13 sec)
-
tensorflow/compiler/mlir/quantization/tensorflow/tests/quantize_composite_functions_debugging.mlir
"tf.DumpTensor"(%10) {device = "", enabled = true, file_name = "unquantized_tensor_data.pb", func_name = "conv_with_dump", log_dir_path = "/tmp/dumps/composite_conv2d_with_bias_and_relu6_fn_2", node_name = "Conv2D"} : (tensor<*xf32>) -> ()
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Mon Nov 06 01:23:21 UTC 2023 - 80.5K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/tests/add_dump_tensor_op.mlir
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri Mar 22 22:55:22 UTC 2024 - 37.9K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/tests/add_dump_tensor_op_stablehlo.mlir
// WholeModel-DAG: "tf.DumpTensor"(%[[matmul0_q]]) <{enabled = true, file_name = "unquantized_tensor_data.pb", func_name = "composite_dot_general_with_bias_and_relu6_dynamic_fn_2", log_dir_path = "/tmp/dumps/composite_dot_general_with_bias_and_relu6_dynamic_fn_2", node_name = "_empty_node"}>...
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri Mar 22 22:55:22 UTC 2024 - 18K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/debugging/dump_tensor_op.cc
return append_result.ok() ? close_result : append_result; } // `DumpTensor` op saves entire value of input as a tensor proto into a // specified directory and filename. When enabled is set to false, op is // disabled and won't save any value. It also creates `QuantizationUnit` proto // with `func_name` and `node_name` to identify the op. REGISTER_OP("DumpTensor") .Input("tensor_data: T") .Attr("log_dir_path: string")
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Feb 22 03:12:17 UTC 2024 - 4.8K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/stablehlo/cc/debugger.h
#define TENSORFLOW_COMPILER_MLIR_QUANTIZATION_STABLEHLO_CC_DEBUGGER_H_ #include "mlir/IR/BuiltinOps.h" // from @llvm-project namespace stablehlo::quantization { // Disables debugging on `DumpTensor` ops. void DisableDebugging(mlir::ModuleOp module_op); // Changes the filename from `unquantized_tensor_data.pb` to // `quantized_tensor_data.pb`. void ChangeToQuantizedFilename(mlir::ModuleOp module_op);
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 00:17:12 UTC 2024 - 1.2K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/stablehlo/quantization_config.proto
// NEXT ID: 4 enum DebuggerType { DEBUGGER_TYPE_UNSPECIFIED = 0; // DEBUGGER_TYPE_WHOLE_MODEL creates two tf.SavedModel - unquantized and // quantized model with DumpTensor added to outputs of quantizable layers. // The DumpTensor dumps entire value of its input to a specified file. When // DEBUGGER_TYPE_WHOLE_MODEL is used unquantized_dump_model_path has to be // specified. DEBUGGER_TYPE_WHOLE_MODEL = 1;
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri May 17 03:36:50 UTC 2024 - 14.3K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/passes/tf_quant_ops.td
StrAttr:$output_file_path, StrArrayAttr:$ids, I32ArrayAttr:$calibration_methods ); TF_DerivedOperandTypeListAttr Tin = TF_DerivedOperandTypeListAttr<0>; } def TF_DumpTensorOp : TF_Op<"DumpTensor", []> { let summary = "Dump tensor proto."; let arguments = (ins Arg<TF_Tensor>:$input, StrAttr:$log_dir_path, StrAttr:$file_name, BoolAttr:$enabled, StrAttr:$func_name,
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 01:09:50 UTC 2024 - 3.9K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/passes/add_dump_tensor_op.cc
// the commandline for example). return "quant-add-dump-tensor-op"; } StringRef getDescription() const final { // This is a brief description of the pass. return "Add DumpTensor ops after quantizable ops"; } void getDependentDialects(DialectRegistry &registry) const override { registry.insert<TF::TensorFlowDialect>(); registry.insert<quant::QuantizationDialect>();
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri Mar 22 22:55:22 UTC 2024 - 13K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/stablehlo/cc/calibration/component.cc
} // namespace absl::Status RunCalibrationPasses( mlir::ModuleOp module_op, MLIRContext& ctx, absl::string_view calibration_data_dir, const bool force_regenerate_calibration_data) { // Disable DumpTensor ops when running calibration. DisableDebugging(module_op); std::vector<std::string> skipping_aggregator_ops; if (!force_regenerate_calibration_data) {
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue May 14 06:31:57 UTC 2024 - 9.2K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/python/quantize_model.cc
// debugger type is `DEBUGGER_TYPE_WHOLE_MODEL`. This is required // because in whole-model debugging mode the `DumpTensor` ops for the // unquantized tensors are only inserted in the unquantized model // whereas `DumpTensor` ops for the quantized tensors are only inserted // in the quantized model. Both models are required to be able to dump
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri May 17 03:36:50 UTC 2024 - 23.8K bytes - Viewed (0)