- Sort Score
- Result 10 results
- Languages All
Results 1 - 10 of 14 for debugInfo (0.19 sec)
-
tensorflow/cc/saved_model/loader.h
// Restore variables and resources in the SavedModel export dir for the // indicated metagraph. // The recommended way to load a saved model is to call LoadSavedModel, // which provides an already initialized Metagraph, Session, and DebugInfo. Status RestoreSession(const RunOptions& run_options, const MetaGraphDef& meta_graph, const string& export_dir, std::unique_ptr<Session>* session);
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Dec 05 18:28:37 UTC 2023 - 6K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/tests/add_quantization_unit_loc.mlir
// RUN: tf-quant-opt %s -mlir-print-debuginfo -mlir-print-local-scope -quant-add-quantization-unit-loc | FileCheck %s func.func @conv2d_unmatching_loc_pattern(%arg0: tensor<1x3x4x3xf32>) -> (tensor<1x3x2x2xf32>) { %cst = "tf.Const"() {device = "", value = dense_resource<__elided__> : tensor<2x3x3x2xbf16>} : () -> tensor<2x3x3x2xbf16> %0 = "tf.Cast"(%arg0) {Truncate = false, device = ""} : (tensor<1x3x4x3xf32>) -> tensor<1x3x4x3xbf16>
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Oct 03 02:39:10 UTC 2023 - 3.6K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/tests/lift_quantizable_spots_as_functions_xla_selective_quantization.mlir
// RUN: tf-quant-opt %s -split-input-file -mlir-print-debuginfo -mlir-print-local-scope -quant-add-quantization-unit-loc -inline -quant-prepare-lifting -quant-lift-quantizable-spots-as-functions='target-opset=XLA' | FileCheck %s // This file tests the selective quantization feature in TF Quantizer. In the test // config, the op named "test_opt_out" will not be quantized. module attributes {tf.versions = {bad_consumers = [], min_consumer = 12 : i32, producer = 1269 : i32}} {
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Mon Oct 30 06:52:55 UTC 2023 - 6.8K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/translate/import_model.h
const GraphDebugInfo& debug_info) : meta_graph_def_(meta_graph_def), debug_info_(debug_info) { DCHECK(meta_graph_def); } virtual ~SavedModelMLIRImportInput(); // The original MetaGraphDef of the savedmodel. const MetaGraphDef& meta_graph_def() const { return *meta_graph_def_; } const GraphDebugInfo& debug_info() const { return debug_info_; }
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed May 01 11:17:36 UTC 2024 - 6.8K bytes - Viewed (0) -
tensorflow/cc/saved_model/reader.cc
internal::FileExists(Env::Default(), debug_info_pb_path)); if (debug_info_pb_exists) { GraphDebugInfo debug_info; TF_RETURN_IF_ERROR( ReadBinaryProto(Env::Default(), debug_info_pb_path, &debug_info)); *debug_info_proto = std::make_unique<GraphDebugInfo>(std::move(debug_info)); } return absl::OkStatus(); }
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Mar 05 00:19:29 UTC 2024 - 6.1K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/tests/tf_saved_model/BUILD
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Oct 17 20:57:18 UTC 2023 - 6.2K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/python/graphdef_to_tfl_flatbuffer.cc
#include "tsl/platform/statusor.h" namespace tensorflow { absl::Status ConvertGraphDefToTFLiteFlatBuffer( const toco::ModelFlags& model_flags, toco::TocoFlags& toco_flags, const GraphDebugInfo& debug_info, const GraphDef& input, std::string* result) { using ::tflite::optimize::ReducedPrecisionSupport; mlir::MLIRContext context; GraphImportConfig specs; mlir::quant::QuantizationSpecs quant_specs;
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Mon Mar 11 19:29:56 UTC 2024 - 5.1K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/utils/dump_graph.cc
// specifiable. GraphDebugInfo debug_info; switch (config.dialect) { case MlirDumpConfig::Dialect::kTFG: { TF_ASSIGN_OR_RETURN(module, mlir::tfg::ImportGraphAndFunctionsToMlir( &context, debug_info, graph, flib_def ? *flib_def : graph.flib_def())); break;
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Mon Feb 26 03:47:51 UTC 2024 - 3.6K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/python/graphdef_to_tfl_flatbuffer.h
// it fails to convert the input. absl::Status ConvertGraphDefToTFLiteFlatBuffer( const toco::ModelFlags& model_flags, toco::TocoFlags& toco_flags, const GraphDebugInfo& debug_info, const GraphDef& input, std::string* result); } // namespace tensorflow
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Mon Mar 11 19:29:56 UTC 2024 - 1.6K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/transforms/tf_graph_optimization_pass.cc
<< pass->name() << ": " << status.message(); return signalPassFailure(); } } // Convert Graph to MLIR GraphDebugInfo debug_info; GraphImportConfig specs; auto module_or_status = ConvertGraphToMlir(**options.graph, debug_info, flib_def, specs, &ctx); if (!module_or_status.ok()) { mlir::emitError(mlir::UnknownLoc::get(&ctx)) << module_or_status.status().message();
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Jun 12 22:19:26 UTC 2024 - 7.6K bytes - Viewed (0)