- Sort: Score
- Results per page: 10
- Languages: All
Results 1 - 10 of 14 for FunctionLibraryRuntime (0.28 sec)
-
tensorflow/compiler/jit/xla_compiler_options_util.cc
<< ",detailed_logging=" << options.detailed_logging << "]"; } } // namespace XlaCompiler::Options GenerateCompilerOptions( const XlaDeviceCompiler& xla_device_compiler, const FunctionLibraryRuntime& function_library, DeviceBase* device, se::Stream* stream, const XlaPlatformInfo& platform_info, bool has_ref_vars) { XlaCompiler::Options options;
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Feb 21 09:53:30 UTC 2024 - 6.4K bytes - Viewed (0) -
tensorflow/compiler/jit/xla_platform_info.cc
std::move(persistor_config), compilation_device_type), std::make_unique<PjRtDeviceCompilerClient>(pjrt_client)); } absl::StatusOr<std::optional<std::set<int>>> GetAllowedGpus( FunctionLibraryRuntime* flr) { std::optional<std::set<int>> gpu_ids = std::nullopt; if (flr->config_proto()) { string allowed_gpus = flr->config_proto()->gpu_options().visible_device_list();
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu May 02 17:23:27 UTC 2024 - 17.4K bytes - Viewed (0) -
tensorflow/compiler/jit/xla_platform_info.h
// the persistor used in the DeviceCompiler. The platform ID from // `platform_info` must not be null in CPU case. Status BuildXlaDeviceCompiler( DeviceBase* dev, FunctionLibraryRuntime* flr, const XlaPlatformInfo& platform_info, DeviceType compilation_device_type, DeviceCompiler<xla::LocalExecutable, xla::LocalClient>** xla_device_compiler);
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Feb 21 09:53:30 UTC 2024 - 7.2K bytes - Viewed (0) -
tensorflow/compiler/jit/compilability_check_util.cc
} Status GetBodyAndConstantsAndResources(FunctionLibraryRuntime* flr, const NameAttrList& function, const FunctionBody** fbody, std::vector<int>* constant_arg_indices, std::vector<int>* resource_arg_indices) { FunctionLibraryRuntime::Handle handle; TF_RETURN_IF_ERROR(
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Mar 12 06:33:33 UTC 2024 - 30.3K bytes - Viewed (0) -
tensorflow/compiler/jit/xla_cluster_util.cc
Status GetNodesRelatedToRefVariablesInDirection( const Graph& graph, FunctionLibraryRuntime* lib_runtime, Direction direction, int depth, absl::flat_hash_set<Node*>* result); absl::StatusOr<bool> DoesAnyCalleeHaveRefNodes( const CallTargetListTy& call_target_list, FunctionLibraryRuntime* lib_runtime, Direction direction, int depth) { const int kMaxDepth = 10;
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Feb 29 08:39:39 UTC 2024 - 21.3K bytes - Viewed (0) -
tensorflow/compiler/jit/force_xla_constants_on_host_pass.cc
OptimizerOptions opts; auto pflr = std::make_unique<ProcessFunctionLibraryRuntime>( nullptr, options.session_options->env, /*config=*/nullptr, TF_GRAPH_DEF_VERSION, options.flib_def, opts); FunctionLibraryRuntime* flr = pflr->GetFLR(ProcessFunctionLibraryRuntime::kDefaultFLRDevice); for (Node* node : graph->nodes()) { if (CanCreateXlaKernel(node->def())) { const FunctionBody* fbody = nullptr;
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Feb 22 08:47:20 UTC 2024 - 2.1K bytes - Viewed (0) -
tensorflow/compiler/jit/get_compiler_ir.cc
std::string device_name; if (!platform_name.empty()) { device_name = absl::StrCat("/device:", platform_name, ":0"); } FunctionLibraryRuntime* flr = pflr->GetFLR(device_name); if (flr == nullptr) { // Use CPU as the fallback to get the `FunctionLibraryRuntime`. flr = pflr->GetFLR("/device:CPU:0"); } TF_ASSIGN_OR_RETURN( std::vector<XlaCompiler::Argument> args,
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Feb 22 06:59:07 UTC 2024 - 19K bytes - Viewed (0) -
tensorflow/compiler/jit/xla_cluster_util.h
// all of the nodes that have ref variables as input or output. absl::StatusOr<absl::flat_hash_set<Node*>> GetNodesRelatedToRefVariables( const Graph& graph, FunctionLibraryRuntime* lib_runtime); // Deterministically serialized the graph to a byte string. absl::StatusOr<std::string> SerializeGraphDeterministic(const Graph& graph);
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Feb 21 09:53:30 UTC 2024 - 4.6K bytes - Viewed (0) -
tensorflow/compiler/jit/extract_outside_compilation_pass.cc
const string& host_graph_func_name, const std::map<string, int>& host_compute_core, FunctionLibraryRuntime* flr, FunctionLibraryDefinition* fld, std::vector<string>* shape_inference_graphs, bool* has_outside_compilation) { // Convert the function to graph. const string& func_name = func_name_attrs.name(); FunctionLibraryRuntime::Handle handle; TF_RETURN_IF_ERROR(
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Mar 12 06:33:33 UTC 2024 - 104.7K bytes - Viewed (0) -
tensorflow/compiler/jit/xla_cluster_util_test.cc
std::unique_ptr<ProcessFunctionLibraryRuntime> pflr( new ProcessFunctionLibraryRuntime( nullptr, Env::Default(), /*config=*/nullptr, TF_GRAPH_DEF_VERSION, flib_def, OptimizerOptions{})); FunctionLibraryRuntime* lib_runtime = pflr->GetFLR(ProcessFunctionLibraryRuntime::kDefaultFLRDevice); TF_ASSIGN_OR_RETURN(absl::flat_hash_set<Node*> nodes_related_to_ref_vars,
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Feb 21 09:53:30 UTC 2024 - 10.8K bytes - Viewed (0)