- Sort by: Score
- Results per page: 10
- Languages: All
Results 1 - 3 of 3 for _XlaCompile (0.09 sec)
-
tensorflow/compiler/jit/flags.h
}; // Flags common to the _Xla* ops and their kernels. struct XlaOpsCommonFlags { // If true, _XlaCompile always refuses to compile the cluster, which means the // XLA clusters always run in the TF executor. Defaults to false. bool tf_xla_always_defer_compilation; // If true, _XlaCompile compiles the cluster asynchronously with respect to // the main execution. The fallback path is taken while compilation happens.
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Apr 17 18:52:57 UTC 2024 - 14.5K bytes - Viewed (0) -
tensorflow/compiler/jit/kernels/xla_ops.cc
.HostMemory("constants") .HostMemory("resources"), XlaLocalLaunchOp); REGISTER_KERNEL_BUILDER(Name("_XlaCompile").Device(DEVICE_CPU), XlaCompileOp); REGISTER_KERNEL_BUILDER(Name("_XlaCompile") .Device(DEVICE_GPU) .HostMemory("constants") .HostMemory("key")
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri May 17 22:46:36 UTC 2024 - 41.4K bytes - Viewed (0) -
tensorflow/compiler/jit/xla_platform_info.cc
// operations). Such a cluster can fail compilation (in way that // MarkForCompilation could not have detected) if the CPU JIT is not // linked in. // // So bail out of _XlaCompile in this case, and let the executor handle // the situation for us. const Status& status = compiler_for_platform.status(); if (status.code() == error::NOT_FOUND) {
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu May 02 17:23:27 UTC 2024 - 17.4K bytes - Viewed (0)