Results 1 - 6 of 6 for XLA_GPU (0.14 sec)
tensorflow/compiler/jit/xla_gpu_device.cc
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/

// Registers the XLA_GPU device, which is an XlaDevice instantiation that runs
// operators using XLA via the XLA "CUDA" or "ROCM" (GPU) backend.

#include <array>
#include <set>

#include "absl/memory/memory.h"
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Feb 22 08:47:20 UTC 2024 - 6.6K bytes
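As background on the pattern this file uses: XLA_GPU becomes visible by registering a DeviceFactory under that device type name. Below is a minimal sketch of the registration pattern, not the actual contents of xla_gpu_device.cc; the factory bodies, the include paths, and the use of a bare "XLA_GPU" string literal are simplifying assumptions.

    // Sketch of TensorFlow's DeviceFactory registration pattern
    // (assumed and simplified; not copied from xla_gpu_device.cc).
    #include <memory>
    #include <string>
    #include <vector>

    #include "tensorflow/core/common_runtime/device.h"       // path approximate
    #include "tensorflow/core/framework/device_factory.h"    // path approximate

    namespace tensorflow {

    class SketchXlaGpuDeviceFactory : public DeviceFactory {
     public:
      Status ListPhysicalDevices(std::vector<std::string>* devices) override {
        // The real factory enumerates visible GPUs; this sketch reports none.
        return absl::OkStatus();
      }
      Status CreateDevices(
          const SessionOptions& options, const std::string& name_prefix,
          std::vector<std::unique_ptr<Device>>* devices) override {
        // The real factory constructs one XlaDevice per visible GPU;
        // this sketch is a no-op.
        return absl::OkStatus();
      }
    };

    // Makes the factory discoverable by DeviceFactory::AddDevices()
    // under the "XLA_GPU" device type name.
    REGISTER_LOCAL_DEVICE_FACTORY("XLA_GPU", SketchXlaGpuDeviceFactory);

    }  // namespace tensorflow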
tensorflow/compiler/jit/xla_device.h
// Tensors on an XlaDevice are thin wrappers around XLA ScopedShapedBuffers.
//
// XlaDevice is instantiated separately for each XLA backend (e.g., CPU or GPU),
// under different names (e.g., XLA_CPU or XLA_GPU).

#ifndef TENSORFLOW_COMPILER_JIT_XLA_DEVICE_H_
#define TENSORFLOW_COMPILER_JIT_XLA_DEVICE_H_

#include <set>

#include "absl/types/optional.h"
#include "tensorflow/compiler/jit/xla_tensor.h"
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Feb 21 09:53:30 UTC 2024 - 13.4K bytes
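The header comment describes a "thin wrapper" ownership design. Here is a self-contained illustration of that pattern with invented names: DeviceBuffer stands in for xla::ScopedShapedBuffer, and WrappedTensor is not TF's XlaTensor API.

    // Generic illustration of the "thin wrapper around a backing buffer"
    // idea. All names here are invented for illustration.
    #include <memory>
    #include <utility>

    struct DeviceBuffer {
      // Stand-in for xla::ScopedShapedBuffer: owns device memory and
      // releases it in its destructor.
    };

    class WrappedTensor {
     public:
      explicit WrappedTensor(std::unique_ptr<DeviceBuffer> buf)
          : buf_(std::move(buf)) {}

      // The wrapper adds no storage of its own; it only forwards to the
      // underlying buffer, which is what makes it "thin".
      const DeviceBuffer& buffer() const { return *buf_; }

     private:
      std::unique_ptr<DeviceBuffer> buf_;
    };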
tensorflow/compiler/jit/partially_decluster_pass_test.cc
  AddToCluster({shape.node(), reshape.node()}, "cluster_0");

  auto graph = std::make_unique<Graph>(OpRegistry::Global());
  TF_ASSERT_OK(s.ToGraph(graph.get()));

  // This is needed to register the XLA_GPU device.
  std::vector<std::unique_ptr<Device>> devices;
  TF_ASSERT_OK(DeviceFactory::AddDevices(
      SessionOptions(), "/job:localhost/replica:0/task:0", &devices));
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri Jun 10 12:32:39 UTC 2022 - 23K bytes
tensorflow/compiler/jit/xla_device.cc
  return absl::OkStatus();
}

// Warn about XLA_CPU/XLA_GPU exactly once.
static void ShowXlaDeviceDeprecationWarning(
    absl::string_view compilation_device_name) {
  static absl::once_flag once;
  if (absl::StrContains(compilation_device_name, "CPU") ||
      absl::StrContains(compilation_device_name, "GPU")) {
    absl::call_once(once, [] {
      LOG(INFO) << "XLA_GPU and XLA_CPU devices are deprecated and will be "
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Mon May 20 21:05:42 UTC 2024 - 24.3K bytes
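The snippet above is a textbook warn-once pattern: absl::call_once guarantees the lambda runs at most once per process, so the deprecation message is not repeated on every device lookup. Here is a self-contained sketch of the same idea that compiles against Abseil alone; std::cerr stands in for LOG(INFO).

    #include <iostream>

    #include "absl/base/call_once.h"
    #include "absl/strings/match.h"
    #include "absl/strings/string_view.h"

    static void ShowDeprecationWarningOnce(absl::string_view device_name) {
      static absl::once_flag once;
      if (absl::StrContains(device_name, "CPU") ||
          absl::StrContains(device_name, "GPU")) {
        // Runs the lambda the first time through; later calls are no-ops.
        absl::call_once(once, [] {
          std::cerr << "XLA_GPU and XLA_CPU devices are deprecated\n";
        });
      }
    }

    int main() {
      ShowDeprecationWarningOnce("XLA_GPU");  // prints the warning
      ShowDeprecationWarningOnce("XLA_GPU");  // silent: flag already set
    }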
tensorflow/compiler/jit/mark_for_compilation_pass.cc
    jit::DeviceInfoCache* device_info_cache, const Node& n, bool* ignore) {
  // If a resource operation is assigned to XLA_CPU or XLA_GPU explicitly then
  // ignore it during resource operation safety analysis. We need this hack
  // because of two reasons:
  //
  // 1. Operations assigned to XLA_CPU and XLA_GPU have to always be compiled.
  // 2. We don't support live-out values of type DT_RESOURCE and live-in values
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Feb 21 12:19:41 UTC 2024 - 85.3K bytes
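A hedged sketch of the check this comment describes: skip resource-op safety analysis for nodes whose assigned device is explicitly XLA_CPU or XLA_GPU, since such nodes must always be compiled. The function name and the bare substring test are illustrative simplifications, not the pass's actual logic.

    #include "absl/strings/match.h"
    #include "absl/strings/string_view.h"

    // Returns true if the node's assigned device name explicitly targets
    // one of the deprecated XLA devices, in which case safety analysis
    // would be skipped for it.
    static bool ShouldIgnoreDuringResourceSafetyAnalysis(
        absl::string_view assigned_device_name) {
      return absl::StrContains(assigned_device_name, "XLA_CPU") ||
             absl::StrContains(assigned_device_name, "XLA_GPU");
    }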
RELEASE.md
  before applying the reduction function.
* AutoGraph no longer converts functions passed to `tf.py_function`,
  `tf.py_func` and `tf.numpy_function`.
* Deprecating `XLA_CPU` and `XLA_GPU` devices with this release.
* Increasing the minimum bazel version to build TF to 2.0.0 to use Bazel's
  `cc_experimental_shared_library`.
* Keras compile/fit behavior for functional and subclassed models have been
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Jun 11 23:24:08 UTC 2024 - 730.3K bytes