- Sort: Score
- Results per page: 10
- Languages: All
Results 1 - 10 of 39 for stream_executor_ (0.23 sec)
-
tensorflow/c/experimental/stream_executor/stream_executor.cc
#include "tensorflow/c/experimental/stream_executor/stream_executor_internal.h" #include "tensorflow/c/tf_status_helper.h" #include "xla/stream_executor/executor_cache.h" #include "xla/stream_executor/host_memory_allocation.h" #include "xla/stream_executor/memory_allocation.h" #include "xla/stream_executor/platform.h" #include "xla/stream_executor/platform_manager.h" #include "xla/stream_executor/stream.h"
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri Jun 14 07:39:19 UTC 2024 - 27.1K bytes - Viewed (0) -
tensorflow/c/experimental/stream_executor/stream_executor_internal.h
#include "xla/stream_executor/executor_cache.h" #include "xla/stream_executor/platform.h" #include "xla/stream_executor/stream_common.h" #include "xla/stream_executor/stream_executor.h" #include "tsl/platform/statusor.h" namespace stream_executor { // Plugin initialization function that a device plugin // must define. typedef void (*SEInitPluginFn)(SE_PlatformRegistrationParams* const, TF_Status* const);
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri Jun 14 07:39:19 UTC 2024 - 8K bytes - Viewed (0) -
tensorflow/compiler/jit/xla_device_context.cc
#include "tsl/platform/statusor.h" namespace tensorflow { // The allocator used for Tensors assigned to the XLA device. XlaDeviceAllocator::XlaDeviceAllocator( stream_executor::StreamExecutor* stream_executor) : stream_executor_(stream_executor) {} XlaDeviceAllocator::~XlaDeviceAllocator() = default; std::string XlaDeviceAllocator::Name() { return "xla"; }
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu May 16 00:36:08 UTC 2024 - 12.7K bytes - Viewed (0) -
tensorflow/c/experimental/stream_executor/stream_executor_test.cc
#include "tensorflow/c/experimental/stream_executor/stream_executor.h" #include <functional> #include <utility> #include "tensorflow/c/experimental/stream_executor/stream_executor_internal.h" #include "tensorflow/c/experimental/stream_executor/stream_executor_test_util.h" #include "xla/stream_executor/event.h" #include "xla/stream_executor/platform_manager.h" #include "xla/stream_executor/stream.h"
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Mon May 20 19:54:04 UTC 2024 - 26.5K bytes - Viewed (0) -
tensorflow/compiler/jit/xla_host_send_recv_device_context_test.cc
#include "tensorflow/compiler/tf2xla/shape_util.h" #include "tensorflow/compiler/tf2xla/xla_op_registry.h" #include "xla/stream_executor/device_memory.h" #include "xla/stream_executor/platform_manager.h" #include "xla/stream_executor/stream.h" #include "xla/stream_executor/stream_executor.h" #include "tensorflow/core/framework/tensor_testutil.h" #include "tsl/lib/core/status_test_util.h" namespace tensorflow { namespace {
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri May 17 22:46:36 UTC 2024 - 7.2K bytes - Viewed (0) -
tensorflow/c/experimental/stream_executor/BUILD
) cc_library( name = "stream_executor_hdrs", hdrs = ["stream_executor.h"], visibility = ["//tensorflow:internal"], deps = [ "//tensorflow/c:c_api_macros_hdrs", "//tensorflow/c:tf_status_headers", ], ) cc_library( name = "stream_executor", srcs = ["stream_executor.cc"], hdrs = ["stream_executor.h"], visibility = ["//tensorflow:internal"], deps = [
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Sat Jun 08 00:27:07 UTC 2024 - 3.1K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/translate/tf_mlir_translate_registration.cc
#include "tensorflow/compiler/tf2xla/xla_compiler.h" #include "xla/client/client_library.h" #include "xla/client/compile_only_client.h" #include "xla/stream_executor/host/host_platform_id.h" #include "xla/stream_executor/platform_manager.h" #include "tensorflow/core/common_runtime/graph_constructor.h" #include "tensorflow/core/framework/function.h" #include "tensorflow/core/framework/graph.pb.h"
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Jun 12 22:19:26 UTC 2024 - 7.8K bytes - Viewed (0) -
tensorflow/compiler/jit/BUILD
"@local_tsl//tsl/platform:statusor", "@local_xla//xla/stream_executor/tpu:c_api_conversions", "@local_xla//xla/stream_executor/tpu:status_helper", "@local_xla//xla/stream_executor/tpu:tpu_api", "@local_xla//xla/stream_executor/tpu:tpu_executor_base", "@local_xla//xla/stream_executor/tpu:tpu_node_context", "@local_xla//xla/stream_executor/tpu:tpu_platform_interface",
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri May 31 00:41:19 UTC 2024 - 61.5K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/transforms/set_tpu_infeed_layout.cc
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_ops.h" #include "xla/layout.h" #include "xla/mlir_hlo/mhlo/IR/hlo_ops.h" #include "xla/shape.h" #include "xla/stream_executor/tpu/c_api_conversions.h" #include "xla/stream_executor/tpu/tpu_api.h" #include "xla/translate/mhlo_to_hlo/type_to_shape.h" namespace mlir { static FailureOr<std::vector<int64_t>> GetTPUInfeedLayoutFromAPI( RankedTensorType t) {
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 6.1K bytes - Viewed (0) -
tensorflow/compiler/jit/xla_host_send_device_context.h
#ifndef TENSORFLOW_COMPILER_JIT_XLA_HOST_SEND_DEVICE_CONTEXT_H_ #define TENSORFLOW_COMPILER_JIT_XLA_HOST_SEND_DEVICE_CONTEXT_H_ #include "xla/shape.h" #include "xla/stream_executor/device_memory.h" #include "xla/stream_executor/stream.h" #include "tensorflow/core/framework/device_base.h" #include "tfrt/concurrency/async_value_ref.h" // from @tf_runtime namespace tensorflow {
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri May 17 22:46:36 UTC 2024 - 3.7K bytes - Viewed (0)