- Sort by: Score
- Results per page: 10
- Languages: All
Results 11 - 20 of 47 for stream_executor_ (0.23 sec)
-
tensorflow/compiler/jit/BUILD
"@local_tsl//tsl/platform:statusor", "@local_xla//xla/stream_executor/tpu:c_api_conversions", "@local_xla//xla/stream_executor/tpu:status_helper", "@local_xla//xla/stream_executor/tpu:tpu_api", "@local_xla//xla/stream_executor/tpu:tpu_executor_base", "@local_xla//xla/stream_executor/tpu:tpu_node_context", "@local_xla//xla/stream_executor/tpu:tpu_platform_interface",
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri May 31 00:41:19 UTC 2024 - 61.5K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/transforms/set_tpu_infeed_layout.cc
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_ops.h" #include "xla/layout.h" #include "xla/mlir_hlo/mhlo/IR/hlo_ops.h" #include "xla/shape.h" #include "xla/stream_executor/tpu/c_api_conversions.h" #include "xla/stream_executor/tpu/tpu_api.h" #include "xla/translate/mhlo_to_hlo/type_to_shape.h" namespace mlir { static FailureOr<std::vector<int64_t>> GetTPUInfeedLayoutFromAPI( RankedTensorType t) {
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 6.1K bytes - Viewed (0) -
tensorflow/compiler/jit/xla_host_send_device_context.h
#ifndef TENSORFLOW_COMPILER_JIT_XLA_HOST_SEND_DEVICE_CONTEXT_H_ #define TENSORFLOW_COMPILER_JIT_XLA_HOST_SEND_DEVICE_CONTEXT_H_ #include "xla/shape.h" #include "xla/stream_executor/device_memory.h" #include "xla/stream_executor/stream.h" #include "tensorflow/core/framework/device_base.h" #include "tfrt/concurrency/async_value_ref.h" // from @tf_runtime namespace tensorflow {
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri May 17 22:46:36 UTC 2024 - 3.7K bytes - Viewed (0) -
tensorflow/compiler/jit/xla_tpu_device.cc
#include "tensorflow/compiler/tf2xla/xla_op_registry.h" #include "xla/stream_executor/tpu/c_api_conversions.h" #include "xla/stream_executor/tpu/status_helper.h" #include "xla/stream_executor/tpu/tpu_api.h" #include "xla/stream_executor/tpu/tpu_node_context.h" #include "xla/stream_executor/tpu/tpu_platform.h" #include "xla/stream_executor/tpu/tpu_platform_interface.h" #include "xla/stream_executor/tpu/tpu_stream_interface.h"
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue May 28 22:53:47 UTC 2024 - 20.9K bytes - Viewed (0) -
tensorflow/compiler/jit/xla_host_recv_device_context.h
#ifndef TENSORFLOW_COMPILER_JIT_XLA_HOST_RECV_DEVICE_CONTEXT_H_ #define TENSORFLOW_COMPILER_JIT_XLA_HOST_RECV_DEVICE_CONTEXT_H_ #include "xla/shape.h" #include "xla/stream_executor/device_memory.h" #include "xla/stream_executor/stream.h" #include "tensorflow/core/framework/device_base.h" #include "tfrt/concurrency/async_value_ref.h" // from @tf_runtime namespace tensorflow {
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri May 17 22:46:36 UTC 2024 - 3.9K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/transforms/decode_attributes_hook.cc
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_ops.h" #include "tensorflow/compiler/mlir/tensorflow/transforms/constant_fold.h" #include "tensorflow/compiler/mlir/tensorflow/utils/convert_tensor.h" #include "xla/stream_executor/stream_executor.h" #include "tensorflow/core/framework/logging.h" #include "tsl/platform/statusor.h" namespace mlir { namespace { } // anonymous namespace
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Sep 06 19:12:29 UTC 2023 - 1.4K bytes - Viewed (0) -
tensorflow/compiler/mlir/tf2xla/transforms/infeed_ops_xla_adjust_layout.cc
#include "tensorflow/compiler/mlir/tensorflow/transforms/set_tpu_infeed_layout.h" #include "xla/layout.h" #include "xla/mlir_hlo/mhlo/IR/hlo_ops.h" #include "xla/shape.h" #include "xla/stream_executor/tpu/c_api_conversions.h" #include "xla/stream_executor/tpu/tpu_api.h" #include "xla/translate/mhlo_to_hlo/type_to_shape.h" namespace mlir { namespace mhlo { namespace { #define GEN_PASS_DEF_INFEEDOPSXLAADJUSTLAYOUT
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Sep 06 19:12:29 UTC 2023 - 2.7K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/translate/BUILD
"@local_xla//xla/service/cpu:cpu_compiler", "@local_xla//xla/service/cpu:cpu_transfer_manager", "@local_xla//xla/stream_executor", "@local_xla//xla/stream_executor/host:host_platform", "@local_xla//xla/stream_executor/host:host_platform_id", ], alwayslink = 1, ) cc_library( name = "split_into_island_per_op_pass",
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Jun 12 22:19:26 UTC 2024 - 11K bytes - Viewed (0) -
tensorflow/c/kernels.h
// OpKernelContext routines // TF_GetStream returns the SP_Stream available in ctx. // This function returns a stream only for devices registered using the // StreamExecutor C API // (tensorflow/c/experimental/stream_executor/stream_executor.h). It will return // nullptr and set error status in all other cases. // Experimental: this function doesn't have compatibility guarantees and subject // to change at any time.
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Jan 09 22:46:22 UTC 2024 - 24.6K bytes - Viewed (0) -
tensorflow/c/experimental/stream_executor/test/BUILD
# Description: # test for stream_executor load( "//tensorflow:tensorflow.bzl", "tf_cc_shared_object", ) package( # copybara:uncomment default_applicable_licenses = ["//tensorflow:license"], licenses = ["notice"], ) tf_cc_shared_object( name = "test_pluggable_device.so", srcs = ["test_pluggable_device.cc"], visibility = ["//tensorflow/c:__subpackages__"], deps = [
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Nov 17 15:20:54 UTC 2022 - 566 bytes - Viewed (0)