Results 1 - 10 of 21 for device_compiler (0.37 sec)
tensorflow/compiler/jit/device_compiler.h
// to disk.
//
// Since XLA computations must have static shapes, DeviceCompiler generates a
// new XLA computation for each new set of input shapes.
// TODO(b/255826209): De-templatize once we've moved to Device API completely.
template <typename ExecutableType, typename ClientType>
class DeviceCompiler : public ResourceBase {
 public:
  DeviceCompiler(
      std::unique_ptr<DeviceExecutablePersistor<ExecutableType, ClientType>>
Last Modified: Thu Feb 22 08:47:20 UTC 2024 - 22.1K bytes
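The template parameters pair an executable type with the client that produces it. A minimal sketch of the two concrete instantiations, taken from the aliases in xla_compile_on_demand_op.cc further down this page (the namespace qualification is assumed):

// Sketch only: the two instantiations visible elsewhere in these results.
// The headers below provide the XLA local-client and PjRt types.
#include "tensorflow/compiler/jit/device_compiler.h"
#include "xla/client/local_client.h"   // xla::LocalClient, xla::LocalExecutable
#include "xla/pjrt/pjrt_client.h"      // xla::PjRtClient, xla::PjRtLoadedExecutable

using XlaDeviceCompiler =
    tensorflow::DeviceCompiler<xla::LocalExecutable, xla::LocalClient>;
using PjRtDeviceCompiler =
    tensorflow::DeviceCompiler<xla::PjRtLoadedExecutable, xla::PjRtClient>;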
tensorflow/compiler/jit/BUILD
    cc_api_version = 2,
    protodeps = tf_additional_all_protos() + ["@local_xla//xla/service:hlo_proto"],
    visibility = ["//visibility:public"],
)

cc_library(
    name = "device_compiler",
    hdrs = ["device_compiler.h"],
    copts = tf_copts(),
    visibility = [
        ":internal",
        "//tensorflow/core/common_runtime/next_pluggable_device:__pkg__",
    ],
    deps = [
Last Modified: Fri May 31 00:41:19 UTC 2024 - 61.5K bytes
tensorflow/compiler/jit/device_compiler_disable_test.cc
#include <memory>
#include <utility>
#include <vector>

#include "tensorflow/compiler/jit/device_compilation_profiler.h"
#include "tensorflow/compiler/jit/flags.h"
#include "tensorflow/compiler/jit/device_compiler.h"
#include "tensorflow/compiler/jit/xla_device_compiler_client.h"
#include "tensorflow/compiler/tf2xla/xla_compiler.h"
#include "xla/client/client_library.h"
#include "tensorflow/core/platform/test.h"
Last Modified: Wed Sep 06 19:12:29 UTC 2023 - 3.6K bytes
tensorflow/compiler/jit/pjrt_compile_util.cc
#include "tensorflow/compiler/jit/pjrt_compile_util.h" #include <vector> #include "tensorflow/compiler/jit/device_compilation_profiler.h" #include "tensorflow/compiler/jit/device_compiler.h" #include "tensorflow/compiler/jit/xla_compile_util.h" #include "tensorflow/compiler/jit/xla_compiler_options_util.h" #include "tensorflow/compiler/jit/xla_platform_info.h"
Last Modified: Wed Sep 06 19:12:29 UTC 2023 - 3.8K bytes
tensorflow/compiler/jit/xla_compiler_options_util.h
#include "tensorflow/compiler/jit/device_compiler.h" #include "tensorflow/compiler/jit/xla_platform_info.h" #include "tensorflow/compiler/tf2xla/xla_compiler.h" #include "xla/client/local_client.h" #include "xla/pjrt/pjrt_client.h" namespace tensorflow { // Returns created options for the XLA compiler. XlaCompiler::Options GenerateCompilerOptions( const DeviceCompiler<xla::LocalExecutable, xla::LocalClient>&
Last Modified: Fri Dec 29 01:41:20 UTC 2023 - 2.7K bytes
tensorflow/compiler/jit/xla_launch_util_gpu_test.cc
  device_allocator_ = device_->GetAllocator(device_alloc_attr);

  // Create the DeviceCompiler to help with compiling executables.
  auto pjrt_client_or = GetOrCreatePjRtClient(device_type_);
  TF_CHECK_OK(pjrt_client_or.status());
  pjrt_client_ = pjrt_client_or.value();
  device_compiler_ = new PjRtDeviceCompiler(
      std::make_unique<PjRtDeviceExecutablePersistor>(
Last Modified: Wed Sep 06 19:12:29 UTC 2023 - 10K bytes
tensorflow/compiler/jit/device_compiler_test.cc
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/

#include "tensorflow/compiler/jit/device_compiler.h"

#include <iostream>
#include <memory>
#include <optional>
#include <string>
#include <utility>
#include <vector>

#include <gmock/gmock.h>
#include <gtest/gtest.h>
Last Modified: Tue Mar 12 06:33:33 UTC 2024 - 19.8K bytes
tensorflow/compiler/jit/xla_platform_info.h
    absl::string_view visible_device_list);

// Returns the device type for building a DeviceCompiler from the given platform
// type.
absl::StatusOr<DeviceType> GetCompilationDeviceType(
    const DeviceType& platform_device_type);

// Builds a DeviceCompiler that uses xla::LocalClient using `platform_info` and
// `compilation_device_type` (in non-TPU case) and sets *xla_device_compiler to
Last Modified: Wed Feb 21 09:53:30 UTC 2024 - 7.2K bytes
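A minimal sketch of calling GetCompilationDeviceType with the usual StatusOr unwrapping; the surrounding function and variable names are illustrative assumptions, not code from this header:

// Illustrative only: resolve the device type to compile for, propagating
// errors with TF_ASSIGN_OR_RETURN (from tsl/platform/statusor.h).
#include "absl/status/status.h"
#include "tensorflow/compiler/jit/xla_platform_info.h"
#include "tsl/platform/statusor.h"

absl::Status PickCompilationDevice(
    const tensorflow::DeviceType& platform_device_type) {
  TF_ASSIGN_OR_RETURN(
      tensorflow::DeviceType compilation_device_type,
      tensorflow::GetCompilationDeviceType(platform_device_type));
  // compilation_device_type would then feed the DeviceCompiler builder
  // described in the comment above.
  return absl::OkStatus();
}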
tensorflow/compiler/jit/kernels/xla_ops.h
#ifndef TENSORFLOW_COMPILER_JIT_KERNELS_XLA_OPS_H_
#define TENSORFLOW_COMPILER_JIT_KERNELS_XLA_OPS_H_

#include <atomic>

#include "tensorflow/compiler/jit/device_compiler.h"
#include "tensorflow/compiler/jit/xla_device.h"
#include "tensorflow/compiler/jit/xla_launch_util.h"
#include "tensorflow/compiler/jit/xla_platform_info.h"
#include "xla/stream_executor/integrations/tf_allocator_adapter.h"
Last Modified: Mon Oct 16 23:44:26 UTC 2023 - 4.8K bytes
tensorflow/compiler/jit/xla_compile_on_demand_op.cc
#include "tensorflow/core/tfrt/common/pjrt_util.h" #include "tsl/platform/errors.h" namespace tensorflow { namespace { using XlaDeviceCompiler = DeviceCompiler<xla::LocalExecutable, xla::LocalClient>; using PjRtDeviceCompiler = DeviceCompiler<xla::PjRtLoadedExecutable, xla::PjRtClient>; XlaCompiler::CompileOptions GetCompileOptions(bool for_pjrt = false) { XlaCompiler::CompileOptions compile_options;
Last Modified: Thu Feb 29 08:39:39 UTC 2024 - 13.4K bytes
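The default argument lets a single helper serve both compilation paths. Since GetCompileOptions is file-local, the call sites below are hypothetical, shown only to illustrate the flag:

// Hypothetical call sites for the file-local helper shown above.
XlaCompiler::CompileOptions xla_options = GetCompileOptions();                  // LocalClient path
XlaCompiler::CompileOptions pjrt_options = GetCompileOptions(/*for_pjrt=*/true);  // PjRt path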