- Sort: Score
- Results: 10
- Languages: All
Results 1 - 10 of 10 for device_compiler (0.31 sec)
-
tensorflow/compiler/jit/device_compiler.h
// to disk. // // Since XLA computations must have static shapes, DeviceCompiler generates a // new XLA computation for each new set of input shapes. // TODO(b/255826209): De-templatize once we've moved to Device API completely. template <typename ExecutableType, typename ClientType> class DeviceCompiler : public ResourceBase { public: DeviceCompiler( std::unique_ptr<DeviceExecutablePersistor<ExecutableType, ClientType>>
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Feb 22 08:47:20 UTC 2024 - 22.1K bytes - Viewed (0) -
tensorflow/compiler/jit/xla_launch_util_gpu_test.cc
device_allocator_ = device_->GetAllocator(device_alloc_attr); // Create the DeviceCompiler to help with compiling executables. auto pjrt_client_or = GetOrCreatePjRtClient(device_type_); TF_CHECK_OK(pjrt_client_or.status()); pjrt_client_ = pjrt_client_or.value(); device_compiler_ = new PjRtDeviceCompiler( std::make_unique<PjRtDeviceExecutablePersistor>(
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Sep 06 19:12:29 UTC 2023 - 10K bytes - Viewed (0) -
tensorflow/compiler/jit/device_compiler_test.cc
See the License for the specific language governing permissions and limitations under the License. ==============================================================================*/ #include "tensorflow/compiler/jit/device_compiler.h" #include <iostream> #include <memory> #include <optional> #include <string> #include <utility> #include <vector> #include <gmock/gmock.h> #include <gtest/gtest.h>
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Mar 12 06:33:33 UTC 2024 - 19.8K bytes - Viewed (0) -
tensorflow/compiler/jit/xla_compile_on_demand_op.cc
#include "tensorflow/core/tfrt/common/pjrt_util.h" #include "tsl/platform/errors.h" namespace tensorflow { namespace { using XlaDeviceCompiler = DeviceCompiler<xla::LocalExecutable, xla::LocalClient>; using PjRtDeviceCompiler = DeviceCompiler<xla::PjRtLoadedExecutable, xla::PjRtClient>; XlaCompiler::CompileOptions GetCompileOptions(bool for_pjrt = false) { XlaCompiler::CompileOptions compile_options;
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Feb 29 08:39:39 UTC 2024 - 13.4K bytes - Viewed (0) -
tensorflow/compiler/jit/get_compiler_ir.cc
#include "absl/strings/ascii.h" #include "absl/strings/str_cat.h" #include "absl/strings/string_view.h" #include "absl/types/span.h" #include "tensorflow/compiler/jit/compilability_check_util.h" #include "tensorflow/compiler/jit/device_compiler.h" #include "tensorflow/compiler/jit/variable_info.h" #include "tensorflow/compiler/jit/variable_info_util.h" #include "tensorflow/compiler/jit/xla_compiler_options_util.h" #include "tensorflow/compiler/jit/xla_launch_util.h"
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Feb 22 06:59:07 UTC 2024 - 19K bytes - Viewed (0) -
tensorflow/compiler/jit/xla_launch_util_test.cc
device_allocator_ = device_->GetAllocator(device_alloc_attr); // Create the DeviceCompiler to help with compiling executables. auto pjrt_client_or = GetOrCreatePjRtClient(device_type_); TF_CHECK_OK(pjrt_client_or.status()); pjrt_client_ = pjrt_client_or.value(); device_compiler_ = new PjRtDeviceCompiler( std::make_unique<PjRtDeviceExecutablePersistor>(
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Feb 21 09:53:30 UTC 2024 - 28.8K bytes - Viewed (0) -
tensorflow/compiler/jit/kernels/xla_ops.cc
} \ } while (0) namespace tensorflow { namespace { using XlaDeviceCompiler = DeviceCompiler<xla::LocalExecutable, xla::LocalClient>; using PjRtDeviceCompiler = DeviceCompiler<xla::PjRtLoadedExecutable, xla::PjRtClient>; auto* xla_launch_counter = monitoring::Counter<1>::New( "/tensorflow/core/xla_launch_counter",
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri May 17 22:46:36 UTC 2024 - 41.4K bytes - Viewed (0) -
tensorflow/compiler/jit/xla_platform_info.cc
#include "tensorflow/core/tpu/tpu_defs.h" #include "tsl/framework/device_type.h" namespace tensorflow { namespace { using XlaDeviceCompiler = DeviceCompiler<xla::LocalExecutable, xla::LocalClient>; using PjRtDeviceCompiler = DeviceCompiler<xla::PjRtLoadedExecutable, xla::PjRtClient>; using XlaDeviceExecutablePersistor = DeviceExecutablePersistor<xla::LocalExecutable, xla::LocalClient>;
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu May 02 17:23:27 UTC 2024 - 17.4K bytes - Viewed (0) -
tensorflow/compiler/jit/xla_platform_info_test.cc
#include "tensorflow/core/tfrt/common/pjrt_util.h" #include "tensorflow/core/tpu/tpu_defs.h" namespace tensorflow { namespace { using XlaDeviceCompiler = DeviceCompiler<xla::LocalExecutable, xla::LocalClient>; using PjRtDeviceCompiler = DeviceCompiler<xla::PjRtLoadedExecutable, xla::PjRtClient>; class XlaPlatformInfoTest : public ::testing::Test { protected: void SetUp() override {
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Sun Jan 14 15:17:12 UTC 2024 - 13.6K bytes - Viewed (0) -
tensorflow/compiler/jit/xla_compiler_options_util_test.cc
#include "tensorflow/core/tpu/tpu_defs.h" namespace tensorflow { namespace { using XlaDeviceCompiler = DeviceCompiler<xla::LocalExecutable, xla::LocalClient>; using XlaDeviceExecutablePersistor = DeviceExecutablePersistor<xla::LocalExecutable, xla::LocalClient>; using PjRtDeviceCompiler = DeviceCompiler<xla::PjRtLoadedExecutable, xla::PjRtClient>; using PjRtDeviceExecutablePersistor =
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri Dec 29 01:41:20 UTC 2023 - 14.8K bytes - Viewed (0)