- Sort: Score
- Results per page: 10
- Languages: All
Results 1 - 10 of 23 for PJRT (0.19 sec)
-
tensorflow/c/experimental/next_pluggable_device/tensor_pjrt_buffer_util_test.cc
#include <optional> #include <utility> #include <vector> #include <gtest/gtest.h> #include "absl/log/check.h" #include "xla/pjrt/c/pjrt_c_api.h" #include "xla/pjrt/c/pjrt_c_api_cpu.h" #include "xla/pjrt/c/pjrt_c_api_wrapper_impl.h" #include "xla/pjrt/cpu/cpu_client.h" #include "xla/pjrt/pjrt_api.h" #include "xla/pjrt/pjrt_c_api_client.h" #include "xla/shape.h" #include "xla/shape_util.h" #include "tensorflow/core/framework/types.h"
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Mon Apr 01 16:29:40 UTC 2024 - 7.2K bytes - Viewed (0) -
tensorflow/c/experimental/next_pluggable_device/BUILD
"@local_xla//xla/pjrt:pjrt_api", "@local_xla//xla/pjrt:pjrt_c_api_client", "@local_xla//xla/pjrt/c:pjrt_c_api_cpu", "@local_xla//xla/pjrt/c:pjrt_c_api_hdrs", "@local_xla//xla/pjrt/c:pjrt_c_api_wrapper_impl", "@local_xla//xla/pjrt/cpu:cpu_client", ],
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 11 23:52:39 UTC 2024 - 3.8K bytes - Viewed (0) -
tensorflow/compiler/jit/pjrt_device_compiler_client.h
#define TENSORFLOW_COMPILER_JIT_PJRT_DEVICE_COMPILER_CLIENT_H_ #include <memory> #include <optional> #include <string> #include "tensorflow/compiler/jit/device_compiler_client.h" #include "xla/pjrt/pjrt_client.h" namespace tensorflow { // Calls into PjRtClient to provide functionality for building, serializing and // loading PjRtLoadedExecutables. class PjRtDeviceCompilerClient
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Feb 21 12:19:41 UTC 2024 - 3.4K bytes - Viewed (0) -
tensorflow/c/experimental/next_pluggable_device/c_api.h
TF_Status* status); // ---------------------------- PJRT ----------------------------------------- // Passes the pointer to a vector of PJRT_NamedValue and number of options to // set options for creating a PJRT client. Passes nullptr for create_options and // 0 for num_options if no options need to be set. You can use // ConvertToPjRtNamedValueList in // tensorflow/compiler/xla/pjrt/c/pjrt_c_api_helpers.h to generate the options.
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Dec 20 20:01:06 UTC 2023 - 7.2K bytes - Viewed (0) -
tensorflow/compiler/jit/xla_compile_util.h
absl::Span<const DataType> result_types); // Checks if single device compilation and execution with PJRT is enabled for // `device_type` in either the XlaLaunch op or the XlaCompileOnDemand op. bool UsePjRtForSingleDeviceCompilation(const DeviceType& device_type); // Gets the resource name of the PjRt DeviceCompiler for `device_type`. std::string GetPjRtDeviceCompilerResourceName(const DeviceType& device_type);
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Feb 21 09:53:30 UTC 2024 - 2.4K bytes - Viewed (0) -
tensorflow/compiler/jit/pjrt_device_compiler_client.cc
TF_ASSIGN_OR_RETURN(auto executable, client_->Compile(*result.computation, GetPjRtCompileOptions(options, result))); VLOG(2) << "Compiled PJRT executable " << executable->name() << " num_replicas " << executable->num_replicas() << " num_partitions " << executable->num_partitions(); return std::move(executable); }
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Feb 21 12:19:41 UTC 2024 - 3.6K bytes - Viewed (0) -
tensorflow/c/experimental/next_pluggable_device/tensor_pjrt_buffer_util.cc
#include "absl/status/status.h" #include "absl/status/statusor.h" #include "absl/strings/str_cat.h" #include "tensorflow/compiler/jit/pjrt_tensor_buffer_util.h" #include "xla/pjrt/c/pjrt_c_api.h" #include "xla/pjrt/pjrt_c_api_client.h" #include "xla/pjrt/pjrt_client.h" #include "tensorflow/core/framework/resource_mgr.h" #include "tensorflow/core/framework/tensor.h" #include "tensorflow/core/framework/types.h"
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Mon Apr 22 05:48:24 UTC 2024 - 3.7K bytes - Viewed (0) -
tensorflow/c/experimental/next_pluggable_device/tensor_pjrt_buffer_util.h
#ifndef TENSORFLOW_C_EXPERIMENTAL_NEXT_PLUGGABLE_DEVICE_TENSOR_PJRT_BUFFER_UTIL_H_ #define TENSORFLOW_C_EXPERIMENTAL_NEXT_PLUGGABLE_DEVICE_TENSOR_PJRT_BUFFER_UTIL_H_ #include "xla/pjrt/c/pjrt_c_api.h" #include "xla/pjrt/pjrt_c_api_client.h" #include "tensorflow/core/framework/tensor.h" namespace tensorflow { absl::StatusOr<PJRT_Buffer*> GetPjRtCBufferFromTensor(const Tensor* tensor);
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Mon Apr 22 05:48:24 UTC 2024 - 1.5K bytes - Viewed (0) -
tensorflow/compiler/jit/xla_compiler_options_util.h
#include "tensorflow/compiler/jit/device_compiler.h" #include "tensorflow/compiler/jit/xla_platform_info.h" #include "tensorflow/compiler/tf2xla/xla_compiler.h" #include "xla/client/local_client.h" #include "xla/pjrt/pjrt_client.h" namespace tensorflow { // Returns created options for the XLA compiler. XlaCompiler::Options GenerateCompilerOptions( const DeviceCompiler<xla::LocalExecutable, xla::LocalClient>&
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri Dec 29 01:41:20 UTC 2023 - 2.7K bytes - Viewed (0) -
tensorflow/compiler/jit/pjrt_compile_util.h
// Compiles a `function` to PjRtLoadedExecutable `executable` with `ctx`. // The compilation result is output in `compilation_result`. The PJRT client // used for compilation is output in `client`. The PJRT executable is output in // `executable`. Status CompileToPjRtLoadedExecutable( const OpKernelContext& ctx, const XlaPlatformInfo& platform_info, const NameAttrList& function, const std::vector<XlaCompiler::Argument>& args,
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Sep 06 19:12:29 UTC 2023 - 2.7K bytes - Viewed (0)