- Sort by: Score
- Results per page: 10
- Languages: All
Results 11 - 20 of 23 for genAllocator (0.17 sec)
-
tensorflow/compiler/mlir/tfrt/ir/tfrt_fallback_async.cc
if (!op.getResults().empty()) p << " : " << op.getResults().size(); } void ExecuteOpWithAllocator::print(OpAsmPrinter &p) { ExecuteOpWithAllocator op = *this; p << "(" << op.getAllocator() << ") key(" << op->getAttrOfType<mlir::IntegerAttr>("op_key").getInt() << ") cost(" << op->getAttrOfType<mlir::IntegerAttr>("_tfrt_cost").getInt()
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Jun 08 01:19:25 UTC 2023 - 15.6K bytes - Viewed (0) -
tensorflow/compiler/jit/xla_platform_info.h
// // This is necessary because for XLA devices the underlying TF allocator returns // dummy tensors. // // `stream` parameter is nullable when running on host. std::shared_ptr<se::DeviceMemoryAllocator> GetAllocator( DeviceBase* device, se::Stream* stream, const XlaPlatformInfo& platform_info); } // namespace tensorflow
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Feb 21 09:53:30 UTC 2024 - 7.2K bytes - Viewed (0) -
tensorflow/c/kernels/summary_op_test.cc
#include "tensorflow/core/protobuf/error_codes.pb.h" namespace tensorflow { namespace { class DummyDevice : public DeviceBase { public: explicit DummyDevice(Env* env) : DeviceBase(env) {} Allocator* GetAllocator(AllocatorAttributes /*attr*/) override { return cpu_allocator(); } }; // Helper for comparing output and expected output void ExpectSummaryMatches(const Summary& actual, const string& expected_str) {
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Mon Jul 18 15:10:51 UTC 2022 - 6.7K bytes - Viewed (0) -
tensorflow/compiler/jit/kernels/xla_ops.cc
for (int i = 0; i < resources.size(); i++) { resource_var_ptrs[resources[i]] = variable_infos[i].var()->tensor(); } std::shared_ptr<se::DeviceMemoryAllocator> allocator = GetAllocator(ctx->device(), GetStream(ctx), platform_info); XlaComputationLaunchContext launch_context = GetLaunchContext(platform_info, ctx, client, allocator.get());
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri May 17 22:46:36 UTC 2024 - 41.4K bytes - Viewed (0) -
tensorflow/compiler/jit/xla_device.cc
.set_allowed_devices(allowed_devices_) .set_intra_op_parallelism_threads(intra_op_parallelism_threads_); return xla::ClientLibrary::GetOrCreateLocalClient(options); } Allocator* XlaDevice::GetAllocator(AllocatorAttributes attr) { mutex_lock lock(mu_); return GetAllocatorLocked(attr); } Allocator* XlaDevice::GetAllocatorLocked(AllocatorAttributes attr) { if (attr.on_host()) {
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Mon May 20 21:05:42 UTC 2024 - 24.3K bytes - Viewed (0) -
tensorflow/compiler/jit/xla_launch_util_test.cc
host_alloc_attr.set_on_host(true); host_allocator_ = device_->GetAllocator(host_alloc_attr); // Get the device allocator. This should give us an AsyncValueAllocator. AllocatorAttributes device_alloc_attr; device_alloc_attr.set_on_host(false); device_allocator_ = device_->GetAllocator(device_alloc_attr); // Create the DeviceCompiler to help with compiling executables.
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Feb 21 09:53:30 UTC 2024 - 28.8K bytes - Viewed (0) -
tensorflow/compiler/jit/xla_compile_on_demand_op.cc
se::Stream* stream = ctx->op_device_context() ? ctx->op_device_context()->stream() : nullptr; std::shared_ptr<se::DeviceMemoryAllocator> allocator_ptr = GetAllocator(ctx->device(), stream, platform_info_); se::DeviceMemoryAllocator* allocator = allocator_ptr.get(); XlaComputationLaunchContext launch_context( client, allocator, client->default_device_ordinal(),
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Feb 29 08:39:39 UTC 2024 - 13.4K bytes - Viewed (0) -
tensorflow/compiler/jit/xla_device.h
std::optional<std::set<int>> allowed_devices; }; // Creates a new XLA Device. XlaDevice(const SessionOptions& session_options, const Options& options); ~XlaDevice() override; Allocator* GetAllocator(AllocatorAttributes attr) override TF_LOCKS_EXCLUDED(mu_); void Compute(OpKernel* op_kernel, OpKernelContext* context) override; void ComputeAsync(AsyncOpKernel* op_kernel, OpKernelContext* context,
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Feb 21 09:53:30 UTC 2024 - 13.4K bytes - Viewed (0) -
tensorflow/compiler/jit/increase_dynamism_for_auto_jit_pass_test.cc
public: explicit FakeDevice(const DeviceAttributes& device_attributes) : Device(nullptr, device_attributes) {} Status Sync() override { return errors::Unimplemented("FakeDevice::Sync()"); } Allocator* GetAllocator(AllocatorAttributes attr) override { return nullptr; } static std::unique_ptr<Device> Make(const string& name, const string& type) { DeviceAttributes device_attributes; device_attributes.set_name(name);
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Feb 22 08:47:20 UTC 2024 - 18.3K bytes - Viewed (0) -
tensorflow/compiler/mlir/tf2xla/transforms/tf2xla_rewriter.cc
if (!shape_or.ok()) { return op_->emitRemark() << "failed to get shape for expression. " << expr.HumanString(); } tensors.emplace_back( device_->GetAllocator(tensorflow::AllocatorAttributes()), expr.dtype(), shape_or.value()); tensorflow::Tensor& tensor = tensors.back(); tensorflow::XlaExpression::AssignExpressionToTensor(expr, &tensor);
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu May 02 09:16:07 UTC 2024 - 18.9K bytes - Viewed (0)