- Sort: Score
- Results per page: 10
- Languages: All
Results 1 - 10 of 11 for _device_ordinal (0.18 sec)
-
tensorflow/compiler/jit/xla_device.cc
: device_ordinal_(device_ordinal), device_type_(device_type), platform_(platform), shape_determination_fns_(std::move(shape_determination_fns)), padded_shape_fn_(std::move(padded_shape_fn)), use_multiple_streams_(use_multiple_streams) {} int XlaDevice::Metadata::device_ordinal() const { return device_ordinal_; }
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Mon May 20 21:05:42 UTC 2024 - 24.3K bytes - Viewed (0) -
tensorflow/compiler/jit/xla_device.h
class Metadata { public: Metadata(int device_ordinal, se::Platform* platform, const DeviceType& device_type, std::vector<XlaShapeLayoutHelpers::ShapeDeterminationFns> shape_determination_fns, PaddedShapeFn padded_shape_fn, bool use_multiple_streams); // The index of the device on this host. int device_ordinal() const; se::Platform* platform() const;
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Feb 21 09:53:30 UTC 2024 - 13.4K bytes - Viewed (0) -
tensorflow/compiler/jit/xla_launch_util.cc
xla::LocalClient* client, se::DeviceMemoryAllocator* xla_allocator, int device_ordinal, bool allocate_xla_tensors, bool use_multiple_streams) : client_(client), xla_allocator_(xla_allocator), allocate_xla_tensors_(allocate_xla_tensors), use_multiple_streams_(use_multiple_streams), device_ordinal_(device_ordinal) { if (use_multiple_streams_) {
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu May 16 00:36:08 UTC 2024 - 40.4K bytes - Viewed (0) -
tensorflow/compiler/jit/xla_launch_util.h
// objects. XlaComputationLaunchContext(xla::LocalClient* client, se::DeviceMemoryAllocator* xla_allocator, int device_ordinal, bool allocate_xla_tensors, bool use_multiple_streams); // Builds a XlaCompiler::Argument vector from the arguments to an XlaLaunch // op.
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Feb 21 09:53:30 UTC 2024 - 11.8K bytes - Viewed (0) -
tensorflow/compiler/jit/xla_tpu_device.cc
input->dtype(), shape, dst_xla_context->client(), dst_device_ordinal)); VLOG(2) << "TpuDeviceToDeviceCopy: src: " << src_compute_stream->parent()->device_ordinal() << ", " << " dst: " << dst_compute_stream->parent()->device_ordinal() << ", " << " input buffers: " << xla_input->shaped_buffer().ToString() << " output buffers: " << xla_output->shaped_buffer().ToString();
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue May 28 22:53:47 UTC 2024 - 20.9K bytes - Viewed (0) -
tensorflow/compiler/jit/xla_device_context.cc
TF_RET_CHECK(!xla_tensor->has_shaped_buffer()); TF_RETURN_IF_ERROR( xla_tensor->AllocateShapedBuffer(device_tensor->dtype(), shape, client_, stream_->parent()->device_ordinal())); // The cpu_tensor and literal that we created here hold the data of host // tensor in descending layout. The layout could be different from layout in
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu May 16 00:36:08 UTC 2024 - 12.7K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/transforms/prepare_tpu_computation_for_tf_export.cc
auto recv_at_host = rewriter.create<TF::_XlaRecvAtHostOp>( func.getLoc(), op.getOperandTypes(), /*dynamic_key=*/dynamic_key, op.getSendKeyAttr(), /*device_ordinal=*/rewriter.getI64IntegerAttr(0), rewriter.getStringAttr("TPU")); for (auto result : llvm::zip(cloned_func.getArguments(), recv_at_host->getResults())) {
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 11.8K bytes - Viewed (0) -
tensorflow/compiler/jit/get_compiler_ir.cc
xla::ExecutableBuildOptions build_options; if (result.collective_info) { build_options.set_num_replicas(result.collective_info->group_size); } build_options.set_device_ordinal( options.device_ordinal != -1 ? options.device_ordinal : local_client->default_device_ordinal()); build_options.set_result_layout(result.xla_output_shape); build_options.set_device_allocator(options.device_allocator.get());
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Feb 22 06:59:07 UTC 2024 - 19K bytes - Viewed (0) -
tensorflow/compiler/jit/kernels/xla_ops.cc
xla::LocalClient* client, se::DeviceMemoryAllocator* allocator) { se::Stream* stream = GetStream(ctx); int device_ordinal = stream ? stream->parent()->device_ordinal() : client->default_device_ordinal(); XlaComputationLaunchContext launch_context( client, allocator, device_ordinal, /*allocate_xla_tensors=*/platform_info.is_on_xla_device(),
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri May 17 22:46:36 UTC 2024 - 41.4K bytes - Viewed (0) -
tensorflow/compiler/mlir/tf2xla/internal/passes/clustering_passes.td
%2 = "tf._XlaRecvAtHost"(%1) {device_ordinal = 0 : i64, key = "host_compute_channel_0_0_args"} : (tensor<3x!tf_type.string>) -> tensor<f32> %3 = "tf.Identity"(%2) : (tensor<f32>) -> tensor<f32> "tf._XlaSendFromHost"(%3, %1) {device_ordinal = 0 : i64, key = "host_compute_channel_0_0_retvals"} : (tensor<f32>, tensor<3x!tf_type.string>) -> ()
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Apr 30 02:01:13 UTC 2024 - 19.8K bytes - Viewed (0)