- Sort by: Score
- Results per page: 10
- Language filter: All
Results 1 - 9 of 9 for xla_tensor (0.16 sec)
-
tensorflow/compiler/jit/xla_device_context.cc
} XlaTensor* xla_tensor = XlaTensor::FromTensor(device_tensor); xla_tensor->WaitForDefinitionEventOnStream(device_to_host_stream.get()); // Transfer manager requires the shape of the shaped buffer to be the same as // literal shape except for the layout. Set the literal to use xla_tensor's // shape as it is derived from the cpu_tensor's shape using
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu May 16 00:36:08 UTC 2024 - 12.7K bytes - Viewed (0) -
tensorflow/compiler/jit/xla_tpu_device.cc
const tensorflow::XlaTensor* xla_tensor = tensorflow::XlaTensor::FromTensor(&tensor); if (xla_tensor == nullptr) { return errors::InvalidArgument( "Expected an XlaTensor when computing padded shape"); } if (!xla_tensor->has_shaped_buffer()) { return errors::InvalidArgument( "XlaTensor is expected to have device memory allocated when "
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue May 28 22:53:47 UTC 2024 - 20.9K bytes - Viewed (0) -
tensorflow/compiler/jit/xla_launch_util.cc
ctx->allocate_temp(output_dtype, output_shape, &output_tensor)); if (output_tensor.TotalBytes() > 0) { XlaTensor* xla_tensor = XlaTensor::FromTensor(&output_tensor); TF_RET_CHECK(xla_tensor); xla_tensor->set_shaped_buffer(output.TakeSubTree({output_num})); if (use_multiple_streams) { xla_tensor->ResetDefinitionEvent(definition_event, stream); } } return output_tensor; }
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu May 16 00:36:08 UTC 2024 - 40.4K bytes - Viewed (0) -
tensorflow/compiler/jit/xla_device.cc
Status DefaultPaddedShapeFn(const Tensor& tensor, xla::Shape* shape) { const tensorflow::XlaTensor* xla_tensor = tensorflow::XlaTensor::FromTensor(&tensor); if (xla_tensor == nullptr) { return TensorShapeToXLAShape(tensor.dtype(), tensor.shape(), shape); } const xla::ShapedBuffer& shaped_buffer = xla_tensor->shaped_buffer(); *shape = shaped_buffer.on_device_shape(); return absl::OkStatus(); }
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Mon May 20 21:05:42 UTC 2024 - 24.3K bytes - Viewed (0) -
tensorflow/compiler/jit/xla_launch_util.h
#define TENSORFLOW_COMPILER_JIT_XLA_LAUNCH_UTIL_H_ #include <map> #include <memory> #include <set> #include <vector> #include "tensorflow/compiler/jit/variable_info.h" #include "tensorflow/compiler/jit/xla_tensor.h" #include "tensorflow/compiler/tf2xla/xla_compiler.h" #include "xla/client/local_client.h" #include "xla/pjrt/pjrt_client.h" #include "xla/service/shaped_buffer.h" #include "xla/stream_executor/device_memory_allocator.h"
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Feb 21 09:53:30 UTC 2024 - 11.8K bytes - Viewed (0) -
tensorflow/compiler/jit/xla_device.h
#ifndef TENSORFLOW_COMPILER_JIT_XLA_DEVICE_H_ #define TENSORFLOW_COMPILER_JIT_XLA_DEVICE_H_ #include <set> #include "absl/types/optional.h" #include "tensorflow/compiler/jit/xla_tensor.h" #include "tensorflow/compiler/tf2xla/layout_util.h" #include "tensorflow/compiler/tf2xla/xla_compiler.h" #include "tensorflow/compiler/tf2xla/xla_op_registry.h" #include "xla/client/local_client.h"
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Feb 21 09:53:30 UTC 2024 - 13.4K bytes - Viewed (0) -
tensorflow/c/eager/dlpack.cc
DLManagedTensor* dlm_tensor = &tf_dlm_tensor_ctx->tensor; dlm_tensor->manager_ctx = tf_dlm_tensor_ctx; dlm_tensor->deleter = &DLManagedTensorDeleter; dlm_tensor->dl_tensor.device = tf_dlm_context; int ndim = tensor->dims(); dlm_tensor->dl_tensor.ndim = ndim; dlm_tensor->dl_tensor.data = tf_dlm_data; dlm_tensor->dl_tensor.dtype = tf_dlm_type;
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Feb 15 09:49:45 UTC 2024 - 12.8K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/experimental/remat/rematerializer.cc
for (const int old_tensor : std::vector<int>(operations_[iop].tensors)) { const auto new_tensor = std::lower_bound(new_tensors.begin(), new_tensors.end(), std::make_pair(old_tensor, 0)); if (new_tensor != new_tensors.end() && new_tensor->first == old_tensor) { DelUse(iop, old_tensor); AddUse(iop, new_tensor->second); } }
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Mar 14 20:57:44 UTC 2023 - 13.7K bytes - Viewed (0) -
tensorflow/compiler/jit/xla_platform_info.cc
// // Importantly we can't use ctx->device()->GetAllocator() as the allocator // (which xla_allocator above uses) as on an XlaDevice, this is a dummy // allocator that returns XlaTensor objects. The XlaCompiler needs a real // allocator to allocate real buffers. platform_id = xla_device_metadata->platform()->id(); custom_allocator =
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu May 02 17:23:27 UTC 2024 - 17.4K bytes - Viewed (0)