Results 1 - 10 of 11 for FromTensor (0.41 sec)

  1. tensorflow/compiler/jit/pjrt_device_context.cc

      if (!status.ok()) {
        done(status);
        return;
      }
    
      xla::PjRtBuffer* device_buffer;
      AsyncValueTensor* device_tensor_av =
          tensorflow::AsyncValueTensor::FromTensor(device_tensor);
      if (use_pjrt_tensor_buffer_) {
        if (device_tensor_av) {
          done(absl::InvalidArgumentError(
              "If use_pjrt_tensor_buffer is set, the device tensor should not "
    - Last Modified: Sat Apr 13 08:49:31 UTC 2024
    - 11.6K bytes
  2. tensorflow/cc/experimental/base/tests/tensorhandle_test.cc

                             /*len=*/sizeof(value),
                             /*deleter=*/[](void*, size_t) {}, &status);
      ASSERT_TRUE(status.ok()) << status.message();
    
      TensorHandle handle =
          TensorHandle::FromTensor(original_tensor, *runtime, &status);
      ASSERT_TRUE(status.ok()) << status.message();
    
      Tensor tensor = handle.Resolve(&status);
      ASSERT_TRUE(status.ok()) << status.message();
    
      EXPECT_EQ(tensor.dims(), 0);
    - Last Modified: Sat Apr 13 09:56:08 UTC 2024
    - 6.9K bytes
  3. tensorflow/c/experimental/next_pluggable_device/tensor_pjrt_buffer_util.cc

    #include "tsl/platform/statusor.h"
    
    namespace tensorflow {
    
    absl::StatusOr<PJRT_Buffer*> GetPjRtCBufferFromTensor(const Tensor* tensor) {
      tensorflow::AsyncValueTensor* av_tensor =
          tensorflow::AsyncValueTensor::FromTensor(tensor);
      if (av_tensor == nullptr || av_tensor->GetBuffer() == nullptr) {
        return absl::InternalError("Input tensor does not have PjRtBuffer.");
      }
      auto* c_api_buffer =
    - Last Modified: Mon Apr 22 05:48:24 UTC 2024
    - 3.7K bytes
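    The snippet above shows that GetPjRtCBufferFromTensor returns an absl::StatusOr<PJRT_Buffer*> and reports an internal error when the tensor carries no PjRtBuffer. A minimal caller-side sketch of consuming that return value (the wrapper function, variable names, and the assumed tensor_pjrt_buffer_util.h header path are illustrative, not taken from the indexed files):

      #include "absl/status/status.h"
      #include "absl/status/statusor.h"
      #include "tensorflow/c/experimental/next_pluggable_device/tensor_pjrt_buffer_util.h"  // assumed header path
      #include "tensorflow/core/framework/tensor.h"

      // Hypothetical caller: fetch the PJRT_Buffer* behind `tensor`, or
      // propagate the InternalError produced in the snippet above.
      absl::Status UsePjRtCBuffer(const tensorflow::Tensor& tensor) {
        absl::StatusOr<PJRT_Buffer*> c_buffer =
            tensorflow::GetPjRtCBufferFromTensor(&tensor);
        if (!c_buffer.ok()) {
          return c_buffer.status();  // tensor has no PjRtBuffer attached
        }
        // *c_buffer can now be handed to PJRT C-API calls.
        return absl::OkStatus();
      }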
  4. tensorflow/c/experimental/next_pluggable_device/tensor_pjrt_buffer_util_test.cc

              xla::PjRtClient::HostBufferSemantics::kImmutableOnlyDuringCall,
              nullptr, pjrt_client->addressable_devices()[0]));
      tensorflow::AsyncValueTensor* av_tensor =
          tensorflow::AsyncValueTensor::FromTensor(&tensor);
      av_tensor->SetBuffer(std::move(buffer));
    
      EXPECT_THAT(
          GetPjRtCBufferFromTensor(&tensor),
          StatusIs(
              error::INTERNAL,
              HasSubstr(absl::StrCat(
    - Last Modified: Mon Apr 01 16:29:40 UTC 2024
    - 7.2K bytes
  5. tensorflow/cc/experimental/base/public/tensorhandle.h

      Tensor Resolve(Status* status);
    
      // Constructs a TensorHandle from a Tensor. If an error occurred,
      // status->ok() will be false, and the returned TensorHandle must not be used.
      static TensorHandle FromTensor(const Tensor& tensor, const Runtime& runtime,
                                     Status* status);
    
      // TensorHandle is movable, and not copyable
      TensorHandle(TensorHandle&&) = default;
    - Last Modified: Tue May 12 19:37:48 UTC 2020
    - 3.7K bytes
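    The header comment above spells out the contract for TensorHandle::FromTensor: on failure, status->ok() is false and the returned handle must not be used. A minimal sketch of that contract, modeled on the test in result 2 (the include paths, the namespace qualification, and the wrapper function are assumptions, not part of the indexed files):

      #include "tensorflow/cc/experimental/base/public/runtime.h"       // assumed path
      #include "tensorflow/cc/experimental/base/public/status.h"        // assumed path
      #include "tensorflow/cc/experimental/base/public/tensor.h"        // assumed path
      #include "tensorflow/cc/experimental/base/public/tensorhandle.h"

      using tensorflow::experimental::cc::Runtime;  // namespace assumed
      using tensorflow::experimental::cc::Status;
      using tensorflow::experimental::cc::Tensor;
      using tensorflow::experimental::cc::TensorHandle;

      // Hypothetical round trip: Tensor -> TensorHandle -> Tensor, checking
      // `status` after every call, as the header comment requires.
      bool RoundTrip(const Runtime& runtime, const Tensor& original_tensor) {
        Status status;
        TensorHandle handle =
            TensorHandle::FromTensor(original_tensor, runtime, &status);
        if (!status.ok()) {
          return false;  // per the header, the handle must not be used
        }
        Tensor resolved = handle.Resolve(&status);  // as in result 2
        return status.ok();
      }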
  6. tensorflow/compiler/jit/xla_tensor.cc

    /*static*/ XlaTensor* XlaTensor::FromTensor(const Tensor* tensor) {
      if (tensor->NumElements() == 0) {
        return nullptr;
      }
      XlaTensor* xla_tensor =
          FromOpaquePointer(const_cast<char*>(tensor->tensor_data().data()));
      return xla_tensor;
    }
    
    /*static*/ se::DeviceMemoryBase XlaTensor::DeviceMemoryFromTensor(
        const Tensor& tensor) {
      const XlaTensor* xla_tensor = FromTensor(&tensor);
      if (xla_tensor) {
    - Last Modified: Thu Feb 22 08:47:20 UTC 2024
    - 4.5K bytes
  7. tensorflow/compiler/jit/xla_tpu_device.cc

        const int dst_device_ordinal =
            dst_xla_context->stream()->parent()->device_ordinal();
    
        XlaTensor* const xla_input = XlaTensor::FromTensor(input);
        TF_RET_CHECK(xla_input != nullptr && xla_input->has_shaped_buffer());
        XlaTensor* const xla_output = XlaTensor::FromTensor(output);
        TF_RET_CHECK(xla_output != nullptr && !xla_output->has_shaped_buffer());
        TF_RET_CHECK(input->shape() == output->shape());
    
    - Last Modified: Tue May 28 22:53:47 UTC 2024
    - 20.9K bytes
  8. tensorflow/compiler/jit/xla_tensor.h

    // pointer data stored in the TensorBuffer is a tagged pointer.
    class XlaTensor {
     public:
      // Downcast from a Tensor to an XlaTensor. Return nullptr if the downcast
      // fails.
      static XlaTensor* FromTensor(const Tensor* tensor);
    
      // Create a DeviceMemoryBase from a Tensor. The Tensor can be an XlaTensor, in
      // which case the returned value is shaped_buffer()->root_buffer(), or a
    - Last Modified: Wed Sep 06 19:12:29 UTC 2023
    - 4.7K bytes
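    The header above documents XlaTensor::FromTensor as a downcast that returns nullptr when it fails, and the call sites in results 7, 9, and 10 all guard the result with CHECK or TF_RET_CHECK. A minimal sketch of that null-check idiom (the helper function and namespace qualification are assumptions):

      #include "tensorflow/compiler/jit/xla_tensor.h"
      #include "tensorflow/core/framework/tensor.h"

      // Hypothetical helper: true iff `t` is backed by an XlaTensor that
      // already has a shaped buffer, mirroring the guarded call sites above.
      bool HasXlaShapedBuffer(const tensorflow::Tensor& t) {
        tensorflow::XlaTensor* xla_tensor = tensorflow::XlaTensor::FromTensor(&t);
        // Per result 6, FromTensor returns nullptr when the downcast fails,
        // e.g. for a tensor with zero elements.
        return xla_tensor != nullptr && xla_tensor->has_shaped_buffer();
      }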
  9. tensorflow/compiler/jit/xla_launch_util.cc

        if (use_multiple_streams_) {
          CHECK(ctx->op_device_context() && ctx->op_device_context()->stream())
              << "Must have a stream available when using XLA tensors!";
          XlaTensor* xla_tensor = XlaTensor::FromTensor(t);
          CHECK(xla_tensor);
          xla_tensor->WaitForDefinitionEventOnStream(
              ctx->op_device_context()->stream());
        }
    
        arguments.emplace_back(device_shape, host_shape);
    - Last Modified: Thu May 16 00:36:08 UTC 2024
    - 40.4K bytes
  10. tensorflow/compiler/jit/xla_device_context.cc

              << " " << cpu_tensor->NumElements() << " "
              << cpu_tensor->shape().DebugString() << " "
              << device_tensor->shape().DebugString();
    
      XlaTensor* xla_tensor = XlaTensor::FromTensor(device_tensor);
      CHECK(xla_tensor);
    
      XlaLayoutPreference layout_preference =
          shape_determination_fns_.layout_preference_fn(
              device_tensor->shape(), device_tensor->dtype(), std::nullopt);
    - Last Modified: Thu May 16 00:36:08 UTC 2024
    - 12.7K bytes