Search Options

Results per page
Sort
Preferred Languages
Advanced

Results 1 - 7 of 7 for shaped_buffer (0.15 sec)

  1. tensorflow/compiler/jit/xla_tensor.h

      // manage the memory for these tensors a ShapedBuffer may be required.
    
      // Return true if this XlaTensor contains a ShapedBuffer.
      bool has_shaped_buffer() const { return shaped_buffer_.has_value(); }
      // Return the contained ShapedBuffer.
      // REQUIRES: has_shaped_buffer()
      const xla::ShapedBuffer& shaped_buffer() const {
        CHECK(has_shaped_buffer());
        return *shaped_buffer_;
      }
      xla::ShapedBuffer& shaped_buffer() {
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed Sep 06 19:12:29 UTC 2023
    - 4.7K bytes
    - Viewed (0)
  2. tensorflow/compiler/jit/xla_tpu_device.cc

                << " dst: " << dst_compute_stream->parent()->device_ordinal()
                << ", "
                << " input buffers: " << xla_input->shaped_buffer().ToString()
                << " output buffers: " << xla_output->shaped_buffer().ToString();
    
        // Wait for definition event of the source tensor so the input buffers are
        // available.
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue May 28 22:53:47 UTC 2024
    - 20.9K bytes
    - Viewed (0)
  3. tensorflow/compiler/jit/xla_tensor.cc

                                subshape.layout().memory_space()));
        // Move our buffer into shaped_buffer, which takes ownership of it.
        index_to_buffer.second = buffer.Release();
      }
    
      VLOG(4) << shaped_buffer.ToString();
    
      set_shaped_buffer(std::move(shaped_buffer));
      return absl::OkStatus();
    }
    
    void XlaTensor::WaitForDefinitionEventOnStream(se::Stream* stream) {
      mutex_lock lock(mu_);
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Feb 22 08:47:20 UTC 2024
    - 4.5K bytes
    - Viewed (0)
  4. tensorflow/compiler/jit/xla_device_context.cc

        VLOG(2) << "Transfer to device as literal: " << literal.ToString() << " "
                << xla_tensor->shaped_buffer().ToString();
        if (UseMultipleStreams() &&
            !transfer_manager_->CanShapedBufferBeAccessedNow(
                stream_->parent(), xla_tensor->shaped_buffer())) {
          // Initially wait for the compute stream so that memory allocations are
          // synchronized.
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu May 16 00:36:08 UTC 2024
    - 12.7K bytes
    - Viewed (0)
  5. tensorflow/compiler/jit/xla_device.cc

          tensorflow::XlaTensor::FromTensor(&tensor);
      if (xla_tensor == nullptr) {
        return TensorShapeToXLAShape(tensor.dtype(), tensor.shape(), shape);
      }
    
      const xla::ShapedBuffer& shaped_buffer = xla_tensor->shaped_buffer();
      *shape = shaped_buffer.on_device_shape();
      return absl::OkStatus();
    }
    
    // Caches a XlaDeviceAllocator per <backend, device ordinal> pair. A
    // XlaDeviceAllocator is created on demand and is associated with a
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Mon May 20 21:05:42 UTC 2024
    - 24.3K bytes
    - Viewed (0)
  6. tensorflow/compiler/jit/xla_launch_util.h

    #include "tensorflow/compiler/jit/xla_tensor.h"
    #include "tensorflow/compiler/tf2xla/xla_compiler.h"
    #include "xla/client/local_client.h"
    #include "xla/pjrt/pjrt_client.h"
    #include "xla/service/shaped_buffer.h"
    #include "xla/stream_executor/device_memory_allocator.h"
    #include "tensorflow/core/framework/allocation_description.pb.h"
    #include "tensorflow/core/framework/tensor.h"
    #include "tensorflow/core/lib/core/status.h"
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed Feb 21 09:53:30 UTC 2024
    - 11.8K bytes
    - Viewed (0)
  7. tensorflow/compiler/jit/xla_launch_util.cc

      // buffer with a nullptr root index table. This allows the code below to treat
      // output as a tuple unconditionally.
      if (!output.on_host_shape().IsTuple()) {
        ShapedBuffer nontuple_buffer = output.release();
        ShapedBuffer buffer(
            xla::ShapeUtil::MakeTupleShape({nontuple_buffer.on_host_shape()}),
            xla::ShapeUtil::MakeTupleShape({nontuple_buffer.on_device_shape()}),
            output.device_ordinal());
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu May 16 00:36:08 UTC 2024
    - 40.4K bytes
    - Viewed (0)
Back to top