Results 11 - 17 of 17 for device_tensor (0.16 sec)

  1. tensorflow/compiler/jit/xla_launch_util_test.cc

        Tensor* host_tensor = CreateHostTensor<T>(shape, data);
        Tensor* device_tensor =
            new Tensor(device_allocator_, DataTypeToEnum<T>::v(), shape);
        TF_EXPECT_OK(device_context_->CopyCPUTensorToDeviceSync(
            host_tensor, device_, device_tensor));
    
        tensors_.push_back(device_tensor);
        return device_tensor;
      }
    
      // Gets the `output_index`-th output set in the context_
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed Feb 21 09:53:30 UTC 2024
    - 28.8K bytes
    - Viewed (0)
  2. tensorflow/compiler/mlir/tfrt/ir/gpu_ops.td

    // GPU MIG.
    def TransferToDeviceOp: Gpu_Op<"transfer_to_device"> {
      let summary = "Transfer a CPU tensor to device.";
    
      let description = [{
        Transfer a CPU tensor to device.
    
        Example:
          %device_tensor = gpurt.transfer_to_device %cpu_tensor
      }];
    
      let arguments = (ins TFTensorType);
      let results = (outs TFTensorType);
      let assemblyFormat = "operands attr-dict";
    }
    
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 18 15:01:21 UTC 2024
    - 3.3K bytes
    - Viewed (0)
  3. tensorflow/compiler/jit/xla_launch_util_gpu_test.cc

        Tensor* host_tensor = CreateHostTensor<T>(shape, data);
        Tensor* device_tensor =
            new Tensor(device_allocator_, DataTypeToEnum<T>::v(), shape);
        TF_EXPECT_OK(device_context_->CopyCPUTensorToDeviceSync(
            host_tensor, device_, device_tensor));
    
        tensors_.push_back(device_tensor);
        return device_tensor;
      }
    
      // Compiles the op set in the context_ to a PjRtLoadedExecutable
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed Sep 06 19:12:29 UTC 2023
    - 10K bytes
    - Viewed (0)
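
The snippet above (and the identical one in result 1) is cut off partway through a test-fixture helper that materializes a device-resident tensor from host data. A minimal reconstruction of that helper as a fixture member function is sketched below; the fixture members (device_allocator_, device_context_, device_, tensors_) and the CreateHostTensor<T> call are taken from the excerpt, while the function name, signature, and absl::Span parameter are assumptions rather than text from the file.

    // Sketch of the truncated test-fixture helper excerpted in results 1 and 3.
    // Assumed to live inside the test fixture class, which owns
    // device_allocator_, device_context_, device_, and tensors_.
    template <typename T>
    Tensor* CreateDeviceTensor(const TensorShape& shape,
                               absl::Span<const T> data) {
      // Stage the values in host memory first.
      Tensor* host_tensor = CreateHostTensor<T>(shape, data);

      // Allocate an uninitialized tensor backed by the device allocator.
      Tensor* device_tensor =
          new Tensor(device_allocator_, DataTypeToEnum<T>::v(), shape);

      // Blocking host-to-device copy; fails the test if the copy errors out.
      TF_EXPECT_OK(device_context_->CopyCPUTensorToDeviceSync(
          host_tensor, device_, device_tensor));

      // The fixture tracks the tensor so it can delete it on teardown.
      tensors_.push_back(device_tensor);
      return device_tensor;
    }
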
  4. tensorflow/compiler/jit/xla_device_context.h

          thread::ThreadPool* thread_pool);
    
      void CopyCPUTensorToDevice(const Tensor* cpu_tensor, Device* device,
                                 Tensor* device_tensor, StatusCallback done,
                                 bool sync_dst_compute) const override;
      void CopyDeviceTensorToCPU(const Tensor* device_tensor,
                                 absl::string_view tensor_name, Device* device,
                                 Tensor* cpu_tensor, StatusCallback done) override;
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed Sep 06 19:12:29 UTC 2023
    - 5.1K bytes
    - Viewed (0)
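
The header in result 4 declares the asynchronous copy hooks that XlaDeviceContext overrides: each copy takes a StatusCallback and may return before the transfer has finished. A caller that wants a blocking copy waits on that callback, much like the CopyCPUTensorToDeviceSync convenience call used in results 1 and 3. A minimal sketch under those assumptions follows; the helper name CopyToDeviceBlocking and the chosen includes are illustrative, not taken from the header.

    // Minimal sketch: drive the asynchronous CopyCPUTensorToDevice hook
    // synchronously by blocking on its StatusCallback.
    #include "absl/synchronization/notification.h"
    #include "tensorflow/core/common_runtime/device.h"   // Device
    #include "tensorflow/core/framework/device_base.h"   // DeviceContext
    #include "tensorflow/core/framework/tensor.h"        // Tensor
    #include "tensorflow/core/platform/status.h"         // Status

    namespace tensorflow {

    Status CopyToDeviceBlocking(DeviceContext* ctx, Device* device,
                                const Tensor& cpu_tensor,
                                Tensor* device_tensor) {
      Status copy_status;
      absl::Notification done;
      ctx->CopyCPUTensorToDevice(
          &cpu_tensor, device, device_tensor,
          [&](const Status& s) {  // Invoked once the copy completes.
            copy_status = s;
            done.Notify();
          },
          /*sync_dst_compute=*/true);
      done.WaitForNotification();  // Block until the callback reports status.
      return copy_status;
    }

    }  // namespace tensorflow
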
  5. tensorflow/compiler/jit/xla_host_send_device_context.cc

    #include "tensorflow/compiler/tf2xla/type_util.h"
    
    namespace tensorflow {
    
    void XlaHostSendDeviceContext::CopyCPUTensorToDevice(
        const Tensor* cpu_tensor, Device* device, Tensor* device_tensor,
        StatusCallback done, bool sync_dst_compute) const {
      auto status = stream_->Memcpy(device_memory_base_, cpu_tensor->data(),
                                    device_memory_base_->size());
      if (!status.ok()) {
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Fri May 17 22:46:36 UTC 2024
    - 1.6K bytes
    - Viewed (0)
  6. tensorflow/compiler/jit/xla_host_recv_device_context.cc

    #include "tensorflow/compiler/tf2xla/shape_util.h"
    #include "tensorflow/compiler/tf2xla/type_util.h"
    
    namespace tensorflow {
    
    void XlaHostRecvDeviceContext::CopyDeviceTensorToCPU(
        const Tensor* device_tensor, StringPiece tensor_name, Device* device,
        Tensor* cpu_tensor, StatusCallback done) {
      DataType dtype = EncodePrimitiveTypeAsDataType(shape_.element_type()).value();
      TensorShape tensor_shape;
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Fri May 17 22:46:36 UTC 2024
    - 1.9K bytes
    - Viewed (0)
  7. tensorflow/compiler/jit/kernels/xla_ops.cc

            args.device_context = new XlaHostSendDeviceContext(
                stream, device_memory_base, shape, done_event);
    
            Tensor device_tensor;
            bool is_dead;
            TF_RETURN_IF_ERROR(ctx->rendezvous()->Recv(
                parsed_key, args, &device_tensor, /*is_dead=*/&is_dead));
    
            return std::move(done_event);
          };
    }
    
    absl::StatusOr<xla::ExecutionOutput> RunExecutable(
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Fri May 17 22:46:36 UTC 2024
    - 41.4K bytes
    - Viewed (0)