Results 1 - 9 of 9 for device_vendor (0.18 sec)

  1. tensorflow/c/experimental/stream_executor/stream_executor_test.cc

                                       SE_CreateDeviceParams* params,
                                       TF_Status* status) {
        params->device->hardware_name = hardware_name;
        params->device->device_vendor = vendor;
        params->device->pci_bus_id = pci_bus_id;
      };
    
      device_fns_.get_numa_node = [](const SP_Device* device) { return 123; };
      device_fns_.get_memory_bandwidth = [](const SP_Device* device) -> int64_t {
    - Last Modified: Mon May 20 19:54:04 UTC 2024
    - 26.5K bytes
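
    The excerpt above is the plugin-side create_device callback from the
    StreamExecutor C API test: it fills the identity fields of the SP_Device
    handed to it through SE_CreateDeviceParams. Below is a minimal sketch of
    such a callback, assuming the SP_Device/SE_CreateDeviceParams layout from
    stream_executor.h; the function name and string values are illustrative
    placeholders, not values from the file.

      #include "tensorflow/c/experimental/stream_executor/stream_executor.h"
      #include "tensorflow/c/tf_status.h"

      // Sketch of a plugin's create_device callback. The framework allocates
      // params->device; the plugin fills in its identity strings.
      void CreateDevice(const SP_Platform* platform,
                        SE_CreateDeviceParams* params, TF_Status* status) {
        params->device->hardware_name = "hypothetical-accelerator";  // placeholder
        params->device->device_vendor = "hypothetical-vendor";       // placeholder
        params->device->pci_bus_id = "0000:00:00.0";                 // placeholder
        TF_SetStatus(status, TF_OK, "");
      }

      // A plugin would typically install this during SE_InitPlugin, e.g.
      //   platform_fns->create_device = CreateDevice;
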
  2. tensorflow/c/experimental/stream_executor/stream_executor.cc

        internal::DeviceDescriptionBuilder builder;
        if (device_.hardware_name != nullptr) {
          builder.set_name(device_.hardware_name);
        }
        if (device_.device_vendor != nullptr) {
          builder.set_device_vendor(device_.device_vendor);
        }
        if (device_.pci_bus_id != nullptr) {
          builder.set_pci_bus_id(device_.pci_bus_id);
        }
    
        if (device_fns_->get_numa_node != nullptr) {
    - Last Modified: Fri Jun 14 07:39:19 UTC 2024
    - 27.1K bytes
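
    Here stream_executor.cc, on the framework side, copies the plugin-provided
    SP_Device strings into a DeviceDescription via the builder. The sketch
    below shows how those fields surface to framework code once the
    description is built, assuming the usual DeviceDescription accessors;
    header paths vary across TensorFlow versions.

      #include <iostream>

      #include "xla/stream_executor/device_description.h"
      #include "xla/stream_executor/stream_executor.h"

      // Prints the identity fields the builder above populated.
      void PrintDeviceIdentity(stream_executor::StreamExecutor* executor) {
        const stream_executor::DeviceDescription& desc =
            executor->GetDeviceDescription();
        std::cout << "name:       " << desc.name() << "\n"
                  << "vendor:     " << desc.device_vendor() << "\n"
                  << "pci bus id: " << desc.pci_bus_id() << std::endl;
      }
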
  3. tensorflow/compiler/jit/xla_host_send_device_context.h

      // Copies 'cpu_tensor' to `device_memory_base_` with `shape_`.
      // `device_tensor` is unused.
      void CopyCPUTensorToDevice(const Tensor* cpu_tensor, Device* device,
                                 Tensor* device_tensor, StatusCallback done,
                                 bool sync_dst_compute) const override;
    
      void CopyDeviceTensorToCPU(const Tensor* device_tensor,
                                 StringPiece tensor_name, Device* device,
    - Last Modified: Fri May 17 22:46:36 UTC 2024
    - 3.7K bytes
  4. tensorflow/compiler/jit/xla_host_send_recv_device_context_test.cc

    TEST_F(XlaHostSendRecvDeviceContextTest, CopyDeviceTensorToCPU) {
      SetDevice("GPU");
      Tensor origin_cpu_tensor(host_allocator_, DT_FLOAT, TensorShape({2, 2}));
      test::FillValues<float>(&origin_cpu_tensor, {1.2, 2.3, 3.4, 4.5});
      Tensor device_tensor(device_allocator_, DT_FLOAT, TensorShape({2, 2}));
      Tensor dest_cpu_tensor(host_allocator_, DT_FLOAT, TensorShape({2, 2}));
    
      stream_executor::Platform* platform =
    - Last Modified: Fri May 17 22:46:36 UTC 2024
    - 7.2K bytes
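
    The test above round-trips a 2x2 float tensor through the XLA host
    send/recv device contexts. Below is a minimal sketch of the DeviceContext
    copy pattern it exercises, using a Notification to block on the
    asynchronous done callback; the helper function and tensor name are
    hypothetical.

      #include "absl/synchronization/notification.h"
      #include "tensorflow/core/framework/device_base.h"
      #include "tensorflow/core/framework/tensor.h"
      #include "tensorflow/core/platform/status.h"

      // Copies a device tensor back to the host and waits for the async
      // done callback before returning its status.
      tensorflow::Status CopyBackToHost(tensorflow::DeviceContext* ctx,
                                        const tensorflow::Tensor& device_tensor,
                                        tensorflow::Device* device,
                                        tensorflow::Tensor* cpu_tensor) {
        absl::Notification done;
        tensorflow::Status copy_status;
        ctx->CopyDeviceTensorToCPU(&device_tensor, "tensor_name", device,
                                   cpu_tensor,
                                   [&](const tensorflow::Status& s) {
                                     copy_status = s;
                                     done.Notify();
                                   });
        done.WaitForNotification();
        return copy_status;
      }
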
  5. tensorflow/compiler/jit/xla_device_context.cc

              device_tensor->shape(), device_tensor->dtype(), std::nullopt);
      Status status = [&]() -> Status {
        TF_ASSIGN_OR_RETURN(xla::Shape shape,
                            shape_determination_fns_.shape_representation_fn(
                                device_tensor->shape(), device_tensor->dtype(),
                                /*fast_mem=*/false, layout_preference));
    
        // The device tensor should always be fresh.
    - Last Modified: Thu May 16 00:36:08 UTC 2024
    - 12.7K bytes
  6. tensorflow/compiler/jit/xla_host_recv_device_context.h

                                 Tensor* device_tensor, StatusCallback done,
                                 bool sync_dst_compute) const override {
        done(errors::Internal("host->device copy not implemented."));
      }
    
      // Copies `device_memory_base_` with `shape_` into `cpu_tensor`.
      // `device_tensor` is unused.
      void CopyDeviceTensorToCPU(const Tensor* device_tensor,
    - Last Modified: Fri May 17 22:46:36 UTC 2024
    - 3.9K bytes
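
    The header above declares a DeviceContext that implements only the
    device-to-host direction and rejects host-to-device copies. A minimal
    sketch of that one-directional pattern follows; the class name is
    hypothetical, and only the override shape mirrors the excerpt.

      #include "tensorflow/core/framework/device_base.h"
      #include "tensorflow/core/framework/tensor.h"
      #include "tensorflow/core/platform/errors.h"

      // A DeviceContext supporting a single copy direction; the unsupported
      // direction reports an Internal error through the done callback.
      class OneWayDeviceContext : public tensorflow::DeviceContext {
       public:
        void CopyCPUTensorToDevice(const tensorflow::Tensor* cpu_tensor,
                                   tensorflow::Device* device,
                                   tensorflow::Tensor* device_tensor,
                                   tensorflow::StatusCallback done,
                                   bool sync_dst_compute) const override {
          done(tensorflow::errors::Internal(
              "host->device copy not implemented."));
        }
        // CopyDeviceTensorToCPU would hold the real device-to-host copy,
        // as xla_host_recv_device_context.h does.
      };
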
  7. tensorflow/compiler/jit/xla_host_send_device_context.cc

    #include "tensorflow/compiler/tf2xla/type_util.h"
    
    namespace tensorflow {
    
    void XlaHostSendDeviceContext::CopyCPUTensorToDevice(
        const Tensor* cpu_tensor, Device* device, Tensor* device_tensor,
        StatusCallback done, bool sync_dst_compute) const {
      auto status = stream_->Memcpy(device_memory_base_, cpu_tensor->data(),
                                    device_memory_base_->size());
      if (!status.ok()) {
    - Last Modified: Fri May 17 22:46:36 UTC 2024
    - 1.6K bytes
  8. tensorflow/compiler/jit/xla_host_recv_device_context.cc

    #include "tensorflow/compiler/tf2xla/shape_util.h"
    #include "tensorflow/compiler/tf2xla/type_util.h"
    
    namespace tensorflow {
    
    void XlaHostRecvDeviceContext::CopyDeviceTensorToCPU(
        const Tensor* device_tensor, StringPiece tensor_name, Device* device,
        Tensor* cpu_tensor, StatusCallback done) {
      DataType dtype = EncodePrimitiveTypeAsDataType(shape_.element_type()).value();
      TensorShape tensor_shape;
    - Last Modified: Fri May 17 22:46:36 UTC 2024
    - 1.9K bytes
  9. tensorflow/compiler/jit/kernels/xla_ops.cc

            args.device_context = new XlaHostSendDeviceContext(
                stream, device_memory_base, shape, done_event);
    
            Tensor device_tensor;
            bool is_dead;
            TF_RETURN_IF_ERROR(ctx->rendezvous()->Recv(
                parsed_key, args, &device_tensor, /*is_dead=*/&is_dead));
    
            return std::move(done_event);
          };
    }
    
    absl::StatusOr<xla::ExecutionOutput> RunExecutable(
    - Last Modified: Fri May 17 22:46:36 UTC 2024
    - 41.4K bytes
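
    In xla_ops.cc the host-callback path constructs an XlaHostSendDeviceContext,
    attaches it to the rendezvous arguments, and then receives the tensor so
    the copy runs through that context. Below is a minimal sketch of that
    attach-and-receive pattern, with the rendezvous, parsed key, and context
    supplied by the caller; the helper name is hypothetical.

      #include "tensorflow/core/framework/device_base.h"
      #include "tensorflow/core/framework/rendezvous.h"
      #include "tensorflow/core/framework/tensor.h"

      // Receives a tensor through a rendezvous, routing the copy through the
      // supplied DeviceContext (e.g. an XlaHostSendDeviceContext).
      tensorflow::Status RecvThroughContext(
          tensorflow::RendezvousInterface* rendezvous,
          const tensorflow::Rendezvous::ParsedKey& parsed_key,
          tensorflow::DeviceContext* device_context, tensorflow::Tensor* out) {
        tensorflow::Rendezvous::Args args;
        args.device_context = device_context;
        bool is_dead = false;
        return rendezvous->Recv(parsed_key, args, out, &is_dead);
      }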