Search Options

Results per page
Sort
Preferred Languages
Advanced

Results 1 - 6 of 6 for xla_tensor (0.19 sec)

  1. tensorflow/compiler/jit/xla_tensor.cc

    }
    
    /*static*/ se::DeviceMemoryBase XlaTensor::DeviceMemoryFromTensor(
        const Tensor& tensor) {
      // When the Tensor is backed by an XlaTensor, expose the root buffer of
      // its shaped buffer (it must already have one — CHECK enforces that).
      if (const XlaTensor* as_xla = FromTensor(&tensor)) {
        CHECK(as_xla->has_shaped_buffer());
        return as_xla->shaped_buffer().root_buffer();
      }
      // Otherwise wrap the tensor's raw byte storage directly. const_cast is
      // needed because DeviceMemoryBase takes a mutable pointer, while
      // tensor_data() hands back a read-only view.
      const auto raw_bytes = tensor.tensor_data();
      return se::DeviceMemoryBase(const_cast<char*>(raw_bytes.data()),
                                  raw_bytes.size());
    }
    
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Feb 22 08:47:20 UTC 2024
    - 4.5K bytes
    - Viewed (0)
  2. tensorflow/compiler/jit/xla_tensor.h

    //
    // To distinguish between "normal" device tensors and XlaTensors, the raw
    // pointer data stored in the TensorBuffer is a tagged pointer.
    class XlaTensor {
     public:
      // Downcast from a Tensor to an XlaTensor. Return nullptr if the downcast
      // fails.
      static XlaTensor* FromTensor(const Tensor* tensor);
    
      // Create a DeviceMemoryBase from a Tensor. The Tensor can be an XlaTensor, in
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed Sep 06 19:12:29 UTC 2023
    - 4.7K bytes
    - Viewed (0)
  3. tensorflow/compiler/jit/xla_device_ops.cc

    limitations under the License.
    ==============================================================================*/
    
    #include "tensorflow/compiler/jit/xla_device_ops.h"
    
    #include <memory>
    
    #include "tensorflow/compiler/jit/xla_tensor.h"
    
    namespace tensorflow {
    
    XlaDeviceDummyOp::XlaDeviceDummyOp(OpKernelConstruction* ctx) : OpKernel(ctx) {}
    
    void XlaDeviceDummyOp::Compute(OpKernelContext* ctx) {
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Feb 22 08:47:20 UTC 2024
    - 3K bytes
    - Viewed (0)
  4. tensorflow/compiler/jit/xla_device_context.h

    #ifndef TENSORFLOW_COMPILER_JIT_XLA_DEVICE_CONTEXT_H_
    #define TENSORFLOW_COMPILER_JIT_XLA_DEVICE_CONTEXT_H_
    
    #include <memory>
    
    #include "absl/synchronization/mutex.h"
    #include "tensorflow/compiler/jit/xla_tensor.h"
    #include "tensorflow/compiler/tf2xla/layout_util.h"
    #include "tensorflow/compiler/tf2xla/xla_compiler.h"
    #include "xla/client/global_data.h"
    #include "xla/client/local_client.h"
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed Sep 06 19:12:29 UTC 2023
    - 5.1K bytes
    - Viewed (0)
  5. tensorflow/c/eager/dlpack_test.cc

        num_elements *= shape[i];
      }
      std::vector<float> data(num_elements);
      for (size_t j = 0; j < num_elements; ++j) {
        data[j] = j;
      }
      DLManagedTensor dlm_in = {};
      DLTensor* dltensor_in = &dlm_in.dl_tensor;
      dltensor_in->data = data.data();
      dltensor_in->device = {kDLCPU, 0};
      dltensor_in->ndim = static_cast<int32_t>(shape.size());
      dltensor_in->dtype = {kDLFloat, 32, 1};
      dltensor_in->shape = shape.data();
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Fri Jun 30 03:04:46 UTC 2023
    - 4.4K bytes
    - Viewed (0)
  6. tensorflow/compiler/jit/xla_cpu_device.cc

      // context in tensorflow_accelerator_device_info(). Also,
      // tensorflow_accelerator_device_info() == nullptr is used as an IsCPU test.
      // We need XlaCpuDevice to be treated not as CPU because it allocates
      // XlaTensors, not regular Tensors.
      Status status = device->UseAcceleratorDeviceInfo();
      if (!status.ok()) {
        errors::AppendToMessage(&status, "while setting up ", DEVICE_GPU_XLA_JIT);
        return status;
      }
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Feb 22 08:47:20 UTC 2024
    - 5.5K bytes
    - Viewed (0)
Back to top