Results 11 - 16 of 16 for xla_tensor (0.4 sec)

  1. tensorflow/compiler/jit/xla_device.h

    #ifndef TENSORFLOW_COMPILER_JIT_XLA_DEVICE_H_
    #define TENSORFLOW_COMPILER_JIT_XLA_DEVICE_H_
    #include <set>
    
    #include "absl/types/optional.h"
    #include "tensorflow/compiler/jit/xla_tensor.h"
    #include "tensorflow/compiler/tf2xla/layout_util.h"
    #include "tensorflow/compiler/tf2xla/xla_compiler.h"
    #include "tensorflow/compiler/tf2xla/xla_op_registry.h"
    #include "xla/client/local_client.h"
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed Feb 21 09:53:30 UTC 2024
    - 13.4K bytes
    - Viewed (0)
  2. tensorflow/c/eager/dlpack.cc

      DLManagedTensor* dlm_tensor = &tf_dlm_tensor_ctx->tensor;
      dlm_tensor->manager_ctx = tf_dlm_tensor_ctx;
      dlm_tensor->deleter = &DLManagedTensorDeleter;
      dlm_tensor->dl_tensor.device = tf_dlm_context;
      int ndim = tensor->dims();
      dlm_tensor->dl_tensor.ndim = ndim;
      dlm_tensor->dl_tensor.data = tf_dlm_data;
      dlm_tensor->dl_tensor.dtype = tf_dlm_type;
    
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Feb 15 09:49:45 UTC 2024
    - 12.8K bytes
    - Viewed (0)
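
    The snippet above is the producer side of TensorFlow's DLPack export: it
    fills in a DLManagedTensor whose manager_ctx and deleter tie the buffer's
    lifetime to the consumer. A rough self-contained sketch of the same
    pattern, assuming only the standard dlpack.h header (the BufferCtx struct
    and MakeCpuFloatTensor below are illustrative, not TensorFlow's API):

      #include <cstdint>
      #include <utility>
      #include <vector>

      #include "dlpack/dlpack.h"  // assumed available on the include path

      // Illustrative context object that owns the storage the DLTensor views.
      struct BufferCtx {
        std::vector<float> data;
        std::vector<int64_t> shape;
        DLManagedTensor tensor;
      };

      // Deleter the consumer calls once it is done with the tensor.
      void DeleteBufferCtx(DLManagedTensor* arg) {
        delete static_cast<BufferCtx*>(arg->manager_ctx);
      }

      DLManagedTensor* MakeCpuFloatTensor(std::vector<float> data,
                                          std::vector<int64_t> shape) {
        auto* ctx = new BufferCtx{std::move(data), std::move(shape), {}};
        DLManagedTensor* dlm = &ctx->tensor;
        dlm->manager_ctx = ctx;            // lifetime handle for the deleter
        dlm->deleter = &DeleteBufferCtx;
        dlm->dl_tensor.data = ctx->data.data();
        dlm->dl_tensor.device = {kDLCPU, 0};
        dlm->dl_tensor.ndim = static_cast<int32_t>(ctx->shape.size());
        dlm->dl_tensor.dtype = {kDLFloat, 32, 1};  // float32, one lane
        dlm->dl_tensor.shape = ctx->shape.data();
        dlm->dl_tensor.strides = nullptr;  // nullptr means compact row-major
        dlm->dl_tensor.byte_offset = 0;
        return dlm;
      }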
  3. tensorflow/c/eager/dlpack_test.cc

        num_elements *= shape[i];
      }
      std::vector<float> data(num_elements);
      for (size_t j = 0; j < num_elements; ++j) {
        data[j] = j;
      }
      DLManagedTensor dlm_in = {};
      DLTensor* dltensor_in = &dlm_in.dl_tensor;
      dltensor_in->data = data.data();
      dltensor_in->device = {kDLCPU, 0};
      dltensor_in->ndim = static_cast<int32_t>(shape.size());
      dltensor_in->dtype = {kDLFloat, 32, 1};
      dltensor_in->shape = shape.data();
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Fri Jun 30 03:04:46 UTC 2023
    - 4.4K bytes
    - Viewed (0)
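
    This test builds the DLTensor by hand over a stack-owned buffer. The
    matching read side recomputes the element count from ndim and shape, as in
    the loop at the top of the snippet; a minimal sketch (PrintFloatTensor is
    illustrative, and dlpack.h is again assumed):

      #include <cstdint>
      #include <iostream>
      #include <vector>

      #include "dlpack/dlpack.h"  // assumed available on the include path

      // Prints a compact (strides == nullptr) float32 CPU tensor.
      void PrintFloatTensor(const DLTensor& t) {
        int64_t num_elements = 1;
        for (int32_t i = 0; i < t.ndim; ++i) num_elements *= t.shape[i];
        const float* values = static_cast<const float*>(t.data);
        for (int64_t j = 0; j < num_elements; ++j) std::cout << values[j] << ' ';
        std::cout << '\n';
      }

      int main() {
        std::vector<int64_t> shape = {2, 3};
        std::vector<float> data = {0, 1, 2, 3, 4, 5};
        DLTensor t = {};  // zero-init leaves strides/byte_offset at 0
        t.data = data.data();
        t.device = {kDLCPU, 0};
        t.ndim = static_cast<int32_t>(shape.size());
        t.dtype = {kDLFloat, 32, 1};
        t.shape = shape.data();
        PrintFloatTensor(t);  // prints: 0 1 2 3 4 5
      }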
  4. tensorflow/compiler/mlir/lite/experimental/remat/rematerializer.cc

        for (const int old_tensor : std::vector<int>(operations_[iop].tensors)) {
          const auto new_tensor =
              std::lower_bound(new_tensors.begin(), new_tensors.end(),
                               std::make_pair(old_tensor, 0));
          if (new_tensor != new_tensors.end() && new_tensor->first == old_tensor) {
            DelUse(iop, old_tensor);
            AddUse(iop, new_tensor->second);
          }
        }
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue Mar 14 20:57:44 UTC 2023
    - 13.7K bytes
    - Viewed (0)
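
    The rematerializer keeps its (old tensor, new tensor) pairs sorted, so the
    snippet can binary-search the table with std::lower_bound keyed on the old
    id and rewire each use it finds. The lookup-and-remap idiom in isolation
    (names here are illustrative, not the rematerializer's own):

      #include <algorithm>
      #include <cstdio>
      #include <utility>
      #include <vector>

      int main() {
        // Sorted (old_id, new_id) pairs, e.g. from a tensor-cloning pass.
        std::vector<std::pair<int, int>> new_tensors = {{2, 10}, {5, 11}, {7, 12}};
        std::vector<int> uses = {1, 2, 5, 6, 7};

        for (int& tensor : uses) {
          // Pairs compare lexicographically, so make_pair(tensor, 0) lands on
          // the entry for `tensor` whenever one exists (new ids are >= 0).
          auto it = std::lower_bound(new_tensors.begin(), new_tensors.end(),
                                     std::make_pair(tensor, 0));
          if (it != new_tensors.end() && it->first == tensor) {
            tensor = it->second;  // stands in for DelUse/AddUse above
          }
        }
        for (int t : uses) std::printf("%d ", t);  // prints: 1 10 11 6 12
      }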
  5. tensorflow/compiler/jit/xla_cpu_device.cc

      // context in tensorflow_accelerator_device_info(). Also,
      // tensorflow_accelerator_device_info() == nullptr is used as an IsCPU test.
      // We need XlaCpuDevice to be treated not as CPU because it allocates
      // XlaTensors, not regular Tensors.
      Status status = device->UseAcceleratorDeviceInfo();
      if (!status.ok()) {
        errors::AppendToMessage(&status, "while setting up ", DEVICE_GPU_XLA_JIT);
        return status;
      }
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Feb 22 08:47:20 UTC 2024
    - 5.5K bytes
    - Viewed (0)
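
    The snippet follows the usual Status idiom: run the setup step, and on
    failure attach context to the error before propagating it.
    errors::AppendToMessage is TensorFlow-internal; a rough equivalent with
    plain absl::Status (the device-name string and the
    UseAcceleratorDeviceInfo stub below are illustrative):

      #include <iostream>

      #include "absl/status/status.h"
      #include "absl/strings/str_cat.h"

      // Stub standing in for device->UseAcceleratorDeviceInfo().
      absl::Status UseAcceleratorDeviceInfo() {
        return absl::InternalError("no accelerator device info");
      }

      absl::Status SetUpDevice() {
        absl::Status status = UseAcceleratorDeviceInfo();
        if (!status.ok()) {
          // Rebuild the status with extra context, keeping the original code.
          return absl::Status(
              status.code(),
              absl::StrCat(status.message(), " while setting up XLA_CPU_JIT"));
        }
        return absl::OkStatus();
      }

      int main() {
        // Prints: INTERNAL: no accelerator device info while setting up XLA_CPU_JIT
        std::cout << SetUpDevice() << "\n";
      }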
  6. tensorflow/compiler/jit/xla_platform_info.cc

        //
        // Importantly we can't use ctx->device()->GetAllocator() as the allocator
        // (which xla_allocator above uses) as on an XlaDevice, this is a dummy
        // allocator that returns XlaTensor objects. The XlaCompiler needs a real
        // allocator to allocate real buffers.
        platform_id = xla_device_metadata->platform()->id();
        custom_allocator =
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu May 02 17:23:27 UTC 2024
    - 17.4K bytes
    - Viewed (0)
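
    The comment explains the key constraint: on an XlaDevice the device
    allocator hands back XlaTensor wrapper objects rather than addressable
    buffers, so the compiler must be pointed at an allocator backed by real
    memory. A toy illustration of that distinction (the interface and both
    classes are hypothetical; TensorFlow's Allocator is richer):

      #include <cstddef>
      #include <cstdlib>

      // Hypothetical minimal allocator interface.
      class Allocator {
       public:
        virtual ~Allocator() = default;
        virtual void* AllocateRaw(size_t num_bytes) = 0;
        virtual void DeallocateRaw(void* ptr) = 0;
      };

      // Stand-in for the XlaDevice allocator: its "buffers" are opaque
      // wrappers, not byte arrays a compiler could write constants into.
      struct OpaqueWrapper {};

      class WrapperAllocator : public Allocator {
       public:
        void* AllocateRaw(size_t) override { return new OpaqueWrapper; }
        void DeallocateRaw(void* ptr) override {
          delete static_cast<OpaqueWrapper*>(ptr);
        }
      };

      // Stand-in for a real allocator backed by addressable memory.
      class RealAllocator : public Allocator {
       public:
        void* AllocateRaw(size_t num_bytes) override { return std::malloc(num_bytes); }
        void DeallocateRaw(void* ptr) override { std::free(ptr); }
      };

      int main() {
        // Compilation-side code must get the real allocator: treating a
        // WrapperAllocator result as a writable buffer would scribble over
        // the wrapper object instead of filling device memory.
        RealAllocator real;
        void* buf = real.AllocateRaw(1024);
        real.DeallocateRaw(buf);
      }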