Results 1 - 4 of 4 for allocate_xla_tensors_ (0.22 sec)

  1. tensorflow/compiler/jit/xla_launch_util.cc

        int device_ordinal, bool allocate_xla_tensors, bool use_multiple_streams)
        : client_(client),
          xla_allocator_(xla_allocator),
          allocate_xla_tensors_(allocate_xla_tensors),
          use_multiple_streams_(use_multiple_streams),
          device_ordinal_(device_ordinal) {
      if (use_multiple_streams_) {
        CHECK(allocate_xla_tensors_) << "To use multiple streams correctly we must "
    - Last Modified: Thu May 16 00:36:08 UTC 2024
    - 40.4K bytes
  2. tensorflow/compiler/jit/xla_launch_util.h

     public:
      // Create a new launch context. 'allocate_xla_tensors' is true if allocated
      // output tensors and variables are always XlaTensors. If false they are
      // assumed to be "normal" device pointers.
      // If 'use_multiple_streams' is true, tensors may be defined and used on
      // multiple streams and so se::Events must be defined and waited for. If
      // 'use_multiple_streams' is true, 'allocate_xla_tensors' must also be true
    - Last Modified: Wed Feb 21 09:53:30 UTC 2024
    - 11.8K bytes
  3. tensorflow/compiler/jit/xla_compile_on_demand_op.cc

      se::DeviceMemoryAllocator* allocator = allocator_ptr.get();
      XlaComputationLaunchContext launch_context(
          client, allocator, client->default_device_ordinal(),
          /*allocate_xla_tensors=*/platform_info_.xla_device_metadata() != nullptr,
          platform_info_.xla_device_metadata()
              ? platform_info_.xla_device_metadata()->UseMultipleStreams()
              : false);
    
    - Last Modified: Thu Feb 29 08:39:39 UTC 2024
    - 13.4K bytes
  4. tensorflow/compiler/jit/kernels/xla_ops.cc

                                  : client->default_device_ordinal();
      XlaComputationLaunchContext launch_context(
          client, allocator, device_ordinal,
          /*allocate_xla_tensors=*/platform_info.is_on_xla_device(),
          /*use_multiple_streams=*/platform_info.UseMultipleStreams());
      return launch_context;
    }
    
    Status GetTaskName(const std::string_view device_name, std::string* task_name) {
    - Last Modified: Fri May 17 22:46:36 UTC 2024
    - 41.4K bytes
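Read together, the four results document one invariant around allocate_xla_tensors_: the XlaComputationLaunchContext constructor in xla_launch_util.cc CHECK-fails when use_multiple_streams is requested without allocate_xla_tensors, the header comment in xla_launch_util.h states the same requirement, and both call sites (xla_compile_on_demand_op.cc and kernels/xla_ops.cc) derive the two flags from the device's platform info. The sketch below is not taken from the indexed files; it only illustrates a caller that keeps the flags consistent. The helper name MakeLaunchContext and the on_xla_device parameter are hypothetical, while the constructor arguments follow the call sites shown above.

    // Hypothetical helper (not in the TensorFlow sources above): builds a
    // launch context whose flags can never trip the CHECK in
    // xla_launch_util.cc, i.e. use_multiple_streams implies
    // allocate_xla_tensors.
    #include "tensorflow/compiler/jit/xla_launch_util.h"

    tensorflow::XlaComputationLaunchContext MakeLaunchContext(
        xla::LocalClient* client, se::DeviceMemoryAllocator* allocator,
        bool on_xla_device, bool use_multiple_streams) {
      // Mirror the call sites above: allocate XlaTensors when running on an
      // XLA device, and force them on whenever multiple streams are requested.
      const bool allocate_xla_tensors = on_xla_device || use_multiple_streams;
      return tensorflow::XlaComputationLaunchContext(
          client, allocator, client->default_device_ordinal(),
          /*allocate_xla_tensors=*/allocate_xla_tensors,
          /*use_multiple_streams=*/use_multiple_streams);
    }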