Results 1 - 4 of 4 for use_multiple_streams_ (0.18 sec)

  1. tensorflow/compiler/jit/xla_launch_util.cc

        int device_ordinal, bool allocate_xla_tensors, bool use_multiple_streams)
        : client_(client),
          xla_allocator_(xla_allocator),
          allocate_xla_tensors_(allocate_xla_tensors),
          use_multiple_streams_(use_multiple_streams),
          device_ordinal_(device_ordinal) {
      if (use_multiple_streams_) {
        CHECK(allocate_xla_tensors_) << "To use multiple streams correctly we must "
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu May 16 00:36:08 UTC 2024
    - 40.4K bytes
  2. tensorflow/compiler/jit/xla_device.cc

            shape_determination_fns,
        PaddedShapeFn padded_shape_fn, bool use_multiple_streams)
        : device_ordinal_(device_ordinal),
          device_type_(device_type),
          platform_(platform),
          shape_determination_fns_(std::move(shape_determination_fns)),
          padded_shape_fn_(std::move(padded_shape_fn)),
          use_multiple_streams_(use_multiple_streams) {}
    
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Mon May 20 21:05:42 UTC 2024
    - 24.3K bytes
  3. tensorflow/compiler/jit/xla_tpu_device.cc

        options.device_name_prefix = name_prefix;
        options.device_name = DEVICE_TPU_NODE;
        options.device_ordinal = i;
        options.compilation_device_name = DEVICE_TPU_XLA_JIT;
        options.use_multiple_streams = true;
        // We set `use_global_compute_stream` to true for TPUs as TPUs can only
        // have one program running on each core at the same time.
        options.use_global_compute_stream = true;
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue May 28 22:53:47 UTC 2024
    - 20.9K bytes
  4. tensorflow/compiler/jit/kernels/xla_ops.cc

      XlaComputationLaunchContext launch_context(
          client, allocator, device_ordinal,
          /*allocate_xla_tensors=*/platform_info.is_on_xla_device(),
          /*use_multiple_streams=*/platform_info.UseMultipleStreams());
      return launch_context;
    }
    
    Status GetTaskName(const std::string_view device_name, std::string* task_name) {
      string ignored;
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Fri May 17 22:46:36 UTC 2024
    - 41.4K bytes
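
The constructor in result 1 (xla_launch_util.cc) enforces an invariant: when use_multiple_streams_ is set, allocate_xla_tensors_ must also be set, and result 4 (xla_ops.cc) shows both flags being derived from platform_info before the context is built. The sketch below is a minimal, self-contained illustration of that invariant only; LaunchContextSketch and its members are simplified stand-ins, not the actual TensorFlow types, and the stated rationale (synchronization state lives on XLA-managed tensors) is an assumption, since the original CHECK message is truncated in the snippet.

    #include <cassert>
    #include <iostream>

    // Hypothetical, simplified stand-in for the launch-context class in
    // result 1. It models only the invariant visible in the snippet:
    // enabling multiple streams requires allocating XLA-managed tensors.
    class LaunchContextSketch {
     public:
      LaunchContextSketch(int device_ordinal, bool allocate_xla_tensors,
                          bool use_multiple_streams)
          : device_ordinal_(device_ordinal),
            allocate_xla_tensors_(allocate_xla_tensors),
            use_multiple_streams_(use_multiple_streams) {
        if (use_multiple_streams_) {
          // Mirrors the shape of the CHECK in xla_launch_util.cc; the
          // rationale (synchronization state is attached to XLA tensors)
          // is an assumption, since the original message is truncated.
          assert(allocate_xla_tensors_ &&
                 "use_multiple_streams requires allocate_xla_tensors");
        }
      }

      int device_ordinal() const { return device_ordinal_; }
      bool use_multiple_streams() const { return use_multiple_streams_; }

     private:
      int device_ordinal_;
      bool allocate_xla_tensors_;
      bool use_multiple_streams_;
    };

    int main() {
      // Valid combination: multiple streams together with XLA tensors.
      LaunchContextSketch ok(/*device_ordinal=*/0,
                             /*allocate_xla_tensors=*/true,
                             /*use_multiple_streams=*/true);
      std::cout << "device " << ok.device_ordinal()
                << " multiple streams: " << ok.use_multiple_streams() << "\n";
      // Passing use_multiple_streams=true with allocate_xla_tensors=false
      // would trip the assertion above.
      return 0;
    }

Result 4 shows the real call site deriving both booleans from the same platform_info (is_on_xla_device() and UseMultipleStreams()), which keeps the combination consistent with this invariant.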
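
Result 3 (xla_tpu_device.cc) populates a per-core options struct, enabling use_multiple_streams and a single global compute stream for TPUs. The sketch below re-creates that configuration pattern with a hypothetical DeviceOptionsSketch struct; the field names follow the snippet, but the struct itself, the string values standing in for DEVICE_TPU_NODE / DEVICE_TPU_XLA_JIT, and the core count are illustrative assumptions, not the TensorFlow definitions.

    #include <iostream>
    #include <string>
    #include <vector>

    // Hypothetical, simplified stand-in for the device-registration options
    // populated per core in result 3. The field names follow the snippet;
    // the struct itself and the string values are illustrative only.
    struct DeviceOptionsSketch {
      std::string device_name_prefix;
      std::string device_name;
      int device_ordinal = 0;
      std::string compilation_device_name;
      bool use_multiple_streams = false;
      bool use_global_compute_stream = false;
    };

    int main() {
      const std::string name_prefix = "/job:localhost/replica:0/task:0";
      const int num_tpu_cores = 2;  // assumed core count for the example

      std::vector<DeviceOptionsSketch> all_options;
      for (int i = 0; i < num_tpu_cores; ++i) {
        DeviceOptionsSketch options;
        options.device_name_prefix = name_prefix;
        options.device_name = "TPU";                      // stands in for DEVICE_TPU_NODE
        options.device_ordinal = i;
        options.compilation_device_name = "XLA_TPU_JIT";  // stands in for DEVICE_TPU_XLA_JIT
        options.use_multiple_streams = true;
        // As in the snippet: one global compute stream, because each TPU
        // core runs only one program at a time.
        options.use_global_compute_stream = true;
        all_options.push_back(options);
      }

      std::cout << "configured " << all_options.size() << " TPU device option set(s)\n";
      return 0;
    }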