Results 1 - 10 of 13 for use_multiple_streams (0.21 sec)

  1. tensorflow/compiler/jit/xla_device.cc

            shape_determination_fns,
        PaddedShapeFn padded_shape_fn, bool use_multiple_streams)
        : device_ordinal_(device_ordinal),
          device_type_(device_type),
          platform_(platform),
          shape_determination_fns_(std::move(shape_determination_fns)),
          padded_shape_fn_(std::move(padded_shape_fn)),
          use_multiple_streams_(use_multiple_streams) {}
    
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Mon May 20 21:05:42 UTC 2024
    - 24.3K bytes
    - Viewed (0)
  2. tensorflow/compiler/jit/xla_device.h

      // for each device to host transfer.
      const bool use_multiple_streams_;
      // If use_multiple_streams_, host to device transfers are performed using this
      // stream.
      std::shared_ptr<se::Stream> host_to_device_stream_ TF_GUARDED_BY(mu_);
      // If use_multiple_streams_, transfers between different devices are performed
      // using these streams.
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed Feb 21 09:53:30 UTC 2024
    - 13.4K bytes
    - Viewed (0)
  3. tensorflow/compiler/jit/xla_launch_util.h

      // output tensors and variables are always XlaTensors. If false they are
      // assumed to be "normal" device pointers.
      // If 'use_multiple_streams' is true, tensors may be defined and used on
      // multiple streams and so se::Events must be defined and waited for. If
      // 'use_multiple_streams' is true, 'allocate_xla_tensors' must also be true
      // because we track inter-stream dependencies through events inside XlaTensor
      // objects.
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed Feb 21 09:53:30 UTC 2024
    - 11.8K bytes
    - Viewed (0)
  4. tensorflow/compiler/jit/xla_launch_util.cc

        int device_ordinal, bool allocate_xla_tensors, bool use_multiple_streams)
        : client_(client),
          xla_allocator_(xla_allocator),
          allocate_xla_tensors_(allocate_xla_tensors),
          use_multiple_streams_(use_multiple_streams),
          device_ordinal_(device_ordinal) {
      if (use_multiple_streams_) {
        CHECK(allocate_xla_tensors_) << "To use multiple streams correctly we must "
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu May 16 00:36:08 UTC 2024
    - 40.4K bytes
    - Viewed (0)
  5. tensorflow/compiler/jit/xla_cpu_device.cc

      options.platform = platform;
      options.device_name_prefix = name_prefix;
      options.device_name = DEVICE_XLA_CPU;
      options.device_ordinal = 0;
      options.compilation_device_name = DEVICE_CPU_XLA_JIT;
      options.use_multiple_streams = false;
      XlaShapeLayoutHelpers::ShapeDeterminationFns shape_representation_fns{
          UseNoPreferenceLayoutFn(), IdentityShapeRepresentationFn()};
      options.shape_determination_fns = {shape_representation_fns};
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Feb 22 08:47:20 UTC 2024
    - 5.5K bytes
    - Viewed (0)
  6. tensorflow/compiler/jit/xla_gpu_device.cc

        options.device_name_prefix = name_prefix;
        options.device_name = DEVICE_XLA_GPU;
        options.device_ordinal = i;
        options.compilation_device_name = DEVICE_GPU_XLA_JIT;
        options.use_multiple_streams = true;
        options.allowed_devices = gpu_ids;
        XlaShapeLayoutHelpers::ShapeDeterminationFns shape_representation_fns{
            UseNoPreferenceLayoutFn(), IdentityShapeRepresentationFn()};
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Feb 22 08:47:20 UTC 2024
    - 6.6K bytes
    - Viewed (0)
  7. tensorflow/compiler/jit/xla_compiler_options_util_test.cc

      return std::make_unique<XlaDevice::Metadata>(
          /*device_ordinal=*/0, /*platform=*/nullptr, compilation_device_type,
          GetShapeDeterminationFns(), XlaDevice::PaddedShapeFn(),
          /*use_multiple_streams=*/false);
    }
    
    std::unique_ptr<PjRtBaseDevice::Metadata> CreatePjRtDeviceMetadata(
        DeviceType compilation_device_type) {
      return std::make_unique<PjRtBaseDevice::Metadata>(compilation_device_type,
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Fri Dec 29 01:41:20 UTC 2023
    - 14.8K bytes
    - Viewed (0)
  8. tensorflow/compiler/jit/xla_tpu_device.cc

        options.device_name_prefix = name_prefix;
        options.device_name = DEVICE_TPU_NODE;
        options.device_ordinal = i;
        options.compilation_device_name = DEVICE_TPU_XLA_JIT;
        options.use_multiple_streams = true;
        // We set `use_global_compute_stream` to true for TPUs as TPUs can only
        // have one program running on each core at the same time.
        options.use_global_compute_stream = true;
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue May 28 22:53:47 UTC 2024
    - 20.9K bytes
    - Viewed (0)
  9. tensorflow/compiler/jit/kernels/xla_ops.cc

      XlaComputationLaunchContext launch_context(
          client, allocator, device_ordinal,
          /*allocate_xla_tensors=*/platform_info.is_on_xla_device(),
          /*use_multiple_streams=*/platform_info.UseMultipleStreams());
      return launch_context;
    }
    
    Status GetTaskName(const std::string_view device_name, std::string* task_name) {
      string ignored;
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Fri May 17 22:46:36 UTC 2024
    - 41.4K bytes
    - Viewed (0)
  10. tensorflow/compiler/jit/xla_device_context.cc

            xla::ShapeUtil::MakeShape(shape.element_type(), shape.dimensions()));
    
        VLOG(2) << "Transfer to device as literal: " << literal.ToString() << " "
                << xla_tensor->shaped_buffer().ToString();
        if (UseMultipleStreams() &&
            !transfer_manager_->CanShapedBufferBeAccessedNow(
                stream_->parent(), xla_tensor->shaped_buffer())) {
          // Initially wait for the compute stream so that memory allocations are
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu May 16 00:36:08 UTC 2024
    - 12.7K bytes
    - Viewed (0)
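
Note on the hits above: the XLA CPU device registers with use_multiple_streams = false (result 5), while the XLA GPU and TPU devices register with it set to true (results 6 and 8). When the flag is on, XlaComputationLaunchContext also requires allocate_xla_tensors to be true, because inter-stream dependencies are tracked through se::Events stored inside XlaTensor objects (results 3 and 4), and the CHECK shown in result 4 enforces exactly that.

The sketch below paraphrases the call site from result 9. It is a minimal illustration rather than a standalone program: the parameter types (xla::LocalClient*, se::DeviceMemoryAllocator*) and the helper name MakeLaunchContext are assumptions beyond what the excerpts show, and client, allocator, device_ordinal and platform_info are assumed to come from the enclosing kernel.

    // Sketch: wiring up XlaComputationLaunchContext as in
    // tensorflow/compiler/jit/kernels/xla_ops.cc (result 9).
    #include "tensorflow/compiler/jit/xla_launch_util.h"
    #include "tensorflow/compiler/jit/xla_platform_info.h"

    namespace tensorflow {

    // Hypothetical helper; the argument order matches results 4 and 9:
    // (client, allocator, device_ordinal, allocate_xla_tensors,
    //  use_multiple_streams).
    XlaComputationLaunchContext MakeLaunchContext(
        xla::LocalClient* client, se::DeviceMemoryAllocator* allocator,
        int device_ordinal, const XlaPlatformInfo& platform_info) {
      // On multi-stream devices (XLA GPU/TPU) UseMultipleStreams() is true,
      // so allocate_xla_tensors must also be true; the constructor CHECKs
      // this because cross-stream ordering is recorded via se::Events held
      // in XlaTensor objects (results 3 and 4).
      return XlaComputationLaunchContext(
          client, allocator, device_ordinal,
          /*allocate_xla_tensors=*/platform_info.is_on_xla_device(),
          /*use_multiple_streams=*/platform_info.UseMultipleStreams());
    }

    }  // namespace tensorflow

The practical consequence of the coupling: a single-stream device such as XLA CPU can hand out plain device pointers, while multi-stream devices wrap outputs in XlaTensor so each tensor can carry the event that orders its producer and consumers across streams.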