Results 1 - 3 of 3 for set_intra_op_parallelism_threads (0.38 sec)

  1. tensorflow/compiler/jit/xla_platform_info.cc

        }
      }
    
      xla::LocalClientOptions client_options;
      client_options.set_platform(platform.value());
      if (device != nullptr) {
        client_options.set_intra_op_parallelism_threads(
            device->tensorflow_cpu_worker_threads()->num_threads);
      }
    
      if (flr != nullptr) {
        TF_ASSIGN_OR_RETURN(auto allowed_gpus, GetAllowedGpus(flr));
    - Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu May 02 17:23:27 UTC 2024
    - 17.4K bytes
    - Viewed (0)
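
In this excerpt the intra-op thread count handed to XLA comes from the device's CPU worker-thread pool (device->tensorflow_cpu_worker_threads()->num_threads). A minimal sketch of the same call pattern in isolation, assuming the platform pointer and thread count are supplied by the caller; the helper name and include paths below are illustrative, not taken from the file:

    // Illustrative sketch only: include paths vary across TensorFlow/XLA
    // versions, and this helper is not part of xla_platform_info.cc.
    #include "xla/client/client_library.h"
    #include "xla/client/local_client.h"

    xla::LocalClientOptions MakeClientOptions(stream_executor::Platform* platform,
                                              int cpu_worker_threads) {
      xla::LocalClientOptions client_options;
      client_options.set_platform(platform);
      // Bound XLA's intra-op parallelism by the device's CPU worker thread
      // count, as the excerpt above does.
      client_options.set_intra_op_parallelism_threads(cpu_worker_threads);
      return client_options;
    }
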
  2. tensorflow/compiler/jit/xla_device.cc

      // to a simulator.
    
      xla::LocalClientOptions options;
      options.set_platform(platform_)
          .set_allowed_devices(allowed_devices_)
          .set_intra_op_parallelism_threads(intra_op_parallelism_threads_);
      return xla::ClientLibrary::GetOrCreateLocalClient(options);
    }
    
    Allocator* XlaDevice::GetAllocator(AllocatorAttributes attr) {
      mutex_lock lock(mu_);
    - Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Mon May 20 21:05:42 UTC 2024
    - 24.3K bytes
    - Viewed (0)
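
The xla_device.cc excerpt chains the option setters and passes the result to xla::ClientLibrary::GetOrCreateLocalClient. A sketch of that chained style follows; the allowed-devices parameter type and the error handling are assumptions and may differ between versions:

    // Sketch only: parameter types are approximate, and .value() simply
    // aborts on error to keep the example short.
    #include <optional>
    #include <set>
    #include "xla/client/client_library.h"

    xla::LocalClient* GetClient(stream_executor::Platform* platform,
                                const std::optional<std::set<int>>& allowed_devices,
                                int intra_op_threads) {
      xla::LocalClientOptions options;
      options.set_platform(platform)
          .set_allowed_devices(allowed_devices)
          .set_intra_op_parallelism_threads(intra_op_threads);
      return xla::ClientLibrary::GetOrCreateLocalClient(options).value();
    }

The chaining works because each setter evidently returns the options object itself, in the usual builder style, as the excerpt's own call chain shows.
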
  3. tensorflow/c/c_api_experimental.cc

      // Setting a larger thread pool does not help with the Swift caller, as we use
      // a different TFE context for each thread of execution (for running graph
      // functions, and their send/recv coroutines).
      config.set_inter_op_parallelism_threads(1);
    
      TF_Buffer* ret = TF_NewBuffer();
      TF_CHECK_OK(MessageToBuffer(config, ret));
      return ret;
    }
    
    TF_Buffer* TF_CreateRunOptions(unsigned char enable_full_trace) {
    - Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Mon Apr 15 03:35:10 UTC 2024
    - 29.4K bytes
    - Viewed (0)
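
The c_api_experimental.cc excerpt serializes a ConfigProto that pins inter-op parallelism to a single thread before returning it as a TF_Buffer. For comparison, a caller-side sketch of the same idea using the public C API, assuming the ConfigProto is built in C++ and attached to session options via TF_SetConfig; include paths are approximate:

    // Sketch: build a ConfigProto that limits both thread pools and attach it
    // to session options through the public C API. Error handling is minimal.
    #include <string>
    #include "tensorflow/c/c_api.h"
    #include "tensorflow/core/protobuf/config.pb.h"

    TF_SessionOptions* MakeSingleThreadedSessionOptions() {
      tensorflow::ConfigProto config;
      config.set_inter_op_parallelism_threads(1);  // one op scheduled at a time
      config.set_intra_op_parallelism_threads(1);  // each op runs single-threaded
      std::string serialized = config.SerializeAsString();

      TF_SessionOptions* opts = TF_NewSessionOptions();
      TF_Status* status = TF_NewStatus();
      TF_SetConfig(opts, serialized.data(), serialized.size(), status);
      // Callers should check TF_GetCode(status) == TF_OK before using opts.
      TF_DeleteStatus(status);
      return opts;
    }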