Results 1 - 9 of 9 for QThread (0.19 sec)

  1. tensorflow/c/c_api.cc

    #include "tensorflow/core/platform/status.h"
    #include "tensorflow/core/platform/str_util.h"
    #include "tensorflow/core/platform/strcat.h"
    #include "tensorflow/core/platform/stringpiece.h"
    #include "tensorflow/core/platform/thread_annotations.h"
    #include "tensorflow/core/platform/types.h"
    #include "tensorflow/core/public/session.h"
    #include "tensorflow/core/public/version.h"
    
    // The implementation below is at the top level instead of the
    C++
    - Registered: Tue Apr 30 12:39:09 GMT 2024
    - Last Modified: Mon Apr 15 03:35:10 GMT 2024
    - 102.3K bytes
    - Viewed (0)
  2. tensorflow/c/eager/parallel_device/parallel_device_lib.cc

      // unnecessary allocations each Execute call, we keep one heap-allocated
      // version for the thread.
      StatusPtr status_ TF_GUARDED_BY(execution_mutex_);
    
      const std::string device_;
      ExecutorPtr executor_ TF_GUARDED_BY(execution_mutex_);
      mutable OpPtr op_ TF_GUARDED_BY(execution_mutex_);
      std::unique_ptr<Thread> thread_;
    };
    
    DeviceThread::~DeviceThread() {
      {
        tensorflow::mutex_lock l(execution_mutex_);
    C++
    - Registered: Tue Apr 30 12:39:09 GMT 2024
    - Last Modified: Fri Feb 09 07:47:20 GMT 2024
    - 25.4K bytes
    - Viewed (1)
  3. tensorflow/c/c_api_experimental.cc

      // (as in the case of ConstOp kernel creation on GPU, which involves copying a
      // CPU tensor to GPU).
      // Setting a larger thread pool does not help with the Swift caller, as we use
      // a different TFE context for each thread of execution (for running graph
      // functions, and their send/recv coroutines).
      config.set_inter_op_parallelism_threads(1);
    
      TF_Buffer* ret = TF_NewBuffer();
    C++
    - Registered: Tue Apr 30 12:39:09 GMT 2024
    - Last Modified: Mon Apr 15 03:35:10 GMT 2024
    - 29.4K bytes
    - Viewed (0)
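    A minimal sketch, assuming tensorflow::ConfigProto from
    tensorflow/core/protobuf/config.pb.h and the TFE_ContextOptionsSetConfig
    entry point, of how the single-inter-op-thread configuration described in
    the comment above could be handed to an eager context; the helper name
    MakeSingleInterOpThreadOptions is hypothetical.

    #include <string>

    #include "tensorflow/c/eager/c_api.h"
    #include "tensorflow/core/protobuf/config.pb.h"

    TFE_ContextOptions* MakeSingleInterOpThreadOptions(TF_Status* status) {
      // One inter-op thread, matching the comment in the excerpt above.
      tensorflow::ConfigProto config;
      config.set_inter_op_parallelism_threads(1);
      const std::string serialized = config.SerializeAsString();

      TFE_ContextOptions* opts = TFE_NewContextOptions();
      TFE_ContextOptionsSetConfig(opts, serialized.data(), serialized.size(),
                                  status);
      return opts;
    }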
  4. tensorflow/c/experimental/filesystem/plugins/gcs/ram_file_block_cache_test.cc

        notification.Notify();
        // Wait for other thread to issue read.
        Env::Default()->SleepForMicroseconds(100000);  // 0.1 secs
        TF_SetStatus(status, TF_OK, "");
        return n;
      };
      tf_gcs_filesystem::RamFileBlockCache cache(block_size, block_size, 0,
                                                 fetcher);
      // Fork off thread for parallel read.
      std::unique_ptr<Thread> concurrent(
    C++
    - Registered: Tue Apr 23 12:39:09 GMT 2024
    - Last Modified: Fri Oct 15 03:16:57 GMT 2021
    - 23.2K bytes
    - Viewed (0)
  5. tensorflow/c/experimental/filesystem/plugins/gcs/ram_file_block_cache.cc

                                      const std::shared_ptr<Block>& block,
                                      TF_Status* status) {
      absl::MutexLock lock(&mu_);
      if (block->timestamp == 0) {
        // The block was evicted from another thread. Allow it to remain evicted.
        return TF_SetStatus(status, TF_OK, "");
      }
      if (block->lru_iterator != lru_list_.begin()) {
        lru_list_.erase(block->lru_iterator);
        lru_list_.push_front(key);
    C++
    - Registered: Tue Apr 23 12:39:09 GMT 2024
    - Last Modified: Thu Jul 16 01:39:09 GMT 2020
    - 11.1K bytes
    - Viewed (0)
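    The excerpt above shows the cache's move-to-front step on a block hit. A
    simplified, self-contained sketch of that LRU pattern (not the
    RamFileBlockCache code itself; the LruIndex class is illustrative only):

    #include <list>
    #include <string>
    #include <unordered_map>

    class LruIndex {
     public:
      // Record an access; the key becomes the most recently used entry.
      void Touch(const std::string& key) {
        auto it = pos_.find(key);
        if (it != pos_.end()) lru_list_.erase(it->second);  // drop old position
        lru_list_.push_front(key);                          // newest at the front
        pos_[key] = lru_list_.begin();
      }

      // Remove and report the least recently used key, if any.
      bool EvictOldest(std::string* evicted) {
        if (lru_list_.empty()) return false;
        *evicted = lru_list_.back();
        pos_.erase(*evicted);
        lru_list_.pop_back();
        return true;
      }

     private:
      std::list<std::string> lru_list_;
      std::unordered_map<std::string, std::list<std::string>::iterator> pos_;
    };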
  6. tensorflow/c/eager/c_api_test.cc

      TFE_ContextOptionsSetDevicePlacementPolicy(opts, global_policy);
      TFE_Context* ctx = TFE_NewContext(opts, status.get());
      if (thread_policy != global_policy) {
        TFE_ContextSetThreadLocalDevicePlacementPolicy(ctx, thread_policy);
      }
      TFE_DeleteContextOptions(opts);
      ASSERT_EQ(TF_GetCode(status.get()), TF_OK) << TF_Message(status.get());
    
    C++
    - Registered: Tue Apr 30 12:39:09 GMT 2024
    - Last Modified: Thu Aug 03 20:50:20 GMT 2023
    - 94.6K bytes
    - Viewed (1)
  7. tensorflow/c/env.cc

      cc_options.numa_node = options->numa_node;
      return reinterpret_cast<TF_Thread*>(::tensorflow::Env::Default()->StartThread(
          cc_options, thread_name, [=]() { (*work_func)(param); }));
    }
    
    void TF_JoinThread(TF_Thread* thread) {
      // ::tensorflow::Thread joins on destruction
      delete reinterpret_cast<::tensorflow::Thread*>(thread);
    }
    
    void* TF_LoadSharedLibrary(const char* library_filename, TF_Status* status) {
    C++
    - Registered: Tue Apr 30 12:39:09 GMT 2024
    - Last Modified: Wed Aug 11 01:20:50 GMT 2021
    - 7K bytes
    - Viewed (0)
  8. tensorflow/c/env_test.cc

    }
    
    }  // namespace
    
    TEST(TestEnv, TestThreads) {
      TF_ThreadOptions options;
      TF_DefaultThreadOptions(&options);
      SomeThreadData data;
      TF_Thread* thread =
          TF_StartThread(&options, "SomeThreadName", &SomeThreadFunc, &data);
      TF_JoinThread(thread);
      ::tensorflow::mutex_lock l(data.mu);
      ASSERT_TRUE(data.did_work);
    C++
    - Registered: Tue Apr 30 12:39:09 GMT 2024
    - Last Modified: Mon Dec 10 20:52:48 GMT 2018
    - 4.2K bytes
    - Viewed (0)
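    Taken together, the two excerpts above (tensorflow/c/env.cc and its test)
    outline the C thread API: TF_StartThread launches a named thread and
    TF_JoinThread joins it by deleting the wrapped ::tensorflow::Thread. A
    minimal caller-side sketch, with a hypothetical work function and payload:

    #include "tensorflow/c/env.h"

    struct WorkItem {
      int value = 0;  // hypothetical payload mutated by the worker
    };

    static void DoWork(void* param) {
      static_cast<WorkItem*>(param)->value += 1;
    }

    void RunOnTfThread() {
      TF_ThreadOptions options;
      TF_DefaultThreadOptions(&options);  // default stack size / NUMA node
      WorkItem item;
      TF_Thread* thread =
          TF_StartThread(&options, "ExampleThread", &DoWork, &item);
      TF_JoinThread(thread);  // blocks until DoWork returns, then frees the thread
    }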
  9. tensorflow/c/eager/c_api.cc

      tensorflow::unwrap(ctx)->SetThreadLocalDevicePlacementPolicy(
          static_cast<tensorflow::ContextDevicePlacementPolicy>(policy));
    }
    
    // Note: this function looks up a thread local policy. So it should be called in
    // the appropriate client thread. In particular, in async mode, it may not be
    // safe to call this function from the async EagerExecutor threads.
    extern TFE_ContextDevicePlacementPolicy TFE_ContextGetDevicePlacementPolicy(
    C++
    - Registered: Tue Apr 30 12:39:09 GMT 2024
    - Last Modified: Tue Mar 12 20:00:09 GMT 2024
    - 43.9K bytes
    - Viewed (2)
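    The note in this excerpt, together with result 6 above, describes a global
    placement policy set on the context options and a per-thread override read
    back through a thread-local lookup. A minimal sketch of that flow, assuming
    the eager C API headers, with error handling reduced to one status check:

    #include "tensorflow/c/c_api.h"
    #include "tensorflow/c/eager/c_api.h"

    void OverridePlacementPolicyForThisThread() {
      TF_Status* status = TF_NewStatus();
      TFE_ContextOptions* opts = TFE_NewContextOptions();
      // Global default: warn when an op is silently re-placed.
      TFE_ContextOptionsSetDevicePlacementPolicy(opts, TFE_DEVICE_PLACEMENT_WARN);
      TFE_Context* ctx = TFE_NewContext(opts, status);
      TFE_DeleteContextOptions(opts);
      if (TF_GetCode(status) == TF_OK) {
        // Per-thread override; as the comment above notes, the getter reads a
        // thread-local value, so call it on the same client thread.
        TFE_ContextSetThreadLocalDevicePlacementPolicy(
            ctx, TFE_DEVICE_PLACEMENT_SILENT);
        TFE_ContextDevicePlacementPolicy policy =
            TFE_ContextGetDevicePlacementPolicy(ctx);
        (void)policy;  // TFE_DEVICE_PLACEMENT_SILENT on this thread
        TFE_DeleteContext(ctx);
      }
      TF_DeleteStatus(status);
    }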