Results 21 - 30 of 55 for "mu" (1.3 sec)
tensorflow/c/c_api_internal.h
    };

    struct TF_Graph {
      TF_Graph();

      mutable tensorflow::mutex mu;
      tensorflow::Graph graph TF_GUARDED_BY(mu);

      // Runs shape inference.
      tensorflow::ShapeRefiner refiner TF_GUARDED_BY(mu);

      // Maps from name of an operation to the Node* in 'graph'.
      std::unordered_map<tensorflow::string, tensorflow::Node*> name_map
          TF_GUARDED_BY(mu);

      // The keys of this map are all the active sessions using this graph. Each
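TF_GUARDED_BY(mu) is a Clang thread-safety annotation: the compiler warns whenever an annotated field is touched without mu held. Go has no static equivalent, so the same discipline is usually expressed by convention. A minimal Go sketch of that convention (the graph type, its fields, and addNode are illustrative, not TensorFlow API):

    package main

    import (
        "fmt"
        "sync"
    )

    // graph mimics the TF_Graph layout: one mutex guards every field
    // declared after it (the comment plays the role of TF_GUARDED_BY).
    type graph struct {
        mu      sync.Mutex
        nameMap map[string]int // guarded by mu
    }

    func (g *graph) addNode(name string, id int) {
        g.mu.Lock()
        defer g.mu.Unlock()
        g.nameMap[name] = id
    }

    func main() {
        g := &graph{nameMap: make(map[string]int)}
        g.addNode("MatMul", 1)
        fmt.Println(g.nameMap["MatMul"])
    }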
cmd/bootstrap-peer-server.go
    var incorrectConfigs []error
    var retries int
    var mu sync.Mutex
    for onlineServers < len(clnts)/2 {
        var wg sync.WaitGroup
        wg.Add(len(clnts))
        onlineServers = 0
        for _, clnt := range clnts {
            go func(clnt *bootstrapRESTClient) {
                defer wg.Done()
                if clnt.gridConn.State() != grid.StateConnected {
                    mu.Lock()
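The excerpt fans out one goroutine per bootstrap client, joins them with a sync.WaitGroup, and serializes the shared tallies behind a mutex. A self-contained sketch of that shape, with a made-up connectivity check standing in for clnt.gridConn.State():

    package main

    import (
        "fmt"
        "sync"
    )

    func main() {
        clients := []string{"node1", "node2", "node3"}
        var (
            mu     sync.Mutex
            wg     sync.WaitGroup
            online int
            errs   []error
        )
        wg.Add(len(clients))
        for _, c := range clients {
            go func(c string) {
                defer wg.Done()
                ok := len(c) > 0 // stand-in for a real connectivity check
                mu.Lock()        // mu serializes writes to online and errs
                defer mu.Unlock()
                if ok {
                    online++
                } else {
                    errs = append(errs, fmt.Errorf("%s offline", c))
                }
            }(c)
        }
        wg.Wait()
        fmt.Println("online:", online, "errors:", len(errs))
    }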
internal/lsync/lrwmutex.go
        const isWriteLock = false
        return lm.lockLoop(ctx, id, source, timeout, isWriteLock)
    }

    func (lm *LRWMutex) lock(id, source string, isWriteLock bool) (locked bool) {
        lm.mu.Lock()
        defer lm.mu.Unlock()

        lm.id = id
        lm.source = source
        if isWriteLock {
            if lm.ref == 0 && !lm.isWriteLock {
                lm.ref = 1
                lm.isWriteLock = true
                locked = true
            }
        } else {
            if !lm.isWriteLock {
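LRWMutex multiplexes shared and exclusive locking over one plain mutex plus a reference count. A toy version under the same rules (field names mirror the excerpt; lockLoop, timeouts, and the distributed parts are omitted):

    package main

    import (
        "fmt"
        "sync"
    )

    // refLock is a stripped-down take on LRWMutex's core state: a plain
    // mutex guards a reader count plus a writer flag, so one struct can
    // hand out both shared and exclusive locks.
    type refLock struct {
        mu     sync.Mutex
        ref    int
        writer bool
    }

    func (l *refLock) tryLock(write bool) bool {
        l.mu.Lock()
        defer l.mu.Unlock()
        switch {
        case write && l.ref == 0 && !l.writer:
            l.ref, l.writer = 1, true
            return true
        case !write && !l.writer:
            l.ref++
            return true
        }
        return false
    }

    func main() {
        var l refLock
        fmt.Println(l.tryLock(false)) // true: first reader
        fmt.Println(l.tryLock(false)) // true: readers share
        fmt.Println(l.tryLock(true))  // false: readers block the writer
    }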
cmd/batch-handlers.go
            return
        }
        ri.mu.Lock()
        defer ri.mu.Unlock()
        if success {
            ri.Bucket = bucket
            ri.Object = info.Name
        }
        ri.countItem(info.Size, info.DeleteMarker, success, attempt)
    }

    func (ri *batchJobInfo) trackCurrentBucketBatch(bucket string, batch []ObjectInfo) {
        if ri == nil {
            return
        }
        ri.mu.Lock()
        defer ri.mu.Unlock()
        ri.Bucket = bucket
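Two details carry the pattern here: the methods tolerate a nil receiver, and every field update happens between Lock and Unlock. A minimal sketch, with a hypothetical jobInfo type in place of batchJobInfo:

    package main

    import (
        "fmt"
        "sync"
    )

    // jobInfo mirrors the batchJobInfo idiom: methods tolerate a nil
    // receiver and take the lock before mutating progress fields.
    type jobInfo struct {
        mu     sync.Mutex
        Bucket string
        Object string
    }

    func (ri *jobInfo) track(bucket, object string) {
        if ri == nil { // callers may race with teardown; nil is a no-op
            return
        }
        ri.mu.Lock()
        defer ri.mu.Unlock()
        ri.Bucket = bucket
        ri.Object = object
    }

    func main() {
        var missing *jobInfo
        missing.track("b", "o") // safe: pointer method, early return on nil
        ri := &jobInfo{}
        ri.track("photos", "cat.png")
        fmt.Println(ri.Bucket, ri.Object)
    }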
cmd/perf-tests.go
                }
                uploadsCancel()
                return
            }
            response := time.Since(t)
            atomic.AddUint64(&totalBytesWritten, uint64(info.Size))
            objCountPerThread[i]++
            mu.Lock()
            uploadTimes = append(uploadTimes, response)
            mu.Unlock()
            }
        }(i)
    }
    wg.Wait()

    // We already saw write failures, no need to proceed into read's
    if retError != "" {
        return SpeedTestResult{
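The perf test splits its synchronization: the byte counter is bumped with sync/atomic on the hot path, while the slice of upload times, which atomics cannot protect, takes a mutex. A reduced sketch of that split (sizes and worker counts invented):

    package main

    import (
        "fmt"
        "sync"
        "sync/atomic"
        "time"
    )

    func main() {
        var (
            totalBytes uint64 // hot counter: atomic, no lock needed
            mu         sync.Mutex
            times      []time.Duration // appends need the mutex
            wg         sync.WaitGroup
        )
        for i := 0; i < 4; i++ {
            wg.Add(1)
            go func() {
                defer wg.Done()
                t := time.Now()
                atomic.AddUint64(&totalBytes, 1<<20) // cheap under contention
                mu.Lock()
                times = append(times, time.Since(t)) // slices aren't atomic
                mu.Unlock()
            }()
        }
        wg.Wait()
        fmt.Println(atomic.LoadUint64(&totalBytes), len(times))
    }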
tensorflow/c/c_api_experimental.cc
      return nullptr;
    #else
      TF_Library* lib_handle = new TF_Library;
      static tensorflow::mutex mu(tensorflow::LINKER_INITIALIZED);
      static std::unordered_map<std::string, void*>* loaded_libs =
          new std::unordered_map<std::string, void*>();
      tensorflow::Env* env = tensorflow::Env::Default();
      {
        tensorflow::mutex_lock lock(mu);
        auto it = loaded_libs->find(library_filename);
        if (it != loaded_libs->end()) {
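LINKER_INITIALIZED gives C++ a static mutex that is usable before any constructor runs. Go package-level variables provide the same guarantee, since they are initialized before main starts. A sketch of the load-once cache in that style (loadLibrary and its semantics are assumptions, not the TensorFlow API):

    package main

    import (
        "fmt"
        "sync"
    )

    // Package-level state plays the role of the LINKER_INITIALIZED static
    // mutex: the lock and the cache exist before any goroutine can race
    // on them.
    var (
        libMu      sync.Mutex
        loadedLibs = map[string]bool{}
    )

    // loadLibrary returns true only the first time a filename is seen,
    // mirroring the load-once cache in the excerpt.
    func loadLibrary(filename string) bool {
        libMu.Lock()
        defer libMu.Unlock()
        if loadedLibs[filename] {
            return false
        }
        loadedLibs[filename] = true
        return true
    }

    func main() {
        fmt.Println(loadLibrary("libfoo.so")) // true: first load
        fmt.Println(loadLibrary("libfoo.so")) // false: cached
    }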
cmd/naughty-disk_test.go
    }

    func (d *naughtyDisk) Close() (err error) {
        if err = d.calcError(); err != nil {
            return err
        }
        return d.disk.Close()
    }

    func (d *naughtyDisk) calcError() (err error) {
        d.mu.Lock()
        defer d.mu.Unlock()
        d.callNR++
        if err, ok := d.errors[d.callNR]; ok {
            return err
        }
        if d.defaultErr != nil {
            return d.defaultErr
        }
        return nil
    }
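naughtyDisk is a fault-injection wrapper: a locked call counter triggers pre-scripted errors on chosen call numbers. A stripped-down sketch of the same mechanism (faultInjector is a made-up name):

    package main

    import (
        "errors"
        "fmt"
        "sync"
    )

    // faultInjector reproduces the naughtyDisk trick: count calls under
    // a lock and fail on the call numbers the test scripted in advance.
    type faultInjector struct {
        mu     sync.Mutex
        callNR int
        errors map[int]error
    }

    func (f *faultInjector) calcError() error {
        f.mu.Lock()
        defer f.mu.Unlock()
        f.callNR++
        return f.errors[f.callNR] // nil for unscripted calls
    }

    func main() {
        f := &faultInjector{errors: map[int]error{2: errors.New("disk gone")}}
        fmt.Println(f.calcError()) // call 1: <nil>
        fmt.Println(f.calcError()) // call 2: disk gone
    }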
cmd/metacache-server-pool.go
            go func(i int, set *erasureObjects) {
                defer wg.Done()
                err := set.listPath(listCtx, o, innerResults)
                mu.Lock()
                defer mu.Unlock()
                if err == nil {
                    allAtEOF = false
                }
                errs[i] = err
            }(len(errs), set)
            errs = append(errs, nil)
        }
    }
    mu.Unlock()

    // Gather results to a single channel.
    // Quorum is one since we are merging across sets.
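Same fan-out shape as the bootstrap excerpt, but each goroutine writes into its own slot of a pre-sized error slice, while the mutex guards the shared allAtEOF flag. A runnable sketch with a fake listPath failure:

    package main

    import (
        "errors"
        "fmt"
        "sync"
    )

    func main() {
        sets := []string{"set0", "set1", "set2"}
        var (
            mu       sync.Mutex
            wg       sync.WaitGroup
            allAtEOF = true
            errs     = make([]error, len(sets))
        )
        for i, s := range sets {
            wg.Add(1)
            go func(i int, s string) {
                defer wg.Done()
                var err error
                if s == "set1" {
                    err = errors.New("listing failed") // stand-in for listPath
                }
                mu.Lock() // guards the shared flag; errs[i] is per-goroutine
                defer mu.Unlock()
                if err == nil {
                    allAtEOF = false
                }
                errs[i] = err
            }(i, s)
        }
        wg.Wait()
        fmt.Println(allAtEOF, errs)
    }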
cmd/bucket-replication.go
    if p.ActiveWorkers() < maxWorkers {
        p.mu.RLock()
        workers := min(len(p.workers)+1, maxWorkers)
        existing := len(p.workers)
        p.mu.RUnlock()
        p.ResizeWorkers(workers, existing)
    }
    maxMRFWorkers := min(maxWorkers, MRFWorkerMaxLimit)
    if p.ActiveMRFWorkers() < maxMRFWorkers {
        p.mu.RLock()
        workers := min(p.mrfWorkerSize+1, maxMRFWorkers)
        p.mu.RUnlock()
        p.ResizeFailedWorkers(workers)
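Note the lock choreography: the pool reads its current size under RLock, releases it, and only then calls the resize method, which acquires the write lock itself. Holding the read lock across that call would deadlock. A sketch of the pattern (pool, grow, and resize are illustrative names):

    package main

    import (
        "fmt"
        "sync"
    )

    type pool struct {
        mu      sync.RWMutex
        workers []chan struct{}
    }

    // grow reads the current size under RLock, releases it, and only
    // then calls resize, which takes the write lock itself.
    func (p *pool) grow(maxWorkers int) {
        p.mu.RLock()
        target := min(len(p.workers)+1, maxWorkers) // min: Go 1.21+ built-in
        p.mu.RUnlock()
        p.resize(target)
    }

    func (p *pool) resize(n int) {
        p.mu.Lock()
        defer p.mu.Unlock()
        for len(p.workers) < n {
            p.workers = append(p.workers, make(chan struct{}))
        }
    }

    func main() {
        p := &pool{}
        p.grow(4)
        p.grow(4)
        fmt.Println(len(p.workers)) // 2
    }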
cmd/bucket-lifecycle.go
        return workers[h%uint64(len(workers))]
    }

    func (es *expiryState) ResizeWorkers(n int) {
        if n == 0 {
            n = 100
        }
        // Lock to avoid multiple resizes to happen at the same time.
        es.mu.Lock()
        defer es.mu.Unlock()
        var workers []chan expiryOp
        if v := es.workers.Load(); v != nil {
            // Copy to new array.
            workers = append(workers, *v...)
        }
        if n == len(workers) || n < 1 {
            return
        }
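ResizeWorkers is copy-on-write: readers fetch the worker slice through an atomic load with no lock, while resizes are serialized by es.mu and publish a fresh copy. A compact sketch using Go's atomic.Pointer (state and resize are illustrative names):

    package main

    import (
        "fmt"
        "sync"
        "sync/atomic"
    )

    // state mimics expiryState: readers load the worker slice through an
    // atomic pointer without locking, while resizes are serialized by mu
    // and publish a fresh copy of the slice.
    type state struct {
        mu      sync.Mutex
        workers atomic.Pointer[[]chan int]
    }

    func (s *state) resize(n int) {
        if n == 0 {
            n = 100 // same default as the excerpt
        }
        s.mu.Lock() // one resize at a time
        defer s.mu.Unlock()
        var workers []chan int
        if v := s.workers.Load(); v != nil {
            workers = append(workers, *v...) // copy, never mutate in place
        }
        for len(workers) < n {
            workers = append(workers, make(chan int))
        }
        s.workers.Store(&workers)
    }

    func main() {
        var s state
        s.resize(4)
        fmt.Println(len(*s.workers.Load())) // 4
    }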