- Sort Score
- Results: 10 results
- Languages All
Results 1 - 10 of 304 for Dequeue (0.15 sec)
-
staging/src/k8s.io/apiserver/pkg/util/flowcontrol/fairqueuing/queueset/fifo_list_test.go
arrival := []*request{{}, {}, {}, {}, {}, {}} list := newRequestFIFO() for i := range arrival { list.Enqueue(arrival[i]) } dequeued := make([]*request, 0) for list.Length() > 0 { req, _ := list.Dequeue() dequeued = append(dequeued, req) } verifyOrder(t, arrival, dequeued) } func TestFIFOWithEnqueueDequeueSomeRequestsRemainInQueue(t *testing.T) { list := newRequestFIFO()
Registered: Sat Jun 15 01:39:40 UTC 2024 - Last Modified: Thu Jul 28 08:48:40 UTC 2022 - 7.8K bytes - Viewed (0) -
pilot/pkg/xds/pushqueue_test.go
t.Parallel() p := NewPushQueue() defer p.ShutDown() p.Enqueue(proxies[0], &model.PushRequest{}) p.Enqueue(proxies[1], &model.PushRequest{}) ExpectDequeue(t, p, proxies[0]) ExpectDequeue(t, p, proxies[1]) }) t.Run("remove too many", func(t *testing.T) { t.Parallel() p := NewPushQueue() defer p.ShutDown() p.Enqueue(proxies[0], &model.PushRequest{}) ExpectDequeue(t, p, proxies[0])
Registered: Fri Jun 14 15:00:06 UTC 2024 - Last Modified: Tue Apr 30 00:26:45 UTC 2024 - 8.8K bytes - Viewed (0) -
src/sync/poolqueue.go
// // This is implemented as a doubly-linked list queue of poolDequeues // where each dequeue is double the size of the previous one. Once a // dequeue fills up, this allocates a new one and only ever pushes to // the latest dequeue. Pops happen from the other end of the list and // once a dequeue is exhausted, it gets removed from the list. type poolChain struct {
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Mon Feb 26 18:12:29 UTC 2024 - 8.3K bytes - Viewed (0) -
src/internal/fuzz/queue.go
q.head = 0 } func (q *queue) enqueue(e any) { if q.len+1 > q.cap() { q.grow() } i := (q.head + q.len) % q.cap() q.elems[i] = e q.len++ } func (q *queue) dequeue() (any, bool) { if q.len == 0 { return nil, false } e := q.elems[q.head] q.elems[q.head] = nil q.head = (q.head + 1) % q.cap() q.len-- return e, true } func (q *queue) peek() (any, bool) { if q.len == 0 {
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Sat Nov 05 21:02:45 UTC 2022 - 1.5K bytes - Viewed (0) -
test/chanlinear.go
d := make(chan bool) a = append(a, d) go func() { for j := 0; j < messages; j++ { // queue ourselves on the global channel select { case <-c: case <-d: } } }() } for i := 0; i < messages; i++ { // wake each goroutine up, forcing it to dequeue and then enqueue // on the global channel. for _, d := range a { d <- true } } })
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Thu Oct 19 23:33:25 UTC 2023 - 2.1K bytes - Viewed (0) -
src/runtime/sema.go
if s.parent.next != s { panic("semaRoot queue") } root.rotateLeft(s.parent) } } } // dequeue searches for and finds the first goroutine // in semaRoot blocked on addr. // If the sudog was being profiled, dequeue returns the time // at which it was woken up as now. Otherwise now is 0. // If there are additional entries in the wait list, dequeue // returns tailtime set to the last entry's acquiretime.
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Wed May 29 17:58:53 UTC 2024 - 19K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/transforms/tpu_cluster_cleanup_attributes.cc
// device. Device attribute is used to infer the appropriate sharding // within TPUs for this op. // TODO(b/183598857): Use explicit sharding ops from the front-end. // For example, dequeue ops generated by // tensorflow/python/tpu/tpu_feed.py if (!tensorflow::IsTPUReplicatedCore(attr.getValue()) && !isa<tf_device::LaunchOp>(op)) { op->removeAttr(kDeviceAttr); }
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Oct 05 23:50:19 UTC 2022 - 3K bytes - Viewed (0) -
src/runtime/chan.go
// its item at the tail of the queue. Since the // queue is full, those are both the same slot. qp := chanbuf(c, c.recvx) if raceenabled { racenotify(c, c.recvx, nil) racenotify(c, c.recvx, sg) } // copy data from queue to receiver if ep != nil { typedmemmove(c.elemtype, ep, qp) } // copy data from sender to queue typedmemmove(c.elemtype, qp, sg.elem) c.recvx++
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Thu May 23 01:16:50 UTC 2024 - 25.9K bytes - Viewed (0) -
src/runtime/select.go
x := sgp.prev y := sgp.next if x != nil { if y != nil { // middle of queue x.next = y y.prev = x sgp.next = nil sgp.prev = nil return } // end of queue x.next = nil q.last = x sgp.prev = nil return } if y != nil { // start of queue y.prev = nil q.first = y sgp.next = nil return }
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Wed Mar 13 21:36:04 UTC 2024 - 15K bytes - Viewed (0) -
pkg/queue/delay_test.go
} success <- struct{}{} }() select { case <-success: dq := dq.(*delayQueue) dq.mu.Lock() if dq.queue.Len() < queuedItems { t.Fatalf("expected 50 items in the queue, got %d", dq.queue.Len()) } dq.mu.Unlock() return case <-timeout: t.Fatal("timed out waiting for enqueues") } } func TestPriorityQueueShrinking(t *testing.T) {
Registered: Fri Jun 14 15:00:06 UTC 2024 - Last Modified: Thu Jul 20 06:27:31 UTC 2023 - 4.3K bytes - Viewed (0)