Results 21 - 30 of 72 for cached (0.23 sec)

  1. src/runtime/mgcmark.go

    	}
    
    	// Scan this shard.
    	scanblock(b, n, ptrmask, gcw, nil)
    	return int64(n)
    }
    
    // markrootFreeGStacks frees stacks of dead Gs.
    //
    // This does not free stacks of dead Gs cached on Ps, but having a few
    // cached stacks around isn't a problem.
    func markrootFreeGStacks() {
    	// Take list of dead Gs with stacks.
    	lock(&sched.gFree.lock)
    	list := sched.gFree.stack
    	sched.gFree.stack = gList{}

    - Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Thu Apr 18 21:25:11 UTC 2024
    - 52.5K bytes
    - Viewed (0)
  2. pkg/scheduler/internal/cache/cache_test.go

    			}
    
    			// Step 2: dump cached nodes successfully.
    			cachedNodes := NewEmptySnapshot()
    			if err := cache.UpdateSnapshot(logger, cachedNodes); err != nil {
    				t.Error(err)
    			}
    			newNode, found := cachedNodes.nodeInfoMap[node.Name]
    			if !found || len(cachedNodes.nodeInfoMap) != len(tc.nodes) {
    				t.Errorf("failed to dump cached nodes:\n got: %v \nexpected: %v", cachedNodes.nodeInfoMap, tc.nodes)

    - Registered: Sat Jun 15 01:39:40 UTC 2024
    - Last Modified: Tue Oct 17 01:38:03 UTC 2023
    - 63.8K bytes
    - Viewed (0)
  3. src/runtime/mgc.go

    			}
    			if gcw.wbuf2 == nil {
    				print(" wbuf2=<nil>")
    			} else {
    				print(" wbuf2.n=", gcw.wbuf2.nobj)
    			}
    			print("\n")
    			throw("P has cached GC work at end of mark termination")
    		}
    		// There may still be cached empty buffers, which we
    		// need to flush since we're going to free them. Also,
    		// there may be non-zero stats because we allocated
    		// black after the gcMarkDone barrier.

    - Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Wed May 29 16:25:21 UTC 2024
    - 62K bytes
    - Viewed (0)
  4. src/runtime/mheap.go

    	// if sweepgen == h->sweepgen, the span is swept and ready to use
    	// if sweepgen == h->sweepgen + 1, the span was cached before sweep began and is still cached, and needs sweeping
    	// if sweepgen == h->sweepgen + 3, the span was swept and then cached and is still cached
    	// h->sweepgen is incremented by 2 after every GC
    
    	sweepgen              uint32
    	divMul                uint32        // for divide by elemsize

    - Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Wed May 22 22:31:00 UTC 2024
    - 78K bytes
    - Viewed (0)
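
    The sweepgen comment above describes a small state machine. As a minimal, illustrative sketch (not the runtime's actual code), the classification it implies could be written like this, using stand-in types with only the fields named in the excerpt:

    // Sketch only, not runtime code: classify a span by the sweepgen
    // states described in the excerpt above.
    package main

    import "fmt"

    type span struct{ sweepgen uint32 }
    type heap struct{ sweepgen uint32 } // incremented by 2 after every GC

    func spanState(s span, h heap) string {
    	switch s.sweepgen {
    	case h.sweepgen:
    		return "swept and ready to use"
    	case h.sweepgen + 1:
    		return "cached before sweep began; still needs sweeping"
    	case h.sweepgen + 3:
    		return "swept, then cached, and still cached"
    	default:
    		return "being swept, or left over from an older cycle"
    	}
    }

    func main() {
    	h := heap{sweepgen: 4}
    	fmt.Println(spanState(span{sweepgen: 7}, h)) // swept, then cached, and still cached
    }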
  5. pkg/controller/endpointslice/endpointslice_controller_test.go

    func (cmc *cacheMutationCheck) Check(t *testing.T) {
    	for _, o := range cmc.objects {
    		if !reflect.DeepEqual(o.original, o.deepCopy) {
    			// Cached objects can't be safely mutated and instead should be deep
    			// copied before changed in any way.
    			t.Errorf("Cached object was unexpectedly mutated. Original: %+v, Mutated: %+v", o.deepCopy, o.original)
    		}
    	}
    }
    

    - Registered: Sat Jun 15 01:39:40 UTC 2024
    - Last Modified: Tue Jun 04 08:33:32 UTC 2024
    - 65.5K bytes
    - Viewed (0)
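
    The excerpt above encodes a general rule: objects read from a shared cache must not be mutated in place, only deep copies of them. A minimal, hypothetical sketch of the same check outside Kubernetes (types and names here are stand-ins, not the controller's code):

    // Sketch: snapshot a deep copy of a cached object before the code under
    // test runs, then verify afterwards that the cached original is unchanged.
    package main

    import (
    	"fmt"
    	"reflect"
    )

    type endpoint struct{ addresses []string }

    func (e *endpoint) deepCopy() *endpoint {
    	return &endpoint{addresses: append([]string(nil), e.addresses...)}
    }

    func main() {
    	cached := &endpoint{addresses: []string{"10.0.0.1"}}
    	snapshot := cached.deepCopy()

    	// ... code under test runs here; it must not modify `cached` in place ...

    	if !reflect.DeepEqual(cached, snapshot) {
    		fmt.Printf("cached object was mutated: original %+v, now %+v\n", snapshot, cached)
    	}
    }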
  6. src/cmd/compile/internal/types/type.go

    // ABIInternal calling conventions.
    //
    // If t must be passed by memory, Registers returns (math.MaxUint8,
    // math.MaxUint8).
    func (t *Type) Registers() (uint8, uint8) {
    	CalcSize(t)
    	return t.intRegs, t.floatRegs
    }
    
    func (*Type) CanBeAnSSAAux() {}
    
    const (
    	typeNotInHeap  = 1 << iota // type cannot be heap allocated
    	typeNoalg                  // suppress hash and eq algorithm generation

    - Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Thu Apr 04 14:29:45 UTC 2024
    - 49.5K bytes
    - Viewed (0)
  7. src/index/suffixarray/sais2.go

    	// Cache recently used bucket index:
    	// we're processing suffixes in sorted order
    	// and accessing buckets indexed by the
    	// byte before the sorted order, which still
    	// has very good locality.
    	// Invariant: b is cached, possibly dirty copy of bucket[cB].
    	cB := c1
    	b := bucket[cB]
    	sa[b] = int64(k)
    	b++
    
    	for i := 0; i < len(sa); i++ {
    		j := int(sa[i])
    		if j == 0 {
    			// Skip empty entry.
    			continue
    		}

    - Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Mon Mar 18 23:57:18 UTC 2024
    - 52.3K bytes
    - Viewed (0)
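
    The comment above describes a write-back cache of one bucket index: the current bucket's insertion pointer lives in a local variable (the "possibly dirty copy") and is flushed only when processing moves to a different bucket. A hypothetical, simplified sketch of that idea, not the suffix array code itself:

    // Sketch: place already-sorted keys into per-byte buckets, caching the
    // active bucket's insertion index in a local and writing it back only
    // when the bucket changes.
    package main

    import "fmt"

    func placeSorted(keys []byte, bucket []int, sa []int) {
    	cB := -1 // which bucket is currently cached in b
    	b := 0   // cached, possibly dirty copy of bucket[cB]
    	for k, c := range keys {
    		if int(c) != cB {
    			if cB >= 0 {
    				bucket[cB] = b // flush the dirty copy
    			}
    			cB = int(c)
    			b = bucket[cB]
    		}
    		sa[b] = k
    		b++
    	}
    	if cB >= 0 {
    		bucket[cB] = b
    	}
    }

    func main() {
    	sa := make([]int, 4)
    	bucket := []int{0, 2} // start offsets for two buckets
    	placeSorted([]byte{0, 0, 1, 1}, bucket, sa)
    	fmt.Println(sa) // [0 1 2 3]
    }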
  8. src/runtime/mbitmap.go

    	typ *_type
    }
    
    // typePointersOf returns an iterator over all heap pointers in the range [addr, addr+size).
    //
    // addr and addr+size must be in the range [span.base(), span.limit).
    //
    // Note: addr+size must be passed as the limit argument to the iterator's next method on
    // each iteration. This slightly awkward API is to allow typePointers to be destructured
    // by the compiler.
    //

    - Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Thu May 23 00:18:55 UTC 2024
    - 60K bytes
    - Viewed (0)
  9. pkg/controller/volume/persistentvolume/pv_controller.go

    			// with the same name created.
    			// in some cases, the cached claim is not the newest, and the volume.Spec.ClaimRef.UID is newer than cached.
    			// so we should double check by calling apiserver and get the newest claim, then compare them.

    - Registered: Sat Jun 15 01:39:40 UTC 2024
    - Last Modified: Fri May 10 08:42:31 UTC 2024
    - 89.2K bytes
    - Viewed (0)
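
    The comment above points at a common informer pattern: the cached claim can lag behind the API server, so the controller re-reads the claim from the API server before trusting it. A hedged sketch of such a double check using client-go (assuming an initialized kubernetes.Interface; this is not the controller's own code):

    // Sketch: re-fetch a possibly stale cached claim from the API server
    // and report whether the cached copy still refers to the same object.
    package pvutil

    import (
    	"context"

    	v1 "k8s.io/api/core/v1"
    	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    	"k8s.io/client-go/kubernetes"
    )

    // freshClaim returns the newest claim from the API server. The boolean is
    // false when the cached object has a different UID, i.e. it refers to an
    // older claim that was deleted and recreated under the same name.
    func freshClaim(ctx context.Context, client kubernetes.Interface, cached *v1.PersistentVolumeClaim) (*v1.PersistentVolumeClaim, bool, error) {
    	newest, err := client.CoreV1().PersistentVolumeClaims(cached.Namespace).Get(ctx, cached.Name, metav1.GetOptions{})
    	if err != nil {
    		return nil, false, err
    	}
    	return newest, newest.UID == cached.UID, nil
    }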
  10. src/net/http/transport.go

    		// If this request requires HTTP/1, don't use the
    		// "https" alternate protocol, which is used by the
    		// HTTP/2 code to take over requests if there's an
    		// existing cached HTTP/2 connection.
    		return false
    	}
    	return true
    }
    
    // alternateRoundTripper returns the alternate RoundTripper to use
    // for this request if the Request's URL scheme requires one,

    - Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Thu Jun 06 21:59:21 UTC 2024
    - 91K bytes
    - Viewed (0)