Results 11 - 20 of 338 for happen (0.13 sec)

  11. src/runtime/mgcpacer.go

    		heapAlloc = c.totalAlloc.Load() - c.totalFree.Load() // Heap object bytes in use.
    		mappedReady = c.mappedReady.Load()                   // Total unreleased mapped memory.
    		if heapFree+heapAlloc <= mappedReady {
    			break
    		}
    		// It is impossible for total unreleased mapped memory to exceed heap memory, but
    		// because these stats are updated independently, we may observe a partial update
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Mon Mar 25 19:53:03 UTC 2024
    - 55.4K bytes
  12. pilot/pkg/model/push_context.go

    		for _, privateServices := range ps.ServiceIndex.privateByNamespace {
    			out = append(out, privateServices...)
    		}
    	} else {
    		out = make([]*Service, 0, len(ps.ServiceIndex.privateByNamespace[ns])+
    			len(ps.ServiceIndex.exportedToNamespace[ns])+len(ps.ServiceIndex.public))
    		out = append(out, ps.ServiceIndex.privateByNamespace[ns]...)
    		out = append(out, ps.ServiceIndex.exportedToNamespace[ns]...)
    	}
    
    Registered: Fri Jun 14 15:00:06 UTC 2024
    - Last Modified: Wed May 15 09:02:11 UTC 2024
    - 91.8K bytes
  13. pkg/scheduler/framework/runtime/framework.go

    			if s.Code() == framework.UnschedulableAndUnresolvable {
    				// In this case, the preemption shouldn't happen in this scheduling cycle.
    				// So, no need to execute all PreFilter.
    				return nil, s
    			}
    			if s.Code() == framework.Unschedulable {
    				// In this case, the preemption should happen later in this scheduling cycle.
    				// So we need to execute all PreFilter.
    Registered: Sat Jun 15 01:39:40 UTC 2024
    - Last Modified: Fri May 17 09:07:27 UTC 2024
    - 60.9K bytes
  14. staging/src/k8s.io/apimachinery/pkg/util/strategicpatch/patch.go

    		if foundInOriginal {
    			if !reflect.DeepEqual(retainKeysInOriginal, retainKeysInPatch) {
    				// This error actually should never happen.
    				return fmt.Errorf("%v and %v are not deep equal: this may happen when calculating the 3-way diff patch", retainKeysInOriginal, retainKeysInPatch)
    			}
    		} else {
    			original[retainKeysDirective] = retainKeysInPatch
    		}
    		return nil
    	}
    Registered: Sat Jun 15 01:39:40 UTC 2024
    - Last Modified: Mon May 01 23:34:23 UTC 2023
    - 75.5K bytes
  15. src/runtime/mprof.go

    	unlock(&profMemActiveLock)
    }
    
    // Called by malloc to record a profiled block.
    func mProf_Malloc(mp *m, p unsafe.Pointer, size uintptr) {
    	if mp.profStack == nil {
    		// mp.profStack is nil if we happen to sample an allocation during the
    		// initialization of mp. This case is rare, so we just ignore such
    		// allocations. Change MemProfileRate to 1 if you need to reproduce such
    		// cases for testing purposes.
    		return
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Thu May 30 17:57:37 UTC 2024
    - 53.3K bytes
  16. src/cmd/compile/internal/ssa/regalloc.go

    			if !mustBeFirst(v.Op) {
    				break
    			}
    			nfirst++
    		}
    		oldSched = append(oldSched[:0], b.Values[nfirst:]...)
    		b.Values = b.Values[:nfirst]
    		b.Values = append(b.Values, start[b.ID]...)
    		for _, v := range oldSched {
    			b.Values = append(b.Values, v)
    			b.Values = append(b.Values, after[v.ID]...)
    		}
    	}
    }
    
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Tue Nov 21 17:49:56 UTC 2023
    - 87.2K bytes
  17. src/cmd/go/internal/modget/get.go

    	}
    
    	for _, q := range queries {
    		if q.pattern == "all" {
    			r.patternAllQueries = append(r.patternAllQueries, q)
    		} else if q.patternIsLocal {
    			r.localQueries = append(r.localQueries, q)
    		} else if q.isWildcard() {
    			r.wildcardQueries = append(r.wildcardQueries, q)
    		} else {
    			r.pathQueries = append(r.pathQueries, q)
    		}
    
    		if q.version == "none" {
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Fri Jun 07 18:26:32 UTC 2024
    - 66.5K bytes
  18. src/runtime/traceback.go

    //
    //	var u unwinder
    //	for u.init(gp, 0); u.valid(); u.next() {
    //		// ... use frame info in u ...
    //	}
    //
    // Implementation note: This is carefully structured to be pointer-free because
    // tracebacks happen in places that disallow write barriers (e.g., signals).
    // Even if this is stack-allocated, its pointer-receiver methods don't know that
    // their receiver is on the stack, so they still emit write barriers. Here we
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Wed May 29 16:25:21 UTC 2024
    - 55.1K bytes
  19. pkg/scheduler/framework/plugins/defaultpreemption/default_preemption_test.go

    				tf.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New),
    			)
    			registeredPlugins = append(registeredPlugins, tt.registerPlugins...)
    			var objs []runtime.Object
    			for _, p := range append(tt.testPods, tt.initPods...) {
    				objs = append(objs, p)
    			}
    			for _, n := range nodes {
    				objs = append(objs, n)
    			}
    Registered: Sat Jun 15 01:39:40 UTC 2024
    - Last Modified: Fri May 31 15:52:16 UTC 2024
    - 82.8K bytes
  20. staging/src/k8s.io/apiserver/pkg/storage/cacher/cacher.go

    		return nil, false
    	}
    
    	result := make([]string, 0, 2)
    	result = append(result, c.indexedTrigger.indexerFunc(baseObjectThreadUnsafe(event.Object)))
    	if event.PrevObject == nil {
    		return result, true
    	}
    	prevTriggerValue := c.indexedTrigger.indexerFunc(baseObjectThreadUnsafe(event.PrevObject))
    	if result[0] != prevTriggerValue {
    		result = append(result, prevTriggerValue)
    	}
    	return result, true
    }
    
    Registered: Sat Jun 15 01:39:40 UTC 2024
    - Last Modified: Wed Jun 12 10:12:02 UTC 2024
    - 51.8K bytes