- Sort Score
- Results per page: 10
- Languages All
Results 21 - 30 of 454 for zeroed (0.13 sec)
-
pkg/kubelet/cm/devicemanager/manager.go
return m.podDevices.getContainerDevices(podUID, containerName) } // ShouldResetExtendedResourceCapacity returns whether the extended resources should be zeroed or not, // depending on whether the node has been recreated. Absence of the checkpoint file strongly indicates the node // has been recreated. func (m *ManagerImpl) ShouldResetExtendedResourceCapacity() bool {
Registered: Sat Jun 15 01:39:40 UTC 2024 - Last Modified: Mon Apr 15 12:01:56 UTC 2024 - 43K bytes - Viewed (0) -
src/math/big/int_test.go
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Thu May 23 18:42:28 UTC 2024 - 58.5K bytes - Viewed (0) -
src/runtime/time.go
// // The wakeTime method implementation reads minWhenModified *before* minWhenHeap, // so that if the minWhenModified is observed to be 0, that means the minWhenHeap that // follows will include the information that was zeroed out of it. // // Originally Step 3 locked every timer, which made sure any timer update that was // already in progress during Steps 1+2 completed and was observed by Step 3.
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Fri Mar 29 14:36:24 UTC 2024 - 37.5K bytes - Viewed (0) -
src/runtime/arena.go
// Add zero bits up to the bitmap word boundary if zeros > 0 { z := ptrBits - h.valid if z > zeros { z = zeros } h.valid += z zeros -= z } // Find word in bitmap that we're going to write. bitmap := s.heapBits() idx := h.offset / (ptrBits * goarch.PtrSize) // Write remaining bits. if h.valid != h.low { m := uintptr(1)<<h.low - 1 // don't clear existing bits below "low"
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Wed May 08 17:44:56 UTC 2024 - 37.9K bytes - Viewed (0) -
src/cmd/compile/internal/liveness/plive.go
// zero it in case that malloc causes a stack scan. n.SetNeedzero(true) livedefer.Set(int32(i)) } if n.OpenDeferSlot() { // Open-coded defer args slots must be live // everywhere in a function, since a panic can // occur (almost) anywhere. Because it is live // everywhere, it must be zeroed on entry. livedefer.Set(int32(i))
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Fri Jun 07 15:22:22 UTC 2024 - 45.2K bytes - Viewed (0) -
src/cmd/compile/internal/walk/order.go
// For maps tmp is just one word so it hardly matters. r := n.X n.X = o.copyExpr(r) // n.Prealloc is the temp for the iterator. // MapIterType contains pointers and needs to be zeroed. n.Prealloc = o.newTemp(reflectdata.MapIterType(), true) } n.Key = o.exprInPlace(n.Key) n.Value = o.exprInPlace(n.Value) if orderBody { orderBlock(&n.Body, o.free) }
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Fri Mar 08 02:00:33 UTC 2024 - 42.7K bytes - Viewed (0) -
src/runtime/runtime2.go
_Pdead ) // Mutual exclusion locks. In the uncontended case, // as fast as spin locks (just a few user-level instructions), // but on the contention path they sleep in the kernel. // A zeroed Mutex is unlocked (no need to initialize each lock). // Initialization is helpful for static lock ranking, but not required. type mutex struct { // Empty struct if lock ranking is disabled, otherwise includes the lock rank
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Thu May 30 17:57:37 UTC 2024 - 47.9K bytes - Viewed (0) -
src/cmd/cgo/out.go
// The results part of the argument structure must be // initialized to 0 so the write barriers generated by // the assignments to these fields in Go are safe. // // We use a local static variable to get the zeroed // value of the argument type. This avoids including // string.h for memset, and is also robust to C++ // types with constructors. Both GCC and LLVM optimize // this into just zeroing _cgo_a.
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Fri Mar 29 16:41:10 UTC 2024 - 59.6K bytes - Viewed (0) -
src/runtime/map.go
} func mapaccess1_fat(t *maptype, h *hmap, key, zero unsafe.Pointer) unsafe.Pointer { e := mapaccess1(t, h, key) if e == unsafe.Pointer(&zeroVal[0]) { return zero } return e } func mapaccess2_fat(t *maptype, h *hmap, key, zero unsafe.Pointer) (unsafe.Pointer, bool) { e := mapaccess1(t, h, key) if e == unsafe.Pointer(&zeroVal[0]) { return zero, false } return e, true }
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Wed May 29 17:58:53 UTC 2024 - 57.6K bytes - Viewed (0) -
pkg/kubelet/kubelet_node_status_test.go
assert.NoError(t, err) for i, cond := range updatedNode.Status.Conditions { assert.False(t, cond.LastHeartbeatTime.IsZero(), "LastHeartbeatTime for %v condition is zero", cond.Type) assert.False(t, cond.LastTransitionTime.IsZero(), "LastTransitionTime for %v condition is zero", cond.Type) updatedNode.Status.Conditions[i].LastHeartbeatTime = metav1.Time{} updatedNode.Status.Conditions[i].LastTransitionTime = metav1.Time{} }
Registered: Sat Jun 15 01:39:40 UTC 2024 - Last Modified: Wed May 08 19:23:19 UTC 2024 - 115.8K bytes - Viewed (0)