- Sort: Score
- Results per page: 10
- Languages: All
Results 1 - 10 of 79 for zeroed (0.12 sec)
-
src/runtime/slice.go
// Added entries [oldLen, newLen) are not initialized by growslice // (although for pointer-containing element types, they are zeroed). They // must be initialized by the caller. // Trailing entries [newLen, newCap) are zeroed. // // growslice's odd calling convention makes the generated code that calls // this function simpler. In particular, it accepts and returns the
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Wed May 29 16:25:21 UTC 2024 - 12.2K bytes - Viewed (0) -
src/runtime/string.go
// The storage is not zeroed. Callers should use // b to set the string contents and then drop b. func rawstring(size int) (s string, b []byte) { p := mallocgc(uintptr(size), nil, false) return unsafe.String((*byte)(p), size), unsafe.Slice((*byte)(p), size) } // rawbyteslice allocates a new byte slice. The byte slice is not zeroed. func rawbyteslice(size int) (b []byte) {
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Thu May 23 01:17:26 UTC 2024 - 13.4K bytes - Viewed (0) -
pkg/kubelet/kubelet_node_status.go
} } return requiresUpdate } // Zeros out extended resource capacity during reconciliation. func (kl *Kubelet) reconcileExtendedResource(initialNode, node *v1.Node) bool { requiresUpdate := updateDefaultResources(initialNode, node) // Check with the device manager to see if node has been recreated, in which case extended resources should be zeroed until they are available
Registered: Sat Jun 15 01:39:40 UTC 2024 - Last Modified: Tue Jun 04 06:25:43 UTC 2024 - 31.1K bytes - Viewed (0) -
src/crypto/internal/bigmod/nat.go
func (x *Nat) IsZero() choice { // Eliminate bounds checks in the loop. size := len(x.limbs) xLimbs := x.limbs[:size] zero := yes for i := 0; i < size; i++ { zero &= ctEq(xLimbs[i], 0) } return zero } // cmpGeq returns 1 if x >= y, and 0 otherwise. // // Both operands must have the same announced length. func (x *Nat) cmpGeq(y *Nat) choice {
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Mon May 13 18:57:38 UTC 2024 - 24K bytes - Viewed (0) -
src/cmd/internal/obj/link.go
// those bulk allocators should always be used, rather than new(Prog). // // The other fields not yet mentioned are for use by the back ends and should // be left zeroed by creators of Prog lists. type Prog struct { Ctxt *Link // linker context Link *Prog // next Prog in linked list From Addr // first source operand
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Wed May 15 19:57:43 UTC 2024 - 33.1K bytes - Viewed (0) -
src/net/netip/netip.go
func (ip Addr) appendTo6(ret []byte) []byte { zeroStart, zeroEnd := uint8(255), uint8(255) for i := uint8(0); i < 8; i++ { j := i for j < 8 && ip.v6u16(j) == 0 { j++ } if l := j - i; l >= 2 && l > zeroEnd-zeroStart { zeroStart, zeroEnd = i, j } } for i := uint8(0); i < 8; i++ { if i == zeroStart { ret = append(ret, ':', ':') i = zeroEnd if i >= 8 { break }
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Tue Jun 04 17:10:01 UTC 2024 - 43.2K bytes - Viewed (0) -
src/cmd/compile/internal/liveness/plive.go
// zero it in case that malloc causes a stack scan. n.SetNeedzero(true) livedefer.Set(int32(i)) } if n.OpenDeferSlot() { // Open-coded defer args slots must be live // everywhere in a function, since a panic can // occur (almost) anywhere. Because it is live // everywhere, it must be zeroed on entry. livedefer.Set(int32(i))
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Fri Jun 07 15:22:22 UTC 2024 - 45.2K bytes - Viewed (0) -
src/runtime/runtime2.go
_Pdead ) // Mutual exclusion locks. In the uncontended case, // as fast as spin locks (just a few user-level instructions), // but on the contention path they sleep in the kernel. // A zeroed Mutex is unlocked (no need to initialize each lock). // Initialization is helpful for static lock ranking, but not required. type mutex struct { // Empty struct if lock ranking is disabled, otherwise includes the lock rank
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Thu May 30 17:57:37 UTC 2024 - 47.9K bytes - Viewed (0) -
cmd/metacache-entries.go
merged = append(merged, b[0]) b = b[1:] } if limit > 0 && len(merged) >= limit { break } } // Append anything left. if limit < 0 || len(merged) < limit { merged = append(merged, a...) merged = append(merged, b...) } m.o = merged } // filterPrefix will filter m to only contain entries with the specified prefix. func (m *metaCacheEntriesSorted) filterPrefix(s string) {
Registered: Sun Jun 16 00:44:34 UTC 2024 - Last Modified: Mon Jun 10 04:34:26 UTC 2024 - 24K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/transforms/executor_tpuv1_island_coarsening.cc
Operation* first_op_after = islands.back()->getNextNode(); // We create the merged island at the location of the first island that was // merged (excluding special TPU input/output ops). IslandOp new_island = CreateMergedIsland(island, islands, wrapped_ops); // Ensure dominance by sorting the range of islands that were merged. return SortTopologically(Block::iterator(new_island.getOperation()),
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri May 17 17:58:54 UTC 2024 - 27.6K bytes - Viewed (0)