- Sort Score
- Results per page: 10
- Languages All
Results 11 - 20 of 250 for zeroing (0.42 sec)
-
src/cmd/compile/internal/ssa/_gen/RISCV64.rules
(MOVDstore ptr (MOVDconst [0]) mem)))) // Medium 8-aligned zeroing uses a Duff's device // 8 and 128 are magic constants, see runtime/mkduff.go (Zero [s] {t} ptr mem) && s%8 == 0 && s <= 8*128 && t.Alignment()%8 == 0 && !config.noDuffDevice => (DUFFZERO [8 * (128 - s/8)] ptr mem) // Generic zeroing uses a loop (Zero [s] {t} ptr mem) => (LoweredZero [t.Alignment()] ptr
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Thu Mar 07 14:57:07 UTC 2024 - 40.3K bytes - Viewed (0) -
src/runtime/arena.go
// not Linux decides to back this memory with transparent huge // pages. There's latency involved in this zeroing, but the hugepage // gains are almost always worth it. Note: it's important that we // clear even if it's freshly mapped and we know there's no point // to zeroing as *that* is the critical signal to use huge pages. memclrNoHeapPointers(unsafe.Pointer(s.base()), s.elemsize) s.needzero = 0
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Wed May 08 17:44:56 UTC 2024 - 37.9K bytes - Viewed (0) -
src/cmd/compile/internal/ssa/block.go
b.Kind = kind b.ResetControls() b.Aux = nil b.AuxInt = 0 b.Controls[0] = v b.Controls[1] = w v.Uses++ w.Uses++ } // truncateValues truncates b.Values at the ith element, zeroing subsequent elements. // The values in b.Values after i must already have had their args reset, // to maintain correct value uses counts. func (b *Block) truncateValues(i int) { tail := b.Values[i:]
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Wed May 15 15:44:14 UTC 2024 - 12.2K bytes - Viewed (0) -
android/guava/src/com/google/common/primitives/UnsignedBytes.java
* corresponds to the least significant nonzero byte in lw ^ rw, since lw and rw are * little-endian. Long.numberOfTrailingZeros(diff) tells us the least significant * nonzero bit, and zeroing out the first three bits of L.nTZ gives us the shift to get * that least significant nonzero byte. */ int n = Long.numberOfTrailingZeros(lw ^ rw) & ~0x7;
Registered: Wed Jun 12 16:38:11 UTC 2024 - Last Modified: Fri Jun 07 22:25:23 UTC 2024 - 18.3K bytes - Viewed (0) -
src/cmd/compile/internal/ssa/_gen/WasmOps.go
{name: "LoweredZero", argLength: 2, reg: regInfo{inputs: []regMask{gp}}, aux: "Int64"}, // large zeroing. arg0=start, arg1=mem, auxint=len, returns mem {name: "LoweredGetClosurePtr", reg: gp01}, // returns wasm.REG_CTXT, the closure pointer
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Fri Feb 24 00:21:13 UTC 2023 - 17.7K bytes - Viewed (0) -
guava/src/com/google/common/primitives/UnsignedBytes.java
* corresponds to the least significant nonzero byte in lw ^ rw, since lw and rw are * little-endian. Long.numberOfTrailingZeros(diff) tells us the least significant * nonzero bit, and zeroing out the first three bits of L.nTZ gives us the shift to get * that least significant nonzero byte. */ int n = Long.numberOfTrailingZeros(lw ^ rw) & ~0x7;
Registered: Wed Jun 12 16:38:11 UTC 2024 - Last Modified: Fri Jun 07 22:25:23 UTC 2024 - 18.3K bytes - Viewed (0) -
src/cmd/compile/internal/ssa/_gen/LOONG64Ops.go
clobbers: buildReg("R20 R21 R1"), }, typ: "Mem", faultOnNilArg0: true, faultOnNilArg1: true, }, // large or unaligned zeroing // arg0 = address of memory to zero (in R20, changed as side effect) // arg1 = address of the last element to zero // arg2 = mem // auxint = alignment // returns mem // MOVx R0, (R20)
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Tue Nov 21 19:04:19 UTC 2023 - 25.2K bytes - Viewed (0) -
src/cmd/compile/internal/ssa/_gen/MIPS64Ops.go
reg: regInfo{ inputs: []regMask{buildReg("R2"), buildReg("R1")}, clobbers: buildReg("R1 R2 R31"), }, faultOnNilArg0: true, faultOnNilArg1: true, }, // large or unaligned zeroing // arg0 = address of memory to zero (in R1, changed as side effect) // arg1 = address of the last element to zero // arg2 = mem // auxint = alignment // returns mem // SUBV $8, R1
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Wed May 24 03:36:31 UTC 2023 - 25.5K bytes - Viewed (0) -
src/cmd/compile/internal/ssa/_gen/MIPSOps.go
{name: "LoweredAtomicOr", argLength: 3, reg: gpstore, asm: "OR", faultOnNilArg0: true, hasSideEffects: true, unsafePoint: true}, // large or unaligned zeroing // arg0 = address of memory to zero (in R1, changed as side effect) // arg1 = address of the last element to zero // arg2 = mem // auxint = alignment // returns mem // SUBU $4, R1
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Wed May 24 14:43:03 UTC 2023 - 24K bytes - Viewed (0) -
test/prove.go
b := make([]int, 5) c := make([]int, 5) for i := -1; i <= 0; i-- { b[i] = i n++ if n > 10 { break } } useSlice(a) useSlice(c) } // Check that prove is zeroing these right shifts of positive ints by bit-width - 1. // e.g (Rsh64x64 <t> n (Const64 <typ.UInt64> [63])) && ft.isNonNegative(n) -> 0 func sh64(n int64) int64 { if n < 0 { return n }
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Tue Jan 23 00:02:36 UTC 2024 - 21.2K bytes - Viewed (0)