- Sort Score
- Results per page: 10
- Languages All
Results 81 - 90 of 227 for aligned (0.16 sec)
-
staging/src/k8s.io/apiserver/pkg/cel/value.go
} return types.True } // Find returns the value for the key in the map, if found. func (m *MapValue) Find(name ref.Val) (ref.Val, bool) { // Currently only maps with string keys are supported as this is best aligned with JSON, // and also much simpler to support. n, ok := name.(types.String) if !ok { return types.MaybeNoSuchOverloadErr(n), true } nameStr := string(n) field, found := m.fieldMap[nameStr]
Registered: Sat Jun 15 01:39:40 UTC 2024 - Last Modified: Mon Oct 10 22:05:55 UTC 2022 - 20.5K bytes - Viewed (0) -
src/runtime/asm_arm.s
// Assert ctxt is zero. See func save. MOVW (g_sched+gobuf_ctxt)(g), R11 TST R11, R11 B.EQ 2(PC) BL runtime·abort(SB) RET // func asmcgocall_no_g(fn, arg unsafe.Pointer) // Call fn(arg) aligned appropriately for the gcc ABI. // Called on a system stack, and there may be no g yet (during needm). TEXT ·asmcgocall_no_g(SB),NOSPLIT,$0-8 MOVW fn+0(FP), R1 MOVW arg+4(FP), R0 MOVW R13, R2 SUB $32, R13
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Fri Feb 23 21:00:52 UTC 2024 - 32.1K bytes - Viewed (0) -
src/encoding/gob/debug.go
r *peekReader wireType map[typeId]*wireType tmp []byte // scratch space for decoding uints. } // dump prints the next nBytes of the input. // It arranges to print the output aligned from call to // call, to make it easy to see what has been consumed. func (deb *debugger) dump(format string, args ...any) { if !dumpBytes { return } fmt.Fprintf(os.Stderr, format+" ", args...)
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Fri Jan 20 09:34:41 UTC 2023 - 18.3K bytes - Viewed (0) -
src/runtime/mpagealloc_test.go
// unmapped summary memory and crashes. // Figure out how many chunks are in a physical page, then align BaseChunkIdx // to a physical page in the chunk summary array. Here we only assume that // each summary array is aligned to some physical page. sumsPerPhysPage := ChunkIdx(PhysPageSize / PallocSumBytes) baseChunkIdx := BaseChunkIdx &^ (sumsPerPhysPage - 1)
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Mon Dec 06 19:16:48 UTC 2021 - 32.6K bytes - Viewed (0) -
src/vendor/golang.org/x/crypto/internal/poly1305/sum_s390x.s
// 4: h₂₆[3]->h₂₆[4] // // The result is that all of the limbs are limited to 26-bits // except for h₂₆[1] and h₂₆[4] which are limited to 27-bits. // // Note that although each limb is aligned at 26-bit intervals // they may contain values that exceed 2²⁶ - 1, hence the need // to carry the excess bits in each limb. #define REDUCE(h0, h1, h2, h3, h4) \ VESRLG $26, h0, T_0 \ VESRLG $26, h3, T_1 \
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Thu Oct 19 23:33:33 UTC 2023 - 17.5K bytes - Viewed (0) -
src/cmd/link/internal/ld/pe.go
var oh pe.OptionalHeader32 l = binary.Size(&oh) } if ctxt.LinkMode == LinkExternal { // .rdata section will contain "masks" and "shifts" symbols, and they // need to be aligned to 16-bytes. So make all sections aligned // to 32-byte and mark them all IMAGE_SCN_ALIGN_32BYTES so external // linker will honour that requirement. PESECTALIGN = 32 PEFILEALIGN = 0
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Thu Nov 09 19:01:27 UTC 2023 - 48.8K bytes - Viewed (0) -
src/runtime/mgcscavenge_test.go
check(rand.Uint64(), m) if m > 1 { // For m != 1, let's construct a slightly more interesting // random test. Generate a bitmap which is either 0 or // randomly set bits for each m-aligned group of m bits. val := uint64(0) for n := uint(0); n < 64; n += m { // For each group of m bits, flip a coin: // * Leave them as zero. // * Set them randomly. if rand.Uint64()%2 == 0 {
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Mon Mar 25 19:53:03 UTC 2024 - 25.2K bytes - Viewed (0) -
src/crypto/aes/asm_ppc64x.s
LVX (PTR), ESPERM ADD $0x10, PTR #else MOVD $·rcon+0x10(SB), PTR // PTR points to rcon addr (skipping permute vector) #endif // Get key from memory and write aligned into VR P8_LXVB16X(INP, R0, IN0) ADD $0x10, INP, INP MOVD $0x20, TEMP CMPW ROUNDS, $12 LVX (PTR)(R0), RCON // lvx 4,0,6 Load first 16 bytes into RCON LVX (PTR)(TEMP), MASK
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Mon May 20 18:05:32 UTC 2024 - 18.6K bytes - Viewed (0) -
doc/asm.html
<p> The <code>PCALIGN</code> pseudo-instruction is used to indicate that the next instruction should be aligned to a specified boundary by padding with no-op instructions. </p> <p> It is currently supported on arm64, amd64, ppc64, loong64 and riscv64. For example, the start of the <code>MOVD</code> instruction below is aligned to 32 bytes: <pre> PCALIGN $32 MOVD $2, R0 </pre> </p>
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Tue Nov 28 19:15:27 UTC 2023 - 36.3K bytes - Viewed (1) -
src/syscall/syscall_linux_test.go
strconv.FormatUint(uint64(uid), 10) if got != want { if filesystemIsNoSUID(tmpBinary) { t.Skip("skipping test when temp dir is mounted nosuid") } // formatted so the values are aligned for easier comparison t.Errorf("expected %s,\ngot %s", want, got) } } // filesystemIsNoSUID reports whether the filesystem for the given // path is mounted nosuid.
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Thu May 23 01:00:11 UTC 2024 - 23K bytes - Viewed (0)