- Sort: Score
- Results per page: 10
- Languages: All
Results 41 - 50 of 503 for sP (0.03 sec)
-
src/cmd/compile/internal/ssa/allocators.go
s = make([]*Value, 1<<b) } else { sp := v.(*[]*Value) s = *sp *sp = nil c.hdrValueSlice = append(c.hdrValueSlice, sp) } s = s[:n] return s } func (c *Cache) freeValueSlice(s []*Value) { for i := range s { s[i] = nil } b := bits.Len(uint(cap(s)) - 1) var sp *[]*Value if len(c.hdrValueSlice) == 0 { sp = new([]*Value) } else { sp = c.hdrValueSlice[len(c.hdrValueSlice)-1]
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Wed Feb 15 23:00:54 UTC 2023 - 7.4K bytes - Viewed (0) -
pkg/kubelet/server/stats/summary.go
func (sp *summaryProviderImpl) GetCPUAndMemoryStats(ctx context.Context) (*statsapi.Summary, error) { // TODO(timstclair): Consider returning a best-effort response if any of // the following errors occur. node, err := sp.provider.GetNode() if err != nil { return nil, fmt.Errorf("failed to get node info: %v", err) } nodeConfig := sp.provider.GetNodeConfig() rootStats, err := sp.provider.GetCgroupCPUAndMemoryStats("/", false)
Registered: Sat Jun 15 01:39:40 UTC 2024 - Last Modified: Wed Nov 01 18:46:33 UTC 2023 - 5.1K bytes - Viewed (0) -
src/runtime/signal_loong64.go
// Push the LR to stack, as we'll clobber it in order to // push the call. The function being pushed is responsible // for restoring the LR and setting the SP back. // This extra slot is known to gentraceback. sp := c.sp() - 8 c.set_sp(sp) *(*uint64)(unsafe.Pointer(uintptr(sp))) = c.link() // Set up PC and LR to pretend the function being signaled // calls targetPC at resumePC. c.set_link(uint64(resumePC))
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Tue Mar 21 06:51:28 UTC 2023 - 3K bytes - Viewed (0) -
src/runtime/signal_riscv64.go
// Push the LR to stack, as we'll clobber it in order to // push the call. The function being pushed is responsible // for restoring the LR and setting the SP back. // This extra slot is known to gentraceback. sp := c.sp() - goarch.PtrSize c.set_sp(sp) *(*uint64)(unsafe.Pointer(uintptr(sp))) = c.ra() // Set up PC and LR to pretend the function being signaled // calls targetPC at resumePC. c.set_ra(uint64(resumePC))
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Wed Oct 04 02:55:17 UTC 2023 - 2.9K bytes - Viewed (0) -
src/runtime/signal_arm.go
// Push the LR to stack, as we'll clobber it in order to // push the call. The function being pushed is responsible // for restoring the LR and setting the SP back. // This extra slot is known to gentraceback. sp := c.sp() - 4 c.set_sp(sp) *(*uint32)(unsafe.Pointer(uintptr(sp))) = c.lr() // Set up PC and LR to pretend the function being signaled // calls targetPC at resumePC. c.set_lr(uint32(resumePC)) c.set_pc(uint32(targetPC))
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Thu Oct 28 18:17:57 UTC 2021 - 2.5K bytes - Viewed (0) -
src/cmd/asm/internal/asm/testdata/riscv64error.s
MOV $8(SP), (X5) // ERROR "address load must target register" MOVB $8(SP), X5 // ERROR "unsupported address load" MOVH $8(SP), X5 // ERROR "unsupported address load" MOVW $8(SP), X5 // ERROR "unsupported address load" MOVF $8(SP), X5 // ERROR "unsupported address load" MOV $1234, 0(SP) // ERROR "constant load must target register" MOV $1234, 8(SP) // ERROR "constant load must target register"
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Sun Apr 07 03:32:27 UTC 2024 - 2.8K bytes - Viewed (0) -
src/crypto/aes/gcm_amd64.s
increment(5) MOVOU T0, (8*16 + 6*16)(SP) increment(6) MOVOU T0, (8*16 + 7*16)(SP) increment(7) MOVOU (8*16 + 0*16)(SP), B0 MOVOU (8*16 + 1*16)(SP), B1 MOVOU (8*16 + 2*16)(SP), B2 MOVOU (8*16 + 3*16)(SP), B3 MOVOU (8*16 + 4*16)(SP), B4 MOVOU (8*16 + 5*16)(SP), B5 MOVOU (8*16 + 6*16)(SP), B6 MOVOU (8*16 + 7*16)(SP), B7 aesRound(1) increment(0) aesRound(2) increment(1) aesRound(3)
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Mon Mar 04 17:29:44 UTC 2024 - 23.4K bytes - Viewed (0) -
src/crypto/sha1/sha1block_386.s
// // The stack holds the intermediate word array - 16 uint32s - at 0(SP) up to 64(SP). // The saved a, b, c, d, e (R11 through R15 on amd64) are at 64(SP) up to 84(SP). // The saved limit pointer (DI on amd64) is at 84(SP). // The saved data pointer (SI on amd64) is at 88(SP). #define LOAD(index, e) \ MOVL 88(SP), SI; \ MOVL (index*4)(SI), DI; \ BSWAPL DI; \ MOVL DI, (index*4)(SP); \ ADDL DI, e #define SHUFFLE(index, e) \
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Mon Mar 04 17:29:44 UTC 2024 - 6K bytes - Viewed (0) -
src/runtime/asm_wasm.s
TEXT runtime·morestack(SB), NOSPLIT, $0-0 // R1 = g.m MOVD g_m(g), R1 // R2 = g0 MOVD m_g0(R1), R2 // Set g->sched to context in f. NOP SP // tell vet SP changed - stop checking offsets MOVD 0(SP), g_sched+gobuf_pc(g) MOVD $8(SP), g_sched+gobuf_sp(g) // f's SP MOVD CTXT, g_sched+gobuf_ctxt(g) // Cannot grow scheduler stack (m->g0). Get g Get R2 I64Eq If
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Mon Nov 20 21:26:51 UTC 2023 - 11.8K bytes - Viewed (0) -
src/runtime/sys_x86.go
// adjust Gobuf as if it executed a call to fn with context ctxt // and then stopped before the first instruction in fn. func gostartcall(buf *gobuf, fn, ctxt unsafe.Pointer) { sp := buf.sp sp -= goarch.PtrSize *(*uintptr)(unsafe.Pointer(sp)) = buf.pc buf.sp = sp buf.pc = uintptr(fn) buf.ctxt = ctxt
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Thu Oct 28 18:17:57 UTC 2021 - 552 bytes - Viewed (0)