- Sort: Score
- Results per page: 10
- Languages: All
Results 1 - 10 of 186 for xadd (0.04 sec)
-
src/cmd/link/internal/arm64/asm.go
if label != 0 { xadd = ldr.SymValue(rs) + xadd - ldr.SymValue(label) rs = label } if xadd != signext24(xadd) { ldr.Errorf(s, "internal error: relocation addend overflow: %s+0x%x", ldr.SymName(rs), xadd) } } if rt == objabi.R_CALLARM64 && xadd != 0 { label := ldr.Lookup(offsetLabelName(ldr, rs, xadd), ldr.SymVersion(rs)) if label != 0 {
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Tue Jan 30 20:09:45 UTC 2024 - 47K bytes - Viewed (1) -
src/cmd/link/internal/loong64/asm.go
out.Write64(uint64(r.Xadd)) case 8: out.Write64(uint64(sectoff)) out.Write64(uint64(elf.R_LARCH_64) | uint64(elfsym)<<32) out.Write64(uint64(r.Xadd)) default: return false } case objabi.R_LOONG64_TLS_LE_LO: out.Write64(uint64(sectoff)) out.Write64(uint64(elf.R_LARCH_TLS_LE_LO12) | uint64(elfsym)<<32) out.Write64(uint64(r.Xadd)) case objabi.R_LOONG64_TLS_LE_HI:
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Tue Feb 27 17:26:07 UTC 2024 - 7.5K bytes - Viewed (0) -
src/internal/runtime/atomic/atomic_386.s
SETEQ ret+12(FP) RET // uint32 Xadd(uint32 volatile *val, int32 delta) // Atomically: // *val += delta; // return *val; TEXT ·Xadd(SB), NOSPLIT, $0-12 MOVL ptr+0(FP), BX MOVL delta+4(FP), AX MOVL AX, CX LOCK XADDL AX, 0(BX) ADDL CX, AX MOVL AX, ret+8(FP) RET TEXT ·Xadd64(SB), NOSPLIT, $0-20 NO_LOCAL_POINTERS // no XADDQ so use CMPXCHG8B loop MOVL ptr+0(FP), BP
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Mon Mar 25 19:53:03 UTC 2024 - 6.5K bytes - Viewed (0) -
src/internal/runtime/atomic/atomic_mipsx.s
MOVW R1, ret+4(FP) RET TEXT ·Load8(SB),NOSPLIT,$0-5 MOVW ptr+0(FP), R1 SYNC MOVB 0(R1), R1 SYNC MOVB R1, ret+4(FP) RET // uint32 Xadd(uint32 volatile *val, int32 delta) // Atomically: // *val += delta; // return *val; TEXT ·Xadd(SB),NOSPLIT,$0-12 MOVW ptr+0(FP), R2 MOVW delta+4(FP), R3 SYNC try_xadd: LL (R2), R1 // R1 = *R2 ADDU R1, R3, R4 MOVW R4, R1 SC R4, (R2) // *R2 = R4
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Sat May 11 21:29:34 UTC 2024 - 4.9K bytes - Viewed (0) -
src/internal/runtime/atomic/atomic_amd64.s
TEXT ·Casuintptr(SB), NOSPLIT, $0-25 JMP ·Cas64(SB) TEXT ·CasRel(SB), NOSPLIT, $0-17 JMP ·Cas(SB) // uint32 Xadd(uint32 volatile *val, int32 delta) // Atomically: // *val += delta; // return *val; TEXT ·Xadd(SB), NOSPLIT, $0-20 MOVQ ptr+0(FP), BX MOVL delta+8(FP), AX MOVL AX, CX LOCK XADDL AX, 0(BX) ADDL CX, AX MOVL AX, ret+16(FP) RET // uint64 Xadd64(uint64 volatile *val, int64 delta)
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Mon Mar 25 19:53:03 UTC 2024 - 5.2K bytes - Viewed (1) -
src/internal/runtime/atomic/atomic_arm.go
} func addrLock(addr *uint64) *spinlock { return &locktab[(uintptr(unsafe.Pointer(addr))>>3)%uintptr(len(locktab))].l } // Atomic add and return new value. // //go:nosplit func Xadd(val *uint32, delta int32) uint32 { for { oval := *val nval := oval + uint32(delta) if Cas(val, oval, nval) { return nval } } } //go:noescape
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Wed May 15 19:57:43 UTC 2024 - 4.8K bytes - Viewed (0) -
src/internal/runtime/atomic/atomic_riscv64.s
// return *val; // func Xadd(ptr *uint32, delta int32) uint32 TEXT ·Xadd(SB), NOSPLIT, $0-20 MOV ptr+0(FP), A0 MOVW delta+8(FP), A1 AMOADDW A1, (A0), A2 ADD A2,A1,A0 MOVW A0, ret+16(FP) RET // func Xadd64(ptr *uint64, delta int64) uint64 TEXT ·Xadd64(SB), NOSPLIT, $0-24 MOV ptr+0(FP), A0 MOV delta+8(FP), A1 AMOADDD A1, (A0), A2 ADD A2, A1, A0 MOV A0, ret+16(FP) RET
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Mon Mar 25 19:53:03 UTC 2024 - 7K bytes - Viewed (0) -
src/internal/runtime/atomic/atomic_wasm.go
// //go:linkname Load //go:linkname Loadp //go:linkname Load64 //go:linkname Loadint32 //go:linkname Loadint64 //go:linkname Loaduintptr //go:linkname LoadAcquintptr //go:linkname Xadd //go:linkname Xaddint32 //go:linkname Xaddint64 //go:linkname Xadd64 //go:linkname Xadduintptr //go:linkname Xchg //go:linkname Xchg64 //go:linkname Xchgint32 //go:linkname Xchgint64 //go:linkname Xchguintptr
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Wed May 15 19:57:43 UTC 2024 - 5.4K bytes - Viewed (0) -
src/internal/runtime/atomic/atomic_s390x.s
// return 1 // } else { // return 0 // } TEXT ·Casp1(SB), NOSPLIT, $0-25 BR ·Cas64(SB) // func Xadd(ptr *uint32, delta int32) uint32 // Atomically: // *ptr += delta // return *ptr TEXT ·Xadd(SB), NOSPLIT, $0-20 MOVD ptr+0(FP), R4 MOVW delta+8(FP), R5 MOVW (R4), R3 repeat: ADD R5, R3, R6 CS R3, R6, (R4) // if R3==(R4) then (R4)=R6 else R3=(R4) BNE repeat MOVW R6, ret+16(FP) RET
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Mon Mar 25 19:53:03 UTC 2024 - 7.1K bytes - Viewed (0) -
src/internal/runtime/atomic/atomic_arm64.go
//go:build arm64 package atomic import ( "internal/cpu" "unsafe" ) const ( offsetARM64HasATOMICS = unsafe.Offsetof(cpu.ARM64.HasATOMICS) ) //go:noescape func Xadd(ptr *uint32, delta int32) uint32 //go:noescape func Xadd64(ptr *uint64, delta int64) uint64 //go:noescape func Xadduintptr(ptr *uintptr, delta uintptr) uintptr //go:noescape
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Mon Mar 25 19:53:03 UTC 2024 - 2.1K bytes - Viewed (0)