- Sort Score
- Results per page: 10
- Languages All
Results 11 - 20 of 24 for Or8 (0.02 sec)
-
src/internal/runtime/atomic/atomic_loong64.s
DBAR MOVB R5, 0(R4) DBAR RET TEXT ·Store64(SB), NOSPLIT, $0-16 MOVV ptr+0(FP), R4 MOVV val+8(FP), R5 DBAR MOVV R5, 0(R4) DBAR RET // void Or8(byte volatile*, byte); TEXT ·Or8(SB), NOSPLIT, $0-9 MOVV ptr+0(FP), R4 MOVBU val+8(FP), R5 // Align ptr down to 4 bytes so we can use 32-bit load/store. MOVV $~3, R6 AND R4, R6 // R7 = ((ptr & 3) * 8) AND $3, R4, R7
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Mon Mar 25 19:53:03 UTC 2024 - 6.3K bytes - Viewed (0) -
src/internal/runtime/atomic/atomic_mipsx.go
//go:noescape func LoadAcq(ptr *uint32) uint32 //go:noescape func LoadAcquintptr(ptr *uintptr) uintptr //go:noescape func And8(ptr *uint8, val uint8) //go:noescape func Or8(ptr *uint8, val uint8) //go:noescape func And(ptr *uint32, val uint32) //go:noescape func Or(ptr *uint32, val uint32) //go:noescape func And32(ptr *uint32, val uint32) uint32 //go:noescape
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Fri May 17 20:08:37 UTC 2024 - 3.2K bytes - Viewed (0) -
src/internal/runtime/atomic/atomic_test.go
t.Fatalf("a[%v] not cleared: want %#x, got %#x", i, uint32(0), v) } } } func TestOr8(t *testing.T) { // Basic sanity check. x := uint8(0) for i := uint8(0); i < 8; i++ { atomic.Or8(&x, 1<<i) if r := (uint8(1) << (i + 1)) - 1; x != r { t.Fatalf("setting bit %#x: want %#x, got %#x", uint8(1)<<i, r, x) } } // Start with every bit in array set to 0. a := make([]uint8, 1<<12)
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Mon Mar 25 19:53:03 UTC 2024 - 8.5K bytes - Viewed (0) -
src/internal/runtime/atomic/atomic_s390x.s
TEXT ·Xchgint64(SB), NOSPLIT, $0-24 BR ·Xchg64(SB) // func Xchguintptr(ptr *uintptr, new uintptr) uintptr TEXT ·Xchguintptr(SB), NOSPLIT, $0-24 BR ·Xchg64(SB) // func Or8(addr *uint8, v uint8) TEXT ·Or8(SB), NOSPLIT, $0-9 MOVD ptr+0(FP), R3 MOVBZ val+8(FP), R4 // We don't have atomic operations that work on individual bytes so we // need to align addr down to a word boundary and create a mask
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Mon Mar 25 19:53:03 UTC 2024 - 7.1K bytes - Viewed (0) -
src/internal/runtime/atomic/atomic_arm64.go
//go:noescape func LoadAcq(addr *uint32) uint32 //go:noescape func LoadAcq64(ptr *uint64) uint64 //go:noescape func LoadAcquintptr(ptr *uintptr) uintptr //go:noescape func Or8(ptr *uint8, val uint8) //go:noescape func And8(ptr *uint8, val uint8) //go:noescape func And(ptr *uint32, val uint32) //go:noescape func Or(ptr *uint32, val uint32) //go:noescape
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Mon Mar 25 19:53:03 UTC 2024 - 2.1K bytes - Viewed (0) -
src/internal/runtime/atomic/atomic_amd64.go
//go:noescape func Xchguintptr(ptr *uintptr, new uintptr) uintptr //go:nosplit //go:noinline func Load8(ptr *uint8) uint8 { return *ptr } //go:noescape func And8(ptr *uint8, val uint8) //go:noescape func Or8(ptr *uint8, val uint8) //go:noescape func And(ptr *uint32, val uint32) //go:noescape func Or(ptr *uint32, val uint32) //go:noescape func And32(ptr *uint32, val uint32) uint32 //go:noescape
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Mon Mar 25 19:53:03 UTC 2024 - 2.5K bytes - Viewed (0) -
src/internal/runtime/atomic/atomic_ppc64x.s
MOVW val+8(FP), R4 LWSYNC MOVW R4, 0(R3) RET TEXT ·StoreRel64(SB), NOSPLIT, $0-16 MOVD ptr+0(FP), R3 MOVD val+8(FP), R4 LWSYNC MOVD R4, 0(R3) RET // void ·Or8(byte volatile*, byte); TEXT ·Or8(SB), NOSPLIT, $0-9 MOVD ptr+0(FP), R3 MOVBZ val+8(FP), R4 LWSYNC again: LBAR (R3), R6 OR R4, R6 STBCCC R6, (R3) BNE again RET // void ·And8(byte volatile*, byte);
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Mon Mar 25 19:53:03 UTC 2024 - 7.5K bytes - Viewed (0) -
src/internal/runtime/atomic/atomic_mips64x.s
SYNC MOVB R2, 0(R1) SYNC RET TEXT ·Store64(SB), NOSPLIT, $0-16 MOVV ptr+0(FP), R1 MOVV val+8(FP), R2 SYNC MOVV R2, 0(R1) SYNC RET // void Or8(byte volatile*, byte); TEXT ·Or8(SB), NOSPLIT, $0-9 MOVV ptr+0(FP), R1 MOVBU val+8(FP), R2 // Align ptr down to 4 bytes so we can use 32-bit load/store. MOVV $~3, R3 AND R1, R3 // Compute val shift. #ifdef GOARCH_mips64
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Sat May 11 21:29:34 UTC 2024 - 7.2K bytes - Viewed (0) -
src/internal/runtime/atomic/atomic_386.s
// It can be replaced with MFENCE, but MFENCE was introduced only on the Pentium4 (SSE2). XORL AX, AX LOCK XADDL AX, (SP) RET // void ·Or8(byte volatile*, byte); TEXT ·Or8(SB), NOSPLIT, $0-5 MOVL ptr+0(FP), AX MOVB val+4(FP), BX LOCK ORB BX, (AX) RET // void ·And8(byte volatile*, byte); TEXT ·And8(SB), NOSPLIT, $0-5 MOVL ptr+0(FP), AX
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Mon Mar 25 19:53:03 UTC 2024 - 6.5K bytes - Viewed (0) -
src/internal/runtime/atomic/atomic_riscv64.s
MOV ptr+0(FP), A0 MOVBU val+8(FP), A1 AND $3, A0, A2 AND $-4, A0 SLL $3, A2 XOR $255, A1 SLL A2, A1 XOR $-1, A1 AMOANDW A1, (A0), ZERO RET // func Or8(ptr *uint8, val uint8) TEXT ·Or8(SB), NOSPLIT, $0-9 MOV ptr+0(FP), A0 MOVBU val+8(FP), A1 AND $3, A0, A2 AND $-4, A0 SLL $3, A2 SLL A2, A1 AMOORW A1, (A0), ZERO RET // func And(ptr *uint32, val uint32)
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Mon Mar 25 19:53:03 UTC 2024 - 7K bytes - Viewed (0)