- Sort Score
- Results per page: 10
- Languages All
Results 21 - 30 of 38 for ctr (0.63 sec)
-
src/crypto/aes/aes_gcm.go
//go:noescape func gcmAesData(productTable *[256]byte, data []byte, T *[16]byte) //go:noescape func gcmAesEnc(productTable *[256]byte, dst, src []byte, ctr, T *[16]byte, ks []uint32) //go:noescape func gcmAesDec(productTable *[256]byte, dst, src []byte, ctr, T *[16]byte, ks []uint32) //go:noescape func gcmAesFinish(productTable *[256]byte, tagMask, T *[16]byte, pLen, dLen uint64) const ( gcmBlockSize = 16
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Wed Mar 27 18:23:49 UTC 2024 - 5.4K bytes - Viewed (0) -
src/runtime/signal_ppc64x.go
print("r28 ", hex(c.r28()), "\t") print("r29 ", hex(c.r29()), "\n") print("r30 ", hex(c.r30()), "\t") print("r31 ", hex(c.r31()), "\n") print("pc ", hex(c.pc()), "\t") print("ctr ", hex(c.ctr()), "\n") print("link ", hex(c.link()), "\t") print("xer ", hex(c.xer()), "\n") print("ccr ", hex(c.ccr()), "\t") print("trap ", hex(c.trap()), "\n") } //go:nosplit //go:nowritebarrierrec
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Fri Sep 08 15:08:04 UTC 2023 - 3.7K bytes - Viewed (0) -
src/crypto/cipher/cfb.go
x.outUsed = 0 } if x.decrypt { // We can precompute a larger segment of the // keystream on decryption. This will allow // larger batches for xor, and we should be // able to match CTR/OFB performance. copy(x.next[x.outUsed:], src) } n := subtle.XORBytes(dst, src, x.out[x.outUsed:]) if !x.decrypt { copy(x.next[x.outUsed:], dst) } dst = dst[n:] src = src[n:] x.outUsed += n
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Fri Oct 13 17:09:47 UTC 2023 - 2K bytes - Viewed (0) -
src/vendor/golang.org/x/crypto/chacha20/chacha_ppc64le.s
ADD $-64, LEN STXVW4X VS62, (OUT)(R10) ADD $64, OUT MOVD $10, R14 MOVD R14, CTR BNE loop_outer_vsx done_vsx: // Increment counter by number of 64 byte blocks MOVD (CNT), R14 ADD BLOCKS, R14 MOVD R14, (CNT) RET tail_vsx: ADD $32, R1, R11 MOVD LEN, CTR // Save values on stack to copy from STXVW4X VS32, (R11)(R0) STXVW4X VS36, (R11)(R8)
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Fri Apr 05 22:18:42 UTC 2024 - 9K bytes - Viewed (0) -
src/runtime/defs_aix_ppc64.go
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Sat May 20 21:27:51 UTC 2023 - 3.6K bytes - Viewed (0) -
src/runtime/defs_linux_ppc64.go
_O_TRUNC = 0x200 _O_NONBLOCK = 0x800 _O_CLOEXEC = 0x80000 _SA_RESTORER = 0 ) type ptregs struct { gpr [32]uint64 nip uint64 msr uint64 orig_gpr3 uint64 ctr uint64 link uint64 xer uint64 ccr uint64 softe uint64 trap uint64 dar uint64 dsisr uint64 result uint64 } type vreg struct {
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Tue Aug 22 19:05:10 UTC 2023 - 3.7K bytes - Viewed (0) -
src/cmd/internal/obj/ppc64/list9.go
} if r == REG_CR { return "CR" } if REG_SPR0 <= r && r <= REG_SPR0+1023 { switch r { case REG_XER: return "XER" case REG_LR: return "LR" case REG_CTR: return "CTR" } return fmt.Sprintf("SPR(%d)", r-REG_SPR0) } if r == REG_FPSCR { return "FPSCR" } if r == REG_MSR { return "MSR" } return fmt.Sprintf("Rgok(%d)", r-obj.RBasePPC64) }
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Thu Sep 15 21:12:43 UTC 2022 - 3.3K bytes - Viewed (0) -
src/crypto/internal/bigmod/nat_ppc64x.s
// If other callers are added this function might // need to change. TEXT addMulVVWx<>(SB), NOSPLIT, $0 MOVD z+0(FP), R3 MOVD x+8(FP), R4 MOVD y+16(FP), R5 MOVD $0, R9 // R9 = c = 0 MOVD R6, CTR // Initialize loop counter PCALIGN $16 loop: MOVD 0(R4), R14 // x[i] MOVD 8(R4), R16 // x[i+1] MOVD 16(R4), R18 // x[i+2] MOVD 24(R4), R20 // x[i+3] MOVD 0(R3), R15 // z[i] MOVD 8(R3), R17 // z[i+1]
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Thu Jan 25 19:32:43 UTC 2024 - 1.9K bytes - Viewed (0) -
src/runtime/defs_linux_ppc64le.go
_O_TRUNC = 0x200 _O_NONBLOCK = 0x800 _O_CLOEXEC = 0x80000 _SA_RESTORER = 0 ) type ptregs struct { gpr [32]uint64 nip uint64 msr uint64 orig_gpr3 uint64 ctr uint64 link uint64 xer uint64 ccr uint64 softe uint64 trap uint64 dar uint64 dsisr uint64 result uint64 } type vreg struct {
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Tue Aug 22 19:05:10 UTC 2023 - 3.7K bytes - Viewed (0) -
src/crypto/subtle/xor_ppc64x.s
CMPU R6, $8, CR6 // Check if 8 ≤ n < 64 bytes BLE CR6, small // <= 8 BLT CR7, xor32 // Case for 32 ≤ n < 64 bytes // Case for n ≥ 64 bytes preloop64: SRD $6, R6, R7 // Set up loop counter MOVD R7, CTR MOVD $16, R10 MOVD $32, R14 MOVD $48, R15 ANDCC $63, R6, R9 // Check for tailing bytes for later PCALIGN $16 // Case for >= 64 bytes // Process 64 bytes per iteration // Load 4 vectors of a and b
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Wed May 22 18:17:17 UTC 2024 - 2.9K bytes - Viewed (0)