Results 31 - 40 of 185 for andc (0.04 sec)

  1. src/crypto/md5/md5block_386.s

    	LEAL	const(a)(DI*1), a; \
    	ANDL	b, BP; \
    	XORL	d, BP; \
    	MOVL	(index*4)(SI), DI; \
    	ADDL	BP, a; \
    	ROLL	$shift, a; \
    	MOVL	c, BP; \
    	ADDL	b, a
    
    #define ROUND2(a, b, c, d, index, const, shift) \
    	LEAL	const(a)(DI*1),a; \
    	MOVL	d,		DI; \
    	ANDL	b,		DI; \
    	MOVL	d,		BP; \
    	NOTL	BP; \
    	ANDL	c,		BP; \
    	ORL	DI,		BP; \
    	MOVL	(index*4)(SI),DI; \
    	ADDL	BP,		a; \
    	ROLL	$shift,	a; \
    	ADDL	b,		a
    
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Mon Mar 04 17:29:44 UTC 2024
    - 4.5K bytes
    - Viewed (0)
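
    The ROUND2 macro above computes MD5's G mixing function with an ANDL/NOTL/ANDL/ORL sequence before the add-rotate-add tail. For reference, a plain Go sketch of the same function (illustrative only, not the shipped code):

    	// g is what MOVL d,DI; ANDL b,DI; MOVL d,BP; NOTL BP; ANDL c,BP; ORL DI,BP
    	// evaluates: G(b, c, d) = (b AND d) OR (c AND NOT d).
    	func g(b, c, d uint32) uint32 {
    		return (b & d) | (c &^ d)
    	}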
  2. src/internal/bytealg/compare_386.s

    	CMPL	BX, CX
    	JEQ	allsame
    
    diff4:
    	BSWAPL	BX	// reverse order of bytes
    	BSWAPL	CX
    	XORL	BX, CX	// find bit differences
    	BSRL	CX, CX	// index of highest bit difference
    	SHRL	CX, BX	// move a's bit to bottom
    	ANDL	$1, BX	// mask bit
    	LEAL	-1(BX*2), BX // 1/0 => +1/-1
    	MOVL	BX, (AX)
    	RET
    
    	// 0-3 bytes in common
    small:
    	LEAL	(BP*8), CX
    	NEGL	CX
    	JEQ	allsame
    
    	// load si
    	CMPB	SI, $0xfc
    	JA	si_high
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Mon Aug 23 21:22:58 UTC 2021
    - 2.6K bytes
    - Viewed (0)
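
    The diff4 block resolves a 4-byte comparison without a byte loop: byte-swap both words into lexicographic order, XOR to expose the differences, BSR to locate the highest differing bit, then map a's bit at that position to +1 or -1. A hedged Go sketch of the same idea using math/bits (the function name is mine, not from the source file):

    	import "math/bits"

    	// diff4 mirrors the BSWAPL/XORL/BSRL/SHRL/ANDL/LEAL sequence above for two
    	// unequal little-endian words: +1 if a sorts after b byte-wise, -1 otherwise.
    	func diff4(a, b uint32) int {
    		x := bits.ReverseBytes32(a) // BSWAPL: first byte becomes most significant
    		y := bits.ReverseBytes32(b)
    		d := x ^ y                       // XORL: bit differences
    		i := 31 - bits.LeadingZeros32(d) // BSRL: index of highest differing bit
    		bit := (x >> uint(i)) & 1        // SHRL + ANDL: a's bit at that position
    		return int(bit)*2 - 1            // LEAL -1(BX*2): 1/0 => +1/-1
    	}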
  3. src/internal/bytealg/equal_ppc64x.s

    	// lower address if that does not cross the lower page. Or, load a few
    	// extra bytes from the higher addresses. And align those values
    	// consistently in register as either address may have differing
    	// alignment requirements.
    	ANDCC	$PAGE_OFFSET, R8, R6	// &sX & PAGE_OFFSET
    	ANDCC	$PAGE_OFFSET, R4, R9
    	SUBC	R5, $8, R12		// 8-len
    	SLD	$3, R12, R14		// (8-len)*8
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Fri Apr 21 16:47:45 UTC 2023
    - 4.9K bytes
    - Viewed (0)
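
    The ANDCC/SUBC/SLD prelude sets up the usual short-compare trick: load a full 8-byte doubleword even when len < 8, provided the wider load cannot touch an adjacent, possibly unmapped page, then shift away the (8-len)*8 extra bits. A rough Go sketch of the safety check, assuming a 4 KiB page in place of the PAGE_OFFSET constant:

    	// pageOffset is an assumed 4 KiB page mask standing in for PAGE_OFFSET.
    	const pageOffset = 0xFFF

    	// canLoadBackward reports whether an 8-byte load ending at addr+length
    	// (so starting 8-length bytes before addr, with length < 8) stays
    	// within addr's page and is therefore safe to issue.
    	func canLoadBackward(addr, length uintptr) bool {
    		return addr&pageOffset >= 8-length
    	}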
  4. src/crypto/md5/md5block_amd64.s

    // Uses https://github.com/animetosho/md5-optimisation#dependency-shortcut-in-g-function
    
    #define ROUND2(a, b, c, d, index, const, shift) \
    	XORL	R11, R9; \
    	ADDL	$const,	a; \
    	ADDL	R8,	a; \
    	ANDL	b,		R10; \
    	ANDL	c,		R9; \
    	MOVL	(index*4)(SI),R8; \
    	ADDL	R9,	a; \
    	ADDL	R10,	a; \
    	MOVL	c,		R9; \
    	MOVL	c,		R10; \
    	ROLL	$shift,	a; \
    	ADDL	b,		a
    
    	ROUND2(AX,BX,CX,DX, 6,0xf61e2562, 5);
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Mon Mar 04 17:29:44 UTC 2024
    - 4.8K bytes
    - Viewed (0)
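
    This amd64 variant computes the same round as the 386 version but, following the linked dependency-shortcut note, adds the constant and the previously loaded message word up front and splits the G evaluation across registers carried between rounds. Independent of that scheduling, each ROUND2 still amounts to the textbook step below (a sketch, not the generated code):

    	import "math/bits"

    	// round2 is the scalar form of one round-2 step:
    	// a = b + rotl32(a + G(b,c,d) + m + k, s), with G(b,c,d) = (b&d)|(c&^d).
    	func round2(a, b, c, d, m, k uint32, s int) uint32 {
    		g := (b & d) | (c &^ d)
    		return b + bits.RotateLeft32(a+g+m+k, s)
    	}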
  5. src/runtime/memclr_plan9_386.s

    tail:
    	TESTL	BX, BX
    	JEQ	_0
    	CMPL	BX, $2
    	JBE	_1or2
    	CMPL	BX, $4
    	JB	_3
    	JE	_4
    	CMPL	BX, $8
    	JBE	_5through8
    	CMPL	BX, $16
    	JBE	_9through16
    	MOVL	BX, CX
    	SHRL	$2, CX
    	REP
    	STOSL
    	ANDL	$3, BX
    	JNE	tail
    	RET
    
    _1or2:
    	MOVB	AX, (DI)
    	MOVB	AX, -1(DI)(BX*1)
    	RET
    _0:
    	RET
    _3:
    	MOVW	AX, (DI)
    	MOVB	AX, 2(DI)
    	RET
    _4:
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Fri Jan 29 19:11:07 UTC 2021
    - 983 bytes
    - Viewed (0)
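
    The tail label dispatches on the remaining byte count: sizes 0 through 16 fall to dedicated stubs built on overlapping stores, while larger tails clear word-sized chunks with REP STOSL and loop back on the ANDL $3 remainder. A small Go sketch of the overlapping-store idea behind the _0/_1or2/_3/_4 cases (illustrative, not the runtime's code path):

    	// clearSmall zeroes up to 4 bytes the way the stubs above do: a store at
    	// the front plus a store that ends exactly at the back, overlapping when
    	// the length is short. Larger sizes take the wider-store and REP STOSL paths.
    	func clearSmall(b []byte) {
    		switch n := len(b); {
    		case n == 0: // _0: nothing to do
    		case n <= 2:
    			b[0] = 0   // MOVB AX, (DI)
    			b[n-1] = 0 // MOVB AX, -1(DI)(BX*1); overlaps b[0] when n == 1
    		case n == 3:
    			b[0], b[1] = 0, 0 // MOVW AX, (DI)
    			b[2] = 0          // MOVB AX, 2(DI)
    		case n == 4:
    			b[0], b[1], b[2], b[3] = 0, 0, 0, 0
    		}
    	}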
  6. src/runtime/memmove_plan9_386.s

    //
    // Permission is hereby granted, free of charge, to any person obtaining a copy
    // of this software and associated documentation files (the "Software"), to deal
    // in the Software without restriction, including without limitation the rights
    // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
    // copies of the Software, and to permit persons to whom the Software is
    // furnished to do so, subject to the following conditions:
    //
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Thu Jun 04 07:25:06 UTC 2020
    - 3.1K bytes
    - Viewed (0)
  7. src/cmd/compile/internal/ssa/_gen/dec64.rules

    	(OrB
    		(Less32U (Int64Hi x) (Int64Hi y))
    		(AndB
    			(Eq32 (Int64Hi x) (Int64Hi y))
    			(Leq32U (Int64Lo x) (Int64Lo y))))
    
    (Less64 x y) =>
    	(OrB
    		(Less32 (Int64Hi x) (Int64Hi y))
    		(AndB
    			(Eq32 (Int64Hi x) (Int64Hi y))
    			(Less32U (Int64Lo x) (Int64Lo y))))
    
    (Leq64 x y) =>
    	(OrB
    		(Less32 (Int64Hi x) (Int64Hi y))
    		(AndB
    			(Eq32 (Int64Hi x) (Int64Hi y))
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Tue Oct 04 19:35:46 UTC 2022
    - 14.2K bytes
    - Viewed (0)
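
    These rules lower 64-bit comparisons for 32-bit targets: compare the high words first, and only on a tie fall back to an unsigned comparison of the low words. The (Less64 x y) rule, written out as ordinary Go (a sketch for illustration):

    	// less64 spells out the (Less64 x y) rewrite: signed compare of the high
    	// 32 bits, unsigned compare of the low 32 bits when the high words are equal.
    	func less64(x, y int64) bool {
    		xhi, yhi := int32(x>>32), int32(y>>32)
    		xlo, ylo := uint32(x), uint32(y)
    		return xhi < yhi || (xhi == yhi && xlo < ylo)
    	}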
  8. test/codegen/memops.go

    	// amd64: `SUBL\t8\([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*4\), [A-Z]+[0-9]*`
    	s -= x[i+2]
    	// 386: `IMULL\t12\([A-Z]+\)\([A-Z]+\*4\), [A-Z]+`
    	s *= x[i+3]
    	// 386: `ANDL\t16\([A-Z]+\)\([A-Z]+\*4\), [A-Z]+`
    	// amd64: `ANDL\t16\([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*4\), [A-Z]+[0-9]*`
    	s &= x[i+4]
    	// 386: `ORL\t20\([A-Z]+\)\([A-Z]+\*4\), [A-Z]+`
    	// amd64: `ORL\t20\([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*4\), [A-Z]+[0-9]*`
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Fri Aug 04 16:40:24 UTC 2023
    - 12.5K bytes
    - Viewed (0)
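
    These codegen-test comments assert that the compiler folds the indexed load into the arithmetic instruction itself, for example a single ANDL with a memory operand instead of a MOVL followed by a register ANDL. The statements are excerpted from the middle of a test function; a hypothetical wrapper of the same shape (the name and signature here are illustrative, not the file's actual function):

    	// idxAnd is a hypothetical stand-in for the surrounding test function; the
    	// regexp comment is the assertion the codegen harness checks against the
    	// generated assembly.
    	func idxAnd(x []int32, i, s int32) int32 {
    		// 386: `ANDL\t16\([A-Z]+\)\([A-Z]+\*4\), [A-Z]+`
    		s &= x[i+4]
    		return s
    	}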
  9. src/internal/runtime/atomic/bench_test.go

    func BenchmarkAnd8(b *testing.B) {
    	var x [512]uint8 // give byte its own cache line
    	sink = &x
    	for i := 0; i < b.N; i++ {
    		atomic.And8(&x[255], uint8(i))
    	}
    }
    
    func BenchmarkAnd(b *testing.B) {
    	var x [128]uint32 // give x its own cache line
    	sink = &x
    	for i := 0; i < b.N; i++ {
    		atomic.And(&x[63], uint32(i))
    	}
    }
    
    func BenchmarkAnd8Parallel(b *testing.B) {
    	var x [512]uint8 // give byte its own cache line
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Mon Mar 25 19:53:03 UTC 2024
    - 3.2K bytes
    - Viewed (0)
  10. src/internal/runtime/atomic/atomic_mips64x.go

    //go:noescape
    func LoadAcq64(ptr *uint64) uint64
    
    //go:noescape
    func LoadAcquintptr(ptr *uintptr) uintptr
    
    //go:noescape
    func And8(ptr *uint8, val uint8)
    
    //go:noescape
    func Or8(ptr *uint8, val uint8)
    
    // NOTE: Do not add atomicxor8 (XOR is not idempotent).
    
    //go:noescape
    func And(ptr *uint32, val uint32)
    
    //go:noescape
    func Or(ptr *uint32, val uint32)
    
    //go:noescape
    func And32(ptr *uint32, val uint32) uint32
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Sat May 11 21:29:34 UTC 2024
    - 2.1K bytes
    - Viewed (0)
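
    These are the mips64 declarations for the runtime's internal atomic bit operations; the bodies live in the matching assembly file. Semantically, And performs *ptr &= val as a single atomic read-modify-write, and the newer And32 is assumed here to also return the previous value. A non-atomic sketch of the intended effect only:

    	// Sketch of the semantics: the real And is one atomic read-modify-write
    	// (an LL/SC loop on mips64), not two separate steps as written here.
    	func andSketch(ptr *uint32, val uint32) {
    		*ptr = *ptr & val // clears every bit that is zero in val
    	}

    	// and32Sketch shows the assumed And32 semantics: same operation, old value returned.
    	func and32Sketch(ptr *uint32, val uint32) uint32 {
    		old := *ptr
    		*ptr = old & val
    		return old
    	}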