Results 31 - 40 of 58 for MOVW (0.09 sec)

  1. src/runtime/race_s390x.s

    TEXT	sync∕atomic·AddInt32(SB), NOSPLIT, $0-20
    	GO_ARGS
    	MOVD	$__tsan_go_atomic32_fetch_add(SB), R1
    	BL	racecallatomic<>(SB)
    	// TSan performed fetch_add, but Go needs add_fetch.
    	MOVW	add+8(FP), R0
    	MOVW	ret+16(FP), R1
    	ADD	R0, R1, R0
    	MOVW	R0, ret+16(FP)
    	RET
    
    TEXT	sync∕atomic·AddInt64(SB), NOSPLIT, $0-24
    	GO_ARGS
    	MOVD	$__tsan_go_atomic64_fetch_add(SB), R1
    	BL	racecallatomic<>(SB)
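    The fixup in this snippet exists because __tsan_go_atomic32_fetch_add returns the value the word held before the addition, while sync/atomic.AddInt32 must return the value after it; the MOVW/ADD/MOVW sequence adds the delta back into the result slot. A minimal Go illustration of the contract the assembly is matching (ordinary Go, not the race-detector path):

    package main

    import (
    	"fmt"
    	"sync/atomic"
    )

    func main() {
    	var x int32 = 40
    	// AddInt32 returns the *new* value (add_fetch semantics); TSan's
    	// fetch_add returns the old one, hence the ADD fixup above.
    	got := atomic.AddInt32(&x, 2)
    	fmt.Println(got) // prints 42, not the old value 40
    }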
  2. test/codegen/math.go

    	// mips64/hardfloat:"MOVV\tF.*, R.*"
    	return math.Float64bits(f64+1) + 1
    }
    
    func fromFloat32(f32 float32) uint32 {
    	// amd64:"MOVL\tX.*, [^X].*"
    	// arm64:"FMOVS\tF.*, R.*"
    	// mips64/hardfloat:"MOVW\tF.*, R.*"
    	return math.Float32bits(f32+1) + 1
    }
    
    func toFloat64(u64 uint64) float64 {
    	// amd64:"MOVQ\t[^X].*, X.*"
    	// arm64:"FMOVD\tR.*, F.*"
    	// ppc64x:"MTVSRD"
    	// mips64/hardfloat:"MOVV\tR.*, F.*"
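    Each comment in this file is a regular expression that the codegen test harness matches against the assembly the compiler emits for the enclosing function on the named architecture; here they pin down the single FP-to-integer register move used for the bit reinterpretation. A small standalone sketch of what the Go source itself computes:

    package main

    import (
    	"fmt"
    	"math"
    )

    func main() {
    	// Float32bits copies the IEEE-754 bit pattern into a uint32; it is a
    	// bit move, not a numeric conversion, which is why the checks above
    	// expect MOVL/FMOVS/MOVW rather than a float-to-int convert.
    	fmt.Printf("%#x\n", math.Float32bits(1.0)) // 0x3f800000
    }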
  3. test/codegen/arithmetic.go

    	// 386:"MOVL\t[$]-252645135","MULL",-"DIVL"
    	// arm64:`MOVD`,`UMULH`,-`DIV`
    	// arm:`MOVW`,`MUL`,-`.*udiv`
    	a := n1 / 17 // unsigned
    
    	// amd64:"MOVQ\t[$]-1085102592571150095","IMULQ",-"IDIVQ"
    	// 386:"MOVL\t[$]-252645135","IMULL",-"IDIVL"
    	// arm64:`SMULH`,-`DIV`
    	// arm:`MOVW`,`MUL`,-`.*udiv`
    	b := n2 / 17 // signed
    
    	return a, b
    }
    
    func FloatDivs(a []float32) float32 {
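    The magic constants in these checks come from strength reduction: division by the constant 17 is rewritten as a widening multiply by a precomputed reciprocal plus a shift, so no DIV instruction appears. A sketch of the 32-bit unsigned case, using the same constant the 386 check looks for (-252645135 is 0xF0F0F0F1, and 17 * 0xF0F0F0F1 = 2^36 + 1):

    package main

    import "fmt"

    // div17 divides by 17 without a divide instruction:
    // floor(u * (2^36+1) / (17 * 2^36)) equals u/17 for every uint32 u,
    // because the error term u/(17*2^36) is too small to reach the next integer.
    func div17(u uint32) uint32 {
    	return uint32((uint64(u) * 0xF0F0F0F1) >> 36)
    }

    func main() {
    	for _, u := range []uint32{0, 1, 16, 17, 18, 4294967295} {
    		fmt.Println(u, div17(u), u/17) // the last two columns always agree
    	}
    }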
  4. src/runtime/asm_s390x.s

    	MOVD	R15, (g_stack+stack_hi)(g)
    
    	// if there is a _cgo_init, call it using the gcc ABI.
    	MOVD	_cgo_init(SB), R11
    	CMPBEQ	R11, $0, nocgo
    	MOVW	AR0, R4			// (AR0 << 32 | AR1) is the TLS base pointer; MOVD is translated to EAR
    	SLD	$32, R4, R4
    	MOVW	AR1, R4			// arg 2: TLS base pointer
    	MOVD	$setg_gcc<>(SB), R3 	// arg 1: setg
    	MOVD	g, R2			// arg 0: G
    	// C functions expect 160 bytes of space on caller stack frame
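    The comment documents what the MOVW/SLD sequence assembles: on s390x the TLS base pointer lives in the access registers AR0 (high word) and AR1 (low word), and the code packs them into a single 64-bit register to pass to _cgo_init. The packing, written as plain Go arithmetic with hypothetical register contents:

    package main

    import "fmt"

    func main() {
    	// Hypothetical AR0/AR1 contents; the real values are extracted from
    	// the access registers (see the EAR note in the comment above).
    	var ar0, ar1 uint32 = 0x1, 0x2345678
    	tlsBase := uint64(ar0)<<32 | uint64(ar1) // (AR0 << 32 | AR1)
    	fmt.Printf("%#x\n", tlsBase)
    }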
  5. src/cmd/compile/internal/ssa/_gen/PPC64Ops.go

    		// starting with the largest sizes and generating as
    		// many as needed, using the appropriate offset value.
    		//	MOVD  n(R4),R14
    		//	MOVD  R14,n(R3)
    		//	MOVW  n1(R4),R14
    		//	MOVW  R14,n1(R3)
    		//	MOVH  n2(R4),R14
    		//	MOVH  R14,n2(R3)
    		//	MOVB  n3(R4),R14
    		//	MOVB  R14,n3(R3)
    
    		{
    			name:      "LoweredMove",
    			aux:       "Int64",
    			argLength: 3,
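    The comment describes how LoweredMove expands the residue left after the bulk copy: progressively narrower load/store pairs (8, 4, 2, then 1 bytes), each at the appropriate offset. The same size-descending idea in ordinary Go (a sketch of the strategy, not the SSA expansion itself):

    package main

    import (
    	"encoding/binary"
    	"fmt"
    )

    // moveTail copies len(dst) bytes from src using the widest chunks first:
    // 8-byte, then 4-, 2- and 1-byte pieces, mirroring the MOVD/MOVW/MOVH/MOVB
    // sequence sketched in the comment above. Assumes len(src) >= len(dst).
    func moveTail(dst, src []byte) {
    	for len(dst) >= 8 {
    		binary.LittleEndian.PutUint64(dst, binary.LittleEndian.Uint64(src))
    		dst, src = dst[8:], src[8:]
    	}
    	if len(dst) >= 4 {
    		binary.LittleEndian.PutUint32(dst, binary.LittleEndian.Uint32(src))
    		dst, src = dst[4:], src[4:]
    	}
    	if len(dst) >= 2 {
    		binary.LittleEndian.PutUint16(dst, binary.LittleEndian.Uint16(src))
    		dst, src = dst[2:], src[2:]
    	}
    	if len(dst) == 1 {
    		dst[0] = src[0]
    	}
    }

    func main() {
    	src := []byte("fifteen bytes!!")
    	dst := make([]byte, len(src))
    	moveTail(dst, src)
    	fmt.Println(string(dst))
    }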
  6. src/cmd/compile/internal/ssa/_gen/ARM64Ops.go

    		{name: "MOVWload", argLength: 2, reg: gpload, aux: "SymOff", asm: "MOVW", typ: "Int32", faultOnNilArg0: true, symEffect: "Read"},      // load from arg0 + auxInt + aux.  arg1=mem.
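    This op-table entry binds the SSA MOVWload operation to the arm64 MOVW instruction; typ: "Int32" records that the loaded word is treated as signed. Semantically the load amounts to a sign-extending 32-bit read (a sketch only; offsets and symbols go through auxInt/aux in the real op):

    package main

    import (
    	"encoding/binary"
    	"fmt"
    )

    func main() {
    	// Four bytes holding 0xFFFFFFFF in memory...
    	mem := []byte{0xff, 0xff, 0xff, 0xff}
    	// ...read as a signed 32-bit value and widened: -1, not 4294967295.
    	v := int64(int32(binary.LittleEndian.Uint32(mem)))
    	fmt.Println(v)
    }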
  7. src/crypto/aes/asm_arm64.s

    	LDP	rotInvSRows<>(SB), (R0, R1)
    	VMOV	R0, V3.D[0]
    	VMOV	R1, V3.D[1]
    	VEOR	V0.B16, V0.B16, V0.B16 // All zeroes
    	MOVW	$1, R13
    	TBZ	$1, R8, ks192
    	TBNZ	$2, R8, ks256
    	LDPW	(R9), (R4, R5)
    	LDPW	8(R9), (R6, R7)
    	STPW.P	(R4, R5), 8(R10)
    	STPW.P	(R6, R7), 8(R10)
    	MOVW	$0x1b, R14
    ks128Loop:
    		VMOV	R7, V2.S[0]
    		WORD	$0x4E030042       // TBL V3.B16, [V2.B16], V2.B16
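    The two MOVW immediates load what appear to be the AES key-schedule round-constant seed ($1) and the GF(2^8) reduction byte ($0x1b); reading them that way is an assumption about this routine, but the round-constant recurrence they would drive is standard AES and looks like this in Go:

    package main

    import "fmt"

    func main() {
    	// AES round constants: start at 1 and double in GF(2^8), XORing in
    	// 0x1b whenever the doubling overflows a byte. (Assumed to be what
    	// the $1 and $0x1b registers feed in the key-expansion loop above.)
    	rc := byte(1)
    	for i := 0; i < 10; i++ {
    		fmt.Printf("%#x ", rc)
    		if rc&0x80 != 0 {
    			rc = rc<<1 ^ 0x1b
    		} else {
    			rc <<= 1
    		}
    	}
    	fmt.Println() // 0x1 0x2 0x4 0x8 0x10 0x20 0x40 0x80 0x1b 0x36
    }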
  8. src/cmd/compile/internal/ppc64/ssa.go

    		// of the following instructions with the appropriate
    		// offsets depending which instructions are needed
    		//
    		//	MOVW R0,n1(R20)	4 bytes
    		//	MOVH R0,n2(R20)	2 bytes
    		//	MOVB R0,n3(R20)	1 byte
    		//
    		// 7 bytes: MOVW, MOVH, MOVB
    		// 6 bytes: MOVW, MOVH
    		// 5 bytes: MOVW, MOVB
    		// 3 bytes: MOVH, MOVB
    
    		// each loop iteration does 32 bytes
    		ctr := v.AuxInt / 32
    
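    The byte counts in this comment enumerate how a short tail is covered without a loop: 7 leftover bytes take a word, halfword and byte store, 6 take word+halfword, 5 word+byte, 3 halfword+byte. The choice is just the binary decomposition of the remainder, as in this sketch (the real code also computes the offsets):

    package main

    import "fmt"

    // tailStores reports which store widths cover n leftover bytes (n < 8),
    // matching the MOVW/MOVH/MOVB combinations listed in the comment.
    func tailStores(n int64) []string {
    	var ops []string
    	if n&4 != 0 {
    		ops = append(ops, "MOVW") // 4 bytes
    	}
    	if n&2 != 0 {
    		ops = append(ops, "MOVH") // 2 bytes
    	}
    	if n&1 != 0 {
    		ops = append(ops, "MOVB") // 1 byte
    	}
    	return ops
    }

    func main() {
    	for _, n := range []int64{7, 6, 5, 3} {
    		fmt.Println(n, tailStores(n))
    	}
    }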
  9. src/runtime/race_arm64.s

    // Add
    TEXT	sync∕atomic·AddInt32(SB), NOSPLIT, $0-20
    	GO_ARGS
    	MOVD	$__tsan_go_atomic32_fetch_add(SB), R9
    	BL	racecallatomic<>(SB)
    	MOVW	add+8(FP), R0	// convert fetch_add to add_fetch
    	MOVW	ret+16(FP), R1
    	ADD	R0, R1, R0
    	MOVW	R0, ret+16(FP)
    	RET
    
    TEXT	sync∕atomic·AddInt64(SB), NOSPLIT, $0-24
    	GO_ARGS
    	MOVD	$__tsan_go_atomic64_fetch_add(SB), R9
    	BL	racecallatomic<>(SB)
  10. src/crypto/subtle/xor_ppc64x.s

    	SUB	$8, R6          // n = n - 8
    	MOVD	R16, (R3)(R8)   // Store to dst
    	ADD	$8, R8
    xor4:
    	CMP	R6, $4
    	BLT	xor2
    	MOVWZ	(R4)(R8), R14
    	MOVWZ	(R5)(R8), R15
    	XOR	R14, R15, R16
    	MOVW	R16, (R3)(R8)
    	ADD	$4,R8
    	ADD	$-4,R6
    xor2:
    	CMP	R6, $2
    	BLT	xor1
    	MOVHZ	(R4)(R8), R14
    	MOVHZ	(R5)(R8), R15
    	XOR	R14, R15, R16
    	MOVH	R16, (R3)(R8)
    	ADD	$2,R8
    	ADD	$-2,R6
    xor1:
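    The xor4/xor2/xor1 labels handle the tail left after the 8-byte loop: any remaining bytes are XORed in one 4-byte, one 2-byte and one 1-byte step as needed. A plain-Go version of that tail structure (a sketch, not the ppc64 code path):

    package main

    import (
    	"encoding/binary"
    	"fmt"
    )

    // xorTail sets dst[i] = a[i] ^ b[i] for a tail shorter than 8 bytes,
    // working in 4-, then 2-, then 1-byte steps like the labels above.
    // Assumes len(a) and len(b) are at least len(dst).
    func xorTail(dst, a, b []byte) {
    	i, n := 0, len(dst)
    	if n-i >= 4 {
    		binary.LittleEndian.PutUint32(dst[i:],
    			binary.LittleEndian.Uint32(a[i:])^binary.LittleEndian.Uint32(b[i:]))
    		i += 4
    	}
    	if n-i >= 2 {
    		binary.LittleEndian.PutUint16(dst[i:],
    			binary.LittleEndian.Uint16(a[i:])^binary.LittleEndian.Uint16(b[i:]))
    		i += 2
    	}
    	if n-i == 1 {
    		dst[i] = a[i] ^ b[i]
    	}
    }

    func main() {
    	a := []byte{1, 2, 3, 4, 5, 6, 7}
    	b := []byte{7, 6, 5, 4, 3, 2, 1}
    	dst := make([]byte, 7)
    	xorTail(dst, a, b)
    	fmt.Println(dst) // [6 4 6 0 6 4 6]
    }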