- Sort: Score
- Results per page: 10
- Languages: All
Results 1 - 10 of 35 for clobber (0.13 sec)
-
src/cmd/compile/internal/ssa/_gen/AMD64.rules
(MOVBQZX x:(MOVBload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (MOVBload <v.Type> [off] {sym} ptr mem) (MOVBQZX x:(MOVWload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (MOVBload <v.Type> [off] {sym} ptr mem) (MOVBQZX x:(MOVLload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (MOVBload <v.Type> [off] {sym} ptr mem)
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Tue Mar 12 19:38:41 UTC 2024 - 93.9K bytes - Viewed (0) -
src/cmd/compile/internal/ssa/regalloc.go
} // Dump any registers which will be clobbered if s.doClobber && v.Op.IsCall() { // clobber registers that are marked as clobber in regmask, but // don't clobber inputs. s.clobberRegs(regspec.clobbers &^ s.tmpused &^ s.nospill) } s.freeRegs(regspec.clobbers) s.tmpused |= regspec.clobbers // Pick registers for outputs. {
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Tue Nov 21 17:49:56 UTC 2023 - 87.2K bytes - Viewed (0) -
src/cmd/compile/internal/ssa/_gen/S390X.rules
&& clobber(x) => (STMG3 [i-16] {s} p w0 w1 w2 mem) (MOVDstore [i] {s} p w3 x:(STMG3 [i-24] {s} p w0 w1 w2 mem)) && x.Uses == 1 && is20Bit(int64(i)-24) && setPos(v, x.Pos) && clobber(x) => (STMG4 [i-24] {s} p w0 w1 w2 w3 mem) (STMG2 [i] {s} p w2 w3 x:(STMG2 [i-16] {s} p w0 w1 mem)) && x.Uses == 1 && is20Bit(int64(i)-16) && setPos(v, x.Pos) && clobber(x) => (STMG4 [i-16] {s} p w0 w1 w2 w3 mem)
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Thu Oct 12 18:09:26 UTC 2023 - 74.3K bytes - Viewed (0) -
src/runtime/asm_amd64.s
// gcWriteBarrier does NOT follow the Go ABI. It accepts the // number of bytes of buffer needed in R11, and returns a pointer // to the buffer space in R11. // It clobbers FLAGS. It does not clobber any general-purpose registers, // but may clobber others (e.g., SSE registers). // Typical use would be, when doing *(CX+88) = AX // CMPL $0, runtime.writeBarrier(SB) // JEQ dowrite
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Sat May 11 20:38:24 UTC 2024 - 60.4K bytes - Viewed (0) -
src/cmd/compile/internal/ssa/_gen/PPC64.rules
(MFVSRD (FMOVDconst [c])) => (MOVDconst [int64(math.Float64bits(c))]) (MTVSRD x:(MOVDload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (FMOVDload [off] {sym} ptr mem) (MFVSRD x:(FMOVDload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (MOVDload [off] {sym} ptr mem) // Rules for MOV* or FMOV* ops determine when indexed (MOV*loadidx or MOV*storeidx)
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Fri Jun 07 19:02:52 UTC 2024 - 53.2K bytes - Viewed (0) -
src/cmd/compile/internal/ssa/rewrite.go
return nil // too far away } // clobber invalidates values. Returns true. // clobber is used by rewrite rules to: // // A) make sure the values are really dead and never used again. // B) decrement use counts of the values' args. func clobber(vv ...*Value) bool { for _, v := range vv { v.reset(OpInvalid) // Note: leave v.Block intact. The Block field is used after clobber. } return true }
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Fri Jun 07 19:02:52 UTC 2024 - 64.2K bytes - Viewed (0) -
src/cmd/compile/internal/ssa/_gen/S390XOps.go
// It saves all GP registers if necessary, // but clobbers R14 (LR) because it's a call, // and also clobbers R1 as the PLT stub does. // Returns a pointer to a write barrier buffer in R9. {name: "LoweredWB", argLength: 1, reg: regInfo{clobbers: (callerSave &^ gpg) | buildReg("R14") | r1, outputs: []regMask{r9}}, clobberFlags: true, aux: "Int64"},
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Fri Feb 24 00:21:13 UTC 2023 - 52.5K bytes - Viewed (0) -
src/cmd/compile/internal/ssa/_gen/ARM64Ops.go
// It saves all GP registers if necessary, // but clobbers R30 (LR) because it's a call. // R16 and R17 may be clobbered by linker trampoline. // Returns a pointer to a write barrier buffer in R25. {name: "LoweredWB", argLength: 1, reg: regInfo{clobbers: (callerSave &^ gpg) | buildReg("R16 R17 R30"), outputs: []regMask{buildReg("R25")}}, clobberFlags: true, aux: "Int64"},
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Thu May 23 15:49:20 UTC 2024 - 58.8K bytes - Viewed (0) -
src/cmd/compile/internal/ssa/debug.go
} // Handle any register clobbering. Call operations, for example, // clobber all registers even though they don't explicitly write to // them. clobbers := uint64(opcodeTable[v.Op].reg.clobbers) for { if clobbers == 0 { break } reg := uint8(bits.TrailingZeros64(clobbers)) clobbers &^= 1 << reg for _, slot := range locs.registers[reg] { if state.loggingLevel > 1 {
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Mon Jun 10 19:44:43 UTC 2024 - 58.4K bytes - Viewed (0) -
src/crypto/internal/nistec/p256_asm_ppc64le.s
LXVD2X (R16)(CPOOL), P1 LXVD2X (R0)(CPOOL), P0 CALL p256MulInternal<>(SB) MOVD n+16(FP), N ADD $-1, N CMP $0, N BEQ done MOVD N, n+16(FP) // Save counter to avoid clobber VOR T0, T0, X0 VOR T1, T1, X1 BR sqrLoop done: MOVD $p256mul<>+0x00(SB), CPOOL XXPERMDI T0, T0, $2, T0 XXPERMDI T1, T1, $2, T1 STXVD2X T0, (R0)(res_ptr)
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Mon Mar 04 17:29:44 UTC 2024 - 56.5K bytes - Viewed (0)