Results 1 - 10 of 1,246 for moveWB (0.16 sec)

  1. src/cmd/compile/internal/ssa/writebarrier.go

    		// find the memory before the WB stores
    		mem := stores[0].MemoryArg()
    		pos := stores[0].Pos
    
    		// If the source of a MoveWB is volatile (will be clobbered by a
    		// function call), we need to copy it to a temporary location, as
    		// marshaling the args of wbMove might clobber the value we're
    		// trying to move.
    		// Look for volatile source, copy it to temporary before we check
    		// the write barrier flag.
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Fri Sep 08 19:09:14 UTC 2023
    - 23.5K bytes
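
    The comment in this excerpt describes a defensive pattern rather than anything specific to SSA: when a later call could clobber the source of a move, copy the source to a private temporary first and move from the temporary. A minimal stand-alone Go sketch of that idea (the names are illustrative, not the compiler's):

    package main

    import "fmt"

    // callThatMayClobber stands in for a call whose argument marshaling could
    // overwrite the original buffer (an assumption made for this example; the
    // real pass works on SSA values and stack slots, not byte slices).
    func callThatMayClobber(p []byte) { p[0] = 0 }

    func moveViaTemporary(dst, src []byte) {
    	tmp := make([]byte, len(src)) // temporary location, like the stack temp the pass allocates
    	copy(tmp, src)                // capture src before anything can clobber it
    	callThatMayClobber(src)       // src may now be overwritten...
    	copy(dst, tmp)                // ...but the move still sees the original bytes
    }

    func main() {
    	src := []byte{1, 2, 3}
    	dst := make([]byte, len(src))
    	moveViaTemporary(dst, src)
    	fmt.Println(dst) // [1 2 3]
    }
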
  2. src/runtime/memmove_s390x.s

    move0to3:
    	CMPBEQ	R5, $0, done
    move1:
    	CMPBNE	R5, $1, move2
    	MOVB	0(R4), R3
    	MOVB	R3, 0(R6)
    	RET
    move2:
    	CMPBNE	R5, $2, move3
    	MOVH	0(R4), R3
    	MOVH	R3, 0(R6)
    	RET
    move3:
    	MOVH	0(R4), R3
    	MOVB	2(R4), R7
    	MOVH	R3, 0(R6)
    	MOVB	R7, 2(R6)
    	RET
    
    move4to7:
    	CMPBNE	R5, $4, move5
    	MOVW	0(R4), R3
    	MOVW	R3, 0(R6)
    	RET
    move5:
    	CMPBNE	R5, $5, move6
    	MOVW	0(R4), R3
    	MOVB	4(R4), R7
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Wed Jan 22 18:54:48 UTC 2020
    - 2.9K bytes
  3. src/cmd/compile/internal/ssa/_gen/genericOps.go

    	// Normally we require that the source and destination of Move do not overlap.
    	// There is an exception when we know all the loads will happen before all
    	// the stores. In that case, overlap is ok. See
    	// memmove inlining in generic.rules. When inlineablememmovesize (in ../rewrite.go)
    	// returns true, we must do all loads before all stores, when lowering Move.
    	// The type of Move is used for the write barrier pass to insert write barriers
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Thu May 23 15:49:20 UTC 2024
    - 42.6K bytes
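
    The overlap exception this comment describes is easy to demonstrate outside the compiler: if every load happens before any store, an overlapping copy still produces the right result, because the loads capture the old bytes first. A small sketch, assuming nothing about the compiler internals:

    package main

    import (
    	"encoding/binary"
    	"fmt"
    )

    // move8 copies 8 bytes correctly even if dst and src overlap, by doing all
    // loads before all stores, the same ordering constraint the comment imposes
    // on lowered Move ops that came from inlined memmove.
    func move8(dst, src []byte) {
    	lo := binary.LittleEndian.Uint32(src[0:4]) // all loads first
    	hi := binary.LittleEndian.Uint32(src[4:8])
    	binary.LittleEndian.PutUint32(dst[0:4], lo) // then all stores
    	binary.LittleEndian.PutUint32(dst[4:8], hi)
    }

    func main() {
    	buf := []byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}
    	move8(buf[2:10], buf[0:8]) // overlapping source and destination
    	fmt.Println(buf)           // [1 2 1 2 3 4 5 6 7 8]
    }
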
  4. src/runtime/memmove_386.s

    	// into registers lets us ignore copy direction).
    tail:
    	// BSR+branch table make almost all memmove/memclr benchmarks worse. Not worth doing.
    	TESTL	BX, BX
    	JEQ	move_0
    	CMPL	BX, $2
    	JBE	move_1or2
    	CMPL	BX, $4
    	JB	move_3
    	JE	move_4
    	CMPL	BX, $8
    	JBE	move_5through8
    	CMPL	BX, $16
    	JBE	move_9through16
    #ifdef GO386_softfloat
    	JMP	nosse2
    #endif
    	CMPL	BX, $32
    	JBE	move_17through32
    	CMPL	BX, $64
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Sat Nov 06 10:24:44 UTC 2021
    - 4.4K bytes
  5. src/runtime/memmove_plan9_amd64.s

    	// with some straightline code. The REP MOVSQ instruction is really fast
    	// for large sizes. The cutover is approximately 1K.
    tail:
    	TESTQ	BX, BX
    	JEQ	move_0
    	CMPQ	BX, $2
    	JBE	move_1or2
    	CMPQ	BX, $4
    	JBE	move_3or4
    	CMPQ	BX, $8
    	JB	move_5through7
    	JE	move_8
    	CMPQ	BX, $16
    	JBE	move_9through16
    
    /*
     * check and set for backwards
     */
    	CMPQ	SI, DI
    	JLS	back
    
    /*
     * forward copy loop
     */
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Thu Jun 04 07:25:06 UTC 2020
    - 3K bytes
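
    The memmove assembly excerpted in these results shares one shape: branch on the length, handle small copies with a few wide (possibly overlapping) loads and stores, and fall back to a bulk path for large sizes (REP MOVSL/MOVSQ on x86, with a cutover around 1K). A hedged Go analogue of the small-size dispatch, purely for illustration and not the runtime's code path:

    package main

    import (
    	"encoding/binary"
    	"fmt"
    )

    // smallMove mirrors the "tail" dispatch: pick the widest loads/stores that
    // cover n bytes instead of looping byte by byte. Sizes above 16 stand in
    // for the bulk path. All loads happen before any store.
    func smallMove(dst, src []byte, n int) {
    	switch {
    	case n == 0:
    	case n == 1:
    		dst[0] = src[0]
    	case n <= 4:
    		// two possibly-overlapping 2-byte moves cover lengths 2 through 4
    		a := binary.LittleEndian.Uint16(src[0:2])
    		b := binary.LittleEndian.Uint16(src[n-2 : n])
    		binary.LittleEndian.PutUint16(dst[0:2], a)
    		binary.LittleEndian.PutUint16(dst[n-2:n], b)
    	case n <= 8:
    		a := binary.LittleEndian.Uint32(src[0:4])
    		b := binary.LittleEndian.Uint32(src[n-4 : n])
    		binary.LittleEndian.PutUint32(dst[0:4], a)
    		binary.LittleEndian.PutUint32(dst[n-4:n], b)
    	case n <= 16:
    		a := binary.LittleEndian.Uint64(src[0:8])
    		b := binary.LittleEndian.Uint64(src[n-8 : n])
    		binary.LittleEndian.PutUint64(dst[0:8], a)
    		binary.LittleEndian.PutUint64(dst[n-8:n], b)
    	default:
    		copy(dst[:n], src[:n]) // stand-in for the bulk copy path
    	}
    }

    func main() {
    	src := []byte("hello, world")
    	dst := make([]byte, len(src))
    	smallMove(dst, src, len(src))
    	fmt.Println(string(dst)) // hello, world
    }
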
  6. src/runtime/memmove_amd64.s

    	// direction is forward.
    	//
    	// BSR+branch table make almost all memmove/memclr benchmarks worse. Not worth doing.
    	TESTQ	BX, BX
    	JEQ	move_0
    	CMPQ	BX, $2
    	JBE	move_1or2
    	CMPQ	BX, $4
    	JB	move_3
    	JBE	move_4
    	CMPQ	BX, $8
    	JB	move_5through7
    	JE	move_8
    	CMPQ	BX, $16
    	JBE	move_9through16
    	CMPQ	BX, $32
    	JBE	move_17through32
    	CMPQ	BX, $64
    	JBE	move_33through64
    	CMPQ	BX, $128
    	JBE	move_65through128
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Sun Apr 10 15:52:08 UTC 2022
    - 12.5K bytes
  7. src/runtime/memmove_plan9_386.s

    	// with some straightline code. The REP MOVSL instruction is really fast
    	// for large sizes. The cutover is approximately 1K.
    tail:
    	TESTL	BX, BX
    	JEQ	move_0
    	CMPL	BX, $2
    	JBE	move_1or2
    	CMPL	BX, $4
    	JB	move_3
    	JE	move_4
    	CMPL	BX, $8
    	JBE	move_5through8
    	CMPL	BX, $16
    	JBE	move_9through16
    
    /*
     * check and set for backwards
     */
    	CMPL	SI, DI
    	JLS	back
    
    /*
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Thu Jun 04 07:25:06 UTC 2020
    - 3.1K bytes
  8. src/cmd/compile/internal/ssa/shortcircuit.go

    		}
    	}
    }
    
    // moveTo moves v to dst, adjusting the appropriate Block.Values slices.
    // The caller is responsible for ensuring that this is safe.
    // i is the index of v in v.Block.Values.
    func (v *Value) moveTo(dst *Block, i int) {
    	if dst.Func.scheduled {
    		v.Fatalf("moveTo after scheduling")
    	}
    	src := v.Block
    	if src.Values[i] != v {
    		v.Fatalf("moveTo bad index %d", v, i)
    	}
    	if src == dst {
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Mon Oct 03 17:47:02 UTC 2022
    - 12.6K bytes
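
    Per its doc comment, moveTo re-parents v: it is appended to dst's Values and removed from its old block's slice. The slice surgery itself is ordinary Go; here is a sketch with toy types (not the compiler's *Value and *Block), assuming a swap-with-last removal:

    package main

    import "fmt"

    // Toy stand-ins for the compiler's block and value types.
    type block struct{ values []*value }

    type value struct {
    	name  string
    	block *block
    }

    // moveTo sketches the adjustment the doc comment describes: remove v from
    // its current block's values (here by swapping with the last element, which
    // does not preserve order) and append it to dst.
    func (v *value) moveTo(dst *block, i int) {
    	src := v.block
    	if src.values[i] != v {
    		panic(fmt.Sprintf("moveTo bad index %d", i))
    	}
    	if src == dst {
    		return
    	}
    	last := len(src.values) - 1
    	src.values[i] = src.values[last]
    	src.values[last] = nil
    	src.values = src.values[:last]
    	dst.values = append(dst.values, v)
    	v.block = dst
    }

    func main() {
    	b1, b2 := &block{}, &block{}
    	v := &value{name: "v1", block: b1}
    	b1.values = append(b1.values, v)
    	v.moveTo(b2, 0)
    	fmt.Println(len(b1.values), len(b2.values), v.block == b2) // 0 1 true
    }
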
  9. maven-embedder/src/main/java/org/fusesource/jansi/Ansi.java

        /**
         * Moves the cursor up. If the parameter y is negative it moves the cursor down.
         *
         * @param y the number of lines to move up
         * @return this Ansi instance
         */
        public Ansi cursorUp(final int y) {
            return y > 0 ? appendEscapeSequence('A', y) : y < 0 ? cursorDown(-y) : this;
        }
    
        /**
         * Moves the cursor down. If the parameter y is negative it moves the cursor up.
         *
    Registered: Wed Jun 12 09:55:16 UTC 2024
    - Last Modified: Mon May 13 09:53:45 UTC 2024
    - 23.1K bytes
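
    cursorUp ultimately appends the ANSI escape sequence ESC [ y A (and cursorDown appends ESC [ y B), which is why a negative count is simply forwarded to the opposite method. A small Go sketch that builds the same escape sequences directly; the function names below are made up for the example and are not part of jansi:

    package main

    import "fmt"

    // cursorUp returns ESC [ n A, the sequence the jansi method appends;
    // a negative n is forwarded to cursorDown, mirroring the javadoc.
    func cursorUp(n int) string {
    	switch {
    	case n > 0:
    		return fmt.Sprintf("\x1b[%dA", n)
    	case n < 0:
    		return cursorDown(-n)
    	}
    	return "" // n == 0: no movement
    }

    // cursorDown returns ESC [ n B.
    func cursorDown(n int) string {
    	switch {
    	case n > 0:
    		return fmt.Sprintf("\x1b[%dB", n)
    	case n < 0:
    		return cursorUp(-n)
    	}
    	return ""
    }

    func main() {
    	fmt.Printf("%q\n", cursorUp(2))  // "\x1b[2A"
    	fmt.Printf("%q\n", cursorUp(-2)) // "\x1b[2B"
    }
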
  10. src/cmd/compile/internal/ssa/tighten.go

    				}
    				if mem := v.MemoryArg(); mem != nil {
    					if startMem[t.ID] != mem {
    						// We can't move a value with a memory arg unless the target block
    						// has that memory arg as its starting memory.
    						continue
    					}
    				}
    				if f.pass.debug > 0 {
    					b.Func.Warnl(v.Pos, "%v is moved", v.Op)
    				}
    				// Move v to the block which dominates its uses.
    				t.Values = append(t.Values, v)
    				v.Block = t
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Tue May 16 01:01:38 UTC 2023
    - 7.7K bytes