Results 1 - 10 of 1,104 for moveWB (0.26 sec)

  1. src/cmd/compile/internal/ssa/writebarrier.go

    		// find the memory before the WB stores
    		mem := stores[0].MemoryArg()
    		pos := stores[0].Pos
    
    		// If the source of a MoveWB is volatile (will be clobbered by a
    		// function call), we need to copy it to a temporary location, as
    		// marshaling the args of wbMove might clobber the value we're
    		// trying to move.
    		// Look for volatile source, copy it to temporary before we check
    		// the write barrier flag.
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Fri Sep 08 19:09:14 UTC 2023
    - 23.5K bytes
    - Viewed (0)
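
For orientation, here is a minimal, hypothetical Go sketch of the kind of assignment that reaches this code path: a bulk copy of a pointer-containing value into heap memory is lowered to a Move, and the write barrier pass rewrites it into a write-barriered move (the wbMove the comment mentions) when the destination may be in the heap. Whether a particular assignment is lowered this way depends on the compiler version and the type's layout.

    package main

    // big contains a pointer, so a wholesale copy of it must be visible to
    // the garbage collector; the buf field makes the copy large enough to
    // be treated as a bulk move rather than individual stores.
    type big struct {
    	p   *int
    	buf [64]byte
    }

    //go:noinline
    func set(dst, src *big) {
    	*dst = *src // candidate for a write-barriered move
    }

    func main() {
    	x := 1
    	d := new(big) // heap-allocated destination
    	set(d, &big{p: &x})
    }
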
  2. src/cmd/compile/internal/ssa/_gen/genericOps.go

    	// Normally we require that the source and destination of Move do not overlap.
    	// There is an exception when we know all the loads will happen before all
    	// the stores. In that case, overlap is ok. See
    	// memmove inlining in generic.rules. When inlineablememmovesize (in ../rewrite.go)
    	// returns true, we must do all loads before all stores, when lowering Move.
    	// The type of Move is used for the write barrier pass to insert write barriers
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Thu May 23 15:49:20 UTC 2024
    - 42.6K bytes
    - Viewed (0)
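
The overlap exception described above is visible from ordinary Go: copy (backed by the runtime's memmove) behaves as if all loads happen before any store, so an overlapping source and destination still give the expected result. A small illustration:

    package main

    import "fmt"

    func main() {
    	s := []int{1, 2, 3, 4, 5}
    	// Destination overlaps the source; "all loads before all stores"
    	// semantics shift the first four elements right by one.
    	copy(s[1:], s[:4])
    	fmt.Println(s) // [1 1 2 3 4]
    }
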
  3. src/runtime/memmove_386.s

    	// into registers lets us ignore copy direction).
    tail:
    	// BSR+branch table make almost all memmove/memclr benchmarks worse. Not worth doing.
    	TESTL	BX, BX
    	JEQ	move_0
    	CMPL	BX, $2
    	JBE	move_1or2
    	CMPL	BX, $4
    	JB	move_3
    	JE	move_4
    	CMPL	BX, $8
    	JBE	move_5through8
    	CMPL	BX, $16
    	JBE	move_9through16
    #ifdef GO386_softfloat
    	JMP	nosse2
    #endif
    	CMPL	BX, $32
    	JBE	move_17through32
    	CMPL	BX, $64
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Sat Nov 06 10:24:44 UTC 2021
    - 4.4K bytes
    - Viewed (0)
  4. src/runtime/memmove_amd64.s

    	// direction is forward.
    	//
    	// BSR+branch table make almost all memmove/memclr benchmarks worse. Not worth doing.
    	TESTQ	BX, BX
    	JEQ	move_0
    	CMPQ	BX, $2
    	JBE	move_1or2
    	CMPQ	BX, $4
    	JB	move_3
    	JBE	move_4
    	CMPQ	BX, $8
    	JB	move_5through7
    	JE	move_8
    	CMPQ	BX, $16
    	JBE	move_9through16
    	CMPQ	BX, $32
    	JBE	move_17through32
    	CMPQ	BX, $64
    	JBE	move_33through64
    	CMPQ	BX, $128
    	JBE	move_65through128
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Sun Apr 10 15:52:08 UTC 2022
    - 12.5K bytes
    - Viewed (0)
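
Both the 386 and amd64 versions dispatch on the same power-of-two length thresholds before reaching the wider-register paths. A hypothetical micro-benchmark that drives each move_* branch through ordinary copy calls:

    package memmove_test

    import (
    	"strconv"
    	"testing"
    )

    // Each length below lands in a different branch of the assembly's
    // dispatch (move_1or2, move_3, move_4, ..., move_65through128).
    func BenchmarkCopyBySize(b *testing.B) {
    	for _, n := range []int{1, 2, 3, 4, 8, 16, 32, 64, 128} {
    		src := make([]byte, n)
    		dst := make([]byte, n)
    		b.Run(strconv.Itoa(n), func(b *testing.B) {
    			for i := 0; i < b.N; i++ {
    				copy(dst, src)
    			}
    		})
    	}
    }
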
  5. src/cmd/compile/internal/ssa/shortcircuit.go

    		}
    	}
    }
    
    // moveTo moves v to dst, adjusting the appropriate Block.Values slices.
    // The caller is responsible for ensuring that this is safe.
    // i is the index of v in v.Block.Values.
    func (v *Value) moveTo(dst *Block, i int) {
    	if dst.Func.scheduled {
    		v.Fatalf("moveTo after scheduling")
    	}
    	src := v.Block
    	if src.Values[i] != v {
    		v.Fatalf("moveTo bad index %d", v, i)
    	}
    	if src == dst {
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Mon Oct 03 17:47:02 UTC 2022
    - 12.6K bytes
    - Viewed (0)
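
Stripped of the SSA types, moveTo's job is: check that the caller-supplied index really points at the value, remove it from the source block's slice, and append it to the destination. A standalone sketch of that pattern (the names here are illustrative, not the compiler's):

    package main

    import "fmt"

    type block struct{ values []string }

    // moveTo relocates v from src to dst, given v's index i in src.values.
    func moveTo(v string, src, dst *block, i int) error {
    	if src.values[i] != v {
    		return fmt.Errorf("moveTo bad index %d for %q", i, v)
    	}
    	// Remove by swapping with the last element so nothing is shifted.
    	last := len(src.values) - 1
    	src.values[i] = src.values[last]
    	src.values = src.values[:last]
    	dst.values = append(dst.values, v)
    	return nil
    }

    func main() {
    	a := &block{values: []string{"v1", "v2", "v3"}}
    	b := &block{}
    	if err := moveTo("v2", a, b, 1); err != nil {
    		panic(err)
    	}
    	fmt.Println(a.values, b.values) // [v1 v3] [v2]
    }
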
  6. maven-embedder/src/main/java/org/fusesource/jansi/Ansi.java

        /**
         * Moves the cursor up. If the parameter y is negative it moves the cursor down.
         *
         * @param y the number of lines to move up
         * @return this Ansi instance
         */
        public Ansi cursorUp(final int y) {
            return y > 0 ? appendEscapeSequence('A', y) : y < 0 ? cursorDown(-y) : this;
        }
    
        /**
         * Moves the cursor down. If the parameter y is negative it moves the cursor up.
         *
    Registered: Wed Jun 12 09:55:16 UTC 2024
    - Last Modified: Mon May 13 09:53:45 UTC 2024
    - 23.1K bytes
    - Viewed (0)
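
The escape sequences behind cursorUp/cursorDown are plain ANSI control codes and need no library. A hypothetical Go helper with the same sign convention:

    package main

    import "fmt"

    // cursorUp moves the terminal cursor up n lines; a negative n moves it
    // down instead, mirroring the cursorUp/cursorDown pair above.
    func cursorUp(n int) {
    	switch {
    	case n > 0:
    		fmt.Printf("\x1b[%dA", n) // CUU: cursor up
    	case n < 0:
    		fmt.Printf("\x1b[%dB", -n) // CUD: cursor down
    	}
    }

    func main() {
    	fmt.Println("first line")
    	fmt.Println("second line")
    	cursorUp(1)
    	fmt.Println("overwrites the second line")
    }
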
  7. src/cmd/compile/internal/ssa/tighten.go

    				}
    				if mem := v.MemoryArg(); mem != nil {
    					if startMem[t.ID] != mem {
    						// We can't move a value with a memory arg unless the target block
    						// has that memory arg as its starting memory.
    						continue
    					}
    				}
    				if f.pass.debug > 0 {
    					b.Func.Warnl(v.Pos, "%v is moved", v.Op)
    				}
    				// Move v to the block which dominates its uses.
    				t.Values = append(t.Values, v)
    				v.Block = t
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Tue May 16 01:01:38 UTC 2023
    - 7.7K bytes
    - Viewed (0)
  8. tensorflow/compiler/mlir/tensorflow/transforms/tpu_parallel_execute_sink_resource_write.cc

          parallel_execute.getOperation()->isBeforeInBlock(resource_handle_op))
        return nullptr;
    
      return assign_var;
    }
    
    // Finds AssignVariableOps that can be moved into a parallel_execute region and
    // moves them. Leftover parallel_execute results that were used by such
    // AssignVariableOps are also pruned.
    void SinkResourceWritesIntoParallelExecute(
        tf_device::ParallelExecuteOp parallel_execute) {
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue Dec 06 04:46:18 UTC 2022
    - 6.6K bytes
    - Viewed (0)
  9. platforms/native/language-native/src/main/java/org/gradle/language/swift/tasks/internal/SymbolHider.java

            public DataReader(byte[] dataBytes) {
                this.dataBytes = dataBytes;
            }
    
            public int getPosition() {
                return position;
            }
    
            public void moveTo(int position) {
                this.position = position;
            }
    
            public int readByte() {
                return Byte.toUnsignedInt(dataBytes[position++]);
            }
    
            public int readWord() {
    Registered: Wed Jun 12 18:38:38 UTC 2024
    - Last Modified: Thu Nov 16 20:20:03 UTC 2023
    - 5.1K bytes
    - Viewed (0)
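
The excerpt cuts off before readWord's body. A Go sketch of the same cursor-over-bytes pattern; the little-endian 16-bit word in readWord is an assumption for illustration, not taken from the file:

    package main

    import "fmt"

    // dataReader keeps a position into a byte slice, like the DataReader above.
    type dataReader struct {
    	data []byte
    	pos  int
    }

    func (r *dataReader) moveTo(pos int) { r.pos = pos }

    func (r *dataReader) readByte() int {
    	b := int(r.data[r.pos])
    	r.pos++
    	return b
    }

    // readWord reads two bytes as a little-endian 16-bit value (assumed).
    func (r *dataReader) readWord() int {
    	lo := r.readByte()
    	hi := r.readByte()
    	return lo | hi<<8
    }

    func main() {
    	r := &dataReader{data: []byte{0x34, 0x12, 0xff}}
    	fmt.Printf("word=%#x byte=%#x\n", r.readWord(), r.readByte()) // word=0x1234 byte=0xff
    	r.moveTo(0)
    	fmt.Println("rewound, first byte:", r.readByte())
    }
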
  10. src/runtime/memmove_ppc64x.s

    	STXVL	V0, TGT, TMP
    	RET
    #endif
    lt8:	// Move word if possible
    	CMP BYTES, $4
    	BLT lt4
    	MOVWZ 0(SRC), TMP
    	ADD $-4, BYTES
    	MOVW TMP, 0(TGT)
    	ADD $4, SRC
    	ADD $4, TGT
    lt4:	// Move halfword if possible
    	CMP BYTES, $2
    	BLT lt2
    	MOVHZ 0(SRC), TMP
    	ADD $-2, BYTES
    	MOVH TMP, 0(TGT)
    	ADD $2, SRC
    	ADD $2, TGT
    lt2:	// Move last byte if 1 left
    	CMP BYTES, $1
    	BC 12, 0, LR	// ble lr
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Fri Apr 21 16:47:45 UTC 2023
    - 4.9K bytes
    - Viewed (0)
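
Read back into Go, the scalar tail above (lt8/lt4/lt2) copies a remaining word, then a halfword, then a final byte once the wide path has handled the bulk. A sketch of just that tail; the authoritative logic is the PPC64 assembly itself:

    package main

    import "fmt"

    // tailCopy handles the last 0-7 bytes: word, then halfword, then byte.
    func tailCopy(dst, src []byte, n int) {
    	if n >= 4 { // move a word if possible
    		copy(dst[:4], src[:4])
    		dst, src, n = dst[4:], src[4:], n-4
    	}
    	if n >= 2 { // move a halfword if possible
    		copy(dst[:2], src[:2])
    		dst, src, n = dst[2:], src[2:], n-2
    	}
    	if n == 1 { // move the last byte if one is left
    		dst[0] = src[0]
    	}
    }

    func main() {
    	src := []byte{1, 2, 3, 4, 5, 6, 7}
    	dst := make([]byte, len(src))
    	tailCopy(dst, src, len(src))
    	fmt.Println(dst) // [1 2 3 4 5 6 7]
    }
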