Search Options

Results per page
Sort
Preferred Languages
Advanced

Results 1 - 9 of 9 for safe_point (0.25 sec)

  1. tests/transaction_test.go

    	}
    
    	if err := tx.SavePoint("save_point1").Error; err != nil {
    		t.Fatalf("Failed to save point, got error %v", err)
    	}
    
    	user1 := *GetUser("transaction-save-point-1", Config{})
    	tx.Create(&user1)
    
    	if err := tx.First(&User{}, "name = ?", user1.Name).Error; err != nil {
    		t.Fatalf("Should find saved record")
    	}
    
    	if err := tx.RollbackTo("save_point1").Error; err != nil {
    Registered: Wed Jun 12 16:27:09 UTC 2024
    - Last Modified: Wed May 08 04:07:58 UTC 2024
    - 10.9K bytes
    - Viewed (0)
  2. src/cmd/compile/internal/liveness/plive.go

    	lv.livevars = lv.livevars[:0]
    }
    
    func (lv *liveness) enableClobber() {
    	// The clobberdead experiment inserts code to clobber pointer slots in all
    	// the dead variables (locals and args) at every synchronous safepoint.
    	if !base.Flag.ClobberDead {
    		return
    	}
    	if lv.fn.Pragma&ir.CgoUnsafeArgs != 0 {
    		// C or assembly code uses the exact frame layout. Don't clobber.
    		return
    	}
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Fri Jun 07 15:22:22 UTC 2024
    - 45.2K bytes
    - Viewed (0)
  3. src/cmd/internal/obj/link.go

    	Asym    *LSym
    	Aoffset int32
    	Name    AddrName
    	Gotype  *LSym
    }
    
    // RegSpill provides spill/fill information for a register-resident argument
    // to a function.  These need spilling/filling in the safepoint/stackgrowth case.
    // At the time of fill/spill, the offset must be adjusted by the architecture-dependent
    // adjustment to hardware SP that occurs in a call instruction.  E.g., for AMD64,
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Wed May 15 19:57:43 UTC 2024
    - 33.1K bytes
    - Viewed (0)
  4. src/cmd/compile/internal/ssa/_gen/genericOps.go

    	// Convert converts between pointers and integers.
    	// We have a special op for this so as to not confuse GC
    	// (particularly stack maps).  It takes a memory arg so it
    	// gets correctly ordered with respect to GC safepoints.
    	// It gets compiled to nothing, so its result must in the same
    	// register as its argument. regalloc knows it can use any
    	// allocatable integer register for OpConvert.
    	// arg0=ptr/int arg1=mem, output=int/ptr
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Thu May 23 15:49:20 UTC 2024
    - 42.6K bytes
    - Viewed (0)
  5. src/runtime/runtime2.go

    	freem *m
    
    	gcwaiting  atomic.Bool // gc is waiting to run
    	stopwait   int32
    	stopnote   note
    	sysmonwait atomic.Bool
    	sysmonnote note
    
    	// safePointFn should be called on each P at the next GC
    	// safepoint if p.runSafePointFn is set.
    	safePointFn   func(*p)
    	safePointWait int32
    	safePointNote note
    
    	profilehz int32 // cpu profiling rate
    
    	procresizetime int64 // nanotime() of last change to gomaxprocs
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Thu May 30 17:57:37 UTC 2024
    - 47.9K bytes
    - Viewed (0)
  6. src/cmd/compile/internal/ppc64/ssa.go

    		p := s.Prog(storeOp)
    		p.From.Type = obj.TYPE_REG
    		p.From.Reg = v.Args[0].Reg()
    		ssagen.AddrAuto(&p.To, v)
    
    	case ssa.OpArgIntReg, ssa.OpArgFloatReg:
    		// The assembler needs to wrap the entry safepoint/stack growth code with spill/unspill
    		// The loop only runs once.
    		for _, a := range v.Block.Func.RegArgs {
    			// Pass the spill/unspill information along to the assembler, offset by size of
    			// the saved LR slot.
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Wed May 22 19:59:38 UTC 2024
    - 55.4K bytes
    - Viewed (0)
  7. src/runtime/malloc.go

    	return assistG
    }
    
    // memclrNoHeapPointersChunked repeatedly calls memclrNoHeapPointers
    // on chunks of the buffer to be zeroed, with opportunities for preemption
    // along the way.  memclrNoHeapPointers contains no safepoints and also
    // cannot be preemptively scheduled, so this provides a still-efficient
    // block copy that can also be preempted on a reasonable granularity.
    //
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Wed May 29 17:58:53 UTC 2024
    - 59.6K bytes
    - Viewed (0)
  8. src/runtime/proc.go

    func forEachP(reason waitReason, fn func(*p)) {
    	systemstack(func() {
    		gp := getg().m.curg
    		// Mark the user stack as preemptible so that it may be scanned.
    		// Otherwise, our attempt to force all P's to a safepoint could
    		// result in a deadlock as we attempt to preempt a worker that's
    		// trying to preempt us (e.g. for a stack scan).
    		//
    		// N.B. The execution tracer is not aware of this status
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Wed May 29 17:58:53 UTC 2024
    - 207.5K bytes
    - Viewed (0)
  9. src/cmd/compile/internal/ssagen/ssa.go

    	if f.OwnAux.ABIInfo().InRegistersUsed() != 0 && base.Flag.N == 0 {
    		// First, see if it is already spilled before it may be live. Look for a spill
    		// in the entry block up to the first safepoint.
    		type nameOff struct {
    			n   *ir.Name
    			off int64
    		}
    		partLiveArgsSpilled := make(map[nameOff]bool)
    		for _, v := range f.Entry.Values {
    			if v.Op.IsCall() {
    				break
    			}
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Mon Jun 10 19:44:43 UTC 2024
    - 284.9K bytes
    - Viewed (0)
Back to top