Results 131 - 140 of 461 for Atack (0.06 sec)

  1. src/syscall/exec_libc2.go

    // they might have been locked at the time of the fork. This means
    // no rescheduling, no malloc calls, and no new stack segments.
    // For the same reason compiler does not race instrument it.
    // The calls to rawSyscall are okay because they are assembly
    // functions that do not grow the stack.
    //
    //go:norace
    - Last Modified: Fri Sep 29 18:51:35 UTC 2023
    - 8.2K bytes
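    The child-side constraints in this comment are handled entirely inside the runtime and syscall packages; ordinary programs reach this path through os/exec (or syscall.ForkExec). A minimal sketch of that usual route, with an arbitrary example command:

    package main

    import (
    	"fmt"
    	"os/exec"
    )

    func main() {
    	// os/exec ultimately reaches the fork/exec path excerpted above; the
    	// "no malloc, no new stack segments" rules are enforced inside the
    	// runtime and syscall packages, not by the caller.
    	out, err := exec.Command("echo", "hello").Output()
    	if err != nil {
    		panic(err)
    	}
    	fmt.Print(string(out))
    }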
  2. src/runtime/rand.go

    		// so we don't need to use mp.locks
    		// on the fast path, which is that the
    		// first attempt succeeds.
    		x, ok := c.Next()
    		if ok {
    			return x
    		}
    		mp.locks++ // hold m even though c.Refill may do stack split checks
    		c.Refill()
    		mp.locks--
    	}
    }
    
    // mrandinit initializes the random state of an m.
    func mrandinit(mp *m) {
    	var seed [4]uint64
    	for i := range seed {
    		seed[i] = bootstrapRand()
    - Last Modified: Fri May 31 14:32:47 UTC 2024
    - 8K bytes
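    bootstrapRand and the per-m state are internal to the runtime, but math/rand/v2 exposes the same ChaCha8 generator. A rough user-level analogue of the seeding step, with crypto/rand standing in for the runtime's bootstrap entropy (an assumption for illustration, not the runtime's actual code path):

    package main

    import (
    	crand "crypto/rand"
    	"fmt"
    	"math/rand/v2"
    )

    func main() {
    	// Fill a 32-byte seed from the OS, then hand it to a ChaCha8 stream,
    	// loosely mirroring how mrandinit seeds each m's random state.
    	var seed [32]byte
    	if _, err := crand.Read(seed[:]); err != nil {
    		panic(err)
    	}
    	r := rand.NewChaCha8(seed)
    	fmt.Println(r.Uint64())
    }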
  3. src/cmd/vendor/golang.org/x/mod/module/module.go

    // needing different casings of a file path, but it would be fairly illegible
    // to most programmers when those paths appeared in the file system
    // (including in file paths in compiler errors and stack traces)
    // in web server logs, and so on. Instead, we want a safe escaped form that
    // leaves most paths unaltered.
    //
    // The safe escaped form is to replace every uppercase letter
    - Last Modified: Thu May 09 20:17:07 UTC 2024
    - 26.9K bytes
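    The escaping scheme this comment goes on to describe is exposed by the same package as module.EscapePath: each uppercase letter is replaced by '!' plus its lowercase form. A small sketch, using an arbitrary example path:

    package main

    import (
    	"fmt"

    	"golang.org/x/mod/module"
    )

    func main() {
    	// Uppercase letters are escaped so the path stays unambiguous on
    	// case-insensitive file systems.
    	esc, err := module.EscapePath("github.com/Azure/azure-sdk-for-go")
    	if err != nil {
    		panic(err)
    	}
    	fmt.Println(esc) // github.com/!azure/azure-sdk-for-go
    }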
  4. src/runtime/mbitmap.go

    		// ep's data pointer sometime before this point and it's possible
    		// for that memory to get freed.
    		KeepAlive(ep)
    		return
    	}
    
    	// stack
    	if gp := getg(); gp.m.curg.stack.lo <= uintptr(p) && uintptr(p) < gp.m.curg.stack.hi {
    		found := false
    		var u unwinder
    		for u.initAt(gp.m.curg.sched.pc, gp.m.curg.sched.sp, 0, gp.m.curg, 0); u.valid(); u.next() {
    - Last Modified: Thu May 23 00:18:55 UTC 2024
    - 60K bytes
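    The KeepAlive(ep) call above follows the standard pattern for code that keeps only a raw address derived from an object; a minimal user-level sketch of the same idiom:

    package main

    import (
    	"fmt"
    	"runtime"
    	"unsafe"
    )

    func main() {
    	b := make([]byte, 8)
    	p := uintptr(unsafe.Pointer(&b[0]))
    	// Once only the uintptr is used, b itself could be collected and the
    	// address could dangle; KeepAlive keeps b live past the last raw use.
    	fmt.Printf("data at %#x\n", p)
    	runtime.KeepAlive(b)
    }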
  5. src/cmd/compile/internal/walk/convert.go

    		// n is a readonly global; use it directly.
    		value = n
    	case conv.Esc() == ir.EscNone && fromType.Size() <= 1024:
    		// n does not escape. Use a stack temporary initialized to n.
    		value = typecheck.TempAt(base.Pos, ir.CurFunc, fromType)
    		init.Append(typecheck.Stmt(ir.NewAssignStmt(base.Pos, value, n)))
    	}
    	if value != nil {
    		// The interface data word is &value.
    - Last Modified: Mon Oct 09 17:28:22 UTC 2023
    - 18.2K bytes
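    Which branch a conversion takes (read-only global, stack temporary, or heap) follows from the compiler's escape analysis; a small probe, with made-up type and function names, whose escape decisions can be inspected via `go build -gcflags=-m`:

    package main

    import "fmt"

    // Build with `go build -gcflags=-m` to see the escape decisions that
    // drive the choice excerpted above. All names here are illustrative.

    type payload struct{ a, b, c, d int } // 32 bytes, well under the 1024-byte limit

    //go:noinline
    func use(i any) int {
    	p, _ := i.(payload)
    	return p.a
    }

    func main() {
    	v := payload{1, 2, 3, 4}
    	fmt.Println(use(v)) // v is boxed into an interface value at this call
    }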
  6. src/runtime/metrics_test.go

    					}) {
    						// stk is a call stack that is still on the user stack when
    						// it calls runtime.unlock. Add the extra function that
    						// we'll see, when the static lock ranking implementation of
    						// runtime.unlockWithRank switches to the system stack.
    						stk = append([]string{"runtime.unlockWithRank"}, stk...)
    					}
    				}
    - Last Modified: Thu May 30 17:52:17 UTC 2024
    - 45K bytes
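    The adjustment above is the usual slice-prepend idiom; a standalone sketch with placeholder frame names:

    package main

    import "fmt"

    func main() {
    	// Prepend the extra frame by building a new slice around the old one,
    	// as the test above does for runtime.unlockWithRank.
    	stk := []string{"runtime.unlock", "main.work", "main.main"}
    	stk = append([]string{"runtime.unlockWithRank"}, stk...)
    	fmt.Println(stk)
    }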
  7. src/runtime/string.go

    	return unsafe.String((*byte)(p), n)
    }
    
    // stringDataOnStack reports whether the string's data is
    // stored on the current goroutine's stack.
    func stringDataOnStack(s string) bool {
    	ptr := uintptr(unsafe.Pointer(unsafe.StringData(s)))
    	stk := getg().stack
    	return stk.lo <= ptr && ptr < stk.hi
    }
    
    func rawstringtmp(buf *tmpBuf, l int) (s string, b []byte) {
    	if buf != nil && l <= len(buf) {
    		b = buf[:l]
    - Last Modified: Thu May 23 01:17:26 UTC 2024
    - 13.4K bytes
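    stringDataOnStack itself is internal (getg() is not reachable from user code), but the test is just a half-open range check on the string's data pointer; a sketch with caller-supplied, hypothetical bounds:

    package main

    import (
    	"fmt"
    	"unsafe"
    )

    // within mirrors the [lo, hi) test in stringDataOnStack; lo and hi are
    // stand-ins for the goroutine's stack bounds, which user code cannot
    // obtain directly.
    func within(ptr, lo, hi uintptr) bool {
    	return lo <= ptr && ptr < hi
    }

    func main() {
    	s := "hello"
    	ptr := uintptr(unsafe.Pointer(unsafe.StringData(s)))
    	fmt.Println(within(ptr, 0, ^uintptr(0))) // trivially wide bounds, for illustration only
    }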
  8. src/cmd/cover/cover.go

    	cv := mkCounterVarName(len(f.pkg.counterLengths))
    	f.fn.counterVar = cv
    }
    
    func (f *File) postFunc(fn ast.Node, funcname string, flit bool, body *ast.BlockStmt) {
    
    	// Tack on single counter write if we are in "perfunc" mode.
    	singleCtr := ""
    	if pkgconfig.Granularity == "perfunc" {
    		singleCtr = "; " + f.newCounter(fn.Pos(), fn.Pos(), 1)
    	}
    
    - Last Modified: Tue May 14 19:41:17 UTC 2024
    - 34.5K bytes
  9. src/index/suffixarray/sais.go

    	// max(maxID, numLMS/2). This level of the recursion needs maxID,
    	// and all deeper levels of the recursion will need no more than numLMS/2,
    	// so this one allocation is guaranteed to suffice for the entire stack
    	// of recursive calls.
    	tmp := oldTmp
    	if len(tmp) < len(saTmp) {
    		tmp = saTmp
    	}
    	if len(tmp) < numLMS {
    		// TestSAIS/forcealloc reaches this code.
    		n := maxID
    		if n < numLMS/2 {
    - Last Modified: Mon Mar 18 23:57:18 UTC 2024
    - 32.4K bytes
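    The sizing argument in this comment reduces to taking max(maxID, numLMS/2) once up front; a small sketch of that rule, reusing the snippet's (internal) names:

    package main

    import "fmt"

    // scratchSize sketches the sizing rule described above: the current level
    // of the recursion needs maxID entries, and no deeper level needs more
    // than numLMS/2, so one buffer of this size serves the whole call stack.
    func scratchSize(maxID, numLMS int) int {
    	n := maxID
    	if half := numLMS / 2; n < half {
    		n = half
    	}
    	return n
    }

    func main() {
    	fmt.Println(scratchSize(100, 300)) // 150: the deeper levels dominate here
    }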
  10. src/math/big/int_test.go

    	for _, n := range []int64{0, 7, -7, 1 << 30, -1 << 30, 1 << 50, -1 << 50} {
    		x := NewInt(3)
    		got := testing.AllocsPerRun(100, func() {
    			// NewInt should inline, and all its allocations
    			// can happen on the stack. Passing the result of NewInt
    			// to Add should not cause any of those allocations to escape.
    			x.Add(x, NewInt(n))
    		})
    		if got != 0 {
    			t.Errorf("x.Add(x, NewInt(%d)), wanted 0 allocations, got %f", n, got)
    - Last Modified: Thu May 23 18:42:28 UTC 2024
    - 58.5K bytes
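    testing.AllocsPerRun is an exported helper, so the same zero-allocation measurement can be reproduced outside the test file; a minimal sketch:

    package main

    import (
    	"fmt"
    	"math/big"
    	"testing"
    )

    func main() {
    	// Average allocations over 100 runs of the closure, as the test above
    	// does; NewInt is expected to stay on the stack when it inlines.
    	x := big.NewInt(3)
    	allocs := testing.AllocsPerRun(100, func() {
    		x.Add(x, big.NewInt(7))
    	})
    	fmt.Println(allocs)
    }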