Results 31 - 40 of 138 for Malloc (0.1 sec)

  1. src/syscall/exec_plan9.go

    //
    // In the child, this function must not acquire any locks, because
    // they might have been locked at the time of the fork. This means
    // no rescheduling, no malloc calls, and no new stack segments.
    // The calls to RawSyscall are okay because they are assembly
    // functions that do not grow the stack.
    //
    //go:norace
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Mon Feb 26 21:03:59 UTC 2024
    - 13.3K bytes
    - Viewed (0)
  2. src/go/types/call.go

    			pname.used = true
    			pkg := pname.imported
    
    			var exp Object
    			funcMode := value
    			if pkg.cgo {
    				// cgo special cases C.malloc: it's
    				// rewritten to _CMalloc and does not
    				// support two-result calls.
    				if sel == "malloc" {
    					sel = "_CMalloc"
    				} else {
    					funcMode = cgofunc
    				}
    				for _, prefix := range cgoPrefixes {
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Thu May 30 19:19:55 UTC 2024
    - 33.5K bytes
    - Viewed (0)
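    The snippet above is where go/types special-cases cgo's C.malloc: the selector is rewritten to _CMalloc and, unlike other cgo calls, it does not support the two-result "v, err :=" form. Below is a minimal, self-contained cgo sketch of the call being special-cased; the program itself is hypothetical, and only C.malloc, C.free, C.memset, and C.GoBytes are standard cgo names.

    package main

    /*
    #include <stdlib.h>
    #include <string.h>
    */
    import "C"

    import "fmt"

    func main() {
    	// C.malloc is the call go/types rewrites to _CMalloc; per the cgo
    	// docs it never returns nil (the program aborts on allocation
    	// failure), so there is no error result to check.
    	p := C.malloc(16)
    	defer C.free(p)

    	C.memset(p, C.int('x'), 16)           // fill the C buffer
    	fmt.Println(string(C.GoBytes(p, 16))) // copy it back into Go memory
    }
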
  3. src/testing/benchmark.go

    		return 0
    	}
    	return (float64(r.Bytes) * float64(r.N) / 1e6) / r.T.Seconds()
    }
    
    // AllocsPerOp returns the "allocs/op" metric,
    // which is calculated as r.MemAllocs / r.N.
    func (r BenchmarkResult) AllocsPerOp() int64 {
    	if v, ok := r.Extra["allocs/op"]; ok {
    		return int64(v)
    	}
    	if r.N <= 0 {
    		return 0
    	}
    	return int64(r.MemAllocs) / int64(r.N)
    }
    
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Thu May 23 01:00:11 UTC 2024
    - 23.9K bytes
    - Viewed (0)
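    AllocsPerOp, shown above, simply divides the benchmark's total MemAllocs by the iteration count N, unless an explicit "allocs/op" extra metric was recorded via b.ReportMetric. A small sketch of reading it from a standalone testing.Benchmark run; the fmt.Sprintf body is only a placeholder workload.

    package main

    import (
    	"fmt"
    	"testing"
    )

    func main() {
    	// testing.Benchmark runs the function with increasing b.N and
    	// returns a BenchmarkResult with MemAllocs and MemBytes filled in.
    	r := testing.Benchmark(func(b *testing.B) {
    		for i := 0; i < b.N; i++ {
    			_ = fmt.Sprintf("malloc #%d", i) // placeholder allocating work
    		}
    	})
    	fmt.Println("allocs/op:", r.AllocsPerOp())       // r.MemAllocs / r.N
    	fmt.Println("bytes/op: ", r.AllocedBytesPerOp()) // r.MemBytes / r.N
    }
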
  4. src/internal/reflectlite/all_test.go

    		}
    	}
    }
    
    func noAlloc(t *testing.T, n int, f func(int)) {
    	if testing.Short() {
    		t.Skip("skipping malloc count in short mode")
    	}
    	if runtime.GOMAXPROCS(0) > 1 {
    		t.Skip("skipping; GOMAXPROCS>1")
    	}
    	i := -1
    	allocs := testing.AllocsPerRun(n, func() {
    		f(i)
    		i++
    	})
    	if allocs > 0 {
    		t.Errorf("%d iterations: got %v mallocs, want 0", n, allocs)
    	}
    }
    
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Thu Nov 16 19:26:08 UTC 2023
    - 24.2K bytes
    - Viewed (0)
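    The noAlloc helper above is a thin wrapper around testing.AllocsPerRun, which runs the closure once as a warm-up and then returns the average number of mallocs over n runs. A sketch of using it directly in an ordinary test follows; the test name and the strings.Contains workload are illustrative only.

    package example_test

    import (
    	"strings"
    	"testing"
    )

    func TestContainsDoesNotAllocate(t *testing.T) {
    	if testing.Short() {
    		t.Skip("skipping malloc count in short mode") // same guard as above
    	}
    	// AllocsPerRun sets GOMAXPROCS to 1 for the measurement and returns
    	// the average allocation count per call of the closure.
    	allocs := testing.AllocsPerRun(100, func() {
    		_ = strings.Contains("mallocs and frees", "malloc")
    	})
    	if allocs > 0 {
    		t.Errorf("got %v mallocs per run, want 0", allocs)
    	}
    }
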
  5. src/syscall/exec_linux.go

    // (Pipe is close-on-exec so if exec succeeds, it will be closed.)
    // In the child, this function must not acquire any locks, because
    // they might have been locked at the time of the fork. This means
    // no rescheduling, no malloc calls, and no new stack segments.
    // For the same reason compiler does not race instrument it.
    // The calls to RawSyscall are okay because they are assembly
    // functions that do not grow the stack.
    //
    //go:norace
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Wed May 15 07:45:37 UTC 2024
    - 23K bytes
    - Viewed (0)
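    Both fork/exec snippets (items 1 and 5) rely on the guarantee that RawSyscall is a small assembly stub: it does not notify the scheduler, allocate, or grow the stack, which is why it is safe in the child between fork and exec. A Linux-only sketch of a direct RawSyscall; the program is hypothetical, with SYS_GETPID chosen only as a harmless syscall.

    //go:build linux

    package main

    import (
    	"fmt"
    	"syscall"
    )

    func main() {
    	// RawSyscall traps straight into the kernel: no entersyscall or
    	// exitsyscall, no malloc, no stack growth. These are the properties
    	// the forkAndExecInChild comments above depend on.
    	pid, _, errno := syscall.RawSyscall(syscall.SYS_GETPID, 0, 0, 0)
    	if errno != 0 {
    		fmt.Println("getpid failed:", errno)
    		return
    	}
    	fmt.Println("pid:", pid)
    }
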
  6. src/internal/abi/type.go

    // bytes with any other objects, allowing the GC program execution to
    // assume an aligned start and not use atomic operations. In the current
    // runtime, this means all malloc size classes larger than the cutoff must
    // be multiples of four words. On 32-bit systems that's 16 bytes, and
    // all size classes >= 16 bytes are 16-byte aligned, so no real constraint.
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Wed Apr 17 21:09:59 UTC 2024
    - 21.8K bytes
    - Viewed (0)
  7. src/runtime/os_windows.go

    		for p[0] != 0 {
    			p = p[1:]
    		}
    		p = p[1:] // skip nil byte
    	}
    
    	stdcall1(_FreeEnvironmentStringsW, uintptr(strings))
    
    	// We call these all the way here, late in init, so that malloc works
    	// for the callback functions these generate.
    	var fn any = ctrlHandler
    	ctrlHandlerPC := compileCallback(*efaceOf(&fn), true)
    	stdcall2(_SetConsoleCtrlHandler, ctrlHandlerPC, 1)
    
    	monitorSuspendResume()
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Fri Apr 26 22:55:25 UTC 2024
    - 41.5K bytes
    - Viewed (0)
  8. src/runtime/stack.go

    			// hchan locks. Normally, we only allow acquiring hchan
    			// locks and then getting a gscan bit. In this case, we
    			// already have the gscan bit. We allow acquiring hchan
    			// locks here as a special case, since a deadlock can't
    			// happen because the G involved must already be
    			// suspended. So, we get a special hchan lock rank here
    			// that is lower than gscan, but doesn't allow acquiring
    			// any other locks other than hchan.
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Wed May 22 22:31:00 UTC 2024
    - 41.1K bytes
    - Viewed (0)
  9. src/path/filepath/path_test.go

    	}
    
    	if testing.Short() {
    		t.Skip("skipping malloc count in short mode")
    	}
    	if runtime.GOMAXPROCS(0) > 1 {
    		t.Log("skipping AllocsPerRun checks; GOMAXPROCS>1")
    		return
    	}
    
    	for _, test := range tests {
    		allocs := testing.AllocsPerRun(100, func() { filepath.Clean(test.result) })
    		if allocs > 0 {
    			t.Errorf("Clean(%q): %v allocs, want zero", test.result, allocs)
    		}
    	}
    }
    
    type IsLocalTest struct {
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Fri Mar 22 16:38:19 UTC 2024
    - 47.1K bytes
    - Viewed (0)
  10. src/runtime/signal_unix.go

    		// signals then delivers them itself by calling
    		// the signal handler directly when C code,
    		// including C code called via cgo, calls a
    		// TSAN-intercepted function such as malloc.
    		//
    		// We check this condition last as g0.stack.lo
    		// may be not very accurate (see mstart).
    		st := stackt{ss_size: mp.g0.stack.hi - mp.g0.stack.lo}
    		setSignalstackSP(&st, mp.g0.stack.lo)
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Fri May 10 16:04:54 UTC 2024
    - 45K bytes
    - Viewed (0)