Search Options

Results per page
Sort
Preferred Languages
Advanced

Results 1 - 9 of 9 for blockProfile (0.24 sec)

  1. src/runtime/mprof.go

    	Cycles int64
    	StackRecord
    }
    
    // BlockProfile returns n, the number of records in the current blocking profile.
    // If len(p) >= n, BlockProfile copies the profile into p and returns n, true.
    // If len(p) < n, BlockProfile does not change p and returns n, false.
    //
    // Most clients should use the [runtime/pprof] package or
    // the [testing] package's -test.blockprofile flag instead
    // of calling BlockProfile directly.
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Thu May 30 17:57:37 UTC 2024
    - 53.3K bytes
    - Viewed (0)
  2. src/testing/testing.go

    			os.Exit(2)
    		}
    		f.Close()
    	}
    	if *blockProfile != "" && *blockProfileRate >= 0 {
    		f, err := os.Create(toOutputDir(*blockProfile))
    		if err != nil {
    			fmt.Fprintf(os.Stderr, "testing: %s\n", err)
    			os.Exit(2)
    		}
    		if err = m.deps.WriteProfileTo("block", f, 0); err != nil {
    			fmt.Fprintf(os.Stderr, "testing: can't write %s: %s\n", *blockProfile, err)
    			os.Exit(2)
    		}
    		f.Close()
    	}
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Thu May 23 01:00:11 UTC 2024
    - 76.1K bytes
    - Viewed (0)
  3. src/runtime/pprof/pprof.go

    	name:  "heap",
    	count: countHeap,
    	write: writeHeap,
    }
    
    var allocsProfile = &Profile{
    	name:  "allocs",
    	count: countHeap, // identical to heap profile
    	write: writeAlloc,
    }
    
    var blockProfile = &Profile{
    	name:  "block",
    	count: countBlock,
    	write: writeBlock,
    }
    
    var mutexProfile = &Profile{
    	name:  "mutex",
    	count: countMutex,
    	write: writeMutex,
    }
    
    func lockProfiles() {
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Thu May 30 17:52:17 UTC 2024
    - 30.6K bytes
    - Viewed (0)
  4. src/cmd/go/internal/test/test.go

    profile the tests during execution:
    
    	-benchmem
    	    Print memory allocation statistics for benchmarks.
    	    Allocations made in C or using C.malloc are not counted.
    
    	-blockprofile block.out
    	    Write a goroutine blocking profile to the specified file
    	    when all tests are complete.
    	    Writes test binary as -c would.
    
    	-blockprofilerate n
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Thu May 16 14:34:32 UTC 2024
    - 71.9K bytes
    - Viewed (0)
  5. src/cmd/go/alldocs.go

    // profile the tests during execution:
    //
    //	-benchmem
    //	    Print memory allocation statistics for benchmarks.
    //	    Allocations made in C or using C.malloc are not counted.
    //
    //	-blockprofile block.out
    //	    Write a goroutine blocking profile to the specified file
    //	    when all tests are complete.
    //	    Writes test binary as -c would.
    //
    //	-blockprofilerate n
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Tue Jun 11 16:54:28 UTC 2024
    - 142.4K bytes
    - Viewed (0)
  6. src/runtime/lock_futex.go

    	v := atomic.Xchg(key32(&l.key), mutex_unlocked)
    	if v == mutex_unlocked {
    		throw("unlock of unlocked lock")
    	}
    	if v == mutex_sleeping {
    		futexwakeup(key32(&l.key), 1)
    	}
    
    	gp := getg()
    	gp.m.mLockProfile.recordUnlock(l)
    	gp.m.locks--
    	if gp.m.locks < 0 {
    		throw("runtime·unlock: lock count")
    	}
    	if gp.m.locks == 0 && gp.preempt { // restore the preemption request in case we've cleared it in newstack
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Thu May 30 17:57:34 UTC 2024
    - 5.4K bytes
    - Viewed (0)
  7. src/runtime/lock_sema.go

    			// Dequeue an M.
    			mp = muintptr(v &^ locked).ptr()
    			if atomic.Casuintptr(&l.key, v, uintptr(mp.nextwaitm)) {
    				// Dequeued an M.  Wake it.
    				semawakeup(mp)
    				break
    			}
    		}
    	}
    	gp.m.mLockProfile.recordUnlock(l)
    	gp.m.locks--
    	if gp.m.locks < 0 {
    		throw("runtime·unlock: lock count")
    	}
    	if gp.m.locks == 0 && gp.preempt { // restore the preemption request in case we've cleared it in newstack
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Thu May 30 17:57:37 UTC 2024
    - 6.8K bytes
    - Viewed (0)
  8. src/runtime/runtime2.go

    	lockedExt     uint32      // tracking for external LockOSThread
    	lockedInt     uint32      // tracking for internal lockOSThread
    	nextwaitm     muintptr    // next m waiting for lock
    
    	mLockProfile mLockProfile // fields relating to runtime.lock contention
    	profStack    []uintptr    // used for memory/block/mutex stack traces
    
    	// wait* are used to carry arguments from gopark into park_m, because
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Thu May 30 17:57:37 UTC 2024
    - 47.9K bytes
    - Viewed (0)
  9. src/runtime/proc.go

    // malloc and runtime locks for mLockProfile.
    // TODO(mknyszek): Implement lazy allocation if this becomes a problem.
    func mProfStackInit(mp *m) {
    	if debug.profstackdepth == 0 {
    		// debug.profstack is set to 0 by the user, or we're being called from
    		// schedinit before parsedebugvars.
    		return
    	}
    	mp.profStack = makeProfStackFP()
    	mp.mLockProfile.stack = makeProfStackFP()
    }
    
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Wed May 29 17:58:53 UTC 2024
    - 207.5K bytes
    - Viewed (0)
Back to top