Results 1 - 9 of 9 for allg (0.07 sec)

  1. src/runtime/mgc.go

    		mheap_.sweepPagesPerByte = 0
    		unlock(&mheap_.lock)
    		// Flush all mcaches.
    		for _, pp := range allp {
    			pp.mcache.prepareForSweep()
    		}
    		// Sweep all spans eagerly.
    		for sweepone() != ^uintptr(0) {
    		}
    		// Free workbufs eagerly.
    		prepareFreeWorkbufs()
    		for freeSomeWbufs(false) {
    		}
    		// All "free" events for this mark/sweep cycle have
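
    This excerpt is the sweep-termination path of a GC cycle: the pacer is reset,
    every P's mcache is flushed, and all spans and workbufs are swept and freed
    eagerly rather than lazily. From user code, the closest lever is forcing a
    full cycle; a minimal sketch using only standard-library API:

    package main

    import (
        "runtime"
        "runtime/debug"
    )

    func main() {
        // runtime.GC blocks until a full mark/sweep cycle, including the
        // eager sweep shown above, has completed.
        runtime.GC()
        // debug.FreeOSMemory forces a GC and additionally returns freed
        // pages to the operating system.
        debug.FreeOSMemory()
    }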
  2. src/cmd/compile/internal/types/type.go

    	intRegs, floatRegs uint8 // registers needed for ABIInternal
    
    	flags bitset8
    	alg   AlgKind // valid if Align > 0
    
    	// size of prefix of object that contains all pointers. valid if Align > 0.
    	// Note that for pointers, this is always PtrSize even if the element type
    	// is NotInHeap. See size.go:PtrDataSize for details.
    	ptrBytes int64
    
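
    The flags field here is a bitset8, the compiler's one-byte flag set. A
    minimal sketch of the same pattern, reproduced from memory (the mask names
    below are hypothetical; the real masks live alongside Type in this package):

    package typesketch

    // bitset8 packs up to eight booleans into one byte, in the style of
    // cmd/compile/internal/types.
    type bitset8 uint8

    // set turns the bits in mask on or off.
    func (f *bitset8) set(mask uint8, b bool) {
        if b {
            *(*uint8)(f) |= mask
        } else {
            *(*uint8)(f) &^= mask
        }
    }

    const (
        flagA uint8 = 1 << iota // hypothetical mask
        flagB
    )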
  3. src/runtime/mprof.go

    // Stack formats a stack trace of the calling goroutine into buf
    // and returns the number of bytes written to buf.
    // If all is true, Stack formats stack traces of all other goroutines
    // into buf after the trace for the current goroutine.
    func Stack(buf []byte, all bool) int {
    	var stw worldStop
    	if all {
    		stw = stopTheWorld(stwAllGoroutinesStack)
    	}
    
    	n := 0
    	if len(buf) > 0 {
    		gp := getg()
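
    runtime.Stack is public API; as the excerpt shows, passing all == true stops
    the world while the traces are formatted. A small usage example (the 64 KiB
    buffer size is an arbitrary choice):

    package main

    import (
        "fmt"
        "runtime"
    )

    func main() {
        buf := make([]byte, 1<<16)    // grow and retry if n == len(buf)
        n := runtime.Stack(buf, true) // true: include all goroutines
        fmt.Printf("%s", buf[:n])
    }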
  4. src/cmd/internal/obj/mips/asm0.go

    	{AMOVBU, C_SOREG, C_NONE, C_REG, 8, 4, REGZERO, 0, 0},
    	{AMOVWL, C_SOREG, C_NONE, C_REG, 8, 4, REGZERO, 0, 0},
    	{AMOVVL, C_SOREG, C_NONE, C_REG, 8, 4, REGZERO, sys.MIPS64, 0},
    	{ALL, C_SOREG, C_NONE, C_REG, 8, 4, REGZERO, 0, 0},
    	{ALLV, C_SOREG, C_NONE, C_REG, 8, 4, REGZERO, sys.MIPS64, 0},
    
    	{AMOVW, C_REG, C_NONE, C_LEXT, 35, 12, REGSB, sys.MIPS64, 0},
    	{AMOVWU, C_REG, C_NONE, C_LEXT, 35, 12, REGSB, sys.MIPS64, 0},
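
    ALL and ALLV are the assembler's mnemonics for the MIPS load-linked
    instructions (32- and 64-bit); paired with store-conditional they implement
    atomic read-modify-write. At the Go source level that machinery surfaces as
    sync/atomic, for example:

    package main

    import (
        "fmt"
        "sync/atomic"
    )

    func main() {
        var v int32 = 1
        // On MIPS this compare-and-swap is built from a load-linked/
        // store-conditional loop using the ALL entry above.
        swapped := atomic.CompareAndSwapInt32(&v, 1, 2)
        fmt.Println(swapped, v) // true 2
    }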
  5. tensorflow/compiler/mlir/tensorflow/tests/decompose_resource_ops.mlir

        // correctly.
        // CHECK-NOT: tf.RngReadAndSkip
        %alg = "tf.Const"() {value = dense<1> : tensor<i32>} : () -> tensor<i32>
        %delta = "tf.Const"() {value = dense<10> : tensor<ui64>} : () -> tensor<ui64>
        %1 = "tf.RngReadAndSkip"(%resource, %alg, %delta) : (tensor<!tf_type.resource<tensor<3xi64>>>, tensor<i32>, tensor<ui64>) -> tensor<3xi64>
        tf_device.return %1 : tensor<3xi64>
  6. src/cmd/internal/obj/loong64/asm.go

    	{AMOVWL, C_SOREG, C_NONE, C_NONE, C_REG, C_NONE, 8, 4, REGZERO, 0},
    	{AMOVVL, C_SOREG, C_NONE, C_NONE, C_REG, C_NONE, 8, 4, REGZERO, 0},
    	{ALL, C_SOREG, C_NONE, C_NONE, C_REG, C_NONE, 8, 4, REGZERO, 0},
    	{ALLV, C_SOREG, C_NONE, C_NONE, C_REG, C_NONE, 8, 4, REGZERO, 0},
    
    	{AMOVW, C_REG, C_NONE, C_NONE, C_LEXT, C_NONE, 35, 12, 0, 0},
    	{AMOVWU, C_REG, C_NONE, C_NONE, C_LEXT, C_NONE, 35, 12, 0, 0},
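
    The loong64 table has the same shape as the MIPS one above, with two extra
    operand-class columns. Selecting a row is a match over those classes; a
    deliberately naive sketch (field names are hypothetical, and the real
    lookup in asm.go is sorted and cached):

    package optabsketch

    // row mirrors the shape of a loong64 optab entry.
    type row struct {
        as, a1, a2, a3, a4, a5 int // opcode and operand classes
        kind, size             int // encoding case, bytes emitted
    }

    // lookup returns the first row whose opcode and operand classes match
    // the instruction being assembled.
    func lookup(rows []row, want row) (row, bool) {
        for _, r := range rows {
            if r.as == want.as && r.a1 == want.a1 && r.a2 == want.a2 &&
                r.a3 == want.a3 && r.a4 == want.a4 && r.a5 == want.a5 {
                return r, true
            }
        }
        return row{}, false
    }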
  7. src/runtime/mgcpacer.go

    	// maxScanWork is a worst-case estimate of the amount of scan work that
    	// needs to be performed in this GC cycle. Specifically, it represents
    	// the case where *all* scannable memory turns out to be live, and
    	// *all* allocated stack space is scannable.
    	maxStackScan := c.maxStackScan.Load()
    	maxScanWork := int64(scan + maxStackScan + c.globalsScan.Load())
    	if work > scanWorkExpected {
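
    The bound being computed is a plain sum over the three scannable memory
    classes. Restated as a function (a sketch; the names follow the excerpt):

    package pacersketch

    // maxScanWork is the cycle's worst case: every scannable byte of heap,
    // goroutine stacks, and globals turns out to be live.
    func maxScanWork(heapScan, maxStackScan, globalsScan uint64) int64 {
        return int64(heapScan + maxStackScan + globalsScan)
    }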
  8. src/runtime/pprof/pprof_test.go

    				}
    
    				// cpuHog1 called below is the primary source of CPU
    				// load, but there may be some background work by the
    				// runtime. Since the OS rusage measurement will
    				// include all work done by the process, also compare
    				// against all samples in our profile.
    				var value time.Duration
    				for _, sample := range p.Sample {
    					value += time.Duration(sample.Value[1]) * time.Nanosecond
    				}
    
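
    The test sums Value[1] of every sample, which in a CPU profile is the
    sampled CPU time in nanoseconds. The same accounting works outside the
    runtime with github.com/google/pprof/profile (a sketch; "cpu.pprof" is a
    placeholder path, and check p.SampleType before trusting the value index):

    package main

    import (
        "fmt"
        "os"
        "time"

        "github.com/google/pprof/profile"
    )

    func main() {
        f, err := os.Open("cpu.pprof")
        if err != nil {
            panic(err)
        }
        defer f.Close()

        p, err := profile.Parse(f)
        if err != nil {
            panic(err)
        }

        // Value[1] is CPU nanoseconds for profiles written by runtime/pprof.
        var total time.Duration
        for _, s := range p.Sample {
            total += time.Duration(s.Value[1]) * time.Nanosecond
        }
        fmt.Println("total sampled CPU time:", total)
    }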
  9. src/runtime/mgcmark.go

    	}
    
    	gcw := &getg().m.p.ptr().gcw
    	gcw.bytesMarked += uint64(span.elemsize)
    }
    
    // gcMarkTinyAllocs greys all active tiny alloc blocks.
    //
    // The world must be stopped.
    func gcMarkTinyAllocs() {
    	assertWorldStopped()
    
    	for _, p := range allp {
    		c := p.mcache
    		if c == nil || c.tiny == 0 {
    			continue
    		}
    		_, span, objIndex := findObject(c.tiny, 0, 0)
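
    gcMarkTinyAllocs greys each P's active tiny-allocation block, the 16-byte
    region where the runtime packs small pointer-free objects. Tiny allocation
    counts are observable from user code via runtime/metrics; a sketch assuming
    the metric name /gc/heap/tiny/allocs:objects:

    package main

    import (
        "fmt"
        "runtime/metrics"
    )

    func main() {
        s := []metrics.Sample{{Name: "/gc/heap/tiny/allocs:objects"}}
        metrics.Read(s)
        if s[0].Value.Kind() == metrics.KindUint64 {
            fmt.Println("tiny allocs:", s[0].Value.Uint64())
        }
    }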