Search Options

Results per page
Sort
Preferred Languages
Advanced

Results 1 - 10 of 181 for room (0.07 sec)

  1. src/runtime/mheap.go

    	// and is otherwise unrelated to h.curArena.base.
    	end := h.curArena.base + ask
    	nBase := alignUp(end, physPageSize)
    	if nBase > h.curArena.end || /* overflow */ end < h.curArena.base {
    		// Not enough room in the current arena. Allocate more
    		// arena space. This may not be contiguous with the
    		// current arena, so we have to request the full ask.
    		av, asize := h.sysAlloc(ask, &h.arenaHints, true)
    		if av == nil {
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Wed May 22 22:31:00 UTC 2024
    - 78K bytes
    - Viewed (0)
  2. tensorflow/compiler/jit/mark_for_compilation_pass.cc

            // not compiled by XLA altogether or, if assigned to an XLA_* device
            // with "must compile" semantics, compiled into a trivial single-op
            // cluster.  This approach leaves some room for improvement, and we can
            // consider implementing a more aggressive data-flow-analysis based
            // solution in the future if needed.
            //
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed Feb 21 12:19:41 UTC 2024
    - 85.3K bytes
    - Viewed (0)
  3. src/cmd/link/internal/ld/lib.go

    	// text size is smaller than TrampLimit, we won't need to insert trampolines.
    	// It is pretty close to the offset range of a direct CALL machine instruction.
    	// We leave some room for extra stuff like PLT stubs.
    	TrampLimit uint64
    
    	// Empty spaces between codeblocks will be padded with this value.
    	// For example an architecture might want to pad with a trap instruction to
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Tue May 21 18:45:27 UTC 2024
    - 88.6K bytes
    - Viewed (0)
  4. src/cmd/compile/internal/ssa/regalloc.go

    	s.used |= regMask(1) << r
    	s.f.setHome(c, &s.registers[r])
    }
    
    // allocReg chooses a register from the set of registers in mask.
    // If there is no unused register, a Value will be kicked out of
    // a register to make room.
    func (s *regAllocState) allocReg(mask regMask, v *Value) register {
    	if v.OnWasmStack {
    		return noRegister
    	}
    
    	mask &= s.allocatable
    	mask &^= s.nospill
    	if mask == 0 {
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Tue Nov 21 17:49:56 UTC 2023
    - 87.2K bytes
    - Viewed (0)
  5. staging/src/k8s.io/apiserver/pkg/storage/cacher/cacher.go

    			Status:  metav1.StatusFailure,
    			Message: err.Error(),
    			Reason:  metav1.StatusReasonInternalError,
    			Code:    http.StatusInternalServerError,
    		}
    	}
    
    	// Create a watcher with room for a single event, populate it, and close the channel
    	watcher := &errWatcher{result: make(chan watch.Event, 1)}
    	watcher.result <- errEvent
    	close(watcher.result)
    
    	return watcher
    }
    
    Registered: Sat Jun 15 01:39:40 UTC 2024
    - Last Modified: Wed Jun 12 10:12:02 UTC 2024
    - 51.8K bytes
    - Viewed (0)
  6. src/crypto/tls/conn.go

    			payloadBytes -= c.out.mac.Size()
    		case cipher.AEAD:
    			payloadBytes -= ciph.Overhead()
    		case cbcMode:
    			blockSize := ciph.BlockSize()
    			// The payload must fit in a multiple of blockSize, with
    			// room for at least one padding byte.
    			payloadBytes = (payloadBytes & ^(blockSize - 1)) - 1
    			// The MAC is appended before padding so affects the
    			// payload size directly.
    			payloadBytes -= c.out.mac.Size()
    		default:
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Thu May 23 03:10:12 UTC 2024
    - 51.8K bytes
    - Viewed (0)
  7. src/time/time_test.go

    	// divisors of Second
    	f1 := func(ti int64, tns int32, logdi int32) bool {
    		d := Duration(1)
    		a, b := uint(logdi%9), (logdi>>16)%9
    		d <<= a
    		for i := 0; i < int(b); i++ {
    			d *= 5
    		}
    
    		// Make room for unix ↔ internal conversion.
    		// We don't care about behavior too close to ± 2^63 Unix seconds.
    		// It is full of wraparounds but will never happen in a reasonable program.
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Thu May 23 03:13:47 UTC 2024
    - 56.5K bytes
    - Viewed (0)
  8. src/time/time.go

    			// print nanoseconds
    			prec = 0
    			buf[w] = 'n'
    		case u < uint64(Millisecond):
    			// print microseconds
    			prec = 3
    			// U+00B5 'µ' micro sign == 0xC2 0xB5
    			w-- // Need room for two bytes.
    			copy(buf[w:], "µ")
    		default:
    			// print milliseconds
    			prec = 6
    			buf[w] = 'm'
    		}
    		w, u = fmtFrac(buf[:w], u, prec)
    		w = fmtInt(buf[:w], u)
    	} else {
    		w--
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Wed May 29 17:58:53 UTC 2024
    - 50.7K bytes
    - Viewed (0)
  9. src/runtime/mgcscavenge.go

    		// Tuned loosely via Ziegler-Nichols process.
    		kp: 0.3375,
    		ti: 3.2e6,
    		tt: 1e9, // 1 second reset time.
    
    		// These ranges seem wide, but we want to give the controller plenty of
    		// room to hunt for the optimal value.
    		min: 0.001,  // 1:1000
    		max: 1000.0, // 1000:1
    	}
    	s.sleepRatio = startingScavSleepRatio
    
    	// Install real functions if stubs aren't present.
    	if s.scavenge == nil {
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Wed May 08 17:48:45 UTC 2024
    - 52.3K bytes
    - Viewed (0)
  10. src/runtime/mgcpacer.go

    	// intended to help recover from such a situation. By pushing the heap goal down, we also
    	// push the trigger down, triggering and finishing a GC sooner in order to make room for
    	// other memory sources. Note that since we're effectively reducing the heap goal by X bytes,
    	// we're actually giving more than X bytes of headroom back, because the heap goal is in
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Mon Mar 25 19:53:03 UTC 2024
    - 55.4K bytes
    - Viewed (0)
Back to top