Results 21 - 30 of 205 for Accounting (0.23 sec)

  21. src/runtime/mcache.go

    	}
    
    	// Indicate that this span is cached and prevent asynchronous
    	// sweeping in the next sweep phase.
    	s.sweepgen = mheap_.sweepgen + 3
    
    	// Store the current alloc count for accounting later.
    	s.allocCountBeforeCache = s.allocCount
    
    	// Update heapLive and flush scanAlloc.
    	//
    	// We have not yet allocated anything new into the span, but we
    - Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Mon Mar 25 19:53:03 UTC 2024
    - 10K bytes
    - Viewed (0)
  22. src/runtime/testdata/testprog/gc.go

    		target <- i
    
    		// Check to make sure the memory limit is maintained.
    		// We're just sampling here so if it transiently goes over we might miss it.
    		// The internal accounting is inconsistent anyway, so going over by a few
    		// pages is certainly possible. Just make sure we're within some bound.
    		// Note that to avoid flakiness due to #52433 (especially since we're allocating
    - Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Sun Oct 02 02:28:27 UTC 2022
    - 12.1K bytes
    - Viewed (0)
  23. tensorflow/cc/gradients/linalg_grad.cc

      std::string left_subs;
      for (const char s : input_subs) {
        if (!reduced_label_set.contains(s)) {
          left_subs.push_back(s);
        }
      }
    
      // Compute the gradient wrt the input, without accounting for the operation
      // "abc->ac". So, now we have the VJP of the operation "ac,cd->ad".
      Output grad_reduced =
          Einsum(scope, {output_grad, other_operand},
    - Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Mon Mar 07 23:11:54 UTC 2022
    - 20.4K bytes
    - Viewed (0)
  24. src/runtime/mgclimit.go

    		//
    		// The reason we do this instead of just waiting for those events to finish and push updates
    		// is to ensure that all the time we're accounting for happened sometime between lastUpdate
    		// and now. This dramatically simplifies reasoning about the limiter because we're not at
    		// risk of more time being accounted for in this window than actually happened in this window,
    - Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Mon Apr 22 22:07:41 UTC 2024
    - 17.3K bytes
    - Viewed (0)
  25. src/runtime/mheap.go

    //
    // If typ.manual() == false, allocSpan allocates a heap span of class spanclass
    // and updates heap accounting. If typ.manual() == true, allocSpan allocates a
    // manually-managed span (spanclass is ignored), and the caller is
    // responsible for any accounting related to its use of the span. Either
    // way, allocSpan will atomically add the bytes in the newly allocated
    // span to *sysStat.
    //
    - Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Wed May 22 22:31:00 UTC 2024
    - 78K bytes
    - Viewed (0)
  26. src/runtime/arena.go

    		// Failed to allocate.
    		mp.mallocing = 0
    		releasem(mp)
    		return nil
    	}
    	if s.needzero != 0 {
    		throw("arena chunk needs zeroing, but should already be zeroed")
    	}
    	// Set up heap bitmap and do extra accounting.
    	if typ.Pointers() {
    		if cap >= 0 {
    			userArenaHeapBitsSetSliceType(typ, cap, ptr, s)
    		} else {
    			userArenaHeapBitsSetType(typ, ptr, s)
    		}
    		c := getMCache(mp)
    		if c == nil {
    - Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Wed May 08 17:44:56 UTC 2024
    - 37.9K bytes
    - Viewed (0)
  27. src/runtime/metrics_test.go

    			//
    			// Instead, check against a much more reasonable upper-bound: the amount of
    			// mapped heap memory. We can't possibly overcount to the point of exceeding
    			// total mapped heap memory, except if there's an accounting bug.
    			if live := samples[i].Value.Uint64(); live > mstats.HeapSys {
    				t.Errorf("live bytes: %d > heap sys: %d", live, mstats.HeapSys)
    			} else if live == 0 {
    				// Might happen if we don't call runtime.GC() above.
    - Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Thu May 30 17:52:17 UTC 2024
    - 45K bytes
    - Viewed (0)
  28. src/runtime/stack_test.go

    	estimate := int64(8 * BatchSize * ArraySize * RecursionDepth) // 8 is to reduce flakiness.
    	if consumed > estimate {
    		t.Fatalf("Stack mem: want %v, got %v", estimate, consumed)
    	}
    	// Due to broken stack memory accounting (https://golang.org/issue/7468),
    	// StackInuse can decrease during function execution, so we cast the values to int64.
    	inuse := int64(s1.StackInuse) - int64(s0.StackInuse)
    	t.Logf("Inuse %vMB for stack mem", inuse>>20)
    - Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Wed Jun 14 00:03:57 UTC 2023
    - 23.1K bytes
    - Viewed (0)
  29. src/cmd/compile/internal/syntax/printer.go

    	pending []whitespace // pending whitespace
    	lastTok token        // last token (after any pending semi) processed by print
    }
    
    // write is a thin wrapper around p.output.Write
    // that takes care of accounting and error handling.
    func (p *printer) write(data []byte) {
    	n, err := p.output.Write(data)
    	p.written += n
    	if err != nil {
    		panic(writeError{err})
    	}
    }
    
    var (
    	tabBytes    = []byte("\t\t\t\t\t\t\t\t")
    - Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Thu Aug 24 07:17:27 UTC 2023
    - 21.5K bytes
    - Viewed (0)
  30. src/go/scanner/scanner.go

    			// Optimization: we've encountered an ASCII character that's not a letter
    			// or number. Avoid the call into s.next() and corresponding set up.
    			//
    			// Note that s.next() does some line accounting if s.ch is '\n', so this
    			// shortcut is only possible because we know that the preceding character
    			// is not '\n'.
    			s.ch = rune(b)
    			s.offset = s.rdOffset
    			s.rdOffset++
    			goto exit
    		}
    - Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Thu Oct 19 12:02:03 UTC 2023
    - 24.3K bytes
    - Viewed (0)