- Sort Score
- Result 10 results
- Languages All
Results 51 - 60 of 86 for slotIDs (1.03 sec)
-
src/runtime/stack.go
minp := adjinfo.old.lo maxp := adjinfo.old.hi delta := adjinfo.delta num := uintptr(bv.n) // If this frame might contain channel receive slots, use CAS // to adjust pointers. If the slot hasn't been received into // yet, it may contain stack pointers and a concurrent send // could race with adjusting those pointers. (The sent value
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Wed May 22 22:31:00 UTC 2024 - 41.1K bytes - Viewed (0) -
src/cmd/vendor/github.com/google/pprof/internal/driver/html/stacks.js
// sumpos: number; // Sum of |self| of positive nodes in tree (>= 0) // sumneg: number; // Sum of |self| of negative nodes in tree (>= 0) // places: Place[]; // Stack slots that contributed to this group // } // // // Box is a rendered item. // interface Box { // x: number; // X coordinate of top-left // y: number; // Y coordinate of top-left
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Fri May 31 19:48:28 UTC 2024 - 18.5K bytes - Viewed (0) -
src/runtime/malloc.go
// Scan the mspan's free bitmap to find a free slot. // If there is a free slot, allocate it. // This can all be done without acquiring a lock. // // 2. If the mspan has no free slots, obtain a new mspan // from the mcentral's list of mspans of the required size // class that have free space. // Obtaining a whole span amortizes the cost of locking // the mcentral. //
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Wed May 29 17:58:53 UTC 2024 - 59.6K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/c/c_api_unified_experimental_mlir.cc
// For LLVM style RTTI. static bool classof(const AbstractOperation* ptr) { return ptr->getKind() == kMlir; } private: // Return true if there are still unfilled ODS slots for adding more inputs. bool IsNextODSArgAvailable(); MLIRContext* context_; MlirFunctionContext* function_context_; SmallVector<Value, 8> operands_; llvm::StringMap<Attribute> attrs_;
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 28.2K bytes - Viewed (0) -
src/cmd/compile/internal/ssa/compile.go
{name: "late nilcheck", fn: nilcheckelim2}, {name: "flagalloc", fn: flagalloc, required: true}, // allocate flags register {name: "regalloc", fn: regalloc, required: true}, // allocate int & float registers + stack slots {name: "loop rotate", fn: loopRotate}, {name: "trim", fn: trim}, // remove empty blocks } // Double-check phase ordering constraints. // This code is intended to document the ordering requirements
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Mon Apr 22 14:55:18 UTC 2024 - 18.6K bytes - Viewed (0) -
src/runtime/sys_openbsd_amd64.s
// Transition from C ABI to Go ABI. PUSH_REGS_HOST_TO_ABI0() // Set up ABIInternal environment: g in R14, cleared X15. get_tls(R12) MOVQ g(R12), R14 PXOR X15, X15 // Reserve space for spill slots. NOP SP // disable vet stack checking ADJSP $24 // Call into the Go signal handler MOVQ DI, AX // sig MOVQ SI, BX // info MOVQ DX, CX // ctx CALL ·sigtrampgo<ABIInternal>(SB) ADJSP $-24
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Tue Jun 06 18:49:01 UTC 2023 - 15.5K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/transforms/resource_op_lifting.cc
// variables are removed from the loop variables during // canonicalization, we need to create new operand/result slots. The // input operands for these slots are the read values // prior to the op, and all references to these are replaced by the // corresponding slot argument. We need to generate writes following
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 55.1K bytes - Viewed (0) -
src/runtime/arena.go
h.offset = offset - h.low*goarch.PtrSize // We don't have any bits yet. h.mask = 0 h.valid = h.low return } // write appends the pointerness of the next valid pointer slots // using the low valid bits of bits. 1=pointer, 0=scalar. func (h writeUserArenaHeapBits) write(s *mspan, bits, valid uintptr) writeUserArenaHeapBits { if h.valid+valid <= ptrBits { // Fast path - just accumulate the bits.
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Wed May 08 17:44:56 UTC 2024 - 37.9K bytes - Viewed (0) -
src/cmd/vendor/golang.org/x/arch/x86/x86asm/gnu.go
// but libopcodes still always puts a suffix on crc32. continue case PUSH, POP: // Even though segment registers are 16-bit, push and pop // can save/restore them from 32-bit slots, so they // do not imply operand size. if ES <= a && a <= GS { continue } case CVTSI2SD, CVTSI2SS: // The integer register argument takes priority. if X0 <= a && a <= X15 {
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Thu Oct 19 23:33:33 UTC 2023 - 21.4K bytes - Viewed (0) -
src/internal/bisect/bisect.go
d.m[h] = true d.mu.Unlock() return seen } // seenLossy is a variant of seen that avoids a lock by using a cache of recently seen hashes. // Each cache entry is N-way set-associative: h can appear in any of the slots. // If h does not appear in any of them, then it is inserted into a random slot, // overwriting whatever was there before. func (d *dedup) seenLossy(h uint64) bool { cache := &d.recent[uint(h)%uint(len(d.recent))]
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Fri May 03 17:28:43 UTC 2024 - 22.9K bytes - Viewed (0)