- Sort Score
- Results per page: 10
- Languages All
Results 11 - 20 of 32 for offset_ (1 sec)
-
src/runtime/mheap.go
func (span *mspan) specialFindSplicePoint(offset uintptr, kind byte) (**special, bool) { // Find splice point, check for existing record. iter := &span.specials found := false for { s := *iter if s == nil { break } if offset == uintptr(s.offset) && kind == s.kind { found = true break } if offset < uintptr(s.offset) || (offset == uintptr(s.offset) && kind < s.kind) {
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Wed May 22 22:31:00 UTC 2024 - 78K bytes - Viewed (0) -
src/cmd/compile/internal/ssa/_gen/PPC64.rules
// Indexed ops generate indexed load or store instructions for all GOPPC64 values. // Non-indexed ops generate DS-form loads and stores when the offset fits in 16 bits, // and on power8 and power9, a multiple of 4 is required for MOVW and MOVD ops. // On power10, prefixed loads and stores can be used for offsets > 16 bits and <= 32 bits. // and support for PC relative addressing must be available if relocation is needed.
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Fri Jun 07 19:02:52 UTC 2024 - 53.2K bytes - Viewed (0) -
cmd/xl-storage.go
if !st.Mode().IsRegular() { file.Close() return nil, errIsNotRegular } if st.Size() < offset+length { // Expected size cannot be satisfied for // requested offset and length file.Close() return nil, errFileCorrupt } if offset > 0 { if _, err = file.Seek(offset, io.SeekStart); err != nil { file.Close() return nil, err } }
Registered: Sun Jun 16 00:44:34 UTC 2024 - Last Modified: Mon Jun 10 15:51:27 UTC 2024 - 85.3K bytes - Viewed (2) -
src/runtime/mprof.go
if readgstatus(gp1) == _Grunning { print("doRecordGoroutineProfile gp1=", gp1.goid, "\n") throw("cannot read stack of running goroutine") } offset := int(goroutineProfile.offset.Add(1)) - 1 if offset >= len(goroutineProfile.records) { // Should be impossible, but better to return a truncated profile than // to crash the entire process at this point. Instead, deal with it in
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Thu May 30 17:57:37 UTC 2024 - 53.3K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/flatbuffer_import.cc
if (IsValidBufferOffset(buffers[const_tensor.buffer]->offset)) { const uint8_t* file_begin_ptr = reinterpret_cast<const uint8_t*>(model_ptr->allocation()->base()); buffer = std::vector<uint8_t>( file_begin_ptr + buffers[const_tensor.buffer]->offset, file_begin_ptr + buffers[const_tensor.buffer]->offset + buffers[const_tensor.buffer]->size);
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue May 21 18:21:50 UTC 2024 - 66.8K bytes - Viewed (0) -
src/runtime/mbitmap.go
bits := (tp.addr + goarch.PtrSize*ptrBits - limit) / goarch.PtrSize tp.mask &^= ((1 << (bits)) - 1) << (ptrBits - bits) } return tp } // Move up elem and addr. // Offsets within an element are always at a ptrBits*goarch.PtrSize boundary. if n >= tp.typ.Size_ { // elem needs to be moved to the element containing // tp.addr + n. oldelem := tp.elem
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Thu May 23 00:18:55 UTC 2024 - 60K bytes - Viewed (0) -
src/cmd/link/internal/loader/loader.go
if es == 0 { delete(l.localElfSym, i) } else { l.localElfSym[i] = es } } // SymPlt returns the PLT offset of symbol s. func (l *Loader) SymPlt(s Sym) int32 { if v, ok := l.plt[s]; ok { return v } return -1 } // SetPlt sets the PLT offset of symbol i. func (l *Loader) SetPlt(i Sym, v int32) { if i >= Sym(len(l.objSyms)) || i == 0 { panic("bad symbol for SetPlt")
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Fri May 24 20:26:10 UTC 2024 - 81.5K bytes - Viewed (0) -
src/runtime/malloc.go
// index 0 in the heap arena map. // // On amd64, the address space is 48 bits, sign extended to 64 // bits. This offset lets us handle "negative" addresses (or // high addresses if viewed as unsigned). // // On aix/ppc64, this offset allows to keep the heapAddrBits to // 48. Otherwise, it would be 60 in order to handle mmap addresses // (in range 0x0a00000000000000 - 0x0afffffffffffff). But in this
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Wed May 29 17:58:53 UTC 2024 - 59.6K bytes - Viewed (0) -
src/cmd/compile/internal/ssa/debug.go
} } return changed } // varOffset returns the offset of slot within the user variable it was // decomposed from. This has nothing to do with its stack offset. func varOffset(slot LocalSlot) int64 { offset := slot.Off s := &slot for ; s.SplitOf != nil; s = s.SplitOf { offset += s.SplitOffset } return offset } type partsByVarOffset struct { slotIDs []SlotID
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Mon Jun 10 19:44:43 UTC 2024 - 58.4K bytes - Viewed (0) -
src/runtime/pprof/pprof_test.go
fFunc := runtime.FuncForPC(uintptr(abi.FuncPCABIInternal(f))) if fFunc == nil || fFunc.Entry() == 0 { panic("failed to locate function entry") } for offset := 0; offset < maxBytes; offset++ { innerPC := fFunc.Entry() + uintptr(offset) inner := runtime.FuncForPC(innerPC) if inner == nil { // No function known for this PC value. // It might simply be misaligned, so keep searching. continue }
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Thu May 23 18:42:28 UTC 2024 - 68.8K bytes - Viewed (0)