- Sort Score
- Results per page: 10
- Languages All
Results 11 - 20 of 29 for offsets (0.15 sec)
-
src/time/format.go
// as some people do write offsets of 24 hours // or 60 minutes or 60 seconds. if hr > 24 { rangeErrString = "time zone offset hour" } if mm > 60 { rangeErrString = "time zone offset minute" } if ss > 60 { rangeErrString = "time zone offset second" } zoneOffset = (hr*60+mm)*60 + ss // offset is in seconds switch sign[0] { case '+':
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Tue Jun 11 17:09:28 UTC 2024 - 49.3K bytes - Viewed (0) -
src/cmd/compile/internal/ssa/_gen/S390X.rules
(MULL(D|W)const <t> x [c]) && isPowerOfTwo32(-c+(-c&^(-c-1))) => ((SUB|SUBW) (SL(D|W)const <t> x [uint8(log32(-c&^(-c-1)))]) (SL(D|W)const <t> x [uint8(log32(-c+(-c&^(-c-1))))])) // Fold ADD into MOVDaddr. Odd offsets from SB shouldn't be folded (LARL can't handle them). (ADDconst [c] (MOVDaddr [d] {s} x:(SB))) && ((c+d)&1 == 0) && is32Bit(int64(c)+int64(d)) => (MOVDaddr [c+d] {s} x)
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Thu Oct 12 18:09:26 UTC 2023 - 74.3K bytes - Viewed (0) -
src/bytes/bytes_test.go
} b1[j] = 0 pos = IndexByte(b1, 'x') if pos != -1 { t.Errorf("IndexByte(%q, 'x') = %v", b1, pos) } } } } // test a small index across all page offsets func TestIndexByteSmall(t *testing.T) { b := make([]byte, 5015) // bigger than a page // Make sure we find the correct byte even when straddling a page. for i := 0; i <= len(b)-15; i++ { for j := 0; j < 15; j++ {
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Fri May 03 12:58:37 UTC 2024 - 56.5K bytes - Viewed (0) -
src/time/time.go
if t.Location() == UTC { offsetMin = -1 } else { _, offset := t.Zone() if offset%60 != 0 { version = timeBinaryVersionV2 offsetSec = int8(offset % 60) } offset /= 60 if offset < -32768 || offset == -1 || offset > 32767 { return nil, errors.New("Time.MarshalBinary: unexpected zone offset") } offsetMin = int16(offset) } sec := t.sec() nsec := t.nsec()
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Wed May 29 17:58:53 UTC 2024 - 50.7K bytes - Viewed (0) -
src/cmd/compile/internal/ssa/debug.go
} } return changed } // varOffset returns the offset of slot within the user variable it was // decomposed from. This has nothing to do with its stack offset. func varOffset(slot LocalSlot) int64 { offset := slot.Off s := &slot for ; s.SplitOf != nil; s = s.SplitOf { offset += s.SplitOffset } return offset } type partsByVarOffset struct { slotIDs []SlotID
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Mon Jun 10 19:44:43 UTC 2024 - 58.4K bytes - Viewed (0) -
src/runtime/mgcmark.go
// //go:nowritebarrier func markrootBlock(b0, n0 uintptr, ptrmask0 *uint8, gcw *gcWork, shard int) int64 { if rootBlockBytes%(8*goarch.PtrSize) != 0 { // This is necessary to pick byte offsets in ptrmask0. throw("rootBlockBytes must be a multiple of 8*ptrSize") } // Note that if b0 is toward the end of the address space, // then b0 + rootBlockBytes might wrap around.
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Thu Apr 18 21:25:11 UTC 2024 - 52.5K bytes - Viewed (0) -
cmd/api-errors.go
}, ErrInvalidCopyPartRange: { Code: "InvalidArgument", Description: "The x-amz-copy-source-range value must be of the form bytes=first-last where first and last are the zero-based offsets of the first and last bytes to copy", HTTPStatusCode: http.StatusBadRequest, }, ErrInvalidCopyPartRangeSource: { Code: "InvalidArgument",
Registered: Sun Jun 16 00:44:34 UTC 2024 - Last Modified: Thu Jun 13 22:26:38 UTC 2024 - 92.1K bytes - Viewed (1) -
src/cmd/link/internal/ld/elf.go
s.AddUint16(ctxt.Arch, uint16(j)) // aux count s.AddUint32(ctxt.Arch, uint32(dynstr.Addstring(l.file))) // file string offset s.AddUint32(ctxt.Arch, 16) // offset from header to first aux if l.next != nil { s.AddUint32(ctxt.Arch, 16+uint32(j)*16) // offset from this header to next } else { s.AddUint32(ctxt.Arch, 0) } for x := l.aux; x != nil; x = x.next {
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Mon Apr 22 13:29:54 UTC 2024 - 63.6K bytes - Viewed (0) -
src/runtime/mbitmap.go
bits := (tp.addr + goarch.PtrSize*ptrBits - limit) / goarch.PtrSize tp.mask &^= ((1 << (bits)) - 1) << (ptrBits - bits) } return tp } // Move up elem and addr. // Offsets within an element are always at a ptrBits*goarch.PtrSize boundary. if n >= tp.typ.Size_ { // elem needs to be moved to the element containing // tp.addr + n. oldelem := tp.elem
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Thu May 23 00:18:55 UTC 2024 - 60K bytes - Viewed (0) -
tensorflow/compiler/jit/mark_for_compilation_pass.cc
if (!IsCompilationCandidate(node)) { cluster_for_node_[node->id()].Get() = nullptr; continue; } // We want clusters to be big enough that the benefit from XLA's // optimizations offsets XLA related overhead (for instance we add some // Switch/Merge nodes into the graph to implement lazy compilation). To // this end, we don't count Identity and Constant nodes because they do not
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Feb 21 12:19:41 UTC 2024 - 85.3K bytes - Viewed (0)