- Sort Score
- Result 10 results
- Languages All
Results 71 - 80 of 112 for alignof (0.18 sec)
-
pkg/kube/util.go
// We allow an additional 2x buffer, as it is still fairly cheap (6mb) const MaxRequestBodyBytes = int64(6 * 1024 * 1024) // HTTPConfigReader reads an HTTP request, imposing size restrictions aligned with Kubernetes limits func HTTPConfigReader(req *http.Request) ([]byte, error) { defer req.Body.Close() lr := &io.LimitedReader{ R: req.Body, N: MaxRequestBodyBytes + 1, } data, err := io.ReadAll(lr)
Registered: Fri Jun 14 15:00:06 UTC 2024 - Last Modified: Thu Jun 06 05:10:23 UTC 2024 - 18.6K bytes - Viewed (0) -
src/cmd/link/internal/ld/deadcode.go
case abi.Struct: // reflect.structType off += 4 * arch.PtrSize case abi.Pointer: // reflect.ptrType off += arch.PtrSize case abi.Func: // reflect.funcType off += arch.PtrSize // 4 bytes, pointer aligned case abi.Slice: // reflect.sliceType off += arch.PtrSize case abi.Array: // reflect.arrayType off += 3 * arch.PtrSize case abi.Chan: // reflect.chanType off += 2 * arch.PtrSize
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Fri Jun 07 14:52:41 UTC 2024 - 19K bytes - Viewed (0) -
src/internal/poll/fd_unix.go
// even on 64-bit systems. // The same is true of socket implementations on many systems. // See golang.org/issue/7812 and golang.org/issue/16266. // Use 1GB instead of, say, 2GB-1, to keep subsequent reads aligned. const maxRW = 1 << 30 // Read implements io.Reader. func (fd *FD) Read(p []byte) (int, error) { if err := fd.readLock(); err != nil { return 0, err } defer fd.readUnlock() if len(p) == 0 {
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Fri May 31 04:09:44 UTC 2024 - 17.9K bytes - Viewed (0) -
src/runtime/race_ppc64le.s
// Third, in long-term it will allow to remove cyclic runtime/race dependency on cmd/go. // A brief recap of the ppc64le calling convention. // Arguments are passed in R3, R4, R5 ... // SP must be 16-byte aligned. // Note that for ppc64x, LLVM follows the standard ABI and // expects arguments in registers, so these functions move // the arguments from storage to the registers expected // by the ABI.
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Fri May 17 18:37:29 UTC 2024 - 17K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/flatbuffer_export.cc
// used by the TOCO export. (It does not explain rationale for this choice.) constexpr size_t kInitialBufferSize = 10240; // Flatbuffer fields to be padded to 16 bytes aligned. constexpr size_t kFbAlignment = 16; // Set `isSigned` to false if the `type` is an 8-bit unsigned integer type. // Since tflite doesn't support unsigned for other types, returns error if
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Jun 12 21:41:49 UTC 2024 - 164.5K bytes - Viewed (0) -
src/cmd/compile/internal/ssa/block.go
// Fatal if not BranchUnknown and len(Succs) > 2. Likely BranchPrediction // After flagalloc, records whether flags are live at the end of the block. FlagsLiveAtEnd bool // A block that would be good to align (according to the optimizer's guesses) Hotness Hotness // Subsequent blocks, if any. The number and order depend on the block kind. Succs []Edge // Inverse of successors.
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Wed May 15 15:44:14 UTC 2024 - 12.2K bytes - Viewed (0) -
src/syscall/syscall_linux_test.go
strconv.FormatUint(uint64(uid), 10) if got != want { if filesystemIsNoSUID(tmpBinary) { t.Skip("skipping test when temp dir is mounted nosuid") } // formatted so the values are aligned for easier comparison t.Errorf("expected %s,\ngot %s", want, got) } } // filesystemIsNoSUID reports whether the filesystem for the given // path is mounted nosuid.
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Thu May 23 01:00:11 UTC 2024 - 23K bytes - Viewed (0) -
src/crypto/aes/asm_ppc64x.s
LVX (PTR), ESPERM ADD $0x10, PTR #else MOVD $·rcon+0x10(SB), PTR // PTR points to rcon addr (skipping permute vector) #endif // Get key from memory and write aligned into VR P8_LXVB16X(INP, R0, IN0) ADD $0x10, INP, INP MOVD $0x20, TEMP CMPW ROUNDS, $12 LVX (PTR)(R0), RCON // lvx 4,0,6 Load first 16 bytes into RCON LVX (PTR)(TEMP), MASK
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Mon May 20 18:05:32 UTC 2024 - 18.6K bytes - Viewed (0) -
src/runtime/runtime1.go
for i := int32(0); i < n; i++ { envs[i] = gostring(argv_index(argv, argc+1+i)) } } func environ() []string { return envs } // TODO: These should be locals in testAtomic64, but we don't 8-byte // align stack variables on 386. var test_z64, test_x64 uint64 func testAtomic64() { test_z64 = 42 test_x64 = 0 if atomic.Cas64(&test_z64, test_x64, 1) { throw("cas64 failed") } if test_x64 != 0 {
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Thu May 30 17:52:17 UTC 2024 - 19.3K bytes - Viewed (0) -
src/runtime/cgocall.go
if *restore { // Restore sp saved by cgocallback during // unwind of g's stack (see comment at top of file). mp := acquirem() sched := &mp.g0.sched sched.sp = *(*uintptr)(unsafe.Pointer(sched.sp + alignUp(sys.MinFrameSize, sys.StackAlign))) // Do the accounting that cgocall will not have a chance to do // during an unwind. // // In the case where a Go call originates from C, ncgo is 0
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Thu May 23 01:16:47 UTC 2024 - 24.2K bytes - Viewed (0)