- Sort Score
- Results per page: 10
- Languages All
Results 51 - 60 of 164 for preemption (0.55 sec)
-
src/runtime/arena.go
// Redirect allocations that don't fit into a chunk well directly // from the heap. if cap >= 0 { return newarray(typ, cap) } return newobject(typ) } // Prevent preemption as we set up the space for a new object. // // Act like we're allocating. mp := acquirem() if mp.mallocing != 0 { throw("malloc deadlock") } if mp.gsignal == getg() {
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Wed May 08 17:44:56 UTC 2024 - 37.9K bytes - Viewed (0) -
src/cmd/internal/obj/loong64/obj.go
} p.To.Type = obj.TYPE_REG p.To.Reg = REG_R20 // Mark the stack bound check and morestack call async nonpreemptible. // If we get preempted here, when resumed the preemption request is // cleared, but we'll still call morestack, which will double the stack // unnecessarily. See issue #35470. p = c.ctxt.StartUnsafePoint(p, c.newprog) var q *obj.Prog
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Tue Nov 21 19:22:18 UTC 2023 - 19.7K bytes - Viewed (0) -
src/cmd/cgo/internal/testerrors/ptr_test.go
c: `typedef union { unsigned long i; } u39; void f39(u39 *pu) {}`, imports: []string{"unsafe"}, body: `var b C.char; p := &b; C.f39((*C.u39)(unsafe.Pointer(&p)))`, fail: false, }, { // Test preemption while entering a cgo call. Issue #21306. name: "preemptduringcall", c: `void f30() {}`, imports: []string{"runtime", "sync"},
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Thu Aug 03 16:07:49 UTC 2023 - 21.2K bytes - Viewed (0) -
src/cmd/internal/obj/arm/obj5.go
} p.To.Type = obj.TYPE_REG p.To.Reg = REG_R1 // Mark the stack bound check and morestack call async nonpreemptible. // If we get preempted here, when resumed the preemption request is // cleared, but we'll still call morestack, which will double the stack // unnecessarily. See issue #35470. p = c.ctxt.StartUnsafePoint(p, c.newprog) if framesize <= abi.StackSmall {
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Mon Nov 20 17:19:36 UTC 2023 - 21.4K bytes - Viewed (0) -
src/cmd/compile/internal/ssa/writebarrier.go
mem = bEnd.NewValue3A(pos, OpStore, types.TypeMem, w.Aux, ptr, val, mem) } } // The last store becomes the WBend marker. This marker is used by the liveness // pass to determine what parts of the code are preemption-unsafe. // All subsequent memory operations use this memory, so we have to sacrifice the // previous last memory op to become this new value. bEnd.Values = append(bEnd.Values, last) last.Block = bEnd
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Fri Sep 08 19:09:14 UTC 2023 - 23.5K bytes - Viewed (0) -
src/cmd/internal/obj/s390x/objz.go
} p.To.Type = obj.TYPE_REG p.To.Reg = REG_R3 // Mark the stack bound check and morestack call async nonpreemptible. // If we get preempted here, when resumed the preemption request is // cleared, but we'll still call morestack, which will double the stack // unnecessarily. See issue #35470. p = c.ctxt.StartUnsafePoint(p, c.newprog) if framesize <= abi.StackSmall {
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Fri Apr 21 19:28:53 UTC 2023 - 21K bytes - Viewed (0) -
src/runtime/chan_test.go
testenv.SkipFlaky(t, 51482) } // The goal of this test is to trigger a "racy sudog adjustment" // throw. Basically, there's a window between when a goroutine // becomes available for preemption for stack scanning (and thus, // stack shrinking) but before the goroutine has fully parked on a // channel. See issue 40641 for more details on the problem. // // The way we try to induce this failure is to set up two
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Tue Oct 31 20:47:35 UTC 2023 - 23.4K bytes - Viewed (0) -
tensorflow/c/eager/c_api.cc
// TFE_ContextUpdateServerDefWithTimeout to be simple wrappers around the same // C++ function. // Retries are used for CreateContext calls, which is used in // ParameterServerStrategy initialization to be robust to worker preemption. TF_CAPI_EXPORT extern void TFE_ContextSetServerDefWithTimeoutAndRetries( TFE_Context* ctx, int keep_alive_secs, const void* proto, size_t proto_len, int64_t init_timeout_in_ms, int retries, TF_Status* status,
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu May 09 08:11:23 UTC 2024 - 44K bytes - Viewed (0) -
tensorflow/c/eager/c_api_experimental.h
int64_t init_timeout_in_ms, TF_Status* status, bool clear_existing_contexts); // Set server def with retries and timeout. This is helpful for fault-tolerant // initial connection in high-preemption environments, such as // ParameterServerStrategy training. // This API is for experimental usage and may be subject to change. TF_CAPI_EXPORT extern void TFE_ContextSetServerDefWithTimeoutAndRetries(
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Feb 21 22:37:46 UTC 2024 - 39.5K bytes - Viewed (0) -
src/os/signal/signal_test.go
// deliver the signal. start := time.Now() timer := time.NewTimer(settleTime / 10) defer timer.Stop() // If the caller notified for all signals on c, filter out SIGURG, // which is used for runtime preemption and can come at unpredictable times. // General user code should filter out all unexpected signals instead of just // SIGURG, but since os/signal is tightly coupled to the runtime it seems // appropriate to be stricter here.
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Thu Nov 09 15:34:56 UTC 2023 - 27.2K bytes - Viewed (0)