- Sort Score
- Results per page: 10
- Languages All
Results 1 - 10 of 23 for conservative (0.18 sec)
-
src/runtime/mgcmark.go
// registers for the asynchronously stopped // parent frame. Scan the parent // conservatively. state.conservative = true } else { // We only wanted to scan those two frames // conservatively. Clear the flag for future // frames. state.conservative = false } return } locals, args, objs := frame.getStackMap(false)
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Thu Apr 18 21:25:11 UTC 2024 - 52.5K bytes - Viewed (0) -
src/runtime/malloc.go
// freeIndexForScan now so x is seen by the GC // (including conservative scan) as an allocated object. // While this pointer can't escape into user code as a // _live_ pointer until we return, conservative scanning // may find a dead pointer that happens to point into this // object. Delaying this update until now ensures that // conservative scanning considers this pointer dead until // this point.
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Wed May 29 17:58:53 UTC 2024 - 59.6K bytes - Viewed (0) -
src/runtime/mgcscavenge.go
backgroundTime atomic.Int64 } const ( // It doesn't really matter what value we start at, but we can't be zero, because // that'll cause divide-by-zero issues. Pick something conservative which we'll // also use as a fallback. startingScavSleepRatio = 0.001 // Spend at least 1 ms scavenging, otherwise the corresponding // sleep time to maintain our desired utilization is too low to
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Wed May 08 17:48:45 UTC 2024 - 52.3K bytes - Viewed (0) -
src/cmd/cgo/gcc.go
} // We can't figure out the type. Conservative // approach is to assume it has a pointer. return true case *ast.SelectorExpr: if l, ok := t.X.(*ast.Ident); !ok || l.Name != "C" { // Type defined in a different package. // Conservative approach is to assume it has a // pointer. return true } if f == nil { // Conservative approach: assume pointer. return true }
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Mon May 20 15:50:06 UTC 2024 - 97K bytes - Viewed (0) -
tensorflow/compiler/jit/encapsulate_subgraphs_pass.cc
// XLA does not support Const nodes of Variant type since it needs // to know the original ops to be able to compile them to the relevant // XLA form. // TODO(srbs): This filter is a little conservative. E.g. a subgraph of // the form: // Const // | // EmptyTensorList -> TensorListPushBack -> TensorListPopBack -> Op
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Feb 22 08:47:20 UTC 2024 - 51K bytes - Viewed (0) -
src/net/http/transport.go
// This is the documented way to disable http2 on a // Transport. return } if !t.ForceAttemptHTTP2 && (t.TLSClientConfig != nil || t.Dial != nil || t.DialContext != nil || t.hasCustomTLSDialer()) { // Be conservative and don't automatically enable // http2 if they've specified a custom TLS config or // custom dialers. Let them opt-in themselves via // http2.ConfigureTransport so we don't surprise them
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Thu Jun 06 21:59:21 UTC 2024 - 91K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/transforms/prepare_tf.cc
rewriter.replaceOp(identity, identity.getInput()); return success(); } // Replace the op with the input if output is only used by TF ops. // Currently this is more on the conservative side since we need to ensure // every consumer op to be a TF op before applying this pattern. We can // consider to revisit this in the future if this turns out to be too // restrictive.
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue May 28 21:49:50 UTC 2024 - 64.6K bytes - Viewed (0) -
src/crypto/tls/conn.go
} // sendAlert sends a TLS alert message. func (c *Conn) sendAlert(err alert) error { c.out.Lock() defer c.out.Unlock() return c.sendAlertLocked(err) } const ( // tcpMSSEstimate is a conservative estimate of the TCP maximum segment // size (MSS). A constant is used, rather than querying the kernel for // the actual MSS, to avoid complexity. The value here is the IPv6
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Thu May 23 03:10:12 UTC 2024 - 51.8K bytes - Viewed (0) -
src/cmd/compile/internal/ssa/prove.go
// fact table for this because we don't know what the // facts of b.Preds[1] are (in general, b.Preds[1] is // a loop-back edge, so we haven't even been there // yet). As a conservative approximation, we look for // this condition in the predecessor chain until we // hit a join point. uniquePred := func(b *Block) *Block { if len(b.Preds) == 1 { return b.Preds[0].b }
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Mon Mar 04 17:30:21 UTC 2024 - 48.9K bytes - Viewed (0) -
pkg/scheduler/framework/runtime/framework.go
// for running them. It is ok if the current "pod" take resources freed for // lower priority pods. // Requiring that the new pod is schedulable in both circumstances ensures that // we are making a conservative decision: filters like resources and inter-pod // anti-affinity are more likely to fail when the nominated pods are treated // as running, while filters like pod affinity are more likely to fail when
Registered: Sat Jun 15 01:39:40 UTC 2024 - Last Modified: Fri May 17 09:07:27 UTC 2024 - 60.9K bytes - Viewed (0)