- Sort: Score
- Results per page: 10
- Languages: All
Results 1 - 10 of 95 for "completely" (0.32 sec)
-
staging/src/k8s.io/api/admissionregistration/v1/generated.proto
// However, in order to prevent ValidatingAdmissionWebhooks and MutatingAdmissionWebhooks // from putting the cluster in a state which cannot be recovered from without completely // disabling the plugin, ValidatingAdmissionWebhooks and MutatingAdmissionWebhooks are never called // on admission requests for ValidatingWebhookConfiguration and MutatingWebhookConfiguration objects. // +listType=atomic
Registered: Sat Jun 15 01:39:40 UTC 2024 - Last Modified: Thu May 23 17:42:49 UTC 2024 - 51.8K bytes - Viewed (0) -
tensorflow/BUILD
# the stream executor cuda plugins. Targets that want to use cuda APIs should # instead depend on the dummy plugins in @local_tsl//tsl/platform/default/build_config # and use header only targets. # TODO(ddunleavy): This seems completely broken. :tensorflow_cc depends on # cuda_platform from tf_additional_binary_deps and this doesn't break. check_deps( name = "cuda_plugins_check_deps", disallowed_deps = if_static( [],
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri May 31 16:51:59 UTC 2024 - 53.5K bytes - Viewed (0) -
staging/src/k8s.io/api/admissionregistration/v1/types.go
// However, in order to prevent ValidatingAdmissionWebhooks and MutatingAdmissionWebhooks // from putting the cluster in a state which cannot be recovered from without completely // disabling the plugin, ValidatingAdmissionWebhooks and MutatingAdmissionWebhooks are never called // on admission requests for ValidatingWebhookConfiguration and MutatingWebhookConfiguration objects. // +listType=atomic
Registered: Sat Jun 15 01:39:40 UTC 2024 - Last Modified: Thu May 23 17:42:49 UTC 2024 - 61.6K bytes - Viewed (0) -
src/runtime/malloc.go
// their own spans. The small window of object sizes between maxSmallSize-mallocHeaderSize // and maxSmallSize will be considered large, even though they might fit in // a size class. In practice this is completely fine, since the largest small // size class has a single object in it already, precisely to make the transition // to large objects smooth. if size <= maxSmallSize-mallocHeaderSize {
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Wed May 29 17:58:53 UTC 2024 - 59.6K bytes - Viewed (0) -
pilot/pkg/networking/core/route/route.go
serviceRegistry map[host.Name]*model.Service, mostSpecificWildcardVsIndex map[host.Name]types.NamespacedName, ) ([]string, []*model.Service) { // TODO: A further optimization would be to completely rely on the index and not do the loop below // However, that requires assuming that serviceRegistry never got filtered after the // egressListener was created. rule := virtualService.Spec.(*networking.VirtualService)
Registered: Fri Jun 14 15:00:06 UTC 2024 - Last Modified: Tue May 14 14:12:39 UTC 2024 - 56.1K bytes - Viewed (0) -
pilot/pkg/networking/core/listener.go
// In general, for handling conflicts we: // * Turn on sniffing if its HTTP and TCP mixed // * Merge filter chains switch conflictType { case NoConflict, AutoOverHTTP: // This is a new entry (NoConflict), or completely overriding (AutoOverHTTP); add it to the map listenerMap[listenerMapKey] = &outboundListenerEntry{ servicePort: listenerOpts.port, bind: listenerOpts.bind, chains: opts,
Registered: Fri Jun 14 15:00:06 UTC 2024 - Last Modified: Mon May 06 04:44:06 UTC 2024 - 55.1K bytes - Viewed (0) -
src/testing/testing.go
// completely suspended during the call to Parallel. t.checkRaces() if t.chatty != nil { t.chatty.Updatef(t.name, "=== PAUSE %s\n", t.name) } running.Delete(t.name) t.signal <- true // Release calling test. <-t.parent.barrier // Wait for the parent test to complete. t.context.waitParallel() if t.chatty != nil {
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Thu May 23 01:00:11 UTC 2024 - 76.1K bytes - Viewed (0) -
src/runtime/traceback.go
// stopped nicely, and the stack walk may not be able to complete. gp := u.g.ptr() if u.flags&(unwindPrintErrors|unwindSilentErrors) == 0 && u.frame.sp != gp.stktopsp { print("runtime: g", gp.goid, ": frame.sp=", hex(u.frame.sp), " top=", hex(gp.stktopsp), "\n") print("\tstack=[", hex(gp.stack.lo), "-", hex(gp.stack.hi), "\n") throw("traceback did not unwind completely") } }
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Wed May 29 16:25:21 UTC 2024 - 55.1K bytes - Viewed (0) -
src/crypto/tls/common.go
} } // supportsRSAFallback returns nil if the certificate and connection support // the static RSA key exchange, and unsupported otherwise. The logic for // supporting static RSA is completely disjoint from the logic for // supporting signed key exchanges, so we just check it as a fallback. supportsRSAFallback := func(unsupported error) error { // TLS 1.3 dropped support for the static RSA key exchange.
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Thu May 23 03:10:12 UTC 2024 - 59.1K bytes - Viewed (0) -
src/internal/trace/order.go
} o.queue.push(Event{table: evt, ctx: curCtx, base: *ev}) return curCtx, true, nil } // Handle the GC mark phase. // // We have sequence numbers for both start and end because they // can happen on completely different threads. We want an explicit // partial order edge between start and end here, otherwise we're // relying entirely on timestamps to make sure we don't advance a
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Mon Jun 03 14:56:25 UTC 2024 - 52.4K bytes - Viewed (0)