- Sort Score
- Result 10 results
- Languages All
Results 31 - 40 of 2,509 for lets (0.05 sec)
-
staging/src/k8s.io/apimachinery/pkg/apis/meta/internalversion/types.go
// not support this field on all resource types, but if it does and more results remain it // will set the continue field on the returned list object. Limit int64 // Continue is a token returned by the server that lets a client retrieve chunks of results // from the server by specifying limit. The server may reject requests for continuation tokens // it does not recognize and will return a 410 error if the token can no longer be used because
Registered: Sat Jun 15 01:39:40 UTC 2024 - Last Modified: Wed Mar 01 09:55:40 UTC 2023 - 4.5K bytes - Viewed (0) -
staging/src/k8s.io/apiserver/pkg/admission/plugin/resourcequota/resource_access.go
lister corev1listers.ResourceQuotaLister // liveLookups holds the last few live lookups we've done to help amortize cost on repeated lookup failures. // This lets us handle the case of latent caches, by looking up actual results for a namespace on cache miss/no results. // We track the lookup result here so that for repeated requests, we don't look it up very often. liveLookupCache *lru.Cache group singleflight.Group
Registered: Sat Jun 15 01:39:40 UTC 2024 - Last Modified: Wed Apr 10 13:54:56 UTC 2024 - 5.6K bytes - Viewed (0) -
tensorflow/cc/client/client_session.h
struct ThreadPoolOptions; } } // namespace tsl namespace tensorflow { namespace thread { using tsl::thread::ThreadPoolOptions; } /// @addtogroup core /// @{ /// A `ClientSession` object lets the caller drive the evaluation of the /// TensorFlow graph constructed with the C++ API. /// /// Example: /// /// Scope root = Scope::NewRootScope(); /// auto a = Placeholder(root, DT_INT32);
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Sep 20 08:11:46 UTC 2022 - 6.1K bytes - Viewed (0) -
src/cmd/go/internal/cache/prog.go
// writes to stdout on startup (with ID==0). It includes the // ProgRequest.Command types that are supported by the program. // // This lets us extend the protocol gracefully over time (adding "get2", // etc), or fail gracefully when needed. It also lets us verify the program // wants to be a cache helper. KnownCommands []ProgCmd `json:",omitempty"` // For Get requests.
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Mon Aug 14 19:23:25 UTC 2023 - 11.8K bytes - Viewed (0) -
internal/jwt/parser.go
func NewStandardClaims() *StandardClaims { return &StandardClaims{} } // SetIssuer sets issuer for these claims func (c *StandardClaims) SetIssuer(issuer string) { c.Issuer = issuer } // SetAudience sets audience for these claims func (c *StandardClaims) SetAudience(aud string) { c.Audience = aud } // SetExpiry sets expiry in unix epoch secs func (c *StandardClaims) SetExpiry(t time.Time) {
Registered: Sun Jun 16 00:44:34 UTC 2024 - Last Modified: Tue May 09 07:53:08 UTC 2023 - 13.9K bytes - Viewed (0) -
pkg/kubelet/cm/topologymanager/bitmask/bitmask.go
func (s *bitMask) IsEqual(mask BitMask) bool { return *s == *mask.(*bitMask) } // IsNarrowerThan checks if one mask is narrower than another. // // A mask is said to be "narrower" than another if it has fewer bits set. If the // same number of bits are set in both masks, then the mask with more // lower-numbered bits set wins out. func (s *bitMask) IsNarrowerThan(mask BitMask) bool { if s.Count() == mask.Count() {
Registered: Sat Jun 15 01:39:40 UTC 2024 - Last Modified: Thu Nov 03 09:45:09 UTC 2022 - 5.1K bytes - Viewed (0) -
src/cmd/compile/internal/ssagen/nowb.go
} func (c *nowritebarrierrecChecker) check() { // We walk the call graph as late as possible so we can // capture all calls created by lowering, but this means we // only get to see the obj.LSyms of calls. symToFunc lets us // get back to the ODCLFUNCs. symToFunc := make(map[*obj.LSym]*ir.Func) // funcs records the back-edges of the BFS call graph walk. It // maps from the ODCLFUNC of each function that must not have
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Wed May 15 17:29:46 UTC 2024 - 5.9K bytes - Viewed (0) -
src/runtime/memmove_386.s
// for large sizes. The cutover is approximately 1K. We implement up to // 128 because that is the maximum SSE register load (loading all data // into registers lets us ignore copy direction). tail: // BSR+branch table make almost all memmove/memclr benchmarks worse. Not worth doing. TESTL BX, BX JEQ move_0 CMPL BX, $2 JBE move_1or2 CMPL BX, $4 JB move_3 JE move_4
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Sat Nov 06 10:24:44 UTC 2021 - 4.4K bytes - Viewed (0) -
pkg/scheduler/framework/plugins/noderesources/balanced_allocation.go
sum = sum + (fraction-mean)*(fraction-mean) } std = math.Sqrt(sum / float64(len(resourceToFractions))) } // STD (standard deviation) is always a positive value. 1-deviation lets the score be higher for the node with the least deviation and // multiplying it with `MaxNodeScore` provides the scaling factor needed. return int64((1 - std) * float64(framework.MaxNodeScore))
Registered: Sat Jun 15 01:39:40 UTC 2024 - Last Modified: Fri Dec 15 03:30:06 UTC 2023 - 6.5K bytes - Viewed (0) -
istioctl/pkg/internaldebug/internal-debug.go
istioctl x internal-debug syncz # Retrieve syncz debug information directly from the control plane, using RSA certificate security # (Certificates must be obtained before this step. The --cert-dir flag lets istioctl bypass the Kubernetes API server.) istioctl x internal-debug syncz --xds-address istio.example.com:15012 --cert-dir ~/.istio-certs
Registered: Fri Jun 14 15:00:06 UTC 2024 - Last Modified: Fri Mar 15 04:16:55 UTC 2024 - 6.9K bytes - Viewed (0)