Results 1 - 10 of 18 for healthcheck (0.2 sec)
cmd/healthcheck-handler.go
if GlobalKMS != nil {
    ctx, cancel := context.WithTimeout(r.Context(), time.Minute)
    defer cancel()
    if _, err := GlobalKMS.GenerateKey(ctx, &kms.GenerateKeyRequest{AssociatedData: kms.Context{"healthcheck": ""}}); err != nil {
        switch r.Method {
        case http.MethodHead:
            apiErr := toAPIError(r.Context(), err)
            writeResponse(w, apiErr.HTTPStatusCode, nil, mimeNone)
        case http.MethodGet:
Registered: Sun Nov 03 19:28:11 UTC 2024 - Last Modified: Wed Jun 26 07:44:34 UTC 2024 - 6.9K bytes - Viewed (0)
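The excerpt above shows how MinIO's health handler probes the KMS: the check runs under a deadline derived from the request context, and a failure is reported with a body for GET but status-only for HEAD. Below is a minimal, self-contained sketch of that pattern; checkKMS, the probe timing, and the /health route are hypothetical stand-ins, not MinIO's actual internals.

package main

import (
    "context"
    "errors"
    "net/http"
    "time"
)

// checkKMS is a hypothetical stand-in for GlobalKMS.GenerateKey: any probe
// that can fail and must respect the request's deadline.
func checkKMS(ctx context.Context) error {
    select {
    case <-time.After(10 * time.Millisecond): // pretend the KMS round-trip succeeded
        return nil
    case <-ctx.Done():
        return errors.New("kms: probe timed out")
    }
}

func healthHandler(w http.ResponseWriter, r *http.Request) {
    // Bound the probe so a hung dependency cannot stall the health endpoint.
    ctx, cancel := context.WithTimeout(r.Context(), time.Minute)
    defer cancel()

    if err := checkKMS(ctx); err != nil {
        switch r.Method {
        case http.MethodHead:
            w.WriteHeader(http.StatusServiceUnavailable) // HEAD: status code only, no body
        case http.MethodGet:
            http.Error(w, err.Error(), http.StatusServiceUnavailable) // GET: include the error detail
        }
        return
    }
    w.WriteHeader(http.StatusOK)
}

func main() {
    http.HandleFunc("/health", healthHandler)
    http.ListenAndServe(":8080", nil)
}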
cni/pkg/iptables/iptables.go
)

// TODO BML I don't think we need UDP? TCP healthcheck redir should catch everything.
// This is effectively an analog for Istio's old-style podSpec-based health check rewrites.
// Previously, Istio would update the pod manifest to rewrite healthchecks to go to sidecar Envoy port 15021,
// so that it could distinguish traffic that may be unauthenticated (healthchecks) from other kinds of node traffic.
Registered: Wed Nov 06 22:53:10 UTC 2024 - Last Modified: Tue Oct 15 15:39:28 UTC 2024 - 23.3K bytes - Viewed (0)
cmd/storage-rest-client.go
func newStorageRESTClient(endpoint Endpoint, healthCheck bool, gm *grid.Manager) (*storageRESTClient, error) {
    serverURL := &url.URL{
        Scheme: endpoint.Scheme,
        Host:   endpoint.Host,
        Path:   path.Join(storageRESTPrefix, endpoint.Path, storageRESTVersion),
    }
    restClient := rest.NewClient(serverURL, globalInternodeTransport, newCachedAuthToken())
    if healthCheck {
        // Use a separate client to avoid recursive calls.
Registered: Sun Nov 03 19:28:11 UTC 2024 - Last Modified: Sun Oct 13 13:07:21 UTC 2024 - 30.2K bytes - Viewed (0)
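The comment at the end of this excerpt ("Use a separate client to avoid recursive calls") is the interesting part: if ordinary calls are refused while a peer is marked offline, the health prober must bypass that gate, or an offline peer could never be probed back online. A rough sketch of that idea, with all names (restClient, healthLoop) hypothetical rather than MinIO's real rest package:

package restclient

import (
    "fmt"
    "net/http"
    "sync/atomic"
    "time"
)

// restClient is a hypothetical simplification: calls made through Do are
// refused while the peer is considered offline.
type restClient struct {
    base   string
    client *http.Client // client used for normal calls
    probe  *http.Client // separate client used only by the health loop
    online atomic.Bool
}

func (c *restClient) Do(path string) error {
    if !c.online.Load() {
        return fmt.Errorf("%s is offline", c.base)
    }
    resp, err := c.client.Get(c.base + path)
    if err != nil {
        return err
    }
    return resp.Body.Close()
}

// healthLoop probes with the dedicated client; if it reused c.Do, a peer
// marked offline could never be probed back online — the recursion the
// original comment is avoiding.
func (c *restClient) healthLoop(path string, every time.Duration) {
    for range time.Tick(every) {
        resp, err := c.probe.Get(c.base + path)
        ok := err == nil && resp.StatusCode == http.StatusOK
        if err == nil {
            resp.Body.Close()
        }
        c.online.Store(ok)
    }
}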
cmd/bucket-targets.go
    lastOnline      time.Time
    lastHCAt        time.Time
    offlineDuration time.Duration
    latency         latencyStat
}

// isOffline returns the current liveness result of the remote target. It adds the
// endpoint to the healthCheck map if missing and defaults it to online status.
func (sys *BucketTargetSys) isOffline(ep *url.URL) bool {
    sys.hMutex.RLock()
    defer sys.hMutex.RUnlock()
    if h, ok := sys.hc[ep.Host]; ok {
        return !h.Online
    }
Registered: Sun Nov 03 19:28:11 UTC 2024 - Last Modified: Fri Jun 21 22:22:24 UTC 2024 - 20.9K bytes - Viewed (0)
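This excerpt takes only a read lock for the common lookup. A reduced sketch of the same structure, with hypothetical HealthTracker/epHealth names, including the write-lock upgrade on a map miss that the excerpt's comment describes (register missing endpoints and default them to online):

package targets

import (
    "net/url"
    "sync"
)

type epHealth struct {
    Online bool
}

// HealthTracker is a hypothetical reduction of BucketTargetSys: a map of
// endpoint host -> liveness, guarded by an RWMutex so the hot read path
// never blocks other readers.
type HealthTracker struct {
    mu sync.RWMutex
    hc map[string]epHealth
}

func NewHealthTracker() *HealthTracker {
    return &HealthTracker{hc: make(map[string]epHealth)}
}

// IsOffline mirrors the excerpt's logic: a known endpoint reports its
// recorded state; an unknown one is registered and assumed online, so a
// target is never treated as down before its first probe.
func (t *HealthTracker) IsOffline(ep *url.URL) bool {
    t.mu.RLock()
    if h, ok := t.hc[ep.Host]; ok {
        t.mu.RUnlock()
        return !h.Online
    }
    t.mu.RUnlock()

    // Upgrade to a write lock only on the miss path.
    t.mu.Lock()
    defer t.mu.Unlock()
    if _, ok := t.hc[ep.Host]; !ok {
        t.hc[ep.Host] = epHealth{Online: true}
    }
    return false
}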
.github/workflows/mint/minio-resiliency.yaml
      MINIO_CI_CD: "on"
      MINIO_ROOT_USER: "minio"
      MINIO_ROOT_PASSWORD: "minio123"
      MINIO_KMS_SECRET_KEY: "my-minio-key:OSMM+vkKUTCvQs9YL/CVMIMt43HFhkUpqJxTmGl6rYw="
      MINIO_DRIVE_MAX_TIMEOUT: "5s"
    healthcheck:
      test: ["CMD", "mc", "ready", "local"]
      interval: 5s
      timeout: 5s
      retries: 5

# starts 4 docker containers running minio server instances.
# using nginx reverse proxy, load balancing, you can access
Registered: Sun Nov 03 19:28:11 UTC 2024 - Last Modified: Wed May 22 23:07:14 UTC 2024 - 1.7K bytes - Viewed (0)
docs/orchestration/docker-compose/docker-compose.yaml
  command: server --console-address ":9001" http://minio{1...4}/data{1...2}
  expose:
    - "9000"
    - "9001"
  # environment:
  #   MINIO_ROOT_USER: minioadmin
  #   MINIO_ROOT_PASSWORD: minioadmin
  healthcheck:
    test: ["CMD", "mc", "ready", "local"]
    interval: 5s
    timeout: 5s
    retries: 5

# starts 4 docker containers running minio server instances.
# using nginx reverse proxy, load balancing, you can access
Registered: Sun Nov 03 19:28:11 UTC 2024 - Last Modified: Wed Oct 30 21:24:58 UTC 2024 - 1.5K bytes - Viewed (0)
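Both compose files above gate container health on mc ready local, which polls until the MinIO deployment reports ready. For illustration, here is a rough Go analog of the same probe policy (5s interval and timeout, 5 retries); it assumes MinIO's documented /minio/health/live liveness endpoint on port 9000 rather than mc's own readiness logic:

package main

import (
    "fmt"
    "net/http"
    "os"
    "time"
)

// Rough analog of the compose healthcheck: probe every interval, give up
// after a fixed number of failed attempts.
func main() {
    const (
        interval = 5 * time.Second
        timeout  = 5 * time.Second
        retries  = 5
    )
    client := &http.Client{Timeout: timeout}

    for i := 0; i < retries; i++ {
        resp, err := client.Get("http://localhost:9000/minio/health/live")
        if err == nil {
            resp.Body.Close()
            if resp.StatusCode == http.StatusOK {
                fmt.Println("minio is up")
                return
            }
        }
        time.Sleep(interval)
    }
    fmt.Println("minio failed the healthcheck")
    os.Exit(1)
}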
cmd/routers.go
if globalIsDistErasure {
    registerDistErasureRouters(router, endpointServerPools)
}

// Add Admin router, all APIs are enabled in server mode.
registerAdminRouter(router, true)

// Add healthCheck router
registerHealthCheckRouter(router)

// Add server metrics router
registerMetricsRouter(router)

// Add STS router always.
registerSTSRouter(router)

// Add KMS router
Registered: Sun Nov 03 19:28:11 UTC 2024 - Last Modified: Mon Jul 29 18:10:04 UTC 2024 - 3.9K bytes - Viewed (0)
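The routers excerpt is mostly about shape: each subsystem exposes its own register function, and cmd/routers.go wires them up in one place, in an explicit order. A toy version of that layout using net/http's ServeMux, with every handler here hypothetical:

package main

import "net/http"

// Hypothetical handlers standing in for MinIO's registerXxxRouter helpers.
func registerHealthCheckRouter(mux *http.ServeMux) {
    mux.HandleFunc("/health/live", func(w http.ResponseWriter, _ *http.Request) {
        w.WriteHeader(http.StatusOK)
    })
}

func registerMetricsRouter(mux *http.ServeMux) {
    mux.HandleFunc("/metrics", func(w http.ResponseWriter, _ *http.Request) {
        w.Write([]byte("# metrics placeholder\n"))
    })
}

// configureRouters mirrors the excerpt's structure: registration order is
// explicit and lives in a single function.
func configureRouters(mux *http.ServeMux) {
    registerHealthCheckRouter(mux)
    registerMetricsRouter(mux)
}

func main() {
    mux := http.NewServeMux()
    configureRouters(mux)
    http.ListenAndServe(":8080", mux)
}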
cmd/xl-storage-disk-id-check.go
    e.mu.Lock()
    defer e.mu.Unlock()
    return e.lastMinuteLatency.getTotal()
}

func newXLStorageDiskIDCheck(storage *xlStorage, healthCheck bool) *xlStorageDiskIDCheck {
    xl := xlStorageDiskIDCheck{
        storage:      storage,
        health:       newDiskHealthTracker(),
        healthCheck:  healthCheck && globalDriveMonitoring,
        metricsCache: cachevalue.New[DiskMetrics](),
    }
    xl.SetDiskID(emptyDiskID)
Registered: Sun Nov 03 19:28:11 UTC 2024 - Last Modified: Sat Oct 26 09:56:26 UTC 2024 - 34.5K bytes - Viewed (0)
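newXLStorageDiskIDCheck wraps a concrete storage object with a health-tracking layer, and arms monitoring only when both the caller requests it and the global drive-monitoring switch is on. A generic sketch of that decorator shape, with every type here (Store, checkedStore, healthTracker) hypothetical:

package storage

import "sync/atomic"

// Store is a hypothetical minimal storage interface; xlStorageDiskIDCheck
// wraps MinIO's much larger one in the same way.
type Store interface {
    Read(key string) ([]byte, error)
}

type healthTracker struct {
    failures atomic.Int64
}

// checkedStore decorates a Store, counting failures when monitoring is on.
// As in the excerpt's constructor, the wrapper is only armed if both the
// caller asks for it and the global monitoring switch is set.
type checkedStore struct {
    inner   Store
    health  *healthTracker
    monitor bool
}

func NewCheckedStore(inner Store, healthCheck, globalMonitoring bool) *checkedStore {
    return &checkedStore{
        inner:   inner,
        health:  &healthTracker{},
        monitor: healthCheck && globalMonitoring,
    }
}

func (c *checkedStore) Read(key string) ([]byte, error) {
    b, err := c.inner.Read(key)
    if c.monitor && err != nil {
        c.health.failures.Add(1)
    }
    return b, err
}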
cni/pkg/nodeagent/server.go
        log.Errorf("failed to annotate pod enrollment: %v", err)
        retErr = err
    }

    // ipset is only relevant for pod healthchecks.
    // therefore, if we had *any* error adding the pod to the mesh
    // do not add the pod to the ipset, so that it will definitely *not* pass healthchecks,
    // and the operator can investigate.
    //
    // This is also important to avoid ipset sync issues if we add the pod ip to the ipset, but
Registered: Wed Nov 06 22:53:10 UTC 2024 - Last Modified: Wed Sep 25 20:54:34 UTC 2024 - 13.4K bytes - Viewed (0)
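The comment spells out a fail-closed rule: if any part of mesh enrollment failed, the pod is deliberately left out of the healthcheck ipset so it cannot pass probes in a half-configured state. A tiny sketch of that rule, with hypothetical types (ipset, addPodToMesh), not Istio's actual API:

package nodeagent

import "fmt"

// ipset is a hypothetical stand-in for the node agent's ipset wrapper.
type ipset interface {
    Add(ip string) error
}

// addPodToMesh only lands the pod in the healthcheck ipset if every
// enrollment step succeeded; on any error it fails closed, so the pod
// fails its healthchecks and the operator can investigate.
func addPodToMesh(set ipset, podIP string, enroll func() error) error {
    if err := enroll(); err != nil {
        return fmt.Errorf("enrollment failed, pod left out of ipset: %w", err)
    }
    return set.Add(podIP)
}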
cmd/prepare-storage.go
}

var (
    tries   int
    verbose bool
)

// Initialize all storage disks
storageDisks, errs := initStorageDisksWithErrors(endpoints, storageOpts{cleanUp: true, healthCheck: true})
if err := checkDiskFatalErrs(errs); err != nil {
    return nil, nil, err
}
defer func() {
    if err == nil && format != nil {
        // Assign globalDeploymentID() on first run for the
Registered: Sun Nov 03 19:28:11 UTC 2024 - Last Modified: Fri Jul 12 20:51:54 UTC 2024 - 11.1K bytes - Viewed (1)
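prepare-storage initializes every disk up front, collecting one error slot per disk rather than stopping at the first failure, and only then asks checkDiskFatalErrs whether the combination is fatal. A simplified sketch of that collect-then-judge pattern, with hypothetical names and a hypothetical fatality rule (MinIO's actual checks are more involved):

package prepare

import (
    "errors"
    "fmt"
    "sync"
)

// initDisksWithErrors mirrors the excerpt's shape: initialize every endpoint
// concurrently and record one error per disk instead of aborting early.
func initDisksWithErrors(endpoints []string, open func(string) error) []error {
    errs := make([]error, len(endpoints))
    var wg sync.WaitGroup
    for i, ep := range endpoints {
        wg.Add(1)
        go func(i int, ep string) {
            defer wg.Done()
            errs[i] = open(ep)
        }(i, ep)
    }
    wg.Wait()
    return errs
}

var errFaultyDisk = errors.New("faulty disk") // hypothetical fatal error class

// checkFatalErrs returns an error only when every disk failed fatally, so a
// single bad drive does not abort server startup.
func checkFatalErrs(errs []error) error {
    fatal := 0
    for _, err := range errs {
        if errors.Is(err, errFaultyDisk) {
            fatal++
        }
    }
    if fatal == len(errs) && len(errs) > 0 {
        return fmt.Errorf("all %d disks are faulty", fatal)
    }
    return nil
}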