Results 91 - 96 of 96 for locale (0.15 sec)

  1. common-protos/k8s.io/api/networking/v1/generated.proto

      // Traffic is allowed to a pod if there are no NetworkPolicies selecting the pod
      // (and cluster policy otherwise allows the traffic), OR if the traffic source is
      // the pod's local node, OR if the traffic matches at least one ingress rule
      // across all of the NetworkPolicy objects whose podSelector matches the pod. If
      // this field is empty then this NetworkPolicy does not allow any traffic (and serves
    Plain Text
    - Registered: Wed May 08 22:53:08 GMT 2024
    - Last Modified: Mon Mar 11 18:43:24 GMT 2024
    - 25.2K bytes
    - Viewed (0)
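    The excerpt above describes the ingress semantics of the generated NetworkPolicy type: a pod selected by no policy accepts all traffic, while a policy whose ingress list is empty blocks all ingress to the pods it selects. As a rough illustration only (the policy name and labels below are made up, not from the indexed file), a Go sketch using the types generated from this proto might look like:

    // Sketch: a NetworkPolicy that selects pods labeled app=db and allows
    // ingress only from pods labeled app=api. Leaving Ingress empty would
    // instead deny all ingress to the selected pods, per the comment above.
    package example

    import (
        netv1 "k8s.io/api/networking/v1"
        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    )

    func allowAPIOnly() *netv1.NetworkPolicy {
        return &netv1.NetworkPolicy{
            ObjectMeta: metav1.ObjectMeta{Name: "allow-api-only"},
            Spec: netv1.NetworkPolicySpec{
                PodSelector: metav1.LabelSelector{
                    MatchLabels: map[string]string{"app": "db"},
                },
                Ingress: []netv1.NetworkPolicyIngressRule{{
                    From: []netv1.NetworkPolicyPeer{{
                        PodSelector: &metav1.LabelSelector{
                            MatchLabels: map[string]string{"app": "api"},
                        },
                    }},
                }},
            },
        }
    }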
  2. common-protos/k8s.io/api/storage/v1beta1/generated.proto

      // A driver has to be aware of this because it is only going to get a NodePublishVolume call for such a volume.
      //
      // For more information about implementing this mode, see
      // https://kubernetes-csi.github.io/docs/ephemeral-local-volumes.html
      // A driver can support one or more of these modes and
      // more modes may be added in the future.
      //
      // This field is immutable.
      //
      // +optional
    Plain Text
    - Registered: Wed May 08 22:53:08 GMT 2024
    - Last Modified: Mon Mar 11 18:43:24 GMT 2024
    - 24.9K bytes
    - Viewed (0)
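    The excerpt above comes from the CSIDriver spec's volume lifecycle modes: a driver declares which modes it supports, and for ephemeral inline volumes it must expect only a NodePublishVolume call. A minimal sketch, assuming the generated k8s.io/api/storage/v1beta1 types and a made-up driver name:

    // Sketch: a CSIDriver object advertising support for both persistent and
    // ephemeral inline volumes via VolumeLifecycleModes.
    package example

    import (
        storagev1beta1 "k8s.io/api/storage/v1beta1"
        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    )

    func exampleDriver() *storagev1beta1.CSIDriver {
        return &storagev1beta1.CSIDriver{
            ObjectMeta: metav1.ObjectMeta{Name: "example.csi.driver"},
            Spec: storagev1beta1.CSIDriverSpec{
                VolumeLifecycleModes: []storagev1beta1.VolumeLifecycleMode{
                    storagev1beta1.VolumeLifecyclePersistent,
                    storagev1beta1.VolumeLifecycleEphemeral,
                },
            },
        }
    }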
  3. internal/dsync/drwmutex.go

    		for {
    			select {
    			case <-ctx.Done():
    				return
    			case <-refreshTimer.C:
    				noQuorum, err := refreshLock(ctx, dm.clnt, id, source, quorum)
    				if err == nil && noQuorum {
    					// Clean the lock locally and in remote nodes
    					forceUnlock(ctx, dm.clnt, id)
    					// Execute the caller lock loss callback
    					if lockLossCallback != nil {
    						lockLossCallback()
    					}
    					return
    				}
    
    Go
    - Registered: Sun May 05 19:28:20 GMT 2024
    - Last Modified: Thu Feb 22 06:26:06 GMT 2024
    - 19.7K bytes
    - Viewed (0)
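    The excerpt above is dsync's lock-refresh loop: each timer tick refreshes the held lock, and if the refresh succeeds but reports a lost quorum, the lock is force-unlocked locally and on remote nodes and the caller's lock-loss callback is invoked before the goroutine exits. A stripped-down sketch of the same pattern, using only the standard library and hypothetical refresh/forceUnlock stand-ins rather than the real dsync API:

    // Sketch: refresh a held lock until the context is cancelled or quorum is lost.
    package example

    import (
        "context"
        "time"
    )

    func keepLockAlive(ctx context.Context, interval time.Duration,
        refresh func(context.Context) (noQuorum bool, err error),
        forceUnlock func(context.Context),
        onLockLost func()) {

        t := time.NewTicker(interval)
        defer t.Stop()

        for {
            select {
            case <-ctx.Done():
                return // caller released the lock or is shutting down
            case <-t.C:
                noQuorum, err := refresh(ctx)
                if err == nil && noQuorum {
                    // Quorum lost: drop the lock everywhere and notify the caller.
                    forceUnlock(ctx)
                    if onLockLost != nil {
                        onLockLost()
                    }
                    return
                }
            }
        }
    }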
  4. cmd/erasure-multipart.go

    	}
    	g.Wait()
    }
    
    // Clean up the old multipart uploads. Should be run in a goroutine.
    func (er erasureObjects) cleanupStaleUploads(ctx context.Context, expiry time.Duration) {
        // run multiple cleanups local to this server.
    	var wg sync.WaitGroup
    	for _, disk := range er.getLocalDisks() {
    		if disk != nil {
    			wg.Add(1)
    			go func(disk StorageAPI) {
    				defer wg.Done()
    Go
    - Registered: Sun May 05 19:28:20 GMT 2024
    - Last Modified: Sun Apr 28 17:53:50 GMT 2024
    - 43K bytes
    - Viewed (0)
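    The excerpt above fans out one goroutine per local disk under a sync.WaitGroup so stale multipart uploads are cleaned concurrently, skipping nil (offline) disks. A generic sketch of that fan-out-and-wait pattern; the Disk interface below is a placeholder, not MinIO's StorageAPI:

    // Sketch: run a best-effort cleanup on every online disk in parallel.
    package example

    import (
        "context"
        "sync"
        "time"
    )

    // Disk is an illustrative stand-in for a per-disk storage handle.
    type Disk interface {
        CleanupStale(ctx context.Context, olderThan time.Duration) error
    }

    func cleanupAll(ctx context.Context, disks []Disk, expiry time.Duration) {
        var wg sync.WaitGroup
        for _, disk := range disks {
            if disk == nil {
                continue // skip offline disks, as the original does
            }
            wg.Add(1)
            go func(d Disk) {
                defer wg.Done()
                _ = d.CleanupStale(ctx, expiry) // cleanup is best effort
            }(disk)
        }
        wg.Wait()
    }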
  5. cmd/metacache-set.go

    	Replication replicationConfig `msg:"-"`
    
        // StopDiskAtLimit will stop listing on each disk when the limit number of objects has been returned.
    	StopDiskAtLimit bool
    
        // pool and set where the cache is located.
    	pool, set int
    }
    
    func init() {
    	gob.Register(listPathOptions{})
    }
    
    func (o *listPathOptions) setBucketMeta(ctx context.Context) {
    	lc, _ := globalLifecycleSys.Get(o.Bucket)
    Go
    - Registered: Sun May 05 19:28:20 GMT 2024
    - Last Modified: Wed May 01 17:59:08 GMT 2024
    - 30.4K bytes
    - Viewed (0)
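    The excerpt above registers listPathOptions with encoding/gob in an init function, so the listing options can be serialized when they travel inside interface values between nodes. A self-contained sketch of the same registration-and-round-trip idea, with an illustrative struct rather than MinIO's actual type:

    // Sketch: register a struct with gob in init and round-trip it through a buffer.
    package main

    import (
        "bytes"
        "encoding/gob"
        "fmt"
        "log"
    )

    // listOpts is an illustrative stand-in for listPathOptions.
    type listOpts struct {
        Bucket          string
        StopDiskAtLimit bool
    }

    func init() {
        // Registration lets the concrete type be encoded behind an interface value.
        gob.Register(listOpts{})
    }

    func main() {
        var buf bytes.Buffer
        in := listOpts{Bucket: "photos", StopDiskAtLimit: true}

        if err := gob.NewEncoder(&buf).Encode(&in); err != nil {
            log.Fatal(err)
        }
        var out listOpts
        if err := gob.NewDecoder(&buf).Decode(&out); err != nil {
            log.Fatal(err)
        }
        fmt.Printf("decoded: %+v\n", out)
    }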
  6. docs/pt/docs/deployment/docker.md

    So, in that case, it could be simpler to have **a single container** with **multiple processes**, plus a local tool (for example, a Prometheus exporter) in that same container collecting Prometheus metrics for all the internal processes and exposing those metrics on that single container.
    
    ---
    
    Plain Text
    - Registered: Sun May 05 07:19:11 GMT 2024
    - Last Modified: Thu Apr 18 19:53:19 GMT 2024
    - 37.4K bytes
    - Viewed (0)