Results 71 - 80 of 656 for "effort" (0.12 sec)

  1. pkg/controller/volume/persistentvolume/pv_controller.go

    // shuttle.
    //
    // Originally, the work of this controller was split amongst three
    // controllers. This controller is the result of a large effort to simplify the
    // PV subsystem. During that effort, it became clear that we needed to ensure
    // that every single condition was handled and accounted for in the code, even
    // if it resulted in no-op code branches.
    //
    Registered: Sat Jun 15 01:39:40 UTC 2024
    - Last Modified: Fri May 10 08:42:31 UTC 2024
    - 89.2K bytes
    - Viewed (0)
  2. src/reflect/type.go

    	// visited records the structs that have been considered already.
    	// Embedded pointer fields can create cycles in the graph of
    	// reachable embedded types; visited avoids following those cycles.
    	// It also avoids duplicated effort: if we didn't find the field in an
    	// embedded type T at level 2, we won't find it in one at level 4 either.
    	visited := map[*structType]bool{}
    
    	for len(next) > 0 {
    		current, next = next, current[:0]
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Wed May 29 17:58:53 UTC 2024
    - 85.5K bytes
    - Viewed (0)
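    A minimal, self-contained Go sketch of the pattern described in the excerpt above: breadth-first traversal over a graph that may contain cycles, using a visited set so each node is expanded at most once and no work is duplicated at deeper levels. The node type and the two-node cycle below are hypothetical; reflect/type.go applies the same idea to embedded struct types.

    package main

    import "fmt"

    type node struct {
    	name     string
    	children []*node
    }

    // bfs walks the graph level by level, reusing the two slices much like the
    // excerpt does, and skipping nodes it has already considered.
    func bfs(root *node) []string {
    	visited := map[*node]bool{}
    	var order []string

    	current, next := []*node{}, []*node{root}
    	for len(next) > 0 {
    		current, next = next, current[:0]
    		for _, n := range current {
    			if visited[n] {
    				continue // seen at a shallower level; revisiting cannot help
    			}
    			visited[n] = true
    			order = append(order, n.name)
    			next = append(next, n.children...)
    		}
    	}
    	return order
    }

    func main() {
    	a := &node{name: "A"}
    	b := &node{name: "B", children: []*node{a}} // cycle back to A
    	a.children = []*node{b}
    	fmt.Println(bfs(a)) // prints [A B]; the cycle does not loop forever
    }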
  3. pkg/apis/core/types.go

    	// +optional
    	Value string
    	// Effect indicates the taint effect to match. Empty means match all taint effects.
    	// When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute.
    	// +optional
    	Effect TaintEffect
    	// TolerationSeconds represents the period of time the toleration (which must be
    	// of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default,
    Registered: Sat Jun 15 01:39:40 UTC 2024
    - Last Modified: Wed May 29 22:40:29 UTC 2024
    - 268.9K bytes
    - Viewed (0)
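    A minimal Go sketch of the Toleration fields described in the excerpt above. The types here only mirror the excerpted definition rather than importing the Kubernetes API packages, and the taint key and 300-second value are arbitrary examples.

    package main

    import "fmt"

    type TaintEffect string

    const (
    	TaintEffectNoSchedule       TaintEffect = "NoSchedule"
    	TaintEffectPreferNoSchedule TaintEffect = "PreferNoSchedule"
    	TaintEffectNoExecute        TaintEffect = "NoExecute"
    )

    type Toleration struct {
    	Key   string
    	Value string
    	// Effect left empty means match all taint effects.
    	Effect TaintEffect
    	// TolerationSeconds only applies when Effect is NoExecute.
    	TolerationSeconds *int64
    }

    func main() {
    	seconds := int64(300)
    	t := Toleration{
    		Key:               "node.kubernetes.io/not-ready",
    		Effect:            TaintEffectNoExecute,
    		TolerationSeconds: &seconds, // tolerate the taint for 300s, then evict
    	}
    	fmt.Printf("key=%s effect=%s seconds=%d\n", t.Key, t.Effect, *t.TolerationSeconds)
    }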
  4. android/guava/src/com/google/common/util/concurrent/AbstractFuture.java

        }
        // The future has already been set to something. If it is cancellation we should cancel the
        // incoming future.
        if (localValue instanceof Cancellation) {
          // we don't care if it fails, this is best-effort.
          future.cancel(((Cancellation) localValue).wasInterrupted);
        }
        return false;
      }
    
      /**
       * Returns a value that satisfies the contract of the {@link #value} field based on the state of
    Registered: Wed Jun 12 16:38:11 UTC 2024
    - Last Modified: Fri Jun 07 22:25:23 UTC 2024
    - 63.1K bytes
    - Viewed (1)
  5. guava/src/com/google/common/util/concurrent/AbstractFuture.java

        }
        // The future has already been set to something. If it is cancellation we should cancel the
        // incoming future.
        if (localValue instanceof Cancellation) {
          // we don't care if it fails, this is best-effort.
          future.cancel(((Cancellation) localValue).wasInterrupted);
        }
        return false;
      }
    
      /**
       * Returns a value that satisfies the contract of the {@link #value} field based on the state of
    Registered: Wed Jun 12 16:38:11 UTC 2024
    - Last Modified: Fri Jun 07 22:25:23 UTC 2024
    - 62.8K bytes
    - Viewed (1)
  6. src/runtime/mgcmark.go

    		}
    		gcw.heapScanWork = 0
    	}
    }
    
    // gcDrainN blackens grey objects until it has performed roughly
    // scanWork units of scan work or the G is preempted. This is
    // best-effort, so it may perform less work if it fails to get a work
    // buffer. Otherwise, it will perform at least n units of work, but
    // may perform more because scanning is always done in whole object
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Thu Apr 18 21:25:11 UTC 2024
    - 52.5K bytes
    - Viewed (0)
  7. src/cmd/dist/test.go

    	type pathMode struct {
    		path string
    		mode os.FileMode
    	}
    	var dirs []pathMode // in lexical order
    
    	undo = func() {
    		for i := range dirs {
    			os.Chmod(dirs[i].path, dirs[i].mode) // best effort
    		}
    	}
    
    	filepath.WalkDir(dir, func(path string, d fs.DirEntry, err error) error {
    		if suffix := strings.TrimPrefix(path, dir+string(filepath.Separator)); suffix != "" {
    			if suffix == ".git" {
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Mon May 20 16:01:35 UTC 2024
    - 50K bytes
    - Viewed (0)
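    A self-contained Go sketch of the record-then-restore pattern in the snippet above: remember each directory's original mode before changing it, and let undo put the modes back on a best-effort basis, ignoring individual Chmod failures. The makeReadOnly helper and the temporary directory are made up for illustration.

    package main

    import (
    	"io/fs"
    	"os"
    	"path/filepath"
    )

    type pathMode struct {
    	path string
    	mode fs.FileMode
    }

    // makeReadOnly drops the write bits on every directory under dir and returns
    // an undo func that restores the recorded modes, ignoring individual errors.
    func makeReadOnly(dir string) (undo func(), err error) {
    	var dirs []pathMode // in walk order

    	undo = func() {
    		for i := range dirs {
    			os.Chmod(dirs[i].path, dirs[i].mode) // best effort: errors ignored
    		}
    	}

    	err = filepath.WalkDir(dir, func(path string, d fs.DirEntry, err error) error {
    		if err != nil || !d.IsDir() {
    			return err
    		}
    		info, infoErr := d.Info()
    		if infoErr != nil {
    			return infoErr
    		}
    		dirs = append(dirs, pathMode{path: path, mode: info.Mode()})
    		return os.Chmod(path, info.Mode()&^0o222) // drop write bits
    	})
    	return undo, err
    }

    func main() {
    	dir, err := os.MkdirTemp("", "undo-example")
    	if err != nil {
    		return
    	}
    	defer os.RemoveAll(dir) // runs last
    	undo, _ := makeReadOnly(dir)
    	defer undo() // runs first, restoring the original modes
    }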
  8. src/cmd/go/internal/modload/buildlist.go

    			return true
    		}
    	}
    	return false
    }
    
    // Graph returns the graph of module requirements loaded from the current
    // root modules (as reported by RootModules).
    //
    // Graph always makes a best effort to load the requirement graph despite any
    // errors, and always returns a non-nil *ModuleGraph.
    //
    // If the requirements of any relevant module fail to load, Graph also
    // returns a non-nil error of type *mvs.BuildListError.
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Wed May 15 16:04:44 UTC 2024
    - 53.8K bytes
    - Viewed (0)
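    A minimal Go sketch of the contract described in the excerpt above: a loader that always returns a usable, non-nil result and separately reports an error for whatever it could not load, rather than failing fast. The Graph type, the input maps, and the use of errors.Join are illustrative, not the real cmd/go implementation.

    package main

    import (
    	"errors"
    	"fmt"
    )

    type Graph struct {
    	Edges map[string][]string
    }

    // loadGraph records every requirement it can read and accumulates errors for
    // the ones it cannot, so callers always get a non-nil, partially useful graph.
    func loadGraph(reqs map[string][]string, broken map[string]error) (*Graph, error) {
    	g := &Graph{Edges: map[string][]string{}}
    	var errs []error
    	for mod, deps := range reqs {
    		if loadErr, ok := broken[mod]; ok {
    			errs = append(errs, fmt.Errorf("loading %s: %w", mod, loadErr))
    			continue
    		}
    		g.Edges[mod] = deps
    	}
    	return g, errors.Join(errs...) // nil if everything loaded
    }

    func main() {
    	g, err := loadGraph(
    		map[string][]string{"a": {"b"}, "b": nil, "c": {"a"}},
    		map[string]error{"c": errors.New("checksum mismatch")},
    	)
    	fmt.Println(len(g.Edges), "modules loaded; err:", err) // graph is usable despite the error
    }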
  9. pkg/kubelet/kuberuntime/kuberuntime_manager.go

    	//	is really slim.
    	//	2. When working with old version containers which have no restart count label,
    	//	we can only assume their restart count is 0.
    	// Anyhow, we only promised "best-effort" restart count reporting, so we can just ignore
    	// these limitations now.
    	// TODO: move this comment to SyncPod.
    	podSandboxIDs, err := m.getSandboxIDByPodUID(ctx, uid, nil)
    	if err != nil {
    		return nil, err
    Registered: Sat Jun 15 01:39:40 UTC 2024
    - Last Modified: Wed May 22 02:01:31 UTC 2024
    - 64.7K bytes
    - Viewed (0)
  10. src/runtime/mgcscavenge.go

    // during memory allocation) further ensures that chunks it identifies as "dense" are
    // immediately eligible for being backed by huge pages. Note that for the most part these
    // density heuristics are best-effort heuristics. It's totally possible (but unlikely)
    // that a chunk that just became dense is scavenged in the case of a race between memory
    // allocation and scavenging.
    //
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Wed May 08 17:48:45 UTC 2024
    - 52.3K bytes
    - Viewed (0)