- Sort: Score
- Results per page: 10
- Languages: All
Results 31 - 40 of 139 for "nodeInfos" (0.13 sec)
-
pkg/scheduler/framework/plugins/podtopologyspread/filtering_test.go
t.Errorf("preFilter failed with status: %v", s) } for _, node := range tt.nodes { nodeInfo, _ := snapshot.NodeInfos().Get(node.Name) status := p.Filter(context.Background(), state, tt.pod, nodeInfo) if len(tt.wantStatusCode) != 0 && status.Code() != tt.wantStatusCode[node.Name] {
Registered: Sat Jun 15 01:39:40 UTC 2024 - Last Modified: Wed Feb 28 10:42:29 UTC 2024 - 143.1K bytes - Viewed (0) -
pkg/scheduler/framework/plugins/nodeports/node_ports_test.go
} func TestNodePorts(t *testing.T) { tests := []struct { pod *v1.Pod nodeInfo *framework.NodeInfo name string wantPreFilterStatus *framework.Status wantFilterStatus *framework.Status }{ { pod: &v1.Pod{}, nodeInfo: framework.NewNodeInfo(), name: "skip filter",
Registered: Sat Jun 15 01:39:40 UTC 2024 - Last Modified: Thu Oct 19 11:02:11 UTC 2023 - 10.1K bytes - Viewed (0) -
pkg/scheduler/framework/plugins/volumerestrictions/volume_restrictions_test.go
tests := []struct { pod *v1.Pod nodeInfo *framework.NodeInfo name string preFilterWantStatus *framework.Status wantStatus *framework.Status }{ { pod: &v1.Pod{}, nodeInfo: framework.NewNodeInfo(), name: "nothing",
Registered: Sat Jun 15 01:39:40 UTC 2024 - Last Modified: Fri Oct 20 17:40:39 UTC 2023 - 17.3K bytes - Viewed (0) -
pkg/scheduler/framework/plugins/noderesources/resource_allocation.go
} switch resource { case v1.ResourceCPU: return nodeInfo.Allocatable.MilliCPU, (requested.MilliCPU + podRequest) case v1.ResourceMemory: return nodeInfo.Allocatable.Memory, (requested.Memory + podRequest) case v1.ResourceEphemeralStorage: return nodeInfo.Allocatable.EphemeralStorage, (nodeInfo.Requested.EphemeralStorage + podRequest) default: if _, exists := nodeInfo.Allocatable.ScalarResources[resource]; exists {
Registered: Sat Jun 15 01:39:40 UTC 2024 - Last Modified: Sat May 20 14:53:43 UTC 2023 - 5.3K bytes - Viewed (0) -
pkg/scheduler/framework/plugins/noderesources/fit_test.go
enoughPodsTests := []struct { pod *v1.Pod nodeInfo *framework.NodeInfo name string args config.NodeResourcesFitArgs wantInsufficientResources []InsufficientResource wantStatus *framework.Status }{ { pod: &v1.Pod{}, nodeInfo: framework.NewNodeInfo(
Registered: Sat Jun 15 01:39:40 UTC 2024 - Last Modified: Wed Jun 12 13:26:09 UTC 2024 - 57.4K bytes - Viewed (0) -
pkg/kubelet/lifecycle/predicate_test.go
), expectedPod: makeTestPod( v1.ResourceList{}, // Requests v1.ResourceList{}, // Limits ), }, } { nodeInfo := schedulerframework.NewNodeInfo() nodeInfo.SetNode(test.node) pod := removeMissingExtendedResources(test.pod, nodeInfo) if diff := cmp.Diff(test.expectedPod, pod); diff != "" { t.Errorf("unexpected pod (-want, +got):\n%s", diff) } } }
Registered: Sat Jun 15 01:39:40 UTC 2024 - Last Modified: Fri Mar 01 23:13:50 UTC 2024 - 14.2K bytes - Viewed (0) -
pkg/volume/csi/nodeinfomanager/nodeinfomanager.go
maxAttachLimit int64, topology map[string]string) error { nodeInfo, err := csiKubeClient.StorageV1().CSINodes().Get(context.TODO(), string(nim.nodeName), metav1.GetOptions{}) if nodeInfo == nil || errors.IsNotFound(err) { nodeInfo, err = nim.CreateCSINode() } if err != nil { return err } return nim.installDriverToCSINode(nodeInfo, driverName, driverNodeID, maxAttachLimit, topology) }
Registered: Sat Jun 15 01:39:40 UTC 2024 - Last Modified: Sat May 11 09:02:45 UTC 2024 - 20.1K bytes - Viewed (0) -
pkg/scheduler/framework/plugins/nodename/node_name.go
if !Fits(pod, nodeInfo) { return framework.NewStatus(framework.UnschedulableAndUnresolvable, ErrReason) } return nil } // Fits actually checks if the pod fits the node. func Fits(pod *v1.Pod, nodeInfo *framework.NodeInfo) bool { return len(pod.Spec.NodeName) == 0 || pod.Spec.NodeName == nodeInfo.Node().Name }
Registered: Sat Jun 15 01:39:40 UTC 2024 - Last Modified: Wed Sep 20 09:49:54 UTC 2023 - 2.3K bytes - Viewed (0) -
pkg/scheduler/framework/plugins/nodeports/node_ports.go
// Fits checks if the pod fits the node. func Fits(pod *v1.Pod, nodeInfo *framework.NodeInfo) bool { return fitsPorts(getContainerPorts(pod), nodeInfo) } func fitsPorts(wantPorts []*v1.ContainerPort, nodeInfo *framework.NodeInfo) bool { // try to see whether existingPorts and wantPorts will conflict or not existingPorts := nodeInfo.UsedPorts for _, cp := range wantPorts {
Registered: Sat Jun 15 01:39:40 UTC 2024 - Last Modified: Tue Mar 26 10:53:29 UTC 2024 - 7.5K bytes - Viewed (0) -
pkg/kubelet/lifecycle/predicate.go
return e.PredicateDesc } // generalFilter checks a group of filterings that the kubelet cares about. func generalFilter(pod *v1.Pod, nodeInfo *schedulerframework.NodeInfo) []PredicateFailureReason { admissionResults := scheduler.AdmissionCheck(pod, nodeInfo, true) var reasons []PredicateFailureReason for _, r := range admissionResults { if r.InsufficientResource != nil {
Registered: Sat Jun 15 01:39:40 UTC 2024 - Last Modified: Thu Oct 19 00:47:50 UTC 2023 - 10.9K bytes - Viewed (0)