Results 1 - 4 of 4 for existingVolumes (0.18 sec)

  1. pkg/scheduler/framework/plugins/nodevolumelimits/non_csi.go

    	}
    
    	// count unique volumes
    	existingVolumes := sets.New[string]()
    	for _, existingPod := range nodeInfo.Pods {
    		if err := pl.filterVolumes(logger, existingPod.Pod, false /* existing pod */, existingVolumes); err != nil {
    			return framework.AsStatus(err)
    		}
    	}
    	numExistingVolumes := len(existingVolumes)
    
    	// filter out already-mounted volumes
    	for k := range existingVolumes {
    		delete(newVolumes, k)
    	}
    
    - Last Modified: Sat Dec 30 23:00:56 UTC 2023
    - 19.6K bytes
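
    This excerpt counts a node's volumes by collecting them into a set, then deletes every already-mounted volume from the candidate set so only genuinely new attachments count against the node limit. Below is a minimal sketch of that set-difference pattern; the function name and inputs are hypothetical, and a plain map stands in for the apimachinery sets.Set used in the plugin.

    package main

    import "fmt"

    // countNewVolumes mirrors the pattern in the excerpt: gather the volumes
    // already on the node into a set, then subtract them from the volumes the
    // new pod requests. Names and signatures are illustrative, not the
    // plugin's actual API.
    func countNewVolumes(existing, requested []string) int {
    	existingVolumes := map[string]struct{}{}
    	for _, v := range existing {
    		existingVolumes[v] = struct{}{}
    	}
    	newVolumes := map[string]struct{}{}
    	for _, v := range requested {
    		newVolumes[v] = struct{}{}
    	}
    	// Filter out already-mounted volumes, as the excerpt does.
    	for k := range existingVolumes {
    		delete(newVolumes, k)
    	}
    	return len(newVolumes)
    }

    func main() {
    	fmt.Println(countNewVolumes([]string{"a", "b"}, []string{"b", "c"})) // prints 1
    }
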
  2. pkg/kubelet/kubelet_node_status_test.go

    	assert.IsType(t, core.GetActionImpl{}, actions[9])
    }
    
    func TestUpdateNodeStatusAndVolumesInUseWithNodeLease(t *testing.T) {
    	cases := []struct {
    		desc                  string
    		existingVolumes       []v1.UniqueVolumeName // volumes to initially populate volumeManager
    		existingNode          *v1.Node              // existing node object
    		expectedNode          *v1.Node              // new node object after patch
    - Last Modified: Wed May 08 19:23:19 UTC 2024
    - 115.8K bytes
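
    The struct in this excerpt is the header of a table-driven test: each case bundles a description, the volumes to pre-populate the volume manager with, and the node objects before and after the patch. A minimal sketch of the same pattern follows, run against a hypothetical stand-in function rather than kubelet code.

    package demo

    import "testing"

    // sum is a hypothetical stand-in for the function under test.
    func sum(xs []int) int {
    	total := 0
    	for _, x := range xs {
    		total += x
    	}
    	return total
    }

    // TestSum uses the same table-driven shape as the excerpt: each case
    // carries a description plus its inputs and the expected result.
    func TestSum(t *testing.T) {
    	cases := []struct {
    		desc string
    		in   []int
    		want int
    	}{
    		{desc: "empty slice", in: nil, want: 0},
    		{desc: "two elements", in: []int{1, 2}, want: 3},
    	}
    	for _, tc := range cases {
    		t.Run(tc.desc, func(t *testing.T) {
    			if got := sum(tc.in); got != tc.want {
    				t.Errorf("sum(%v) = %d, want %d", tc.in, got, tc.want)
    			}
    		})
    	}
    }
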
  3. pkg/scheduler/framework/plugins/volumerestrictions/volume_restrictions.go

    			if iqn == eiqn && !(volume.ISCSI.ReadOnly && existingVolume.ISCSI.ReadOnly) {
    				return true
    			}
    		}
    
    		if volume.RBD != nil && existingVolume.RBD != nil {
    			mon, pool, image := volume.RBD.CephMonitors, volume.RBD.RBDPool, volume.RBD.RBDImage
    			emon, epool, eimage := existingVolume.RBD.CephMonitors, existingVolume.RBD.RBDPool, existingVolume.RBD.RBDImage
    - Last Modified: Fri Oct 20 17:40:39 UTC 2023
    - 12.5K bytes
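
    This excerpt applies the read-only exception in the scheduler's volume-restrictions check: two volumes that resolve to the same backing device (same iSCSI IQN, or same RBD monitors, pool, and image) conflict unless both are mounted read-only. A minimal sketch of that rule over a hypothetical volume type, not the actual v1 volume sources:

    package main

    import "fmt"

    // volume is a hypothetical stand-in for the ISCSI/RBD sources compared in
    // volume_restrictions.go: one identifier for the backing device plus the
    // read-only flag.
    type volume struct {
    	image    string
    	readOnly bool
    }

    // haveConflict applies the rule from the excerpt: the same backing image
    // is a conflict unless both mounts are read-only.
    func haveConflict(a, b volume) bool {
    	return a.image == b.image && !(a.readOnly && b.readOnly)
    }

    func main() {
    	fmt.Println(haveConflict(volume{"img1", true}, volume{"img1", true}))   // false: both read-only
    	fmt.Println(haveConflict(volume{"img1", false}, volume{"img1", true}))  // true: one is writable
    	fmt.Println(haveConflict(volume{"img1", false}, volume{"img2", false})) // false: different images
    }
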
  4. pkg/kubelet/volumemanager/cache/desired_state_of_world.go

    		// It will be added below as podToMount, now just report SELinux metric.
    		if pluginSupportsSELinuxContextMount {
    			existingVolume := dsw.volumesToMount[volumeName]
    			if seLinuxFileLabel != existingVolume.originalSELinuxLabel {
    				fullErr := fmt.Errorf("conflicting SELinux labels of volume %s: %q and %q", volumeSpec.Name(), existingVolume.originalSELinuxLabel, seLinuxFileLabel)
    				supported := util.VolumeSupportsSELinuxMount(volumeSpec)
    - Last Modified: Tue Jun 04 06:25:43 UTC 2024
    - 27.1K bytes
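
    Here the desired-state-of-world cache compares the SELinux label requested for a volume against the label already recorded for that volume and reports a conflict on mismatch. Below is a minimal sketch of that check; the labelCache type and addVolume method are illustrative stand-ins, not the actual DesiredStateOfWorld API.

    package main

    import "fmt"

    // labelCache is an illustrative stand-in for the desired-state-of-world
    // cache: it records one SELinux label per volume name.
    type labelCache struct {
    	labels map[string]string
    }

    // addVolume mirrors the check in the excerpt: a second pod asking for a
    // different label on the same volume is a conflict.
    func (c *labelCache) addVolume(name, seLinuxLabel string) error {
    	if existing, ok := c.labels[name]; ok && existing != seLinuxLabel {
    		return fmt.Errorf("conflicting SELinux labels of volume %s: %q and %q",
    			name, existing, seLinuxLabel)
    	}
    	c.labels[name] = seLinuxLabel
    	return nil
    }

    func main() {
    	c := &labelCache{labels: map[string]string{}}
    	fmt.Println(c.addVolume("vol-1", "s0:c1,c2")) // <nil>
    	fmt.Println(c.addVolume("vol-1", "s0:c3,c4")) // conflicting SELinux labels ...
    }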