Results 1 - 10 of 244 for allocable (0.27 sec)

  1. src/debug/elf/file.go

    		s.Size = binary.BigEndian.Uint64(b[4:12])
    		zrd = zlib.NewReader
    
    	} else if s.Flags&SHF_ALLOC != 0 {
    		return errorReader{&FormatError{int64(s.Offset),
    			"SHF_COMPRESSED applies only to non-allocable sections", s.compressionType}}
    	}
    
    	switch s.compressionType {
    	case COMPRESS_ZLIB:
    		zrd = zlib.NewReader
    	case COMPRESS_ZSTD:
    		zrd = func(r io.Reader) (io.ReadCloser, error) {
    - Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Tue Apr 23 16:49:58 UTC 2024
    - 43.1K bytes
    - Viewed (0)
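    For context, a minimal self-contained sketch (not taken from file.go; the binary path is illustrative) of how the rule quoted above, that SHF_COMPRESSED is only valid on non-allocable sections, can be checked through the public debug/elf API:

        // Sketch only: scan section headers and flag any section that is both
        // allocable and compressed, which Section.Open above rejects.
        package main

        import (
            "debug/elf"
            "fmt"
            "log"
        )

        func main() {
            f, err := elf.Open("/bin/ls") // illustrative path; any ELF file works
            if err != nil {
                log.Fatal(err)
            }
            defer f.Close()

            for _, s := range f.Sections {
                if s.Flags&elf.SHF_COMPRESSED != 0 && s.Flags&elf.SHF_ALLOC != 0 {
                    fmt.Printf("unexpected: %s is both allocable and compressed\n", s.Name)
                }
            }
        }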
  2. pkg/kubelet/cm/devicemanager/manager_test.go

    				t.Fatalf("timeout while waiting for manager update")
    			}
    			capacity, allocatable, _ := m.GetCapacity()
    			resourceCapacity := capacity[v1.ResourceName(testResourceName)]
    			resourceAllocatable := allocatable[v1.ResourceName(testResourceName)]
    			require.Equal(t, resourceCapacity.Value(), resourceAllocatable.Value(), "capacity should equal to allocatable")
    			require.Equal(t, int64(2), resourceAllocatable.Value(), "Devices are not updated.")
    - Registered: Sat Jun 15 01:39:40 UTC 2024
    - Last Modified: Tue Jun 04 06:25:43 UTC 2024
    - 65K bytes
    - Viewed (0)
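    For readers unfamiliar with the types involved, a small illustrative sketch (the resource name example.com/gpu is made up) of the capacity-equals-allocatable comparison the assertion above performs:

        // Sketch only: healthy devices are reported identically in the
        // capacity and allocatable maps, so their values match.
        package main

        import (
            "fmt"

            v1 "k8s.io/api/core/v1"
            "k8s.io/apimachinery/pkg/api/resource"
        )

        func main() {
            capacity := v1.ResourceList{"example.com/gpu": resource.MustParse("2")}
            allocatable := v1.ResourceList{"example.com/gpu": resource.MustParse("2")}

            c := capacity[v1.ResourceName("example.com/gpu")]
            a := allocatable[v1.ResourceName("example.com/gpu")]
            fmt.Println(c.Value() == a.Value()) // true
        }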
  3. pkg/registry/core/service/portallocator/storage/storage_test.go

    		t.Fatalf("unexpected error: %v", err)
    	}
    
    	// Allocate a port inside the valid port range
    	if err := storage.Allocate(30100); err != nil {
    		t.Fatal(err)
    	}
    
    	// Try to allocate the same port in the local bitmap
    	// The local bitmap stores the offset of the port
    	// offset = port - base (30100 - 30000 = 100)
    	ok, err := backing.Allocate(100)
    	if err != nil {
    		t.Fatal(err)
    	}
    - Registered: Sat Jun 15 01:39:40 UTC 2024
    - Last Modified: Wed May 08 07:15:02 UTC 2024
    - 8.1K bytes
    - Viewed (0)
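    The offset arithmetic in the comments above is simple enough to show directly; a toy sketch (not the Kubernetes allocator itself), assuming a base port of 30000:

        // Sketch only: the backing bitmap stores offsets, not ports,
        // so an absolute port is translated as offset = port - base.
        package main

        import "fmt"

        const basePort = 30000 // matches the base assumed in the test comment

        func offsetFor(port int) int {
            return port - basePort
        }

        func main() {
            fmt.Println(offsetFor(30100)) // 100, the offset allocated on the backing bitmap
        }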
  4. pkg/kubelet/cm/node_container_manager_linux.go

    )
    
    // createNodeAllocatableCgroups creates Node Allocatable Cgroup when CgroupsPerQOS flag is specified as true
    func (cm *containerManagerImpl) createNodeAllocatableCgroups() error {
    	nodeAllocatable := cm.internalCapacity
    	// Use Node Allocatable limits instead of capacity if the user requested enforcing node allocatable.
    	nc := cm.NodeConfig.NodeAllocatableConfig
    - Registered: Sat Jun 15 01:39:40 UTC 2024
    - Last Modified: Tue May 21 10:18:16 UTC 2024
    - 10.5K bytes
    - Viewed (0)
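    The snippet above starts Node Allocatable from internal capacity and then applies the configured reservations. A rough illustrative sketch of that subtraction (all quantities are made up):

        // Sketch only: Allocatable is roughly capacity minus kube-reserved,
        // system-reserved, and the hard eviction threshold.
        package main

        import (
            "fmt"

            "k8s.io/apimachinery/pkg/api/resource"
        )

        func main() {
            allocatable := resource.MustParse("8Gi")           // node memory capacity
            reservations := []string{"500Mi", "1Gi", "100Mi"}  // kube-reserved, system-reserved, eviction
            for _, r := range reservations {
                allocatable.Sub(resource.MustParse(r))
            }
            fmt.Println(allocatable.String()) // what is left for pods
        }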
  5. pkg/registry/core/service/portallocator/allocator_test.go

    	if err := r.Release(released); err != nil {
    		t.Fatal(err)
    	}
    
    	err = r.Allocate(1)
    	if _, ok := err.(*ErrNotInRange); !ok {
    		t.Fatal(err)
    	}
    
    	if err := r.Allocate(10001); err != ErrAllocated {
    		t.Fatal(err)
    	}
    
    	err = r.Allocate(20000)
    	if _, ok := err.(*ErrNotInRange); !ok {
    		t.Fatal(err)
    	}
    
    	err = r.Allocate(10201)
    	if _, ok := err.(*ErrNotInRange); !ok {
    		t.Fatal(err)
    	}
    - Registered: Sat Jun 15 01:39:40 UTC 2024
    - Last Modified: Wed May 08 07:15:02 UTC 2024
    - 14K bytes
    - Viewed (0)
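    The test above exercises two failure modes: ports outside the configured range and ports already handed out. A toy allocator (not the real portallocator) showing the same behaviour, assuming a range of 10000-10200:

        // Sketch only: range checks and double-allocation checks.
        package main

        import (
            "errors"
            "fmt"
        )

        var (
            errNotInRange = errors.New("not in the valid range")
            errAllocated  = errors.New("already allocated")
        )

        type toyAllocator struct {
            base, size int
            used       map[int]bool
        }

        func (a *toyAllocator) Allocate(port int) error {
            if port < a.base || port >= a.base+a.size {
                return errNotInRange
            }
            if a.used[port] {
                return errAllocated
            }
            a.used[port] = true
            return nil
        }

        func main() {
            a := &toyAllocator{base: 10000, size: 201, used: map[int]bool{}}
            fmt.Println(a.Allocate(10001)) // <nil> on first use
            fmt.Println(a.Allocate(10001)) // already allocated
            fmt.Println(a.Allocate(20000)) // not in the valid range
        }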
  6. pkg/kubelet/kubelet_node_status_test.go

    	assert.NoError(t, err)
    	assert.True(t, apiequality.Semantic.DeepEqual(expectedNode.Status.Allocatable, updatedNode.Status.Allocatable), "%s", cmp.Diff(expectedNode.Status.Allocatable, updatedNode.Status.Allocatable))
    }
    
    func TestUpdateDefaultLabels(t *testing.T) {
    	testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
    - Registered: Sat Jun 15 01:39:40 UTC 2024
    - Last Modified: Wed May 08 19:23:19 UTC 2024
    - 115.8K bytes
    - Viewed (0)
  7. pkg/scheduler/framework/types.go

    	// many zero-request pods onto one node.
    	NonZeroRequested *Resource
    	// We store allocatedResources (which is Node.Status.Allocatable.*) explicitly
    	// as int64, to avoid conversions and accessing map.
    	Allocatable *Resource
    
    	// ImageStates holds the entry of an image if and only if this image is on the node. The entry can be used for
    - Registered: Sat Jun 15 01:39:40 UTC 2024
    - Last Modified: Wed Jun 12 19:28:17 UTC 2024
    - 36.7K bytes
    - Viewed (0)
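    The comment above explains why Allocatable is kept as plain int64 fields rather than the Node.Status.Allocatable map. A trimmed-down sketch of that idea (the field set and helper are illustrative, not the framework's actual Resource type):

        // Sketch only: convert the ResourceList once, then read int64 fields
        // on the hot path instead of doing map lookups and Quantity math.
        package main

        import (
            "fmt"

            v1 "k8s.io/api/core/v1"
            "k8s.io/apimachinery/pkg/api/resource"
        )

        type miniResource struct {
            MilliCPU int64
            Memory   int64
        }

        func fromList(rl v1.ResourceList) miniResource {
            cpu := rl[v1.ResourceCPU]
            mem := rl[v1.ResourceMemory]
            return miniResource{MilliCPU: cpu.MilliValue(), Memory: mem.Value()}
        }

        func main() {
            alloc := fromList(v1.ResourceList{
                v1.ResourceCPU:    resource.MustParse("4"),
                v1.ResourceMemory: resource.MustParse("16Gi"),
            })
            fmt.Println(alloc.MilliCPU, alloc.Memory) // 4000 17179869184
        }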
  8. pkg/kubelet/cm/container_manager_linux.go

    		}
    		klog.InfoS("Container manager verified user specified cgroup-root exists", "cgroupRoot", cgroupRoot)
    		// Include the top level cgroup for enforcing node allocatable into cgroup-root.
    		// This way, all sub modules can avoid having to understand the concept of node allocatable.
    		cgroupRoot = NewCgroupName(cgroupRoot, defaultNodeAllocatableCgroupName)
    	}
    - Registered: Sat Jun 15 01:39:40 UTC 2024
    - Last Modified: Tue May 21 10:18:16 UTC 2024
    - 35.1K bytes
    - Viewed (0)
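    The comment above describes nesting the node allocatable cgroup under cgroup-root so that lower layers only ever see one root. A toy sketch of that composition (names are illustrative, not the kubelet's constants):

        // Sketch only: everything the kubelet creates hangs off the
        // node-allocatable cgroup, which becomes the effective root.
        package main

        import (
            "fmt"
            "path"
        )

        func main() {
            cgroupRoot := "/"
            nodeAllocatable := "kubepods" // stand-in for defaultNodeAllocatableCgroupName
            effectiveRoot := path.Join(cgroupRoot, nodeAllocatable)
            fmt.Println(path.Join(effectiveRoot, "pod1234")) // /kubepods/pod1234
        }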
  9. pkg/kubelet/nodestatus/setters.go

    			if old, ok := node.Status.Allocatable[k]; !ok || old.Value() != v.Value() {
    				klog.V(2).InfoS("Updated allocatable", "device", k, "allocatable", v.Value())
    			}
    			node.Status.Allocatable[k] = v
    		}
    		// for every huge page reservation, we need to remove it from allocatable memory
    		for k, v := range node.Status.Capacity {
    			if v1helper.IsHugePageResourceName(k) {
    				allocatableMemory := node.Status.Allocatable[v1.ResourceMemory]
    - Registered: Sat Jun 15 01:39:40 UTC 2024
    - Last Modified: Thu Apr 25 12:12:04 UTC 2024
    - 30.5K bytes
    - Viewed (0)
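    The loop above subtracts every huge page reservation from allocatable memory, since memory set aside for huge pages cannot be used as ordinary memory. An illustrative sketch of that adjustment (quantities are made up):

        // Sketch only: huge page capacity comes out of allocatable memory.
        package main

        import (
            "fmt"

            "k8s.io/apimachinery/pkg/api/resource"
        )

        func main() {
            allocatableMemory := resource.MustParse("16Gi")
            hugePages := resource.MustParse("2Gi") // e.g. total hugepages-2Mi capacity
            allocatableMemory.Sub(hugePages)
            fmt.Println(allocatableMemory.String()) // 14Gi
        }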
  10. pkg/kubelet/cm/cpumanager/policy_static_test.go

    			defaultCPUSet: testCase.stDefaultCPUSet,
    		}
    		pod := testCase.pod
    
    		// allocate
    		for _, container := range append(pod.Spec.InitContainers, pod.Spec.Containers...) {
    			policy.Allocate(st, pod, &container)
    		}
    		if !reflect.DeepEqual(st.defaultCPUSet, testCase.expCSetAfterAlloc) {
    			t.Errorf("StaticPolicy Allocate() error (%v). expected default cpuset %v but got %v",
    - Registered: Sat Jun 15 01:39:40 UTC 2024
    - Last Modified: Wed Apr 24 18:25:29 UTC 2024
    - 40.8K bytes
    - Viewed (0)
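    The test above allocates CPUs for every init and app container and then checks that the shared default CPU set shrank accordingly. A toy model of that behaviour (not the static policy's real logic):

        // Sketch only: exclusively allocated CPUs are removed from the
        // shared default set.
        package main

        import "fmt"

        func allocate(defaultSet map[int]bool, want int) []int {
            var granted []int
            for cpu := range defaultSet {
                if len(granted) == want {
                    break
                }
                granted = append(granted, cpu)
                delete(defaultSet, cpu)
            }
            return granted
        }

        func main() {
            defaultSet := map[int]bool{0: true, 1: true, 2: true, 3: true}
            granted := allocate(defaultSet, 2)
            fmt.Println(len(granted), len(defaultSet)) // 2 2
        }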