
Results 1 - 10 of 34 for Size_ (0.04 sec)

  1. src/runtime/slice.go

    		newcap = int(capmem >> shift)
    		capmem = uintptr(newcap) << shift
    	default:
    		lenmem = uintptr(oldLen) * et.Size_
    		newlenmem = uintptr(newLen) * et.Size_
    		capmem, overflow = math.MulUintptr(et.Size_, uintptr(newcap))
    		capmem = roundupsize(capmem, noscan)
    		newcap = int(capmem / et.Size_)
    		capmem = uintptr(newcap) * et.Size_
    	}
    
    	// The check of overflow in addition to capmem > maxAlloc is needed
    Last Modified: Wed May 29 16:25:21 UTC 2024 (12.2K bytes)
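The growslice arithmetic above multiplies the requested capacity by the element size (et.Size_), rounds the allocation up to a size class with roundupsize, and divides back to get the final capacity. A minimal user-level sketch of the visible effect (exact capacities are an implementation detail and vary between Go versions and element sizes):

    package main

    import "fmt"

    func main() {
    	// Each append past cap goes through growslice, which computes the new
    	// allocation as newcap*Size_ rounded up to a size class; the slice's
    	// reported cap is whatever fits in that allocation.
    	var s []int64
    	for i := 0; i < 6; i++ {
    		s = append(s, int64(i))
    		fmt.Printf("len=%d cap=%d\n", len(s), cap(s))
    	}
    }
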
  2. src/runtime/mbarrier.go

    		racewriterangepc(dstPtr, uintptr(n)*typ.Size_, callerpc, pc)
    		racereadrangepc(srcPtr, uintptr(n)*typ.Size_, callerpc, pc)
    	}
    	if msanenabled {
    		msanwrite(dstPtr, uintptr(n)*typ.Size_)
    		msanread(srcPtr, uintptr(n)*typ.Size_)
    	}
    	if asanenabled {
    		asanwrite(dstPtr, uintptr(n)*typ.Size_)
    		asanread(srcPtr, uintptr(n)*typ.Size_)
    	}
    
    	if goexperiment.CgoCheck2 {
    Last Modified: Wed May 29 17:58:53 UTC 2024 (15.7K bytes)
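The calls above report n elements of typ.Size_ bytes each to the race detector and the memory sanitizers during a typed slice copy. A hedged sketch of ordinary code that reaches this path: copying a slice whose element type contains a pointer goes through the runtime's typed copy machinery.

    package main

    type node struct {
    	next *node
    	val  int
    }

    func main() {
    	src := make([]node, 4)
    	dst := make([]node, 4)
    	// Copying pointer-containing elements goes through the runtime's typed
    	// copy path, which is where the n*Size_ shadow ranges above come from.
    	copy(dst, src)
    }
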
  3. src/runtime/cgocheck.go

    		at := (*arraytype)(unsafe.Pointer(typ))
    		for i := uintptr(0); i < at.Len; i++ {
    			if off < at.Elem.Size_ {
    				cgoCheckUsingType(at.Elem, src, off, size)
    			}
    			src = add(src, at.Elem.Size_)
    			skipped := off
    			if skipped > at.Elem.Size_ {
    				skipped = at.Elem.Size_
    			}
    			checked := at.Elem.Size_ - skipped
    			off -= skipped
    			if size <= checked {
    				return
    			}
    			size -= checked
    		}
    Last Modified: Tue Apr 09 04:07:57 UTC 2024 (7.6K bytes)
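The loop above steps through an array element by element, first consuming the starting offset (off) and then counting checked bytes against size. A hypothetical standalone sketch of just that bookkeeping (walkArray and its example values are made up for illustration):

    package main

    import "fmt"

    // walkArray mirrors the skip/checked arithmetic above: off bytes are
    // skipped first, then up to size bytes are checked, elemSize bytes per
    // element of the array.
    func walkArray(elemSize, n, off, size uintptr) (touched int) {
    	for i := uintptr(0); i < n; i++ {
    		skipped := off
    		if skipped > elemSize {
    			skipped = elemSize
    		}
    		checked := elemSize - skipped
    		off -= skipped
    		touched++
    		if size <= checked {
    			return
    		}
    		size -= checked
    	}
    	return
    }

    func main() {
    	// Check 16 bytes starting 4 bytes into an array of 8-byte elements.
    	fmt.Println(walkArray(8, 4, 4, 16)) // 3 elements touched
    }
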
  4. src/runtime/unsafe.go

    func unsafeslice(et *_type, ptr unsafe.Pointer, len int) {
    	if len < 0 {
    		panicunsafeslicelen1(getcallerpc())
    	}
    
    	if et.Size_ == 0 {
    		if ptr == nil && len > 0 {
    			panicunsafeslicenilptr1(getcallerpc())
    		}
    	}
    
    	mem, overflow := math.MulUintptr(et.Size_, uintptr(len))
    	if overflow || mem > -uintptr(ptr) {
    		if ptr == nil {
    			panicunsafeslicenilptr1(getcallerpc())
    		}
    Last Modified: Tue Apr 02 21:51:18 UTC 2024 (3.1K bytes)
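unsafeslice implements the checks behind unsafe.Slice: a negative length, a nil pointer with a non-zero length, or len*Size_ overflowing the address space all panic. A small user-level sketch of those rules:

    package main

    import (
    	"fmt"
    	"unsafe"
    )

    func main() {
    	arr := [4]int32{1, 2, 3, 4}
    	s := unsafe.Slice(&arr[0], len(arr)) // ok: 4 * Size_(int32) bytes
    	fmt.Println(s)

    	// These would panic via the checks shown above:
    	// unsafe.Slice((*int32)(nil), 1) // nil pointer with len > 0
    	// unsafe.Slice(&arr[0], -1)      // negative length
    }
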
  5. src/runtime/select.go

    		}
    	}
    	if msanenabled {
    		if casi < nsends {
    			msanread(cas.elem, c.elemtype.Size_)
    		} else if cas.elem != nil {
    			msanwrite(cas.elem, c.elemtype.Size_)
    		}
    	}
    	if asanenabled {
    		if casi < nsends {
    			asanread(cas.elem, c.elemtype.Size_)
    		} else if cas.elem != nil {
    			asanwrite(cas.elem, c.elemtype.Size_)
    		}
    	}
    
    	selunlock(scases, lockorder)
    	goto retc
    
    bufrecv:
    Last Modified: Wed Mar 13 21:36:04 UTC 2024 (15K bytes)
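Here the channel's element size (c.elemtype.Size_) tells the sanitizers how many bytes a select case reads or writes; these branches only run when the program is built with -race, -msan, or -asan. A hedged sketch of code that exercises a buffered select send:

    package main

    import "fmt"

    func main() {
    	ch := make(chan [16]byte, 1)
    	var v [16]byte
    	select {
    	case ch <- v:
    		// The send copies elemtype.Size_ (16) bytes into the buffer.
    		fmt.Println("sent")
    	default:
    		fmt.Println("would block")
    	}
    }
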
  6. src/runtime/mbitmap.go

    	bits := span.elemsize / goarch.PtrSize
    	scanSize = typ.PtrBytes
    	src := src0
    	switch typ.Size_ {
    	case goarch.PtrSize:
    		src = (1 << (dataSize / goarch.PtrSize)) - 1
    	default:
    		for i := typ.Size_; i < dataSize; i += typ.Size_ {
    			src |= src0 << (i / goarch.PtrSize)
    			scanSize += typ.Size_
    		}
    	}
    
    	// Since we're never writing more than one uintptr's worth of bits, we're either going
    Last Modified: Thu May 23 00:18:55 UTC 2024 (60K bytes)
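The loop above replicates a single element's pointer bitmap across the allocation by shifting it Size_/PtrSize bits per element. A hypothetical standalone sketch of the same replication (replicateMask and its parameters are made up; the real code operates on span heap bits):

    package main

    import "fmt"

    // replicateMask repeats the one-element pointer mask src0 once per
    // element, one bit per pointer-sized word, as in the loop above.
    func replicateMask(src0, elemSize, dataSize, ptrSize uintptr) uintptr {
    	src := src0
    	for i := elemSize; i < dataSize; i += elemSize {
    		src |= src0 << (i / ptrSize)
    	}
    	return src
    }

    func main() {
    	// 16-byte elements with a pointer in the first word, 64 bytes of data.
    	fmt.Printf("%08b\n", replicateMask(0b01, 16, 64, 8)) // 01010101
    }
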
  7. src/runtime/syscall_windows.go

    			srcStackOffset: p.srcStackSize,
    			dstStackOffset: p.dstStackSize,
    			len:            t.Size_,
    		}
    		// Add this step to the adapter.
    		if len(p.parts) == 0 || !p.parts[len(p.parts)-1].tryMerge(part) {
    			p.parts = append(p.parts, part)
    		}
    		// The Go ABI packs arguments.
    		p.dstStackSize += t.Size_
    	}
    
    	// cdecl, stdcall, fastcall, and arm pad arguments to word size.
    Last Modified: Wed May 22 20:12:46 UTC 2024 (16.6K bytes)
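Each callback argument contributes a copy step of t.Size_ bytes to the adapter that moves values from the C stack into the Go frame. A Windows-only sketch of the user-facing entry point, syscall.NewCallback, which triggers this compilation (it will not build on other platforms):

    //go:build windows

    package main

    import "syscall"

    func main() {
    	// NewCallback compiles the argument-copying steps shown above; each
    	// parameter contributes its Size_ to the destination stack layout.
    	cb := syscall.NewCallback(func(hwnd, msg, wparam, lparam uintptr) uintptr {
    		return 0
    	})
    	_ = cb
    }
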
  8. pkg/kubelet/apis/podresources/server_v1_test.go

    		},
    	}
    
    	cpus := []int64{12, 23, 30}
    
    	memory := []*podresourcesapi.ContainerMemory{
    		{
    			MemoryType: "memory",
    			Size_:      1073741824,
    			Topology:   &podresourcesapi.TopologyInfo{Nodes: []*podresourcesapi.NUMANode{{ID: numaID}}},
    		},
    		{
    			MemoryType: "hugepages-1Gi",
    			Size_:      1073741824,
    			Topology:   &podresourcesapi.TopologyInfo{Nodes: []*podresourcesapi.NUMANode{{ID: numaID}}},
    		},
    	}
    
    Last Modified: Wed Apr 24 18:25:29 UTC 2024 (45.9K bytes)
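In the pod resources API, ContainerMemory.Size_ is the amount of memory in bytes for each memory type; the trailing underscore is an artifact of the generated protobuf code, where a plain Size field would collide with the generated Size() method. A hedged consumer-side sketch that sums the values constructed in the test above:

    // Assuming the memory slice constructed in the test above, where
    // podresourcesapi is k8s.io/kubelet/pkg/apis/podresources/v1:
    totals := map[string]uint64{}
    for _, m := range memory {
    	totals[m.MemoryType] += m.Size_ // bytes per memory type
    }
    // totals["memory"] == 1073741824, totals["hugepages-1Gi"] == 1073741824
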
  9. pkg/kubelet/kuberuntime/kuberuntime_image.go

    	if err != nil {
    		klog.ErrorS(err, "Failed to get image status", "image", image.Image)
    		return 0, err
    	}
    	if resp.Image == nil {
    		return 0, nil
    	}
    	return resp.Image.Size_, nil
    }
    
    // ListImages gets all images currently on the machine.
    func (m *kubeGenericRuntimeManager) ListImages(ctx context.Context) ([]kubecontainer.Image, error) {
    	var images []kubecontainer.Image
    
    Last Modified: Thu Feb 08 00:30:31 UTC 2024 (5.9K bytes)
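The function above returns resp.Image.Size_, the image size in bytes reported by the CRI image status call. A hedged sketch of the same nil handling as a standalone helper (runtimeapi is assumed to be k8s.io/cri-api/pkg/apis/runtime/v1):

    // imageSizeBytes mirrors the nil handling above; ImageStatusResponse and
    // its Image.Size_ field are from the CRI v1 API.
    func imageSizeBytes(resp *runtimeapi.ImageStatusResponse) uint64 {
    	if resp == nil || resp.Image == nil {
    		return 0
    	}
    	return resp.Image.Size_ // size of the image in bytes
    }
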
  10. src/runtime/map.go

    	}
    	if t.Key.Size_ > abi.MapMaxKeyBytes && (!t.IndirectKey() || t.KeySize != uint8(goarch.PtrSize)) ||
    		t.Key.Size_ <= abi.MapMaxKeyBytes && (t.IndirectKey() || t.KeySize != uint8(t.Key.Size_)) {
    		throw("key size wrong")
    	}
    	if t.Elem.Size_ > abi.MapMaxElemBytes && (!t.IndirectElem() || t.ValueSize != uint8(goarch.PtrSize)) ||
    Last Modified: Wed May 29 17:58:53 UTC 2024 (57.6K bytes)
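The check above enforces that map keys or elements larger than abi.MapMaxKeyBytes / abi.MapMaxElemBytes are stored indirectly, in which case the in-bucket slot is pointer-sized (KeySize == PtrSize). The limit is not observable directly, but here is a small sketch of a map whose key type takes the indirect path (128 bytes is the limit at the time of writing and is an implementation detail):

    package main

    import "fmt"

    func main() {
    	// A key type larger than MapMaxKeyBytes is stored indirectly: the
    	// bucket slot holds a pointer to the key, which is exactly the
    	// invariant the consistency check above verifies.
    	type bigKey [200]byte
    	m := map[bigKey]int{}
    	m[bigKey{1}] = 42
    	fmt.Println(len(m))
    }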