Results 1 - 10 of 11 for typedmemmove (0.17 sec)

  1. src/runtime/mbarrier.go

    // remove the deletion barrier, we'll have to work out a new way to
    // handle the profile logging.
    
    // typedmemmove copies a value of type typ to dst from src.
    // Must be nosplit, see #16026.
    //
    // TODO: Perfect for go:nosplitrec since we can't have a safe point
    // anywhere in the bulk barrier or memmove.
    //
    // typedmemmove should be an internal detail,
    // but widely used packages access it using linkname.
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Wed May 29 17:58:53 UTC 2024
    - 15.7K bytes
    - Viewed (0)
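
    The mbarrier.go comment above describes typedmemmove as a write-barrier-aware copy: tell the GC about the pointer slots being overwritten, then do the plain move. A minimal conceptual sketch of that shape follows; it is not the runtime's implementation, and sketchType, bulkWriteBarrierSketch, and rawMemmove are invented placeholder names.

    package sketch

    import "unsafe"

    // Placeholders invented for this sketch; not the runtime's declarations.
    type sketchType struct {
        size        uintptr
        hasPointers bool
    }

    func bulkWriteBarrierSketch(dst, src unsafe.Pointer, size uintptr) {
        // Placeholder: the real runtime records the pointer slots in
        // [dst, dst+size) so the garbage collector observes the overwrite.
    }

    func rawMemmove(dst, src unsafe.Pointer, size uintptr) {
        copy(unsafe.Slice((*byte)(dst), size), unsafe.Slice((*byte)(src), size))
    }

    // typedCopySketch mirrors the shape described in the comment above:
    // announce the pointer-containing destination to the GC first, then
    // perform the raw copy.
    func typedCopySketch(typ *sketchType, dst, src unsafe.Pointer) {
        if typ.hasPointers {
            bulkWriteBarrierSketch(dst, src, typ.size)
        }
        rawMemmove(dst, src, typ.size)
    }
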
  2. src/internal/reflectlite/value.go

    	return add(p, uintptr(i)*eltSize, "i < len")
    }
    
    func ifaceE2I(t *abi.Type, src any, dst unsafe.Pointer)
    
    // typedmemmove copies a value of type t to dst from src.
    //
    //go:noescape
    func typedmemmove(t *abi.Type, dst, src unsafe.Pointer)
    
    // Dummy annotation marking that the value x escapes,
    // for use in cases where the reflect code is so clever that
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Tue May 07 17:01:54 UTC 2024
    - 13.6K bytes
    - Viewed (0)
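
    The declaration above is how reflectlite reaches the runtime's typedmemmove (provided via linkname, per the mbarrier.go comment in the first result). At the user level, reflect.Value.Set performs this kind of typed copy; the small program below only illustrates that path, and the pair type is an arbitrary example.

    package main

    import (
        "fmt"
        "reflect"
    )

    func main() {
        type pair struct {
            Name *string
            N    int
        }
        name := "gopher"
        src := pair{Name: &name, N: 1}
        var dst pair

        // Set copies src into dst as a typed value; inside the runtime,
        // copies like this are what typedmemmove performs.
        reflect.ValueOf(&dst).Elem().Set(reflect.ValueOf(src))
        fmt.Println(*dst.Name, dst.N) // gopher 1
    }
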
  3. src/runtime/map_fast64.go

    					} else {
    						// There are three ways to squeeze at least one 32 bit pointer into 64 bits.
    						// Give up and call typedmemmove.
    						typedmemmove(t.Key, dst.k, k)
    					}
    				} else {
    					*(*uint64)(dst.k) = *(*uint64)(k)
    				}
    
    				typedmemmove(t.Elem, dst.e, e)
    				dst.i++
    				// These updates might push these pointers past the end of the
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Thu May 23 01:17:26 UTC 2024
    - 14.1K bytes
    - Viewed (0)
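
    The map_fast64.go snippet above is from bucket evacuation during map growth: pointer-free uint64 keys can be moved with a raw 8-byte store, while elements that may contain pointers go through typedmemmove. The program below merely exercises that path by forcing several growths; the elem type and the 1<<12 insert count are arbitrary choices for the illustration.

    package main

    import "fmt"

    func main() {
        type elem struct{ p *int }

        // Repeated inserts grow the map; during growth the runtime
        // evacuates buckets, moving each pointer-containing element with a
        // typed copy while the uint64 keys are moved with raw stores.
        m := make(map[uint64]elem)
        for i := uint64(0); i < 1<<12; i++ {
            v := int(i)
            m[i] = elem{p: &v}
        }
        fmt.Println(len(m)) // 4096
    }
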
  4. src/runtime/chan.go

    		if raceenabled {
    			racenotify(c, c.recvx, nil)
    			racenotify(c, c.recvx, sg)
    		}
    		// copy data from queue to receiver
    		if ep != nil {
    			typedmemmove(c.elemtype, ep, qp)
    		}
    		// copy data from sender to queue
    		typedmemmove(c.elemtype, qp, sg.elem)
    		c.recvx++
    		if c.recvx == c.dataqsiz {
    			c.recvx = 0
    		}
    		c.sendx = c.recvx // c.sendx = (c.sendx+1) % c.dataqsiz
    	}
    	sg.elem = nil
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Thu May 23 01:16:50 UTC 2024
    - 25.9K bytes
    - Viewed (0)
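
    The chan.go snippet above handles a receive on a full buffered channel while a sender is waiting: the element at recvx is copied to the receiver, the blocked sender's value is copied into the vacated slot, and recvx/sendx wrap to 0 at dataqsiz. The standalone ring buffer below sketches only that index arithmetic; ringBuffer and its methods are invented for the illustration and are not the runtime's hchan.

    package sketch

    // ringBuffer mimics the circular indexing seen in chan.go: recvx and
    // sendx wrap to 0 when they reach the buffer length.
    type ringBuffer[T any] struct {
        buf          []T
        recvx, sendx int
        count        int
    }

    func newRingBuffer[T any](n int) *ringBuffer[T] {
        return &ringBuffer[T]{buf: make([]T, n)}
    }

    func (r *ringBuffer[T]) send(v T) bool {
        if r.count == len(r.buf) {
            return false // full; the runtime parks the sender instead
        }
        r.buf[r.sendx] = v // the runtime copies here with typedmemmove
        r.sendx++
        if r.sendx == len(r.buf) {
            r.sendx = 0 // same wraparound as c.sendx/c.recvx above
        }
        r.count++
        return true
    }

    func (r *ringBuffer[T]) recv() (T, bool) {
        var zero T
        if r.count == 0 {
            return zero, false
        }
        v := r.buf[r.recvx]
        r.buf[r.recvx] = zero // the runtime clears the slot with typedmemclr
        r.recvx++
        if r.recvx == len(r.buf) {
            r.recvx = 0
        }
        r.count--
        return v, true
    }
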
  5. src/runtime/select.go

    		msanwrite(cas.elem, c.elemtype.Size_)
    	}
    	if asanenabled && cas.elem != nil {
    		asanwrite(cas.elem, c.elemtype.Size_)
    	}
    	recvOK = true
    	qp = chanbuf(c, c.recvx)
    	if cas.elem != nil {
    		typedmemmove(c.elemtype, cas.elem, qp)
    	}
    	typedmemclr(c.elemtype, qp)
    	c.recvx++
    	if c.recvx == c.dataqsiz {
    		c.recvx = 0
    	}
    	c.qcount--
    	selunlock(scases, lockorder)
    	goto retc
    
    bufsend:
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Wed Mar 13 21:36:04 UTC 2024
    - 15K bytes
    - Viewed (0)
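
    In the select.go snippet above, a ready buffered receive copies the element out with typedmemmove and then zeroes the vacated slot with typedmemclr, so the channel buffer no longer keeps the value's pointers reachable. The short program below just exercises that case; the channel and values are arbitrary.

    package main

    import "fmt"

    func main() {
        ch := make(chan *string, 2)
        s := "buffered"
        ch <- &s

        // A ready receive from a buffered channel inside select takes the
        // path shown above: copy the element to the case's target, then
        // clear the buffer slot.
        select {
        case got := <-ch:
            fmt.Println(*got)
        default:
            fmt.Println("empty")
        }
    }
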
  6. src/cmd/compile/internal/typecheck/_builtin/runtime.go

    func chanlen(hchan any) int
    func chancap(hchan any) int
    
    var writeBarrier struct {
    	enabled bool
    	pad     [3]byte
    	cgo     bool
    	alignme uint64
    }
    
    // *byte is really *runtime.Type
    func typedmemmove(typ *byte, dst *any, src *any)
    func typedmemclr(typ *byte, dst *any)
    func typedslicecopy(typ *byte, dstPtr *any, dstLen int, srcPtr *any, srcLen int) int
    
    func selectnbsend(hchan chan<- any, elem *any) bool
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Tue May 21 21:08:03 UTC 2024
    - 10.6K bytes
    - Viewed (0)
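
    The declarations above are the compiler-facing signatures: ordinary assignments and copies of pointer-containing values are lowered to calls like these. The example below shows code that can take those paths; whether a given assignment becomes a typedmemmove call or inline write barriers depends on the type's size and layout, and the node type here is only an illustration.

    package main

    import "fmt"

    type node struct {
        next *node
        val  int
    }

    func main() {
        a := node{val: 1}
        b := node{val: 2, next: &a}

        // Copying a value whose type contains pointers requires a typed
        // copy so the GC sees the pointer store.
        a = b

        dst := make([]node, 2)
        // copy on slices whose element type contains pointers goes through
        // typedslicecopy in the runtime.
        n := copy(dst, []node{a, b})
        fmt.Println(n, dst[0].val, dst[1].val) // 2 2 2
    }
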
  7. src/runtime/map_fast32.go

    					// Write with a write barrier.
    					*(*unsafe.Pointer)(dst.k) = *(*unsafe.Pointer)(k)
    				} else {
    					*(*uint32)(dst.k) = *(*uint32)(k)
    				}
    
    				typedmemmove(t.Elem, dst.e, e)
    				dst.i++
    				// These updates might push these pointers past the end of the
    				// key or elem arrays.  That's ok, as we have the overflow pointer
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Thu May 23 01:17:26 UTC 2024
    - 13.9K bytes
    - Viewed (0)
  8. src/cmd/compile/internal/typecheck/builtin.go

    	{"chanrecv2", funcTag, 102},
    	{"chansend1", funcTag, 104},
    	{"closechan", funcTag, 105},
    	{"chanlen", funcTag, 106},
    	{"chancap", funcTag, 106},
    	{"writeBarrier", varTag, 108},
    	{"typedmemmove", funcTag, 109},
    	{"typedmemclr", funcTag, 110},
    	{"typedslicecopy", funcTag, 111},
    	{"selectnbsend", funcTag, 112},
    	{"selectnbrecv", funcTag, 113},
    	{"selectsetpc", funcTag, 114},
    	{"selectgo", funcTag, 115},
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Tue May 21 21:08:03 UTC 2024
    - 16.2K bytes
    - Viewed (0)
  9. src/runtime/map_faststr.go

    				}
    				dst.b.tophash[dst.i&(abi.MapBucketCount-1)] = top // mask dst.i as an optimization, to avoid a bounds check
    
    				// Copy key.
    				*(*string)(dst.k) = *(*string)(k)
    
    				typedmemmove(t.Elem, dst.e, e)
    				dst.i++
    				// These updates might push these pointers past the end of the
    				// key or elem arrays.  That's ok, as we have the overflow pointer
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Thu May 23 01:17:26 UTC 2024
    - 15.3K bytes
    - Viewed (0)
  10. src/runtime/iface.go

    		raceReadObjectPC(t, v, getcallerpc(), abi.FuncPCABIInternal(convT))
    	}
    	if msanenabled {
    		msanread(v, t.Size_)
    	}
    	if asanenabled {
    		asanread(v, t.Size_)
    	}
    	x := mallocgc(t.Size_, t, true)
    	typedmemmove(t, x, v)
    	return x
    }
    func convTnoptr(t *_type, v unsafe.Pointer) unsafe.Pointer {
    	// TODO: maybe take size instead of type?
    	if raceenabled {
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Wed May 29 17:58:53 UTC 2024
    - 22.5K bytes
    - Viewed (0)
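
    In the iface.go snippet above, convT boxes a non-pointer value into an interface: it allocates with mallocgc and copies the value in with typedmemmove (convTnoptr is the variant for pointer-free types). The example below shows a conversion that generally takes that path; the payload type is arbitrary, and the compiler can sometimes avoid the allocation entirely.

    package main

    import "fmt"

    type payload struct {
        data *[]byte
        id   int
    }

    func main() {
        buf := []byte("hello")
        p := payload{data: &buf, id: 7}

        // Boxing a pointer-containing struct into an interface value:
        // allocate, then copy p into the allocation with a typed copy.
        var i any = p

        fmt.Println(i.(payload).id) // 7
    }
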