Results 11 - 20 of 26 for Size_ (0.21 sec)
src/runtime/arena.go
func userArenaHeapBitsSetSliceType(typ *_type, n int, ptr unsafe.Pointer, s *mspan) {
	mem, overflow := math.MulUintptr(typ.Size_, uintptr(n))
	if overflow || n < 0 || mem > maxAlloc {
		panic(plainError("runtime: allocation size out of range"))
	}
	for i := 0; i < n; i++ {
		userArenaHeapBitsSetType(typ, add(ptr, uintptr(i)*typ.Size_), s)
	}
}

// userArenaHeapBitsSetType is the equivalent of heapSetType but for
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Wed May 08 17:44:56 UTC 2024 - 37.9K bytes - Viewed (0)
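A minimal sketch of the same guard using the public math/bits API in place of the runtime-internal math.MulUintptr; the maxAlloc cap below is a hypothetical placeholder, since the runtime's real limit is architecture-dependent.

package main

import (
	"fmt"
	"math/bits"
)

const maxAlloc = uint64(1) << 40 // hypothetical cap; the real limit is arch-dependent

// checkedSliceBytes returns elemSize*n in bytes, reporting false on a
// negative count, multiplication overflow, or a result beyond maxAlloc,
// mirroring the checks in userArenaHeapBitsSetSliceType.
func checkedSliceBytes(elemSize uintptr, n int) (uintptr, bool) {
	if n < 0 {
		return 0, false
	}
	hi, lo := bits.Mul64(uint64(elemSize), uint64(n))
	if hi != 0 || lo > maxAlloc {
		return 0, false
	}
	return uintptr(lo), true
}

func main() {
	fmt.Println(checkedSliceBytes(8, 10))          // 80 true
	fmt.Println(checkedSliceBytes(^uintptr(0), 2)) // 0 false: overflows
}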
tensorflow/compiler/mlir/lite/stablehlo/transforms/hlo_matchers.cc
  assert(axis >= 0);
  assert(index[axis] == 0);
  offset_ = IndexToOffset(shape, index);
  stride_ = StrideForAxis(shape, axis);
  size_ = shape[axis];
}

// Returns the size of the 1-d slice across the tensor.
int64_t size() const { return size_; }

// Calculates the next index in a tensor excluding a specified axis.
//
// Returns the next index where one exists.
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 11.6K bytes - Viewed (0)
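The C++ helper above walks a 1-d slice of a tensor along one axis. Below is a rough Go analogue, assuming a dense row-major (C-order) layout; strideForAxis and indexToOffset mirror the assumed semantics of StrideForAxis and IndexToOffset, not TensorFlow's actual implementation.

package main

import "fmt"

// strideForAxis returns the distance, in flat elements, between successive
// entries along axis in a row-major tensor of the given shape.
func strideForAxis(shape []int64, axis int) int64 {
	stride := int64(1)
	for i := len(shape) - 1; i > axis; i-- {
		stride *= shape[i]
	}
	return stride
}

// indexToOffset flattens a multi-dimensional index into a row-major offset.
func indexToOffset(shape, index []int64) int64 {
	offset := int64(0)
	for i, ix := range index {
		offset = offset*shape[i] + ix
	}
	return offset
}

func main() {
	shape := []int64{2, 3, 4}
	// The 1-d slice along axis 1 starting at index {1, 0, 2}:
	offset := indexToOffset(shape, []int64{1, 0, 2})
	stride := strideForAxis(shape, 1)
	for k := int64(0); k < shape[1]; k++ {
		fmt.Println(offset + k*stride) // prints 14, 18, 22
	}
}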
src/runtime/alg.go
if t.TFlag&abi.TFlagRegularMemory != 0 {
	// Handle ptr sizes specially, see issue 37086.
	switch t.Size_ {
	case 4:
		return memhash32(p, h)
	case 8:
		return memhash64(p, h)
	default:
		return memhash(p, h, t.Size_)
	}
}
switch t.Kind_ & abi.KindMask {
case abi.Float32:
	return f32hash(p, h)
case abi.Float64:
	return f64hash(p, h)
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Wed May 29 17:58:53 UTC 2024 - 13.6K bytes - Viewed (0)
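The snippet dispatches on t.Size_ so common 4- and 8-byte values skip the generic memory hash. A toy restatement of that dispatch shape follows; the mix function and fallback are stand-ins (a MurmurHash3-style finalizer and FNV-1a), not the runtime's AES-assisted memhash32/memhash64.

package main

import (
	"fmt"
	"hash/fnv"
	"unsafe"
)

// hashFixed is a cheap stand-in fast path for word-sized values
// (the fmix64 finalizer from MurmurHash3).
func hashFixed(x uint64) uint64 {
	x ^= x >> 33
	x *= 0xff51afd7ed558ccd
	x ^= x >> 33
	return x
}

// hashMem is the generic slow path over arbitrary bytes.
func hashMem(p unsafe.Pointer, size uintptr) uint64 {
	h := fnv.New64a()
	h.Write(unsafe.Slice((*byte)(p), size))
	return h.Sum64()
}

// hashValue mirrors the switch on t.Size_ above: fixed-size fast paths
// first, the generic path otherwise.
func hashValue(p unsafe.Pointer, size uintptr) uint64 {
	switch size {
	case 4:
		return hashFixed(uint64(*(*uint32)(p)))
	case 8:
		return hashFixed(*(*uint64)(p))
	default:
		return hashMem(p, size)
	}
}

func main() {
	x := uint64(42)
	fmt.Println(hashValue(unsafe.Pointer(&x), unsafe.Sizeof(x)))
}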
src/runtime/checkptr.go
	}

	// Check that (*[n]elem)(p) doesn't straddle multiple heap objects.
	// TODO(mdempsky): Fix #46938 so we don't need to worry about overflow here.
	if checkptrStraddles(p, n*elem.Size_) {
		throw("checkptr: converted pointer straddles multiple allocations")
	}
}

// checkptrStraddles reports whether the first size-bytes of memory
// addressed by ptr is known to straddle more than one Go allocation.
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Thu May 23 00:18:55 UTC 2024 - 3.6K bytes - Viewed (0)
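The straddling check fires on conversions like the one below: reinterpreting a pointer to one small heap object as a pointer to a larger array. This deliberately incorrect sketch should make the runtime throw as in the snippet when checkptr instrumentation is on (enabled by default under -race, or via -gcflags=all=-d=checkptr).

package main

import (
	"fmt"
	"unsafe"
)

var sink *int64 // forces x to be heap-allocated

func main() {
	x := new(int64) // a single 8-byte allocation
	sink = x
	// (*[4]int64)(p) asserts 32 readable bytes, straddling past the object.
	bad := (*[4]int64)(unsafe.Pointer(x))
	fmt.Println(bad[0])
}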
src/runtime/export_debug_test.go
	return nil, plainError("args must be a pointer or nil")
}
argp := a.data
var argSize uintptr
if argp != nil {
	argSize = (*ptrtype)(unsafe.Pointer(a._type)).Elem.Size_
}
h := new(debugCallHandler)
h.gp = gp
// gp may not be running right now, but we can still get the M
// it will run on since it's locked.
h.mp = gp.lockedm.ptr()
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Fri May 17 15:41:45 UTC 2024 - 5.1K bytes - Viewed (0)
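Here the debugger support code digs the pointed-to size out of the type descriptor by hand ((*ptrtype)(...).Elem.Size_). Outside the runtime, the same question is answered with reflection; a small sketch:

package main

import (
	"fmt"
	"reflect"
)

// argSize reports the size of what a pointer argument points to,
// mirroring the argSize computation in the snippet above.
func argSize(args any) uintptr {
	if args == nil {
		return 0
	}
	t := reflect.TypeOf(args)
	if t.Kind() != reflect.Pointer {
		panic("args must be a pointer or nil")
	}
	return t.Elem().Size()
}

func main() {
	var payload struct{ a, b int64 }
	fmt.Println(argSize(&payload)) // 16
	fmt.Println(argSize(nil))      // 0
}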
src/runtime/map_fast64.go
		memclrHasPointers(k, 8)
	}
}
e := add(unsafe.Pointer(b), dataOffset+abi.MapBucketCount*8+i*uintptr(t.ValueSize))
if t.Elem.Pointers() {
	memclrHasPointers(e, t.Elem.Size_)
} else {
	memclrNoHeapPointers(e, t.Elem.Size_)
}
b.tophash[i] = emptyOne
// If the bucket now ends in a bunch of emptyOne states,
// change those to emptyRest states.
if i == abi.MapBucketCount-1 {
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Thu May 23 01:17:26 UTC 2024 - 14.1K bytes - Viewed (0)
src/runtime/map_fast32.go
	*(*unsafe.Pointer)(k) = nil
}
e := add(unsafe.Pointer(b), dataOffset+abi.MapBucketCount*4+i*uintptr(t.ValueSize))
if t.Elem.Pointers() {
	memclrHasPointers(e, t.Elem.Size_)
} else {
	memclrNoHeapPointers(e, t.Elem.Size_)
}
b.tophash[i] = emptyOne
// If the bucket now ends in a bunch of emptyOne states,
// change those to emptyRest states.
if i == abi.MapBucketCount-1 {
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Thu May 23 01:17:26 UTC 2024 - 13.9K bytes - Viewed (0)
src/runtime/map_faststr.go
k.str = nil
e := add(unsafe.Pointer(b), dataOffset+abi.MapBucketCount*2*goarch.PtrSize+i*uintptr(t.ValueSize))
if t.Elem.Pointers() {
	memclrHasPointers(e, t.Elem.Size_)
} else {
	memclrNoHeapPointers(e, t.Elem.Size_)
}
b.tophash[i] = emptyOne
// If the bucket now ends in a bunch of emptyOne states,
// change those to emptyRest states.
if i == abi.MapBucketCount-1 {
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Thu May 23 01:17:26 UTC 2024 - 15.3K bytes - Viewed (0)
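This result and the two map_fast variants above share one pattern: on key deletion the value slot is wiped with memclrHasPointers only if the element type can hold pointers (so the garbage collector stays informed), and with the cheaper memclrNoHeapPointers otherwise. The runtime reads that property from t.Elem.Pointers(); the sketch below reimplements a conservative version of the same predicate with reflection, purely for illustration.

package main

import (
	"fmt"
	"reflect"
)

// hasPointers conservatively reports whether values of type t can contain
// pointers the GC must track; a stand-in for the runtime's t.Elem.Pointers().
func hasPointers(t reflect.Type) bool {
	switch t.Kind() {
	case reflect.Bool,
		reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64,
		reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32,
		reflect.Uint64, reflect.Uintptr,
		reflect.Float32, reflect.Float64,
		reflect.Complex64, reflect.Complex128:
		return false
	case reflect.Array:
		return t.Len() > 0 && hasPointers(t.Elem())
	case reflect.Struct:
		for i := 0; i < t.NumField(); i++ {
			if hasPointers(t.Field(i).Type) {
				return true
			}
		}
		return false
	default:
		// Pointers, strings, slices, maps, chans, funcs, interfaces.
		return true
	}
}

func main() {
	fmt.Println(hasPointers(reflect.TypeOf(int64(0))))     // false: memclrNoHeapPointers path
	fmt.Println(hasPointers(reflect.TypeOf("")))           // true: memclrHasPointers path
	fmt.Println(hasPointers(reflect.TypeOf([4]float64{}))) // false
}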
src/runtime/race.go
kind := t.Kind_ & abi.KindMask
if kind == abi.Array || kind == abi.Struct {
	// for composite objects we have to read every address
	// because a write might happen to any subobject.
	racereadrangepc(addr, t.Size_, callerpc, pc)
} else {
	// for non-composite objects we can read just the start
	// address, as any write must write the first byte.
	racereadpc(addr, callerpc, pc)
}
}
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Fri May 17 18:37:29 UTC 2024 - 20.4K bytes - Viewed (0)
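The comment explains why composite objects need a range read: a racing write may touch any subobject, not just the first byte. A minimal program exercising exactly that case; run it with `go run -race` and the detector should report the read of the whole struct racing with the write to one field.

package main

import (
	"fmt"
	"sync"
)

type pair struct{ a, b int }

func main() {
	var p pair
	var wg sync.WaitGroup
	wg.Add(1)
	go func() {
		defer wg.Done()
		p.b = 1 // a write to one subobject...
	}()
	fmt.Println(p) // ...races with a read of the whole composite value
	wg.Wait()
}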
src/runtime/mfinal.go
	// As an implementation detail we allow to set finalizers for an inner byte
	// of an object if it could come from tiny alloc (see mallocgc for details).
	if ot.Elem == nil || ot.Elem.Pointers() || ot.Elem.Size_ >= maxTinySize {
		throw("runtime.SetFinalizer: pointer not at beginning of allocated block")
	}
}
f := efaceOf(&finalizer)
ftyp := f._type
if ftyp == nil {
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Fri Jun 07 01:56:56 UTC 2024 - 19K bytes - Viewed (0)
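The check above is why runtime.SetFinalizer insists on a pointer to the first byte of an allocation. Ordinary use looks like the sketch below; note that finalizer execution is only triggered by garbage collection and is not guaranteed before program exit.

package main

import (
	"fmt"
	"runtime"
	"time"
)

type resource struct {
	id int
}

func main() {
	r := &resource{id: 1}
	runtime.SetFinalizer(r, func(r *resource) {
		fmt.Println("finalizing resource", r.id)
	})
	r = nil                           // drop the last reference
	runtime.GC()                      // queue the finalizer once the object is found dead
	time.Sleep(10 * time.Millisecond) // give the finalizer goroutine a chance to run
}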