- Sort Score
- Results per page: 10
- Languages All
Results 1 - 10 of 449 for memclr (0.11 sec)
-
test/codegen/issue52635.go
// asmcheck // Copyright 2022 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. // Test that optimized range memclr works with pointers to arrays. // The clears get inlined, see https://github.com/golang/go/issues/56997 package codegen type T struct { a *[10]int b [10]int } func (t *T) f() { // amd64:-".*runtime.memclrNoHeapPointers"
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Tue Jan 31 18:11:24 UTC 2023 - 816 bytes - Viewed (0) -
test/codegen/slices.go
} func SliceExtensionInt64(s []int, l64 int64) []int { // 386:`.*runtime\.makeslice` // 386:-`.*runtime\.memclr` return append(s, make([]int, l64)...) } // ------------------ // // Make+Copy // // ------------------ // // Issue #26252 - avoid memclr for make+copy func SliceMakeCopyLen(s []int) []int { // amd64:`.*runtime\.mallocgc` // amd64:`.*runtime\.memmove`
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Thu Feb 23 18:57:27 UTC 2023 - 9.8K bytes - Viewed (0) -
src/runtime/memclr_386.s
TEXT runtime·memclrNoHeapPointers(SB), NOSPLIT, $0-8 MOVL ptr+0(FP), DI MOVL n+4(FP), BX XORL AX, AX // MOVOU seems always faster than REP STOSL. tail: // BSR+branch table make almost all memmove/memclr benchmarks worse. Not worth doing. TESTL BX, BX JEQ _0 CMPL BX, $2 JBE _1or2 CMPL BX, $4 JB _3 JE _4 CMPL BX, $8 JBE _5through8 CMPL BX, $16 JBE _9through16 #ifdef GO386_softfloat
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Sat Nov 06 10:24:44 UTC 2021 - 2.4K bytes - Viewed (0) -
src/runtime/memclr_amd64.s
// AX = ptr // BX = n MOVQ AX, DI // DI = ptr XORQ AX, AX // MOVOU seems always faster than REP STOSQ when Enhanced REP STOSQ is not available. tail: // BSR+branch table make almost all memmove/memclr benchmarks worse. Not worth doing. TESTQ BX, BX JEQ _0 CMPQ BX, $2 JBE _1or2 CMPQ BX, $4 JBE _3or4 CMPQ BX, $8 JB _5through7 JE _8 CMPQ BX, $16 JBE _9through16 CMPQ BX, $32
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Tue May 10 20:52:34 UTC 2022 - 4.9K bytes - Viewed (0) -
src/runtime/memmove_386.s
// 128 because that is the maximum SSE register load (loading all data // into registers lets us ignore copy direction). tail: // BSR+branch table make almost all memmove/memclr benchmarks worse. Not worth doing. TESTL BX, BX JEQ move_0 CMPL BX, $2 JBE move_1or2 CMPL BX, $4 JB move_3 JE move_4 CMPL BX, $8 JBE move_5through8 CMPL BX, $16 JBE move_9through16
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Sat Nov 06 10:24:44 UTC 2021 - 4.4K bytes - Viewed (0) -
src/cmd/compile/internal/walk/range.go
} // arrayClear constructs a call to runtime.memclr for fast zeroing of slices and arrays. func arrayClear(wbPos src.XPos, a ir.Node, nrange *ir.RangeStmt) ir.Node { elemsize := typecheck.RangeExprType(a.Type()).Elem().Size() if elemsize <= 0 { return nil } // Convert to // if len(a) != 0 { // hp = &a[0] // hn = len(a)*sizeof(elem(a)) // memclr{NoHeap,Has}Pointers(hp, hn) // i = len(a) - 1 // }
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Wed Sep 20 14:52:33 UTC 2023 - 17.6K bytes - Viewed (0) -
src/cmd/compile/internal/walk/assign.go
// s = growslice(T, s.ptr, n, s.cap, l2, T) // } // // clear the new portion of the underlying array. // hp := &s[len(s)-l2] // hn := l2 * sizeof(T) // memclr(hp, hn) // } // } // s // // if T has pointers, the final memclr can go inside the "then" branch, as // growslice will have done the clearing for us. func extendSlice(n *ir.CallExpr, init *ir.Nodes) ir.Node {
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Wed May 08 17:09:06 UTC 2024 - 20.3K bytes - Viewed (0) -
src/runtime/memmove_amd64.s
// registers before writing it back. move_256through2048 on the other // hand can be used only when the memory regions don't overlap or the copy // direction is forward. // // BSR+branch table make almost all memmove/memclr benchmarks worse. Not worth doing. TESTQ BX, BX JEQ move_0 CMPQ BX, $2 JBE move_1or2 CMPQ BX, $4 JB move_3 JBE move_4 CMPQ BX, $8 JB move_5through7 JE move_8 CMPQ BX, $16
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Sun Apr 10 15:52:08 UTC 2022 - 12.5K bytes - Viewed (0) -
platforms/jvm/normalization-java/src/main/java/org/gradle/internal/normalization/java/impl/Member.java
* bytecode manipulation libraries tools like ASM. * * <p>The notion of "member" here is similar to, but broader than * {@link java.lang.reflect.Member}. The latter is essentially an abstraction over fields, * methods and constructors; this Member and its subtypes represent not only fields and * methods, but also classes, inner classes, annotations and their values, and more. This
Registered: Wed Jun 12 18:38:38 UTC 2024 - Last Modified: Thu Sep 28 15:09:49 UTC 2023 - 1.8K bytes - Viewed (0) -
src/runtime/mbitmap.go
// The range [dst, dst+size) must lie within a single object. // It does not perform the actual writes. // // As a special case, src == 0 indicates that this is being used for a // memclr. bulkBarrierPreWrite will pass 0 for the src of each write // barrier. // // Callers should call bulkBarrierPreWrite immediately before // calling memmove(dst, src, size). This function is marked nosplit
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Thu May 23 00:18:55 UTC 2024 - 60K bytes - Viewed (0)