- Sort Score
- Result 10 results
- Languages All
Results 51 - 60 of 282 for frequently (0.28 sec)
-
src/runtime/race_amd64.s
TEXT runtime·racecallbackthunk(SB), NOSPLIT|NOFRAME, $0-0 // Handle command raceGetProcCmd (0) here. // First, code below assumes that we are on curg, while raceGetProcCmd // can be executed on g0. Second, it is called frequently, so will // benefit from this fast path. CMPQ RARG0, $0 JNE rest get_tls(RARG0) MOVQ g(RARG0), RARG0 MOVQ g_m(RARG0), RARG0 MOVQ m_p(RARG0), RARG0 MOVQ p_raceprocctx(RARG0), RARG0
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Fri May 17 18:37:29 UTC 2024 - 15.1K bytes - Viewed (0) -
ci/official/containers/linux_arm64/builder.devtoolset/stringop_trunc.patch
- && __string2_1bptr_p (src) && n <= 8 \ - ? __mempcpy_small (dest, __mempcpy_args (src), n) \ - : __mempcpy (dest, src, n))) -# endif -/* In glibc we use this function frequently but for namespace reasons - we have to use the name `__mempcpy'. */ -# define mempcpy(dest, src, n) __mempcpy (dest, src, n) -# endif - -# if !__GNUC_PREREQ (3, 0) || defined _FORCE_INLINES
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Mon Sep 18 14:52:45 UTC 2023 - 42.9K bytes - Viewed (0) -
src/runtime/race_arm64.s
TEXT runtime·racecallbackthunk(SB), NOSPLIT|NOFRAME, $0 // Handle command raceGetProcCmd (0) here. // First, code below assumes that we are on curg, while raceGetProcCmd // can be executed on g0. Second, it is called frequently, so will // benefit from this fast path. CBNZ R0, rest MOVD g, R13 #ifdef TLS_darwin MOVD R27, R12 // save R27 a.k.a. REGTMP (callee-save in C). load_g clobbers it #endif load_g #ifdef TLS_darwin
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Fri May 17 18:37:29 UTC 2024 - 15.5K bytes - Viewed (0) -
pilot/pkg/model/endpointshards.go
// Calls to Keys should be guarded with a lock on the EndpointShards. func (es *EndpointShards) Keys() []ShardKey { // len(shards) ~= number of remote clusters which isn't too large, doing this sort frequently // shouldn't be too problematic. If it becomes an issue we can cache it in the EndpointShards struct. keys := make([]ShardKey, 0, len(es.Shards)) for k := range es.Shards { keys = append(keys, k) }
Registered: Fri Jun 14 15:00:06 UTC 2024 - Last Modified: Fri Jun 14 04:34:37 UTC 2024 - 15.6K bytes - Viewed (0) -
src/cmd/internal/obj/ppc64/doc.go
adding 3 NOPs. The purpose of this directive is to improve performance for cases like loops where better alignment (8 or 16 instead of 4) might be helpful. This directive exists in PPC64 assembler and is frequently used by PPC64 assembler writers. PCALIGN $16 PCALIGN $8 By default, functions in Go are aligned to 16 bytes, as is the case in all other compilers for PPC64. If there is a PCALIGN directive requesting alignment
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Fri Apr 21 16:47:45 UTC 2023 - 11.3K bytes - Viewed (0) -
src/internal/trace/trace_test.go
{"runtime.GOMAXPROCS", 0}, {"main.main", 0}, }}, } if !stress { // Only check for this stack if !stress because traceAdvance alone could // allocate enough memory to trigger a GC if called frequently enough. // This might cause the runtime.GC call we're trying to match against to // coalesce with an active GC triggered by traceAdvance. In that case
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Fri May 17 18:48:18 UTC 2024 - 18.5K bytes - Viewed (0) -
platforms/documentation/docs/src/docs/userguide/jvm/scala_plugin.adoc
By compiling only classes whose source code has changed since the previous compilation, and classes affected by these changes, incremental compilation can significantly reduce Scala compilation time. It is particularly effective when frequently compiling small code increments, as is often done at development time.
Registered: Wed Jun 12 18:38:38 UTC 2024 - Last Modified: Wed Nov 29 17:38:38 UTC 2023 - 17K bytes - Viewed (0) -
src/runtime/race_ppc64le.s
TEXT runtime·racecallbackthunk(SB), NOSPLIT|NOFRAME, $0 // Handle command raceGetProcCmd (0) here. // First, code below assumes that we are on curg, while raceGetProcCmd // can be executed on g0. Second, it is called frequently, so will // benefit from this fast path. MOVD $0, R0 // clear R0 since we came from C code CMP R3, $0 BNE rest // Inline raceGetProcCmd without clobbering callee-save registers. MOVD runtime·tls_g(SB), R10
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Fri May 17 18:37:29 UTC 2024 - 17K bytes - Viewed (0) -
staging/src/k8s.io/apimachinery/pkg/runtime/serializer/protobuf/protobuf.go
type bufferedMarshaller interface { proto.Sizer runtime.ProtobufMarshaller } // Like bufferedMarshaller, but is able to marshal backwards, which is more efficient since it doesn't call Size() as frequently. type bufferedReverseMarshaller interface { proto.Sizer runtime.ProtobufReverseMarshaller } // estimateUnknownSize returns the expected bytes consumed by a given runtime.Unknown
Registered: Sat Jun 15 01:39:40 UTC 2024 - Last Modified: Wed Feb 23 13:38:23 UTC 2022 - 17.8K bytes - Viewed (0) -
guava/src/com/google/common/util/concurrent/ExecutionSequencer.java
* ThreadLocal; however, ThreadLocal is not well optimized for the case where the ThreadLocal is * non-static, and is initialized/removed frequently - this causes churn in the Thread specific * hashmaps. Using a static ThreadLocal to avoid that overhead would mean that different * ExecutionSequencer objects interfere with each other, which would be undesirable, in addition
Registered: Wed Jun 12 16:38:11 UTC 2024 - Last Modified: Thu Feb 01 21:46:34 UTC 2024 - 22.1K bytes - Viewed (0)