Results 41 - 50 of 572 for mstart0 (0.39 sec)
- src/internal/trace/order.go
    o.initialGen = gen
}
var curCtx, newCtx schedCtx
curCtx.M = m
newCtx.M = m
var ms *mState
if m == NoThread {
    curCtx.P = NoProc
    curCtx.G = NoGoroutine
    newCtx = curCtx
} else {
    // Pull out or create the mState for this event.
    var ok bool
    ms, ok = o.mStates[m]
    if !ok {
        ms = &mState{
            g: NoGoroutine,
            p: NoProc,
        }
        o.mStates[m] = ms
    }
    curCtx.P = ms.p
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Mon Jun 03 14:56:25 UTC 2024 - 52.4K bytes - Viewed (0)
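The snippet turns on a standard Go idiom: look up the per-M record and lazily create it on first sight. A minimal runnable sketch of that "pull out or create" pattern, where ThreadID, the mState shape, and the -1 sentinels are stand-ins for the trace package's actual types:

    package main

    import "fmt"

    type ThreadID int64

    // mState mirrors the per-thread record in the snippet; -1 stands in
    // for the NoGoroutine and NoProc sentinels.
    type mState struct {
        g, p int64
    }

    // getOrCreate pulls out or creates the mState for thread m.
    func getOrCreate(states map[ThreadID]*mState, m ThreadID) *mState {
        ms, ok := states[m]
        if !ok {
            ms = &mState{g: -1, p: -1}
            states[m] = ms
        }
        return ms
    }

    func main() {
        states := map[ThreadID]*mState{}
        fmt.Println(getOrCreate(states, 7).p) // -1: freshly created
    }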
- test/goto.go
    } else { // GCCGO_ERROR "block starts here"
    L:
    }
}

func _() {
    if false { // GCCGO_ERROR "block starts here"
    L:
    } else {
        goto L // ERROR "goto L jumps into block starting at LINE-3|goto jumps into block"
    }
}

func _() {
    if true {
        goto L // ERROR "goto L jumps into block starting at LINE+1|goto jumps into block"
    } else { // GCCGO_ERROR "block starts here"
    L:
    }
}
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Wed Apr 19 02:27:58 UTC 2017 - 8.4K bytes - Viewed (0)
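The ERROR comments encode the rule these cases test: a goto may jump within or out of its own block, but never into a block it is not already inside. A minimal runnable sketch of the legal form, with an illegal form left in a comment:

    package main

    import "fmt"

    func main() {
        i := 0
    Loop: // legal: the label and the goto share the same block
        if i < 3 {
            fmt.Println(i)
            i++
            goto Loop // legal: jumps backward, out of the if block
        }

        // Illegal, drawing the errors the test expects:
        //
        //  goto L // "goto L jumps into block"
        //  if true {
        //  L:
        //  }
    }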
- src/internal/runtime/atomic/atomic_test.go
// Tests that xadduintptr correctly updates 64-bit values. The place where
// we actually do so is mstats.go, functions mSysStat{Inc,Dec}.
func TestXadduintptrOnUint64(t *testing.T) {
    if goarch.BigEndian {
        // On big endian architectures, we never use xadduintptr to update
        // 64-bit values and hence we skip the test. (Note that functions
        // mSysStat{Inc,Dec} in mstats.go have explicit checks for
        // big-endianness.)
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Mon Mar 25 19:53:03 UTC 2024 - 8.5K bytes - Viewed (0)
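The comment is about memory layout: a word-sized atomic add at a uint64's address only hits the low-order word when that word comes first. A hedged sketch of the same idea using the public sync/atomic package rather than the runtime-internal xadduintptr:

    package main

    import (
        "fmt"
        "sync/atomic"
        "unsafe"
    )

    func main() {
        var n uint64
        // On little-endian machines (and on any 64-bit platform, where
        // uintptr spans the whole value) this add updates n correctly;
        // on 32-bit big-endian the first word is the high word, which is
        // why the test above is skipped there.
        p := (*uintptr)(unsafe.Pointer(&n))
        atomic.AddUintptr(p, 42)
        fmt.Println(n) // 42
    }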
- staging/src/k8s.io/apiserver/pkg/server/egressselector/metrics/metrics.go
            StabilityLevel: metrics.ALPHA,
        },
        []string{"protocol", "transport", "stage"},
    )
    legacyregistry.MustRegister(starts)
    legacyregistry.MustRegister(latencies)
    legacyregistry.MustRegister(failures)
    return &DialMetrics{starts: starts, latencies: latencies, failures: failures, clock: clock.RealClock{}}
}

// Clock returns the clock.
func (m *DialMetrics) Clock() clock.Clock {
Registered: Sat Jun 15 01:39:40 UTC 2024 - Last Modified: Tue Nov 01 23:36:51 UTC 2022 - 4.3K bytes - Viewed (0)
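The snippet is the tail of a constructor that builds counter vectors and registers them with the legacy registry. A hedged sketch of that pattern with k8s.io/component-base/metrics; the package, subsystem, and metric name below are hypothetical, not the ones this file defines:

    package egressmetrics

    import (
        "k8s.io/component-base/metrics"
        "k8s.io/component-base/metrics/legacyregistry"
    )

    // dialStarts is a hypothetical counter, labeled the same way as the
    // metrics in the snippet.
    var dialStarts = metrics.NewCounterVec(
        &metrics.CounterOpts{
            Subsystem:      "egress_dialer",
            Name:           "dial_start_total",
            Help:           "Dial starts, partitioned by protocol, transport, and stage.",
            StabilityLevel: metrics.ALPHA,
        },
        []string{"protocol", "transport", "stage"},
    )

    func init() {
        legacyregistry.MustRegister(dialStarts)
    }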
- src/cmd/vendor/rsc.io/markdown/inline.go
        d = 1
    }
    del := p.Text[0] == '~'
    x := &Emph{Marker: p.Text[:d], Inner: append([]Inline(nil), dst[start.i+1:]...)}
    start.Text = start.Text[:len(start.Text)-d]
    p.Text = p.Text[d:]
    if start.Text == "" {
        dst = dst[:start.i]
    } else {
        dst = dst[:start.i+1]
    }
    trimStack()
    if del {
        dst = append(dst, (*Del)(x))
    } else if d == 2 {
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Tue Jun 04 16:19:04 UTC 2024 - 21.9K bytes - Viewed (0)
- src/internal/trace/testdata/testprog/annotations-stress.go
func main() {
    baseCtx := context.Background()

    // Create a task that starts and ends entirely outside of the trace.
    ctx0, t0 := trace.NewTask(baseCtx, "parent")

    // Create a task that starts before the trace and ends during the trace.
    ctx1, t1 := trace.NewTask(ctx0, "type1")

    // Start tracing.
    if err := trace.Start(os.Stdout); err != nil {
        log.Fatalf("failed to start tracing: %v", err)
    }
    t1.End()
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Fri May 17 18:48:18 UTC 2024 - 1.8K bytes - Viewed (0)
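The test program uses the public runtime/trace annotation API, deliberately creating tasks before tracing begins so that only part of their lifetime overlaps the trace. A minimal runnable example of the same calls:

    package main

    import (
        "context"
        "log"
        "os"
        "runtime/trace"
    )

    func main() {
        // A task may begin before tracing starts; only the portion that
        // overlaps the trace is recorded.
        ctx, task := trace.NewTask(context.Background(), "example")

        if err := trace.Start(os.Stdout); err != nil {
            log.Fatalf("failed to start tracing: %v", err)
        }
        defer trace.Stop()

        trace.Log(ctx, "phase", "working") // annotate within the task
        task.End()
    }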
- tensorflow/compiler/jit/cluster_scoping_pass.cc
}

void ClusterScopingPassImpl::AddScopeToAllTransitivePredecessors(Node* start) {
  const string unique_suffix = absl::StrCat("_", GetUniqueScopeId());

  std::vector<Node*> starts;
  starts.push_back(start);
  auto enter = [&](Node* n) { AddOrAppendXlaInternalScope(n, unique_suffix); };
  ReverseDFSFrom(*graph_, starts, enter, /*leave=*/nullptr,
                 /*stable_comparator=*/NodeComparatorName());
}
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Feb 22 08:47:20 UTC 2024 - 5.7K bytes - Viewed (0)
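The pass walks the graph backward from start, applying a scope-tagging callback to every transitive predecessor. A hedged Go sketch of that reverse depth-first traversal over an adjacency map of incoming edges; all names here are illustrative:

    package main

    import "fmt"

    // reverseDFS visits every transitive predecessor of the start nodes
    // exactly once, calling enter on each, like ReverseDFSFrom above.
    func reverseDFS(preds map[string][]string, starts []string, enter func(string)) {
        seen := map[string]bool{}
        var visit func(n string)
        visit = func(n string) {
            if seen[n] {
                return
            }
            seen[n] = true
            enter(n)
            for _, p := range preds[n] {
                visit(p)
            }
        }
        for _, s := range starts {
            visit(s)
        }
    }

    func main() {
        preds := map[string][]string{"c": {"a", "b"}, "b": {"a"}}
        reverseDFS(preds, []string{"c"}, func(n string) { fmt.Println("scope:", n) })
    }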
- tensorflow/compiler/mlir/lite/schema/schema_generated.h
  ::flatbuffers::uoffset_t start_;
  void add_beta(float beta) {
    fbb_.AddElement<float>(SoftmaxOptions::VT_BETA, beta, 0.0f);
  }
  explicit SoftmaxOptionsBuilder(::flatbuffers::FlatBufferBuilder &_fbb)
      : fbb_(_fbb) {
    start_ = fbb_.StartTable();
  }
  ::flatbuffers::Offset<SoftmaxOptions> Finish() {
    const auto end = fbb_.EndTable(start_);
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue May 21 18:21:50 UTC 2024 - 1M bytes - Viewed (0)
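This is generated FlatBuffers builder code: start a table, add one element per field (elided when the value equals its default), then end the table. A hedged Go analogue using the FlatBuffers Go runtime, assuming beta (slot 0, from VT_BETA = 4) is the table's only field:

    package main

    import flatbuffers "github.com/google/flatbuffers/go"

    // buildSoftmaxOptions mirrors the generated builder: StartObject,
    // one float slot (skipped when beta equals the 0.0 default), EndObject.
    func buildSoftmaxOptions(b *flatbuffers.Builder, beta float32) flatbuffers.UOffsetT {
        b.StartObject(1)                   // table with one field slot
        b.PrependFloat32Slot(0, beta, 0.0) // slot 0 corresponds to VT_BETA
        return b.EndObject()
    }

    func main() {
        b := flatbuffers.NewBuilder(0)
        _ = buildSoftmaxOptions(b, 1.5)
    }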
- src/runtime/cgo/gcc_freebsd.c
    size_t size;
    int err;

    SIGFILLSET(ign);
    pthread_sigmask(SIG_SETMASK, &ign, &oset);

    pthread_attr_init(&attr);
    pthread_attr_getstacksize(&attr, &size);
    // Leave stacklo=0 and set stackhi=size; mstart will do the rest.
    ts->g->stackhi = size;
    err = _cgo_try_pthread_create(&p, &attr, threadentry, ts);

    pthread_sigmask(SIG_SETMASK, &oset, nil);

    if (err != 0) {
        fatalf("pthread_create failed: %s", strerror(err));
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Tue Aug 15 03:40:00 UTC 2023 - 1.5K bytes - Viewed (0)
- staging/src/k8s.io/apimachinery/pkg/util/managedfields/internal/skipnonapplied.go
func NewSkipNonAppliedManager(fieldManager Manager, objectCreater runtime.ObjectCreater) Manager {
    return NewProbabilisticSkipNonAppliedManager(fieldManager, objectCreater, 0.0)
}

// NewProbabilisticSkipNonAppliedManager creates a new wrapped FieldManager that starts tracking managers after the first apply,
// or starts tracking on create with p probability.
Registered: Sat Jun 15 01:39:40 UTC 2024 - Last Modified: Tue May 16 20:03:48 UTC 2023 - 3.3K bytes - Viewed (0)
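The doc comment describes the behavior; a hedged sketch of the probabilistic choice it implies, with illustrative names (the real manager wraps a FieldManager rather than exposing a method like this):

    package main

    import (
        "fmt"
        "math/rand"
    )

    // skipNonApplied stands in for the wrapped manager; probability is
    // the p passed to NewProbabilisticSkipNonAppliedManager, so 0.0
    // means tracking never starts on create.
    type skipNonApplied struct {
        probability float64
    }

    // trackOnCreate reports whether this create should start tracking managers.
    func (m *skipNonApplied) trackOnCreate() bool {
        return rand.Float64() < m.probability
    }

    func main() {
        m := &skipNonApplied{probability: 0.5}
        fmt.Println(m.trackOnCreate())
    }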