Results 1 - 10 of 19 for spdelta (1.67 sec)
src/internal/runtime/atomic/atomic_wasm.go
//go:nosplit
//go:noinline
func Xadd(ptr *uint32, delta int32) uint32 {
	new := *ptr + uint32(delta)
	*ptr = new
	return new
}

//go:nosplit
//go:noinline
func Xadd64(ptr *uint64, delta int64) uint64 {
	new := *ptr + uint64(delta)
	*ptr = new
	return new
}

//go:nosplit
//go:noinline
func Xadduintptr(ptr *uintptr, delta uintptr) uintptr {
	new := *ptr + delta
	*ptr = new
	return new
}

Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Wed May 15 19:57:43 UTC 2024 - 5.4K bytes - Viewed (0)
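Note that the wasm implementation above is a plain, non-atomic read-modify-write: the js/wasm port runs everything on a single thread, so ordinary loads and stores suffice. On multi-threaded platforms the portable counterpart is the public sync/atomic API; a minimal sketch:

package main

import (
	"fmt"
	"sync/atomic"
)

func main() {
	// Portable counterpart of the runtime's Xadd: atomically add a
	// delta to a uint32 and return the new value.
	var n uint32
	fmt.Println(atomic.AddUint32(&n, 5)) // 5
	// A negative delta works via two's-complement wraparound, mirroring
	// the uint32(delta) conversion in Xadd: ^uint32(2) is -3 mod 2^32.
	fmt.Println(atomic.AddUint32(&n, ^uint32(2))) // 2
}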
pilot/pkg/xds/deltatest.go
"istio.io/istio/pkg/util/sets" ) var knownOptimizationGaps = sets.New( "BlackHoleCluster", "InboundPassthroughCluster", "PassthroughCluster", ) // compareDiff compares a Delta and SotW XDS response. This allows checking that the Delta XDS // response returned the optimal result. Checks include correctness checks (e.g. if a config changed, // we must include it) and possible optimizations (e.g. we sent a config, but it was not changed).
Registered: Fri Jun 14 15:00:06 UTC 2024 - Last Modified: Thu Jun 13 01:56:28 UTC 2024 - 5.7K bytes - Viewed (0) -
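As a simplified, hypothetical sketch of the idea behind compareDiff (the names below are not Istio's actual API): a delta response is correct if it contains every changed resource, and optimal if it contains nothing else.

package main

import "fmt"

// compareResponses: "changed" is what a state-of-the-world push would
// have to resend; "deltaSent" is what the delta push actually sent.
func compareResponses(changed, deltaSent map[string]bool) (missing, redundant []string) {
	for name := range changed {
		if !deltaSent[name] {
			missing = append(missing, name) // correctness gap: changed but omitted
		}
	}
	for name := range deltaSent {
		if !changed[name] {
			redundant = append(redundant, name) // optimization gap: sent but unchanged
		}
	}
	return
}

func main() {
	changed := map[string]bool{"cluster-a": true, "cluster-b": true}
	sent := map[string]bool{"cluster-a": true, "PassthroughCluster": true}
	missing, redundant := compareResponses(changed, sent)
	fmt.Println(missing, redundant) // [cluster-b] [PassthroughCluster]
}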
src/sync/waitgroup.go
// See the WaitGroup example.
func (wg *WaitGroup) Add(delta int) {
	if race.Enabled {
		if delta < 0 {
			// Synchronize decrements with Wait.
			race.ReleaseMerge(unsafe.Pointer(wg))
		}
		race.Disable()
		defer race.Enable()
	}
	state := wg.state.Add(uint64(delta) << 32)
	v := int32(state >> 32)
	w := uint32(state)
	if race.Enabled && delta > 0 && v == int32(delta) {
		// The first increment must be synchronized with Wait.

Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Fri Jun 07 21:14:51 UTC 2024 - 4K bytes - Viewed (0)
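As the snippet shows, Add packs two counts into one atomic 64-bit word: the high 32 bits hold the counter (v) and the low 32 bits the number of blocked waiters (w), so a single state.Add adjusts the counter and reads both halves at once. Typical usage of the public API:

package main

import (
	"fmt"
	"sync"
)

func main() {
	var wg sync.WaitGroup
	for i := 0; i < 3; i++ {
		wg.Add(1) // increment the counter before starting the goroutine
		go func(id int) {
			defer wg.Done() // Done is Add(-1)
			fmt.Println("worker", id)
		}(i)
	}
	wg.Wait() // blocks until the counter drops to zero
}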
src/sync/atomic/type.go
func (x *Int32) CompareAndSwap(old, new int32) (swapped bool) {
	return CompareAndSwapInt32(&x.v, old, new)
}

// Add atomically adds delta to x and returns the new value.
func (x *Int32) Add(delta int32) (new int32) { return AddInt32(&x.v, delta) }

// And atomically performs a bitwise AND operation on x using the bitmask
// provided as mask and returns the old value.

Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Fri May 17 18:37:29 UTC 2024 - 8.5K bytes - Viewed (0)
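These methods wrap the package-level functions around an unexported field, so an atomic.Int32 cannot be accessed non-atomically by accident. Usage (Go 1.19+):

package main

import (
	"fmt"
	"sync/atomic"
)

func main() {
	var c atomic.Int32
	fmt.Println(c.Add(5))                // 5: Add returns the new value
	fmt.Println(c.CompareAndSwap(5, 10)) // true: value was 5, now 10
	fmt.Println(c.CompareAndSwap(5, 99)) // false: value is 10, not 5
	fmt.Println(c.Load())                // 10
}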
src/internal/trace/mud.go
	// histogram.
	mudDegree = 1024
)

type edge struct {
	// At x, the function increases by delta.
	x, delta float64
	// Additionally at x is a Dirac delta function with area dirac.
	dirac float64
}

// add adds a uniform function over [l, r] scaled so the total weight
// of the uniform is area. If l==r, this adds a Dirac delta function.
func (d *mud) add(l, r, area float64) {
	if area == 0 {
		return
	}
	if r < l {

Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Thu May 23 01:00:11 UTC 2024 - 5.7K bytes - Viewed (0)
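The representation here: a distribution is a list of edges, where each edge steps the density up or down at x and may also carry a point mass (Dirac delta). A standalone sketch of the same encoding, simplified and with hypothetical names (the real mud type additionally buckets edges into a fixed-degree histogram):

package main

import "fmt"

// edge mirrors the shape of the trace package's type: at x the density
// steps by delta, plus an optional point mass of area dirac.
type edge struct {
	x, delta float64
	dirac    float64
}

// addUniform records a uniform mass of total weight area over [l, r]
// as a pair of step edges, degenerating to a Dirac delta when l == r.
func addUniform(edges []edge, l, r, area float64) []edge {
	if area == 0 {
		return edges
	}
	if l == r {
		// All the mass sits at one point: a Dirac delta of the given area.
		return append(edges, edge{x: l, dirac: area})
	}
	h := area / (r - l) // density height of the uniform block
	// Density steps up by h at l and back down by h at r.
	return append(edges, edge{x: l, delta: h}, edge{x: r, delta: -h})
}

func main() {
	var edges []edge
	edges = addUniform(edges, 0, 2, 1)        // uniform: height 0.5 over [0, 2]
	edges = addUniform(edges, 0.5, 0.5, 0.25) // Dirac delta at 0.5
	fmt.Println(edges)
}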
src/internal/runtime/atomic/atomic_386.go
}

//go:nosplit
//go:noinline
func LoadAcquintptr(ptr *uintptr) uintptr {
	return *ptr
}

//go:noescape
func Xadd64(ptr *uint64, delta int64) uint64

//go:noescape
func Xadduintptr(ptr *uintptr, delta uintptr) uintptr

//go:noescape
func Xadd(ptr *uint32, delta int32) uint32

//go:noescape
func Xchg64(ptr *uint64, new uint64) uint64

//go:noescape
func Xchg(ptr *uint32, new uint32) uint32

Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Wed May 15 19:57:43 UTC 2024 - 2.2K bytes - Viewed (0)
src/internal/runtime/atomic/atomic_arm.go
}

// Atomic add and return new value.
//
//go:nosplit
func Xadd(val *uint32, delta int32) uint32 {
	for {
		oval := *val
		nval := oval + uint32(delta)
		if Cas(val, oval, nval) {
			return nval
		}
	}
}

//go:noescape
func Xadduintptr(ptr *uintptr, delta uintptr) uintptr

//go:nosplit
func Xchg(addr *uint32, v uint32) uint32 {
	for {
		old := *addr

Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Wed May 15 19:57:43 UTC 2024 - 4.8K bytes - Viewed (0)
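The ARM port builds Xadd from a compare-and-swap retry loop: read the current value, compute the new one, and retry until Cas confirms no other writer intervened. The same pattern written against the public sync/atomic API (using an atomic load where the runtime can get away with a plain read):

package main

import (
	"fmt"
	"sync/atomic"
)

// addViaCAS mirrors the Xadd retry loop above using public primitives.
func addViaCAS(val *uint32, delta int32) uint32 {
	for {
		oval := atomic.LoadUint32(val)
		nval := oval + uint32(delta)
		if atomic.CompareAndSwapUint32(val, oval, nval) {
			return nval
		}
	}
}

func main() {
	var n uint32
	fmt.Println(addViaCAS(&n, 3)) // 3
}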
src/internal/runtime/atomic/atomic_mipsx.go
	_ = *addr
	spinLock(&lock.state)
}

//go:nosplit
func unlock() {
	spinUnlock(&lock.state)
}

//go:nosplit
func Xadd64(addr *uint64, delta int64) (new uint64) {
	lockAndCheck(addr)
	new = *addr + uint64(delta)
	*addr = new
	unlock()
	return
}

//go:nosplit
func Xchg64(addr *uint64, new uint64) (old uint64) {
	lockAndCheck(addr)
	old = *addr
	*addr = new

Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Fri May 17 20:08:37 UTC 2024 - 3.2K bytes - Viewed (0)
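32-bit MIPS lacks native 64-bit atomics, so the runtime serializes Xadd64 and Xchg64 behind a single lock. A portable userland sketch of the same strategy, with a mutex standing in for the runtime-internal spinlock and alignment check:

package main

import (
	"fmt"
	"sync"
)

var (
	mu sync.Mutex
	v  uint64
)

// xadd64 emulates a 64-bit atomic add by holding a lock across the
// read-modify-write, as the mipsx runtime does with its spinlock.
func xadd64(delta int64) uint64 {
	mu.Lock()
	defer mu.Unlock()
	v += uint64(delta)
	return v
}

func main() {
	fmt.Println(xadd64(7)) // 7
}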
tensorflow/compiler/mlir/tf2xla/api/v2/cluster_tf_test.cc
  /*is_in_fallback_enabled_mode=*/false));

  FuncOp main = mlir_module_->lookupSymbol<mlir::func::FuncOp>("main");
  ASSERT_TRUE(main);

  EXPECT_EQ(compilation_status.Delta(mlir::TF::kMlirPh1BridgeCounterReplicated,
                                     mlir::TF::kMlirPh1BridgeCounterV2,
                                     mlir::TF::kMlirPh1BridgeCounterTpu,

Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue May 28 21:44:37 UTC 2024 - 6.2K bytes - Viewed (0)
pkg/test/framework/components/istio/cleanup.go
// Write time spent for cleanup and deploy to ARTIFACTS/trace.yaml and logs to allow analyzing test times
defer func() {
	delta := time.Since(t0)
	i.ctx.RecordTraceEvent("istio-cleanup", delta.Seconds())
	scopes.Framework.Infof("=== SUCCEEDED: Cleanup Istio in %v [Suite=%s] ===", delta, i.ctx.Settings().TestID)
}()
if i.cfg.DumpKubernetesManifests {
	i.installer.Dump(i.ctx)
}
if i.cfg.DeployIstio {

Registered: Fri Jun 14 15:00:06 UTC 2024 - Last Modified: Thu Jun 06 22:12:34 UTC 2024 - 5K bytes - Viewed (0)
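Stripped of the Istio plumbing, the timing idiom here is a deferred closure over a start timestamp, so the elapsed delta is recorded however the function exits:

package main

import (
	"fmt"
	"time"
)

func doCleanup() {
	t0 := time.Now()
	defer func() {
		// time.Since(t0) is shorthand for time.Now().Sub(t0).
		delta := time.Since(t0)
		fmt.Printf("cleanup took %v (%.2fs)\n", delta, delta.Seconds())
	}()
	time.Sleep(10 * time.Millisecond) // stand-in for the real work
}

func main() { doCleanup() }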