- Sort Score
- Results per page: 10
- Languages All
Results 51 - 60 of 96 for updateIPs (1.44 sec)
-
src/testing/testing.go
c.cleanups = c.cleanups[:last] } c.mu.Unlock() if cleanup == nil { return nil } cleanup() } } // resetRaces updates c.parent's count of data race errors (or the global count, // if c has no parent), and updates c.lastRaceErrors to match. // // Any races that occurred prior to this call to resetRaces will // not be attributed to c. func (c *common) resetRaces() {
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Thu May 23 01:00:11 UTC 2024 - 76.1K bytes - Viewed (0) -
CHANGELOG/CHANGELOG-1.3.md
Registered: Sat Jun 15 01:39:40 UTC 2024 - Last Modified: Thu Dec 24 02:28:26 UTC 2020 - 84K bytes - Viewed (0) -
pkg/kubelet/cm/devicemanager/manager_test.go
as.True(ok) as.Equal(int64(2), resource1Capacity.Value()) as.Equal(int64(2), resource1Allocatable.Value()) as.Equal(0, len(removedResources)) // Updates a healthy device to unhealthy should reduce allocatable by 1. devs[1].Health = pluginapi.Unhealthy callback(resourceName1, devs) capacity, allocatable, removedResources = testManager.GetCapacity()
Registered: Sat Jun 15 01:39:40 UTC 2024 - Last Modified: Tue Jun 04 06:25:43 UTC 2024 - 65K bytes - Viewed (0) -
pkg/controller/volume/persistentvolume/pv_controller.go
logger.V(4).Info("Synchronizing PersistentVolume, volume is bound by user to a claim that is bound to another volume, waiting for the claim to get unbound", "volumeName", volume.Name) // This just updates the volume phase and clears // volume.Spec.ClaimRef.UID. It leaves the volume pre-bound // to the claim. if err = ctrl.unbindVolume(ctx, volume); err != nil { return err } return nil
Registered: Sat Jun 15 01:39:40 UTC 2024 - Last Modified: Fri May 10 08:42:31 UTC 2024 - 89.2K bytes - Viewed (0) -
src/cmd/go/internal/modload/buildlist.go
} pruning := pruningForGoVersion(goVersion) if pruning == unpruned && rs.pruning != unpruned { // Use newRequirements instead of convertDepth because convertDepth // also updates roots; here, we want to report the unmodified roots // even though they may seem inconsistent. rs = newRequirements(unpruned, rs.rootModules, rs.direct) } return rs.Graph(ctx) }
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Wed May 15 16:04:44 UTC 2024 - 53.8K bytes - Viewed (0) -
src/runtime/mbitmap.go
func (m markBits) isMarked() bool { return *m.bytep&m.mask != 0 } // setMarked sets the marked bit in the markbits, atomically. func (m markBits) setMarked() { // Might be racing with other updates, so use atomic update always. // We used to be clever here and use a non-atomic update in certain // cases, but it's not worth the risk. atomic.Or8(m.bytep, m.mask) }
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Thu May 23 00:18:55 UTC 2024 - 60K bytes - Viewed (0) -
cmd/erasure-object.go
if !dstOpts.NoAuditLog { auditObjectErasureSet(ctx, dstObject, &er) } // This call shouldn't be used for anything other than metadata updates or adding self referential versions. if !srcInfo.metadataOnly { return oi, NotImplemented{} } if !dstOpts.NoLock { lk := er.NewNSLock(dstBucket, dstObject)
Registered: Sun Jun 16 00:44:34 UTC 2024 - Last Modified: Mon Jun 10 15:51:27 UTC 2024 - 78.6K bytes - Viewed (0) -
src/cmd/link/internal/ld/dwarf.go
typeRuntimeEface loader.Sym typeRuntimeIface loader.Sym uintptrInfoSym loader.Sym // Used at various points in that parallel portion of DWARF gen to // protect against conflicting updates to globals (such as "gdbscript") dwmu *sync.Mutex } // dwSym wraps a loader.Sym; this type is meant to obey the interface // rules for dwarf.Sym from the cmd/internal/dwarf package. DwDie and
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Tue Apr 02 16:25:18 UTC 2024 - 72.4K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/transforms/sparsecore/embedding_pipelining.cc
TensorCores, optionally followed by a backward pass (training update) with more ops on the SparseCore. Ops are broken up into: 1. SC forward pass 2. TC forward/backward pass 3. SC backward pass 4. non-TPU loop counter updates These 4 functions are then staggered so as to enable parallel execution. In pseudocode, the algorithm is as follows: // Start step 0 C_0 = cond(args_0) N_0 = non_tpu(args_0) if (C_0) {
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 92.9K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/tests/lower_tf.mlir
// RUN: tf-opt %s -test-tf-lower-tf | FileCheck %s // CHECK-LABEL: invert_permutation func.func @invert_permutation(%arg0: tensor<5xi32>) -> tensor<5xi32> { // CHECK-DAG: %[[UPDATES:.*]] = "tf.Const"() <{value = dense<[0, 1, 2, 3, 4]> : tensor<5xi32>}> : () -> tensor<5xi32> // CHECK-DAG: %[[SHAPE:.*]] = "tf.Const"() <{value = dense<[5, 1]> : tensor<2xi32>}> : () -> tensor<2xi32>
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri Jan 05 18:35:42 UTC 2024 - 92K bytes - Viewed (0)