- Sort Score
- Results per page: 10
- Languages All
Results 21 - 30 of 2,633 for block1 (0.33 sec)
-
cmd/namespace-lock.go
timeout.LogSuccess(UTCNow().Sub(start)) return LockContext{ctx: newCtx, cancel: cancel}, nil } // Unlock - block until write lock is released. func (di *distLockInstance) Unlock(lc LockContext) { if lc.cancel != nil { lc.cancel() } di.rwMutex.Unlock(lc.ctx) } // RLock - block until read lock is taken or timeout has occurred.
Registered: Sun Jun 16 00:44:34 UTC 2024 - Last Modified: Mon Jun 05 23:56:35 UTC 2023 - 9.2K bytes - Viewed (0) -
pkg/kubelet/clustertrustbundle/clustertrustbundle_manager_test.go
func diffBundles(a, b []byte) string { var block *pem.Block aBlocks := []*pem.Block{} for { block, a = pem.Decode(a) if block == nil { break } aBlocks = append(aBlocks, block) } sort.Slice(aBlocks, func(i, j int) bool { if aBlocks[i].Type < aBlocks[j].Type { return true } else if aBlocks[i].Type == aBlocks[j].Type { comp := bytes.Compare(aBlocks[i].Bytes, aBlocks[j].Bytes)
Registered: Sat Jun 15 01:39:40 UTC 2024 - Last Modified: Fri Nov 03 18:40:48 UTC 2023 - 15.5K bytes - Viewed (0) -
src/cmd/compile/internal/ssa/fuse.go
) // fuse simplifies control flow by joining basic blocks. func fuse(f *Func, typ fuseType) { for changed := true; changed; { changed = false // Be sure to avoid quadratic behavior in fuseBlockPlain. See issue 13554. // Previously this was dealt with using backwards iteration, now fuseBlockPlain // handles large runs of blocks. for i := len(f.Blocks) - 1; i >= 0; i-- { b := f.Blocks[i] if typ&fuseTypeIf != 0 {
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Tue Oct 31 20:45:54 UTC 2023 - 9K bytes - Viewed (0) -
src/cmd/compile/internal/ssa/sparsetree.go
) type SparseTreeNode struct { child *Block sibling *Block parent *Block // Every block has 6 numbers associated with it: // entry-1, entry, entry+1, exit-1, exit, and exit+1. // entry and exit are conceptually the top of the block (phi functions) // entry+1 and exit-1 are conceptually the bottom of the block (ordinary defs) // entry-1 and exit+1 are conceptually "just before" the block (conditions flowing in) //
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Fri Nov 18 17:59:44 UTC 2022 - 8.1K bytes - Viewed (0) -
cmd/erasure-utils.go
"github.com/klauspost/reedsolomon" ) // getDataBlockLen - get length of data blocks from encoded blocks. func getDataBlockLen(enBlocks [][]byte, dataBlocks int) int { size := 0 // Figure out the data block length. for _, block := range enBlocks[:dataBlocks] { size += len(block) } return size } // Writes all the data blocks from encoded blocks until requested // outSize length. Provides a way to skip bytes until the offset.
Registered: Sun Jun 16 00:44:34 UTC 2024 - Last Modified: Wed Jan 31 02:11:45 UTC 2024 - 3.1K bytes - Viewed (0) -
src/cmd/compile/internal/ssa/critical.go
// critical splits critical edges (those that go from a block with // more than one outedge to a block with more than one inedge). // Regalloc wants a critical-edge-free CFG so it can implement phi values. func critical(f *Func) { // maps from phi arg ID to the new block created for that argument blocks := f.Cache.allocBlockSlice(f.NumValues()) defer f.Cache.freeBlockSlice(blocks) // need to iterate over f.Blocks without range, as we might
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Tue May 16 21:40:11 UTC 2023 - 3.1K bytes - Viewed (0) -
src/cmd/compile/internal/ssa/lca.go
bid := q[n].bid cid := q[n].cid q = q[:n] // Add block to tour. blocks[bid].pos = int32(len(tour)) tour = append(tour, bid) // Proceed down next child edge (if any). if cid == 0 { // This is our first visit to b. Set its depth. blocks[bid].depth = blocks[blocks[bid].parent].depth + 1 // Then explore its first child. cid = blocks[bid].firstChild } else {
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Mon Oct 30 21:52:15 UTC 2023 - 3.8K bytes - Viewed (0) -
src/cmd/compile/internal/types2/labels.go
} } } // A block tracks label declarations in a block and its enclosing blocks. type block struct { parent *block // enclosing block lstmt *syntax.LabeledStmt // labeled statement to which this block belongs, or nil labels map[string]*syntax.LabeledStmt // allocated lazily } // insert records a new label declaration for the current block.
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Thu May 30 19:19:55 UTC 2024 - 7.2K bytes - Viewed (0) -
tensorflow/c/experimental/filesystem/plugins/gcs/ram_file_block_cache_test.cc
// Now load a different block of file "a" at timestamp `now` + 1. When the // first block of "a" expires, this block should also be removed because it // also belongs to file "a". TF_EXPECT_OK(ReadCache(&cache, "a", 8, 1, &out)); // Ensure that all blocks are in the cache (i.e. reads are cache hits). EXPECT_EQ(cache.CacheSize(), 24); EXPECT_EQ(calls, 3); TF_EXPECT_OK(ReadCache(&cache, "a", 0, 1, &out));
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri Oct 15 03:16:57 UTC 2021 - 23.2K bytes - Viewed (0) -
src/cmd/compile/internal/ssa/tighten.go
package ssa import "cmd/compile/internal/base" // tighten moves Values closer to the Blocks in which they are used. // This can reduce the amount of register spilling required, // if it doesn't also create more live values. // A Value can be moved to any block that // dominates all blocks in which it is used. func tighten(f *Func) { if base.Flag.N != 0 && len(f.Blocks) < 10000 { // Skip the optimization in -N mode, except for huge functions.
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Tue May 16 01:01:38 UTC 2023 - 7.7K bytes - Viewed (0)