Results 31 - 40 of 403 for block (0.1 sec)

  1. src/internal/chacha8rand/chacha8.go

    package chacha8rand
    
    import "internal/byteorder"
    
    const (
    	ctrInc = 4  // increment counter by 4 between block calls
    	ctrMax = 16 // reseed when counter reaches 16
    	chunk  = 32 // each chunk produced by block is 32 uint64s
    	reseed = 4  // reseed with 4 words
    )
    
    // block is the chacha8rand block function.
    func block(seed *[4]uint64, blocks *[32]uint64, counter uint32)
    
    // A State holds the state for a single random generator.
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Mon May 13 21:47:29 UTC 2024
    - 4.4K bytes
    - Viewed (0)
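    The excerpt above is from Go's internal chacha8rand package, which cannot be imported directly; its generator is exposed through math/rand/v2. A minimal usage sketch, assuming Go 1.22 or later and a caller-supplied 32-byte seed:

    package main

    import (
        "fmt"
        "math/rand/v2"
    )

    func main() {
        // A fixed 32-byte seed for illustration only; real code would take
        // the seed from crypto/rand or another entropy source.
        var seed [32]byte
        copy(seed[:], "an example seed that is 32 bytes")

        // NewChaCha8 (Go 1.22+) exposes the generator whose internal block
        // function is declared in the excerpt above.
        r := rand.NewChaCha8(seed)

        // Each call consumes output that the block function produces in
        // 32-uint64 chunks.
        for i := 0; i < 3; i++ {
            fmt.Println(r.Uint64())
        }
    }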
  2. tensorflow/compiler/mlir/lite/transforms/optimize_functional_ops.cc

        func::FuncOp func = cond_value ? then_func : else_func;
    
        // Make sure that the function has exactly one block to simplify inlining.
        // TFLite doesn't use control flow with blocks, so functions with more than
        // one block are not encountered in practice.
        if (!llvm::hasSingleElement(func)) return failure();
    
        IRMapping mapper;
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 6.6K bytes
    - Viewed (0)
  3. testing/internal-testing/src/main/groovy/org/gradle/test/fixtures/concurrent/ConcurrentSpec.groovy

            } finally {
                instant.mainThread(null)
            }
        }
    
        /**
         * Executes the given action and then blocks until all test threads have completed. The action may define instants for later querying outside the block.
         */
        void async(long timeoutInSeconds = 20, Runnable action) {
            Date timeout = new Date(System.currentTimeMillis() + TimeUnit.SECONDS.toMillis(timeoutInSeconds))
    Registered: Wed Jun 12 18:38:38 UTC 2024
    - Last Modified: Thu Apr 04 07:21:38 UTC 2024
    - 3.5K bytes
    - Viewed (0)
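    The async helper above runs an action and then waits, bounded by a timeout, for all test threads to finish. As a rough Go analogue of that shape (not a port of the Gradle fixture), the hypothetical runAsync below combines a WaitGroup with a timer:

    package main

    import (
        "errors"
        "fmt"
        "sync"
        "time"
    )

    // runAsync runs action, which may start goroutines registered on wg,
    // then waits until they all finish or the timeout expires. It is a
    // hypothetical stand-in for ConcurrentSpec.async, not a port of it.
    func runAsync(timeout time.Duration, wg *sync.WaitGroup, action func()) error {
        action()

        done := make(chan struct{})
        go func() {
            wg.Wait()
            close(done)
        }()

        select {
        case <-done:
            return nil
        case <-time.After(timeout):
            return errors.New("timed out waiting for test threads")
        }
    }

    func main() {
        var wg sync.WaitGroup
        err := runAsync(20*time.Second, &wg, func() {
            wg.Add(1)
            go func() {
                defer wg.Done()
                time.Sleep(100 * time.Millisecond) // simulated test work
            }()
        })
        fmt.Println("async finished, err =", err)
    }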
  4. tensorflow/compiler/mlir/tensorflow/transforms/lower_globals_to_ml_program.cc

        // If we're in the first block, then the argument to that block is the
        // one we're looking for.
        if (auto func = cast<func::FuncOp>(op)) {
          if (blockArg.getOwner()->isEntryBlock()) {
            *out = blockArg;
            return success();
          }
        }
    
        // If we're in an inner block, then we have to find all ops that branch
        // to that block, and trace through them.
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 8.6K bytes
    - Viewed (0)
  5. platforms/core-runtime/base-services/src/main/java/org/gradle/internal/work/WorkerThreadRegistry.java

         *
         * This method blocks until a worker lease is available.
         */
        void runAsWorkerThread(Runnable action);
    
        /**
         * Runs the given action as an unmanaged worker, if not already a worker. This is basically the same as {@link #runAsWorkerThread(Runnable)} but does not block waiting for a lease.
         * Instead, a temporary lease is granted to the current thread.
         *
    Registered: Wed Jun 12 18:38:38 UTC 2024
    - Last Modified: Tue Mar 12 02:21:10 UTC 2024
    - 3.6K bytes
    - Viewed (0)
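    The Javadoc above contrasts runAsWorkerThread, which blocks until a worker lease is available, with an unmanaged variant that grants a temporary lease instead of waiting. A Go analogue of the blocking-lease idea (not Gradle's implementation), sketched with a buffered channel as a counting semaphore and an assumed limit of 4 concurrent workers:

    package main

    import (
        "fmt"
        "sync"
    )

    // leases is a counting semaphore: holding a slot in the channel plays
    // the role of holding a worker lease.
    var leases = make(chan struct{}, 4) // assumed limit of 4 concurrent workers

    // runAsWorker blocks until a lease is available, runs the action, then
    // releases the lease. This mirrors the "blocks until a worker lease is
    // available" contract in the excerpt, not Gradle's code.
    func runAsWorker(action func()) {
        leases <- struct{}{}        // acquire (blocks while all leases are taken)
        defer func() { <-leases }() // release
        action()
    }

    func main() {
        var wg sync.WaitGroup
        for i := 0; i < 8; i++ {
            wg.Add(1)
            i := i
            go func() {
                defer wg.Done()
                runAsWorker(func() { fmt.Println("worker", i, "running") })
            }()
        }
        wg.Wait()
    }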
  6. doc/next/5-toolchain.md

    <!-- https://go.dev/cl/577935 -->
    For 386 and amd64, the compiler will use information from PGO to align certain
    hot blocks in loops. This improves performance by an additional 1-1.5% at
    a cost of an additional 0.1% in text and binary size. This is currently only
    implemented on 386 and amd64 because it has not shown an improvement on other
    platforms. Hot block alignment can be disabled with
    `-gcflags=[<packages>=]-d=alignhot=0`.
    
    ## Assembler {#assembler}
    
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Tue Jun 11 17:18:10 UTC 2024
    - 1.8K bytes
    - Viewed (0)
  7. src/internal/trace/testdata/testprog/stacks.go

    	// Now we will do a bunch of things for which we verify stacks later.
    	// It is impossible to ensure that a goroutine has actually blocked
    	// on a channel, in a select or otherwise. So we kick off goroutines
    	// that need to block first in the hope that while we are executing
    	// the rest of the test, they will block.
    	go func() { // func1
    		select {}
    	}()
    	go func() { // func2
    		var c chan int
    		c <- 0
    	}()
    	go func() { // func3
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Fri May 17 18:48:18 UTC 2024
    - 2.4K bytes
    - Viewed (0)
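    The comment in the excerpt points out that the test cannot prove a goroutine has actually blocked; it merely starts goroutines that are expected to block (an empty select, a send on a nil channel) and lets them do so while the rest of the test runs. A standalone sketch of those two blocking shapes, with a short sleep to show the main goroutine keeps running:

    package main

    import (
        "fmt"
        "time"
    )

    func main() {
        // Blocks forever: an empty select has no case that can proceed.
        go func() {
            select {}
        }()

        // Also blocks forever: a send on a nil channel never succeeds.
        go func() {
            var c chan int
            c <- 0
        }()

        // Give the goroutines a moment to (presumably) reach their blocking
        // points; as the excerpt notes, there is no way to be certain.
        time.Sleep(100 * time.Millisecond)
        fmt.Println("main continues while the two goroutines stay blocked")
    }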
  8. src/cmd/vendor/golang.org/x/sync/errgroup/errgroup.go

    	return &Group{cancel: cancel}, ctx
    }
    
    // Wait blocks until all function calls from the Go method have returned, then
    // returns the first non-nil error (if any) from them.
    func (g *Group) Wait() error {
    	g.wg.Wait()
    	if g.cancel != nil {
    		g.cancel(g.err)
    	}
    	return g.err
    }
    
    // Go calls the given function in a new goroutine.
    // It blocks until the new goroutine can be added without the number of
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Mon Mar 04 17:57:25 UTC 2024
    - 3.3K bytes
    - Viewed (0)
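    The doc comments above state the errgroup contract: Wait blocks until every function passed to Go has returned and then reports the first non-nil error. A minimal usage sketch, assuming the golang.org/x/sync/errgroup module is available:

    package main

    import (
        "context"
        "fmt"

        "golang.org/x/sync/errgroup"
    )

    func main() {
        g, ctx := errgroup.WithContext(context.Background())

        // Three illustrative tasks; an error from any one of them would
        // cancel ctx for the others via the group's context.
        for _, name := range []string{"a", "b", "c"} {
            name := name
            g.Go(func() error {
                select {
                case <-ctx.Done():
                    return ctx.Err()
                default:
                    fmt.Println("task", name, "done")
                    return nil
                }
            })
        }

        // Wait blocks until all g.Go calls have returned and yields the
        // first non-nil error, matching the doc comment in the excerpt.
        if err := g.Wait(); err != nil {
            fmt.Println("error:", err)
        }
    }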
  9. internal/disk/stat_linux.go

    	s := syscall.Statfs_t{}
    	err = syscall.Statfs(path, &s)
    	if err != nil {
    		return Info{}, err
    	}
    	reservedBlocks := s.Bfree - s.Bavail
    	info = Info{
    		Total: uint64(s.Frsize) * (s.Blocks - reservedBlocks),
    		Free:  uint64(s.Frsize) * s.Bavail,
    		Files: s.Files,
    		Ffree: s.Ffree,
    		//nolint:unconvert
    		FSType: getFSType(int64(s.Type)),
    	}
    
    	st := syscall.Stat_t{}
    	err = syscall.Stat(path, &st)
    Registered: Sun Jun 16 00:44:34 UTC 2024
    - Last Modified: Mon Feb 26 19:34:50 UTC 2024
    - 4.8K bytes
    - Viewed (0)
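    The excerpt derives disk capacity the way df does: blocks reserved for the superuser (Bfree - Bavail) are subtracted from the total, and both figures are scaled by the fragment size Frsize. A standalone, Linux-only sketch using syscall.Statfs directly (the Info struct in the excerpt belongs to its own package and is not reproduced here):

    //go:build linux

    package main

    import (
        "fmt"
        "syscall"
    )

    func main() {
        var s syscall.Statfs_t
        if err := syscall.Statfs("/", &s); err != nil {
            fmt.Println("statfs failed:", err)
            return
        }

        // Blocks reserved for the superuser are not available to ordinary
        // users, so they are excluded from the usable total, as in the excerpt.
        reserved := s.Bfree - s.Bavail
        total := uint64(s.Frsize) * (s.Blocks - reserved)
        free := uint64(s.Frsize) * s.Bavail

        fmt.Printf("total: %d bytes, free: %d bytes\n", total, free)
    }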
  10. src/crypto/md5/md5.go

    func (d *digest) Write(p []byte) (nn int, err error) {
    	// Note that we currently call block or blockGeneric
    	// directly (guarded using haveAsm) because this allows
    	// escape analysis to see that p and d don't escape.
    	nn = len(p)
    	d.len += uint64(nn)
    	if d.nx > 0 {
    		n := copy(d.x[d.nx:], p)
    		d.nx += n
    		if d.nx == BlockSize {
    			if haveAsm {
    				block(d, d.x[:])
    			} else {
    				blockGeneric(d, d.x[:])
    			}
    			d.nx = 0
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Mon May 13 18:57:38 UTC 2024
    - 4.3K bytes
    - Viewed (0)
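    Write above buffers input in d.x and, whenever a full 64-byte block (BlockSize) accumulates, hands it to the assembly or generic block function; callers only see the hash.Hash interface. A minimal usage sketch with crypto/md5 (noting MD5 should not be used where collision resistance matters):

    package main

    import (
        "crypto/md5"
        "fmt"
        "io"
    )

    func main() {
        // Streaming use: Write buffers data and runs the block function
        // once per full 64-byte block, as in the excerpt.
        h := md5.New()
        io.WriteString(h, "hello, ")
        io.WriteString(h, "world")
        fmt.Printf("%x\n", h.Sum(nil))

        // One-shot use over a byte slice.
        fmt.Printf("%x\n", md5.Sum([]byte("hello, world")))
    }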