Results 1 - 10 of 12 for Predecessors (0.26 sec)

  1. src/cmd/compile/internal/ssa/debug.go

    		state.currentState.reset(ourStartState)
    	}
    
    	// Zero predecessors
    	if len(preds) == 0 {
    		if previousBlock != nil {
    			state.f.Fatalf("Function %v, block %s with no predecessors is not first block, has previous %s", state.f, b.String(), previousBlock.String())
    		}
    		// startState is empty
    		reset(abt.T{})
    		return abt.T{}, blockChanged
    	}
    
    	// One predecessor
    	l0 := blockLocs[preds[0].ID]
    	p0 := l0.endState
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Mon Jun 10 19:44:43 UTC 2024
    - 58.4K bytes
    - Viewed (0)
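
The debug.go excerpt above derives a block's variable-location start state from its predecessors: a block with no predecessors must be the entry block and starts empty, while a block with exactly one predecessor simply reuses that predecessor's end state. A minimal Go sketch of that policy, using hypothetical Block and State types rather than the real ssa ones:

    package main

    import "fmt"

    // State stands in for the per-block variable-location state tracked
    // by the real debug.go; here it is just a map.
    type State map[string]string

    // Block is a hypothetical CFG block carrying its predecessors and the
    // state that held at its end.
    type Block struct {
        ID    int
        Preds []*Block
        End   State
    }

    // startState mirrors the zero- and one-predecessor cases from the
    // excerpt: the entry block starts empty, and a block with a single
    // predecessor inherits that predecessor's end state unchanged.
    func startState(b *Block) (State, error) {
        switch len(b.Preds) {
        case 0:
            return State{}, nil // entry block: nothing is live yet
        case 1:
            return b.Preds[0].End, nil // forward the lone predecessor's state
        default:
            // The real code intersects all predecessors' end states here.
            return nil, fmt.Errorf("merge of %d predecessors not shown", len(b.Preds))
        }
    }

    func main() {
        entry := &Block{ID: 0, End: State{"x": "AX"}}
        next := &Block{ID: 1, Preds: []*Block{entry}}
        s, _ := startState(next)
        fmt.Println(s) // map[x:AX]
    }
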
  2. tensorflow/compiler/mlir/tensorflow/transforms/sparsecore/embedding_pipelining.cc

        for (Operation* op : ops_to_process) {
          if (predecessors) {
            for (Value operand : op->getOperands()) {
              // Stop at the block boundary.
              if (mlir::isa<BlockArgument>(operand)) continue;
    
              Operation* predecessor = operand.getDefiningOp();
              if (!operations->contains(predecessor) &&
                  !ops_to_avoid.contains(predecessor)) {
                new_ops.insert(operand.getDefiningOp());
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 92.9K bytes
    - Viewed (0)
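
The C++ loop above grows a set of operations by following each operand back to its defining op, stopping at block arguments (which have no defining op) and skipping anything in an avoid set. A rough Go sketch of the same predecessor-collection idea over a generic op graph; every type and field name here is invented for illustration:

    package main

    import "fmt"

    // Op is a hypothetical operation whose operands may be produced by
    // other ops or be block arguments.
    type Op struct {
        Name     string
        Operands []*Value
    }

    // Value is produced either by a defining op or by a block argument,
    // in which case Def is nil.
    type Value struct {
        Def *Op
    }

    // collectPredecessors walks the operands of each op in work and adds
    // any defining op that is neither already collected nor in avoid,
    // mirroring the stop-at-the-block-boundary check in the excerpt.
    func collectPredecessors(work []*Op, collected, avoid map[*Op]bool) []*Op {
        var newOps []*Op
        for _, op := range work {
            for _, operand := range op.Operands {
                if operand.Def == nil {
                    continue // block argument: stop at the block boundary
                }
                pred := operand.Def
                if !collected[pred] && !avoid[pred] {
                    collected[pred] = true
                    newOps = append(newOps, pred)
                }
            }
        }
        return newOps
    }

    func main() {
        a := &Op{Name: "a"}
        b := &Op{Name: "b", Operands: []*Value{{Def: a}, {Def: nil}}}
        preds := collectPredecessors([]*Op{b}, map[*Op]bool{b: true}, map[*Op]bool{})
        for _, p := range preds {
            fmt.Println(p.Name) // a
        }
    }
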
  3. tensorflow/compiler/jit/mark_for_compilation_pass.cc

      best_pred_for_node[from] = -1;
    
      int rpo_index = 0, current_rpo_node;
      do {
        current_rpo_node = rpo[rpo_index++];
        std::optional<int> some_pred, preferred_pred;
        for (int pred : cycles_graph_.Predecessors(current_rpo_node)) {
          if (!best_pred_for_node.contains(pred)) {
            continue;
          }
    
          // Ignore the from->to edge since we're trying to find an alternate path.
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed Feb 21 12:19:41 UTC 2024
    - 85.3K bytes
    - Viewed (0)
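
Result 3 scans nodes in reverse post-order and, for each node, looks through its predecessors for one already known to be reachable, deliberately ignoring the direct from->to edge so that only alternate paths count. A simplified Go sketch of that predecessor scan over a small adjacency-list graph; the representation and names are assumptions, not the TensorFlow API:

    package main

    import "fmt"

    // alternatePath records, for each node visited in reverse post-order,
    // a predecessor through which it can be reached from `from`, ignoring
    // the direct from->to edge so only alternate paths are considered.
    func alternatePath(rpo []int, preds map[int][]int, from, to int) map[int]int {
        bestPred := map[int]int{from: -1} // -1 marks the start node
        for _, node := range rpo {
            if node == from {
                continue
            }
            for _, p := range preds[node] {
                if _, ok := bestPred[p]; !ok {
                    continue // predecessor not reachable from `from`
                }
                if p == from && node == to {
                    continue // ignore the direct from->to edge
                }
                bestPred[node] = p
                break
            }
        }
        return bestPred
    }

    func main() {
        // from=0, to=3, with an alternate path 0 -> 1 -> 2 -> 3.
        rpo := []int{0, 1, 2, 3}
        preds := map[int][]int{1: {0}, 2: {1}, 3: {0, 2}}
        best := alternatePath(rpo, preds, 0, 3)
        fmt.Println(best[3]) // 2: node 3 is reachable via the alternate path
    }
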
  4. src/cmd/compile/internal/ssa/regalloc.go

    //
    // The register allocator requires that a block is not scheduled until
    // at least one of its predecessors has been scheduled. The most recent
    // such predecessor provides the starting register state for a block.
    //
    // It also requires that there are no critical edges (critical =
    // comes from a block with >1 successor and goes to a block with >1
    // predecessor).  This makes it easy to add fixup code on merge edges -
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Tue Nov 21 17:49:56 UTC 2023
    - 87.2K bytes
    - Viewed (0)
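
The regalloc.go comment defines a critical edge as one that leaves a block with more than one successor and enters a block with more than one predecessor; such edges must be split so fixup code has a unique home. A short Go sketch of just that definition over a toy CFG (the Block type is hypothetical):

    package main

    import "fmt"

    // Block is a hypothetical CFG node with successor and predecessor lists.
    type Block struct {
        Name  string
        Succs []*Block
        Preds []*Block
    }

    // isCriticalEdge reports whether the edge from -> to is critical: it
    // leaves a block with more than one successor and enters a block with
    // more than one predecessor, so there is no unique place on the edge
    // to put fixup code without splitting it.
    func isCriticalEdge(from, to *Block) bool {
        return len(from.Succs) > 1 && len(to.Preds) > 1
    }

    func main() {
        a := &Block{Name: "a"}
        b := &Block{Name: "b"}
        merge := &Block{Name: "merge"}
        a.Succs = []*Block{b, merge} // a branches two ways
        b.Succs = []*Block{merge}    // b falls through to merge
        merge.Preds = []*Block{a, b} // merge joins both paths
        fmt.Println(isCriticalEdge(a, merge)) // true: needs splitting
        fmt.Println(isCriticalEdge(b, merge)) // false: b has one successor
    }
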
  5. src/cmd/compile/internal/ssa/rewrite.go

    	var args []*Value
    	for _, a := range target.Args {
    		if a != load && a.Block.ID == target.Block.ID {
    			args = append(args, a)
    		}
    	}
    
    	// memPreds contains memory states known to be predecessors of load's
    	// memory state. It is lazily initialized.
    	var memPreds map[*Value]bool
    	for i := 0; len(args) > 0; i++ {
    		const limit = 100
    		if i >= limit {
    			// Give up if we have done a lot of iterations.
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Fri Jun 07 19:02:52 UTC 2024
    - 64.2K bytes
    - Viewed (0)
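
Result 5 shows two defensive patterns worth noting: a lazily initialized set (memPreds) that is only allocated if the search actually needs it, and a hard iteration limit so the analysis gives up instead of running unbounded. A generic Go sketch of a bounded predecessor search with a lazily built visited set; the names are illustrative only:

    package main

    import "fmt"

    // node is a hypothetical value in a dependence graph.
    type node struct {
        id    int
        preds []*node
    }

    // reachesWithin reports whether target is a (transitive) predecessor
    // of start, giving up after limit worklist iterations in the spirit of
    // the bounded search in the excerpt. The visited set is built lazily:
    // it is only allocated the first time the loop needs it.
    func reachesWithin(start, target *node, limit int) (found, gaveUp bool) {
        work := []*node{start}
        var visited map[*node]bool // lazily initialized
        for i := 0; len(work) > 0; i++ {
            if i >= limit {
                return false, true // give up after too many iterations
            }
            n := work[len(work)-1]
            work = work[:len(work)-1]
            if n == target {
                return true, false
            }
            if visited == nil {
                visited = make(map[*node]bool)
            }
            if visited[n] {
                continue
            }
            visited[n] = true
            work = append(work, n.preds...)
        }
        return false, false
    }

    func main() {
        a := &node{id: 1}
        b := &node{id: 2, preds: []*node{a}}
        c := &node{id: 3, preds: []*node{b}}
        found, gaveUp := reachesWithin(c, a, 100)
        fmt.Println(found, gaveUp) // true false
    }
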
  6. tensorflow/compiler/mlir/g3doc/_includes/tf_passes.md

      }) {...} : () -> tensor<4xf32>
    ```
    ### `-tf-tpu-colocate-splits`
    
    _Colocates each Split op with its predecessor_
    
    It is beneficial for performance to assign a `Split` op to the same device
    as its predecessor. This is because the weight of cut edges is always
    minimized when the `Split` is with its predecessor. This colocation
    constraint will be used by the placer graph optimization to assign a device
    to the op.
    
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed Aug 02 02:26:39 UTC 2023
    - 96.4K bytes
    - Viewed (0)
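
The pass description in result 6 boils down to propagating a placement constraint from an op to the Split that consumes its output. A toy Go sketch of that device-propagation idea; the Node type and device strings are invented, and this is not the MLIR pass itself:

    package main

    import "fmt"

    // Node is a hypothetical graph op with an optional device assignment
    // and a single input-producing predecessor.
    type Node struct {
        Op     string
        Device string
        Input  *Node
    }

    // colocateSplits copies the predecessor's device onto every Split node
    // that does not already have one, in the spirit of the colocation
    // constraint described for -tf-tpu-colocate-splits.
    func colocateSplits(nodes []*Node) {
        for _, n := range nodes {
            if n.Op == "Split" && n.Device == "" && n.Input != nil {
                n.Device = n.Input.Device
            }
        }
    }

    func main() {
        weights := &Node{Op: "Const", Device: "/device:TPU:0"}
        split := &Node{Op: "Split", Input: weights}
        colocateSplits([]*Node{weights, split})
        fmt.Println(split.Device) // /device:TPU:0
    }
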
  7. src/cmd/compile/internal/ssa/prove.go

    		// If p and p.Succs[0] are dominators it means that every path
    		// from entry to b passes through p and p.Succs[0]. We care that
    		// no path from entry to b passes through p.Succs[1]. If p.Succs[0]
    		// has one predecessor then (apart from the degenerate case),
    		// there is no path from entry that can reach b through p.Succs[1].
    		// TODO: how about p->yes->b->yes, i.e. a loop in yes.
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Mon Mar 04 17:30:21 UTC 2024
    - 48.9K bytes
    - Viewed (0)
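
The prove.go reasoning is: if both p and p.Succs[0] dominate b, and p.Succs[0] has p as its only predecessor, then every path from entry to b must take the p -> p.Succs[0] edge, so whatever that edge implies can be assumed at b. A compact Go sketch of just that check; Block, the dominance relation, and the field names are stand-ins for the real ssa types:

    package main

    import "fmt"

    // Block is a hypothetical CFG node; Succs[0] is the branch-taken edge.
    type Block struct {
        Name  string
        Succs []*Block
        Preds []*Block
    }

    // domSet stands in for real dominator-tree queries; the relation is
    // supplied explicitly as a set of (dominator, dominated) pairs.
    type domSet map[[2]*Block]bool

    func (d domSet) dominates(a, b *Block) bool { return a == b || d[[2]*Block{a, b}] }

    // branchFactHoldsAt reports whether a condition proved on the edge
    // p -> p.Succs[0] can be assumed at b: both p and p.Succs[0] must
    // dominate b, and p.Succs[0] must have p as its only predecessor, so
    // no path from entry reaches b through p.Succs[1].
    func branchFactHoldsAt(dom domSet, p, b *Block) bool {
        if len(p.Succs) < 2 {
            return false
        }
        taken := p.Succs[0]
        return dom.dominates(p, b) && dom.dominates(taken, b) && len(taken.Preds) == 1
    }

    func main() {
        p := &Block{Name: "p"}
        yes := &Block{Name: "yes", Preds: []*Block{p}}
        no := &Block{Name: "no", Preds: []*Block{p}}
        b := &Block{Name: "b", Preds: []*Block{yes}}
        p.Succs = []*Block{yes, no}
        dom := domSet{{p, b}: true, {yes, b}: true}
        fmt.Println(branchFactHoldsAt(dom, p, b)) // true
    }
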
  8. src/html/template/escape_test.go

    				},
    				`<a href="/foo?x=foo?x=%3cbar%3e?x=baz">`,
    			},
    		*/
    	}
    
    	// pred is a template function that returns the predecessor of a
    	// natural number for testing recursive templates.
    	fns := FuncMap{"pred": func(a ...any) (any, error) {
    		if len(a) == 1 {
    			if i, _ := a[0].(int); i > 0 {
    				return i - 1, nil
    			}
    		}
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Mon Oct 16 03:29:27 UTC 2023
    - 56.2K bytes
    - Viewed (0)
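
The pred function in result 8 is installed through a FuncMap so a template can call itself recursively on ever-smaller numbers until it reaches zero. A standalone sketch of that pattern with the standard html/template package; the countdown template text is an invented example, not the one from escape_test.go:

    package main

    import (
        "fmt"
        "html/template"
        "os"
    )

    func main() {
        // pred returns the predecessor of a positive integer, as in the test.
        fns := template.FuncMap{"pred": func(a ...any) (any, error) {
            if len(a) == 1 {
                if i, _ := a[0].(int); i > 0 {
                    return i - 1, nil
                }
            }
            return nil, fmt.Errorf("undefined pred of %v", a)
        }}

        // countdown invokes itself recursively via pred until it hits zero.
        const text = `{{define "countdown"}}{{.}} {{if .}}{{template "countdown" pred .}}{{end}}{{end}}{{template "countdown" .}}`
        t := template.Must(template.New("t").Funcs(fns).Parse(text))
        if err := t.Execute(os.Stdout, 3); err != nil { // prints: 3 2 1 0
            panic(err)
        }
    }
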
  9. android/guava/src/com/google/common/util/concurrent/AbstractFuture.java

            succ = curr.next;
            if (curr.thread != null) { // we aren't unlinking this node, update pred.
              pred = curr;
            } else if (pred != null) { // We are unlinking this node and it has a predecessor.
              pred.next = succ;
              if (pred.thread == null) { // We raced with another node that unlinked pred. Restart.
                continue restart;
              }
    Registered: Wed Jun 12 16:38:11 UTC 2024
    - Last Modified: Fri Jun 07 22:25:23 UTC 2024
    - 63.1K bytes
    - Viewed (1)
  10. guava/src/com/google/common/util/concurrent/AbstractFuture.java

            succ = curr.next;
            if (curr.thread != null) { // we aren't unlinking this node, update pred.
              pred = curr;
            } else if (pred != null) { // We are unlinking this node and it has a predecessor.
              pred.next = succ;
              if (pred.thread == null) { // We raced with another node that unlinked pred. Restart.
                continue restart;
              }
    Registered: Wed Jun 12 16:38:11 UTC 2024
    - Last Modified: Fri Jun 07 22:25:23 UTC 2024
    - 62.8K bytes
    - Viewed (1)
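
Results 9 and 10 show the same waiter-unlinking loop from the two flavors of Guava's AbstractFuture: a node whose thread has been cleared is spliced out by pointing its predecessor past it, and if the predecessor itself was cleared by a racing thread the scan restarts. A single-threaded Go sketch of just the splice-out step; the waiter type is hypothetical and the restart-on-race handling of the real code is omitted:

    package main

    import "fmt"

    // waiter is a hypothetical node in a singly linked list of blocked
    // threads; an empty owner marks a node whose thread was cleared.
    type waiter struct {
        owner string
        next  *waiter
    }

    // unlinkCleared removes every cleared node by making its predecessor
    // point at its successor, the same splice the Guava loop performs.
    // The real code also restarts if the predecessor was concurrently
    // cleared; that case cannot arise in this single-threaded sketch.
    func unlinkCleared(head *waiter) *waiter {
        var pred *waiter
        for curr := head; curr != nil; curr = curr.next {
            succ := curr.next
            if curr.owner != "" {
                pred = curr // keeping this node; it becomes the new predecessor
            } else if pred != nil {
                pred.next = succ // unlink curr: it has a predecessor
            } else {
                head = succ // unlink curr from the front of the list
            }
        }
        return head
    }

    func main() {
        c := &waiter{owner: "t3"}
        b := &waiter{owner: "", next: c} // cleared; should be spliced out
        a := &waiter{owner: "t1", next: b}
        for w := unlinkCleared(a); w != nil; w = w.next {
            fmt.Println(w.owner) // t1, then t3
        }
    }
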