Results 21 - 30 of 93 for predecessor (0.31 sec)

  1. tensorflow/compiler/mlir/tf2xla/internal/passes/extract_head_tail_outside_compilation.cc

        auto predecessors = analysis.DirectControlPredecessors(&cluster_op);
        if (!predecessors.empty() && !IsEmbeddingOp(&cluster_op)) {
          bool skip = false;
          for (Operation* predecessor : llvm::reverse(predecessors)) {
            if (IsEmbeddingOp(predecessor)) continue;
            skip = !head_outside_compiled_ops.contains(predecessor);
            break;
          }
          if (skip) continue;
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 20.6K bytes
    - Viewed (0)
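    The control flow in the snippet above is easy to misread: only the most recent non-embedding control predecessor is consulted, and the cluster op is skipped unless that predecessor has already been head outside compiled. A minimal, self-contained sketch of that decision, using plain standard-library containers as stand-ins for the real MLIR types (Operation*, llvm::SmallVector, and the head_outside_compiled_ops set in the excerpt):

        #include <set>
        #include <string>
        #include <vector>

        struct Op { std::string name; bool is_embedding; };

        // Returns true when the cluster op should be skipped: its nearest
        // non-embedding control predecessor (scanning the predecessor list
        // back to front) has not been head outside compiled yet. Embedding
        // predecessors are ignored, mirroring the `continue` in the excerpt.
        bool SkipClusterOp(const std::vector<const Op*>& predecessors,
                           const std::set<const Op*>& head_outside_compiled_ops) {
          for (auto it = predecessors.rbegin(); it != predecessors.rend(); ++it) {
            if ((*it)->is_embedding) continue;
            return head_outside_compiled_ops.count(*it) == 0;
          }
          return false;  // only embedding (or no) predecessors: do not skip
        }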
  2. tensorflow/compiler/mlir/tensorflow/transforms/sparsecore/sparsecore_passes.td

      let constructor = "TFDevice::CreateEmbeddingProgramKeyPass()";
      let description = [{
        Passes in the program key to embedding ops. Will move the embedding ops
        after a _TPUCompileMlir op if there is no predecessor _TPUCompileMlir op.
        Both the embedding op and compile op are assumed to be wrapped in separate
        tf_device.launch() ops. This is because the embedding op is head outside
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Mar 28 23:42:09 UTC 2024
    - 3.9K bytes
    - Viewed (0)
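    The reordering the description talks about can be pictured on a flat list of ops: if no _TPUCompileMlir op appears before the embedding op, the embedding op is moved to just after the compile op. A small stand-alone sketch of that ordering rule, simplified to a single embedding op and a single compile op (the op names come from the description; everything else is a stand-in for the real MLIR rewrite):

        #include <algorithm>
        #include <string>
        #include <vector>

        // Moves the embedding op directly after the compile op when it
        // currently appears before it, i.e. when the embedding op has no
        // predecessor compile op in the list.
        void MoveEmbeddingAfterCompile(std::vector<std::string>& ops) {
          auto embedding = std::find(ops.begin(), ops.end(), "embedding_launch");
          auto compile = std::find(ops.begin(), ops.end(), "_TPUCompileMlir_launch");
          if (embedding == ops.end() || compile == ops.end()) return;
          if (embedding < compile) {
            // Rotate the embedding op into the slot right after the compile op.
            std::rotate(embedding, embedding + 1, compile + 1);
          }
        }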
  3. platforms/core-configuration/model-core/src/main/java/org/gradle/model/internal/registry/ModelBinding.java

         * the target state, not the input state. Implicitly, a rule accepts as input the subject in the state that is the predecessor of the target state.
         */
        public BindingPredicate getPredicate() {
            return predicate;
        }
    
        public boolean isBound() {
            return boundTo != null;
        }
    
    Registered: Wed Jun 12 18:38:38 UTC 2024
    - Last Modified: Thu Sep 28 09:51:04 UTC 2023
    - 3.1K bytes
    - Viewed (0)
  4. tensorflow/compiler/mlir/tensorflow/transforms/sparsecore/embedding_program_key.cc

            // (parent) block, we need to check that it's before (the
            // parent of) the preprocess_op.
            if (o->isBeforeInBlock(it->second)) {
              break;  // valid compile predecessor
            } else {
              return WalkResult::advance();
            }
          }
          o = o->getParentOp();
        }
    // Check that the compile op actually passes its results to its parents.
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 17.4K bytes
    - Viewed (0)
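    The surrounding loop (walking o = o->getParentOp() until a block containing a compile op is found) is a common MLIR pattern: climb the ancestor chain of one op until it shares a block with another op, then compare positions with isBeforeInBlock. A hedged sketch of that ancestor walk, using only the mlir::Operation APIs that appear in the excerpt (the function name and the direction of the comparison are illustrative, not taken from the pass):

        #include "mlir/IR/Operation.h"

        // Walks `op` up through its ancestors until one of them lives in the
        // same block as `other`, then reports whether that ancestor comes
        // before `other`. Returns false if no ancestor shares a block.
        bool AncestorIsBeforeInBlock(mlir::Operation* op, mlir::Operation* other) {
          for (mlir::Operation* o = op; o != nullptr; o = o->getParentOp()) {
            if (o->getBlock() == other->getBlock())
              return o->isBeforeInBlock(other);
          }
          return false;
        }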
  5. tensorflow/compiler/mlir/tensorflow/analysis/side_effect_analysis.h

        AnalyzeRegion(region);
      }
    
      SideEffectAnalysisInfo(SideEffectAnalysisInfo&&) = default;
    
      // Returns a vector of ops that are direct control predecessors of `op`,
      // sorted in program order. If `filter` is provided, only predecessors that
      // pass the filter (returning true) will be included.
      const llvm::SmallVector<Operation*, 4>& DirectControlPredecessors(
          Operation* op) const;
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed May 15 09:04:13 UTC 2024
    - 14.8K bytes
    - Viewed (0)
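    The comment above mentions a `filter` variant of DirectControlPredecessors. A minimal usage sketch of how such filtering could be layered on top of the unfiltered accessor shown in the declaration; the analysis object is taken as a template parameter so the sketch relies only on the one method visible in the excerpt, and the helper name is made up:

        #include <functional>

        #include "llvm/ADT/SmallVector.h"
        #include "mlir/IR/Operation.h"

        // Collects the direct control predecessors of `op` that pass `filter`,
        // preserving the program order of the underlying predecessor list.
        template <typename AnalysisInfo>
        llvm::SmallVector<mlir::Operation*, 4> FilteredControlPredecessors(
            const AnalysisInfo& analysis, mlir::Operation* op,
            const std::function<bool(mlir::Operation*)>& filter) {
          llvm::SmallVector<mlir::Operation*, 4> result;
          for (mlir::Operation* pred : analysis.DirectControlPredecessors(op)) {
            if (filter(pred)) result.push_back(pred);
          }
          return result;
        }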
  6. subprojects/core/src/main/java/org/gradle/execution/plan/Node.java

         */
        public void maybeInheritFinalizerGroups() {
            NodeGroup newGroup = group;
            for (Node predecessor : getDependencyPredecessors()) {
                if (predecessor.getGroup() instanceof HasFinalizers) {
                    newGroup = maybeInheritGroupAsFinalizerDependency((HasFinalizers) predecessor.getGroup(), newGroup);
                }
            }
            if (newGroup != group) {
                setGroup(newGroup);
    Registered: Wed Jun 12 18:38:38 UTC 2024
    - Last Modified: Thu Aug 24 13:30:48 UTC 2023
    - 22.7K bytes
    - Viewed (0)
  7. platforms/core-configuration/model-core/src/main/java/org/gradle/model/internal/registry/DefaultModelRegistry.java

                    // Node must be at the predecessor state before calculating dependencies
                    NodeAtState predecessor = new NodeAtState(getPath(), getTargetState().previous());
                    dependencies.add(graph.nodeAtState(predecessor));
                    // Transition any other nodes that depend on the predecessor state
                    dependencies.add(new TransitionDependents(predecessor));
                    seenPredecessor = true;
    Registered: Wed Jun 12 18:38:38 UTC 2024
    - Last Modified: Fri May 24 15:40:00 UTC 2024
    - 45.7K bytes
    - Viewed (0)
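    The `getTargetState().previous()` call is the heart of this snippet: a node must reach the state immediately preceding its target before its dependencies are calculated, which is also the state a rule implicitly accepts as input (the convention documented in result 3). A tiny stand-alone sketch of that predecessor-state relationship on an ordered state enum; the state names here are illustrative, not Gradle's:

        #include <cassert>

        // An ordered lifecycle; each state's predecessor is simply the
        // previous enumerator, mirroring getTargetState().previous().
        enum class State { Registered, Discovered, Created, Mutated, Finalized };

        State Previous(State s) {
          assert(s != State::Registered && "the initial state has no predecessor");
          return static_cast<State>(static_cast<int>(s) - 1);
        }

        // A rule targeting State::Mutated implicitly takes its subject at
        // Previous(State::Mutated), i.e. State::Created, as input.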
  8. src/cmd/compile/internal/ssa/check.go

    		for _, c := range b.Preds {
    			if !blockMark[c.b.ID] {
    				f.Fatalf("predecessor block %v for %v is missing", c, b)
    			}
    		}
    		for _, c := range b.Succs {
    			if !blockMark[c.b.ID] {
    				f.Fatalf("successor block %v for %v is missing", c, b)
    			}
    		}
    	}
    
    	if len(f.Entry.Preds) > 0 {
    		f.Fatalf("entry block %s of %s has predecessor(s) %v", f.Entry, f.Name, f.Entry.Preds)
    	}
    
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Tue Apr 09 16:41:23 UTC 2024
    - 17.6K bytes
    - Viewed (0)
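    The checker above enforces two CFG invariants around predecessors: every predecessor (and successor) edge must point at a block that actually belongs to the function, and the entry block must have no predecessors at all. A compact stand-alone sketch of the same two checks over a toy CFG, using plain structs rather than the SSA package's types:

        #include <cstdio>
        #include <unordered_set>
        #include <vector>

        struct Block { int id; std::vector<int> preds, succs; };

        // Returns false (and prints why) if a pred/succ edge references an
        // unknown block, or if the entry block (blocks[0]) has predecessors.
        bool CheckCfg(const std::vector<Block>& blocks) {
          std::unordered_set<int> known;
          for (const Block& b : blocks) known.insert(b.id);
          for (const Block& b : blocks) {
            for (int p : b.preds)
              if (!known.count(p)) {
                std::printf("predecessor %d of b%d is missing\n", p, b.id);
                return false;
              }
            for (int s : b.succs)
              if (!known.count(s)) {
                std::printf("successor %d of b%d is missing\n", s, b.id);
                return false;
              }
          }
          if (!blocks.empty() && !blocks[0].preds.empty()) {
            std::printf("entry block b%d has predecessors\n", blocks[0].id);
            return false;
          }
          return true;
        }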
  9. src/cmd/compile/internal/ssa/dom_test.go

    	blocs = append(blocs,
    		Bloc("entry",
    			Valu("mem", OpInitMem, types.TypeMem, 0, nil),
    			Valu("p", OpConstBool, types.Types[types.TBOOL], 1, nil),
    			Goto(blockn(0)),
    		),
    	)
    
    	// We want predecessor lists to be long, so 2/3rds of the blocks have a
    	// successor of the first or last block.
    	for i := 0; i < size; i++ {
    		switch i % 3 {
    		case 0:
    			blocs = append(blocs, Bloc(blockn(i),
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Tue Mar 26 19:58:28 UTC 2024
    - 13.3K bytes
    - Viewed (0)
  10. tensorflow/compiler/mlir/tensorflow/transforms/replicate_invariant_op_hoisting.cc

          return WalkResult::interrupt();
        return WalkResult::advance();
      });
      return result.wasInterrupted();
    }
    
    // Make invariant the `ShapeOp`, or a `ReadVariableOp` that is the `ShapeOp`'s
    // predecessor.
    void MakeShapeOpInvariant(tf_device::ReplicateOp replicate_op, int num_replicas,
                              Block* replicate_block, TF::ShapeOp shape_op) {
      // Ignore ShapeOps that have virtual devices.
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 7.5K bytes
    - Viewed (0)
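    The comment describes hoisting a ShapeOp together with the ReadVariableOp that feeds it. A hedged sketch of how that predecessor could be located from the ShapeOp's single operand, using only generic mlir::Operation / mlir::Value APIs and matching the op by its registered name rather than the TF dialect classes, purely to keep the example self-contained:

        #include "mlir/IR/Operation.h"
        #include "mlir/IR/Value.h"

        // Returns the tf.ReadVariableOp defining the (single) operand of
        // `shape_op`, or nullptr if the operand is a block argument or is
        // defined by some other op.
        mlir::Operation* ReadVariablePredecessor(mlir::Operation* shape_op) {
          if (shape_op->getNumOperands() != 1) return nullptr;
          mlir::Operation* def = shape_op->getOperand(0).getDefiningOp();
          if (def && def->getName().getStringRef() == "tf.ReadVariableOp")
            return def;
          return nullptr;
        }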