Results 21 - 30 of 42 for backtracker (0.18 sec)

  1. src/regexp/exec.go

    		return nil
    	}
    
    	if re.onepass != nil {
    		return re.doOnePass(r, b, s, pos, ncap, dstCap)
    	}
    	if r == nil && len(b)+len(s) < re.maxBitStateLen {
    		return re.backtrack(b, s, pos, ncap, dstCap)
    	}
    
    	m := re.get()
    	i, _ := m.inputs.init(r, b, s)
    
    	m.init(ncap)
    	if !m.match(i, pos) {
    		re.put(m)
    		return nil
    	}
    
    	dstCap = append(dstCap, m.matchcap...)
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Sat Jun 04 20:10:54 UTC 2022
    - 12.3K bytes
    - Viewed (0)
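
The exec.go excerpt above is the engine dispatch in Go's regexp package: it prefers the one-pass engine, falls back to the bit-state backtracker for short byte/string inputs (not rune readers), and otherwise uses the NFA machine. A minimal usage sketch; which engine runs is an internal detail and is not observable from the API:

    package main

    import (
    	"fmt"
    	"regexp"
    )

    func main() {
    	// For a short string input, the dispatch shown above can route the
    	// search through re.backtrack rather than the full NFA machine.
    	re := regexp.MustCompile(`(a+)(b+)`)
    	fmt.Println(re.FindStringSubmatchIndex("xaaabbby")) // [1 7 1 4 4 7]
    }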
  2. misc/go_android_exec/main.go

    			}
    		}
    	}
    
    	deviceBin := fmt.Sprintf("%s/%s", deviceGotmp, binName)
    	if err := adb("push", os.Args[1], deviceBin); err != nil {
    		return 0, err
    	}
    
    	// Forward SIGQUIT from the go command to show backtraces from
    	// the binary instead of from this wrapper.
    	quit := make(chan os.Signal, 1)
    	signal.Notify(quit, syscall.SIGQUIT)
    	go func() {
    		for range quit {
    			// We don't have the PID of the running process; use the
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Mon Aug 21 17:46:57 UTC 2023
    - 15.3K bytes
    - Viewed (0)
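
The go_android_exec excerpt pushes the test binary to the device and then relays SIGQUIT so the backtrace comes from the binary under test rather than from the wrapper. A minimal sketch of that signal-forwarding pattern, assuming a hypothetical child command in place of the adb wrapper:

    package main

    import (
    	"log"
    	"os"
    	"os/exec"
    	"os/signal"
    	"syscall"
    )

    func main() {
    	cmd := exec.Command("sleep", "30") // hypothetical child binary
    	if err := cmd.Start(); err != nil {
    		log.Fatal(err)
    	}

    	// Forward SIGQUIT to the child so its backtrace is shown, not ours.
    	quit := make(chan os.Signal, 1)
    	signal.Notify(quit, syscall.SIGQUIT)
    	go func() {
    		for sig := range quit {
    			_ = cmd.Process.Signal(sig)
    		}
    	}()

    	_ = cmd.Wait()
    }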
  3. android/guava/src/com/google/common/graph/Graphs.java

       * is always detected before reusing an edge, so no special logic is required. In the undirected
       * case, we must take care not to "backtrack" over an edge (i.e. going from A to B and then going
       * from B to A).
       */
      private static boolean canTraverseWithoutReusingEdge(
          Graph<?> graph, Object nextNode, @CheckForNull Object previousNode) {
    Registered: Wed Jun 12 16:38:11 UTC 2024
    - Last Modified: Mon Apr 01 16:15:01 UTC 2024
    - 21.2K bytes
    - Viewed (0)
  4. guava/src/com/google/common/graph/Graphs.java

       * is always detected before reusing an edge, so no special logic is required. In the undirected
       * case, we must take care not to "backtrack" over an edge (i.e. going from A to B and then going
       * from B to A).
       */
      private static boolean canTraverseWithoutReusingEdge(
          Graph<?> graph, Object nextNode, @CheckForNull Object previousNode) {
    Registered: Wed Jun 12 16:38:11 UTC 2024
    - Last Modified: Mon Apr 01 16:15:01 UTC 2024
    - 21.7K bytes
    - Viewed (0)
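
Both Graphs.java results describe the same guard: in an undirected graph, stepping straight back across the edge just traversed is not a cycle and must not count as one. A simplified Go sketch of that check, using hypothetical string node IDs in place of Guava's Graph API (the real method also handles a null previous node):

    package main

    import "fmt"

    // canTraverseWithoutReusingEdge reports whether stepping to next reuses the
    // edge just traversed from prev. In a directed graph a revisited node is
    // always a genuine cycle, so no special handling is needed; in an undirected
    // graph, next == prev would mean walking the same edge straight back.
    func canTraverseWithoutReusingEdge(directed bool, next, prev string) bool {
    	if directed {
    		return true
    	}
    	return next != prev
    }

    func main() {
    	fmt.Println(canTraverseWithoutReusingEdge(false, "C", "A")) // true: moving on to a new node
    	fmt.Println(canTraverseWithoutReusingEdge(false, "A", "A")) // false: immediate backtrack over the same edge
    }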
  5. pkg/test/kube/dump.go

    						scopes.Framework.Warnf("Unable to write envoy err log for VM cluster/pod/container: %s/%s/%s/%s: %v",
    							c.Name(), pod.Namespace, pod.Name, container.Name, err)
    					}
    					if strings.Contains(stdout, "envoy backtrace") {
    						scopes.Framework.Errorf("FAIL: VM envoy crashed in cluster/pod/container: %s/%s/%s/%s. See log: %s",
    							c.Name(), pod.Namespace, pod.Name, container.Name, prow.ArtifactsURL(fname))
    
    Registered: Fri Jun 14 15:00:06 UTC 2024
    - Last Modified: Thu Jun 06 22:12:34 UTC 2024
    - 22.2K bytes
    - Viewed (0)
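
The dump.go excerpt treats the literal string "envoy backtrace" in a container's captured stdout as evidence that the Envoy proxy crashed. A minimal sketch of that check in isolation; the envoyCrashed helper and the sample string are illustrative only, not part of Istio's test framework or a real Envoy log:

    package main

    import (
    	"fmt"
    	"strings"
    )

    // envoyCrashed reports whether captured container output contains the crash
    // marker that the dump code above looks for.
    func envoyCrashed(stdout string) bool {
    	return strings.Contains(stdout, "envoy backtrace")
    }

    func main() {
    	sample := "...\nenvoy backtrace\n..." // made-up stand-in output
    	fmt.Println(envoyCrashed(sample))     // true
    }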
  6. src/internal/abi/type.go

    type Name struct {
    	Bytes *byte
    }
    
    // DataChecked does pointer arithmetic on n's Bytes, and that arithmetic is asserted to
    // be safe for the reason in whySafe (which can appear in a backtrace, etc.)
    func (n Name) DataChecked(off int, whySafe string) *byte {
    	return (*byte)(addChecked(unsafe.Pointer(n.Bytes), uintptr(off), whySafe))
    }
    
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Wed Apr 17 21:09:59 UTC 2024
    - 21.8K bytes
    - Viewed (0)
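
DataChecked in the type.go excerpt pairs pointer arithmetic with a human-readable reason (whySafe) that can surface in a backtrace or fatal error. A minimal sketch of the pattern with an assumed implementation; the real addChecked in internal/abi validates the offset, which this sketch does not:

    package main

    import (
    	"fmt"
    	"unsafe"
    )

    // addChecked offsets p by off bytes. In the runtime, whySafe documents why
    // the arithmetic cannot go out of bounds and can be reported if it ever
    // does; this sketch only shows the shape of the call.
    func addChecked(p unsafe.Pointer, off uintptr, whySafe string) unsafe.Pointer {
    	_ = whySafe
    	return unsafe.Add(p, off)
    }

    func main() {
    	data := []byte("name-data")
    	p := addChecked(unsafe.Pointer(&data[0]), 5, "offset stays inside the backing slice")
    	fmt.Printf("%c\n", *(*byte)(p)) // d
    }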
  7. src/runtime/cgocall.go

    package runtime
    
    import (
    	"internal/abi"
    	"internal/goarch"
    	"internal/goexperiment"
    	"runtime/internal/sys"
    	"unsafe"
    )
    
    // Addresses collected in a cgo backtrace when crashing.
    // Length must match arg.Max in x_cgo_callers in runtime/cgo/gcc_traceback.c.
    type cgoCallers [32]uintptr
    
    // argset matches runtime/cgo/linux_syscall.c:argset_t
    type argset struct {
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Thu May 23 01:16:47 UTC 2024
    - 24.2K bytes
    - Viewed (0)
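
cgoCallers in the cgocall.go excerpt is a fixed-size buffer of program counters collected while crashing through cgo; its length must stay in sync with the C side. A loosely analogous sketch in ordinary Go, filling a fixed-size PC array with runtime.Callers (this is not the cgo crash path itself):

    package main

    import (
    	"fmt"
    	"runtime"
    )

    func main() {
    	var pcs [32]uintptr             // fixed-size buffer, like cgoCallers
    	n := runtime.Callers(0, pcs[:]) // collect as many frames as fit
    	frames := runtime.CallersFrames(pcs[:n])
    	for {
    		frame, more := frames.Next()
    		fmt.Println(frame.Function)
    		if !more {
    			break
    		}
    	}
    }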
  8. src/runtime/asm_mips64x.s

    // in the caller's stack frame. These stubs write the args into that stack space and
    // then tail call to the corresponding runtime handler.
    // The tail call makes these stubs disappear in backtraces.
    TEXT runtime·panicIndex(SB),NOSPLIT,$0-16
    	MOVV	R1, x+0(FP)
    	MOVV	R2, y+8(FP)
    	JMP	runtime·goPanicIndex(SB)
    TEXT runtime·panicIndexU(SB),NOSPLIT,$0-16
    	MOVV	R1, x+0(FP)
    	MOVV	R2, y+8(FP)
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Mon Nov 06 19:45:59 UTC 2023
    - 24.3K bytes
    - Viewed (0)
  9. src/runtime/asm_riscv64.s

    // arguments are allocated in the caller's stack frame.
    // These stubs write the args into that stack space and then tail call to the
    // corresponding runtime handler.
    // The tail call makes these stubs disappear in backtraces.
    TEXT runtime·panicIndex<ABIInternal>(SB),NOSPLIT,$0-16
    	MOV	T0, X10
    	MOV	T1, X11
    	JMP	runtime·goPanicIndex<ABIInternal>(SB)
    TEXT runtime·panicIndexU<ABIInternal>(SB),NOSPLIT,$0-16
    	MOV	T0, X10
    	MOV	T1, X11
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Thu Nov 09 13:57:06 UTC 2023
    - 27K bytes
    - Viewed (0)
  10. src/runtime/asm_s390x.s

    // in the caller's stack frame. These stubs write the args into that stack space and
    // then tail call to the corresponding runtime handler.
    // The tail call makes these stubs disappear in backtraces.
    TEXT runtime·panicIndex(SB),NOSPLIT,$0-16
    	MOVD	R0, x+0(FP)
    	MOVD	R1, y+8(FP)
    	JMP	runtime·goPanicIndex(SB)
    TEXT runtime·panicIndexU(SB),NOSPLIT,$0-16
    	MOVD	R0, x+0(FP)
    	MOVD	R1, y+8(FP)
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Thu Jan 25 09:18:28 UTC 2024
    - 28.1K bytes
    - Viewed (0)
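
The three asm_*.s results above (mips64x, riscv64, s390x) show the same pattern: tiny stubs spill the out-of-range index and length into the caller's frame, then tail-call the Go handler such as runtime·goPanicIndex, so only the handler appears in a backtrace. A minimal Go sketch of the panic those stubs ultimately raise, which looks the same from the user's point of view on every architecture:

    package main

    import "fmt"

    func main() {
    	defer func() {
    		// Left uncaught, this panic prints a traceback; with runtime frames
    		// visible (e.g. GOTRACEBACK=system) the handler runtime.goPanicIndex
    		// appears, while the per-architecture assembly stub does not,
    		// because of the tail call noted in the comments above.
    		if r := recover(); r != nil {
    			fmt.Println("recovered:", r)
    		}
    	}()
    	s := []int{1, 2, 3}
    	i := 3
    	_ = s[i] // out-of-range index triggers the bounds-check panic path
    }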