Results 21 - 30 of 375 for getg (0.07 sec)

  1. docs/en/docs/tutorial/security/get-current-user.md

        Here **FastAPI** won't get confused because you are using `Depends`.
    
    !!! check
        The way this dependency system is designed allows us to have different dependencies (different "dependables") that all return a `User` model.
    
        We are not restricted to having only one dependency that can return that type of data.
    
    ## Other models
    
    Registered: Mon Jun 17 08:32:26 UTC 2024
    - Last Modified: Thu Jan 11 16:31:18 UTC 2024
    - 7.6K bytes
    - Viewed (0)
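    The point in this snippet is language-independent. As a rough Go analogue (most results below are Go), several independent "dependables" can return the same User model, and consuming code does not care which provider produced it; all names here are invented for illustration:

    package main

    import "fmt"

    // User plays the role of the shared model every provider returns.
    type User struct {
    	Username string
    }

    // Two independent providers ("dependables") that both yield a User.
    func userFromToken(token string) User { return User{Username: "alice"} }
    func userFromCookie(sid string) User  { return User{Username: "bob"} }

    // greet depends only on the User type, not on which provider made it.
    func greet(u User) { fmt.Println("hello,", u.Username) }

    func main() {
    	greet(userFromToken("tok-123"))  // either provider works,
    	greet(userFromCookie("sid-456")) // because both return User
    }
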
  2. docs/de/docs/tutorial/security/get-current-user.md

    Nils Lindemann <******@****.***> 1711822085 +0100
    Registered: Mon Jun 17 08:32:26 UTC 2024
    - Last Modified: Sat Mar 30 18:08:05 UTC 2024
    - 8.5K bytes
    - Viewed (0)
  3. src/runtime/print.go

    func printlock() {
    	mp := getg().m
    	mp.locks++ // do not reschedule between printlock++ and lock(&debuglock).
    	mp.printlock++
    	if mp.printlock == 1 {
    		lock(&debuglock)
    	}
    	mp.locks-- // now we know debuglock is held and holding up mp.locks for us.
    }
    
    func printunlock() {
    	mp := getg().m
    	mp.printlock--
    	if mp.printlock == 0 {
    		unlock(&debuglock)
    	}
    }
    
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Fri Jan 20 03:27:26 UTC 2023
    - 5.9K bytes
    - Viewed (0)
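    The pattern here is a nesting counter: each M tracks its print-lock depth in mp.printlock, so only the outermost printlock call takes debuglock and only the outermost printunlock releases it. A minimal user-level sketch of the same pattern (safe only for a single owner, since the runtime's counter is per-M while this one is a plain field):

    package main

    import (
    	"fmt"
    	"sync"
    )

    // nestLock mimics printlock/printunlock: a depth counter so nested
    // lock calls acquire the underlying mutex only once.
    type nestLock struct {
    	mu    sync.Mutex
    	depth int // nesting depth; must only be touched by one owner
    }

    func (l *nestLock) lock() {
    	l.depth++
    	if l.depth == 1 {
    		l.mu.Lock()
    	}
    }

    func (l *nestLock) unlock() {
    	l.depth--
    	if l.depth == 0 {
    		l.mu.Unlock()
    	}
    }

    func main() {
    	var l nestLock
    	l.lock()
    	l.lock() // nested: the mutex is not touched again
    	fmt.Println("depth:", l.depth)
    	l.unlock()
    	l.unlock() // outermost: releases the mutex
    }
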
  4. src/runtime/rand.go

    //
    //go:nosplit
    //go:linkname rand
    func rand() uint64 {
    	// Note: We avoid acquirem here so that in the fast path
    	// there is just a getg, an inlined c.Next, and a return.
    	// The performance difference on a 16-core AMD is
    	// 3.7ns/call this way versus 4.3ns/call with acquirem (+16%).
    	mp := getg().m
    	c := &mp.chacha8
    	for {
    		// Note: c.Next is marked nosplit,
    		// so we don't need to use mp.locks
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Fri May 31 14:32:47 UTC 2024
    - 8K bytes
    - Viewed (0)
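    The //go:linkname here is how this generator is reached from outside the runtime: the top-level math/rand/v2 functions draw from it, so (assuming Go 1.22+) a caller gets the per-M ChaCha8 fast path with no seeding or locking of their own:

    package main

    import (
    	"fmt"
    	"math/rand/v2"
    )

    func main() {
    	fmt.Println(rand.Uint64()) // one draw from the runtime-backed generator
    	fmt.Println(rand.IntN(10)) // uniform in [0, 10)
    }
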
  5. src/runtime/coro.go

    // and then calls coroexit to remove the extra concurrency.
    func corostart() {
    	gp := getg()
    	c := gp.coroarg
    	gp.coroarg = nil
    
    	defer coroexit(c)
    	c.f(c)
    }
    
    // coroexit is like coroswitch but closes the coro
    // and exits the current goroutine
    func coroexit(c *coro) {
    	gp := getg()
    	gp.coroarg = c
    	gp.coroexit = true
    	mcall(coroswitch_m)
    }
    
    //go:linkname coroswitch
    
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Fri Jun 07 19:09:18 UTC 2024
    - 7.4K bytes
    - Viewed (0)
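    These runtime coroutines are what iter.Pull is built on (as of Go 1.23): Pull runs a push-style iter.Seq on a coro and switches to it for each next() call. A short usage sketch:

    package main

    import (
    	"fmt"
    	"iter"
    )

    func main() {
    	// A push-style sequence yielding 1, 2, 3.
    	var seq iter.Seq[int] = func(yield func(int) bool) {
    		for i := 1; i <= 3; i++ {
    			if !yield(i) {
    				return
    			}
    		}
    	}
    	// Pull converts it to next/stop; each next() switches to the
    	// coroutine running seq.
    	next, stop := iter.Pull(seq)
    	defer stop()
    	for v, ok := next(); ok; v, ok = next() {
    		fmt.Println(v)
    	}
    }
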
  6. src/runtime/debug.go

    //
    // Ensure mayMoreStackPreempt can be called for all ABIs.
    //
    //go:nosplit
    //go:linkname mayMoreStackPreempt
    func mayMoreStackPreempt() {
    	// Don't do anything on the g0 or gsignal stack.
    	gp := getg()
    	if gp == gp.m.g0 || gp == gp.m.gsignal {
    		return
    	}
    	// Force a preemption, unless the stack is already poisoned.
    	if gp.stackguard0 < stackPoisonMin {
    		gp.stackguard0 = stackPreempt
    	}
    }
    
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Sat May 11 20:38:24 UTC 2024
    - 4.2K bytes
    - Viewed (0)
  7. src/runtime/export_debuglog_test.go

    func (l *dlogger) P(x any) *dlogger      { return l.p(x) }
    func (l *dlogger) S(x string) *dlogger   { return l.s(x) }
    func (l *dlogger) PC(x uintptr) *dlogger { return l.pc(x) }
    
    func DumpDebugLog() string {
    	gp := getg()
    	gp.writebuf = make([]byte, 0, 1<<20)
    	printDebugLog()
    	buf := gp.writebuf
    	gp.writebuf = nil
    
    	return string(buf)
    }
    
    func ResetDebugLog() {
    	stw := stopTheWorld(stwForTestResetDebugLog)
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Wed Nov 15 16:49:45 UTC 2023
    - 1.3K bytes
    - Viewed (0)
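    DumpDebugLog captures output by pointing the current g's writebuf at a byte slice, so the runtime's print routines append there instead of writing to stderr. Outside the runtime, the closest sketch of that trick is swapping os.Stdout for a pipe (fine for small outputs; large ones would need a concurrent reader):

    package main

    import (
    	"fmt"
    	"io"
    	"os"
    )

    // captureStdout redirects os.Stdout into a pipe while f runs, then
    // restores it and returns whatever f printed.
    func captureStdout(f func()) string {
    	old := os.Stdout
    	r, w, _ := os.Pipe()
    	os.Stdout = w
    	f()
    	w.Close()
    	os.Stdout = old
    	out, _ := io.ReadAll(r)
    	return string(out)
    }

    func main() {
    	got := captureStdout(func() { fmt.Println("hello") })
    	fmt.Printf("captured %q\n", got)
    }
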
  8. src/runtime/rwmutex.go

    			lock(&rw.rLock)
    			if rw.readerPass > 0 {
    				// Writer finished.
    				rw.readerPass -= 1
    				unlock(&rw.rLock)
    			} else {
    				// Queue this reader to be woken by
    				// the writer.
    				m := getg().m
    				m.schedlink = rw.readers
    				rw.readers.set(m)
    				unlock(&rw.rLock)
    				notesleep(&m.park)
    				noteclear(&m.park)
    			}
    		})
    	}
    }
    
    // runlock undoes a single rlock call on rw.
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Mon Apr 22 14:29:04 UTC 2024
    - 5K bytes
    - Viewed (0)
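    This runtime rwmutex is a copy of sync's reader/writer lock reworked to park whole Ms rather than goroutines: readers arriving while a writer is active queue themselves on rw.readers and sleep until the writer wakes them. The user-facing counterpart keeps the same semantics:

    package main

    import (
    	"fmt"
    	"sync"
    )

    func main() {
    	var rw sync.RWMutex

    	rw.RLock() // any number of readers may hold this together
    	fmt.Println("reading")
    	rw.RUnlock()

    	rw.Lock() // a writer excludes readers and other writers
    	fmt.Println("writing")
    	rw.Unlock()
    }
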
  9. src/runtime/sys_libc.go

    // Preserves the calling point as the location where a profiler traceback will begin.
    //
    //go:nosplit
    func libcCall(fn, arg unsafe.Pointer) int32 {
    	// Leave caller's PC/SP/G around for traceback.
    	gp := getg()
    	var mp *m
    	if gp != nil {
    		mp = gp.m
    	}
    	if mp != nil && mp.libcallsp == 0 {
    		mp.libcallg.set(gp)
    		mp.libcallpc = getcallerpc()
    		// sp must be the last, because once async cpu profiler finds
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Tue Apr 05 17:54:15 UTC 2022
    - 1.8K bytes
    - Viewed (0)
  10. tensorflow/compiler/mlir/tensorflow/transforms/executor_tpuv1_inline_tpu_island.cc

      if (!nested_module) return;
    
      InlinerInterface inliner(&getContext());
      auto walk_result = getOperation().walk([&](TF::PartitionedCallOp call_op) {
        if (!call_op.getF().getRootReference().getValue().starts_with(
                kNestedModule))
          return WalkResult::advance();
        // This is a call we need to inline!
        LLVM_DEBUG(llvm::dbgs()
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue Dec 19 08:06:04 UTC 2023
    - 4K bytes
    - Viewed (0)