- Sort Score
- Results per page: 10
- Languages All
Results 11 - 20 of 38 for new_stack (0.2 sec)
-
tensorflow/compiler/mlir/quantization/stablehlo/passes/quantization_patterns.h
OperationState new_state(candidate_op->getLoc(), candidate_op->getName().getStringRef(), inputs, output_types, candidate_op->getAttrs()); for (int i = 0; i < candidate_op->getNumRegions(); ++i) { new_state.addRegion(); } Operation* quantized_op = rewriter.create(new_state); if (candidate_op->getNumRegions() != 0) {
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 10.9K bytes - Viewed (0) -
src/runtime/stack.go
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Wed May 22 22:31:00 UTC 2024 - 41.1K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/stablehlo/transforms/optimize.cc
if (!first->hasOneUse() || !second->hasOneUse()) return rewriter.notifyMatchFailure(concat, "slice ops are used elsewhere"); SmallVector<int64_t> new_start; SmallVector<int64_t> new_limit; SmallVector<int64_t> new_slice_shape; new_start.reserve(first.getStrides().size()); new_limit.reserve(first.getStrides().size()); new_slice_shape.reserve(first.getStrides().size());
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 26.9K bytes - Viewed (0) -
src/cmd/vendor/golang.org/x/telemetry/internal/counter/stackcounter.go
mu sync.Mutex // as this is a detail of the implementation, it could be replaced // by a more efficient mechanism stacks []stack } type stack struct { pcs []uintptr counter *Counter } func NewStack(name string, depth int) *StackCounter { return &StackCounter{name: name, depth: depth, file: &defaultFile} } // Inc increments a stack counter. It computes the caller's stack and
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Mon Mar 04 17:10:54 UTC 2024 - 4.8K bytes - Viewed (0) -
src/cmd/internal/telemetry/telemetry.go
return counter.New(name) } // NewStackCounter returns a new stack counter with the given name and depth. func NewStackCounter(name string, depth int) *counter.StackCounter { return counter.NewStack(name, depth) } // CountFlags creates a counter for every flag that is set // and increments the counter. The name of the counter is // the concatenation of prefix and the flag name.
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Mon May 20 15:47:30 UTC 2024 - 3.6K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/passes/quantize.cc
OperationState new_state(quantizing_op->getLoc(), quantizing_op->getName().getStringRef(), inputs, output_types, quantizing_op->getAttrs()); for (int i = 0; i < quantizing_op->getNumRegions(); ++i) { new_state.addRegion(); } Operation* quantized_op = rewriter.create(new_state); if (quantizing_op->getNumRegions() != 0) {
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri Mar 22 05:52:39 UTC 2024 - 23.6K bytes - Viewed (0) -
pkg/hbone/util.go
buf1 := bufferPoolCopy.Get().([]byte) // nolint: staticcheck defer bufferPoolCopy.Put(buf1) bufCap := cap(buf1) buf := buf1[0:bufCap:bufCap] // For netstack: src is a gonet.Conn, doesn't implement WriterTo. Dst is a net.TcpConn - and implements ReadFrom. // CopyBuffered is the actual implementation of Copy and CopyBuffer. // if buf is nil, one is allocated. // Duplicated from io
Registered: Fri Jun 14 15:00:06 UTC 2024 - Last Modified: Tue May 23 17:08:31 UTC 2023 - 3.4K bytes - Viewed (0) -
src/runtime/lock_sema.go
} gp.m.mLockProfile.recordUnlock(l) gp.m.locks-- if gp.m.locks < 0 { throw("runtime·unlock: lock count") } if gp.m.locks == 0 && gp.preempt { // restore the preemption request in case we've cleared it in newstack gp.stackguard0 = stackPreempt } } // One-time notifications. func noteclear(n *note) { n.key = 0 } func notewakeup(n *note) { var v uintptr for {
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Thu May 30 17:57:37 UTC 2024 - 6.8K bytes - Viewed (0) -
src/runtime/lock_futex.go
gp.m.mLockProfile.recordUnlock(l) gp.m.locks-- if gp.m.locks < 0 { throw("runtime·unlock: lock count") } if gp.m.locks == 0 && gp.preempt { // restore the preemption request in case we've cleared it in newstack gp.stackguard0 = stackPreempt } } // One-time notifications. func noteclear(n *note) { n.key = 0 } func notewakeup(n *note) { old := atomic.Xchg(key32(&n.key), 1) if old != 0 {
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Thu May 30 17:57:34 UTC 2024 - 5.4K bytes - Viewed (0) -
src/runtime/asm_loong64.s
MOVV g, (m_morebuf+gobuf_g)(R7) // Call newstack on m->g0's stack. MOVV m_g0(R7), g JAL runtime·save_g(SB) MOVV (g_sched+gobuf_sp)(g), R3 // Create a stack frame on g0 to call newstack. MOVV R0, -8(R3) // Zero saved LR in frame ADDV $-8, R3 JAL runtime·newstack(SB) // Not reached, but make sure the return PC from the call to newstack // is still in this function, and not the beginning of the next.
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Mon May 13 15:04:25 UTC 2024 - 26.5K bytes - Viewed (0)