Results 41 - 50 of 109 for popf (1.24 sec)
src/sync/poolqueue.go
package sync

import (
    "sync/atomic"
    "unsafe"
)

// poolDequeue is a lock-free fixed-size single-producer,
// multi-consumer queue. The single producer can both push and pop
// from the head, and consumers can pop from the tail.
//
// It has the added feature that it nils out unused slots to avoid
// unnecessary retention of objects. This is important for sync.Pool,

Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Mon Feb 26 18:12:29 UTC 2024 - 8.3K bytes - Viewed (0)
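
poolDequeue itself is unexported; the public API it ultimately backs is sync.Pool. As a hedged illustration of the get/put pattern it serves (the bytes.Buffer payload is an assumption for the example, not something the snippet shows):

package main

import (
    "bytes"
    "fmt"
    "sync"
)

// bufPool recycles buffers; New runs only when the pool has nothing to pop.
var bufPool = sync.Pool{
    New: func() any { return new(bytes.Buffer) },
}

func main() {
    b := bufPool.Get().(*bytes.Buffer) // pop a pooled buffer or allocate one
    b.Reset()                          // recycled objects keep old state; clear it
    b.WriteString("hello")
    fmt.Println(b.String())
    bufPool.Put(b) // push the buffer back for later reuse
}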
src/cmd/compile/internal/types2/initorder.go
// removing all incoming dependencies), otherwise there are initialization
// cycles.
emitted := make(map[*declInfo]bool)
for len(pq) > 0 {
    // get the next node
    n := heap.Pop(&pq).(*graphNode)

    if debug {
        fmt.Printf("\t%s (src pos %d) depends on %d nodes now\n",
            n.obj.Name(), n.obj.order(), n.ndeps)
    }

    // if n still depends on other nodes, we have a cycle
    if n.ndeps > 0 {

Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Thu Mar 28 22:06:51 UTC 2024 - 9.8K bytes - Viewed (0)
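
For context, a self-contained sketch of the same pop-and-check loop: a container/heap priority queue ordered by remaining dependency count, where popping a node whose count is still positive signals a cycle. The node and nodeQueue types are invented for illustration; they are not the types2 internals.

package main

import (
    "container/heap"
    "fmt"
)

type node struct {
    name  string
    ndeps int // count of not-yet-emitted dependencies
}

type nodeQueue []*node

func (q nodeQueue) Len() int           { return len(q) }
func (q nodeQueue) Less(i, j int) bool { return q[i].ndeps < q[j].ndeps }
func (q nodeQueue) Swap(i, j int)      { q[i], q[j] = q[j], q[i] }
func (q *nodeQueue) Push(x any)        { *q = append(*q, x.(*node)) }
func (q *nodeQueue) Pop() any {
    old := *q
    x := old[len(old)-1]
    *q = old[:len(old)-1]
    return x
}

func main() {
    a := &node{name: "a"}
    b := &node{name: "b", ndeps: 1} // b waits on a
    c := &node{name: "c", ndeps: 1} // c waits on b
    dependents := map[*node][]*node{a: {b}, b: {c}}

    pq := nodeQueue{a, b, c}
    heap.Init(&pq)
    for pq.Len() > 0 {
        n := heap.Pop(&pq).(*node) // node with fewest unresolved deps first
        if n.ndeps > 0 {           // as in initorder.go: a leftover dep means a cycle
            fmt.Println("initialization cycle at", n.name)
            break
        }
        fmt.Println("emit", n.name)
        for _, d := range dependents[n] {
            d.ndeps-- // n is emitted, so its dependents lose one dependency
        }
        heap.Init(&pq) // crude re-heapify after mutation; heap.Fix per node would be finer-grained
    }
}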
src/go/types/initorder.go
// removing all incoming dependencies), otherwise there are initialization
// cycles.
emitted := make(map[*declInfo]bool)
for len(pq) > 0 {
    // get the next node
    n := heap.Pop(&pq).(*graphNode)

    if debug {
        fmt.Printf("\t%s (src pos %d) depends on %d nodes now\n",
            n.obj.Name(), n.obj.order(), n.ndeps)
    }

    // if n still depends on other nodes, we have a cycle
    if n.ndeps > 0 {

Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Wed Apr 03 18:48:38 UTC 2024 - 9.9K bytes - Viewed (0)
src/cmd/compile/internal/ssa/schedule.go
func (h ValHeap) Swap(i, j int) { a := h.a; a[i], a[j] = a[j], a[i] }
func (h *ValHeap) Push(x interface{}) {
    // Push and Pop use pointer receivers because they modify the slice's length,
    // not just its contents.
    v := x.(*Value)
    h.a = append(h.a, v)
}
func (h *ValHeap) Pop() interface{} {
    old := h.a
    n := len(old)
    x := old[n-1]
    h.a = old[0 : n-1]
    return x
}
func (h ValHeap) Less(i, j int) bool {

Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Mon Apr 08 15:53:17 UTC 2024 - 16.4K bytes - Viewed (0)
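
The comment states the general container/heap contract: Push and Pop must mutate the slice header, so they need the addressable value. A minimal sketch in the same struct-wraps-slice shape as ValHeap (intHeap is an invented name for illustration):

package main

import (
    "container/heap"
    "fmt"
)

// intHeap mirrors ValHeap's shape: a struct wrapping a slice.
type intHeap struct{ a []int }

func (h intHeap) Len() int           { return len(h.a) }
func (h intHeap) Less(i, j int) bool { return h.a[i] < h.a[j] }
func (h intHeap) Swap(i, j int)      { a := h.a; a[i], a[j] = a[j], a[i] }

// Push and Pop take pointer receivers: they change len(h.a), and with a
// value receiver the new slice header would be written to a copy and lost.
func (h *intHeap) Push(x any) { h.a = append(h.a, x.(int)) }
func (h *intHeap) Pop() any {
    old := h.a
    n := len(old)
    x := old[n-1]
    h.a = old[:n-1]
    return x
}

func main() {
    h := &intHeap{}
    for _, v := range []int{3, 1, 2} {
        heap.Push(h, v)
    }
    for h.Len() > 0 {
        fmt.Println(heap.Pop(h).(int)) // prints 1, 2, 3
    }
}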
src/internal/trace/internal/oldtrace/order.go
func (l *orderEventList) Less(i, j int) bool {
    return (*l)[i].ev.Ts < (*l)[j].ev.Ts
}

func (h *orderEventList) Push(x orderEvent) {
    *h = append(*h, x)
    heapUp(h, len(*h)-1)
}

func (h *orderEventList) Pop() orderEvent {
    n := len(*h) - 1
    (*h)[0], (*h)[n] = (*h)[n], (*h)[0]
    heapDown(h, 0, n)
    x := (*h)[len(*h)-1]
    *h = (*h)[:len(*h)-1]
    return x
}

func heapUp(h *orderEventList, j int) {
    for {

Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Thu May 23 01:00:11 UTC 2024 - 4K bytes - Viewed (0)
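
Unlike the container/heap implementations above, this list maintains the heap invariant itself with explicit sift functions, avoiding interface boxing of each orderEvent. A sketch of the same technique over plain ints, reusing the heapUp/heapDown names purely for illustration:

package main

import "fmt"

type intList []int

func (h *intList) Push(x int) {
    *h = append(*h, x)
    heapUp(h, len(*h)-1)
}

func (h *intList) Pop() int {
    n := len(*h) - 1
    (*h)[0], (*h)[n] = (*h)[n], (*h)[0] // move the min to the end
    heapDown(h, 0, n)                   // restore the invariant over the first n
    x := (*h)[n]
    *h = (*h)[:n]
    return x
}

func heapUp(h *intList, j int) {
    for j > 0 {
        i := (j - 1) / 2 // parent
        if (*h)[i] <= (*h)[j] {
            break
        }
        (*h)[i], (*h)[j] = (*h)[j], (*h)[i]
        j = i
    }
}

func heapDown(h *intList, i, n int) {
    for {
        j := 2*i + 1 // left child
        if j >= n {
            break
        }
        if r := j + 1; r < n && (*h)[r] < (*h)[j] {
            j = r // right child is smaller
        }
        if (*h)[i] <= (*h)[j] {
            break
        }
        (*h)[i], (*h)[j] = (*h)[j], (*h)[i]
        i = j
    }
}

func main() {
    var h intList
    for _, v := range []int{5, 1, 4} {
        h.Push(v)
    }
    for len(h) > 0 {
        fmt.Println(h.Pop()) // prints 1, 4, 5
    }
}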
tensorflow/compiler/mlir/tensorflow/tests/tpu_cluster_formation.mlir
%3 = "tf.opD"(%2) {_xla_compile_device_type = "TPU", _replication_info = "replicate", is_stateless = true} : (tensor<i1>) -> tensor<i1> %4 = "tf.opE"() {is_stateless = true} : () -> tensor<i1> %5 = "tf.opF"(%arg0) {_xla_compile_device_type = "TPU", _replication_info = "replicate", is_stateless = true} : (tensor<i1>) -> tensor<i1> func.return %2, %3, %5 : tensor<i1>, tensor<i1>, tensor<i1> } // CHECK: "tf.opB"
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu May 02 22:03:30 UTC 2024 - 53.9K bytes - Viewed (0) -
src/internal/trace/gc.go
}

func (h bandUtilHeap) Swap(i, j int) {
    h[i], h[j] = h[j], h[i]
}

func (h *bandUtilHeap) Push(x any) {
    *h = append(*h, x.(bandUtil))
}

func (h *bandUtilHeap) Pop() any {
    x := (*h)[len(*h)-1]
    *h = (*h)[:len(*h)-1]
    return x
}

// UtilWindow is a specific window at Time.
type UtilWindow struct {
    Time int64

Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Fri May 17 18:48:18 UTC 2024 - 26K bytes - Viewed (0)
tensorflow/compiler/mlir/quantization/common/lift_as_function_call.cc
// doesn't depends on any ops below it. std::stack<Operation*> op_stack; while (!value_queue.empty()) { Value current_value = value_queue.front(); value_queue.pop(); Operation* defining_node = current_value.getDefiningOp(); if (defining_node == nullptr) continue; op_stack.push(defining_node); for (Value arg : defining_node->getOperands()) {
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri May 17 17:58:54 UTC 2024 - 21.8K bytes - Viewed (0) -
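
The traversal pairs a FIFO queue for discovering defining ops with a stack that records the visit order, so that popping the stack later yields each op after the ops it depends on. A hedged Go sketch of the same queue-plus-stack idea on a toy chain of ops (the op type and edges are invented; for general graphs a DFS post-order is the safer way to get this ordering):

package main

import "fmt"

type op struct {
    name     string
    operands []*op // ops this op's result depends on
}

func main() {
    a := &op{name: "a"}
    b := &op{name: "b", operands: []*op{a}}
    c := &op{name: "c", operands: []*op{b}}

    // Discover ops breadth-first from the root while recording them on a stack.
    queue := []*op{c}
    seen := map[*op]bool{c: true}
    var stack []*op
    for len(queue) > 0 {
        cur := queue[0]
        queue = queue[1:] // like value_queue.front() followed by value_queue.pop()
        stack = append(stack, cur)
        for _, operand := range cur.operands {
            if !seen[operand] {
                seen[operand] = true
                queue = append(queue, operand)
            }
        }
    }

    // Popping the stack emits each op after the ops it depends on.
    for len(stack) > 0 {
        cur := stack[len(stack)-1]
        stack = stack[:len(stack)-1]
        fmt.Println(cur.name) // prints a, b, c
    }
}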
src/runtime/mgcwork.go
    if b.nobj != 0 {
        throw("workbuf is not empty")
    }
}

// getempty pops an empty work buffer off the work.empty list,
// allocating new buffers if none are available.
//
//go:nowritebarrier
func getempty() *workbuf {
    var b *workbuf
    if work.empty != 0 {
        b = (*workbuf)(work.empty.pop())
        if b != nil {
            b.checkempty()
        }
    }

Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Mon Mar 25 19:53:03 UTC 2024 - 12.9K bytes - Viewed (0)
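
getempty pops from work.empty, a lock-free list of work buffers, and falls back to allocation when the list is empty. A deliberately simplified sketch of that get-or-allocate free-list pattern, using a mutex-guarded slice in place of the runtime's lock-free stack (all names here are invented):

package main

import (
    "fmt"
    "sync"
)

type workbuf struct {
    nobj int // number of objects currently stored in the buffer
}

type freeList struct {
    mu   sync.Mutex
    bufs []*workbuf
}

// getempty pops a buffer off the free list, allocating a fresh one if the
// list is empty. The runtime does this with a lock-free stack, not a mutex.
func (l *freeList) getempty() *workbuf {
    l.mu.Lock()
    defer l.mu.Unlock()
    if n := len(l.bufs); n > 0 {
        b := l.bufs[n-1]
        l.bufs = l.bufs[:n-1]
        return b
    }
    return new(workbuf)
}

// putempty pushes a drained buffer back for reuse.
func (l *freeList) putempty(b *workbuf) {
    if b.nobj != 0 {
        panic("workbuf is not empty") // analogous to the snippet's throw
    }
    l.mu.Lock()
    defer l.mu.Unlock()
    l.bufs = append(l.bufs, b)
}

func main() {
    var l freeList
    b := l.getempty()
    fmt.Println("got a buffer, free list size:", len(l.bufs))
    l.putempty(b)
    fmt.Println("returned it, free list size:", len(l.bufs))
}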
platforms/core-runtime/files/src/main/java/org/gradle/internal/file/impl/DefaultDeleter.java
Deque<File> stack = new ArrayDeque<File>(); stack.push(directory); while (!stack.isEmpty() && paths.size() < MAX_REPORTED_PATHS) { File current = stack.pop(); String absolutePath = current.getAbsolutePath(); if (!current.equals(directory) && !failedPaths.contains(absolutePath) && current.lastModified() >= startTime) { paths.add(absolutePath);
Registered: Wed Jun 12 18:38:38 UTC 2024 - Last Modified: Thu Feb 15 17:10:06 UTC 2024 - 12.9K bytes - Viewed (0)
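
The deleter walks the tree iteratively with an explicit Deque instead of recursion, and stops collecting once MAX_REPORTED_PATHS is reached. A Go sketch of the same explicit-stack traversal with a reporting cap (maxReported and the root filtering are simplified stand-ins for Gradle's failed-path and timestamp checks):

package main

import (
    "fmt"
    "os"
    "path/filepath"
)

const maxReported = 16

func main() {
    root := os.TempDir()
    stack := []string{root}
    var paths []string

    // Depth-first walk driven by an explicit stack, stopping once
    // enough paths have been collected for the report.
    for len(stack) > 0 && len(paths) < maxReported {
        current := stack[len(stack)-1]
        stack = stack[:len(stack)-1] // pop, like stack.pop() in the Java code
        if current != root {         // mirror the !current.equals(directory) check
            paths = append(paths, current)
        }
        entries, err := os.ReadDir(current)
        if err != nil {
            continue // not a directory or unreadable: nothing to descend into
        }
        for _, e := range entries {
            stack = append(stack, filepath.Join(current, e.Name()))
        }
    }
    for _, p := range paths {
        fmt.Println(p)
    }
}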