Results 81 - 90 of 105 for Unbounded (0.18 sec)

  1. src/runtime/mgc.go

    	if uniqueMapCleanup != nil {
    		select {
    		case uniqueMapCleanup <- struct{}{}:
    		default:
    		}
    	}
    
    	// Clear central sudog cache.
    	// Leave per-P caches alone, they have strictly bounded size.
    	// Disconnect cached list before dropping it on the floor,
    	// so that a dangling ref to one entry does not pin all of them.
    	lock(&sched.sudoglock)
    	var sg, sgnext *sudog
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Wed May 29 16:25:21 UTC 2024
    - 62K bytes
    - Viewed (0)
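    The excerpt above shows two idioms: a non-blocking send (a select with a default arm) that signals the cleanup goroutine only if a wakeup is not already pending, and the "disconnect before dropping" step for the cached sudog list. A minimal sketch of the latter, with illustrative names (node, freeList) that are not from the runtime:

        package main

        import "fmt"

        type node struct {
        	next *node
        	buf  [64]byte // payload; stands in for the sudog fields
        }

        var freeList *node

        // dropFreeList discards the whole cached list, nilling out each next
        // pointer first so that a dangling reference to one node cannot keep
        // every downstream node reachable by the garbage collector.
        func dropFreeList() {
        	n := freeList
        	freeList = nil
        	for n != nil {
        		next := n.next
        		n.next = nil // disconnect: a ref to n no longer pins next
        		n = next
        	}
        }

        func main() {
        	freeList = &node{next: &node{}}
        	dropFreeList()
        	fmt.Println(freeList) // <nil>; each node is now collectable on its own
        }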
  2. src/cmd/compile/internal/ssa/rewrite.go

    // b2i32 translates a boolean value to 0 or 1.
    func b2i32(b bool) int32 {
    	if b {
    		return 1
    	}
    	return 0
    }
    
    // shiftIsBounded reports whether (left/right) shift Value v is known to be bounded.
    // A shift is bounded if it is shifting by less than the width of the shifted value.
    func shiftIsBounded(v *Value) bool {
    	return v.AuxInt != 0
    }
    
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Fri Jun 07 19:02:52 UTC 2024
    - 64.2K bytes
    - Viewed (0)
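    For context on shiftIsBounded: Go defines x << s as 0 once s reaches the width of x, so a shift whose amount is not provably small needs a fixup around the bare machine instruction. The sketch below is illustrative user code, not from rewrite.go; masking the shift amount is one common way to make the bound provable:

        package main

        import "fmt"

        func shiftUnbounded(x uint64, s uint) uint64 {
        	return x << s // s may be >= 64, so the compiler emits a bounds fixup
        }

        func shiftBounded(x uint64, s uint) uint64 {
        	return x << (s & 63) // s&63 < 64 always: the shift is provably bounded
        }

        func main() {
        	fmt.Println(shiftUnbounded(1, 70)) // 0, per Go's oversized-shift rule
        	fmt.Println(shiftBounded(1, 70))   // 64, i.e. 1 << (70 & 63) == 1 << 6
        }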
  3. src/cmd/compile/internal/walk/builtin.go

    	}
    
    	length := typecheck.Conv(n.Len, types.Types[types.TINT])
    	copylen := ir.NewUnaryExpr(base.Pos, ir.OLEN, n.Cap)
    	copyptr := ir.NewUnaryExpr(base.Pos, ir.OSPTR, n.Cap)
    
    	if !t.Elem().HasPointers() && n.Bounded() {
    		// When len(to)==len(from) and elements have no pointers:
    		// replace make+copy with runtime.mallocgc+runtime.memmove.
    
    		// We do not check for overflow of len(to)*elem.Width here
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Fri Mar 08 22:35:22 UTC 2024
    - 31.2K bytes
    - Viewed (0)
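    The pattern this part of builtin.go optimizes is ordinary user code: allocate a slice, then immediately copy a same-length slice of pointer-free elements into it. A hedged sketch of the triggering shape (the lowering to runtime.mallocgc plus runtime.memmove happens inside the compiler, not here):

        package main

        import "fmt"

        // cloneInts matches the make+copy shape: len(dst) == len(src) and the
        // element type int64 has no pointers, so the compiler may lower the
        // pair to a single allocation followed by a memmove.
        func cloneInts(src []int64) []int64 {
        	dst := make([]int64, len(src))
        	copy(dst, src)
        	return dst
        }

        func main() {
        	fmt.Println(cloneInts([]int64{1, 2, 3}))
        }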
  4. pkg/controller/nodelifecycle/node_lifecycle_controller.go

    	// updating each pod's status one at a time. This is performed serially, and
    	// can take a while if we're processing each node serially as well. So we
    	// process them with bounded concurrency instead, since most of the time is
    	// spent waiting on io.
    	workqueue.ParallelizeUntil(ctx, nc.nodeUpdateWorkerSize, len(nodes), updateNodeFunc)
    
    	nc.handleDisruption(ctx, zoneToNodeConditions, nodes)
    
    Registered: Sat Jun 15 01:39:40 UTC 2024
    - Last Modified: Sat May 04 18:33:12 UTC 2024
    - 51.6K bytes
    - Viewed (0)
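    workqueue.ParallelizeUntil (from k8s.io/client-go/util/workqueue) runs a fixed number of workers over N independent pieces of work. A plain-Go sketch of that bounded-concurrency pattern, not the Kubernetes implementation itself:

        package main

        import (
        	"fmt"
        	"sync"
        )

        // parallelizeUntil processes `pieces` work items with at most `workers`
        // goroutines running at once: concurrency is bounded by the worker
        // count, not by the amount of work.
        func parallelizeUntil(workers, pieces int, doWorkPiece func(piece int)) {
        	toProcess := make(chan int, pieces)
        	for i := 0; i < pieces; i++ {
        		toProcess <- i
        	}
        	close(toProcess)

        	var wg sync.WaitGroup
        	wg.Add(workers)
        	for w := 0; w < workers; w++ {
        		go func() {
        			defer wg.Done()
        			for piece := range toProcess {
        				doWorkPiece(piece)
        			}
        		}()
        	}
        	wg.Wait()
        }

        func main() {
        	parallelizeUntil(4, 10, func(piece int) {
        		fmt.Println("processed node update", piece)
        	})
        }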
  5. src/math/big/int.go

    	// Calculate the quotient and cosequences using Collins' stopping condition.
    	// Note that overflow of a Word is not possible when computing the remainder
    	// sequence and cosequences since the cosequence size is bounded by the input size.
    	// See section 4.2 of Jebelean for details.
    	for a2 >= v2 && a1-a2 >= v1+v2 {
    		q, r := a1/a2, a1%a2
    		a1, a2 = a2, r
    		u0, u1, u2 = u1, u2, u1+q*u2
    		v0, v1, v2 = v1, v2, v1+q*v2
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Thu Mar 14 17:02:38 UTC 2024
    - 33.1K bytes
    - Viewed (0)
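    This loop is the single-word simulation step of Lehmer's GCD: it runs Euclid on the word-sized leading digits while Collins' condition guarantees the simulated quotients match those of the full-precision operands. A self-contained sketch of the same step, assuming the math/big initial cosequences (0, 1, 0 and 0, 0, 1) and omitting the sign/parity bookkeeping the real code carries alongside its unsigned words:

        package main

        import "fmt"

        // lehmerSimulate runs word-sized Euclid steps on a1 >= a2, tracking the
        // cosequences, and stops as soon as a simulated quotient could diverge
        // from the true one (Collins' stopping condition).
        func lehmerSimulate(a1, a2 uint64) (u0, u1, u2, v0, v1, v2 uint64) {
        	u0, u1, u2 = 0, 1, 0
        	v0, v1, v2 = 0, 0, 1
        	for a2 >= v2 && a1-a2 >= v1+v2 {
        		q, r := a1/a2, a1%a2
        		a1, a2 = a2, r
        		u0, u1, u2 = u1, u2, u1+q*u2
        		v0, v1, v2 = v1, v2, v1+q*v2
        	}
        	return
        }

        func main() {
        	fmt.Println(lehmerSimulate(1<<62+12345, 1<<61+678))
        }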
  6. src/cmd/compile/internal/walk/order.go

    		return
    	}
    
    	mk := as.Y.(*ir.MakeExpr)
    	if mk.Esc() == ir.EscNone || mk.Len == nil || mk.Cap != nil {
    		return
    	}
    	mk.SetOp(ir.OMAKESLICECOPY)
    	mk.Cap = cp.Y
    	// Set bounded when m = OMAKESLICE([]T, len(s)); OCOPY(m, s)
    	mk.SetBounded(mk.Len.Op() == ir.OLEN && ir.SameSafeExpr(mk.Len.(*ir.UnaryExpr).X, cp.Y))
    	as.Y = typecheck.Expr(mk)
    	s[1] = nil // remove separate copy call
    }
    
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Fri Mar 08 02:00:33 UTC 2024
    - 42.7K bytes
    - Viewed (0)
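    The rewrite in order.go fires on exactly this source shape: a make whose length is len(s), followed by a copy from the same s, which is what the SameSafeExpr check verifies before setting the bounded flag. Illustrative user code only:

        package main

        import "fmt"

        func makeCopy(s []byte) []byte {
        	m := make([]byte, len(s)) // mk.Len is OLEN of s ...
        	copy(m, s)                // ... and the copy reads from that same s,
        	return m                  // so the combined op can be marked bounded
        }

        func main() {
        	fmt.Println(string(makeCopy([]byte("bounded"))))
        }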
  7. src/runtime/mbitmap.go

    // heapBitsInSpan returns true if the size of an object implies its ptr/scalar
    // data is stored at the end of the span, and is accessible via span.heapBits.
    //
    // Note: this works for both rounded-up sizes (span.elemsize) and unrounded
    // type sizes because minSizeForMallocHeader is guaranteed to be at a size
    // class boundary.
    //
    //go:nosplit
    func heapBitsInSpan(userSize uintptr) bool {
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Thu May 23 00:18:55 UTC 2024
    - 60K bytes
    - Viewed (0)
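    The snippet is cut off at the signature; in Go sources of this vintage the body is a single comparison against minSizeForMallocHeader. A sketch under that assumption, with the threshold hard-coded to 512, its value on 64-bit platforms (and itself a size class boundary, as the comment requires):

        package main

        import "fmt"

        // Illustrative constant: in the runtime this is derived from goarch
        // constants rather than written out literally.
        const minSizeForMallocHeader = 512

        // heapBitsInSpan (sketch): objects at or below the threshold keep their
        // ptr/scalar bitmap at the end of the span; larger objects carry a
        // malloc header instead.
        func heapBitsInSpan(userSize uintptr) bool {
        	return userSize <= minSizeForMallocHeader
        }

        func main() {
        	fmt.Println(heapBitsInSpan(496), heapBitsInSpan(1024)) // true false
        }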
  8. pkg/generated/openapi/zz_generated.openapi.go

    							Type:        []string{"integer"},
    							Format:      "int32",
    						},
    					},
    				},
    			},
    		},
    	}
    }
    
    Registered: Sat Jun 15 01:39:40 UTC 2024
    - Last Modified: Wed Jun 05 18:37:07 UTC 2024
    - 3M bytes
    - Viewed (0)
  9. tensorflow/compiler/mlir/tfrt/tests/mlrt/while_to_map_fn.mlir

    }
    
    // -----
    
    // Test a while-to-map_fn conversion in which a tensor array is used instead
    // of a tensor list, and where the tensor array size and the number of
    // iterations are bounded by separate constants of the same value.
    
    // CHECK-LABEL: map2/while/LoopCond_body
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue Apr 23 06:40:22 UTC 2024
    - 68.6K bytes
    - Viewed (0)
  10. src/runtime/malloc.go

    			// is freed when all subobjects are unreachable. The subobjects
    			// must be noscan (don't have pointers), this ensures that
    			// the amount of potentially wasted memory is bounded.
    			//
    			// Size of the memory block used for combining (maxTinySize) is tunable.
    			// Current setting is 16 bytes, which relates to 2x worst case memory
    			// wastage (when all but one subobjects are unreachable).
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Wed May 29 17:58:53 UTC 2024
    - 59.6K bytes
    - Viewed (0)
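    One reading of the 2x figure, consistent with the surrounding comment: without combining, the smallest noscan allocation would occupy the 8-byte size class on its own, so a lone survivor pinning a 16-byte tiny block costs at most 16/8 = 2x what it would have used alone, while 32-byte blocks would push that to 4x and 8-byte blocks to 1x (no wastage). A back-of-the-envelope check:

        package main

        import "fmt"

        func main() {
        	const smallestSizeClass = 8 // bytes a lone tiny object would use unaided
        	for _, maxTinySize := range []int{8, 16, 32} {
        		fmt.Printf("maxTinySize=%2d -> worst case %dx wastage\n",
        			maxTinySize, maxTinySize/smallestSizeClass)
        	}
        }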