Results 21 - 30 of 34 for loadOps (0.13 sec)

  1. src/runtime/mprof.go

    // If there are less than size records, copyFn is invoked for each record, and
    // ok returns true.
    func threadCreateProfileInternal(size int, copyFn func(profilerecord.StackRecord)) (n int, ok bool) {
    	first := (*m)(atomic.Loadp(unsafe.Pointer(&allm)))
    	for mp := first; mp != nil; mp = mp.alllink {
    		n++
    	}
    	if n <= size {
    		ok = true
    		for mp := first; mp != nil; mp = mp.alllink {

    - Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Thu May 30 17:57:37 UTC 2024
    - 53.3K bytes
    - Viewed (0)
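
    The excerpt implements a count-then-copy pattern: walk the allm list once to count records, and only when the caller-supplied size is large enough invoke copyFn for each record. The copy loop is cut off above; a minimal stand-alone sketch of the same pattern over an ordinary linked list (the node type and labels are illustrative, not the runtime's m list):

    package main

    import "fmt"

    // node stands in for the runtime's per-M linked list (allm / alllink).
    type node struct {
    	label string
    	next  *node
    }

    // collect counts the records in the list; only if the caller offered
    // enough room (n <= size) does it invoke copyFn per record and report ok.
    func collect(first *node, size int, copyFn func(string)) (n int, ok bool) {
    	for p := first; p != nil; p = p.next {
    		n++
    	}
    	if n <= size {
    		ok = true
    		for p := first; p != nil; p = p.next {
    			copyFn(p.label)
    		}
    	}
    	return
    }

    func main() {
    	list := &node{"a", &node{"b", &node{"c", nil}}}
    	n, ok := collect(list, 8, func(s string) { fmt.Println("record:", s) })
    	fmt.Println(n, ok) // 3 true
    }
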
  2. src/cmd/compile/internal/ssa/debug.go

    				changed := false
    				state.changedSlots.clear()
    
    				// Update locs/registers with the effects of each Value.
    				for _, v := range b.Values {
    					slots := state.valueNames[v.ID]
    
    					// Loads and stores inherit the names of their sources.
    					var source *Value
    					switch v.Op {
    					case OpStoreReg:
    						source = v.Args[0]
    					case OpLoadReg:
    						switch a := v.Args[0]; a.Op {

    - Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Mon Jun 10 19:44:43 UTC 2024
    - 58.4K bytes
    - Viewed (0)
  3. src/crypto/internal/nistec/p256_asm_ppc64le.s

    // changes would be needed to make this work for big
    // endian; however additional changes beyond what I
    // have noted are most likely needed to make it work.
    // - The string used with VPERM to swap the byte order
    //   for loads and stores.
    // - The constants that are loaded from CPOOL.
    //
    
    // The following constants are defined in an order
    // that is correct for use with LXVD2X/STXVD2X
    // on little endian.

    - Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Mon Mar 04 17:29:44 UTC 2024
    - 56.5K bytes
    - Viewed (0)
  4. src/runtime/mgcscavenge.go

    }
    
    // atomicScavChunkData is an atomic wrapper around a scavChunkData
    // that stores it in its packed form.
    type atomicScavChunkData struct {
    	value atomic.Uint64
    }
    
    // load loads and unpacks a scavChunkData.
    func (sc *atomicScavChunkData) load() scavChunkData {
    	return unpackScavChunkData(sc.value.Load())
    }
    
    // store packs and writes a new scavChunkData. store must be serialized

    - Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Wed May 08 17:48:45 UTC 2024
    - 52.3K bytes
    - Viewed (0)
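
    The comment above describes a common lock-free pattern: pack a small struct into a single atomic.Uint64 so it can be loaded and stored in one atomic operation. A minimal sketch of that pattern with a hypothetical two-field payload (not the runtime's actual scavChunkData packing):

    package main

    import (
    	"fmt"
    	"sync/atomic"
    )

    // chunkInfo is a hypothetical payload: two 32-bit fields that must be
    // observed together.
    type chunkInfo struct {
    	inUse uint32
    	flags uint32
    }

    // atomicChunkInfo stores a chunkInfo packed into one atomic.Uint64, so a
    // load or store touches a single machine word.
    type atomicChunkInfo struct {
    	value atomic.Uint64
    }

    func pack(c chunkInfo) uint64   { return uint64(c.inUse)<<32 | uint64(c.flags) }
    func unpack(v uint64) chunkInfo { return chunkInfo{inUse: uint32(v >> 32), flags: uint32(v)} }

    // load loads and unpacks the chunkInfo.
    func (a *atomicChunkInfo) load() chunkInfo { return unpack(a.value.Load()) }

    // store packs and writes a new chunkInfo.
    func (a *atomicChunkInfo) store(c chunkInfo) { a.value.Store(pack(c)) }

    func main() {
    	var a atomicChunkInfo
    	a.store(chunkInfo{inUse: 3, flags: 1})
    	fmt.Printf("%+v\n", a.load()) // {inUse:3 flags:1}
    }
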
  5. tensorflow/compiler/mlir/tensorflow/ir/tf_ops.td

        TF_Tensor:$output
      );
    }
    
    def TF_TPUCompileMlirAndExecuteOp : TF_Op<"TPUCompileMlirAndExecute", [AttrSizedOperandSegments]> {
      let summary = "Op that compiles a computation in MLIR into a TPU program, and loads and executes it on a TPU device.";
    
      let description = [{
    For the internal use of the TPU compiler.
    
    - Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed Apr 24 04:08:35 UTC 2024
    - 90.5K bytes
    - Viewed (0)
  6. src/runtime/malloc.go

    	unlock(&h.lock)
    
    	// N.B. The arenas L1 map is quite small on all platforms, so it's fine to
    	// just iterate over the whole thing.
    	for i := range h.arenas {
    		l2 := (*[1 << arenaL2Bits]*heapArena)(atomic.Loadp(unsafe.Pointer(&h.arenas[i])))
    		if l2 == nil {
    			continue
    		}
    		sysHugePage(unsafe.Pointer(l2), unsafe.Sizeof(*l2))
    	}
    }
    
    // base address for all 0-byte allocations
    var zerobase uintptr
    
    - Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Wed May 29 17:58:53 UTC 2024
    - 59.6K bytes
    - Viewed (0)
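
    The loop above walks a small two-level index (the arena L1/L2 map), atomically loading each second-level pointer and skipping slots that have not been populated. atomic.Loadp is runtime-internal; a user-space sketch of the same publish/skip-nil/read pattern using sync/atomic's typed pointers, with made-up sizes and element type:

    package main

    import (
    	"fmt"
    	"sync/atomic"
    )

    const l1Size, l2Size = 4, 8 // illustrative sizes, not the runtime's

    type entry struct{ id int }

    // table is a sparse two-level index: second-level arrays are allocated
    // lazily and published with an atomic store, so readers need no lock.
    type table struct {
    	l1 [l1Size]atomic.Pointer[[l2Size]*entry]
    }

    // visit iterates the whole table, skipping second-level arrays that have
    // not been published yet (the nil check mirrors the l2 == nil test above).
    func (t *table) visit(fn func(*entry)) {
    	for i := range t.l1 {
    		l2 := t.l1[i].Load()
    		if l2 == nil {
    			continue
    		}
    		for _, e := range l2 {
    			if e != nil {
    				fn(e)
    			}
    		}
    	}
    }

    func main() {
    	var t table
    	l2 := new([l2Size]*entry)
    	l2[3] = &entry{id: 42}
    	t.l1[1].Store(l2) // publish the second-level array
    	t.visit(func(e *entry) { fmt.Println("entry", e.id) })
    }
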
  7. src/runtime/map.go

    // to the new table.
    
    // Picking loadFactor: too large and we have lots of overflow
    // buckets, too small and we waste a lot of space. I wrote
    // a simple program to check some stats for different loads:
    // (64-bit, 8 byte keys and elems)
    //  loadFactor    %overflow  bytes/entry     hitprobe    missprobe
    //        4.00         2.13        20.77         3.00         4.00

    - Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Wed May 29 17:58:53 UTC 2024
    - 57.6K bytes
    - Viewed (0)
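
    In this table, loadFactor is the average number of entries per 8-slot bucket, so it directly determines how many buckets a map of a given size needs (the bucket count is rounded up to a power of two). A back-of-envelope helper assuming just that definition, not the map implementation itself:

    package main

    import (
    	"fmt"
    	"math"
    )

    // minBuckets estimates how many buckets are needed for n entries while
    // keeping the average load per bucket at or below loadFactor, rounded up
    // to a power of two for comparability with the real map.
    func minBuckets(n int, loadFactor float64) int {
    	need := math.Ceil(float64(n) / loadFactor)
    	b := 1
    	for float64(b) < need {
    		b <<= 1
    	}
    	return b
    }

    func main() {
    	for _, lf := range []float64{4.0, 6.5, 8.0} {
    		fmt.Printf("loadFactor %.2f -> %d buckets for 10000 entries\n", lf, minBuckets(10000, lf))
    	}
    }
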
  8. src/runtime/mbitmap.go

    	var sl notInHeapSlice
    	sl = notInHeapSlice{(*notInHeap)(unsafe.Pointer(spanBase + spanSize - bitmapSize)), elems, elems}
    	return *(*[]uintptr)(unsafe.Pointer(&sl))
    }
    
    // heapBitsSmallForAddr loads the heap bits for the object stored at addr from span.heapBits.
    //
    // addr must be the base pointer of an object in the span. heapBitsInSpan(span.elemsize)
    // must be true.
    //
    //go:nosplit

    - Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Thu May 23 00:18:55 UTC 2024
    - 60K bytes
    - Viewed (0)
  9. src/cmd/go/internal/modget/get.go

    	// into a deterministic-but-arbitrary order.
    	sort.Slice(upgrades, func(i, j int) bool {
    		return upgrades[i].path < upgrades[j].path
    	})
    	return upgrades
    }
    
    // loadPackages loads the packages matching the given patterns, invoking the
    // findPackage function for each package that may require a change to the
    // build list.
    //

    - Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Fri Jun 07 18:26:32 UTC 2024
    - 66.5K bytes
    - Viewed (0)
  10. platforms/documentation/docs/src/docs/userguide/optimizing-performance/configuration_cache.adoc

    Following the configuration phase, Gradle writes a snapshot of the task graph to a new configuration cache entry, for later Gradle invocations.
    Gradle then loads the task graph from the configuration cache, so that it can apply optimizations to the tasks, and then runs the execution phase as normal.
    Configuration time will still be spent the first time you run a particular set of tasks.

    - Registered: Wed Jun 12 18:38:38 UTC 2024
    - Last Modified: Fri Mar 29 16:24:12 UTC 2024
    - 71.1K bytes
    - Viewed (0)