- Sort Score
- Result 10 results
- Languages All
Results 21 - 30 of 37 for loadsys (0.17 sec)
-
src/reflect/type.go
// FuncOf does not share the common lookupCache since cacheKey is not // sufficient to represent functions unambiguously. var funcLookupCache struct { sync.Mutex // Guards stores (but not loads) on m. // m is a map[uint32][]*rtype keyed by the hash calculated in FuncOf. // Elements of m are append-only and thus safe for concurrent reading. m sync.Map }
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Wed May 29 17:58:53 UTC 2024 - 85.5K bytes - Viewed (0) -
tensorflow/c/experimental/filesystem/filesystem_interface.h
/// `Env::LoadLibrary` or during TensorFlow's startup if they are on certain /// paths (although this has a security risk if two plugins register for the /// same filesystem and the malicious one loads before the legitimate one - /// but we consider this to be something that users should care about and /// manage themselves). In both of these cases, core TensorFlow looks for
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri May 27 17:36:54 UTC 2022 - 53.1K bytes - Viewed (0) -
tensorflow/c/experimental/filesystem/modular_filesystem_test.cc
return *schemes; } INSTANTIATE_TEST_SUITE_P(ModularFileSystem, ModularFileSystemTest, ::testing::ValuesIn(GetSchemes())); // Loads a shared object implementing filesystem functionality. static bool LoadDSO(const std::string& dso) { tensorflow::Status status = RegisterFilesystemPlugin(dso); if (!status.ok())
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri May 27 20:25:58 UTC 2022 - 71K bytes - Viewed (0) -
cmd/iam-store.go
cache.iamUserPolicyMap.Store(name, mp) } } else { cache.iamGroupPolicyMap.Store(name, mp) } cache.updatedAt = time.Now() return mp.UpdatedAt, nil } // PolicyNotificationHandler - loads given policy from storage. If not present, // deletes from cache. This notification only reads from storage, and updates // cache. When the notification is for a policy deletion, it updates the
Registered: Sun Jun 16 00:44:34 UTC 2024 - Last Modified: Thu Jun 13 22:26:38 UTC 2024 - 75.8K bytes - Viewed (0) -
src/cmd/compile/internal/ssa/debug.go
changed := false state.changedSlots.clear() // Update locs/registers with the effects of each Value. for _, v := range b.Values { slots := state.valueNames[v.ID] // Loads and stores inherit the names of their sources. var source *Value switch v.Op { case OpStoreReg: source = v.Args[0] case OpLoadReg: switch a := v.Args[0]; a.Op {
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Mon Jun 10 19:44:43 UTC 2024 - 58.4K bytes - Viewed (0) -
src/crypto/internal/nistec/p256_asm_ppc64le.s
// changes would be needed to make this work for big // endian; however additional changes beyond what I // have noted are most likely needed to make it work. // - The string used with VPERM to swap the byte order // for loads and stores. // - The constants that are loaded from CPOOL. // // The following constants are defined in an order // that is correct for use with LXVD2X/STXVD2X // on little endian.
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Mon Mar 04 17:29:44 UTC 2024 - 56.5K bytes - Viewed (0) -
src/runtime/mgcscavenge.go
} // atomicScavChunkData is an atomic wrapper around a scavChunkData // that stores it in its packed form. type atomicScavChunkData struct { value atomic.Uint64 } // load loads and unpacks a scavChunkData. func (sc *atomicScavChunkData) load() scavChunkData { return unpackScavChunkData(sc.value.Load()) } // store packs and writes a new scavChunkData. store must be serialized
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Wed May 08 17:48:45 UTC 2024 - 52.3K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/ir/tf_ops.td
TF_Tensor:$output ); } def TF_TPUCompileMlirAndExecuteOp : TF_Op<"TPUCompileMlirAndExecute", [AttrSizedOperandSegments]> { let summary = "Op that compiles a computation in MLIR into a TPU program, and loads and executes it on a TPU device."; let description = [{ For the internal use of the TPU compiler.
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Apr 24 04:08:35 UTC 2024 - 90.5K bytes - Viewed (0) -
src/runtime/map.go
// to the new table. // Picking loadFactor: too large and we have lots of overflow // buckets, too small and we waste a lot of space. I wrote // a simple program to check some stats for different loads: // (64-bit, 8 byte keys and elems) // loadFactor %overflow bytes/entry hitprobe missprobe // 4.00 2.13 20.77 3.00 4.00
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Wed May 29 17:58:53 UTC 2024 - 57.6K bytes - Viewed (0) -
src/runtime/mbitmap.go
var sl notInHeapSlice sl = notInHeapSlice{(*notInHeap)(unsafe.Pointer(spanBase + spanSize - bitmapSize)), elems, elems} return *(*[]uintptr)(unsafe.Pointer(&sl)) } // heapBitsSmallForAddr loads the heap bits for the object stored at addr from span.heapBits. // // addr must be the base pointer of an object in the span. heapBitsInSpan(span.elemsize) // must be true. // //go:nosplit
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Thu May 23 00:18:55 UTC 2024 - 60K bytes - Viewed (0)