Results 21 - 30 of 37 for loadOps (0.53 sec)
tensorflow/c/experimental/filesystem/filesystem_interface.h
/// `Env::LoadLibrary` or during TensorFlow's startup if they are on certain
/// paths (although this has a security risk if two plugins register for the
/// same filesystem and the malicious one loads before the legitimate one -
/// but we consider this to be something that users should care about and
/// manage themselves). In both of these cases, core TensorFlow looks for
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri May 27 17:36:54 UTC 2022 - 53.1K bytes - Viewed (0)
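The first-registration-wins behavior that comment warns about is easy to see in miniature. Below is a minimal Go sketch (hypothetical names, not TensorFlow's actual C plugin API) of a scheme registry where whichever plugin loads first owns the scheme:

package main

import "fmt"

// registry maps a filesystem scheme (e.g. "gs") to the plugin that claimed it.
var registry = map[string]string{}

// register keeps the first plugin claiming a scheme; later claimants are
// rejected, so load order alone decides who serves the scheme.
func register(scheme, plugin string) error {
	if owner, taken := registry[scheme]; taken {
		return fmt.Errorf("scheme %q already owned by %s", scheme, owner)
	}
	registry[scheme] = plugin
	return nil
}

func main() {
	_ = register("gs", "plugin-loaded-first")
	err := register("gs", "plugin-loaded-second") // the latecomer loses
	fmt.Println(registry["gs"], err)
}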
tensorflow/c/experimental/filesystem/modular_filesystem_test.cc
return *schemes;
}

INSTANTIATE_TEST_SUITE_P(ModularFileSystem, ModularFileSystemTest,
                         ::testing::ValuesIn(GetSchemes()));

// Loads a shared object implementing filesystem functionality.
static bool LoadDSO(const std::string& dso) {
  tensorflow::Status status = RegisterFilesystemPlugin(dso);
  if (!status.ok())
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri May 27 20:25:58 UTC 2022 - 71K bytes - Viewed (0)
cmd/iam-store.go
        cache.iamUserPolicyMap.Store(name, mp)
    }
} else {
    cache.iamGroupPolicyMap.Store(name, mp)
}
cache.updatedAt = time.Now()
return mp.UpdatedAt, nil
}

// PolicyNotificationHandler - loads given policy from storage. If not present,
// deletes from cache. This notification only reads from storage, and updates
// cache. When the notification is for a policy deletion, it updates the
Registered: Sun Jun 16 00:44:34 UTC 2024 - Last Modified: Thu Jun 13 22:26:38 UTC 2024 - 75.8K bytes - Viewed (0)
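The handler's contract - read storage, then either refresh or evict the cache entry - can be sketched as below. The types and names are illustrative, not MinIO's actual ones:

package iamsketch

import (
	"errors"
	"sync"
)

var errNoSuchPolicy = errors.New("no such policy")

type policyCache struct {
	mu       sync.Mutex
	policies map[string]string
}

// refresh mirrors the notification-handler pattern above: the cache is only
// ever updated from what storage returns, never from the notification itself.
func (c *policyCache) refresh(name string, load func(string) (string, error)) error {
	p, err := load(name)
	c.mu.Lock()
	defer c.mu.Unlock()
	switch {
	case err == nil:
		c.policies[name] = p // policy exists: update cache
	case errors.Is(err, errNoSuchPolicy):
		delete(c.policies, name) // policy was deleted: evict stale entry
	default:
		return err // transient storage error: keep the cached copy
	}
	return nil
}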
src/runtime/mprof.go
// If there are fewer than size records, copyFn is invoked for each record, and
// ok returns true.
func threadCreateProfileInternal(size int, copyFn func(profilerecord.StackRecord)) (n int, ok bool) {
    first := (*m)(atomic.Loadp(unsafe.Pointer(&allm)))
    for mp := first; mp != nil; mp = mp.alllink {
        n++
    }
    if n <= size {
        ok = true
        for mp := first; mp != nil; mp = mp.alllink {
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Thu May 30 17:57:37 UTC 2024 - 53.3K bytes - Viewed (0)
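The two-pass idiom here - count, then copy only if the caller's buffer is big enough, all against one atomic snapshot of the list head - ports directly to user code. A sketch using portable sync/atomic in place of the runtime-internal atomic.Loadp:

package profsketch

import "sync/atomic"

type node struct {
	id   int64
	next *node
}

var head atomic.Pointer[node]

// snapshot counts the list behind one atomic read of the head; inserts
// published after that read are simply not part of this snapshot.
func snapshot(buf []int64) (n int, ok bool) {
	first := head.Load()
	for p := first; p != nil; p = p.next {
		n++
	}
	if n <= len(buf) {
		ok = true
		i := 0
		for p := first; p != nil; p = p.next {
			buf[i] = p.id
			i++
		}
	}
	return n, ok
}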
src/runtime/mgcscavenge.go
}

// atomicScavChunkData is an atomic wrapper around a scavChunkData
// that stores it in its packed form.
type atomicScavChunkData struct {
    value atomic.Uint64
}

// load loads and unpacks a scavChunkData.
func (sc *atomicScavChunkData) load() scavChunkData {
    return unpackScavChunkData(sc.value.Load())
}

// store packs and writes a new scavChunkData. store must be serialized
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Wed May 08 17:48:45 UTC 2024 - 52.3K bytes - Viewed (0)
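Packing a small struct into a single atomic.Uint64 keeps load and store down to one atomic operation each. A sketch of the same idiom with made-up fields, not the runtime's actual scavChunkData layout:

package packsketch

import "sync/atomic"

type chunkData struct {
	inUse uint32
	gen   uint32
}

type atomicChunkData struct{ value atomic.Uint64 }

// pack/unpack squeeze both fields into one machine word.
func pack(c chunkData) uint64   { return uint64(c.inUse)<<32 | uint64(c.gen) }
func unpack(u uint64) chunkData { return chunkData{inUse: uint32(u >> 32), gen: uint32(u)} }

// load loads and unpacks the chunk data in one atomic read.
func (a *atomicChunkData) load() chunkData { return unpack(a.value.Load()) }

// store packs and writes new chunk data; as with the runtime's version,
// writers must be serialized externally if updates depend on the old value.
func (a *atomicChunkData) store(c chunkData) { a.value.Store(pack(c)) }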
src/cmd/compile/internal/ssa/debug.go
changed := false
state.changedSlots.clear()

// Update locs/registers with the effects of each Value.
for _, v := range b.Values {
    slots := state.valueNames[v.ID]

    // Loads and stores inherit the names of their sources.
    var source *Value
    switch v.Op {
    case OpStoreReg:
        source = v.Args[0]
    case OpLoadReg:
        switch a := v.Args[0]; a.Op {
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Mon Jun 10 19:44:43 UTC 2024 - 58.4K bytes - Viewed (0)
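The rule in this snippet - a register load or store inherits the user-variable names of the value it moves - is what lets debug info keep following a variable across spills and reloads. A toy sketch of that propagation, with simplified structures rather than the compiler's real ones:

package namesketch

type op int

const (
	opOther op = iota
	opStoreReg
	opLoadReg
)

type value struct {
	id   int
	op   op
	args []*value
}

// propagateNames gives every load/store the variable names of its source
// value, so the moved value still answers for the same user variables.
func propagateNames(block []*value, valueNames map[int][]string) {
	for _, v := range block {
		if v.op == opStoreReg || v.op == opLoadReg {
			source := v.args[0]
			valueNames[v.id] = append(valueNames[v.id], valueNames[source.id]...)
		}
	}
}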
src/crypto/internal/nistec/p256_asm_ppc64le.s
// changes would be needed to make this work for big
// endian; however additional changes beyond what I
// have noted are most likely needed to make it work.
// - The string used with VPERM to swap the byte order
//   for loads and stores.
// - The constants that are loaded from CPOOL.
//
// The following constants are defined in an order
// that is correct for use with LXVD2X/STXVD2X
// on little endian.
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Mon Mar 04 17:29:44 UTC 2024 - 56.5K bytes - Viewed (0)
tensorflow/compiler/mlir/tensorflow/ir/tf_ops.td
    TF_Tensor:$output
  );
}

def TF_TPUCompileMlirAndExecuteOp : TF_Op<"TPUCompileMlirAndExecute", [AttrSizedOperandSegments]> {
  let summary = "Op that compiles a computation in MLIR into a TPU program, and loads and executes it on a TPU device.";

  let description = [{
    For the internal use of the TPU compiler.
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Apr 24 04:08:35 UTC 2024 - 90.5K bytes - Viewed (0)
src/runtime/malloc.go
    unlock(&h.lock)

    // N.B. The arenas L1 map is quite small on all platforms, so it's fine to
    // just iterate over the whole thing.
    for i := range h.arenas {
        l2 := (*[1 << arenaL2Bits]*heapArena)(atomic.Loadp(unsafe.Pointer(&h.arenas[i])))
        if l2 == nil {
            continue
        }
        sysHugePage(unsafe.Pointer(l2), unsafe.Sizeof(*l2))
    }
}

// base address for all 0-byte allocations
var zerobase uintptr
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Wed May 29 17:58:53 UTC 2024 - 59.6K bytes - Viewed (0)
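The h.arenas structure being iterated is a two-level radix table: a small L1 array of pointers to large, lazily allocated L2 arrays, each slot published with an atomic store so readers like this loop need only an atomic load. A sketch of the lookup side, with illustrative sizes rather than the runtime's arenaL1Bits/arenaL2Bits:

package arenasketch

import "sync/atomic"

const (
	l1Bits = 6
	l2Bits = 12
)

type arena struct{ base uintptr }

// arenas is the L1 array; each entry points to a whole L2 block or is nil
// if no index in that block has ever been populated.
var arenas [1 << l1Bits]atomic.Pointer[[1 << l2Bits]*arena]

func lookup(idx uint) *arena {
	l2 := arenas[idx>>l2Bits].Load() // published by an atomic store
	if l2 == nil {
		return nil // whole L2 block never allocated
	}
	return l2[idx&(1<<l2Bits-1)]
}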
src/runtime/map.go
// to the new table.
// Picking loadFactor: too large and we have lots of overflow
// buckets, too small and we waste a lot of space. I wrote
// a simple program to check some stats for different loads:
// (64-bit, 8 byte keys and elems)
//  loadFactor  %overflow  bytes/entry  hitprobe  missprobe
//        4.00       2.13        20.77      3.00       4.00
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Wed May 29 17:58:53 UTC 2024 - 57.6K bytes - Viewed (0)
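The table reads as a direct trade: raising loadFactor packs more entries per byte but lengthens probe chains through overflow buckets. The constant's only job is to decide when to grow, roughly as in this sketch (Go's shipped value is 6.5 for its 8-slot buckets):

package loadsketch

// loadFactor is the growth threshold; Go's runtime uses 6.5 (13/2),
// balancing overflow-chain length against bytes per entry.
const loadFactor = 6.5

// tooFull reports whether a map with count entries spread over the given
// number of buckets has exceeded the threshold and should grow.
func tooFull(count, buckets int) bool {
	return float64(count) > loadFactor*float64(buckets)
}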