- Sort Score
- Results per page: 10
- Languages All
Results 1 - 10 of 37 for loadsys (0.26 sec)
-
tensorflow/compiler/mlir/tensorflow/transforms/resource_op_lifting.cc
int GetLiftedNumResults() const { return num_new_results_; } // Generates hoisted reads for resources that need them before the op. void GenerateHoistedReads(); // Replaces all resource loads in the given region with hoisted loads. If // `read_only` is true, limit this replacement to read only resources. void ReplaceResourceLoads(Region& region, bool read_only);
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 55.1K bytes - Viewed (0) -
src/cmd/compile/internal/ssa/rewrite.go
// has more than one use. if load.Uses != 1 { return false } mem := load.MemoryArg() // We need the load's memory arg to still be alive at target. That // can't be the case if one of target's args depends on a memory // state that is a successor of load's memory arg. // // For example, it would be invalid to merge load into target in // the following situation because newmem has killed oldmem
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Fri Jun 07 19:02:52 UTC 2024 - 64.2K bytes - Viewed (0) -
src/cmd/compile/internal/ssa/_gen/PPC64.rules
// Indexed ops generate indexed load or store instructions for all GOPPC64 values. // Non-indexed ops generate DS-form loads and stores when the offset fits in 16 bits, // and on power8 and power9, a multiple of 4 is required for MOVW and MOVD ops. // On power10, prefixed loads and stores can be used for offsets > 16 bits and <= 32 bits. // and support for PC relative addressing must be available if relocation is needed.
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Fri Jun 07 19:02:52 UTC 2024 - 53.2K bytes - Viewed (0) -
src/cmd/compile/internal/ssa/_gen/S390XOps.go
// Fast-BCR-serialization to ensure store-load ordering. {name: "SYNC", argLength: 1, reg: sync, asm: "SYNC", typ: "Mem"}, // Atomic loads. These are just normal loads but return <value,memory> tuples // so they can be properly ordered with other loads. // load from arg0+auxint+aux. arg1=mem. {name: "MOVBZatomicload", argLength: 2, reg: gpload, asm: "MOVBZ", aux: "SymOff", faultOnNilArg0: true, symEffect: "Read"},
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Fri Feb 24 00:21:13 UTC 2023 - 52.5K bytes - Viewed (0) -
src/cmd/compile/internal/ssa/_gen/S390X.rules
(MOVBstoreconst [sc] {s} (ADDconst [off] ptr) mem) && is20Bit(sc.Off64()+int64(off)) => (MOVBstoreconst [sc.addOffset32(off)] {s} ptr mem) // Merge address calculations into loads and stores. // Offsets from SB must not be merged into unaligned memory accesses because // loads/stores using PC-relative addressing directly must be aligned to the // size of the target.
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Thu Oct 12 18:09:26 UTC 2023 - 74.3K bytes - Viewed (0) -
src/cmd/link/internal/loader/loader.go
} } return FuncInfo{} } // Preload a package: adds autolib. // Does not add defined package or non-packaged symbols to the symbol table. // These are done in LoadSyms. // Does not read symbol data. // Returns the fingerprint of the object. func (l *Loader) Preload(localSymVersion int, f *bio.Reader, lib *sym.Library, unit *sym.CompilationUnit, length int64) goobj.FingerprintType {
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Fri May 24 20:26:10 UTC 2024 - 81.5K bytes - Viewed (0) -
tensorflow/BUILD
}), ) # copybara:comment_end # A shared object which includes registration mechanisms for ops and # kernels. Does not include the implementations of any ops or kernels. Instead, # the library which loads libtensorflow_framework.so # (e.g. _pywrap_tensorflow_internal.so for Python, libtensorflow.so for the C # API) is responsible for registering ops with libtensorflow_framework.so. In
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri May 31 16:51:59 UTC 2024 - 53.5K bytes - Viewed (0) -
src/cmd/compile/internal/ppc64/ssa.go
// alignment with small iteration // counts. if ctr > 3 { p = s.Prog(obj.APCALIGN) p.From.Type = obj.TYPE_CONST p.From.Offset = 16 } // Generate 16 byte loads and stores. // Use temp register for index (16) // on the second one. p = s.Prog(ppc64.ALXVD2X) p.From.Type = obj.TYPE_MEM p.From.Reg = srcReg p.From.Index = ppc64.REGZERO
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Wed May 22 19:59:38 UTC 2024 - 55.4K bytes - Viewed (0) -
src/cmd/compile/internal/ssa/_gen/AMD64.rules
(If cond yes no) => (NE (TESTB cond cond) yes no) (JumpTable idx) => (JUMPTABLE {makeJumpTableSym(b)} idx (LEAQ <typ.Uintptr> {makeJumpTableSym(b)} (SB))) // Atomic loads. Other than preserving their ordering with respect to other loads, nothing special here. (AtomicLoad8 ptr mem) => (MOVBatomicload ptr mem) (AtomicLoad32 ptr mem) => (MOVLatomicload ptr mem) (AtomicLoad64 ptr mem) => (MOVQatomicload ptr mem)
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Tue Mar 12 19:38:41 UTC 2024 - 93.9K bytes - Viewed (0) -
src/cmd/link/internal/ld/lib.go
lib := ctxt.Library[i] if lib.Shlib == "" { loadobjfile(ctxt, lib) } } } } // Add non-package symbols and references of externally defined symbols. ctxt.loader.LoadSyms(ctxt.Arch) // Load symbols from shared libraries, after all Go object symbols are loaded. for _, lib := range ctxt.Library { if lib.Shlib != "" { if ctxt.Debugvlog > 1 {
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Tue May 21 18:45:27 UTC 2024 - 88.6K bytes - Viewed (0)