- Sort: Score
- Results per page: 10
- Languages: All
Results 1 - 10 of 14 for Inlining (2.65 sec)
-
src/runtime/mbitmap.go
//go:nosplit func addb(p *byte, n uintptr) *byte { // Note: wrote out full expression instead of calling add(p, n) // to reduce the number of temporaries generated by the // compiler for this trivial expression during inlining. return (*byte)(unsafe.Pointer(uintptr(unsafe.Pointer(p)) + n)) } // subtractb returns the byte pointer p-n. // //go:nowritebarrier //go:nosplit func subtractb(p *byte, n uintptr) *byte {
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Thu May 23 00:18:55 UTC 2024 - 60K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/transforms/sparsecore/embedding_pipelining.cc
} } return LogicalResult::success(); } // After inlining, there will likely be some instances where a // TPUReplicatedInput feeds directly into a TPUReplicatedOutput. Find such // pairs and remove them. LogicalResult RemoveOutputInputPairs(func::FuncOp func) { llvm::SetVector<Operation*> ops_to_erase; // Inlining can result in multiple TPUCompilationResultOp and
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 92.9K bytes - Viewed (0) -
tensorflow/compiler/jit/encapsulate_subgraphs_pass.cc
if (op_reg_data->is_function_op) { function_nodes.push_back(node); } } for (auto node : function_nodes) { VLOG(2) << "Inlining function " << node->name(); const FunctionDef* fdef = library->Find(node->type_string()); if (fdef == nullptr) { return errors::Internal("Failed to find function ", node->type_string(),
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Feb 22 08:47:20 UTC 2024 - 51K bytes - Viewed (0) -
src/net/netip/netip_test.go
ip := IPv4(8, 8, 8, 8) ips = ips[:0] for i := 0; i < 100; i++ { ips = append(ips, ip) } } } // ip4i was one of the possible representations of IP that came up in // discussions, inlining IPv4 addresses, but having an "overflow" // interface for IPv6 or IPv6 + zone. This is here for benchmarking. type ip4i struct { ip4 [4]byte flags1 byte flags2 byte flags3 byte flags4 byte
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Tue Jun 04 17:10:01 UTC 2024 - 54.3K bytes - Viewed (0) -
src/runtime/traceback.go
} arg.pc = 0 callCgoSymbolizer(&arg) } // printOneCgoTraceback prints the traceback of a single cgo caller. // This can print more than one line because of inlining. // It returns the "stop" result of commitFrame. func printOneCgoTraceback(pc uintptr, commitFrame func() (pr, stop bool), arg *cgoSymbolizerArg) bool { arg.pc = pc for { if pr, stop := commitFrame(); stop {
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Wed May 29 16:25:21 UTC 2024 - 55.1K bytes - Viewed (0) -
src/cmd/compile/internal/ssa/_gen/PPC64.rules
// floating-point fused multiply-add/sub (F(ADD|SUB) (FMUL x y) z) && x.Block.Func.useFMA(v) => (FM(ADD|SUB) x y z) (F(ADDS|SUBS) (FMULS x y) z) && x.Block.Func.useFMA(v) => (FM(ADDS|SUBS) x y z) // Arch-specific inlining for small or disjoint runtime.memmove (SelectN [0] call:(CALLstatic {sym} s1:(MOVDstore _ (MOVDconst [sz]) s2:(MOVDstore _ src s3:(MOVDstore {t} _ dst mem))))) && sz >= 0 && isSameCall(sym, "runtime.memmove")
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Fri Jun 07 19:02:52 UTC 2024 - 53.2K bytes - Viewed (0) -
cmd/erasure-object.go
if metaArr[index].IsValid() { metaArr[index].ModTime = modTime metaArr[index].VersionID = versionID if !metaArr[index].InlineData() { // If the data is not inlined, we may end up incorrectly // inlining the data here, that leads to an inconsistent // situation where some objects are were not inlined // were now inlined, make sure to `nil` the Data such // that xl.meta is written as expected.
Registered: Sun Jun 16 00:44:34 UTC 2024 - Last Modified: Mon Jun 10 15:51:27 UTC 2024 - 78.6K bytes - Viewed (0) -
tensorflow/c/c_api.h
// names - "[a-z][a-z0-9_]*". In the latter case, // names for outputs will be generated automatically. // opts - various options for the function, e.g. XLA's inlining control. // description - optional human-readable description of this function. // status - Set to OK on success and an appropriate error on failure. //
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Oct 26 21:08:15 UTC 2023 - 82.3K bytes - Viewed (0) -
src/runtime/asm_amd64.s
// Caution: ugly multiline assembly macros in your future! #define DISPATCH(NAME,MAXSIZE) \ CMPQ CX, $MAXSIZE; \ JA 3(PC); \ MOVQ $NAME(SB), AX; \ JMP AX // Note: can't just "JMP NAME(SB)" - bad inlining results. TEXT ·reflectcall(SB), NOSPLIT, $0-48 MOVLQZX frameSize+32(FP), CX DISPATCH(runtime·call16, 16) DISPATCH(runtime·call32, 32) DISPATCH(runtime·call64, 64) DISPATCH(runtime·call128, 128)
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Sat May 11 20:38:24 UTC 2024 - 60.4K bytes - Viewed (0) -
src/runtime/mheap.go
// Must be nosplit because it has callers that are nosplit. // //go:nosplit func spanOf(p uintptr) *mspan { // This function looks big, but we use a lot of constant // folding around arenaL1Bits to get it under the inlining // budget. Also, many of the checks here are safety checks // that Go needs to do anyway, so the generated code is quite // short. ri := arenaIndex(p) if arenaL1Bits == 0 {
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Wed May 22 22:31:00 UTC 2024 - 78K bytes - Viewed (0)