- Sort Score
- Results 10 results
- Languages All
Results 1 - 10 of 91 for food (0.1 sec)
-
src/cmd/compile/internal/ssa/_gen/S390X.rules
(MOVWreg x:(Arg <t>)) && t.IsSigned() && t.Size() <= 4 => x // Fold zero extensions into constants. (MOVBZreg (MOVDconst [c])) => (MOVDconst [int64( uint8(c))]) (MOVHZreg (MOVDconst [c])) => (MOVDconst [int64(uint16(c))]) (MOVWZreg (MOVDconst [c])) => (MOVDconst [int64(uint32(c))]) // Fold sign extensions into constants. (MOVBreg (MOVDconst [c])) => (MOVDconst [int64( int8(c))])
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Thu Oct 12 18:09:26 UTC 2023 - 74.3K bytes - Viewed (0) -
src/cmd/compile/internal/ssa/_gen/AMD64.rules
=> (SET(B|AE)store [off] {sym} ptr (BTLconst [31] x) mem) // Fold combinations of bit ops on same bit. An example is math.Copysign(c,-1) (BTSQconst [c] (BTRQconst [c] x)) => (BTSQconst [c] x) (BTSQconst [c] (BTCQconst [c] x)) => (BTSQconst [c] x) (BTRQconst [c] (BTSQconst [c] x)) => (BTRQconst [c] x) (BTRQconst [c] (BTCQconst [c] x)) => (BTRQconst [c] x) // Fold boolean negation into SETcc. (XORLconst [1] (SETNE x)) => (SETEQ x)
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Tue Mar 12 19:38:41 UTC 2024 - 93.9K bytes - Viewed (0) -
src/cmd/go/internal/modcmd/vendor.go
// src/cmd/go/internal/load/pkg.go func checkPathCollisions(modpkgs map[module.Version][]string) { var foldPath = make(map[string]string, len(modpkgs)) for m := range modpkgs { fold := str.ToFold(m.Path) if other := foldPath[fold]; other == "" { foldPath[fold] = m.Path } else if other != m.Path { base.Fatalf("go.mod: case-insensitive import collision: %q and %q", m.Path, other) } }
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Thu Mar 14 14:19:59 UTC 2024 - 14.5K bytes - Viewed (0) -
src/cmd/compile/internal/ssa/_gen/ARM64latelower.rules
(MOVWreg x:(MOVHUloadidx2 _ _ _)) => (MOVDreg x) (MOVWreg x:(MOVWloadidx4 _ _ _)) => (MOVDreg x) (MOVWUreg x:(MOVHUloadidx2 _ _ _)) => (MOVDreg x) (MOVWUreg x:(MOVWUloadidx4 _ _ _)) => (MOVDreg x) // fold double extensions (MOVBreg x:(MOVBreg _)) => (MOVDreg x) (MOVBUreg x:(MOVBUreg _)) => (MOVDreg x) (MOVHreg x:(MOVBreg _)) => (MOVDreg x) (MOVHreg x:(MOVBUreg _)) => (MOVDreg x) (MOVHreg x:(MOVHreg _)) => (MOVDreg x)
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Tue Mar 12 19:38:41 UTC 2024 - 4.1K bytes - Viewed (0) -
src/cmd/compile/internal/ssa/_gen/RISCV64.rules
(Store {t} ptr val mem) && t.Size() == 4 && t.IsFloat() => (FMOVWstore ptr val mem) (Store {t} ptr val mem) && t.Size() == 8 && t.IsFloat() => (FMOVDstore ptr val mem) // We need to fold MOVaddr into the LD/MOVDstore ops so that the live variable analysis // knows what variables are being read/written by the ops.
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Thu Mar 07 14:57:07 UTC 2024 - 40.3K bytes - Viewed (0) -
src/cmd/compile/internal/ssa/nilcheck.go
// undo that information when this dominator subtree is done. nonNilValues[ptr.ID] = v work = append(work, bp{op: ClearPtr, ptr: ptr}) fallthrough // a non-eliminated nil check might be a good place for a statement boundary. default: if v.Pos.IsStmt() != src.PosNotStmt && !isPoorStatementOp(v.Op) && pendingLines.contains(v.Pos) { v.Pos = v.Pos.WithIsStmt() pendingLines.remove(v.Pos) }
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Tue Oct 31 20:45:54 UTC 2023 - 11.3K bytes - Viewed (0) -
src/cmd/compile/internal/ssa/sccp.go
OpNot: lt1 := t.getLatticeCell(val.Args[0]) if lt1.tag == constant { // here we take a shortcut by reusing generic rules to fold constants t.latticeCells[val] = computeLattice(t.f, val, lt1.val) } else { t.latticeCells[val] = lattice{lt1.tag, nil} } // fold 2-input operations case // add OpAdd64, OpAdd32, OpAdd16, OpAdd8, OpAdd32F, OpAdd64F, // sub
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Mon Jan 22 16:54:50 UTC 2024 - 17.6K bytes - Viewed (0) -
src/cmd/compile/internal/ssa/numberlines.go
OpArgIntReg, OpArgFloatReg: return true } return false } // nextGoodStatementIndex returns an index at i or later that is believed // to be a good place to start the statement for b. This decision is // based on v's Op, the possibility of a better later operation, and // whether the values following i are the same line as v.
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Mon Aug 14 21:26:13 UTC 2023 - 7.8K bytes - Viewed (0) -
src/cmd/compile/internal/ssa/schedule.go
edges = append(edges, edge{a, v}) } } } // Find store chain for block. // Store chains for different blocks overwrite each other, so // the calculated store chain is good only for this block. for _, v := range b.Values { if v.Op != OpPhi && v.Op != OpInitMem && v.Type.IsMemory() { nextMem[v.MemoryArg().ID] = v } }
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Mon Apr 08 15:53:17 UTC 2024 - 16.4K bytes - Viewed (0) -
src/cmd/compile/internal/ssa/memcombine.go
package ssa import ( "cmd/compile/internal/base" "cmd/compile/internal/types" "cmd/internal/src" "sort" ) // memcombine combines smaller loads and stores into larger ones. // We ensure this generates good code for encoding/binary operations. // It may help other cases also. func memcombine(f *Func) { // This optimization requires that the architecture has // unaligned loads and unaligned stores. if !f.Config.unalignedOK {
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Thu Mar 21 19:45:41 UTC 2024 - 18.4K bytes - Viewed (0)