- Sort Score
- Results per page: 10
- Languages All
Results 1 - 10 of 15 for fixup (0.09 sec)
-
src/net/main_conf_test.go
// and returns a fixup function to restore the old settings. func forceGoDNS() func() { c := systemConf() oldGo := c.netGo oldCgo := c.netCgo fixup := func() { c.netGo = oldGo c.netCgo = oldCgo } c.netGo = true c.netCgo = false return fixup } // forceCgoDNS forces the resolver configuration to use the cgo resolver // and returns a fixup function to restore the old settings.
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Mon Feb 19 20:29:27 UTC 2024 - 2.5K bytes - Viewed (0) -
src/cmd/compile/internal/coverage/cover.go
"strconv" "strings" ) // names records state information collected in the first fixup // phase so that it can be passed to the second fixup phase. type names struct { MetaVar *ir.Name PkgIdVar *ir.Name InitFn *ir.Func CounterMode coverage.CounterMode CounterGran coverage.CounterGranularity } // Fixup adds calls to the pkg init function as appropriate to
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Fri Mar 29 21:55:46 UTC 2024 - 6K bytes - Viewed (0) -
src/net/lookup_test.go
} if runtime.GOOS == "ios" { t.Skip("no resolv.conf on iOS") } defer dnsWaitGroup.Wait() if fixup := forceGoDNS(); fixup != nil { testDots(t, "go") fixup() } if fixup := forceCgoDNS(); fixup != nil { testDots(t, "cgo") fixup() } } func testDots(t *testing.T, mode string) { names, err := LookupAddr("8.8.8.8") // Google dns server if err != nil {
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Thu May 23 01:00:11 UTC 2024 - 41.4K bytes - Viewed (0) -
src/math/big/natdiv.go
(As before, this fixup is only needed at most twice.) Now that q̂ = ⌊uₙuₙ₋₁uₙ₋₂ / vₙ₋₁vₙ₋₂⌋, as mentioned above it is at most one away from the correct q, and we've avoided doing any n-digit math. (If we need the new remainder, it can be computed as r̂·B + uₙ₋₂ - q̂·vₙ₋₂.) The final check u < q̂·v and the possible fixup must be done at full precision.
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Thu Mar 14 17:02:38 UTC 2024 - 34.4K bytes - Viewed (0) -
src/cmd/compile/internal/ssa/_gen/PPC64latelower.rules
(CMPconst [0] z:(ANDconst [c] x)) && int64(uint16(c)) == c && v.Block == z.Block => (CMPconst [0] convertPPC64OpToOpCC(z)) // And finally, fixup the flag user. (CMPconst <t> [0] (Select0 z:((ADD|AND|ANDN|OR|SUB|NOR|XOR)CC x y))) => (Select1 <t> z) (CMPconst <t> [0] (Select0 z:((ADDCCconst|ANDCCconst|NEGCC|CNTLZDCC|RLDICLCC) y))) => (Select1 <t> z)
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Wed May 22 19:59:38 UTC 2024 - 3.8K bytes - Viewed (0) -
test/codegen/arithmetic.go
// amd64:`INCL` a++ // amd64:`DECL` b-- // amd64:`SUBL.*-128` c += 128 return a, b, c } // Divide -> shift rules usually require fixup for negative inputs. // If the input is non-negative, make sure the fixup is eliminated. func divInt(v int64) int64 { if v < 0 { return 0 } // amd64:-`.*SARQ.*63,`, -".*SHRQ", ".*SARQ.*[$]9," return v / 512 }
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Fri May 17 15:28:00 UTC 2024 - 15.2K bytes - Viewed (0) -
src/cmd/link/internal/ppc64/asm.go
} ctxt.Arch.ByteOrder.PutUint32(p[r.Off()+4:], OP_TOCRESTORE) } return stub.Sym(), firstUse } // Scan relocs and generate PLT stubs and generate/fixup ABI defined functions created by the linker. func genstubs(ctxt *ld.Link, ldr *loader.Loader) { var stubs []loader.Sym var abifuncs []loader.Sym for _, s := range ctxt.Textp { relocs := ldr.Relocs(s)
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Tue Mar 19 20:54:08 UTC 2024 - 63.7K bytes - Viewed (0) -
src/cmd/asm/internal/asm/asm.go
// BC 4,... // into // BC $4,... prog.From = obj.Addr{ Type: obj.TYPE_CONST, Offset: p.getConstant(prog, op, &a[0]), } } // Likewise, fixup usage like: // BC x,LT,... // BC x,foo+2,... // BC x,4 // BC x,$5 // into // BC x,CR0LT,... // BC x,CR0EQ,... // BC x,CR1LT,... // BC x,CR1GT,...
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Wed May 22 02:04:54 UTC 2024 - 25.5K bytes - Viewed (0) -
src/cmd/compile/internal/ssa/rewrite.go
nbits := int64((auxint >> 24) & 0xFF) mask = ((1 << uint(nbits-mb)) - 1) ^ ((1 << uint(nbits-me)) - 1) if mb > me { mask = ^mask } if nbits == 32 { mask = uint64(uint32(mask)) } // Fixup ME to match ISA definition. The second argument to MASK(..,me) // is inclusive. me = (me - 1) & (nbits - 1) return } // This verifies that the mask is a set of // consecutive bits including the least
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Fri Jun 07 19:02:52 UTC 2024 - 64.2K bytes - Viewed (0) -
guava/src/com/google/common/net/InetAddresses.java
int coercedHash = Hashing.murmur3_32_fixed().hashLong(addressAsLong).asInt(); // Squash into 224/4 Multicast and 240/4 Reserved space (i.e. 224/3). coercedHash |= 0xe0000000; // Fixup to avoid some "illegal" values. Currently the only potential // illegal value is 255.255.255.255. if (coercedHash == 0xffffffff) { coercedHash = 0xfffffffe; }
Registered: Wed Jun 12 16:38:11 UTC 2024 - Last Modified: Fri May 24 16:44:05 UTC 2024 - 47.1K bytes - Viewed (0)