Results 1 - 10 of 39 for MASK (0.04 sec)
src/sync/atomic/atomic_test.go
    i     int32
    after int32
}
x.before = magic32
x.after = magic32
x.i = -1
j := x.i
for mask := int32(1); mask != 0; mask <<= 1 {
    old := x.i
    k := AndInt32(&x.i, ^mask)
    j &= ^mask
    if x.i != j || k != old {
        t.Fatalf("mask=%d i=%d j=%d k=%d old=%d", mask, x.i, j, k, old)
    }
}
if x.before != magic32 || x.after != magic32 {
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Fri May 17 18:37:29 UTC 2024 - 71.4K bytes - Viewed (0)
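The atomic_test.go excerpt above clears one bit at a time via the atomic AND primitive and checks the returned old value. A minimal standalone sketch of the same pattern, assuming Go 1.23 or newer, where sync/atomic gained And/Or operations on its typed values:

    package main

    import (
        "fmt"
        "sync/atomic"
    )

    func main() {
        // Start with all bits set, then clear them one at a time,
        // mirroring the loop in the test snippet above.
        var x atomic.Int32
        x.Store(-1)

        for mask := int32(1); mask != 0; mask <<= 1 {
            old := x.And(^mask) // atomically x &= ^mask, returning the previous value
            fmt.Printf("cleared %#x: old=%#x new=%#x\n", mask, old, x.Load())
        }
    }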
src/runtime/mbitmap.go
// the address of the first word referenced by mask.
addr uintptr

// mask is a bitmask where each bit corresponds to pointer-words after addr.
// Bit 0 is the pointer-word at addr, Bit 1 is the next word, and so on.
// If a bit is 1, then there is a pointer at that word.
// nextFast and next mask out bits in this mask as their pointers are processed.
mask uintptr
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Thu May 23 00:18:55 UTC 2024 - 60K bytes - Viewed (0)
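The mbitmap.go comment describes a word-granular pointer bitmask: bit i covers the i-th word after addr. A rough sketch of walking such a mask by peeling off the lowest set bit; the helper below is hypothetical, not the runtime's code, and assumes 8-byte words:

    package main

    import (
        "fmt"
        "math/bits"
    )

    // visitPointerWords returns the address of each word flagged in mask,
    // where bit i means "the i-th word after addr holds a pointer".
    func visitPointerWords(addr, mask, wordSize uintptr) []uintptr {
        var out []uintptr
        for mask != 0 {
            i := bits.TrailingZeros(uint(mask)) // index of the lowest set bit
            out = append(out, addr+uintptr(i)*wordSize)
            mask &^= 1 << i // clear the bit we just processed
        }
        return out
    }

    func main() {
        // Bits 0, 3, and 5 set: pointers at words 0, 3, and 5 after addr.
        fmt.Printf("%#x\n", visitPointerWords(0x1000, 0b101001, 8))
    }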
src/cmd/compile/internal/ssa/rewrite.go
} else if nbits == 32 {
    mb = bits.LeadingZeros32(uint32(mask))
    me = 32 - bits.TrailingZeros32(uint32(mask))
    mbn = bits.LeadingZeros32(^uint32(mask))
    men = 32 - bits.TrailingZeros32(^uint32(mask))
} else {
    mb = bits.LeadingZeros64(uint64(mask))
    me = 64 - bits.TrailingZeros64(uint64(mask))
    mbn = bits.LeadingZeros64(^uint64(mask))
    men = 64 - bits.TrailingZeros64(^uint64(mask))
}
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Fri Jun 07 19:02:52 UTC 2024 - 64.2K bytes - Viewed (0)
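The rewrite.go excerpt takes leading and trailing zero counts of a mask and of its complement, which is how the compiler locates where a run of ones begins and ends (useful for PPC64 rotate-and-mask instructions). A small sketch of one way to test for a single contiguous run using the same primitives; the helper is illustrative, not the compiler's actual check:

    package main

    import (
        "fmt"
        "math/bits"
    )

    // isContiguousOnes reports whether mask is a single unbroken run of 1 bits,
    // deriving the run boundaries from leading/trailing zero counts.
    func isContiguousOnes(mask uint32) bool {
        if mask == 0 {
            return false
        }
        mb := bits.LeadingZeros32(mask)       // zeros before the first 1
        me := 32 - bits.TrailingZeros32(mask) // position just past the last 1
        // A contiguous mask has exactly (me - mb) one bits.
        return bits.OnesCount32(mask) == me-mb
    }

    func main() {
        fmt.Println(isContiguousOnes(0x00ffff00)) // true: one run of ones
        fmt.Println(isContiguousOnes(0x00ff00ff)) // false: two separate runs
    }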
src/cmd/vendor/golang.org/x/sys/unix/syscall_linux.go
//sys fanotifyMark(fd int, flags uint, mask uint64, dirFd int, pathname *byte) (err error)

func FanotifyMark(fd int, flags uint, mask uint64, dirFd int, pathname string) (err error) {
    if pathname == "" {
        return fanotifyMark(fd, flags, mask, dirFd, nil)
    }
    p, err := BytePtrFromString(pathname)
    if err != nil {
        return err
    }
    return fanotifyMark(fd, flags, mask, dirFd, p)
}
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Thu Mar 07 05:26:45 UTC 2024 - 77.5K bytes - Viewed (0)
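FanotifyMark above is the exported wrapper: it converts the pathname to a *byte, or passes nil for the empty string, before calling the raw syscall stub. A rough usage sketch, assuming Linux, sufficient privileges (typically CAP_SYS_ADMIN), and the fanotify constants exported by golang.org/x/sys/unix:

    package main

    import (
        "log"

        "golang.org/x/sys/unix"
    )

    func main() {
        fd, err := unix.FanotifyInit(unix.FAN_CLASS_NOTIF, unix.O_RDONLY)
        if err != nil {
            log.Fatal(err)
        }
        defer unix.Close(fd)

        // Watch /tmp for open and close-after-write events. The mark value is
        // the uint64 event bitmask that FanotifyMark forwards to the kernel.
        mark := uint64(unix.FAN_OPEN | unix.FAN_CLOSE_WRITE)
        if err := unix.FanotifyMark(fd, unix.FAN_MARK_ADD, mark, unix.AT_FDCWD, "/tmp"); err != nil {
            log.Fatal(err)
        }
    }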
src/reflect/type.go
}

// Element is small with pointer mask; use as literal bits.
ptrs := typ.PtrBytes / goarch.PtrSize
mask := typ.GcSlice(0, (ptrs+7)/8)
// Emit 120-bit chunks of full bytes (max is 127 but we avoid using partial bytes).
for ; ptrs > 120; ptrs -= 120 {
    dst = append(dst, 120)
    dst = append(dst, mask[:15]...)
    mask = mask[15:]
}
dst = append(dst, byte(ptrs))
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Wed May 29 17:58:53 UTC 2024 - 85.5K bytes - Viewed (0)
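The reflect/type.go excerpt emits a pointer bitmask in records of at most 120 bits, i.e. 15 whole bytes per record. A generic sketch of that chunking arithmetic with hypothetical names; the byte layout here is only illustrative, not reflect's actual GC data encoding:

    package main

    import "fmt"

    // emitChunks encodes a bitmask of nbits bits (packed into mask, low bit
    // first) as a sequence of [count, payload...] records, at most 120 bits
    // (15 whole bytes) per record, echoing the chunking in the snippet above.
    func emitChunks(dst, mask []byte, nbits int) []byte {
        for ; nbits > 120; nbits -= 120 {
            dst = append(dst, 120)
            dst = append(dst, mask[:15]...)
            mask = mask[15:]
        }
        dst = append(dst, byte(nbits))
        dst = append(dst, mask[:(nbits+7)/8]...)
        return dst
    }

    func main() {
        mask := make([]byte, 40) // 320 bits -> chunks of 120, 120, 80
        out := emitChunks(nil, mask, 320)
        fmt.Println(len(out)) // 3 count bytes + 15 + 15 + 10 payload bytes = 43
    }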
cmd/peer-rest-server.go
}
pattern := event.NewPattern(prefix, suffix)

var eventNames []event.Name
var mask pubsub.Mask
for _, ev := range values[peerRESTListenEvents] {
    eventName, err := event.ParseName(ev)
    if err != nil {
        return grid.NewRemoteErr(err)
    }
    mask.MergeMaskable(eventName)
    eventNames = append(eventNames, eventName)
}
Registered: Sun Jun 16 00:44:34 UTC 2024 - Last Modified: Fri May 24 23:05:23 UTC 2024 - 52.1K bytes - Viewed (0)
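The MinIO handler above folds each parsed event name into a pubsub.Mask via MergeMaskable. A generic sketch of the same accumulate-a-bitmask-from-names idea, using hypothetical names rather than MinIO's types:

    package main

    import (
        "fmt"
        "strings"
    )

    // Each event name maps to one bit; a subscription mask is the OR of the
    // requested bits. The names and values here are made up for illustration.
    var eventBits = map[string]uint64{
        "object:created":  1 << 0,
        "object:removed":  1 << 1,
        "object:accessed": 1 << 2,
    }

    func buildMask(names []string) (uint64, error) {
        var mask uint64
        for _, n := range names {
            bit, ok := eventBits[strings.ToLower(n)]
            if !ok {
                return 0, fmt.Errorf("unknown event name %q", n)
            }
            mask |= bit
        }
        return mask, nil
    }

    func main() {
        mask, err := buildMask([]string{"object:created", "object:removed"})
        fmt.Printf("mask=%#b err=%v\n", mask, err)
    }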
src/cmd/compile/internal/ssa/debug.go
    k, d := it.Next()
    live := d.(*liveSlot)
    slots[k] = live.VarLoc
    if live.VarLoc.Registers == 0 {
        continue
    }
    mask := uint64(live.VarLoc.Registers)
    for {
        if mask == 0 {
            break
        }
        reg := uint8(bits.TrailingZeros64(mask))
        mask &^= 1 << reg
        registers[reg] = append(registers[reg], SlotID(k))
    }
}
state.slots, state.registers = slots, registers
}
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Mon Jun 10 19:44:43 UTC 2024 - 58.4K bytes - Viewed (0)
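The debug.go loop above enumerates the registers recorded in a bitmask by repeatedly taking bits.TrailingZeros64 and clearing that bit. The same technique in isolation:

    package main

    import (
        "fmt"
        "math/bits"
    )

    func main() {
        // Registers 0, 3, and 11 are live. Peel off the lowest set bit
        // on each iteration, as the debug.go loop above does.
        mask := uint64(1<<0 | 1<<3 | 1<<11)
        for mask != 0 {
            reg := uint8(bits.TrailingZeros64(mask))
            mask &^= 1 << reg // clear the bit we just handled
            fmt.Println("register", reg)
        }
    }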
tensorflow/compiler/mlir/lite/transforms/prepare_tf.cc
        padded_val.push_back(padding_val[i]);
        if (mask) *mask |= 1 << i;
    }
}

LogicalResult matchAndRewrite(Operation *op,
                              PatternRewriter &rewriter) const override {
    TF::StridedSliceOp strided_slice_op = llvm::cast<TF::StridedSliceOp>(op);

    // Handle ellipsis mask.
    if (strided_slice_op.getEllipsisMask() != 0) {
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue May 28 21:49:50 UTC 2024 - 64.6K bytes - Viewed (0)
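The prepare_tf.cc fragment records, in a bitmask, which positions were filled while padding a vector (the *mask |= 1 << i accumulation). A Go sketch of that bookkeeping pattern, with hypothetical names:

    package main

    import "fmt"

    // padTo pads vals with fill up to size and returns the padded slice plus a
    // bitmask with bit i set for every position that was filled in.
    func padTo(vals []int64, size int, fill int64) ([]int64, uint64) {
        var mask uint64
        out := make([]int64, 0, size)
        for i := 0; i < size; i++ {
            if i < len(vals) {
                out = append(out, vals[i])
                continue
            }
            out = append(out, fill)
            mask |= 1 << i // remember which positions were padded
        }
        return out, mask
    }

    func main() {
        out, mask := padTo([]int64{2, 3}, 4, 1)
        fmt.Println(out, fmt.Sprintf("%#b", mask)) // [2 3 1 1] 0b1100
    }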
src/cmd/link/internal/ppc64/asm.go
    }
    su.SetUint32(target.Arch, offset, newinsn)
}

// Rewrite the instruction at offset into a hardware nop instruction. Also, verify the
// existing instruction under mask matches the check value.
func rewritetonop(target *ld.Target, ldr *loader.Loader, su *loader.SymbolBuilder, offset int64, mask, check uint32) {
    rewritetoinsn(target, ldr, su, offset, mask, check, OP_NOP)
}
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Tue Mar 19 20:54:08 UTC 2024 - 63.7K bytes - Viewed (0)
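rewritetonop above delegates to rewritetoinsn, which (per the comment) verifies that the existing instruction matches a check value under a mask before writing the replacement. A standalone sketch of that verify-under-mask-then-rewrite pattern; the helper is illustrative, not the linker's code:

    package main

    import "fmt"

    const opNOP = 0x60000000 // the canonical PPC64 nop encoding, "ori 0,0,0"

    // rewriteIfMatches overwrites insns[i] with newinsn, but only if the existing
    // instruction matches check under mask, mirroring the verify step above.
    func rewriteIfMatches(insns []uint32, i int, mask, check, newinsn uint32) error {
        if insns[i]&mask != check {
            return fmt.Errorf("instruction %#08x does not match %#08x under mask %#08x",
                insns[i], check, mask)
        }
        insns[i] = newinsn
        return nil
    }

    func main() {
        insns := []uint32{0x48000001} // a "bl" instruction with the link bit set
        // Require the major opcode and AA/LK bits to match before nopping it out.
        err := rewriteIfMatches(insns, 0, 0xfc000003, 0x48000001, opNOP)
        fmt.Printf("%#08x err=%v\n", insns[0], err)
    }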
src/runtime/map.go
}

// Increment with probability 1/(1<<(h.B-15)).
// When we reach 1<<15 - 1, we will have approximately
// as many overflow buckets as buckets.
mask := uint32(1)<<(h.B-15) - 1
// Example: if h.B == 18, then mask == 7,
// and rand() & 7 == 0 with probability 1/8.
if uint32(rand())&mask == 0 {
    h.noverflow++
}
}

func (h *hmap) newoverflow(t *maptype, b *bmap) *bmap {
    var ovf *bmap
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Wed May 29 17:58:53 UTC 2024 - 57.6K bytes - Viewed (0)
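The map.go excerpt keeps noverflow approximate once h.B exceeds 15 by incrementing only with probability 1/(1<<(h.B-15)), implemented as a mask test on a random value. The same sampling trick in isolation:

    package main

    import (
        "fmt"
        "math/rand"
    )

    // sampledIncrement adds 1 to *counter with probability 1/(1<<shift) by
    // masking a random value, as the map.go snippet above does (there, shift
    // is h.B-15). On average the true event count is roughly *counter << shift.
    func sampledIncrement(counter *uint32, shift uint) {
        mask := uint32(1)<<shift - 1 // e.g. shift=3 -> mask=7, hit rate 1/8
        if rand.Uint32()&mask == 0 {
            *counter++
        }
    }

    func main() {
        var noverflow uint32
        for i := 0; i < 1_000_000; i++ {
            sampledIncrement(&noverflow, 3)
        }
        // Expect roughly 125,000 (one eighth of the events).
        fmt.Println(noverflow)
    }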