Results 1 - 10 of 309 for writeMsg (0.14 sec)
src/net/iprawsock_posix.go
```go
	}
	return c.fd.writeTo(b, sa)
}

func (c *IPConn) writeMsg(b, oob []byte, addr *IPAddr) (n, oobn int, err error) {
	if c.fd.isConnected {
		return 0, 0, ErrWriteToConnected
	}
	if addr == nil {
		return 0, 0, errMissingAddress
	}
	sa, err := addr.sockaddr(c.fd.family)
	if err != nil {
		return 0, 0, err
	}
	return c.fd.writeMsg(b, oob, sa)
}
```
Last Modified: Thu May 16 16:54:32 UTC 2024 - 3.9K bytes
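The unexported writeMsg above backs the exported (*net.IPConn).WriteMsgIP. A minimal usage sketch on an unconnected raw socket follows; the destination address and payload are illustrative, raw IP sockets typically need elevated privileges, and the ICMP checksum is left uncomputed here:

```go
package main

import "net"

func main() {
	// Raw IP socket; usually requires root or CAP_NET_RAW.
	conn, err := net.ListenIP("ip4:icmp", &net.IPAddr{IP: net.IPv4zero})
	if err != nil {
		panic(err)
	}
	defer conn.Close()

	// Unconnected socket, so a destination address is required,
	// mirroring the errMissingAddress guard above.
	dst := &net.IPAddr{IP: net.ParseIP("192.0.2.1")}
	// ICMP echo request header with a zeroed checksum (sketch only).
	_, _, err = conn.WriteMsgIP([]byte{8, 0, 0, 0, 0, 0, 0, 0}, nil, dst)
	if err != nil {
		panic(err)
	}
}
```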
src/net/unixsock_posix.go
```go
	}
	if addr.Net != sotypeToNet(c.fd.sotype) {
		return 0, syscall.EAFNOSUPPORT
	}
	sa := &syscall.SockaddrUnix{Name: addr.Name}
	return c.fd.writeTo(b, sa)
}

func (c *UnixConn) writeMsg(b, oob []byte, addr *UnixAddr) (n, oobn int, err error) {
	if c.fd.sotype == syscall.SOCK_DGRAM && c.fd.isConnected {
		return 0, 0, ErrWriteToConnected
	}
	var sa syscall.Sockaddr
	if addr != nil {
```
Last Modified: Thu May 16 16:54:32 UTC 2024 - 6.6K bytes
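The exported wrapper here is (*net.UnixConn).WriteMsgUnix, whose oob argument typically carries an SCM_RIGHTS control message to pass a file descriptor. A sketch over a stream socket; the socket path is made up, and on a stream socket the SOCK_DGRAM guard above does not apply, so addr stays nil:

```go
package main

import (
	"net"
	"os"
	"syscall"
)

func main() {
	// Assumes a listener already exists at this (made-up) path.
	raddr := &net.UnixAddr{Name: "/tmp/example.sock", Net: "unix"}
	conn, err := net.DialUnix("unix", nil, raddr)
	if err != nil {
		panic(err)
	}
	defer conn.Close()

	f, err := os.Open("/etc/hostname")
	if err != nil {
		panic(err)
	}
	defer f.Close()

	// Encode the descriptor as an SCM_RIGHTS control message.
	oob := syscall.UnixRights(int(f.Fd()))
	if _, _, err := conn.WriteMsgUnix([]byte("fd attached"), oob, nil); err != nil {
		panic(err)
	}
}
```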
src/internal/poll/fd_unix.go
```go
			if err = fd.pd.waitWrite(fd.isFile); err == nil {
				continue
			}
		}
		if err != nil {
			return 0, err
		}
		return len(p), nil
	}
}

// WriteMsg wraps the sendmsg network call.
func (fd *FD) WriteMsg(p []byte, oob []byte, sa syscall.Sockaddr) (int, int, error) {
	if err := fd.writeLock(); err != nil {
		return 0, 0, err
	}
	defer fd.writeUnlock()
```
Last Modified: Fri May 31 04:09:44 UTC 2024 - 17.9K bytes
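internal/poll cannot be imported by user code; what FD.WriteMsg serializes behind its write lock is the sendmsg system call, which is reachable directly through the syscall package. A Unix-only sketch, with illustrative addresses:

```go
package main

import "syscall"

func main() {
	// A plain UDP socket; internal/poll wraps descriptors like this one.
	fd, err := syscall.Socket(syscall.AF_INET, syscall.SOCK_DGRAM, 0)
	if err != nil {
		panic(err)
	}
	defer syscall.Close(fd)

	dst := &syscall.SockaddrInet4{Port: 9999, Addr: [4]byte{127, 0, 0, 1}}
	// The raw call that FD.WriteMsg wraps with locking and poller retries.
	if err := syscall.Sendmsg(fd, []byte("hello"), nil, dst, 0); err != nil {
		panic(err)
	}
}
```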
src/net/udpsock_posix.go
```go
	}
}

func (c *UDPConn) writeMsg(b, oob []byte, addr *UDPAddr) (n, oobn int, err error) {
	if c.fd.isConnected && addr != nil {
		return 0, 0, ErrWriteToConnected
	}
	if !c.fd.isConnected && addr == nil {
		return 0, 0, errMissingAddress
	}
	sa, err := addr.sockaddr(c.fd.family)
	if err != nil {
		return 0, 0, err
	}
	return c.fd.writeMsg(b, oob, sa)
}
```
Last Modified: Thu May 16 16:54:32 UTC 2024 - 7.5K bytes
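The two guard clauses above define the public contract of (*net.UDPConn).WriteMsgUDP: an unconnected socket requires an address, a connected one forbids it. A sketch of both cases, with an illustrative port number:

```go
package main

import "net"

func main() {
	dst := &net.UDPAddr{IP: net.IPv4(127, 0, 0, 1), Port: 9999}

	// Unconnected socket: an explicit addr is required, else errMissingAddress.
	conn, err := net.ListenUDP("udp", &net.UDPAddr{})
	if err != nil {
		panic(err)
	}
	defer conn.Close()
	if _, _, err := conn.WriteMsgUDP([]byte("ping"), nil, dst); err != nil {
		panic(err)
	}

	// Connected socket: addr must be nil, else ErrWriteToConnected.
	c2, err := net.DialUDP("udp", nil, dst)
	if err != nil {
		panic(err)
	}
	defer c2.Close()
	if _, _, err := c2.WriteMsgUDP([]byte("ping"), nil, nil); err != nil {
		panic(err)
	}
}
```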
src/compress/lzw/writer.go
```go
	table [tableSize]uint32
}

// writeLSB writes the code c for "Least Significant Bits first" data.
func (w *Writer) writeLSB(c uint32) error {
	w.bits |= c << w.nBits
	w.nBits += w.width
	for w.nBits >= 8 {
		if err := w.w.WriteByte(uint8(w.bits)); err != nil {
			return err
		}
		w.bits >>= 8
		w.nBits -= 8
	}
	return nil
}

// writeMSB writes the code c for "Most Significant Bits first" data.
```
Last Modified: Fri Apr 26 13:32:40 UTC 2024 - 7.9K bytes
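writeLSB and writeMSB correspond to the two bit orders exposed publicly as lzw.LSB and lzw.MSB (LSB is the GIF convention, MSB the TIFF/PDF one). A round-trip sketch with the public API:

```go
package main

import (
	"bytes"
	"compress/lzw"
	"fmt"
	"io"
)

func main() {
	var buf bytes.Buffer

	// lzw.LSB selects the writeLSB path above; litWidth 8 suits byte data.
	w := lzw.NewWriter(&buf, lzw.LSB, 8)
	if _, err := w.Write([]byte("hello hello hello")); err != nil {
		panic(err)
	}
	w.Close() // flushes any bits still buffered in w.bits

	r := lzw.NewReader(&buf, lzw.LSB, 8)
	defer r.Close()
	out, err := io.ReadAll(r)
	if err != nil {
		panic(err)
	}
	fmt.Printf("%s\n", out)
}
```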
cmd/erasure-encode.go
```go
}

// Encode reads from the reader, erasure-encodes the data and writes to the writers.
func (e *Erasure) Encode(ctx context.Context, src io.Reader, writers []io.Writer, buf []byte, quorum int) (total int64, err error) {
	writer := &multiWriter{
		writers:     writers,
		writeQuorum: quorum,
		errs:        make([]error, len(writers)),
	}
	for {
		var blocks [][]byte
```
Last Modified: Wed May 15 00:11:04 UTC 2024 - 2.9K bytes
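MinIO's Erasure type sits on top of a Reed-Solomon coder. A minimal sketch of the same split-and-encode step using the klauspost/reedsolomon library (an assumption about the underlying coder, and a sketch rather than MinIO's actual wiring):

```go
package main

import "github.com/klauspost/reedsolomon"

func main() {
	// 4 data shards + 2 parity shards: any 4 of the 6 recover the data.
	enc, err := reedsolomon.New(4, 2)
	if err != nil {
		panic(err)
	}

	data := []byte("the quick brown fox jumps over the lazy dog")
	// Split pads and slices the input into 6 shards (parity still empty).
	shards, err := enc.Split(data)
	if err != nil {
		panic(err)
	}
	// Encode fills in the 2 parity shards.
	if err := enc.Encode(shards); err != nil {
		panic(err)
	}
	// Each shard would then be fanned out to one writer, which is what
	// Encode above does through its multiWriter and write quorum.
}
```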
cmd/erasure-decode.go
```go
		return bytesWritten, errLessData
	}
	return bytesWritten, derr
}

// Heal reads from readers, reconstruct shards and writes the data to the writers.
func (e Erasure) Heal(ctx context.Context, writers []io.Writer, readers []io.ReaderAt, totalLength int64, prefer []bool) (derr error) {
	if len(writers) != e.parityBlocks+e.dataBlocks {
		return errInvalidArgument
	}
	reader := newParallelReader(readers, e, 0, totalLength)
```
Last Modified: Tue May 21 14:36:21 UTC 2024 - 9.4K bytes
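Heal's reconstruction step corresponds to Reconstruct in the same library. A self-contained sketch of recovering lost shards, again assuming klauspost/reedsolomon underneath:

```go
package main

import (
	"fmt"

	"github.com/klauspost/reedsolomon"
)

func main() {
	enc, err := reedsolomon.New(4, 2)
	if err != nil {
		panic(err)
	}
	shards, err := enc.Split([]byte("the quick brown fox jumps over the lazy dog"))
	if err != nil {
		panic(err)
	}
	if err := enc.Encode(shards); err != nil {
		panic(err)
	}

	// Simulate two lost shards; two parity shards tolerate any two losses.
	shards[0], shards[5] = nil, nil

	// Reconstruct recomputes the missing shards from the survivors,
	// the core of what Heal does block by block.
	if err := enc.Reconstruct(shards); err != nil {
		panic(err)
	}
	ok, err := enc.Verify(shards)
	fmt.Println("verified:", ok, "err:", err)
}
```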
src/runtime/tracetype.go
```go
	// Insert the pointer to the type itself.
	id, _ := t.tab.put(noescape(unsafe.Pointer(&typ)), goarch.PtrSize)
	return id
}

// dump writes all previously cached types to trace buffers and
// releases all memory and resets state. It must only be called once the caller
// can guarantee that there are no more writers to the table.
func (t *traceTypeTable) dump(gen uintptr) {
	w := unsafeTraceExpWriter(gen, nil, traceExperimentAllocFree)
```
Last Modified: Wed May 22 20:31:27 UTC 2024 - 2.3K bytes
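traceTypeTable is runtime-internal plumbing for the experimental alloc/free trace events; the stream it feeds is the one produced by the public runtime/trace package. A loosely related usage sketch, with an illustrative file name:

```go
package main

import (
	"os"
	"runtime/trace"
)

func main() {
	f, err := os.Create("trace.out")
	if err != nil {
		panic(err)
	}
	defer f.Close()

	// Start collects execution trace data until Stop; internal tables
	// such as traceTypeTable above are flushed into this stream.
	if err := trace.Start(f); err != nil {
		panic(err)
	}
	defer trace.Stop()

	_ = make([]byte, 1<<20) // stand-in workload
}
```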
internal/ringbuffer/ring_buffer.go
```go
func (r *RingBuffer) Reset() {
	r.mu.Lock()
	defer r.mu.Unlock()

	// Set error so any readers/writers will return immediately.
	r.setErr(errors.New("reset called"), true)
	if r.block {
		r.readCond.Broadcast()
		r.writeCond.Broadcast()
	}

	// Unlock the mutex so readers/writers can finish.
	r.mu.Unlock()
	r.wg.Wait()
	r.mu.Lock()
	r.r = 0
	r.w = 0
	r.err = nil
	r.isFull = false
```
Last Modified: Wed May 15 00:11:04 UTC 2024 - 13.3K bytes
internal/ringbuffer/README.md
Regular Reads block until data is available, but do not wait for a full buffer. Writes block until there is space available, and writes bigger than the buffer wait for reads to make space. `TryRead` and `TryWrite` are still available for non-blocking reads and writes. To signify the end of the stream, close the ring buffer from the writer side with `rb.CloseWriter()`.
Last Modified: Wed May 15 00:11:04 UTC 2024 - 2.1K bytes
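A sketch of the behavior the README describes, assuming the constructor and blocking setter from the package's own examples (New and SetBlocking are assumptions here, and internal packages are importable only from within the minio module):

```go
package main

import (
	"fmt"
	"io"

	"github.com/minio/minio/internal/ringbuffer"
)

func main() {
	// Blocking mode: Read waits for data, Write waits for free space.
	rb := ringbuffer.New(64).SetBlocking(true)

	go func() {
		// A write bigger than the buffer waits for reads to make space.
		rb.Write([]byte("hello from the writer side"))
		// Signal end of stream so the reader sees io.EOF.
		rb.CloseWriter()
	}()

	out, err := io.ReadAll(rb) // blocks until CloseWriter
	fmt.Printf("%q %v\n", out, err)
}
```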