feat: Waku v2 bridge

Issue #12610
This commit is contained in:
Michal Iskierko
2023-11-12 13:29:38 +01:00
parent 56e7bd01ca
commit 6d31343205
6716 changed files with 1982502 additions and 5891 deletions

21
vendor/github.com/libp2p/go-mplex/LICENSE generated vendored Normal file
View File

@@ -0,0 +1,21 @@
The MIT License (MIT)
Copyright (c) 2016 Jeromy Johnson
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.

27
vendor/github.com/libp2p/go-mplex/README.md generated vendored Normal file
View File

@@ -0,0 +1,27 @@
# go-mplex
[![](https://img.shields.io/badge/made%20by-Protocol%20Labs-blue.svg?style=flat-square)](https://protocol.ai)
[![Go Reference](https://pkg.go.dev/badge/github.com/libp2p/go-mplex.svg)](https://pkg.go.dev/github.com/libp2p/go-mplex)
[![](https://img.shields.io/badge/project-libp2p-yellow.svg?style=flat-square)](https://libp2p.io/)
[![](https://img.shields.io/badge/freenode-%23libp2p-yellow.svg?style=flat-square)](http://webchat.freenode.net/?channels=%23libp2p)
[![Discourse posts](https://img.shields.io/discourse/https/discuss.libp2p.io/posts.svg)](https://discuss.libp2p.io)
A super simple [stream muxing](https://docs.libp2p.io/concepts/stream-multiplexing/) library implementing [mplex](https://github.com/libp2p/specs/tree/master/mplex).
## Usage
```go
mplex := multiplex.NewMultiplex(mysocket)
s, _ := mplex.NewStream()
s.Write([]byte("Hello World!"))
s.Close()
os, _ := mplex.Accept()
// echo back everything received
io.Copy(os, os)
```
---
The last gx published version of this module was: 0.2.35: QmWGQQ6Tz8AdUpxktLf3zgnVN9Vy8fcWVezZJSU3ZmiANj

97
vendor/github.com/libp2p/go-mplex/deadline.go generated vendored Normal file
View File

@@ -0,0 +1,97 @@
// Copied from the go standard library.
//
// Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE-BSD file.
package multiplex
import (
"sync"
"time"
)
// pipeDeadline is an abstraction for handling timeouts.
//
// Lifecycle of cancel: starts non-nil (makePipeDeadline), is closed when the
// deadline expires, is replaced with a fresh channel when a previously-expired
// deadline is re-armed, and becomes nil after close() — at which point set()
// is a no-op.
type pipeDeadline struct {
	mu     sync.Mutex    // Guards timer and cancel
	timer  *time.Timer   // Non-nil only while a future deadline is armed
	cancel chan struct{} // Must be non-nil (until close()); closed to signal expiry
}
// makePipeDeadline returns a pipeDeadline ready for use, with its cancel
// channel allocated (the type's invariant requires cancel to be non-nil).
func makePipeDeadline() pipeDeadline {
	var d pipeDeadline
	d.cancel = make(chan struct{})
	return d
}
// set sets the point in time when the deadline will time out.
// A timeout event is signaled by closing the channel returned by waiter.
// Once a timeout has occurred, the deadline can be refreshed by specifying a
// t value in the future.
//
// A zero value for t prevents timeout.
func (d *pipeDeadline) set(t time.Time) {
	d.mu.Lock()
	defer d.mu.Unlock()
	// deadline closed (close() was called); ignore further updates.
	if d.cancel == nil {
		return
	}
	// Disarm any previous timer. If Stop reports false the callback is
	// already running (or ran): wait for it to finish closing cancel so we
	// observe a consistent "closed" state below.
	if d.timer != nil && !d.timer.Stop() {
		<-d.cancel // Wait for the timer callback to finish and close cancel
	}
	d.timer = nil
	// Time is zero, then there is no deadline.
	closed := isClosedChan(d.cancel)
	if t.IsZero() {
		// Refresh the channel if a previous deadline already fired.
		if closed {
			d.cancel = make(chan struct{})
		}
		return
	}
	// Time in the future, setup a timer to cancel in the future.
	if dur := time.Until(t); dur > 0 {
		if closed {
			d.cancel = make(chan struct{})
		}
		// The callback captures the current d.cancel; set() disarms the
		// timer before ever replacing the channel, so it closes the right one.
		d.timer = time.AfterFunc(dur, func() {
			close(d.cancel)
		})
		return
	}
	// Time in the past, so close immediately.
	if !closed {
		close(d.cancel)
	}
}
// wait returns the channel that is closed when the deadline is exceeded.
// The channel is read under the lock because set() may swap it out.
func (d *pipeDeadline) wait() chan struct{} {
	d.mu.Lock()
	c := d.cancel
	d.mu.Unlock()
	return c
}
// close closes the deadline permanently. Any future calls to `set` will do
// nothing. Note it does NOT close the cancel channel — waiters blocked on a
// previously returned wait() channel are not released by this call.
func (d *pipeDeadline) close() {
	d.mu.Lock()
	defer d.mu.Unlock()
	// Same disarm-and-drain dance as set(): if the callback already fired,
	// wait for it to finish before tearing state down.
	if d.timer != nil && !d.timer.Stop() {
		<-d.cancel // Wait for the timer callback to finish and close cancel
	}
	d.timer = nil
	d.cancel = nil
}
// isClosedChan reports, without blocking, whether c is closed. A nil
// channel reports false (a receive on nil blocks, so the default fires).
func isClosedChan(c <-chan struct{}) bool {
	select {
	case <-c:
		return true
	default:
	}
	return false
}

670
vendor/github.com/libp2p/go-mplex/multiplex.go generated vendored Normal file
View File

@@ -0,0 +1,670 @@
package multiplex
import (
"bufio"
"context"
"encoding/binary"
"errors"
"fmt"
"io"
"net"
"os"
"runtime/debug"
"sync"
"time"
pool "github.com/libp2p/go-buffer-pool"
logging "github.com/ipfs/go-log/v2"
"github.com/multiformats/go-varint"
)
// Package-level logger for the mplex subsystem.
var log = logging.Logger("mplex")

const (
	MaxMessageSize       = 1 << 20        // Hard cap on a single message body (1 MiB); larger frames error out.
	BufferSize           = 4096           // Size used for the bufio reader and as the max inbound chunk size.
	MaxBuffers           = 4              // Upper bound on reserved buffer pairs per session.
	MinMemoryReservation = 3 * BufferSize // Essential minimum: 1 input + 1 output buffer + the reader buffer.
)

var (
	// ChunkSize is the largest payload written per message: BufferSize minus
	// 20 bytes of headroom for the varint header and length prefix.
	ChunkSize = BufferSize - 20
)
// Max time to block waiting for a slow reader to read from a stream before
// resetting it. Preferably, we'd have some form of back-pressure mechanism but
// we don't have that in this protocol.
var ReceiveTimeout = 5 * time.Second

// ErrShutdown is returned when operating on a shutdown session
var ErrShutdown = errors.New("session shut down")

// ErrTwoInitiators is returned when both sides think they're the initiator
var ErrTwoInitiators = errors.New("two initiators")

// ErrInvalidState is returned when the other side does something it shouldn't.
// In this case, we close the connection to be safe.
var ErrInvalidState = errors.New("received an unexpected message from the peer")

// errTimeout is the singleton returned when a deadline or internal timeout
// fires; it implements the net.Error-style Timeout()/Temporary() methods.
var errTimeout = timeout{}

// ResetStreamTimeout bounds how long we try to deliver a reset or close
// message before giving up (and, for hard resets, killing the connection).
var ResetStreamTimeout = 2 * time.Minute

// getInputBufferTimeout bounds how long the read loop waits for a free
// inbound buffer slot before failing with errTimeout.
var getInputBufferTimeout = time.Minute

// timeout is the error type behind errTimeout.
type timeout struct{}

func (timeout) Error() string   { return "i/o deadline exceeded" }
func (timeout) Temporary() bool { return true }
func (timeout) Timeout() bool   { return true }
// The MemoryManager allows management of memory allocations.
type MemoryManager interface {
	// ReserveMemory reserves memory / buffer. prio is a priority hint
	// (this file passes 255 for essential buffers, 192/128 for optional
	// ones — presumably higher means more important; semantics belong to
	// the implementation).
	ReserveMemory(size int, prio uint8) error
	// ReleaseMemory explicitly releases memory previously reserved with ReserveMemory
	ReleaseMemory(size int)
}

// nullMemoryManager is the no-op fallback used when NewMultiplex is handed a
// nil MemoryManager: every reservation succeeds, releases do nothing.
type nullMemoryManager struct{}

func (m *nullMemoryManager) ReserveMemory(size int, prio uint8) error { return nil }
func (m *nullMemoryManager) ReleaseMemory(size int)                   {}
// Wire message tags. The header on the wire is (streamID << 3) | tag.
// These constants hold the initiator-side values; for streams the local
// side did not initiate, streamID.header subtracts 1, producing the
// receiver-side variant of the same tag. (newStreamTag has no variant.)
const (
	newStreamTag = 0
	messageTag   = 2
	closeTag     = 4
	resetTag     = 6
)
// Multiplex is a mplex session.
type Multiplex struct {
	con           net.Conn      // underlying connection
	buf           *bufio.Reader // buffered reader over con
	nextID        uint64        // next outbound stream ID (pre-shift; see streamID.header)
	initiator     bool          // whether this side initiated the session (constructor arg)
	memoryManager MemoryManager // accounts for this session's buffer memory

	closed       chan struct{} // closed by cleanup() once the read loop has fully exited
	shutdown     chan struct{} // closed by closeNoWait() to stop both loops
	shutdownErr  error         // reason for shutdown; set before closed is closed
	shutdownLock sync.Mutex    // serializes the close-once transition of shutdown

	writeCh  chan []byte  // framed outbound messages consumed by handleOutgoing
	nstreams chan *Stream // freshly accepted inbound streams delivered to Accept

	channels map[streamID]*Stream // live streams; set to nil by cleanup()
	chLock   sync.Mutex           // guards channels (and nextID via NewNamedStream)

	bufIn, bufOut  chan struct{} // counting semaphores bounding in-flight pooled buffers
	bufInTimer     *time.Timer   // timer reused by getBufferInbound (read loop only)
	reservedMemory int           // total bytes currently reserved with memoryManager
}
// NewMultiplex creates a new multiplexer session over con, reserving memory
// for its buffers with memoryManager (a nil manager means "unlimited") and
// starting the read and write goroutines. It fails only if the essential
// minimum reservation (MinMemoryReservation) cannot be obtained.
func NewMultiplex(con net.Conn, initiator bool, memoryManager MemoryManager) (*Multiplex, error) {
	if memoryManager == nil {
		memoryManager = &nullMemoryManager{}
	}
	mp := &Multiplex{
		con:           con,
		initiator:     initiator,
		channels:      make(map[streamID]*Stream),
		closed:        make(chan struct{}),
		shutdown:      make(chan struct{}),
		nstreams:      make(chan *Stream, 16),
		memoryManager: memoryManager,
	}
	// up-front reserve memory for the essential buffers (1 input, 1 output + the reader buffer)
	if err := mp.memoryManager.ReserveMemory(MinMemoryReservation, 255); err != nil {
		return nil, err
	}
	mp.reservedMemory += MinMemoryReservation
	bufs := 1
	// reserve some more memory for buffers if possible; failures here are
	// not fatal, we just run with fewer buffers (lower throughput).
	for i := 1; i < MaxBuffers; i++ {
		// The second buffer pair is requested at a higher priority (192)
		// than subsequent ones (128).
		var prio uint8
		if bufs < 2 {
			prio = 192
		} else {
			prio = 128
		}
		// 2xBufferSize -- one for input and one for output
		if err := mp.memoryManager.ReserveMemory(2*BufferSize, prio); err != nil {
			break
		}
		mp.reservedMemory += 2 * BufferSize
		bufs++
	}
	// Semaphore capacities match the number of buffer pairs we managed to
	// reserve, so buffer usage never exceeds the reservation.
	mp.buf = bufio.NewReaderSize(con, BufferSize)
	mp.writeCh = make(chan []byte, bufs)
	mp.bufIn = make(chan struct{}, bufs)
	mp.bufOut = make(chan struct{}, bufs)
	// Create the reusable inbound-buffer timer in the stopped-and-drained
	// state expected by getBufferInbound's Stop/Reset protocol.
	mp.bufInTimer = time.NewTimer(0)
	if !mp.bufInTimer.Stop() {
		<-mp.bufInTimer.C
	}
	go mp.handleIncoming()
	go mp.handleOutgoing()
	return mp, nil
}
// newStream allocates the in-memory state for stream id. It neither
// registers the stream in mp.channels nor tells the remote side about it;
// callers are responsible for both.
func (mp *Multiplex) newStream(id streamID, name string) *Stream {
	return &Stream{
		id:          id,
		name:        name,
		dataIn:      make(chan []byte, 1),
		rDeadline:   makePipeDeadline(),
		wDeadline:   makePipeDeadline(),
		mp:          mp,
		writeCancel: make(chan struct{}),
		readCancel:  make(chan struct{}),
	}
}
// Accept accepts the next stream from the connection.
// It blocks until an inbound stream arrives or the session closes; in the
// latter case it returns the session's shutdown error.
func (m *Multiplex) Accept() (*Stream, error) {
	select {
	case s, ok := <-m.nstreams:
		if !ok {
			// Defensive: nstreams is not closed anywhere in this file, so
			// this branch should be unreachable — TODO confirm upstream.
			return nil, errors.New("multiplex closed")
		}
		return s, nil
	case <-m.closed:
		return nil, m.shutdownErr
	}
}
// Close closes the session: it triggers shutdown and then blocks until the
// incoming read loop has finished cleaning up. It always returns nil.
func (mp *Multiplex) Close() error {
	mp.closeNoWait()
	// Wait for the receive loop to finish.
	<-mp.closed
	return nil
}
// closeNoWait shuts the session down without waiting for the read loop to
// exit. It is idempotent: only the first call releases the reserved memory,
// closes the connection and closes the shutdown channel.
func (mp *Multiplex) closeNoWait() {
	mp.shutdownLock.Lock()
	defer mp.shutdownLock.Unlock()
	select {
	case <-mp.shutdown:
		// Already shut down; nothing to do.
	default:
		mp.memoryManager.ReleaseMemory(mp.reservedMemory)
		mp.con.Close()
		close(mp.shutdown)
	}
}
// IsClosed returns true if the session is closed.
func (mp *Multiplex) IsClosed() bool {
	return isClosedChan(mp.closed)
}
// CloseChan returns a read-only channel which will be closed when the session is closed
// (i.e. after the read loop has fully wound down — see cleanup).
func (mp *Multiplex) CloseChan() <-chan struct{} {
	return mp.closed
}
// sendMsg frames header and data (two uvarints followed by the payload)
// into a pooled outbound buffer and queues it on writeCh for the writer
// goroutine. timeout and cancel may be nil, which means "never fires".
//
// Buffer ownership: on success the buffer passes to handleOutgoing, which
// recycles it after writing; on every failure path it is returned to the
// pool here. The extra 20 bytes of headroom match ChunkSize's reservation
// for the two varint prefixes.
func (mp *Multiplex) sendMsg(timeout, cancel <-chan struct{}, header uint64, data []byte) error {
	buf, err := mp.getBufferOutbound(len(data)+20, timeout, cancel)
	if err != nil {
		return err
	}
	n := 0
	n += binary.PutUvarint(buf[n:], header)
	n += binary.PutUvarint(buf[n:], uint64(len(data)))
	n += copy(buf[n:], data)
	select {
	case mp.writeCh <- buf[:n]:
		return nil
	case <-mp.shutdown:
		mp.putBufferOutbound(buf)
		return ErrShutdown
	case <-timeout:
		mp.putBufferOutbound(buf)
		return errTimeout
	case <-cancel:
		mp.putBufferOutbound(buf)
		return ErrStreamClosed
	}
}
// handleOutgoing is the session's writer goroutine: it drains writeCh,
// writes each framed message to the connection, and recycles the buffer.
// It exits on shutdown or on the first write error (doWriteMsg has already
// closed the session by then).
func (mp *Multiplex) handleOutgoing() {
	defer func() {
		// Contain panics to this goroutine: log them instead of letting
		// them take down the whole process.
		if rerr := recover(); rerr != nil {
			fmt.Fprintf(os.Stderr, "caught panic in handleOutgoing: %s\n%s\n", rerr, debug.Stack())
		}
	}()
	for {
		select {
		case <-mp.shutdown:
			return
		case data := <-mp.writeCh:
			err := mp.doWriteMsg(data)
			// Recycle the buffer regardless of the write's outcome.
			mp.putBufferOutbound(data)
			if err != nil {
				// the connection is closed by this time
				log.Warnf("error writing data: %s", err.Error())
				return
			}
		}
	}
}
// doWriteMsg writes one framed message to the connection. Any write error
// is treated as fatal for the session: the connection is torn down (via
// closeNoWait) before the error is returned.
func (mp *Multiplex) doWriteMsg(data []byte) error {
	if mp.isShutdown() {
		return ErrShutdown
	}
	if _, err := mp.con.Write(data); err != nil {
		mp.closeNoWait()
		return err
	}
	return nil
}
// nextChanID hands out the next outbound stream ID. The counter has no
// locking of its own; callers must hold mp.chLock (NewNamedStream does).
func (mp *Multiplex) nextChanID() uint64 {
	id := mp.nextID
	mp.nextID++
	return id
}
// NewStream creates a new stream with an empty name; NewNamedStream will
// substitute the stream's numeric ID as its name.
func (mp *Multiplex) NewStream(ctx context.Context) (*Stream, error) {
	return mp.NewNamedStream(ctx, "")
}
// NewNamedStream creates a new named stream: it allocates an outbound
// stream ID, registers the stream locally, then announces it to the remote
// side with a newStreamTag message. ctx bounds the time spent queueing the
// announcement.
func (mp *Multiplex) NewNamedStream(ctx context.Context, name string) (*Stream, error) {
	mp.chLock.Lock()
	// We could call IsClosed but this is faster (given that we already have
	// the lock).
	if mp.channels == nil {
		mp.chLock.Unlock()
		return nil, ErrShutdown
	}
	sid := mp.nextChanID()
	header := (sid << 3) | newStreamTag
	if name == "" {
		name = fmt.Sprint(sid)
	}
	s := mp.newStream(streamID{
		id:        sid,
		initiator: true,
	}, name)
	mp.channels[s.id] = s
	mp.chLock.Unlock()
	err := mp.sendMsg(ctx.Done(), nil, header, []byte(name))
	if err != nil {
		// sendMsg reports ctx expiry as its internal errTimeout; translate
		// that back into the context's own error for the caller.
		// NOTE(review): on failure the stream stays registered in
		// mp.channels; presumably it is reaped by session teardown — verify.
		if err == errTimeout {
			return nil, ctx.Err()
		}
		return nil, err
	}
	return s, nil
}
// cleanup tears the session down after the read loop exits: it closes the
// connection (via closeNoWait), detaches every stream and cancels its
// reads/writes, records a default shutdown error if none was set, and
// finally closes mp.closed to release waiters (Close, Accept, CloseChan).
func (mp *Multiplex) cleanup() {
	mp.closeNoWait()
	// Take the channels.
	mp.chLock.Lock()
	channels := mp.channels
	mp.channels = nil // marks the session dead for NewNamedStream
	mp.chLock.Unlock()
	// Cancel any reads/writes
	for _, msch := range channels {
		msch.cancelRead(ErrStreamReset)
		msch.cancelWrite(ErrStreamReset)
	}
	// And... shutdown!
	if mp.shutdownErr == nil {
		mp.shutdownErr = ErrShutdown
	}
	close(mp.closed)
}
// handleIncoming is the session's reader goroutine: it parses headers off
// the connection and dispatches on the message tag — registering new
// streams, delivering data, and handling close/reset. Any read or protocol
// error records mp.shutdownErr and ends the loop; the deferred cleanup()
// then tears the session down.
func (mp *Multiplex) handleIncoming() {
	defer func() {
		// Contain panics to this goroutine: log instead of crashing the process.
		if rerr := recover(); rerr != nil {
			fmt.Fprintf(os.Stderr, "caught panic in handleIncoming: %s\n%s\n", rerr, debug.Stack())
		}
	}()
	defer mp.cleanup()
	// recvTimeout is reused across iterations; recvTimeoutFired tracks
	// whether its channel has already been drained, so the Stop/drain
	// protocol below never blocks on an empty channel.
	recvTimeout := time.NewTimer(0)
	defer recvTimeout.Stop()
	recvTimeoutFired := false
loop:
	for {
		chID, tag, err := mp.readNextHeader()
		if err != nil {
			mp.shutdownErr = err
			return
		}
		// Even tags are the initiator-side variants (see the tag constants),
		// so an even received tag means the remote side opened this stream.
		remoteIsInitiator := tag&1 == 0
		ch := streamID{
			// true if *I'm* the initiator.
			initiator: !remoteIsInitiator,
			id:        chID,
		}
		// Rounds up the tag:
		// 0 -> 0
		// 1 -> 2
		// 2 -> 2
		// 3 -> 4
		// etc...
		// This normalizes both variants to the constants we switch on.
		tag += (tag & 1)
		mlen, err := mp.readNextMsgLen()
		if err != nil {
			mp.shutdownErr = err
			return
		}
		mp.chLock.Lock()
		msch, ok := mp.channels[ch]
		mp.chLock.Unlock()
		switch tag {
		case newStreamTag:
			if ok {
				log.Debugf("received NewStream message for existing stream: %d", ch)
				mp.shutdownErr = ErrInvalidState
				return
			}
			// skip stream name, this is not at all useful in the context of libp2p streams
			if err := mp.skipNextMsg(mlen); err != nil {
				mp.shutdownErr = err
				return
			}
			msch = mp.newStream(ch, "")
			mp.chLock.Lock()
			mp.channels[ch] = msch
			mp.chLock.Unlock()
			select {
			case mp.nstreams <- msch:
			case <-mp.shutdown:
				return
			}
		case resetTag:
			if err := mp.skipNextMsg(mlen); err != nil {
				mp.shutdownErr = err
				return
			}
			if !ok {
				// This is *ok*. We forget the stream on reset.
				continue
			}
			// Cancel any ongoing reads/writes.
			msch.cancelRead(ErrStreamReset)
			msch.cancelWrite(ErrStreamReset)
		case closeTag:
			if err := mp.skipNextMsg(mlen); err != nil {
				mp.shutdownErr = err
				return
			}
			if !ok {
				// may have canceled our reads already.
				continue
			}
			// unregister and throw away future data.
			mp.chLock.Lock()
			delete(mp.channels, ch)
			mp.chLock.Unlock()
			// close data channel, there will be no more data.
			close(msch.dataIn)
			// We intentionally don't cancel any deadlines, cancel reads, cancel
			// writes, etc. We just deliver the EOF by closing the
			// data channel, and unregister the channel so we don't
			// receive any more data. The user still needs to call
			// `Close()` or `Reset()`.
		case messageTag:
			if !ok {
				// We're not accepting data on this stream, for
				// some reason. It's likely that we reset it, or
				// simply canceled reads (e.g., called Close).
				if err := mp.skipNextMsg(mlen); err != nil {
					mp.shutdownErr = err
					return
				}
				continue
			}
		read:
			// Deliver the message in BufferSize-bounded chunks so a single
			// large message can't exhaust the buffer reservation.
			for rd := 0; rd < mlen; {
				nextChunk := mlen - rd
				if nextChunk > BufferSize {
					nextChunk = BufferSize
				}
				b, err := mp.readNextChunk(nextChunk)
				if err != nil {
					mp.shutdownErr = err
					return
				}
				rd += nextChunk
				// Re-arm the slow-reader timer (standard Stop/drain dance
				// to avoid a stale expiry).
				if !recvTimeout.Stop() && !recvTimeoutFired {
					<-recvTimeout.C
				}
				recvTimeout.Reset(ReceiveTimeout)
				recvTimeoutFired = false
				select {
				case msch.dataIn <- b:
				case <-msch.readCancel:
					// the user has canceled reading. walk away.
					mp.putBufferInbound(b)
					if err := mp.skipNextMsg(mlen - rd); err != nil {
						mp.shutdownErr = err
						return
					}
					break read
				case <-recvTimeout.C:
					recvTimeoutFired = true
					mp.putBufferInbound(b)
					log.Warnf("timed out receiving message into stream queue.")
					// Do not do this asynchronously. Otherwise, we
					// could drop a message, then receive a message,
					// then reset.
					msch.Reset()
					if err := mp.skipNextMsg(mlen - rd); err != nil {
						mp.shutdownErr = err
						return
					}
					continue loop
				case <-mp.shutdown:
					mp.putBufferInbound(b)
					return
				}
			}
		default:
			// Unknown tag: drop the payload and reset the stream if we
			// know it, but keep the session alive.
			log.Debugf("message with unknown header on stream %s", ch)
			mp.skipNextMsg(mlen)
			if ok {
				msch.Reset()
			}
		}
	}
}
// isShutdown reports whether closeNoWait has already shut the session down.
func (mp *Multiplex) isShutdown() bool {
	return isClosedChan(mp.shutdown)
}
// sendResetMsg tries (for up to ResetStreamTimeout) to queue a reset/close
// frame with the given header. If the send fails while the session is still
// alive: when hard is true the whole connection is killed, otherwise the
// failure is merely logged.
func (mp *Multiplex) sendResetMsg(header uint64, hard bool) {
	ctx, cancel := context.WithTimeout(context.Background(), ResetStreamTimeout)
	defer cancel()
	err := mp.sendMsg(ctx.Done(), nil, header, nil)
	if err != nil && !mp.isShutdown() {
		if hard {
			log.Warnf("error sending reset message: %s; killing connection", err.Error())
			mp.Close()
		} else {
			log.Debugf("error sending reset message: %s", err.Error())
		}
	}
}
// readNextHeader reads one varint-encoded mplex header off the connection
// and splits it into the stream ID (upper bits) and the tag (low 3 bits).
func (mp *Multiplex) readNextHeader() (uint64, uint64, error) {
	h, err := varint.ReadUvarint(mp.buf)
	if err != nil {
		return 0, 0, err
	}
	return h >> 3, h & 7, nil
}
// readNextMsgLen reads the varint length prefix of the next message and
// validates it against MaxMessageSize.
func (mp *Multiplex) readNextMsgLen() (int, error) {
	l, err := varint.ReadUvarint(mp.buf)
	if err != nil {
		return 0, err
	}
	if l > uint64(MaxMessageSize) {
		// errors.New, not fmt.Errorf: the message has no formatting verbs.
		return 0, errors.New("message size too large")
	}
	// Note: no special case for l == 0 is needed; int(0) is already 0.
	return int(l), nil
}
// readNextChunk reads exactly mlen bytes of message payload into a pooled
// inbound buffer. On any read error the buffer is returned to the pool;
// on success the caller owns it (and must eventually recycle it).
func (mp *Multiplex) readNextChunk(mlen int) ([]byte, error) {
	buf, err := mp.getBufferInbound(mlen)
	if err != nil {
		return nil, err
	}
	if _, err := io.ReadFull(mp.buf, buf); err != nil {
		mp.putBufferInbound(buf)
		return nil, err
	}
	return buf, nil
}
// skipNextMsg discards mlen bytes of the current message from the read
// buffer without copying them out. A zero length is a no-op.
func (mp *Multiplex) skipNextMsg(mlen int) error {
	if mlen == 0 {
		return nil
	}
	_, err := mp.buf.Discard(mlen)
	return err
}
// getBufferInbound acquires a slot on the bufIn semaphore (bounding the
// number of in-flight inbound buffers) and returns a pooled buffer of the
// requested length. It gives up with errTimeout after
// getInputBufferTimeout, or with ErrShutdown if the session closes first.
//
// mp.bufInTimer is reused across calls — in this file only the read loop
// calls getBufferInbound, so there is no concurrent use. The deferred
// Stop/drain (tracking timerFired to avoid a double drain) leaves the timer
// stopped-and-drained, the state the next Reset expects.
func (mp *Multiplex) getBufferInbound(length int) ([]byte, error) {
	timerFired := false
	defer func() {
		if !mp.bufInTimer.Stop() && !timerFired {
			<-mp.bufInTimer.C
		}
	}()
	mp.bufInTimer.Reset(getInputBufferTimeout)
	select {
	case mp.bufIn <- struct{}{}:
	case <-mp.bufInTimer.C:
		timerFired = true
		return nil, errTimeout
	case <-mp.shutdown:
		return nil, ErrShutdown
	}
	return mp.getBuffer(length), nil
}
// getBufferOutbound acquires a slot on the bufOut semaphore and returns a
// pooled buffer of the requested length. timeout and cancel (either may be
// nil, i.e. never fires) abort the wait with errTimeout / ErrStreamClosed;
// session shutdown aborts it with ErrShutdown.
func (mp *Multiplex) getBufferOutbound(length int, timeout, cancel <-chan struct{}) ([]byte, error) {
	select {
	case mp.bufOut <- struct{}{}:
	case <-timeout:
		return nil, errTimeout
	case <-cancel:
		return nil, ErrStreamClosed
	case <-mp.shutdown:
		return nil, ErrShutdown
	}
	return mp.getBuffer(length), nil
}
// getBuffer fetches a length-sized buffer from the shared buffer pool.
func (mp *Multiplex) getBuffer(length int) []byte {
	return pool.Get(length)
}
// putBufferInbound returns an inbound buffer to the pool and frees its
// bufIn semaphore slot.
func (mp *Multiplex) putBufferInbound(b []byte) {
	mp.putBuffer(b, mp.bufIn)
}

// putBufferOutbound returns an outbound buffer to the pool and frees its
// bufOut semaphore slot.
func (mp *Multiplex) putBufferOutbound(b []byte) {
	mp.putBuffer(b, mp.bufOut)
}

// putBuffer releases one token from the given semaphore channel, then
// returns the slice to the pool.
func (mp *Multiplex) putBuffer(slice []byte, putBuf chan struct{}) {
	<-putBuf
	pool.Put(slice)
}

268
vendor/github.com/libp2p/go-mplex/stream.go generated vendored Normal file
View File

@@ -0,0 +1,268 @@
package multiplex
import (
"context"
"errors"
"io"
"sync"
"time"
"go.uber.org/multierr"
)
var (
	// ErrStreamReset is returned from reads and writes after the stream has
	// been reset (locally or by the remote side).
	ErrStreamReset = errors.New("stream reset")
	// ErrStreamClosed is returned once the corresponding half of the stream
	// has been closed.
	ErrStreamClosed = errors.New("closed stream")
)
// streamID is a convenience type for operating on stream IDs
type streamID struct {
	id        uint64 // numeric stream identifier (pre-shift; see header)
	initiator bool   // true if the local side opened this stream
}
// header computes the wire header for the given tag: (id << 3) | tag.
// The tag constants hold the initiator-side (even) values; for streams the
// local side did not initiate, the header is decremented by one, producing
// the receiver-side variant of the same tag.
func (id *streamID) header(tag uint64) uint64 {
	header := id.id<<3 | tag
	if !id.initiator {
		header--
	}
	return header
}
// Stream is a single bidirectional stream multiplexed over a Multiplex
// session.
type Stream struct {
	id     streamID
	name   string      // user-visible stream name (see Multiplex.NewNamedStream)
	dataIn chan []byte // inbound chunks delivered by the session's read loop
	mp     *Multiplex  // owning session
	// extra is the unread remainder of the chunk currently being consumed
	extra []byte
	// exbuf is for holding the reference to the beginning of the extra slice
	// for later memory pool freeing
	exbuf []byte

	rDeadline, wDeadline pipeDeadline // read/write deadlines (SetDeadline et al.)

	clLock sync.Mutex // guards the cancel-once transitions below
	// writeCancelErr/readCancelErr are the errors surfaced once the
	// corresponding cancel channel is closed
	writeCancelErr, readCancelErr error
	writeCancel, readCancel       chan struct{}
}
// Name returns the stream's name. For inbound streams this is empty (the
// session's read loop intentionally discards the wire name).
func (s *Stream) Name() string {
	return s.name
}
// preloadData opportunistically pulls one already-queued chunk off dataIn
// into s.extra/s.exbuf without blocking. A closed or empty dataIn leaves
// the stream untouched.
func (s *Stream) preloadData() {
	select {
	case buf, ok := <-s.dataIn:
		if ok {
			s.extra = buf
			s.exbuf = buf
		}
	default:
		// nothing pending
	}
}
// waitForData blocks until the next inbound chunk arrives on dataIn and
// stages it in s.extra/s.exbuf. It returns io.EOF when dataIn was closed
// (remote sent close), the read-cancel error when reads were canceled
// (also releasing any queued buffers — the only safe point to do so), or
// errTimeout when the read deadline fires.
func (s *Stream) waitForData() error {
	select {
	case read, ok := <-s.dataIn:
		if !ok {
			return io.EOF
		}
		s.extra = read
		s.exbuf = read
		return nil
	case <-s.readCancel:
		// This is the only place where it's safe to return these.
		s.returnBuffers()
		return s.readCancelErr
	case <-s.rDeadline.wait():
		return errTimeout
	}
}
// returnBuffers recycles the stream's staged chunk (exbuf/extra) and then
// drains any chunks still queued in dataIn, returning each to the session's
// inbound buffer pool. It never blocks: the drain stops as soon as dataIn
// is empty or closed.
func (s *Stream) returnBuffers() {
	if s.exbuf != nil {
		s.mp.putBufferInbound(s.exbuf)
		s.exbuf = nil
		s.extra = nil
	}
	for {
		select {
		case read, ok := <-s.dataIn:
			if !ok {
				return
			}
			if read == nil {
				continue
			}
			s.mp.putBufferInbound(read)
		default:
			return
		}
	}
}
// Read reads from the stream into b, implementing io.Reader. It serves
// data from the staged chunk (s.extra), recycling each chunk's backing
// buffer once fully consumed and opportunistically preloading the next
// queued chunk so a single call can return more than one chunk's worth.
// It returns the read-cancel error after cancellation, io.EOF after the
// remote closed the stream, or errTimeout on read-deadline expiry.
func (s *Stream) Read(b []byte) (int, error) {
	select {
	case <-s.readCancel:
		return 0, s.readCancelErr
	default:
	}
	// Nothing staged: block for the next chunk (or EOF/cancel/deadline).
	if s.extra == nil {
		err := s.waitForData()
		if err != nil {
			return 0, err
		}
	}
	n := 0
	for s.extra != nil && n < len(b) {
		read := copy(b[n:], s.extra)
		n += read
		if read < len(s.extra) {
			// Partially consumed: keep the remainder staged.
			s.extra = s.extra[read:]
		} else {
			// Chunk fully consumed: recycle its buffer and try to stage
			// the next one without blocking.
			if s.exbuf != nil {
				s.mp.putBufferInbound(s.exbuf)
			}
			s.extra = nil
			s.exbuf = nil
			s.preloadData()
		}
	}
	return n, nil
}
// Write writes b to the stream, implementing io.Writer. The payload is
// sent as a sequence of messages of at most ChunkSize bytes each; on error
// it reports how many bytes were successfully handed to the session.
func (s *Stream) Write(b []byte) (int, error) {
	written := 0
	for written < len(b) {
		end := written + ChunkSize
		if end > len(b) {
			end = len(b)
		}
		n, err := s.write(b[written:end])
		if err != nil {
			return written, err
		}
		written += n
	}
	return written, nil
}
// write sends a single message-sized chunk (callers keep len(b) within
// ChunkSize). It fails fast with the write-cancel error if writes were
// already canceled, and otherwise defers to the session's sendMsg, wired
// to this stream's write deadline and cancel channel.
func (s *Stream) write(b []byte) (int, error) {
	select {
	case <-s.writeCancel:
		return 0, s.writeCancelErr
	default:
	}
	err := s.mp.sendMsg(s.wDeadline.wait(), s.writeCancel, s.id.header(messageTag), b)
	if err != nil {
		return 0, err
	}
	return len(b), nil
}
// cancelWrite permanently fails pending and future writes with err. It
// returns true if this call performed the cancellation, false if writes
// had already been canceled (in which case the earlier error stands).
func (s *Stream) cancelWrite(err error) bool {
	s.wDeadline.close()
	s.clLock.Lock()
	defer s.clLock.Unlock()
	if isClosedChan(s.writeCancel) {
		return false
	}
	s.writeCancelErr = err
	close(s.writeCancel)
	return true
}
// cancelRead permanently fails pending and future reads with err and
// unregisters the stream from the session. It returns true if this call
// performed the cancellation, false if reads were already canceled.
func (s *Stream) cancelRead(err error) bool {
	// Always unregister for reading first, even if we're already closed (or
	// already closing). When handleIncoming calls this, it expects the
	// stream to be unregistered by the time it returns.
	s.mp.chLock.Lock()
	delete(s.mp.channels, s.id)
	s.mp.chLock.Unlock()
	s.rDeadline.close()
	s.clLock.Lock()
	defer s.clLock.Unlock()
	select {
	case <-s.readCancel:
		return false
	default:
		s.readCancelErr = err
		close(s.readCancel)
		return true
	}
}
// CloseWrite closes the write half of the stream: it cancels writes with
// ErrStreamClosed and, if this call was the one that closed it, sends a
// close message to the remote side (waiting up to ResetStreamTimeout).
// A send failure on a live session is treated as fatal for the connection.
func (s *Stream) CloseWrite() error {
	if !s.cancelWrite(ErrStreamClosed) {
		// Check if we closed the stream _nicely_. If so, we don't need
		// to report an error to the user.
		if s.writeCancelErr == ErrStreamClosed {
			return nil
		}
		// Closed for some other reason. Report it.
		return s.writeCancelErr
	}
	ctx, cancel := context.WithTimeout(context.Background(), ResetStreamTimeout)
	defer cancel()
	err := s.mp.sendMsg(ctx.Done(), nil, s.id.header(closeTag), nil)
	// We failed to close the stream after 2 minutes, something is probably wrong.
	if err != nil && !s.mp.isShutdown() {
		log.Warnf("Error closing stream: %s; killing connection", err.Error())
		s.mp.Close()
	}
	return err
}
// CloseRead closes the read half of the stream: future (and blocked) reads
// fail with ErrStreamClosed, queued data is discarded, and the stream is
// unregistered from the session. It never returns an error and sends
// nothing to the remote side.
func (s *Stream) CloseRead() error {
	s.cancelRead(ErrStreamClosed)
	return nil
}
// Close closes both halves of the stream — read side first, then write
// side — and combines any errors from the two.
func (s *Stream) Close() error {
	rerr := s.CloseRead()
	werr := s.CloseWrite()
	return multierr.Combine(rerr, werr)
}
// Reset aborts both halves of the stream with ErrStreamReset and, if the
// write half was not already canceled, notifies the remote side with a
// reset message sent asynchronously (hard=true: a failed send kills the
// connection). It never returns an error.
func (s *Stream) Reset() error {
	s.cancelRead(ErrStreamReset)
	if s.cancelWrite(ErrStreamReset) {
		// Send a reset in the background.
		go s.mp.sendResetMsg(s.id.header(resetTag), true)
	}
	return nil
}
// SetDeadline sets both the read and write deadlines (net.Conn semantics:
// a zero time clears the deadline). It never returns an error.
func (s *Stream) SetDeadline(t time.Time) error {
	s.rDeadline.set(t)
	s.wDeadline.set(t)
	return nil
}

// SetReadDeadline sets the read deadline; a zero time clears it.
func (s *Stream) SetReadDeadline(t time.Time) error {
	s.rDeadline.set(t)
	return nil
}

// SetWriteDeadline sets the write deadline; a zero time clears it.
func (s *Stream) SetWriteDeadline(t time.Time) error {
	s.wDeadline.set(t)
	return nil
}

3
vendor/github.com/libp2p/go-mplex/version.json generated vendored Normal file
View File

@@ -0,0 +1,3 @@
{
"version": "v0.7.0"
}