feat: Waku v2 bridge

Issue #12610
This commit is contained in:
Michal Iskierko
2023-11-12 13:29:38 +01:00
parent 56e7bd01ca
commit 6d31343205
6716 changed files with 1982502 additions and 5891 deletions

View File

@@ -0,0 +1,151 @@
package client
import (
"net"
"sync"
"sync/atomic"
"time"
)
// Chanel number:
// 0x4000 through 0x7FFF: These values are the allowed channel
// numbers (16,383 possible values).
const (
minChannelNumber uint16 = 0x4000
maxChannelNumber uint16 = 0x7fff
)
type bindingState int32
const (
bindingStateIdle bindingState = iota
bindingStateRequest
bindingStateReady
bindingStateRefresh
bindingStateFailed
)
type binding struct {
number uint16 // read-only
st bindingState // thread-safe (atomic op)
addr net.Addr // read-only
mgr *bindingManager // read-only
muBind sync.Mutex // thread-safe, for ChannelBind ops
_refreshedAt time.Time // protected by mutex
mutex sync.RWMutex // thread-safe
}
func (b *binding) setState(state bindingState) {
atomic.StoreInt32((*int32)(&b.st), int32(state))
}
func (b *binding) state() bindingState {
return bindingState(atomic.LoadInt32((*int32)(&b.st)))
}
func (b *binding) setRefreshedAt(at time.Time) {
b.mutex.Lock()
defer b.mutex.Unlock()
b._refreshedAt = at
}
func (b *binding) refreshedAt() time.Time {
b.mutex.RLock()
defer b.mutex.RUnlock()
return b._refreshedAt
}
// Thread-safe binding map
type bindingManager struct {
chanMap map[uint16]*binding
addrMap map[string]*binding
next uint16
mutex sync.RWMutex
}
func newBindingManager() *bindingManager {
return &bindingManager{
chanMap: map[uint16]*binding{},
addrMap: map[string]*binding{},
next: minChannelNumber,
}
}
func (mgr *bindingManager) assignChannelNumber() uint16 {
n := mgr.next
if mgr.next == maxChannelNumber {
mgr.next = minChannelNumber
} else {
mgr.next++
}
return n
}
func (mgr *bindingManager) create(addr net.Addr) *binding {
mgr.mutex.Lock()
defer mgr.mutex.Unlock()
b := &binding{
number: mgr.assignChannelNumber(),
addr: addr,
mgr: mgr,
_refreshedAt: time.Now(),
}
mgr.chanMap[b.number] = b
mgr.addrMap[b.addr.String()] = b
return b
}
func (mgr *bindingManager) findByAddr(addr net.Addr) (*binding, bool) {
mgr.mutex.RLock()
defer mgr.mutex.RUnlock()
b, ok := mgr.addrMap[addr.String()]
return b, ok
}
func (mgr *bindingManager) findByNumber(number uint16) (*binding, bool) {
mgr.mutex.RLock()
defer mgr.mutex.RUnlock()
b, ok := mgr.chanMap[number]
return b, ok
}
func (mgr *bindingManager) deleteByAddr(addr net.Addr) bool {
mgr.mutex.Lock()
defer mgr.mutex.Unlock()
b, ok := mgr.addrMap[addr.String()]
if !ok {
return false
}
delete(mgr.addrMap, addr.String())
delete(mgr.chanMap, b.number)
return true
}
func (mgr *bindingManager) deleteByNumber(number uint16) bool {
mgr.mutex.Lock()
defer mgr.mutex.Unlock()
b, ok := mgr.chanMap[number]
if !ok {
return false
}
delete(mgr.addrMap, b.addr.String())
delete(mgr.chanMap, number)
return true
}
func (mgr *bindingManager) size() int {
mgr.mutex.RLock()
defer mgr.mutex.RUnlock()
return len(mgr.chanMap)
}

613
vendor/github.com/pion/turn/v2/internal/client/conn.go generated vendored Normal file
View File

@@ -0,0 +1,613 @@
// Package client implements the API for a TURN client
package client
import (
"errors"
"fmt"
"io"
"math"
"net"
"sync"
"time"
"github.com/pion/logging"
"github.com/pion/stun"
"github.com/pion/turn/v2/internal/proto"
)
const (
	maxReadQueueSize    = 1024              // capacity of the inbound read queue (readCh)
	permRefreshInterval = 120 * time.Second // period of the permission refresh timer
	maxRetryAttempts    = 3                 // retry budget when a transaction returns errTryAgain (stale nonce)
)
// Timer IDs passed to onRefreshTimers to identify which periodic
// refresh fired.
const (
	timerIDRefreshAlloc int = iota // refresh the allocation lifetime
	timerIDRefreshPerms            // refresh permissions
)
// noDeadline returns the zero time.Time, which by convention means
// "no deadline" (see SetReadDeadline).
func noDeadline() time.Time {
	var zero time.Time
	return zero
}
// inboundData is a single inbound datagram queued for ReadFrom.
type inboundData struct {
	data []byte   // payload (copied by HandleInbound; owned by this struct)
	from net.Addr // peer address the data arrived from
}
// UDPConnObserver is an interface to UDPConn observer. It is the
// UDPConn's view of the client that owns it: transport writes, STUN
// transactions, credentials and deallocation notification.
type UDPConnObserver interface {
	// TURNServerAddr returns the TURN server's transport address.
	TURNServerAddr() net.Addr
	// Username returns the username attribute used in authenticated requests.
	Username() stun.Username
	// Realm returns the realm attribute used in authenticated requests.
	Realm() stun.Realm
	// WriteTo sends raw data to the given address over the underlying transport.
	WriteTo(data []byte, to net.Addr) (int, error)
	// PerformTransaction sends msg to the given address; unless dontWait is
	// set, it waits for and returns the transaction result.
	PerformTransaction(msg *stun.Message, to net.Addr, dontWait bool) (TransactionResult, error)
	// OnDeallocated is called when the relayed address is being released (see Close).
	OnDeallocated(relayedAddr net.Addr)
}
// UDPConnConfig is a set of configuration params used by NewUDPConn
type UDPConnConfig struct {
	Observer    UDPConnObserver       // client-side callbacks (required)
	RelayedAddr net.Addr              // the relayed transport address for this allocation
	Integrity   stun.MessageIntegrity // credentials used to sign requests
	Nonce       stun.Nonce            // initial nonce from the allocation
	Lifetime    time.Duration         // initial allocation lifetime
	Log         logging.LeveledLogger // logger (required)
}
// UDPConn is the implementation of the Conn and PacketConn interfaces for UDP network connections.
// compatible with net.PacketConn and net.Conn
type UDPConn struct {
	obs               UDPConnObserver       // read-only
	relayedAddr       net.Addr              // read-only
	permMap           *permissionMap        // thread-safe
	bindingMgr        *bindingManager       // thread-safe
	integrity         stun.MessageIntegrity // read-only
	_nonce            stun.Nonce            // protected by mutex; access via nonce()/setNonce()
	_lifetime         time.Duration         // protected by mutex; access via lifetime()/setLifetime()
	readCh            chan *inboundData     // thread-safe
	closeCh           chan struct{}         // thread-safe; closed exactly once by Close
	readTimer         *time.Timer           // thread-safe; armed by SetReadDeadline
	refreshAllocTimer *PeriodicTimer        // thread-safe
	refreshPermsTimer *PeriodicTimer        // thread-safe
	mutex             sync.RWMutex          // thread-safe; guards _nonce and _lifetime
	log               logging.LeveledLogger // read-only
}
// NewUDPConn creates a new instance of UDPConn and starts the two
// background refresh timers (allocation lifetime and permissions).
func NewUDPConn(config *UDPConnConfig) *UDPConn {
	c := &UDPConn{
		obs:         config.Observer,
		relayedAddr: config.RelayedAddr,
		permMap:     newPermissionMap(),
		bindingMgr:  newBindingManager(),
		integrity:   config.Integrity,
		_nonce:      config.Nonce,
		_lifetime:   config.Lifetime,
		readCh:      make(chan *inboundData, maxReadQueueSize),
		closeCh:     make(chan struct{}),
		// effectively "never fires" until SetReadDeadline arms it
		readTimer: time.NewTimer(time.Duration(math.MaxInt64)),
		log:       config.Log,
	}

	c.log.Debugf("initial lifetime: %d seconds", int(c.lifetime().Seconds()))

	// Refresh the allocation at half its lifetime so the refresh lands
	// well before expiry.
	c.refreshAllocTimer = NewPeriodicTimer(
		timerIDRefreshAlloc,
		c.onRefreshTimers,
		c.lifetime()/2,
	)

	c.refreshPermsTimer = NewPeriodicTimer(
		timerIDRefreshPerms,
		c.onRefreshTimers,
		permRefreshInterval,
	)

	if c.refreshAllocTimer.Start() {
		c.log.Debugf("refreshAllocTimer started")
	}
	if c.refreshPermsTimer.Start() {
		c.log.Debugf("refreshPermsTimer started")
	}

	return c
}
// ReadFrom reads a packet from the connection,
// copying the payload into p. It returns the number of
// bytes copied into p and the return address that
// was on the packet.
// It returns the number of bytes read (0 <= n <= len(p))
// and any error encountered. Callers should always process
// the n > 0 bytes returned before considering the error err.
// ReadFrom can be made to time out and return
// an Error with Timeout() == true after a fixed time limit;
// see SetDeadline and SetReadDeadline.
func (c *UDPConn) ReadFrom(p []byte) (n int, addr net.Addr, err error) {
	// NOTE: this used to be wrapped in a `for` loop, but every select case
	// returns, so the loop could never iterate and has been removed.
	select {
	case ibData := <-c.readCh:
		n := copy(p, ibData.data)
		if n < len(ibData.data) {
			// p was too small for the whole datagram; the datagram is
			// dropped and a short-buffer error is reported.
			return 0, nil, io.ErrShortBuffer
		}
		return n, ibData.from, nil

	case <-c.readTimer.C:
		// read deadline expired (see SetReadDeadline)
		return 0, nil, &net.OpError{
			Op:   "read",
			Net:  c.LocalAddr().Network(),
			Addr: c.LocalAddr(),
			Err:  newTimeoutError("i/o timeout"),
		}

	case <-c.closeCh:
		// connection was closed while waiting
		return 0, nil, &net.OpError{
			Op:   "read",
			Net:  c.LocalAddr().Network(),
			Addr: c.LocalAddr(),
			Err:  errClosed,
		}
	}
}
// WriteTo writes a packet with payload p to addr.
// WriteTo can be made to time out and return
// an Error with Timeout() == true after a fixed time limit;
// see SetDeadline and SetWriteDeadline.
// On packet-oriented connections, write timeouts are rare.
func (c *UDPConn) WriteTo(p []byte, addr net.Addr) (int, error) { //nolint: gocognit
	var err error
	_, ok := addr.(*net.UDPAddr)
	if !ok {
		return 0, errUDPAddrCast
	}

	// check if we have a permission for the destination IP addr
	perm, ok := c.permMap.find(addr)
	if !ok {
		perm = &permission{}
		c.permMap.insert(addr, perm)
	}

	// This func-block would block, per destination IP (, or perm), until
	// the perm state becomes "requested". Purpose of this is to guarantee
	// the order of packets (within the same perm).
	// Note that CreatePermission transaction may not be complete before
	// all the data transmission. This is done assuming that the request
	// will be mostly likely successful and we can tolerate some loss of
	// UDP packet (or reorder), in order to minimize the latency in most cases.
	createPermission := func() error {
		perm.mutex.Lock()
		defer perm.mutex.Unlock()

		if perm.state() == permStateIdle {
			// punch a hole! (this would block a bit..)
			if err = c.createPermissions(addr); err != nil {
				c.permMap.delete(addr)
				return err
			}
			perm.setState(permStatePermitted)
		}
		return nil
	}

	// retry on errTryAgain (stale nonce)
	for i := 0; i < maxRetryAttempts; i++ {
		if err = createPermission(); !errors.Is(err, errTryAgain) {
			break
		}
	}
	if err != nil {
		return 0, err
	}

	// bind channel
	b, ok := c.bindingMgr.findByAddr(addr)
	if !ok {
		b = c.bindingMgr.create(addr)
	}
	bindSt := b.state()
	if bindSt == bindingStateIdle || bindSt == bindingStateRequest || bindSt == bindingStateFailed {
		func() {
			// block only callers with the same binding until
			// the binding transaction has been complete
			b.muBind.Lock()
			defer b.muBind.Unlock()

			// binding state may have been changed while waiting. check again.
			if b.state() == bindingStateIdle {
				b.setState(bindingStateRequest)
				go func() {
					err2 := c.bind(b)
					if err2 != nil {
						c.log.Warnf("bind() failed: %s", err2.Error())
						b.setState(bindingStateFailed)
						// keep going...
					} else {
						b.setState(bindingStateReady)
					}
				}()
			}
		}()

		// send data using SendIndication while the channel binding is
		// still in flight (or has failed)
		peerAddr := addr2PeerAddress(addr)
		var msg *stun.Message
		msg, err = stun.Build(
			stun.TransactionID,
			stun.NewType(stun.MethodSend, stun.ClassIndication),
			proto.Data(p),
			peerAddr,
			stun.Fingerprint,
		)
		if err != nil {
			return 0, err
		}

		// indication has no transaction (fire-and-forget)
		return c.obs.WriteTo(msg.Raw, c.obs.TURNServerAddr())
	}

	// binding is either ready
	// check if the binding needs a refresh
	func() {
		b.muBind.Lock()
		defer b.muBind.Unlock()

		if b.state() == bindingStateReady && time.Since(b.refreshedAt()) > 5*time.Minute {
			b.setState(bindingStateRefresh)
			go func() {
				// BUG FIX: the bind result must go into a goroutine-local
				// variable. The original assigned the enclosing `err`,
				// which races with the `return` below (the non-refresh
				// goroutine above already used a local err2 correctly).
				err2 := c.bind(b)
				if err2 != nil {
					c.log.Warnf("bind() for refresh failed: %s", err2.Error())
					b.setState(bindingStateFailed)
					// keep going...
				} else {
					b.setRefreshedAt(time.Now())
					b.setState(bindingStateReady)
				}
			}()
		}
	}()

	// send via ChannelData
	return c.sendChannelData(p, b.number)
}
// Close closes the connection.
// Any blocked ReadFrom or WriteTo operations will be unblocked and return errors.
func (c *UDPConn) Close() error {
	// Stop background refresh work before tearing the allocation down.
	c.refreshAllocTimer.Stop()
	c.refreshPermsTimer.Stop()

	// Close closeCh exactly once; a second Close reports errAlreadyClosed.
	select {
	case <-c.closeCh:
		return errAlreadyClosed
	default:
		close(c.closeCh)
	}

	c.obs.OnDeallocated(c.relayedAddr)
	// Ask the server to release the allocation by refreshing with
	// lifetime 0; dontWait since we are shutting down anyway.
	return c.refreshAllocation(0, true /* dontWait=true */)
}
// LocalAddr returns the local network address, which for a TURN
// connection is the relayed transport address on the server.
func (c *UDPConn) LocalAddr() net.Addr {
	return c.relayedAddr
}
// SetDeadline sets the read and write deadlines associated
// with the connection. It is equivalent to calling both
// SetReadDeadline and SetWriteDeadline.
//
// A deadline is an absolute time after which I/O operations
// fail with a timeout (see type Error) instead of
// blocking. The deadline applies to all future and pending
// I/O, not just the immediately following call to ReadFrom or
// WriteTo. After a deadline has been exceeded, the connection
// can be refreshed by setting a deadline in the future.
//
// An idle timeout can be implemented by repeatedly extending
// the deadline after successful ReadFrom or WriteTo calls.
//
// A zero value for t means I/O operations will not time out.
//
// NOTE: only the read deadline takes effect here — writes never
// block (see SetWriteDeadline), so this just delegates.
func (c *UDPConn) SetDeadline(t time.Time) error {
	return c.SetReadDeadline(t)
}
// SetReadDeadline sets the deadline for future ReadFrom calls
// and any currently-blocked ReadFrom call.
// A zero value for t means ReadFrom will not time out.
func (c *UDPConn) SetReadDeadline(t time.Time) error {
	// The zero time means "no deadline": park the timer effectively forever.
	d := time.Duration(math.MaxInt64)
	if !t.IsZero() {
		d = time.Until(t)
	}
	c.readTimer.Reset(d)
	return nil
}
// SetWriteDeadline sets the deadline for future WriteTo calls
// and any currently-blocked WriteTo call.
// Even if write times out, it may return n > 0, indicating that
// some of the data was successfully written.
// A zero value for t means WriteTo will not time out.
func (c *UDPConn) SetWriteDeadline(t time.Time) error {
	// Write never blocks, so a write deadline is meaningless; this is
	// intentionally a no-op that always succeeds.
	return nil
}
// addr2PeerAddress converts a net.Addr (UDP or TCP) into a STUN
// XOR-PEER-ADDRESS value. Unknown address types yield a zero value.
func addr2PeerAddress(addr net.Addr) proto.PeerAddress {
	switch a := addr.(type) {
	case *net.UDPAddr:
		return proto.PeerAddress{IP: a.IP, Port: a.Port}
	case *net.TCPAddr:
		return proto.PeerAddress{IP: a.IP, Port: a.Port}
	default:
		return proto.PeerAddress{}
	}
}
// createPermissions sends a single CreatePermission request covering all
// given peer addresses and waits for the response.
// It returns errTryAgain when the server reports a stale nonce (438),
// after installing the fresh nonce — callers are expected to retry.
func (c *UDPConn) createPermissions(addrs ...net.Addr) error {
	setters := []stun.Setter{
		stun.TransactionID,
		stun.NewType(stun.MethodCreatePermission, stun.ClassRequest),
	}

	// one XOR-PEER-ADDRESS attribute per peer
	for _, addr := range addrs {
		setters = append(setters, addr2PeerAddress(addr))
	}

	// authentication attributes, then integrity and fingerprint last
	setters = append(setters,
		c.obs.Username(),
		c.obs.Realm(),
		c.nonce(),
		c.integrity,
		stun.Fingerprint)

	msg, err := stun.Build(setters...)
	if err != nil {
		return err
	}

	trRes, err := c.obs.PerformTransaction(msg, c.obs.TURNServerAddr(), false)
	if err != nil {
		return err
	}

	res := trRes.Msg
	if res.Type.Class == stun.ClassErrorResponse {
		var code stun.ErrorCodeAttribute
		if err = code.GetFrom(res); err == nil {
			if code.Code == stun.CodeStaleNonce {
				// stale nonce: store the new one and signal a retry
				c.setNonceFromMsg(res)
				return errTryAgain
			}
			return fmt.Errorf("%s (error %s)", res.Type, code) //nolint:goerr113
		}

		return fmt.Errorf("%s", res.Type) //nolint:goerr113
	}

	return nil
}
// HandleInbound passes inbound data in UDPConn. The payload is copied
// before queueing; when the read queue is full the datagram is dropped.
func (c *UDPConn) HandleInbound(data []byte, from net.Addr) {
	// copy data so the caller may reuse its buffer
	buf := append([]byte(nil), data...)

	select {
	case c.readCh <- &inboundData{data: buf, from: from}:
	default:
		c.log.Warnf("receive buffer full")
	}
}
// FindAddrByChannelNumber returns a peer address associated with the
// channel number on this UDPConn
func (c *UDPConn) FindAddrByChannelNumber(chNum uint16) (net.Addr, bool) {
	if b, ok := c.bindingMgr.findByNumber(chNum); ok {
		return b.addr, true
	}
	return nil, false
}
// setNonceFromMsg extracts the NONCE attribute from a 438 (stale nonce)
// response and installs it for subsequent requests.
func (c *UDPConn) setNonceFromMsg(msg *stun.Message) {
	var nonce stun.Nonce
	if getErr := nonce.GetFrom(msg); getErr != nil {
		c.log.Warn("refresh allocation: 438 but no nonce.")
		return
	}
	c.setNonce(nonce)
	c.log.Debug("refresh allocation: 438, got new nonce.")
}
// refreshAllocation sends a Refresh request with the given lifetime
// (0 releases the allocation). When dontWait is set, the request is
// fire-and-forget. It returns errTryAgain on a stale nonce (438) after
// installing the fresh nonce; on success it stores the server-granted
// lifetime.
func (c *UDPConn) refreshAllocation(lifetime time.Duration, dontWait bool) error {
	msg, err := stun.Build(
		stun.TransactionID,
		stun.NewType(stun.MethodRefresh, stun.ClassRequest),
		proto.Lifetime{Duration: lifetime},
		c.obs.Username(),
		c.obs.Realm(),
		c.nonce(),
		c.integrity,
		stun.Fingerprint,
	)
	if err != nil {
		return fmt.Errorf("%w: %s", errFailedToBuildRefreshRequest, err.Error())
	}

	c.log.Debugf("send refresh request (dontWait=%v)", dontWait)
	trRes, err := c.obs.PerformTransaction(msg, c.obs.TURNServerAddr(), dontWait)
	if err != nil {
		return fmt.Errorf("%w: %s", errFailedToRefreshAllocation, err.Error())
	}

	if dontWait {
		c.log.Debug("refresh request sent")
		return nil
	}

	c.log.Debug("refresh request sent, and waiting response")

	res := trRes.Msg
	if res.Type.Class == stun.ClassErrorResponse {
		var code stun.ErrorCodeAttribute
		if err = code.GetFrom(res); err == nil {
			if code.Code == stun.CodeStaleNonce {
				c.setNonceFromMsg(res)
				return errTryAgain
			}
			// BUG FIX: this used to `return err`, which is provably nil
			// here (GetFrom just succeeded), silently turning an error
			// response into a success. Report a real error instead,
			// matching createPermissions.
			return fmt.Errorf("%s (error %s)", res.Type, code) //nolint:goerr113
		}
		return fmt.Errorf("%s", res.Type) //nolint:goerr113
	}

	// Getting lifetime from response
	var updatedLifetime proto.Lifetime
	if err := updatedLifetime.GetFrom(res); err != nil {
		return fmt.Errorf("%w: %s", errFailedToGetLifetime, err.Error())
	}

	c.setLifetime(updatedLifetime.Duration)
	c.log.Debugf("updated lifetime: %d seconds", int(c.lifetime().Seconds()))
	return nil
}
// refreshPermissions re-sends CreatePermission for every currently
// permitted peer IP. errTryAgain (stale nonce) is passed through so the
// caller can retry.
func (c *UDPConn) refreshPermissions() error {
	addrs := c.permMap.addrs()
	if len(addrs) == 0 {
		c.log.Debug("no permission to refresh")
		return nil
	}

	err := c.createPermissions(addrs...)
	switch {
	case err == nil:
		c.log.Debug("refresh permissions successful")
		return nil
	case errors.Is(err, errTryAgain):
		return errTryAgain
	default:
		c.log.Errorf("fail to refresh permissions: %s", err.Error())
		return err
	}
}
// bind performs a ChannelBind transaction for the given binding.
// On transport failure the binding is removed from the manager so a
// later WriteTo can re-create it.
func (c *UDPConn) bind(b *binding) error {
	msg, err := stun.Build(
		stun.TransactionID,
		stun.NewType(stun.MethodChannelBind, stun.ClassRequest),
		addr2PeerAddress(b.addr),
		proto.ChannelNumber(b.number),
		c.obs.Username(),
		c.obs.Realm(),
		c.nonce(),
		c.integrity,
		stun.Fingerprint,
	)
	if err != nil {
		return err
	}

	trRes, err := c.obs.PerformTransaction(msg, c.obs.TURNServerAddr(), false)
	if err != nil {
		c.bindingMgr.deleteByAddr(b.addr)
		return err
	}

	res := trRes.Msg
	if res.Type != stun.NewType(stun.MethodChannelBind, stun.ClassSuccessResponse) {
		return fmt.Errorf("unexpected response type %s", res.Type) //nolint:goerr113
	}

	c.log.Debugf("channel binding successful: %s %d", b.addr.String(), b.number)

	// Success.
	return nil
}
// sendChannelData wraps data in a ChannelData message for the given
// channel number and writes it to the TURN server.
func (c *UDPConn) sendChannelData(data []byte, chNum uint16) (int, error) {
	ch := &proto.ChannelData{
		Data:   data,
		Number: proto.ChannelNumber(chNum),
	}
	ch.Encode()
	return c.obs.WriteTo(ch.Raw, c.obs.TURNServerAddr())
}
// onRefreshTimers is the shared callback for both periodic timers; id
// selects which refresh to run.
func (c *UDPConn) onRefreshTimers(id int) {
	c.log.Debugf("refresh timer %d expired", id)

	// Retry up to maxRetryAttempts while the server reports a stale
	// nonce (errTryAgain); the second attempt should then succeed.
	withRetry := func(op func() error) error {
		var err error
		for attempt := 0; attempt < maxRetryAttempts; attempt++ {
			if err = op(); !errors.Is(err, errTryAgain) {
				break
			}
		}
		return err
	}

	switch id {
	case timerIDRefreshAlloc:
		lifetime := c.lifetime()
		if err := withRetry(func() error { return c.refreshAllocation(lifetime, false) }); err != nil {
			c.log.Warnf("refresh allocation failed")
		}
	case timerIDRefreshPerms:
		if err := withRetry(c.refreshPermissions); err != nil {
			c.log.Warnf("refresh permissions failed")
		}
	}
}
// nonce returns the current nonce under the read lock.
func (c *UDPConn) nonce() stun.Nonce {
	c.mutex.RLock()
	n := c._nonce
	c.mutex.RUnlock()
	return n
}
// setNonce replaces the stored nonce under the write lock.
func (c *UDPConn) setNonce(nonce stun.Nonce) {
	c.mutex.Lock()
	c.log.Debugf("set new nonce with %d bytes", len(nonce))
	c._nonce = nonce
	c.mutex.Unlock()
}
// lifetime returns the current allocation lifetime under the read lock.
func (c *UDPConn) lifetime() time.Duration {
	c.mutex.RLock()
	d := c._lifetime
	c.mutex.RUnlock()
	return d
}
// setLifetime stores the allocation lifetime under the write lock.
func (c *UDPConn) setLifetime(lifetime time.Duration) {
	c.mutex.Lock()
	c._lifetime = lifetime
	c.mutex.Unlock()
}

View File

@@ -0,0 +1,37 @@
package client
import (
"errors"
)
// Sentinel errors used throughout the TURN client package.
// Compare with errors.Is rather than ==.
var (
	errFakeErr                             = errors.New("fake error")
	errTryAgain                            = errors.New("try again") // stale nonce was refreshed; retry the transaction
	errClosed                              = errors.New("use of closed network connection")
	errUDPAddrCast                         = errors.New("addr is not a net.UDPAddr")
	errAlreadyClosed                       = errors.New("already closed")
	errDoubleLock                          = errors.New("try-lock is already locked")
	errTransactionClosed                   = errors.New("transaction closed")
	errWaitForResultOnNonResultTransaction = errors.New("WaitForResult called on non-result transaction")
	errFailedToBuildRefreshRequest         = errors.New("failed to build refresh request")
	errFailedToRefreshAllocation           = errors.New("failed to refresh allocation")
	errFailedToGetLifetime                 = errors.New("failed to get lifetime from refresh response")
)
// timeoutError is an error that satisfies the Timeout() method of
// net.Error, so callers can detect i/o timeouts.
type timeoutError struct {
	msg string
}

// newTimeoutError wraps msg in a timeoutError.
func newTimeoutError(msg string) error {
	return &timeoutError{msg: msg}
}

// Error returns the message given at construction time.
func (e *timeoutError) Error() string {
	return e.msg
}

// Timeout always reports true, marking this error as a timeout.
func (e *timeoutError) Timeout() bool {
	return true
}

View File

@@ -0,0 +1,82 @@
package client
import (
"sync"
"time"
)
// PeriodicTimerTimeoutHandler is a handler called on timeout
type PeriodicTimerTimeoutHandler func(timerID int)
// PeriodicTimer is a periodic timer
type PeriodicTimer struct {
id int
interval time.Duration
timeoutHandler PeriodicTimerTimeoutHandler
stopFunc func()
mutex sync.RWMutex
}
// NewPeriodicTimer create a new timer
func NewPeriodicTimer(id int, timeoutHandler PeriodicTimerTimeoutHandler, interval time.Duration) *PeriodicTimer {
return &PeriodicTimer{
id: id,
interval: interval,
timeoutHandler: timeoutHandler,
}
}
// Start starts the timer. It reports false (and does nothing) when the
// timer is already running.
func (t *PeriodicTimer) Start() bool {
	t.mutex.Lock()
	defer t.mutex.Unlock()

	// this is a noop if the timer is already running
	if t.stopFunc != nil {
		return false
	}

	cancelCh := make(chan struct{})
	go func() {
		for {
			timer := time.NewTimer(t.interval)
			select {
			case <-timer.C:
				t.timeoutHandler(t.id)
			case <-cancelCh:
				timer.Stop()
				return
			}
		}
	}()

	t.stopFunc = func() {
		close(cancelCh)
	}
	return true
}
// Stop stops the timer. Stopping an already-stopped timer is a no-op.
func (t *PeriodicTimer) Stop() {
	t.mutex.Lock()
	defer t.mutex.Unlock()

	stop := t.stopFunc
	if stop == nil {
		return
	}
	stop()
	t.stopFunc = nil
}
// IsRunning tests if the timer is running.
// Debug purpose only
func (t *PeriodicTimer) IsRunning() bool {
	t.mutex.RLock()
	defer t.mutex.RUnlock()
	return t.stopFunc != nil
}

View File

@@ -0,0 +1,90 @@
package client
import (
"net"
"sync"
"sync/atomic"
)
type permState int32
const (
permStateIdle permState = iota
permStatePermitted
)
type permission struct {
st permState // thread-safe (atomic op)
mutex sync.RWMutex // thread-safe
}
func (p *permission) setState(state permState) {
atomic.StoreInt32((*int32)(&p.st), int32(state))
}
func (p *permission) state() permState {
return permState(atomic.LoadInt32((*int32)(&p.st)))
}
// Thread-safe permission map
type permissionMap struct {
permMap map[string]*permission
mutex sync.RWMutex
}
func (m *permissionMap) insert(addr net.Addr, p *permission) bool {
m.mutex.Lock()
defer m.mutex.Unlock()
udpAddr, ok := addr.(*net.UDPAddr)
if !ok {
return false
}
m.permMap[udpAddr.IP.String()] = p
return true
}
func (m *permissionMap) find(addr net.Addr) (*permission, bool) {
m.mutex.RLock()
defer m.mutex.RUnlock()
udpAddr, ok := addr.(*net.UDPAddr)
if !ok {
return nil, false
}
p, ok := m.permMap[udpAddr.IP.String()]
return p, ok
}
func (m *permissionMap) delete(addr net.Addr) {
m.mutex.Lock()
defer m.mutex.Unlock()
udpAddr, ok := addr.(*net.UDPAddr)
if !ok {
return
}
delete(m.permMap, udpAddr.IP.String())
}
func (m *permissionMap) addrs() []net.Addr {
m.mutex.RLock()
defer m.mutex.RUnlock()
addrs := []net.Addr{}
for k := range m.permMap {
addrs = append(addrs, &net.UDPAddr{
IP: net.ParseIP(k),
})
}
return addrs
}
func newPermissionMap() *permissionMap {
return &permissionMap{
permMap: map[string]*permission{},
}
}

View File

@@ -0,0 +1,185 @@
package client
import (
"net"
"sync"
"time"
"github.com/pion/stun"
)
const (
	// maxRtxInterval caps the exponential retransmission backoff
	// applied in StartRtxTimer.
	maxRtxInterval time.Duration = 1600 * time.Millisecond
)
// TransactionResult is a bag of result values of a transaction
type TransactionResult struct {
	Msg     *stun.Message // the response message
	From    net.Addr      // address the response arrived from
	Retries int           // number of retransmissions performed
	Err     error         // non-nil when the transaction failed
}
// TransactionConfig is a set of config params used by NewTransaction
type TransactionConfig struct {
	Key      string        // map key identifying the transaction (see TransactionMap)
	Raw      []byte        // raw bytes to (re)transmit
	To       net.Addr      // destination address
	Interval time.Duration // initial retransmission interval
	IgnoreResult bool // true to throw away the result of this transaction (it will not be readable using WaitForResult)
}
// Transaction represents a transaction
type Transaction struct {
	Key      string                 // read-only
	Raw      []byte                 // read-only
	To       net.Addr               // read-only
	nRtx     int                    // modified only by the timer thread; guarded by mutex
	interval time.Duration          // modified only by the timer thread; guarded by mutex
	timer    *time.Timer            // thread-safe, set only by the creator, and stopper
	resultCh chan TransactionResult // thread-safe; nil when IgnoreResult was set
	mutex    sync.RWMutex           // guards nRtx, interval and timer
}
// NewTransaction creates a new instance of Transaction
func NewTransaction(config *TransactionConfig) *Transaction {
	tr := &Transaction{
		Key:      config.Key,      // read-only
		Raw:      config.Raw,      // read-only
		To:       config.To,       // read-only
		interval: config.Interval, // modified only by the timer thread
	}
	// A nil resultCh marks a fire-and-forget transaction.
	if !config.IgnoreResult {
		tr.resultCh = make(chan TransactionResult)
	}
	return tr
}
// StartRtxTimer starts the transaction timer. When the interval elapses,
// onTimeout is invoked with the transaction key and the new
// retransmission count; the interval doubles each time, capped at
// maxRtxInterval.
func (t *Transaction) StartRtxTimer(onTimeout func(trKey string, nRtx int)) {
	t.mutex.Lock()
	defer t.mutex.Unlock()

	t.timer = time.AfterFunc(t.interval, func() {
		// This callback runs on its own goroutine, so re-acquiring the
		// mutex here cannot deadlock with the Lock above.
		t.mutex.Lock()
		t.nRtx++
		nRtx := t.nRtx
		// exponential backoff, capped at maxRtxInterval
		t.interval *= 2
		if t.interval > maxRtxInterval {
			t.interval = maxRtxInterval
		}
		t.mutex.Unlock()
		// invoke the handler outside the lock
		onTimeout(t.Key, nRtx)
	})
}
// StopRtxTimer stops the transaction timer; a no-op when the timer was
// never started.
func (t *Transaction) StopRtxTimer() {
	t.mutex.Lock()
	defer t.mutex.Unlock()

	if timer := t.timer; timer != nil {
		timer.Stop()
	}
}
// WriteResult writes the result to the result channel. It reports false
// when the transaction was created with IgnoreResult (no channel).
// Note: the send blocks until WaitForResult receives it.
func (t *Transaction) WriteResult(res TransactionResult) bool {
	ch := t.resultCh
	if ch == nil {
		return false
	}
	ch <- res
	return true
}
// WaitForResult waits for the transaction result. Calling it on an
// IgnoreResult transaction yields errWaitForResultOnNonResultTransaction;
// a closed channel yields errTransactionClosed.
func (t *Transaction) WaitForResult() TransactionResult {
	ch := t.resultCh
	if ch == nil {
		return TransactionResult{
			Err: errWaitForResultOnNonResultTransaction,
		}
	}

	res, ok := <-ch
	if !ok {
		res.Err = errTransactionClosed
	}
	return res
}
// Close closes the transaction, waking any pending WaitForResult with
// errTransactionClosed.
func (t *Transaction) Close() {
	if ch := t.resultCh; ch != nil {
		close(ch)
	}
}
// Retries returns the number of retransmission it has made
func (t *Transaction) Retries() int {
	t.mutex.RLock()
	n := t.nRtx
	t.mutex.RUnlock()
	return n
}
// TransactionMap is a thread-safe transaction map keyed by the
// transaction's Key.
type TransactionMap struct {
	trMap map[string]*Transaction // guarded by mutex
	mutex sync.RWMutex
}
// NewTransactionMap create a new instance of the transaction map
func NewTransactionMap() *TransactionMap {
	return &TransactionMap{
		trMap: make(map[string]*Transaction),
	}
}
// Insert inserts a transaction to the map, replacing any existing entry
// with the same key. It always reports true.
func (m *TransactionMap) Insert(key string, tr *Transaction) bool {
	m.mutex.Lock()
	m.trMap[key] = tr
	m.mutex.Unlock()
	return true
}
// Find looks up a transaction by its key
func (m *TransactionMap) Find(key string) (*Transaction, bool) {
	m.mutex.RLock()
	defer m.mutex.RUnlock()
	tr, found := m.trMap[key]
	return tr, found
}
// Delete deletes a transaction by its key; unknown keys are a no-op.
func (m *TransactionMap) Delete(key string) {
	m.mutex.Lock()
	delete(m.trMap, key)
	m.mutex.Unlock()
}
// CloseAndDeleteAll closes and deletes all transactions
func (m *TransactionMap) CloseAndDeleteAll() {
	m.mutex.Lock()
	defer m.mutex.Unlock()

	// Deleting while ranging over a map is safe in Go.
	for key, tr := range m.trMap {
		tr.Close()
		delete(m.trMap, key)
	}
}
// Size returns the length of the transaction map
func (m *TransactionMap) Size() int {
	m.mutex.RLock()
	n := len(m.trMap)
	m.mutex.RUnlock()
	return n
}

View File

@@ -0,0 +1,24 @@
package client
import (
"sync/atomic"
)
// TryLock implement the classic "try-lock" operation.
// The zero value is an unlocked TryLock.
type TryLock struct {
	n int32 // 0 = unlocked, 1 = locked (atomic)
}

// Lock tries to lock the try-lock. If successful, it returns nil.
// Otherwise, it returns errDoubleLock immediately without blocking.
// (The original comment claimed a true/false return, but the method
// has always returned an error.)
func (c *TryLock) Lock() error {
	if !atomic.CompareAndSwapInt32(&c.n, 0, 1) {
		return errDoubleLock
	}
	return nil
}

// Unlock unlocks the try-lock.
func (c *TryLock) Unlock() {
	atomic.StoreInt32(&c.n, 0)
}