feat: Waku v2 bridge

Issue #12610
This commit is contained in:
Michal Iskierko
2023-11-12 13:29:38 +01:00
parent 56e7bd01ca
commit 6d31343205
6716 changed files with 1982502 additions and 5891 deletions

vendor/github.com/status-im/status-go/peers/README.md (generated, vendored, new file)

@@ -0,0 +1,64 @@
Peer pool signals
=================
The peer pool sends three types of signals.
The discovery started signal is sent once the discovery server starts, and again
every time the node has to restart the discovery server because the peer count dropped too low.
```json
{
"type": "discovery.started",
"event": null
}
```
The discovery stopped signal is sent once discovery has found the maximum allowed
number of peers for every registered topic.
```json
{
"type": "discovery.stopped",
"event": null
}
```
The discovery summary signal is sent every time a peer is added to or removed from
the cluster. Its event carries the list of currently connected peers, including each
peer's capabilities and network information.
```json
{
"type": "discovery.summary",
"event": [
{
"id": "339c84c816b5f17a622c8d7ab9498f9998e942a274f70794af934bf5d3d02e14db8ddca2170e4edccede29ea6d409b154c141c34c01006e76c95e17672a27454",
"name": "peer-0/v1.0/darwin/go1.10.1",
"caps": [
"shh/6"
],
"network": {
"localAddress": "127.0.0.1:61049",
"remoteAddress": "127.0.0.1:33732",
"inbound": false,
"trusted": false,
"static": true
},
"protocols": {
"shh": "unknown"
}
}
]
}
```
Or if we don't have any peers:
```json
{
"type": "discovery.summary",
"event": []
}
```
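
For illustration, here is a minimal Go sketch of decoding these envelopes on the client side. The `signalEnvelope` and `peerSummary` types and the `handleSignal` helper are hypothetical names, not part of status-go:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// signalEnvelope mirrors the JSON envelopes shown above.
type signalEnvelope struct {
	Type  string          `json:"type"`
	Event json.RawMessage `json:"event"`
}

// peerSummary captures a subset of the discovery.summary peer fields.
type peerSummary struct {
	ID   string   `json:"id"`
	Name string   `json:"name"`
	Caps []string `json:"caps"`
}

func handleSignal(raw []byte) error {
	var env signalEnvelope
	if err := json.Unmarshal(raw, &env); err != nil {
		return err
	}
	switch env.Type {
	case "discovery.started", "discovery.stopped":
		fmt.Println(env.Type) // event is null for these signals
	case "discovery.summary":
		var peers []peerSummary
		if err := json.Unmarshal(env.Event, &peers); err != nil {
			return err
		}
		fmt.Printf("%d peer(s) connected\n", len(peers))
	}
	return nil
}

func main() {
	sample := []byte(`{"type":"discovery.summary","event":[]}`)
	if err := handleSignal(sample); err != nil {
		fmt.Println("error:", err)
	}
}
```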

vendor/github.com/status-im/status-go/peers/cache.go (generated, vendored, new file)

@@ -0,0 +1,65 @@
package peers
import (
"github.com/syndtr/goleveldb/leveldb"
"github.com/syndtr/goleveldb/leveldb/util"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/p2p/discv5"
"github.com/ethereum/go-ethereum/p2p/enode"
"github.com/status-im/status-go/db"
)
// NewCache returns an instance of Cache.
func NewCache(db *leveldb.DB) *Cache {
return &Cache{db: db}
}
// Cache maintains the list of peers that were discovered.
type Cache struct {
db *leveldb.DB
}
func makePeerKey(peerID enode.ID, topic discv5.Topic) []byte {
return db.Key(db.PeersCache, []byte(topic), peerID.Bytes())
}
// AddPeer stores a peer under the following key: <topic><peer ID>
func (d *Cache) AddPeer(peer *discv5.Node, topic discv5.Topic) error {
data, err := peer.MarshalText()
if err != nil {
return err
}
pk, err := peer.ID.Pubkey()
if err != nil {
return err
}
return d.db.Put(makePeerKey(enode.PubkeyToIDV4(pk), topic), data, nil)
}
// RemovePeer deletes a peer from database.
func (d *Cache) RemovePeer(nodeID enode.ID, topic discv5.Topic) error {
return d.db.Delete(makePeerKey(nodeID, topic), nil)
}
// GetPeersRange returns peers for a given topic with a limit.
func (d *Cache) GetPeersRange(topic discv5.Topic, limit int) (nodes []*discv5.Node) {
key := db.Key(db.PeersCache, []byte(topic))
// It is important to set the Limit bound on the range passed to the iterator,
// so that reads are restricted to the given topic only.
iterator := d.db.NewIterator(util.BytesPrefix(key), nil)
defer iterator.Release()
count := 0
for iterator.Next() && count < limit {
node := discv5.Node{}
value := iterator.Value()
if err := node.UnmarshalText(value); err != nil {
log.Error("can't unmarshal node", "value", value, "error", err)
continue
}
nodes = append(nodes, &node)
count++
}
return nodes
}
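
A minimal usage sketch of the cache, assuming a throwaway leveldb path and a freshly generated key; all names and values here are illustrative:

```go
package main

import (
	"log"
	"net"

	"github.com/ethereum/go-ethereum/crypto"
	"github.com/ethereum/go-ethereum/p2p/discv5"
	"github.com/syndtr/goleveldb/leveldb"

	"github.com/status-im/status-go/peers"
)

func main() {
	ldb, err := leveldb.OpenFile("/tmp/peers-cache", nil) // throwaway path
	if err != nil {
		log.Fatal(err)
	}
	defer ldb.Close()

	cache := peers.NewCache(ldb)
	topic := discv5.Topic("whisper")

	// Build a discv5 node from a freshly generated key.
	key, err := crypto.GenerateKey()
	if err != nil {
		log.Fatal(err)
	}
	node := discv5.NewNode(discv5.PubkeyID(&key.PublicKey), net.ParseIP("127.0.0.1"), 30303, 30303)

	if err := cache.AddPeer(node, topic); err != nil {
		log.Fatal(err)
	}
	// Read back at most 10 cached peers for this topic.
	for _, n := range cache.GetPeersRange(topic, 10) {
		log.Println("cached peer:", n)
	}
}
```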


@@ -0,0 +1,113 @@
package peers
import (
"context"
"time"
"github.com/ethereum/go-ethereum/p2p"
"github.com/ethereum/go-ethereum/p2p/enode"
"github.com/status-im/status-go/params"
"github.com/status-im/status-go/signal"
)
// Verifier verifies if a given node is trusted.
type Verifier interface {
VerifyNode(context.Context, enode.ID) bool
}
// MailServerDiscoveryTopic topic name for mailserver discovery.
const MailServerDiscoveryTopic = "whispermail"
// MailServerDiscoveryLimits default mailserver discovery limits.
var MailServerDiscoveryLimits = params.Limits{Min: 3, Max: 3}
// cacheOnlyTopicPool handles a mail server topic pool.
type cacheOnlyTopicPool struct {
*TopicPool
verifier Verifier
}
// newCacheOnlyTopicPool returns an instance of cacheOnlyTopicPool.
func newCacheOnlyTopicPool(t *TopicPool, verifier Verifier) *cacheOnlyTopicPool {
return &cacheOnlyTopicPool{
TopicPool: t,
verifier: verifier,
}
}
// MaxReached checks whether the max allowed number of peers is reached. When it
// returns true, the peer pool stops the discovery process on this TopicPool.
// The main difference from the basic TopicPool is that we stop the discovery
// process once the number of cached peers equals or exceeds the max limit.
func (t *cacheOnlyTopicPool) MaxReached() bool {
t.mu.RLock()
defer t.mu.RUnlock()
if t.limits.Max == 0 {
return true
}
peers := t.cache.GetPeersRange(t.topic, t.limits.Max)
return len(peers) >= t.limits.Max
}
var sendEnodeDiscovered = signal.SendEnodeDiscovered
// ConfirmAdded calls the base TopicPool ConfirmAdded method and sends a signal
// confirming the enode has been discovered.
func (t *cacheOnlyTopicPool) ConfirmAdded(server *p2p.Server, nodeID enode.ID) {
trusted := t.verifier.VerifyNode(context.TODO(), nodeID)
if trusted {
// add to cache only if trusted
t.TopicPool.ConfirmAdded(server, nodeID)
sendEnodeDiscovered(nodeID.String(), string(t.topic))
t.subtractToLimits()
}
// If a peer was trusted, it was moved to connectedPeers,
// signal was sent and we can safely remove it.
if peer, ok := t.connectedPeers[nodeID]; ok {
// NOTE: removeServerPeer removes the server peer immediately,
// which means the next discovery.summary is not going to include
// the peer.
// We leave some time to ensure the signal is propagated.
go func() {
time.Sleep(200 * time.Millisecond) // a bare 200 would sleep only 200ns
t.removeServerPeer(server, peer)
}()
// Delete it from `connectedPeers` immediately to
// prevent the cache-removal logic implemented in
// TopicPool from running.
delete(t.connectedPeers, nodeID)
}
// If a peer was not trusted, it is still in pendingPeers.
// We should remove it from the p2p.Server.
if peer, ok := t.pendingPeers[nodeID]; ok {
// NOTE: removeServerPeer removes the server peer immediately,
// which means the next discovery.summary is not going to include
// the peer.
// We leave some time to ensure the signal is propagated.
go func() {
time.Sleep(200 * time.Millisecond) // a bare 200 would sleep only 200ns
t.removeServerPeer(server, peer.peerInfo)
}()
// Delete it from `pendingPeers` immediately to
// prevent the cache-removal logic implemented in
// TopicPool from running.
delete(t.pendingPeers, nodeID)
}
}
// subtractToLimits subtracts one from the topic pool limits.
func (t *cacheOnlyTopicPool) subtractToLimits() {
t.mu.Lock()
defer t.mu.Unlock()
if t.limits.Max > 0 {
t.limits.Max = t.limits.Max - 1
}
if t.limits.Min > 0 {
t.limits.Min = t.limits.Min - 1
}
}
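
Since `Verifier` is a one-method interface, a function adapter is enough to plug in custom trust logic. Below is a minimal in-package sketch (the `funcVerifier` type is hypothetical); note that `cacheOnlyTopicPool` is unexported, so outside callers go through `PeerPool.Start`, which wires up a verifier for the mail server topic:

```go
package peers

import (
	"context"

	"github.com/ethereum/go-ethereum/p2p/enode"
)

// funcVerifier adapts a plain function to the Verifier interface.
type funcVerifier func(enode.ID) bool

// VerifyNode reports whether the wrapped function trusts the node.
func (f funcVerifier) VerifyNode(_ context.Context, id enode.ID) bool {
	return f(id)
}
```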

vendor/github.com/status-im/status-go/peers/peerpool.go (generated, vendored, new file)

@@ -0,0 +1,455 @@
package peers
import (
"crypto/ecdsa"
"errors"
"sync"
"time"
"github.com/ethereum/go-ethereum/event"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/p2p"
"github.com/ethereum/go-ethereum/p2p/discv5"
"github.com/ethereum/go-ethereum/p2p/enode"
"github.com/status-im/status-go/discovery"
"github.com/status-im/status-go/params"
"github.com/status-im/status-go/peers/verifier"
"github.com/status-im/status-go/signal"
)
var (
// ErrDiscv5NotRunning is returned when the pool is started but discovery v5 is not running or not enabled.
ErrDiscv5NotRunning = errors.New("Discovery v5 is not running")
)
// PoolEvent is a type used for peer pool events.
type PoolEvent string
const (
immediately = 0 * time.Minute
// expirationPeriod is the amount of time during which a discovered peer is considered connectable
expirationPeriod = 60 * time.Minute
// discoveryRestartTimeout defines how often the loop tries to restart the discovery server
discoveryRestartTimeout = 2 * time.Second
// DefaultFastSync is a recommended value for an aggressive peer search.
DefaultFastSync = 3 * time.Second
// DefaultSlowSync is a recommended value for a slow (background) peer search.
DefaultSlowSync = 30 * time.Second
// DefaultDiscV5Timeout is a timeout after which Discv5 is stopped.
DefaultDiscV5Timeout = 3 * time.Minute
// DefaultTopicFastModeTimeout is a timeout after which sync mode is switched to slow mode.
DefaultTopicFastModeTimeout = 30 * time.Second
// DefaultTopicStopSearchDelay is the default delay when stopping a topic search.
DefaultTopicStopSearchDelay = 10 * time.Second
)
// Options is a struct with PeerPool configuration.
type Options struct {
FastSync time.Duration
SlowSync time.Duration
// After this time, Discovery is stopped even if max peers is not reached.
DiscServerTimeout time.Duration
// AllowStop allows stopping Discovery when reaching max peers or after timeout.
AllowStop bool
// TopicStopSearchDelay is the time stopSearch waits for the max number of cached
// peers to be filled before actually stopping the search.
TopicStopSearchDelay time.Duration
// TrustedMailServers is a list of trusted nodes.
TrustedMailServers []enode.ID
}
// NewDefaultOptions returns a struct with default Options.
func NewDefaultOptions() *Options {
return &Options{
FastSync: DefaultFastSync,
SlowSync: DefaultSlowSync,
DiscServerTimeout: DefaultDiscV5Timeout,
AllowStop: false,
TopicStopSearchDelay: DefaultTopicStopSearchDelay,
}
}
type peerInfo struct {
// discoveredTime is the last time the node was found by discovery v5
discoveredTime time.Time
// dismissed is true when our node requested a disconnect
dismissed bool
// added is true when the node tries to add this peer to a server
added bool
node *discv5.Node
// store public key separately to make peerInfo more independent from discv5
publicKey *ecdsa.PublicKey
}
func (p *peerInfo) NodeID() enode.ID {
return enode.PubkeyToIDV4(p.publicKey)
}
// PeerPool manages discovered peers and connects them to the p2p server
type PeerPool struct {
opts *Options
discovery discovery.Discovery
// config can be set only once per pool life cycle
config map[discv5.Topic]params.Limits
cache *Cache
mu sync.RWMutex
timeoutMu sync.RWMutex
topics []TopicPoolInterface
serverSubscription event.Subscription
events chan *p2p.PeerEvent
quit chan struct{}
wg sync.WaitGroup
timeout <-chan time.Time
updateTopic chan *updateTopicRequest
}
// NewPeerPool creates an instance of PeerPool.
func NewPeerPool(discovery discovery.Discovery, config map[discv5.Topic]params.Limits, cache *Cache, options *Options) *PeerPool {
return &PeerPool{
opts: options,
discovery: discovery,
config: config,
cache: cache,
}
}
func (p *PeerPool) setDiscoveryTimeout() {
p.timeoutMu.Lock()
defer p.timeoutMu.Unlock()
if p.opts.AllowStop && p.opts.DiscServerTimeout > 0 {
p.timeout = time.After(p.opts.DiscServerTimeout)
}
}
// Start creates topic pool for each topic in config and subscribes to server events.
func (p *PeerPool) Start(server *p2p.Server) error {
if !p.discovery.Running() {
return ErrDiscv5NotRunning
}
p.mu.Lock()
defer p.mu.Unlock()
// init channels
p.quit = make(chan struct{})
p.updateTopic = make(chan *updateTopicRequest)
p.setDiscoveryTimeout()
// subscribe to peer events
p.events = make(chan *p2p.PeerEvent, 20)
p.serverSubscription = server.SubscribeEvents(p.events)
p.wg.Add(1)
go func() {
p.handleServerPeers(server, p.events)
p.wg.Done()
}()
// collect topics and start searching for nodes
p.topics = make([]TopicPoolInterface, 0, len(p.config))
for topic, limits := range p.config {
var topicPool TopicPoolInterface
t := newTopicPool(p.discovery, topic, limits, p.opts.SlowSync, p.opts.FastSync, p.cache)
if topic == MailServerDiscoveryTopic {
v, err := p.initVerifier()
if err != nil {
return err
}
topicPool = newCacheOnlyTopicPool(t, v)
} else {
topicPool = t
}
if err := topicPool.StartSearch(server); err != nil {
return err
}
p.topics = append(p.topics, topicPool)
}
// discovery must be already started when pool is started
signal.SendDiscoveryStarted()
return nil
}
func (p *PeerPool) initVerifier() (v Verifier, err error) {
return verifier.NewLocalVerifier(p.opts.TrustedMailServers), nil
}
func (p *PeerPool) startDiscovery() error {
if p.discovery.Running() {
return nil
}
if err := p.discovery.Start(); err != nil {
return err
}
p.mu.Lock()
p.setDiscoveryTimeout()
p.mu.Unlock()
signal.SendDiscoveryStarted()
return nil
}
func (p *PeerPool) stopDiscovery(server *p2p.Server) {
if !p.discovery.Running() {
return
}
if err := p.discovery.Stop(); err != nil {
log.Error("discovery errored when stopping", "err", err)
}
for _, t := range p.topics {
t.StopSearch(server)
}
p.timeoutMu.Lock()
p.timeout = nil
p.timeoutMu.Unlock()
signal.SendDiscoveryStopped()
}
// restartDiscovery restarts discovery and starts a search for topics whose peer count is below the min limit
func (p *PeerPool) restartDiscovery(server *p2p.Server) error {
if !p.discovery.Running() {
if err := p.startDiscovery(); err != nil {
return err
}
log.Debug("restarted discovery from peer pool")
}
for _, t := range p.topics {
if !t.BelowMin() || t.SearchRunning() {
continue
}
err := t.StartSearch(server)
if err != nil {
log.Error("search failed to start", "error", err)
}
}
return nil
}
// handleServerPeers watches server peer events, notifies topic pools about changes
// in the peer set and stops the discv5 if all topic pools collected enough peers.
//
// @TODO(adam): split it into peers and discovery management loops. This should
// simplify the whole logic and allow removing the `timeout` field from `PeerPool`.
func (p *PeerPool) handleServerPeers(server *p2p.Server, events <-chan *p2p.PeerEvent) {
retryDiscv5 := make(chan struct{}, 1)
stopDiscv5 := make(chan struct{}, 1)
queueRetry := func(d time.Duration) {
go func() {
time.Sleep(d)
select {
case retryDiscv5 <- struct{}{}:
default:
}
}()
}
queueStop := func() {
go func() {
select {
case stopDiscv5 <- struct{}{}:
default:
}
}()
}
for {
// We use a separate lock for the timeout because this loop must always
// be running; otherwise the p2p.Server will hang. The event handler
// might potentially hang on the server, deadlocking if this loop were
// waiting for the global lock.
// NOTE: this code probably needs to be refactored and simplified,
// as its asynchronous nature is difficult to follow.
p.timeoutMu.RLock()
timeout := p.timeout
p.timeoutMu.RUnlock()
select {
case <-p.quit:
log.Debug("stopping DiscV5 because of quit")
p.stopDiscovery(server)
return
case <-timeout:
log.Info("DiscV5 timed out")
p.stopDiscovery(server)
case <-retryDiscv5:
if err := p.restartDiscovery(server); err != nil {
log.Error("starting discv5 failed", "error", err, "retry", discoveryRestartTimeout)
queueRetry(discoveryRestartTimeout)
}
case <-stopDiscv5:
p.handleStopTopics(server)
case req := <-p.updateTopic:
if p.updateTopicLimits(server, req) == nil {
if !p.discovery.Running() {
queueRetry(immediately)
}
}
case event := <-events:
// NOTE: handlePeerEventType needs to be called asynchronously
// as it publishes on the <-events channel, leading to a deadlock
// if events channel is full.
go p.handlePeerEventType(server, event, queueRetry, queueStop)
}
}
}
func (p *PeerPool) handlePeerEventType(server *p2p.Server, event *p2p.PeerEvent, queueRetry func(time.Duration), queueStop func()) {
p.mu.Lock()
defer p.mu.Unlock()
var shouldRetry bool
var shouldStop bool
switch event.Type {
case p2p.PeerEventTypeDrop:
log.Debug("confirm peer dropped", "ID", event.Peer)
if p.handleDroppedPeer(server, event.Peer) {
shouldRetry = true
}
case p2p.PeerEventTypeAdd: // skip other events
log.Debug("confirm peer added", "ID", event.Peer)
p.handleAddedPeer(server, event.Peer)
shouldStop = true
default:
return
}
// First we send the discovery summary
SendDiscoverySummary(server.PeersInfo())
// then we send the stop event
if shouldRetry {
queueRetry(immediately)
} else if shouldStop {
queueStop()
}
}
// handleAddedPeer notifies all topics about added peer.
func (p *PeerPool) handleAddedPeer(server *p2p.Server, nodeID enode.ID) {
for _, t := range p.topics {
t.ConfirmAdded(server, nodeID)
if p.opts.AllowStop && t.MaxReached() {
t.setStopSearchTimeout(p.opts.TopicStopSearchDelay)
}
}
}
// handleStopTopics stops the search on any topic that has reached its max cached
// limit or whose stop delay has expired, and additionally stops discovery if all
// topics are stopped.
func (p *PeerPool) handleStopTopics(server *p2p.Server) {
if !p.opts.AllowStop {
return
}
for _, t := range p.topics {
if t.readyToStopSearch() {
t.StopSearch(server)
}
}
if p.allTopicsStopped() {
log.Debug("closing discv5 connection because all topics reached max limit")
p.stopDiscovery(server)
}
}
// allTopicsStopped returns true if all topics are stopped.
func (p *PeerPool) allTopicsStopped() (all bool) {
if !p.opts.AllowStop {
return false
}
all = true
for _, t := range p.topics {
if !t.isStopped() {
all = false
}
}
return all
}
// handleDroppedPeer notifies every topic about the dropped peer and returns true
// if any topic has fewer connections than its min limit
func (p *PeerPool) handleDroppedPeer(server *p2p.Server, nodeID enode.ID) (any bool) {
for _, t := range p.topics {
confirmed := t.ConfirmDropped(server, nodeID)
if confirmed {
newPeer := t.AddPeerFromTable(server)
if newPeer != nil {
log.Debug("added peer from local table", "ID", newPeer.ID)
}
}
log.Debug("search", "topic", t.Topic(), "below min", t.BelowMin())
if t.BelowMin() && !t.SearchRunning() {
any = true
}
}
return any
}
// Stop closes the pool's quit channel and all channels watched by search queries,
// and waits until all goroutines exit.
func (p *PeerPool) Stop() {
// pool wasn't started
if p.quit == nil {
return
}
select {
case <-p.quit:
return
default:
log.Debug("started closing peer pool")
close(p.quit)
}
p.serverSubscription.Unsubscribe()
p.wg.Wait()
}
type updateTopicRequest struct {
Topic string
Limits params.Limits
}
// UpdateTopic updates the pre-existing TopicPool limits.
func (p *PeerPool) UpdateTopic(topic string, limits params.Limits) error {
if _, err := p.getTopic(topic); err != nil {
return err
}
p.updateTopic <- &updateTopicRequest{
Topic: topic,
Limits: limits,
}
return nil
}
func (p *PeerPool) updateTopicLimits(server *p2p.Server, req *updateTopicRequest) error {
t, err := p.getTopic(req.Topic)
if err != nil {
return err
}
t.SetLimits(req.Limits)
return nil
}
func (p *PeerPool) getTopic(topic string) (TopicPoolInterface, error) {
for _, t := range p.topics {
if t.Topic() == discv5.Topic(topic) {
return t, nil
}
}
return nil, errors.New("topic not found")
}
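
A minimal sketch of wiring up a PeerPool, assuming a started discovery backend; the topic name and limits are illustrative:

```go
package example

import (
	"github.com/ethereum/go-ethereum/p2p/discv5"

	"github.com/status-im/status-go/discovery"
	"github.com/status-im/status-go/params"
	"github.com/status-im/status-go/peers"
)

func buildPool(d discovery.Discovery, cache *peers.Cache) *peers.PeerPool {
	// Keep between 2 and 5 peers for the topic.
	config := map[discv5.Topic]params.Limits{
		discv5.Topic("whisper"): {Min: 2, Max: 5},
	}
	opts := peers.NewDefaultOptions()
	opts.AllowStop = true // let the pool stop discv5 once limits are met
	return peers.NewPeerPool(d, config, cache, opts)
}
```

`Start` would then be called with the running `p2p.Server`, and `Stop` on shutdown.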

vendor/github.com/status-im/status-go/peers/signal.go (generated, vendored, new file)

@@ -0,0 +1,12 @@
package peers
import (
"github.com/ethereum/go-ethereum/p2p"
"github.com/status-im/status-go/signal"
)
// SendDiscoverySummary sends discovery.summary signal.
func SendDiscoverySummary(peers []*p2p.PeerInfo) {
signal.SendDiscoverySummary(peers)
}


@@ -0,0 +1,41 @@
package peers
import (
"container/heap"
)
type peerInfoItem struct {
*peerInfo
index int
}
type peerPriorityQueue []*peerInfoItem
var _ heap.Interface = (*peerPriorityQueue)(nil)
func (q peerPriorityQueue) Len() int { return len(q) }
func (q peerPriorityQueue) Less(i, j int) bool {
return q[i].discoveredTime.After(q[j].discoveredTime)
}
func (q peerPriorityQueue) Swap(i, j int) {
q[i], q[j] = q[j], q[i]
q[i].index = i
q[j].index = j
}
func (q *peerPriorityQueue) Push(x interface{}) {
item := x.(*peerInfoItem)
item.index = len(*q)
*q = append(*q, item)
}
func (q *peerPriorityQueue) Pop() interface{} {
old := *q
n := len(old)
item := old[n-1]
item.index = -1
*q = old[0 : n-1]
return item
}
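
Because `Less` compares `discoveredTime` with `After`, the heap yields the most recently discovered peer first. A small in-package sketch (the `examplePriorityQueue` function is hypothetical):

```go
package peers

import (
	"container/heap"
	"fmt"
	"time"
)

func examplePriorityQueue() {
	q := make(peerPriorityQueue, 0)
	heap.Init(&q)

	old := &peerInfoItem{peerInfo: &peerInfo{discoveredTime: time.Now().Add(-time.Hour)}}
	fresh := &peerInfoItem{peerInfo: &peerInfo{discoveredTime: time.Now()}}
	heap.Push(&q, old)
	heap.Push(&q, fresh)

	first := heap.Pop(&q).(*peerInfoItem)
	fmt.Println(first == fresh) // true: the freshest peer pops first
}
```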


@@ -0,0 +1,58 @@
package peers
import (
"sync"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/p2p/discv5"
"github.com/status-im/status-go/discovery"
)
// Register manages topic register queries
type Register struct {
discovery discovery.Discovery
topics []discv5.Topic
wg sync.WaitGroup
quit chan struct{}
}
// NewRegister creates an instance of a topic register
func NewRegister(discovery discovery.Discovery, topics ...discv5.Topic) *Register {
return &Register{discovery: discovery, topics: topics}
}
// Start starts a topic register query for every topic
func (r *Register) Start() error {
if !r.discovery.Running() {
return ErrDiscv5NotRunning
}
r.quit = make(chan struct{})
for _, topic := range r.topics {
r.wg.Add(1)
go func(t discv5.Topic) {
log.Debug("v5 register topic", "topic", t)
if err := r.discovery.Register(string(t), r.quit); err != nil {
log.Error("error registering topic", "topic", t, "error", err)
}
r.wg.Done()
}(topic)
}
return nil
}
// Stop stops all topic register queries and waits for them to exit
func (r *Register) Stop() {
if r.quit == nil {
return
}
select {
case <-r.quit:
return
default:
close(r.quit)
}
log.Debug("waiting for register queries to exit")
r.wg.Wait()
}
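
A minimal sketch of advertising topics with Register, assuming a running discovery backend; the topic names and the sleep are illustrative:

```go
package example

import (
	"time"

	"github.com/ethereum/go-ethereum/p2p/discv5"

	"github.com/status-im/status-go/discovery"
	"github.com/status-im/status-go/peers"
)

func advertise(d discovery.Discovery) error {
	r := peers.NewRegister(d, discv5.Topic("whisper"), discv5.Topic("whispermail"))
	if err := r.Start(); err != nil {
		return err // e.g. ErrDiscv5NotRunning
	}
	time.Sleep(time.Minute) // stay registered for a while
	r.Stop()                // closes quit and waits for register goroutines
	return nil
}
```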


@@ -0,0 +1,595 @@
package peers
import (
"container/heap"
"sync"
"sync/atomic"
"time"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/p2p"
"github.com/ethereum/go-ethereum/p2p/discv5"
"github.com/ethereum/go-ethereum/p2p/enode"
"github.com/status-im/status-go/discovery"
"github.com/status-im/status-go/params"
)
const (
// notQueuedIndex is used to indicate that an item is not queued in the heap.
notQueuedIndex = -1
)
// maxCachedPeersMultiplier is the number the topic's max peer limit is multiplied
// by to get the maximum number of cached peers allowed.
var maxCachedPeersMultiplier = 1
// maxPendingPeersMultiplier is the number the topic's max peer limit is multiplied
// by to get the maximum number of pending peers allowed.
var maxPendingPeersMultiplier = 2
// TopicPoolInterface is the TopicPool interface.
type TopicPoolInterface interface {
StopSearch(server *p2p.Server)
BelowMin() bool
SearchRunning() bool
StartSearch(server *p2p.Server) error
ConfirmDropped(server *p2p.Server, nodeID enode.ID) bool
AddPeerFromTable(server *p2p.Server) *discv5.Node
MaxReached() bool
ConfirmAdded(server *p2p.Server, nodeID enode.ID)
isStopped() bool
Topic() discv5.Topic
SetLimits(limits params.Limits)
setStopSearchTimeout(delay time.Duration)
readyToStopSearch() bool
}
type Clock interface {
Now() time.Time
}
type realClock struct{}
func (realClock) Now() time.Time { return time.Now() }
// newTopicPool returns an instance of TopicPool.
func newTopicPool(discovery discovery.Discovery, topic discv5.Topic, limits params.Limits, slowMode, fastMode time.Duration, cache *Cache) *TopicPool {
pool := TopicPool{
discovery: discovery,
topic: topic,
limits: limits,
fastMode: fastMode,
slowMode: slowMode,
fastModeTimeout: DefaultTopicFastModeTimeout,
pendingPeers: make(map[enode.ID]*peerInfoItem),
discoveredPeersQueue: make(peerPriorityQueue, 0),
discoveredPeers: make(map[enode.ID]bool),
connectedPeers: make(map[enode.ID]*peerInfo),
cache: cache,
maxCachedPeers: limits.Max * maxCachedPeersMultiplier,
maxPendingPeers: limits.Max * maxPendingPeersMultiplier,
clock: realClock{},
}
heap.Init(&pool.discoveredPeersQueue)
return &pool
}
// TopicPool manages peers for a topic.
type TopicPool struct {
discovery discovery.Discovery
// configuration
topic discv5.Topic
limits params.Limits
fastMode time.Duration
slowMode time.Duration
fastModeTimeout time.Duration
mu sync.RWMutex
discWG sync.WaitGroup
poolWG sync.WaitGroup
quit chan struct{}
running int32
currentMode time.Duration
period chan time.Duration
fastModeTimeoutCancel chan struct{}
pendingPeers map[enode.ID]*peerInfoItem // peers that were found and requested to connect, but not yet confirmed
discoveredPeersQueue peerPriorityQueue // priority queue of the most recently discovered peers; does not contain peers requested to connect
discoveredPeers map[enode.ID]bool // remembers which peers have already been discovered and are enqueued
connectedPeers map[enode.ID]*peerInfo // currently connected peers
stopSearchTimeout *time.Time
maxPendingPeers int
maxCachedPeers int
cache *Cache
clock Clock
}
func (t *TopicPool) addToPendingPeers(peer *peerInfo) {
if _, ok := t.pendingPeers[peer.NodeID()]; ok {
return
}
t.pendingPeers[peer.NodeID()] = &peerInfoItem{
peerInfo: peer,
index: notQueuedIndex,
}
// maxPendingPeers = 0 means no limits.
if t.maxPendingPeers == 0 || t.maxPendingPeers >= len(t.pendingPeers) {
return
}
var oldestPeer *peerInfo
for _, i := range t.pendingPeers {
if oldestPeer != nil && oldestPeer.discoveredTime.Before(i.peerInfo.discoveredTime) {
continue
}
oldestPeer = i.peerInfo
}
t.removeFromPendingPeers(oldestPeer.NodeID())
}
// addToQueue adds the passed peer to the queue if it is already pending.
func (t *TopicPool) addToQueue(peer *peerInfo) {
if p, ok := t.pendingPeers[peer.NodeID()]; ok {
if _, ok := t.discoveredPeers[peer.NodeID()]; ok {
return
}
heap.Push(&t.discoveredPeersQueue, p)
t.discoveredPeers[peer.NodeID()] = true
}
}
func (t *TopicPool) popFromQueue() *peerInfo {
if t.discoveredPeersQueue.Len() == 0 {
return nil
}
item := heap.Pop(&t.discoveredPeersQueue).(*peerInfoItem)
item.index = notQueuedIndex
delete(t.discoveredPeers, item.peerInfo.NodeID())
return item.peerInfo
}
func (t *TopicPool) removeFromPendingPeers(nodeID enode.ID) {
peer, ok := t.pendingPeers[nodeID]
if !ok {
return
}
delete(t.pendingPeers, nodeID)
if peer.index != notQueuedIndex {
heap.Remove(&t.discoveredPeersQueue, peer.index)
delete(t.discoveredPeers, nodeID)
}
}
func (t *TopicPool) updatePendingPeer(nodeID enode.ID) {
peer, ok := t.pendingPeers[nodeID]
if !ok {
return
}
peer.discoveredTime = t.clock.Now()
if peer.index != notQueuedIndex {
heap.Fix(&t.discoveredPeersQueue, peer.index)
}
}
func (t *TopicPool) movePeerFromPoolToConnected(nodeID enode.ID) {
peer, ok := t.pendingPeers[nodeID]
if !ok {
return
}
t.removeFromPendingPeers(nodeID)
t.connectedPeers[nodeID] = peer.peerInfo
}
// SearchRunning returns true if search is running
func (t *TopicPool) SearchRunning() bool {
return atomic.LoadInt32(&t.running) == 1
}
// MaxReached returns true if we connected with max number of peers.
func (t *TopicPool) MaxReached() bool {
t.mu.RLock()
defer t.mu.RUnlock()
return len(t.connectedPeers) == t.limits.Max
}
// BelowMin returns true if current number of peers is below min limit.
func (t *TopicPool) BelowMin() bool {
t.mu.RLock()
defer t.mu.RUnlock()
return len(t.connectedPeers) < t.limits.Min
}
// maxCachedPeersReached returns true if max number of cached peers is reached.
func (t *TopicPool) maxCachedPeersReached() bool {
if t.maxCachedPeers == 0 {
return true
}
peers := t.cache.GetPeersRange(t.topic, t.maxCachedPeers)
return len(peers) >= t.maxCachedPeers
}
// setStopSearchTimeout sets the deadline to stop the current topic search,
// unless it has already been set.
func (t *TopicPool) setStopSearchTimeout(delay time.Duration) {
if t.stopSearchTimeout != nil {
return
}
now := t.clock.Now().Add(delay)
t.stopSearchTimeout = &now
}
// isStopSearchDelayExpired returns true if the deadline to stop the current
// topic search has passed.
func (t *TopicPool) isStopSearchDelayExpired() bool {
if t.stopSearchTimeout == nil {
return false
}
return t.stopSearchTimeout.Before(t.clock.Now())
}
// readyToStopSearch returns true if a condition to stop the search is met:
// the stop delay has expired or the max number of cached peers is reached.
func (t *TopicPool) readyToStopSearch() bool {
return t.isStopSearchDelayExpired() || t.maxCachedPeersReached()
}
// updateSyncMode changes the sync mode depending on the current number
// of connected peers and limits.
func (t *TopicPool) updateSyncMode() {
newMode := t.slowMode
if len(t.connectedPeers) < t.limits.Min {
newMode = t.fastMode
}
t.setSyncMode(newMode)
}
func (t *TopicPool) setSyncMode(mode time.Duration) {
if mode == t.currentMode {
return
}
t.period <- mode
t.currentMode = mode
// if selected mode is fast mode and fast mode timeout was not set yet,
// do it now
if mode == t.fastMode && t.fastModeTimeoutCancel == nil {
t.fastModeTimeoutCancel = t.limitFastMode(t.fastModeTimeout)
}
// remove fast mode timeout as slow mode is selected now
if mode == t.slowMode && t.fastModeTimeoutCancel != nil {
close(t.fastModeTimeoutCancel)
t.fastModeTimeoutCancel = nil
}
}
func (t *TopicPool) limitFastMode(timeout time.Duration) chan struct{} {
if timeout == 0 {
return nil
}
cancel := make(chan struct{})
t.poolWG.Add(1)
go func() {
defer t.poolWG.Done()
select {
case <-time.After(timeout):
t.mu.Lock()
t.setSyncMode(t.slowMode)
t.mu.Unlock()
case <-cancel:
return
}
}()
return cancel
}
// ConfirmAdded is called when a peer was added by the p2p server.
// 1. Skip the peer if it is not in our peer table.
// 2. Add the peer to the cache.
// 3. Disconnect the peer if it connected after we reached the max limit of peers
// (we can't know in advance whether a peer will connect, which is why we allow
// a short-lived overflow).
// 4. Switch the search to slow mode if it is running.
func (t *TopicPool) ConfirmAdded(server *p2p.Server, nodeID enode.ID) {
t.mu.Lock()
defer t.mu.Unlock()
peerInfoItem, ok := t.pendingPeers[nodeID]
inbound := !ok || !peerInfoItem.added
log.Debug("peer added event", "peer", nodeID.String(), "inbound", inbound)
if inbound {
return
}
peer := peerInfoItem.peerInfo // get explicit reference
// established connection means that the node
// is a viable candidate for a connection and can be cached
if err := t.cache.AddPeer(peer.node, t.topic); err != nil {
log.Error("failed to persist a peer", "error", err)
}
t.movePeerFromPoolToConnected(nodeID)
// if the upper limit is already reached, drop this peer
if len(t.connectedPeers) > t.limits.Max {
log.Debug("max limit is reached drop the peer", "ID", nodeID, "topic", t.topic)
peer.dismissed = true
t.removeServerPeer(server, peer)
return
}
// make sure `dismissed` is reset
peer.dismissed = false
// A peer was added so check if we can switch to slow mode.
if t.SearchRunning() {
t.updateSyncMode()
}
}
// ConfirmDropped is called when the server receives a drop event.
// 1. Skip the peer if it is not in our peer table.
// 2. If it was a disconnect we requested, we dropped the peer ourselves.
// 3. If the connected number drops below the min limit, switch to fast mode.
// 4. Delete the peer from the cache and peer table.
// Returns false if the peer is not in our table or we requested its removal.
// Otherwise the peer is removed and true is returned.
func (t *TopicPool) ConfirmDropped(server *p2p.Server, nodeID enode.ID) bool {
t.mu.Lock()
defer t.mu.Unlock()
// either inbound or connected from another topic
peer, exist := t.connectedPeers[nodeID]
if !exist {
return false
}
log.Debug("disconnect", "ID", nodeID, "dismissed", peer.dismissed)
delete(t.connectedPeers, nodeID)
// Peer was removed by us because it exceeded the limit.
// Add it back to the pool as it can be useful in the future.
if peer.dismissed {
t.addToPendingPeers(peer)
// use queue for peers that weren't added to p2p server
t.addToQueue(peer)
return false
}
// If there was a network error, this event will be received
// but the peer won't be removed from the static nodes set.
// That's why we need to call `removeServerPeer` manually.
t.removeServerPeer(server, peer)
if err := t.cache.RemovePeer(nodeID, t.topic); err != nil {
log.Error("failed to remove peer from cache", "error", err)
}
// As we removed a peer, update a sync strategy if needed.
if t.SearchRunning() {
t.updateSyncMode()
}
return true
}
// AddPeerFromTable checks if there is a valid peer in local table and adds it to a server.
func (t *TopicPool) AddPeerFromTable(server *p2p.Server) *discv5.Node {
t.mu.RLock()
defer t.mu.RUnlock()
// The most recently added peer is removed from the queue.
// If it did not expire yet, it will be added to the server.
// TODO(adam): investigate if it's worth to keep the peer in the queue
// until the server confirms it is added and in the meanwhile only adjust its priority.
peer := t.popFromQueue()
if peer != nil && t.clock.Now().Before(peer.discoveredTime.Add(expirationPeriod)) {
t.addServerPeer(server, peer)
return peer.node
}
return nil
}
// StartSearch creates discv5 queries and runs a loop to consume found peers.
func (t *TopicPool) StartSearch(server *p2p.Server) error {
if atomic.LoadInt32(&t.running) == 1 {
return nil
}
if !t.discovery.Running() {
return ErrDiscv5NotRunning
}
atomic.StoreInt32(&t.running, 1)
t.mu.Lock()
defer t.mu.Unlock()
t.quit = make(chan struct{})
t.stopSearchTimeout = nil
// `period` is used to notify about the current sync mode.
t.period = make(chan time.Duration, 2)
// use fast sync mode at the beginning
t.setSyncMode(t.fastMode)
// peers management
found := make(chan *discv5.Node, 5) // 5 is a reasonable buffer for concurrently found nodes
lookup := make(chan bool, 10) // sufficiently buffered channel, just prevents blocking on lookups
for _, peer := range t.cache.GetPeersRange(t.topic, 5) {
log.Debug("adding a peer from cache", "peer", peer)
found <- peer
}
t.discWG.Add(1)
go func() {
if err := t.discovery.Discover(string(t.topic), t.period, found, lookup); err != nil {
log.Error("error searching foro", "topic", t.topic, "err", err)
}
t.discWG.Done()
}()
t.poolWG.Add(1)
go func() {
t.handleFoundPeers(server, found, lookup)
t.poolWG.Done()
}()
return nil
}
func (t *TopicPool) handleFoundPeers(server *p2p.Server, found <-chan *discv5.Node, lookup <-chan bool) {
selfID := discv5.PubkeyID(server.Self().Pubkey())
for {
select {
case <-t.quit:
return
case <-lookup:
case node := <-found:
if node.ID == selfID {
continue
}
if err := t.processFoundNode(server, node); err != nil {
log.Error("failed to process found node", "node", node, "error", err)
}
}
}
}
// processFoundNode is called when a node is discovered by a kademlia search query.
// Two important conditions:
// 1. Every time a node is processed, we need to update discoveredTime;
// the peer is later considered valid only if it was discovered < 60m ago.
// 2. If the peer is already connected or the max limit is reached, we do not add
// the peer to the p2p server.
func (t *TopicPool) processFoundNode(server *p2p.Server, node *discv5.Node) error {
t.mu.Lock()
defer t.mu.Unlock()
pk, err := node.ID.Pubkey()
if err != nil {
return err
}
nodeID := enode.PubkeyToIDV4(pk)
log.Debug("peer found", "ID", nodeID, "topic", t.topic)
// peer is already connected so update only discoveredTime
if peer, ok := t.connectedPeers[nodeID]; ok {
peer.discoveredTime = t.clock.Now()
return nil
}
if _, ok := t.pendingPeers[nodeID]; ok {
t.updatePendingPeer(nodeID)
} else {
t.addToPendingPeers(&peerInfo{
discoveredTime: t.clock.Now(),
node: node,
publicKey: pk,
})
}
log.Debug(
"adding peer to a server", "peer", node.ID.String(),
"connected", len(t.connectedPeers), "max", t.maxCachedPeers)
// This can happen when the monotonic clock is not precise enough and
// multiple peers get added at the same clock time, resulting in all
// of them having the same discoveredTime.
// At which point a random peer will be removed, sometimes being the
// peer we just added.
// We could make sure that the latest added peer is not removed,
// but this is simpler, and peers will be fresh enough as resolution
// should be quite high (ms at least).
// This has been reported on windows builds
// only https://github.com/status-im/nim-status-client/issues/522
if t.pendingPeers[nodeID] == nil {
log.Debug("peer added has just been removed", "peer", nodeID)
return nil
}
// the upper limit is not reached, so let's add this peer
if len(t.connectedPeers) < t.maxCachedPeers {
t.addServerPeer(server, t.pendingPeers[nodeID].peerInfo)
} else {
t.addToQueue(t.pendingPeers[nodeID].peerInfo)
}
return nil
}
func (t *TopicPool) addServerPeer(server *p2p.Server, info *peerInfo) {
info.added = true
n := enode.NewV4(info.publicKey, info.node.IP, int(info.node.TCP), int(info.node.UDP))
server.AddPeer(n)
}
func (t *TopicPool) removeServerPeer(server *p2p.Server, info *peerInfo) {
info.added = false
n := enode.NewV4(info.publicKey, info.node.IP, int(info.node.TCP), int(info.node.UDP))
server.RemovePeer(n)
}
func (t *TopicPool) isStopped() bool {
t.mu.Lock()
defer t.mu.Unlock()
return t.currentMode == 0
}
// StopSearch stops the search, closes the quit and period channels, and waits
// for the search goroutines to exit.
func (t *TopicPool) StopSearch(server *p2p.Server) {
if !atomic.CompareAndSwapInt32(&t.running, 1, 0) {
return
}
if t.quit == nil {
return
}
select {
case <-t.quit:
return
default:
}
log.Debug("stoping search", "topic", t.topic)
close(t.quit)
t.mu.Lock()
if t.fastModeTimeoutCancel != nil {
close(t.fastModeTimeoutCancel)
t.fastModeTimeoutCancel = nil
}
t.currentMode = 0
t.mu.Unlock()
// wait for poolWG to exit because it writes to period channel
t.poolWG.Wait()
close(t.period)
t.discWG.Wait()
}
// Topic exposes the internal discovery topic.
func (t *TopicPool) Topic() discv5.Topic {
return t.topic
}
// SetLimits sets the limits for the current TopicPool.
func (t *TopicPool) SetLimits(limits params.Limits) {
t.mu.Lock()
defer t.mu.Unlock()
t.limits = limits
}
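
The `Clock` seam makes the time-dependent logic (peer expiration, stop-search deadlines) testable. A minimal in-package fake (the `fakeClock` name is hypothetical) that a test could assign to `TopicPool.clock`:

```go
package peers

import "time"

// fakeClock is a deterministic Clock for tests.
type fakeClock struct{ now time.Time }

func (f *fakeClock) Now() time.Time { return f.now }

// advance moves the fake time forward, e.g. past expirationPeriod.
func (f *fakeClock) advance(d time.Duration) { f.now = f.now.Add(d) }
```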


@@ -0,0 +1,30 @@
package verifier
import (
"context"
"github.com/ethereum/go-ethereum/p2p/enode"
)
// LocalVerifier verifies nodes based on a provided local list.
type LocalVerifier struct {
KnownPeers map[enode.ID]struct{}
}
// NewLocalVerifier returns a new LocalVerifier instance.
func NewLocalVerifier(peers []enode.ID) *LocalVerifier {
knownPeers := make(map[enode.ID]struct{})
for _, peer := range peers {
knownPeers[peer] = struct{}{}
}
return &LocalVerifier{KnownPeers: knownPeers}
}
// VerifyNode checks if a given node is trusted using a local list.
func (v *LocalVerifier) VerifyNode(_ context.Context, nodeID enode.ID) bool {
if _, ok := v.KnownPeers[nodeID]; ok {
return true
}
return false
}
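
A minimal usage sketch of LocalVerifier; the node ID is derived from a freshly generated key and is illustrative only:

```go
package main

import (
	"context"
	"fmt"

	"github.com/ethereum/go-ethereum/crypto"
	"github.com/ethereum/go-ethereum/p2p/enode"

	"github.com/status-im/status-go/peers/verifier"
)

func main() {
	key, err := crypto.GenerateKey()
	if err != nil {
		panic(err)
	}
	id := enode.PubkeyToIDV4(&key.PublicKey)

	v := verifier.NewLocalVerifier([]enode.ID{id})
	fmt.Println(v.VerifyNode(context.Background(), id))         // true
	fmt.Println(v.VerifyNode(context.Background(), enode.ID{})) // false (unknown)
}
```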