feat: Waku v2 bridge

Issue #12610
Michal Iskierko
2023-11-12 13:29:38 +01:00
parent 56e7bd01ca
commit 6d31343205
6716 changed files with 1982502 additions and 5891 deletions


@@ -0,0 +1,4 @@
This package is an early prototype of Discovery v5. Do not use this code.
See https://github.com/ethereum/devp2p/blob/master/discv5/discv5.md for the
current Discovery v5 specification.


@@ -0,0 +1,396 @@
// Copyright 2015 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
// Contains the node database, storing previously seen nodes and any collected
// metadata about them for QoS purposes.
package discv5
import (
"bytes"
"crypto/rand"
"encoding/binary"
"fmt"
"os"
"sync"
"time"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/rlp"
"github.com/syndtr/goleveldb/leveldb"
"github.com/syndtr/goleveldb/leveldb/errors"
"github.com/syndtr/goleveldb/leveldb/iterator"
"github.com/syndtr/goleveldb/leveldb/opt"
"github.com/syndtr/goleveldb/leveldb/storage"
"github.com/syndtr/goleveldb/leveldb/util"
)
var (
nodeDBNilNodeID = NodeID{} // Special node ID to use as a nil element.
nodeDBNodeExpiration = 24 * time.Hour // Time after which an unseen node should be dropped.
nodeDBCleanupCycle = time.Hour // Time period for running the expiration task.
)
// nodeDB stores all nodes we know about.
type nodeDB struct {
lvl *leveldb.DB // Interface to the database itself
self NodeID // Own node id to prevent adding it into the database
runner sync.Once // Ensures we can start at most one expirer
quit chan struct{} // Channel to signal the expiring thread to stop
}
// Schema layout for the node database
var (
nodeDBVersionKey = []byte("version") // Version of the database to flush if changes
nodeDBItemPrefix = []byte("n:") // Identifier to prefix node entries with
nodeDBDiscoverRoot = ":discover"
nodeDBDiscoverPing = nodeDBDiscoverRoot + ":lastping"
nodeDBDiscoverPong = nodeDBDiscoverRoot + ":lastpong"
nodeDBDiscoverFindFails = nodeDBDiscoverRoot + ":findfail"
nodeDBTopicRegTickets = ":tickets"
)
// newNodeDB creates a new node database for storing and retrieving information about
// known peers in the network. If no path is given, an in-memory, temporary
// database is constructed.
func newNodeDB(path string, version int, self NodeID) (*nodeDB, error) {
if path == "" {
return newMemoryNodeDB(self)
}
return newPersistentNodeDB(path, version, self)
}
// newMemoryNodeDB creates a new in-memory node database without a persistent
// backend.
func newMemoryNodeDB(self NodeID) (*nodeDB, error) {
db, err := leveldb.Open(storage.NewMemStorage(), nil)
if err != nil {
return nil, err
}
return &nodeDB{
lvl: db,
self: self,
quit: make(chan struct{}),
}, nil
}
// newPersistentNodeDB creates/opens a leveldb backed persistent node database,
// also flushing its contents in case of a version mismatch.
func newPersistentNodeDB(path string, version int, self NodeID) (*nodeDB, error) {
opts := &opt.Options{OpenFilesCacheCapacity: 5}
db, err := leveldb.OpenFile(path, opts)
if _, iscorrupted := err.(*errors.ErrCorrupted); iscorrupted {
db, err = leveldb.RecoverFile(path, nil)
}
if err != nil {
return nil, err
}
// The nodes contained in the cache correspond to a certain protocol version.
// Flush all nodes if the version doesn't match.
currentVer := make([]byte, binary.MaxVarintLen64)
currentVer = currentVer[:binary.PutVarint(currentVer, int64(version))]
blob, err := db.Get(nodeDBVersionKey, nil)
switch err {
case leveldb.ErrNotFound:
// Version not found (i.e. empty cache), insert it
if err := db.Put(nodeDBVersionKey, currentVer, nil); err != nil {
db.Close()
return nil, err
}
case nil:
// Version present, flush if different
if !bytes.Equal(blob, currentVer) {
db.Close()
if err = os.RemoveAll(path); err != nil {
return nil, err
}
return newPersistentNodeDB(path, version, self)
}
}
return &nodeDB{
lvl: db,
self: self,
quit: make(chan struct{}),
}, nil
}
// makeKey generates the leveldb key-blob from a node id and its particular
// field of interest.
func makeKey(id NodeID, field string) []byte {
if bytes.Equal(id[:], nodeDBNilNodeID[:]) {
return []byte(field)
}
return append(nodeDBItemPrefix, append(id[:], field...)...)
}
// splitKey tries to split a database key into a node id and a field part.
func splitKey(key []byte) (id NodeID, field string) {
// If the key is not of a node, return it plainly
if !bytes.HasPrefix(key, nodeDBItemPrefix) {
return NodeID{}, string(key)
}
// Otherwise split the id and field
item := key[len(nodeDBItemPrefix):]
copy(id[:], item[:len(id)])
field = string(item[len(id):])
return id, field
}
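// Together, makeKey and splitKey define the storage schema: a node-scoped
// key is the "n:" prefix, the raw 64-byte node ID, then the field suffix.
// A minimal round-trip sketch (illustrative, not part of the upstream file):
//
//	var id NodeID
//	id[0] = 0x01
//	key := makeKey(id, nodeDBDiscoverPong) // "n:" + id[:] + ":discover:lastpong"
//	gotID, field := splitKey(key)
//	// gotID == id and field == nodeDBDiscoverPong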
// fetchInt64 retrieves an integer value associated with a particular
// database key.
func (db *nodeDB) fetchInt64(key []byte) int64 {
blob, err := db.lvl.Get(key, nil)
if err != nil {
return 0
}
val, read := binary.Varint(blob)
if read <= 0 {
return 0
}
return val
}
// storeInt64 stores an int64 value under the given database key.
func (db *nodeDB) storeInt64(key []byte, n int64) error {
blob := make([]byte, binary.MaxVarintLen64)
blob = blob[:binary.PutVarint(blob, n)]
return db.lvl.Put(key, blob, nil)
}
func (db *nodeDB) storeRLP(key []byte, val interface{}) error {
blob, err := rlp.EncodeToBytes(val)
if err != nil {
return err
}
return db.lvl.Put(key, blob, nil)
}
func (db *nodeDB) fetchRLP(key []byte, val interface{}) error {
blob, err := db.lvl.Get(key, nil)
if err != nil {
return err
}
err = rlp.DecodeBytes(blob, val)
if err != nil {
log.Warn(fmt.Sprintf("key %x (%T) %v", key, val, err))
}
return err
}
// node retrieves a node with a given id from the database.
func (db *nodeDB) node(id NodeID) *Node {
var node Node
if err := db.fetchRLP(makeKey(id, nodeDBDiscoverRoot), &node); err != nil {
return nil
}
node.sha = crypto.Keccak256Hash(node.ID[:])
return &node
}
// updateNode inserts - potentially overwriting - a node into the peer database.
func (db *nodeDB) updateNode(node *Node) error {
return db.storeRLP(makeKey(node.ID, nodeDBDiscoverRoot), node)
}
// deleteNode deletes all information/keys associated with a node.
func (db *nodeDB) deleteNode(id NodeID) error {
deleter := db.lvl.NewIterator(util.BytesPrefix(makeKey(id, "")), nil)
for deleter.Next() {
if err := db.lvl.Delete(deleter.Key(), nil); err != nil {
return err
}
}
return nil
}
// ensureExpirer is a small helper method ensuring that the data expiration
// mechanism is running. If the expiration goroutine is already running, this
// method simply returns.
//
// The goal is to start the data evacuation only after the network successfully
// bootstrapped itself (to prevent dumping potentially useful seed nodes). Since
// it would require significant overhead to exactly trace the first successful
// convergence, it's simpler to "ensure" the correct state when an appropriate
// condition occurs (i.e. a successful bonding), and discard further events.
func (db *nodeDB) ensureExpirer() {
db.runner.Do(func() { go db.expirer() })
}
// expirer should be started in a go routine, and is responsible for looping ad
// infinitum and dropping stale data from the database.
func (db *nodeDB) expirer() {
tick := time.NewTicker(nodeDBCleanupCycle)
defer tick.Stop()
for {
select {
case <-tick.C:
if err := db.expireNodes(); err != nil {
log.Error(fmt.Sprintf("Failed to expire nodedb items: %v", err))
}
case <-db.quit:
return
}
}
}
// expireNodes iterates over the database and deletes all nodes that have not
// been seen (i.e. received a pong from) for some allotted time.
func (db *nodeDB) expireNodes() error {
threshold := time.Now().Add(-nodeDBNodeExpiration)
// Find discovered nodes that are older than the allowance
it := db.lvl.NewIterator(nil, nil)
defer it.Release()
for it.Next() {
// Skip the item if not a discovery node
id, field := splitKey(it.Key())
if field != nodeDBDiscoverRoot {
continue
}
// Skip the node if not expired yet (and not self)
if !bytes.Equal(id[:], db.self[:]) {
if seen := db.lastPong(id); seen.After(threshold) {
continue
}
}
// Otherwise delete all associated information
db.deleteNode(id)
}
return nil
}
// lastPing retrieves the time of the last ping packet sent to a remote node,
// requesting bonding.
func (db *nodeDB) lastPing(id NodeID) time.Time {
return time.Unix(db.fetchInt64(makeKey(id, nodeDBDiscoverPing)), 0)
}
// updateLastPing updates the last time we tried contacting a remote node.
func (db *nodeDB) updateLastPing(id NodeID, instance time.Time) error {
return db.storeInt64(makeKey(id, nodeDBDiscoverPing), instance.Unix())
}
// lastPong retrieves the time of the last successful contact from a remote node.
func (db *nodeDB) lastPong(id NodeID) time.Time {
return time.Unix(db.fetchInt64(makeKey(id, nodeDBDiscoverPong)), 0)
}
// updateLastPong updates the last time a remote node successfully contacted us.
func (db *nodeDB) updateLastPong(id NodeID, instance time.Time) error {
return db.storeInt64(makeKey(id, nodeDBDiscoverPong), instance.Unix())
}
// findFails retrieves the number of findnode failures since bonding.
func (db *nodeDB) findFails(id NodeID) int {
return int(db.fetchInt64(makeKey(id, nodeDBDiscoverFindFails)))
}
// updateFindFails updates the number of findnode failures since bonding.
func (db *nodeDB) updateFindFails(id NodeID, fails int) error {
return db.storeInt64(makeKey(id, nodeDBDiscoverFindFails), int64(fails))
}
// querySeeds retrieves random nodes to be used as potential seed nodes
// for bootstrapping.
func (db *nodeDB) querySeeds(n int, maxAge time.Duration) []*Node {
var (
now = time.Now()
nodes = make([]*Node, 0, n)
it = db.lvl.NewIterator(nil, nil)
id NodeID
)
defer it.Release()
seek:
for seeks := 0; len(nodes) < n && seeks < n*5; seeks++ {
// Seek to a random entry. The first byte is incremented by a
// random amount each time in order to increase the likelihood
// of hitting all existing nodes in very small databases.
ctr := id[0]
rand.Read(id[:])
id[0] = ctr + id[0]%16
it.Seek(makeKey(id, nodeDBDiscoverRoot))
n := nextNode(it)
if n == nil {
id[0] = 0
continue seek // iterator exhausted
}
if n.ID == db.self {
continue seek
}
if now.Sub(db.lastPong(n.ID)) > maxAge {
continue seek
}
for i := range nodes {
if nodes[i].ID == n.ID {
continue seek // duplicate
}
}
nodes = append(nodes, n)
}
return nodes
}
func (db *nodeDB) fetchTopicRegTickets(id NodeID) (issued, used uint32) {
key := makeKey(id, nodeDBTopicRegTickets)
blob, _ := db.lvl.Get(key, nil)
if len(blob) != 8 {
return 0, 0
}
issued = binary.BigEndian.Uint32(blob[0:4])
used = binary.BigEndian.Uint32(blob[4:8])
return
}
func (db *nodeDB) updateTopicRegTickets(id NodeID, issued, used uint32) error {
key := makeKey(id, nodeDBTopicRegTickets)
blob := make([]byte, 8)
binary.BigEndian.PutUint32(blob[0:4], issued)
binary.BigEndian.PutUint32(blob[4:8], used)
return db.lvl.Put(key, blob, nil)
}
// nextNode reads the next node record from the iterator, skipping over other
// database entries.
func nextNode(it iterator.Iterator) *Node {
for end := false; !end; end = !it.Next() {
id, field := splitKey(it.Key())
if field != nodeDBDiscoverRoot {
continue
}
var n Node
if err := rlp.DecodeBytes(it.Value(), &n); err != nil {
log.Warn(fmt.Sprintf("invalid node %x: %v", id, err))
continue
}
return &n
}
return nil
}
// close flushes and closes the database files.
func (db *nodeDB) close() {
close(db.quit)
db.lvl.Close()
}
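// A typical lifecycle of the node database, sketched for the in-memory case
// (path == ""; selfID and n are placeholders, illustrative only):
//
//	db, err := newNodeDB("", 1, selfID)
//	if err != nil {
//		panic(err)
//	}
//	defer db.close()
//	_ = db.updateNode(n)                    // persist a discovered node
//	_ = db.updateLastPong(n.ID, time.Now()) // mark it as seen
//	db.ensureExpirer()                      // start the hourly expiry loop once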


@@ -0,0 +1,24 @@
// Copyright 2018 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package discv5
import "github.com/ethereum/go-ethereum/metrics"
var (
ingressTrafficMeter = metrics.NewRegisteredMeter("discv5/InboundTraffic", nil)
egressTrafficMeter = metrics.NewRegisteredMeter("discv5/OutboundTraffic", nil)
)

vendor/github.com/ethereum/go-ethereum/p2p/discv5/net.go (new generated vendored file, 1269 lines)

File diff suppressed because it is too large.


@@ -0,0 +1,413 @@
// Copyright 2015 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package discv5
import (
"crypto/ecdsa"
"crypto/elliptic"
"encoding/hex"
"errors"
"fmt"
"math/big"
"math/rand"
"net"
"net/url"
"regexp"
"strconv"
"strings"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/crypto"
)
// Node represents a host on the network.
// The public fields of Node may not be modified.
type Node struct {
IP net.IP // len 4 for IPv4 or 16 for IPv6
UDP, TCP uint16 // port numbers
ID NodeID // the node's public key
// Network-related fields are contained in nodeNetGuts.
// These fields are not supposed to be used off the
// Network.loop goroutine.
nodeNetGuts
}
// NewNode creates a new node. It is mostly meant to be used for
// testing purposes.
func NewNode(id NodeID, ip net.IP, udpPort, tcpPort uint16) *Node {
if ipv4 := ip.To4(); ipv4 != nil {
ip = ipv4
}
return &Node{
IP: ip,
UDP: udpPort,
TCP: tcpPort,
ID: id,
nodeNetGuts: nodeNetGuts{sha: crypto.Keccak256Hash(id[:])},
}
}
func (n *Node) addr() *net.UDPAddr {
return &net.UDPAddr{IP: n.IP, Port: int(n.UDP)}
}
// Incomplete returns true for nodes with no IP address.
func (n *Node) Incomplete() bool {
return n.IP == nil
}
// validateComplete checks whether n is a valid complete node.
func (n *Node) validateComplete() error {
if n.Incomplete() {
return errors.New("incomplete node")
}
if n.UDP == 0 {
return errors.New("missing UDP port")
}
if n.TCP == 0 {
return errors.New("missing TCP port")
}
if n.IP.IsMulticast() || n.IP.IsUnspecified() {
return errors.New("invalid IP (multicast/unspecified)")
}
_, err := n.ID.Pubkey() // validate the key (on curve, etc.)
return err
}
// The string representation of a Node is a URL.
// Please see ParseNode for a description of the format.
func (n *Node) String() string {
u := url.URL{Scheme: "enode"}
if n.Incomplete() {
u.Host = fmt.Sprintf("%x", n.ID[:])
} else {
addr := net.TCPAddr{IP: n.IP, Port: int(n.TCP)}
u.User = url.User(fmt.Sprintf("%x", n.ID[:]))
u.Host = addr.String()
if n.UDP != n.TCP {
u.RawQuery = "discport=" + strconv.Itoa(int(n.UDP))
}
}
return u.String()
}
var incompleteNodeURL = regexp.MustCompile("(?i)^(?:enode://)?([0-9a-f]+)$")
// ParseNode parses a node designator.
//
// There are two basic forms of node designators
// - incomplete nodes, which only have the public key (node ID)
// - complete nodes, which contain the public key and IP/Port information
//
// For incomplete nodes, the designator must look like one of these
//
// enode://<hex node id>
// <hex node id>
//
// For complete nodes, the node ID is encoded in the username portion
// of the URL, separated from the host by an @ sign. The hostname can
// only be given as an IP address, DNS domain names are not allowed.
// The port in the host name section is the TCP listening port. If the
// TCP and UDP (discovery) ports differ, the UDP port is specified as
// query parameter "discport".
//
// In the following example, the node URL describes
// a node with IP address 10.3.58.6, TCP listening port 30303
// and UDP discovery port 30301.
//
// enode://<hex node id>@10.3.58.6:30303?discport=30301
func ParseNode(rawurl string) (*Node, error) {
if m := incompleteNodeURL.FindStringSubmatch(rawurl); m != nil {
id, err := HexID(m[1])
if err != nil {
return nil, fmt.Errorf("invalid node ID (%v)", err)
}
return NewNode(id, nil, 0, 0), nil
}
return parseComplete(rawurl)
}
func parseComplete(rawurl string) (*Node, error) {
var (
id NodeID
ip net.IP
tcpPort, udpPort uint64
)
u, err := url.Parse(rawurl)
if err != nil {
return nil, err
}
if u.Scheme != "enode" {
return nil, errors.New("invalid URL scheme, want \"enode\"")
}
// Parse the Node ID from the user portion.
if u.User == nil {
return nil, errors.New("does not contain node ID")
}
if id, err = HexID(u.User.String()); err != nil {
return nil, fmt.Errorf("invalid node ID (%v)", err)
}
// Parse the IP address.
host, port, err := net.SplitHostPort(u.Host)
if err != nil {
return nil, fmt.Errorf("invalid host: %v", err)
}
if ip = net.ParseIP(host); ip == nil {
return nil, errors.New("invalid IP address")
}
// Ensure the IP is 4 bytes long for IPv4 addresses.
if ipv4 := ip.To4(); ipv4 != nil {
ip = ipv4
}
// Parse the port numbers.
if tcpPort, err = strconv.ParseUint(port, 10, 16); err != nil {
return nil, errors.New("invalid port")
}
udpPort = tcpPort
qv := u.Query()
if qv.Get("discport") != "" {
udpPort, err = strconv.ParseUint(qv.Get("discport"), 10, 16)
if err != nil {
return nil, errors.New("invalid discport in query")
}
}
return NewNode(id, ip, uint16(udpPort), uint16(tcpPort)), nil
}
// MustParseNode parses a node URL. It panics if the URL is not valid.
func MustParseNode(rawurl string) *Node {
n, err := ParseNode(rawurl)
if err != nil {
panic("invalid node URL: " + err.Error())
}
return n
}
// MarshalText implements encoding.TextMarshaler.
func (n *Node) MarshalText() ([]byte, error) {
return []byte(n.String()), nil
}
// UnmarshalText implements encoding.TextUnmarshaler.
func (n *Node) UnmarshalText(text []byte) error {
dec, err := ParseNode(string(text))
if err == nil {
*n = *dec
}
return err
}
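// A parsing sketch for the URL forms documented above (the node ID here is
// a made-up 128-hex-character placeholder, not a real public key):
//
//	id := strings.Repeat("ab", 64)
//	n, err := ParseNode("enode://" + id + "@10.3.58.6:30303?discport=30301")
//	// err == nil, n.IP == 10.3.58.6, n.TCP == 30303, n.UDP == 30301
//
//	incomplete, err := ParseNode(id) // bare hex form
//	// incomplete.Incomplete() == true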
// type nodeQueue []*Node
//
// // pushNew adds n to the end if it is not present.
// func (nl *nodeList) appendNew(n *Node) {
// for _, entry := range n {
// if entry == n {
// return
// }
// }
// *nq = append(*nq, n)
// }
//
// // popRandom removes a random node. Nodes closer to
// // the head of the list have a slightly higher probability.
// func (nl *nodeList) popRandom() *Node {
// ix := rand.Intn(len(*nq))
// //TODO: probability as mentioned above.
// nl.removeIndex(ix)
// }
//
// func (nl *nodeList) removeIndex(i int) *Node {
// slice = *nl
// if len(*slice) <= i {
// return nil
// }
// *nl = append(slice[:i], slice[i+1:]...)
// }
const nodeIDBits = 512
// NodeID is a unique identifier for each node.
// The node identifier is a marshaled elliptic curve public key.
type NodeID [nodeIDBits / 8]byte
// NodeID prints as a long hexadecimal number.
func (n NodeID) String() string {
return fmt.Sprintf("%x", n[:])
}
// The Go syntax representation of a NodeID is a call to HexID.
func (n NodeID) GoString() string {
return fmt.Sprintf("discover.HexID(\"%x\")", n[:])
}
// TerminalString returns a shortened hex string for terminal logging.
func (n NodeID) TerminalString() string {
return hex.EncodeToString(n[:8])
}
// HexID converts a hex string to a NodeID.
// The string may be prefixed with 0x.
func HexID(in string) (NodeID, error) {
var id NodeID
b, err := hex.DecodeString(strings.TrimPrefix(in, "0x"))
if err != nil {
return id, err
} else if len(b) != len(id) {
return id, fmt.Errorf("wrong length, want %d hex chars", len(id)*2)
}
copy(id[:], b)
return id, nil
}
// MustHexID converts a hex string to a NodeID.
// It panics if the string is not a valid NodeID.
func MustHexID(in string) NodeID {
id, err := HexID(in)
if err != nil {
panic(err)
}
return id
}
// PubkeyID returns a marshaled representation of the given public key.
func PubkeyID(pub *ecdsa.PublicKey) NodeID {
var id NodeID
pbytes := elliptic.Marshal(pub.Curve, pub.X, pub.Y)
if len(pbytes)-1 != len(id) {
panic(fmt.Errorf("need %d bit pubkey, got %d bits", (len(id)+1)*8, len(pbytes)))
}
copy(id[:], pbytes[1:])
return id
}
// Pubkey returns the public key represented by the node ID.
// It returns an error if the ID is not a point on the curve.
func (n NodeID) Pubkey() (*ecdsa.PublicKey, error) {
p := &ecdsa.PublicKey{Curve: crypto.S256(), X: new(big.Int), Y: new(big.Int)}
half := len(n) / 2
p.X.SetBytes(n[:half])
p.Y.SetBytes(n[half:])
if !p.Curve.IsOnCurve(p.X, p.Y) {
return nil, errors.New("id is invalid secp256k1 curve point")
}
return p, nil
}
// recoverNodeID computes the public key used to sign the
// given hash from the signature.
func recoverNodeID(hash, sig []byte) (id NodeID, err error) {
pubkey, err := crypto.Ecrecover(hash, sig)
if err != nil {
return id, err
}
if len(pubkey)-1 != len(id) {
return id, fmt.Errorf("recovered pubkey has %d bits, want %d bits", len(pubkey)*8, (len(id)+1)*8)
}
for i := range id {
id[i] = pubkey[i+1]
}
return id, nil
}
// distcmp compares the distances a->target and b->target.
// Returns -1 if a is closer to target, 1 if b is closer to target
// and 0 if they are equal.
func distcmp(target, a, b common.Hash) int {
for i := range target {
da := a[i] ^ target[i]
db := b[i] ^ target[i]
if da > db {
return 1
} else if da < db {
return -1
}
}
return 0
}
// table of leading zero counts for bytes [0..255]
var lzcount = [256]int{
8, 7, 6, 6, 5, 5, 5, 5,
4, 4, 4, 4, 4, 4, 4, 4,
3, 3, 3, 3, 3, 3, 3, 3,
3, 3, 3, 3, 3, 3, 3, 3,
2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2,
1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
}
// logdist returns the logarithmic distance between a and b, log2(a ^ b).
func logdist(a, b common.Hash) int {
lz := 0
for i := range a {
x := a[i] ^ b[i]
if x == 0 {
lz += 8
} else {
lz += lzcount[x]
break
}
}
return len(a)*8 - lz
}
// hashAtDistance returns a random hash such that logdist(a, b) == n
func hashAtDistance(a common.Hash, n int) (b common.Hash) {
if n == 0 {
return a
}
// flip bit at position n, fill the rest with random bits
b = a
pos := len(a) - n/8 - 1
bit := byte(0x01) << (byte(n%8) - 1)
if bit == 0 {
pos++
bit = 0x80
}
b[pos] = a[pos]&^bit | ^a[pos]&bit // TODO: randomize end bits
for i := pos + 1; i < len(a); i++ {
b[i] = byte(rand.Intn(255))
}
return b
}
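// The two XOR-metric helpers are inverses in the sense used by the table
// code: for any hash a and 0 < n <= 256, logdist(a, hashAtDistance(a, n))
// equals n. A quick illustrative check (not part of the upstream file):
//
//	var a common.Hash
//	for n := 1; n <= 256; n++ {
//		if logdist(a, hashAtDistance(a, n)) != n {
//			panic("distance invariant violated")
//		}
//	}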


@@ -0,0 +1,17 @@
// Code generated by "stringer -type=nodeEvent"; DO NOT EDIT.
package discv5
import "strconv"
const _nodeEvent_name = "pongTimeoutpingTimeoutneighboursTimeout"
var _nodeEvent_index = [...]uint8{0, 11, 22, 39}
func (i nodeEvent) String() string {
i -= 264
if i >= nodeEvent(len(_nodeEvent_index)-1) {
return "nodeEvent(" + strconv.FormatInt(int64(i+264), 10) + ")"
}
return _nodeEvent_name[_nodeEvent_index[i]:_nodeEvent_index[i+1]]
}


@@ -0,0 +1,318 @@
// Copyright 2016 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
// Package discv5 is a prototype implementation of Discovery v5.
// Deprecated: do not use this package.
package discv5
import (
"crypto/rand"
"encoding/binary"
"fmt"
"net"
"sort"
"github.com/ethereum/go-ethereum/common"
)
const (
alpha = 3 // Kademlia concurrency factor
bucketSize = 16 // Kademlia bucket size
hashBits = len(common.Hash{}) * 8
nBuckets = hashBits + 1 // Number of buckets
maxFindnodeFailures = 5
)
type Table struct {
count int // number of nodes
buckets [nBuckets]*bucket // index of known nodes by distance
nodeAddedHook func(*Node) // for testing
self *Node // metadata of the local node
}
// bucket contains nodes, ordered by their last activity. The entry
// that was most recently active is the first element in entries.
type bucket struct {
entries []*Node
replacements []*Node
}
func newTable(ourID NodeID, ourAddr *net.UDPAddr) *Table {
self := NewNode(ourID, ourAddr.IP, uint16(ourAddr.Port), uint16(ourAddr.Port))
tab := &Table{self: self}
for i := range tab.buckets {
tab.buckets[i] = new(bucket)
}
return tab
}
const printTable = false
// chooseBucketRefreshTarget selects random refresh targets to keep all Kademlia
// buckets filled with live connections and keep the network topology healthy.
// This requires selecting addresses closer to our own with a higher probability
// in order to refresh closer buckets too.
//
// This algorithm approximates the distance distribution of existing nodes in the
// table by selecting a random node from the table and selecting a target address
// with a distance less than twice that of the selected node.
// This algorithm will be improved later to specifically target the least recently
// used buckets.
func (tab *Table) chooseBucketRefreshTarget() common.Hash {
entries := 0
if printTable {
fmt.Println()
}
for i, b := range &tab.buckets {
entries += len(b.entries)
if printTable {
for _, e := range b.entries {
fmt.Println(i, e.state, e.addr().String(), e.ID.String(), e.sha.Hex())
}
}
}
prefix := binary.BigEndian.Uint64(tab.self.sha[0:8])
dist := ^uint64(0)
entry := int(randUint(uint32(entries + 1)))
for _, b := range &tab.buckets {
if entry < len(b.entries) {
n := b.entries[entry]
dist = binary.BigEndian.Uint64(n.sha[0:8]) ^ prefix
break
}
entry -= len(b.entries)
}
ddist := ^uint64(0)
if dist+dist > dist { // doubling does not overflow
ddist = dist + dist
}
targetPrefix := prefix ^ randUint64n(ddist)
var target common.Hash
binary.BigEndian.PutUint64(target[0:8], targetPrefix)
rand.Read(target[8:])
return target
}
// readRandomNodes fills the given slice with random nodes from the
// table. It will not write the same node more than once. The nodes in
// the slice are copies and can be modified by the caller.
func (tab *Table) readRandomNodes(buf []*Node) (n int) {
// TODO: tree-based buckets would help here
// Find all non-empty buckets and get a fresh slice of their entries.
var buckets [][]*Node
for _, b := range &tab.buckets {
if len(b.entries) > 0 {
buckets = append(buckets, b.entries)
}
}
if len(buckets) == 0 {
return 0
}
// Shuffle the buckets.
for i := uint32(len(buckets)) - 1; i > 0; i-- {
j := randUint(i)
buckets[i], buckets[j] = buckets[j], buckets[i]
}
// Move head of each bucket into buf, removing buckets that become empty.
var i, j int
for ; i < len(buf); i, j = i+1, (j+1)%len(buckets) {
b := buckets[j]
node := *b[0] // copy the node so the caller can safely modify it
buf[i] = &node
buckets[j] = b[1:]
if len(b) == 1 {
buckets = append(buckets[:j], buckets[j+1:]...)
}
if len(buckets) == 0 {
i++ // account for the element written in this iteration
break
}
}
return i
}
func randUint(max uint32) uint32 {
if max < 2 {
return 0
}
var b [4]byte
rand.Read(b[:])
return binary.BigEndian.Uint32(b[:]) % max
}
func randUint64n(max uint64) uint64 {
if max < 2 {
return 0
}
var b [8]byte
rand.Read(b[:])
return binary.BigEndian.Uint64(b[:]) % max
}
// closest returns the n nodes in the table that are closest to the
// given id. The caller must hold tab.mutex.
func (tab *Table) closest(target common.Hash, nresults int) *nodesByDistance {
// This is a very wasteful way to find the closest nodes but
// obviously correct. I believe that tree-based buckets would make
// this easier to implement efficiently.
close := &nodesByDistance{target: target}
for _, b := range &tab.buckets {
for _, n := range b.entries {
close.push(n, nresults)
}
}
return close
}
// add attempts to add the given node to its corresponding bucket. If the
// bucket has space available, adding the node succeeds immediately.
// Otherwise, the node is added to the replacement cache for the bucket.
func (tab *Table) add(n *Node) (contested *Node) {
//fmt.Println("add", n.addr().String(), n.ID.String(), n.sha.Hex())
if n.ID == tab.self.ID {
return
}
b := tab.buckets[logdist(tab.self.sha, n.sha)]
switch {
case b.bump(n):
// n exists in b.
return nil
case len(b.entries) < bucketSize:
// b has space available.
b.addFront(n)
tab.count++
if tab.nodeAddedHook != nil {
tab.nodeAddedHook(n)
}
return nil
default:
// b has no space left, add to replacement cache
// and revalidate the last entry.
// TODO: drop previous node
b.replacements = append(b.replacements, n)
if len(b.replacements) > bucketSize {
copy(b.replacements, b.replacements[1:])
b.replacements = b.replacements[:len(b.replacements)-1]
}
return b.entries[len(b.entries)-1]
}
}
// stuff adds nodes to the end of their corresponding bucket
// if the bucket is not full.
func (tab *Table) stuff(nodes []*Node) {
outer:
for _, n := range nodes {
if n.ID == tab.self.ID {
continue // don't add self
}
bucket := tab.buckets[logdist(tab.self.sha, n.sha)]
for i := range bucket.entries {
if bucket.entries[i].ID == n.ID {
continue outer // already in bucket
}
}
if len(bucket.entries) < bucketSize {
bucket.entries = append(bucket.entries, n)
tab.count++
if tab.nodeAddedHook != nil {
tab.nodeAddedHook(n)
}
}
}
}
// delete removes an entry from the node table (used to evacuate
// failed/non-bonded discovery peers).
func (tab *Table) delete(node *Node) {
//fmt.Println("delete", node.addr().String(), node.ID.String(), node.sha.Hex())
bucket := tab.buckets[logdist(tab.self.sha, node.sha)]
for i := range bucket.entries {
if bucket.entries[i].ID == node.ID {
bucket.entries = append(bucket.entries[:i], bucket.entries[i+1:]...)
tab.count--
return
}
}
}
func (tab *Table) deleteReplace(node *Node) {
b := tab.buckets[logdist(tab.self.sha, node.sha)]
i := 0
for i < len(b.entries) {
if b.entries[i].ID == node.ID {
b.entries = append(b.entries[:i], b.entries[i+1:]...)
tab.count--
} else {
i++
}
}
// refill from replacement cache
// TODO: maybe use random index
if len(b.entries) < bucketSize && len(b.replacements) > 0 {
ri := len(b.replacements) - 1
b.addFront(b.replacements[ri])
tab.count++
b.replacements[ri] = nil
b.replacements = b.replacements[:ri]
}
}
func (b *bucket) addFront(n *Node) {
b.entries = append(b.entries, nil)
copy(b.entries[1:], b.entries)
b.entries[0] = n
}
func (b *bucket) bump(n *Node) bool {
for i := range b.entries {
if b.entries[i].ID == n.ID {
// move it to the front
copy(b.entries[1:], b.entries[:i])
b.entries[0] = n
return true
}
}
return false
}
// nodesByDistance is a list of nodes, ordered by
// distance to target.
type nodesByDistance struct {
entries []*Node
target common.Hash
}
// push adds the given node to the list, keeping the total size below maxElems.
func (h *nodesByDistance) push(n *Node, maxElems int) {
ix := sort.Search(len(h.entries), func(i int) bool {
return distcmp(h.target, h.entries[i].sha, n.sha) > 0
})
if len(h.entries) < maxElems {
h.entries = append(h.entries, n)
}
if ix == len(h.entries) {
// farther away than all nodes we already have.
// if there was room for it, the node is now the last element.
} else {
// slide existing entries down to make room
// this will overwrite the entry we just appended.
copy(h.entries[ix+1:], h.entries[ix:])
h.entries[ix] = n
}
}
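// push is a capped insertion sort. A sketch of the resulting order, assuming
// nodes n1, n2, n3 have strictly increasing XOR distance to target
// (placeholder names, illustrative only):
//
//	h := &nodesByDistance{target: target}
//	h.push(n3, 2) // entries: [n3]
//	h.push(n1, 2) // entries: [n1, n3]
//	h.push(n2, 2) // entries: [n1, n2]; n3 falls off the capped end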


@@ -0,0 +1,884 @@
// Copyright 2016 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package discv5
import (
"bytes"
"encoding/binary"
"fmt"
"math"
"math/rand"
"time"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/mclock"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/log"
)
const (
ticketTimeBucketLen = time.Minute
collectFrequency = time.Second * 30
registerFrequency = time.Second * 60
maxCollectDebt = 10
maxRegisterDebt = 5
keepTicketConst = time.Minute * 10
keepTicketExp = time.Minute * 5
targetWaitTime = time.Minute * 10
topicQueryTimeout = time.Second * 5
topicQueryResend = time.Minute
// topic radius detection
maxRadius = 0xffffffffffffffff
radiusTC = time.Minute * 20
radiusBucketsPerBit = 8
minSlope = 1
minPeakSize = 40
maxNoAdjust = 20
lookupWidth = 8
minRightSum = 20
searchForceQuery = 4
)
// timeBucket represents absolute monotonic time in minutes.
// It is used as the index into the per-topic ticket buckets.
type timeBucket int
type ticket struct {
topics []Topic
regTime []mclock.AbsTime // Per-topic local absolute time when the ticket can be used.
// The serial number that was issued by the server.
serial uint32
// Used by registrar, tracks absolute time when the ticket was created.
issueTime mclock.AbsTime
// Fields used only by registrants
node *Node // the registrar node that signed this ticket
refCnt int // tracks number of topics that will be registered using this ticket
pong []byte // encoded pong packet signed by the registrar
}
// ticketRef refers to a single topic in a ticket.
type ticketRef struct {
t *ticket
idx int // index of the topic in t.topics and t.regTime
}
func (ref ticketRef) topic() Topic {
return ref.t.topics[ref.idx]
}
func (ref ticketRef) topicRegTime() mclock.AbsTime {
return ref.t.regTime[ref.idx]
}
func pongToTicket(localTime mclock.AbsTime, topics []Topic, node *Node, p *ingressPacket) (*ticket, error) {
wps := p.data.(*pong).WaitPeriods
if len(topics) != len(wps) {
return nil, fmt.Errorf("bad wait period list: got %d values, want %d", len(topics), len(wps))
}
if rlpHash(topics) != p.data.(*pong).TopicHash {
return nil, fmt.Errorf("bad topic hash")
}
t := &ticket{
issueTime: localTime,
node: node,
topics: topics,
pong: p.rawData,
regTime: make([]mclock.AbsTime, len(wps)),
}
// Convert wait periods to local absolute time.
for i, wp := range wps {
t.regTime[i] = localTime + mclock.AbsTime(time.Second*time.Duration(wp))
}
return t, nil
}
func ticketToPong(t *ticket, pong *pong) {
pong.Expiration = uint64(t.issueTime / mclock.AbsTime(time.Second))
pong.TopicHash = rlpHash(t.topics)
pong.TicketSerial = t.serial
pong.WaitPeriods = make([]uint32, len(t.regTime))
for i, regTime := range t.regTime {
pong.WaitPeriods[i] = uint32(time.Duration(regTime-t.issueTime) / time.Second)
}
}
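// pongToTicket and ticketToPong are inverse conversions: the wire format
// carries relative wait periods in seconds, while the local ticket stores
// absolute registration times. Sketch of the relationship (illustrative):
//
//	regTime[i] = issueTime + WaitPeriods[i] * time.Second   // pongToTicket
//	WaitPeriods[i] = (regTime[i] - issueTime) / time.Second // ticketToPong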
type ticketStore struct {
// radius detector and target address generator
// exists for both searched and registered topics
radius map[Topic]*topicRadius
// Contains buckets (for each absolute minute) of tickets
// that can be used in that minute.
// This is only set if the topic is being registered.
tickets map[Topic]*topicTickets
regQueue []Topic // Topic registration queue for round robin attempts
regSet map[Topic]struct{} // Topic registration queue contents for fast filling
nodes map[*Node]*ticket
nodeLastReq map[*Node]reqInfo
lastBucketFetched timeBucket
nextTicketCached *ticketRef
searchTopicMap map[Topic]searchTopic
nextTopicQueryCleanup mclock.AbsTime
queriesSent map[*Node]map[common.Hash]sentQuery
}
type searchTopic struct {
foundChn chan<- *Node
}
type sentQuery struct {
sent mclock.AbsTime
lookup lookupInfo
}
type topicTickets struct {
buckets map[timeBucket][]ticketRef
nextLookup mclock.AbsTime
nextReg mclock.AbsTime
}
func newTicketStore() *ticketStore {
return &ticketStore{
radius: make(map[Topic]*topicRadius),
tickets: make(map[Topic]*topicTickets),
regSet: make(map[Topic]struct{}),
nodes: make(map[*Node]*ticket),
nodeLastReq: make(map[*Node]reqInfo),
searchTopicMap: make(map[Topic]searchTopic),
queriesSent: make(map[*Node]map[common.Hash]sentQuery),
}
}
// addTopic starts tracking a topic. If register is true,
// the local node will register the topic and tickets will be collected.
func (s *ticketStore) addTopic(topic Topic, register bool) {
log.Trace("Adding discovery topic", "topic", topic, "register", register)
if s.radius[topic] == nil {
s.radius[topic] = newTopicRadius(topic)
}
if register && s.tickets[topic] == nil {
s.tickets[topic] = &topicTickets{buckets: make(map[timeBucket][]ticketRef)}
}
}
func (s *ticketStore) addSearchTopic(t Topic, foundChn chan<- *Node) {
s.addTopic(t, false)
if s.searchTopicMap[t].foundChn == nil {
s.searchTopicMap[t] = searchTopic{foundChn: foundChn}
}
}
func (s *ticketStore) removeSearchTopic(t Topic) {
if st := s.searchTopicMap[t]; st.foundChn != nil {
delete(s.searchTopicMap, t)
}
}
// removeRegisterTopic deletes all tickets for the given topic.
func (s *ticketStore) removeRegisterTopic(topic Topic) {
log.Trace("Removing discovery topic", "topic", topic)
if s.tickets[topic] == nil {
log.Warn("Removing non-existent discovery topic", "topic", topic)
return
}
for _, list := range s.tickets[topic].buckets {
for _, ref := range list {
ref.t.refCnt--
if ref.t.refCnt == 0 {
delete(s.nodes, ref.t.node)
delete(s.nodeLastReq, ref.t.node)
}
}
}
delete(s.tickets, topic)
}
func (s *ticketStore) regTopicSet() []Topic {
topics := make([]Topic, 0, len(s.tickets))
for topic := range s.tickets {
topics = append(topics, topic)
}
return topics
}
// nextRegisterLookup returns the target of the next lookup for ticket collection.
func (s *ticketStore) nextRegisterLookup() (lookupInfo, time.Duration) {
// Queue up any new topics (or discarded ones), preserving iteration order
for topic := range s.tickets {
if _, ok := s.regSet[topic]; !ok {
s.regQueue = append(s.regQueue, topic)
s.regSet[topic] = struct{}{}
}
}
// Iterate over the set of all topics and look up the next suitable one
for len(s.regQueue) > 0 {
// Fetch the next topic from the queue, and ensure it still exists
topic := s.regQueue[0]
s.regQueue = s.regQueue[1:]
delete(s.regSet, topic)
if s.tickets[topic] == nil {
continue
}
// If the topic needs more tickets, return it
if s.tickets[topic].nextLookup < mclock.Now() {
next, delay := s.radius[topic].nextTarget(false), 100*time.Millisecond
log.Trace("Found discovery topic to register", "topic", topic, "target", next.target, "delay", delay)
return next, delay
}
}
// No registration topics found or all exhausted, sleep
delay := 40 * time.Second
log.Trace("No topic found to register", "delay", delay)
return lookupInfo{}, delay
}
func (s *ticketStore) nextSearchLookup(topic Topic) lookupInfo {
tr := s.radius[topic]
target := tr.nextTarget(tr.radiusLookupCnt >= searchForceQuery)
if target.radiusLookup {
tr.radiusLookupCnt++
} else {
tr.radiusLookupCnt = 0
}
return target
}
func (s *ticketStore) addTicketRef(r ticketRef) {
topic := r.t.topics[r.idx]
tickets := s.tickets[topic]
if tickets == nil {
log.Warn("Adding ticket to non-existent topic", "topic", topic)
return
}
bucket := timeBucket(r.t.regTime[r.idx] / mclock.AbsTime(ticketTimeBucketLen))
tickets.buckets[bucket] = append(tickets.buckets[bucket], r)
r.t.refCnt++
min := mclock.Now() - mclock.AbsTime(collectFrequency)*maxCollectDebt
if tickets.nextLookup < min {
tickets.nextLookup = min
}
tickets.nextLookup += mclock.AbsTime(collectFrequency)
//s.removeExcessTickets(topic)
}
func (s *ticketStore) nextFilteredTicket() (*ticketRef, time.Duration) {
now := mclock.Now()
for {
ticket, wait := s.nextRegisterableTicket()
if ticket == nil {
return ticket, wait
}
log.Trace("Found discovery ticket to register", "node", ticket.t.node, "serial", ticket.t.serial, "wait", wait)
regTime := now + mclock.AbsTime(wait)
topic := ticket.t.topics[ticket.idx]
if s.tickets[topic] != nil && regTime >= s.tickets[topic].nextReg {
return ticket, wait
}
s.removeTicketRef(*ticket)
}
}
func (s *ticketStore) ticketRegistered(ref ticketRef) {
now := mclock.Now()
topic := ref.t.topics[ref.idx]
tickets := s.tickets[topic]
min := now - mclock.AbsTime(registerFrequency)*maxRegisterDebt
if min > tickets.nextReg {
tickets.nextReg = min
}
tickets.nextReg += mclock.AbsTime(registerFrequency)
s.tickets[topic] = tickets
s.removeTicketRef(ref)
}
// nextRegisterableTicket returns the next ticket that can be used
// to register.
//
// If the returned wait time is <= zero, the ticket can be used. For a positive
// wait time, the caller should requery the next ticket later.
//
// A ticket can be returned more than once with a wait time <= zero in case
// the ticket contains multiple topics.
func (s *ticketStore) nextRegisterableTicket() (*ticketRef, time.Duration) {
now := mclock.Now()
if s.nextTicketCached != nil {
return s.nextTicketCached, time.Duration(s.nextTicketCached.topicRegTime() - now)
}
for bucket := s.lastBucketFetched; ; bucket++ {
var (
empty = true // true if there are no tickets
nextTicket ticketRef // uninitialized if this bucket is empty
)
for _, tickets := range s.tickets {
//s.removeExcessTickets(topic)
if len(tickets.buckets) != 0 {
empty = false
list := tickets.buckets[bucket]
for _, ref := range list {
//debugLog(fmt.Sprintf(" nrt bucket = %d node = %x sn = %v wait = %v", bucket, ref.t.node.ID[:8], ref.t.serial, time.Duration(ref.topicRegTime()-now)))
if nextTicket.t == nil || ref.topicRegTime() < nextTicket.topicRegTime() {
nextTicket = ref
}
}
}
}
if empty {
return nil, 0
}
if nextTicket.t != nil {
s.nextTicketCached = &nextTicket
return &nextTicket, time.Duration(nextTicket.topicRegTime() - now)
}
s.lastBucketFetched = bucket
}
}
// removeTicketRef removes a ticket reference from the ticket store.
func (s *ticketStore) removeTicketRef(ref ticketRef) {
log.Trace("Removing discovery ticket reference", "node", ref.t.node.ID, "serial", ref.t.serial)
// Make nextRegisterableTicket return the next available ticket.
s.nextTicketCached = nil
topic := ref.topic()
tickets := s.tickets[topic]
if tickets == nil {
log.Trace("Removing tickets from unknown topic", "topic", topic)
return
}
bucket := timeBucket(ref.t.regTime[ref.idx] / mclock.AbsTime(ticketTimeBucketLen))
list := tickets.buckets[bucket]
idx := -1
for i, bt := range list {
if bt.t == ref.t {
idx = i
break
}
}
if idx == -1 {
panic(nil)
}
list = append(list[:idx], list[idx+1:]...)
if len(list) != 0 {
tickets.buckets[bucket] = list
} else {
delete(tickets.buckets, bucket)
}
ref.t.refCnt--
if ref.t.refCnt == 0 {
delete(s.nodes, ref.t.node)
delete(s.nodeLastReq, ref.t.node)
}
}
type lookupInfo struct {
target common.Hash
topic Topic
radiusLookup bool
}
type reqInfo struct {
pingHash []byte
lookup lookupInfo
time mclock.AbsTime
}
// findIdx returns the index of the given topic in t.topics, or -1 if not found.
func (t *ticket) findIdx(topic Topic) int {
for i, tt := range t.topics {
if tt == topic {
return i
}
}
return -1
}
func (s *ticketStore) registerLookupDone(lookup lookupInfo, nodes []*Node, ping func(n *Node) []byte) {
now := mclock.Now()
for i, n := range nodes {
if i == 0 || (binary.BigEndian.Uint64(n.sha[:8])^binary.BigEndian.Uint64(lookup.target[:8])) < s.radius[lookup.topic].minRadius {
if lookup.radiusLookup {
if lastReq, ok := s.nodeLastReq[n]; !ok || time.Duration(now-lastReq.time) > radiusTC {
s.nodeLastReq[n] = reqInfo{pingHash: ping(n), lookup: lookup, time: now}
}
} else {
if s.nodes[n] == nil {
s.nodeLastReq[n] = reqInfo{pingHash: ping(n), lookup: lookup, time: now}
}
}
}
}
}
func (s *ticketStore) searchLookupDone(lookup lookupInfo, nodes []*Node, query func(n *Node, topic Topic) []byte) {
now := mclock.Now()
for i, n := range nodes {
if i == 0 || (binary.BigEndian.Uint64(n.sha[:8])^binary.BigEndian.Uint64(lookup.target[:8])) < s.radius[lookup.topic].minRadius {
if lookup.radiusLookup {
if lastReq, ok := s.nodeLastReq[n]; !ok || time.Duration(now-lastReq.time) > radiusTC {
s.nodeLastReq[n] = reqInfo{pingHash: nil, lookup: lookup, time: now}
}
} // else {
if s.canQueryTopic(n, lookup.topic) {
hash := query(n, lookup.topic)
if hash != nil {
s.addTopicQuery(common.BytesToHash(hash), n, lookup)
}
}
//}
}
}
}
func (s *ticketStore) adjustWithTicket(now mclock.AbsTime, targetHash common.Hash, t *ticket) {
for i, topic := range t.topics {
if tt, ok := s.radius[topic]; ok {
tt.adjustWithTicket(now, targetHash, ticketRef{t, i})
}
}
}
func (s *ticketStore) addTicket(localTime mclock.AbsTime, pingHash []byte, ticket *ticket) {
log.Trace("Adding discovery ticket", "node", ticket.node.ID, "serial", ticket.serial)
lastReq, ok := s.nodeLastReq[ticket.node]
if !(ok && bytes.Equal(pingHash, lastReq.pingHash)) {
return
}
s.adjustWithTicket(localTime, lastReq.lookup.target, ticket)
if lastReq.lookup.radiusLookup || s.nodes[ticket.node] != nil {
return
}
topic := lastReq.lookup.topic
topicIdx := ticket.findIdx(topic)
if topicIdx == -1 {
return
}
bucket := timeBucket(localTime / mclock.AbsTime(ticketTimeBucketLen))
if s.lastBucketFetched == 0 || bucket < s.lastBucketFetched {
s.lastBucketFetched = bucket
}
if _, ok := s.tickets[topic]; ok {
wait := ticket.regTime[topicIdx] - localTime
rnd := rand.ExpFloat64()
if rnd > 10 {
rnd = 10
}
if float64(wait) < float64(keepTicketConst)+float64(keepTicketExp)*rnd {
// use the ticket to register this topic
//fmt.Println("addTicket", ticket.node.ID[:8], ticket.node.addr().String(), ticket.serial, ticket.pong)
s.addTicketRef(ticketRef{ticket, topicIdx})
}
}
if ticket.refCnt > 0 {
s.nextTicketCached = nil
s.nodes[ticket.node] = ticket
}
}
func (s *ticketStore) canQueryTopic(node *Node, topic Topic) bool {
qq := s.queriesSent[node]
if qq != nil {
now := mclock.Now()
for _, sq := range qq {
if sq.lookup.topic == topic && sq.sent > now-mclock.AbsTime(topicQueryResend) {
return false
}
}
}
return true
}
func (s *ticketStore) addTopicQuery(hash common.Hash, node *Node, lookup lookupInfo) {
now := mclock.Now()
qq := s.queriesSent[node]
if qq == nil {
qq = make(map[common.Hash]sentQuery)
s.queriesSent[node] = qq
}
qq[hash] = sentQuery{sent: now, lookup: lookup}
s.cleanupTopicQueries(now)
}
func (s *ticketStore) cleanupTopicQueries(now mclock.AbsTime) {
if s.nextTopicQueryCleanup > now {
return
}
exp := now - mclock.AbsTime(topicQueryResend)
for n, qq := range s.queriesSent {
for h, q := range qq {
if q.sent < exp {
delete(qq, h)
}
}
if len(qq) == 0 {
delete(s.queriesSent, n)
}
}
s.nextTopicQueryCleanup = now + mclock.AbsTime(topicQueryTimeout)
}
func (s *ticketStore) gotTopicNodes(from *Node, hash common.Hash, nodes []rpcNode) (timeout bool) {
now := mclock.Now()
//fmt.Println("got", from.addr().String(), hash, len(nodes))
qq := s.queriesSent[from]
if qq == nil {
return true
}
q, ok := qq[hash]
if !ok || now > q.sent+mclock.AbsTime(topicQueryTimeout) {
return true
}
inside := float64(0)
if len(nodes) > 0 {
inside = 1
}
s.radius[q.lookup.topic].adjust(now, q.lookup.target, from.sha, inside)
chn := s.searchTopicMap[q.lookup.topic].foundChn
if chn == nil {
//fmt.Println("no channel")
return false
}
for _, node := range nodes {
ip := node.IP
if ip.IsUnspecified() || ip.IsLoopback() {
ip = from.IP
}
n := NewNode(node.ID, ip, node.UDP, node.TCP)
select {
case chn <- n:
default:
return false
}
}
return false
}
type topicRadius struct {
topic Topic
topicHashPrefix uint64
radius, minRadius uint64
buckets []topicRadiusBucket
converged bool
radiusLookupCnt int
}
type topicRadiusEvent int
const (
trOutside topicRadiusEvent = iota
trInside
trNoAdjust
trCount
)
type topicRadiusBucket struct {
weights [trCount]float64
lastTime mclock.AbsTime
value float64
lookupSent map[common.Hash]mclock.AbsTime
}
func (b *topicRadiusBucket) update(now mclock.AbsTime) {
if now == b.lastTime {
return
}
exp := math.Exp(-float64(now-b.lastTime) / float64(radiusTC))
for i, w := range b.weights {
b.weights[i] = w * exp
}
b.lastTime = now
for target, tm := range b.lookupSent {
if now-tm > mclock.AbsTime(respTimeout) {
b.weights[trNoAdjust] += 1
delete(b.lookupSent, target)
}
}
}
func (b *topicRadiusBucket) adjust(now mclock.AbsTime, inside float64) {
b.update(now)
if inside <= 0 {
b.weights[trOutside] += 1
} else {
if inside >= 1 {
b.weights[trInside] += 1
} else {
b.weights[trInside] += inside
b.weights[trOutside] += 1 - inside
}
}
}
func newTopicRadius(t Topic) *topicRadius {
topicHash := crypto.Keccak256Hash([]byte(t))
topicHashPrefix := binary.BigEndian.Uint64(topicHash[0:8])
return &topicRadius{
topic: t,
topicHashPrefix: topicHashPrefix,
radius: maxRadius,
minRadius: maxRadius,
}
}
func (r *topicRadius) getBucketIdx(addrHash common.Hash) int {
prefix := binary.BigEndian.Uint64(addrHash[0:8])
var log2 float64
if prefix != r.topicHashPrefix {
log2 = math.Log2(float64(prefix ^ r.topicHashPrefix))
}
bucket := int((64 - log2) * radiusBucketsPerBit)
max := 64*radiusBucketsPerBit - 1
if bucket > max {
return max
}
if bucket < 0 {
return 0
}
return bucket
}
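// Bucket indices grow as addresses get closer to the topic hash, at
// radiusBucketsPerBit (8) buckets per bit of XOR prefix. A worked example
// (illustrative only):
//
//	xor := prefix ^ r.topicHashPrefix // suppose xor == 1<<40
//	// log2(1<<40) == 40, so bucket == int((64-40) * 8) == 192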
func (r *topicRadius) targetForBucket(bucket int) common.Hash {
min := math.Pow(2, 64-float64(bucket+1)/radiusBucketsPerBit)
max := math.Pow(2, 64-float64(bucket)/radiusBucketsPerBit)
a := uint64(min)
b := randUint64n(uint64(max - min))
xor := a + b
if xor < a {
xor = ^uint64(0)
}
prefix := r.topicHashPrefix ^ xor
var target common.Hash
binary.BigEndian.PutUint64(target[0:8], prefix)
globalRandRead(target[8:])
return target
}
// globalRandRead fills b with pseudo-random bytes from math/rand. The rand
// package provides a Read function in Go 1.6 and later, but we can't use it
// yet because we still support Go 1.5.
func globalRandRead(b []byte) {
pos := 0
val := 0
for n := 0; n < len(b); n++ {
if pos == 0 {
val = rand.Int()
pos = 7
}
b[n] = byte(val)
val >>= 8
pos--
}
}
func (r *topicRadius) chooseLookupBucket(a, b int) int {
if a < 0 {
a = 0
}
if a > b {
return -1
}
c := 0
for i := a; i <= b; i++ {
if i >= len(r.buckets) || r.buckets[i].weights[trNoAdjust] < maxNoAdjust {
c++
}
}
if c == 0 {
return -1
}
rnd := randUint(uint32(c))
for i := a; i <= b; i++ {
if i >= len(r.buckets) || r.buckets[i].weights[trNoAdjust] < maxNoAdjust {
if rnd == 0 {
return i
}
rnd--
}
}
panic(nil) // should never happen
}
func (r *topicRadius) needMoreLookups(a, b int, maxValue float64) bool {
var max float64
if a < 0 {
a = 0
}
if b >= len(r.buckets) {
b = len(r.buckets) - 1
if r.buckets[b].value > max {
max = r.buckets[b].value
}
}
if b >= a {
for i := a; i <= b; i++ {
if r.buckets[i].value > max {
max = r.buckets[i].value
}
}
}
return maxValue-max < minPeakSize
}
func (r *topicRadius) recalcRadius() (radius uint64, radiusLookup int) {
maxBucket := 0
maxValue := float64(0)
now := mclock.Now()
v := float64(0)
for i := range r.buckets {
r.buckets[i].update(now)
v += r.buckets[i].weights[trOutside] - r.buckets[i].weights[trInside]
r.buckets[i].value = v
//fmt.Printf("%v %v | ", v, r.buckets[i].weights[trNoAdjust])
}
//fmt.Println()
slopeCross := -1
for i, b := range r.buckets {
v := b.value
if v < float64(i)*minSlope {
slopeCross = i
break
}
if v > maxValue {
maxValue = v
maxBucket = i + 1
}
}
minRadBucket := len(r.buckets)
sum := float64(0)
for minRadBucket > 0 && sum < minRightSum {
minRadBucket--
b := r.buckets[minRadBucket]
sum += b.weights[trInside] + b.weights[trOutside]
}
r.minRadius = uint64(math.Pow(2, 64-float64(minRadBucket)/radiusBucketsPerBit))
lookupLeft := -1
if r.needMoreLookups(0, maxBucket-lookupWidth-1, maxValue) {
lookupLeft = r.chooseLookupBucket(maxBucket-lookupWidth, maxBucket-1)
}
lookupRight := -1
if slopeCross != maxBucket && (minRadBucket <= maxBucket || r.needMoreLookups(maxBucket+lookupWidth, len(r.buckets)-1, maxValue)) {
for len(r.buckets) <= maxBucket+lookupWidth {
r.buckets = append(r.buckets, topicRadiusBucket{lookupSent: make(map[common.Hash]mclock.AbsTime)})
}
lookupRight = r.chooseLookupBucket(maxBucket, maxBucket+lookupWidth-1)
}
if lookupLeft == -1 {
radiusLookup = lookupRight
} else {
if lookupRight == -1 {
radiusLookup = lookupLeft
} else {
if randUint(2) == 0 {
radiusLookup = lookupLeft
} else {
radiusLookup = lookupRight
}
}
}
//fmt.Println("mb", maxBucket, "sc", slopeCross, "mrb", minRadBucket, "ll", lookupLeft, "lr", lookupRight, "mv", maxValue)
if radiusLookup == -1 {
// no more radius lookups needed at the moment, return a radius
r.converged = true
rad := maxBucket
if minRadBucket < rad {
rad = minRadBucket
}
radius = ^uint64(0)
if rad > 0 {
radius = uint64(math.Pow(2, 64-float64(rad)/radiusBucketsPerBit))
}
r.radius = radius
}
return
}
func (r *topicRadius) nextTarget(forceRegular bool) lookupInfo {
if !forceRegular {
_, radiusLookup := r.recalcRadius()
if radiusLookup != -1 {
target := r.targetForBucket(radiusLookup)
r.buckets[radiusLookup].lookupSent[target] = mclock.Now()
return lookupInfo{target: target, topic: r.topic, radiusLookup: true}
}
}
radExt := r.radius / 2
if radExt > maxRadius-r.radius {
radExt = maxRadius - r.radius
}
rnd := randUint64n(r.radius) + randUint64n(2*radExt)
if rnd > radExt {
rnd -= radExt
} else {
rnd = radExt - rnd
}
prefix := r.topicHashPrefix ^ rnd
var target common.Hash
binary.BigEndian.PutUint64(target[0:8], prefix)
globalRandRead(target[8:])
return lookupInfo{target: target, topic: r.topic, radiusLookup: false}
}
func (r *topicRadius) adjustWithTicket(now mclock.AbsTime, targetHash common.Hash, t ticketRef) {
wait := t.t.regTime[t.idx] - t.t.issueTime
inside := float64(wait)/float64(targetWaitTime) - 0.5
if inside > 1 {
inside = 1
}
if inside < 0 {
inside = 0
}
r.adjust(now, targetHash, t.t.node.sha, inside)
}
func (r *topicRadius) adjust(now mclock.AbsTime, targetHash, addrHash common.Hash, inside float64) {
bucket := r.getBucketIdx(addrHash)
//fmt.Println("adjust", bucket, len(r.buckets), inside)
if bucket >= len(r.buckets) {
return
}
r.buckets[bucket].adjust(now, inside)
delete(r.buckets[bucket].lookupSent, targetHash)
}


@@ -0,0 +1,407 @@
// Copyright 2016 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package discv5
import (
"container/heap"
"fmt"
"math"
"math/rand"
"time"
"github.com/ethereum/go-ethereum/common/mclock"
"github.com/ethereum/go-ethereum/log"
)
const (
maxEntries = 10000
maxEntriesPerTopic = 50
fallbackRegistrationExpiry = 1 * time.Hour
)
type Topic string
type topicEntry struct {
topic Topic
fifoIdx uint64
node *Node
expire mclock.AbsTime
}
type topicInfo struct {
entries map[uint64]*topicEntry
fifoHead, fifoTail uint64
rqItem *topicRequestQueueItem
wcl waitControlLoop
}
// getFifoTail removes the tail element from the fifo and returns it.
func (t *topicInfo) getFifoTail() *topicEntry {
for t.entries[t.fifoTail] == nil {
t.fifoTail++
}
tail := t.entries[t.fifoTail]
t.fifoTail++
return tail
}
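// The fifo is a map keyed by a monotonically increasing index: fifoHead is
// the next free slot, fifoTail the oldest possibly-live one, and deleted
// entries leave gaps that getFifoTail skips. Sketch (illustrative only):
//
//	// entries: {3: e3, 5: e5}, fifoTail == 3
//	// getFifoTail() returns e3 and advances fifoTail to 4;
//	// the next call skips the gap at 4 and returns e5.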
type nodeInfo struct {
entries map[Topic]*topicEntry
lastIssuedTicket, lastUsedTicket uint32
	// a ticket newer than lastUsedTicket cannot be used before noRegUntil (absolute time)
noRegUntil mclock.AbsTime
}
type topicTable struct {
db *nodeDB
self *Node
nodes map[*Node]*nodeInfo
topics map[Topic]*topicInfo
globalEntries uint64
requested topicRequestQueue
requestCnt uint64
lastGarbageCollection mclock.AbsTime
}
func newTopicTable(db *nodeDB, self *Node) *topicTable {
if printTestImgLogs {
fmt.Printf("*N %016x\n", self.sha[:8])
}
return &topicTable{
db: db,
nodes: make(map[*Node]*nodeInfo),
topics: make(map[Topic]*topicInfo),
self: self,
}
}
func (t *topicTable) getOrNewTopic(topic Topic) *topicInfo {
ti := t.topics[topic]
if ti == nil {
rqItem := &topicRequestQueueItem{
topic: topic,
priority: t.requestCnt,
}
ti = &topicInfo{
entries: make(map[uint64]*topicEntry),
rqItem: rqItem,
}
t.topics[topic] = ti
heap.Push(&t.requested, rqItem)
}
return ti
}
func (t *topicTable) checkDeleteTopic(topic Topic) {
ti := t.topics[topic]
if ti == nil {
return
}
if len(ti.entries) == 0 && ti.wcl.hasMinimumWaitPeriod() {
delete(t.topics, topic)
heap.Remove(&t.requested, ti.rqItem.index)
}
}
func (t *topicTable) getOrNewNode(node *Node) *nodeInfo {
n := t.nodes[node]
if n == nil {
//fmt.Printf("newNode %016x %016x\n", t.self.sha[:8], node.sha[:8])
var issued, used uint32
if t.db != nil {
issued, used = t.db.fetchTopicRegTickets(node.ID)
}
n = &nodeInfo{
entries: make(map[Topic]*topicEntry),
lastIssuedTicket: issued,
lastUsedTicket: used,
}
t.nodes[node] = n
}
return n
}
func (t *topicTable) checkDeleteNode(node *Node) {
if n, ok := t.nodes[node]; ok && len(n.entries) == 0 && n.noRegUntil < mclock.Now() {
//fmt.Printf("deleteNode %016x %016x\n", t.self.sha[:8], node.sha[:8])
delete(t.nodes, node)
}
}
func (t *topicTable) storeTicketCounters(node *Node) {
n := t.getOrNewNode(node)
if t.db != nil {
t.db.updateTopicRegTickets(node.ID, n.lastIssuedTicket, n.lastUsedTicket)
}
}
func (t *topicTable) getEntries(topic Topic) []*Node {
t.collectGarbage()
te := t.topics[topic]
if te == nil {
return nil
}
nodes := make([]*Node, len(te.entries))
i := 0
for _, e := range te.entries {
nodes[i] = e.node
i++
}
t.requestCnt++
t.requested.update(te.rqItem, t.requestCnt)
return nodes
}
func (t *topicTable) addEntry(node *Node, topic Topic) {
n := t.getOrNewNode(node)
// clear previous entries by the same node
for _, e := range n.entries {
t.deleteEntry(e)
}
	// deleteEntry may have removed this node via checkDeleteNode; fetch it again
n = t.getOrNewNode(node)
tm := mclock.Now()
te := t.getOrNewTopic(topic)
if len(te.entries) == maxEntriesPerTopic {
t.deleteEntry(te.getFifoTail())
}
if t.globalEntries == maxEntries {
t.deleteEntry(t.leastRequested()) // not empty, no need to check for nil
}
fifoIdx := te.fifoHead
te.fifoHead++
entry := &topicEntry{
topic: topic,
fifoIdx: fifoIdx,
node: node,
expire: tm + mclock.AbsTime(fallbackRegistrationExpiry),
}
if printTestImgLogs {
fmt.Printf("*+ %d %v %016x %016x\n", tm/1000000, topic, t.self.sha[:8], node.sha[:8])
}
te.entries[fifoIdx] = entry
n.entries[topic] = entry
t.globalEntries++
te.wcl.registered(tm)
}
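// Summary of the eviction order above (illustrative): a new registration
// first drops any previous entries by the same node (so a node holds at
// most one entry at a time), then evicts the oldest entry of the topic if
// the topic is full (maxEntriesPerTopic) and the fifo tail of the least
// recently requested topic if the whole table is full (maxEntries).
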
// leastRequested returns the fifo tail of the least recently requested
// topic; the caller removes the returned entry.
func (t *topicTable) leastRequested() *topicEntry {
for t.requested.Len() > 0 && t.topics[t.requested[0].topic] == nil {
heap.Pop(&t.requested)
}
if t.requested.Len() == 0 {
return nil
}
return t.topics[t.requested[0].topic].getFifoTail()
}
// entry should exist
func (t *topicTable) deleteEntry(e *topicEntry) {
if printTestImgLogs {
fmt.Printf("*- %d %v %016x %016x\n", mclock.Now()/1000000, e.topic, t.self.sha[:8], e.node.sha[:8])
}
ne := t.nodes[e.node].entries
delete(ne, e.topic)
if len(ne) == 0 {
t.checkDeleteNode(e.node)
}
te := t.topics[e.topic]
delete(te.entries, e.fifoIdx)
if len(te.entries) == 0 {
t.checkDeleteTopic(e.topic)
}
t.globalEntries--
}
// It is assumed that topics and waitPeriods have the same length.
func (t *topicTable) useTicket(node *Node, serialNo uint32, topics []Topic, idx int, issueTime uint64, waitPeriods []uint32) (registered bool) {
log.Trace("Using discovery ticket", "serial", serialNo, "topics", topics, "waits", waitPeriods)
//fmt.Println("useTicket", serialNo, topics, waitPeriods)
t.collectGarbage()
n := t.getOrNewNode(node)
if serialNo < n.lastUsedTicket {
return false
}
tm := mclock.Now()
if serialNo > n.lastUsedTicket && tm < n.noRegUntil {
return false
}
if serialNo != n.lastUsedTicket {
n.lastUsedTicket = serialNo
n.noRegUntil = tm + mclock.AbsTime(noRegTimeout())
t.storeTicketCounters(node)
}
currTime := uint64(tm / mclock.AbsTime(time.Second))
regTime := issueTime + uint64(waitPeriods[idx])
relTime := int64(currTime - regTime)
if relTime >= -1 && relTime <= regTimeWindow+1 { // give clients a little security margin on both ends
if e := n.entries[topics[idx]]; e == nil {
t.addEntry(node, topics[idx])
} else {
// if there is an active entry, don't move to the front of the FIFO but prolong expire time
e.expire = tm + mclock.AbsTime(fallbackRegistrationExpiry)
}
return true
}
return false
}
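// Worked example (illustrative): with regTimeWindow == 10 seconds, a ticket
// slot computed as regTime = issueTime + waitPeriods[idx] is accepted while
// currTime lies in [regTime-1, regTime+11], i.e. up to one second early and
// one second after the ten second window closes.
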
func (t *topicTable) getTicket(node *Node, topics []Topic) *ticket {
t.collectGarbage()
now := mclock.Now()
n := t.getOrNewNode(node)
n.lastIssuedTicket++
t.storeTicketCounters(node)
tic := &ticket{
issueTime: now,
topics: topics,
serial: n.lastIssuedTicket,
regTime: make([]mclock.AbsTime, len(topics)),
}
for i, topic := range topics {
var waitPeriod time.Duration
		if ti := t.topics[topic]; ti != nil {
			waitPeriod = ti.wcl.waitPeriod
} else {
waitPeriod = minWaitPeriod
}
tic.regTime[i] = now + mclock.AbsTime(waitPeriod)
}
return tic
}
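// Example (illustrative): a ticket for topics A and B, where A's wait
// control loop currently demands a five minute wait and B is unknown
// locally, gets regTime[0] = now + 5*time.Minute and
// regTime[1] = now + minWaitPeriod. The increasing serial lets useTicket
// reject tickets older than the last one a node has already used.
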
const gcInterval = time.Minute
func (t *topicTable) collectGarbage() {
tm := mclock.Now()
if time.Duration(tm-t.lastGarbageCollection) < gcInterval {
return
}
t.lastGarbageCollection = tm
for node, n := range t.nodes {
for _, e := range n.entries {
if e.expire <= tm {
t.deleteEntry(e)
}
}
t.checkDeleteNode(node)
}
for topic := range t.topics {
t.checkDeleteTopic(topic)
}
}
const (
minWaitPeriod = time.Minute
regTimeWindow = 10 // seconds
avgnoRegTimeout = time.Minute * 10
// target average interval between two incoming ad requests
wcTargetRegInterval = time.Minute * 10 / maxEntriesPerTopic
	// time constant of the exponential controller in nextWaitPeriod
wcTimeConst = time.Minute * 10
)
// initialization is not required; waitPeriod is set to minWaitPeriod at the first registration
type waitControlLoop struct {
lastIncoming mclock.AbsTime
waitPeriod time.Duration
}
func (w *waitControlLoop) registered(tm mclock.AbsTime) {
w.waitPeriod = w.nextWaitPeriod(tm)
w.lastIncoming = tm
}
func (w *waitControlLoop) nextWaitPeriod(tm mclock.AbsTime) time.Duration {
period := tm - w.lastIncoming
wp := time.Duration(float64(w.waitPeriod) * math.Exp((float64(wcTargetRegInterval)-float64(period))/float64(wcTimeConst)))
if wp < minWaitPeriod {
wp = minWaitPeriod
}
return wp
}
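// Worked example (illustrative): the multiplier is
// exp((wcTargetRegInterval - period) / wcTimeConst). Registrations arriving
// exactly wcTargetRegInterval apart give exp(0) == 1, a stable wait period;
// back-to-back arrivals (period close to 0) grow it by about
// exp(12s/600s), roughly 1.02 per registration, while long idle gaps
// shrink it back toward minWaitPeriod.
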
func (w *waitControlLoop) hasMinimumWaitPeriod() bool {
return w.nextWaitPeriod(mclock.Now()) == minWaitPeriod
}
// noRegTimeout returns an exponentially distributed random timeout with a
// mean of avgnoRegTimeout, capped at 100 times the mean.
func noRegTimeout() time.Duration {
e := rand.ExpFloat64()
if e > 100 {
e = 100
}
return time.Duration(float64(avgnoRegTimeout) * e)
}
type topicRequestQueueItem struct {
topic Topic
priority uint64
index int
}
// A topicRequestQueue implements heap.Interface and holds topicRequestQueueItems.
type topicRequestQueue []*topicRequestQueueItem
func (tq topicRequestQueue) Len() int { return len(tq) }
func (tq topicRequestQueue) Less(i, j int) bool {
return tq[i].priority < tq[j].priority
}
func (tq topicRequestQueue) Swap(i, j int) {
tq[i], tq[j] = tq[j], tq[i]
tq[i].index = i
tq[j].index = j
}
func (tq *topicRequestQueue) Push(x interface{}) {
n := len(*tq)
item := x.(*topicRequestQueueItem)
item.index = n
*tq = append(*tq, item)
}
func (tq *topicRequestQueue) Pop() interface{} {
old := *tq
n := len(old)
item := old[n-1]
item.index = -1
*tq = old[0 : n-1]
return item
}
func (tq *topicRequestQueue) update(item *topicRequestQueueItem, priority uint64) {
item.priority = priority
heap.Fix(tq, item.index)
}
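// Illustrative usage (not part of the original code): items carry a
// monotonically increasing request counter as priority, so the heap root is
// always the least recently requested topic.
//
//	var tq topicRequestQueue
//	heap.Push(&tq, &topicRequestQueueItem{topic: "a", priority: 1})
//	heap.Push(&tq, &topicRequestQueueItem{topic: "b", priority: 2})
//	least := tq[0]      // "a": lowest priority, least recently requested
//	tq.update(least, 3) // bump after serving a request; "b" becomes the root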

View File

@@ -0,0 +1,429 @@
// Copyright 2016 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package discv5
import (
"bytes"
"crypto/ecdsa"
"errors"
"fmt"
"net"
"time"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/p2p/netutil"
"github.com/ethereum/go-ethereum/rlp"
)
const Version = 4
// Errors
var (
errPacketTooSmall = errors.New("too small")
errBadPrefix = errors.New("bad prefix")
)
// Timeouts
const (
respTimeout = 500 * time.Millisecond
expiration = 20 * time.Second
)
// RPC request structures
type (
ping struct {
Version uint
From, To rpcEndpoint
Expiration uint64
// v5
Topics []Topic
// Ignore additional fields (for forward compatibility).
Rest []rlp.RawValue `rlp:"tail"`
}
// pong is the reply to ping.
pong struct {
// This field should mirror the UDP envelope address
		// of the ping packet, which provides a way to discover
		// the external address (after NAT).
To rpcEndpoint
ReplyTok []byte // This contains the hash of the ping packet.
Expiration uint64 // Absolute timestamp at which the packet becomes invalid.
// v5
TopicHash common.Hash
TicketSerial uint32
WaitPeriods []uint32
// Ignore additional fields (for forward compatibility).
Rest []rlp.RawValue `rlp:"tail"`
}
// findnode is a query for nodes close to the given target.
findnode struct {
Target NodeID // doesn't need to be an actual public key
Expiration uint64
// Ignore additional fields (for forward compatibility).
Rest []rlp.RawValue `rlp:"tail"`
}
	// findnodeHash is a query for nodes close to the given target hash.
findnodeHash struct {
Target common.Hash
Expiration uint64
// Ignore additional fields (for forward compatibility).
Rest []rlp.RawValue `rlp:"tail"`
}
// reply to findnode
neighbors struct {
Nodes []rpcNode
Expiration uint64
// Ignore additional fields (for forward compatibility).
Rest []rlp.RawValue `rlp:"tail"`
}
topicRegister struct {
Topics []Topic
Idx uint
Pong []byte
}
topicQuery struct {
Topic Topic
Expiration uint64
}
// reply to topicQuery
topicNodes struct {
Echo common.Hash
Nodes []rpcNode
}
rpcNode struct {
IP net.IP // len 4 for IPv4 or 16 for IPv6
UDP uint16 // for discovery protocol
TCP uint16 // for RLPx protocol
ID NodeID
}
rpcEndpoint struct {
IP net.IP // len 4 for IPv4 or 16 for IPv6
UDP uint16 // for discovery protocol
TCP uint16 // for RLPx protocol
}
)
var (
versionPrefix = []byte("temporary discovery v5")
versionPrefixSize = len(versionPrefix)
sigSize = 520 / 8
headSize = versionPrefixSize + sigSize // space of packet frame data
)
// Neighbors replies are sent across multiple packets to
// stay below the 1280 byte limit. We compute the maximum number
// of entries by stuffing a packet until it grows too large.
var maxNeighbors = func() int {
p := neighbors{Expiration: ^uint64(0)}
maxSizeNode := rpcNode{IP: make(net.IP, 16), UDP: ^uint16(0), TCP: ^uint16(0)}
for n := 0; ; n++ {
p.Nodes = append(p.Nodes, maxSizeNode)
size, _, err := rlp.EncodeToReader(p)
if err != nil {
// If this ever happens, it will be caught by the unit tests.
panic("cannot encode: " + err.Error())
}
if headSize+size+1 >= 1280 {
return n
}
}
}()
var maxTopicNodes = func() int {
p := topicNodes{}
maxSizeNode := rpcNode{IP: make(net.IP, 16), UDP: ^uint16(0), TCP: ^uint16(0)}
for n := 0; ; n++ {
p.Nodes = append(p.Nodes, maxSizeNode)
size, _, err := rlp.EncodeToReader(p)
if err != nil {
// If this ever happens, it will be caught by the unit tests.
panic("cannot encode: " + err.Error())
}
if headSize+size+1 >= 1280 {
return n
}
}
}()
func makeEndpoint(addr *net.UDPAddr, tcpPort uint16) rpcEndpoint {
ip := addr.IP.To4()
if ip == nil {
ip = addr.IP.To16()
}
return rpcEndpoint{IP: ip, UDP: uint16(addr.Port), TCP: tcpPort}
}
func nodeFromRPC(sender *net.UDPAddr, rn rpcNode) (*Node, error) {
if err := netutil.CheckRelayIP(sender.IP, rn.IP); err != nil {
return nil, err
}
n := NewNode(rn.ID, rn.IP, rn.UDP, rn.TCP)
err := n.validateComplete()
return n, err
}
func nodeToRPC(n *Node) rpcNode {
return rpcNode{ID: n.ID, IP: n.IP, UDP: n.UDP, TCP: n.TCP}
}
type ingressPacket struct {
remoteID NodeID
remoteAddr *net.UDPAddr
ev nodeEvent
hash []byte
data interface{} // one of the RPC structs
rawData []byte
}
type conn interface {
ReadFromUDP(b []byte) (n int, addr *net.UDPAddr, err error)
WriteToUDP(b []byte, addr *net.UDPAddr) (n int, err error)
Close() error
LocalAddr() net.Addr
}
// udp implements the RPC protocol.
type udp struct {
conn conn
priv *ecdsa.PrivateKey
ourEndpoint rpcEndpoint
net *Network
}
// ListenUDP returns a new Network that listens for discovery v5 UDP packets
// on the given connection.
func ListenUDP(priv *ecdsa.PrivateKey, conn conn, nodeDBPath string, netrestrict *netutil.Netlist) (*Network, error) {
realaddr := conn.LocalAddr().(*net.UDPAddr)
transport, err := listenUDP(priv, conn, realaddr)
if err != nil {
return nil, err
}
net, err := newNetwork(transport, priv.PublicKey, nodeDBPath, netrestrict)
if err != nil {
return nil, err
}
log.Info("UDP listener up", "net", net.tab.self)
transport.net = net
go transport.readLoop()
return net, nil
}
func listenUDP(priv *ecdsa.PrivateKey, conn conn, realaddr *net.UDPAddr) (*udp, error) {
return &udp{conn: conn, priv: priv, ourEndpoint: makeEndpoint(realaddr, uint16(realaddr.Port))}, nil
}
func (t *udp) localAddr() *net.UDPAddr {
return t.conn.LocalAddr().(*net.UDPAddr)
}
func (t *udp) Close() {
t.conn.Close()
}
func (t *udp) send(remote *Node, ptype nodeEvent, data interface{}) (hash []byte) {
hash, _ = t.sendPacket(remote.ID, remote.addr(), byte(ptype), data)
return hash
}
func (t *udp) sendPing(remote *Node, toaddr *net.UDPAddr, topics []Topic) (hash []byte) {
hash, _ = t.sendPacket(remote.ID, toaddr, byte(pingPacket), ping{
Version: Version,
From: t.ourEndpoint,
To: makeEndpoint(toaddr, uint16(toaddr.Port)), // TODO: maybe use known TCP port from DB
Expiration: uint64(time.Now().Add(expiration).Unix()),
Topics: topics,
})
return hash
}
func (t *udp) sendNeighbours(remote *Node, results []*Node) {
// Send neighbors in chunks with at most maxNeighbors per packet
// to stay below the 1280 byte limit.
p := neighbors{Expiration: uint64(time.Now().Add(expiration).Unix())}
for i, result := range results {
p.Nodes = append(p.Nodes, nodeToRPC(result))
if len(p.Nodes) == maxNeighbors || i == len(results)-1 {
t.sendPacket(remote.ID, remote.addr(), byte(neighborsPacket), p)
p.Nodes = p.Nodes[:0]
}
}
}
func (t *udp) sendFindnodeHash(remote *Node, target common.Hash) {
t.sendPacket(remote.ID, remote.addr(), byte(findnodeHashPacket), findnodeHash{
Target: target,
Expiration: uint64(time.Now().Add(expiration).Unix()),
})
}
func (t *udp) sendTopicRegister(remote *Node, topics []Topic, idx int, pong []byte) {
t.sendPacket(remote.ID, remote.addr(), byte(topicRegisterPacket), topicRegister{
Topics: topics,
Idx: uint(idx),
Pong: pong,
})
}
func (t *udp) sendTopicNodes(remote *Node, queryHash common.Hash, nodes []*Node) {
p := topicNodes{Echo: queryHash}
var sent bool
for _, result := range nodes {
if result.IP.Equal(t.net.tab.self.IP) || netutil.CheckRelayIP(remote.IP, result.IP) == nil {
p.Nodes = append(p.Nodes, nodeToRPC(result))
}
if len(p.Nodes) == maxTopicNodes {
t.sendPacket(remote.ID, remote.addr(), byte(topicNodesPacket), p)
p.Nodes = p.Nodes[:0]
sent = true
}
}
if !sent || len(p.Nodes) > 0 {
t.sendPacket(remote.ID, remote.addr(), byte(topicNodesPacket), p)
}
}
func (t *udp) sendPacket(toid NodeID, toaddr *net.UDPAddr, ptype byte, req interface{}) (hash []byte, err error) {
//fmt.Println("sendPacket", nodeEvent(ptype), toaddr.String(), toid.String())
packet, hash, err := encodePacket(t.priv, ptype, req)
if err != nil {
//fmt.Println(err)
return hash, err
}
log.Trace(fmt.Sprintf(">>> %v to %x@%v", nodeEvent(ptype), toid[:8], toaddr))
if nbytes, err := t.conn.WriteToUDP(packet, toaddr); err != nil {
log.Trace(fmt.Sprint("UDP send failed:", err))
} else {
egressTrafficMeter.Mark(int64(nbytes))
}
//fmt.Println(err)
return hash, err
}
// zeroed padding space for encodePacket.
var headSpace = make([]byte, headSize)
func encodePacket(priv *ecdsa.PrivateKey, ptype byte, req interface{}) (p, hash []byte, err error) {
b := new(bytes.Buffer)
b.Write(headSpace)
b.WriteByte(ptype)
if err := rlp.Encode(b, req); err != nil {
log.Error(fmt.Sprint("error encoding packet:", err))
return nil, nil, err
}
packet := b.Bytes()
sig, err := crypto.Sign(crypto.Keccak256(packet[headSize:]), priv)
if err != nil {
log.Error(fmt.Sprint("could not sign packet:", err))
return nil, nil, err
}
copy(packet, versionPrefix)
copy(packet[versionPrefixSize:], sig)
hash = crypto.Keccak256(packet[versionPrefixSize:])
return packet, hash, nil
}
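// Wire layout produced by encodePacket (illustrative sketch):
//
//	versionPrefix (22 bytes) | signature (65 bytes) | ptype (1 byte) | RLP payload
//
// The signature covers keccak256(ptype || payload), and the packet hash
// echoed in pong.ReplyTok is keccak256(signature || ptype || payload).
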
// readLoop runs in its own goroutine. It injects ingress UDP packets
// into the network loop.
func (t *udp) readLoop() {
defer t.conn.Close()
// Discovery packets are defined to be no larger than 1280 bytes.
// Packets larger than this size will be cut at the end and treated
// as invalid because their hash won't match.
buf := make([]byte, 1280)
for {
nbytes, from, err := t.conn.ReadFromUDP(buf)
ingressTrafficMeter.Mark(int64(nbytes))
if netutil.IsTemporaryError(err) {
// Ignore temporary read errors.
log.Debug(fmt.Sprintf("Temporary read error: %v", err))
continue
} else if err != nil {
			// Shut down the loop for permanent errors.
log.Debug(fmt.Sprintf("Read error: %v", err))
return
}
t.handlePacket(from, buf[:nbytes])
}
}
func (t *udp) handlePacket(from *net.UDPAddr, buf []byte) error {
pkt := ingressPacket{remoteAddr: from}
if err := decodePacket(buf, &pkt); err != nil {
log.Debug(fmt.Sprintf("Bad packet from %v: %v", from, err))
//fmt.Println("bad packet", err)
return err
}
t.net.reqReadPacket(pkt)
return nil
}
func decodePacket(buffer []byte, pkt *ingressPacket) error {
if len(buffer) < headSize+1 {
return errPacketTooSmall
}
buf := make([]byte, len(buffer))
copy(buf, buffer)
prefix, sig, sigdata := buf[:versionPrefixSize], buf[versionPrefixSize:headSize], buf[headSize:]
if !bytes.Equal(prefix, versionPrefix) {
return errBadPrefix
}
fromID, err := recoverNodeID(crypto.Keccak256(buf[headSize:]), sig)
if err != nil {
return err
}
pkt.rawData = buf
pkt.hash = crypto.Keccak256(buf[versionPrefixSize:])
pkt.remoteID = fromID
switch pkt.ev = nodeEvent(sigdata[0]); pkt.ev {
case pingPacket:
pkt.data = new(ping)
case pongPacket:
pkt.data = new(pong)
case findnodePacket:
pkt.data = new(findnode)
case neighborsPacket:
pkt.data = new(neighbors)
case findnodeHashPacket:
pkt.data = new(findnodeHash)
case topicRegisterPacket:
pkt.data = new(topicRegister)
case topicQueryPacket:
pkt.data = new(topicQuery)
case topicNodesPacket:
pkt.data = new(topicNodes)
default:
return fmt.Errorf("unknown packet type: %d", sigdata[0])
}
s := rlp.NewStream(bytes.NewReader(sigdata[1:]), 0)
err = s.Decode(pkt.data)
return err
}