82
vendor/github.com/status-im/status-go/services/ext/README.md
generated
vendored
Normal file
82
vendor/github.com/status-im/status-go/services/ext/README.md
generated
vendored
Normal file
@@ -0,0 +1,82 @@
|
||||
Whisper API Extension
|
||||
=====================
|
||||
|
||||
API
|
||||
---
|
||||
|
||||
|
||||
#### shhext_getNewFilterMessages
|
||||
|
||||
Accepts the same input as [`shh_getFilterMessages`](https://github.com/ethereum/wiki/wiki/JSON-RPC#shh_getFilterChanges).
|
||||
|
||||
##### Returns
|
||||
|
||||
Returns a list of whisper messages matching the specified filter. Filters out
|
||||
the messages already confirmed received by [`shhext_confirmMessagesProcessed`](#shhextconfirmmessagesprocessed)
|
||||
|
||||
Deduplication is made using the whisper envelope content and topic only, so the
|
||||
same content received in different whisper envelopes will be deduplicated.
|
||||
|
||||
|
||||
#### shhext_confirmMessagesProcessed
|
||||
|
||||
Confirms whisper messages received and processed on the client side. These
|
||||
messages won't appear anymore when [`shhext_getNewFilterMessages`](#shhextgetnewfiltermessages)
|
||||
is called.
|
||||
|
||||
##### Parameters
|
||||
|
||||
Gets a list of whisper envelopes.
|
||||
|
||||
|
||||
#### shhext_post
|
||||
|
||||
Accepts same input as [`shh_post`](https://github.com/ethereum/wiki/wiki/JSON-RPC#shh_post).
|
||||
|
||||
##### Returns
|
||||
|
||||
`DATA`, 32 Bytes - the envelope hash
|
||||
|
||||
#### shhext_requestMessages
|
||||
|
||||
Sends a request for historic messages to a mail server.
|
||||
|
||||
##### Parameters
|
||||
|
||||
1. `Object` - The message request object:
|
||||
|
||||
- `mailServerPeer`:`URL` - Mail servers' enode addess
|
||||
- `from`:`QUANTITY` - (optional) Lower bound of time range as unix timestamp, default is 24 hours back from now
|
||||
- `to`:`QUANTITY`- (optional) Upper bound of time range as unix timestamp, default is now
|
||||
- `topic`:`DATA`, 4 Bytes - Regular whisper topic
|
||||
- `symKeyID`:`DATA`- ID of a symmetric key to authenticate to mail server, derived from mail server password
|
||||
|
||||
##### Returns
|
||||
|
||||
`Boolean` - returns `true` if the request was send, otherwise `false`.
|
||||
|
||||
Signals
|
||||
-------
|
||||
|
||||
Sends sent signal once per envelope.
|
||||
|
||||
```json
|
||||
{
|
||||
"type": "envelope.sent",
|
||||
"event": {
|
||||
"hash": "0xea0b93079ed32588628f1cabbbb5ed9e4d50b7571064c2962c3853972db67790"
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
Sends expired signal if envelope dropped from whisper local queue before it was
|
||||
sent to any peer on the network.
|
||||
|
||||
```json
|
||||
{
|
||||
"type": "envelope.expired",
|
||||
"event": {
|
||||
"hash": "0x754f4c12dccb14886f791abfeb77ffb86330d03d5a4ba6f37a8c21281988b69e"
|
||||
}
|
||||
}
|
||||
```
|
||||
1808
vendor/github.com/status-im/status-go/services/ext/api.go
generated
vendored
Normal file
1808
vendor/github.com/status-im/status-go/services/ext/api.go
generated
vendored
Normal file
File diff suppressed because it is too large
Load Diff
55
vendor/github.com/status-im/status-go/services/ext/context.go
generated
vendored
Normal file
55
vendor/github.com/status-im/status-go/services/ext/context.go
generated
vendored
Normal file
@@ -0,0 +1,55 @@
|
||||
package ext
|
||||
|
||||
import (
|
||||
"context"
|
||||
"time"
|
||||
|
||||
"github.com/status-im/status-go/db"
|
||||
)
|
||||
|
||||
// ContextKey is a type used for keys in ext Context.
|
||||
type ContextKey struct {
|
||||
Name string
|
||||
}
|
||||
|
||||
// NewContextKey returns new ContextKey instance.
|
||||
func NewContextKey(name string) ContextKey {
|
||||
return ContextKey{Name: name}
|
||||
}
|
||||
|
||||
var (
|
||||
historyDBKey = NewContextKey("history_db")
|
||||
requestRegistryKey = NewContextKey("request_registry")
|
||||
timeKey = NewContextKey("time")
|
||||
)
|
||||
|
||||
// NewContext creates Context with all required fields.
|
||||
func NewContext(ctx context.Context, source TimeSource, registry *RequestsRegistry, storage db.Storage) Context {
|
||||
ctx = context.WithValue(ctx, historyDBKey, db.NewHistoryStore(storage))
|
||||
ctx = context.WithValue(ctx, timeKey, source)
|
||||
ctx = context.WithValue(ctx, requestRegistryKey, registry)
|
||||
return Context{ctx}
|
||||
}
|
||||
|
||||
// TimeSource is a type used for current time.
|
||||
type TimeSource func() time.Time
|
||||
|
||||
// Context provides access to request-scoped values.
|
||||
type Context struct {
|
||||
context.Context
|
||||
}
|
||||
|
||||
// HistoryStore returns db.HistoryStore instance associated with this request.
|
||||
func (c Context) HistoryStore() db.HistoryStore {
|
||||
return c.Value(historyDBKey).(db.HistoryStore)
|
||||
}
|
||||
|
||||
// Time returns current time using time function associated with this request.
|
||||
func (c Context) Time() time.Time {
|
||||
return c.Value(timeKey).(TimeSource)()
|
||||
}
|
||||
|
||||
// RequestRegistry returns RequestRegistry that tracks each request life-span.
|
||||
func (c Context) RequestRegistry() *RequestsRegistry {
|
||||
return c.Value(requestRegistryKey).(*RequestsRegistry)
|
||||
}
|
||||
48
vendor/github.com/status-im/status-go/services/ext/handler_mock.go
generated
vendored
Normal file
48
vendor/github.com/status-im/status-go/services/ext/handler_mock.go
generated
vendored
Normal file
@@ -0,0 +1,48 @@
|
||||
package ext
|
||||
|
||||
import (
|
||||
"github.com/status-im/status-go/eth-node/types"
|
||||
)
|
||||
|
||||
type failureMessage struct {
|
||||
IDs [][]byte
|
||||
Error error
|
||||
}
|
||||
|
||||
func NewHandlerMock(buf int) HandlerMock {
|
||||
return HandlerMock{
|
||||
confirmations: make(chan [][]byte, buf),
|
||||
expirations: make(chan failureMessage, buf),
|
||||
requestsCompleted: make(chan types.Hash, buf),
|
||||
requestsExpired: make(chan types.Hash, buf),
|
||||
requestsFailed: make(chan types.Hash, buf),
|
||||
}
|
||||
}
|
||||
|
||||
type HandlerMock struct {
|
||||
confirmations chan [][]byte
|
||||
expirations chan failureMessage
|
||||
requestsCompleted chan types.Hash
|
||||
requestsExpired chan types.Hash
|
||||
requestsFailed chan types.Hash
|
||||
}
|
||||
|
||||
func (t HandlerMock) EnvelopeSent(ids [][]byte) {
|
||||
t.confirmations <- ids
|
||||
}
|
||||
|
||||
func (t HandlerMock) EnvelopeExpired(ids [][]byte, err error) {
|
||||
t.expirations <- failureMessage{IDs: ids, Error: err}
|
||||
}
|
||||
|
||||
func (t HandlerMock) MailServerRequestCompleted(requestID types.Hash, lastEnvelopeHash types.Hash, cursor []byte, err error) {
|
||||
if err == nil {
|
||||
t.requestsCompleted <- requestID
|
||||
} else {
|
||||
t.requestsFailed <- requestID
|
||||
}
|
||||
}
|
||||
|
||||
func (t HandlerMock) MailServerRequestExpired(hash types.Hash) {
|
||||
t.requestsExpired <- hash
|
||||
}
|
||||
136
vendor/github.com/status-im/status-go/services/ext/mailrequests.go
generated
vendored
Normal file
136
vendor/github.com/status-im/status-go/services/ext/mailrequests.go
generated
vendored
Normal file
@@ -0,0 +1,136 @@
|
||||
package ext
|
||||
|
||||
import (
|
||||
"sync"
|
||||
|
||||
"github.com/ethereum/go-ethereum/log"
|
||||
|
||||
"github.com/status-im/status-go/eth-node/types"
|
||||
"github.com/status-im/status-go/services/ext/mailservers"
|
||||
)
|
||||
|
||||
// EnvelopeState in local tracker
|
||||
type EnvelopeState int
|
||||
|
||||
const (
|
||||
// NotRegistered returned if asked hash wasn't registered in the tracker.
|
||||
NotRegistered EnvelopeState = -1
|
||||
// MailServerRequestSent is set when p2p request is sent to the mailserver
|
||||
MailServerRequestSent
|
||||
)
|
||||
|
||||
// MailRequestMonitor is responsible for monitoring history request to mailservers.
|
||||
type MailRequestMonitor struct {
|
||||
eventSub mailservers.EnvelopeEventSubscriber
|
||||
handler EnvelopeEventsHandler
|
||||
|
||||
mu sync.Mutex
|
||||
cache map[types.Hash]EnvelopeState
|
||||
|
||||
requestsRegistry *RequestsRegistry
|
||||
|
||||
wg sync.WaitGroup
|
||||
quit chan struct{}
|
||||
}
|
||||
|
||||
func NewMailRequestMonitor(eventSub mailservers.EnvelopeEventSubscriber, h EnvelopeEventsHandler, reg *RequestsRegistry) *MailRequestMonitor {
|
||||
return &MailRequestMonitor{
|
||||
eventSub: eventSub,
|
||||
handler: h,
|
||||
cache: make(map[types.Hash]EnvelopeState),
|
||||
requestsRegistry: reg,
|
||||
}
|
||||
}
|
||||
|
||||
// Start processing events.
|
||||
func (m *MailRequestMonitor) Start() {
|
||||
m.quit = make(chan struct{})
|
||||
m.wg.Add(1)
|
||||
go func() {
|
||||
m.handleEnvelopeEvents()
|
||||
m.wg.Done()
|
||||
}()
|
||||
}
|
||||
|
||||
// Stop process events.
|
||||
func (m *MailRequestMonitor) Stop() {
|
||||
close(m.quit)
|
||||
m.wg.Wait()
|
||||
}
|
||||
|
||||
func (m *MailRequestMonitor) GetState(hash types.Hash) EnvelopeState {
|
||||
m.mu.Lock()
|
||||
defer m.mu.Unlock()
|
||||
state, exist := m.cache[hash]
|
||||
if !exist {
|
||||
return NotRegistered
|
||||
}
|
||||
return state
|
||||
}
|
||||
|
||||
// handleEnvelopeEvents processes whisper envelope events
|
||||
func (m *MailRequestMonitor) handleEnvelopeEvents() {
|
||||
events := make(chan types.EnvelopeEvent, 100) // must be buffered to prevent blocking whisper
|
||||
sub := m.eventSub.SubscribeEnvelopeEvents(events)
|
||||
defer sub.Unsubscribe()
|
||||
for {
|
||||
select {
|
||||
case <-m.quit:
|
||||
return
|
||||
case event := <-events:
|
||||
m.handleEvent(event)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// handleEvent based on type of the event either triggers
|
||||
// confirmation handler or removes hash from MailRequestMonitor
|
||||
func (m *MailRequestMonitor) handleEvent(event types.EnvelopeEvent) {
|
||||
handlers := map[types.EventType]func(types.EnvelopeEvent){
|
||||
types.EventMailServerRequestSent: m.handleRequestSent,
|
||||
types.EventMailServerRequestCompleted: m.handleEventMailServerRequestCompleted,
|
||||
types.EventMailServerRequestExpired: m.handleEventMailServerRequestExpired,
|
||||
}
|
||||
|
||||
if handler, ok := handlers[event.Event]; ok {
|
||||
handler(event)
|
||||
}
|
||||
}
|
||||
|
||||
func (m *MailRequestMonitor) handleRequestSent(event types.EnvelopeEvent) {
|
||||
m.mu.Lock()
|
||||
defer m.mu.Unlock()
|
||||
m.cache[event.Hash] = MailServerRequestSent
|
||||
}
|
||||
|
||||
func (m *MailRequestMonitor) handleEventMailServerRequestCompleted(event types.EnvelopeEvent) {
|
||||
m.mu.Lock()
|
||||
defer m.mu.Unlock()
|
||||
m.requestsRegistry.Unregister(event.Hash)
|
||||
state, ok := m.cache[event.Hash]
|
||||
if !ok || state != MailServerRequestSent {
|
||||
return
|
||||
}
|
||||
log.Debug("mailserver response received", "hash", event.Hash)
|
||||
delete(m.cache, event.Hash)
|
||||
if m.handler != nil {
|
||||
if resp, ok := event.Data.(*types.MailServerResponse); ok {
|
||||
m.handler.MailServerRequestCompleted(event.Hash, resp.LastEnvelopeHash, resp.Cursor, resp.Error)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (m *MailRequestMonitor) handleEventMailServerRequestExpired(event types.EnvelopeEvent) {
|
||||
m.mu.Lock()
|
||||
defer m.mu.Unlock()
|
||||
m.requestsRegistry.Unregister(event.Hash)
|
||||
state, ok := m.cache[event.Hash]
|
||||
if !ok || state != MailServerRequestSent {
|
||||
return
|
||||
}
|
||||
log.Debug("mailserver response expired", "hash", event.Hash)
|
||||
delete(m.cache, event.Hash)
|
||||
if m.handler != nil {
|
||||
m.handler.MailServerRequestExpired(event.Hash)
|
||||
}
|
||||
}
|
||||
145
vendor/github.com/status-im/status-go/services/ext/mailservers/cache.go
generated
vendored
Normal file
145
vendor/github.com/status-im/status-go/services/ext/mailservers/cache.go
generated
vendored
Normal file
@@ -0,0 +1,145 @@
|
||||
package mailservers
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"time"
|
||||
|
||||
"github.com/syndtr/goleveldb/leveldb"
|
||||
"github.com/syndtr/goleveldb/leveldb/iterator"
|
||||
"github.com/syndtr/goleveldb/leveldb/util"
|
||||
|
||||
"github.com/ethereum/go-ethereum/p2p/enode"
|
||||
|
||||
"github.com/status-im/status-go/db"
|
||||
"github.com/status-im/status-go/eth-node/types"
|
||||
)
|
||||
|
||||
// NewPeerRecord returns instance of the peer record.
|
||||
func NewPeerRecord(node *enode.Node) PeerRecord {
|
||||
return PeerRecord{node: node}
|
||||
}
|
||||
|
||||
// PeerRecord is set data associated with each peer that is stored on disk.
|
||||
// PeerRecord stored with a enode as a key in leveldb, and body marshalled as json.
|
||||
type PeerRecord struct {
|
||||
node *enode.Node
|
||||
|
||||
// last time it was used.
|
||||
LastUsed time.Time
|
||||
}
|
||||
|
||||
// Encode encodes PeerRecords to bytes.
|
||||
func (r PeerRecord) Encode() ([]byte, error) {
|
||||
return json.Marshal(r)
|
||||
}
|
||||
|
||||
// ID returns enode identity of the node.
|
||||
func (r PeerRecord) ID() enode.ID {
|
||||
return r.node.ID()
|
||||
}
|
||||
|
||||
// Node returs pointer to original object.
|
||||
// enode.Node doensn't allow modification on the object.
|
||||
func (r PeerRecord) Node() *enode.Node {
|
||||
return r.node
|
||||
}
|
||||
|
||||
// EncodeKey returns bytes that will should be used as a key in persistent storage.
|
||||
func (r PeerRecord) EncodeKey() ([]byte, error) {
|
||||
return r.Node().MarshalText()
|
||||
}
|
||||
|
||||
// NewCache returns pointer to a Cache instance.
|
||||
func NewCache(db *leveldb.DB) *Cache {
|
||||
return &Cache{db: db}
|
||||
}
|
||||
|
||||
// Cache is wrapper for operations on disk with leveldb.
|
||||
type Cache struct {
|
||||
db *leveldb.DB
|
||||
}
|
||||
|
||||
// Replace deletes old and adds new records in the persistent cache.
|
||||
func (c *Cache) Replace(nodes []*enode.Node) error {
|
||||
batch := new(leveldb.Batch)
|
||||
iter := createPeersIterator(c.db)
|
||||
defer iter.Release()
|
||||
newNodes := nodesToMap(nodes)
|
||||
for iter.Next() {
|
||||
record, err := unmarshalKeyValue(keyWithoutPrefix(iter.Key()), iter.Value())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if _, exist := newNodes[types.EnodeID(record.ID())]; exist {
|
||||
delete(newNodes, types.EnodeID(record.ID()))
|
||||
} else {
|
||||
batch.Delete(iter.Key())
|
||||
}
|
||||
}
|
||||
for _, n := range newNodes {
|
||||
enodeKey, err := n.MarshalText()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
// we put nil as default value doesn't have any state associated with them.
|
||||
batch.Put(db.Key(db.MailserversCache, enodeKey), nil)
|
||||
}
|
||||
return c.db.Write(batch, nil)
|
||||
}
|
||||
|
||||
// LoadAll loads all records from persistent database.
|
||||
func (c *Cache) LoadAll() (rst []PeerRecord, err error) {
|
||||
iter := createPeersIterator(c.db)
|
||||
for iter.Next() {
|
||||
record, err := unmarshalKeyValue(keyWithoutPrefix(iter.Key()), iter.Value())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
rst = append(rst, record)
|
||||
}
|
||||
return rst, nil
|
||||
}
|
||||
|
||||
// UpdateRecord updates single record.
|
||||
func (c *Cache) UpdateRecord(record PeerRecord) error {
|
||||
enodeKey, err := record.EncodeKey()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
value, err := record.Encode()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return c.db.Put(db.Key(db.MailserversCache, enodeKey), value, nil)
|
||||
}
|
||||
|
||||
func unmarshalKeyValue(key, value []byte) (record PeerRecord, err error) {
|
||||
enodeKey := key
|
||||
node := new(enode.Node)
|
||||
err = node.UnmarshalText(enodeKey)
|
||||
if err != nil {
|
||||
return record, err
|
||||
}
|
||||
record = PeerRecord{node: node}
|
||||
if len(value) != 0 {
|
||||
err = json.Unmarshal(value, &record)
|
||||
}
|
||||
return record, err
|
||||
}
|
||||
|
||||
func nodesToMap(nodes []*enode.Node) map[types.EnodeID]*enode.Node {
|
||||
rst := map[types.EnodeID]*enode.Node{}
|
||||
for _, n := range nodes {
|
||||
rst[types.EnodeID(n.ID())] = n
|
||||
}
|
||||
return rst
|
||||
}
|
||||
|
||||
func createPeersIterator(level *leveldb.DB) iterator.Iterator {
|
||||
return level.NewIterator(util.BytesPrefix([]byte{byte(db.MailserversCache)}), nil)
|
||||
}
|
||||
|
||||
// keyWithoutPrefix removes first byte from key.
|
||||
func keyWithoutPrefix(key []byte) []byte {
|
||||
return key[1:]
|
||||
}
|
||||
271
vendor/github.com/status-im/status-go/services/ext/mailservers/connmanager.go
generated
vendored
Normal file
271
vendor/github.com/status-im/status-go/services/ext/mailservers/connmanager.go
generated
vendored
Normal file
@@ -0,0 +1,271 @@
|
||||
package mailservers
|
||||
|
||||
import (
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/ethereum/go-ethereum/event"
|
||||
"github.com/ethereum/go-ethereum/log"
|
||||
"github.com/ethereum/go-ethereum/p2p"
|
||||
"github.com/ethereum/go-ethereum/p2p/enode"
|
||||
|
||||
"github.com/status-im/status-go/eth-node/types"
|
||||
)
|
||||
|
||||
const (
|
||||
peerEventsBuffer = 10 // sufficient buffer to avoid blocking a p2p feed.
|
||||
whisperEventsBuffer = 20 // sufficient buffer to avod blocking a eventSub envelopes feed.
|
||||
)
|
||||
|
||||
// PeerAdderRemover is an interface for adding or removing peers.
|
||||
type PeerAdderRemover interface {
|
||||
AddPeer(node *enode.Node)
|
||||
RemovePeer(node *enode.Node)
|
||||
}
|
||||
|
||||
// PeerEventsSubscriber interface to subscribe for p2p.PeerEvent's.
|
||||
type PeerEventsSubscriber interface {
|
||||
SubscribeEvents(chan *p2p.PeerEvent) event.Subscription
|
||||
}
|
||||
|
||||
// EnvelopeEventSubscriber interface to subscribe for types.EnvelopeEvent's.
|
||||
type EnvelopeEventSubscriber interface {
|
||||
SubscribeEnvelopeEvents(chan<- types.EnvelopeEvent) types.Subscription
|
||||
}
|
||||
|
||||
type p2pServer interface {
|
||||
PeerAdderRemover
|
||||
PeerEventsSubscriber
|
||||
}
|
||||
|
||||
// NewConnectionManager creates an instance of ConnectionManager.
|
||||
func NewConnectionManager(server p2pServer, eventSub EnvelopeEventSubscriber, target, maxFailures int, timeout time.Duration) *ConnectionManager {
|
||||
return &ConnectionManager{
|
||||
server: server,
|
||||
eventSub: eventSub,
|
||||
connectedTarget: target,
|
||||
maxFailures: maxFailures,
|
||||
notifications: make(chan []*enode.Node),
|
||||
timeoutWaitAdded: timeout,
|
||||
}
|
||||
}
|
||||
|
||||
// ConnectionManager manages keeps target of peers connected.
|
||||
type ConnectionManager struct {
|
||||
wg sync.WaitGroup
|
||||
quit chan struct{}
|
||||
|
||||
server p2pServer
|
||||
eventSub EnvelopeEventSubscriber
|
||||
|
||||
notifications chan []*enode.Node
|
||||
connectedTarget int
|
||||
timeoutWaitAdded time.Duration
|
||||
maxFailures int
|
||||
}
|
||||
|
||||
// Notify sends a non-blocking notification about new nodes.
|
||||
func (ps *ConnectionManager) Notify(nodes []*enode.Node) {
|
||||
ps.wg.Add(1)
|
||||
go func() {
|
||||
select {
|
||||
case ps.notifications <- nodes:
|
||||
case <-ps.quit:
|
||||
}
|
||||
ps.wg.Done()
|
||||
}()
|
||||
|
||||
}
|
||||
|
||||
// Start subscribes to a p2p server and handles new peers and state updates for those peers.
|
||||
func (ps *ConnectionManager) Start() {
|
||||
ps.quit = make(chan struct{})
|
||||
ps.wg.Add(1)
|
||||
go func() {
|
||||
state := newInternalState(ps.server, ps.connectedTarget, ps.timeoutWaitAdded)
|
||||
events := make(chan *p2p.PeerEvent, peerEventsBuffer)
|
||||
sub := ps.server.SubscribeEvents(events)
|
||||
whisperEvents := make(chan types.EnvelopeEvent, whisperEventsBuffer)
|
||||
whisperSub := ps.eventSub.SubscribeEnvelopeEvents(whisperEvents)
|
||||
requests := map[types.Hash]struct{}{}
|
||||
failuresPerServer := map[types.EnodeID]int{}
|
||||
|
||||
defer sub.Unsubscribe()
|
||||
defer whisperSub.Unsubscribe()
|
||||
defer ps.wg.Done()
|
||||
for {
|
||||
select {
|
||||
case <-ps.quit:
|
||||
return
|
||||
case err := <-sub.Err():
|
||||
log.Error("retry after error subscribing to p2p events", "error", err)
|
||||
return
|
||||
case err := <-whisperSub.Err():
|
||||
log.Error("retry after error suscribing to eventSub events", "error", err)
|
||||
return
|
||||
case newNodes := <-ps.notifications:
|
||||
state.processReplacement(newNodes, events)
|
||||
case ev := <-events:
|
||||
processPeerEvent(state, ev)
|
||||
case ev := <-whisperEvents:
|
||||
// TODO treat failed requests the same way as expired
|
||||
switch ev.Event {
|
||||
case types.EventMailServerRequestSent:
|
||||
requests[ev.Hash] = struct{}{}
|
||||
case types.EventMailServerRequestCompleted:
|
||||
// reset failures count on first success
|
||||
failuresPerServer[ev.Peer] = 0
|
||||
delete(requests, ev.Hash)
|
||||
case types.EventMailServerRequestExpired:
|
||||
_, exist := requests[ev.Hash]
|
||||
if !exist {
|
||||
continue
|
||||
}
|
||||
failuresPerServer[ev.Peer]++
|
||||
log.Debug("request to a mail server expired, disconnect a peer", "address", ev.Peer)
|
||||
if failuresPerServer[ev.Peer] >= ps.maxFailures {
|
||||
state.nodeDisconnected(ev.Peer)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}()
|
||||
}
|
||||
|
||||
// Stop gracefully closes all background goroutines and waits until they finish.
|
||||
func (ps *ConnectionManager) Stop() {
|
||||
if ps.quit == nil {
|
||||
return
|
||||
}
|
||||
select {
|
||||
case <-ps.quit:
|
||||
return
|
||||
default:
|
||||
}
|
||||
close(ps.quit)
|
||||
ps.wg.Wait()
|
||||
ps.quit = nil
|
||||
}
|
||||
|
||||
func (state *internalState) processReplacement(newNodes []*enode.Node, events <-chan *p2p.PeerEvent) {
|
||||
replacement := map[types.EnodeID]*enode.Node{}
|
||||
for _, n := range newNodes {
|
||||
replacement[types.EnodeID(n.ID())] = n
|
||||
}
|
||||
state.replaceNodes(replacement)
|
||||
if state.ReachedTarget() {
|
||||
log.Debug("already connected with required target", "target", state.target)
|
||||
return
|
||||
}
|
||||
if state.timeout != 0 {
|
||||
log.Debug("waiting defined timeout to establish connections",
|
||||
"timeout", state.timeout, "target", state.target)
|
||||
timer := time.NewTimer(state.timeout)
|
||||
waitForConnections(state, timer.C, events)
|
||||
timer.Stop()
|
||||
}
|
||||
}
|
||||
|
||||
func newInternalState(srv PeerAdderRemover, target int, timeout time.Duration) *internalState {
|
||||
return &internalState{
|
||||
options: options{target: target, timeout: timeout},
|
||||
srv: srv,
|
||||
connected: map[types.EnodeID]struct{}{},
|
||||
currentNodes: map[types.EnodeID]*enode.Node{},
|
||||
}
|
||||
}
|
||||
|
||||
type options struct {
|
||||
target int
|
||||
timeout time.Duration
|
||||
}
|
||||
|
||||
type internalState struct {
|
||||
options
|
||||
srv PeerAdderRemover
|
||||
|
||||
connected map[types.EnodeID]struct{}
|
||||
currentNodes map[types.EnodeID]*enode.Node
|
||||
}
|
||||
|
||||
func (state *internalState) ReachedTarget() bool {
|
||||
return len(state.connected) >= state.target
|
||||
}
|
||||
|
||||
func (state *internalState) replaceNodes(new map[types.EnodeID]*enode.Node) {
|
||||
for nid, n := range state.currentNodes {
|
||||
if _, exist := new[nid]; !exist {
|
||||
delete(state.connected, nid)
|
||||
state.srv.RemovePeer(n)
|
||||
}
|
||||
}
|
||||
if !state.ReachedTarget() {
|
||||
for _, n := range new {
|
||||
state.srv.AddPeer(n)
|
||||
}
|
||||
}
|
||||
state.currentNodes = new
|
||||
}
|
||||
|
||||
func (state *internalState) nodeAdded(peer types.EnodeID) {
|
||||
n, exist := state.currentNodes[peer]
|
||||
if !exist {
|
||||
return
|
||||
}
|
||||
if state.ReachedTarget() {
|
||||
state.srv.RemovePeer(n)
|
||||
} else {
|
||||
state.connected[types.EnodeID(n.ID())] = struct{}{}
|
||||
}
|
||||
}
|
||||
|
||||
func (state *internalState) nodeDisconnected(peer types.EnodeID) {
|
||||
n, exist := state.currentNodes[peer] // unrelated event
|
||||
if !exist {
|
||||
return
|
||||
}
|
||||
_, exist = state.connected[peer] // check if already disconnected
|
||||
if !exist {
|
||||
return
|
||||
}
|
||||
if len(state.currentNodes) == 1 { // keep node connected if we don't have another choice
|
||||
return
|
||||
}
|
||||
state.srv.RemovePeer(n) // remove peer permanently, otherwise p2p.Server will try to reconnect
|
||||
delete(state.connected, peer)
|
||||
if !state.ReachedTarget() { // try to connect with any other selected (but not connected) node
|
||||
for nid, n := range state.currentNodes {
|
||||
_, exist := state.connected[nid]
|
||||
if exist || peer == nid {
|
||||
continue
|
||||
}
|
||||
state.srv.AddPeer(n)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func processPeerEvent(state *internalState, ev *p2p.PeerEvent) {
|
||||
switch ev.Type {
|
||||
case p2p.PeerEventTypeAdd:
|
||||
log.Debug("connected to a mailserver", "address", ev.Peer)
|
||||
state.nodeAdded(types.EnodeID(ev.Peer))
|
||||
case p2p.PeerEventTypeDrop:
|
||||
log.Debug("mailserver disconnected", "address", ev.Peer)
|
||||
state.nodeDisconnected(types.EnodeID(ev.Peer))
|
||||
}
|
||||
}
|
||||
|
||||
func waitForConnections(state *internalState, timeout <-chan time.Time, events <-chan *p2p.PeerEvent) {
|
||||
for {
|
||||
select {
|
||||
case ev := <-events:
|
||||
processPeerEvent(state, ev)
|
||||
if state.ReachedTarget() {
|
||||
return
|
||||
}
|
||||
case <-timeout:
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
85
vendor/github.com/status-im/status-go/services/ext/mailservers/connmonitor.go
generated
vendored
Normal file
85
vendor/github.com/status-im/status-go/services/ext/mailservers/connmonitor.go
generated
vendored
Normal file
@@ -0,0 +1,85 @@
|
||||
package mailservers
|
||||
|
||||
import (
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/ethereum/go-ethereum/log"
|
||||
|
||||
"github.com/status-im/status-go/eth-node/types"
|
||||
)
|
||||
|
||||
// NewLastUsedConnectionMonitor returns pointer to the instance of LastUsedConnectionMonitor.
|
||||
func NewLastUsedConnectionMonitor(ps *PeerStore, cache *Cache, eventSub EnvelopeEventSubscriber) *LastUsedConnectionMonitor {
|
||||
return &LastUsedConnectionMonitor{
|
||||
ps: ps,
|
||||
cache: cache,
|
||||
eventSub: eventSub,
|
||||
}
|
||||
}
|
||||
|
||||
// LastUsedConnectionMonitor watches relevant events and reflects it in cache.
|
||||
type LastUsedConnectionMonitor struct {
|
||||
ps *PeerStore
|
||||
cache *Cache
|
||||
|
||||
eventSub EnvelopeEventSubscriber
|
||||
|
||||
quit chan struct{}
|
||||
wg sync.WaitGroup
|
||||
}
|
||||
|
||||
// Start spins a separate goroutine to watch connections.
|
||||
func (mon *LastUsedConnectionMonitor) Start() {
|
||||
mon.quit = make(chan struct{})
|
||||
mon.wg.Add(1)
|
||||
go func() {
|
||||
events := make(chan types.EnvelopeEvent, whisperEventsBuffer)
|
||||
sub := mon.eventSub.SubscribeEnvelopeEvents(events)
|
||||
defer sub.Unsubscribe()
|
||||
defer mon.wg.Done()
|
||||
for {
|
||||
select {
|
||||
case <-mon.quit:
|
||||
return
|
||||
case err := <-sub.Err():
|
||||
log.Error("retry after error suscribing to eventSub events", "error", err)
|
||||
return
|
||||
case ev := <-events:
|
||||
node := mon.ps.Get(ev.Peer)
|
||||
if node == nil {
|
||||
continue
|
||||
}
|
||||
if ev.Event == types.EventMailServerRequestCompleted {
|
||||
err := mon.updateRecord(ev.Peer)
|
||||
if err != nil {
|
||||
log.Error("unable to update storage", "peer", ev.Peer, "error", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}()
|
||||
}
|
||||
|
||||
func (mon *LastUsedConnectionMonitor) updateRecord(nodeID types.EnodeID) error {
|
||||
node := mon.ps.Get(nodeID)
|
||||
if node == nil {
|
||||
return nil
|
||||
}
|
||||
return mon.cache.UpdateRecord(PeerRecord{node: node, LastUsed: time.Now()})
|
||||
}
|
||||
|
||||
// Stop closes channel to signal a quit and waits until all goroutines are stoppped.
|
||||
func (mon *LastUsedConnectionMonitor) Stop() {
|
||||
if mon.quit == nil {
|
||||
return
|
||||
}
|
||||
select {
|
||||
case <-mon.quit:
|
||||
return
|
||||
default:
|
||||
}
|
||||
close(mon.quit)
|
||||
mon.wg.Wait()
|
||||
mon.quit = nil
|
||||
}
|
||||
63
vendor/github.com/status-im/status-go/services/ext/mailservers/peerstore.go
generated
vendored
Normal file
63
vendor/github.com/status-im/status-go/services/ext/mailservers/peerstore.go
generated
vendored
Normal file
@@ -0,0 +1,63 @@
|
||||
package mailservers
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"sync"
|
||||
|
||||
"github.com/ethereum/go-ethereum/p2p"
|
||||
"github.com/ethereum/go-ethereum/p2p/enode"
|
||||
|
||||
"github.com/status-im/status-go/eth-node/types"
|
||||
)
|
||||
|
||||
var (
|
||||
// ErrNoConnected returned when mail servers are not connected.
|
||||
ErrNoConnected = errors.New("no connected mail servers")
|
||||
)
|
||||
|
||||
// PeersProvider is an interface for requesting list of peers.
|
||||
type PeersProvider interface {
|
||||
Peers() []*p2p.Peer
|
||||
}
|
||||
|
||||
// NewPeerStore returns an instance of PeerStore.
|
||||
func NewPeerStore(cache *Cache) *PeerStore {
|
||||
return &PeerStore{
|
||||
nodes: map[types.EnodeID]*enode.Node{},
|
||||
cache: cache,
|
||||
}
|
||||
}
|
||||
|
||||
// PeerStore stores list of selected mail servers and keeps N of them connected.
|
||||
type PeerStore struct {
|
||||
mu sync.RWMutex
|
||||
nodes map[types.EnodeID]*enode.Node
|
||||
|
||||
cache *Cache
|
||||
}
|
||||
|
||||
// Exist confirms that peers was added to a store.
|
||||
func (ps *PeerStore) Exist(nodeID types.EnodeID) bool {
|
||||
ps.mu.RLock()
|
||||
defer ps.mu.RUnlock()
|
||||
_, exist := ps.nodes[nodeID]
|
||||
return exist
|
||||
}
|
||||
|
||||
// Get returns instance of the node with requested ID or nil if ID is not found.
|
||||
func (ps *PeerStore) Get(nodeID types.EnodeID) *enode.Node {
|
||||
ps.mu.RLock()
|
||||
defer ps.mu.RUnlock()
|
||||
return ps.nodes[nodeID]
|
||||
}
|
||||
|
||||
// Update updates peers locally.
|
||||
func (ps *PeerStore) Update(nodes []*enode.Node) error {
|
||||
ps.mu.Lock()
|
||||
ps.nodes = map[types.EnodeID]*enode.Node{}
|
||||
for _, n := range nodes {
|
||||
ps.nodes[types.EnodeID(n.ID())] = n
|
||||
}
|
||||
ps.mu.Unlock()
|
||||
return ps.cache.Replace(nodes)
|
||||
}
|
||||
54
vendor/github.com/status-im/status-go/services/ext/mailservers/utils.go
generated
vendored
Normal file
54
vendor/github.com/status-im/status-go/services/ext/mailservers/utils.go
generated
vendored
Normal file
@@ -0,0 +1,54 @@
|
||||
package mailservers
|
||||
|
||||
import (
|
||||
"sort"
|
||||
|
||||
"github.com/ethereum/go-ethereum/p2p/enode"
|
||||
|
||||
"github.com/status-im/status-go/eth-node/types"
|
||||
)
|
||||
|
||||
// GetFirstConnected returns first connected peer that is also added to a peer store.
|
||||
// Raises ErrNoConnected if no peers are added to a peer store.
|
||||
func GetFirstConnected(provider PeersProvider, store *PeerStore) (*enode.Node, error) {
|
||||
peers := provider.Peers()
|
||||
for _, p := range peers {
|
||||
if store.Exist(types.EnodeID(p.ID())) {
|
||||
return p.Node(), nil
|
||||
}
|
||||
}
|
||||
return nil, ErrNoConnected
|
||||
}
|
||||
|
||||
// NodesNotifee interface to be notified when new nodes are received.
|
||||
type NodesNotifee interface {
|
||||
Notify([]*enode.Node)
|
||||
}
|
||||
|
||||
// EnsureUsedRecordsAddedFirst checks if any nodes were marked as connected before app went offline.
|
||||
func EnsureUsedRecordsAddedFirst(ps *PeerStore, conn NodesNotifee) error {
|
||||
records, err := ps.cache.LoadAll()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if len(records) == 0 {
|
||||
return nil
|
||||
}
|
||||
sort.Slice(records, func(i, j int) bool {
|
||||
return records[i].LastUsed.After(records[j].LastUsed)
|
||||
})
|
||||
all := recordsToNodes(records)
|
||||
if !records[0].LastUsed.IsZero() {
|
||||
conn.Notify(all[:1])
|
||||
}
|
||||
conn.Notify(all)
|
||||
return nil
|
||||
}
|
||||
|
||||
func recordsToNodes(records []PeerRecord) []*enode.Node {
|
||||
nodes := make([]*enode.Node, len(records))
|
||||
for i := range records {
|
||||
nodes[i] = records[i].Node()
|
||||
}
|
||||
return nodes
|
||||
}
|
||||
45
vendor/github.com/status-im/status-go/services/ext/node_mock.go
generated
vendored
Normal file
45
vendor/github.com/status-im/status-go/services/ext/node_mock.go
generated
vendored
Normal file
@@ -0,0 +1,45 @@
|
||||
package ext
|
||||
|
||||
import (
|
||||
"go.uber.org/zap"
|
||||
|
||||
"github.com/status-im/status-go/eth-node/types"
|
||||
enstypes "github.com/status-im/status-go/eth-node/types/ens"
|
||||
)
|
||||
|
||||
// TestNodeWrapper is a test double for the node interface that hands back
// pre-configured whisper and waku instances.
type TestNodeWrapper struct {
	whisper types.Whisper // returned by GetWhisper
	waku    types.Waku    // returned by GetWaku and GetWakuV2
}
|
||||
|
||||
func NewTestNodeWrapper(whisper types.Whisper, waku types.Waku) *TestNodeWrapper {
|
||||
return &TestNodeWrapper{whisper: whisper, waku: waku}
|
||||
}
|
||||
|
||||
// NewENSVerifier is not supported by the test wrapper and always panics.
func (w *TestNodeWrapper) NewENSVerifier(_ *zap.Logger) enstypes.ENSVerifier {
	panic("not implemented")
}
|
||||
|
||||
// GetWhisper returns the pre-configured whisper instance; the argument is ignored.
func (w *TestNodeWrapper) GetWhisper(_ interface{}) (types.Whisper, error) {
	return w.whisper, nil
}
|
||||
|
||||
// GetWaku returns the pre-configured waku instance; the argument is ignored.
func (w *TestNodeWrapper) GetWaku(_ interface{}) (types.Waku, error) {
	return w.waku, nil
}
|
||||
|
||||
// GetWakuV2 returns the same pre-configured waku instance as GetWaku.
func (w *TestNodeWrapper) GetWakuV2(_ interface{}) (types.Waku, error) {
	return w.waku, nil
}
|
||||
|
||||
// PeersCount always reports a single peer in the test wrapper.
func (w *TestNodeWrapper) PeersCount() int {
	return 1
}
|
||||
|
||||
// AddPeer is not supported by the test wrapper and always panics.
func (w *TestNodeWrapper) AddPeer(url string) error {
	panic("not implemented")
}
|
||||
|
||||
// RemovePeer is not supported by the test wrapper and always panics.
func (w *TestNodeWrapper) RemovePeer(url string) error {
	panic("not implemented")
}
|
||||
98
vendor/github.com/status-im/status-go/services/ext/requests.go
generated
vendored
Normal file
98
vendor/github.com/status-im/status-go/services/ext/requests.go
generated
vendored
Normal file
@@ -0,0 +1,98 @@
|
||||
package ext
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"hash/fnv"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/status-im/status-go/eth-node/types"
|
||||
)
|
||||
|
||||
const (
	// DefaultRequestsDelay will be used in RequestsRegistry if no other was provided.
	// It is the minimum interval between two requests for the same topics.
	DefaultRequestsDelay = 3 * time.Second
)
|
||||
|
||||
// requestMeta records when the most recent request for a topics hash was
// registered and which request UID made it.
type requestMeta struct {
	timestamp time.Time  // registration time of the latest request
	lastUID   types.Hash // UID of the latest request for these topics
}
|
||||
|
||||
// NewRequestsRegistry creates instance of the RequestsRegistry and returns pointer to it.
|
||||
func NewRequestsRegistry(delay time.Duration) *RequestsRegistry {
|
||||
r := &RequestsRegistry{
|
||||
delay: delay,
|
||||
}
|
||||
r.Clear()
|
||||
return r
|
||||
}
|
||||
|
||||
// RequestsRegistry keeps map for all requests with timestamp when they were made.
// It throttles repeated requests for the same set of topics (see Register).
type RequestsRegistry struct {
	mu           sync.Mutex                // guards both maps below
	delay        time.Duration             // minimum interval between same-topics requests
	uidToTopics  map[types.Hash]types.Hash // request UID -> topics hash
	byTopicsHash map[types.Hash]requestMeta // topics hash -> latest request metadata
}
|
||||
|
||||
// Register request with given topics. If request with same topics was made in less then configured delay then error
|
||||
// will be returned.
|
||||
func (r *RequestsRegistry) Register(uid types.Hash, topics []types.TopicType) error {
|
||||
r.mu.Lock()
|
||||
defer r.mu.Unlock()
|
||||
topicsHash := topicsToHash(topics)
|
||||
if meta, exist := r.byTopicsHash[topicsHash]; exist {
|
||||
if time.Since(meta.timestamp) < r.delay {
|
||||
return fmt.Errorf("another request with the same topics was sent less than %s ago. Please wait for a bit longer, or set `force` to true in request parameters", r.delay)
|
||||
}
|
||||
}
|
||||
newMeta := requestMeta{
|
||||
timestamp: time.Now(),
|
||||
lastUID: uid,
|
||||
}
|
||||
r.uidToTopics[uid] = topicsHash
|
||||
r.byTopicsHash[topicsHash] = newMeta
|
||||
return nil
|
||||
}
|
||||
|
||||
// Has returns true if given uid is stored in registry.
|
||||
func (r *RequestsRegistry) Has(uid types.Hash) bool {
|
||||
r.mu.Lock()
|
||||
defer r.mu.Unlock()
|
||||
_, exist := r.uidToTopics[uid]
|
||||
return exist
|
||||
}
|
||||
|
||||
// Unregister removes request with given UID from registry.
|
||||
func (r *RequestsRegistry) Unregister(uid types.Hash) {
|
||||
r.mu.Lock()
|
||||
defer r.mu.Unlock()
|
||||
topicsHash, exist := r.uidToTopics[uid]
|
||||
if !exist {
|
||||
return
|
||||
}
|
||||
delete(r.uidToTopics, uid)
|
||||
meta := r.byTopicsHash[topicsHash]
|
||||
// remove topicsHash only if we are trying to unregister last request with this topic.
|
||||
if meta.lastUID == uid {
|
||||
delete(r.byTopicsHash, topicsHash)
|
||||
}
|
||||
}
|
||||
|
||||
// Clear recreates all structures used for caching requests.
|
||||
func (r *RequestsRegistry) Clear() {
|
||||
r.mu.Lock()
|
||||
defer r.mu.Unlock()
|
||||
r.uidToTopics = map[types.Hash]types.Hash{}
|
||||
r.byTopicsHash = map[types.Hash]requestMeta{}
|
||||
}
|
||||
|
||||
// topicsToHash returns non-cryptographic hash of the topics.
|
||||
func topicsToHash(topics []types.TopicType) types.Hash {
|
||||
hash := fnv.New32()
|
||||
for i := range topics {
|
||||
_, _ = hash.Write(topics[i][:]) // never returns error per documentation
|
||||
}
|
||||
return types.BytesToHash(hash.Sum(nil))
|
||||
}
|
||||
67
vendor/github.com/status-im/status-go/services/ext/rpc.go
generated
vendored
Normal file
67
vendor/github.com/status-im/status-go/services/ext/rpc.go
generated
vendored
Normal file
@@ -0,0 +1,67 @@
|
||||
// TODO: These types should be defined using protobuf, but protoc can only emit []byte instead of types.HexBytes,
|
||||
// which causes issues when marshaling to JSON on the react side. Let's do that once the chat protocol is moved to the go repo.
|
||||
|
||||
package ext
|
||||
|
||||
import (
|
||||
"crypto/ecdsa"
|
||||
|
||||
"github.com/status-im/status-go/eth-node/crypto"
|
||||
"github.com/status-im/status-go/eth-node/types"
|
||||
)
|
||||
|
||||
// SendPublicMessageRPC represents the RPC payload for the SendPublicMessage RPC method
type SendPublicMessageRPC struct {
	Sig     string // TODO: remove
	Chat    string // public chat name; doubles as the message identifier
	Payload types.HexBytes // raw message payload
}
|
||||
|
||||
// ID returns the chat name as the message identifier.
// TODO: implement with accordance to https://github.com/status-im/status-go/protocol/issues/28.
func (m SendPublicMessageRPC) ID() string { return m.Chat }
|
||||
|
||||
// PublicName returns the public chat name.
func (m SendPublicMessageRPC) PublicName() string { return m.Chat }
|
||||
|
||||
// PublicKey is always nil for public messages — there is no single recipient.
func (m SendPublicMessageRPC) PublicKey() *ecdsa.PublicKey { return nil }
|
||||
|
||||
// SendDirectMessageRPC represents the RPC payload for the SendDirectMessage RPC method
type SendDirectMessageRPC struct {
	Sig     string // TODO: remove
	Chat    string
	Payload types.HexBytes // raw message payload
	PubKey  types.HexBytes // marshaled recipient public key
	DH      bool           // TODO: make sure to remove safely
}
|
||||
|
||||
// ID is not implemented yet and returns an empty string.
// TODO: implement with accordance to https://github.com/status-im/status-go/protocol/issues/28.
func (m SendDirectMessageRPC) ID() string { return "" }
|
||||
|
||||
// PublicName is empty for direct messages — they have no public chat name.
func (m SendDirectMessageRPC) PublicName() string { return "" }
|
||||
|
||||
// PublicKey decodes the recipient's key from the PubKey field. Invalid
// bytes yield nil (the unmarshal error is deliberately discarded).
func (m SendDirectMessageRPC) PublicKey() *ecdsa.PublicKey {
	publicKey, _ := crypto.UnmarshalPubkey(m.PubKey)
	return publicKey
}
|
||||
|
||||
// JoinRPC is the RPC payload for joining a chat: either a public chat
// (Chat set, PubKey empty) or a one-to-one chat (PubKey set).
type JoinRPC struct {
	Chat    string
	PubKey  types.HexBytes // marshaled public key of the other party, if any
	Payload types.HexBytes
}
|
||||
|
||||
// ID returns the chat name as the identifier.
func (m JoinRPC) ID() string { return m.Chat }
|
||||
|
||||
// PublicName returns the chat name for public chats, and an empty string
// when a recipient public key is set (one-to-one chat).
func (m JoinRPC) PublicName() string {
	if len(m.PubKey) > 0 {
		return ""
	}
	return m.Chat
}
|
||||
|
||||
func (m JoinRPC) PublicKey() *ecdsa.PublicKey {
|
||||
if len(m.PubKey) > 0 {
|
||||
return nil
|
||||
}
|
||||
publicKey, _ := crypto.UnmarshalPubkey(m.PubKey)
|
||||
return publicKey
|
||||
}
|
||||
828
vendor/github.com/status-im/status-go/services/ext/service.go
generated
vendored
Normal file
828
vendor/github.com/status-im/status-go/services/ext/service.go
generated
vendored
Normal file
@@ -0,0 +1,828 @@
|
||||
package ext
|
||||
|
||||
import (
|
||||
"context"
|
||||
"crypto/ecdsa"
|
||||
"database/sql"
|
||||
"encoding/hex"
|
||||
"errors"
|
||||
"fmt"
|
||||
"math/big"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/syndtr/goleveldb/leveldb"
|
||||
"go.uber.org/zap"
|
||||
|
||||
commongethtypes "github.com/ethereum/go-ethereum/common"
|
||||
gethtypes "github.com/ethereum/go-ethereum/core/types"
|
||||
"github.com/ethereum/go-ethereum/ethclient"
|
||||
"github.com/ethereum/go-ethereum/log"
|
||||
"github.com/ethereum/go-ethereum/node"
|
||||
"github.com/ethereum/go-ethereum/p2p"
|
||||
"github.com/ethereum/go-ethereum/p2p/enode"
|
||||
gethrpc "github.com/ethereum/go-ethereum/rpc"
|
||||
|
||||
"github.com/status-im/status-go/account"
|
||||
"github.com/status-im/status-go/api/multiformat"
|
||||
"github.com/status-im/status-go/connection"
|
||||
"github.com/status-im/status-go/db"
|
||||
coretypes "github.com/status-im/status-go/eth-node/core/types"
|
||||
"github.com/status-im/status-go/eth-node/crypto"
|
||||
"github.com/status-im/status-go/eth-node/types"
|
||||
"github.com/status-im/status-go/images"
|
||||
"github.com/status-im/status-go/multiaccounts"
|
||||
"github.com/status-im/status-go/multiaccounts/accounts"
|
||||
"github.com/status-im/status-go/params"
|
||||
"github.com/status-im/status-go/protocol"
|
||||
"github.com/status-im/status-go/protocol/anonmetrics"
|
||||
"github.com/status-im/status-go/protocol/common"
|
||||
"github.com/status-im/status-go/protocol/common/shard"
|
||||
"github.com/status-im/status-go/protocol/communities"
|
||||
"github.com/status-im/status-go/protocol/communities/token"
|
||||
"github.com/status-im/status-go/protocol/protobuf"
|
||||
"github.com/status-im/status-go/protocol/pushnotificationclient"
|
||||
"github.com/status-im/status-go/protocol/pushnotificationserver"
|
||||
"github.com/status-im/status-go/protocol/transport"
|
||||
"github.com/status-im/status-go/rpc"
|
||||
"github.com/status-im/status-go/server"
|
||||
"github.com/status-im/status-go/services/browsers"
|
||||
"github.com/status-im/status-go/services/communitytokens"
|
||||
"github.com/status-im/status-go/services/ext/mailservers"
|
||||
mailserversDB "github.com/status-im/status-go/services/mailservers"
|
||||
"github.com/status-im/status-go/services/wallet"
|
||||
w_common "github.com/status-im/status-go/services/wallet/common"
|
||||
"github.com/status-im/status-go/services/wallet/thirdparty"
|
||||
"github.com/status-im/status-go/wakuv2"
|
||||
)
|
||||
|
||||
// infinityString is the display form of an unbounded amount
// (usage is outside this chunk — TODO confirm at call sites).
const infinityString = "∞"

// providerID is the provider name attached to community collectibles metadata.
const providerID = "community"
|
||||
|
||||
// EnvelopeEventsHandler used for two different event types.
type EnvelopeEventsHandler interface {
	// EnvelopeSent is called with the IDs of envelopes confirmed sent.
	EnvelopeSent([][]byte)
	// EnvelopeExpired is called with the IDs of envelopes that expired undelivered.
	EnvelopeExpired([][]byte, error)
	// MailServerRequestCompleted reports a finished historic-messages request.
	MailServerRequestCompleted(types.Hash, types.Hash, []byte, error)
	// MailServerRequestExpired reports a historic-messages request that timed out.
	MailServerRequestExpired(types.Hash)
}
|
||||
|
||||
// Service is a service that provides some additional API to whisper-based protocols like Whisper or Waku.
type Service struct {
	messenger       *protocol.Messenger // initialized by InitProtocol, nil before that
	identity        *ecdsa.PrivateKey   // node identity set in InitProtocol
	cancelMessenger chan struct{}       // closed by Stop to end background loops
	storage         db.TransactionalStorage
	n               types.Node
	rpcClient       *rpc.Client
	config          params.NodeConfig
	mailMonitor     *MailRequestMonitor
	server          *p2p.Server // set via SetP2PServer
	peerStore       *mailservers.PeerStore
	accountsDB      *accounts.Database
	multiAccountsDB *multiaccounts.Database
	account         *multiaccounts.Account
}
|
||||
|
||||
// Make sure that Service implements node.Lifecycle interface.
var _ node.Lifecycle = (*Service)(nil)
|
||||
|
||||
func New(
|
||||
config params.NodeConfig,
|
||||
n types.Node,
|
||||
rpcClient *rpc.Client,
|
||||
ldb *leveldb.DB,
|
||||
mailMonitor *MailRequestMonitor,
|
||||
eventSub mailservers.EnvelopeEventSubscriber,
|
||||
) *Service {
|
||||
cache := mailservers.NewCache(ldb)
|
||||
peerStore := mailservers.NewPeerStore(cache)
|
||||
return &Service{
|
||||
storage: db.NewLevelDBStorage(ldb),
|
||||
n: n,
|
||||
rpcClient: rpcClient,
|
||||
config: config,
|
||||
mailMonitor: mailMonitor,
|
||||
peerStore: peerStore,
|
||||
}
|
||||
}
|
||||
|
||||
func (s *Service) NodeID() *ecdsa.PrivateKey {
|
||||
if s.server == nil {
|
||||
return nil
|
||||
}
|
||||
return s.server.PrivateKey
|
||||
}
|
||||
|
||||
func (s *Service) GetPeer(rawURL string) (*enode.Node, error) {
|
||||
if len(rawURL) == 0 {
|
||||
return mailservers.GetFirstConnected(s.server, s.peerStore)
|
||||
}
|
||||
return enode.ParseV4(rawURL)
|
||||
}
|
||||
|
||||
// InitProtocol (re)initializes the protocol Messenger for this service.
// It is a no-op when PFS is disabled in the config. Any previously running
// messenger is shut down first to avoid leaking its goroutines, then a new
// messenger is built from the supplied databases and services and Init-ed.
// Returns the first error encountered; on success s.messenger is ready.
func (s *Service) InitProtocol(nodeName string, identity *ecdsa.PrivateKey, appDb, walletDb *sql.DB, httpServer *server.MediaServer, multiAccountDb *multiaccounts.Database, acc *multiaccounts.Account, accountManager *account.GethManager, rpcClient *rpc.Client, walletService *wallet.Service, communityTokensService *communitytokens.Service, wakuService *wakuv2.Waku, logger *zap.Logger) error {
	var err error
	if !s.config.ShhextConfig.PFSEnabled {
		return nil
	}

	// If Messenger has been already set up, we need to shut it down
	// before we init it again. Otherwise, it will lead to goroutines leakage
	// due to not stopped filters.
	if s.messenger != nil {
		if err := s.messenger.Shutdown(); err != nil {
			return err
		}
	}

	s.identity = identity

	dataDir := filepath.Clean(s.config.ShhextConfig.BackupDisabledDataDir)

	// Ensure the data directory exists before the messenger uses it.
	if err := os.MkdirAll(dataDir, os.ModePerm); err != nil {
		return err
	}

	envelopesMonitorConfig := &transport.EnvelopesMonitorConfig{
		MaxAttempts:                      s.config.ShhextConfig.MaxMessageDeliveryAttempts,
		AwaitOnlyMailServerConfirmations: s.config.ShhextConfig.MailServerConfirmations,
		// A peer counts as a mailserver iff it is present in the peer store.
		IsMailserver: func(peer types.EnodeID) bool {
			return s.peerStore.Exist(peer)
		},
		EnvelopeEventsHandler: EnvelopeSignalHandler{},
		Logger:                logger,
	}
	s.accountsDB, err = accounts.NewDB(appDb)
	if err != nil {
		return err
	}
	s.multiAccountsDB = multiAccountDb
	s.account = acc

	options, err := buildMessengerOptions(s.config, identity, appDb, walletDb, httpServer, s.rpcClient, s.multiAccountsDB, acc, envelopesMonitorConfig, s.accountsDB, walletService, communityTokensService, wakuService, logger, &MessengerSignalsHandler{}, accountManager)
	if err != nil {
		return err
	}

	messenger, err := protocol.NewMessenger(
		nodeName,
		identity,
		s.n,
		s.config.ShhextConfig.InstallationID,
		s.peerStore,
		options...,
	)
	if err != nil {
		return err
	}
	s.messenger = messenger
	s.messenger.SetP2PServer(s.server)
	if s.config.ProcessBackedupMessages {
		s.messenger.EnableBackedupMessagesProcessing()
	}
	return messenger.Init()
}
|
||||
|
||||
func (s *Service) StartMessenger() (*protocol.MessengerResponse, error) {
|
||||
// Start a loop that retrieves all messages and propagates them to status-mobile.
|
||||
s.cancelMessenger = make(chan struct{})
|
||||
response, err := s.messenger.Start()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
s.messenger.StartRetrieveMessagesLoop(time.Second, s.cancelMessenger)
|
||||
go s.verifyTransactionLoop(30*time.Second, s.cancelMessenger)
|
||||
|
||||
if s.config.ShhextConfig.BandwidthStatsEnabled {
|
||||
go s.retrieveStats(5*time.Second, s.cancelMessenger)
|
||||
}
|
||||
|
||||
return response, nil
|
||||
}
|
||||
|
||||
func (s *Service) retrieveStats(tick time.Duration, cancel <-chan struct{}) {
|
||||
ticker := time.NewTicker(tick)
|
||||
defer ticker.Stop()
|
||||
|
||||
for {
|
||||
select {
|
||||
case <-ticker.C:
|
||||
response := s.messenger.GetStats()
|
||||
PublisherSignalHandler{}.Stats(response)
|
||||
case <-cancel:
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// verifyTransactionClient looks up transactions over JSON-RPC for
// transaction verification.
type verifyTransactionClient struct {
	chainID *big.Int // chain ID used to build the transaction signer
	url     string   // JSON-RPC endpoint dialed on each call
}
|
||||
|
||||
func (c *verifyTransactionClient) TransactionByHash(ctx context.Context, hash types.Hash) (coretypes.Message, coretypes.TransactionStatus, error) {
|
||||
signer := gethtypes.NewLondonSigner(c.chainID)
|
||||
client, err := ethclient.Dial(c.url)
|
||||
if err != nil {
|
||||
return coretypes.Message{}, coretypes.TransactionStatusPending, err
|
||||
}
|
||||
|
||||
transaction, pending, err := client.TransactionByHash(ctx, commongethtypes.BytesToHash(hash.Bytes()))
|
||||
if err != nil {
|
||||
return coretypes.Message{}, coretypes.TransactionStatusPending, err
|
||||
}
|
||||
|
||||
message, err := transaction.AsMessage(signer, nil)
|
||||
if err != nil {
|
||||
return coretypes.Message{}, coretypes.TransactionStatusPending, err
|
||||
}
|
||||
from := types.BytesToAddress(message.From().Bytes())
|
||||
to := types.BytesToAddress(message.To().Bytes())
|
||||
|
||||
if pending {
|
||||
return coretypes.NewMessage(
|
||||
from,
|
||||
&to,
|
||||
message.Nonce(),
|
||||
message.Value(),
|
||||
message.Gas(),
|
||||
message.GasPrice(),
|
||||
message.Data(),
|
||||
message.CheckNonce(),
|
||||
), coretypes.TransactionStatusPending, nil
|
||||
}
|
||||
|
||||
receipt, err := client.TransactionReceipt(ctx, commongethtypes.BytesToHash(hash.Bytes()))
|
||||
if err != nil {
|
||||
return coretypes.Message{}, coretypes.TransactionStatusPending, err
|
||||
}
|
||||
|
||||
coremessage := coretypes.NewMessage(
|
||||
from,
|
||||
&to,
|
||||
message.Nonce(),
|
||||
message.Value(),
|
||||
message.Gas(),
|
||||
message.GasPrice(),
|
||||
message.Data(),
|
||||
message.CheckNonce(),
|
||||
)
|
||||
|
||||
// Token transfer, check the logs
|
||||
if len(coremessage.Data()) != 0 {
|
||||
if w_common.IsTokenTransfer(receipt.Logs) {
|
||||
return coremessage, coretypes.TransactionStatus(receipt.Status), nil
|
||||
}
|
||||
return coremessage, coretypes.TransactionStatusFailed, nil
|
||||
}
|
||||
|
||||
return coremessage, coretypes.TransactionStatus(receipt.Status), nil
|
||||
}
|
||||
|
||||
func (s *Service) verifyTransactionLoop(tick time.Duration, cancel <-chan struct{}) {
|
||||
if s.config.ShhextConfig.VerifyTransactionURL == "" {
|
||||
log.Warn("not starting transaction loop")
|
||||
return
|
||||
}
|
||||
|
||||
ticker := time.NewTicker(tick)
|
||||
defer ticker.Stop()
|
||||
|
||||
ctx, cancelVerifyTransaction := context.WithCancel(context.Background())
|
||||
|
||||
for {
|
||||
select {
|
||||
case <-ticker.C:
|
||||
accounts, err := s.accountsDB.GetActiveAccounts()
|
||||
if err != nil {
|
||||
log.Error("failed to retrieve accounts", "err", err)
|
||||
}
|
||||
var wallets []types.Address
|
||||
for _, account := range accounts {
|
||||
if account.IsWalletNonWatchOnlyAccount() {
|
||||
wallets = append(wallets, types.BytesToAddress(account.Address.Bytes()))
|
||||
}
|
||||
}
|
||||
|
||||
response, err := s.messenger.ValidateTransactions(ctx, wallets)
|
||||
if err != nil {
|
||||
log.Error("failed to validate transactions", "err", err)
|
||||
continue
|
||||
}
|
||||
s.messenger.PublishMessengerResponse(response)
|
||||
|
||||
case <-cancel:
|
||||
cancelVerifyTransaction()
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (s *Service) EnableInstallation(installationID string) error {
|
||||
return s.messenger.EnableInstallation(installationID)
|
||||
}
|
||||
|
||||
// DisableInstallation disables an installation for multi-device sync.
|
||||
func (s *Service) DisableInstallation(installationID string) error {
|
||||
return s.messenger.DisableInstallation(installationID)
|
||||
}
|
||||
|
||||
// Protocols returns a new protocols list. In this case, there are none.
|
||||
func (s *Service) Protocols() []p2p.Protocol {
|
||||
return []p2p.Protocol{}
|
||||
}
|
||||
|
||||
// APIs returns a list of new APIs.
// This base implementation always panics; concrete services override it.
func (s *Service) APIs() []gethrpc.API {
	panic("this is abstract service, use shhext or wakuext implementation")
}
|
||||
|
||||
// SetP2PServer stores the p2p server used by NodeID, GetPeer and the messenger.
func (s *Service) SetP2PServer(server *p2p.Server) {
	s.server = server
}
|
||||
|
||||
// Start is run when a service is started.
// It does nothing in this case but is required by `node.Service` interface.
func (s *Service) Start() error {
	return nil
}
|
||||
|
||||
// Stop is run when a service is stopped.
|
||||
func (s *Service) Stop() error {
|
||||
log.Info("Stopping shhext service")
|
||||
if s.cancelMessenger != nil {
|
||||
select {
|
||||
case <-s.cancelMessenger:
|
||||
// channel already closed
|
||||
default:
|
||||
close(s.cancelMessenger)
|
||||
s.cancelMessenger = nil
|
||||
}
|
||||
}
|
||||
|
||||
if s.messenger != nil {
|
||||
if err := s.messenger.Shutdown(); err != nil {
|
||||
log.Error("failed to stop messenger", "err", err)
|
||||
return err
|
||||
}
|
||||
s.messenger = nil
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// buildMessengerOptions assembles the protocol.Option list used to build
// the Messenger from the node config, databases and supporting services.
// Settings-dependent options (anon metrics, telemetry, push notifications,
// transaction verification) are appended conditionally. Returns an error
// when settings cannot be loaded or anon-metrics keys cannot be decoded.
func buildMessengerOptions(
	config params.NodeConfig,
	identity *ecdsa.PrivateKey,
	appDb *sql.DB,
	walletDb *sql.DB,
	httpServer *server.MediaServer,
	rpcClient *rpc.Client,
	multiAccounts *multiaccounts.Database,
	account *multiaccounts.Account,
	envelopesMonitorConfig *transport.EnvelopesMonitorConfig,
	accountsDB *accounts.Database,
	walletService *wallet.Service,
	communityTokensService *communitytokens.Service,
	wakuService *wakuv2.Waku,
	logger *zap.Logger,
	messengerSignalsHandler protocol.MessengerSignalsHandler,
	accountManager account.Manager,
) ([]protocol.Option, error) {
	// Unconditional options, applied for every messenger instance.
	options := []protocol.Option{
		protocol.WithCustomLogger(logger),
		protocol.WithPushNotifications(),
		protocol.WithDatabase(appDb),
		protocol.WithWalletDatabase(walletDb),
		protocol.WithMultiAccounts(multiAccounts),
		protocol.WithMailserversDatabase(mailserversDB.NewDB(appDb)),
		protocol.WithAccount(account),
		protocol.WithBrowserDatabase(browsers.NewDB(appDb)),
		protocol.WithEnvelopesMonitorConfig(envelopesMonitorConfig),
		protocol.WithSignalsHandler(messengerSignalsHandler),
		protocol.WithENSVerificationConfig(config.ShhextConfig.VerifyENSURL, config.ShhextConfig.VerifyENSContractAddress),
		protocol.WithClusterConfig(config.ClusterConfig),
		protocol.WithTorrentConfig(&config.TorrentConfig),
		protocol.WithHTTPServer(httpServer),
		protocol.WithRPCClient(rpcClient),
		protocol.WithMessageCSV(config.OutputMessageCSVEnabled),
		protocol.WithWalletConfig(&config.WalletConfig),
		protocol.WithWalletService(walletService),
		protocol.WithCommunityTokensService(communityTokensService),
		protocol.WithWakuService(wakuService),
		protocol.WithAccountManager(accountManager),
	}

	if config.ShhextConfig.DataSyncEnabled {
		options = append(options, protocol.WithDatasync())
	}

	// Missing settings row is tolerated (zero-value settings are used below).
	settings, err := accountsDB.GetSettings()
	if err != sql.ErrNoRows && err != nil {
		return nil, err
	}

	// Generate anon metrics client config
	if settings.AnonMetricsShouldSend {
		keyBytes, err := hex.DecodeString(config.ShhextConfig.AnonMetricsSendID)
		if err != nil {
			return nil, err
		}

		key, err := crypto.UnmarshalPubkey(keyBytes)
		if err != nil {
			return nil, err
		}

		amcc := &anonmetrics.ClientConfig{
			ShouldSend:  true,
			SendAddress: key,
		}
		options = append(options, protocol.WithAnonMetricsClientConfig(amcc))
	}

	// Generate anon metrics server config
	if config.ShhextConfig.AnonMetricsServerEnabled {
		if len(config.ShhextConfig.AnonMetricsServerPostgresURI) == 0 {
			return nil, errors.New("AnonMetricsServerPostgresURI must be set")
		}

		amsc := &anonmetrics.ServerConfig{
			Enabled:     true,
			PostgresURI: config.ShhextConfig.AnonMetricsServerPostgresURI,
		}
		options = append(options, protocol.WithAnonMetricsServerConfig(amsc))
	}

	if settings.TelemetryServerURL != "" {
		options = append(options, protocol.WithTelemetry(settings.TelemetryServerURL))
	}

	if settings.PushNotificationsServerEnabled {
		config := &pushnotificationserver.Config{
			Enabled: true,
			Logger:  logger,
		}
		options = append(options, protocol.WithPushNotificationServerConfig(config))
	}

	// Collect the default push notification server keys from the config.
	var pushNotifServKey []*ecdsa.PublicKey
	for _, d := range config.ShhextConfig.DefaultPushNotificationsServers {
		pushNotifServKey = append(pushNotifServKey, d.PublicKey)
	}

	options = append(options, protocol.WithPushNotificationClientConfig(&pushnotificationclient.Config{
		DefaultServers:             pushNotifServKey,
		BlockMentions:              settings.PushNotificationsBlockMentions,
		SendEnabled:                settings.SendPushNotifications,
		AllowFromContactsOnly:      settings.PushNotificationsFromContactsOnly,
		RemoteNotificationsEnabled: settings.RemotePushNotificationsEnabled,
	}))

	if config.ShhextConfig.VerifyTransactionURL != "" {
		client := &verifyTransactionClient{
			url:     config.ShhextConfig.VerifyTransactionURL,
			chainID: big.NewInt(config.ShhextConfig.VerifyTransactionChainID),
		}
		options = append(options, protocol.WithVerifyTransactionClient(client))
	}

	return options, nil
}
|
||||
|
||||
func (s *Service) ConnectionChanged(state connection.State) {
|
||||
if s.messenger != nil {
|
||||
s.messenger.ConnectionChanged(state)
|
||||
}
|
||||
}
|
||||
|
||||
// Messenger returns the underlying protocol messenger (nil until InitProtocol ran).
func (s *Service) Messenger() *protocol.Messenger {
	return s.messenger
}
|
||||
|
||||
func tokenURIToCommunityID(tokenURI string) string {
|
||||
tmpStr := strings.Split(tokenURI, "/")
|
||||
|
||||
// Community NFTs have a tokenURI of the form "compressedCommunityID/tokenID"
|
||||
if len(tmpStr) != 2 {
|
||||
return ""
|
||||
}
|
||||
compressedCommunityID := tmpStr[0]
|
||||
|
||||
hexCommunityID, err := multiformat.DeserializeCompressedKey(compressedCommunityID)
|
||||
if err != nil {
|
||||
return ""
|
||||
}
|
||||
|
||||
pubKey, err := common.HexToPubkey(hexCommunityID)
|
||||
if err != nil {
|
||||
return ""
|
||||
}
|
||||
|
||||
communityID := types.EncodeHex(crypto.CompressPubkey(pubKey))
|
||||
|
||||
return communityID
|
||||
}
|
||||
|
||||
func (s *Service) GetCommunityID(tokenURI string) string {
|
||||
if tokenURI != "" {
|
||||
return tokenURIToCommunityID(tokenURI)
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
// FillCollectibleMetadata populates a community collectible's metadata
// (collectible data, collection data, community info, privileges level)
// from the locally stored community description. It requires the messenger
// to be ready and the collectible to carry a community ID; it silently
// returns nil when the community or its token metadata cannot be found.
func (s *Service) FillCollectibleMetadata(collectible *thirdparty.FullCollectibleData) error {
	if s.messenger == nil {
		return fmt.Errorf("messenger not ready")
	}

	if collectible == nil {
		return fmt.Errorf("empty collectible")
	}

	id := collectible.CollectibleData.ID
	communityID := collectible.CollectibleData.CommunityID

	if communityID == "" {
		return fmt.Errorf("invalid communityID")
	}

	// FetchCommunityInfo should have been previously called once to ensure
	// that the latest version of the CommunityDescription is available in the DB
	community, err := s.fetchCommunity(communityID, false)

	if err != nil {
		return err
	}

	// Unknown community: nothing to fill, not an error.
	if community == nil {
		return nil
	}

	tokenMetadata, err := s.fetchCommunityCollectibleMetadata(community, id.ContractID)

	if err != nil {
		return err
	}

	// Contract not registered as a community token: nothing to fill.
	if tokenMetadata == nil {
		return nil
	}

	communityToken, err := s.fetchCommunityToken(communityID, id.ContractID)
	if err != nil {
		return err
	}

	permission := fetchCommunityCollectiblePermission(community, id)

	// Default to community-level privileges when no permission matches.
	privilegesLevel := token.CommunityLevel
	if permission != nil {
		privilegesLevel = permissionTypeToPrivilegesLevel(permission.GetType())
	}

	// Image decode failures are tolerated; imagePayload is then empty.
	imagePayload, _ := images.GetPayloadFromURI(tokenMetadata.GetImage())

	collectible.CollectibleData.ContractType = w_common.ContractTypeERC721
	collectible.CollectibleData.Provider = providerID
	collectible.CollectibleData.Name = tokenMetadata.GetName()
	collectible.CollectibleData.Description = tokenMetadata.GetDescription()
	collectible.CollectibleData.ImagePayload = imagePayload
	collectible.CollectibleData.Traits = getCollectibleCommunityTraits(communityToken)

	if collectible.CollectionData == nil {
		collectible.CollectionData = &thirdparty.CollectionData{
			ID:          id.ContractID,
			CommunityID: communityID,
		}
	}
	collectible.CollectionData.ContractType = w_common.ContractTypeERC721
	collectible.CollectionData.Provider = providerID
	collectible.CollectionData.Name = tokenMetadata.GetName()
	collectible.CollectionData.ImagePayload = imagePayload

	collectible.CommunityInfo = communityToInfo(community)

	collectible.CollectibleCommunityInfo = &thirdparty.CollectibleCommunityInfo{
		PrivilegesLevel: privilegesLevel,
	}

	return nil
}
|
||||
|
||||
func permissionTypeToPrivilegesLevel(permissionType protobuf.CommunityTokenPermission_Type) token.PrivilegesLevel {
|
||||
switch permissionType {
|
||||
case protobuf.CommunityTokenPermission_BECOME_TOKEN_OWNER:
|
||||
return token.OwnerLevel
|
||||
case protobuf.CommunityTokenPermission_BECOME_TOKEN_MASTER:
|
||||
return token.MasterLevel
|
||||
default:
|
||||
return token.CommunityLevel
|
||||
}
|
||||
}
|
||||
|
||||
func communityToInfo(community *communities.Community) *thirdparty.CommunityInfo {
|
||||
if community == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
return &thirdparty.CommunityInfo{
|
||||
CommunityName: community.Name(),
|
||||
CommunityColor: community.Color(),
|
||||
CommunityImagePayload: fetchCommunityImage(community),
|
||||
}
|
||||
}
|
||||
|
||||
func (s *Service) FetchCommunityInfo(communityID string) (*thirdparty.CommunityInfo, error) {
|
||||
community, err := s.fetchCommunity(communityID, true)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return communityToInfo(community), nil
|
||||
}
|
||||
|
||||
func (s *Service) fetchCommunity(communityID string, fetchLatest bool) (*communities.Community, error) {
|
||||
if s.messenger == nil {
|
||||
return nil, fmt.Errorf("messenger not ready")
|
||||
}
|
||||
|
||||
// Try to fetch metadata from Messenger communities
|
||||
|
||||
// TODO: we need the shard information in the collectible to be able to retrieve info for
|
||||
// communities that have specific shards
|
||||
|
||||
if fetchLatest {
|
||||
// Try to fetch the latest version of the Community
|
||||
var shard *shard.Shard = nil // TODO: build this with info from token
|
||||
// NOTE: The community returned by this function will be nil if
|
||||
// the version we have in the DB is the latest available.
|
||||
_, err := s.messenger.FetchCommunity(&protocol.FetchCommunityRequest{
|
||||
CommunityKey: communityID,
|
||||
Shard: shard,
|
||||
TryDatabase: false,
|
||||
WaitForResponse: true,
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
// Get the latest successfully fetched version of the Community
|
||||
community, err := s.messenger.FindCommunityInfoFromDB(communityID)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return community, nil
|
||||
}
|
||||
|
||||
func (s *Service) fetchCommunityToken(communityID string, contractID thirdparty.ContractID) (*token.CommunityToken, error) {
|
||||
if s.messenger == nil {
|
||||
return nil, fmt.Errorf("messenger not ready")
|
||||
}
|
||||
|
||||
return s.messenger.GetCommunityToken(communityID, int(contractID.ChainID), contractID.Address.String())
|
||||
}
|
||||
|
||||
func (s *Service) fetchCommunityCollectibleMetadata(community *communities.Community, contractID thirdparty.ContractID) (*protobuf.CommunityTokenMetadata, error) {
|
||||
tokensMetadata := community.CommunityTokensMetadata()
|
||||
|
||||
for _, tokenMetadata := range tokensMetadata {
|
||||
contractAddresses := tokenMetadata.GetContractAddresses()
|
||||
if contractAddresses[uint64(contractID.ChainID)] == contractID.Address.Hex() {
|
||||
return tokenMetadata, nil
|
||||
}
|
||||
}
|
||||
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
func tokenCriterionContainsCollectible(tokenCriterion *protobuf.TokenCriteria, id thirdparty.CollectibleUniqueID) bool {
|
||||
// Check if token type matches
|
||||
if tokenCriterion.Type != protobuf.CommunityTokenType_ERC721 {
|
||||
return false
|
||||
}
|
||||
|
||||
for chainID, contractAddressStr := range tokenCriterion.ContractAddresses {
|
||||
if chainID != uint64(id.ContractID.ChainID) {
|
||||
continue
|
||||
}
|
||||
|
||||
contractAddress := commongethtypes.HexToAddress(contractAddressStr)
|
||||
if contractAddress != id.ContractID.Address {
|
||||
continue
|
||||
}
|
||||
|
||||
if len(tokenCriterion.TokenIds) == 0 {
|
||||
return true
|
||||
}
|
||||
|
||||
for _, tokenID := range tokenCriterion.TokenIds {
|
||||
tokenIDBigInt := new(big.Int).SetUint64(tokenID)
|
||||
if id.TokenID.Cmp(tokenIDBigInt) == 0 {
|
||||
return true
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
func permissionContainsCollectible(permission *communities.CommunityTokenPermission, id thirdparty.CollectibleUniqueID) bool {
|
||||
// See if any token criterion contains the collectible we're looking for
|
||||
for _, tokenCriterion := range permission.TokenCriteria {
|
||||
if tokenCriterionContainsCollectible(tokenCriterion, id) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// fetchCommunityCollectiblePermission returns the first community token
// permission (of the token-owner or token-master types, checked in that order)
// whose criteria cover the given collectible, or nil when none matches.
func fetchCommunityCollectiblePermission(community *communities.Community, id thirdparty.CollectibleUniqueID) *communities.CommunityTokenPermission {
	// Permission types of interest
	permissionTypes := []protobuf.CommunityTokenPermission_Type{
		protobuf.CommunityTokenPermission_BECOME_TOKEN_OWNER,
		protobuf.CommunityTokenPermission_BECOME_TOKEN_MASTER,
	}

	for _, permissionType := range permissionTypes {
		permissions := community.TokenPermissionsByType(permissionType)
		// See if any community permission matches the type we're looking for
		for _, permission := range permissions {
			if permissionContainsCollectible(permission, id) {
				return permission
			}
		}
	}

	return nil
}
|
||||
|
||||
func fetchCommunityImage(community *communities.Community) []byte {
|
||||
imageTypes := []string{
|
||||
images.LargeDimName,
|
||||
images.SmallDimName,
|
||||
}
|
||||
|
||||
communityImages := community.Images()
|
||||
|
||||
for _, imageType := range imageTypes {
|
||||
if pbImage, ok := communityImages[imageType]; ok {
|
||||
return pbImage.Payload
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// boolToString renders a boolean as a human-readable "Yes"/"No" trait value.
func boolToString(value bool) string {
	if !value {
		return "No"
	}
	return "Yes"
}
|
||||
|
||||
func getCollectibleCommunityTraits(token *token.CommunityToken) []thirdparty.CollectibleTrait {
|
||||
if token == nil {
|
||||
return make([]thirdparty.CollectibleTrait, 0)
|
||||
}
|
||||
|
||||
totalStr := infinityString
|
||||
availableStr := infinityString
|
||||
if !token.InfiniteSupply {
|
||||
totalStr = token.Supply.String()
|
||||
// TODO: calculate available supply. See services/communitytokens/api.go
|
||||
availableStr = totalStr
|
||||
}
|
||||
|
||||
transferableStr := boolToString(token.Transferable)
|
||||
|
||||
destructibleStr := boolToString(token.RemoteSelfDestruct)
|
||||
|
||||
return []thirdparty.CollectibleTrait{
|
||||
{
|
||||
TraitType: "Symbol",
|
||||
Value: token.Symbol,
|
||||
},
|
||||
{
|
||||
TraitType: "Total",
|
||||
Value: totalStr,
|
||||
},
|
||||
{
|
||||
TraitType: "Available",
|
||||
Value: availableStr,
|
||||
},
|
||||
{
|
||||
TraitType: "Transferable",
|
||||
Value: transferableStr,
|
||||
},
|
||||
{
|
||||
TraitType: "Destructible",
|
||||
Value: destructibleStr,
|
||||
},
|
||||
}
|
||||
}
|
||||
186
vendor/github.com/status-im/status-go/services/ext/signal.go
generated
vendored
Normal file
186
vendor/github.com/status-im/status-go/services/ext/signal.go
generated
vendored
Normal file
@@ -0,0 +1,186 @@
|
||||
package ext
|
||||
|
||||
import (
|
||||
"github.com/status-im/status-go/eth-node/types"
|
||||
"github.com/status-im/status-go/protocol"
|
||||
"github.com/status-im/status-go/protocol/communities"
|
||||
"github.com/status-im/status-go/protocol/discord"
|
||||
"github.com/status-im/status-go/protocol/wakusync"
|
||||
"github.com/status-im/status-go/signal"
|
||||
)
|
||||
|
||||
// EnvelopeSignalHandler sends signals when envelope is sent or expired.
type EnvelopeSignalHandler struct{}

// EnvelopeSent triggered when envelope delivered at least to 1 peer.
func (h EnvelopeSignalHandler) EnvelopeSent(identifiers [][]byte) {
	signal.SendEnvelopeSent(identifiers)
}

// EnvelopeExpired triggered when envelope is expired but wasn't delivered to any peer.
func (h EnvelopeSignalHandler) EnvelopeExpired(identifiers [][]byte, err error) {
	signal.SendEnvelopeExpired(identifiers, err)
}

// MailServerRequestCompleted triggered when the mailserver sends a message to notify that the request has been completed.
func (h EnvelopeSignalHandler) MailServerRequestCompleted(requestID types.Hash, lastEnvelopeHash types.Hash, cursor []byte, err error) {
	signal.SendMailServerRequestCompleted(requestID, lastEnvelopeHash, cursor, err)
}

// MailServerRequestExpired triggered when the mailserver request expires.
func (h EnvelopeSignalHandler) MailServerRequestExpired(hash types.Hash) {
	signal.SendMailServerRequestExpired(hash)
}
|
||||
|
||||
// PublisherSignalHandler sends signals on protocol events.
type PublisherSignalHandler struct{}

// DecryptMessageFailed signals that a message from pubKey could not be decrypted.
func (h PublisherSignalHandler) DecryptMessageFailed(pubKey string) {
	signal.SendDecryptMessageFailed(pubKey)
}

// BundleAdded signals that a bundle was added for the given identity/installation.
func (h PublisherSignalHandler) BundleAdded(identity string, installationID string) {
	signal.SendBundleAdded(identity, installationID)
}

// NewMessages forwards a messenger response with new messages to the client.
func (h PublisherSignalHandler) NewMessages(response *protocol.MessengerResponse) {
	signal.SendNewMessages(response)
}

// Stats forwards a stats summary to the client.
func (h PublisherSignalHandler) Stats(stats types.StatsSummary) {
	signal.SendStats(stats)
}
|
||||
|
||||
// MessengerSignalsHandler sends signals on messenger events.
//
// NOTE(review): the first three methods use a value receiver while the rest use
// a pointer receiver — confirm which method set callers rely on before unifying.
type MessengerSignalsHandler struct{}

// MessageDelivered passes information that message was delivered.
func (m MessengerSignalsHandler) MessageDelivered(chatID string, messageID string) {
	signal.SendMessageDelivered(chatID, messageID)
}

// BackupPerformed passes information that a backup was performed.
func (m MessengerSignalsHandler) BackupPerformed(lastBackup uint64) {
	signal.SendBackupPerformed(lastBackup)
}

// CommunityInfoFound passes info about community that was requested before.
func (m MessengerSignalsHandler) CommunityInfoFound(community *communities.Community) {
	signal.SendCommunityInfoFound(community)
}

// MessengerResponse forwards the response through the publisher's new-messages signal.
func (m *MessengerSignalsHandler) MessengerResponse(response *protocol.MessengerResponse) {
	PublisherSignalHandler{}.NewMessages(response)
}

// HistoryRequestStarted signals that a historic-messages request of numBatches batches began.
func (m *MessengerSignalsHandler) HistoryRequestStarted(numBatches int) {
	signal.SendHistoricMessagesRequestStarted(numBatches)
}

// HistoryRequestCompleted signals that a historic-messages request finished.
func (m *MessengerSignalsHandler) HistoryRequestCompleted() {
	signal.SendHistoricMessagesRequestCompleted()
}

// HistoryArchivesProtocolEnabled signals that the history archives protocol was enabled.
func (m *MessengerSignalsHandler) HistoryArchivesProtocolEnabled() {
	signal.SendHistoryArchivesProtocolEnabled()
}

// HistoryArchivesProtocolDisabled signals that the history archives protocol was disabled.
func (m *MessengerSignalsHandler) HistoryArchivesProtocolDisabled() {
	signal.SendHistoryArchivesProtocolDisabled()
}

// CreatingHistoryArchives signals that archive creation started for the community.
func (m *MessengerSignalsHandler) CreatingHistoryArchives(communityID string) {
	signal.SendCreatingHistoryArchives(communityID)
}

// NoHistoryArchivesCreated signals that no archives were created for the given range.
func (m *MessengerSignalsHandler) NoHistoryArchivesCreated(communityID string, from int, to int) {
	signal.SendNoHistoryArchivesCreated(communityID, from, to)
}

// HistoryArchivesCreated signals that archives were created for the given range.
func (m *MessengerSignalsHandler) HistoryArchivesCreated(communityID string, from int, to int) {
	signal.SendHistoryArchivesCreated(communityID, from, to)
}

// HistoryArchivesSeeding signals that the community's archives are being seeded.
func (m *MessengerSignalsHandler) HistoryArchivesSeeding(communityID string) {
	signal.SendHistoryArchivesSeeding(communityID)
}

// HistoryArchivesUnseeded signals that the community's archives stopped seeding.
func (m *MessengerSignalsHandler) HistoryArchivesUnseeded(communityID string) {
	signal.SendHistoryArchivesUnseeded(communityID)
}

// HistoryArchiveDownloaded signals that an archive for the given range was downloaded.
func (m *MessengerSignalsHandler) HistoryArchiveDownloaded(communityID string, from int, to int) {
	signal.SendHistoryArchiveDownloaded(communityID, from, to)
}

// DownloadingHistoryArchivesStarted signals that archive downloading began.
func (m *MessengerSignalsHandler) DownloadingHistoryArchivesStarted(communityID string) {
	signal.SendDownloadingHistoryArchivesStarted(communityID)
}

// ImportingHistoryArchiveMessages signals that archived messages are being imported.
func (m *MessengerSignalsHandler) ImportingHistoryArchiveMessages(communityID string) {
	signal.SendImportingHistoryArchiveMessages(communityID)
}

// DownloadingHistoryArchivesFinished signals that archive downloading finished.
func (m *MessengerSignalsHandler) DownloadingHistoryArchivesFinished(communityID string) {
	signal.SendDownloadingHistoryArchivesFinished(communityID)
}

// StatusUpdatesTimedOut forwards user status updates that timed out.
func (m *MessengerSignalsHandler) StatusUpdatesTimedOut(statusUpdates *[]protocol.UserStatus) {
	signal.SendStatusUpdatesTimedOut(statusUpdates)
}

// DiscordCategoriesAndChannelsExtracted forwards the result of extracting Discord import data.
func (m *MessengerSignalsHandler) DiscordCategoriesAndChannelsExtracted(categories []*discord.Category, channels []*discord.Channel, oldestMessageTimestamp int64, errors map[string]*discord.ImportError) {
	signal.SendDiscordCategoriesAndChannelsExtracted(categories, channels, oldestMessageTimestamp, errors)
}

// DiscordCommunityImportProgress forwards progress of a Discord community import.
func (m *MessengerSignalsHandler) DiscordCommunityImportProgress(importProgress *discord.ImportProgress) {
	signal.SendDiscordCommunityImportProgress(importProgress)
}

// DiscordChannelImportProgress forwards progress of a Discord channel import.
func (m *MessengerSignalsHandler) DiscordChannelImportProgress(importProgress *discord.ImportProgress) {
	signal.SendDiscordChannelImportProgress(importProgress)
}

// DiscordCommunityImportFinished signals that a Discord community import finished.
func (m *MessengerSignalsHandler) DiscordCommunityImportFinished(id string) {
	signal.SendDiscordCommunityImportFinished(id)
}

// DiscordChannelImportFinished signals that a Discord channel import finished.
func (m *MessengerSignalsHandler) DiscordChannelImportFinished(communityID string, channelID string) {
	signal.SendDiscordChannelImportFinished(communityID, channelID)
}

// DiscordCommunityImportCancelled signals that a Discord community import was cancelled.
func (m *MessengerSignalsHandler) DiscordCommunityImportCancelled(id string) {
	signal.SendDiscordCommunityImportCancelled(id)
}

// DiscordCommunityImportCleanedUp signals that a Discord community import was cleaned up.
func (m *MessengerSignalsHandler) DiscordCommunityImportCleanedUp(id string) {
	signal.SendDiscordCommunityImportCleanedUp(id)
}

// DiscordChannelImportCancelled signals that a Discord channel import was cancelled.
func (m *MessengerSignalsHandler) DiscordChannelImportCancelled(id string) {
	signal.SendDiscordChannelImportCancelled(id)
}

// SendWakuFetchingBackupProgress forwards progress of fetching a Waku backup.
func (m *MessengerSignalsHandler) SendWakuFetchingBackupProgress(response *wakusync.WakuBackedUpDataResponse) {
	signal.SendWakuFetchingBackupProgress(response)
}

// SendWakuBackedUpProfile forwards a profile restored from a Waku backup.
func (m *MessengerSignalsHandler) SendWakuBackedUpProfile(response *wakusync.WakuBackedUpDataResponse) {
	signal.SendWakuBackedUpProfile(response)
}

// SendWakuBackedUpSettings forwards settings restored from a Waku backup.
func (m *MessengerSignalsHandler) SendWakuBackedUpSettings(response *wakusync.WakuBackedUpDataResponse) {
	signal.SendWakuBackedUpSettings(response)
}

// SendWakuBackedUpKeypair forwards a keypair restored from a Waku backup.
func (m *MessengerSignalsHandler) SendWakuBackedUpKeypair(response *wakusync.WakuBackedUpDataResponse) {
	signal.SendWakuBackedUpKeypair(response)
}

// SendWakuBackedUpWatchOnlyAccount forwards a watch-only account restored from a Waku backup.
func (m *MessengerSignalsHandler) SendWakuBackedUpWatchOnlyAccount(response *wakusync.WakuBackedUpDataResponse) {
	signal.SendWakuBackedUpWatchOnlyAccount(response)
}

// SendCuratedCommunitiesUpdate forwards an update of the known curated communities.
func (m *MessengerSignalsHandler) SendCuratedCommunitiesUpdate(response *communities.KnownCommunitiesResponse) {
	signal.SendCuratedCommunitiesUpdate(response)
}
|
||||
Reference in New Issue
Block a user