feat: Waku v2 bridge

Issue #12610
Michal Iskierko
2023-11-12 13:29:38 +01:00
parent 56e7bd01ca
commit 6d31343205
6716 changed files with 1982502 additions and 5891 deletions


@@ -0,0 +1,342 @@
package async
import (
"context"
"sync"
"time"
"github.com/ethereum/go-ethereum/log"
)
type Command func(context.Context) error
type Commander interface {
Command(interval ...time.Duration) Command
}
type Runner interface {
Run(context.Context) error
}
// SingleShotCommand runs Runable once, after Interval has elapsed, and then returns.
type SingleShotCommand struct {
Interval time.Duration
Init func(context.Context) error
Runable func(context.Context) error
}
func (c SingleShotCommand) Run(ctx context.Context) error {
timer := time.NewTimer(c.Interval)
if c.Init != nil {
err := c.Init(ctx)
if err != nil {
return err
}
}
for {
select {
case <-ctx.Done():
return ctx.Err()
case <-timer.C:
_ = c.Runable(ctx)
// return after the single run so that Run terminates
return nil
}
}
}
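// Usage sketch (illustrative; loadState and publishOnce are hypothetical
// func(context.Context) error handlers): the command waits Interval, runs
// Runable once, and returns.
//
//	cmd := SingleShotCommand{
//		Interval: 10 * time.Second,
//		Init:     loadState,
//		Runable:  publishOnce,
//	}
//	err := cmd.Run(ctx) // nil after the single run, or ctx.Err() if canceled first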
// FiniteCommand retries Runable every Interval and terminates once it returns nil.
type FiniteCommand struct {
Interval time.Duration
Runable func(context.Context) error
}
func (c FiniteCommand) Run(ctx context.Context) error {
err := c.Runable(ctx)
if err == nil {
return nil
}
ticker := time.NewTicker(c.Interval)
for {
select {
case <-ctx.Done():
return ctx.Err()
case <-ticker.C:
err := c.Runable(ctx)
if err == nil {
return nil
}
}
}
}
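// Usage sketch (illustrative; fetchBlocks is a hypothetical
// func(context.Context) error): FiniteCommand retries Runable every Interval
// until it succeeds, which suits operations that must eventually complete.
//
//	cmd := FiniteCommand{
//		Interval: 5 * time.Second,
//		Runable:  fetchBlocks,
//	}
//	err := cmd.Run(ctx) // nil after the first successful attempt, or ctx.Err()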
// InfiniteCommand runs Runable every Interval until the context is canceled.
type InfiniteCommand struct {
Interval time.Duration
Runable func(context.Context) error
}
func (c InfiniteCommand) Run(ctx context.Context) error {
_ = c.Runable(ctx)
ticker := time.NewTicker(c.Interval)
for {
select {
case <-ctx.Done():
return ctx.Err()
case <-ticker.C:
_ = c.Runable(ctx)
}
}
}
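// Usage sketch (illustrative; pollBalance is a hypothetical
// func(context.Context) error): InfiniteCommand ignores Runable's error and
// keeps running every Interval, so Run only returns once the context is canceled.
//
//	cmd := InfiniteCommand{Interval: time.Minute, Runable: pollBalance}
//	err := cmd.Run(ctx) // always ctx.Err()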
func NewGroup(parent context.Context) *Group {
ctx, cancel := context.WithCancel(parent)
return &Group{
ctx: ctx,
cancel: cancel,
}
}
type Group struct {
ctx context.Context
cancel func()
wg sync.WaitGroup
}
func (g *Group) Add(cmd Command) {
g.wg.Add(1)
go func() {
_ = cmd(g.ctx)
g.wg.Done()
}()
}
func (g *Group) Stop() {
g.cancel()
}
func (g *Group) Wait() {
g.wg.Wait()
}
func (g *Group) WaitAsync() <-chan struct{} {
ch := make(chan struct{})
go func() {
g.Wait()
close(ch)
}()
return ch
}
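// Usage sketch (illustrative; syncOnce and poll are hypothetical
// func(context.Context) error handlers): a Group ties several commands to one
// parent context and lets the caller stop and wait for all of them.
//
//	group := NewGroup(context.Background())
//	group.Add(FiniteCommand{Interval: time.Second, Runable: syncOnce}.Run)
//	group.Add(InfiniteCommand{Interval: time.Minute, Runable: poll}.Run)
//	// ... later, during shutdown
//	group.Stop()
//	<-group.WaitAsync()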
func NewAtomicGroup(parent context.Context) *AtomicGroup {
ctx, cancel := context.WithCancel(parent)
ag := &AtomicGroup{ctx: ctx, cancel: cancel}
ag.done = ag.onFinish
return ag
}
// AtomicGroup terminates as soon as the first command returns an error.
type AtomicGroup struct {
ctx context.Context
cancel func()
done func()
wg sync.WaitGroup
mu sync.Mutex
error error
}
type AtomicGroupKey string
func (d *AtomicGroup) SetName(name string) {
d.ctx = context.WithValue(d.ctx, AtomicGroupKey("name"), name)
}
func (d *AtomicGroup) Name() string {
val := d.ctx.Value(AtomicGroupKey("name"))
if val != nil {
return val.(string)
}
return ""
}
// Add spawns the command in a goroutine and records the first error.
func (d *AtomicGroup) Add(cmd Command) {
d.wg.Add(1)
go func() {
defer d.done()
err := cmd(d.ctx)
d.mu.Lock()
defer d.mu.Unlock()
if err != nil {
// do not overwrite the original error with subsequent (e.g. context cancellation) errors
if d.error != nil {
log.Info("async.Command failed", "error", err, "d.error", d.error, "group", d.Name())
return
}
d.error = err
d.cancel()
return
}
}()
}
// Wait blocks until all commands in the group finish.
func (d *AtomicGroup) Wait() {
d.wg.Wait()
if d.Error() == nil {
d.mu.Lock()
defer d.mu.Unlock()
d.cancel()
}
}
func (d *AtomicGroup) WaitAsync() <-chan struct{} {
ch := make(chan struct{})
go func() {
d.Wait()
close(ch)
}()
return ch
}
// Error returns the first error reported by any command in the group. Should be called after Wait.
func (d *AtomicGroup) Error() error {
d.mu.Lock()
defer d.mu.Unlock()
return d.error
}
func (d *AtomicGroup) Stop() {
d.cancel()
}
func (d *AtomicGroup) onFinish() {
d.wg.Done()
}
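// Usage sketch (illustrative; fetchHeaders and fetchReceipts are hypothetical
// Commands): the first command that returns an error cancels the whole group,
// and Error exposes that first error after Wait.
//
//	ag := NewAtomicGroup(context.Background())
//	ag.SetName("example-downloader")
//	ag.Add(fetchHeaders)
//	ag.Add(fetchReceipts) // canceled through the group context if fetchHeaders fails first
//	ag.Wait()
//	if err := ag.Error(); err != nil {
//		log.Error("group failed", "error", err)
//	}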
func NewQueuedAtomicGroup(parent context.Context, limit uint32) *QueuedAtomicGroup {
qag := &QueuedAtomicGroup{NewAtomicGroup(parent), limit, 0, []Command{}, sync.Mutex{}}
baseDoneFunc := qag.done // save original done function
qag.AtomicGroup.done = func() {
baseDoneFunc()
qag.onFinish()
}
return qag
}
type QueuedAtomicGroup struct {
*AtomicGroup
limit uint32
count uint32
pendingCmds []Command
mu sync.Mutex
}
func (d *QueuedAtomicGroup) Add(cmd Command) {
d.mu.Lock()
if d.limit > 0 && d.count >= d.limit {
d.pendingCmds = append(d.pendingCmds, cmd)
d.mu.Unlock()
return
}
d.mu.Unlock()
d.run(cmd)
}
func (d *QueuedAtomicGroup) run(cmd Command) {
d.mu.Lock()
d.count++
d.mu.Unlock()
d.AtomicGroup.Add(cmd)
}
func (d *QueuedAtomicGroup) onFinish() {
d.mu.Lock()
d.count--
if d.count < d.limit && len(d.pendingCmds) > 0 {
cmd := d.pendingCmds[0]
d.pendingCmds = d.pendingCmds[1:]
d.mu.Unlock()
d.run(cmd)
return
}
d.mu.Unlock()
}
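// Usage sketch (illustrative; tasks is a hypothetical []Command):
// QueuedAtomicGroup behaves like AtomicGroup but starts at most `limit`
// commands at once; the rest wait in pendingCmds and run as slots free up.
//
//	qag := NewQueuedAtomicGroup(context.Background(), 2)
//	for _, task := range tasks {
//		qag.Add(task) // the third and later tasks are queued until a running one finishes
//	}
//	qag.Wait()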
func NewErrorCounter(maxErrors int, msg string) *ErrorCounter {
return &ErrorCounter{maxErrors: maxErrors, msg: msg}
}
type ErrorCounter struct {
cnt int
maxErrors int
err error
msg string
}
// SetError records the first error and increments the error counter. It returns false once the counter reaches maxErrors (overflow).
func (ec *ErrorCounter) SetError(err error) bool {
log.Debug("ErrorCounter setError", "msg", ec.msg, "err", err, "cnt", ec.cnt)
ec.cnt++
// do not overwrite the first error
if ec.err == nil {
ec.err = err
}
if ec.cnt >= ec.maxErrors {
log.Error("ErrorCounter overflow", "msg", ec.msg)
return false
}
return true
}
func (ec *ErrorCounter) Error() error {
return ec.err
}
func (ec *ErrorCounter) MaxErrors() int {
return ec.maxErrors
}
type FiniteCommandWithErrorCounter struct {
FiniteCommand
*ErrorCounter
}
func (c FiniteCommandWithErrorCounter) Run(ctx context.Context) error {
f := func(ctx context.Context) (quit bool, err error) {
err = c.Runable(ctx)
if err == nil {
return true, err
}
if c.ErrorCounter.SetError(err) {
return false, err
}
return true, err
}
quit, err := f(ctx)
if quit {
return err
}
ticker := time.NewTicker(c.Interval)
for {
select {
case <-ctx.Done():
return ctx.Err()
case <-ticker.C:
quit, err := f(ctx)
if quit {
return err
}
}
}
}
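// Usage sketch (illustrative; fetchRates is a hypothetical
// func(context.Context) error): FiniteCommandWithErrorCounter retries like
// FiniteCommand but gives up once the ErrorCounter reaches maxErrors failures.
//
//	cmd := FiniteCommandWithErrorCounter{
//		FiniteCommand: FiniteCommand{Interval: 5 * time.Second, Runable: fetchRates},
//		ErrorCounter:  NewErrorCounter(3, "fetch-rates"),
//	}
//	err := cmd.Run(ctx) // nil on success, or the error of the attempt that overflowed the counter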


@@ -0,0 +1,26 @@
package async
type MultiClientScheduler struct {
scheduler *Scheduler
}
func NewMultiClientScheduler() *MultiClientScheduler {
return &MultiClientScheduler{
scheduler: NewScheduler(),
}
}
func (s *MultiClientScheduler) Stop() {
s.scheduler.Stop()
}
func makeTaskType(requestID int32, origTaskType TaskType) TaskType {
return TaskType{
ID: int64(requestID)<<32 | origTaskType.ID,
Policy: origTaskType.Policy,
}
}
func (s *MultiClientScheduler) Enqueue(requestID int32, taskType TaskType, taskFn taskFunction, resFn resultFunction) (ignored bool) {
return s.scheduler.Enqueue(makeTaskType(requestID, taskType), taskFn, resFn)
}
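// Usage sketch (illustrative; fetchFn and resultFn are hypothetical
// taskFunction/resultFunction values): makeTaskType namespaces a task type per
// requestID by packing the ID into the upper 32 bits, so different clients can
// schedule the same logical task type without colliding.
//
//	fetchType := TaskType{ID: 1, Policy: ReplacementPolicyCancelOld}
//	// makeTaskType(7, fetchType).ID == 7<<32|1, makeTaskType(8, fetchType).ID == 8<<32|1
//	s := NewMultiClientScheduler()
//	ignored := s.Enqueue(7, fetchType, fetchFn, resultFn)
//	_ = ignored
//	s.Stop()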


@@ -0,0 +1,202 @@
package async
import (
"context"
"errors"
"fmt"
"sync"
orderedmap "github.com/wk8/go-ordered-map/v2"
)
var ErrTaskOverwritten = errors.New("task overwritten")
type Scheduler struct {
queue *orderedmap.OrderedMap[TaskType, *taskContext]
queueMutex sync.Mutex
context context.Context
cancelFn context.CancelFunc
doNotDeleteCurrentTask bool
}
type ReplacementPolicy = int
const (
// ReplacementPolicyCancelOld for when the task arguments might change the result
ReplacementPolicyCancelOld ReplacementPolicy = iota
// ReplacementPolicyIgnoreNew for when the task arguments don't change the result
ReplacementPolicyIgnoreNew
)
type TaskType struct {
ID int64
Policy ReplacementPolicy
}
type taskFunction func(context.Context) (interface{}, error)
type resultFunction func(interface{}, TaskType, error)
type taskContext struct {
taskType TaskType
policy ReplacementPolicy
taskFn taskFunction
resFn resultFunction
}
func NewScheduler() *Scheduler {
return &Scheduler{
queue: orderedmap.New[TaskType, *taskContext](),
}
}
// Enqueue maintains a queue of task types, allowing only one task of each type at a time. The running task is always the first one in the queue (s.queue.Oldest()).
//
// Scheduling policy for new tasks:
// - appended at the back of the queue (s.queue.Set) if no task of the same type is already scheduled
// - otherwise resolved against the queued task of the same type, depending on the policy:
// - In case of ReplacementPolicyIgnoreNew, the new task is ignored
// - In case of ReplacementPolicyCancelOld, the old task is canceled if already running, or overwritten if still queued, and the new one is executed when its turn comes.
//
// The task function (taskFn) might not be executed if
// - the task is ignored
// - the task is overwritten; the result function (resFn) is then called with ErrTaskOverwritten
//
// The result function (resFn) is always called if the task is not ignored.
// A usage sketch appears at the end of this file.
func (s *Scheduler) Enqueue(taskType TaskType, taskFn taskFunction, resFn resultFunction) (ignored bool) {
s.queueMutex.Lock()
defer s.queueMutex.Unlock()
taskRunning := s.queue.Len() > 0
existingTask, typeInQueue := s.queue.Get(taskType)
newTask := &taskContext{
taskType: taskType,
policy: taskType.Policy,
taskFn: taskFn,
resFn: resFn,
}
if taskRunning {
if typeInQueue {
if s.queue.Oldest().Value.taskType == taskType {
// If same task type is running
if existingTask.policy == ReplacementPolicyCancelOld {
// If a previous task is running, cancel it
if s.cancelFn != nil {
s.cancelFn()
s.cancelFn = nil
} else {
// In case of multiple tasks of the same type, the previous one is overwritten
go func() {
existingTask.resFn(nil, existingTask.taskType, ErrTaskOverwritten)
}()
}
s.doNotDeleteCurrentTask = true
// Add it again to refresh the order of the task
s.queue.Delete(taskType)
s.queue.Set(taskType, newTask)
} else {
ignored = true
}
} else {
// if another task type is running,
// notify the queued task of this type that it is overwritten or ignored
if existingTask.policy == ReplacementPolicyCancelOld {
go func() {
existingTask.resFn(nil, existingTask.taskType, ErrTaskOverwritten)
}()
// Overwrite the queued one of the same type
existingTask.taskFn = taskFn
existingTask.resFn = resFn
} else {
ignored = true
}
}
} else {
// Policy does not matter for the first enqueued task of a type
s.queue.Set(taskType, newTask)
}
} else {
// If no task is running, add and run it. finishedTask takes care of scheduling tasks queued while this one runs
s.queue.Set(taskType, newTask)
existingTask = newTask
s.runTask(existingTask, taskFn, func(res interface{}, runningTask *taskContext, err error) {
s.finishedTask(res, runningTask, resFn, err)
})
}
return ignored
}
func (s *Scheduler) runTask(tc *taskContext, taskFn taskFunction, resFn func(interface{}, *taskContext, error)) {
thisContext, thisCancelFn := context.WithCancel(context.Background())
s.cancelFn = thisCancelFn
s.context = thisContext
go func() {
res, err := taskFn(thisContext)
// Release context resources
thisCancelFn()
if errors.Is(err, context.Canceled) {
resFn(res, tc, fmt.Errorf("task canceled: %w", err))
} else {
resFn(res, tc, err)
}
}()
}
// finishedTask is the only place a task is removed from the queue.
// The completed task is deleted unless it was overwritten while running (doNotDeleteCurrentTask is true); afterwards the next queued task, if any, is started.
func (s *Scheduler) finishedTask(finishedRes interface{}, doneTask *taskContext, finishedResFn resultFunction, finishedErr error) {
s.queueMutex.Lock()
// We always have a running task
current := s.queue.Oldest()
// Delete current task if not overwritten
if s.doNotDeleteCurrentTask {
s.doNotDeleteCurrentTask = false
} else {
s.queue.Delete(current.Value.taskType)
}
// Run next task
if pair := s.queue.Oldest(); pair != nil {
nextTask := pair.Value
s.runTask(nextTask, nextTask.taskFn, func(res interface{}, runningTask *taskContext, err error) {
s.finishedTask(res, runningTask, runningTask.resFn, err)
})
} else {
s.cancelFn = nil
}
s.queueMutex.Unlock()
// Report result
finishedResFn(finishedRes, doneTask.taskType, finishedErr)
}
func (s *Scheduler) Stop() {
s.queueMutex.Lock()
defer s.queueMutex.Unlock()
if s.cancelFn != nil {
s.cancelFn()
s.cancelFn = nil
}
// Empty the queue so the running task will not be restarted
for pair := s.queue.Oldest(); pair != nil; pair = s.queue.Oldest() {
// Notify the queued tasks that they are canceled
if pair.Value.policy == ReplacementPolicyCancelOld {
task := pair.Value // capture the value; the loop variable is reused and the goroutine may run later
go func() {
task.resFn(nil, task.taskType, context.Canceled)
}()
}
// Deleting the current pair may invalidate its Next link, so the loop header re-reads s.queue.Oldest() each iteration
s.queue.Delete(pair.Value.taskType)
}
}
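// Usage sketch (illustrative; loadTxFn is a hypothetical taskFunction and
// renderTx a hypothetical rendering helper): only one task per TaskType runs
// at a time. Re-enqueueing a ReplacementPolicyCancelOld type cancels or
// overwrites the previous task of that type; a ReplacementPolicyIgnoreNew type
// keeps the old task and reports ignored == true for the new one.
//
//	scheduler := NewScheduler()
//	loadTx := TaskType{ID: 1, Policy: ReplacementPolicyCancelOld}
//	ignored := scheduler.Enqueue(loadTx, loadTxFn, func(res interface{}, t TaskType, err error) {
//		if err != nil {
//			return // canceled, overwritten (ErrTaskOverwritten), or failed
//		}
//		renderTx(res)
//	})
//	_ = ignored
//	// ... on shutdown
//	scheduler.Stop()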