21
vendor/github.com/libp2p/go-buffer-pool/LICENSE
generated
vendored
Normal file
21
vendor/github.com/libp2p/go-buffer-pool/LICENSE
generated
vendored
Normal file
@@ -0,0 +1,21 @@
|
||||
The MIT License (MIT)
|
||||
|
||||
Copyright (c) 2014 Juan Batiz-Benet
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in
|
||||
all copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
||||
THE SOFTWARE.
|
||||
29
vendor/github.com/libp2p/go-buffer-pool/LICENSE-BSD
generated
vendored
Normal file
29
vendor/github.com/libp2p/go-buffer-pool/LICENSE-BSD
generated
vendored
Normal file
@@ -0,0 +1,29 @@
|
||||
### Applies to buffer.go and buffer_test.go ###
|
||||
|
||||
Copyright (c) 2009 The Go Authors. All rights reserved.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions are
|
||||
met:
|
||||
|
||||
* Redistributions of source code must retain the above copyright
|
||||
notice, this list of conditions and the following disclaimer.
|
||||
* Redistributions in binary form must reproduce the above
|
||||
copyright notice, this list of conditions and the following disclaimer
|
||||
in the documentation and/or other materials provided with the
|
||||
distribution.
|
||||
* Neither the name of Google Inc. nor the names of its
|
||||
contributors may be used to endorse or promote products derived from
|
||||
this software without specific prior written permission.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
53
vendor/github.com/libp2p/go-buffer-pool/README.md
generated
vendored
Normal file
53
vendor/github.com/libp2p/go-buffer-pool/README.md
generated
vendored
Normal file
@@ -0,0 +1,53 @@
|
||||
go-buffer-pool
|
||||
==================
|
||||
|
||||
[](https://protocol.ai)
|
||||
[](https://libp2p.io/)
|
||||
[](https://webchat.freenode.net/?channels=%23libp2p)
|
||||
[](https://codecov.io/gh/libp2p/go-buffer-pool)
|
||||
[](https://travis-ci.org/libp2p/go-buffer-pool)
|
||||
[](https://discuss.libp2p.io)
|
||||
|
||||
> A variable size buffer pool for go.
|
||||
|
||||
## Table of Contents
|
||||
|
||||
- [Use Case](#use-case)
|
||||
- [Advantages over GC](#advantages-over-gc)
|
||||
- [Disadvantages over GC:](#disadvantages-over-gc)
|
||||
- [Contribute](#contribute)
|
||||
- [License](#license)
|
||||
|
||||
## Use Case
|
||||
|
||||
Use this when you need to repeatedly allocate and free a bunch of temporary buffers of approximately the same size.
|
||||
|
||||
### Advantages over GC
|
||||
|
||||
* Reduces Memory Usage:
|
||||
* We don't have to wait for a GC to run before we can reuse memory. This is essential if you're repeatedly allocating large short-lived buffers.
|
||||
|
||||
* Reduces CPU usage:
|
||||
* It takes some load off of the GC (due to buffer reuse).
|
||||
* We don't have to zero buffers (fewer wasteful memory writes).
|
||||
|
||||
### Disadvantages over GC:
|
||||
|
||||
* Can leak memory contents. Unlike the go GC, we *don't* zero memory.
|
||||
* All buffers have a capacity of a power of 2. This is fine if you either (a) actually need buffers with this size or (b) expect these buffers to be temporary.
|
||||
* Requires that buffers be returned explicitly. This can lead to race conditions and memory corruption if the buffer is released while it's still in use.
|
||||
|
||||
## Contribute
|
||||
|
||||
PRs are welcome!
|
||||
|
||||
Small note: If editing the Readme, please conform to the [standard-readme](https://github.com/RichardLitt/standard-readme) specification.
|
||||
|
||||
## License
|
||||
|
||||
MIT © Protocol Labs
|
||||
BSD © The Go Authors
|
||||
|
||||
---
|
||||
|
||||
The last gx published version of this module was: 0.1.3: QmQDvJoB6aJWN3sjr3xsgXqKCXf4jU5zdMXpDMsBkYVNqa
|
||||
302
vendor/github.com/libp2p/go-buffer-pool/buffer.go
generated
vendored
Normal file
302
vendor/github.com/libp2p/go-buffer-pool/buffer.go
generated
vendored
Normal file
@@ -0,0 +1,302 @@
|
||||
// This is a derivitive work of Go's bytes.Buffer implementation.
|
||||
//
|
||||
// Originally copyright 2009 The Go Authors. All rights reserved.
|
||||
//
|
||||
// Modifications copyright 2018 Steven Allen. All rights reserved.
|
||||
//
|
||||
// Use of this source code is governed by both a BSD-style and an MIT-style
|
||||
// license that can be found in the LICENSE_BSD and LICENSE files.
|
||||
|
||||
package pool
|
||||
|
||||
import (
|
||||
"io"
|
||||
)
|
||||
|
||||
// Buffer is a buffer like bytes.Buffer that:
|
||||
//
|
||||
// 1. Uses a buffer pool.
|
||||
// 2. Frees memory on read.
|
||||
//
|
||||
// If you only have a few buffers and read/write at a steady rate, *don't* use
|
||||
// this package, it'll be slower.
|
||||
//
|
||||
// However:
|
||||
//
|
||||
// 1. If you frequently create/destroy buffers, this implementation will be
|
||||
// significantly nicer to the allocator.
|
||||
// 2. If you have many buffers with bursty traffic, this implementation will use
|
||||
// significantly less memory.
|
||||
type Buffer struct {
|
||||
// Pool is the buffer pool to use. If nil, this Buffer will use the
|
||||
// global buffer pool.
|
||||
Pool *BufferPool
|
||||
|
||||
buf []byte
|
||||
rOff int
|
||||
|
||||
// Preallocated slice for samll reads/writes.
|
||||
// This is *really* important for performance and only costs 8 words.
|
||||
bootstrap [64]byte
|
||||
}
|
||||
|
||||
// NewBuffer constructs a new buffer initialized to `buf`.
|
||||
// Unlike `bytes.Buffer`, we *copy* the buffer but don't reuse it (to ensure
|
||||
// that we *only* use buffers from the pool).
|
||||
func NewBuffer(buf []byte) *Buffer {
|
||||
b := new(Buffer)
|
||||
if len(buf) > 0 {
|
||||
b.buf = b.getBuf(len(buf))
|
||||
copy(b.buf, buf)
|
||||
}
|
||||
return b
|
||||
}
|
||||
|
||||
// NewBufferString is identical to NewBuffer *except* that it allows one to
|
||||
// initialize the buffer from a string (without having to allocate an
|
||||
// intermediate bytes slice).
|
||||
func NewBufferString(buf string) *Buffer {
|
||||
b := new(Buffer)
|
||||
if len(buf) > 0 {
|
||||
b.buf = b.getBuf(len(buf))
|
||||
copy(b.buf, buf)
|
||||
}
|
||||
return b
|
||||
}
|
||||
|
||||
func (b *Buffer) grow(n int) int {
|
||||
wOff := len(b.buf)
|
||||
bCap := cap(b.buf)
|
||||
|
||||
if bCap >= wOff+n {
|
||||
b.buf = b.buf[:wOff+n]
|
||||
return wOff
|
||||
}
|
||||
|
||||
bSize := b.Len()
|
||||
|
||||
minCap := 2*bSize + n
|
||||
|
||||
// Slide if cap >= minCap.
|
||||
// Reallocate otherwise.
|
||||
if bCap >= minCap {
|
||||
copy(b.buf, b.buf[b.rOff:])
|
||||
} else {
|
||||
// Needs new buffer.
|
||||
newBuf := b.getBuf(minCap)
|
||||
copy(newBuf, b.buf[b.rOff:])
|
||||
b.returnBuf()
|
||||
b.buf = newBuf
|
||||
}
|
||||
|
||||
b.rOff = 0
|
||||
b.buf = b.buf[:bSize+n]
|
||||
return bSize
|
||||
}
|
||||
|
||||
func (b *Buffer) getPool() *BufferPool {
|
||||
if b.Pool == nil {
|
||||
return GlobalPool
|
||||
}
|
||||
return b.Pool
|
||||
}
|
||||
|
||||
func (b *Buffer) returnBuf() {
|
||||
if cap(b.buf) > len(b.bootstrap) {
|
||||
b.getPool().Put(b.buf)
|
||||
}
|
||||
b.buf = nil
|
||||
}
|
||||
|
||||
func (b *Buffer) getBuf(n int) []byte {
|
||||
if n <= len(b.bootstrap) {
|
||||
return b.bootstrap[:n]
|
||||
}
|
||||
return b.getPool().Get(n)
|
||||
}
|
||||
|
||||
// Len returns the number of bytes that can be read from this buffer.
|
||||
func (b *Buffer) Len() int {
|
||||
return len(b.buf) - b.rOff
|
||||
}
|
||||
|
||||
// Cap returns the current capacity of the buffer.
|
||||
//
|
||||
// Note: Buffer *may* re-allocate when writing (or growing by) `n` bytes even if
|
||||
// `Cap() < Len() + n` to avoid excessive copying.
|
||||
func (b *Buffer) Cap() int {
|
||||
return cap(b.buf)
|
||||
}
|
||||
|
||||
// Bytes returns the slice of bytes currently buffered in the Buffer.
|
||||
//
|
||||
// The buffer returned by Bytes is valid until the next call grow, truncate,
|
||||
// read, or write. Really, just don't touch the Buffer until you're done with
|
||||
// the return value of this function.
|
||||
func (b *Buffer) Bytes() []byte {
|
||||
return b.buf[b.rOff:]
|
||||
}
|
||||
|
||||
// String returns the string representation of the buffer.
|
||||
//
|
||||
// It returns `<nil>` the buffer is a nil pointer.
|
||||
func (b *Buffer) String() string {
|
||||
if b == nil {
|
||||
return "<nil>"
|
||||
}
|
||||
return string(b.buf[b.rOff:])
|
||||
}
|
||||
|
||||
// WriteString writes a string to the buffer.
|
||||
//
|
||||
// This function is identical to Write except that it allows one to write a
|
||||
// string directly without allocating an intermediate byte slice.
|
||||
func (b *Buffer) WriteString(buf string) (int, error) {
|
||||
wOff := b.grow(len(buf))
|
||||
return copy(b.buf[wOff:], buf), nil
|
||||
}
|
||||
|
||||
// Truncate truncates the Buffer.
|
||||
//
|
||||
// Panics if `n > b.Len()`.
|
||||
//
|
||||
// This function may free memory by shrinking the internal buffer.
|
||||
func (b *Buffer) Truncate(n int) {
|
||||
if n < 0 || n > b.Len() {
|
||||
panic("truncation out of range")
|
||||
}
|
||||
b.buf = b.buf[:b.rOff+n]
|
||||
b.shrink()
|
||||
}
|
||||
|
||||
// Reset is equivalent to Truncate(0).
|
||||
func (b *Buffer) Reset() {
|
||||
b.returnBuf()
|
||||
b.rOff = 0
|
||||
}
|
||||
|
||||
// ReadByte reads a single byte from the Buffer.
|
||||
func (b *Buffer) ReadByte() (byte, error) {
|
||||
if b.rOff >= len(b.buf) {
|
||||
return 0, io.EOF
|
||||
}
|
||||
c := b.buf[b.rOff]
|
||||
b.rOff++
|
||||
return c, nil
|
||||
}
|
||||
|
||||
// WriteByte writes a single byte to the Buffer.
|
||||
func (b *Buffer) WriteByte(c byte) error {
|
||||
wOff := b.grow(1)
|
||||
b.buf[wOff] = c
|
||||
return nil
|
||||
}
|
||||
|
||||
// Grow grows the internal buffer such that `n` bytes can be written without
|
||||
// reallocating.
|
||||
func (b *Buffer) Grow(n int) {
|
||||
wOff := b.grow(n)
|
||||
b.buf = b.buf[:wOff]
|
||||
}
|
||||
|
||||
// Next is an alternative to `Read` that returns a byte slice instead of taking
|
||||
// one.
|
||||
//
|
||||
// The returned byte slice is valid until the next read, write, grow, or
|
||||
// truncate.
|
||||
func (b *Buffer) Next(n int) []byte {
|
||||
m := b.Len()
|
||||
if m < n {
|
||||
n = m
|
||||
}
|
||||
data := b.buf[b.rOff : b.rOff+n]
|
||||
b.rOff += n
|
||||
return data
|
||||
}
|
||||
|
||||
// Write writes the byte slice to the buffer.
|
||||
func (b *Buffer) Write(buf []byte) (int, error) {
|
||||
wOff := b.grow(len(buf))
|
||||
return copy(b.buf[wOff:], buf), nil
|
||||
}
|
||||
|
||||
// WriteTo copies from the buffer into the given writer until the buffer is
|
||||
// empty.
|
||||
func (b *Buffer) WriteTo(w io.Writer) (int64, error) {
|
||||
if b.rOff < len(b.buf) {
|
||||
n, err := w.Write(b.buf[b.rOff:])
|
||||
b.rOff += n
|
||||
if b.rOff > len(b.buf) {
|
||||
panic("invalid write count")
|
||||
}
|
||||
b.shrink()
|
||||
return int64(n), err
|
||||
}
|
||||
return 0, nil
|
||||
}
|
||||
|
||||
// MinRead is the minimum slice size passed to a Read call by
|
||||
// Buffer.ReadFrom. As long as the Buffer has at least MinRead bytes beyond
|
||||
// what is required to hold the contents of r, ReadFrom will not grow the
|
||||
// underlying buffer.
|
||||
const MinRead = 512
|
||||
|
||||
// ReadFrom reads from the given reader into the buffer.
|
||||
func (b *Buffer) ReadFrom(r io.Reader) (int64, error) {
|
||||
n := int64(0)
|
||||
for {
|
||||
wOff := b.grow(MinRead)
|
||||
// Use *entire* buffer.
|
||||
b.buf = b.buf[:cap(b.buf)]
|
||||
|
||||
read, err := r.Read(b.buf[wOff:])
|
||||
b.buf = b.buf[:wOff+read]
|
||||
n += int64(read)
|
||||
switch err {
|
||||
case nil:
|
||||
case io.EOF:
|
||||
err = nil
|
||||
fallthrough
|
||||
default:
|
||||
b.shrink()
|
||||
return n, err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Read reads at most `len(buf)` bytes from the internal buffer into the given
|
||||
// buffer.
|
||||
func (b *Buffer) Read(buf []byte) (int, error) {
|
||||
if len(buf) == 0 {
|
||||
return 0, nil
|
||||
}
|
||||
if b.rOff >= len(b.buf) {
|
||||
return 0, io.EOF
|
||||
}
|
||||
n := copy(buf, b.buf[b.rOff:])
|
||||
b.rOff += n
|
||||
b.shrink()
|
||||
return n, nil
|
||||
}
|
||||
|
||||
func (b *Buffer) shrink() {
|
||||
c := b.Cap()
|
||||
// Either nil or bootstrap.
|
||||
if c <= len(b.bootstrap) {
|
||||
return
|
||||
}
|
||||
|
||||
l := b.Len()
|
||||
if l == 0 {
|
||||
// Shortcut if empty.
|
||||
b.returnBuf()
|
||||
b.rOff = 0
|
||||
} else if l*8 < c {
|
||||
// Only shrink when capacity > 8x length. Avoids shrinking too aggressively.
|
||||
newBuf := b.getBuf(l)
|
||||
copy(newBuf, b.buf[b.rOff:])
|
||||
b.returnBuf()
|
||||
b.rOff = 0
|
||||
b.buf = newBuf[:l]
|
||||
}
|
||||
}
|
||||
3
vendor/github.com/libp2p/go-buffer-pool/codecov.yml
generated
vendored
Normal file
3
vendor/github.com/libp2p/go-buffer-pool/codecov.yml
generated
vendored
Normal file
@@ -0,0 +1,3 @@
|
||||
coverage:
|
||||
range: "50...100"
|
||||
comment: off
|
||||
117
vendor/github.com/libp2p/go-buffer-pool/pool.go
generated
vendored
Normal file
117
vendor/github.com/libp2p/go-buffer-pool/pool.go
generated
vendored
Normal file
@@ -0,0 +1,117 @@
|
||||
// Package pool provides a sync.Pool equivalent that buckets incoming
|
||||
// requests to one of 32 sub-pools, one for each power of 2, 0-32.
|
||||
//
|
||||
// import (pool "github.com/libp2p/go-buffer-pool")
|
||||
// var p pool.BufferPool
|
||||
//
|
||||
// small := make([]byte, 1024)
|
||||
// large := make([]byte, 4194304)
|
||||
// p.Put(small)
|
||||
// p.Put(large)
|
||||
//
|
||||
// small2 := p.Get(1024)
|
||||
// large2 := p.Get(4194304)
|
||||
// fmt.Println("small2 len:", len(small2))
|
||||
// fmt.Println("large2 len:", len(large2))
|
||||
//
|
||||
// // Output:
|
||||
// // small2 len: 1024
|
||||
// // large2 len: 4194304
|
||||
//
|
||||
package pool
|
||||
|
||||
import (
|
||||
"math"
|
||||
"math/bits"
|
||||
"sync"
|
||||
)
|
||||
|
||||
// GlobalPool is a static Pool for reusing byteslices of various sizes.
|
||||
var GlobalPool = new(BufferPool)
|
||||
|
||||
// MaxLength is the maximum length of an element that can be added to the Pool.
|
||||
const MaxLength = math.MaxInt32
|
||||
|
||||
// BufferPool is a pool to handle cases of reusing elements of varying sizes. It
|
||||
// maintains 32 internal pools, for each power of 2 in 0-32.
|
||||
//
|
||||
// You should generally just call the package level Get and Put methods or use
|
||||
// the GlobalPool BufferPool instead of constructing your own.
|
||||
//
|
||||
// You MUST NOT copy Pool after using.
|
||||
type BufferPool struct {
|
||||
pools [32]sync.Pool // a list of singlePools
|
||||
ptrs sync.Pool
|
||||
}
|
||||
|
||||
type bufp struct {
|
||||
buf []byte
|
||||
}
|
||||
|
||||
// Get retrieves a buffer of the appropriate length from the buffer pool or
|
||||
// allocates a new one. Get may choose to ignore the pool and treat it as empty.
|
||||
// Callers should not assume any relation between values passed to Put and the
|
||||
// values returned by Get.
|
||||
//
|
||||
// If no suitable buffer exists in the pool, Get creates one.
|
||||
func (p *BufferPool) Get(length int) []byte {
|
||||
if length == 0 {
|
||||
return nil
|
||||
}
|
||||
// Calling this function with a negative length is invalid.
|
||||
// make will panic if length is negative, so we don't have to.
|
||||
if length > MaxLength || length < 0 {
|
||||
return make([]byte, length)
|
||||
}
|
||||
idx := nextLogBase2(uint32(length))
|
||||
if ptr := p.pools[idx].Get(); ptr != nil {
|
||||
bp := ptr.(*bufp)
|
||||
buf := bp.buf[:uint32(length)]
|
||||
bp.buf = nil
|
||||
p.ptrs.Put(ptr)
|
||||
return buf
|
||||
}
|
||||
return make([]byte, 1<<idx)[:uint32(length)]
|
||||
}
|
||||
|
||||
// Put adds x to the pool.
|
||||
func (p *BufferPool) Put(buf []byte) {
|
||||
capacity := cap(buf)
|
||||
if capacity == 0 || capacity > MaxLength {
|
||||
return // drop it
|
||||
}
|
||||
idx := prevLogBase2(uint32(capacity))
|
||||
var bp *bufp
|
||||
if ptr := p.ptrs.Get(); ptr != nil {
|
||||
bp = ptr.(*bufp)
|
||||
} else {
|
||||
bp = new(bufp)
|
||||
}
|
||||
bp.buf = buf
|
||||
p.pools[idx].Put(bp)
|
||||
}
|
||||
|
||||
// Get retrieves a buffer of the appropriate length from the global buffer pool
|
||||
// (or allocates a new one).
|
||||
func Get(length int) []byte {
|
||||
return GlobalPool.Get(length)
|
||||
}
|
||||
|
||||
// Put returns a buffer to the global buffer pool.
|
||||
func Put(slice []byte) {
|
||||
GlobalPool.Put(slice)
|
||||
}
|
||||
|
||||
// Log of base two, round up (for v > 0).
|
||||
func nextLogBase2(v uint32) uint32 {
|
||||
return uint32(bits.Len32(v - 1))
|
||||
}
|
||||
|
||||
// Log of base two, round down (for v > 0)
|
||||
func prevLogBase2(num uint32) uint32 {
|
||||
next := nextLogBase2(num)
|
||||
if num == (1 << uint32(next)) {
|
||||
return next
|
||||
}
|
||||
return next - 1
|
||||
}
|
||||
3
vendor/github.com/libp2p/go-buffer-pool/version.json
generated
vendored
Normal file
3
vendor/github.com/libp2p/go-buffer-pool/version.json
generated
vendored
Normal file
@@ -0,0 +1,3 @@
|
||||
{
|
||||
"version": "v0.1.0"
|
||||
}
|
||||
119
vendor/github.com/libp2p/go-buffer-pool/writer.go
generated
vendored
Normal file
119
vendor/github.com/libp2p/go-buffer-pool/writer.go
generated
vendored
Normal file
@@ -0,0 +1,119 @@
|
||||
package pool
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"io"
|
||||
"sync"
|
||||
)
|
||||
|
||||
const WriterBufferSize = 4096
|
||||
|
||||
var bufioWriterPool = sync.Pool{
|
||||
New: func() interface{} {
|
||||
return bufio.NewWriterSize(nil, WriterBufferSize)
|
||||
},
|
||||
}
|
||||
|
||||
// Writer is a buffered writer that returns its internal buffer in a pool when
|
||||
// not in use.
|
||||
type Writer struct {
|
||||
W io.Writer
|
||||
bufw *bufio.Writer
|
||||
}
|
||||
|
||||
func (w *Writer) ensureBuffer() {
|
||||
if w.bufw == nil {
|
||||
w.bufw = bufioWriterPool.Get().(*bufio.Writer)
|
||||
w.bufw.Reset(w.W)
|
||||
}
|
||||
}
|
||||
|
||||
// Write writes the given byte slice to the underlying connection.
|
||||
//
|
||||
// Note: Write won't return the write buffer to the pool even if it ends up
|
||||
// being empty after the write. You must call Flush() to do that.
|
||||
func (w *Writer) Write(b []byte) (int, error) {
|
||||
if w.bufw == nil {
|
||||
if len(b) >= WriterBufferSize {
|
||||
return w.W.Write(b)
|
||||
}
|
||||
w.bufw = bufioWriterPool.Get().(*bufio.Writer)
|
||||
w.bufw.Reset(w.W)
|
||||
}
|
||||
return w.bufw.Write(b)
|
||||
}
|
||||
|
||||
// Size returns the size of the underlying buffer.
|
||||
func (w *Writer) Size() int {
|
||||
return WriterBufferSize
|
||||
}
|
||||
|
||||
// Available returns the amount buffer space available.
|
||||
func (w *Writer) Available() int {
|
||||
if w.bufw != nil {
|
||||
return w.bufw.Available()
|
||||
}
|
||||
return WriterBufferSize
|
||||
}
|
||||
|
||||
// Buffered returns the amount of data buffered.
|
||||
func (w *Writer) Buffered() int {
|
||||
if w.bufw != nil {
|
||||
return w.bufw.Buffered()
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
// WriteByte writes a single byte.
|
||||
func (w *Writer) WriteByte(b byte) error {
|
||||
w.ensureBuffer()
|
||||
return w.bufw.WriteByte(b)
|
||||
}
|
||||
|
||||
// WriteRune writes a single rune, returning the number of bytes written.
|
||||
func (w *Writer) WriteRune(r rune) (int, error) {
|
||||
w.ensureBuffer()
|
||||
return w.bufw.WriteRune(r)
|
||||
}
|
||||
|
||||
// WriteString writes a string, returning the number of bytes written.
|
||||
func (w *Writer) WriteString(s string) (int, error) {
|
||||
w.ensureBuffer()
|
||||
return w.bufw.WriteString(s)
|
||||
}
|
||||
|
||||
// Flush flushes the write buffer, if any, and returns it to the pool.
|
||||
func (w *Writer) Flush() error {
|
||||
if w.bufw == nil {
|
||||
return nil
|
||||
}
|
||||
if err := w.bufw.Flush(); err != nil {
|
||||
return err
|
||||
}
|
||||
w.bufw.Reset(nil)
|
||||
bufioWriterPool.Put(w.bufw)
|
||||
w.bufw = nil
|
||||
return nil
|
||||
}
|
||||
|
||||
// Close flushes the underlying writer and closes it if it implements the
|
||||
// io.Closer interface.
|
||||
//
|
||||
// Note: Close() closes the writer even if Flush() fails to avoid leaking system
|
||||
// resources. If you want to make sure Flush() succeeds, call it first.
|
||||
func (w *Writer) Close() error {
|
||||
var (
|
||||
ferr, cerr error
|
||||
)
|
||||
ferr = w.Flush()
|
||||
|
||||
// always close even if flush fails.
|
||||
if closer, ok := w.W.(io.Closer); ok {
|
||||
cerr = closer.Close()
|
||||
}
|
||||
|
||||
if ferr != nil {
|
||||
return ferr
|
||||
}
|
||||
return cerr
|
||||
}
|
||||
1
vendor/github.com/libp2p/go-cidranger/.gitignore
generated
vendored
Normal file
1
vendor/github.com/libp2p/go-cidranger/.gitignore
generated
vendored
Normal file
@@ -0,0 +1 @@
|
||||
vendor
|
||||
31
vendor/github.com/libp2p/go-cidranger/.travis.yml
generated
vendored
Normal file
31
vendor/github.com/libp2p/go-cidranger/.travis.yml
generated
vendored
Normal file
@@ -0,0 +1,31 @@
|
||||
os:
|
||||
- linux
|
||||
|
||||
language: go
|
||||
|
||||
go:
|
||||
- 1.14.x
|
||||
- 1.15.x
|
||||
|
||||
env:
|
||||
global:
|
||||
- GOTFLAGS="-race"
|
||||
matrix:
|
||||
- BUILD_DEPTYPE=gomod
|
||||
|
||||
|
||||
# disable travis install
|
||||
install:
|
||||
- true
|
||||
|
||||
script:
|
||||
- bash <(curl -s https://raw.githubusercontent.com/ipfs/ci-helpers/master/travis-ci/run-standard-tests.sh)
|
||||
|
||||
|
||||
cache:
|
||||
directories:
|
||||
- $GOPATH/pkg/mod
|
||||
- $HOME/.cache/go-build
|
||||
|
||||
notifications:
|
||||
email: false
|
||||
33
vendor/github.com/libp2p/go-cidranger/Gopkg.lock
generated
vendored
Normal file
33
vendor/github.com/libp2p/go-cidranger/Gopkg.lock
generated
vendored
Normal file
@@ -0,0 +1,33 @@
|
||||
# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'.
|
||||
|
||||
|
||||
[[projects]]
|
||||
digest = "1:a2c1d0e43bd3baaa071d1b9ed72c27d78169b2b269f71c105ac4ba34b1be4a39"
|
||||
name = "github.com/davecgh/go-spew"
|
||||
packages = ["spew"]
|
||||
pruneopts = "UT"
|
||||
revision = "346938d642f2ec3594ed81d874461961cd0faa76"
|
||||
version = "v1.1.0"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:0028cb19b2e4c3112225cd871870f2d9cf49b9b4276531f03438a88e94be86fe"
|
||||
name = "github.com/pmezard/go-difflib"
|
||||
packages = ["difflib"]
|
||||
pruneopts = "UT"
|
||||
revision = "792786c7400a136282c1664665ae0a8db921c6c2"
|
||||
version = "v1.0.0"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:f85e109eda8f6080877185d1c39e98dd8795e1780c08beca28304b87fd855a1c"
|
||||
name = "github.com/stretchr/testify"
|
||||
packages = ["assert"]
|
||||
pruneopts = "UT"
|
||||
revision = "12b6f73e6084dad08a7c6e575284b177ecafbc71"
|
||||
version = "v1.2.1"
|
||||
|
||||
[solve-meta]
|
||||
analyzer-name = "dep"
|
||||
analyzer-version = 1
|
||||
input-imports = ["github.com/stretchr/testify/assert"]
|
||||
solver-name = "gps-cdcl"
|
||||
solver-version = 1
|
||||
34
vendor/github.com/libp2p/go-cidranger/Gopkg.toml
generated
vendored
Normal file
34
vendor/github.com/libp2p/go-cidranger/Gopkg.toml
generated
vendored
Normal file
@@ -0,0 +1,34 @@
|
||||
# Gopkg.toml example
|
||||
#
|
||||
# Refer to https://github.com/golang/dep/blob/master/docs/Gopkg.toml.md
|
||||
# for detailed Gopkg.toml documentation.
|
||||
#
|
||||
# required = ["github.com/user/thing/cmd/thing"]
|
||||
# ignored = ["github.com/user/project/pkgX", "bitbucket.org/user/project/pkgA/pkgY"]
|
||||
#
|
||||
# [[constraint]]
|
||||
# name = "github.com/user/project"
|
||||
# version = "1.0.0"
|
||||
#
|
||||
# [[constraint]]
|
||||
# name = "github.com/user/project2"
|
||||
# branch = "dev"
|
||||
# source = "github.com/myfork/project2"
|
||||
#
|
||||
# [[override]]
|
||||
# name = "github.com/x/y"
|
||||
# version = "2.4.0"
|
||||
#
|
||||
# [prune]
|
||||
# non-go = false
|
||||
# go-tests = true
|
||||
# unused-packages = true
|
||||
|
||||
|
||||
[[constraint]]
|
||||
name = "github.com/stretchr/testify"
|
||||
version = "1.2.1"
|
||||
|
||||
[prune]
|
||||
go-tests = true
|
||||
unused-packages = true
|
||||
21
vendor/github.com/libp2p/go-cidranger/LICENSE
generated
vendored
Normal file
21
vendor/github.com/libp2p/go-cidranger/LICENSE
generated
vendored
Normal file
@@ -0,0 +1,21 @@
|
||||
MIT License
|
||||
|
||||
Copyright (c) 2017 Yulin
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all
|
||||
copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
SOFTWARE.
|
||||
110
vendor/github.com/libp2p/go-cidranger/README.md
generated
vendored
Normal file
110
vendor/github.com/libp2p/go-cidranger/README.md
generated
vendored
Normal file
@@ -0,0 +1,110 @@
|
||||
# cidranger
|
||||
|
||||
Fast IP to CIDR block(s) lookup using trie in Golang, inspired by [IPv4 route lookup linux](https://vincent.bernat.im/en/blog/2017-ipv4-route-lookup-linux). Possible use cases include detecting if a IP address is from published cloud provider CIDR blocks (e.g. 52.95.110.1 is contained in published AWS Route53 CIDR 52.95.110.0/24), IP routing rules, etc.
|
||||
|
||||
Forked from https://github.com/yl2chen/cidranger due to upstream inactivity.
|
||||
|
||||
[](https://godoc.org/github.com/libp2p/go-cidranger)
|
||||
[](https://travis-ci.org/libp2p/go-cidranger)
|
||||
[](https://coveralls.io/github/libp2p/go-cidranger?branch=master)
|
||||
[](https://goreportcard.com/report/github.com/libp2p/go-cidranger)
|
||||
|
||||
This is visualization of a trie storing CIDR blocks `128.0.0.0/2` `192.0.0.0/2` `200.0.0.0/5` without path compression, the 0/1 number on the path indicates the bit value of the IP address at specified bit position, hence the path from root node to a child node represents a CIDR block that contains all IP ranges of its children, and children's children.
|
||||
<p align="left"><img src="http://i.imgur.com/vSKTEBb.png" width="600" /></p>
|
||||
|
||||
Visualization of trie storing same CIDR blocks with path compression, improving both lookup speed and memory footprint.
|
||||
<p align="left"><img src="http://i.imgur.com/JtaDlD4.png" width="600" /></p>
|
||||
|
||||
## Getting Started
|
||||
Configure imports.
|
||||
```go
|
||||
import (
|
||||
"net"
|
||||
|
||||
"github.com/libp2p/go-cidranger"
|
||||
)
|
||||
```
|
||||
Create a new ranger implemented using Path-Compressed prefix trie.
|
||||
```go
|
||||
ranger := NewPCTrieRanger()
|
||||
```
|
||||
Inserts CIDR blocks.
|
||||
```go
|
||||
_, network1, _ := net.ParseCIDR("192.168.1.0/24")
|
||||
_, network2, _ := net.ParseCIDR("128.168.1.0/24")
|
||||
ranger.Insert(NewBasicRangerEntry(*network1))
|
||||
ranger.Insert(NewBasicRangerEntry(*network2))
|
||||
```
|
||||
To attach any additional value(s) to the entry, simply create custom struct
|
||||
storing the desired value(s) that implements the RangerEntry interface:
|
||||
```go
|
||||
type RangerEntry interface {
|
||||
Network() net.IPNet
|
||||
}
|
||||
```
|
||||
The prefix trie can be visualized as:
|
||||
```
|
||||
0.0.0.0/0 (target_pos:31:has_entry:false)
|
||||
| 1--> 128.0.0.0/1 (target_pos:30:has_entry:false)
|
||||
| | 0--> 128.168.1.0/24 (target_pos:7:has_entry:true)
|
||||
| | 1--> 192.168.1.0/24 (target_pos:7:has_entry:true)
|
||||
```
|
||||
To test if given IP is contained in constructed ranger,
|
||||
```go
|
||||
contains, err = ranger.Contains(net.ParseIP("128.168.1.0")) // returns true, nil
|
||||
contains, err = ranger.Contains(net.ParseIP("192.168.2.0")) // returns false, nil
|
||||
```
|
||||
To get all the networks given is contained in,
|
||||
```go
|
||||
containingNetworks, err = ranger.ContainingNetworks(net.ParseIP("128.168.1.0"))
|
||||
```
|
||||
To get all networks in ranger,
|
||||
```go
|
||||
entries, err := ranger.CoveredNetworks(*AllIPv4) // for IPv4
|
||||
entries, err := ranger.CoveredNetworks(*AllIPv6) // for IPv6
|
||||
```
|
||||
|
||||
## Benchmark
|
||||
Compare hit/miss case for IPv4/IPv6 using PC trie vs brute force implementation, Ranger is initialized with published AWS ip ranges (889 IPv4 CIDR blocks and 360 IPv6)
|
||||
```go
|
||||
// Ipv4 lookup hit scenario
|
||||
BenchmarkPCTrieHitIPv4UsingAWSRanges-4 5000000 353 ns/op
|
||||
BenchmarkBruteRangerHitIPv4UsingAWSRanges-4 100000 13719 ns/op
|
||||
|
||||
// Ipv6 lookup hit scenario, counter-intuitively faster then IPv4 due to less IPv6 CIDR
|
||||
// blocks in the AWS dataset, hence the constructed trie has less path splits and depth.
|
||||
BenchmarkPCTrieHitIPv6UsingAWSRanges-4 10000000 143 ns/op
|
||||
BenchmarkBruteRangerHitIPv6UsingAWSRanges-4 300000 5178 ns/op
|
||||
|
||||
// Ipv4 lookup miss scenario
|
||||
BenchmarkPCTrieMissIPv4UsingAWSRanges-4 20000000 96.5 ns/op
|
||||
BenchmarkBruteRangerMissIPv4UsingAWSRanges-4 50000 24781 ns/op
|
||||
|
||||
// Ipv6 lookup miss scenario
|
||||
BenchmarkPCTrieHMissIPv6UsingAWSRanges-4 10000000 115 ns/op
|
||||
BenchmarkBruteRangerMissIPv6UsingAWSRanges-4 100000 10824 ns/op
|
||||
```
|
||||
|
||||
## Example of IPv6 trie:
|
||||
```
|
||||
::/0 (target_pos:127:has_entry:false)
|
||||
| 0--> 2400::/14 (target_pos:113:has_entry:false)
|
||||
| | 0--> 2400:6400::/22 (target_pos:105:has_entry:false)
|
||||
| | | 0--> 2400:6500::/32 (target_pos:95:has_entry:false)
|
||||
| | | | 0--> 2400:6500::/39 (target_pos:88:has_entry:false)
|
||||
| | | | | 0--> 2400:6500:0:7000::/53 (target_pos:74:has_entry:false)
|
||||
| | | | | | 0--> 2400:6500:0:7000::/54 (target_pos:73:has_entry:false)
|
||||
| | | | | | | 0--> 2400:6500:0:7000::/55 (target_pos:72:has_entry:false)
|
||||
| | | | | | | | 0--> 2400:6500:0:7000::/56 (target_pos:71:has_entry:true)
|
||||
| | | | | | | | 1--> 2400:6500:0:7100::/56 (target_pos:71:has_entry:true)
|
||||
| | | | | | | 1--> 2400:6500:0:7200::/56 (target_pos:71:has_entry:true)
|
||||
| | | | | | 1--> 2400:6500:0:7400::/55 (target_pos:72:has_entry:false)
|
||||
| | | | | | | 0--> 2400:6500:0:7400::/56 (target_pos:71:has_entry:true)
|
||||
| | | | | | | 1--> 2400:6500:0:7500::/56 (target_pos:71:has_entry:true)
|
||||
| | | | | 1--> 2400:6500:100:7000::/54 (target_pos:73:has_entry:false)
|
||||
| | | | | | 0--> 2400:6500:100:7100::/56 (target_pos:71:has_entry:true)
|
||||
| | | | | | 1--> 2400:6500:100:7200::/56 (target_pos:71:has_entry:true)
|
||||
| | | | 1--> 2400:6500:ff00::/64 (target_pos:63:has_entry:true)
|
||||
| | | 1--> 2400:6700:ff00::/64 (target_pos:63:has_entry:true)
|
||||
| | 1--> 2403:b300:ff00::/64 (target_pos:63:has_entry:true)
|
||||
```
|
||||
124
vendor/github.com/libp2p/go-cidranger/brute.go
generated
vendored
Normal file
124
vendor/github.com/libp2p/go-cidranger/brute.go
generated
vendored
Normal file
@@ -0,0 +1,124 @@
|
||||
package cidranger
|
||||
|
||||
import (
|
||||
"net"
|
||||
|
||||
rnet "github.com/libp2p/go-cidranger/net"
|
||||
)
|
||||
|
||||
// bruteRanger is a brute force implementation of Ranger. Insertion and
|
||||
// deletion of networks is performed on an internal storage in the form of
|
||||
// map[string]net.IPNet (constant time operations). However, inclusion tests are
|
||||
// always performed linearly at no guaranteed traversal order of recorded networks,
|
||||
// so one can assume a worst case performance of O(N). The performance can be
|
||||
// boosted many ways, e.g. changing usage of net.IPNet.Contains() to using masked
|
||||
// bits equality checking, but the main purpose of this implementation is for
|
||||
// testing because the correctness of this implementation can be easily guaranteed,
|
||||
// and used as the ground truth when running a wider range of 'random' tests on
|
||||
// other more sophisticated implementations.
|
||||
type bruteRanger struct {
|
||||
ipV4Entries map[string]RangerEntry
|
||||
ipV6Entries map[string]RangerEntry
|
||||
}
|
||||
|
||||
// newBruteRanger returns a new Ranger.
|
||||
func newBruteRanger() Ranger {
|
||||
return &bruteRanger{
|
||||
ipV4Entries: make(map[string]RangerEntry),
|
||||
ipV6Entries: make(map[string]RangerEntry),
|
||||
}
|
||||
}
|
||||
|
||||
// Insert inserts a RangerEntry into ranger.
|
||||
func (b *bruteRanger) Insert(entry RangerEntry) error {
|
||||
network := entry.Network()
|
||||
key := network.String()
|
||||
if _, found := b.ipV4Entries[key]; !found {
|
||||
entries, err := b.getEntriesByVersion(entry.Network().IP)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
entries[key] = entry
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Remove removes a RangerEntry identified by given network from ranger.
|
||||
func (b *bruteRanger) Remove(network net.IPNet) (RangerEntry, error) {
|
||||
networks, err := b.getEntriesByVersion(network.IP)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
key := network.String()
|
||||
if networkToDelete, found := networks[key]; found {
|
||||
delete(networks, key)
|
||||
return networkToDelete, nil
|
||||
}
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// Contains returns bool indicating whether given ip is contained by any
|
||||
// network in ranger.
|
||||
func (b *bruteRanger) Contains(ip net.IP) (bool, error) {
|
||||
entries, err := b.getEntriesByVersion(ip)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
for _, entry := range entries {
|
||||
network := entry.Network()
|
||||
if network.Contains(ip) {
|
||||
return true, nil
|
||||
}
|
||||
}
|
||||
return false, nil
|
||||
}
|
||||
|
||||
// ContainingNetworks returns all RangerEntry(s) that given ip contained in.
|
||||
func (b *bruteRanger) ContainingNetworks(ip net.IP) ([]RangerEntry, error) {
|
||||
entries, err := b.getEntriesByVersion(ip)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
results := []RangerEntry{}
|
||||
for _, entry := range entries {
|
||||
network := entry.Network()
|
||||
if network.Contains(ip) {
|
||||
results = append(results, entry)
|
||||
}
|
||||
}
|
||||
return results, nil
|
||||
}
|
||||
|
||||
// CoveredNetworks returns the list of RangerEntry(s) the given ipnet
|
||||
// covers. That is, the networks that are completely subsumed by the
|
||||
// specified network.
|
||||
func (b *bruteRanger) CoveredNetworks(network net.IPNet) ([]RangerEntry, error) {
|
||||
entries, err := b.getEntriesByVersion(network.IP)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
var results []RangerEntry
|
||||
testNetwork := rnet.NewNetwork(network)
|
||||
for _, entry := range entries {
|
||||
entryNetwork := rnet.NewNetwork(entry.Network())
|
||||
if testNetwork.Covers(entryNetwork) {
|
||||
results = append(results, entry)
|
||||
}
|
||||
}
|
||||
return results, nil
|
||||
}
|
||||
|
||||
// Len returns number of networks in ranger.
|
||||
func (b *bruteRanger) Len() int {
|
||||
return len(b.ipV4Entries) + len(b.ipV6Entries)
|
||||
}
|
||||
|
||||
func (b *bruteRanger) getEntriesByVersion(ip net.IP) (map[string]RangerEntry, error) {
|
||||
if ip.To4() != nil {
|
||||
return b.ipV4Entries, nil
|
||||
}
|
||||
if ip.To16() != nil {
|
||||
return b.ipV6Entries, nil
|
||||
}
|
||||
return nil, ErrInvalidNetworkInput
|
||||
}
|
||||
99
vendor/github.com/libp2p/go-cidranger/cidranger.go
generated
vendored
Normal file
99
vendor/github.com/libp2p/go-cidranger/cidranger.go
generated
vendored
Normal file
@@ -0,0 +1,99 @@
|
||||
/*
|
||||
Package cidranger provides utility to store CIDR blocks and perform ip
|
||||
inclusion tests against it.
|
||||
|
||||
To create a new instance of the path-compressed trie:
|
||||
|
||||
ranger := NewPCTrieRanger()
|
||||
|
||||
To insert or remove an entry (any object that satisfies the RangerEntry
|
||||
interface):
|
||||
|
||||
_, network, _ := net.ParseCIDR("192.168.0.0/24")
|
||||
ranger.Insert(NewBasicRangerEntry(*network))
|
||||
ranger.Remove(network)
|
||||
|
||||
If you desire for any value to be attached to the entry, simply
|
||||
create custom struct that satisfies the RangerEntry interface:
|
||||
|
||||
type RangerEntry interface {
|
||||
Network() net.IPNet
|
||||
}
|
||||
|
||||
To test whether an IP is contained in the constructed networks ranger:
|
||||
|
||||
// returns bool, error
|
||||
containsBool, err := ranger.Contains(net.ParseIP("192.168.0.1"))
|
||||
|
||||
To get a list of CIDR blocks in constructed ranger that contains IP:
|
||||
|
||||
// returns []RangerEntry, error
|
||||
entries, err := ranger.ContainingNetworks(net.ParseIP("192.168.0.1"))
|
||||
|
||||
To get a list of all IPv4/IPv6 rangers respectively:
|
||||
|
||||
// returns []RangerEntry, error
|
||||
entries, err := ranger.CoveredNetworks(*AllIPv4)
|
||||
entries, err := ranger.CoveredNetworks(*AllIPv6)
|
||||
|
||||
*/
|
||||
package cidranger
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net"
|
||||
)
|
||||
|
||||
// Sentinel errors returned on malformed network input.
var (
	// ErrInvalidNetworkInput is returned upon invalid network input.
	ErrInvalidNetworkInput = fmt.Errorf("Invalid network input")

	// ErrInvalidNetworkNumberInput is returned upon invalid network number input.
	ErrInvalidNetworkNumberInput = fmt.Errorf("Invalid network number input")
)

// parseCIDRUnsafe parses s, discarding any error; intended only for
// compile-time-constant CIDR strings known to be valid.
func parseCIDRUnsafe(s string) *net.IPNet {
	_, cidr, _ := net.ParseCIDR(s)
	return cidr
}

// AllIPv4 is a IPv4 CIDR that contains all networks.
var AllIPv4 = parseCIDRUnsafe("0.0.0.0/0")

// AllIPv6 is a IPv6 CIDR that contains all networks.
var AllIPv6 = parseCIDRUnsafe("0::0/0")
|
||||
|
||||
// RangerEntry is an interface for insertable entry into a Ranger.
|
||||
type RangerEntry interface {
|
||||
Network() net.IPNet
|
||||
}
|
||||
|
||||
type basicRangerEntry struct {
|
||||
ipNet net.IPNet
|
||||
}
|
||||
|
||||
func (b *basicRangerEntry) Network() net.IPNet {
|
||||
return b.ipNet
|
||||
}
|
||||
|
||||
// NewBasicRangerEntry returns a basic RangerEntry that only stores the network
|
||||
// itself.
|
||||
func NewBasicRangerEntry(ipNet net.IPNet) RangerEntry {
|
||||
return &basicRangerEntry{
|
||||
ipNet: ipNet,
|
||||
}
|
||||
}
|
||||
|
||||
// Ranger is an interface for cidr block containment lookups.
|
||||
type Ranger interface {
|
||||
Insert(entry RangerEntry) error
|
||||
Remove(network net.IPNet) (RangerEntry, error)
|
||||
Contains(ip net.IP) (bool, error)
|
||||
ContainingNetworks(ip net.IP) ([]RangerEntry, error)
|
||||
CoveredNetworks(network net.IPNet) ([]RangerEntry, error)
|
||||
Len() int
|
||||
}
|
||||
|
||||
// NewPCTrieRanger returns a versionedRanger that supports both IPv4 and IPv6
|
||||
// using the path compressed trie implemention.
|
||||
func NewPCTrieRanger() Ranger {
|
||||
return newVersionedRanger(newRanger)
|
||||
}
|
||||
300
vendor/github.com/libp2p/go-cidranger/net/ip.go
generated
vendored
Normal file
300
vendor/github.com/libp2p/go-cidranger/net/ip.go
generated
vendored
Normal file
@@ -0,0 +1,300 @@
|
||||
/*
|
||||
Package net provides utility functions for working with IPs (net.IP).
|
||||
*/
|
||||
package net
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
"fmt"
|
||||
"math"
|
||||
"net"
|
||||
)
|
||||
|
||||
// IPVersion is version of IP address.
type IPVersion string

// Helper constants.
const (
	// Number of uint32 words backing each address family.
	IPv4Uint32Count = 1
	IPv6Uint32Count = 4

	// Width of a word in bits and bytes.
	BitsPerUint32 = 32
	BytePerUint32 = 4

	IPv4 IPVersion = "IPv4"
	IPv6 IPVersion = "IPv6"
)

// ErrInvalidBitPosition is returned when bits requested is not valid.
var ErrInvalidBitPosition = fmt.Errorf("bit position not valid")

// ErrVersionMismatch is returned upon mismatch in network input versions.
var ErrVersionMismatch = fmt.Errorf("Network input version mismatch")

// ErrNoGreatestCommonBit is an error returned when no greatest common bit
// exists for the cidr ranges.
var ErrNoGreatestCommonBit = fmt.Errorf("No greatest common bit")
|
||||
|
||||
// NetworkNumber represents an IP address using uint32 as internal storage.
|
||||
// IPv4 usings 1 uint32, while IPv6 uses 4 uint32.
|
||||
type NetworkNumber []uint32
|
||||
|
||||
// NewNetworkNumber returns a equivalent NetworkNumber to given IP address,
|
||||
// return nil if ip is neither IPv4 nor IPv6.
|
||||
func NewNetworkNumber(ip net.IP) NetworkNumber {
|
||||
if ip == nil {
|
||||
return nil
|
||||
}
|
||||
coercedIP := ip.To4()
|
||||
parts := 1
|
||||
if coercedIP == nil {
|
||||
coercedIP = ip.To16()
|
||||
parts = 4
|
||||
}
|
||||
if coercedIP == nil {
|
||||
return nil
|
||||
}
|
||||
nn := make(NetworkNumber, parts)
|
||||
for i := 0; i < parts; i++ {
|
||||
idx := i * net.IPv4len
|
||||
nn[i] = binary.BigEndian.Uint32(coercedIP[idx : idx+net.IPv4len])
|
||||
}
|
||||
return nn
|
||||
}
|
||||
|
||||
// ToV4 returns ip address if ip is IPv4, returns nil otherwise.
|
||||
func (n NetworkNumber) ToV4() NetworkNumber {
|
||||
if len(n) != IPv4Uint32Count {
|
||||
return nil
|
||||
}
|
||||
return n
|
||||
}
|
||||
|
||||
// ToV6 returns ip address if ip is IPv6, returns nil otherwise.
|
||||
func (n NetworkNumber) ToV6() NetworkNumber {
|
||||
if len(n) != IPv6Uint32Count {
|
||||
return nil
|
||||
}
|
||||
return n
|
||||
}
|
||||
|
||||
// ToIP returns equivalent net.IP.
|
||||
func (n NetworkNumber) ToIP() net.IP {
|
||||
ip := make(net.IP, len(n)*BytePerUint32)
|
||||
for i := 0; i < len(n); i++ {
|
||||
idx := i * net.IPv4len
|
||||
binary.BigEndian.PutUint32(ip[idx:idx+net.IPv4len], n[i])
|
||||
}
|
||||
if len(ip) == net.IPv4len {
|
||||
ip = net.IPv4(ip[0], ip[1], ip[2], ip[3])
|
||||
}
|
||||
return ip
|
||||
}
|
||||
|
||||
// Equal is the equality test for 2 network numbers.
|
||||
func (n NetworkNumber) Equal(n1 NetworkNumber) bool {
|
||||
if len(n) != len(n1) {
|
||||
return false
|
||||
}
|
||||
if n[0] != n1[0] {
|
||||
return false
|
||||
}
|
||||
if len(n) == IPv6Uint32Count {
|
||||
return n[1] == n1[1] && n[2] == n1[2] && n[3] == n1[3]
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// Next returns the next logical network number.
|
||||
func (n NetworkNumber) Next() NetworkNumber {
|
||||
newIP := make(NetworkNumber, len(n))
|
||||
copy(newIP, n)
|
||||
for i := len(newIP) - 1; i >= 0; i-- {
|
||||
newIP[i]++
|
||||
if newIP[i] > 0 {
|
||||
break
|
||||
}
|
||||
}
|
||||
return newIP
|
||||
}
|
||||
|
||||
// Previous returns the previous logical network number.
|
||||
func (n NetworkNumber) Previous() NetworkNumber {
|
||||
newIP := make(NetworkNumber, len(n))
|
||||
copy(newIP, n)
|
||||
for i := len(newIP) - 1; i >= 0; i-- {
|
||||
newIP[i]--
|
||||
if newIP[i] < math.MaxUint32 {
|
||||
break
|
||||
}
|
||||
}
|
||||
return newIP
|
||||
}
|
||||
|
||||
// Bit returns uint32 representing the bit value at given position, e.g.,
|
||||
// "128.0.0.0" has bit value of 1 at position 31, and 0 for positions 30 to 0.
|
||||
func (n NetworkNumber) Bit(position uint) (uint32, error) {
|
||||
if int(position) > len(n)*BitsPerUint32-1 {
|
||||
return 0, ErrInvalidBitPosition
|
||||
}
|
||||
idx := len(n) - 1 - int(position/BitsPerUint32)
|
||||
// Mod 31 to get array index.
|
||||
rShift := position & (BitsPerUint32 - 1)
|
||||
return (n[idx] >> rShift) & 1, nil
|
||||
}
|
||||
|
||||
// LeastCommonBitPosition returns the smallest position of the preceding common
|
||||
// bits of the 2 network numbers, and returns an error ErrNoGreatestCommonBit
|
||||
// if the two network number diverges from the first bit.
|
||||
// e.g., if the network number diverges after the 1st bit, it returns 131 for
|
||||
// IPv6 and 31 for IPv4 .
|
||||
func (n NetworkNumber) LeastCommonBitPosition(n1 NetworkNumber) (uint, error) {
|
||||
if len(n) != len(n1) {
|
||||
return 0, ErrVersionMismatch
|
||||
}
|
||||
for i := 0; i < len(n); i++ {
|
||||
mask := uint32(1) << 31
|
||||
pos := uint(31)
|
||||
for ; mask > 0; mask >>= 1 {
|
||||
if n[i]&mask != n1[i]&mask {
|
||||
if i == 0 && pos == 31 {
|
||||
return 0, ErrNoGreatestCommonBit
|
||||
}
|
||||
return (pos + 1) + uint(BitsPerUint32)*uint(len(n)-i-1), nil
|
||||
}
|
||||
pos--
|
||||
}
|
||||
}
|
||||
return 0, nil
|
||||
}
|
||||
|
||||
// Network represents a block of network numbers, also known as CIDR.
|
||||
type Network struct {
|
||||
Number NetworkNumber
|
||||
Mask NetworkNumberMask
|
||||
}
|
||||
|
||||
// NewNetwork returns Network built using given net.IPNet.
|
||||
func NewNetwork(ipNet net.IPNet) Network {
|
||||
ones, _ := ipNet.Mask.Size()
|
||||
return Network{
|
||||
Number: NewNetworkNumber(ipNet.IP),
|
||||
Mask: NetworkNumberMask(ones),
|
||||
}
|
||||
}
|
||||
|
||||
// Masked returns a new network conforming to new mask.
|
||||
func (n Network) Masked(ones int) Network {
|
||||
mask := NetworkNumberMask(ones)
|
||||
return Network{
|
||||
Number: mask.Mask(n.Number),
|
||||
Mask: mask,
|
||||
}
|
||||
}
|
||||
|
||||
// sub returns a - b, saturating at zero instead of wrapping when b > a.
func sub(a, b uint8) uint8 {
	if b >= a {
		return 0
	}
	return a - b
}
|
||||
|
||||
func mask(m NetworkNumberMask) (mask1, mask2, mask3, mask4 uint32) {
|
||||
// We're relying on overflow here.
|
||||
const ones uint32 = 0xFFFFFFFF
|
||||
mask1 = ones << sub(1*32, uint8(m))
|
||||
mask2 = ones << sub(2*32, uint8(m))
|
||||
mask3 = ones << sub(3*32, uint8(m))
|
||||
mask4 = ones << sub(4*32, uint8(m))
|
||||
return
|
||||
}
|
||||
|
||||
// Contains returns true if NetworkNumber is in range of Network, false
|
||||
// otherwise.
|
||||
func (n Network) Contains(nn NetworkNumber) bool {
|
||||
if len(n.Number) != len(nn) {
|
||||
return false
|
||||
}
|
||||
const ones uint32 = 0xFFFFFFFF
|
||||
|
||||
mask1, mask2, mask3, mask4 := mask(n.Mask)
|
||||
switch len(n.Number) {
|
||||
case IPv4Uint32Count:
|
||||
return nn[0]&mask1 == n.Number[0]
|
||||
case IPv6Uint32Count:
|
||||
return nn[0]&mask1 == n.Number[0] &&
|
||||
nn[1]&mask2 == n.Number[1] &&
|
||||
nn[2]&mask3 == n.Number[2] &&
|
||||
nn[3]&mask4 == n.Number[3]
|
||||
default:
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
// Contains returns true if Network covers o, false otherwise
|
||||
func (n Network) Covers(o Network) bool {
|
||||
return n.Contains(o.Number) && n.Mask <= o.Mask
|
||||
}
|
||||
|
||||
// LeastCommonBitPosition returns the smallest position of the preceding common
|
||||
// bits of the 2 networks, and returns an error ErrNoGreatestCommonBit
|
||||
// if the two network number diverges from the first bit.
|
||||
func (n Network) LeastCommonBitPosition(n1 Network) (uint, error) {
|
||||
maskSize := n.Mask
|
||||
if n1.Mask < n.Mask {
|
||||
maskSize = n1.Mask
|
||||
}
|
||||
maskPosition := len(n1.Number)*BitsPerUint32 - int(maskSize)
|
||||
lcb, err := n.Number.LeastCommonBitPosition(n1.Number)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
return uint(math.Max(float64(maskPosition), float64(lcb))), nil
|
||||
}
|
||||
|
||||
// Equal is the equality test for 2 networks.
|
||||
func (n Network) Equal(n1 Network) bool {
|
||||
return n.Number.Equal(n1.Number) && n.Mask == n1.Mask
|
||||
}
|
||||
|
||||
func (n Network) String() string {
|
||||
return fmt.Sprintf("%s/%d", n.Number.ToIP(), n.Mask)
|
||||
}
|
||||
|
||||
func (n Network) IPNet() net.IPNet {
|
||||
return net.IPNet{
|
||||
IP: n.Number.ToIP(),
|
||||
Mask: net.CIDRMask(int(n.Mask), len(n.Number)*32),
|
||||
}
|
||||
}
|
||||
|
||||
// NetworkNumberMask is an IP address.
|
||||
type NetworkNumberMask int
|
||||
|
||||
// Mask returns a new masked NetworkNumber from given NetworkNumber.
|
||||
func (m NetworkNumberMask) Mask(n NetworkNumber) NetworkNumber {
|
||||
mask1, mask2, mask3, mask4 := mask(m)
|
||||
|
||||
result := make(NetworkNumber, len(n))
|
||||
switch len(n) {
|
||||
case IPv4Uint32Count:
|
||||
result[0] = n[0] & mask1
|
||||
case IPv6Uint32Count:
|
||||
result[0] = n[0] & mask1
|
||||
result[1] = n[1] & mask2
|
||||
result[2] = n[2] & mask3
|
||||
result[3] = n[3] & mask4
|
||||
}
|
||||
return result
|
||||
}
|
||||
|
||||
// NextIP returns the next sequential ip.
|
||||
func NextIP(ip net.IP) net.IP {
|
||||
return NewNetworkNumber(ip).Next().ToIP()
|
||||
}
|
||||
|
||||
// PreviousIP returns the previous sequential ip.
|
||||
func PreviousIP(ip net.IP) net.IP {
|
||||
return NewNetworkNumber(ip).Previous().ToIP()
|
||||
}
|
||||
404
vendor/github.com/libp2p/go-cidranger/trie.go
generated
vendored
Normal file
404
vendor/github.com/libp2p/go-cidranger/trie.go
generated
vendored
Normal file
@@ -0,0 +1,404 @@
|
||||
package cidranger
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net"
|
||||
"strings"
|
||||
|
||||
rnet "github.com/libp2p/go-cidranger/net"
|
||||
)
|
||||
|
||||
// prefixTrie is a path-compressed (PC) trie implementation of the
|
||||
// ranger interface inspired by this blog post:
|
||||
// https://vincent.bernat.im/en/blog/2017-ipv4-route-lookup-linux
|
||||
//
|
||||
// CIDR blocks are stored using a prefix tree structure where each node has its
|
||||
// parent as prefix, and the path from the root node represents current CIDR
|
||||
// block.
|
||||
//
|
||||
// For IPv4, the trie structure guarantees max depth of 32 as IPv4 addresses are
|
||||
// 32 bits long and each bit represents a prefix tree starting at that bit. This
|
||||
// property also guarantees constant lookup time in Big-O notation.
|
||||
//
|
||||
// Path compression compresses a string of node with only 1 child into a single
|
||||
// node, decrease the amount of lookups necessary during containment tests.
|
||||
//
|
||||
// Level compression dictates the amount of direct children of a node by
|
||||
// allowing it to handle multiple bits in the path. The heuristic (based on
|
||||
// children population) to decide when the compression and decompression happens
|
||||
// is outlined in the prior linked blog, and will be experimented with in more
|
||||
// depth in this project in the future.
|
||||
//
|
||||
// Note: Can not insert both IPv4 and IPv6 network addresses into the same
|
||||
// prefix trie, use versionedRanger wrapper instead.
|
||||
//
|
||||
// TODO: Implement level-compressed component of the LPC trie.
|
||||
type prefixTrie struct {
|
||||
parent *prefixTrie
|
||||
children [2]*prefixTrie
|
||||
|
||||
numBitsSkipped uint
|
||||
numBitsHandled uint
|
||||
|
||||
network rnet.Network
|
||||
entry RangerEntry
|
||||
|
||||
size int // This is only maintained in the root trie.
|
||||
}
|
||||
|
||||
// Zero ("match everything") networks for each family, initialized once.
var ip4ZeroCIDR, ip6ZeroCIDR net.IPNet

func init() {
	// Inputs are constant and valid, so errors are impossible here.
	_, v4, _ := net.ParseCIDR("0.0.0.0/0")
	_, v6, _ := net.ParseCIDR("0::0/0")
	ip4ZeroCIDR = *v4
	ip6ZeroCIDR = *v6
}
|
||||
|
||||
func newRanger(version rnet.IPVersion) Ranger {
|
||||
return newPrefixTree(version)
|
||||
}
|
||||
|
||||
// newPrefixTree creates a new prefixTrie.
|
||||
func newPrefixTree(version rnet.IPVersion) *prefixTrie {
|
||||
rootNet := ip4ZeroCIDR
|
||||
if version == rnet.IPv6 {
|
||||
rootNet = ip6ZeroCIDR
|
||||
}
|
||||
return &prefixTrie{
|
||||
numBitsSkipped: 0,
|
||||
numBitsHandled: 1,
|
||||
network: rnet.NewNetwork(rootNet),
|
||||
}
|
||||
}
|
||||
|
||||
func newPathprefixTrie(network rnet.Network, numBitsSkipped uint) *prefixTrie {
|
||||
version := rnet.IPv4
|
||||
if len(network.Number) == rnet.IPv6Uint32Count {
|
||||
version = rnet.IPv6
|
||||
}
|
||||
path := newPrefixTree(version)
|
||||
path.numBitsSkipped = numBitsSkipped
|
||||
path.network = network.Masked(int(numBitsSkipped))
|
||||
return path
|
||||
}
|
||||
|
||||
func newEntryTrie(network rnet.Network, entry RangerEntry) *prefixTrie {
|
||||
leaf := newPathprefixTrie(network, uint(network.Mask))
|
||||
leaf.entry = entry
|
||||
return leaf
|
||||
}
|
||||
|
||||
// Insert inserts a RangerEntry into prefix trie.
|
||||
func (p *prefixTrie) Insert(entry RangerEntry) error {
|
||||
network := entry.Network()
|
||||
sizeIncreased, err := p.insert(rnet.NewNetwork(network), entry)
|
||||
if sizeIncreased {
|
||||
p.size++
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
// Remove removes RangerEntry identified by given network from trie.
|
||||
func (p *prefixTrie) Remove(network net.IPNet) (RangerEntry, error) {
|
||||
entry, err := p.remove(rnet.NewNetwork(network))
|
||||
if entry != nil {
|
||||
p.size--
|
||||
}
|
||||
return entry, err
|
||||
}
|
||||
|
||||
// Contains returns boolean indicating whether given ip is contained in any
|
||||
// of the inserted networks.
|
||||
func (p *prefixTrie) Contains(ip net.IP) (bool, error) {
|
||||
nn := rnet.NewNetworkNumber(ip)
|
||||
if nn == nil {
|
||||
return false, ErrInvalidNetworkNumberInput
|
||||
}
|
||||
return p.contains(nn)
|
||||
}
|
||||
|
||||
// ContainingNetworks returns the list of RangerEntry(s) the given ip is
|
||||
// contained in in ascending prefix order.
|
||||
func (p *prefixTrie) ContainingNetworks(ip net.IP) ([]RangerEntry, error) {
|
||||
nn := rnet.NewNetworkNumber(ip)
|
||||
if nn == nil {
|
||||
return nil, ErrInvalidNetworkNumberInput
|
||||
}
|
||||
return p.containingNetworks(nn)
|
||||
}
|
||||
|
||||
// CoveredNetworks returns the list of RangerEntry(s) the given ipnet
|
||||
// covers. That is, the networks that are completely subsumed by the
|
||||
// specified network.
|
||||
func (p *prefixTrie) CoveredNetworks(network net.IPNet) ([]RangerEntry, error) {
|
||||
net := rnet.NewNetwork(network)
|
||||
return p.coveredNetworks(net)
|
||||
}
|
||||
|
||||
// Len returns number of networks in ranger.
|
||||
func (p *prefixTrie) Len() int {
|
||||
return p.size
|
||||
}
|
||||
|
||||
// String returns string representation of trie, mainly for visualization and
|
||||
// debugging.
|
||||
func (p *prefixTrie) String() string {
|
||||
children := []string{}
|
||||
padding := strings.Repeat("| ", p.level()+1)
|
||||
for bits, child := range p.children {
|
||||
if child == nil {
|
||||
continue
|
||||
}
|
||||
childStr := fmt.Sprintf("\n%s%d--> %s", padding, bits, child.String())
|
||||
children = append(children, childStr)
|
||||
}
|
||||
return fmt.Sprintf("%s (target_pos:%d:has_entry:%t)%s", p.network,
|
||||
p.targetBitPosition(), p.hasEntry(), strings.Join(children, ""))
|
||||
}
|
||||
|
||||
func (p *prefixTrie) contains(number rnet.NetworkNumber) (bool, error) {
|
||||
if !p.network.Contains(number) {
|
||||
return false, nil
|
||||
}
|
||||
if p.hasEntry() {
|
||||
return true, nil
|
||||
}
|
||||
if p.targetBitPosition() < 0 {
|
||||
return false, nil
|
||||
}
|
||||
bit, err := p.targetBitFromIP(number)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
child := p.children[bit]
|
||||
if child != nil {
|
||||
return child.contains(number)
|
||||
}
|
||||
return false, nil
|
||||
}
|
||||
|
||||
func (p *prefixTrie) containingNetworks(number rnet.NetworkNumber) ([]RangerEntry, error) {
|
||||
results := []RangerEntry{}
|
||||
if !p.network.Contains(number) {
|
||||
return results, nil
|
||||
}
|
||||
if p.hasEntry() {
|
||||
results = []RangerEntry{p.entry}
|
||||
}
|
||||
if p.targetBitPosition() < 0 {
|
||||
return results, nil
|
||||
}
|
||||
bit, err := p.targetBitFromIP(number)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
child := p.children[bit]
|
||||
if child != nil {
|
||||
ranges, err := child.containingNetworks(number)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if len(ranges) > 0 {
|
||||
if len(results) > 0 {
|
||||
results = append(results, ranges...)
|
||||
} else {
|
||||
results = ranges
|
||||
}
|
||||
}
|
||||
}
|
||||
return results, nil
|
||||
}
|
||||
|
||||
func (p *prefixTrie) coveredNetworks(network rnet.Network) ([]RangerEntry, error) {
|
||||
var results []RangerEntry
|
||||
if network.Covers(p.network) {
|
||||
for entry := range p.walkDepth() {
|
||||
results = append(results, entry)
|
||||
}
|
||||
} else if p.targetBitPosition() >= 0 {
|
||||
bit, err := p.targetBitFromIP(network.Number)
|
||||
if err != nil {
|
||||
return results, err
|
||||
}
|
||||
child := p.children[bit]
|
||||
if child != nil {
|
||||
return child.coveredNetworks(network)
|
||||
}
|
||||
}
|
||||
return results, nil
|
||||
}
|
||||
|
||||
func (p *prefixTrie) insert(network rnet.Network, entry RangerEntry) (bool, error) {
|
||||
if p.network.Equal(network) {
|
||||
sizeIncreased := p.entry == nil
|
||||
p.entry = entry
|
||||
return sizeIncreased, nil
|
||||
}
|
||||
|
||||
bit, err := p.targetBitFromIP(network.Number)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
existingChild := p.children[bit]
|
||||
|
||||
// No existing child, insert new leaf trie.
|
||||
if existingChild == nil {
|
||||
p.appendTrie(bit, newEntryTrie(network, entry))
|
||||
return true, nil
|
||||
}
|
||||
|
||||
// Check whether it is necessary to insert additional path prefix between current trie and existing child,
|
||||
// in the case that inserted network diverges on its path to existing child.
|
||||
lcb, err := network.LeastCommonBitPosition(existingChild.network)
|
||||
divergingBitPos := int(lcb) - 1
|
||||
if divergingBitPos > existingChild.targetBitPosition() {
|
||||
pathPrefix := newPathprefixTrie(network, p.totalNumberOfBits()-lcb)
|
||||
err := p.insertPrefix(bit, pathPrefix, existingChild)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
// Update new child
|
||||
existingChild = pathPrefix
|
||||
}
|
||||
return existingChild.insert(network, entry)
|
||||
}
|
||||
|
||||
func (p *prefixTrie) appendTrie(bit uint32, prefix *prefixTrie) {
|
||||
p.children[bit] = prefix
|
||||
prefix.parent = p
|
||||
}
|
||||
|
||||
func (p *prefixTrie) insertPrefix(bit uint32, pathPrefix, child *prefixTrie) error {
|
||||
// Set parent/child relationship between current trie and inserted pathPrefix
|
||||
p.children[bit] = pathPrefix
|
||||
pathPrefix.parent = p
|
||||
|
||||
// Set parent/child relationship between inserted pathPrefix and original child
|
||||
pathPrefixBit, err := pathPrefix.targetBitFromIP(child.network.Number)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
pathPrefix.children[pathPrefixBit] = child
|
||||
child.parent = pathPrefix
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p *prefixTrie) remove(network rnet.Network) (RangerEntry, error) {
|
||||
if p.hasEntry() && p.network.Equal(network) {
|
||||
entry := p.entry
|
||||
p.entry = nil
|
||||
|
||||
err := p.compressPathIfPossible()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return entry, nil
|
||||
}
|
||||
bit, err := p.targetBitFromIP(network.Number)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
child := p.children[bit]
|
||||
if child != nil {
|
||||
return child.remove(network)
|
||||
}
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
func (p *prefixTrie) qualifiesForPathCompression() bool {
|
||||
// Current prefix trie can be path compressed if it meets all following.
|
||||
// 1. records no CIDR entry
|
||||
// 2. has single or no child
|
||||
// 3. is not root trie
|
||||
return !p.hasEntry() && p.childrenCount() <= 1 && p.parent != nil
|
||||
}
|
||||
|
||||
func (p *prefixTrie) compressPathIfPossible() error {
|
||||
if !p.qualifiesForPathCompression() {
|
||||
// Does not qualify to be compressed
|
||||
return nil
|
||||
}
|
||||
|
||||
// Find lone child.
|
||||
var loneChild *prefixTrie
|
||||
for _, child := range p.children {
|
||||
if child != nil {
|
||||
loneChild = child
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
// Find root of currnt single child lineage.
|
||||
parent := p.parent
|
||||
for ; parent.qualifiesForPathCompression(); parent = parent.parent {
|
||||
}
|
||||
parentBit, err := parent.targetBitFromIP(p.network.Number)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
parent.children[parentBit] = loneChild
|
||||
|
||||
// Attempts to furthur apply path compression at current lineage parent, in case current lineage
|
||||
// compressed into parent.
|
||||
return parent.compressPathIfPossible()
|
||||
}
|
||||
|
||||
func (p *prefixTrie) childrenCount() int {
|
||||
count := 0
|
||||
for _, child := range p.children {
|
||||
if child != nil {
|
||||
count++
|
||||
}
|
||||
}
|
||||
return count
|
||||
}
|
||||
|
||||
func (p *prefixTrie) totalNumberOfBits() uint {
|
||||
return rnet.BitsPerUint32 * uint(len(p.network.Number))
|
||||
}
|
||||
|
||||
func (p *prefixTrie) targetBitPosition() int {
|
||||
return int(p.totalNumberOfBits()-p.numBitsSkipped) - 1
|
||||
}
|
||||
|
||||
func (p *prefixTrie) targetBitFromIP(n rnet.NetworkNumber) (uint32, error) {
|
||||
// This is a safe uint boxing of int since we should never attempt to get
|
||||
// target bit at a negative position.
|
||||
return n.Bit(uint(p.targetBitPosition()))
|
||||
}
|
||||
|
||||
func (p *prefixTrie) hasEntry() bool {
|
||||
return p.entry != nil
|
||||
}
|
||||
|
||||
func (p *prefixTrie) level() int {
|
||||
if p.parent == nil {
|
||||
return 0
|
||||
}
|
||||
return p.parent.level() + 1
|
||||
}
|
||||
|
||||
// walkDepth walks the trie in depth order, for unit testing.
|
||||
func (p *prefixTrie) walkDepth() <-chan RangerEntry {
|
||||
entries := make(chan RangerEntry)
|
||||
go func() {
|
||||
if p.hasEntry() {
|
||||
entries <- p.entry
|
||||
}
|
||||
childEntriesList := []<-chan RangerEntry{}
|
||||
for _, trie := range p.children {
|
||||
if trie == nil {
|
||||
continue
|
||||
}
|
||||
childEntriesList = append(childEntriesList, trie.walkDepth())
|
||||
}
|
||||
for _, childEntries := range childEntriesList {
|
||||
for entry := range childEntries {
|
||||
entries <- entry
|
||||
}
|
||||
}
|
||||
close(entries)
|
||||
}()
|
||||
return entries
|
||||
}
|
||||
77
vendor/github.com/libp2p/go-cidranger/version.go
generated
vendored
Normal file
77
vendor/github.com/libp2p/go-cidranger/version.go
generated
vendored
Normal file
@@ -0,0 +1,77 @@
|
||||
package cidranger
|
||||
|
||||
import (
|
||||
"net"
|
||||
|
||||
rnet "github.com/libp2p/go-cidranger/net"
|
||||
)
|
||||
|
||||
type rangerFactory func(rnet.IPVersion) Ranger
|
||||
|
||||
type versionedRanger struct {
|
||||
ipV4Ranger Ranger
|
||||
ipV6Ranger Ranger
|
||||
}
|
||||
|
||||
func newVersionedRanger(factory rangerFactory) Ranger {
|
||||
return &versionedRanger{
|
||||
ipV4Ranger: factory(rnet.IPv4),
|
||||
ipV6Ranger: factory(rnet.IPv6),
|
||||
}
|
||||
}
|
||||
|
||||
func (v *versionedRanger) Insert(entry RangerEntry) error {
|
||||
network := entry.Network()
|
||||
ranger, err := v.getRangerForIP(network.IP)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return ranger.Insert(entry)
|
||||
}
|
||||
|
||||
func (v *versionedRanger) Remove(network net.IPNet) (RangerEntry, error) {
|
||||
ranger, err := v.getRangerForIP(network.IP)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return ranger.Remove(network)
|
||||
}
|
||||
|
||||
func (v *versionedRanger) Contains(ip net.IP) (bool, error) {
|
||||
ranger, err := v.getRangerForIP(ip)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
return ranger.Contains(ip)
|
||||
}
|
||||
|
||||
func (v *versionedRanger) ContainingNetworks(ip net.IP) ([]RangerEntry, error) {
|
||||
ranger, err := v.getRangerForIP(ip)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return ranger.ContainingNetworks(ip)
|
||||
}
|
||||
|
||||
func (v *versionedRanger) CoveredNetworks(network net.IPNet) ([]RangerEntry, error) {
|
||||
ranger, err := v.getRangerForIP(network.IP)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return ranger.CoveredNetworks(network)
|
||||
}
|
||||
|
||||
// Len returns number of networks in ranger.
|
||||
func (v *versionedRanger) Len() int {
|
||||
return v.ipV4Ranger.Len() + v.ipV6Ranger.Len()
|
||||
}
|
||||
|
||||
func (v *versionedRanger) getRangerForIP(ip net.IP) (Ranger, error) {
|
||||
if ip.To4() != nil {
|
||||
return v.ipV4Ranger, nil
|
||||
}
|
||||
if ip.To16() != nil {
|
||||
return v.ipV6Ranger, nil
|
||||
}
|
||||
return nil, ErrInvalidNetworkNumberInput
|
||||
}
|
||||
30
vendor/github.com/libp2p/go-flow-metrics/.travis.yml
generated
vendored
Normal file
30
vendor/github.com/libp2p/go-flow-metrics/.travis.yml
generated
vendored
Normal file
@@ -0,0 +1,30 @@
|
||||
os:
|
||||
- linux
|
||||
|
||||
language: go
|
||||
|
||||
go:
|
||||
- 1.17.x
|
||||
|
||||
env:
|
||||
global:
|
||||
- GOTFLAGS="-race"
|
||||
matrix:
|
||||
- BUILD_DEPTYPE=gomod
|
||||
|
||||
|
||||
# disable travis install
|
||||
install:
|
||||
- true
|
||||
|
||||
script:
|
||||
- bash <(curl -s https://raw.githubusercontent.com/ipfs/ci-helpers/master/travis-ci/run-standard-tests.sh)
|
||||
|
||||
|
||||
cache:
|
||||
directories:
|
||||
- $GOPATH/pkg/mod
|
||||
- $HOME/.cache/go-build
|
||||
|
||||
notifications:
|
||||
email: false
|
||||
21
vendor/github.com/libp2p/go-flow-metrics/LICENSE
generated
vendored
Normal file
21
vendor/github.com/libp2p/go-flow-metrics/LICENSE
generated
vendored
Normal file
@@ -0,0 +1,21 @@
|
||||
The MIT License (MIT)
|
||||
|
||||
Copyright (c) 2017 Protocol Labs
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in
|
||||
all copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
||||
THE SOFTWARE.
|
||||
41
vendor/github.com/libp2p/go-flow-metrics/README.md
generated
vendored
Normal file
41
vendor/github.com/libp2p/go-flow-metrics/README.md
generated
vendored
Normal file
@@ -0,0 +1,41 @@
|
||||
go-flow-metrics
|
||||
==================
|
||||
|
||||
[](https://protocol.ai)
|
||||
[](http://webchat.freenode.net/?channels=%23libp2p)
|
||||
[](https://libp2p.io/)
|
||||
[](https://travis-ci.org/libp2p/go-flow-metrics)
|
||||
[](https://discuss.libp2p.io)
|
||||
|
||||
|
||||
> A simple library for tracking flow metrics.
|
||||
|
||||
A simple alternative to [rcrowley's
|
||||
go-metrics](https://github.com/rcrowley/go-metrics) that's a lot faster (and
|
||||
only does simple bandwidth metrics).
|
||||
|
||||
## Table of Contents
|
||||
|
||||
- [Install](#install)
|
||||
- [Contribute](#contribute)
|
||||
- [License](#license)
|
||||
|
||||
## Install
|
||||
|
||||
```sh
|
||||
make install
|
||||
```
|
||||
|
||||
## Contribute
|
||||
|
||||
PRs are welcome!
|
||||
|
||||
Small note: If editing the Readme, please conform to the [standard-readme](https://github.com/RichardLitt/standard-readme) specification.
|
||||
|
||||
## License
|
||||
|
||||
MIT © Protocol Labs
|
||||
|
||||
---
|
||||
|
||||
The last gx published version of this module was: 0.2.0: QmQFXpvKpF34dK9HcE7k8Ksk8V4BwWYZtdEcjzu5aUgRVr
|
||||
71
vendor/github.com/libp2p/go-flow-metrics/meter.go
generated
vendored
Normal file
71
vendor/github.com/libp2p/go-flow-metrics/meter.go
generated
vendored
Normal file
@@ -0,0 +1,71 @@
|
||||
package flow
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
)
|
||||
|
||||
// Snapshot is a rate/total snapshot.
|
||||
type Snapshot struct {
|
||||
Rate float64
|
||||
Total uint64
|
||||
LastUpdate time.Time
|
||||
}
|
||||
|
||||
// NewMeter returns a new Meter with the correct idle time.
|
||||
//
|
||||
// While zero-value Meters can be used, their "last update" time will start at
|
||||
// the program start instead of when the meter was created.
|
||||
func NewMeter() *Meter {
|
||||
return &Meter{
|
||||
snapshot: Snapshot{
|
||||
LastUpdate: time.Now(),
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func (s Snapshot) String() string {
|
||||
return fmt.Sprintf("%d (%f/s)", s.Total, s.Rate)
|
||||
}
|
||||
|
||||
// Meter is a meter for monitoring a flow.
|
||||
type Meter struct {
|
||||
accumulator uint64
|
||||
|
||||
// managed by the sweeper loop.
|
||||
registered bool
|
||||
|
||||
// Take lock.
|
||||
snapshot Snapshot
|
||||
}
|
||||
|
||||
// Mark updates the total.
|
||||
func (m *Meter) Mark(count uint64) {
|
||||
if count > 0 && atomic.AddUint64(&m.accumulator, count) == count {
|
||||
// The accumulator is 0 so we probably need to register. We may
|
||||
// already _be_ registered however, if we are, the registration
|
||||
// loop will notice that `m.registered` is set and ignore us.
|
||||
globalSweeper.Register(m)
|
||||
}
|
||||
}
|
||||
|
||||
// Snapshot gets a snapshot of the total and rate.
|
||||
func (m *Meter) Snapshot() Snapshot {
|
||||
globalSweeper.snapshotMu.RLock()
|
||||
defer globalSweeper.snapshotMu.RUnlock()
|
||||
return m.snapshot
|
||||
}
|
||||
|
||||
// Reset sets accumulator, total and rate to zero.
|
||||
func (m *Meter) Reset() {
|
||||
globalSweeper.snapshotMu.Lock()
|
||||
atomic.StoreUint64(&m.accumulator, 0)
|
||||
m.snapshot.Rate = 0
|
||||
m.snapshot.Total = 0
|
||||
globalSweeper.snapshotMu.Unlock()
|
||||
}
|
||||
|
||||
func (m *Meter) String() string {
|
||||
return m.Snapshot().String()
|
||||
}
|
||||
82
vendor/github.com/libp2p/go-flow-metrics/registry.go
generated
vendored
Normal file
82
vendor/github.com/libp2p/go-flow-metrics/registry.go
generated
vendored
Normal file
@@ -0,0 +1,82 @@
|
||||
package flow
|
||||
|
||||
import (
|
||||
"sync"
|
||||
"time"
|
||||
)
|
||||
|
||||
// MeterRegistry is a registry for named meters.
|
||||
type MeterRegistry struct {
|
||||
meters sync.Map
|
||||
}
|
||||
|
||||
// Get gets (or creates) a meter by name.
|
||||
func (r *MeterRegistry) Get(name string) *Meter {
|
||||
if m, ok := r.meters.Load(name); ok {
|
||||
return m.(*Meter)
|
||||
}
|
||||
m, _ := r.meters.LoadOrStore(name, NewMeter())
|
||||
return m.(*Meter)
|
||||
}
|
||||
|
||||
// FindIdle finds all meters that haven't been used since the given time.
|
||||
func (r *MeterRegistry) FindIdle(since time.Time) []string {
|
||||
var idle []string
|
||||
r.walkIdle(since, func(key interface{}) {
|
||||
idle = append(idle, key.(string))
|
||||
})
|
||||
return idle
|
||||
}
|
||||
|
||||
// TrimIdle trims that haven't been updated since the given time. Returns the
|
||||
// number of timers trimmed.
|
||||
func (r *MeterRegistry) TrimIdle(since time.Time) (trimmed int) {
|
||||
// keep these as interfaces to avoid allocating when calling delete.
|
||||
var idle []interface{}
|
||||
r.walkIdle(since, func(key interface{}) {
|
||||
idle = append(idle, since)
|
||||
})
|
||||
for _, i := range idle {
|
||||
r.meters.Delete(i)
|
||||
}
|
||||
return len(idle)
|
||||
}
|
||||
|
||||
func (r *MeterRegistry) walkIdle(since time.Time, cb func(key interface{})) {
|
||||
// Yes, this is a global lock. However, all taking this does is pause
|
||||
// snapshotting.
|
||||
globalSweeper.snapshotMu.RLock()
|
||||
defer globalSweeper.snapshotMu.RUnlock()
|
||||
|
||||
r.meters.Range(func(k, v interface{}) bool {
|
||||
// So, this _is_ slightly inaccurate.
|
||||
if v.(*Meter).snapshot.LastUpdate.Before(since) {
|
||||
cb(k)
|
||||
}
|
||||
return true
|
||||
})
|
||||
}
|
||||
|
||||
// Remove removes the named meter from the registry.
|
||||
//
|
||||
// Note: The only reason to do this is to save a bit of memory. Unused meters
|
||||
// don't consume any CPU (after they go idle).
|
||||
func (r *MeterRegistry) Remove(name string) {
|
||||
r.meters.Delete(name)
|
||||
}
|
||||
|
||||
// ForEach calls the passed function for each registered meter.
|
||||
func (r *MeterRegistry) ForEach(iterFunc func(string, *Meter)) {
|
||||
r.meters.Range(func(k, v interface{}) bool {
|
||||
iterFunc(k.(string), v.(*Meter))
|
||||
return true
|
||||
})
|
||||
}
|
||||
|
||||
// Clear removes all meters from the registry.
|
||||
func (r *MeterRegistry) Clear() {
|
||||
r.meters.Range(func(k, v interface{}) bool {
|
||||
r.meters.Delete(k)
|
||||
return true
|
||||
})
|
||||
}
|
||||
191
vendor/github.com/libp2p/go-flow-metrics/sweeper.go
generated
vendored
Normal file
191
vendor/github.com/libp2p/go-flow-metrics/sweeper.go
generated
vendored
Normal file
@@ -0,0 +1,191 @@
|
||||
package flow
|
||||
|
||||
import (
|
||||
"math"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
|
||||
"github.com/benbjohnson/clock"
|
||||
)
|
||||
|
||||
// IdleRate the rate at which we declare a meter idle (and stop tracking it
|
||||
// until it's re-registered).
|
||||
//
|
||||
// The default ensures that 1 event every ~30s will keep the meter from going
|
||||
// idle.
|
||||
var IdleRate = 1e-13
|
||||
|
||||
// Alpha for EWMA of 1s
|
||||
var alpha = 1 - math.Exp(-1.0)
|
||||
|
||||
// The global sweeper.
|
||||
var globalSweeper sweeper
|
||||
|
||||
var cl = clock.New()
|
||||
|
||||
// SetClock sets a clock to use in the sweeper.
|
||||
// This will probably only ever be useful for testing purposes.
|
||||
func SetClock(c clock.Clock) {
|
||||
cl = c
|
||||
}
|
||||
|
||||
type sweeper struct {
|
||||
sweepOnce sync.Once
|
||||
|
||||
snapshotMu sync.RWMutex
|
||||
meters []*Meter
|
||||
activeMeters int
|
||||
|
||||
lastUpdateTime time.Time
|
||||
registerChannel chan *Meter
|
||||
}
|
||||
|
||||
func (sw *sweeper) start() {
|
||||
sw.registerChannel = make(chan *Meter, 16)
|
||||
go sw.run()
|
||||
}
|
||||
|
||||
func (sw *sweeper) run() {
|
||||
for m := range sw.registerChannel {
|
||||
sw.register(m)
|
||||
sw.runActive()
|
||||
}
|
||||
}
|
||||
|
||||
func (sw *sweeper) register(m *Meter) {
|
||||
if m.registered {
|
||||
// registered twice, move on.
|
||||
return
|
||||
}
|
||||
m.registered = true
|
||||
sw.meters = append(sw.meters, m)
|
||||
}
|
||||
|
||||
func (sw *sweeper) runActive() {
|
||||
ticker := cl.Ticker(time.Second)
|
||||
defer ticker.Stop()
|
||||
|
||||
sw.lastUpdateTime = cl.Now()
|
||||
for len(sw.meters) > 0 {
|
||||
// Scale back allocation.
|
||||
if len(sw.meters)*2 < cap(sw.meters) {
|
||||
newMeters := make([]*Meter, len(sw.meters))
|
||||
copy(newMeters, sw.meters)
|
||||
sw.meters = newMeters
|
||||
}
|
||||
|
||||
select {
|
||||
case <-ticker.C:
|
||||
sw.update()
|
||||
case m := <-sw.registerChannel:
|
||||
sw.register(m)
|
||||
}
|
||||
}
|
||||
sw.meters = nil
|
||||
// Till next time.
|
||||
}
|
||||
|
||||
func (sw *sweeper) update() {
|
||||
sw.snapshotMu.Lock()
|
||||
defer sw.snapshotMu.Unlock()
|
||||
|
||||
now := cl.Now()
|
||||
tdiff := now.Sub(sw.lastUpdateTime)
|
||||
if tdiff <= 0 {
|
||||
return
|
||||
}
|
||||
sw.lastUpdateTime = now
|
||||
timeMultiplier := float64(time.Second) / float64(tdiff)
|
||||
|
||||
// Calculate the bandwidth for all active meters.
|
||||
for i, m := range sw.meters[:sw.activeMeters] {
|
||||
total := atomic.LoadUint64(&m.accumulator)
|
||||
diff := total - m.snapshot.Total
|
||||
instant := timeMultiplier * float64(diff)
|
||||
|
||||
if diff > 0 {
|
||||
m.snapshot.LastUpdate = now
|
||||
}
|
||||
|
||||
if m.snapshot.Rate == 0 {
|
||||
m.snapshot.Rate = instant
|
||||
} else {
|
||||
m.snapshot.Rate += alpha * (instant - m.snapshot.Rate)
|
||||
}
|
||||
m.snapshot.Total = total
|
||||
|
||||
// This is equivalent to one zeros, then one, then 30 zeros.
|
||||
// We'll consider that to be "idle".
|
||||
if m.snapshot.Rate > IdleRate {
|
||||
continue
|
||||
}
|
||||
|
||||
// Ok, so we are idle...
|
||||
|
||||
// Mark this as idle by zeroing the accumulator.
|
||||
swappedTotal := atomic.SwapUint64(&m.accumulator, 0)
|
||||
|
||||
// So..., are we really idle?
|
||||
if swappedTotal > total {
|
||||
// Not so idle...
|
||||
// Now we need to make sure this gets re-registered.
|
||||
|
||||
// First, add back what we removed. If we can do this
|
||||
// fast enough, we can put it back before anyone
|
||||
// notices.
|
||||
currentTotal := atomic.AddUint64(&m.accumulator, swappedTotal)
|
||||
|
||||
// Did we make it?
|
||||
if currentTotal == swappedTotal {
|
||||
// Yes! Nobody noticed, move along.
|
||||
continue
|
||||
}
|
||||
// No. Someone noticed and will (or has) put back into
|
||||
// the registration channel.
|
||||
//
|
||||
// Remove the snapshot total, it'll get added back on
|
||||
// registration.
|
||||
//
|
||||
// `^uint64(total - 1)` is the two's complement of
|
||||
// `total`. It's the "correct" way to subtract
|
||||
// atomically in go.
|
||||
atomic.AddUint64(&m.accumulator, ^uint64(m.snapshot.Total-1))
|
||||
}
|
||||
|
||||
// Reset the rate, keep the total.
|
||||
m.registered = false
|
||||
m.snapshot.Rate = 0
|
||||
sw.meters[i] = nil
|
||||
}
|
||||
|
||||
// Re-add the total to all the newly active accumulators and set the snapshot to the total.
|
||||
// 1. We don't do this on register to avoid having to take the snapshot lock.
|
||||
// 2. We skip calculating the bandwidth for this round so we get an _accurate_ bandwidth calculation.
|
||||
for _, m := range sw.meters[sw.activeMeters:] {
|
||||
total := atomic.AddUint64(&m.accumulator, m.snapshot.Total)
|
||||
if total > m.snapshot.Total {
|
||||
m.snapshot.LastUpdate = now
|
||||
}
|
||||
m.snapshot.Total = total
|
||||
}
|
||||
|
||||
// compress and trim the meter list
|
||||
var newLen int
|
||||
for _, m := range sw.meters {
|
||||
if m != nil {
|
||||
sw.meters[newLen] = m
|
||||
newLen++
|
||||
}
|
||||
}
|
||||
|
||||
sw.meters = sw.meters[:newLen]
|
||||
|
||||
// Finally, mark all meters still in the list as "active".
|
||||
sw.activeMeters = len(sw.meters)
|
||||
}
|
||||
|
||||
func (sw *sweeper) Register(m *Meter) {
|
||||
sw.sweepOnce.Do(sw.start)
|
||||
sw.registerChannel <- m
|
||||
}
|
||||
21
vendor/github.com/libp2p/go-libp2p-asn-util/LICENSE
generated
vendored
Normal file
21
vendor/github.com/libp2p/go-libp2p-asn-util/LICENSE
generated
vendored
Normal file
@@ -0,0 +1,21 @@
|
||||
The MIT License (MIT)
|
||||
|
||||
Copyright (c) Protocol Labs, Inc.
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in
|
||||
all copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
||||
THE SOFTWARE.
|
||||
48
vendor/github.com/libp2p/go-libp2p-asn-util/README.md
generated
vendored
Normal file
48
vendor/github.com/libp2p/go-libp2p-asn-util/README.md
generated
vendored
Normal file
@@ -0,0 +1,48 @@
|
||||
# go-libp2p-asn-util
|
||||
===
|
||||
|
||||
[](http://protocol.ai)
|
||||
[](http://github.com/libp2p/libp2p)
|
||||
|
||||
A library to lookup the ASN(Autonomous System Number) for an IP address. It uses the IPv6 to ASN database downloaded from https://iptoasn.com/.
|
||||
Supports ONLY IPv6 addresses for now.
|
||||
|
||||
## Table of Contents
|
||||
|
||||
- [Install](#install)
|
||||
- [Usage](#usage)
|
||||
- [Documentation](#documentation)
|
||||
- [Contribute](#contribute)
|
||||
- [License](#license)
|
||||
|
||||
## Install
|
||||
|
||||
```
|
||||
go get github.com/libp2p/go-libp2p-asn-util
|
||||
```
|
||||
|
||||
## Usage
|
||||
|
||||
```go
|
||||
import (
|
||||
asn "github.com/libp2p/go-libp2p-asn-util"
|
||||
)
|
||||
|
||||
func main() {
|
||||
store, err := asn.NewAsnStore()
|
||||
|
||||
asNumber,err := store.AsnForIP(net.ParseIP("2a03:2880:f003:c07:face:b00c::2"))
|
||||
}
|
||||
```
|
||||
|
||||
## Contribute
|
||||
|
||||
Feel free to join in. All welcome. Open an [issue](https://github.com/libp2p/go-libp2p-asn/issues)!
|
||||
|
||||
This repository falls under the IPFS [Code of Conduct](https://github.com/ipfs/community/blob/master/code-of-conduct.md).
|
||||
|
||||
## License
|
||||
|
||||
MIT
|
||||
|
||||
---
|
||||
95
vendor/github.com/libp2p/go-libp2p-asn-util/asn.go
generated
vendored
Normal file
95
vendor/github.com/libp2p/go-libp2p-asn-util/asn.go
generated
vendored
Normal file
@@ -0,0 +1,95 @@
|
||||
package asnutil
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"net"
|
||||
"sync"
|
||||
|
||||
"github.com/libp2p/go-cidranger"
|
||||
)
|
||||
|
||||
var Store *lazyAsnStore
|
||||
|
||||
func init() {
|
||||
Store = &lazyAsnStore{}
|
||||
}
|
||||
|
||||
type networkWithAsn struct {
|
||||
nn net.IPNet
|
||||
asn string
|
||||
}
|
||||
|
||||
func (e *networkWithAsn) Network() net.IPNet {
|
||||
return e.nn
|
||||
}
|
||||
|
||||
type asnStore struct {
|
||||
cr cidranger.Ranger
|
||||
}
|
||||
|
||||
// AsnForIPv6 returns the AS number for the given IPv6 address.
|
||||
// If no mapping exists for the given IP, this function will
|
||||
// return an empty ASN and a nil error.
|
||||
func (a *asnStore) AsnForIPv6(ip net.IP) (string, error) {
|
||||
if ip.To16() == nil {
|
||||
return "", errors.New("ONLY IPv6 addresses supported for now")
|
||||
}
|
||||
|
||||
ns, err := a.cr.ContainingNetworks(ip)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("failed to find matching networks for the given ip: %w", err)
|
||||
}
|
||||
|
||||
if len(ns) == 0 {
|
||||
return "", nil
|
||||
}
|
||||
|
||||
// longest prefix match
|
||||
n := ns[len(ns)-1].(*networkWithAsn)
|
||||
return n.asn, nil
|
||||
}
|
||||
|
||||
func newAsnStore() (*asnStore, error) {
|
||||
cr := cidranger.NewPCTrieRanger()
|
||||
|
||||
for _, v := range ipv6CidrToAsnPairList {
|
||||
_, nn, err := net.ParseCIDR(v.cidr)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to parse CIDR %s: %w", v.cidr, err)
|
||||
}
|
||||
|
||||
if err := cr.Insert(&networkWithAsn{*nn, v.asn}); err != nil {
|
||||
return nil, fmt.Errorf("failed to insert CIDR %s in Trie store: %w", v.cidr, err)
|
||||
}
|
||||
}
|
||||
|
||||
return &asnStore{cr}, nil
|
||||
}
|
||||
|
||||
// lazyAsnStore builds the underlying trie on first call to AsnForIPv6.
|
||||
// Alternatively, Init can be called to manually trigger initialization.
|
||||
type lazyAsnStore struct {
|
||||
store *asnStore
|
||||
once sync.Once
|
||||
}
|
||||
|
||||
// AsnForIPv6 returns the AS number for the given IPv6 address.
|
||||
// If no mapping exists for the given IP, this function will
|
||||
// return an empty ASN and a nil error.
|
||||
func (a *lazyAsnStore) AsnForIPv6(ip net.IP) (string, error) {
|
||||
a.once.Do(a.init)
|
||||
return a.store.AsnForIPv6(ip)
|
||||
}
|
||||
|
||||
func (a *lazyAsnStore) Init() {
|
||||
a.once.Do(a.init)
|
||||
}
|
||||
|
||||
func (a *lazyAsnStore) init() {
|
||||
store, err := newAsnStore()
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
a.store = store
|
||||
}
|
||||
5
vendor/github.com/libp2p/go-libp2p-asn-util/doc.go
generated
vendored
Normal file
5
vendor/github.com/libp2p/go-libp2p-asn-util/doc.go
generated
vendored
Normal file
@@ -0,0 +1,5 @@
|
||||
// Package asnutil provides a mapping lookup for IPv6 ASNs.
|
||||
package asnutil
|
||||
|
||||
//go:generate go run ./generate/
|
||||
//go:generate go fmt ./...
|
||||
79014
vendor/github.com/libp2p/go-libp2p-asn-util/ipv6_asn_map.gen.go
generated
vendored
Normal file
79014
vendor/github.com/libp2p/go-libp2p-asn-util/ipv6_asn_map.gen.go
generated
vendored
Normal file
File diff suppressed because it is too large
Load Diff
3
vendor/github.com/libp2p/go-libp2p-asn-util/version.json
generated
vendored
Normal file
3
vendor/github.com/libp2p/go-libp2p-asn-util/version.json
generated
vendored
Normal file
@@ -0,0 +1,3 @@
|
||||
{
|
||||
"version": "v0.3.0"
|
||||
}
|
||||
2
vendor/github.com/libp2p/go-libp2p-pubsub/.codecov.yml
generated
vendored
Normal file
2
vendor/github.com/libp2p/go-libp2p-pubsub/.codecov.yml
generated
vendored
Normal file
@@ -0,0 +1,2 @@
|
||||
github_checks:
|
||||
annotations: false
|
||||
5
vendor/github.com/libp2p/go-libp2p-pubsub/.gitignore
generated
vendored
Normal file
5
vendor/github.com/libp2p/go-libp2p-pubsub/.gitignore
generated
vendored
Normal file
@@ -0,0 +1,5 @@
|
||||
cover.out
|
||||
prof.out
|
||||
go-floodsub.test
|
||||
|
||||
.idea/
|
||||
8
vendor/github.com/libp2p/go-libp2p-pubsub/LICENSE
generated
vendored
Normal file
8
vendor/github.com/libp2p/go-libp2p-pubsub/LICENSE
generated
vendored
Normal file
@@ -0,0 +1,8 @@
|
||||
This project is transitioning from an MIT-only license to a dual MIT/Apache-2.0 license.
|
||||
Unless otherwise noted, all code contributed prior to 2019-05-06 and not contributed by
|
||||
a user listed in [this signoff issue](https://github.com/ipfs/go-ipfs/issues/6302) is
|
||||
licensed under MIT-only. All new contributions (and past contributions since 2019-05-06)
|
||||
are licensed under a dual MIT/Apache-2.0 license.
|
||||
|
||||
MIT: https://www.opensource.org/licenses/mit
|
||||
Apache-2.0: https://www.apache.org/licenses/license-2.0
|
||||
5
vendor/github.com/libp2p/go-libp2p-pubsub/LICENSE-APACHE
generated
vendored
Normal file
5
vendor/github.com/libp2p/go-libp2p-pubsub/LICENSE-APACHE
generated
vendored
Normal file
@@ -0,0 +1,5 @@
|
||||
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
|
||||
19
vendor/github.com/libp2p/go-libp2p-pubsub/LICENSE-MIT
generated
vendored
Normal file
19
vendor/github.com/libp2p/go-libp2p-pubsub/LICENSE-MIT
generated
vendored
Normal file
@@ -0,0 +1,19 @@
|
||||
The MIT License (MIT)
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in
|
||||
all copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
||||
THE SOFTWARE.
|
||||
155
vendor/github.com/libp2p/go-libp2p-pubsub/README.md
generated
vendored
Normal file
155
vendor/github.com/libp2p/go-libp2p-pubsub/README.md
generated
vendored
Normal file
@@ -0,0 +1,155 @@
|
||||
# go-libp2p-pubsub
|
||||
|
||||
<p align="left">
|
||||
<a href="http://protocol.ai"><img src="https://img.shields.io/badge/made%20by-Protocol%20Labs-blue.svg?style=flat-square" /></a>
|
||||
<a href="http://libp2p.io/"><img src="https://img.shields.io/badge/project-libp2p-yellow.svg?style=flat-square" /></a>
|
||||
<a href="http://webchat.freenode.net/?channels=%23libp2p"><img src="https://img.shields.io/badge/freenode-%23libp2p-yellow.svg?style=flat-square" /></a>
|
||||
<a href="https://discuss.libp2p.io"><img src="https://img.shields.io/discourse/https/discuss.libp2p.io/posts.svg?style=flat-square"/></a>
|
||||
</p>
|
||||
|
||||
<p align="left">
|
||||
<a href="https://codecov.io/gh/libp2p/go-libp2p-pubsub"><img src="https://codecov.io/gh/libp2p/go-libp2p-pubsub/branch/master/graph/badge.svg"></a>
|
||||
<a href="https://goreportcard.com/report/github.com/libp2p/go-libp2p-pubsub"><img src="https://goreportcard.com/badge/github.com/libp2p/go-libp2p-pubsub" /></a>
|
||||
<a href="https://github.com/RichardLitt/standard-readme"><img src="https://img.shields.io/badge/readme%20style-standard-brightgreen.svg?style=flat-square" /></a>
|
||||
<a href="https://godoc.org/github.com/libp2p/go-libp2p-pubsub"><img src="http://img.shields.io/badge/godoc-reference-5272B4.svg?style=flat-square" /></a>
|
||||
<a href=""><img src="https://img.shields.io/badge/golang-%3E%3D1.14.0-orange.svg?style=flat-square" /></a>
|
||||
<br>
|
||||
</p>
|
||||
|
||||
This repo contains the canonical pubsub implementation for libp2p. We currently provide three message router options:
|
||||
- Floodsub, which is the baseline flooding protocol.
|
||||
- Randomsub, which is a simple probabilistic router that propagates to random subsets of peers.
|
||||
- Gossipsub, which is a more advanced router with mesh formation and gossip propagation. See [spec](https://github.com/libp2p/specs/tree/master/pubsub/gossipsub) and [implementation](https://github.com/libp2p/go-libp2p-pubsub/blob/master/gossipsub.go) for more details.
|
||||
|
||||
|
||||
## Repo Lead Maintainer
|
||||
|
||||
[@vyzo](https://github.com/vyzo/)
|
||||
|
||||
> This repo follows the [Repo Lead Maintainer Protocol](https://github.com/ipfs/team-mgmt/blob/master/LEAD_MAINTAINER_PROTOCOL.md)
|
||||
|
||||
## Table of Contents
|
||||
|
||||
<!-- START doctoc generated TOC please keep comment here to allow auto update -->
|
||||
<!-- DON'T EDIT THIS SECTION, INSTEAD RE-RUN doctoc TO UPDATE -->
|
||||
|
||||
- [Install](#install)
|
||||
- [Usage](#usage)
|
||||
- [Example](#example)
|
||||
- [Documentation](#documentation)
|
||||
- [Tracing](#tracing)
|
||||
- [Contribute](#contribute)
|
||||
- [License](#license)
|
||||
|
||||
<!-- END doctoc generated TOC please keep comment here to allow auto update -->
|
||||
|
||||
## Install
|
||||
|
||||
```
|
||||
go get github.com/libp2p/go-libp2p-pubsub
|
||||
```
|
||||
|
||||
## Usage
|
||||
|
||||
To be used for messaging in p2p instrastructure (as part of libp2p) such as IPFS, Ethereum, other blockchains, etc.
|
||||
|
||||
### Example
|
||||
|
||||
https://github.com/libp2p/go-libp2p/tree/master/examples/pubsub
|
||||
|
||||
## Documentation
|
||||
|
||||
See the [libp2p specs](https://github.com/libp2p/specs/tree/master/pubsub) for high level documentation and [godoc](https://godoc.org/github.com/libp2p/go-libp2p-pubsub) for API documentation.
|
||||
|
||||
### In this repo, you will find
|
||||
|
||||
```
|
||||
.
|
||||
├── LICENSE
|
||||
├── README.md
|
||||
# Regular Golang repo set up
|
||||
├── codecov.yml
|
||||
├── pb
|
||||
├── go.mod
|
||||
├── go.sum
|
||||
├── doc.go
|
||||
# PubSub base
|
||||
├── pubsub.go
|
||||
├── blacklist.go
|
||||
├── notify.go
|
||||
├── comm.go
|
||||
├── discovery.go
|
||||
├── sign.go
|
||||
├── subscription.go
|
||||
├── topic.go
|
||||
├── trace.go
|
||||
├── tracer.go
|
||||
├── validation.go
|
||||
# Floodsub router
|
||||
├── floodsub.go
|
||||
# Randomsub router
|
||||
├── randomsub.go
|
||||
# Gossipsub router
|
||||
├── gossipsub.go
|
||||
├── score.go
|
||||
├── score_params.go
|
||||
└── mcache.go
|
||||
```
|
||||
|
||||
### Tracing
|
||||
|
||||
The pubsub system supports _tracing_, which collects all events pertaining to the internals of the system. This allows you to recreate the complete message flow and state of the system for analysis purposes.
|
||||
|
||||
To enable tracing, instantiate the pubsub system using the `WithEventTracer` option; the option accepts a tracer with three available implementations in-package (trace to json, pb, or a remote peer).
|
||||
If you want to trace using a remote peer, you can do so using the `traced` daemon from [go-libp2p-pubsub-tracer](https://github.com/libp2p/go-libp2p-pubsub-tracer). The package also includes a utility program, `tracestat`, for analyzing the traces collected by the daemon.
|
||||
|
||||
For instance, to capture the trace as a json file, you can use the following option:
|
||||
```go
|
||||
tracer, err := pubsub.NewJSONTracer("/path/to/trace.json")
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
pubsub.NewGossipSub(..., pubsub.WithEventTracer(tracer))
|
||||
```
|
||||
|
||||
To capture the trace as a protobuf, you can use the following option:
|
||||
```go
|
||||
tracer, err := pubsub.NewPBTracer("/path/to/trace.pb")
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
pubsub.NewGossipSub(..., pubsub.WithEventTracer(tracer))
|
||||
```
|
||||
|
||||
Finally, to use the remote tracer, you can use the following incantations:
|
||||
```go
|
||||
// assuming that your tracer runs in x.x.x.x and has a peer ID of QmTracer
|
||||
pi, err := peer.AddrInfoFromP2pAddr(ma.StringCast("/ip4/x.x.x.x/tcp/4001/p2p/QmTracer"))
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
tracer, err := pubsub.NewRemoteTracer(ctx, host, pi)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
ps, err := pubsub.NewGossipSub(..., pubsub.WithEventTracer(tracer))
|
||||
```
|
||||
|
||||
## Contribute
|
||||
|
||||
Contributions welcome. Please check out [the issues](https://github.com/libp2p/go-libp2p-pubsub/issues).
|
||||
|
||||
Check out our [contributing document](https://github.com/libp2p/community/blob/master/contributing.md) for more information on how we work, and about contributing in general. Please be aware that all interactions related to multiformats are subject to the IPFS [Code of Conduct](https://github.com/ipfs/community/blob/master/code-of-conduct.md).
|
||||
|
||||
Small note: If editing the README, please conform to the [standard-readme](https://github.com/RichardLitt/standard-readme) specification.
|
||||
|
||||
## License
|
||||
|
||||
The go-libp2p-pubsub project is dual-licensed under Apache 2.0 and MIT terms:
|
||||
|
||||
- Apache License, Version 2.0, ([LICENSE-APACHE](./LICENSE-APACHE) or http://www.apache.org/licenses/LICENSE-2.0)
|
||||
- MIT license ([LICENSE-MIT](./LICENSE-MIT) or http://opensource.org/licenses/MIT)
|
||||
107
vendor/github.com/libp2p/go-libp2p-pubsub/backoff.go
generated
vendored
Normal file
107
vendor/github.com/libp2p/go-libp2p-pubsub/backoff.go
generated
vendored
Normal file
@@ -0,0 +1,107 @@
|
||||
package pubsub
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"math/rand"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/libp2p/go-libp2p/core/peer"
|
||||
)
|
||||
|
||||
const (
|
||||
MinBackoffDelay = 100 * time.Millisecond
|
||||
MaxBackoffDelay = 10 * time.Second
|
||||
TimeToLive = 10 * time.Minute
|
||||
BackoffCleanupInterval = 1 * time.Minute
|
||||
BackoffMultiplier = 2
|
||||
MaxBackoffJitterCoff = 100
|
||||
MaxBackoffAttempts = 4
|
||||
)
|
||||
|
||||
// backoffHistory records the retry state for a single peer.
type backoffHistory struct {
	duration  time.Duration // current backoff delay applied before the next attempt
	lastTried time.Time     // last attempt time; entries older than TimeToLive are reset/evicted
	attempts  int           // attempts made since the history was (re)created
}

// backoff tracks per-peer backoff state, guarded by mu.
// A background goroutine (cleanupLoop) periodically evicts expired entries.
type backoff struct {
	mu          sync.Mutex
	info        map[peer.ID]*backoffHistory
	ct          int           // size threshold that kicks off the cleaner
	ci          time.Duration // cleanup intervals
	maxAttempts int           // maximum backoff attempts prior to ejection
}
|
||||
|
||||
func newBackoff(ctx context.Context, sizeThreshold int, cleanupInterval time.Duration, maxAttempts int) *backoff {
|
||||
b := &backoff{
|
||||
mu: sync.Mutex{},
|
||||
ct: sizeThreshold,
|
||||
ci: cleanupInterval,
|
||||
maxAttempts: maxAttempts,
|
||||
info: make(map[peer.ID]*backoffHistory),
|
||||
}
|
||||
|
||||
rand.Seed(time.Now().UnixNano()) // used for jitter
|
||||
go b.cleanupLoop(ctx)
|
||||
|
||||
return b
|
||||
}
|
||||
|
||||
// updateAndGet records an attempt for the given peer and returns the backoff
// delay the caller should wait before retrying. It returns an error once the
// peer has exhausted b.maxAttempts (until its history expires via TimeToLive).
func (b *backoff) updateAndGet(id peer.ID) (time.Duration, error) {
	b.mu.Lock()
	defer b.mu.Unlock()

	h, ok := b.info[id]
	// Only the first matching case runs; the order below is significant.
	switch {
	case !ok || time.Since(h.lastTried) > TimeToLive:
		// first request goes immediately.
		h = &backoffHistory{
			duration: time.Duration(0),
			attempts: 0,
		}
	case h.attempts >= b.maxAttempts:
		// attempts are NOT incremented here; the entry lingers until TTL expiry.
		return 0, fmt.Errorf("peer %s has reached its maximum backoff attempts", id)

	case h.duration < MinBackoffDelay:
		// first retry: start at the minimum delay.
		h.duration = MinBackoffDelay

	case h.duration < MaxBackoffDelay:
		// exponential growth with random millisecond jitter; the < 0 check
		// guards against time.Duration overflow from the multiplication.
		jitter := rand.Intn(MaxBackoffJitterCoff)
		h.duration = (BackoffMultiplier * h.duration) + time.Duration(jitter)*time.Millisecond
		if h.duration > MaxBackoffDelay || h.duration < 0 {
			h.duration = MaxBackoffDelay
		}
	}

	h.attempts += 1
	h.lastTried = time.Now()
	b.info[id] = h
	return h.duration, nil
}
|
||||
|
||||
func (b *backoff) cleanup() {
|
||||
b.mu.Lock()
|
||||
defer b.mu.Unlock()
|
||||
|
||||
for id, h := range b.info {
|
||||
if time.Since(h.lastTried) > TimeToLive {
|
||||
delete(b.info, id)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (b *backoff) cleanupLoop(ctx context.Context) {
|
||||
ticker := time.NewTicker(b.ci)
|
||||
defer ticker.Stop()
|
||||
|
||||
for {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return // pubsub shutting down
|
||||
case <-ticker.C:
|
||||
b.cleanup()
|
||||
}
|
||||
}
|
||||
}
|
||||
58
vendor/github.com/libp2p/go-libp2p-pubsub/blacklist.go
generated
vendored
Normal file
58
vendor/github.com/libp2p/go-libp2p-pubsub/blacklist.go
generated
vendored
Normal file
@@ -0,0 +1,58 @@
|
||||
package pubsub
|
||||
|
||||
import (
|
||||
"time"
|
||||
|
||||
"github.com/libp2p/go-libp2p/core/peer"
|
||||
|
||||
"github.com/libp2p/go-libp2p-pubsub/timecache"
|
||||
)
|
||||
|
||||
// Blacklist is an interface for peer blacklisting.
type Blacklist interface {
	// Add blacklists the peer; it reports whether the add took effect
	// (implementation-specific — see MapBlacklist vs TimeCachedBlacklist).
	Add(peer.ID) bool
	// Contains reports whether the peer is currently blacklisted.
	Contains(peer.ID) bool
}

// MapBlacklist is a blacklist implementation using a perfect map.
// Entries never expire.
type MapBlacklist map[peer.ID]struct{}

// NewMapBlacklist creates a new MapBlacklist
func NewMapBlacklist() Blacklist {
	return MapBlacklist(make(map[peer.ID]struct{}))
}

// Add inserts the peer; it always returns true, even for peers already present.
func (b MapBlacklist) Add(p peer.ID) bool {
	b[p] = struct{}{}
	return true
}

// Contains reports whether the peer is in the map.
func (b MapBlacklist) Contains(p peer.ID) bool {
	_, ok := b[p]
	return ok
}
|
||||
|
||||
// TimeCachedBlacklist is a blacklist implementation using a time cache
|
||||
type TimeCachedBlacklist struct {
|
||||
tc timecache.TimeCache
|
||||
}
|
||||
|
||||
// NewTimeCachedBlacklist creates a new TimeCachedBlacklist with the given expiry duration
|
||||
func NewTimeCachedBlacklist(expiry time.Duration) (Blacklist, error) {
|
||||
b := &TimeCachedBlacklist{tc: timecache.NewTimeCache(expiry)}
|
||||
return b, nil
|
||||
}
|
||||
|
||||
// Add returns a bool saying whether Add of peer was successful
|
||||
func (b *TimeCachedBlacklist) Add(p peer.ID) bool {
|
||||
s := p.String()
|
||||
if b.tc.Has(s) {
|
||||
return false
|
||||
}
|
||||
b.tc.Add(s)
|
||||
return true
|
||||
}
|
||||
|
||||
func (b *TimeCachedBlacklist) Contains(p peer.ID) bool {
|
||||
return b.tc.Has(p.String())
|
||||
}
|
||||
3
vendor/github.com/libp2p/go-libp2p-pubsub/codecov.yml
generated
vendored
Normal file
3
vendor/github.com/libp2p/go-libp2p-pubsub/codecov.yml
generated
vendored
Normal file
@@ -0,0 +1,3 @@
|
||||
coverage:
|
||||
range: "50...100"
|
||||
comment: off
|
||||
231
vendor/github.com/libp2p/go-libp2p-pubsub/comm.go
generated
vendored
Normal file
231
vendor/github.com/libp2p/go-libp2p-pubsub/comm.go
generated
vendored
Normal file
@@ -0,0 +1,231 @@
|
||||
package pubsub
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/binary"
|
||||
"io"
|
||||
"time"
|
||||
|
||||
"github.com/gogo/protobuf/proto"
|
||||
pool "github.com/libp2p/go-buffer-pool"
|
||||
"github.com/multiformats/go-varint"
|
||||
|
||||
"github.com/libp2p/go-libp2p/core/network"
|
||||
"github.com/libp2p/go-libp2p/core/peer"
|
||||
"github.com/libp2p/go-msgio"
|
||||
|
||||
pb "github.com/libp2p/go-libp2p-pubsub/pb"
|
||||
)
|
||||
|
||||
// get the initial RPC containing all of our subscriptions to send to new peers
|
||||
func (p *PubSub) getHelloPacket() *RPC {
|
||||
var rpc RPC
|
||||
|
||||
subscriptions := make(map[string]bool)
|
||||
|
||||
for t := range p.mySubs {
|
||||
subscriptions[t] = true
|
||||
}
|
||||
|
||||
for t := range p.myRelays {
|
||||
subscriptions[t] = true
|
||||
}
|
||||
|
||||
for t := range subscriptions {
|
||||
as := &pb.RPC_SubOpts{
|
||||
Topicid: proto.String(t),
|
||||
Subscribe: proto.Bool(true),
|
||||
}
|
||||
rpc.Subscriptions = append(rpc.Subscriptions, as)
|
||||
}
|
||||
return &rpc
|
||||
}
|
||||
|
||||
// handleNewStream is the inbound stream handler: it reads varint-framed RPCs
// from the peer and forwards them to the pubsub event loop via p.incoming.
// Only one inbound stream per peer is kept; a duplicate resets the older one.
func (p *PubSub) handleNewStream(s network.Stream) {
	peer := s.Conn().RemotePeer()

	p.inboundStreamsMx.Lock()
	other, dup := p.inboundStreams[peer]
	if dup {
		log.Debugf("duplicate inbound stream from %s; resetting other stream", peer)
		other.Reset()
	}
	p.inboundStreams[peer] = s
	p.inboundStreamsMx.Unlock()

	defer func() {
		p.inboundStreamsMx.Lock()
		// only deregister if we are still the registered stream; a newer
		// duplicate may have replaced us in the map.
		if p.inboundStreams[peer] == s {
			delete(p.inboundStreams, peer)
		}
		p.inboundStreamsMx.Unlock()
	}()

	// msgio reader with pooled buffers; every ReadMsg result must be released.
	r := msgio.NewVarintReaderSize(s, p.maxMessageSize)
	for {
		msgbytes, err := r.ReadMsg()
		if err != nil {
			r.ReleaseMsg(msgbytes)
			if err != io.EOF {
				s.Reset()
				log.Debugf("error reading rpc from %s: %s", s.Conn().RemotePeer(), err)
			} else {
				// Just be nice. They probably won't read this
				// but it doesn't hurt to send it.
				s.Close()
			}

			return
		}

		rpc := new(RPC)
		err = rpc.Unmarshal(msgbytes)
		// release the pooled buffer before error handling; rpc owns its own copy now
		r.ReleaseMsg(msgbytes)
		if err != nil {
			s.Reset()
			log.Warnf("bogus rpc from %s: %s", s.Conn().RemotePeer(), err)
			return
		}

		rpc.from = peer
		select {
		case p.incoming <- rpc:
		case <-p.ctx.Done():
			// Close is useless because the other side isn't reading.
			s.Reset()
			return
		}
	}
}
|
||||
|
||||
// notifyPeerDead records pid as pending-dead and nudges the event loop.
// The lock ordering (prio read-lock around the pending-map mutex) must be
// preserved; the event loop takes the prio write lock to drain the map.
func (p *PubSub) notifyPeerDead(pid peer.ID) {
	p.peerDeadPrioLk.RLock()
	p.peerDeadMx.Lock()
	p.peerDeadPend[pid] = struct{}{}
	p.peerDeadMx.Unlock()
	p.peerDeadPrioLk.RUnlock()

	// non-blocking signal: a single pending token is enough to wake the loop
	select {
	case p.peerDead <- struct{}{}:
	default:
	}
}
|
||||
|
||||
func (p *PubSub) handleNewPeer(ctx context.Context, pid peer.ID, outgoing <-chan *RPC) {
|
||||
s, err := p.host.NewStream(p.ctx, pid, p.rt.Protocols()...)
|
||||
if err != nil {
|
||||
log.Debug("opening new stream to peer: ", err, pid)
|
||||
|
||||
select {
|
||||
case p.newPeerError <- pid:
|
||||
case <-ctx.Done():
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
go p.handleSendingMessages(ctx, s, outgoing)
|
||||
go p.handlePeerDead(s)
|
||||
select {
|
||||
case p.newPeerStream <- s:
|
||||
case <-ctx.Done():
|
||||
}
|
||||
}
|
||||
|
||||
func (p *PubSub) handleNewPeerWithBackoff(ctx context.Context, pid peer.ID, backoff time.Duration, outgoing <-chan *RPC) {
|
||||
select {
|
||||
case <-time.After(backoff):
|
||||
p.handleNewPeer(ctx, pid, outgoing)
|
||||
case <-ctx.Done():
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
func (p *PubSub) handlePeerDead(s network.Stream) {
|
||||
pid := s.Conn().RemotePeer()
|
||||
|
||||
_, err := s.Read([]byte{0})
|
||||
if err == nil {
|
||||
log.Debugf("unexpected message from %s", pid)
|
||||
}
|
||||
|
||||
s.Reset()
|
||||
p.notifyPeerDead(pid)
|
||||
}
|
||||
|
||||
// handleSendingMessages is the per-peer writer goroutine: it drains the
// outgoing channel and writes each RPC to the stream with a uvarint length
// prefix, using a pooled buffer to avoid per-message allocations.
// It exits when the channel closes, the context is cancelled, or a write fails.
func (p *PubSub) handleSendingMessages(ctx context.Context, s network.Stream, outgoing <-chan *RPC) {
	writeRpc := func(rpc *RPC) error {
		size := uint64(rpc.Size())

		// single pooled buffer holds the varint prefix plus the payload,
		// so the frame goes out in one Write call
		buf := pool.Get(varint.UvarintSize(size) + int(size))
		defer pool.Put(buf)

		n := binary.PutUvarint(buf, size)
		_, err := rpc.MarshalTo(buf[n:])
		if err != nil {
			return err
		}

		_, err = s.Write(buf)
		return err
	}

	defer s.Close()
	for {
		select {
		case rpc, ok := <-outgoing:
			if !ok {
				return
			}

			err := writeRpc(rpc)
			if err != nil {
				s.Reset()
				log.Debugf("writing message to %s: %s", s.Conn().RemotePeer(), err)
				return
			}
		case <-ctx.Done():
			return
		}
	}
}
|
||||
|
||||
func rpcWithSubs(subs ...*pb.RPC_SubOpts) *RPC {
|
||||
return &RPC{
|
||||
RPC: pb.RPC{
|
||||
Subscriptions: subs,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func rpcWithMessages(msgs ...*pb.Message) *RPC {
|
||||
return &RPC{RPC: pb.RPC{Publish: msgs}}
|
||||
}
|
||||
|
||||
func rpcWithControl(msgs []*pb.Message,
|
||||
ihave []*pb.ControlIHave,
|
||||
iwant []*pb.ControlIWant,
|
||||
graft []*pb.ControlGraft,
|
||||
prune []*pb.ControlPrune) *RPC {
|
||||
return &RPC{
|
||||
RPC: pb.RPC{
|
||||
Publish: msgs,
|
||||
Control: &pb.ControlMessage{
|
||||
Ihave: ihave,
|
||||
Iwant: iwant,
|
||||
Graft: graft,
|
||||
Prune: prune,
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// copyRPC makes a shallow copy of the RPC, additionally copying the Control
// struct one level deep so the copy's Control can be swapped independently.
// Note: the slices inside RPC and Control (Publish, Subscriptions,
// Ihave/Iwant/Graft/Prune) still share backing arrays with the original.
func copyRPC(rpc *RPC) *RPC {
	res := new(RPC)
	*res = *rpc
	if rpc.Control != nil {
		res.Control = new(pb.ControlMessage)
		*res.Control = *rpc.Control
	}
	return res
}
|
||||
348
vendor/github.com/libp2p/go-libp2p-pubsub/discovery.go
generated
vendored
Normal file
348
vendor/github.com/libp2p/go-libp2p-pubsub/discovery.go
generated
vendored
Normal file
@@ -0,0 +1,348 @@
|
||||
package pubsub
|
||||
|
||||
import (
|
||||
"context"
|
||||
"math/rand"
|
||||
"time"
|
||||
|
||||
"github.com/libp2p/go-libp2p/core/discovery"
|
||||
"github.com/libp2p/go-libp2p/core/host"
|
||||
"github.com/libp2p/go-libp2p/core/peer"
|
||||
discimpl "github.com/libp2p/go-libp2p/p2p/discovery/backoff"
|
||||
)
|
||||
|
||||
var (
	// poll interval

	// DiscoveryPollInitialDelay is how long the discovery system waits after it first starts before polling
	DiscoveryPollInitialDelay = 0 * time.Millisecond
	// DiscoveryPollInterval is approximately how long the discovery system waits in between checks for whether
	// more peers are needed for any topic
	DiscoveryPollInterval = 1 * time.Second
)

// discoveryAdvertiseRetryInterval is the interval at which to retry
// advertisements when they fail.
const discoveryAdvertiseRetryInterval = 2 * time.Minute
|
||||
|
||||
// DiscoverOpt configures the discovery subsystem at construction time.
type DiscoverOpt func(*discoverOptions) error

// discoverOptions holds the configurable pieces of the discovery pipeline.
type discoverOptions struct {
	connFactory BackoffConnectorFactory // builds the connector used to dial discovered peers
	opts        []discovery.Option      // extra options forwarded to Advertise/FindPeers
}
|
||||
|
||||
func defaultDiscoverOptions() *discoverOptions {
|
||||
rngSrc := rand.NewSource(rand.Int63())
|
||||
minBackoff, maxBackoff := time.Second*10, time.Hour
|
||||
cacheSize := 100
|
||||
dialTimeout := time.Minute * 2
|
||||
discoverOpts := &discoverOptions{
|
||||
connFactory: func(host host.Host) (*discimpl.BackoffConnector, error) {
|
||||
backoff := discimpl.NewExponentialBackoff(minBackoff, maxBackoff, discimpl.FullJitter, time.Second, 5.0, 0, rand.New(rngSrc))
|
||||
return discimpl.NewBackoffConnector(host, cacheSize, dialTimeout, backoff)
|
||||
},
|
||||
}
|
||||
|
||||
return discoverOpts
|
||||
}
|
||||
|
||||
// discover represents the discovery pipeline.
// The discovery pipeline handles advertising and discovery of peers
type discover struct {
	p *PubSub

	// discovery assists in discovering and advertising peers for a topic
	discovery discovery.Discovery

	// advertising tracks which topics are being advertised
	// (topic -> cancel func for that topic's advertise goroutine)
	advertising map[string]context.CancelFunc

	// discoverQ handles continuing peer discovery
	discoverQ chan *discoverReq

	// ongoing tracks ongoing discovery requests (keyed by topic)
	ongoing map[string]struct{}

	// done handles completion of a discovery request
	done chan string

	// connector handles connecting to new peers found via discovery
	connector *discimpl.BackoffConnector

	// options are the set of options to be used to complete struct construction in Start
	options *discoverOptions
}
|
||||
|
||||
// MinTopicSize returns a function that checks if a router is ready for publishing based on the topic size.
|
||||
// The router ultimately decides the whether it is ready or not, the given size is just a suggestion. Note
|
||||
// that the topic size does not include the router in the count.
|
||||
func MinTopicSize(size int) RouterReady {
|
||||
return func(rt PubSubRouter, topic string) (bool, error) {
|
||||
return rt.EnoughPeers(topic, size), nil
|
||||
}
|
||||
}
|
||||
|
||||
// Start attaches the discovery pipeline to a pubsub instance, initializes discovery and starts event loop
|
||||
func (d *discover) Start(p *PubSub, opts ...DiscoverOpt) error {
|
||||
if d.discovery == nil || p == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
d.p = p
|
||||
d.advertising = make(map[string]context.CancelFunc)
|
||||
d.discoverQ = make(chan *discoverReq, 32)
|
||||
d.ongoing = make(map[string]struct{})
|
||||
d.done = make(chan string)
|
||||
|
||||
conn, err := d.options.connFactory(p.host)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
d.connector = conn
|
||||
|
||||
go d.discoverLoop()
|
||||
go d.pollTimer()
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// pollTimer drives periodic discovery: after an initial delay it submits
// requestDiscovery to the pubsub eval loop, then again on every poll tick,
// until the pubsub context is cancelled.
func (d *discover) pollTimer() {
	// initial delay before the first poll
	select {
	case <-time.After(DiscoveryPollInitialDelay):
	case <-d.p.ctx.Done():
		return
	}

	// first poll, executed on the pubsub event loop via the eval channel
	select {
	case d.p.eval <- d.requestDiscovery:
	case <-d.p.ctx.Done():
		return
	}

	ticker := time.NewTicker(DiscoveryPollInterval)
	defer ticker.Stop()

	for {
		select {
		case <-ticker.C:
			select {
			case d.p.eval <- d.requestDiscovery:
			case <-d.p.ctx.Done():
				return
			}
		case <-d.p.ctx.Done():
			return
		}
	}
}
|
||||
|
||||
func (d *discover) requestDiscovery() {
|
||||
for t := range d.p.myTopics {
|
||||
if !d.p.rt.EnoughPeers(t, 0) {
|
||||
d.discoverQ <- &discoverReq{topic: t, done: make(chan struct{}, 1)}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// discoverLoop serializes discovery requests: at most one in-flight discovery
// per topic (tracked in d.ongoing), with completion reported on d.done.
// Each request's own done channel is signalled whether or not work was started.
func (d *discover) discoverLoop() {
	for {
		select {
		case discover := <-d.discoverQ:
			topic := discover.topic

			// already discovering for this topic; ack and skip
			if _, ok := d.ongoing[topic]; ok {
				discover.done <- struct{}{}
				continue
			}

			d.ongoing[topic] = struct{}{}

			go func() {
				d.handleDiscovery(d.p.ctx, topic, discover.opts)
				// report completion so the loop clears the ongoing entry
				select {
				case d.done <- topic:
				case <-d.p.ctx.Done():
				}
				discover.done <- struct{}{}
			}()
		case topic := <-d.done:
			delete(d.ongoing, topic)
		case <-d.p.ctx.Done():
			return
		}
	}
}
|
||||
|
||||
// Advertise advertises this node's interest in a topic to a discovery service. Advertise is not thread-safe.
func (d *discover) Advertise(topic string) {
	if d.discovery == nil {
		return
	}

	advertisingCtx, cancel := context.WithCancel(d.p.ctx)

	// already advertising this topic; release the unused context
	if _, ok := d.advertising[topic]; ok {
		cancel()
		return
	}
	d.advertising[topic] = cancel

	go func() {
		// next is the TTL suggested by the discovery service; on error we
		// fall back to a fixed retry interval.
		next, err := d.discovery.Advertise(advertisingCtx, topic)
		if err != nil {
			log.Warnf("bootstrap: error providing rendezvous for %s: %s", topic, err.Error())
			if next == 0 {
				next = discoveryAdvertiseRetryInterval
			}
		}
		// NOTE(review): a successful Advertise returning next == 0 would make
		// the timer below fire immediately, re-advertising in a tight loop —
		// confirm discovery implementations always return a positive TTL.

		t := time.NewTimer(next)
		defer t.Stop()

		for advertisingCtx.Err() == nil {
			select {
			case <-t.C:
				next, err = d.discovery.Advertise(advertisingCtx, topic)
				if err != nil {
					log.Warnf("bootstrap: error providing rendezvous for %s: %s", topic, err.Error())
					if next == 0 {
						next = discoveryAdvertiseRetryInterval
					}
				}
				t.Reset(next)
			case <-advertisingCtx.Done():
				return
			}
		}
	}()
}
|
||||
|
||||
// StopAdvertise stops advertising this node's interest in a topic. StopAdvertise is not thread-safe.
|
||||
func (d *discover) StopAdvertise(topic string) {
|
||||
if d.discovery == nil {
|
||||
return
|
||||
}
|
||||
|
||||
if advertiseCancel, ok := d.advertising[topic]; ok {
|
||||
advertiseCancel()
|
||||
delete(d.advertising, topic)
|
||||
}
|
||||
}
|
||||
|
||||
// Discover searches for additional peers interested in a given topic
|
||||
func (d *discover) Discover(topic string, opts ...discovery.Option) {
|
||||
if d.discovery == nil {
|
||||
return
|
||||
}
|
||||
|
||||
d.discoverQ <- &discoverReq{topic, opts, make(chan struct{}, 1)}
|
||||
}
|
||||
|
||||
// Bootstrap attempts to bootstrap to a given topic. Returns true if bootstrapped successfully, false otherwise.
// It loops: check readiness on the event loop, and if not ready, request a
// discovery round, wait for it to finish, then back off 100ms and retry.
func (d *discover) Bootstrap(ctx context.Context, topic string, ready RouterReady, opts ...discovery.Option) bool {
	if d.discovery == nil {
		return true
	}

	// create a stopped, drained timer for reuse in the retry backoff below
	t := time.NewTimer(time.Hour)
	if !t.Stop() {
		<-t.C
	}
	defer t.Stop()

	for {
		// Check if ready for publishing
		bootstrapped := make(chan bool, 1)
		select {
		case d.p.eval <- func() {
			// runs on the pubsub event loop, where d.p.rt may be accessed
			done, _ := ready(d.p.rt, topic)
			bootstrapped <- done
		}:
			if <-bootstrapped {
				return true
			}
		case <-d.p.ctx.Done():
			return false
		case <-ctx.Done():
			return false
		}

		// If not ready discover more peers
		disc := &discoverReq{topic, opts, make(chan struct{}, 1)}
		select {
		case d.discoverQ <- disc:
		case <-d.p.ctx.Done():
			return false
		case <-ctx.Done():
			return false
		}

		// wait for the discovery round to complete
		select {
		case <-disc.done:
		case <-d.p.ctx.Done():
			return false
		case <-ctx.Done():
			return false
		}

		// brief backoff before re-checking readiness
		t.Reset(time.Millisecond * 100)
		select {
		case <-t.C:
		case <-d.p.ctx.Done():
			return false
		case <-ctx.Done():
			return false
		}
	}
}
|
||||
|
||||
func (d *discover) handleDiscovery(ctx context.Context, topic string, opts []discovery.Option) {
|
||||
discoverCtx, cancel := context.WithTimeout(ctx, time.Second*10)
|
||||
defer cancel()
|
||||
|
||||
peerCh, err := d.discovery.FindPeers(discoverCtx, topic, opts...)
|
||||
if err != nil {
|
||||
log.Debugf("error finding peers for topic %s: %v", topic, err)
|
||||
return
|
||||
}
|
||||
|
||||
d.connector.Connect(ctx, peerCh)
|
||||
}
|
||||
|
||||
// discoverReq asks the discovery loop to find peers for a topic; done is
// signalled (buffered, cap 1) once the request has been handled or deduped.
type discoverReq struct {
	topic string
	opts  []discovery.Option
	done  chan struct{}
}

// pubSubDiscovery wraps a discovery.Discovery, namespacing every call under
// the "floodsub:" prefix and appending the configured default options.
type pubSubDiscovery struct {
	discovery.Discovery
	opts []discovery.Option
}

// Advertise advertises under the "floodsub:"-prefixed namespace.
func (d *pubSubDiscovery) Advertise(ctx context.Context, ns string, opts ...discovery.Option) (time.Duration, error) {
	return d.Discovery.Advertise(ctx, "floodsub:"+ns, append(opts, d.opts...)...)
}

// FindPeers searches under the "floodsub:"-prefixed namespace.
func (d *pubSubDiscovery) FindPeers(ctx context.Context, ns string, opts ...discovery.Option) (<-chan peer.AddrInfo, error) {
	return d.Discovery.FindPeers(ctx, "floodsub:"+ns, append(opts, d.opts...)...)
}
|
||||
|
||||
// WithDiscoveryOpts passes libp2p Discovery options into the PubSub discovery subsystem
func WithDiscoveryOpts(opts ...discovery.Option) DiscoverOpt {
	return func(d *discoverOptions) error {
		d.opts = opts
		return nil
	}
}

// BackoffConnectorFactory creates a BackoffConnector that is attached to a given host
type BackoffConnectorFactory func(host host.Host) (*discimpl.BackoffConnector, error)

// WithDiscoverConnector adds a custom connector that deals with how the discovery subsystem connects to peers
func WithDiscoverConnector(connFactory BackoffConnectorFactory) DiscoverOpt {
	return func(d *discoverOptions) error {
		d.connFactory = connFactory
		return nil
	}
}
|
||||
27
vendor/github.com/libp2p/go-libp2p-pubsub/doc.go
generated
vendored
Normal file
27
vendor/github.com/libp2p/go-libp2p-pubsub/doc.go
generated
vendored
Normal file
@@ -0,0 +1,27 @@
|
||||
// The pubsub package provides facilities for the Publish/Subscribe pattern of message
|
||||
// propagation, also known as overlay multicast.
|
||||
// The implementation provides topic-based pubsub, with pluggable routing algorithms.
|
||||
//
|
||||
// The main interface to the library is the PubSub object.
|
||||
// You can construct this object with the following constructors:
|
||||
//
|
||||
// - NewFloodSub creates an instance that uses the floodsub routing algorithm.
|
||||
//
|
||||
// - NewGossipSub creates an instance that uses the gossipsub routing algorithm.
|
||||
//
|
||||
// - NewRandomSub creates an instance that uses the randomsub routing algorithm.
|
||||
//
|
||||
// In addition, there is a generic constructor that creates a pubsub instance with
|
||||
// a custom PubSubRouter interface. This procedure is currently reserved for internal
|
||||
// use within the package.
|
||||
//
|
||||
// Once you have constructed a PubSub instance, you need to establish some connections
|
||||
// to your peers; the implementation relies on ambient peer discovery, leaving bootstrap
|
||||
// and active peer discovery up to the client.
|
||||
//
|
||||
// To publish a message to some topic, use Publish; you don't need to be subscribed
|
||||
// to the topic in order to publish.
|
||||
//
|
||||
// To subscribe to a topic, use Subscribe; this will give you a subscription interface
|
||||
// from which new messages can be pumped.
|
||||
package pubsub
|
||||
108
vendor/github.com/libp2p/go-libp2p-pubsub/floodsub.go
generated
vendored
Normal file
108
vendor/github.com/libp2p/go-libp2p-pubsub/floodsub.go
generated
vendored
Normal file
@@ -0,0 +1,108 @@
|
||||
package pubsub
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/libp2p/go-libp2p/core/host"
|
||||
"github.com/libp2p/go-libp2p/core/peer"
|
||||
"github.com/libp2p/go-libp2p/core/protocol"
|
||||
)
|
||||
|
||||
const (
	// FloodSubID is the protocol ID spoken by floodsub routers.
	FloodSubID = protocol.ID("/floodsub/1.0.0")
	// FloodSubTopicSearchSize is the default peer-count target used by
	// EnoughPeers when the caller does not suggest one.
	FloodSubTopicSearchSize = 5
)

// NewFloodsubWithProtocols returns a new floodsub-enabled PubSub object using the protocols specified in ps.
func NewFloodsubWithProtocols(ctx context.Context, h host.Host, ps []protocol.ID, opts ...Option) (*PubSub, error) {
	rt := &FloodSubRouter{
		protocols: ps,
	}
	return NewPubSub(ctx, h, rt, opts...)
}

// NewFloodSub returns a new PubSub object using the FloodSubRouter.
func NewFloodSub(ctx context.Context, h host.Host, opts ...Option) (*PubSub, error) {
	return NewFloodsubWithProtocols(ctx, h, []protocol.ID{FloodSubID}, opts...)
}
|
||||
|
||||
// FloodSubRouter is a PubSubRouter that floods every message to every known
// peer in the topic. It carries no routing state beyond its PubSub reference.
type FloodSubRouter struct {
	p         *PubSub       // set in Attach
	protocols []protocol.ID // protocol IDs this router speaks
	tracer    *pubsubTracer // event tracer, taken from the PubSub in Attach
}

// Protocols returns the protocol IDs this router speaks.
func (fs *FloodSubRouter) Protocols() []protocol.ID {
	return fs.protocols
}

// Attach wires the router to its PubSub instance.
func (fs *FloodSubRouter) Attach(p *PubSub) {
	fs.p = p
	fs.tracer = p.tracer
}

// AddPeer only traces the event; floodsub keeps no per-peer state.
func (fs *FloodSubRouter) AddPeer(p peer.ID, proto protocol.ID) {
	fs.tracer.AddPeer(p, proto)
}

// RemovePeer only traces the event; floodsub keeps no per-peer state.
func (fs *FloodSubRouter) RemovePeer(p peer.ID) {
	fs.tracer.RemovePeer(p)
}
|
||||
|
||||
func (fs *FloodSubRouter) EnoughPeers(topic string, suggested int) bool {
|
||||
// check all peers in the topic
|
||||
tmap, ok := fs.p.topics[topic]
|
||||
if !ok {
|
||||
return false
|
||||
}
|
||||
|
||||
if suggested == 0 {
|
||||
suggested = FloodSubTopicSearchSize
|
||||
}
|
||||
|
||||
if len(tmap) >= suggested {
|
||||
return true
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
func (fs *FloodSubRouter) AcceptFrom(peer.ID) AcceptStatus {
|
||||
return AcceptAll
|
||||
}
|
||||
|
||||
func (fs *FloodSubRouter) HandleRPC(rpc *RPC) {}
|
||||
|
||||
// Publish floods the message to every peer in its topic, skipping the peer we
// received it from and the original author. Sends are non-blocking: a peer
// with a full outgoing queue has the RPC dropped (and the drop traced).
func (fs *FloodSubRouter) Publish(msg *Message) {
	from := msg.ReceivedFrom
	topic := msg.GetTopic()

	out := rpcWithMessages(msg.Message)
	for pid := range fs.p.topics[topic] {
		// don't echo back to the forwarder or the original author
		if pid == from || pid == peer.ID(msg.GetFrom()) {
			continue
		}

		mch, ok := fs.p.peers[pid]
		if !ok {
			continue
		}

		select {
		case mch <- out:
			fs.tracer.SendRPC(out, pid)
		default:
			log.Infof("dropping message to peer %s: queue full", pid)
			fs.tracer.DropRPC(out, pid)
			// Drop it. The peer is too slow.
		}
	}
}
|
||||
|
||||
// Join only traces the event; floodsub keeps no per-topic state.
func (fs *FloodSubRouter) Join(topic string) {
	fs.tracer.Join(topic)
}

// Leave only traces the event; floodsub keeps no per-topic state.
func (fs *FloodSubRouter) Leave(topic string) {
	fs.tracer.Leave(topic)
}
|
||||
200
vendor/github.com/libp2p/go-libp2p-pubsub/gossip_tracer.go
generated
vendored
Normal file
200
vendor/github.com/libp2p/go-libp2p-pubsub/gossip_tracer.go
generated
vendored
Normal file
@@ -0,0 +1,200 @@
|
||||
package pubsub
|
||||
|
||||
import (
|
||||
"math/rand"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/libp2p/go-libp2p/core/peer"
|
||||
"github.com/libp2p/go-libp2p/core/protocol"
|
||||
)
|
||||
|
||||
// gossipTracer is an internal tracer that tracks IWANT requests in order to penalize
// peers who don't follow up on IWANT requests after an IHAVE advertisement.
// The tracking of promises is probabilistic to avoid using too much memory.
type gossipTracer struct {
	sync.Mutex

	idGen *msgIDGenerator

	// followUpTime is how long a peer has to deliver a promised message
	// before the promise counts as broken (set from the router in Start).
	followUpTime time.Duration

	// promises for messages by message ID; for each message tracked, we track the promise
	// expiration time for each peer.
	promises map[string]map[peer.ID]time.Time
	// promises for each peer; for each peer, we track the promised message IDs.
	// this index allows us to quickly void promises when a peer is throttled.
	peerPromises map[peer.ID]map[string]struct{}
}

// newGossipTracer creates a gossipTracer with empty promise indices and a
// default message-ID generator (replaced by the pubsub's generator in Start).
func newGossipTracer() *gossipTracer {
	return &gossipTracer{
		idGen:        newMsgIdGenerator(),
		promises:     make(map[string]map[peer.ID]time.Time),
		peerPromises: make(map[peer.ID]map[string]struct{}),
	}
}
|
||||
|
||||
// Start wires the tracer to the gossipsub router: it adopts the pubsub's
// message-ID generator and the router's IWANT follow-up window.
// Safe to call on a nil receiver (tracing disabled).
func (gt *gossipTracer) Start(gs *GossipSubRouter) {
	if gt == nil {
		return
	}

	gt.idGen = gs.p.idGen
	gt.followUpTime = gs.params.IWantFollowupTime
}
|
||||
|
||||
// track a promise to deliver a message from a list of msgIDs we are requesting
|
||||
func (gt *gossipTracer) AddPromise(p peer.ID, msgIDs []string) {
|
||||
if gt == nil {
|
||||
return
|
||||
}
|
||||
|
||||
idx := rand.Intn(len(msgIDs))
|
||||
mid := msgIDs[idx]
|
||||
|
||||
gt.Lock()
|
||||
defer gt.Unlock()
|
||||
|
||||
promises, ok := gt.promises[mid]
|
||||
if !ok {
|
||||
promises = make(map[peer.ID]time.Time)
|
||||
gt.promises[mid] = promises
|
||||
}
|
||||
|
||||
_, ok = promises[p]
|
||||
if !ok {
|
||||
promises[p] = time.Now().Add(gt.followUpTime)
|
||||
peerPromises, ok := gt.peerPromises[p]
|
||||
if !ok {
|
||||
peerPromises = make(map[string]struct{})
|
||||
gt.peerPromises[p] = peerPromises
|
||||
}
|
||||
peerPromises[mid] = struct{}{}
|
||||
}
|
||||
}
|
||||
|
||||
// returns the number of broken promises for each peer who didn't follow up
|
||||
// on an IWANT request.
|
||||
func (gt *gossipTracer) GetBrokenPromises() map[peer.ID]int {
|
||||
if gt == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
gt.Lock()
|
||||
defer gt.Unlock()
|
||||
|
||||
var res map[peer.ID]int
|
||||
now := time.Now()
|
||||
|
||||
// find broken promises from peers
|
||||
for mid, promises := range gt.promises {
|
||||
for p, expire := range promises {
|
||||
if expire.Before(now) {
|
||||
if res == nil {
|
||||
res = make(map[peer.ID]int)
|
||||
}
|
||||
res[p]++
|
||||
|
||||
delete(promises, p)
|
||||
|
||||
peerPromises := gt.peerPromises[p]
|
||||
delete(peerPromises, mid)
|
||||
if len(peerPromises) == 0 {
|
||||
delete(gt.peerPromises, p)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if len(promises) == 0 {
|
||||
delete(gt.promises, mid)
|
||||
}
|
||||
}
|
||||
|
||||
return res
|
||||
}
|
||||
|
||||
var _ RawTracer = (*gossipTracer)(nil)
|
||||
|
||||
// fulfillPromise discharges all outstanding promises for msg: once the
// message has been seen, no peer should be penalized for it, so every
// record of it is purged from both the forward and reverse promise indices.
func (gt *gossipTracer) fulfillPromise(msg *Message) {
	mid := gt.idGen.ID(msg)

	gt.Lock()
	defer gt.Unlock()

	promises, ok := gt.promises[mid]
	if !ok {
		return
	}
	delete(gt.promises, mid)

	// delete the promise for all peers that promised it, as they have no way to fulfill it.
	for p := range promises {
		peerPromises, ok := gt.peerPromises[p]
		if ok {
			delete(peerPromises, mid)
			if len(peerPromises) == 0 {
				delete(gt.peerPromises, p)
			}
		}
	}
}
|
||||
|
||||
// DeliverMessage fulfills any outstanding promises for a delivered message.
func (gt *gossipTracer) DeliverMessage(msg *Message) {
	// someone delivered a message, fulfill promises for it
	gt.fulfillPromise(msg)
}
|
||||
|
||||
func (gt *gossipTracer) RejectMessage(msg *Message, reason string) {
|
||||
// A message got rejected, so we can fulfill promises and let the score penalty apply
|
||||
// from invalid message delivery.
|
||||
// We do take exception and apply promise penalty regardless in the following cases, where
|
||||
// the peer delivered an obviously invalid message.
|
||||
switch reason {
|
||||
case RejectMissingSignature:
|
||||
return
|
||||
case RejectInvalidSignature:
|
||||
return
|
||||
}
|
||||
|
||||
gt.fulfillPromise(msg)
|
||||
}
|
||||
|
||||
// ValidateMessage fulfills promises as soon as a message enters validation.
func (gt *gossipTracer) ValidateMessage(msg *Message) {
	// we consider the promise fulfilled as soon as the message begins validation
	// if it was a case of signature issue it would have been rejected immediately
	// without triggering the Validate trace
	gt.fulfillPromise(msg)
}
|
||||
|
||||
// The callbacks below are part of the RawTracer interface but carry no
// information relevant to promise tracking, so they are no-ops.
func (gt *gossipTracer) AddPeer(p peer.ID, proto protocol.ID) {}
func (gt *gossipTracer) RemovePeer(p peer.ID)                 {}
func (gt *gossipTracer) Join(topic string)                    {}
func (gt *gossipTracer) Leave(topic string)                   {}
func (gt *gossipTracer) Graft(p peer.ID, topic string)        {}
func (gt *gossipTracer) Prune(p peer.ID, topic string)        {}
func (gt *gossipTracer) DuplicateMessage(msg *Message)        {}
func (gt *gossipTracer) RecvRPC(rpc *RPC)                     {}
func (gt *gossipTracer) SendRPC(rpc *RPC, p peer.ID)          {}
func (gt *gossipTracer) DropRPC(rpc *RPC, p peer.ID)          {}
func (gt *gossipTracer) UndeliverableMessage(msg *Message)    {}
|
||||
|
||||
// ThrottlePeer forgives all outstanding promises from a throttled peer:
// while we are rate-limiting it, the peer has no fair chance to deliver,
// so its entries are purged from both promise indices.
func (gt *gossipTracer) ThrottlePeer(p peer.ID) {
	gt.Lock()
	defer gt.Unlock()

	peerPromises, ok := gt.peerPromises[p]
	if !ok {
		return
	}

	// remove the peer from every message it promised, dropping empty entries
	for mid := range peerPromises {
		promises := gt.promises[mid]
		delete(promises, p)
		if len(promises) == 0 {
			delete(gt.promises, mid)
		}
	}

	delete(gt.peerPromises, p)
}
|
||||
1963
vendor/github.com/libp2p/go-libp2p-pubsub/gossipsub.go
generated
vendored
Normal file
1963
vendor/github.com/libp2p/go-libp2p-pubsub/gossipsub.go
generated
vendored
Normal file
File diff suppressed because it is too large
Load Diff
52
vendor/github.com/libp2p/go-libp2p-pubsub/gossipsub_feat.go
generated
vendored
Normal file
52
vendor/github.com/libp2p/go-libp2p-pubsub/gossipsub_feat.go
generated
vendored
Normal file
@@ -0,0 +1,52 @@
|
||||
package pubsub
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/libp2p/go-libp2p/core/protocol"
|
||||
)
|
||||
|
||||
// GossipSubFeatureTest is a feature test function; it takes a feature and a protocol ID and
|
||||
// should return true if the feature is supported by the protocol
|
||||
type GossipSubFeatureTest = func(GossipSubFeature, protocol.ID) bool
|
||||
|
||||
// GossipSubFeature is a feature discriminant enum
|
||||
type GossipSubFeature int
|
||||
|
||||
const (
|
||||
// Protocol supports basic GossipSub Mesh -- gossipsub-v1.0 compatible
|
||||
GossipSubFeatureMesh = iota
|
||||
// Protocol supports Peer eXchange on prune -- gossipsub-v1.1 compatible
|
||||
GossipSubFeaturePX
|
||||
)
|
||||
|
||||
// GossipSubDefaultProtocols is the default gossipsub router protocol list
|
||||
var GossipSubDefaultProtocols = []protocol.ID{GossipSubID_v11, GossipSubID_v10, FloodSubID}
|
||||
|
||||
// GossipSubDefaultFeatures is the feature test function for the default gossipsub protocols
|
||||
// GossipSubDefaultFeatures is the feature test function for the default
// gossipsub protocols: mesh is supported by v1.0 and v1.1, PX only by v1.1.
// Unknown features report unsupported.
func GossipSubDefaultFeatures(feat GossipSubFeature, proto protocol.ID) bool {
	switch feat {
	case GossipSubFeatureMesh:
		return proto == GossipSubID_v11 || proto == GossipSubID_v10
	case GossipSubFeaturePX:
		return proto == GossipSubID_v11
	default:
		return false
	}
}
|
||||
|
||||
// WithGossipSubProtocols is a gossipsub router option that configures a custom protocol list
|
||||
// and feature test function
|
||||
// WithGossipSubProtocols is a gossipsub router option that configures a custom
// protocol list and feature test function. It errors if the pubsub instance
// is not backed by a GossipSubRouter.
func WithGossipSubProtocols(protos []protocol.ID, feature GossipSubFeatureTest) Option {
	return func(ps *PubSub) error {
		gs, ok := ps.rt.(*GossipSubRouter)
		if !ok {
			return fmt.Errorf("pubsub router is not gossipsub")
		}

		gs.protos = protos
		gs.feature = feature

		return nil
	}
}
|
||||
11
vendor/github.com/libp2p/go-libp2p-pubsub/maintainer.json
generated
vendored
Normal file
11
vendor/github.com/libp2p/go-libp2p-pubsub/maintainer.json
generated
vendored
Normal file
@@ -0,0 +1,11 @@
|
||||
{
|
||||
"repoLeadMaintainer": {
|
||||
"name": "Dimitris Vyzovitis",
|
||||
"email": "vyzo@protocol.ai",
|
||||
"username": "@vyzo"
|
||||
},
|
||||
"workingGroup": {
|
||||
"name": "libp2p",
|
||||
"entryPoint": "https://github.com/libp2p/libp2p"
|
||||
}
|
||||
}
|
||||
104
vendor/github.com/libp2p/go-libp2p-pubsub/mcache.go
generated
vendored
Normal file
104
vendor/github.com/libp2p/go-libp2p-pubsub/mcache.go
generated
vendored
Normal file
@@ -0,0 +1,104 @@
|
||||
package pubsub
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/libp2p/go-libp2p/core/peer"
|
||||
)
|
||||
|
||||
// NewMessageCache creates a sliding window cache that remembers messages for as
|
||||
// long as `history` slots.
|
||||
//
|
||||
// When queried for messages to advertise, the cache only returns messages in
|
||||
// the last `gossip` slots.
|
||||
//
|
||||
// The `gossip` parameter must be smaller or equal to `history`, or this
|
||||
// function will panic.
|
||||
//
|
||||
// The slack between `gossip` and `history` accounts for the reaction time
|
||||
// between when a message is advertised via IHAVE gossip, and the peer pulls it
|
||||
// via an IWANT command.
|
||||
// NewMessageCache constructs the cache; it panics if gossip > history,
// since gossip advertisements can only draw from retained history slots.
// The message-ID function defaults to DefaultMsgIdFn and can be replaced
// via SetMsgIdFn.
func NewMessageCache(gossip, history int) *MessageCache {
	if gossip > history {
		err := fmt.Errorf("invalid parameters for message cache; gossip slots (%d) cannot be larger than history slots (%d)",
			gossip, history)
		panic(err)
	}
	return &MessageCache{
		msgs:    make(map[string]*Message),
		peertx:  make(map[string]map[peer.ID]int),
		history: make([][]CacheEntry, history),
		gossip:  gossip,
		msgID: func(msg *Message) string {
			return DefaultMsgIdFn(msg.Message)
		},
	}
}
|
||||
|
||||
type MessageCache struct {
|
||||
msgs map[string]*Message
|
||||
peertx map[string]map[peer.ID]int
|
||||
history [][]CacheEntry
|
||||
gossip int
|
||||
msgID func(*Message) string
|
||||
}
|
||||
|
||||
func (mc *MessageCache) SetMsgIdFn(msgID func(*Message) string) {
|
||||
mc.msgID = msgID
|
||||
}
|
||||
|
||||
type CacheEntry struct {
|
||||
mid string
|
||||
topic string
|
||||
}
|
||||
|
||||
// Put inserts a message into the cache under its computed ID and records it
// in the current (newest) history slot.
func (mc *MessageCache) Put(msg *Message) {
	mid := mc.msgID(msg)
	mc.msgs[mid] = msg
	mc.history[0] = append(mc.history[0], CacheEntry{mid: mid, topic: msg.GetTopic()})
}
|
||||
|
||||
func (mc *MessageCache) Get(mid string) (*Message, bool) {
|
||||
m, ok := mc.msgs[mid]
|
||||
return m, ok
|
||||
}
|
||||
|
||||
// GetForPeer looks up a message and counts the transmission attempt to peer
// p, returning the message, the number of times that peer has now requested
// it, and whether it was found. The count lets the router penalize peers
// that repeatedly IWANT the same message.
func (mc *MessageCache) GetForPeer(mid string, p peer.ID) (*Message, int, bool) {
	m, ok := mc.msgs[mid]
	if !ok {
		return nil, 0, false
	}

	// lazily create the per-peer transmission counter map for this message
	tx, ok := mc.peertx[mid]
	if !ok {
		tx = make(map[peer.ID]int)
		mc.peertx[mid] = tx
	}
	tx[p]++

	return m, tx[p], true
}
|
||||
|
||||
func (mc *MessageCache) GetGossipIDs(topic string) []string {
|
||||
var mids []string
|
||||
for _, entries := range mc.history[:mc.gossip] {
|
||||
for _, entry := range entries {
|
||||
if entry.topic == topic {
|
||||
mids = append(mids, entry.mid)
|
||||
}
|
||||
}
|
||||
}
|
||||
return mids
|
||||
}
|
||||
|
||||
func (mc *MessageCache) Shift() {
|
||||
last := mc.history[len(mc.history)-1]
|
||||
for _, entry := range last {
|
||||
delete(mc.msgs, entry.mid)
|
||||
delete(mc.peertx, entry.mid)
|
||||
}
|
||||
for i := len(mc.history) - 2; i >= 0; i-- {
|
||||
mc.history[i+1] = mc.history[i]
|
||||
}
|
||||
mc.history[0] = nil
|
||||
}
|
||||
52
vendor/github.com/libp2p/go-libp2p-pubsub/midgen.go
generated
vendored
Normal file
52
vendor/github.com/libp2p/go-libp2p-pubsub/midgen.go
generated
vendored
Normal file
@@ -0,0 +1,52 @@
|
||||
package pubsub
|
||||
|
||||
import (
|
||||
"sync"
|
||||
|
||||
pb "github.com/libp2p/go-libp2p-pubsub/pb"
|
||||
)
|
||||
|
||||
// msgIDGenerator handles computing IDs for msgs
|
||||
// It allows setting custom generators(MsgIdFunction) per topic
|
||||
type msgIDGenerator struct {
|
||||
Default MsgIdFunction
|
||||
|
||||
topicGensLk sync.RWMutex
|
||||
topicGens map[string]MsgIdFunction
|
||||
}
|
||||
|
||||
func newMsgIdGenerator() *msgIDGenerator {
|
||||
return &msgIDGenerator{
|
||||
Default: DefaultMsgIdFn,
|
||||
topicGens: make(map[string]MsgIdFunction),
|
||||
}
|
||||
}
|
||||
|
||||
// Set sets custom id generator(MsgIdFunction) for topic.
|
||||
func (m *msgIDGenerator) Set(topic string, gen MsgIdFunction) {
|
||||
m.topicGensLk.Lock()
|
||||
m.topicGens[topic] = gen
|
||||
m.topicGensLk.Unlock()
|
||||
}
|
||||
|
||||
// ID computes ID for the msg or short-circuits with the cached value.
|
||||
// ID computes the ID for the msg or short-circuits with the cached value.
// The result is memoized on the message itself, so an empty msg.ID marks
// a not-yet-computed ID.
func (m *msgIDGenerator) ID(msg *Message) string {
	if msg.ID != "" {
		return msg.ID
	}

	msg.ID = m.RawID(msg.Message)
	return msg.ID
}
|
||||
|
||||
// RawID computes ID for the proto 'msg'.
|
||||
func (m *msgIDGenerator) RawID(msg *pb.Message) string {
|
||||
m.topicGensLk.RLock()
|
||||
gen, ok := m.topicGens[msg.GetTopic()]
|
||||
m.topicGensLk.RUnlock()
|
||||
if !ok {
|
||||
gen = m.Default
|
||||
}
|
||||
|
||||
return gen(msg)
|
||||
}
|
||||
75
vendor/github.com/libp2p/go-libp2p-pubsub/notify.go
generated
vendored
Normal file
75
vendor/github.com/libp2p/go-libp2p-pubsub/notify.go
generated
vendored
Normal file
@@ -0,0 +1,75 @@
|
||||
package pubsub
|
||||
|
||||
import (
|
||||
"github.com/libp2p/go-libp2p/core/network"
|
||||
"github.com/libp2p/go-libp2p/core/peer"
|
||||
ma "github.com/multiformats/go-multiaddr"
|
||||
)
|
||||
|
||||
var _ network.Notifiee = (*PubSubNotif)(nil)
|
||||
|
||||
type PubSubNotif PubSub
|
||||
|
||||
func (p *PubSubNotif) OpenedStream(n network.Network, s network.Stream) {
|
||||
}
|
||||
|
||||
func (p *PubSubNotif) ClosedStream(n network.Network, s network.Stream) {
|
||||
}
|
||||
|
||||
func (p *PubSubNotif) Connected(n network.Network, c network.Conn) {
|
||||
// ignore transient connections
|
||||
if c.Stat().Transient {
|
||||
return
|
||||
}
|
||||
|
||||
go func() {
|
||||
p.newPeersPrioLk.RLock()
|
||||
p.newPeersMx.Lock()
|
||||
p.newPeersPend[c.RemotePeer()] = struct{}{}
|
||||
p.newPeersMx.Unlock()
|
||||
p.newPeersPrioLk.RUnlock()
|
||||
|
||||
select {
|
||||
case p.newPeers <- struct{}{}:
|
||||
default:
|
||||
}
|
||||
}()
|
||||
}
|
||||
|
||||
func (p *PubSubNotif) Disconnected(n network.Network, c network.Conn) {
|
||||
}
|
||||
|
||||
func (p *PubSubNotif) Listen(n network.Network, _ ma.Multiaddr) {
|
||||
}
|
||||
|
||||
func (p *PubSubNotif) ListenClose(n network.Network, _ ma.Multiaddr) {
|
||||
}
|
||||
|
||||
func (p *PubSubNotif) Initialize() {
|
||||
isTransient := func(pid peer.ID) bool {
|
||||
for _, c := range p.host.Network().ConnsToPeer(pid) {
|
||||
if !c.Stat().Transient {
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
return true
|
||||
}
|
||||
|
||||
p.newPeersPrioLk.RLock()
|
||||
p.newPeersMx.Lock()
|
||||
for _, pid := range p.host.Network().Peers() {
|
||||
if isTransient(pid) {
|
||||
continue
|
||||
}
|
||||
|
||||
p.newPeersPend[pid] = struct{}{}
|
||||
}
|
||||
p.newPeersMx.Unlock()
|
||||
p.newPeersPrioLk.RUnlock()
|
||||
|
||||
select {
|
||||
case p.newPeers <- struct{}{}:
|
||||
default:
|
||||
}
|
||||
}
|
||||
11
vendor/github.com/libp2p/go-libp2p-pubsub/pb/Makefile
generated
vendored
Normal file
11
vendor/github.com/libp2p/go-libp2p-pubsub/pb/Makefile
generated
vendored
Normal file
@@ -0,0 +1,11 @@
|
||||
PB = $(wildcard *.proto)
|
||||
GO = $(PB:.proto=.pb.go)
|
||||
|
||||
all: $(GO)
|
||||
|
||||
%.pb.go: %.proto
|
||||
protoc --proto_path=$(GOPATH)/src:. --gogofast_out=. $<
|
||||
|
||||
clean:
|
||||
rm -f *.pb.go
|
||||
rm -f *.go
|
||||
2649
vendor/github.com/libp2p/go-libp2p-pubsub/pb/rpc.pb.go
generated
vendored
Normal file
2649
vendor/github.com/libp2p/go-libp2p-pubsub/pb/rpc.pb.go
generated
vendored
Normal file
File diff suppressed because it is too large
Load Diff
57
vendor/github.com/libp2p/go-libp2p-pubsub/pb/rpc.proto
generated
vendored
Normal file
57
vendor/github.com/libp2p/go-libp2p-pubsub/pb/rpc.proto
generated
vendored
Normal file
@@ -0,0 +1,57 @@
|
||||
syntax = "proto2";
|
||||
|
||||
package pubsub.pb;
|
||||
|
||||
message RPC {
|
||||
repeated SubOpts subscriptions = 1;
|
||||
repeated Message publish = 2;
|
||||
|
||||
message SubOpts {
|
||||
optional bool subscribe = 1; // subscribe or unsubscribe
|
||||
optional string topicid = 2;
|
||||
}
|
||||
|
||||
optional ControlMessage control = 3;
|
||||
}
|
||||
|
||||
message Message {
|
||||
optional bytes from = 1;
|
||||
optional bytes data = 2;
|
||||
optional bytes seqno = 3;
|
||||
optional string topic = 4;
|
||||
optional bytes signature = 5;
|
||||
optional bytes key = 6;
|
||||
}
|
||||
|
||||
message ControlMessage {
|
||||
repeated ControlIHave ihave = 1;
|
||||
repeated ControlIWant iwant = 2;
|
||||
repeated ControlGraft graft = 3;
|
||||
repeated ControlPrune prune = 4;
|
||||
}
|
||||
|
||||
message ControlIHave {
|
||||
optional string topicID = 1;
|
||||
// implementors from other languages should use bytes here - go protobuf emits invalid utf8 strings
|
||||
repeated string messageIDs = 2;
|
||||
}
|
||||
|
||||
message ControlIWant {
|
||||
// implementors from other languages should use bytes here - go protobuf emits invalid utf8 strings
|
||||
repeated string messageIDs = 1;
|
||||
}
|
||||
|
||||
message ControlGraft {
|
||||
optional string topicID = 1;
|
||||
}
|
||||
|
||||
message ControlPrune {
|
||||
optional string topicID = 1;
|
||||
repeated PeerInfo peers = 2;
|
||||
optional uint64 backoff = 3;
|
||||
}
|
||||
|
||||
message PeerInfo {
|
||||
optional bytes peerID = 1;
|
||||
optional bytes signedPeerRecord = 2;
|
||||
}
|
||||
6624
vendor/github.com/libp2p/go-libp2p-pubsub/pb/trace.pb.go
generated
vendored
Normal file
6624
vendor/github.com/libp2p/go-libp2p-pubsub/pb/trace.pb.go
generated
vendored
Normal file
File diff suppressed because it is too large
Load Diff
150
vendor/github.com/libp2p/go-libp2p-pubsub/pb/trace.proto
generated
vendored
Normal file
150
vendor/github.com/libp2p/go-libp2p-pubsub/pb/trace.proto
generated
vendored
Normal file
@@ -0,0 +1,150 @@
|
||||
syntax = "proto2";
|
||||
|
||||
package pubsub.pb;
|
||||
|
||||
message TraceEvent {
|
||||
optional Type type = 1;
|
||||
optional bytes peerID = 2;
|
||||
optional int64 timestamp = 3;
|
||||
|
||||
optional PublishMessage publishMessage = 4;
|
||||
optional RejectMessage rejectMessage = 5;
|
||||
optional DuplicateMessage duplicateMessage = 6;
|
||||
optional DeliverMessage deliverMessage = 7;
|
||||
optional AddPeer addPeer = 8;
|
||||
optional RemovePeer removePeer = 9;
|
||||
optional RecvRPC recvRPC = 10;
|
||||
optional SendRPC sendRPC = 11;
|
||||
optional DropRPC dropRPC = 12;
|
||||
optional Join join = 13;
|
||||
optional Leave leave = 14;
|
||||
optional Graft graft = 15;
|
||||
optional Prune prune = 16;
|
||||
|
||||
enum Type {
|
||||
PUBLISH_MESSAGE = 0;
|
||||
REJECT_MESSAGE = 1;
|
||||
DUPLICATE_MESSAGE = 2;
|
||||
DELIVER_MESSAGE = 3;
|
||||
ADD_PEER = 4;
|
||||
REMOVE_PEER = 5;
|
||||
RECV_RPC = 6;
|
||||
SEND_RPC = 7;
|
||||
DROP_RPC = 8;
|
||||
JOIN = 9;
|
||||
LEAVE = 10;
|
||||
GRAFT = 11;
|
||||
PRUNE = 12;
|
||||
}
|
||||
|
||||
message PublishMessage {
|
||||
optional bytes messageID = 1;
|
||||
optional string topic = 2;
|
||||
}
|
||||
|
||||
message RejectMessage {
|
||||
optional bytes messageID = 1;
|
||||
optional bytes receivedFrom = 2;
|
||||
optional string reason = 3;
|
||||
optional string topic = 4;
|
||||
}
|
||||
|
||||
message DuplicateMessage {
|
||||
optional bytes messageID = 1;
|
||||
optional bytes receivedFrom = 2;
|
||||
optional string topic = 3;
|
||||
}
|
||||
|
||||
message DeliverMessage {
|
||||
optional bytes messageID = 1;
|
||||
optional string topic = 2;
|
||||
optional bytes receivedFrom = 3;
|
||||
}
|
||||
|
||||
message AddPeer {
|
||||
optional bytes peerID = 1;
|
||||
optional string proto = 2;
|
||||
}
|
||||
|
||||
message RemovePeer {
|
||||
optional bytes peerID = 1;
|
||||
}
|
||||
|
||||
message RecvRPC {
|
||||
optional bytes receivedFrom = 1;
|
||||
optional RPCMeta meta = 2;
|
||||
}
|
||||
|
||||
message SendRPC {
|
||||
optional bytes sendTo = 1;
|
||||
optional RPCMeta meta = 2;
|
||||
}
|
||||
|
||||
message DropRPC {
|
||||
optional bytes sendTo = 1;
|
||||
optional RPCMeta meta = 2;
|
||||
}
|
||||
|
||||
message Join {
|
||||
optional string topic = 1;
|
||||
}
|
||||
|
||||
message Leave {
|
||||
optional string topic = 2;
|
||||
}
|
||||
|
||||
message Graft {
|
||||
optional bytes peerID = 1;
|
||||
optional string topic = 2;
|
||||
}
|
||||
|
||||
message Prune {
|
||||
optional bytes peerID = 1;
|
||||
optional string topic = 2;
|
||||
}
|
||||
|
||||
message RPCMeta {
|
||||
repeated MessageMeta messages = 1;
|
||||
repeated SubMeta subscription = 2;
|
||||
optional ControlMeta control = 3;
|
||||
}
|
||||
|
||||
message MessageMeta {
|
||||
optional bytes messageID = 1;
|
||||
optional string topic = 2;
|
||||
}
|
||||
|
||||
message SubMeta {
|
||||
optional bool subscribe = 1;
|
||||
optional string topic = 2;
|
||||
}
|
||||
|
||||
message ControlMeta {
|
||||
repeated ControlIHaveMeta ihave = 1;
|
||||
repeated ControlIWantMeta iwant = 2;
|
||||
repeated ControlGraftMeta graft = 3;
|
||||
repeated ControlPruneMeta prune = 4;
|
||||
}
|
||||
|
||||
message ControlIHaveMeta {
|
||||
optional string topic = 1;
|
||||
repeated bytes messageIDs = 2;
|
||||
}
|
||||
|
||||
message ControlIWantMeta {
|
||||
repeated bytes messageIDs = 1;
|
||||
}
|
||||
|
||||
message ControlGraftMeta {
|
||||
optional string topic = 1;
|
||||
}
|
||||
|
||||
message ControlPruneMeta {
|
||||
optional string topic = 1;
|
||||
repeated bytes peers = 2;
|
||||
}
|
||||
}
|
||||
|
||||
message TraceEventBatch {
|
||||
repeated TraceEvent batch = 1;
|
||||
}
|
||||
453
vendor/github.com/libp2p/go-libp2p-pubsub/peer_gater.go
generated
vendored
Normal file
453
vendor/github.com/libp2p/go-libp2p-pubsub/peer_gater.go
generated
vendored
Normal file
@@ -0,0 +1,453 @@
|
||||
package pubsub
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"math/rand"
|
||||
"sort"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/libp2p/go-libp2p/core/host"
|
||||
"github.com/libp2p/go-libp2p/core/network"
|
||||
"github.com/libp2p/go-libp2p/core/peer"
|
||||
"github.com/libp2p/go-libp2p/core/protocol"
|
||||
|
||||
manet "github.com/multiformats/go-multiaddr/net"
|
||||
)
|
||||
|
||||
var (
|
||||
DefaultPeerGaterRetainStats = 6 * time.Hour
|
||||
DefaultPeerGaterQuiet = time.Minute
|
||||
DefaultPeerGaterDuplicateWeight = 0.125
|
||||
DefaultPeerGaterIgnoreWeight = 1.0
|
||||
DefaultPeerGaterRejectWeight = 16.0
|
||||
DefaultPeerGaterThreshold = 0.33
|
||||
DefaultPeerGaterGlobalDecay = ScoreParameterDecay(2 * time.Minute)
|
||||
DefaultPeerGaterSourceDecay = ScoreParameterDecay(time.Hour)
|
||||
)
|
||||
|
||||
// PeerGaterParams groups together parameters that control the operation of the peer gater
|
||||
type PeerGaterParams struct {
|
||||
// when the ratio of throttled/validated messages exceeds this threshold, the gater turns on
|
||||
Threshold float64
|
||||
// (linear) decay parameter for gater counters
|
||||
GlobalDecay float64 // global counter decay
|
||||
SourceDecay float64 // per IP counter decay
|
||||
// decay interval
|
||||
DecayInterval time.Duration
|
||||
// counter zeroing threshold
|
||||
DecayToZero float64
|
||||
// how long to retain stats
|
||||
RetainStats time.Duration
|
||||
// quiet interval before turning off the gater; if there are no validation throttle events
|
||||
// for this interval, the gater turns off
|
||||
Quiet time.Duration
|
||||
// weight of duplicate message deliveries
|
||||
DuplicateWeight float64
|
||||
// weight of ignored messages
|
||||
IgnoreWeight float64
|
||||
// weight of rejected messages
|
||||
RejectWeight float64
|
||||
|
||||
// priority topic delivery weights
|
||||
TopicDeliveryWeights map[string]float64
|
||||
}
|
||||
|
||||
func (p *PeerGaterParams) validate() error {
|
||||
if p.Threshold <= 0 {
|
||||
return fmt.Errorf("invalid Threshold; must be > 0")
|
||||
}
|
||||
if p.GlobalDecay <= 0 || p.GlobalDecay >= 1 {
|
||||
return fmt.Errorf("invalid GlobalDecay; must be between 0 and 1")
|
||||
}
|
||||
if p.SourceDecay <= 0 || p.SourceDecay >= 1 {
|
||||
return fmt.Errorf("invalid SourceDecay; must be between 0 and 1")
|
||||
}
|
||||
if p.DecayInterval < time.Second {
|
||||
return fmt.Errorf("invalid DecayInterval; must be at least 1s")
|
||||
}
|
||||
if p.DecayToZero <= 0 || p.DecayToZero >= 1 {
|
||||
return fmt.Errorf("invalid DecayToZero; must be between 0 and 1")
|
||||
}
|
||||
// no need to check stats retention; a value of 0 means we don't retain stats
|
||||
if p.Quiet < time.Second {
|
||||
return fmt.Errorf("invalud Quiet interval; must be at least 1s")
|
||||
}
|
||||
if p.DuplicateWeight <= 0 {
|
||||
return fmt.Errorf("invalid DuplicateWeight; must be > 0")
|
||||
}
|
||||
if p.IgnoreWeight < 1 {
|
||||
return fmt.Errorf("invalid IgnoreWeight; must be >= 1")
|
||||
}
|
||||
if p.RejectWeight < 1 {
|
||||
return fmt.Errorf("invalud RejectWeight; must be >= 1")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// WithTopicDeliveryWeights is a fluid setter for the priority topic delivery weights
|
||||
func (p *PeerGaterParams) WithTopicDeliveryWeights(w map[string]float64) *PeerGaterParams {
|
||||
p.TopicDeliveryWeights = w
|
||||
return p
|
||||
}
|
||||
|
||||
// NewPeerGaterParams creates a new PeerGaterParams struct, using the specified threshold and decay
|
||||
// parameters and default values for all other parameters.
|
||||
func NewPeerGaterParams(threshold, globalDecay, sourceDecay float64) *PeerGaterParams {
|
||||
return &PeerGaterParams{
|
||||
Threshold: threshold,
|
||||
GlobalDecay: globalDecay,
|
||||
SourceDecay: sourceDecay,
|
||||
DecayToZero: DefaultDecayToZero,
|
||||
DecayInterval: DefaultDecayInterval,
|
||||
RetainStats: DefaultPeerGaterRetainStats,
|
||||
Quiet: DefaultPeerGaterQuiet,
|
||||
DuplicateWeight: DefaultPeerGaterDuplicateWeight,
|
||||
IgnoreWeight: DefaultPeerGaterIgnoreWeight,
|
||||
RejectWeight: DefaultPeerGaterRejectWeight,
|
||||
}
|
||||
}
|
||||
|
||||
// DefaultPeerGaterParams creates a new PeerGaterParams struct using default values
|
||||
func DefaultPeerGaterParams() *PeerGaterParams {
|
||||
return NewPeerGaterParams(DefaultPeerGaterThreshold, DefaultPeerGaterGlobalDecay, DefaultPeerGaterSourceDecay)
|
||||
}
|
||||
|
||||
// the gater object.
|
||||
type peerGater struct {
|
||||
sync.Mutex
|
||||
|
||||
host host.Host
|
||||
|
||||
// gater parameters
|
||||
params *PeerGaterParams
|
||||
|
||||
// counters
|
||||
validate, throttle float64
|
||||
|
||||
// time of last validation throttle
|
||||
lastThrottle time.Time
|
||||
|
||||
// stats per peer.ID -- multiple peer IDs may share the same stats object if they are
|
||||
// colocated in the same IP
|
||||
peerStats map[peer.ID]*peerGaterStats
|
||||
// stats per IP
|
||||
ipStats map[string]*peerGaterStats
|
||||
|
||||
// for unit tests
|
||||
getIP func(peer.ID) string
|
||||
}
|
||||
|
||||
type peerGaterStats struct {
|
||||
// number of connected peer IDs mapped to this stat object
|
||||
connected int
|
||||
// stats expiration time -- only valid if connected = 0
|
||||
expire time.Time
|
||||
|
||||
// counters
|
||||
deliver, duplicate, ignore, reject float64
|
||||
}
|
||||
|
||||
// WithPeerGater is a gossipsub router option that enables reactive validation queue
|
||||
// management.
|
||||
// The Gater is activated if the ratio of throttled/validated messages exceeds the specified
|
||||
// threshold.
|
||||
// Once active, the Gater probabilistically throttles peers _before_ they enter the validation
|
||||
// queue, performing Random Early Drop.
|
||||
// The throttle decision is randomized, with the probability of allowing messages to enter the
|
||||
// validation queue controlled by the statistical observations of the performance of all peers
|
||||
// in the IP address of the gated peer.
|
||||
// The Gater deactivates if there is no validation throttling occurring for the specified quiet
|
||||
// interval.
|
||||
func WithPeerGater(params *PeerGaterParams) Option {
|
||||
return func(ps *PubSub) error {
|
||||
gs, ok := ps.rt.(*GossipSubRouter)
|
||||
if !ok {
|
||||
return fmt.Errorf("pubsub router is not gossipsub")
|
||||
}
|
||||
|
||||
err := params.validate()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
gs.gate = newPeerGater(ps.ctx, ps.host, params)
|
||||
|
||||
// hook the tracer
|
||||
if ps.tracer != nil {
|
||||
ps.tracer.raw = append(ps.tracer.raw, gs.gate)
|
||||
} else {
|
||||
ps.tracer = &pubsubTracer{
|
||||
raw: []RawTracer{gs.gate},
|
||||
pid: ps.host.ID(),
|
||||
idGen: ps.idGen,
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
func newPeerGater(ctx context.Context, host host.Host, params *PeerGaterParams) *peerGater {
|
||||
pg := &peerGater{
|
||||
params: params,
|
||||
peerStats: make(map[peer.ID]*peerGaterStats),
|
||||
ipStats: make(map[string]*peerGaterStats),
|
||||
host: host,
|
||||
}
|
||||
go pg.background(ctx)
|
||||
return pg
|
||||
}
|
||||
|
||||
func (pg *peerGater) background(ctx context.Context) {
|
||||
tick := time.NewTicker(pg.params.DecayInterval)
|
||||
|
||||
defer tick.Stop()
|
||||
|
||||
for {
|
||||
select {
|
||||
case <-tick.C:
|
||||
pg.decayStats()
|
||||
case <-ctx.Done():
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (pg *peerGater) decayStats() {
|
||||
pg.Lock()
|
||||
defer pg.Unlock()
|
||||
|
||||
pg.validate *= pg.params.GlobalDecay
|
||||
if pg.validate < pg.params.DecayToZero {
|
||||
pg.validate = 0
|
||||
}
|
||||
|
||||
pg.throttle *= pg.params.GlobalDecay
|
||||
if pg.throttle < pg.params.DecayToZero {
|
||||
pg.throttle = 0
|
||||
}
|
||||
|
||||
now := time.Now()
|
||||
for ip, st := range pg.ipStats {
|
||||
if st.connected > 0 {
|
||||
st.deliver *= pg.params.SourceDecay
|
||||
if st.deliver < pg.params.DecayToZero {
|
||||
st.deliver = 0
|
||||
}
|
||||
|
||||
st.duplicate *= pg.params.SourceDecay
|
||||
if st.duplicate < pg.params.DecayToZero {
|
||||
st.duplicate = 0
|
||||
}
|
||||
|
||||
st.ignore *= pg.params.SourceDecay
|
||||
if st.ignore < pg.params.DecayToZero {
|
||||
st.ignore = 0
|
||||
}
|
||||
|
||||
st.reject *= pg.params.SourceDecay
|
||||
if st.reject < pg.params.DecayToZero {
|
||||
st.reject = 0
|
||||
}
|
||||
} else if st.expire.Before(now) {
|
||||
delete(pg.ipStats, ip)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (pg *peerGater) getPeerStats(p peer.ID) *peerGaterStats {
|
||||
st, ok := pg.peerStats[p]
|
||||
if !ok {
|
||||
st = pg.getIPStats(p)
|
||||
pg.peerStats[p] = st
|
||||
}
|
||||
return st
|
||||
}
|
||||
|
||||
func (pg *peerGater) getIPStats(p peer.ID) *peerGaterStats {
|
||||
ip := pg.getPeerIP(p)
|
||||
st, ok := pg.ipStats[ip]
|
||||
if !ok {
|
||||
st = &peerGaterStats{}
|
||||
pg.ipStats[ip] = st
|
||||
}
|
||||
return st
|
||||
}
|
||||
|
||||
func (pg *peerGater) getPeerIP(p peer.ID) string {
|
||||
if pg.getIP != nil {
|
||||
return pg.getIP(p)
|
||||
}
|
||||
|
||||
connToIP := func(c network.Conn) string {
|
||||
remote := c.RemoteMultiaddr()
|
||||
ip, err := manet.ToIP(remote)
|
||||
if err != nil {
|
||||
log.Warnf("error determining IP for remote peer in %s: %s", remote, err)
|
||||
return "<unknown>"
|
||||
}
|
||||
return ip.String()
|
||||
}
|
||||
|
||||
conns := pg.host.Network().ConnsToPeer(p)
|
||||
switch len(conns) {
|
||||
case 0:
|
||||
return "<unknown>"
|
||||
case 1:
|
||||
return connToIP(conns[0])
|
||||
default:
|
||||
// we have multiple connections -- order by number of streams and use the one with the
|
||||
// most streams; it's a nightmare to track multiple IPs per peer, so pick the best one.
|
||||
streams := make(map[string]int)
|
||||
for _, c := range conns {
|
||||
if c.Stat().Transient {
|
||||
// ignore transient
|
||||
continue
|
||||
}
|
||||
streams[c.ID()] = len(c.GetStreams())
|
||||
}
|
||||
sort.Slice(conns, func(i, j int) bool {
|
||||
return streams[conns[i].ID()] > streams[conns[j].ID()]
|
||||
})
|
||||
return connToIP(conns[0])
|
||||
}
|
||||
}
|
||||
|
||||
// router interface
|
||||
// AcceptFrom decides whether to admit traffic from peer p into the
// validation pipeline. When the gater is inactive (nil, quiet, or below the
// throttle threshold) everything is accepted; otherwise a randomized
// Random Early Drop decision is made based on the peer's delivery record,
// and losers are restricted to control traffic only.
func (pg *peerGater) AcceptFrom(p peer.ID) AcceptStatus {
	if pg == nil {
		return AcceptAll
	}

	pg.Lock()
	defer pg.Unlock()

	// check the quiet period; if the validation queue has not throttled for more than the Quiet
	// interval, we turn off the circuit breaker and accept.
	if time.Since(pg.lastThrottle) > pg.params.Quiet {
		return AcceptAll
	}

	// no throttle events -- or they have decayed; accept.
	if pg.throttle == 0 {
		return AcceptAll
	}

	// check the throttle/validate ratio; if it is below threshold we accept.
	if pg.validate != 0 && pg.throttle/pg.validate < pg.params.Threshold {
		return AcceptAll
	}

	st := pg.getPeerStats(p)

	// compute the goodput of the peer; the denominator is the weighted mix of message counters
	total := st.deliver + pg.params.DuplicateWeight*st.duplicate + pg.params.IgnoreWeight*st.ignore + pg.params.RejectWeight*st.reject
	if total == 0 {
		return AcceptAll
	}

	// we make a randomized decision based on the goodput of the peer.
	// the probability is biased by adding 1 to the delivery counter so that we don't unconditionally
	// throttle in the first negative event; it also ensures that a peer always has a chance of being
	// accepted; this is not a sinkhole/blacklist.
	threshold := (1 + st.deliver) / (1 + total)
	if rand.Float64() < threshold {
		return AcceptAll
	}

	log.Debugf("throttling peer %s with threshold %f", p, threshold)
	return AcceptControl
}
|
||||
|
||||
// -- RawTracer interface methods
|
||||
var _ RawTracer = (*peerGater)(nil)
|
||||
|
||||
// tracer interface
|
||||
func (pg *peerGater) AddPeer(p peer.ID, proto protocol.ID) {
|
||||
pg.Lock()
|
||||
defer pg.Unlock()
|
||||
|
||||
st := pg.getPeerStats(p)
|
||||
st.connected++
|
||||
}
|
||||
|
||||
func (pg *peerGater) RemovePeer(p peer.ID) {
|
||||
pg.Lock()
|
||||
defer pg.Unlock()
|
||||
|
||||
st := pg.getPeerStats(p)
|
||||
st.connected--
|
||||
st.expire = time.Now().Add(pg.params.RetainStats)
|
||||
|
||||
delete(pg.peerStats, p)
|
||||
}
|
||||
|
||||
func (pg *peerGater) Join(topic string) {}
|
||||
func (pg *peerGater) Leave(topic string) {}
|
||||
func (pg *peerGater) Graft(p peer.ID, topic string) {}
|
||||
func (pg *peerGater) Prune(p peer.ID, topic string) {}
|
||||
|
||||
func (pg *peerGater) ValidateMessage(msg *Message) {
|
||||
pg.Lock()
|
||||
defer pg.Unlock()
|
||||
|
||||
pg.validate++
|
||||
}
|
||||
|
||||
func (pg *peerGater) DeliverMessage(msg *Message) {
|
||||
pg.Lock()
|
||||
defer pg.Unlock()
|
||||
|
||||
st := pg.getPeerStats(msg.ReceivedFrom)
|
||||
|
||||
topic := msg.GetTopic()
|
||||
weight := pg.params.TopicDeliveryWeights[topic]
|
||||
|
||||
if weight == 0 {
|
||||
weight = 1
|
||||
}
|
||||
|
||||
st.deliver += weight
|
||||
}
|
||||
|
||||
func (pg *peerGater) RejectMessage(msg *Message, reason string) {
|
||||
pg.Lock()
|
||||
defer pg.Unlock()
|
||||
|
||||
switch reason {
|
||||
case RejectValidationQueueFull:
|
||||
fallthrough
|
||||
case RejectValidationThrottled:
|
||||
pg.lastThrottle = time.Now()
|
||||
pg.throttle++
|
||||
|
||||
case RejectValidationIgnored:
|
||||
st := pg.getPeerStats(msg.ReceivedFrom)
|
||||
st.ignore++
|
||||
|
||||
default:
|
||||
st := pg.getPeerStats(msg.ReceivedFrom)
|
||||
st.reject++
|
||||
}
|
||||
}
|
||||
|
||||
func (pg *peerGater) DuplicateMessage(msg *Message) {
|
||||
pg.Lock()
|
||||
defer pg.Unlock()
|
||||
|
||||
st := pg.getPeerStats(msg.ReceivedFrom)
|
||||
st.duplicate++
|
||||
}
|
||||
|
||||
func (pg *peerGater) ThrottlePeer(p peer.ID) {}
|
||||
|
||||
func (pg *peerGater) RecvRPC(rpc *RPC) {}
|
||||
|
||||
func (pg *peerGater) SendRPC(rpc *RPC, p peer.ID) {}
|
||||
|
||||
func (pg *peerGater) DropRPC(rpc *RPC, p peer.ID) {}
|
||||
|
||||
func (pg *peerGater) UndeliverableMessage(msg *Message) {}
|
||||
1422
vendor/github.com/libp2p/go-libp2p-pubsub/pubsub.go
generated
vendored
Normal file
1422
vendor/github.com/libp2p/go-libp2p-pubsub/pubsub.go
generated
vendored
Normal file
File diff suppressed because it is too large
Load Diff
168
vendor/github.com/libp2p/go-libp2p-pubsub/randomsub.go
generated
vendored
Normal file
168
vendor/github.com/libp2p/go-libp2p-pubsub/randomsub.go
generated
vendored
Normal file
@@ -0,0 +1,168 @@
|
||||
package pubsub
|
||||
|
||||
import (
|
||||
"context"
|
||||
"math"
|
||||
|
||||
"github.com/libp2p/go-libp2p/core/host"
|
||||
"github.com/libp2p/go-libp2p/core/peer"
|
||||
"github.com/libp2p/go-libp2p/core/protocol"
|
||||
)
|
||||
|
||||
const (
|
||||
RandomSubID = protocol.ID("/randomsub/1.0.0")
|
||||
)
|
||||
|
||||
var (
|
||||
RandomSubD = 6
|
||||
)
|
||||
|
||||
// NewRandomSub returns a new PubSub object using RandomSubRouter as the router.
|
||||
func NewRandomSub(ctx context.Context, h host.Host, size int, opts ...Option) (*PubSub, error) {
|
||||
rt := &RandomSubRouter{
|
||||
size: size,
|
||||
peers: make(map[peer.ID]protocol.ID),
|
||||
}
|
||||
return NewPubSub(ctx, h, rt, opts...)
|
||||
}
|
||||
|
||||
// RandomSubRouter is a router that implements a random propagation strategy.
|
||||
// For each message, it selects the square root of the network size peers, with a min of RandomSubD,
|
||||
// and forwards the message to them.
|
||||
type RandomSubRouter struct {
|
||||
p *PubSub
|
||||
peers map[peer.ID]protocol.ID
|
||||
size int
|
||||
tracer *pubsubTracer
|
||||
}
|
||||
|
||||
func (rs *RandomSubRouter) Protocols() []protocol.ID {
|
||||
return []protocol.ID{RandomSubID, FloodSubID}
|
||||
}
|
||||
|
||||
func (rs *RandomSubRouter) Attach(p *PubSub) {
|
||||
rs.p = p
|
||||
rs.tracer = p.tracer
|
||||
}
|
||||
|
||||
func (rs *RandomSubRouter) AddPeer(p peer.ID, proto protocol.ID) {
|
||||
rs.tracer.AddPeer(p, proto)
|
||||
rs.peers[p] = proto
|
||||
}
|
||||
|
||||
func (rs *RandomSubRouter) RemovePeer(p peer.ID) {
|
||||
rs.tracer.RemovePeer(p)
|
||||
delete(rs.peers, p)
|
||||
}
|
||||
|
||||
func (rs *RandomSubRouter) EnoughPeers(topic string, suggested int) bool {
|
||||
// check all peers in the topic
|
||||
tmap, ok := rs.p.topics[topic]
|
||||
if !ok {
|
||||
return false
|
||||
}
|
||||
|
||||
fsPeers := 0
|
||||
rsPeers := 0
|
||||
|
||||
// count floodsub and randomsub peers
|
||||
for p := range tmap {
|
||||
switch rs.peers[p] {
|
||||
case FloodSubID:
|
||||
fsPeers++
|
||||
case RandomSubID:
|
||||
rsPeers++
|
||||
}
|
||||
}
|
||||
|
||||
if suggested == 0 {
|
||||
suggested = RandomSubD
|
||||
}
|
||||
|
||||
if fsPeers+rsPeers >= suggested {
|
||||
return true
|
||||
}
|
||||
|
||||
if rsPeers >= RandomSubD {
|
||||
return true
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
func (rs *RandomSubRouter) AcceptFrom(peer.ID) AcceptStatus {
|
||||
return AcceptAll
|
||||
}
|
||||
|
||||
func (rs *RandomSubRouter) HandleRPC(rpc *RPC) {}
|
||||
|
||||
func (rs *RandomSubRouter) Publish(msg *Message) {
|
||||
from := msg.ReceivedFrom
|
||||
|
||||
tosend := make(map[peer.ID]struct{})
|
||||
rspeers := make(map[peer.ID]struct{})
|
||||
src := peer.ID(msg.GetFrom())
|
||||
|
||||
topic := msg.GetTopic()
|
||||
tmap, ok := rs.p.topics[topic]
|
||||
if !ok {
|
||||
return
|
||||
}
|
||||
|
||||
for p := range tmap {
|
||||
if p == from || p == src {
|
||||
continue
|
||||
}
|
||||
|
||||
if rs.peers[p] == FloodSubID {
|
||||
tosend[p] = struct{}{}
|
||||
} else {
|
||||
rspeers[p] = struct{}{}
|
||||
}
|
||||
}
|
||||
|
||||
if len(rspeers) > RandomSubD {
|
||||
target := RandomSubD
|
||||
sqrt := int(math.Ceil(math.Sqrt(float64(rs.size))))
|
||||
if sqrt > target {
|
||||
target = sqrt
|
||||
}
|
||||
if target > len(rspeers) {
|
||||
target = len(rspeers)
|
||||
}
|
||||
xpeers := peerMapToList(rspeers)
|
||||
shufflePeers(xpeers)
|
||||
xpeers = xpeers[:target]
|
||||
for _, p := range xpeers {
|
||||
tosend[p] = struct{}{}
|
||||
}
|
||||
} else {
|
||||
for p := range rspeers {
|
||||
tosend[p] = struct{}{}
|
||||
}
|
||||
}
|
||||
|
||||
out := rpcWithMessages(msg.Message)
|
||||
for p := range tosend {
|
||||
mch, ok := rs.p.peers[p]
|
||||
if !ok {
|
||||
continue
|
||||
}
|
||||
|
||||
select {
|
||||
case mch <- out:
|
||||
rs.tracer.SendRPC(out, p)
|
||||
default:
|
||||
log.Infof("dropping message to peer %s: queue full", p)
|
||||
rs.tracer.DropRPC(out, p)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (rs *RandomSubRouter) Join(topic string) {
|
||||
rs.tracer.Join(topic)
|
||||
}
|
||||
|
||||
func (rs *RandomSubRouter) Leave(topic string) {
|
||||
rs.tracer.Join(topic)
|
||||
}
|
||||
1081
vendor/github.com/libp2p/go-libp2p-pubsub/score.go
generated
vendored
Normal file
1081
vendor/github.com/libp2p/go-libp2p-pubsub/score.go
generated
vendored
Normal file
File diff suppressed because it is too large
Load Diff
423
vendor/github.com/libp2p/go-libp2p-pubsub/score_params.go
generated
vendored
Normal file
423
vendor/github.com/libp2p/go-libp2p-pubsub/score_params.go
generated
vendored
Normal file
@@ -0,0 +1,423 @@
|
||||
package pubsub
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"math"
|
||||
"net"
|
||||
"time"
|
||||
|
||||
"github.com/libp2p/go-libp2p/core/peer"
|
||||
)
|
||||
|
||||
// PeerScoreThresholds holds the score cut-offs that drive gating decisions.
type PeerScoreThresholds struct {
	// SkipAtomicValidation allows setting only a subset of the thresholds,
	// skipping validation of fields left at their zero values.
	SkipAtomicValidation bool

	// GossipThreshold is the score threshold below which gossip propagation
	// is suppressed; should be negative.
	GossipThreshold float64

	// PublishThreshold is the score threshold below which we shouldn't
	// publish when using flood publishing (also applies to fanout and
	// floodsub peers); should be negative and <= GossipThreshold.
	PublishThreshold float64

	// GraylistThreshold is the score threshold below which message
	// processing is suppressed altogether, implementing an effective gray
	// list according to peer score; should be negative and <= PublishThreshold.
	GraylistThreshold float64

	// AcceptPXThreshold is the score threshold below which PX will be
	// ignored; this should be positive and limited to scores attainable by
	// bootstrappers and other trusted nodes.
	AcceptPXThreshold float64

	// OpportunisticGraftThreshold is the median mesh score threshold before
	// triggering opportunistic grafting; this should have a small positive
	// value.
	OpportunisticGraftThreshold float64
}
|
||||
|
||||
func (p *PeerScoreThresholds) validate() error {
|
||||
|
||||
if !p.SkipAtomicValidation || p.PublishThreshold != 0 || p.GossipThreshold != 0 || p.GraylistThreshold != 0 {
|
||||
if p.GossipThreshold > 0 || isInvalidNumber(p.GossipThreshold) {
|
||||
return fmt.Errorf("invalid gossip threshold; it must be <= 0 and a valid number")
|
||||
}
|
||||
if p.PublishThreshold > 0 || p.PublishThreshold > p.GossipThreshold || isInvalidNumber(p.PublishThreshold) {
|
||||
return fmt.Errorf("invalid publish threshold; it must be <= 0 and <= gossip threshold and a valid number")
|
||||
}
|
||||
if p.GraylistThreshold > 0 || p.GraylistThreshold > p.PublishThreshold || isInvalidNumber(p.GraylistThreshold) {
|
||||
return fmt.Errorf("invalid graylist threshold; it must be <= 0 and <= publish threshold and a valid number")
|
||||
}
|
||||
}
|
||||
|
||||
if !p.SkipAtomicValidation || p.AcceptPXThreshold != 0 {
|
||||
if p.AcceptPXThreshold < 0 || isInvalidNumber(p.AcceptPXThreshold) {
|
||||
return fmt.Errorf("invalid accept PX threshold; it must be >= 0 and a valid number")
|
||||
}
|
||||
}
|
||||
|
||||
if !p.SkipAtomicValidation || p.OpportunisticGraftThreshold != 0 {
|
||||
if p.OpportunisticGraftThreshold < 0 || isInvalidNumber(p.OpportunisticGraftThreshold) {
|
||||
return fmt.Errorf("invalid opportunistic grafting threshold; it must be >= 0 and a valid number")
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
type PeerScoreParams struct {
|
||||
// whether it is allowed to just set some params and not all of them.
|
||||
SkipAtomicValidation bool
|
||||
|
||||
// Score parameters per topic.
|
||||
Topics map[string]*TopicScoreParams
|
||||
|
||||
// Aggregate topic score cap; this limits the total contribution of topics towards a positive
|
||||
// score. It must be positive (or 0 for no cap).
|
||||
TopicScoreCap float64
|
||||
|
||||
// P5: Application-specific peer scoring
|
||||
AppSpecificScore func(p peer.ID) float64
|
||||
AppSpecificWeight float64
|
||||
|
||||
// P6: IP-colocation factor.
|
||||
// The parameter has an associated counter which counts the number of peers with the same IP.
|
||||
// If the number of peers in the same IP exceeds IPColocationFactorThreshold, then the value
|
||||
// is the square of the difference, ie (PeersInSameIP - IPColocationThreshold)^2.
|
||||
// If the number of peers in the same IP is less than the threshold, then the value is 0.
|
||||
// The weight of the parameter MUST be negative, unless you want to disable for testing.
|
||||
// Note: In order to simulate many IPs in a managable manner when testing, you can set the weight to 0
|
||||
// thus disabling the IP colocation penalty.
|
||||
IPColocationFactorWeight float64
|
||||
IPColocationFactorThreshold int
|
||||
IPColocationFactorWhitelist []*net.IPNet
|
||||
|
||||
// P7: behavioural pattern penalties.
|
||||
// This parameter has an associated counter which tracks misbehaviour as detected by the
|
||||
// router. The router currently applies penalties for the following behaviors:
|
||||
// - attempting to re-graft before the prune backoff time has elapsed.
|
||||
// - not following up in IWANT requests for messages advertised with IHAVE.
|
||||
//
|
||||
// The value of the parameter is the square of the counter over the threshold, which decays with
|
||||
// BehaviourPenaltyDecay.
|
||||
// The weight of the parameter MUST be negative (or zero to disable).
|
||||
BehaviourPenaltyWeight, BehaviourPenaltyThreshold, BehaviourPenaltyDecay float64
|
||||
|
||||
// the decay interval for parameter counters.
|
||||
DecayInterval time.Duration
|
||||
|
||||
// counter value below which it is considered 0.
|
||||
DecayToZero float64
|
||||
|
||||
// time to remember counters for a disconnected peer.
|
||||
RetainScore time.Duration
|
||||
|
||||
// time to remember a message delivery for. Default to global TimeCacheDuration if 0.
|
||||
SeenMsgTTL time.Duration
|
||||
}
|
||||
|
||||
// TopicScoreParams holds the per-topic peer scoring parameters.
type TopicScoreParams struct {
	// SkipAtomicValidation allows setting only a subset of the parameters,
	// skipping validation of groups left at their zero values.
	SkipAtomicValidation bool

	// TopicWeight scales this topic's contribution to the overall peer score.
	TopicWeight float64

	// P1: time in the mesh.
	// This is the time the peer has been grafted in the mesh; the value is
	// time/TimeInMeshQuantum, capped by TimeInMeshCap.
	// The weight MUST be positive (or zero to disable).
	TimeInMeshWeight  float64
	TimeInMeshQuantum time.Duration
	TimeInMeshCap     float64

	// P2: first message deliveries.
	// A counter of message deliveries in the topic, decaying with
	// FirstMessageDeliveriesDecay and capped by FirstMessageDeliveriesCap.
	// The weight MUST be positive (or zero to disable).
	FirstMessageDeliveriesWeight, FirstMessageDeliveriesDecay float64
	FirstMessageDeliveriesCap                                 float64

	// P3: mesh message deliveries.
	// Counts (near-)first deliveries from mesh peers within
	// MeshMessageDeliveriesWindow of message validation — deliveries during
	// validation also count and are retroactively applied on success. The
	// window accounts for the minimum time a hostile mesh peer would need to
	// replay back a valid message we just sent it. The counter decays with
	// MeshMessageDeliveriesDecay, capped by MeshMessageDeliveriesCap. Above
	// MeshMessageDeliveriesThreshold the value is 0; below it, the value is
	// the square of the deficit, ie (MessageDeliveriesThreshold - counter)^2.
	// The penalty only activates after MeshMessageDeliveriesActivation time
	// in the mesh. The weight MUST be negative (or zero to disable).
	MeshMessageDeliveriesWeight, MeshMessageDeliveriesDecay      float64
	MeshMessageDeliveriesCap, MeshMessageDeliveriesThreshold     float64
	MeshMessageDeliveriesWindow, MeshMessageDeliveriesActivation time.Duration

	// P3b: sticky mesh propagation failures.
	// A sticky penalty applied when a peer is pruned from the mesh with an
	// active mesh message delivery penalty.
	// The weight MUST be negative (or zero to disable).
	MeshFailurePenaltyWeight, MeshFailurePenaltyDecay float64

	// P4: invalid messages.
	// The value is the square of a counter of invalid messages in the topic,
	// decaying with InvalidMessageDeliveriesDecay.
	// The weight MUST be negative (or zero to disable).
	InvalidMessageDeliveriesWeight, InvalidMessageDeliveriesDecay float64
}
|
||||
|
||||
// peer score parameter validation
|
||||
func (p *PeerScoreParams) validate() error {
|
||||
for topic, params := range p.Topics {
|
||||
err := params.validate()
|
||||
if err != nil {
|
||||
return fmt.Errorf("invalid score parameters for topic %s: %w", topic, err)
|
||||
}
|
||||
}
|
||||
|
||||
if !p.SkipAtomicValidation || p.TopicScoreCap != 0 {
|
||||
// check that the topic score is 0 or something positive
|
||||
if p.TopicScoreCap < 0 || isInvalidNumber(p.TopicScoreCap) {
|
||||
return fmt.Errorf("invalid topic score cap; must be positive (or 0 for no cap) and a valid number")
|
||||
}
|
||||
}
|
||||
|
||||
// check that we have an app specific score; the weight can be anything (but expected positive)
|
||||
if p.AppSpecificScore == nil {
|
||||
if p.SkipAtomicValidation {
|
||||
p.AppSpecificScore = func(p peer.ID) float64 {
|
||||
return 0
|
||||
}
|
||||
} else {
|
||||
return fmt.Errorf("missing application specific score function")
|
||||
}
|
||||
}
|
||||
|
||||
if !p.SkipAtomicValidation || p.IPColocationFactorWeight != 0 {
|
||||
// check the IP collocation factor
|
||||
if p.IPColocationFactorWeight > 0 || isInvalidNumber(p.IPColocationFactorWeight) {
|
||||
return fmt.Errorf("invalid IPColocationFactorWeight; must be negative (or 0 to disable) and a valid number")
|
||||
}
|
||||
if p.IPColocationFactorWeight != 0 && p.IPColocationFactorThreshold < 1 {
|
||||
return fmt.Errorf("invalid IPColocationFactorThreshold; must be at least 1")
|
||||
}
|
||||
}
|
||||
|
||||
// check the behaviour penalty
|
||||
if !p.SkipAtomicValidation || p.BehaviourPenaltyWeight != 0 || p.BehaviourPenaltyThreshold != 0 {
|
||||
if p.BehaviourPenaltyWeight > 0 || isInvalidNumber(p.BehaviourPenaltyWeight) {
|
||||
return fmt.Errorf("invalid BehaviourPenaltyWeight; must be negative (or 0 to disable) and a valid number")
|
||||
}
|
||||
if p.BehaviourPenaltyWeight != 0 && (p.BehaviourPenaltyDecay <= 0 || p.BehaviourPenaltyDecay >= 1 || isInvalidNumber(p.BehaviourPenaltyDecay)) {
|
||||
return fmt.Errorf("invalid BehaviourPenaltyDecay; must be between 0 and 1")
|
||||
}
|
||||
if p.BehaviourPenaltyThreshold < 0 || isInvalidNumber(p.BehaviourPenaltyThreshold) {
|
||||
return fmt.Errorf("invalid BehaviourPenaltyThreshold; must be >= 0 and a valid number")
|
||||
}
|
||||
}
|
||||
|
||||
// check the decay parameters
|
||||
if !p.SkipAtomicValidation || p.DecayInterval != 0 || p.DecayToZero != 0 {
|
||||
if p.DecayInterval < time.Second {
|
||||
return fmt.Errorf("invalid DecayInterval; must be at least 1s")
|
||||
}
|
||||
if p.DecayToZero <= 0 || p.DecayToZero >= 1 || isInvalidNumber(p.DecayToZero) {
|
||||
return fmt.Errorf("invalid DecayToZero; must be between 0 and 1")
|
||||
}
|
||||
}
|
||||
|
||||
// no need to check the score retention; a value of 0 means that we don't retain scores
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p *TopicScoreParams) validate() error {
|
||||
// make sure we have a sane topic weight
|
||||
if p.TopicWeight < 0 || isInvalidNumber(p.TopicWeight) {
|
||||
return fmt.Errorf("invalid topic weight; must be >= 0 and a valid number")
|
||||
}
|
||||
|
||||
// check P1
|
||||
if err := p.validateTimeInMeshParams(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// check P2
|
||||
if err := p.validateMessageDeliveryParams(); err != nil {
|
||||
return err
|
||||
}
|
||||
// check P3
|
||||
if err := p.validateMeshMessageDeliveryParams(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// check P3b
|
||||
if err := p.validateMessageFailurePenaltyParams(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// check P4
|
||||
if err := p.validateInvalidMessageDeliveryParams(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p *TopicScoreParams) validateTimeInMeshParams() error {
|
||||
if p.SkipAtomicValidation {
|
||||
// in non-atomic mode, parameters at their zero values are dismissed from validation.
|
||||
if p.TimeInMeshWeight == 0 && p.TimeInMeshQuantum == 0 && p.TimeInMeshCap == 0 {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// either atomic validation mode, or some parameters have been set a value,
|
||||
// hence, proceed with normal validation of all related parameters in this context.
|
||||
|
||||
if p.TimeInMeshQuantum == 0 {
|
||||
return fmt.Errorf("invalid TimeInMeshQuantum; must be non zero")
|
||||
}
|
||||
if p.TimeInMeshWeight < 0 || isInvalidNumber(p.TimeInMeshWeight) {
|
||||
return fmt.Errorf("invalid TimeInMeshWeight; must be positive (or 0 to disable) and a valid number")
|
||||
}
|
||||
if p.TimeInMeshWeight != 0 && p.TimeInMeshQuantum <= 0 {
|
||||
return fmt.Errorf("invalid TimeInMeshQuantum; must be positive")
|
||||
}
|
||||
if p.TimeInMeshWeight != 0 && (p.TimeInMeshCap <= 0 || isInvalidNumber(p.TimeInMeshCap)) {
|
||||
return fmt.Errorf("invalid TimeInMeshCap; must be positive and a valid number")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p *TopicScoreParams) validateMessageDeliveryParams() error {
|
||||
if p.SkipAtomicValidation {
|
||||
// in non-atomic mode, parameters at their zero values are dismissed from validation.
|
||||
if p.FirstMessageDeliveriesWeight == 0 && p.FirstMessageDeliveriesCap == 0 && p.FirstMessageDeliveriesDecay == 0 {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// either atomic validation mode, or some parameters have been set a value,
|
||||
// hence, proceed with normal validation of all related parameters in this context.
|
||||
|
||||
if p.FirstMessageDeliveriesWeight < 0 || isInvalidNumber(p.FirstMessageDeliveriesWeight) {
|
||||
return fmt.Errorf("invallid FirstMessageDeliveriesWeight; must be positive (or 0 to disable) and a valid number")
|
||||
}
|
||||
if p.FirstMessageDeliveriesWeight != 0 && (p.FirstMessageDeliveriesDecay <= 0 || p.FirstMessageDeliveriesDecay >= 1 || isInvalidNumber(p.FirstMessageDeliveriesDecay)) {
|
||||
return fmt.Errorf("invalid FirstMessageDeliveriesDecay; must be between 0 and 1")
|
||||
}
|
||||
if p.FirstMessageDeliveriesWeight != 0 && (p.FirstMessageDeliveriesCap <= 0 || isInvalidNumber(p.FirstMessageDeliveriesCap)) {
|
||||
return fmt.Errorf("invalid FirstMessageDeliveriesCap; must be positive and a valid number")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p *TopicScoreParams) validateMeshMessageDeliveryParams() error {
|
||||
if p.SkipAtomicValidation {
|
||||
// in non-atomic mode, parameters at their zero values are dismissed from validation.
|
||||
if p.MeshMessageDeliveriesWeight == 0 &&
|
||||
p.MeshMessageDeliveriesCap == 0 &&
|
||||
p.MeshMessageDeliveriesDecay == 0 &&
|
||||
p.MeshMessageDeliveriesThreshold == 0 &&
|
||||
p.MeshMessageDeliveriesWindow == 0 &&
|
||||
p.MeshMessageDeliveriesActivation == 0 {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// either atomic validation mode, or some parameters have been set a value,
|
||||
// hence, proceed with normal validation of all related parameters in this context.
|
||||
|
||||
if p.MeshMessageDeliveriesWeight > 0 || isInvalidNumber(p.MeshMessageDeliveriesWeight) {
|
||||
return fmt.Errorf("invalid MeshMessageDeliveriesWeight; must be negative (or 0 to disable) and a valid number")
|
||||
}
|
||||
if p.MeshMessageDeliveriesWeight != 0 && (p.MeshMessageDeliveriesDecay <= 0 || p.MeshMessageDeliveriesDecay >= 1 || isInvalidNumber(p.MeshMessageDeliveriesDecay)) {
|
||||
return fmt.Errorf("invalid MeshMessageDeliveriesDecay; must be between 0 and 1")
|
||||
}
|
||||
if p.MeshMessageDeliveriesWeight != 0 && (p.MeshMessageDeliveriesCap <= 0 || isInvalidNumber(p.MeshMessageDeliveriesCap)) {
|
||||
return fmt.Errorf("invalid MeshMessageDeliveriesCap; must be positive and a valid number")
|
||||
}
|
||||
if p.MeshMessageDeliveriesWeight != 0 && (p.MeshMessageDeliveriesThreshold <= 0 || isInvalidNumber(p.MeshMessageDeliveriesThreshold)) {
|
||||
return fmt.Errorf("invalid MeshMessageDeliveriesThreshold; must be positive and a valid number")
|
||||
}
|
||||
if p.MeshMessageDeliveriesWindow < 0 {
|
||||
return fmt.Errorf("invalid MeshMessageDeliveriesWindow; must be non-negative")
|
||||
}
|
||||
if p.MeshMessageDeliveriesWeight != 0 && p.MeshMessageDeliveriesActivation < time.Second {
|
||||
return fmt.Errorf("invalid MeshMessageDeliveriesActivation; must be at least 1s")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p *TopicScoreParams) validateMessageFailurePenaltyParams() error {
|
||||
if p.SkipAtomicValidation {
|
||||
// in selective mode, parameters at their zero values are dismissed from validation.
|
||||
if p.MeshFailurePenaltyDecay == 0 && p.MeshFailurePenaltyWeight == 0 {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// either atomic validation mode, or some parameters have been set a value,
|
||||
// hence, proceed with normal validation of all related parameters in this context.
|
||||
|
||||
if p.MeshFailurePenaltyWeight > 0 || isInvalidNumber(p.MeshFailurePenaltyWeight) {
|
||||
return fmt.Errorf("invalid MeshFailurePenaltyWeight; must be negative (or 0 to disable) and a valid number")
|
||||
}
|
||||
if p.MeshFailurePenaltyWeight != 0 && (isInvalidNumber(p.MeshFailurePenaltyDecay) || p.MeshFailurePenaltyDecay <= 0 || p.MeshFailurePenaltyDecay >= 1) {
|
||||
return fmt.Errorf("invalid MeshFailurePenaltyDecay; must be between 0 and 1")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p *TopicScoreParams) validateInvalidMessageDeliveryParams() error {
|
||||
if p.SkipAtomicValidation {
|
||||
// in selective mode, parameters at their zero values are dismissed from validation.
|
||||
if p.InvalidMessageDeliveriesDecay == 0 && p.InvalidMessageDeliveriesWeight == 0 {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// either atomic validation mode, or some parameters have been set a value,
|
||||
// hence, proceed with normal validation of all related parameters in this context.
|
||||
|
||||
if p.InvalidMessageDeliveriesWeight > 0 || isInvalidNumber(p.InvalidMessageDeliveriesWeight) {
|
||||
return fmt.Errorf("invalid InvalidMessageDeliveriesWeight; must be negative (or 0 to disable) and a valid number")
|
||||
}
|
||||
if p.InvalidMessageDeliveriesDecay <= 0 || p.InvalidMessageDeliveriesDecay >= 1 || isInvalidNumber(p.InvalidMessageDeliveriesDecay) {
|
||||
return fmt.Errorf("invalid InvalidMessageDeliveriesDecay; must be between 0 and 1")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
const (
|
||||
DefaultDecayInterval = time.Second
|
||||
DefaultDecayToZero = 0.01
|
||||
)
|
||||
|
||||
// ScoreParameterDecay computes the decay factor for a parameter, assuming the DecayInterval is 1s
|
||||
// and that the value decays to zero if it drops below 0.01
|
||||
func ScoreParameterDecay(decay time.Duration) float64 {
|
||||
return ScoreParameterDecayWithBase(decay, DefaultDecayInterval, DefaultDecayToZero)
|
||||
}
|
||||
|
||||
// ScoreParameterDecayWithBase computes the decay factor for a parameter using base as the DecayInterval
|
||||
func ScoreParameterDecayWithBase(decay time.Duration, base time.Duration, decayToZero float64) float64 {
|
||||
// the decay is linear, so after n ticks the value is factor^n
|
||||
// so factor^n = decayToZero => factor = decayToZero^(1/n)
|
||||
ticks := float64(decay / base)
|
||||
return math.Pow(decayToZero, 1/ticks)
|
||||
}
|
||||
|
||||
// checks whether the provided floating-point number is `Not a Number`
|
||||
// or an infinite number.
|
||||
func isInvalidNumber(num float64) bool {
|
||||
return math.IsNaN(num) || math.IsInf(num, 0)
|
||||
}
|
||||
138
vendor/github.com/libp2p/go-libp2p-pubsub/sign.go
generated
vendored
Normal file
138
vendor/github.com/libp2p/go-libp2p-pubsub/sign.go
generated
vendored
Normal file
@@ -0,0 +1,138 @@
|
||||
package pubsub
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
pb "github.com/libp2p/go-libp2p-pubsub/pb"
|
||||
|
||||
"github.com/libp2p/go-libp2p/core/crypto"
|
||||
"github.com/libp2p/go-libp2p/core/peer"
|
||||
)
|
||||
|
||||
// MessageSignaturePolicy describes if signatures are produced, expected,
// and/or verified.
type MessageSignaturePolicy uint8

// LaxSign and LaxNoSign are deprecated. In the future msgSigning and
// msgVerification can be unified.
const (
	// msgSigning is set when the locally produced messages must be signed
	msgSigning MessageSignaturePolicy = 1 << iota
	// msgVerification is set when external messages must be verified
	msgVerification
)

const (
	// StrictSign produces signatures and expects and verifies incoming signatures
	StrictSign = msgSigning | msgVerification
	// StrictNoSign does not produce signatures and drops and penalises incoming messages that carry one
	StrictNoSign = msgVerification
	// LaxSign produces signatures and validates incoming signatures iff one is present
	// Deprecated: it is recommend to either strictly enable, or strictly disable, signatures.
	LaxSign = msgSigning
	// LaxNoSign does not produce signatures and validates incoming signatures iff one is present
	// Deprecated: it is recommend to either strictly enable, or strictly disable, signatures.
	LaxNoSign = 0
)

// mustVerify is true when a message signature must be verified.
// If signatures are not expected, verification checks if the signature is absent.
func (p MessageSignaturePolicy) mustVerify() bool {
	return p&msgVerification != 0
}

// mustSign is true when messages should be signed, and incoming messages are
// expected to have a signature.
func (p MessageSignaturePolicy) mustSign() bool {
	return p&msgSigning != 0
}
|
||||
|
||||
const SignPrefix = "libp2p-pubsub:"
|
||||
|
||||
func verifyMessageSignature(m *pb.Message) error {
|
||||
pubk, err := messagePubKey(m)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
xm := *m
|
||||
xm.Signature = nil
|
||||
xm.Key = nil
|
||||
bytes, err := xm.Marshal()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
bytes = withSignPrefix(bytes)
|
||||
|
||||
valid, err := pubk.Verify(bytes, m.Signature)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if !valid {
|
||||
return fmt.Errorf("invalid signature")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func messagePubKey(m *pb.Message) (crypto.PubKey, error) {
|
||||
var pubk crypto.PubKey
|
||||
|
||||
pid, err := peer.IDFromBytes(m.From)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if m.Key == nil {
|
||||
// no attached key, it must be extractable from the source ID
|
||||
pubk, err = pid.ExtractPublicKey()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("cannot extract signing key: %s", err.Error())
|
||||
}
|
||||
if pubk == nil {
|
||||
return nil, fmt.Errorf("cannot extract signing key")
|
||||
}
|
||||
} else {
|
||||
pubk, err = crypto.UnmarshalPublicKey(m.Key)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("cannot unmarshal signing key: %s", err.Error())
|
||||
}
|
||||
|
||||
// verify that the source ID matches the attached key
|
||||
if !pid.MatchesPublicKey(pubk) {
|
||||
return nil, fmt.Errorf("bad signing key; source ID %s doesn't match key", pid)
|
||||
}
|
||||
}
|
||||
|
||||
return pubk, nil
|
||||
}
|
||||
|
||||
func signMessage(pid peer.ID, key crypto.PrivKey, m *pb.Message) error {
|
||||
bytes, err := m.Marshal()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
bytes = withSignPrefix(bytes)
|
||||
|
||||
sig, err := key.Sign(bytes)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
m.Signature = sig
|
||||
|
||||
pk, _ := pid.ExtractPublicKey()
|
||||
if pk == nil {
|
||||
pubk, err := crypto.MarshalPublicKey(key.GetPublic())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
m.Key = pubk
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func withSignPrefix(bytes []byte) []byte {
|
||||
return append([]byte(SignPrefix), bytes...)
|
||||
}
|
||||
51
vendor/github.com/libp2p/go-libp2p-pubsub/subscription.go
generated
vendored
Normal file
51
vendor/github.com/libp2p/go-libp2p-pubsub/subscription.go
generated
vendored
Normal file
@@ -0,0 +1,51 @@
|
||||
package pubsub
|
||||
|
||||
import (
|
||||
"context"
|
||||
"sync"
|
||||
)
|
||||
|
||||
// Subscription handles the details of a particular Topic subscription.
|
||||
// There may be many subscriptions for a given Topic.
|
||||
type Subscription struct {
|
||||
topic string
|
||||
ch chan *Message
|
||||
cancelCh chan<- *Subscription
|
||||
ctx context.Context
|
||||
err error
|
||||
once sync.Once
|
||||
}
|
||||
|
||||
// Topic returns the topic string associated with the Subscription
|
||||
func (sub *Subscription) Topic() string {
|
||||
return sub.topic
|
||||
}
|
||||
|
||||
// Next returns the next message in our subscription
|
||||
func (sub *Subscription) Next(ctx context.Context) (*Message, error) {
|
||||
select {
|
||||
case msg, ok := <-sub.ch:
|
||||
if !ok {
|
||||
return msg, sub.err
|
||||
}
|
||||
|
||||
return msg, nil
|
||||
case <-ctx.Done():
|
||||
return nil, ctx.Err()
|
||||
}
|
||||
}
|
||||
|
||||
// Cancel closes the subscription. If this is the last active subscription then pubsub will send an unsubscribe
|
||||
// announcement to the network.
|
||||
func (sub *Subscription) Cancel() {
|
||||
select {
|
||||
case sub.cancelCh <- sub:
|
||||
case <-sub.ctx.Done():
|
||||
}
|
||||
}
|
||||
|
||||
func (sub *Subscription) close() {
|
||||
sub.once.Do(func() {
|
||||
close(sub.ch)
|
||||
})
|
||||
}
|
||||
149
vendor/github.com/libp2p/go-libp2p-pubsub/subscription_filter.go
generated
vendored
Normal file
149
vendor/github.com/libp2p/go-libp2p-pubsub/subscription_filter.go
generated
vendored
Normal file
@@ -0,0 +1,149 @@
|
||||
package pubsub
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"regexp"
|
||||
|
||||
pb "github.com/libp2p/go-libp2p-pubsub/pb"
|
||||
|
||||
"github.com/libp2p/go-libp2p/core/peer"
|
||||
)
|
||||
|
||||
// ErrTooManySubscriptions may be returned by a SubscriptionFilter to signal that there are too many
|
||||
// subscriptions to process.
|
||||
var ErrTooManySubscriptions = errors.New("too many subscriptions")
|
||||
|
||||
// SubscriptionFilter is a function that tells us whether we are interested in allowing and tracking
|
||||
// subscriptions for a given topic.
|
||||
//
|
||||
// The filter is consulted whenever a subscription notification is received by another peer; if the
|
||||
// filter returns false, then the notification is ignored.
|
||||
//
|
||||
// The filter is also consulted when joining topics; if the filter returns false, then the Join
|
||||
// operation will result in an error.
|
||||
type SubscriptionFilter interface {
|
||||
// CanSubscribe returns true if the topic is of interest and we can subscribe to it
|
||||
CanSubscribe(topic string) bool
|
||||
|
||||
// FilterIncomingSubscriptions is invoked for all RPCs containing subscription notifications.
|
||||
// It should filter only the subscriptions of interest and my return an error if (for instance)
|
||||
// there are too many subscriptions.
|
||||
FilterIncomingSubscriptions(peer.ID, []*pb.RPC_SubOpts) ([]*pb.RPC_SubOpts, error)
|
||||
}
|
||||
|
||||
// WithSubscriptionFilter is a pubsub option that specifies a filter for subscriptions
|
||||
// in topics of interest.
|
||||
func WithSubscriptionFilter(subFilter SubscriptionFilter) Option {
|
||||
return func(ps *PubSub) error {
|
||||
ps.subFilter = subFilter
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// NewAllowlistSubscriptionFilter creates a subscription filter that only allows explicitly
|
||||
// specified topics for local subscriptions and incoming peer subscriptions.
|
||||
func NewAllowlistSubscriptionFilter(topics ...string) SubscriptionFilter {
|
||||
allow := make(map[string]struct{})
|
||||
for _, topic := range topics {
|
||||
allow[topic] = struct{}{}
|
||||
}
|
||||
|
||||
return &allowlistSubscriptionFilter{allow: allow}
|
||||
}
|
||||
|
||||
type allowlistSubscriptionFilter struct {
|
||||
allow map[string]struct{}
|
||||
}
|
||||
|
||||
var _ SubscriptionFilter = (*allowlistSubscriptionFilter)(nil)
|
||||
|
||||
func (f *allowlistSubscriptionFilter) CanSubscribe(topic string) bool {
|
||||
_, ok := f.allow[topic]
|
||||
return ok
|
||||
}
|
||||
|
||||
func (f *allowlistSubscriptionFilter) FilterIncomingSubscriptions(from peer.ID, subs []*pb.RPC_SubOpts) ([]*pb.RPC_SubOpts, error) {
|
||||
return FilterSubscriptions(subs, f.CanSubscribe), nil
|
||||
}
|
||||
|
||||
// NewRegexpSubscriptionFilter creates a subscription filter that only allows topics that
|
||||
// match a regular expression for local subscriptions and incoming peer subscriptions.
|
||||
//
|
||||
// Warning: the user should take care to match start/end of string in the supplied regular
|
||||
// expression, otherwise the filter might match unwanted topics unexpectedly.
|
||||
func NewRegexpSubscriptionFilter(rx *regexp.Regexp) SubscriptionFilter {
|
||||
return &rxSubscriptionFilter{allow: rx}
|
||||
}
|
||||
|
||||
type rxSubscriptionFilter struct {
|
||||
allow *regexp.Regexp
|
||||
}
|
||||
|
||||
var _ SubscriptionFilter = (*rxSubscriptionFilter)(nil)
|
||||
|
||||
func (f *rxSubscriptionFilter) CanSubscribe(topic string) bool {
|
||||
return f.allow.MatchString(topic)
|
||||
}
|
||||
|
||||
func (f *rxSubscriptionFilter) FilterIncomingSubscriptions(from peer.ID, subs []*pb.RPC_SubOpts) ([]*pb.RPC_SubOpts, error) {
|
||||
return FilterSubscriptions(subs, f.CanSubscribe), nil
|
||||
}
|
||||
|
||||
// FilterSubscriptions filters (and deduplicates) a list of subscriptions.
|
||||
// filter should return true if a topic is of interest.
|
||||
func FilterSubscriptions(subs []*pb.RPC_SubOpts, filter func(string) bool) []*pb.RPC_SubOpts {
|
||||
accept := make(map[string]*pb.RPC_SubOpts)
|
||||
|
||||
for _, sub := range subs {
|
||||
topic := sub.GetTopicid()
|
||||
|
||||
if !filter(topic) {
|
||||
continue
|
||||
}
|
||||
|
||||
otherSub, ok := accept[topic]
|
||||
if ok {
|
||||
if sub.GetSubscribe() != otherSub.GetSubscribe() {
|
||||
delete(accept, topic)
|
||||
}
|
||||
} else {
|
||||
accept[topic] = sub
|
||||
}
|
||||
}
|
||||
|
||||
if len(accept) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
result := make([]*pb.RPC_SubOpts, 0, len(accept))
|
||||
for _, sub := range accept {
|
||||
result = append(result, sub)
|
||||
}
|
||||
|
||||
return result
|
||||
}
|
||||
|
||||
// WrapLimitSubscriptionFilter wraps a subscription filter with a hard limit in the number of
|
||||
// subscriptions allowed in an RPC message.
|
||||
func WrapLimitSubscriptionFilter(filter SubscriptionFilter, limit int) SubscriptionFilter {
|
||||
return &limitSubscriptionFilter{filter: filter, limit: limit}
|
||||
}
|
||||
|
||||
type limitSubscriptionFilter struct {
|
||||
filter SubscriptionFilter
|
||||
limit int
|
||||
}
|
||||
|
||||
var _ SubscriptionFilter = (*limitSubscriptionFilter)(nil)
|
||||
|
||||
func (f *limitSubscriptionFilter) CanSubscribe(topic string) bool {
|
||||
return f.filter.CanSubscribe(topic)
|
||||
}
|
||||
|
||||
func (f *limitSubscriptionFilter) FilterIncomingSubscriptions(from peer.ID, subs []*pb.RPC_SubOpts) ([]*pb.RPC_SubOpts, error) {
|
||||
if len(subs) > f.limit {
|
||||
return nil, ErrTooManySubscriptions
|
||||
}
|
||||
|
||||
return f.filter.FilterIncomingSubscriptions(from, subs)
|
||||
}
|
||||
259
vendor/github.com/libp2p/go-libp2p-pubsub/tag_tracer.go
generated
vendored
Normal file
259
vendor/github.com/libp2p/go-libp2p-pubsub/tag_tracer.go
generated
vendored
Normal file
@@ -0,0 +1,259 @@
|
||||
package pubsub
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/libp2p/go-libp2p/core/connmgr"
|
||||
"github.com/libp2p/go-libp2p/core/peer"
|
||||
"github.com/libp2p/go-libp2p/core/protocol"
|
||||
)
|
||||
|
||||
var (
	// GossipSubConnTagBumpMessageDelivery is the amount to add to the connection manager
	// tag that tracks message deliveries. Each time a peer is the first to deliver a
	// message within a topic, we "bump" a tag by this amount, up to a maximum
	// of GossipSubConnTagMessageDeliveryCap.
	// Note that the delivery tags decay over time, decreasing by GossipSubConnTagDecayAmount
	// at every GossipSubConnTagDecayInterval.
	GossipSubConnTagBumpMessageDelivery = 1

	// GossipSubConnTagDecayInterval is the decay interval for decaying connection manager tags.
	GossipSubConnTagDecayInterval = 10 * time.Minute

	// GossipSubConnTagDecayAmount is subtracted from decaying tag values at each decay interval.
	GossipSubConnTagDecayAmount = 1

	// GossipSubConnTagMessageDeliveryCap is the maximum value for the connection manager tags that
	// track message deliveries.
	GossipSubConnTagMessageDeliveryCap = 15
)
|
||||
|
||||
// tagTracer is an internal tracer that applies connection manager tags to peer
|
||||
// connections based on their behavior.
|
||||
//
|
||||
// We tag a peer's connections for the following reasons:
|
||||
// - Directly connected peers are tagged with GossipSubConnTagValueDirectPeer (default 1000).
|
||||
// - Mesh peers are tagged with a value of GossipSubConnTagValueMeshPeer (default 20).
|
||||
// If a peer is in multiple topic meshes, they'll be tagged for each.
|
||||
// - For each message that we receive, we bump a delivery tag for peer that delivered the message
|
||||
// first.
|
||||
// The delivery tags have a maximum value, GossipSubConnTagMessageDeliveryCap, and they decay at
|
||||
// a rate of GossipSubConnTagDecayAmount / GossipSubConnTagDecayInterval.
|
||||
type tagTracer struct {
|
||||
sync.RWMutex
|
||||
|
||||
cmgr connmgr.ConnManager
|
||||
idGen *msgIDGenerator
|
||||
decayer connmgr.Decayer
|
||||
decaying map[string]connmgr.DecayingTag
|
||||
direct map[peer.ID]struct{}
|
||||
|
||||
// a map of message ids to the set of peers who delivered the message after the first delivery,
|
||||
// but before the message was finished validating
|
||||
nearFirst map[string]map[peer.ID]struct{}
|
||||
}
|
||||
|
||||
func newTagTracer(cmgr connmgr.ConnManager) *tagTracer {
|
||||
decayer, ok := connmgr.SupportsDecay(cmgr)
|
||||
if !ok {
|
||||
log.Debugf("connection manager does not support decaying tags, delivery tags will not be applied")
|
||||
}
|
||||
return &tagTracer{
|
||||
cmgr: cmgr,
|
||||
idGen: newMsgIdGenerator(),
|
||||
decayer: decayer,
|
||||
decaying: make(map[string]connmgr.DecayingTag),
|
||||
nearFirst: make(map[string]map[peer.ID]struct{}),
|
||||
}
|
||||
}
|
||||
|
||||
func (t *tagTracer) Start(gs *GossipSubRouter) {
|
||||
if t == nil {
|
||||
return
|
||||
}
|
||||
|
||||
t.idGen = gs.p.idGen
|
||||
t.direct = gs.direct
|
||||
}
|
||||
|
||||
func (t *tagTracer) tagPeerIfDirect(p peer.ID) {
|
||||
if t.direct == nil {
|
||||
return
|
||||
}
|
||||
|
||||
// tag peer if it is a direct peer
|
||||
_, direct := t.direct[p]
|
||||
if direct {
|
||||
t.cmgr.Protect(p, "pubsub:<direct>")
|
||||
}
|
||||
}
|
||||
|
||||
func (t *tagTracer) tagMeshPeer(p peer.ID, topic string) {
|
||||
tag := topicTag(topic)
|
||||
t.cmgr.Protect(p, tag)
|
||||
}
|
||||
|
||||
func (t *tagTracer) untagMeshPeer(p peer.ID, topic string) {
|
||||
tag := topicTag(topic)
|
||||
t.cmgr.Unprotect(p, tag)
|
||||
}
|
||||
|
||||
// topicTag derives the connection manager tag name for a topic.
func topicTag(topic string) string {
	return "pubsub:" + topic
}
|
||||
|
||||
func (t *tagTracer) addDeliveryTag(topic string) {
|
||||
if t.decayer == nil {
|
||||
return
|
||||
}
|
||||
|
||||
name := fmt.Sprintf("pubsub-deliveries:%s", topic)
|
||||
t.Lock()
|
||||
defer t.Unlock()
|
||||
tag, err := t.decayer.RegisterDecayingTag(
|
||||
name,
|
||||
GossipSubConnTagDecayInterval,
|
||||
connmgr.DecayFixed(GossipSubConnTagDecayAmount),
|
||||
connmgr.BumpSumBounded(0, GossipSubConnTagMessageDeliveryCap))
|
||||
|
||||
if err != nil {
|
||||
log.Warnf("unable to create decaying delivery tag: %s", err)
|
||||
return
|
||||
}
|
||||
t.decaying[topic] = tag
|
||||
}
|
||||
|
||||
func (t *tagTracer) removeDeliveryTag(topic string) {
|
||||
t.Lock()
|
||||
defer t.Unlock()
|
||||
tag, ok := t.decaying[topic]
|
||||
if !ok {
|
||||
return
|
||||
}
|
||||
err := tag.Close()
|
||||
if err != nil {
|
||||
log.Warnf("error closing decaying connmgr tag: %s", err)
|
||||
}
|
||||
delete(t.decaying, topic)
|
||||
}
|
||||
|
||||
func (t *tagTracer) bumpDeliveryTag(p peer.ID, topic string) error {
|
||||
t.RLock()
|
||||
defer t.RUnlock()
|
||||
|
||||
tag, ok := t.decaying[topic]
|
||||
if !ok {
|
||||
return fmt.Errorf("no decaying tag registered for topic %s", topic)
|
||||
}
|
||||
return tag.Bump(p, GossipSubConnTagBumpMessageDelivery)
|
||||
}
|
||||
|
||||
func (t *tagTracer) bumpTagsForMessage(p peer.ID, msg *Message) {
|
||||
topic := msg.GetTopic()
|
||||
err := t.bumpDeliveryTag(p, topic)
|
||||
if err != nil {
|
||||
log.Warnf("error bumping delivery tag: %s", err)
|
||||
}
|
||||
}
|
||||
|
||||
// nearFirstPeers returns the peers who delivered the message while it was still validating
|
||||
func (t *tagTracer) nearFirstPeers(msg *Message) []peer.ID {
|
||||
t.Lock()
|
||||
defer t.Unlock()
|
||||
peersMap, ok := t.nearFirst[t.idGen.ID(msg)]
|
||||
if !ok {
|
||||
return nil
|
||||
}
|
||||
peers := make([]peer.ID, 0, len(peersMap))
|
||||
for p := range peersMap {
|
||||
peers = append(peers, p)
|
||||
}
|
||||
return peers
|
||||
}
|
||||
|
||||
// -- RawTracer interface methods
|
||||
var _ RawTracer = (*tagTracer)(nil)
|
||||
|
||||
func (t *tagTracer) AddPeer(p peer.ID, proto protocol.ID) {
|
||||
t.tagPeerIfDirect(p)
|
||||
}
|
||||
|
||||
func (t *tagTracer) Join(topic string) {
|
||||
t.addDeliveryTag(topic)
|
||||
}
|
||||
|
||||
func (t *tagTracer) DeliverMessage(msg *Message) {
|
||||
nearFirst := t.nearFirstPeers(msg)
|
||||
|
||||
t.bumpTagsForMessage(msg.ReceivedFrom, msg)
|
||||
for _, p := range nearFirst {
|
||||
t.bumpTagsForMessage(p, msg)
|
||||
}
|
||||
|
||||
// delete the delivery state for this message
|
||||
t.Lock()
|
||||
delete(t.nearFirst, t.idGen.ID(msg))
|
||||
t.Unlock()
|
||||
}
|
||||
|
||||
func (t *tagTracer) Leave(topic string) {
|
||||
t.removeDeliveryTag(topic)
|
||||
}
|
||||
|
||||
func (t *tagTracer) Graft(p peer.ID, topic string) {
|
||||
t.tagMeshPeer(p, topic)
|
||||
}
|
||||
|
||||
func (t *tagTracer) Prune(p peer.ID, topic string) {
|
||||
t.untagMeshPeer(p, topic)
|
||||
}
|
||||
|
||||
func (t *tagTracer) ValidateMessage(msg *Message) {
|
||||
t.Lock()
|
||||
defer t.Unlock()
|
||||
|
||||
// create map to start tracking the peers who deliver while we're validating
|
||||
id := t.idGen.ID(msg)
|
||||
if _, exists := t.nearFirst[id]; exists {
|
||||
return
|
||||
}
|
||||
t.nearFirst[id] = make(map[peer.ID]struct{})
|
||||
}
|
||||
|
||||
func (t *tagTracer) DuplicateMessage(msg *Message) {
|
||||
t.Lock()
|
||||
defer t.Unlock()
|
||||
|
||||
id := t.idGen.ID(msg)
|
||||
peers, ok := t.nearFirst[id]
|
||||
if !ok {
|
||||
return
|
||||
}
|
||||
peers[msg.ReceivedFrom] = struct{}{}
|
||||
}
|
||||
|
||||
func (t *tagTracer) RejectMessage(msg *Message, reason string) {
|
||||
t.Lock()
|
||||
defer t.Unlock()
|
||||
|
||||
// We want to delete the near-first delivery tracking for messages that have passed through
|
||||
// the validation pipeline. Other rejection reasons (missing signature, etc) skip the validation
|
||||
// queue, so we don't want to remove the state in case the message is still validating.
|
||||
switch reason {
|
||||
case RejectValidationThrottled:
|
||||
fallthrough
|
||||
case RejectValidationIgnored:
|
||||
fallthrough
|
||||
case RejectValidationFailed:
|
||||
delete(t.nearFirst, t.idGen.ID(msg))
|
||||
}
|
||||
}
|
||||
|
||||
func (t *tagTracer) RemovePeer(peer.ID) {}
|
||||
func (t *tagTracer) ThrottlePeer(p peer.ID) {}
|
||||
func (t *tagTracer) RecvRPC(rpc *RPC) {}
|
||||
func (t *tagTracer) SendRPC(rpc *RPC, p peer.ID) {}
|
||||
func (t *tagTracer) DropRPC(rpc *RPC, p peer.ID) {}
|
||||
func (t *tagTracer) UndeliverableMessage(msg *Message) {}
|
||||
56
vendor/github.com/libp2p/go-libp2p-pubsub/timecache/first_seen_cache.go
generated
vendored
Normal file
56
vendor/github.com/libp2p/go-libp2p-pubsub/timecache/first_seen_cache.go
generated
vendored
Normal file
@@ -0,0 +1,56 @@
|
||||
package timecache
|
||||
|
||||
import (
|
||||
"context"
|
||||
"sync"
|
||||
"time"
|
||||
)
|
||||
|
||||
// FirstSeenCache is a time cache that only marks the expiry of a message when first added.
|
||||
type FirstSeenCache struct {
|
||||
lk sync.RWMutex
|
||||
m map[string]time.Time
|
||||
ttl time.Duration
|
||||
|
||||
done func()
|
||||
}
|
||||
|
||||
var _ TimeCache = (*FirstSeenCache)(nil)
|
||||
|
||||
func newFirstSeenCache(ttl time.Duration) *FirstSeenCache {
|
||||
tc := &FirstSeenCache{
|
||||
m: make(map[string]time.Time),
|
||||
ttl: ttl,
|
||||
}
|
||||
|
||||
ctx, done := context.WithCancel(context.Background())
|
||||
tc.done = done
|
||||
go background(ctx, &tc.lk, tc.m)
|
||||
|
||||
return tc
|
||||
}
|
||||
|
||||
func (tc *FirstSeenCache) Done() {
|
||||
tc.done()
|
||||
}
|
||||
|
||||
func (tc *FirstSeenCache) Has(s string) bool {
|
||||
tc.lk.RLock()
|
||||
defer tc.lk.RUnlock()
|
||||
|
||||
_, ok := tc.m[s]
|
||||
return ok
|
||||
}
|
||||
|
||||
func (tc *FirstSeenCache) Add(s string) bool {
|
||||
tc.lk.Lock()
|
||||
defer tc.lk.Unlock()
|
||||
|
||||
_, ok := tc.m[s]
|
||||
if ok {
|
||||
return false
|
||||
}
|
||||
|
||||
tc.m[s] = time.Now().Add(tc.ttl)
|
||||
return true
|
||||
}
|
||||
58
vendor/github.com/libp2p/go-libp2p-pubsub/timecache/last_seen_cache.go
generated
vendored
Normal file
58
vendor/github.com/libp2p/go-libp2p-pubsub/timecache/last_seen_cache.go
generated
vendored
Normal file
@@ -0,0 +1,58 @@
|
||||
package timecache
|
||||
|
||||
import (
|
||||
"context"
|
||||
"sync"
|
||||
"time"
|
||||
)
|
||||
|
||||
// LastSeenCache is a time cache that extends the expiry of a seen message when added
|
||||
// or checked for presence with Has..
|
||||
type LastSeenCache struct {
|
||||
lk sync.Mutex
|
||||
m map[string]time.Time
|
||||
ttl time.Duration
|
||||
|
||||
done func()
|
||||
}
|
||||
|
||||
var _ TimeCache = (*LastSeenCache)(nil)
|
||||
|
||||
func newLastSeenCache(ttl time.Duration) *LastSeenCache {
|
||||
tc := &LastSeenCache{
|
||||
m: make(map[string]time.Time),
|
||||
ttl: ttl,
|
||||
}
|
||||
|
||||
ctx, done := context.WithCancel(context.Background())
|
||||
tc.done = done
|
||||
go background(ctx, &tc.lk, tc.m)
|
||||
|
||||
return tc
|
||||
}
|
||||
|
||||
func (tc *LastSeenCache) Done() {
|
||||
tc.done()
|
||||
}
|
||||
|
||||
func (tc *LastSeenCache) Add(s string) bool {
|
||||
tc.lk.Lock()
|
||||
defer tc.lk.Unlock()
|
||||
|
||||
_, ok := tc.m[s]
|
||||
tc.m[s] = time.Now().Add(tc.ttl)
|
||||
|
||||
return !ok
|
||||
}
|
||||
|
||||
func (tc *LastSeenCache) Has(s string) bool {
|
||||
tc.lk.Lock()
|
||||
defer tc.lk.Unlock()
|
||||
|
||||
_, ok := tc.m[s]
|
||||
if ok {
|
||||
tc.m[s] = time.Now().Add(tc.ttl)
|
||||
}
|
||||
|
||||
return ok
|
||||
}
|
||||
52
vendor/github.com/libp2p/go-libp2p-pubsub/timecache/time_cache.go
generated
vendored
Normal file
52
vendor/github.com/libp2p/go-libp2p-pubsub/timecache/time_cache.go
generated
vendored
Normal file
@@ -0,0 +1,52 @@
|
||||
package timecache
|
||||
|
||||
import (
|
||||
"time"
|
||||
|
||||
logger "github.com/ipfs/go-log/v2"
|
||||
)
|
||||
|
||||
var log = logger.Logger("pubsub/timecache")
|
||||
|
||||
// Stategy is the TimeCache expiration strategy to use.
|
||||
type Strategy uint8
|
||||
|
||||
const (
|
||||
// Strategy_FirstSeen expires an entry from the time it was added.
|
||||
Strategy_FirstSeen Strategy = iota
|
||||
// Stategy_LastSeen expires an entry from the last time it was touched by an Add or Has.
|
||||
Strategy_LastSeen
|
||||
)
|
||||
|
||||
// TimeCache is a cahe of recently seen messages (by id).
|
||||
type TimeCache interface {
|
||||
// Add adds an id into the cache, if it is not already there.
|
||||
// Returns true if the id was newly added to the cache.
|
||||
// Depending on the implementation strategy, it may or may not update the expiry of
|
||||
// an existing entry.
|
||||
Add(string) bool
|
||||
// Has checks the cache for the presence of an id.
|
||||
// Depending on the implementation strategy, it may or may not update the expiry of
|
||||
// an existing entry.
|
||||
Has(string) bool
|
||||
// Done signals that the user is done with this cache, which it may stop background threads
|
||||
// and relinquish resources.
|
||||
Done()
|
||||
}
|
||||
|
||||
// NewTimeCache defaults to the original ("first seen") cache implementation
|
||||
func NewTimeCache(ttl time.Duration) TimeCache {
|
||||
return NewTimeCacheWithStrategy(Strategy_FirstSeen, ttl)
|
||||
}
|
||||
|
||||
func NewTimeCacheWithStrategy(strategy Strategy, ttl time.Duration) TimeCache {
|
||||
switch strategy {
|
||||
case Strategy_FirstSeen:
|
||||
return newFirstSeenCache(ttl)
|
||||
case Strategy_LastSeen:
|
||||
return newLastSeenCache(ttl)
|
||||
default:
|
||||
// Default to the original time cache implementation
|
||||
return newFirstSeenCache(ttl)
|
||||
}
|
||||
}
|
||||
35
vendor/github.com/libp2p/go-libp2p-pubsub/timecache/util.go
generated
vendored
Normal file
35
vendor/github.com/libp2p/go-libp2p-pubsub/timecache/util.go
generated
vendored
Normal file
@@ -0,0 +1,35 @@
|
||||
package timecache
|
||||
|
||||
import (
|
||||
"context"
|
||||
"sync"
|
||||
"time"
|
||||
)
|
||||
|
||||
var backgroundSweepInterval = time.Minute
|
||||
|
||||
func background(ctx context.Context, lk sync.Locker, m map[string]time.Time) {
|
||||
ticker := time.NewTicker(backgroundSweepInterval)
|
||||
defer ticker.Stop()
|
||||
|
||||
for {
|
||||
select {
|
||||
case now := <-ticker.C:
|
||||
sweep(lk, m, now)
|
||||
|
||||
case <-ctx.Done():
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func sweep(lk sync.Locker, m map[string]time.Time, now time.Time) {
|
||||
lk.Lock()
|
||||
defer lk.Unlock()
|
||||
|
||||
for k, expiry := range m {
|
||||
if expiry.Before(now) {
|
||||
delete(m, k)
|
||||
}
|
||||
}
|
||||
}
|
||||
477
vendor/github.com/libp2p/go-libp2p-pubsub/topic.go
generated
vendored
Normal file
477
vendor/github.com/libp2p/go-libp2p-pubsub/topic.go
generated
vendored
Normal file
@@ -0,0 +1,477 @@
|
||||
package pubsub
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
pb "github.com/libp2p/go-libp2p-pubsub/pb"
|
||||
"github.com/libp2p/go-libp2p/core/crypto"
|
||||
|
||||
"github.com/libp2p/go-libp2p/core/peer"
|
||||
)
|
||||
|
||||
// ErrTopicClosed is returned if a Topic is utilized after it has been closed
var ErrTopicClosed = errors.New("this Topic is closed, try opening a new one")

// ErrNilSignKey is returned if a nil private key was provided
var ErrNilSignKey = errors.New("nil sign key")

// ErrEmptyPeerID is returned if an empty peer ID was provided
var ErrEmptyPeerID = errors.New("empty peer ID")

// Topic is the handle for a pubsub topic
type Topic struct {
	p     *PubSub
	topic string

	// evtHandlerMux guards evtHandlers.
	evtHandlerMux sync.RWMutex
	evtHandlers   map[*TopicEventHandler]struct{}

	// mux guards closed; all public methods check closed under it.
	mux    sync.RWMutex
	closed bool
}

// String returns the topic associated with t
func (t *Topic) String() string {
	return t.topic
}
|
||||
|
||||
// SetScoreParams sets the topic score parameters if the pubsub router supports peer
|
||||
// scoring
|
||||
func (t *Topic) SetScoreParams(p *TopicScoreParams) error {
|
||||
err := p.validate()
|
||||
if err != nil {
|
||||
return fmt.Errorf("invalid topic score parameters: %w", err)
|
||||
}
|
||||
|
||||
t.mux.Lock()
|
||||
defer t.mux.Unlock()
|
||||
|
||||
if t.closed {
|
||||
return ErrTopicClosed
|
||||
}
|
||||
|
||||
result := make(chan error, 1)
|
||||
update := func() {
|
||||
gs, ok := t.p.rt.(*GossipSubRouter)
|
||||
if !ok {
|
||||
result <- fmt.Errorf("pubsub router is not gossipsub")
|
||||
return
|
||||
}
|
||||
|
||||
if gs.score == nil {
|
||||
result <- fmt.Errorf("peer scoring is not enabled in router")
|
||||
return
|
||||
}
|
||||
|
||||
err := gs.score.SetTopicScoreParams(t.topic, p)
|
||||
result <- err
|
||||
}
|
||||
|
||||
select {
|
||||
case t.p.eval <- update:
|
||||
err = <-result
|
||||
return err
|
||||
|
||||
case <-t.p.ctx.Done():
|
||||
return t.p.ctx.Err()
|
||||
}
|
||||
}
|
||||
|
||||
// EventHandler creates a handle for topic specific events
|
||||
// Multiple event handlers may be created and will operate independently of each other
|
||||
func (t *Topic) EventHandler(opts ...TopicEventHandlerOpt) (*TopicEventHandler, error) {
|
||||
t.mux.RLock()
|
||||
defer t.mux.RUnlock()
|
||||
if t.closed {
|
||||
return nil, ErrTopicClosed
|
||||
}
|
||||
|
||||
h := &TopicEventHandler{
|
||||
topic: t,
|
||||
err: nil,
|
||||
|
||||
evtLog: make(map[peer.ID]EventType),
|
||||
evtLogCh: make(chan struct{}, 1),
|
||||
}
|
||||
|
||||
for _, opt := range opts {
|
||||
err := opt(h)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
done := make(chan struct{}, 1)
|
||||
|
||||
select {
|
||||
case t.p.eval <- func() {
|
||||
tmap := t.p.topics[t.topic]
|
||||
for p := range tmap {
|
||||
h.evtLog[p] = PeerJoin
|
||||
}
|
||||
|
||||
t.evtHandlerMux.Lock()
|
||||
t.evtHandlers[h] = struct{}{}
|
||||
t.evtHandlerMux.Unlock()
|
||||
done <- struct{}{}
|
||||
}:
|
||||
case <-t.p.ctx.Done():
|
||||
return nil, t.p.ctx.Err()
|
||||
}
|
||||
|
||||
<-done
|
||||
|
||||
return h, nil
|
||||
}
|
||||
|
||||
func (t *Topic) sendNotification(evt PeerEvent) {
|
||||
t.evtHandlerMux.RLock()
|
||||
defer t.evtHandlerMux.RUnlock()
|
||||
|
||||
for h := range t.evtHandlers {
|
||||
h.sendNotification(evt)
|
||||
}
|
||||
}
|
||||
|
||||
// Subscribe returns a new Subscription for the topic.
|
||||
// Note that subscription is not an instantaneous operation. It may take some time
|
||||
// before the subscription is processed by the pubsub main loop and propagated to our peers.
|
||||
func (t *Topic) Subscribe(opts ...SubOpt) (*Subscription, error) {
|
||||
t.mux.RLock()
|
||||
defer t.mux.RUnlock()
|
||||
if t.closed {
|
||||
return nil, ErrTopicClosed
|
||||
}
|
||||
|
||||
sub := &Subscription{
|
||||
topic: t.topic,
|
||||
ctx: t.p.ctx,
|
||||
}
|
||||
|
||||
for _, opt := range opts {
|
||||
err := opt(sub)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
if sub.ch == nil {
|
||||
// apply the default size
|
||||
sub.ch = make(chan *Message, 32)
|
||||
}
|
||||
|
||||
out := make(chan *Subscription, 1)
|
||||
|
||||
t.p.disc.Discover(sub.topic)
|
||||
|
||||
select {
|
||||
case t.p.addSub <- &addSubReq{
|
||||
sub: sub,
|
||||
resp: out,
|
||||
}:
|
||||
case <-t.p.ctx.Done():
|
||||
return nil, t.p.ctx.Err()
|
||||
}
|
||||
|
||||
return <-out, nil
|
||||
}
|
||||
|
||||
// Relay enables message relaying for the topic and returns a reference
|
||||
// cancel function. Subsequent calls increase the reference counter.
|
||||
// To completely disable the relay, all references must be cancelled.
|
||||
func (t *Topic) Relay() (RelayCancelFunc, error) {
|
||||
t.mux.RLock()
|
||||
defer t.mux.RUnlock()
|
||||
if t.closed {
|
||||
return nil, ErrTopicClosed
|
||||
}
|
||||
|
||||
out := make(chan RelayCancelFunc, 1)
|
||||
|
||||
t.p.disc.Discover(t.topic)
|
||||
|
||||
select {
|
||||
case t.p.addRelay <- &addRelayReq{
|
||||
topic: t.topic,
|
||||
resp: out,
|
||||
}:
|
||||
case <-t.p.ctx.Done():
|
||||
return nil, t.p.ctx.Err()
|
||||
}
|
||||
|
||||
return <-out, nil
|
||||
}
|
||||
|
||||
// RouterReady is a function that decides if a router is ready to publish
type RouterReady func(rt PubSubRouter, topic string) (bool, error)

// ProvideKey is a function that provides a private key and its associated peer ID when publishing a new message
type ProvideKey func() (crypto.PrivKey, peer.ID)

// PublishOptions collects the optional parameters of a single Publish call.
type PublishOptions struct {
	// ready, when set, gates publication until the router reports readiness.
	ready RouterReady
	// customKey, when set, supplies the signing key and peer ID to use instead
	// of the PubSub defaults (ignored for local-only publications).
	customKey ProvideKey
	// local restricts delivery to in-process subscribers.
	local bool
}

// PubOpt configures a single Publish call.
type PubOpt func(pub *PublishOptions) error
|
||||
|
||||
// Publish publishes data to topic.
|
||||
func (t *Topic) Publish(ctx context.Context, data []byte, opts ...PubOpt) error {
|
||||
t.mux.RLock()
|
||||
defer t.mux.RUnlock()
|
||||
if t.closed {
|
||||
return ErrTopicClosed
|
||||
}
|
||||
|
||||
pid := t.p.signID
|
||||
key := t.p.signKey
|
||||
|
||||
pub := &PublishOptions{}
|
||||
for _, opt := range opts {
|
||||
err := opt(pub)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
if pub.customKey != nil && !pub.local {
|
||||
key, pid = pub.customKey()
|
||||
if key == nil {
|
||||
return ErrNilSignKey
|
||||
}
|
||||
if len(pid) == 0 {
|
||||
return ErrEmptyPeerID
|
||||
}
|
||||
}
|
||||
|
||||
m := &pb.Message{
|
||||
Data: data,
|
||||
Topic: &t.topic,
|
||||
From: nil,
|
||||
Seqno: nil,
|
||||
}
|
||||
if pid != "" {
|
||||
m.From = []byte(pid)
|
||||
m.Seqno = t.p.nextSeqno()
|
||||
}
|
||||
if key != nil {
|
||||
m.From = []byte(pid)
|
||||
err := signMessage(pid, key, m)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
if pub.ready != nil {
|
||||
if t.p.disc.discovery != nil {
|
||||
t.p.disc.Bootstrap(ctx, t.topic, pub.ready)
|
||||
} else {
|
||||
// TODO: we could likely do better than polling every 200ms.
|
||||
// For example, block this goroutine on a channel,
|
||||
// and check again whenever events tell us that the number of
|
||||
// peers has increased.
|
||||
var ticker *time.Ticker
|
||||
readyLoop:
|
||||
for {
|
||||
// Check if ready for publishing.
|
||||
// Similar to what disc.Bootstrap does.
|
||||
res := make(chan bool, 1)
|
||||
select {
|
||||
case t.p.eval <- func() {
|
||||
done, _ := pub.ready(t.p.rt, t.topic)
|
||||
res <- done
|
||||
}:
|
||||
if <-res {
|
||||
break readyLoop
|
||||
}
|
||||
case <-t.p.ctx.Done():
|
||||
return t.p.ctx.Err()
|
||||
case <-ctx.Done():
|
||||
return ctx.Err()
|
||||
}
|
||||
if ticker == nil {
|
||||
ticker = time.NewTicker(200 * time.Millisecond)
|
||||
defer ticker.Stop()
|
||||
}
|
||||
|
||||
select {
|
||||
case <-ticker.C:
|
||||
case <-ctx.Done():
|
||||
return fmt.Errorf("router is not ready: %w", ctx.Err())
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return t.p.val.PushLocal(&Message{m, "", t.p.host.ID(), nil, pub.local})
|
||||
}
|
||||
|
||||
// WithReadiness returns a publishing option for only publishing when the router is ready.
|
||||
// This option is not useful unless PubSub is also using WithDiscovery
|
||||
func WithReadiness(ready RouterReady) PubOpt {
|
||||
return func(pub *PublishOptions) error {
|
||||
pub.ready = ready
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// WithLocalPublication returns a publishing option to notify in-process subscribers only.
|
||||
// It prevents message publication to mesh peers.
|
||||
// Useful in edge cases where the msg needs to be only delivered to the in-process subscribers,
|
||||
// e.g. not to spam the network with outdated msgs.
|
||||
// Should not be used specifically for in-process pubsubing.
|
||||
func WithLocalPublication(local bool) PubOpt {
|
||||
return func(pub *PublishOptions) error {
|
||||
pub.local = local
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// WithSecretKeyAndPeerId returns a publishing option for providing a custom private key and its corresponding peer ID
|
||||
// This option is useful when we want to send messages from "virtual", never-connectable peers in the network
|
||||
func WithSecretKeyAndPeerId(key crypto.PrivKey, pid peer.ID) PubOpt {
|
||||
return func(pub *PublishOptions) error {
|
||||
pub.customKey = func() (crypto.PrivKey, peer.ID) {
|
||||
return key, pid
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// Close closes down the topic. Will return an error unless there are no active event handlers or subscriptions.
|
||||
// Does not error if the topic is already closed.
|
||||
func (t *Topic) Close() error {
|
||||
t.mux.Lock()
|
||||
defer t.mux.Unlock()
|
||||
if t.closed {
|
||||
return nil
|
||||
}
|
||||
|
||||
req := &rmTopicReq{t, make(chan error, 1)}
|
||||
|
||||
select {
|
||||
case t.p.rmTopic <- req:
|
||||
case <-t.p.ctx.Done():
|
||||
return t.p.ctx.Err()
|
||||
}
|
||||
|
||||
err := <-req.resp
|
||||
|
||||
if err == nil {
|
||||
t.closed = true
|
||||
}
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
// ListPeers returns a list of peers we are connected to in the given topic.
|
||||
func (t *Topic) ListPeers() []peer.ID {
|
||||
t.mux.RLock()
|
||||
defer t.mux.RUnlock()
|
||||
if t.closed {
|
||||
return []peer.ID{}
|
||||
}
|
||||
|
||||
return t.p.ListPeers(t.topic)
|
||||
}
|
||||
|
||||
// EventType distinguishes the kinds of per-topic peer events.
type EventType int

const (
	// PeerJoin indicates a peer has joined the topic.
	PeerJoin EventType = iota
	// PeerLeave indicates a peer has left the topic.
	PeerLeave
)

// TopicEventHandler is used to manage topic specific events. No Subscription is required to receive events.
type TopicEventHandler struct {
	topic *Topic
	err   error

	// evtLogMx protects evtLog and evtLogCh signaling.
	evtLogMx sync.Mutex
	// evtLog holds at most one pending event per peer; a Join followed by a
	// Leave for the same peer (or vice versa) cancels out and is dropped.
	evtLog map[peer.ID]EventType
	// evtLogCh carries a non-blocking "events available" signal to NextPeerEvent.
	evtLogCh chan struct{}
}

// TopicEventHandlerOpt configures a TopicEventHandler at creation time.
type TopicEventHandlerOpt func(t *TopicEventHandler) error

// PeerEvent is a single join/leave notification for a peer in a topic.
type PeerEvent struct {
	Type EventType
	Peer peer.ID
}
|
||||
|
||||
// Cancel closes the topic event handler
|
||||
func (t *TopicEventHandler) Cancel() {
|
||||
topic := t.topic
|
||||
t.err = fmt.Errorf("topic event handler cancelled by calling handler.Cancel()")
|
||||
|
||||
topic.evtHandlerMux.Lock()
|
||||
delete(topic.evtHandlers, t)
|
||||
t.topic.evtHandlerMux.Unlock()
|
||||
}
|
||||
|
||||
func (t *TopicEventHandler) sendNotification(evt PeerEvent) {
|
||||
t.evtLogMx.Lock()
|
||||
t.addToEventLog(evt)
|
||||
t.evtLogMx.Unlock()
|
||||
}
|
||||
|
||||
// addToEventLog assumes a lock has been taken to protect the event log
|
||||
func (t *TopicEventHandler) addToEventLog(evt PeerEvent) {
|
||||
e, ok := t.evtLog[evt.Peer]
|
||||
if !ok {
|
||||
t.evtLog[evt.Peer] = evt.Type
|
||||
// send signal that an event has been added to the event log
|
||||
select {
|
||||
case t.evtLogCh <- struct{}{}:
|
||||
default:
|
||||
}
|
||||
} else if e != evt.Type {
|
||||
delete(t.evtLog, evt.Peer)
|
||||
}
|
||||
}
|
||||
|
||||
// pullFromEventLog assumes a lock has been taken to protect the event log
|
||||
func (t *TopicEventHandler) pullFromEventLog() (PeerEvent, bool) {
|
||||
for k, v := range t.evtLog {
|
||||
evt := PeerEvent{Peer: k, Type: v}
|
||||
delete(t.evtLog, k)
|
||||
return evt, true
|
||||
}
|
||||
return PeerEvent{}, false
|
||||
}
|
||||
|
||||
// NextPeerEvent returns the next event regarding subscribed peers
// Guarantees: Peer Join and Peer Leave events for a given peer will fire in order.
// Unless a peer both Joins and Leaves before NextPeerEvent emits either event
// all events will eventually be received from NextPeerEvent.
func (t *TopicEventHandler) NextPeerEvent(ctx context.Context) (PeerEvent, error) {
	for {
		t.evtLogMx.Lock()
		evt, ok := t.pullFromEventLog()
		if ok {
			// make sure an event log signal is available if there are events in the event log
			// (the signal may have been consumed by this call even though more
			// events remain; re-arm it non-blockingly)
			if len(t.evtLog) > 0 {
				select {
				case t.evtLogCh <- struct{}{}:
				default:
				}
			}
			t.evtLogMx.Unlock()
			return evt, nil
		}
		t.evtLogMx.Unlock()

		// no pending event: block until one is signalled or the caller gives up
		select {
		case <-t.evtLogCh:
			continue
		case <-ctx.Done():
			return PeerEvent{}, ctx.Err()
		}
	}
}
|
||||
530
vendor/github.com/libp2p/go-libp2p-pubsub/trace.go
generated
vendored
Normal file
530
vendor/github.com/libp2p/go-libp2p-pubsub/trace.go
generated
vendored
Normal file
@@ -0,0 +1,530 @@
|
||||
package pubsub
|
||||
|
||||
import (
|
||||
"time"
|
||||
|
||||
"github.com/libp2p/go-libp2p/core/peer"
|
||||
"github.com/libp2p/go-libp2p/core/protocol"
|
||||
|
||||
pb "github.com/libp2p/go-libp2p-pubsub/pb"
|
||||
)
|
||||
|
||||
// EventTracer is a generic event tracer interface.
// This is a high level tracing interface which delivers tracing events, as defined by the protobuf
// schema in pb/trace.proto.
type EventTracer interface {
	Trace(evt *pb.TraceEvent)
}

// RawTracer is a low level tracing interface that allows an application to trace the internal
// operation of the pubsub subsystem.
//
// Note that the tracers are invoked synchronously, which means that application tracers must
// take care to not block or modify arguments.
//
// Warning: this interface is not fixed, we may be adding new methods as necessitated by the system
// in the future.
type RawTracer interface {
	// AddPeer is invoked when a new peer is added.
	AddPeer(p peer.ID, proto protocol.ID)
	// RemovePeer is invoked when a peer is removed.
	RemovePeer(p peer.ID)
	// Join is invoked when a new topic is joined
	Join(topic string)
	// Leave is invoked when a topic is abandoned
	Leave(topic string)
	// Graft is invoked when a new peer is grafted on the mesh (gossipsub)
	Graft(p peer.ID, topic string)
	// Prune is invoked when a peer is pruned from the mesh (gossipsub)
	Prune(p peer.ID, topic string)
	// ValidateMessage is invoked when a message first enters the validation pipeline.
	ValidateMessage(msg *Message)
	// DeliverMessage is invoked when a message is delivered
	DeliverMessage(msg *Message)
	// RejectMessage is invoked when a message is Rejected or Ignored.
	// The reason argument can be one of the named strings Reject*.
	RejectMessage(msg *Message, reason string)
	// DuplicateMessage is invoked when a duplicate message is dropped.
	DuplicateMessage(msg *Message)
	// ThrottlePeer is invoked when a peer is throttled by the peer gater.
	ThrottlePeer(p peer.ID)
	// RecvRPC is invoked when an incoming RPC is received.
	RecvRPC(rpc *RPC)
	// SendRPC is invoked when a RPC is sent.
	SendRPC(rpc *RPC, p peer.ID)
	// DropRPC is invoked when an outbound RPC is dropped, typically because of a queue full.
	DropRPC(rpc *RPC, p peer.ID)
	// UndeliverableMessage is invoked when the consumer of Subscribe is not reading messages fast enough and
	// the pressure release mechanism triggers, dropping messages.
	UndeliverableMessage(msg *Message)
}
|
||||
|
||||
// pubsubTracer fans trace events out to an optional high-level EventTracer
// and any number of synchronous RawTracers. A nil *pubsubTracer is valid and
// all its methods are no-ops.
type pubsubTracer struct {
	tracer EventTracer // may be nil; high-level protobuf event sink
	raw    []RawTracer // invoked synchronously on every traced operation
	pid    peer.ID     // our own peer ID, stamped on every emitted event
	idGen  *msgIDGenerator
}
|
||||
|
||||
func (t *pubsubTracer) PublishMessage(msg *Message) {
|
||||
if t == nil {
|
||||
return
|
||||
}
|
||||
|
||||
if t.tracer == nil {
|
||||
return
|
||||
}
|
||||
|
||||
now := time.Now().UnixNano()
|
||||
evt := &pb.TraceEvent{
|
||||
Type: pb.TraceEvent_PUBLISH_MESSAGE.Enum(),
|
||||
PeerID: []byte(t.pid),
|
||||
Timestamp: &now,
|
||||
PublishMessage: &pb.TraceEvent_PublishMessage{
|
||||
MessageID: []byte(t.idGen.ID(msg)),
|
||||
Topic: msg.Message.Topic,
|
||||
},
|
||||
}
|
||||
|
||||
t.tracer.Trace(evt)
|
||||
}
|
||||
|
||||
func (t *pubsubTracer) ValidateMessage(msg *Message) {
|
||||
if t == nil {
|
||||
return
|
||||
}
|
||||
|
||||
if msg.ReceivedFrom != t.pid {
|
||||
for _, tr := range t.raw {
|
||||
tr.ValidateMessage(msg)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (t *pubsubTracer) RejectMessage(msg *Message, reason string) {
|
||||
if t == nil {
|
||||
return
|
||||
}
|
||||
|
||||
if msg.ReceivedFrom != t.pid {
|
||||
for _, tr := range t.raw {
|
||||
tr.RejectMessage(msg, reason)
|
||||
}
|
||||
}
|
||||
|
||||
if t.tracer == nil {
|
||||
return
|
||||
}
|
||||
|
||||
now := time.Now().UnixNano()
|
||||
evt := &pb.TraceEvent{
|
||||
Type: pb.TraceEvent_REJECT_MESSAGE.Enum(),
|
||||
PeerID: []byte(t.pid),
|
||||
Timestamp: &now,
|
||||
RejectMessage: &pb.TraceEvent_RejectMessage{
|
||||
MessageID: []byte(t.idGen.ID(msg)),
|
||||
ReceivedFrom: []byte(msg.ReceivedFrom),
|
||||
Reason: &reason,
|
||||
Topic: msg.Topic,
|
||||
},
|
||||
}
|
||||
|
||||
t.tracer.Trace(evt)
|
||||
}
|
||||
|
||||
func (t *pubsubTracer) DuplicateMessage(msg *Message) {
|
||||
if t == nil {
|
||||
return
|
||||
}
|
||||
|
||||
if msg.ReceivedFrom != t.pid {
|
||||
for _, tr := range t.raw {
|
||||
tr.DuplicateMessage(msg)
|
||||
}
|
||||
}
|
||||
|
||||
if t.tracer == nil {
|
||||
return
|
||||
}
|
||||
|
||||
now := time.Now().UnixNano()
|
||||
evt := &pb.TraceEvent{
|
||||
Type: pb.TraceEvent_DUPLICATE_MESSAGE.Enum(),
|
||||
PeerID: []byte(t.pid),
|
||||
Timestamp: &now,
|
||||
DuplicateMessage: &pb.TraceEvent_DuplicateMessage{
|
||||
MessageID: []byte(t.idGen.ID(msg)),
|
||||
ReceivedFrom: []byte(msg.ReceivedFrom),
|
||||
Topic: msg.Topic,
|
||||
},
|
||||
}
|
||||
|
||||
t.tracer.Trace(evt)
|
||||
}
|
||||
|
||||
func (t *pubsubTracer) DeliverMessage(msg *Message) {
|
||||
if t == nil {
|
||||
return
|
||||
}
|
||||
|
||||
if msg.ReceivedFrom != t.pid {
|
||||
for _, tr := range t.raw {
|
||||
tr.DeliverMessage(msg)
|
||||
}
|
||||
}
|
||||
|
||||
if t.tracer == nil {
|
||||
return
|
||||
}
|
||||
|
||||
now := time.Now().UnixNano()
|
||||
evt := &pb.TraceEvent{
|
||||
Type: pb.TraceEvent_DELIVER_MESSAGE.Enum(),
|
||||
PeerID: []byte(t.pid),
|
||||
Timestamp: &now,
|
||||
DeliverMessage: &pb.TraceEvent_DeliverMessage{
|
||||
MessageID: []byte(t.idGen.ID(msg)),
|
||||
Topic: msg.Topic,
|
||||
ReceivedFrom: []byte(msg.ReceivedFrom),
|
||||
},
|
||||
}
|
||||
|
||||
t.tracer.Trace(evt)
|
||||
}
|
||||
|
||||
func (t *pubsubTracer) AddPeer(p peer.ID, proto protocol.ID) {
|
||||
if t == nil {
|
||||
return
|
||||
}
|
||||
|
||||
for _, tr := range t.raw {
|
||||
tr.AddPeer(p, proto)
|
||||
}
|
||||
|
||||
if t.tracer == nil {
|
||||
return
|
||||
}
|
||||
|
||||
protoStr := string(proto)
|
||||
now := time.Now().UnixNano()
|
||||
evt := &pb.TraceEvent{
|
||||
Type: pb.TraceEvent_ADD_PEER.Enum(),
|
||||
PeerID: []byte(t.pid),
|
||||
Timestamp: &now,
|
||||
AddPeer: &pb.TraceEvent_AddPeer{
|
||||
PeerID: []byte(p),
|
||||
Proto: &protoStr,
|
||||
},
|
||||
}
|
||||
|
||||
t.tracer.Trace(evt)
|
||||
}
|
||||
|
||||
func (t *pubsubTracer) RemovePeer(p peer.ID) {
|
||||
if t == nil {
|
||||
return
|
||||
}
|
||||
|
||||
for _, tr := range t.raw {
|
||||
tr.RemovePeer(p)
|
||||
}
|
||||
|
||||
if t.tracer == nil {
|
||||
return
|
||||
}
|
||||
|
||||
now := time.Now().UnixNano()
|
||||
evt := &pb.TraceEvent{
|
||||
Type: pb.TraceEvent_REMOVE_PEER.Enum(),
|
||||
PeerID: []byte(t.pid),
|
||||
Timestamp: &now,
|
||||
RemovePeer: &pb.TraceEvent_RemovePeer{
|
||||
PeerID: []byte(p),
|
||||
},
|
||||
}
|
||||
|
||||
t.tracer.Trace(evt)
|
||||
}
|
||||
|
||||
func (t *pubsubTracer) RecvRPC(rpc *RPC) {
|
||||
if t == nil {
|
||||
return
|
||||
}
|
||||
|
||||
for _, tr := range t.raw {
|
||||
tr.RecvRPC(rpc)
|
||||
}
|
||||
|
||||
if t.tracer == nil {
|
||||
return
|
||||
}
|
||||
|
||||
now := time.Now().UnixNano()
|
||||
evt := &pb.TraceEvent{
|
||||
Type: pb.TraceEvent_RECV_RPC.Enum(),
|
||||
PeerID: []byte(t.pid),
|
||||
Timestamp: &now,
|
||||
RecvRPC: &pb.TraceEvent_RecvRPC{
|
||||
ReceivedFrom: []byte(rpc.from),
|
||||
Meta: t.traceRPCMeta(rpc),
|
||||
},
|
||||
}
|
||||
|
||||
t.tracer.Trace(evt)
|
||||
}
|
||||
|
||||
func (t *pubsubTracer) SendRPC(rpc *RPC, p peer.ID) {
|
||||
if t == nil {
|
||||
return
|
||||
}
|
||||
|
||||
for _, tr := range t.raw {
|
||||
tr.SendRPC(rpc, p)
|
||||
}
|
||||
|
||||
if t.tracer == nil {
|
||||
return
|
||||
}
|
||||
|
||||
now := time.Now().UnixNano()
|
||||
evt := &pb.TraceEvent{
|
||||
Type: pb.TraceEvent_SEND_RPC.Enum(),
|
||||
PeerID: []byte(t.pid),
|
||||
Timestamp: &now,
|
||||
SendRPC: &pb.TraceEvent_SendRPC{
|
||||
SendTo: []byte(p),
|
||||
Meta: t.traceRPCMeta(rpc),
|
||||
},
|
||||
}
|
||||
|
||||
t.tracer.Trace(evt)
|
||||
}
|
||||
|
||||
func (t *pubsubTracer) DropRPC(rpc *RPC, p peer.ID) {
|
||||
if t == nil {
|
||||
return
|
||||
}
|
||||
|
||||
for _, tr := range t.raw {
|
||||
tr.DropRPC(rpc, p)
|
||||
}
|
||||
|
||||
if t.tracer == nil {
|
||||
return
|
||||
}
|
||||
|
||||
now := time.Now().UnixNano()
|
||||
evt := &pb.TraceEvent{
|
||||
Type: pb.TraceEvent_DROP_RPC.Enum(),
|
||||
PeerID: []byte(t.pid),
|
||||
Timestamp: &now,
|
||||
DropRPC: &pb.TraceEvent_DropRPC{
|
||||
SendTo: []byte(p),
|
||||
Meta: t.traceRPCMeta(rpc),
|
||||
},
|
||||
}
|
||||
|
||||
t.tracer.Trace(evt)
|
||||
}
|
||||
|
||||
func (t *pubsubTracer) UndeliverableMessage(msg *Message) {
|
||||
if t == nil {
|
||||
return
|
||||
}
|
||||
|
||||
for _, tr := range t.raw {
|
||||
tr.UndeliverableMessage(msg)
|
||||
}
|
||||
}
|
||||
|
||||
// traceRPCMeta converts an RPC into its protobuf metadata representation:
// message IDs and topics for published messages, subscription changes, and
// (if present) the gossipsub control section (IHAVE/IWANT/GRAFT/PRUNE).
func (t *pubsubTracer) traceRPCMeta(rpc *RPC) *pb.TraceEvent_RPCMeta {
	rpcMeta := new(pb.TraceEvent_RPCMeta)

	// published message metadata: ID + topic only, not payloads
	var msgs []*pb.TraceEvent_MessageMeta
	for _, m := range rpc.Publish {
		msgs = append(msgs, &pb.TraceEvent_MessageMeta{
			MessageID: []byte(t.idGen.RawID(m)),
			Topic:     m.Topic,
		})
	}
	rpcMeta.Messages = msgs

	// subscription changes carried by this RPC
	var subs []*pb.TraceEvent_SubMeta
	for _, sub := range rpc.Subscriptions {
		subs = append(subs, &pb.TraceEvent_SubMeta{
			Subscribe: sub.Subscribe,
			Topic:     sub.Topicid,
		})
	}
	rpcMeta.Subscription = subs

	if rpc.Control != nil {
		// IHAVE: advertised message IDs per topic
		var ihave []*pb.TraceEvent_ControlIHaveMeta
		for _, ctl := range rpc.Control.Ihave {
			var mids [][]byte
			for _, mid := range ctl.MessageIDs {
				mids = append(mids, []byte(mid))
			}
			ihave = append(ihave, &pb.TraceEvent_ControlIHaveMeta{
				Topic:      ctl.TopicID,
				MessageIDs: mids,
			})
		}

		// IWANT: requested message IDs
		var iwant []*pb.TraceEvent_ControlIWantMeta
		for _, ctl := range rpc.Control.Iwant {
			var mids [][]byte
			for _, mid := range ctl.MessageIDs {
				mids = append(mids, []byte(mid))
			}
			iwant = append(iwant, &pb.TraceEvent_ControlIWantMeta{
				MessageIDs: mids,
			})
		}

		// GRAFT: topics being grafted
		var graft []*pb.TraceEvent_ControlGraftMeta
		for _, ctl := range rpc.Control.Graft {
			graft = append(graft, &pb.TraceEvent_ControlGraftMeta{
				Topic: ctl.TopicID,
			})
		}

		// PRUNE: topics being pruned, plus the peer exchange candidates
		var prune []*pb.TraceEvent_ControlPruneMeta
		for _, ctl := range rpc.Control.Prune {
			peers := make([][]byte, 0, len(ctl.Peers))
			for _, pi := range ctl.Peers {
				peers = append(peers, pi.PeerID)
			}
			prune = append(prune, &pb.TraceEvent_ControlPruneMeta{
				Topic: ctl.TopicID,
				Peers: peers,
			})
		}

		rpcMeta.Control = &pb.TraceEvent_ControlMeta{
			Ihave: ihave,
			Iwant: iwant,
			Graft: graft,
			Prune: prune,
		}
	}

	return rpcMeta
}
|
||||
|
||||
func (t *pubsubTracer) Join(topic string) {
|
||||
if t == nil {
|
||||
return
|
||||
}
|
||||
|
||||
for _, tr := range t.raw {
|
||||
tr.Join(topic)
|
||||
}
|
||||
|
||||
if t.tracer == nil {
|
||||
return
|
||||
}
|
||||
|
||||
now := time.Now().UnixNano()
|
||||
evt := &pb.TraceEvent{
|
||||
Type: pb.TraceEvent_JOIN.Enum(),
|
||||
PeerID: []byte(t.pid),
|
||||
Timestamp: &now,
|
||||
Join: &pb.TraceEvent_Join{
|
||||
Topic: &topic,
|
||||
},
|
||||
}
|
||||
|
||||
t.tracer.Trace(evt)
|
||||
}
|
||||
|
||||
func (t *pubsubTracer) Leave(topic string) {
|
||||
if t == nil {
|
||||
return
|
||||
}
|
||||
|
||||
for _, tr := range t.raw {
|
||||
tr.Leave(topic)
|
||||
}
|
||||
|
||||
if t.tracer == nil {
|
||||
return
|
||||
}
|
||||
|
||||
now := time.Now().UnixNano()
|
||||
evt := &pb.TraceEvent{
|
||||
Type: pb.TraceEvent_LEAVE.Enum(),
|
||||
PeerID: []byte(t.pid),
|
||||
Timestamp: &now,
|
||||
Leave: &pb.TraceEvent_Leave{
|
||||
Topic: &topic,
|
||||
},
|
||||
}
|
||||
|
||||
t.tracer.Trace(evt)
|
||||
}
|
||||
|
||||
func (t *pubsubTracer) Graft(p peer.ID, topic string) {
|
||||
if t == nil {
|
||||
return
|
||||
}
|
||||
|
||||
for _, tr := range t.raw {
|
||||
tr.Graft(p, topic)
|
||||
}
|
||||
|
||||
if t.tracer == nil {
|
||||
return
|
||||
}
|
||||
|
||||
now := time.Now().UnixNano()
|
||||
evt := &pb.TraceEvent{
|
||||
Type: pb.TraceEvent_GRAFT.Enum(),
|
||||
PeerID: []byte(t.pid),
|
||||
Timestamp: &now,
|
||||
Graft: &pb.TraceEvent_Graft{
|
||||
PeerID: []byte(p),
|
||||
Topic: &topic,
|
||||
},
|
||||
}
|
||||
|
||||
t.tracer.Trace(evt)
|
||||
}
|
||||
|
||||
func (t *pubsubTracer) Prune(p peer.ID, topic string) {
|
||||
if t == nil {
|
||||
return
|
||||
}
|
||||
|
||||
for _, tr := range t.raw {
|
||||
tr.Prune(p, topic)
|
||||
}
|
||||
|
||||
if t.tracer == nil {
|
||||
return
|
||||
}
|
||||
|
||||
now := time.Now().UnixNano()
|
||||
evt := &pb.TraceEvent{
|
||||
Type: pb.TraceEvent_PRUNE.Enum(),
|
||||
PeerID: []byte(t.pid),
|
||||
Timestamp: &now,
|
||||
Prune: &pb.TraceEvent_Prune{
|
||||
PeerID: []byte(p),
|
||||
Topic: &topic,
|
||||
},
|
||||
}
|
||||
|
||||
t.tracer.Trace(evt)
|
||||
}
|
||||
|
||||
func (t *pubsubTracer) ThrottlePeer(p peer.ID) {
|
||||
if t == nil {
|
||||
return
|
||||
}
|
||||
|
||||
for _, tr := range t.raw {
|
||||
tr.ThrottlePeer(p)
|
||||
}
|
||||
}
|
||||
303
vendor/github.com/libp2p/go-libp2p-pubsub/tracer.go
generated
vendored
Normal file
303
vendor/github.com/libp2p/go-libp2p-pubsub/tracer.go
generated
vendored
Normal file
@@ -0,0 +1,303 @@
|
||||
package pubsub
|
||||
|
||||
import (
|
||||
"compress/gzip"
|
||||
"context"
|
||||
"encoding/json"
|
||||
"io"
|
||||
"os"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
pb "github.com/libp2p/go-libp2p-pubsub/pb"
|
||||
|
||||
"github.com/libp2p/go-libp2p/core/host"
|
||||
"github.com/libp2p/go-libp2p/core/network"
|
||||
"github.com/libp2p/go-libp2p/core/peer"
|
||||
"github.com/libp2p/go-libp2p/core/peerstore"
|
||||
"github.com/libp2p/go-libp2p/core/protocol"
|
||||
|
||||
"github.com/libp2p/go-msgio/protoio"
|
||||
)
|
||||
|
||||
var TraceBufferSize = 1 << 16 // 64K ought to be enough for everyone; famous last words.
var MinTraceBatchSize = 16    // minimum events to accumulate before a remote batch is written

// rejection reasons
const (
	// NOTE: the misspelling "Blacklsted" is part of the exported API and is
	// preserved for backwards compatibility.
	RejectBlacklstedPeer      = "blacklisted peer"
	RejectBlacklistedSource   = "blacklisted source"
	RejectMissingSignature    = "missing signature"
	RejectUnexpectedSignature = "unexpected signature"
	RejectUnexpectedAuthInfo  = "unexpected auth info"
	RejectInvalidSignature    = "invalid signature"
	RejectValidationQueueFull = "validation queue full"
	RejectValidationThrottled = "validation throttled"
	RejectValidationFailed    = "validation failed"
	RejectValidationIgnored   = "validation ignored"
	RejectSelfOrigin          = "self originated message"
)
|
||||
|
||||
// basicTracer is the shared buffering core of the concrete tracers: Trace
// appends events to buf under mx and signals the writer goroutine via ch.
type basicTracer struct {
	ch     chan struct{}    // non-blocking wakeup signal for the writer loop
	mx     sync.Mutex       // guards buf and closed
	buf    []*pb.TraceEvent // pending events, drained by the writer loop
	lossy  bool             // if set, drop events once buf exceeds TraceBufferSize
	closed bool
}
|
||||
|
||||
func (t *basicTracer) Trace(evt *pb.TraceEvent) {
|
||||
t.mx.Lock()
|
||||
defer t.mx.Unlock()
|
||||
|
||||
if t.closed {
|
||||
return
|
||||
}
|
||||
|
||||
if t.lossy && len(t.buf) > TraceBufferSize {
|
||||
log.Debug("trace buffer overflow; dropping trace event")
|
||||
} else {
|
||||
t.buf = append(t.buf, evt)
|
||||
}
|
||||
|
||||
select {
|
||||
case t.ch <- struct{}{}:
|
||||
default:
|
||||
}
|
||||
}
|
||||
|
||||
func (t *basicTracer) Close() {
|
||||
t.mx.Lock()
|
||||
defer t.mx.Unlock()
|
||||
if !t.closed {
|
||||
t.closed = true
|
||||
close(t.ch)
|
||||
}
|
||||
}
|
||||
|
||||
// JSONTracer is a tracer that writes events to a file, encoded in ndjson.
type JSONTracer struct {
	basicTracer
	w io.WriteCloser // destination stream, closed by the writer loop on shutdown
}

// NewJSONTracer creates a new JSONTracer writing traces to file.
func NewJSONTracer(file string) (*JSONTracer, error) {
	return OpenJSONTracer(file, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0644)
}
|
||||
|
||||
// OpenJSONTracer creates a new JSONTracer, with explicit control of OpenFile flags and permissions.
|
||||
func OpenJSONTracer(file string, flags int, perm os.FileMode) (*JSONTracer, error) {
|
||||
f, err := os.OpenFile(file, flags, perm)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
tr := &JSONTracer{w: f, basicTracer: basicTracer{ch: make(chan struct{}, 1)}}
|
||||
go tr.doWrite()
|
||||
|
||||
return tr, nil
|
||||
}
|
||||
|
||||
// doWrite is the background writer loop: it drains the shared buffer by
// swapping it with a private scratch slice (so the lock is held only for the
// swap) and encodes each event as one JSON line. When the signal channel is
// closed, it performs a final flush and closes the output.
func (t *JSONTracer) doWrite() {
	var buf []*pb.TraceEvent
	enc := json.NewEncoder(t.w)
	for {
		_, ok := <-t.ch

		// swap the shared buffer with our (emptied) scratch buffer
		t.mx.Lock()
		tmp := t.buf
		t.buf = buf[:0]
		buf = tmp
		t.mx.Unlock()

		for i, evt := range buf {
			err := enc.Encode(evt)
			if err != nil {
				log.Warnf("error writing event trace: %s", err.Error())
			}
			// nil out the entry so the event can be garbage collected
			buf[i] = nil
		}

		// ok is false once Close() has closed the channel: shut down
		if !ok {
			t.w.Close()
			return
		}
	}
}

var _ EventTracer = (*JSONTracer)(nil)
|
||||
|
||||
// PBTracer is a tracer that writes events to a file, as delimited protobufs.
type PBTracer struct {
	basicTracer
	w io.WriteCloser // destination stream, closed by the writer loop on shutdown
}

// NewPBTracer creates a new PBTracer writing traces to file.
func NewPBTracer(file string) (*PBTracer, error) {
	return OpenPBTracer(file, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0644)
}
|
||||
|
||||
// OpenPBTracer creates a new PBTracer, with explicit control of OpenFile flags and permissions.
|
||||
func OpenPBTracer(file string, flags int, perm os.FileMode) (*PBTracer, error) {
|
||||
f, err := os.OpenFile(file, flags, perm)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
tr := &PBTracer{w: f, basicTracer: basicTracer{ch: make(chan struct{}, 1)}}
|
||||
go tr.doWrite()
|
||||
|
||||
return tr, nil
|
||||
}
|
||||
|
||||
// doWrite is the background writer loop: it drains the shared buffer by
// swapping it with a private scratch slice and writes each event as a
// length-delimited protobuf. When the signal channel is closed, it performs a
// final flush and closes the output.
func (t *PBTracer) doWrite() {
	var buf []*pb.TraceEvent
	w := protoio.NewDelimitedWriter(t.w)
	for {
		_, ok := <-t.ch

		// swap the shared buffer with our (emptied) scratch buffer
		t.mx.Lock()
		tmp := t.buf
		t.buf = buf[:0]
		buf = tmp
		t.mx.Unlock()

		for i, evt := range buf {
			err := w.WriteMsg(evt)
			if err != nil {
				log.Warnf("error writing event trace: %s", err.Error())
			}
			// nil out the entry so the event can be garbage collected
			buf[i] = nil
		}

		// ok is false once Close() has closed the channel: shut down
		if !ok {
			t.w.Close()
			return
		}
	}
}

var _ EventTracer = (*PBTracer)(nil)
|
||||
|
||||
// RemoteTracerProtoID is the libp2p protocol ID spoken to the remote trace collector.
const RemoteTracerProtoID = protocol.ID("/libp2p/pubsub/tracer/1.0.0")

// RemoteTracer is a tracer that sends trace events to a remote peer
type RemoteTracer struct {
	basicTracer
	ctx  context.Context
	host host.Host
	peer peer.ID // the remote collector we stream batches to
}

// NewRemoteTracer constructs a RemoteTracer, tracing to the peer identified by pi
func NewRemoteTracer(ctx context.Context, host host.Host, pi peer.AddrInfo) (*RemoteTracer, error) {
	// lossy: under backpressure we prefer dropping trace events over
	// unbounded buffering (see basicTracer.Trace)
	tr := &RemoteTracer{ctx: ctx, host: host, peer: pi.ID, basicTracer: basicTracer{ch: make(chan struct{}, 1), lossy: true}}
	// pin the collector's addresses so we can (re)dial it at any time
	host.Peerstore().AddAddrs(pi.ID, pi.Addrs, peerstore.PermanentAddrTTL)
	go tr.doWrite()
	return tr, nil
}
|
||||
|
||||
func (t *RemoteTracer) doWrite() {
|
||||
var buf []*pb.TraceEvent
|
||||
|
||||
s, err := t.openStream()
|
||||
if err != nil {
|
||||
log.Debugf("error opening remote tracer stream: %s", err.Error())
|
||||
return
|
||||
}
|
||||
|
||||
var batch pb.TraceEventBatch
|
||||
|
||||
gzipW := gzip.NewWriter(s)
|
||||
w := protoio.NewDelimitedWriter(gzipW)
|
||||
|
||||
for {
|
||||
_, ok := <-t.ch
|
||||
|
||||
// deadline for batch accumulation
|
||||
deadline := time.Now().Add(time.Second)
|
||||
|
||||
t.mx.Lock()
|
||||
for len(t.buf) < MinTraceBatchSize && time.Now().Before(deadline) {
|
||||
t.mx.Unlock()
|
||||
time.Sleep(100 * time.Millisecond)
|
||||
t.mx.Lock()
|
||||
}
|
||||
|
||||
tmp := t.buf
|
||||
t.buf = buf[:0]
|
||||
buf = tmp
|
||||
t.mx.Unlock()
|
||||
|
||||
if len(buf) == 0 {
|
||||
goto end
|
||||
}
|
||||
|
||||
batch.Batch = buf
|
||||
|
||||
err = w.WriteMsg(&batch)
|
||||
if err != nil {
|
||||
log.Debugf("error writing trace event batch: %s", err)
|
||||
goto end
|
||||
}
|
||||
|
||||
err = gzipW.Flush()
|
||||
if err != nil {
|
||||
log.Debugf("error flushin gzip stream: %s", err)
|
||||
goto end
|
||||
}
|
||||
|
||||
end:
|
||||
// nil out the buffer to gc consumed events
|
||||
for i := range buf {
|
||||
buf[i] = nil
|
||||
}
|
||||
|
||||
if !ok {
|
||||
if err != nil {
|
||||
s.Reset()
|
||||
} else {
|
||||
gzipW.Close()
|
||||
s.Close()
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
s.Reset()
|
||||
s, err = t.openStream()
|
||||
if err != nil {
|
||||
log.Debugf("error opening remote tracer stream: %s", err.Error())
|
||||
return
|
||||
}
|
||||
|
||||
gzipW.Reset(s)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (t *RemoteTracer) openStream() (network.Stream, error) {
|
||||
for {
|
||||
ctx, cancel := context.WithTimeout(t.ctx, time.Minute)
|
||||
s, err := t.host.NewStream(ctx, t.peer, RemoteTracerProtoID)
|
||||
cancel()
|
||||
if err != nil {
|
||||
if t.ctx.Err() != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// wait a minute and try again, to account for transient server downtime
|
||||
select {
|
||||
case <-time.After(time.Minute):
|
||||
continue
|
||||
case <-t.ctx.Done():
|
||||
return nil, t.ctx.Err()
|
||||
}
|
||||
}
|
||||
|
||||
return s, nil
|
||||
}
|
||||
}
|
||||
|
||||
var _ EventTracer = (*RemoteTracer)(nil)
|
||||
590
vendor/github.com/libp2p/go-libp2p-pubsub/validation.go
generated
vendored
Normal file
590
vendor/github.com/libp2p/go-libp2p-pubsub/validation.go
generated
vendored
Normal file
@@ -0,0 +1,590 @@
|
||||
package pubsub
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"runtime"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/libp2p/go-libp2p/core/peer"
|
||||
)
|
||||
|
||||
const (
	defaultValidateQueueSize   = 32   // capacity of the front-end validation queue
	defaultValidateConcurrency = 1024 // default per-topic concurrent validation limit
	defaultValidateThrottle    = 8192 // global cap on active validation goroutines
)

// ValidationError is an error that may be signalled from message publication when the message
// fails validation
type ValidationError struct {
	Reason string
}

// Error returns the rejection reason as the error message.
func (e ValidationError) Error() string {
	return e.Reason
}

// Validator is a function that validates a message with a binary decision: accept or reject.
type Validator func(context.Context, peer.ID, *Message) bool

// ValidatorEx is an extended validation function that validates a message with an enumerated decision
type ValidatorEx func(context.Context, peer.ID, *Message) ValidationResult

// ValidationResult represents the decision of an extended validator
type ValidationResult int

const (
	// ValidationAccept is a validation decision that indicates a valid message that should be accepted and
	// delivered to the application and forwarded to the network.
	ValidationAccept = ValidationResult(0)
	// ValidationReject is a validation decision that indicates an invalid message that should not be
	// delivered to the application or forwarded to the network. Furthermore the peer that forwarded
	// the message should be penalized by peer scoring routers.
	ValidationReject = ValidationResult(1)
	// ValidationIgnore is a validation decision that indicates a message that should be ignored: it will
	// be neither delivered to the application nor forwarded to the network. However, in contrast to
	// ValidationReject, the peer that forwarded the message must not be penalized by peer scoring routers.
	ValidationIgnore = ValidationResult(2)
	// validationThrottled is an internal result indicating the validation was
	// dropped due to throttling; never returned to user validators.
	validationThrottled = ValidationResult(-1)
)

// ValidatorOpt is an option for RegisterTopicValidator.
type ValidatorOpt func(addVal *addValReq) error
|
||||
|
||||
// validation represents the validator pipeline.
// The validator pipeline performs signature validation and runs a
// sequence of user-configured validators per-topic. It is possible to
// adjust various concurrency parameters, such as the number of
// workers and the max number of simultaneous validations. The user
// can also attach inline validators that will be executed
// synchronously; this may be useful to prevent superfluous
// context-switching for lightweight tasks.
type validation struct {
	p *PubSub

	tracer *pubsubTracer

	// mx protects the validator map
	mx sync.Mutex
	// topicVals tracks per topic validators
	topicVals map[string]*validatorImpl

	// defaultVals tracks default validators applicable to all topics
	defaultVals []*validatorImpl

	// validateQ is the front-end to the validation pipeline
	validateQ chan *validateReq

	// validateThrottle limits the number of active validation goroutines
	validateThrottle chan struct{}

	// this is the number of synchronous validation workers
	validateWorkers int
}
|
||||
|
||||
// validateReq is a single unit of work for the validation workers: the
// validators to run, the message's source, and the message itself.
type validateReq struct {
	vals []*validatorImpl
	src  peer.ID
	msg  *Message
}

// validatorImpl is the internal representation of a topic validator.
type validatorImpl struct {
	topic            string
	validate         ValidatorEx
	validateTimeout  time.Duration // 0 means no per-validation timeout
	validateThrottle chan struct{} // per-validator concurrency limiter
	validateInline   bool          // run synchronously instead of spawning a goroutine
}

// addValReq is an async request to add a topic validator; the outcome is
// reported on resp.
type addValReq struct {
	topic    string
	validate interface{} // a Validator or ValidatorEx (or compatible func type)
	timeout  time.Duration
	throttle int
	inline   bool
	resp     chan error
}

// rmValReq is an async request to remove a topic validator; the outcome is
// reported on resp.
type rmValReq struct {
	topic string
	resp  chan error
}
|
||||
|
||||
// newValidation creates a new validation pipeline
|
||||
func newValidation() *validation {
|
||||
return &validation{
|
||||
topicVals: make(map[string]*validatorImpl),
|
||||
validateQ: make(chan *validateReq, defaultValidateQueueSize),
|
||||
validateThrottle: make(chan struct{}, defaultValidateThrottle),
|
||||
validateWorkers: runtime.NumCPU(),
|
||||
}
|
||||
}
|
||||
|
||||
// Start attaches the validation pipeline to a pubsub instance and starts background
// workers
func (v *validation) Start(p *PubSub) {
	v.p = p
	v.tracer = p.tracer
	// one synchronous worker goroutine per CPU (see newValidation)
	for i := 0; i < v.validateWorkers; i++ {
		go v.validateWorker()
	}
}
|
||||
|
||||
// AddValidator adds a new validator
|
||||
func (v *validation) AddValidator(req *addValReq) {
|
||||
val, err := v.makeValidator(req)
|
||||
if err != nil {
|
||||
req.resp <- err
|
||||
return
|
||||
}
|
||||
|
||||
v.mx.Lock()
|
||||
defer v.mx.Unlock()
|
||||
|
||||
topic := val.topic
|
||||
|
||||
_, ok := v.topicVals[topic]
|
||||
if ok {
|
||||
req.resp <- fmt.Errorf("duplicate validator for topic %s", topic)
|
||||
return
|
||||
}
|
||||
|
||||
v.topicVals[topic] = val
|
||||
req.resp <- nil
|
||||
}
|
||||
|
||||
// makeValidator converts the dynamically-typed validate function in req into a
// validatorImpl, adapting a boolean Validator into a ValidatorEx when needed.
// It returns an error for unrecognized function types.
func (v *validation) makeValidator(req *addValReq) (*validatorImpl, error) {
	// adapter: map a binary Validator onto the enumerated ValidatorEx contract
	makeValidatorEx := func(v Validator) ValidatorEx {
		return func(ctx context.Context, p peer.ID, msg *Message) ValidationResult {
			if v(ctx, p, msg) {
				return ValidationAccept
			} else {
				return ValidationReject
			}
		}
	}

	// accept both the named types and their underlying func types
	var validator ValidatorEx
	switch v := req.validate.(type) {
	case func(ctx context.Context, p peer.ID, msg *Message) bool:
		validator = makeValidatorEx(Validator(v))
	case Validator:
		validator = makeValidatorEx(v)

	case func(ctx context.Context, p peer.ID, msg *Message) ValidationResult:
		validator = ValidatorEx(v)
	case ValidatorEx:
		validator = v

	default:
		topic := req.topic
		if req.topic == "" {
			topic = "(default)"
		}
		return nil, fmt.Errorf("unknown validator type for topic %s; must be an instance of Validator or ValidatorEx", topic)
	}

	val := &validatorImpl{
		topic:            req.topic,
		validate:         validator,
		validateTimeout:  0,
		validateThrottle: make(chan struct{}, defaultValidateConcurrency),
		validateInline:   req.inline,
	}

	// apply optional overrides from the request
	if req.timeout > 0 {
		val.validateTimeout = req.timeout
	}

	if req.throttle > 0 {
		val.validateThrottle = make(chan struct{}, req.throttle)
	}

	return val, nil
}
|
||||
|
||||
// RemoveValidator removes an existing validator
|
||||
func (v *validation) RemoveValidator(req *rmValReq) {
|
||||
v.mx.Lock()
|
||||
defer v.mx.Unlock()
|
||||
|
||||
topic := req.topic
|
||||
|
||||
_, ok := v.topicVals[topic]
|
||||
if ok {
|
||||
delete(v.topicVals, topic)
|
||||
req.resp <- nil
|
||||
} else {
|
||||
req.resp <- fmt.Errorf("no validator for topic %s", topic)
|
||||
}
|
||||
}
|
||||
|
||||
// PushLocal synchronously pushes a locally published message and performs applicable
|
||||
// validations.
|
||||
// Returns an error if validation fails
|
||||
func (v *validation) PushLocal(msg *Message) error {
|
||||
v.p.tracer.PublishMessage(msg)
|
||||
|
||||
err := v.p.checkSigningPolicy(msg)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
vals := v.getValidators(msg)
|
||||
return v.validate(vals, msg.ReceivedFrom, msg, true)
|
||||
}
|
||||
|
||||
// Push pushes a message into the validation pipeline.
|
||||
// It returns true if the message can be forwarded immediately without validation.
|
||||
func (v *validation) Push(src peer.ID, msg *Message) bool {
|
||||
vals := v.getValidators(msg)
|
||||
|
||||
if len(vals) > 0 || msg.Signature != nil {
|
||||
select {
|
||||
case v.validateQ <- &validateReq{vals, src, msg}:
|
||||
default:
|
||||
log.Debugf("message validation throttled: queue full; dropping message from %s", src)
|
||||
v.tracer.RejectMessage(msg, RejectValidationQueueFull)
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
return true
|
||||
}
|
||||
|
||||
// getValidators returns all validators that apply to a given message
|
||||
func (v *validation) getValidators(msg *Message) []*validatorImpl {
|
||||
v.mx.Lock()
|
||||
defer v.mx.Unlock()
|
||||
|
||||
var vals []*validatorImpl
|
||||
vals = append(vals, v.defaultVals...)
|
||||
|
||||
topic := msg.GetTopic()
|
||||
|
||||
val, ok := v.topicVals[topic]
|
||||
if !ok {
|
||||
return vals
|
||||
}
|
||||
|
||||
return append(vals, val)
|
||||
}
|
||||
|
||||
// validateWorker is an active goroutine performing inline validation
|
||||
func (v *validation) validateWorker() {
|
||||
for {
|
||||
select {
|
||||
case req := <-v.validateQ:
|
||||
v.validate(req.vals, req.src, req.msg, false)
|
||||
case <-v.p.ctx.Done():
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// validate performs validation and only sends the message if all validators succeed
|
||||
func (v *validation) validate(vals []*validatorImpl, src peer.ID, msg *Message, synchronous bool) error {
|
||||
// If signature verification is enabled, but signing is disabled,
|
||||
// the Signature is required to be nil upon receiving the message in PubSub.pushMsg.
|
||||
if msg.Signature != nil {
|
||||
if !v.validateSignature(msg) {
|
||||
log.Debugf("message signature validation failed; dropping message from %s", src)
|
||||
v.tracer.RejectMessage(msg, RejectInvalidSignature)
|
||||
return ValidationError{Reason: RejectInvalidSignature}
|
||||
}
|
||||
}
|
||||
|
||||
// we can mark the message as seen now that we have verified the signature
|
||||
// and avoid invoking user validators more than once
|
||||
id := v.p.idGen.ID(msg)
|
||||
if !v.p.markSeen(id) {
|
||||
v.tracer.DuplicateMessage(msg)
|
||||
return nil
|
||||
} else {
|
||||
v.tracer.ValidateMessage(msg)
|
||||
}
|
||||
|
||||
var inline, async []*validatorImpl
|
||||
for _, val := range vals {
|
||||
if val.validateInline || synchronous {
|
||||
inline = append(inline, val)
|
||||
} else {
|
||||
async = append(async, val)
|
||||
}
|
||||
}
|
||||
|
||||
// apply inline (synchronous) validators
|
||||
result := ValidationAccept
|
||||
loop:
|
||||
for _, val := range inline {
|
||||
switch val.validateMsg(v.p.ctx, src, msg) {
|
||||
case ValidationAccept:
|
||||
case ValidationReject:
|
||||
result = ValidationReject
|
||||
break loop
|
||||
case ValidationIgnore:
|
||||
result = ValidationIgnore
|
||||
}
|
||||
}
|
||||
|
||||
if result == ValidationReject {
|
||||
log.Debugf("message validation failed; dropping message from %s", src)
|
||||
v.tracer.RejectMessage(msg, RejectValidationFailed)
|
||||
return ValidationError{Reason: RejectValidationFailed}
|
||||
}
|
||||
|
||||
// apply async validators
|
||||
if len(async) > 0 {
|
||||
select {
|
||||
case v.validateThrottle <- struct{}{}:
|
||||
go func() {
|
||||
v.doValidateTopic(async, src, msg, result)
|
||||
<-v.validateThrottle
|
||||
}()
|
||||
default:
|
||||
log.Debugf("message validation throttled; dropping message from %s", src)
|
||||
v.tracer.RejectMessage(msg, RejectValidationThrottled)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
if result == ValidationIgnore {
|
||||
v.tracer.RejectMessage(msg, RejectValidationIgnored)
|
||||
return ValidationError{Reason: RejectValidationIgnored}
|
||||
}
|
||||
|
||||
// no async validators, accepted message, send it!
|
||||
select {
|
||||
case v.p.sendMsg <- msg:
|
||||
return nil
|
||||
case <-v.p.ctx.Done():
|
||||
return v.p.ctx.Err()
|
||||
}
|
||||
}
|
||||
|
||||
func (v *validation) validateSignature(msg *Message) bool {
|
||||
err := verifyMessageSignature(msg.Message)
|
||||
if err != nil {
|
||||
log.Debugf("signature verification error: %s", err.Error())
|
||||
return false
|
||||
}
|
||||
|
||||
return true
|
||||
}
|
||||
|
||||
func (v *validation) doValidateTopic(vals []*validatorImpl, src peer.ID, msg *Message, r ValidationResult) {
|
||||
result := v.validateTopic(vals, src, msg)
|
||||
|
||||
if result == ValidationAccept && r != ValidationAccept {
|
||||
result = r
|
||||
}
|
||||
|
||||
switch result {
|
||||
case ValidationAccept:
|
||||
v.p.sendMsg <- msg
|
||||
case ValidationReject:
|
||||
log.Debugf("message validation failed; dropping message from %s", src)
|
||||
v.tracer.RejectMessage(msg, RejectValidationFailed)
|
||||
return
|
||||
case ValidationIgnore:
|
||||
log.Debugf("message validation punted; ignoring message from %s", src)
|
||||
v.tracer.RejectMessage(msg, RejectValidationIgnored)
|
||||
return
|
||||
case validationThrottled:
|
||||
log.Debugf("message validation throttled; ignoring message from %s", src)
|
||||
v.tracer.RejectMessage(msg, RejectValidationThrottled)
|
||||
|
||||
default:
|
||||
// BUG: this would be an internal programming error, so a panic seems appropiate.
|
||||
panic(fmt.Errorf("unexpected validation result: %d", result))
|
||||
}
|
||||
}
|
||||
|
||||
func (v *validation) validateTopic(vals []*validatorImpl, src peer.ID, msg *Message) ValidationResult {
|
||||
if len(vals) == 1 {
|
||||
return v.validateSingleTopic(vals[0], src, msg)
|
||||
}
|
||||
|
||||
ctx, cancel := context.WithCancel(v.p.ctx)
|
||||
defer cancel()
|
||||
|
||||
rch := make(chan ValidationResult, len(vals))
|
||||
rcount := 0
|
||||
|
||||
for _, val := range vals {
|
||||
rcount++
|
||||
|
||||
select {
|
||||
case val.validateThrottle <- struct{}{}:
|
||||
go func(val *validatorImpl) {
|
||||
rch <- val.validateMsg(ctx, src, msg)
|
||||
<-val.validateThrottle
|
||||
}(val)
|
||||
|
||||
default:
|
||||
log.Debugf("validation throttled for topic %s", val.topic)
|
||||
rch <- validationThrottled
|
||||
}
|
||||
}
|
||||
|
||||
result := ValidationAccept
|
||||
loop:
|
||||
for i := 0; i < rcount; i++ {
|
||||
switch <-rch {
|
||||
case ValidationAccept:
|
||||
case ValidationReject:
|
||||
result = ValidationReject
|
||||
break loop
|
||||
case ValidationIgnore:
|
||||
// throttled validation has the same effect, but takes precedence over Ignore as it is not
|
||||
// known whether the throttled validator would have signaled rejection.
|
||||
if result != validationThrottled {
|
||||
result = ValidationIgnore
|
||||
}
|
||||
case validationThrottled:
|
||||
result = validationThrottled
|
||||
}
|
||||
}
|
||||
|
||||
return result
|
||||
}
|
||||
|
||||
// fast path for single topic validation that avoids the extra goroutine
|
||||
func (v *validation) validateSingleTopic(val *validatorImpl, src peer.ID, msg *Message) ValidationResult {
|
||||
select {
|
||||
case val.validateThrottle <- struct{}{}:
|
||||
res := val.validateMsg(v.p.ctx, src, msg)
|
||||
<-val.validateThrottle
|
||||
return res
|
||||
|
||||
default:
|
||||
log.Debugf("validation throttled for topic %s", val.topic)
|
||||
return validationThrottled
|
||||
}
|
||||
}
|
||||
|
||||
func (val *validatorImpl) validateMsg(ctx context.Context, src peer.ID, msg *Message) ValidationResult {
|
||||
start := time.Now()
|
||||
defer func() {
|
||||
log.Debugf("validation done; took %s", time.Since(start))
|
||||
}()
|
||||
|
||||
if val.validateTimeout > 0 {
|
||||
var cancel func()
|
||||
ctx, cancel = context.WithTimeout(ctx, val.validateTimeout)
|
||||
defer cancel()
|
||||
}
|
||||
|
||||
r := val.validate(ctx, src, msg)
|
||||
switch r {
|
||||
case ValidationAccept:
|
||||
fallthrough
|
||||
case ValidationReject:
|
||||
fallthrough
|
||||
case ValidationIgnore:
|
||||
return r
|
||||
|
||||
default:
|
||||
log.Warnf("Unexpected result from validator: %d; ignoring message", r)
|
||||
return ValidationIgnore
|
||||
}
|
||||
}
|
||||
|
||||
// / Options
|
||||
// WithDefaultValidator adds a validator that applies to all topics by default; it can be used
|
||||
// more than once and add multiple validators. Having a defult validator does not inhibit registering
|
||||
// a per topic validator.
|
||||
func WithDefaultValidator(val interface{}, opts ...ValidatorOpt) Option {
|
||||
return func(ps *PubSub) error {
|
||||
addVal := &addValReq{
|
||||
validate: val,
|
||||
}
|
||||
|
||||
for _, opt := range opts {
|
||||
err := opt(addVal)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
val, err := ps.val.makeValidator(addVal)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
ps.val.defaultVals = append(ps.val.defaultVals, val)
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// WithValidateQueueSize sets the buffer of validate queue. Defaults to 32.
|
||||
// When queue is full, validation is throttled and new messages are dropped.
|
||||
func WithValidateQueueSize(n int) Option {
|
||||
return func(ps *PubSub) error {
|
||||
if n > 0 {
|
||||
ps.val.validateQ = make(chan *validateReq, n)
|
||||
return nil
|
||||
}
|
||||
return fmt.Errorf("validate queue size must be > 0")
|
||||
}
|
||||
}
|
||||
|
||||
// WithValidateThrottle sets the upper bound on the number of active validation
|
||||
// goroutines across all topics. The default is 8192.
|
||||
func WithValidateThrottle(n int) Option {
|
||||
return func(ps *PubSub) error {
|
||||
ps.val.validateThrottle = make(chan struct{}, n)
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// WithValidateWorkers sets the number of synchronous validation worker goroutines.
|
||||
// Defaults to NumCPU.
|
||||
//
|
||||
// The synchronous validation workers perform signature validation, apply inline
|
||||
// user validators, and schedule asynchronous user validators.
|
||||
// You can adjust this parameter to devote less cpu time to synchronous validation.
|
||||
func WithValidateWorkers(n int) Option {
|
||||
return func(ps *PubSub) error {
|
||||
if n > 0 {
|
||||
ps.val.validateWorkers = n
|
||||
return nil
|
||||
}
|
||||
return fmt.Errorf("number of validation workers must be > 0")
|
||||
}
|
||||
}
|
||||
|
||||
// WithValidatorTimeout is an option that sets a timeout for an (asynchronous) topic validator.
|
||||
// By default there is no timeout in asynchronous validators.
|
||||
func WithValidatorTimeout(timeout time.Duration) ValidatorOpt {
|
||||
return func(addVal *addValReq) error {
|
||||
addVal.timeout = timeout
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// WithValidatorConcurrency is an option that sets the topic validator throttle.
|
||||
// This controls the number of active validation goroutines for the topic; the default is 1024.
|
||||
func WithValidatorConcurrency(n int) ValidatorOpt {
|
||||
return func(addVal *addValReq) error {
|
||||
addVal.throttle = n
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// WithValidatorInline is an option that sets the validation disposition to synchronous:
|
||||
// it will be executed inline in validation front-end, without spawning a new goroutine.
|
||||
// This is suitable for simple or cpu-bound validators that do not block.
|
||||
func WithValidatorInline(inline bool) ValidatorOpt {
|
||||
return func(addVal *addValReq) error {
|
||||
addVal.inline = inline
|
||||
return nil
|
||||
}
|
||||
}
|
||||
101
vendor/github.com/libp2p/go-libp2p-pubsub/validation_builtin.go
generated
vendored
Normal file
101
vendor/github.com/libp2p/go-libp2p-pubsub/validation_builtin.go
generated
vendored
Normal file
@@ -0,0 +1,101 @@
|
||||
package pubsub
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/binary"
|
||||
"sync"
|
||||
|
||||
"github.com/libp2p/go-libp2p/core/peer"
|
||||
)
|
||||
|
||||
// PeerMetadataStore is an interface for storing and retrieving per peer metadata.
type PeerMetadataStore interface {
	// Get retrieves the metadata associated with a peer;
	// it should return nil (and no error) if there is no metadata associated with the peer.
	Get(context.Context, peer.ID) ([]byte, error)
	// Put sets the metadata associated with a peer.
	Put(context.Context, peer.ID, []byte) error
}
|
||||
|
||||
// BasicSeqnoValidator is a basic validator, usable as a default validator, that ignores replayed
// messages outside the seen cache window. The validator uses the message seqno as a peer-specific
// nonce to decide whether the message should be propagated, comparing to the maximal nonce store
// in the peer metadata store. This is useful to ensure that there can be no infinitely propagating
// messages in the network regardless of the seen cache span and network diameter.
// It requires that pubsub is instantiated with a strict message signing policy and that seqnos
// are not disabled, ie it doesn't support anonymous mode.
//
// Warning: See https://github.com/libp2p/rust-libp2p/issues/3453
// TL;DR: rust is currently violating the spec by issuing a random seqno, which creates an
// interoperability hazard. We expect this issue to be addressed in the not so distant future,
// but keep this in mind if you are in a mixed environment with (older) rust nodes.
type BasicSeqnoValidator struct {
	// mx guards the read-check-update cycle on meta performed by validate;
	// reads take the shared lock, the commit takes the exclusive lock.
	mx   sync.RWMutex
	// meta persists the largest seqno observed per peer.
	meta PeerMetadataStore
}
|
||||
|
||||
// NewBasicSeqnoValidator constructs a BasicSeqnoValidator using the given PeerMetadataStore.
// The returned ValidatorEx is the validator's validate method bound to a shared instance.
func NewBasicSeqnoValidator(meta PeerMetadataStore) ValidatorEx {
	val := &BasicSeqnoValidator{
		meta: meta,
	}

	return val.validate
}
|
||||
|
||||
func (v *BasicSeqnoValidator) validate(ctx context.Context, _ peer.ID, m *Message) ValidationResult {
|
||||
p := m.GetFrom()
|
||||
|
||||
v.mx.RLock()
|
||||
nonceBytes, err := v.meta.Get(ctx, p)
|
||||
v.mx.RUnlock()
|
||||
|
||||
if err != nil {
|
||||
log.Warn("error retrieving peer nonce: %s", err)
|
||||
return ValidationIgnore
|
||||
}
|
||||
|
||||
var nonce uint64
|
||||
if len(nonceBytes) > 0 {
|
||||
nonce = binary.BigEndian.Uint64(nonceBytes)
|
||||
}
|
||||
|
||||
var seqno uint64
|
||||
seqnoBytes := m.GetSeqno()
|
||||
if len(seqnoBytes) > 0 {
|
||||
seqno = binary.BigEndian.Uint64(seqnoBytes)
|
||||
}
|
||||
|
||||
// compare against the largest seen nonce
|
||||
if seqno <= nonce {
|
||||
return ValidationIgnore
|
||||
}
|
||||
|
||||
// get the nonce and compare again with an exclusive lock before commiting (cf concurrent validation)
|
||||
v.mx.Lock()
|
||||
defer v.mx.Unlock()
|
||||
|
||||
nonceBytes, err = v.meta.Get(ctx, p)
|
||||
if err != nil {
|
||||
log.Warn("error retrieving peer nonce: %s", err)
|
||||
return ValidationIgnore
|
||||
}
|
||||
|
||||
if len(nonceBytes) > 0 {
|
||||
nonce = binary.BigEndian.Uint64(nonceBytes)
|
||||
}
|
||||
|
||||
if seqno <= nonce {
|
||||
return ValidationIgnore
|
||||
}
|
||||
|
||||
// update the nonce
|
||||
nonceBytes = make([]byte, 8)
|
||||
binary.BigEndian.PutUint64(nonceBytes, seqno)
|
||||
|
||||
err = v.meta.Put(ctx, p, nonceBytes)
|
||||
if err != nil {
|
||||
log.Warn("error storing peer nonce: %s", err)
|
||||
}
|
||||
|
||||
return ValidationAccept
|
||||
}
|
||||
2
vendor/github.com/libp2p/go-libp2p/.codecov.yml
generated
vendored
Normal file
2
vendor/github.com/libp2p/go-libp2p/.codecov.yml
generated
vendored
Normal file
@@ -0,0 +1,2 @@
|
||||
github_checks:
|
||||
annotations: false
|
||||
6
vendor/github.com/libp2p/go-libp2p/.gitignore
generated
vendored
Normal file
6
vendor/github.com/libp2p/go-libp2p/.gitignore
generated
vendored
Normal file
@@ -0,0 +1,6 @@
|
||||
*.swp
|
||||
.idea
|
||||
*.qlog
|
||||
*.sqlog
|
||||
*.qlog.zst
|
||||
*.sqlog.zst
|
||||
293
vendor/github.com/libp2p/go-libp2p/CHANGELOG.md
generated
vendored
Normal file
293
vendor/github.com/libp2p/go-libp2p/CHANGELOG.md
generated
vendored
Normal file
@@ -0,0 +1,293 @@
|
||||
# Table Of Contents <!-- omit in toc -->
|
||||
- [v0.28.0](#v0280)
|
||||
- [v0.27.0](#v0270)
|
||||
- [v0.26.4](#v0264)
|
||||
- [v0.26.3](#v0263)
|
||||
- [v0.26.2](#v0262)
|
||||
- [v0.26.1](#v0261)
|
||||
- [v0.26.0](#v0260)
|
||||
- [v0.25.1](#v0251)
|
||||
- [v0.25.0](#v0250)
|
||||
|
||||
# [v0.28.0](https://github.com/libp2p/go-libp2p/releases/tag/v0.28.0)
|
||||
|
||||
## 🔦 Highlights <!-- omit in toc -->
|
||||
|
||||
### Smart Dialing <!-- omit in toc -->
|
||||
|
||||
This release introduces smart dialing logic. Currently, libp2p dials all addresses of a remote peer in parallel, and
|
||||
aborts all outstanding dials as soon as the first one succeeds.
|
||||
Dialing many addresses in parallel creates a lot of churn on the client side, and unnecessary load on the network and
|
||||
on the server side, and is heavily discouraged by the networking community (see [RFC 8305](https://www.rfc-editor.org/rfc/rfc8305) for example).
|
||||
|
||||
When connecting to a peer we first determine the order to dial its addresses. This ranking logic considers a number of corner cases
|
||||
described in detail in the documentation of the swarm package (`swarm.DefaultDialRanker`).
|
||||
At a high level, this is what happens:
|
||||
* If a peer offers a WebTransport and a QUIC address (on the same IP:port), the QUIC address is preferred.
|
||||
* If a peer has a QUIC and a TCP address, the QUIC address is dialed first. Only if the connection attempt doesn't succeed within 250ms, a TCP connection is started.
|
||||
|
||||
Our measurements on the IPFS network show that for >90% of established libp2p connections, the first connection attempt succeeds,
|
||||
leading to a dramatic decrease in the number of aborted connection attempts.
|
||||
|
||||
We also added new metrics to the swarm Grafana dashboard, showing:
|
||||
* The number of connection attempts it took to establish a connection
|
||||
* The delay introduced by the ranking logic
|
||||
|
||||
This feature should be safe to enable for nodes running in data centers and for most nodes in home networks.
|
||||
However, there are some (mostly home and corporate networks) that block all UDP traffic. If enabled, the current implementation
|
||||
of the smart dialing logic will lead to a regression, since it prefers QUIC addresses over TCP addresses. Nodes would still be
|
||||
able to connect, but connection establishment of the TCP connection would be delayed by 250ms.
|
||||
|
||||
In a future release (see #1605 for details), we will introduce a feature called blackhole detection. By observing the outcome of
|
||||
QUIC connection attempts, we can determine if UDP traffic is blocked (namely, if all QUIC connection attempts fail), and stop
|
||||
dialing QUIC in this case altogether. Once this detection logic is in place, smart dialing will be enabled by default.
|
||||
|
||||
### More Metrics! <!-- omit in toc -->
|
||||
Since the last release, we've added metrics for:
|
||||
* [Holepunching](https://github.com/libp2p/go-libp2p/pull/2246)
|
||||
* Smart Dialing (see above)
|
||||
|
||||
### WebTransport <!-- omit in toc -->
|
||||
* [#2251](https://github.com/libp2p/go-libp2p/pull/2251): Infer public WebTransport address from `quic-v1` addresses if both transports are using the same port for both quic-v1 and WebTransport addresses.
|
||||
* [#2271](https://github.com/libp2p/go-libp2p/pull/2271): Only add certificate hashes to WebTransport multiaddress if listening on WebTransport
|
||||
|
||||
## Housekeeping updates <!-- omit in toc -->
|
||||
* Identify
|
||||
* [#2303](https://github.com/libp2p/go-libp2p/pull/2303): Don't send default protocol version
|
||||
* Prevent polluting PeerStore with local addrs
|
||||
* [#2325](https://github.com/libp2p/go-libp2p/pull/2325): Don't save signed peer records
|
||||
* [#2300](https://github.com/libp2p/go-libp2p/pull/2300): Filter received addresses based on the node's remote address
|
||||
* WebSocket
|
||||
* [#2280](https://github.com/libp2p/go-libp2p/pull/2280): Reverted back to the Gorilla library for WebSocket
|
||||
* NAT
|
||||
* [#2248](https://github.com/libp2p/go-libp2p/pull/2248): Move NAT mapping logic out of the host
|
||||
|
||||
## 🐞 Bugfixes <!-- omit in toc -->
|
||||
* Identify
|
||||
* [Reject signed peer records on peer ID mismatch](https://github.com/libp2p/go-libp2p/commit/8d771355b41297623e05b04a865d029a2522a074)
|
||||
* [#2299](https://github.com/libp2p/go-libp2p/pull/2299): Avoid spuriously pushing updates
|
||||
* Swarm
|
||||
* [#2322](https://github.com/libp2p/go-libp2p/pull/2322): Dedup addresses to dial
|
||||
* [#2284](https://github.com/libp2p/go-libp2p/pull/2284): Change maps with multiaddress keys to use strings
|
||||
* QUIC
|
||||
* [#2262](https://github.com/libp2p/go-libp2p/pull/2262): Prioritize listen connections for reuse
|
||||
* [#2276](https://github.com/libp2p/go-libp2p/pull/2276): Don't panic when quic-go's accept call errors
|
||||
* [#2263](https://github.com/libp2p/go-libp2p/pull/2263): Fix race condition when generating random holepunch packet
|
||||
|
||||
**Full Changelog**: https://github.com/libp2p/go-libp2p/compare/v0.27.0...v0.28.0
|
||||
|
||||
# [v0.27.0](https://github.com/libp2p/go-libp2p/releases/tag/v0.27.0)
|
||||
|
||||
### Breaking Changes <!-- omit in toc -->
|
||||
|
||||
* The `LocalPrivateKey` method was removed from the `network.Conn` interface. [#2144](https://github.com/libp2p/go-libp2p/pull/2144)
|
||||
|
||||
## 🔦 Highlights <!-- omit in toc -->
|
||||
|
||||
### Additional metrics <!-- omit in toc -->
|
||||
Since the last release, we've added metrics for:
|
||||
* [Relay Service](https://github.com/libp2p/go-libp2p/pull/2154): RequestStatus, RequestCounts, RejectionReasons for Reservation and Connection Requests,
|
||||
ConnectionDuration, BytesTransferred, Relay Service Status.
|
||||
* [Autorelay](https://github.com/libp2p/go-libp2p/pull/2185): relay finder status, reservation request outcomes, current reservations, candidate circuit v2 support, current candidates, relay addresses updated, num relay address, and scheduled work times
|
||||
|
||||
## 🐞 Bugfixes <!-- omit in toc -->
|
||||
|
||||
* autonat: don't change status on dial request refused [2225](https://github.com/libp2p/go-libp2p/pull/2225)
|
||||
* relaysvc: fix flaky TestReachabilityChangeEvent [2215](https://github.com/libp2p/go-libp2p/pull/2215)
|
||||
* basichost: prevent duplicate dials [2196](https://github.com/libp2p/go-libp2p/pull/2196)
|
||||
* websocket: don't set a WSS multiaddr for accepted unencrypted conns [2199](https://github.com/libp2p/go-libp2p/pull/2199)
|
||||
* identify: Fix IdentifyWait when Connected events happen out of order [2173](https://github.com/libp2p/go-libp2p/pull/2173)
|
||||
* circuitv2: cleanup relay service properly [2164](https://github.com/libp2p/go-libp2p/pull/2164)
|
||||
|
||||
**Full Changelog**: https://github.com/libp2p/go-libp2p/compare/v0.26.4...v0.27.0
|
||||
|
||||
# [v0.26.4](https://github.com/libp2p/go-libp2p/releases/tag/v0.26.4)
|
||||
|
||||
This patch release fixes a busy-looping happening inside AutoRelay on private nodes, see [2208](https://github.com/libp2p/go-libp2p/pull/2208).
|
||||
|
||||
**Full Changelog**: https://github.com/libp2p/go-libp2p/compare/v0.26.0...v0.26.4
|
||||
|
||||
# [v0.26.3](https://github.com/libp2p/go-libp2p/releases/tag/v0.26.3)
|
||||
|
||||
* rcmgr: fix JSON marshalling of ResourceManagerStat peer map [2156](https://github.com/libp2p/go-libp2p/pull/2156)
|
||||
* websocket: Don't limit message sizes in the websocket reader [2193](https://github.com/libp2p/go-libp2p/pull/2193)
|
||||
|
||||
**Full Changelog**: https://github.com/libp2p/go-libp2p/compare/v0.26.0...v0.26.3
|
||||
|
||||
# [v0.26.2](https://github.com/libp2p/go-libp2p/releases/tag/v0.26.2)
|
||||
|
||||
This patch release fixes two bugs:
|
||||
* A panic in WebTransport: https://github.com/quic-go/webtransport-go/releases/tag/v0.5.2
|
||||
* Incorrect accounting of accepted connections in the swarm metrics: [#2147](https://github.com/libp2p/go-libp2p/pull/2147)
|
||||
|
||||
**Full Changelog**: https://github.com/libp2p/go-libp2p/compare/v0.26.0...v0.26.2
|
||||
|
||||
# v0.26.1
|
||||
|
||||
This version was retracted due to errors when publishing the release.
|
||||
|
||||
# [v0.26.0](https://github.com/libp2p/go-libp2p/releases/tag/v0.26.0)
|
||||
|
||||
## 🔦 Highlights <!-- omit in toc -->
|
||||
|
||||
### Circuit Relay Changes <!-- omit in toc -->
|
||||
|
||||
#### [Removed Circuit Relay v1](https://github.com/libp2p/go-libp2p/pull/2107) <!-- omit in toc -->
|
||||
|
||||
We've decided to remove support for Circuit Relay v1 in this release. v1 Relays have been retired a few months ago. Notably, running the Relay v1 protocol was expensive and resulted in only a small number of nodes in the network. Users had to either manually configure these nodes as static relays, or discover them from the DHT.
|
||||
Furthermore, rust-libp2p [has dropped support](https://github.com/libp2p/rust-libp2p/pull/2549) and js-libp2p [is dropping support](https://github.com/libp2p/js-libp2p/pull/1533) for Relay v1.
|
||||
|
||||
Support for Relay v2 was first added in [late 2021 in v0.16.0](https://github.com/libp2p/go-libp2p/releases/tag/v0.16.0). With Circuit Relay v2 it became cheap to run (limited) relays. Public nodes also started the relay service by default. There's now a massive number of Relay v2 nodes on the IPFS network, and they don't advertise their service to the DHT any more. Because there's now so many of these nodes, connecting to just a small number of nodes (e.g. by joining the DHT), a node is statistically guaranteed to connect to some relays.
|
||||
|
||||
#### [Unlimited Relay v2](https://github.com/libp2p/go-libp2p/pull/2125) <!-- omit in toc -->
|
||||
|
||||
In conjunction with removing relay v1, we also added an option to Circuit Relay v2 to disable limits.
|
||||
This is done by enabling `WithInfiniteLimits`. When enabled, this allows users to have a drop-in replacement for Relay v1 with Relay v2.
|
||||
|
||||
### Additional metrics <!-- omit in toc -->
|
||||
|
||||
Since the last release, we've added additional metrics to different components.
|
||||
Metrics were added to:
|
||||
* [AutoNat](https://github.com/libp2p/go-libp2p/pull/2086): Current Reachability Status and Confidence, Client and Server DialResponses, Server DialRejections. The dashboard is [available here](https://github.com/libp2p/go-libp2p/blob/master/dashboards/autonat/autonat.json).
|
||||
* Swarm:
|
||||
- [Early Muxer Selection](https://github.com/libp2p/go-libp2p/pull/2119): Added early_muxer label indicating whether a connection was established using early muxer selection.
|
||||
- [IP Version](https://github.com/libp2p/go-libp2p/pull/2114): Added ip_version label to connection metrics
|
||||
* Identify:
|
||||
- Metrics for Identify, IdentifyPush, PushesTriggered (https://github.com/libp2p/go-libp2p/pull/2069)
|
||||
- Address Count, Protocol Count, Connection IDPush Support (https://github.com/libp2p/go-libp2p/pull/2126)
|
||||
|
||||
|
||||
We also migrated the metric dashboards to a top-level [dashboards](https://github.com/libp2p/go-libp2p/tree/master/dashboards) directory.
|
||||
|
||||
## 🐞 Bugfixes <!-- omit in toc -->
|
||||
|
||||
### AutoNat <!-- omit in toc -->
|
||||
* [Fixed a bug](https://github.com/libp2p/go-libp2p/issues/2091) where AutoNat would emit events when the observed address has changed even though the node reachability hadn't changed.
|
||||
|
||||
### Relay Manager <!-- omit in toc -->
|
||||
* [Fixed a bug](https://github.com/libp2p/go-libp2p/pull/2093) where the Relay Manager started a new relay even though the previous reachability was `Public` or if a relay already existed.
|
||||
|
||||
### [Stop sending detailed error messages on closing QUIC connections](https://github.com/libp2p/go-libp2p/pull/2112) <!-- omit in toc -->
|
||||
|
||||
Users reported seeing confusing error messages and could not determine the root cause or if the error was from a local or remote peer:
|
||||
|
||||
```{12D... Application error 0x0: conn-27571160: system: cannot reserve inbound connection: resource limit exceeded}```
|
||||
|
||||
This error occurred when a connection had been made with a remote peer but the remote peer dropped the connection (due to it exceeding limits).
|
||||
This was actually an `Application error` emitted by `quic-go` and it was a bug in go-libp2p that we sent the whole message.
|
||||
For now, we decided to stop sending this confusing error message. In the future, we will report such errors via [error codes](https://github.com/libp2p/specs/issues/479).
|
||||
|
||||
**Full Changelog**: https://github.com/libp2p/go-libp2p/compare/v0.25.1...v0.26.0
|
||||
|
||||
# [v0.25.1](https://github.com/libp2p/go-libp2p/releases/tag/v0.25.1)
|
||||
|
||||
Fix some test-utils used by https://github.com/libp2p/go-libp2p-kad-dht
|
||||
|
||||
* mocknet: Start host in mocknet by @MarcoPolo in https://github.com/libp2p/go-libp2p/pull/2078
|
||||
* chore: update go-multistream by @MarcoPolo in https://github.com/libp2p/go-libp2p/pull/2081
|
||||
|
||||
**Full Changelog**: https://github.com/libp2p/go-libp2p/compare/v0.25.0...v0.25.1
|
||||
|
||||
# [v0.25.0](https://github.com/libp2p/go-libp2p/releases/tag/v0.25.0)
|
||||
|
||||
## 🔦 Highlights <!-- omit in toc -->
|
||||
|
||||
### Metrics <!-- omit in toc -->
|
||||
|
||||
We've started instrumenting the entire stack. In this release, we're adding metrics for:
|
||||
* the swarm: tracking incoming and outgoing connections, transports, security protocols and stream multiplexers in use: (https://github.com/libp2p/go-libp2p/blob/master/dashboards/swarm/swarm.json)
|
||||
* the event bus: tracking how different events are propagated through the stack and to external consumers (https://github.com/libp2p/go-libp2p/blob/master/dashboards/eventbus/eventbus.json)
|
||||
|
||||
Our metrics effort is still ongoing, see https://github.com/libp2p/go-libp2p/issues/1356 for progress. We'll add metrics and dashboards for more libp2p components in a future release.
|
||||
|
||||
### Switching to Google's official Protobuf compiler <!-- omit in toc -->
|
||||
|
||||
So far, we were using GoGo Protobuf to compile our Protobuf definitions to Go code. However, this library was deprecated in October last year: https://twitter.com/awalterschulze/status/1584553056100057088. We [benchmarked](https://github.com/libp2p/go-libp2p/issues/1976#issuecomment-1371527732) serialization and deserialization, and found that it's (only) 20% slower than GoGo. Since the vast majority of go-libp2p's CPU time is spent in code paths other than Protobuf handling, switching to the official compiler seemed like a worthwhile tradeoff.
|
||||
|
||||
### Removal of OpenSSL <!-- omit in toc -->
|
||||
|
||||
Before this release, go-libp2p had an option to use OpenSSL bindings for certain cryptographic primitives, mostly to speed up the generation of signatures and their verification. When building go-libp2p using `go build`, we'd use the standard library crypto packages. OpenSSL was only used when passing in a build tag: `go build -tags openssl`.
|
||||
Maintaining our own fork of the long unmaintained [go-openssl package](https://github.com/libp2p/go-openssl) has proven to place a larger than expected maintenance burden on the libp2p stewards, and when we recently discovered a range of new bugs ([this](https://github.com/libp2p/go-openssl/issues/38) and [this](https://github.com/libp2p/go-libp2p/issues/1892) and [this](https://github.com/libp2p/go-libp2p/issues/1951)), we decided to re-evaluate if this code path is really worth it. The results surprised us, it turns out that:
|
||||
* The Go standard library is faster than OpenSSL for all key types that are not RSA.
|
||||
* Verifying RSA signatures is as fast as Ed25519 signatures using the Go standard library, and even faster in OpenSSL.
|
||||
* Generating RSA signatures is painfully slow, both using Go standard library crypto and using OpenSSL (but even slower using Go standard library).
|
||||
|
||||
Now the good news is, that if your node is not using an RSA key, it will never create any RSA signatures (it might need to verify them though, when it connects to a node that uses RSA keys). If you're concerned about CPU performance, it's a good idea to avoid RSA keys (the same applies to bandwidth, RSA keys are huge!). Even for nodes using RSA keys, it turns out that generating the signatures is not a significant part of their CPU load, as verified by profiling one of Kubo's bootstrap nodes.
|
||||
|
||||
We therefore concluded that it's safe to drop this code path altogether, and thereby reduce our maintenance burden.
|
||||
|
||||
### New Resource Manager types <!-- omit in toc -->
|
||||
|
||||
* Introduces a new type `LimitVal` which can explicitly specify "use default", "unlimited", "block all", as well as any positive number. The zero value of `LimitVal` (the value when you create the object in Go) is "Use default".
|
||||
* The JSON marshalling of this is straightforward.
|
||||
* Introduces a new `ResourceLimits` type which uses `LimitVal` instead of ints so it can encode the above for the resources.
|
||||
* Changes `LimitConfig` to `PartialLimitConfig` and uses `ResourceLimits`. This along with the marshalling changes means you can now marshal the fact that some resource limit is set to block all.
|
||||
* Because the default is to use the defaults, this avoids the footgun of initializing the resource manager with 0 limits (that would block everything).
|
||||
|
||||
In general, you can go from a resource config with defaults to a concrete one with `.Build()`. e.g. `ResourceLimits.Build() => BaseLimit`, `PartialLimitConfig.Build() => ConcreteLimitConfig`, `LimitVal.Build() => int`. See PR #2000 for more details.
|
||||
|
||||
If you're using the defaults for the resource manager, there should be no changes needed.
|
||||
|
||||
### Other Breaking Changes <!-- omit in toc -->
|
||||
|
||||
We've cleaned up our API to consistently use `protocol.ID` for libp2p and application protocols. Specifically, this means that the peer store now uses `protocol.ID`s, and the host's `SetStreamHandler` as well.
|
||||
|
||||
## What's Changed <!-- omit in toc -->
|
||||
* chore: use generic LRU cache by @muXxer in https://github.com/libp2p/go-libp2p/pull/1980
|
||||
* core/crypto: drop all OpenSSL code paths by @marten-seemann in https://github.com/libp2p/go-libp2p/pull/1953
|
||||
* add WebTransport to the list of default transports by @marten-seemann in https://github.com/libp2p/go-libp2p/pull/1915
|
||||
* identify: remove old code targeting Go 1.17 by @marten-seemann in https://github.com/libp2p/go-libp2p/pull/1964
|
||||
* core: remove introspection package by @marten-seemann in https://github.com/libp2p/go-libp2p/pull/1978
|
||||
* identify: remove support for Identify Delta by @marten-seemann in https://github.com/libp2p/go-libp2p/pull/1975
|
||||
* roadmap: remove optimizations of the TCP-based handshake by @marten-seemann in https://github.com/libp2p/go-libp2p/pull/1959
|
||||
* circuitv2: correctly set the transport in the ConnectionState by @marten-seemann in https://github.com/libp2p/go-libp2p/pull/1972
|
||||
* switch to Google's Protobuf library, make protobufs compile with go generate by @marten-seemann in https://github.com/libp2p/go-libp2p/pull/1979
|
||||
* ci: run go generate as part of the go-check workflow by @marten-seemann in https://github.com/libp2p/go-libp2p/pull/1986
|
||||
* ci: use GitHub token to install protoc by @marten-seemann in https://github.com/libp2p/go-libp2p/pull/1996
|
||||
* feat: add some users to the readme by @p-shahi in https://github.com/libp2p/go-libp2p/pull/1981
|
||||
* CI: Fast multidimensional Interop tests by @MarcoPolo in https://github.com/libp2p/go-libp2p/pull/1991
|
||||
* Fix: Ignore zero values when marshalling Limits. by @ajnavarro in https://github.com/libp2p/go-libp2p/pull/1998
|
||||
* feat: add ci flakiness score to readme by @MarcoPolo in https://github.com/libp2p/go-libp2p/pull/2002
|
||||
* peerstore: make it possible to use an empty peer ID by @marten-seemann in https://github.com/libp2p/go-libp2p/pull/2006
|
||||
* feat: rcmgr: Export resource manager errors by @MarcoPolo in https://github.com/libp2p/go-libp2p/pull/2008
|
||||
* feat: ci test-plans: Parse test timeout parameter for interop test by @MarcoPolo in https://github.com/libp2p/go-libp2p/pull/2014
|
||||
* Clean addresses with peer id before adding to addrbook by @sukunrt in https://github.com/libp2p/go-libp2p/pull/2007
|
||||
* Expose muxer ids by @aschmahmann in https://github.com/libp2p/go-libp2p/pull/2012
|
||||
* swarm: add a basic metrics tracer by @marten-seemann in https://github.com/libp2p/go-libp2p/pull/1973
|
||||
* consistently use protocol.ID instead of strings by @sukunrt in https://github.com/libp2p/go-libp2p/pull/2004
|
||||
* swarm metrics: fix datasource for dashboard by @MarcoPolo in https://github.com/libp2p/go-libp2p/pull/2024
|
||||
* chore: remove textual roadmap in favor for Starmap by @p-shahi in https://github.com/libp2p/go-libp2p/pull/2036
|
||||
* rcmgr: *: Always close connscope by @MarcoPolo in https://github.com/libp2p/go-libp2p/pull/2037
|
||||
* chore: remove license files from the eventbus package by @marten-seemann in https://github.com/libp2p/go-libp2p/pull/2042
|
||||
* Migrate to test-plan composite action by @thomaseizinger in https://github.com/libp2p/go-libp2p/pull/2039
|
||||
* use quic-go and webtransport-go from quic-go organization by @marten-seemann in https://github.com/libp2p/go-libp2p/pull/2040
|
||||
* holepunch: fix flaky test by not removing holepunch protocol handler by @marten-seemann in https://github.com/libp2p/go-libp2p/pull/1948
|
||||
* quic / webtransport: extend test to test dialing a draft-29 and a v1 by @marten-seemann in https://github.com/libp2p/go-libp2p/pull/1957
|
||||
* p2p/test: add test for EvtLocalAddressesUpdated event by @marten-seemann in https://github.com/libp2p/go-libp2p/pull/2016
|
||||
* quic, tcp: only register Prometheus counters when metrics are enabled by @marten-seemann in https://github.com/libp2p/go-libp2p/pull/1971
|
||||
* p2p/test: fix flaky notification test by @marten-seemann in https://github.com/libp2p/go-libp2p/pull/2051
|
||||
* quic: disable sending of Version Negotiation packets by @marten-seemann in https://github.com/libp2p/go-libp2p/pull/2015
|
||||
* eventbus: add metrics by @sukunrt in https://github.com/libp2p/go-libp2p/pull/2038
|
||||
* metrics: use a single slice pool for all metrics tracer by @marten-seemann in https://github.com/libp2p/go-libp2p/pull/2054
|
||||
* webtransport: tidy up some test output by @MarcoPolo in https://github.com/libp2p/go-libp2p/pull/2053
|
||||
* set names for eventbus event subscriptions by @marten-seemann in https://github.com/libp2p/go-libp2p/pull/2057
|
||||
* autorelay: Split libp2p.EnableAutoRelay into 2 functions by @sukunrt in https://github.com/libp2p/go-libp2p/pull/2022
|
||||
* rcmgr: Use prometheus SDK for rcmgr metrics by @MarcoPolo in https://github.com/libp2p/go-libp2p/pull/2044
|
||||
* websocket: Replace gorilla websocket transport with nhooyr websocket transport by @MarcoPolo in https://github.com/libp2p/go-libp2p/pull/1982
|
||||
* rcmgr: add libp2p prefix to all metrics by @marten-seemann in https://github.com/libp2p/go-libp2p/pull/2063
|
||||
* chore: git-ignore various flavors of qlog files by @marten-seemann in https://github.com/libp2p/go-libp2p/pull/2064
|
||||
* interop: Update interop test to match spec by @MarcoPolo in https://github.com/libp2p/go-libp2p/pull/2049
|
||||
* chore: update webtransport-go to v0.5.1 by @marten-seemann in https://github.com/libp2p/go-libp2p/pull/2072
|
||||
* identify: refactor sending of Identify pushes by @marten-seemann in https://github.com/libp2p/go-libp2p/pull/1984
|
||||
* feat!: rcmgr: Change LimitConfig to use LimitVal type by @MarcoPolo in https://github.com/libp2p/go-libp2p/pull/2000
|
||||
* p2p/test/quic: use contexts with a timeout for Connect calls by @marten-seemann in https://github.com/libp2p/go-libp2p/pull/2070
|
||||
* identify: add some basic metrics by @marten-seemann in https://github.com/libp2p/go-libp2p/pull/2069
|
||||
* chore: Release v0.25.0 by @MarcoPolo in https://github.com/libp2p/go-libp2p/pull/2077
|
||||
|
||||
## New Contributors <!-- omit in toc -->
|
||||
* @muXxer made their first contribution in https://github.com/libp2p/go-libp2p/pull/1980
|
||||
* @ajnavarro made their first contribution in https://github.com/libp2p/go-libp2p/pull/1998
|
||||
* @sukunrt made their first contribution in https://github.com/libp2p/go-libp2p/pull/2007
|
||||
* @thomaseizinger made their first contribution in https://github.com/libp2p/go-libp2p/pull/2039
|
||||
|
||||
**Full Changelog**: https://github.com/libp2p/go-libp2p/compare/v0.24.2...v0.25.0
|
||||
21
vendor/github.com/libp2p/go-libp2p/LICENSE
generated
vendored
Normal file
21
vendor/github.com/libp2p/go-libp2p/LICENSE
generated
vendored
Normal file
@@ -0,0 +1,21 @@
|
||||
The MIT License (MIT)
|
||||
|
||||
Copyright (c) 2014 Juan Batiz-Benet
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in
|
||||
all copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
||||
THE SOFTWARE.
|
||||
102
vendor/github.com/libp2p/go-libp2p/README.md
generated
vendored
Normal file
102
vendor/github.com/libp2p/go-libp2p/README.md
generated
vendored
Normal file
@@ -0,0 +1,102 @@
|
||||
|
||||
<h1 align="center">
|
||||
<a href="libp2p.io"><img width="250" src="https://github.com/libp2p/libp2p/blob/master/logo/black-bg-2.png?raw=true" alt="libp2p hex logo" /></a>
|
||||
</h1>
|
||||
|
||||
<h3 align="center">The Go implementation of the libp2p Networking Stack.</h3>
|
||||
|
||||
<p align="center">
|
||||
<a href="http://protocol.ai"><img src="https://img.shields.io/badge/made%20by-Protocol%20Labs-blue.svg?style=flat-square" /></a>
|
||||
<a href="http://libp2p.io/"><img src="https://img.shields.io/badge/project-libp2p-yellow.svg?style=flat-square" /></a>
|
||||
<a href="https://pkg.go.dev/github.com/libp2p/go-libp2p"><img src="https://pkg.go.dev/badge/github.com/libp2p/go-libp2p.svg" alt="Go Reference"></a>
|
||||
<a href="https://discuss.libp2p.io"><img src="https://img.shields.io/discourse/https/discuss.libp2p.io/posts.svg"/></a>
|
||||
<a href="https://marcopolo.github.io/FlakyTests/"><img src="https://marcopolo.github.io/FlakyTests/current-score.svg"/></a>
|
||||
</p>
|
||||
|
||||
# Table of Contents
|
||||
|
||||
- [Background](#background)
|
||||
- [Roadmap](#roadmap)
|
||||
- [Usage](#usage)
|
||||
- [Examples](#examples)
|
||||
- [Development](#development)
|
||||
- [Tests](#tests)
|
||||
- [Contribute](#contribute)
|
||||
- [Supported Go Versions](#supported-go-versions)
|
||||
|
||||
## Background
|
||||
|
||||
[libp2p](https://github.com/libp2p/specs) is a networking stack and library modularized out of [The IPFS Project](https://github.com/ipfs/ipfs), and bundled separately for other tools to use.
|
||||
>
|
||||
libp2p is the product of a long, and arduous quest of understanding -- a deep dive into the internet's network stack, and plentiful peer-to-peer protocols from the past. Building large-scale peer-to-peer systems has been complex and difficult in the last 15 years, and libp2p is a way to fix that. It is a "network stack" -- a protocol suite -- that cleanly separates concerns, and enables sophisticated applications to only use the protocols they absolutely need, without giving up interoperability and upgradeability. libp2p grew out of IPFS, but it is built so that lots of people can use it, for lots of different projects.
|
||||
|
||||
To learn more, check out the following resources:
|
||||
- [**Our documentation**](https://docs.libp2p.io)
|
||||
- [**Our community discussion forum**](https://discuss.libp2p.io)
|
||||
- [**The libp2p Specification**](https://github.com/libp2p/specs)
|
||||
- [**js-libp2p implementation**](https://github.com/libp2p/js-libp2p)
|
||||
- [**rust-libp2p implementation**](https://github.com/libp2p/rust-libp2p)
|
||||
|
||||
## Roadmap
|
||||
|
||||
Our roadmap for go-libp2p can be found here: https://github.com/libp2p/go-libp2p/blob/master/ROADMAP.md
|
||||
This document represents current projects the go-libp2p team is focused on and provides an estimation of completion targets. It is a complementary roadmap to the overarching libp2p project roadmap: https://github.com/libp2p/specs/blob/master/ROADMAP.md
|
||||
|
||||
## Usage
|
||||
|
||||
This repository (`go-libp2p`) serves as the entrypoint to the universe of packages that compose the Go implementation of the libp2p stack.
|
||||
|
||||
You can start using go-libp2p in your Go application simply by adding imports from our repos, e.g.:
|
||||
|
||||
```go
|
||||
import "github.com/libp2p/go-libp2p"
|
||||
```
|
||||
|
||||
### Examples
|
||||
|
||||
Examples can be found in the [examples folder](examples).
|
||||
|
||||
|
||||
# Contribute
|
||||
|
||||
go-libp2p is MIT-licensed open source software. We welcome contributions big and small! Take a look at the [community contributing notes](https://github.com/ipfs/community/blob/master/CONTRIBUTING.md). Please make sure to check the [issues](https://github.com/libp2p/go-libp2p/issues). Search the closed ones before reporting things, and help us with the open ones.
|
||||
|
||||
Guidelines:
|
||||
|
||||
- read the [libp2p spec](https://github.com/libp2p/specs)
|
||||
- ask questions or talk about things in our [discussion forums](https://discuss.libp2p.io), or open an [issue](https://github.com/libp2p/go-libp2p/issues) for bug reports, or #libp2p-implementers on [Filecoin slack](https://filecoin.io/slack).
|
||||
- ensure you are able to contribute (no legal issues please -- we use the DCO)
|
||||
- get in touch with @libp2p/go-libp2p-maintainers about how best to contribute
|
||||
- have fun!
|
||||
|
||||
There's a few things you can do right now to help out:
|
||||
- Go through the modules below and **check out existing issues**. This would be especially useful for modules in active development. Some knowledge of IPFS/libp2p may be required, as well as the infrastructure behind it - for instance, you may need to read up on p2p and more complex operations like muxing to be able to help technically.
|
||||
- **Perform code reviews**.
|
||||
- **Add tests**. There can never be enough tests.
|
||||
|
||||
## Supported Go Versions
|
||||
|
||||
We test against and support the two most recent major releases of Go. This is
|
||||
informed by Go's own [security policy](https://go.dev/security).
|
||||
|
||||
# Notable Users
|
||||
Some notable users of go-libp2p are:
|
||||
- [Kubo](https://github.com/ipfs/kubo) - The original Go implementation of IPFS
|
||||
- [Lotus](https://github.com/filecoin-project/lotus) - An implementation of the Filecoin protocol
|
||||
- [Drand](https://github.com/drand/drand) - A distributed random beacon daemon
|
||||
- [Prysm](https://github.com/prysmaticlabs/prysm) - An Ethereum Beacon Chain consensus client built by [Prysmatic Labs](https://prysmaticlabs.com/)
|
||||
- [Berty](https://github.com/berty/berty) - An open, secure, offline-first, peer-to-peer and zero trust messaging app.
|
||||
- [Wasp](https://github.com/iotaledger/wasp) - A node that runs IOTA Smart Contracts built by the [IOTA Foundation](https://www.iota.org/)
|
||||
- [Mina](https://github.com/minaprotocol/mina) - A lightweight, constant-sized blockchain that runs zero-knowledge smart contracts
|
||||
- [Polygon Edge](https://github.com/0xPolygon/polygon-edge) - A modular, extensible framework for building Ethereum compatible networks
|
||||
- [Celestia Node](https://github.com/celestiaorg/celestia-node) - The Go implementation of Celestia's data availability nodes
|
||||
- [Status go](https://github.com/status-im/status-go) - Status bindings for go-ethereum, built by [Status.im](https://status.im/)
|
||||
- [Flow](https://github.com/onflow/flow-go) - A blockchain built to support games, apps, and digital assets built by [Dapper Labs](https://www.dapperlabs.com/)
|
||||
- [Swarm Bee](https://github.com/ethersphere/bee) - A client for connecting to the [Swarm network](https://www.ethswarm.org/)
|
||||
- [Elrond Go](https://github.com/multiversx/mx-chain-go) - The Go implementation of the Elrond network protocol
|
||||
- [Sonr](https://github.com/sonr-io/sonr) - A platform to integrate DID Documents, WebAuthn, and IPFS and manage digital identity and assets.
|
||||
- [EdgeVPN](https://github.com/mudler/edgevpn) - A decentralized, immutable, portable VPN and reverse proxy over p2p.
|
||||
- [Kairos](https://github.com/kairos-io/kairos) - A Kubernetes-focused, Cloud Native Linux meta-distribution.
|
||||
- [Oasis Core](https://github.com/oasisprotocol/oasis-core) - The consensus and runtime layers of the [Oasis protocol](https://oasisprotocol.org/).
|
||||
|
||||
Please open a pull request if you want your project to be added here.
|
||||
5
vendor/github.com/libp2p/go-libp2p/ROADMAP.md
generated
vendored
Normal file
5
vendor/github.com/libp2p/go-libp2p/ROADMAP.md
generated
vendored
Normal file
@@ -0,0 +1,5 @@
|
||||
# go-libp2p roadmap Q4’22/Q1’23
|
||||
|
||||
Please see our roadmap in [Starmap](https://starmap.site/roadmap/github.com/libp2p/go-libp2p/issues/1806#simple)
|
||||
|
||||
Please add any feedback or questions in: https://github.com/libp2p/go-libp2p/issues/1806
|
||||
20
vendor/github.com/libp2p/go-libp2p/SECURITY.md
generated
vendored
Normal file
20
vendor/github.com/libp2p/go-libp2p/SECURITY.md
generated
vendored
Normal file
@@ -0,0 +1,20 @@
|
||||
# Security Policy
|
||||
|
||||
go-libp2p is still in development. This means that there may be problems in our protocols,
|
||||
or there may be mistakes in our implementations.
|
||||
We take security vulnerabilities very seriously. If you discover a security issue,
|
||||
please bring it to our attention right away!
|
||||
|
||||
## Reporting a Vulnerability
|
||||
|
||||
If you find a vulnerability that may affect live deployments -- for example, by exposing
|
||||
a remote execution exploit -- please [**report privately**](https://github.com/libp2p/go-libp2p/security/advisories/new).
|
||||
Please **DO NOT file a public issue**.
|
||||
|
||||
If the issue is an implementation weakness that cannot be immediately exploited or
|
||||
something not yet deployed, just discuss it openly.
|
||||
If you need assistance, please reach out to [security@libp2p.io](mailto:security@libp2p.io).
|
||||
|
||||
## Reporting a non security bug
|
||||
|
||||
For non-security bugs, please simply file a GitHub [issue](https://github.com/libp2p/go-libp2p/issues/new).
|
||||
483
vendor/github.com/libp2p/go-libp2p/config/config.go
generated
vendored
Normal file
483
vendor/github.com/libp2p/go-libp2p/config/config.go
generated
vendored
Normal file
@@ -0,0 +1,483 @@
|
||||
package config
|
||||
|
||||
import (
|
||||
"crypto/rand"
|
||||
"errors"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"github.com/libp2p/go-libp2p/core/connmgr"
|
||||
"github.com/libp2p/go-libp2p/core/crypto"
|
||||
"github.com/libp2p/go-libp2p/core/event"
|
||||
"github.com/libp2p/go-libp2p/core/host"
|
||||
"github.com/libp2p/go-libp2p/core/metrics"
|
||||
"github.com/libp2p/go-libp2p/core/network"
|
||||
"github.com/libp2p/go-libp2p/core/peer"
|
||||
"github.com/libp2p/go-libp2p/core/peerstore"
|
||||
"github.com/libp2p/go-libp2p/core/pnet"
|
||||
"github.com/libp2p/go-libp2p/core/protocol"
|
||||
"github.com/libp2p/go-libp2p/core/routing"
|
||||
"github.com/libp2p/go-libp2p/core/sec"
|
||||
"github.com/libp2p/go-libp2p/core/sec/insecure"
|
||||
"github.com/libp2p/go-libp2p/core/transport"
|
||||
"github.com/libp2p/go-libp2p/p2p/host/autonat"
|
||||
"github.com/libp2p/go-libp2p/p2p/host/autorelay"
|
||||
bhost "github.com/libp2p/go-libp2p/p2p/host/basic"
|
||||
blankhost "github.com/libp2p/go-libp2p/p2p/host/blank"
|
||||
"github.com/libp2p/go-libp2p/p2p/host/eventbus"
|
||||
"github.com/libp2p/go-libp2p/p2p/host/peerstore/pstoremem"
|
||||
rcmgr "github.com/libp2p/go-libp2p/p2p/host/resource-manager"
|
||||
routed "github.com/libp2p/go-libp2p/p2p/host/routed"
|
||||
"github.com/libp2p/go-libp2p/p2p/net/swarm"
|
||||
tptu "github.com/libp2p/go-libp2p/p2p/net/upgrader"
|
||||
circuitv2 "github.com/libp2p/go-libp2p/p2p/protocol/circuitv2/client"
|
||||
relayv2 "github.com/libp2p/go-libp2p/p2p/protocol/circuitv2/relay"
|
||||
"github.com/libp2p/go-libp2p/p2p/protocol/holepunch"
|
||||
"github.com/libp2p/go-libp2p/p2p/transport/quicreuse"
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
|
||||
ma "github.com/multiformats/go-multiaddr"
|
||||
madns "github.com/multiformats/go-multiaddr-dns"
|
||||
"go.uber.org/fx"
|
||||
"go.uber.org/fx/fxevent"
|
||||
)
|
||||
|
||||
// AddrsFactory is a function that takes a set of multiaddrs we're listening on and
|
||||
// returns the set of multiaddrs we should advertise to the network.
|
||||
type AddrsFactory = bhost.AddrsFactory
|
||||
|
||||
// NATManagerC is a NATManager constructor.
|
||||
type NATManagerC func(network.Network) bhost.NATManager
|
||||
|
||||
type RoutingC func(host.Host) (routing.PeerRouting, error)
|
||||
|
||||
// AutoNATConfig defines the AutoNAT behavior for the libp2p host.
|
||||
type AutoNATConfig struct {
|
||||
ForceReachability *network.Reachability
|
||||
EnableService bool
|
||||
ThrottleGlobalLimit int
|
||||
ThrottlePeerLimit int
|
||||
ThrottleInterval time.Duration
|
||||
}
|
||||
|
||||
type Security struct {
|
||||
ID protocol.ID
|
||||
Constructor interface{}
|
||||
}
|
||||
|
||||
// Config describes a set of settings for a libp2p node
|
||||
//
|
||||
// This is *not* a stable interface. Use the options defined in the root
|
||||
// package.
|
||||
type Config struct {
|
||||
// UserAgent is the identifier this node will send to other peers when
|
||||
// identifying itself, e.g. via the identify protocol.
|
||||
//
|
||||
// Set it via the UserAgent option function.
|
||||
UserAgent string
|
||||
|
||||
// ProtocolVersion is the protocol version that identifies the family
|
||||
// of protocols used by the peer in the Identify protocol. It is set
|
||||
// using the [ProtocolVersion] option.
|
||||
ProtocolVersion string
|
||||
|
||||
PeerKey crypto.PrivKey
|
||||
|
||||
QUICReuse []fx.Option
|
||||
Transports []fx.Option
|
||||
Muxers []tptu.StreamMuxer
|
||||
SecurityTransports []Security
|
||||
Insecure bool
|
||||
PSK pnet.PSK
|
||||
|
||||
DialTimeout time.Duration
|
||||
|
||||
RelayCustom bool
|
||||
Relay bool // should the relay transport be used
|
||||
|
||||
EnableRelayService bool // should we run a circuitv2 relay (if publicly reachable)
|
||||
RelayServiceOpts []relayv2.Option
|
||||
|
||||
ListenAddrs []ma.Multiaddr
|
||||
AddrsFactory bhost.AddrsFactory
|
||||
ConnectionGater connmgr.ConnectionGater
|
||||
|
||||
ConnManager connmgr.ConnManager
|
||||
ResourceManager network.ResourceManager
|
||||
|
||||
NATManager NATManagerC
|
||||
Peerstore peerstore.Peerstore
|
||||
Reporter metrics.Reporter
|
||||
|
||||
MultiaddrResolver *madns.Resolver
|
||||
|
||||
DisablePing bool
|
||||
|
||||
Routing RoutingC
|
||||
|
||||
EnableAutoRelay bool
|
||||
AutoRelayOpts []autorelay.Option
|
||||
AutoNATConfig
|
||||
|
||||
EnableHolePunching bool
|
||||
HolePunchingOptions []holepunch.Option
|
||||
|
||||
DisableMetrics bool
|
||||
PrometheusRegisterer prometheus.Registerer
|
||||
|
||||
DialRanker network.DialRanker
|
||||
|
||||
SwarmOpts []swarm.Option
|
||||
}
|
||||
|
||||
func (cfg *Config) makeSwarm(eventBus event.Bus, enableMetrics bool) (*swarm.Swarm, error) {
|
||||
if cfg.Peerstore == nil {
|
||||
return nil, fmt.Errorf("no peerstore specified")
|
||||
}
|
||||
|
||||
// Check this early. Prevents us from even *starting* without verifying this.
|
||||
if pnet.ForcePrivateNetwork && len(cfg.PSK) == 0 {
|
||||
log.Error("tried to create a libp2p node with no Private" +
|
||||
" Network Protector but usage of Private Networks" +
|
||||
" is forced by the environment")
|
||||
// Note: This is *also* checked the upgrader itself, so it'll be
|
||||
// enforced even *if* you don't use the libp2p constructor.
|
||||
return nil, pnet.ErrNotInPrivateNetwork
|
||||
}
|
||||
|
||||
if cfg.PeerKey == nil {
|
||||
return nil, fmt.Errorf("no peer key specified")
|
||||
}
|
||||
|
||||
// Obtain Peer ID from public key
|
||||
pid, err := peer.IDFromPublicKey(cfg.PeerKey.GetPublic())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if err := cfg.Peerstore.AddPrivKey(pid, cfg.PeerKey); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err := cfg.Peerstore.AddPubKey(pid, cfg.PeerKey.GetPublic()); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
opts := cfg.SwarmOpts
|
||||
if cfg.Reporter != nil {
|
||||
opts = append(opts, swarm.WithMetrics(cfg.Reporter))
|
||||
}
|
||||
if cfg.ConnectionGater != nil {
|
||||
opts = append(opts, swarm.WithConnectionGater(cfg.ConnectionGater))
|
||||
}
|
||||
if cfg.DialTimeout != 0 {
|
||||
opts = append(opts, swarm.WithDialTimeout(cfg.DialTimeout))
|
||||
}
|
||||
if cfg.ResourceManager != nil {
|
||||
opts = append(opts, swarm.WithResourceManager(cfg.ResourceManager))
|
||||
}
|
||||
if cfg.MultiaddrResolver != nil {
|
||||
opts = append(opts, swarm.WithMultiaddrResolver(cfg.MultiaddrResolver))
|
||||
}
|
||||
if cfg.DialRanker != nil {
|
||||
opts = append(opts, swarm.WithDialRanker(cfg.DialRanker))
|
||||
}
|
||||
|
||||
if enableMetrics {
|
||||
opts = append(opts,
|
||||
swarm.WithMetricsTracer(swarm.NewMetricsTracer(swarm.WithRegisterer(cfg.PrometheusRegisterer))))
|
||||
}
|
||||
// TODO: Make the swarm implementation configurable.
|
||||
return swarm.NewSwarm(pid, cfg.Peerstore, eventBus, opts...)
|
||||
}
|
||||
|
||||
func (cfg *Config) addTransports(h host.Host) error {
|
||||
swrm, ok := h.Network().(transport.TransportNetwork)
|
||||
if !ok {
|
||||
// Should probably skip this if no transports.
|
||||
return fmt.Errorf("swarm does not support transports")
|
||||
}
|
||||
|
||||
fxopts := []fx.Option{
|
||||
fx.WithLogger(func() fxevent.Logger { return getFXLogger() }),
|
||||
fx.Provide(fx.Annotate(tptu.New, fx.ParamTags(`name:"security"`))),
|
||||
fx.Supply(cfg.Muxers),
|
||||
fx.Supply(h.ID()),
|
||||
fx.Provide(func() host.Host { return h }),
|
||||
fx.Provide(func() crypto.PrivKey { return h.Peerstore().PrivKey(h.ID()) }),
|
||||
fx.Provide(func() connmgr.ConnectionGater { return cfg.ConnectionGater }),
|
||||
fx.Provide(func() pnet.PSK { return cfg.PSK }),
|
||||
fx.Provide(func() network.ResourceManager { return cfg.ResourceManager }),
|
||||
fx.Provide(func() *madns.Resolver { return cfg.MultiaddrResolver }),
|
||||
}
|
||||
fxopts = append(fxopts, cfg.Transports...)
|
||||
if cfg.Insecure {
|
||||
fxopts = append(fxopts,
|
||||
fx.Provide(
|
||||
fx.Annotate(
|
||||
func(id peer.ID, priv crypto.PrivKey) []sec.SecureTransport {
|
||||
return []sec.SecureTransport{insecure.NewWithIdentity(insecure.ID, id, priv)}
|
||||
},
|
||||
fx.ResultTags(`name:"security"`),
|
||||
),
|
||||
),
|
||||
)
|
||||
} else {
|
||||
// fx groups are unordered, but we need to preserve the order of the security transports
|
||||
// First of all, we construct the security transports that are needed,
|
||||
// and save them to a group call security_unordered.
|
||||
for _, s := range cfg.SecurityTransports {
|
||||
fxName := fmt.Sprintf(`name:"security_%s"`, s.ID)
|
||||
fxopts = append(fxopts, fx.Supply(fx.Annotate(s.ID, fx.ResultTags(fxName))))
|
||||
fxopts = append(fxopts,
|
||||
fx.Provide(fx.Annotate(
|
||||
s.Constructor,
|
||||
fx.ParamTags(fxName),
|
||||
fx.As(new(sec.SecureTransport)),
|
||||
fx.ResultTags(`group:"security_unordered"`),
|
||||
)),
|
||||
)
|
||||
}
|
||||
// Then we consume the group security_unordered, and order them by the user's preference.
|
||||
fxopts = append(fxopts, fx.Provide(
|
||||
fx.Annotate(
|
||||
func(secs []sec.SecureTransport) ([]sec.SecureTransport, error) {
|
||||
if len(secs) != len(cfg.SecurityTransports) {
|
||||
return nil, errors.New("inconsistent length for security transports")
|
||||
}
|
||||
t := make([]sec.SecureTransport, 0, len(secs))
|
||||
for _, s := range cfg.SecurityTransports {
|
||||
for _, st := range secs {
|
||||
if s.ID != st.ID() {
|
||||
continue
|
||||
}
|
||||
t = append(t, st)
|
||||
}
|
||||
}
|
||||
return t, nil
|
||||
},
|
||||
fx.ParamTags(`group:"security_unordered"`),
|
||||
fx.ResultTags(`name:"security"`),
|
||||
)))
|
||||
}
|
||||
|
||||
fxopts = append(fxopts, fx.Provide(PrivKeyToStatelessResetKey))
|
||||
if cfg.QUICReuse != nil {
|
||||
fxopts = append(fxopts, cfg.QUICReuse...)
|
||||
} else {
|
||||
fxopts = append(fxopts, fx.Provide(quicreuse.NewConnManager)) // TODO: close the ConnManager when shutting down the node
|
||||
}
|
||||
|
||||
fxopts = append(fxopts, fx.Invoke(
|
||||
fx.Annotate(
|
||||
func(tpts []transport.Transport) error {
|
||||
for _, t := range tpts {
|
||||
if err := swrm.AddTransport(t); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
},
|
||||
fx.ParamTags(`group:"transport"`),
|
||||
)),
|
||||
)
|
||||
if cfg.Relay {
|
||||
fxopts = append(fxopts, fx.Invoke(circuitv2.AddTransport))
|
||||
}
|
||||
app := fx.New(fxopts...)
|
||||
if err := app.Err(); err != nil {
|
||||
h.Close()
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// NewNode constructs a new libp2p Host from the Config.
|
||||
//
|
||||
// This function consumes the config. Do not reuse it (really!).
|
||||
func (cfg *Config) NewNode() (host.Host, error) {
|
||||
eventBus := eventbus.NewBus(eventbus.WithMetricsTracer(eventbus.NewMetricsTracer(eventbus.WithRegisterer(cfg.PrometheusRegisterer))))
|
||||
swrm, err := cfg.makeSwarm(eventBus, !cfg.DisableMetrics)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if !cfg.DisableMetrics {
|
||||
rcmgr.MustRegisterWith(cfg.PrometheusRegisterer)
|
||||
}
|
||||
|
||||
h, err := bhost.NewHost(swrm, &bhost.HostOpts{
|
||||
EventBus: eventBus,
|
||||
ConnManager: cfg.ConnManager,
|
||||
AddrsFactory: cfg.AddrsFactory,
|
||||
NATManager: cfg.NATManager,
|
||||
EnablePing: !cfg.DisablePing,
|
||||
UserAgent: cfg.UserAgent,
|
||||
ProtocolVersion: cfg.ProtocolVersion,
|
||||
EnableHolePunching: cfg.EnableHolePunching,
|
||||
HolePunchingOptions: cfg.HolePunchingOptions,
|
||||
EnableRelayService: cfg.EnableRelayService,
|
||||
RelayServiceOpts: cfg.RelayServiceOpts,
|
||||
EnableMetrics: !cfg.DisableMetrics,
|
||||
PrometheusRegisterer: cfg.PrometheusRegisterer,
|
||||
})
|
||||
if err != nil {
|
||||
swrm.Close()
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if cfg.Relay {
|
||||
// If we've enabled the relay, we should filter out relay
|
||||
// addresses by default.
|
||||
//
|
||||
// TODO: We shouldn't be doing this here.
|
||||
oldFactory := h.AddrsFactory
|
||||
h.AddrsFactory = func(addrs []ma.Multiaddr) []ma.Multiaddr {
|
||||
return oldFactory(autorelay.Filter(addrs))
|
||||
}
|
||||
}
|
||||
|
||||
if err := cfg.addTransports(h); err != nil {
|
||||
h.Close()
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// TODO: This method succeeds if listening on one address succeeds. We
|
||||
// should probably fail if listening on *any* addr fails.
|
||||
if err := h.Network().Listen(cfg.ListenAddrs...); err != nil {
|
||||
h.Close()
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Configure routing and autorelay
|
||||
var router routing.PeerRouting
|
||||
if cfg.Routing != nil {
|
||||
router, err = cfg.Routing(h)
|
||||
if err != nil {
|
||||
h.Close()
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
// Note: h.AddrsFactory may be changed by relayFinder, but non-relay version is
|
||||
// used by AutoNAT below.
|
||||
var ar *autorelay.AutoRelay
|
||||
addrF := h.AddrsFactory
|
||||
if cfg.EnableAutoRelay {
|
||||
if !cfg.Relay {
|
||||
h.Close()
|
||||
return nil, fmt.Errorf("cannot enable autorelay; relay is not enabled")
|
||||
}
|
||||
if !cfg.DisableMetrics {
|
||||
mt := autorelay.WithMetricsTracer(
|
||||
autorelay.NewMetricsTracer(autorelay.WithRegisterer(cfg.PrometheusRegisterer)))
|
||||
mtOpts := []autorelay.Option{mt}
|
||||
cfg.AutoRelayOpts = append(mtOpts, cfg.AutoRelayOpts...)
|
||||
}
|
||||
|
||||
ar, err = autorelay.NewAutoRelay(h, cfg.AutoRelayOpts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
autonatOpts := []autonat.Option{
|
||||
autonat.UsingAddresses(func() []ma.Multiaddr {
|
||||
return addrF(h.AllAddrs())
|
||||
}),
|
||||
}
|
||||
if !cfg.DisableMetrics {
|
||||
autonatOpts = append(autonatOpts,
|
||||
autonat.WithMetricsTracer(
|
||||
autonat.NewMetricsTracer(autonat.WithRegisterer(cfg.PrometheusRegisterer))))
|
||||
}
|
||||
if cfg.AutoNATConfig.ThrottleInterval != 0 {
|
||||
autonatOpts = append(autonatOpts,
|
||||
autonat.WithThrottling(cfg.AutoNATConfig.ThrottleGlobalLimit, cfg.AutoNATConfig.ThrottleInterval),
|
||||
autonat.WithPeerThrottling(cfg.AutoNATConfig.ThrottlePeerLimit))
|
||||
}
|
||||
if cfg.AutoNATConfig.EnableService {
|
||||
autonatPrivKey, _, err := crypto.GenerateEd25519Key(rand.Reader)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
ps, err := pstoremem.NewPeerstore()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Pull out the pieces of the config that we _actually_ care about.
|
||||
// Specifically, don't set up things like autorelay, listeners,
|
||||
// identify, etc.
|
||||
autoNatCfg := Config{
|
||||
Transports: cfg.Transports,
|
||||
Muxers: cfg.Muxers,
|
||||
SecurityTransports: cfg.SecurityTransports,
|
||||
Insecure: cfg.Insecure,
|
||||
PSK: cfg.PSK,
|
||||
ConnectionGater: cfg.ConnectionGater,
|
||||
Reporter: cfg.Reporter,
|
||||
PeerKey: autonatPrivKey,
|
||||
Peerstore: ps,
|
||||
DialRanker: swarm.NoDelayDialRanker,
|
||||
}
|
||||
|
||||
dialer, err := autoNatCfg.makeSwarm(eventbus.NewBus(), false)
|
||||
if err != nil {
|
||||
h.Close()
|
||||
return nil, err
|
||||
}
|
||||
dialerHost := blankhost.NewBlankHost(dialer)
|
||||
if err := autoNatCfg.addTransports(dialerHost); err != nil {
|
||||
dialerHost.Close()
|
||||
h.Close()
|
||||
return nil, err
|
||||
}
|
||||
// NOTE: We're dropping the blank host here but that's fine. It
|
||||
// doesn't really _do_ anything and doesn't even need to be
|
||||
// closed (as long as we close the underlying network).
|
||||
autonatOpts = append(autonatOpts, autonat.EnableService(dialerHost.Network()))
|
||||
}
|
||||
if cfg.AutoNATConfig.ForceReachability != nil {
|
||||
autonatOpts = append(autonatOpts, autonat.WithReachability(*cfg.AutoNATConfig.ForceReachability))
|
||||
}
|
||||
|
||||
autonat, err := autonat.New(h, autonatOpts...)
|
||||
if err != nil {
|
||||
h.Close()
|
||||
return nil, fmt.Errorf("cannot enable autorelay; autonat failed to start: %v", err)
|
||||
}
|
||||
h.SetAutoNat(autonat)
|
||||
|
||||
// start the host background tasks
|
||||
h.Start()
|
||||
|
||||
var ho host.Host
|
||||
ho = h
|
||||
if router != nil {
|
||||
ho = routed.Wrap(h, router)
|
||||
}
|
||||
if ar != nil {
|
||||
arh := autorelay.NewAutoRelayHost(ho, ar)
|
||||
arh.Start()
|
||||
ho = arh
|
||||
}
|
||||
return ho, nil
|
||||
}
|
||||
|
||||
// Option is a libp2p config option that can be given to the libp2p constructor
// (`libp2p.New`). A nil Option is permitted and is skipped by Config.Apply.
type Option func(cfg *Config) error
|
||||
|
||||
// Apply applies the given options to the config, returning the first error
|
||||
// encountered (if any).
|
||||
func (cfg *Config) Apply(opts ...Option) error {
|
||||
for _, opt := range opts {
|
||||
if opt == nil {
|
||||
continue
|
||||
}
|
||||
if err := opt(cfg); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
28
vendor/github.com/libp2p/go-libp2p/config/log.go
generated
vendored
Normal file
28
vendor/github.com/libp2p/go-libp2p/config/log.go
generated
vendored
Normal file
@@ -0,0 +1,28 @@
|
||||
package config
|
||||
|
||||
import (
|
||||
"strings"
|
||||
"sync"
|
||||
|
||||
logging "github.com/ipfs/go-log/v2"
|
||||
"go.uber.org/fx/fxevent"
|
||||
)
|
||||
|
||||
// log is the package-wide logger for libp2p configuration events.
var log = logging.Logger("p2p-config")
|
||||
|
||||
var (
	// fxLogger is built lazily by getFXLogger and shared process-wide.
	fxLogger fxevent.Logger
	// logInitOnce guards the one-time construction of fxLogger.
	logInitOnce sync.Once
)
|
||||
|
||||
// fxLogWriter adapts the package logger to an io.Writer so fx's console
// event logger can emit through it.
type fxLogWriter struct{}
|
||||
|
||||
func (l *fxLogWriter) Write(b []byte) (int, error) {
|
||||
log.Debug(strings.TrimSuffix(string(b), "\n"))
|
||||
return len(b), nil
|
||||
}
|
||||
|
||||
func getFXLogger() fxevent.Logger {
|
||||
logInitOnce.Do(func() { fxLogger = &fxevent.ConsoleLogger{W: &fxLogWriter{}} })
|
||||
return fxLogger
|
||||
}
|
||||
27
vendor/github.com/libp2p/go-libp2p/config/quic_stateless_reset.go
generated
vendored
Normal file
27
vendor/github.com/libp2p/go-libp2p/config/quic_stateless_reset.go
generated
vendored
Normal file
@@ -0,0 +1,27 @@
|
||||
package config
|
||||
|
||||
import (
|
||||
"crypto/sha256"
|
||||
"io"
|
||||
|
||||
"golang.org/x/crypto/hkdf"
|
||||
|
||||
"github.com/libp2p/go-libp2p/core/crypto"
|
||||
|
||||
"github.com/quic-go/quic-go"
|
||||
)
|
||||
|
||||
// statelessResetKeyInfo is the HKDF "info" label used when deriving the
// QUIC stateless reset key from the host's private key.
const statelessResetKeyInfo = "libp2p quic stateless reset key"
|
||||
|
||||
func PrivKeyToStatelessResetKey(key crypto.PrivKey) (quic.StatelessResetKey, error) {
|
||||
var statelessResetKey quic.StatelessResetKey
|
||||
keyBytes, err := key.Raw()
|
||||
if err != nil {
|
||||
return statelessResetKey, err
|
||||
}
|
||||
keyReader := hkdf.New(sha256.New, keyBytes, nil, []byte(statelessResetKeyInfo))
|
||||
if _, err := io.ReadFull(keyReader, statelessResetKey[:]); err != nil {
|
||||
return statelessResetKey, err
|
||||
}
|
||||
return statelessResetKey, nil
|
||||
}
|
||||
57
vendor/github.com/libp2p/go-libp2p/core/canonicallog/canonicallog.go
generated
vendored
Normal file
57
vendor/github.com/libp2p/go-libp2p/core/canonicallog/canonicallog.go
generated
vendored
Normal file
@@ -0,0 +1,57 @@
|
||||
package canonicallog
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"math/rand"
|
||||
"net"
|
||||
"strings"
|
||||
|
||||
"github.com/libp2p/go-libp2p/core/peer"
|
||||
|
||||
logging "github.com/ipfs/go-log/v2"
|
||||
"github.com/multiformats/go-multiaddr"
|
||||
manet "github.com/multiformats/go-multiaddr/net"
|
||||
)
|
||||
|
||||
// log skips one stack frame so the reported call site is the caller of
// these canonicallog helpers rather than this package itself.
var log = logging.WithSkip(logging.Logger("canonical-log"), 1)
|
||||
|
||||
// LogMisbehavingPeer is the canonical way to log a misbehaving peer.
// Protocols should use this to identify a misbehaving peer to allow the end
// user to easily identify these nodes across protocols and libp2p.
//
// The line is emitted at warn level with the fixed
// "CANONICAL_MISBEHAVING_PEER" prefix so external tooling can match on it.
func LogMisbehavingPeer(p peer.ID, peerAddr multiaddr.Multiaddr, component string, err error, msg string) {
	log.Warnf("CANONICAL_MISBEHAVING_PEER: peer=%s addr=%s component=%s err=%q msg=%q", p, peerAddr.String(), component, err, msg)
}
|
||||
|
||||
// LogMisbehavingPeerNetAddr is the canonical way to log a misbehaving peer.
|
||||
// Protocols should use this to identify a misbehaving peer to allow the end
|
||||
// user to easily identify these nodes across protocols and libp2p.
|
||||
func LogMisbehavingPeerNetAddr(p peer.ID, peerAddr net.Addr, component string, originalErr error, msg string) {
|
||||
ma, err := manet.FromNetAddr(peerAddr)
|
||||
if err != nil {
|
||||
log.Warnf("CANONICAL_MISBEHAVING_PEER: peer=%s net_addr=%s component=%s err=%q msg=%q", p, peerAddr.String(), component, originalErr, msg)
|
||||
return
|
||||
}
|
||||
|
||||
LogMisbehavingPeer(p, ma, component, originalErr, msg)
|
||||
}
|
||||
|
||||
// LogPeerStatus logs any useful information about a peer. It takes in a sample
|
||||
// rate and will only log one in every sampleRate messages (randomly). This is
|
||||
// useful in surfacing events that are normal in isolation, but may be abnormal
|
||||
// in large quantities. For example, a successful connection from an IP address
|
||||
// is normal. 10,000 connections from that same IP address is not normal. libp2p
|
||||
// itself does nothing besides emitting this log. Hook this up to another tool
|
||||
// like fail2ban to action on the log.
|
||||
func LogPeerStatus(sampleRate int, p peer.ID, peerAddr multiaddr.Multiaddr, keyVals ...string) {
|
||||
if rand.Intn(sampleRate) == 0 {
|
||||
keyValsStr := strings.Builder{}
|
||||
for i, kOrV := range keyVals {
|
||||
if i%2 == 0 {
|
||||
fmt.Fprintf(&keyValsStr, " %v=", kOrV)
|
||||
} else {
|
||||
fmt.Fprintf(&keyValsStr, "%q", kOrV)
|
||||
}
|
||||
}
|
||||
log.Infof("CANONICAL_PEER_STATUS: peer=%s addr=%s sample_rate=%v%s", p, peerAddr.String(), sampleRate, keyValsStr.String())
|
||||
}
|
||||
}
|
||||
109
vendor/github.com/libp2p/go-libp2p/core/connmgr/decay.go
generated
vendored
Normal file
109
vendor/github.com/libp2p/go-libp2p/core/connmgr/decay.go
generated
vendored
Normal file
@@ -0,0 +1,109 @@
|
||||
package connmgr
|
||||
|
||||
import (
|
||||
"io"
|
||||
"time"
|
||||
|
||||
"github.com/libp2p/go-libp2p/core/peer"
|
||||
)
|
||||
|
||||
// Decayer is implemented by connection managers supporting decaying tags. A
// decaying tag is one whose value automatically decays over time.
//
// The actual application of the decay behaviour is encapsulated in a
// user-provided decaying function (DecayFn). The function is called on every
// tick (determined by the interval parameter), and returns either the new value
// of the tag, or whether it should be erased altogether.
//
// We do not set values on a decaying tag. Rather, we "bump" decaying tags by a
// delta. This calls the BumpFn with the old value and the delta, to determine
// the new value.
//
// Such a pluggable design affords a great deal of flexibility and versatility.
// Behaviours that are straightforward to implement include:
//
//   - Decay a tag by -1, or by half its current value, on every tick.
//   - Every time a value is bumped, sum it to its current value.
//   - Exponentially boost a score with every bump.
//   - Sum the incoming score, but keep it within min, max bounds.
//
// Commonly used DecayFns and BumpFns are provided in this package (see
// presets.go).
type Decayer interface {
	io.Closer

	// RegisterDecayingTag creates and registers a new decaying tag, if and only
	// if a tag with the supplied name doesn't exist yet. Otherwise, an error is
	// returned.
	//
	// The caller provides the interval at which the tag is refreshed, as well
	// as the decay function and the bump function. Refer to godocs on DecayFn
	// and BumpFn for more info.
	RegisterDecayingTag(name string, interval time.Duration, decayFn DecayFn, bumpFn BumpFn) (DecayingTag, error)
}
|
||||
|
||||
// DecayFn applies a decay to the peer's score. The implementation must call
// DecayFn at the interval supplied when registering the tag.
//
// It receives a copy of the decaying value, and returns the score after
// applying the decay, as well as a flag to signal if the tag should be erased
// entirely (rm == true).
type DecayFn func(value DecayingValue) (after int, rm bool)
|
||||
|
||||
// BumpFn applies a delta onto an existing score, and returns the new score.
//
// Non-trivial bump functions include exponential boosting, moving averages,
// ceilings, etc. See the Bump* constructors in this package for ready-made
// implementations.
type BumpFn func(value DecayingValue, delta int) (after int)
|
||||
|
||||
// DecayingTag represents a decaying tag. The tag is a long-lived general
// object, used to operate on tag values for peers.
type DecayingTag interface {
	// Name returns the name of the tag.
	Name() string

	// Interval is the effective interval at which this tag will tick. Upon
	// registration, the desired interval may be overwritten depending on the
	// decayer's resolution, and this method allows you to obtain the effective
	// interval.
	Interval() time.Duration

	// Bump applies a delta to a tag value, calling its bump function. The bump
	// will be applied asynchronously, and a non-nil error indicates a fault
	// when queuing.
	Bump(peer peer.ID, delta int) error

	// Remove removes a decaying tag from a peer. The removal will be applied
	// asynchronously, and a non-nil error indicates a fault when queuing.
	Remove(peer peer.ID) error

	// Close closes a decaying tag. The Decayer will stop tracking this tag,
	// and the state of all peers in the Connection Manager holding this tag
	// will be updated.
	//
	// The deletion is performed asynchronously.
	//
	// Once deleted, a tag should not be used, and further calls to Bump/Remove
	// will error.
	//
	// Duplicate calls to Remove will not return errors, but a failure to queue
	// the first actual removal, will (e.g. when the system is backlogged).
	Close() error
}
|
||||
|
||||
// DecayingValue represents a value for a decaying tag. Instances are passed
// by value (as snapshots) into DecayFn and BumpFn.
type DecayingValue struct {
	// Tag points to the tag this value belongs to.
	Tag DecayingTag

	// Peer is the peer ID to whom this value is associated.
	Peer peer.ID

	// Added is the timestamp when this value was added for the first time for
	// a tag and a peer.
	Added time.Time

	// LastVisit is the timestamp of the last visit.
	LastVisit time.Time

	// Value is the current value of the tag.
	Value int
}
|
||||
89
vendor/github.com/libp2p/go-libp2p/core/connmgr/gater.go
generated
vendored
Normal file
89
vendor/github.com/libp2p/go-libp2p/core/connmgr/gater.go
generated
vendored
Normal file
@@ -0,0 +1,89 @@
|
||||
package connmgr
|
||||
|
||||
import (
|
||||
ma "github.com/multiformats/go-multiaddr"
|
||||
|
||||
"github.com/libp2p/go-libp2p/core/control"
|
||||
"github.com/libp2p/go-libp2p/core/network"
|
||||
"github.com/libp2p/go-libp2p/core/peer"
|
||||
)
|
||||
|
||||
// ConnectionGater can be implemented by a type that supports active
// inbound or outbound connection gating.
//
// ConnectionGaters are active, whereas ConnManagers tend to be passive.
//
// A ConnectionGater will be consulted during different states in the lifecycle
// of a connection being established/upgraded. Specific functions will be called
// throughout the process, to allow you to intercept the connection at that stage.
//
// InterceptPeerDial is called on an imminent outbound peer dial request, prior
// to the addresses of that peer being available/resolved. Blocking connections
// at this stage is typical for blacklisting scenarios.
//
// InterceptAddrDial is called on an imminent outbound dial to a peer on a
// particular address. Blocking connections at this stage is typical for
// address filtering.
//
// InterceptAccept is called as soon as a transport listener receives an
// inbound connection request, before any upgrade takes place. Transports who
// accept already secure and/or multiplexed connections (e.g. possibly QUIC)
// MUST call this method regardless, for correctness/consistency.
//
// InterceptSecured is called for both inbound and outbound connections,
// after a security handshake has taken place and we've authenticated the peer.
//
// InterceptUpgraded is called for inbound and outbound connections, after
// libp2p has finished upgrading the connection entirely to a secure,
// multiplexed channel.
//
// This interface can be used to implement *strict/active* connection management
// policies, such as hard limiting of connections once a maximum count has been
// reached, maintaining a peer blacklist, or limiting connections by transport
// quotas.
//
// EXPERIMENTAL: a DISCONNECT protocol/message will be supported in the future.
// This allows gaters and other components to communicate the intention behind
// a connection closure, to curtail potential reconnection attempts.
//
// For now, InterceptUpgraded can return a non-zero DisconnectReason when
// blocking a connection, but this interface is likely to change in the future
// as we solidify this feature. The reason why only this method can handle
// DisconnectReasons is that we require stream multiplexing capability to open a
// control protocol stream to transmit the message.
type ConnectionGater interface {
	// InterceptPeerDial tests whether we're permitted to Dial the specified peer.
	//
	// This is called by the network.Network implementation when dialling a peer.
	InterceptPeerDial(p peer.ID) (allow bool)

	// InterceptAddrDial tests whether we're permitted to dial the specified
	// multiaddr for the given peer.
	//
	// This is called by the network.Network implementation after it has
	// resolved the peer's addrs, and prior to dialling each.
	InterceptAddrDial(peer.ID, ma.Multiaddr) (allow bool)

	// InterceptAccept tests whether an incipient inbound connection is allowed.
	//
	// This is called by the upgrader, or by the transport directly (e.g. QUIC,
	// Bluetooth), straight after it has accepted a connection from its socket.
	InterceptAccept(network.ConnMultiaddrs) (allow bool)

	// InterceptSecured tests whether a given connection, now authenticated,
	// is allowed.
	//
	// This is called by the upgrader, after it has performed the security
	// handshake, and before it negotiates the muxer, or directly by the
	// transport, at the exact same checkpoint.
	InterceptSecured(network.Direction, peer.ID, network.ConnMultiaddrs) (allow bool)

	// InterceptUpgraded tests whether a fully capable connection is allowed.
	//
	// At this point, a multiplexer has been selected for the connection.
	// When rejecting a connection, the gater can return a DisconnectReason.
	// Refer to the godoc on the ConnectionGater type for more information.
	//
	// NOTE: the go-libp2p implementation currently IGNORES the disconnect reason.
	InterceptUpgraded(network.Conn) (allow bool, reason control.DisconnectReason)
}
|
||||
91
vendor/github.com/libp2p/go-libp2p/core/connmgr/manager.go
generated
vendored
Normal file
91
vendor/github.com/libp2p/go-libp2p/core/connmgr/manager.go
generated
vendored
Normal file
@@ -0,0 +1,91 @@
|
||||
// Package connmgr provides connection tracking and management interfaces for libp2p.
|
||||
//
|
||||
// The ConnManager interface exported from this package allows libp2p to enforce an
|
||||
// upper bound on the total number of open connections. To avoid service disruptions,
|
||||
// connections can be tagged with metadata and optionally "protected" to ensure that
|
||||
// essential connections are not arbitrarily cut.
|
||||
package connmgr
|
||||
|
||||
import (
|
||||
"context"
|
||||
"time"
|
||||
|
||||
"github.com/libp2p/go-libp2p/core/network"
|
||||
"github.com/libp2p/go-libp2p/core/peer"
|
||||
)
|
||||
|
||||
// SupportsDecay evaluates if the provided ConnManager supports decay, and if
|
||||
// so, it returns the Decayer object. Refer to godocs on Decayer for more info.
|
||||
func SupportsDecay(mgr ConnManager) (Decayer, bool) {
|
||||
d, ok := mgr.(Decayer)
|
||||
return d, ok
|
||||
}
|
||||
|
||||
// ConnManager tracks connections to peers, and allows consumers to associate
// metadata with each peer.
//
// It enables connections to be trimmed based on implementation-defined
// heuristics. The ConnManager allows libp2p to enforce an upper bound on the
// total number of open connections.
//
// ConnManagers supporting decaying tags implement Decayer. Use the
// SupportsDecay function to safely cast an instance to Decayer, if supported.
type ConnManager interface {
	// TagPeer tags a peer with a string, associating a weight with the tag.
	TagPeer(peer.ID, string, int)

	// UntagPeer removes the tagged value from the peer.
	UntagPeer(p peer.ID, tag string)

	// UpsertTag updates an existing tag or inserts a new one.
	//
	// The connection manager calls the upsert function supplying the current
	// value of the tag (or zero if nonexistent). The return value is used as
	// the new value of the tag.
	UpsertTag(p peer.ID, tag string, upsert func(int) int)

	// GetTagInfo returns the metadata associated with the peer,
	// or nil if no metadata has been recorded for the peer.
	GetTagInfo(p peer.ID) *TagInfo

	// TrimOpenConns terminates open connections based on an implementation-defined
	// heuristic.
	TrimOpenConns(ctx context.Context)

	// Notifee returns an implementation that can be called back to inform of
	// opened and closed connections.
	Notifee() network.Notifiee

	// Protect protects a peer from having its connection(s) pruned.
	//
	// Tagging allows different parts of the system to manage protections without interfering with one another.
	//
	// Calls to Protect() with the same tag are idempotent. They are not refcounted, so after multiple calls
	// to Protect() with the same tag, a single Unprotect() call bearing the same tag will revoke the protection.
	Protect(id peer.ID, tag string)

	// Unprotect removes a protection that may have been placed on a peer, under the specified tag.
	//
	// The return value indicates whether the peer continues to be protected after this call, by way of a different tag.
	// See notes on Protect() for more info.
	Unprotect(id peer.ID, tag string) (protected bool)

	// IsProtected returns true if the peer is protected for some tag; if the tag is the empty string
	// then it will return true if the peer is protected for any tag.
	IsProtected(id peer.ID, tag string) (protected bool)

	// Close closes the connection manager and stops background processes.
	Close() error
}
|
||||
|
||||
// TagInfo stores metadata associated with a peer.
type TagInfo struct {
	// FirstSeen records when this peer's metadata was first created.
	FirstSeen time.Time
	// Value is the aggregate tag value for the peer.
	Value int

	// Tags maps tag ids to the numerical values.
	Tags map[string]int

	// Conns maps connection ids (such as remote multiaddr) to their creation time.
	Conns map[string]time.Time
}
|
||||
24
vendor/github.com/libp2p/go-libp2p/core/connmgr/null.go
generated
vendored
Normal file
24
vendor/github.com/libp2p/go-libp2p/core/connmgr/null.go
generated
vendored
Normal file
@@ -0,0 +1,24 @@
|
||||
package connmgr
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/libp2p/go-libp2p/core/network"
|
||||
"github.com/libp2p/go-libp2p/core/peer"
|
||||
)
|
||||
|
||||
// NullConnMgr is a ConnMgr that provides no functionality.
// All mutating methods are no-ops and queries return zero values; use it
// where a ConnManager is required but connection management is unwanted.
type NullConnMgr struct{}

var _ ConnManager = (*NullConnMgr)(nil)

func (NullConnMgr) TagPeer(peer.ID, string, int)             {}
func (NullConnMgr) UntagPeer(peer.ID, string)                {}
func (NullConnMgr) UpsertTag(peer.ID, string, func(int) int) {}

// GetTagInfo returns an empty TagInfo, never nil — callers checking for
// nil (as the ConnManager godoc suggests) will always see "metadata".
func (NullConnMgr) GetTagInfo(peer.ID) *TagInfo { return &TagInfo{} }

func (NullConnMgr) TrimOpenConns(ctx context.Context) {}
func (NullConnMgr) Notifee() network.Notifiee         { return network.GlobalNoopNotifiee }
func (NullConnMgr) Protect(peer.ID, string)           {}
func (NullConnMgr) Unprotect(peer.ID, string) bool    { return false }
func (NullConnMgr) IsProtected(peer.ID, string) bool  { return false }
func (NullConnMgr) Close() error                      { return nil }
|
||||
67
vendor/github.com/libp2p/go-libp2p/core/connmgr/presets.go
generated
vendored
Normal file
67
vendor/github.com/libp2p/go-libp2p/core/connmgr/presets.go
generated
vendored
Normal file
@@ -0,0 +1,67 @@
|
||||
package connmgr
|
||||
|
||||
import (
|
||||
"math"
|
||||
"time"
|
||||
)
|
||||
|
||||
// DecayNone applies no decay.
|
||||
func DecayNone() DecayFn {
|
||||
return func(value DecayingValue) (_ int, rm bool) {
|
||||
return value.Value, false
|
||||
}
|
||||
}
|
||||
|
||||
// DecayFixed subtracts from by the provided minuend, and deletes the tag when
|
||||
// first reaching 0 or negative.
|
||||
func DecayFixed(minuend int) DecayFn {
|
||||
return func(value DecayingValue) (_ int, rm bool) {
|
||||
v := value.Value - minuend
|
||||
return v, v <= 0
|
||||
}
|
||||
}
|
||||
|
||||
// DecayLinear applies a fractional coefficient to the value of the current tag,
|
||||
// rounding down via math.Floor. It erases the tag when the result is zero.
|
||||
func DecayLinear(coef float64) DecayFn {
|
||||
return func(value DecayingValue) (after int, rm bool) {
|
||||
v := math.Floor(float64(value.Value) * coef)
|
||||
return int(v), v <= 0
|
||||
}
|
||||
}
|
||||
|
||||
// DecayExpireWhenInactive expires a tag after a certain period of no bumps.
|
||||
func DecayExpireWhenInactive(after time.Duration) DecayFn {
|
||||
return func(value DecayingValue) (_ int, rm bool) {
|
||||
rm = time.Until(value.LastVisit) >= after
|
||||
return 0, rm
|
||||
}
|
||||
}
|
||||
|
||||
// BumpSumUnbounded adds the incoming value to the peer's score.
|
||||
func BumpSumUnbounded() BumpFn {
|
||||
return func(value DecayingValue, delta int) (after int) {
|
||||
return value.Value + delta
|
||||
}
|
||||
}
|
||||
|
||||
// BumpSumBounded keeps summing the incoming score, keeping it within a
|
||||
// [min, max] range.
|
||||
func BumpSumBounded(min, max int) BumpFn {
|
||||
return func(value DecayingValue, delta int) (after int) {
|
||||
v := value.Value + delta
|
||||
if v >= max {
|
||||
return max
|
||||
} else if v <= min {
|
||||
return min
|
||||
}
|
||||
return v
|
||||
}
|
||||
}
|
||||
|
||||
// BumpOverwrite replaces the current value of the tag with the incoming one.
|
||||
func BumpOverwrite() BumpFn {
|
||||
return func(value DecayingValue, delta int) (after int) {
|
||||
return delta
|
||||
}
|
||||
}
|
||||
9
vendor/github.com/libp2p/go-libp2p/core/control/disconnect.go
generated
vendored
Normal file
9
vendor/github.com/libp2p/go-libp2p/core/control/disconnect.go
generated
vendored
Normal file
@@ -0,0 +1,9 @@
|
||||
package control
|
||||
|
||||
// DisconnectReason communicates the reason why a connection is being closed.
|
||||
//
|
||||
// A zero value stands for "no reason" / NA.
|
||||
//
|
||||
// This is an EXPERIMENTAL type. It will change in the future. Refer to the
|
||||
// connmgr.ConnectionGater godoc for more info.
|
||||
type DisconnectReason int
|
||||
187
vendor/github.com/libp2p/go-libp2p/core/crypto/ecdsa.go
generated
vendored
Normal file
187
vendor/github.com/libp2p/go-libp2p/core/crypto/ecdsa.go
generated
vendored
Normal file
@@ -0,0 +1,187 @@
|
||||
package crypto
|
||||
|
||||
import (
|
||||
"crypto/ecdsa"
|
||||
"crypto/elliptic"
|
||||
"crypto/rand"
|
||||
"crypto/x509"
|
||||
"encoding/asn1"
|
||||
"errors"
|
||||
"io"
|
||||
"math/big"
|
||||
|
||||
pb "github.com/libp2p/go-libp2p/core/crypto/pb"
|
||||
"github.com/libp2p/go-libp2p/core/internal/catch"
|
||||
|
||||
"github.com/minio/sha256-simd"
|
||||
)
|
||||
|
||||
// ECDSAPrivateKey is an implementation of an ECDSA private key
type ECDSAPrivateKey struct {
	// priv is the wrapped stdlib key.
	priv *ecdsa.PrivateKey
}
|
||||
|
||||
// ECDSAPublicKey is an implementation of an ECDSA public key
type ECDSAPublicKey struct {
	// pub is the wrapped stdlib key.
	pub *ecdsa.PublicKey
}
|
||||
|
||||
// ECDSASig holds the r and s values of an ECDSA signature
type ECDSASig struct {
	R, S *big.Int
}
|
||||
|
||||
var (
	// ErrNotECDSAPubKey is returned when the public key passed is not an ecdsa public key
	ErrNotECDSAPubKey = errors.New("not an ecdsa public key")
	// ErrNilSig is returned when the signature is nil
	ErrNilSig = errors.New("sig is nil")
	// ErrNilPrivateKey is returned when a nil private key is provided
	ErrNilPrivateKey = errors.New("private key is nil")
	// ErrNilPublicKey is returned when a nil public key is provided
	ErrNilPublicKey = errors.New("public key is nil")
	// ECDSACurve is the default ecdsa curve used (P-256). Note that this is
	// package-level mutable state.
	ECDSACurve = elliptic.P256()
)
|
||||
|
||||
// GenerateECDSAKeyPair generates a new ecdsa private and public key
// on the default curve (ECDSACurve), reading randomness from src.
func GenerateECDSAKeyPair(src io.Reader) (PrivKey, PubKey, error) {
	return GenerateECDSAKeyPairWithCurve(ECDSACurve, src)
}
|
||||
|
||||
// GenerateECDSAKeyPairWithCurve generates a new ecdsa private and public key with a specified curve
|
||||
func GenerateECDSAKeyPairWithCurve(curve elliptic.Curve, src io.Reader) (PrivKey, PubKey, error) {
|
||||
priv, err := ecdsa.GenerateKey(curve, src)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
return &ECDSAPrivateKey{priv}, &ECDSAPublicKey{&priv.PublicKey}, nil
|
||||
}
|
||||
|
||||
// ECDSAKeyPairFromKey generates a new ecdsa private and public key from an input private key
|
||||
func ECDSAKeyPairFromKey(priv *ecdsa.PrivateKey) (PrivKey, PubKey, error) {
|
||||
if priv == nil {
|
||||
return nil, nil, ErrNilPrivateKey
|
||||
}
|
||||
|
||||
return &ECDSAPrivateKey{priv}, &ECDSAPublicKey{&priv.PublicKey}, nil
|
||||
}
|
||||
|
||||
// ECDSAPublicKeyFromPubKey generates a new ecdsa public key from an input public key
|
||||
func ECDSAPublicKeyFromPubKey(pub ecdsa.PublicKey) (PubKey, error) {
|
||||
return &ECDSAPublicKey{pub: &pub}, nil
|
||||
}
|
||||
|
||||
// MarshalECDSAPrivateKey returns x509 bytes from a private key
|
||||
func MarshalECDSAPrivateKey(ePriv ECDSAPrivateKey) (res []byte, err error) {
|
||||
defer func() { catch.HandlePanic(recover(), &err, "ECDSA private-key marshal") }()
|
||||
return x509.MarshalECPrivateKey(ePriv.priv)
|
||||
}
|
||||
|
||||
// MarshalECDSAPublicKey returns x509 bytes from a public key
|
||||
func MarshalECDSAPublicKey(ePub ECDSAPublicKey) (res []byte, err error) {
|
||||
defer func() { catch.HandlePanic(recover(), &err, "ECDSA public-key marshal") }()
|
||||
return x509.MarshalPKIXPublicKey(ePub.pub)
|
||||
}
|
||||
|
||||
// UnmarshalECDSAPrivateKey returns a private key from x509 bytes
|
||||
func UnmarshalECDSAPrivateKey(data []byte) (res PrivKey, err error) {
|
||||
defer func() { catch.HandlePanic(recover(), &err, "ECDSA private-key unmarshal") }()
|
||||
|
||||
priv, err := x509.ParseECPrivateKey(data)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &ECDSAPrivateKey{priv}, nil
|
||||
}
|
||||
|
||||
// UnmarshalECDSAPublicKey returns the public key from x509 bytes
|
||||
func UnmarshalECDSAPublicKey(data []byte) (key PubKey, err error) {
|
||||
defer func() { catch.HandlePanic(recover(), &err, "ECDSA public-key unmarshal") }()
|
||||
|
||||
pubIfc, err := x509.ParsePKIXPublicKey(data)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
pub, ok := pubIfc.(*ecdsa.PublicKey)
|
||||
if !ok {
|
||||
return nil, ErrNotECDSAPubKey
|
||||
}
|
||||
|
||||
return &ECDSAPublicKey{pub}, nil
|
||||
}
|
||||
|
||||
// Type returns the key type
|
||||
func (ePriv *ECDSAPrivateKey) Type() pb.KeyType {
|
||||
return pb.KeyType_ECDSA
|
||||
}
|
||||
|
||||
// Raw returns x509 bytes from a private key
|
||||
func (ePriv *ECDSAPrivateKey) Raw() (res []byte, err error) {
|
||||
defer func() { catch.HandlePanic(recover(), &err, "ECDSA private-key marshal") }()
|
||||
return x509.MarshalECPrivateKey(ePriv.priv)
|
||||
}
|
||||
|
||||
// Equals compares two private keys
|
||||
func (ePriv *ECDSAPrivateKey) Equals(o Key) bool {
|
||||
return basicEquals(ePriv, o)
|
||||
}
|
||||
|
||||
// Sign returns the signature of the input data
|
||||
func (ePriv *ECDSAPrivateKey) Sign(data []byte) (sig []byte, err error) {
|
||||
defer func() { catch.HandlePanic(recover(), &err, "ECDSA signing") }()
|
||||
hash := sha256.Sum256(data)
|
||||
r, s, err := ecdsa.Sign(rand.Reader, ePriv.priv, hash[:])
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return asn1.Marshal(ECDSASig{
|
||||
R: r,
|
||||
S: s,
|
||||
})
|
||||
}
|
||||
|
||||
// GetPublic returns a public key
|
||||
func (ePriv *ECDSAPrivateKey) GetPublic() PubKey {
|
||||
return &ECDSAPublicKey{&ePriv.priv.PublicKey}
|
||||
}
|
||||
|
||||
// Type returns the key type
|
||||
func (ePub *ECDSAPublicKey) Type() pb.KeyType {
|
||||
return pb.KeyType_ECDSA
|
||||
}
|
||||
|
||||
// Raw returns x509 bytes from a public key
|
||||
func (ePub *ECDSAPublicKey) Raw() ([]byte, error) {
|
||||
return x509.MarshalPKIXPublicKey(ePub.pub)
|
||||
}
|
||||
|
||||
// Equals compares to public keys
|
||||
func (ePub *ECDSAPublicKey) Equals(o Key) bool {
|
||||
return basicEquals(ePub, o)
|
||||
}
|
||||
|
||||
// Verify compares data to a signature
|
||||
func (ePub *ECDSAPublicKey) Verify(data, sigBytes []byte) (success bool, err error) {
|
||||
defer func() {
|
||||
catch.HandlePanic(recover(), &err, "ECDSA signature verification")
|
||||
|
||||
// Just to be extra paranoid.
|
||||
if err != nil {
|
||||
success = false
|
||||
}
|
||||
}()
|
||||
|
||||
sig := new(ECDSASig)
|
||||
if _, err := asn1.Unmarshal(sigBytes, sig); err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
hash := sha256.Sum256(data)
|
||||
|
||||
return ecdsa.Verify(ePub.pub, hash[:], sig.R, sig.S), nil
|
||||
}
|
||||
156
vendor/github.com/libp2p/go-libp2p/core/crypto/ed25519.go
generated
vendored
Normal file
156
vendor/github.com/libp2p/go-libp2p/core/crypto/ed25519.go
generated
vendored
Normal file
@@ -0,0 +1,156 @@
|
||||
package crypto
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"crypto/ed25519"
|
||||
"crypto/subtle"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
|
||||
pb "github.com/libp2p/go-libp2p/core/crypto/pb"
|
||||
"github.com/libp2p/go-libp2p/core/internal/catch"
|
||||
)
|
||||
|
||||
// Ed25519PrivateKey is an ed25519 private key.
type Ed25519PrivateKey struct {
	k ed25519.PrivateKey
}

// Ed25519PublicKey is an ed25519 public key.
type Ed25519PublicKey struct {
	k ed25519.PublicKey
}
|
||||
|
||||
// GenerateEd25519Key generates a new ed25519 private and public key pair.
|
||||
func GenerateEd25519Key(src io.Reader) (PrivKey, PubKey, error) {
|
||||
pub, priv, err := ed25519.GenerateKey(src)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
return &Ed25519PrivateKey{
|
||||
k: priv,
|
||||
},
|
||||
&Ed25519PublicKey{
|
||||
k: pub,
|
||||
},
|
||||
nil
|
||||
}
|
||||
|
||||
// Type of the private key (Ed25519).
|
||||
func (k *Ed25519PrivateKey) Type() pb.KeyType {
|
||||
return pb.KeyType_Ed25519
|
||||
}
|
||||
|
||||
// Raw private key bytes.
|
||||
func (k *Ed25519PrivateKey) Raw() ([]byte, error) {
|
||||
// The Ed25519 private key contains two 32-bytes curve points, the private
|
||||
// key and the public key.
|
||||
// It makes it more efficient to get the public key without re-computing an
|
||||
// elliptic curve multiplication.
|
||||
buf := make([]byte, len(k.k))
|
||||
copy(buf, k.k)
|
||||
|
||||
return buf, nil
|
||||
}
|
||||
|
||||
func (k *Ed25519PrivateKey) pubKeyBytes() []byte {
|
||||
return k.k[ed25519.PrivateKeySize-ed25519.PublicKeySize:]
|
||||
}
|
||||
|
||||
// Equals compares two ed25519 private keys.
|
||||
func (k *Ed25519PrivateKey) Equals(o Key) bool {
|
||||
edk, ok := o.(*Ed25519PrivateKey)
|
||||
if !ok {
|
||||
return basicEquals(k, o)
|
||||
}
|
||||
|
||||
return subtle.ConstantTimeCompare(k.k, edk.k) == 1
|
||||
}
|
||||
|
||||
// GetPublic returns an ed25519 public key from a private key.
|
||||
func (k *Ed25519PrivateKey) GetPublic() PubKey {
|
||||
return &Ed25519PublicKey{k: k.pubKeyBytes()}
|
||||
}
|
||||
|
||||
// Sign returns a signature from an input message.
|
||||
func (k *Ed25519PrivateKey) Sign(msg []byte) (res []byte, err error) {
|
||||
defer func() { catch.HandlePanic(recover(), &err, "ed15519 signing") }()
|
||||
|
||||
return ed25519.Sign(k.k, msg), nil
|
||||
}
|
||||
|
||||
// Type of the public key (Ed25519).
|
||||
func (k *Ed25519PublicKey) Type() pb.KeyType {
|
||||
return pb.KeyType_Ed25519
|
||||
}
|
||||
|
||||
// Raw public key bytes.
|
||||
func (k *Ed25519PublicKey) Raw() ([]byte, error) {
|
||||
return k.k, nil
|
||||
}
|
||||
|
||||
// Equals compares two ed25519 public keys.
|
||||
func (k *Ed25519PublicKey) Equals(o Key) bool {
|
||||
edk, ok := o.(*Ed25519PublicKey)
|
||||
if !ok {
|
||||
return basicEquals(k, o)
|
||||
}
|
||||
|
||||
return bytes.Equal(k.k, edk.k)
|
||||
}
|
||||
|
||||
// Verify checks a signature against the input data.
|
||||
func (k *Ed25519PublicKey) Verify(data []byte, sig []byte) (success bool, err error) {
|
||||
defer func() {
|
||||
catch.HandlePanic(recover(), &err, "ed15519 signature verification")
|
||||
|
||||
// To be safe.
|
||||
if err != nil {
|
||||
success = false
|
||||
}
|
||||
}()
|
||||
return ed25519.Verify(k.k, data, sig), nil
|
||||
}
|
||||
|
||||
// UnmarshalEd25519PublicKey returns a public key from input bytes.
|
||||
func UnmarshalEd25519PublicKey(data []byte) (PubKey, error) {
|
||||
if len(data) != 32 {
|
||||
return nil, errors.New("expect ed25519 public key data size to be 32")
|
||||
}
|
||||
|
||||
return &Ed25519PublicKey{
|
||||
k: ed25519.PublicKey(data),
|
||||
}, nil
|
||||
}
|
||||
|
||||
// UnmarshalEd25519PrivateKey returns a private key from input bytes.
|
||||
func UnmarshalEd25519PrivateKey(data []byte) (PrivKey, error) {
|
||||
switch len(data) {
|
||||
case ed25519.PrivateKeySize + ed25519.PublicKeySize:
|
||||
// Remove the redundant public key. See issue #36.
|
||||
redundantPk := data[ed25519.PrivateKeySize:]
|
||||
pk := data[ed25519.PrivateKeySize-ed25519.PublicKeySize : ed25519.PrivateKeySize]
|
||||
if subtle.ConstantTimeCompare(pk, redundantPk) == 0 {
|
||||
return nil, errors.New("expected redundant ed25519 public key to be redundant")
|
||||
}
|
||||
|
||||
// No point in storing the extra data.
|
||||
newKey := make([]byte, ed25519.PrivateKeySize)
|
||||
copy(newKey, data[:ed25519.PrivateKeySize])
|
||||
data = newKey
|
||||
case ed25519.PrivateKeySize:
|
||||
default:
|
||||
return nil, fmt.Errorf(
|
||||
"expected ed25519 data size to be %d or %d, got %d",
|
||||
ed25519.PrivateKeySize,
|
||||
ed25519.PrivateKeySize+ed25519.PublicKeySize,
|
||||
len(data),
|
||||
)
|
||||
}
|
||||
|
||||
return &Ed25519PrivateKey{
|
||||
k: ed25519.PrivateKey(data),
|
||||
}, nil
|
||||
}
|
||||
291
vendor/github.com/libp2p/go-libp2p/core/crypto/key.go
generated
vendored
Normal file
291
vendor/github.com/libp2p/go-libp2p/core/crypto/key.go
generated
vendored
Normal file
@@ -0,0 +1,291 @@
|
||||
// Package crypto implements various cryptographic utilities used by libp2p.
|
||||
// This includes a Public and Private key interface and key implementations
|
||||
// for supported key algorithms.
|
||||
package crypto
|
||||
|
||||
import (
|
||||
"crypto/elliptic"
|
||||
"crypto/rand"
|
||||
"crypto/subtle"
|
||||
"encoding/base64"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
|
||||
"github.com/libp2p/go-libp2p/core/crypto/pb"
|
||||
|
||||
"google.golang.org/protobuf/proto"
|
||||
)
|
||||
|
||||
//go:generate protoc --go_out=. --go_opt=Mpb/crypto.proto=./pb pb/crypto.proto
|
||||
|
||||
const (
|
||||
// RSA is an enum for the supported RSA key type
|
||||
RSA = iota
|
||||
// Ed25519 is an enum for the supported Ed25519 key type
|
||||
Ed25519
|
||||
// Secp256k1 is an enum for the supported Secp256k1 key type
|
||||
Secp256k1
|
||||
// ECDSA is an enum for the supported ECDSA key type
|
||||
ECDSA
|
||||
)
|
||||
|
||||
var (
|
||||
// ErrBadKeyType is returned when a key is not supported
|
||||
ErrBadKeyType = errors.New("invalid or unsupported key type")
|
||||
// KeyTypes is a list of supported keys
|
||||
KeyTypes = []int{
|
||||
RSA,
|
||||
Ed25519,
|
||||
Secp256k1,
|
||||
ECDSA,
|
||||
}
|
||||
)
|
||||
|
||||
// PubKeyUnmarshaller is a func that creates a PubKey from a given slice of bytes
|
||||
type PubKeyUnmarshaller func(data []byte) (PubKey, error)
|
||||
|
||||
// PrivKeyUnmarshaller is a func that creates a PrivKey from a given slice of bytes
|
||||
type PrivKeyUnmarshaller func(data []byte) (PrivKey, error)
|
||||
|
||||
// PubKeyUnmarshallers is a map of unmarshallers by key type
|
||||
var PubKeyUnmarshallers = map[pb.KeyType]PubKeyUnmarshaller{
|
||||
pb.KeyType_RSA: UnmarshalRsaPublicKey,
|
||||
pb.KeyType_Ed25519: UnmarshalEd25519PublicKey,
|
||||
pb.KeyType_Secp256k1: UnmarshalSecp256k1PublicKey,
|
||||
pb.KeyType_ECDSA: UnmarshalECDSAPublicKey,
|
||||
}
|
||||
|
||||
// PrivKeyUnmarshallers is a map of unmarshallers by key type
|
||||
var PrivKeyUnmarshallers = map[pb.KeyType]PrivKeyUnmarshaller{
|
||||
pb.KeyType_RSA: UnmarshalRsaPrivateKey,
|
||||
pb.KeyType_Ed25519: UnmarshalEd25519PrivateKey,
|
||||
pb.KeyType_Secp256k1: UnmarshalSecp256k1PrivateKey,
|
||||
pb.KeyType_ECDSA: UnmarshalECDSAPrivateKey,
|
||||
}
|
||||
|
||||
// Key represents a crypto key that can be compared to another key
|
||||
type Key interface {
|
||||
// Equals checks whether two PubKeys are the same
|
||||
Equals(Key) bool
|
||||
|
||||
// Raw returns the raw bytes of the key (not wrapped in the
|
||||
// libp2p-crypto protobuf).
|
||||
//
|
||||
// This function is the inverse of {Priv,Pub}KeyUnmarshaler.
|
||||
Raw() ([]byte, error)
|
||||
|
||||
// Type returns the protobuf key type.
|
||||
Type() pb.KeyType
|
||||
}
|
||||
|
||||
// PrivKey represents a private key that can be used to generate a public key and sign data
|
||||
type PrivKey interface {
|
||||
Key
|
||||
|
||||
// Cryptographically sign the given bytes
|
||||
Sign([]byte) ([]byte, error)
|
||||
|
||||
// Return a public key paired with this private key
|
||||
GetPublic() PubKey
|
||||
}
|
||||
|
||||
// PubKey is a public key that can be used to verify data signed with the corresponding private key
|
||||
type PubKey interface {
|
||||
Key
|
||||
|
||||
// Verify that 'sig' is the signed hash of 'data'
|
||||
Verify(data []byte, sig []byte) (bool, error)
|
||||
}
|
||||
|
||||
// GenSharedKey generates the shared key from a given private key
|
||||
type GenSharedKey func([]byte) ([]byte, error)
|
||||
|
||||
// GenerateKeyPair generates a private and public key
|
||||
func GenerateKeyPair(typ, bits int) (PrivKey, PubKey, error) {
|
||||
return GenerateKeyPairWithReader(typ, bits, rand.Reader)
|
||||
}
|
||||
|
||||
// GenerateKeyPairWithReader returns a keypair of the given type and bit-size
|
||||
func GenerateKeyPairWithReader(typ, bits int, src io.Reader) (PrivKey, PubKey, error) {
|
||||
switch typ {
|
||||
case RSA:
|
||||
return GenerateRSAKeyPair(bits, src)
|
||||
case Ed25519:
|
||||
return GenerateEd25519Key(src)
|
||||
case Secp256k1:
|
||||
return GenerateSecp256k1Key(src)
|
||||
case ECDSA:
|
||||
return GenerateECDSAKeyPair(src)
|
||||
default:
|
||||
return nil, nil, ErrBadKeyType
|
||||
}
|
||||
}
|
||||
|
||||
// GenerateEKeyPair returns an ephemeral public key and returns a function that will compute
|
||||
// the shared secret key. Used in the identify module.
|
||||
//
|
||||
// Focuses only on ECDH now, but can be made more general in the future.
|
||||
func GenerateEKeyPair(curveName string) ([]byte, GenSharedKey, error) {
|
||||
var curve elliptic.Curve
|
||||
|
||||
switch curveName {
|
||||
case "P-256":
|
||||
curve = elliptic.P256()
|
||||
case "P-384":
|
||||
curve = elliptic.P384()
|
||||
case "P-521":
|
||||
curve = elliptic.P521()
|
||||
default:
|
||||
return nil, nil, fmt.Errorf("unknown curve name")
|
||||
}
|
||||
|
||||
priv, x, y, err := elliptic.GenerateKey(curve, rand.Reader)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
pubKey := elliptic.Marshal(curve, x, y)
|
||||
|
||||
done := func(theirPub []byte) ([]byte, error) {
|
||||
// Verify and unpack node's public key.
|
||||
x, y := elliptic.Unmarshal(curve, theirPub)
|
||||
if x == nil {
|
||||
return nil, fmt.Errorf("malformed public key: %d %v", len(theirPub), theirPub)
|
||||
}
|
||||
|
||||
if !curve.IsOnCurve(x, y) {
|
||||
return nil, errors.New("invalid public key")
|
||||
}
|
||||
|
||||
// Generate shared secret.
|
||||
secret, _ := curve.ScalarMult(x, y, priv)
|
||||
|
||||
return secret.Bytes(), nil
|
||||
}
|
||||
|
||||
return pubKey, done, nil
|
||||
}
|
||||
|
||||
// UnmarshalPublicKey converts a protobuf serialized public key into its
|
||||
// representative object
|
||||
func UnmarshalPublicKey(data []byte) (PubKey, error) {
|
||||
pmes := new(pb.PublicKey)
|
||||
err := proto.Unmarshal(data, pmes)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return PublicKeyFromProto(pmes)
|
||||
}
|
||||
|
||||
// PublicKeyFromProto converts an unserialized protobuf PublicKey message
|
||||
// into its representative object.
|
||||
func PublicKeyFromProto(pmes *pb.PublicKey) (PubKey, error) {
|
||||
um, ok := PubKeyUnmarshallers[pmes.GetType()]
|
||||
if !ok {
|
||||
return nil, ErrBadKeyType
|
||||
}
|
||||
|
||||
data := pmes.GetData()
|
||||
|
||||
pk, err := um(data)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
switch tpk := pk.(type) {
|
||||
case *RsaPublicKey:
|
||||
tpk.cached, _ = proto.Marshal(pmes)
|
||||
}
|
||||
|
||||
return pk, nil
|
||||
}
|
||||
|
||||
// MarshalPublicKey converts a public key object into a protobuf serialized
|
||||
// public key
|
||||
func MarshalPublicKey(k PubKey) ([]byte, error) {
|
||||
pbmes, err := PublicKeyToProto(k)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return proto.Marshal(pbmes)
|
||||
}
|
||||
|
||||
// PublicKeyToProto converts a public key object into an unserialized
|
||||
// protobuf PublicKey message.
|
||||
func PublicKeyToProto(k PubKey) (*pb.PublicKey, error) {
|
||||
data, err := k.Raw()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &pb.PublicKey{
|
||||
Type: k.Type().Enum(),
|
||||
Data: data,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// UnmarshalPrivateKey converts a protobuf serialized private key into its
|
||||
// representative object
|
||||
func UnmarshalPrivateKey(data []byte) (PrivKey, error) {
|
||||
pmes := new(pb.PrivateKey)
|
||||
err := proto.Unmarshal(data, pmes)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
um, ok := PrivKeyUnmarshallers[pmes.GetType()]
|
||||
if !ok {
|
||||
return nil, ErrBadKeyType
|
||||
}
|
||||
|
||||
return um(pmes.GetData())
|
||||
}
|
||||
|
||||
// MarshalPrivateKey converts a key object into its protobuf serialized form.
|
||||
func MarshalPrivateKey(k PrivKey) ([]byte, error) {
|
||||
data, err := k.Raw()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return proto.Marshal(&pb.PrivateKey{
|
||||
Type: k.Type().Enum(),
|
||||
Data: data,
|
||||
})
|
||||
}
|
||||
|
||||
// ConfigDecodeKey decodes from b64 (for config file) to a byte array that can be unmarshalled.
func ConfigDecodeKey(b string) ([]byte, error) {
	return base64.StdEncoding.DecodeString(b)
}

// ConfigEncodeKey encodes a marshalled key to b64 (for config file).
func ConfigEncodeKey(b []byte) string {
	return base64.StdEncoding.EncodeToString(b)
}
|
||||
|
||||
// KeyEqual checks whether two Keys are equivalent (have identical byte representations).
|
||||
func KeyEqual(k1, k2 Key) bool {
|
||||
if k1 == k2 {
|
||||
return true
|
||||
}
|
||||
|
||||
return k1.Equals(k2)
|
||||
}
|
||||
|
||||
func basicEquals(k1, k2 Key) bool {
|
||||
if k1.Type() != k2.Type() {
|
||||
return false
|
||||
}
|
||||
|
||||
a, err := k1.Raw()
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
b, err := k2.Raw()
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
return subtle.ConstantTimeCompare(a, b) == 1
|
||||
}
|
||||
78
vendor/github.com/libp2p/go-libp2p/core/crypto/key_to_stdlib.go
generated
vendored
Normal file
78
vendor/github.com/libp2p/go-libp2p/core/crypto/key_to_stdlib.go
generated
vendored
Normal file
@@ -0,0 +1,78 @@
|
||||
package crypto
|
||||
|
||||
import (
|
||||
"crypto"
|
||||
"crypto/ecdsa"
|
||||
"crypto/ed25519"
|
||||
"crypto/rsa"
|
||||
|
||||
"github.com/decred/dcrd/dcrec/secp256k1/v4"
|
||||
)
|
||||
|
||||
// KeyPairFromStdKey wraps standard library (and secp256k1) private keys in libp2p/go-libp2p/core/crypto keys
|
||||
func KeyPairFromStdKey(priv crypto.PrivateKey) (PrivKey, PubKey, error) {
|
||||
if priv == nil {
|
||||
return nil, nil, ErrNilPrivateKey
|
||||
}
|
||||
|
||||
switch p := priv.(type) {
|
||||
case *rsa.PrivateKey:
|
||||
return &RsaPrivateKey{*p}, &RsaPublicKey{k: p.PublicKey}, nil
|
||||
|
||||
case *ecdsa.PrivateKey:
|
||||
return &ECDSAPrivateKey{p}, &ECDSAPublicKey{&p.PublicKey}, nil
|
||||
|
||||
case *ed25519.PrivateKey:
|
||||
pubIfc := p.Public()
|
||||
pub, _ := pubIfc.(ed25519.PublicKey)
|
||||
return &Ed25519PrivateKey{*p}, &Ed25519PublicKey{pub}, nil
|
||||
|
||||
case *secp256k1.PrivateKey:
|
||||
sPriv := Secp256k1PrivateKey(*p)
|
||||
sPub := Secp256k1PublicKey(*p.PubKey())
|
||||
return &sPriv, &sPub, nil
|
||||
|
||||
default:
|
||||
return nil, nil, ErrBadKeyType
|
||||
}
|
||||
}
|
||||
|
||||
// PrivKeyToStdKey converts libp2p/go-libp2p/core/crypto private keys to standard library (and secp256k1) private keys
|
||||
func PrivKeyToStdKey(priv PrivKey) (crypto.PrivateKey, error) {
|
||||
if priv == nil {
|
||||
return nil, ErrNilPrivateKey
|
||||
}
|
||||
|
||||
switch p := priv.(type) {
|
||||
case *RsaPrivateKey:
|
||||
return &p.sk, nil
|
||||
case *ECDSAPrivateKey:
|
||||
return p.priv, nil
|
||||
case *Ed25519PrivateKey:
|
||||
return &p.k, nil
|
||||
case *Secp256k1PrivateKey:
|
||||
return p, nil
|
||||
default:
|
||||
return nil, ErrBadKeyType
|
||||
}
|
||||
}
|
||||
|
||||
// PubKeyToStdKey converts libp2p/go-libp2p/core/crypto private keys to standard library (and secp256k1) public keys
|
||||
func PubKeyToStdKey(pub PubKey) (crypto.PublicKey, error) {
|
||||
if pub == nil {
|
||||
return nil, ErrNilPublicKey
|
||||
}
|
||||
|
||||
switch p := pub.(type) {
|
||||
case *RsaPublicKey:
|
||||
return &p.k, nil
|
||||
case *ECDSAPublicKey:
|
||||
return p.pub, nil
|
||||
case *Ed25519PublicKey:
|
||||
return p.k, nil
|
||||
case *Secp256k1PublicKey:
|
||||
return p, nil
|
||||
default:
|
||||
return nil, ErrBadKeyType
|
||||
}
|
||||
}
|
||||
297
vendor/github.com/libp2p/go-libp2p/core/crypto/pb/crypto.pb.go
generated
vendored
Normal file
297
vendor/github.com/libp2p/go-libp2p/core/crypto/pb/crypto.pb.go
generated
vendored
Normal file
@@ -0,0 +1,297 @@
|
||||
// Code generated by protoc-gen-go. DO NOT EDIT.
|
||||
// versions:
|
||||
// protoc-gen-go v1.30.0
|
||||
// protoc v3.21.12
|
||||
// source: pb/crypto.proto
|
||||
|
||||
package pb
|
||||
|
||||
import (
|
||||
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
|
||||
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
|
||||
reflect "reflect"
|
||||
sync "sync"
|
||||
)
|
||||
|
||||
const (
|
||||
// Verify that this generated code is sufficiently up-to-date.
|
||||
_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
|
||||
// Verify that runtime/protoimpl is sufficiently up-to-date.
|
||||
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
|
||||
)
|
||||
|
||||
type KeyType int32
|
||||
|
||||
const (
|
||||
KeyType_RSA KeyType = 0
|
||||
KeyType_Ed25519 KeyType = 1
|
||||
KeyType_Secp256k1 KeyType = 2
|
||||
KeyType_ECDSA KeyType = 3
|
||||
)
|
||||
|
||||
// Enum value maps for KeyType.
|
||||
var (
|
||||
KeyType_name = map[int32]string{
|
||||
0: "RSA",
|
||||
1: "Ed25519",
|
||||
2: "Secp256k1",
|
||||
3: "ECDSA",
|
||||
}
|
||||
KeyType_value = map[string]int32{
|
||||
"RSA": 0,
|
||||
"Ed25519": 1,
|
||||
"Secp256k1": 2,
|
||||
"ECDSA": 3,
|
||||
}
|
||||
)
|
||||
|
||||
func (x KeyType) Enum() *KeyType {
|
||||
p := new(KeyType)
|
||||
*p = x
|
||||
return p
|
||||
}
|
||||
|
||||
func (x KeyType) String() string {
|
||||
return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
|
||||
}
|
||||
|
||||
func (KeyType) Descriptor() protoreflect.EnumDescriptor {
|
||||
return file_pb_crypto_proto_enumTypes[0].Descriptor()
|
||||
}
|
||||
|
||||
func (KeyType) Type() protoreflect.EnumType {
|
||||
return &file_pb_crypto_proto_enumTypes[0]
|
||||
}
|
||||
|
||||
func (x KeyType) Number() protoreflect.EnumNumber {
|
||||
return protoreflect.EnumNumber(x)
|
||||
}
|
||||
|
||||
// Deprecated: Do not use.
|
||||
func (x *KeyType) UnmarshalJSON(b []byte) error {
|
||||
num, err := protoimpl.X.UnmarshalJSONEnum(x.Descriptor(), b)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
*x = KeyType(num)
|
||||
return nil
|
||||
}
|
||||
|
||||
// Deprecated: Use KeyType.Descriptor instead.
|
||||
func (KeyType) EnumDescriptor() ([]byte, []int) {
|
||||
return file_pb_crypto_proto_rawDescGZIP(), []int{0}
|
||||
}
|
||||
|
||||
type PublicKey struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
Type *KeyType `protobuf:"varint,1,req,name=Type,enum=crypto.pb.KeyType" json:"Type,omitempty"`
|
||||
Data []byte `protobuf:"bytes,2,req,name=Data" json:"Data,omitempty"`
|
||||
}
|
||||
|
||||
func (x *PublicKey) Reset() {
|
||||
*x = PublicKey{}
|
||||
if protoimpl.UnsafeEnabled {
|
||||
mi := &file_pb_crypto_proto_msgTypes[0]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
}
|
||||
|
||||
func (x *PublicKey) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*PublicKey) ProtoMessage() {}
|
||||
|
||||
func (x *PublicKey) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_pb_crypto_proto_msgTypes[0]
|
||||
if protoimpl.UnsafeEnabled && x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use PublicKey.ProtoReflect.Descriptor instead.
|
||||
func (*PublicKey) Descriptor() ([]byte, []int) {
|
||||
return file_pb_crypto_proto_rawDescGZIP(), []int{0}
|
||||
}
|
||||
|
||||
func (x *PublicKey) GetType() KeyType {
|
||||
if x != nil && x.Type != nil {
|
||||
return *x.Type
|
||||
}
|
||||
return KeyType_RSA
|
||||
}
|
||||
|
||||
func (x *PublicKey) GetData() []byte {
|
||||
if x != nil {
|
||||
return x.Data
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type PrivateKey struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
Type *KeyType `protobuf:"varint,1,req,name=Type,enum=crypto.pb.KeyType" json:"Type,omitempty"`
|
||||
Data []byte `protobuf:"bytes,2,req,name=Data" json:"Data,omitempty"`
|
||||
}
|
||||
|
||||
func (x *PrivateKey) Reset() {
|
||||
*x = PrivateKey{}
|
||||
if protoimpl.UnsafeEnabled {
|
||||
mi := &file_pb_crypto_proto_msgTypes[1]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
}
|
||||
|
||||
func (x *PrivateKey) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*PrivateKey) ProtoMessage() {}
|
||||
|
||||
func (x *PrivateKey) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_pb_crypto_proto_msgTypes[1]
|
||||
if protoimpl.UnsafeEnabled && x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use PrivateKey.ProtoReflect.Descriptor instead.
|
||||
func (*PrivateKey) Descriptor() ([]byte, []int) {
|
||||
return file_pb_crypto_proto_rawDescGZIP(), []int{1}
|
||||
}
|
||||
|
||||
func (x *PrivateKey) GetType() KeyType {
|
||||
if x != nil && x.Type != nil {
|
||||
return *x.Type
|
||||
}
|
||||
return KeyType_RSA
|
||||
}
|
||||
|
||||
func (x *PrivateKey) GetData() []byte {
|
||||
if x != nil {
|
||||
return x.Data
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
var File_pb_crypto_proto protoreflect.FileDescriptor
|
||||
|
||||
var file_pb_crypto_proto_rawDesc = []byte{
|
||||
0x0a, 0x0f, 0x70, 0x62, 0x2f, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x74,
|
||||
0x6f, 0x12, 0x09, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x2e, 0x70, 0x62, 0x22, 0x47, 0x0a, 0x09,
|
||||
0x50, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x4b, 0x65, 0x79, 0x12, 0x26, 0x0a, 0x04, 0x54, 0x79, 0x70,
|
||||
0x65, 0x18, 0x01, 0x20, 0x02, 0x28, 0x0e, 0x32, 0x12, 0x2e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f,
|
||||
0x2e, 0x70, 0x62, 0x2e, 0x4b, 0x65, 0x79, 0x54, 0x79, 0x70, 0x65, 0x52, 0x04, 0x54, 0x79, 0x70,
|
||||
0x65, 0x12, 0x12, 0x0a, 0x04, 0x44, 0x61, 0x74, 0x61, 0x18, 0x02, 0x20, 0x02, 0x28, 0x0c, 0x52,
|
||||
0x04, 0x44, 0x61, 0x74, 0x61, 0x22, 0x48, 0x0a, 0x0a, 0x50, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65,
|
||||
0x4b, 0x65, 0x79, 0x12, 0x26, 0x0a, 0x04, 0x54, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x02, 0x28,
|
||||
0x0e, 0x32, 0x12, 0x2e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x2e, 0x70, 0x62, 0x2e, 0x4b, 0x65,
|
||||
0x79, 0x54, 0x79, 0x70, 0x65, 0x52, 0x04, 0x54, 0x79, 0x70, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x44,
|
||||
0x61, 0x74, 0x61, 0x18, 0x02, 0x20, 0x02, 0x28, 0x0c, 0x52, 0x04, 0x44, 0x61, 0x74, 0x61, 0x2a,
|
||||
0x39, 0x0a, 0x07, 0x4b, 0x65, 0x79, 0x54, 0x79, 0x70, 0x65, 0x12, 0x07, 0x0a, 0x03, 0x52, 0x53,
|
||||
0x41, 0x10, 0x00, 0x12, 0x0b, 0x0a, 0x07, 0x45, 0x64, 0x32, 0x35, 0x35, 0x31, 0x39, 0x10, 0x01,
|
||||
0x12, 0x0d, 0x0a, 0x09, 0x53, 0x65, 0x63, 0x70, 0x32, 0x35, 0x36, 0x6b, 0x31, 0x10, 0x02, 0x12,
|
||||
0x09, 0x0a, 0x05, 0x45, 0x43, 0x44, 0x53, 0x41, 0x10, 0x03, 0x42, 0x2c, 0x5a, 0x2a, 0x67, 0x69,
|
||||
0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x6c, 0x69, 0x62, 0x70, 0x32, 0x70, 0x2f,
|
||||
0x67, 0x6f, 0x2d, 0x6c, 0x69, 0x62, 0x70, 0x32, 0x70, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x63,
|
||||
0x72, 0x79, 0x70, 0x74, 0x6f, 0x2f, 0x70, 0x62,
|
||||
}
|
||||
|
||||
var (
|
||||
file_pb_crypto_proto_rawDescOnce sync.Once
|
||||
file_pb_crypto_proto_rawDescData = file_pb_crypto_proto_rawDesc
|
||||
)
|
||||
|
||||
func file_pb_crypto_proto_rawDescGZIP() []byte {
|
||||
file_pb_crypto_proto_rawDescOnce.Do(func() {
|
||||
file_pb_crypto_proto_rawDescData = protoimpl.X.CompressGZIP(file_pb_crypto_proto_rawDescData)
|
||||
})
|
||||
return file_pb_crypto_proto_rawDescData
|
||||
}
|
||||
|
||||
var file_pb_crypto_proto_enumTypes = make([]protoimpl.EnumInfo, 1)
|
||||
var file_pb_crypto_proto_msgTypes = make([]protoimpl.MessageInfo, 2)
|
||||
var file_pb_crypto_proto_goTypes = []interface{}{
|
||||
(KeyType)(0), // 0: crypto.pb.KeyType
|
||||
(*PublicKey)(nil), // 1: crypto.pb.PublicKey
|
||||
(*PrivateKey)(nil), // 2: crypto.pb.PrivateKey
|
||||
}
|
||||
var file_pb_crypto_proto_depIdxs = []int32{
|
||||
0, // 0: crypto.pb.PublicKey.Type:type_name -> crypto.pb.KeyType
|
||||
0, // 1: crypto.pb.PrivateKey.Type:type_name -> crypto.pb.KeyType
|
||||
2, // [2:2] is the sub-list for method output_type
|
||||
2, // [2:2] is the sub-list for method input_type
|
||||
2, // [2:2] is the sub-list for extension type_name
|
||||
2, // [2:2] is the sub-list for extension extendee
|
||||
0, // [0:2] is the sub-list for field type_name
|
||||
}
|
||||
|
||||
func init() { file_pb_crypto_proto_init() }
|
||||
func file_pb_crypto_proto_init() {
|
||||
if File_pb_crypto_proto != nil {
|
||||
return
|
||||
}
|
||||
if !protoimpl.UnsafeEnabled {
|
||||
file_pb_crypto_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
|
||||
switch v := v.(*PublicKey); i {
|
||||
case 0:
|
||||
return &v.state
|
||||
case 1:
|
||||
return &v.sizeCache
|
||||
case 2:
|
||||
return &v.unknownFields
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
file_pb_crypto_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
|
||||
switch v := v.(*PrivateKey); i {
|
||||
case 0:
|
||||
return &v.state
|
||||
case 1:
|
||||
return &v.sizeCache
|
||||
case 2:
|
||||
return &v.unknownFields
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
}
|
||||
type x struct{}
|
||||
out := protoimpl.TypeBuilder{
|
||||
File: protoimpl.DescBuilder{
|
||||
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
|
||||
RawDescriptor: file_pb_crypto_proto_rawDesc,
|
||||
NumEnums: 1,
|
||||
NumMessages: 2,
|
||||
NumExtensions: 0,
|
||||
NumServices: 0,
|
||||
},
|
||||
GoTypes: file_pb_crypto_proto_goTypes,
|
||||
DependencyIndexes: file_pb_crypto_proto_depIdxs,
|
||||
EnumInfos: file_pb_crypto_proto_enumTypes,
|
||||
MessageInfos: file_pb_crypto_proto_msgTypes,
|
||||
}.Build()
|
||||
File_pb_crypto_proto = out.File
|
||||
file_pb_crypto_proto_rawDesc = nil
|
||||
file_pb_crypto_proto_goTypes = nil
|
||||
file_pb_crypto_proto_depIdxs = nil
|
||||
}
|
||||
22
vendor/github.com/libp2p/go-libp2p/core/crypto/pb/crypto.proto
generated
vendored
Normal file
22
vendor/github.com/libp2p/go-libp2p/core/crypto/pb/crypto.proto
generated
vendored
Normal file
@@ -0,0 +1,22 @@
|
||||
// Serialization schema for libp2p cryptographic keys.
syntax = "proto2";

package crypto.pb;

option go_package = "github.com/libp2p/go-libp2p/core/crypto/pb";

// KeyType tags which signature algorithm a serialized key belongs to.
enum KeyType {
	RSA = 0;
	Ed25519 = 1;
	Secp256k1 = 2;
	ECDSA = 3;
}

// PublicKey carries an algorithm tag plus the algorithm-specific key bytes.
message PublicKey {
	required KeyType Type = 1;
	required bytes Data = 2;
}

// PrivateKey mirrors PublicKey for the private half of a key pair.
message PrivateKey {
	required KeyType Type = 1;
	required bytes Data = 2;
}
|
||||
28
vendor/github.com/libp2p/go-libp2p/core/crypto/rsa_common.go
generated
vendored
Normal file
28
vendor/github.com/libp2p/go-libp2p/core/crypto/rsa_common.go
generated
vendored
Normal file
@@ -0,0 +1,28 @@
|
||||
package crypto
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
)
|
||||
|
||||
// WeakRsaKeyEnv is an environment variable which, when set, lowers the
// minimum required bits of RSA keys to 512. This should be used exclusively in
// test situations.
const WeakRsaKeyEnv = "LIBP2P_ALLOW_WEAK_RSA_KEYS"

// MinRsaKeyBits is the smallest RSA modulus, in bits, accepted by this
// package; init lowers it to 512 when WeakRsaKeyEnv is set.
var MinRsaKeyBits = 2048

// maxRsaKeyBits is the largest RSA modulus, in bits, accepted by this package.
var maxRsaKeyBits = 8192

// ErrRsaKeyTooSmall is returned when trying to generate or parse an RSA key
// that's smaller than MinRsaKeyBits bits. It is assigned in init so its
// message reflects the effective minimum (which WeakRsaKeyEnv can change).
var ErrRsaKeyTooSmall error

// ErrRsaKeyTooBig is returned for RSA keys larger than maxRsaKeyBits bits.
var ErrRsaKeyTooBig error = fmt.Errorf("rsa keys must be <= %d bits", maxRsaKeyBits)
|
||||
|
||||
func init() {
|
||||
if _, ok := os.LookupEnv(WeakRsaKeyEnv); ok {
|
||||
MinRsaKeyBits = 512
|
||||
}
|
||||
|
||||
ErrRsaKeyTooSmall = fmt.Errorf("rsa keys must be >= %d bits to be useful", MinRsaKeyBits)
|
||||
}
|
||||
155
vendor/github.com/libp2p/go-libp2p/core/crypto/rsa_go.go
generated
vendored
Normal file
155
vendor/github.com/libp2p/go-libp2p/core/crypto/rsa_go.go
generated
vendored
Normal file
@@ -0,0 +1,155 @@
|
||||
package crypto
|
||||
|
||||
import (
|
||||
"crypto"
|
||||
"crypto/rand"
|
||||
"crypto/rsa"
|
||||
"crypto/x509"
|
||||
"errors"
|
||||
"io"
|
||||
|
||||
pb "github.com/libp2p/go-libp2p/core/crypto/pb"
|
||||
"github.com/libp2p/go-libp2p/core/internal/catch"
|
||||
|
||||
"github.com/minio/sha256-simd"
|
||||
)
|
||||
|
||||
// RsaPrivateKey is a rsa private key
type RsaPrivateKey struct {
	sk rsa.PrivateKey // underlying stdlib key, stored by value
}

// RsaPublicKey is a rsa public key
type RsaPublicKey struct {
	k rsa.PublicKey

	// cached is a previously computed serialization of the key.
	// NOTE(review): nothing in this file reads or writes it — presumably
	// maintained elsewhere in the package; confirm before relying on it.
	cached []byte
}
|
||||
|
||||
// GenerateRSAKeyPair generates a new rsa private and public key
|
||||
func GenerateRSAKeyPair(bits int, src io.Reader) (PrivKey, PubKey, error) {
|
||||
if bits < MinRsaKeyBits {
|
||||
return nil, nil, ErrRsaKeyTooSmall
|
||||
}
|
||||
if bits > maxRsaKeyBits {
|
||||
return nil, nil, ErrRsaKeyTooBig
|
||||
}
|
||||
priv, err := rsa.GenerateKey(src, bits)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
pk := priv.PublicKey
|
||||
return &RsaPrivateKey{sk: *priv}, &RsaPublicKey{k: pk}, nil
|
||||
}
|
||||
|
||||
// Verify compares a signature against input data
|
||||
func (pk *RsaPublicKey) Verify(data, sig []byte) (success bool, err error) {
|
||||
defer func() {
|
||||
catch.HandlePanic(recover(), &err, "RSA signature verification")
|
||||
|
||||
// To be safe
|
||||
if err != nil {
|
||||
success = false
|
||||
}
|
||||
}()
|
||||
hashed := sha256.Sum256(data)
|
||||
err = rsa.VerifyPKCS1v15(&pk.k, crypto.SHA256, hashed[:], sig)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
return true, nil
|
||||
}
|
||||
|
||||
// Type reports the protobuf key-type tag for RSA public keys.
func (pk *RsaPublicKey) Type() pb.KeyType {
	return pb.KeyType_RSA
}
|
||||
|
||||
func (pk *RsaPublicKey) Raw() (res []byte, err error) {
|
||||
defer func() { catch.HandlePanic(recover(), &err, "RSA public-key marshaling") }()
|
||||
return x509.MarshalPKIXPublicKey(&pk.k)
|
||||
}
|
||||
|
||||
// Equals checks whether this key is equal to another
|
||||
func (pk *RsaPublicKey) Equals(k Key) bool {
|
||||
// make sure this is a rsa public key
|
||||
other, ok := (k).(*RsaPublicKey)
|
||||
if !ok {
|
||||
return basicEquals(pk, k)
|
||||
}
|
||||
|
||||
return pk.k.N.Cmp(other.k.N) == 0 && pk.k.E == other.k.E
|
||||
}
|
||||
|
||||
// Sign returns a signature of the input data
|
||||
func (sk *RsaPrivateKey) Sign(message []byte) (sig []byte, err error) {
|
||||
defer func() { catch.HandlePanic(recover(), &err, "RSA signing") }()
|
||||
hashed := sha256.Sum256(message)
|
||||
return rsa.SignPKCS1v15(rand.Reader, &sk.sk, crypto.SHA256, hashed[:])
|
||||
}
|
||||
|
||||
// GetPublic returns a public key
|
||||
func (sk *RsaPrivateKey) GetPublic() PubKey {
|
||||
return &RsaPublicKey{k: sk.sk.PublicKey}
|
||||
}
|
||||
|
||||
// Type reports the protobuf key-type tag for RSA private keys.
func (sk *RsaPrivateKey) Type() pb.KeyType {
	return pb.KeyType_RSA
}
|
||||
|
||||
func (sk *RsaPrivateKey) Raw() (res []byte, err error) {
|
||||
defer func() { catch.HandlePanic(recover(), &err, "RSA private-key marshaling") }()
|
||||
b := x509.MarshalPKCS1PrivateKey(&sk.sk)
|
||||
return b, nil
|
||||
}
|
||||
|
||||
// Equals checks whether this key is equal to another
|
||||
func (sk *RsaPrivateKey) Equals(k Key) bool {
|
||||
// make sure this is a rsa public key
|
||||
other, ok := (k).(*RsaPrivateKey)
|
||||
if !ok {
|
||||
return basicEquals(sk, k)
|
||||
}
|
||||
|
||||
a := sk.sk
|
||||
b := other.sk
|
||||
|
||||
// Don't care about constant time. We're only comparing the public half.
|
||||
return a.PublicKey.N.Cmp(b.PublicKey.N) == 0 && a.PublicKey.E == b.PublicKey.E
|
||||
}
|
||||
|
||||
// UnmarshalRsaPrivateKey returns a private key from the input x509 bytes
|
||||
func UnmarshalRsaPrivateKey(b []byte) (key PrivKey, err error) {
|
||||
defer func() { catch.HandlePanic(recover(), &err, "RSA private-key unmarshaling") }()
|
||||
sk, err := x509.ParsePKCS1PrivateKey(b)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if sk.N.BitLen() < MinRsaKeyBits {
|
||||
return nil, ErrRsaKeyTooSmall
|
||||
}
|
||||
if sk.N.BitLen() > maxRsaKeyBits {
|
||||
return nil, ErrRsaKeyTooBig
|
||||
}
|
||||
return &RsaPrivateKey{sk: *sk}, nil
|
||||
}
|
||||
|
||||
// UnmarshalRsaPublicKey returns a public key from the input x509 bytes
|
||||
func UnmarshalRsaPublicKey(b []byte) (key PubKey, err error) {
|
||||
defer func() { catch.HandlePanic(recover(), &err, "RSA public-key unmarshaling") }()
|
||||
pub, err := x509.ParsePKIXPublicKey(b)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
pk, ok := pub.(*rsa.PublicKey)
|
||||
if !ok {
|
||||
return nil, errors.New("not actually an rsa public key")
|
||||
}
|
||||
if pk.N.BitLen() < MinRsaKeyBits {
|
||||
return nil, ErrRsaKeyTooSmall
|
||||
}
|
||||
if pk.N.BitLen() > maxRsaKeyBits {
|
||||
return nil, ErrRsaKeyTooBig
|
||||
}
|
||||
|
||||
return &RsaPublicKey{k: *pk}, nil
|
||||
}
|
||||
127
vendor/github.com/libp2p/go-libp2p/core/crypto/secp256k1.go
generated
vendored
Normal file
127
vendor/github.com/libp2p/go-libp2p/core/crypto/secp256k1.go
generated
vendored
Normal file
@@ -0,0 +1,127 @@
|
||||
package crypto
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
|
||||
pb "github.com/libp2p/go-libp2p/core/crypto/pb"
|
||||
"github.com/libp2p/go-libp2p/core/internal/catch"
|
||||
|
||||
"github.com/decred/dcrd/dcrec/secp256k1/v4"
|
||||
"github.com/decred/dcrd/dcrec/secp256k1/v4/ecdsa"
|
||||
"github.com/minio/sha256-simd"
|
||||
)
|
||||
|
||||
// Secp256k1PrivateKey is a Secp256k1 private key
// (a named form of the dcrd library type; methods convert via pointer casts).
type Secp256k1PrivateKey secp256k1.PrivateKey

// Secp256k1PublicKey is a Secp256k1 public key
// (a named form of the dcrd library type; methods convert via pointer casts).
type Secp256k1PublicKey secp256k1.PublicKey
|
||||
|
||||
// GenerateSecp256k1Key generates a new Secp256k1 private and public key pair
|
||||
func GenerateSecp256k1Key(src io.Reader) (PrivKey, PubKey, error) {
|
||||
privk, err := secp256k1.GeneratePrivateKey()
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
k := (*Secp256k1PrivateKey)(privk)
|
||||
return k, k.GetPublic(), nil
|
||||
}
|
||||
|
||||
// UnmarshalSecp256k1PrivateKey returns a private key from bytes
|
||||
func UnmarshalSecp256k1PrivateKey(data []byte) (k PrivKey, err error) {
|
||||
if len(data) != secp256k1.PrivKeyBytesLen {
|
||||
return nil, fmt.Errorf("expected secp256k1 data size to be %d", secp256k1.PrivKeyBytesLen)
|
||||
}
|
||||
defer func() { catch.HandlePanic(recover(), &err, "secp256k1 private-key unmarshal") }()
|
||||
|
||||
privk := secp256k1.PrivKeyFromBytes(data)
|
||||
return (*Secp256k1PrivateKey)(privk), nil
|
||||
}
|
||||
|
||||
// UnmarshalSecp256k1PublicKey returns a public key from bytes
|
||||
func UnmarshalSecp256k1PublicKey(data []byte) (_k PubKey, err error) {
|
||||
defer func() { catch.HandlePanic(recover(), &err, "secp256k1 public-key unmarshal") }()
|
||||
k, err := secp256k1.ParsePubKey(data)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return (*Secp256k1PublicKey)(k), nil
|
||||
}
|
||||
|
||||
// Type returns the private key type
func (k *Secp256k1PrivateKey) Type() pb.KeyType {
	return pb.KeyType_Secp256k1
}
|
||||
|
||||
// Raw returns the bytes of the key
|
||||
func (k *Secp256k1PrivateKey) Raw() ([]byte, error) {
|
||||
return (*secp256k1.PrivateKey)(k).Serialize(), nil
|
||||
}
|
||||
|
||||
// Equals compares two private keys
|
||||
func (k *Secp256k1PrivateKey) Equals(o Key) bool {
|
||||
sk, ok := o.(*Secp256k1PrivateKey)
|
||||
if !ok {
|
||||
return basicEquals(k, o)
|
||||
}
|
||||
|
||||
return k.GetPublic().Equals(sk.GetPublic())
|
||||
}
|
||||
|
||||
// Sign returns a signature from input data
|
||||
func (k *Secp256k1PrivateKey) Sign(data []byte) (_sig []byte, err error) {
|
||||
defer func() { catch.HandlePanic(recover(), &err, "secp256k1 signing") }()
|
||||
key := (*secp256k1.PrivateKey)(k)
|
||||
hash := sha256.Sum256(data)
|
||||
sig := ecdsa.Sign(key, hash[:])
|
||||
|
||||
return sig.Serialize(), nil
|
||||
}
|
||||
|
||||
// GetPublic returns a public key
func (k *Secp256k1PrivateKey) GetPublic() PubKey {
	// Derive the public point from the private scalar via the library.
	return (*Secp256k1PublicKey)((*secp256k1.PrivateKey)(k).PubKey())
}
|
||||
|
||||
// Type returns the public key type
func (k *Secp256k1PublicKey) Type() pb.KeyType {
	return pb.KeyType_Secp256k1
}
|
||||
|
||||
// Raw returns the bytes of the key
|
||||
func (k *Secp256k1PublicKey) Raw() (res []byte, err error) {
|
||||
defer func() { catch.HandlePanic(recover(), &err, "secp256k1 public key marshaling") }()
|
||||
return (*secp256k1.PublicKey)(k).SerializeCompressed(), nil
|
||||
}
|
||||
|
||||
// Equals compares two public keys
|
||||
func (k *Secp256k1PublicKey) Equals(o Key) bool {
|
||||
sk, ok := o.(*Secp256k1PublicKey)
|
||||
if !ok {
|
||||
return basicEquals(k, o)
|
||||
}
|
||||
|
||||
return (*secp256k1.PublicKey)(k).IsEqual((*secp256k1.PublicKey)(sk))
|
||||
}
|
||||
|
||||
// Verify compares a signature against the input data
|
||||
func (k *Secp256k1PublicKey) Verify(data []byte, sigStr []byte) (success bool, err error) {
|
||||
defer func() {
|
||||
catch.HandlePanic(recover(), &err, "secp256k1 signature verification")
|
||||
|
||||
// To be extra safe.
|
||||
if err != nil {
|
||||
success = false
|
||||
}
|
||||
}()
|
||||
sig, err := ecdsa.ParseDERSignature(sigStr)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
hash := sha256.Sum256(data)
|
||||
return sig.Verify(hash[:], (*secp256k1.PublicKey)(k)), nil
|
||||
}
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user