feat: Waku v2 bridge

Issue #12610
This commit is contained in:
Michal Iskierko
2023-11-12 13:29:38 +01:00
parent 56e7bd01ca
commit 6d31343205
6716 changed files with 1982502 additions and 5891 deletions

vendor/github.com/status-im/doubleratchet/.gitignore generated vendored Normal file

@@ -0,0 +1,17 @@
/vendor
# Binaries for programs and plugins
*.exe
*.dll
*.so
*.dylib
# Test binary, build with `go test -c`
*.test
# Output of the go coverage tool, specifically when used with LiteIDE
*.out
# Project-local glide cache, RE: https://github.com/Masterminds/glide/issues/736
.glide/
/glide.lock


@@ -0,0 +1,74 @@
run:
concurrency: 4
deadline: 1m
issues-exit-code: 1
tests: true
# build-tags:
# - mytag
skip-dirs:
- static
skip-files:
- .*_mock.go
- jail/doc.go
output:
format: colored-line-number
print-issued-lines: true
print-linter-name: true
linters-settings:
errcheck:
check-type-assertions: false
check-blank: false
govet:
check-shadowing: false
golint:
min-confidence: 0.8
gofmt:
simplify: true
gocyclo:
min-complexity: 16
maligned:
suggest-new: true
dupl:
threshold: 50
goconst:
min-len: 3
min-occurrences: 2
# depguard:
# list-type: blacklist
# include-go-root: false
# packages:
# - github.com/davecgh/go-spew/spew
linters:
disable-all: true
enable:
- deadcode
#- depguard
- errcheck
- gas
- goconst
- gocyclo
- gofmt
- golint
- govet
- ineffassign
- interfacer
- megacheck
- misspell
- structcheck
- typecheck
- unconvert
- varcheck
fast: false
issues:
exclude:
- "composite literal uses unkeyed fields" # govet
# exclude-use-default: true
# max-per-linter: 0
# max-same: 0
# new: false
# new-from-rev: ""
# new-from-patch: ""

vendor/github.com/status-im/doubleratchet/.travis.yml generated vendored Normal file

@@ -0,0 +1,18 @@
language: go
go:
- 1.8.1
- tip
install:
- go get github.com/stretchr/testify/require
- go get golang.org/x/tools/cmd/cover
- go get github.com/mattn/goveralls
- go get ./...
script:
- go test -v -covermode=count -coverprofile=coverage.out
- $HOME/gopath/bin/goveralls -coverprofile=coverage.out -service=travis-ci -repotoken $COVERALLS_TOKEN
env:
secure: FOIA329dOVl+h897c764Zy7JJuW+BGD1ZqiQnVg6ejIpJoxQCyo/NuW+Pq2ZF8XPndit72pOcvvIuTpgubvyg439nJX7dOdnBfGtkBzeBxUp8IlpR+hK1xFqiXHvEGYuyti8cwfuykD6Umi5AENn60YMroWVaWSqE7iRHQE4rGp3bPbcdilqqkceZAbgzvf7vU3eUs0UGrJM87a7OjgK69tMCPS9Rcfa8HTADgdR3jKNv07lXQieCnomjzuAxFohoNPbP1bL/H5pjImwyHu1Bqsp+jZWXOV7TN2+EdMfaxoSBUjm4F4wPYmzPtyDOLVCfKVyszYtgG7e7R8gsTQALtuYtJed5JD7WLiU+ttJXYraKoerbjsngxT0dcj2YuTIqCWTpgwm30O2eMIeRBhVhY7TVQurrNZevXF83TYAdDM+amchtRFqbtmogUQUV5miG3aMgel7t/Ty209Yx/iRPgyLuvZTN7uzMdGXwk/tNHgdGula7HJoONpypPWRqonvjIZWx6nnHeJ4Ape5zMN6rbVyXjtBl5eUVrapUiEKFwVZadjBj/qCBxFTNiwmzHbBInuowlpPcS+Y/ZYsz3d915UmkPhfKNywVmA1sqGCKecSf9pdh4syAo86zoPy8WyHhsW8ziVOj9Oaq9pUmdzHrRUy1TageDwNIe61pIEQQQI=

vendor/github.com/status-im/doubleratchet/LICENSE generated vendored Normal file

@@ -0,0 +1,21 @@
MIT License
Copyright (c) 2017 Ivan Tomilov
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

vendor/github.com/status-im/doubleratchet/Makefile generated vendored Normal file

@@ -0,0 +1,59 @@
.PHONY: help
help: ##@other Show this help
@perl -e '$(HELP_FUN)' $(MAKEFILE_LIST)
ifndef GOPATH
$(error GOPATH not set. Please set GOPATH and make sure doubleratchet is located at $$GOPATH/src/github.com/status-im/doubleratchet. \
For more information about the GOPATH environment variable, see https://golang.org/doc/code.html#GOPATH)
endif
EXPECTED_PATH=$(shell go env GOPATH)/src/github.com/status-im/doubleratchet
ifneq ($(CURDIR),$(EXPECTED_PATH))
define NOT_IN_GOPATH_ERROR
Current dir is $(CURDIR), which seems to be different from your GOPATH.
Please build doubleratchet from GOPATH for a proper build.
GOPATH = $(shell go env GOPATH)
Current dir = $(CURDIR)
Expected dir = $(EXPECTED_PATH))
See https://golang.org/doc/code.html#GOPATH for more info
endef
$(error $(NOT_IN_GOPATH_ERROR))
endif
GOBIN=$(dir $(realpath $(firstword $(MAKEFILE_LIST))))build/bin
GIT_COMMIT := $(shell git rev-parse --short HEAD)
# This is a code for automatic help generator.
# It supports ANSI colors and categories.
# To add new item into help output, simply add comments
# starting with '##'. To add category, use @category.
GREEN := $(shell echo "\e[32m")
WHITE := $(shell echo "\e[37m")
YELLOW := $(shell echo "\e[33m")
RESET := $(shell echo "\e[0m")
HELP_FUN = \
%help; \
while(<>) { push @{$$help{$$2 // 'options'}}, [$$1, $$3] if /^([a-zA-Z0-9\-]+)\s*:.*\#\#(?:@([a-zA-Z\-]+))?\s(.*)$$/ }; \
print "Usage: make [target]\n\n"; \
for (sort keys %help) { \
print "${WHITE}$$_:${RESET}\n"; \
for (@{$$help{$$_}}) { \
$$sep = " " x (32 - length $$_->[0]); \
print " ${YELLOW}$$_->[0]${RESET}$$sep${GREEN}$$_->[1]${RESET}\n"; \
}; \
print "\n"; \
}
setup: lint-install ##@other Prepare project for first build
lint-install:
@# The following installs a specific version of golangci-lint, which is appropriate for a CI server to avoid different results from build to build
curl -sfL https://install.goreleaser.com/github.com/golangci/golangci-lint.sh | bash -s -- -b $(GOPATH)/bin v1.9.1
lint: ##@other Run linter
@echo "lint"
@golangci-lint run ./...

vendor/github.com/status-im/doubleratchet/README.md generated vendored Normal file

@@ -0,0 +1,135 @@
# doubleratchet
[![Go Report Card](https://goreportcard.com/badge/github.com/status-im/doubleratchet)](https://goreportcard.com/report/github.com/status-im/doubleratchet)
[![Build Status](https://travis-ci.org/status-im/doubleratchet.svg?branch=master)](https://travis-ci.org/status-im/doubleratchet)
[![Coverage Status](https://coveralls.io/repos/github/status-im/doubleratchet/badge.svg?branch=master)](https://coveralls.io/github/status-im/doubleratchet?branch=master)
[![GoDoc](https://godoc.org/github.com/status-im/doubleratchet?status.svg)](https://godoc.org/github.com/status-im/doubleratchet)
[The Double Ratchet Algorithm](https://whispersystems.org/docs/specifications/doubleratchet) is used
by two parties to exchange encrypted messages based on a shared secret key. Typically the parties
will use some key agreement protocol (such as X3DH) to agree on the shared secret key.
Following this, the parties will use the Double Ratchet to send and receive encrypted messages.
The parties derive new keys for every Double Ratchet message so that earlier keys cannot be calculated
from later ones. The parties also send Diffie-Hellman public values attached to their messages.
The results of Diffie-Hellman calculations are mixed into the derived keys so that later keys cannot
be calculated from earlier ones. These properties give some protection to earlier or later encrypted
messages in case of a compromise of a party's keys.
## Project status
The library is in beta and should be integrated into production projects with care.
Let me know if you face any problems or have any questions or suggestions.
## Implementation notes
### The Double Ratchet logic
1. No more than 1000 messages can be skipped in a single chain.
1. Skipped messages from a single ratchet step are deleted after 100 ratchet steps.
1. Both parties' sending and receiving chains are initialized with the shared key so that both
of them could message each other from the very beginning.
### Cryptographic primitives
1. **GENERATE_DH():** Curve25519
1. **KDF_RK(rk, dh_out):** HKDF with SHA-256
1. **KDF_CK(ck):** HMAC with SHA-256 and constant inputs
1. **ENCRYPT(mk, pt, associated_data):** AES-256-CTR with HMAC-SHA-256 and IV derived alongside an encryption key
## Installation
    go get github.com/status-im/doubleratchet
then `cd` into the project directory and install dependencies:
    glide up
If `glide` is not installed, [install it](https://github.com/Masterminds/glide).
## Usage
### Basic usage example
```go
package main
import (
"fmt"
"log"
"github.com/status-im/doubleratchet"
)
func main() {
// The shared key both parties have already agreed upon before the communication.
sk := [32]byte{
0xeb, 0x8, 0x10, 0x7c, 0x33, 0x54, 0x0, 0x20,
0xe9, 0x4f, 0x6c, 0x84, 0xe4, 0x39, 0x50, 0x5a,
0x2f, 0x60, 0xbe, 0x81, 0xa, 0x78, 0x8b, 0xeb,
0x1e, 0x2c, 0x9, 0x8d, 0x4b, 0x4d, 0xc1, 0x40,
}
// Diffie-Hellman key pair generated by one of the parties during key exchange or
// by any other means. The public key MUST be sent to another party for initialization
// before the communication begins.
keyPair, err := doubleratchet.DefaultCrypto{}.GenerateDH()
if err != nil {
log.Fatal(err)
}
// Bob MUST be created with the shared secret and a DH key pair.
bob, err := doubleratchet.New([]byte("bob-session-id"), sk, keyPair, nil)
if err != nil {
log.Fatal(err)
}
// Alice MUST be created with the shared secret and Bob's public key.
alice, err := doubleratchet.NewWithRemoteKey([]byte("alice-session-id"), sk, keyPair.PublicKey(), nil)
if err != nil {
log.Fatal(err)
}
// Alice can now encrypt messages under the Double Ratchet session.
m, err := alice.RatchetEncrypt([]byte("Hi Bob!"), nil)
if err != nil {
log.Fatal(err)
}
// Which Bob can decrypt.
plaintext, err := bob.RatchetDecrypt(m, nil)
if err != nil {
log.Fatal(err)
}
fmt.Println(string(plaintext))
}
```
### Options
Additional options can be passed to constructors to customize the algorithm behavior:
```go
doubleratchet.New(
sessionID, sk, keyPair, storage,
// Your own cryptography supplement implementing doubleratchet.Crypto.
WithCrypto(c),
// Custom storage for skipped keys implementing doubleratchet.KeysStorage.
WithKeysStorage(ks),
// The maximum number of skipped keys. Error will be raised in an attempt to store more keys
// in a single chain while decrypting.
WithMaxSkip(1200),
// The number of Diffie-Hellman ratchet steps skipped keys will be stored.
WithMaxKeep(90),
)
```
## License
MIT

vendor/github.com/status-im/doubleratchet/chains.go generated vendored Normal file

@@ -0,0 +1,46 @@
package doubleratchet
// KDFer performs key derivation functions for chains.
type KDFer interface {
// KdfRK returns a triple (32-byte root key, 32-byte chain key, 32-byte next header key) as the
// output of applying a KDF keyed by a 32-byte root key rk to a Diffie-Hellman output dhOut.
KdfRK(rk, dhOut Key) (rootKey, chainKey, newHeaderKey Key)
// KdfCK returns a pair (32-byte chain key, 32-byte message key) as the output of applying
// a KDF keyed by a 32-byte chain key ck to some constant.
KdfCK(ck Key) (chainKey, msgKey Key)
}
type kdfChain struct {
Crypto KDFer
// 32-byte chain key.
CK Key
// Messages count in the chain.
N uint32
}
// step performs symmetric ratchet step and returns a new message key.
func (c *kdfChain) step() Key {
var mk Key
c.CK, mk = c.Crypto.KdfCK(c.CK)
c.N++
return mk
}
type kdfRootChain struct {
Crypto KDFer
// 32-byte kdfChain key.
CK Key
}
// step performs symmetric ratchet step and returns a new chain and new header key.
func (c *kdfRootChain) step(kdfInput Key) (ch kdfChain, nhk Key) {
ch = kdfChain{
Crypto: c.Crypto,
}
c.CK, ch.CK, nhk = c.Crypto.KdfRK(c.CK, kdfInput)
return ch, nhk
}
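For illustration, here is a minimal sketch of the symmetric ratchet step that `kdfChain.step` performs internally, driven through the exported `DefaultCrypto.KdfCK`; the all-zero starting chain key is purely illustrative. Each step replaces the chain key and yields a one-time message key, so earlier keys cannot be recovered from later ones:
```go
package main

import (
	"fmt"

	"github.com/status-im/doubleratchet"
)

func main() {
	// A 32-byte chain key; in a real session it comes from a root-chain step.
	ck := make(doubleratchet.Key, 32)

	c := doubleratchet.DefaultCrypto{}
	for i := 0; i < 3; i++ {
		var mk doubleratchet.Key
		// Each step replaces the chain key and derives a fresh message key.
		ck, mk = c.KdfCK(ck)
		fmt.Printf("step %d: mk=%s\n", i+1, mk)
	}
}
```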

vendor/github.com/status-im/doubleratchet/crypto.go generated vendored Normal file

@@ -0,0 +1,36 @@
package doubleratchet
import "encoding/hex"
// Crypto is a cryptography supplement for the library.
type Crypto interface {
// GenerateDH creates a new Diffie-Hellman key pair.
GenerateDH() (DHPair, error)
// DH returns the output from the Diffie-Hellman calculation between
// the private key from the DH key pair dhPair and the DH public key dhPub.
DH(dhPair DHPair, dhPub Key) (Key, error)
// Encrypt returns an AEAD encryption of plaintext with message key mk. The associated_data
// is authenticated but is not included in the ciphertext. The AEAD nonce may be set to a constant.
Encrypt(mk Key, plaintext, ad []byte) (authCiphertext []byte, err error)
// Decrypt returns the AEAD decryption of ciphertext with message key mk.
Decrypt(mk Key, ciphertext, ad []byte) (plaintext []byte, err error)
KDFer
}
// DHPair is a general interface for DH pairs representation.
type DHPair interface {
PrivateKey() Key
PublicKey() Key
}
// Key is any byte representation of a key.
type Key []byte
// Stringer interface compliance.
func (k Key) String() string {
return hex.EncodeToString(k[:])
}


@@ -0,0 +1,196 @@
package doubleratchet
import (
"bytes"
"crypto/aes"
"crypto/cipher"
"crypto/hmac"
"crypto/rand"
"crypto/sha256"
"fmt"
"io"
"golang.org/x/crypto/curve25519"
"golang.org/x/crypto/hkdf"
)
// DefaultCrypto is an implementation of Crypto with cryptographic primitives recommended
// by the Double Ratchet Algorithm specification. However, some details are different,
// see function comments for details.
type DefaultCrypto struct{}
// GenerateDH creates a new Diffie-Hellman key pair.
func (c DefaultCrypto) GenerateDH() (DHPair, error) {
var privKey [32]byte
if _, err := io.ReadFull(rand.Reader, privKey[:]); err != nil {
return dhPair{}, fmt.Errorf("couldn't generate privKey: %s", err)
}
privKey[0] &= 248
privKey[31] &= 127
privKey[31] |= 64
var pubKey [32]byte
curve25519.ScalarBaseMult(&pubKey, &privKey)
return dhPair{
privateKey: privKey[:],
publicKey: pubKey[:],
}, nil
}
// DH returns the output from the Diffie-Hellman calculation between
// the private key from the DH key pair dhPair and the DH public key dhPub.
func (c DefaultCrypto) DH(dhPair DHPair, dhPub Key) (Key, error) {
var (
dhOut [32]byte
privKey [32]byte
pubKey [32]byte
)
if len(dhPair.PrivateKey()) != 32 {
return nil, fmt.Errorf("Invalid private key length: %d", len(dhPair.PrivateKey()))
}
if len(dhPub) != 32 {
return nil, fmt.Errorf("Invalid private key length: %d", len(dhPair.PrivateKey()))
}
copy(privKey[:], dhPair.PrivateKey()[:32])
copy(pubKey[:], dhPub[:32])
curve25519.ScalarMult(&dhOut, &privKey, &pubKey)
return dhOut[:], nil
}
// KdfRK returns a triple (32-byte root key, 32-byte chain key, 32-byte next header key) as the
// output of applying a KDF keyed by a 32-byte root key rk to a Diffie-Hellman output dhOut.
func (c DefaultCrypto) KdfRK(rk, dhOut Key) (Key, Key, Key) {
var (
r = hkdf.New(sha256.New, dhOut, rk, []byte("rsZUpEuXUqqwXBvSy3EcievAh4cMj6QL"))
buf = make([]byte, 96)
)
// The only error here is an entropy limit which won't be reached for such a short buffer.
_, _ = io.ReadFull(r, buf)
rootKey := make(Key, 32)
headerKey := make(Key, 32)
chainKey := make(Key, 32)
copy(rootKey[:], buf[:32])
copy(chainKey[:], buf[32:64])
copy(headerKey[:], buf[64:96])
return rootKey, chainKey, headerKey
}
// KdfCK returns a pair (32-byte chain key, 32-byte message key) as the output of applying
// a KDF keyed by a 32-byte chain key ck to some constant.
func (c DefaultCrypto) KdfCK(ck Key) (Key, Key) {
const (
ckInput = 15
mkInput = 16
)
chainKey := make(Key, 32)
msgKey := make(Key, 32)
h := hmac.New(sha256.New, ck[:])
_, _ = h.Write([]byte{ckInput})
copy(chainKey[:], h.Sum(nil))
h.Reset()
_, _ = h.Write([]byte{mkInput})
copy(msgKey[:], h.Sum(nil))
return chainKey, msgKey
}
// Encrypt uses a slightly different approach than in the algorithm specification:
// it uses AES-256-CTR instead of AES-256-CBC for security, ciphertext length and implementation
// complexity considerations.
func (c DefaultCrypto) Encrypt(mk Key, plaintext, ad []byte) ([]byte, error) {
encKey, authKey, iv := c.deriveEncKeys(mk)
ciphertext := make([]byte, aes.BlockSize+len(plaintext))
copy(ciphertext, iv[:])
var (
block, _ = aes.NewCipher(encKey[:]) // No error will occur here as encKey is guaranteed to be 32 bytes.
stream = cipher.NewCTR(block, iv[:])
)
stream.XORKeyStream(ciphertext[aes.BlockSize:], plaintext)
return append(ciphertext, c.computeSignature(authKey[:], ciphertext, ad)...), nil
}
// Decrypt returns the AEAD decryption of ciphertext with message key mk.
func (c DefaultCrypto) Decrypt(mk Key, authCiphertext, ad []byte) ([]byte, error) {
var (
l = len(authCiphertext)
ciphertext = authCiphertext[:l-sha256.Size]
signature = authCiphertext[l-sha256.Size:]
)
// Check the signature.
encKey, authKey, _ := c.deriveEncKeys(mk)
if s := c.computeSignature(authKey[:], ciphertext, ad); !bytes.Equal(s, signature) {
return nil, fmt.Errorf("invalid signature")
}
// Decrypt.
var (
block, _ = aes.NewCipher(encKey[:]) // No error will occur here as encKey is guaranteed to be 32 bytes.
stream = cipher.NewCTR(block, ciphertext[:aes.BlockSize])
plaintext = make([]byte, len(ciphertext[aes.BlockSize:]))
)
stream.XORKeyStream(plaintext, ciphertext[aes.BlockSize:])
return plaintext, nil
}
// deriveEncKeys derives keys for message encryption and decryption. Returns (encKey, authKey, iv).
func (c DefaultCrypto) deriveEncKeys(mk Key) (Key, Key, [16]byte) {
// First, derive encryption and authentication key out of mk.
salt := make([]byte, 32)
var (
r = hkdf.New(sha256.New, mk[:], salt, []byte("pcwSByyx2CRdryCffXJwy7xgVZWtW5Sh"))
buf = make([]byte, 80)
)
// The only error here is an entropy limit which won't be reached for such a short buffer.
_, _ = io.ReadFull(r, buf)
var encKey Key = make(Key, 32)
var authKey Key = make(Key, 32)
var iv [16]byte
copy(encKey[:], buf[0:32])
copy(authKey[:], buf[32:64])
copy(iv[:], buf[64:80])
return encKey, authKey, iv
}
func (c DefaultCrypto) computeSignature(authKey, ciphertext, associatedData []byte) []byte {
h := hmac.New(sha256.New, authKey)
_, _ = h.Write(associatedData)
_, _ = h.Write(ciphertext)
return h.Sum(nil)
}
type dhPair struct {
privateKey Key
publicKey Key
}
func (p dhPair) PrivateKey() Key {
return p.privateKey
}
func (p dhPair) PublicKey() Key {
return p.publicKey
}
func (p dhPair) String() string {
return fmt.Sprintf("{privateKey: %s publicKey: %s}", p.privateKey, p.publicKey)
}
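As a quick check of the AES-256-CTR-with-HMAC scheme described above, a minimal encrypt/decrypt roundtrip might look like this (the all-zero message key is purely illustrative):
```go
package main

import (
	"fmt"
	"log"

	"github.com/status-im/doubleratchet"
)

func main() {
	c := doubleratchet.DefaultCrypto{}

	mk := make(doubleratchet.Key, 32) // illustrative 32-byte message key
	ad := []byte("header-bytes")      // associated data: authenticated, not encrypted

	// Output layout: 16-byte IV || ciphertext || 32-byte HMAC-SHA-256 tag.
	ct, err := c.Encrypt(mk, []byte("hello"), ad)
	if err != nil {
		log.Fatal(err)
	}
	pt, err := c.Decrypt(mk, ct, ad)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(string(pt)) // hello
}
```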

vendor/github.com/status-im/doubleratchet/glide.yaml generated vendored Normal file

@@ -0,0 +1,5 @@
package: github.com/status-im/doubleratchet
import:
- package: golang.org/x/crypto
subpackages:
- curve25519


@@ -0,0 +1,171 @@
package doubleratchet
import (
"bytes"
"fmt"
"sort"
)
// KeysStorage is an interface of an abstract in-memory or persistent keys storage.
type KeysStorage interface {
// Get returns a message key by the given key and message number.
Get(k Key, msgNum uint) (mk Key, ok bool, err error)
// Put saves the given mk under the specified key and msgNum.
Put(sessionID []byte, k Key, msgNum uint, mk Key, keySeqNum uint) error
// DeleteMk ensures there's no message key under the specified key and msgNum.
DeleteMk(k Key, msgNum uint) error
// DeleteOldMks deletes old message keys for a session.
DeleteOldMks(sessionID []byte, deleteUntilSeqKey uint) error
// TruncateMks truncates the number of keys to maxKeys.
TruncateMks(sessionID []byte, maxKeys int) error
// Count returns number of message keys stored under the specified key.
Count(k Key) (uint, error)
// All returns all the keys
All() (map[string]map[uint]Key, error)
}
// KeysStorageInMemory is an in-memory message keys storage.
type KeysStorageInMemory struct {
keys map[string]map[uint]InMemoryKey
}
// Get returns a message key by the given key and message number.
func (s *KeysStorageInMemory) Get(pubKey Key, msgNum uint) (Key, bool, error) {
index := fmt.Sprintf("%x", pubKey)
if s.keys == nil {
return Key{}, false, nil
}
msgs, ok := s.keys[index]
if !ok {
return Key{}, false, nil
}
mk, ok := msgs[msgNum]
if !ok {
return Key{}, false, nil
}
return mk.messageKey, true, nil
}
type InMemoryKey struct {
messageKey Key
seqNum uint
sessionID []byte
}
// Put saves the given mk under the specified key and msgNum.
func (s *KeysStorageInMemory) Put(sessionID []byte, pubKey Key, msgNum uint, mk Key, seqNum uint) error {
index := fmt.Sprintf("%x", pubKey)
if s.keys == nil {
s.keys = make(map[string]map[uint]InMemoryKey)
}
if _, ok := s.keys[index]; !ok {
s.keys[index] = make(map[uint]InMemoryKey)
}
s.keys[index][msgNum] = InMemoryKey{
sessionID: sessionID,
messageKey: mk,
seqNum: seqNum,
}
return nil
}
// DeleteMk ensures there's no message key under the specified key and msgNum.
func (s *KeysStorageInMemory) DeleteMk(pubKey Key, msgNum uint) error {
index := fmt.Sprintf("%x", pubKey)
if s.keys == nil {
return nil
}
if _, ok := s.keys[index]; !ok {
return nil
}
if _, ok := s.keys[index][msgNum]; !ok {
return nil
}
delete(s.keys[index], msgNum)
if len(s.keys[index]) == 0 {
delete(s.keys, index)
}
return nil
}
// TruncateMks truncates the number of keys to maxKeys.
func (s *KeysStorageInMemory) TruncateMks(sessionID []byte, maxKeys int) error {
var seqNos []uint
// Collect all seq numbers
for _, keys := range s.keys {
for _, inMemoryKey := range keys {
if bytes.Equal(inMemoryKey.sessionID, sessionID) {
seqNos = append(seqNos, inMemoryKey.seqNum)
}
}
}
// Nothing to do if we haven't reached the limit
if len(seqNos) <= maxKeys {
return nil
}
// Take the sequence numbers we care about
sort.Slice(seqNos, func(i, j int) bool { return seqNos[i] < seqNos[j] })
toDeleteSlice := seqNos[:len(seqNos)-maxKeys]
// Put in map for easier lookup
toDelete := make(map[uint]bool)
for _, seqNo := range toDeleteSlice {
toDelete[seqNo] = true
}
for pubKey, keys := range s.keys {
for i, inMemoryKey := range keys {
if toDelete[inMemoryKey.seqNum] && bytes.Equal(inMemoryKey.sessionID, sessionID) {
delete(s.keys[pubKey], i)
}
}
}
return nil
}
// DeleteOldMks deletes old message keys for a session.
func (s *KeysStorageInMemory) DeleteOldMks(sessionID []byte, deleteUntilSeqKey uint) error {
for pubKey, keys := range s.keys {
for i, inMemoryKey := range keys {
if inMemoryKey.seqNum <= deleteUntilSeqKey && bytes.Equal(inMemoryKey.sessionID, sessionID) {
delete(s.keys[pubKey], i)
}
}
}
return nil
}
// Count returns number of message keys stored under the specified key.
func (s *KeysStorageInMemory) Count(pubKey Key) (uint, error) {
index := fmt.Sprintf("%x", pubKey)
if s.keys == nil {
return 0, nil
}
return uint(len(s.keys[index])), nil
}
// All returns all the keys
func (s *KeysStorageInMemory) All() (map[string]map[uint]Key, error) {
response := make(map[string]map[uint]Key)
for pubKey, keys := range s.keys {
response[pubKey] = make(map[uint]Key)
for n, key := range keys {
response[pubKey][n] = key.messageKey
}
}
return response, nil
}
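A minimal sketch of the KeysStorage interface in action, using the in-memory implementation (the zeroed keys and session ID are purely illustrative):
```go
package main

import (
	"fmt"
	"log"

	"github.com/status-im/doubleratchet"
)

func main() {
	var ks doubleratchet.KeysStorage = &doubleratchet.KeysStorageInMemory{}

	pub := make(doubleratchet.Key, 32) // ratchet public key used as the index
	mk := make(doubleratchet.Key, 32)  // a skipped message key

	// Store the key for message number 4 of session "s1" with sequence number 0.
	if err := ks.Put([]byte("s1"), pub, 4, mk, 0); err != nil {
		log.Fatal(err)
	}
	got, ok, err := ks.Get(pub, 4)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(ok, got) // true, followed by the stored key
}
```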

vendor/github.com/status-im/doubleratchet/message.go generated vendored Normal file

@@ -0,0 +1,56 @@
package doubleratchet
import (
"encoding/binary"
"fmt"
)
// MessageHE contains ciphertext and an encrypted header.
type MessageHE struct {
Header []byte `json:"header"`
Ciphertext []byte `json:"ciphertext"`
}
// Message is a single message exchanged by the parties.
type Message struct {
Header MessageHeader `json:"header"`
Ciphertext []byte `json:"ciphertext"`
}
// MessageHeader that is prepended to every message.
type MessageHeader struct {
// DHr is the sender's current ratchet public key.
DH Key `json:"dh"`
// N is the number of the message in the sending chain.
N uint32 `json:"n"`
// PN is the length of the previous sending chain.
PN uint32 `json:"pn"`
}
// Encode the header in the binary format.
func (mh MessageHeader) Encode() MessageEncHeader {
buf := make([]byte, 8)
binary.LittleEndian.PutUint32(buf[0:4], mh.N)
binary.LittleEndian.PutUint32(buf[4:8], mh.PN)
return append(buf, mh.DH[:]...)
}
// MessageEncHeader is a binary-encoded representation of a message header.
type MessageEncHeader []byte
// Decode message header out of the binary-encoded representation.
func (mh MessageEncHeader) Decode() (MessageHeader, error) {
// n (4 bytes) + pn (4 bytes) + dh (32 bytes)
if len(mh) != 40 {
return MessageHeader{}, fmt.Errorf("encoded message header must be 40 bytes, %d given", len(mh))
}
var dh Key = make(Key, 32)
copy(dh[:], mh[8:40])
return MessageHeader{
DH: dh,
N: binary.LittleEndian.Uint32(mh[0:4]),
PN: binary.LittleEndian.Uint32(mh[4:8]),
}, nil
}
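A small sketch of the header wire format described above (4-byte N, 4-byte PN, both little-endian, followed by the 32-byte DH key), shown as an Encode/Decode roundtrip with illustrative values:
```go
package main

import (
	"fmt"
	"log"

	"github.com/status-im/doubleratchet"
)

func main() {
	h := doubleratchet.MessageHeader{
		DH: make(doubleratchet.Key, 32), // sender's current ratchet public key
		N:  7,                           // message number in the sending chain
		PN: 3,                           // length of the previous sending chain
	}
	enc := h.Encode() // 40 bytes: N || PN || DH
	decoded, err := enc.Decode()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(decoded.N, decoded.PN) // 7 3
}
```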

vendor/github.com/status-im/doubleratchet/options.go generated vendored Normal file

@@ -0,0 +1,69 @@
package doubleratchet
import "fmt"
// option is a constructor option.
type option func(*State) error
// WithMaxSkip specifies the maximum number of skipped messages in a single chain.
// nolint: golint
func WithMaxSkip(n int) option {
return func(s *State) error {
if n < 0 {
return fmt.Errorf("n must be non-negative")
}
s.MaxSkip = uint(n)
return nil
}
}
// WithMaxKeep specifies how long we keep message keys, counted in number of messages received
// nolint: golint
func WithMaxKeep(n int) option {
return func(s *State) error {
if n < 0 {
return fmt.Errorf("n must be non-negative")
}
s.MaxKeep = uint(n)
return nil
}
}
// WithMaxMessageKeysPerSession specifies the maximum number of message keys per session
// nolint: golint
func WithMaxMessageKeysPerSession(n int) option {
return func(s *State) error {
if n < 0 {
return fmt.Errorf("n must be non-negative")
}
s.MaxMessageKeysPerSession = n
return nil
}
}
// WithKeysStorage replaces the default keys storage with the specified one.
// nolint: golint
func WithKeysStorage(ks KeysStorage) option {
return func(s *State) error {
if ks == nil {
return fmt.Errorf("KeysStorage mustn't be nil")
}
s.MkSkipped = ks
return nil
}
}
// WithCrypto replaces the default cryptographic supplement with the specified one.
// nolint: golint
func WithCrypto(c Crypto) option {
return func(s *State) error {
if c == nil {
return fmt.Errorf("Crypto mustn't be nil")
}
s.Crypto = c
s.RootCh.Crypto = c
s.SendCh.Crypto = c
s.RecvCh.Crypto = c
return nil
}
}
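A minimal sketch of passing these options to a constructor (the session ID is hypothetical; a nil SessionStorage keeps the state purely in memory):
```go
package main

import (
	"log"

	"github.com/status-im/doubleratchet"
)

func main() {
	sk := make(doubleratchet.Key, 32) // illustrative pre-agreed shared secret
	pair, err := doubleratchet.DefaultCrypto{}.GenerateDH()
	if err != nil {
		log.Fatal(err)
	}
	_, err = doubleratchet.New(
		[]byte("session-id"), sk, pair, nil,
		doubleratchet.WithMaxSkip(1200), // tolerate up to 1200 skipped messages per chain
		doubleratchet.WithMaxKeep(90),   // drop skipped keys 90 received messages later
		doubleratchet.WithKeysStorage(&doubleratchet.KeysStorageInMemory{}),
	)
	if err != nil {
		log.Fatal(err)
	}
}
```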

vendor/github.com/status-im/doubleratchet/session.go generated vendored Normal file

@@ -0,0 +1,193 @@
package doubleratchet
import (
"bytes"
"fmt"
)
// Session of the party involved in the Double Ratchet Algorithm.
type Session interface {
// RatchetEncrypt performs a symmetric-key ratchet step, then AEAD-encrypts the message with
// the resulting message key.
RatchetEncrypt(plaintext, associatedData []byte) (Message, error)
// RatchetDecrypt is called to AEAD-decrypt messages.
RatchetDecrypt(m Message, associatedData []byte) ([]byte, error)
// DeleteMk removes a message key from the database.
DeleteMk(Key, uint32) error
}
type sessionState struct {
id []byte
State
storage SessionStorage
}
// New creates session with the shared key.
func New(id []byte, sharedKey Key, keyPair DHPair, storage SessionStorage, opts ...option) (Session, error) {
state, err := newState(sharedKey, opts...)
if err != nil {
return nil, err
}
state.DHs = keyPair
session := &sessionState{id: id, State: state, storage: storage}
return session, session.store()
}
// NewWithRemoteKey creates session with the shared key and public key of the other party.
func NewWithRemoteKey(id []byte, sharedKey, remoteKey Key, storage SessionStorage, opts ...option) (Session, error) {
state, err := newState(sharedKey, opts...)
if err != nil {
return nil, err
}
state.DHs, err = state.Crypto.GenerateDH()
if err != nil {
return nil, fmt.Errorf("can't generate key pair: %s", err)
}
state.DHr = remoteKey
secret, err := state.Crypto.DH(state.DHs, state.DHr)
if err != nil {
return nil, fmt.Errorf("can't generate dh secret: %s", err)
}
state.SendCh, _ = state.RootCh.step(secret)
session := &sessionState{id: id, State: state, storage: storage}
return session, session.store()
}
// Load a session from a SessionStorage implementation and apply options.
func Load(id []byte, store SessionStorage, opts ...option) (Session, error) {
state, err := store.Load(id)
if err != nil {
return nil, err
}
if state == nil {
return nil, nil
}
if err = state.applyOptions(opts); err != nil {
return nil, err
}
s := &sessionState{id: id, State: *state}
s.storage = store
return s, nil
}
func (s *sessionState) store() error {
if s.storage != nil {
err := s.storage.Save(s.id, &s.State)
if err != nil {
return err
}
}
return nil
}
// RatchetEncrypt performs a symmetric-key ratchet step, then encrypts the message with
// the resulting message key.
func (s *sessionState) RatchetEncrypt(plaintext, ad []byte) (Message, error) {
var (
h = MessageHeader{
DH: s.DHs.PublicKey(),
N: s.SendCh.N,
PN: s.PN,
}
mk = s.SendCh.step()
)
ct, err := s.Crypto.Encrypt(mk, plaintext, append(ad, h.Encode()...))
if err != nil {
return Message{}, err
}
// Store state
if err := s.store(); err != nil {
return Message{}, err
}
return Message{h, ct}, nil
}
// DeleteMk deletes a message key
func (s *sessionState) DeleteMk(dh Key, n uint32) error {
return s.MkSkipped.DeleteMk(dh, uint(n))
}
// RatchetDecrypt is called to decrypt messages.
func (s *sessionState) RatchetDecrypt(m Message, ad []byte) ([]byte, error) {
// Is the message one of the skipped?
mk, ok, err := s.MkSkipped.Get(m.Header.DH, uint(m.Header.N))
if err != nil {
return nil, err
}
if ok {
plaintext, err := s.Crypto.Decrypt(mk, m.Ciphertext, append(ad, m.Header.Encode()...))
if err != nil {
return nil, fmt.Errorf("can't decrypt skipped message: %s", err)
}
if err := s.store(); err != nil {
return nil, err
}
return plaintext, nil
}
var (
// All changes must be applied to a copy of the state, so that this session won't be modified or left in a dirty state.
sc = s.State
skippedKeys1 []skippedKey
skippedKeys2 []skippedKey
)
// Is there a new ratchet key?
if !bytes.Equal(m.Header.DH, sc.DHr) {
if skippedKeys1, err = sc.skipMessageKeys(sc.DHr, uint(m.Header.PN)); err != nil {
return nil, fmt.Errorf("can't skip previous chain message keys: %s", err)
}
if err = sc.dhRatchet(m.Header); err != nil {
return nil, fmt.Errorf("can't perform ratchet step: %s", err)
}
}
// After all, update the current chain.
if skippedKeys2, err = sc.skipMessageKeys(sc.DHr, uint(m.Header.N)); err != nil {
return nil, fmt.Errorf("can't skip current chain message keys: %s", err)
}
mk = sc.RecvCh.step()
plaintext, err := s.Crypto.Decrypt(mk, m.Ciphertext, append(ad, m.Header.Encode()...))
if err != nil {
return nil, fmt.Errorf("can't decrypt: %s", err)
}
// Append current key, waiting for confirmation
skippedKeys := append(skippedKeys1, skippedKeys2...)
skippedKeys = append(skippedKeys, skippedKey{
key: sc.DHr,
nr: uint(m.Header.N),
mk: mk,
seq: sc.KeysCount,
})
// Increment the number of keys
sc.KeysCount++
// Apply changes.
if err := s.applyChanges(sc, s.id, skippedKeys); err != nil {
return nil, err
}
// Store state
if err := s.store(); err != nil {
return nil, err
}
return plaintext, nil
}
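To see the skipped-key path of RatchetDecrypt in action, a minimal sketch of out-of-order delivery (hypothetical session IDs, in-memory state): decrypting the second message first forces Bob to skip and store the first message's key, which is then found in MkSkipped.
```go
package main

import (
	"fmt"
	"log"

	"github.com/status-im/doubleratchet"
)

func main() {
	sk := make(doubleratchet.Key, 32) // illustrative pre-agreed shared secret
	pair, err := doubleratchet.DefaultCrypto{}.GenerateDH()
	if err != nil {
		log.Fatal(err)
	}
	bob, err := doubleratchet.New([]byte("bob"), sk, pair, nil)
	if err != nil {
		log.Fatal(err)
	}
	alice, err := doubleratchet.NewWithRemoteKey([]byte("alice"), sk, pair.PublicKey(), nil)
	if err != nil {
		log.Fatal(err)
	}
	m1, err := alice.RatchetEncrypt([]byte("first"), nil)
	if err != nil {
		log.Fatal(err)
	}
	m2, err := alice.RatchetEncrypt([]byte("second"), nil)
	if err != nil {
		log.Fatal(err)
	}
	// Deliver out of order: m2 first makes Bob skip and store m1's message key.
	for _, m := range []doubleratchet.Message{m2, m1} {
		pt, err := bob.RatchetDecrypt(m, nil)
		if err != nil {
			log.Fatal(err)
		}
		fmt.Println(string(pt)) // "second", then "first"
	}
}
```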


@@ -0,0 +1,9 @@
package doubleratchet
type SessionStorage interface {
// Save state keyed by id
Save(id []byte, state *State) error
// Load state by id
Load(id []byte) (*State, error)
}

vendor/github.com/status-im/doubleratchet/state.go generated vendored Normal file

@@ -0,0 +1,178 @@
package doubleratchet
// TODO: During each DH ratchet step a new ratchet key pair and sending chain are generated.
// As the sending chain is not needed right away, these steps could be deferred until the party
// is about to send a new message.
import (
"fmt"
)
// The double ratchet state.
type State struct {
Crypto Crypto
// DH Ratchet public key (the remote key).
DHr Key
// DH Ratchet key pair (the self ratchet key).
DHs DHPair
// Symmetric ratchet root chain.
RootCh kdfRootChain
// Symmetric ratchet sending and receiving chains.
SendCh, RecvCh kdfChain
// Number of messages in previous sending chain.
PN uint32
// Dictionary of skipped-over message keys, indexed by ratchet public key or header key
// and message number.
MkSkipped KeysStorage
// The maximum number of message keys that can be skipped in a single chain.
// WithMaxSkip should be set high enough to tolerate routine lost or delayed messages,
// but low enough that a malicious sender can't trigger excessive recipient computation.
MaxSkip uint
// Receiving header key and next header key. Only used for header encryption.
HKr, NHKr Key
// Sending header key and next header key. Only used for header encryption.
HKs, NHKs Key
// How long we keep message keys, counted in number of messages received;
// for example, if MaxKeep is 5 we only keep the last 5 message keys, deleting anything older.
MaxKeep uint
// Max number of message keys per session; older keys will be deleted in FIFO fashion.
MaxMessageKeysPerSession int
// The number of the current ratchet step.
Step uint
// KeysCount the number of keys generated for decrypting
KeysCount uint
}
func DefaultState(sharedKey Key) State {
c := DefaultCrypto{}
return State{
DHs: dhPair{},
Crypto: c,
RootCh: kdfRootChain{CK: sharedKey, Crypto: c},
// Populate CKs and CKr with sharedKey so that both parties could send and receive
// messages from the very beginning.
SendCh: kdfChain{CK: sharedKey, Crypto: c},
RecvCh: kdfChain{CK: sharedKey, Crypto: c},
MkSkipped: &KeysStorageInMemory{},
MaxSkip: 1000,
MaxMessageKeysPerSession: 2000,
MaxKeep: 2000,
KeysCount: 0,
}
}
func (s *State) applyOptions(opts []option) error {
for i := range opts {
if err := opts[i](s); err != nil {
return fmt.Errorf("failed to apply option: %s", err)
}
}
return nil
}
func newState(sharedKey Key, opts ...option) (State, error) {
if sharedKey == nil {
return State{}, fmt.Errorf("sharedKey mustn't be empty")
}
s := DefaultState(sharedKey)
if err := s.applyOptions(opts); err != nil {
return State{}, err
}
return s, nil
}
// dhRatchet performs a single ratchet step.
func (s *State) dhRatchet(m MessageHeader) error {
s.PN = s.SendCh.N
s.DHr = m.DH
s.HKs = s.NHKs
s.HKr = s.NHKr
recvSecret, err := s.Crypto.DH(s.DHs, s.DHr)
if err != nil {
return fmt.Errorf("failed to generate dh recieve ratchet secret: %s", err)
}
s.RecvCh, s.NHKr = s.RootCh.step(recvSecret)
s.DHs, err = s.Crypto.GenerateDH()
if err != nil {
return fmt.Errorf("failed to generate dh pair: %s", err)
}
sendSecret, err := s.Crypto.DH(s.DHs, s.DHr)
if err != nil {
return fmt.Errorf("failed to generate dh send ratchet secret: %s", err)
}
s.SendCh, s.NHKs = s.RootCh.step(sendSecret)
return nil
}
type skippedKey struct {
key Key
nr uint
mk Key
seq uint
}
// skipMessageKeys skips message keys in the current receiving chain.
func (s *State) skipMessageKeys(key Key, until uint) ([]skippedKey, error) {
if until < uint(s.RecvCh.N) {
return nil, fmt.Errorf("bad until: probably an out-of-order message that was deleted")
}
if uint(s.RecvCh.N)+s.MaxSkip < until {
return nil, fmt.Errorf("too many messages")
}
skipped := []skippedKey{}
for uint(s.RecvCh.N) < until {
mk := s.RecvCh.step()
skipped = append(skipped, skippedKey{
key: key,
nr: uint(s.RecvCh.N - 1),
mk: mk,
seq: s.KeysCount,
})
// Increment key count
s.KeysCount++
}
return skipped, nil
}
func (s *State) applyChanges(sc State, sessionID []byte, skipped []skippedKey) error {
*s = sc
for _, skipped := range skipped {
if err := s.MkSkipped.Put(sessionID, skipped.key, skipped.nr, skipped.mk, skipped.seq); err != nil {
return err
}
}
if err := s.MkSkipped.TruncateMks(sessionID, s.MaxMessageKeysPerSession); err != nil {
return err
}
if s.KeysCount >= s.MaxKeep {
if err := s.MkSkipped.DeleteOldMks(sessionID, s.KeysCount-s.MaxKeep); err != nil {
return err
}
}
return nil
}


@@ -0,0 +1 @@
vendor/


@@ -0,0 +1,15 @@
ETHv4 multiaddr library
=======================
The ETHv4 multiaddr format adds a spec that is compatible with the enode definition. For example, a
peer with identity 436cc6f674928fdc9a9f7990f2944002b685d1c37f025c1be425185b5b1f0900feaf1ccc2a6130268f9901be4a7d252f37302c8335a2c1a62736e9232691cc3a, IP 174.138.105.243, and TCP port 30404 can be written as an enode URL:
```
enode://436cc6f674928fdc9a9f7990f2944002b685d1c37f025c1be425185b5b1f0900feaf1ccc2a6130268f9901be4a7d252f37302c8335a2c1a62736e9232691cc3a@174.138.105.243:30404
```
or as multiaddr
```
/ip4/174.138.105.243/tcp/30404/ethv4/436cc6f674928fdc9a9f7990f2944002b685d1c37f025c1be425185b5b1f0900feaf1ccc2a6130268f9901be4a7d252f37302c8335a2c1a62736e9232691cc3a
```
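A minimal sketch of mapping a libp2p peer ID to a devp2p node ID with this package's PeerIDToNodeID, assuming the vendored import path github.com/status-im/go-multiaddr-ethv4 (the file header above is missing, so the path is inferred). A freshly generated secp256k1 identity is used so the peer ID inlines the public key that PeerIDToNodeID extracts:
```go
package main

import (
	"crypto/rand"
	"fmt"
	"log"

	"github.com/libp2p/go-libp2p/core/crypto"
	"github.com/libp2p/go-libp2p/core/peer"
	ethv4 "github.com/status-im/go-multiaddr-ethv4" // assumed import path
)

func main() {
	// Generate a secp256k1 identity and derive its base58 libp2p peer ID.
	priv, _, err := crypto.GenerateSecp256k1Key(rand.Reader)
	if err != nil {
		log.Fatal(err)
	}
	pid, err := peer.IDFromPrivateKey(priv)
	if err != nil {
		log.Fatal(err)
	}
	// Convert the peer ID to the enode.ID used by ETHv4 discovery.
	nodeID, err := ethv4.PeerIDToNodeID(pid.String())
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(nodeID)
}
```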


@@ -0,0 +1,67 @@
package ethv4
import (
"errors"
"github.com/ethereum/go-ethereum/p2p/enode"
"github.com/libp2p/go-libp2p/core/crypto"
"github.com/libp2p/go-libp2p/core/peer"
ma "github.com/multiformats/go-multiaddr"
mh "github.com/multiformats/go-multihash"
btcec "github.com/btcsuite/btcd/btcec/v2"
)
const (
P_ETHv4 = 0x01EA
)
func init() {
if err := ma.AddProtocol(
ma.Protocol{
Name: "ethv4",
Code: P_ETHv4,
VCode: ma.CodeToVarint(P_ETHv4),
Size: 312,
Path: false,
Transcoder: TranscoderETHv4,
}); err != nil {
panic(err)
}
}
var TranscoderETHv4 = ma.NewTranscoderFromFunctions(ethv4StB, ethv4BtS, func([]byte) error { return nil })
func ethv4StB(s string) ([]byte, error) {
id, err := mh.FromB58String(s)
if err != nil {
return nil, err
}
return id, err
}
func ethv4BtS(b []byte) (string, error) {
id, err := mh.Cast(b)
if err != nil {
return "", err
}
return id.B58String(), err
}
// PeerIDToNodeID casts a peer.ID (base58-encoded string) to an enode.ID.
func PeerIDToNodeID(pid string) (n enode.ID, err error) {
nodeid, err := peer.Decode(pid)
if err != nil {
return n, err
}
pubkey, err := nodeid.ExtractPublicKey()
if err != nil {
return n, err
}
seckey, ok := pubkey.(*crypto.Secp256k1PublicKey)
if !ok {
return n, errors.New("public key is not on the secp256k1 curve")
}
return enode.PubkeyToIDV4((*btcec.PublicKey)(seckey).ToECDSA()), nil
}

vendor/github.com/status-im/keycard-go/LICENSE.md generated vendored Normal file

@@ -0,0 +1,356 @@
Mozilla Public License Version 2.0
==================================
### 1. Definitions
**1.1. “Contributor”**
means each individual or legal entity that creates, contributes to
the creation of, or owns Covered Software.
**1.2. “Contributor Version”**
means the combination of the Contributions of others (if any) used
by a Contributor and that particular Contributor's Contribution.
**1.3. “Contribution”**
means Covered Software of a particular Contributor.
**1.4. “Covered Software”**
means Source Code Form to which the initial Contributor has attached
the notice in Exhibit A, the Executable Form of such Source Code
Form, and Modifications of such Source Code Form, in each case
including portions thereof.
**1.5. “Incompatible With Secondary Licenses”**
means
* **(a)** that the initial Contributor has attached the notice described
in Exhibit B to the Covered Software; or
* **(b)** that the Covered Software was made available under the terms of
version 1.1 or earlier of the License, but not also under the
terms of a Secondary License.
**1.6. “Executable Form”**
means any form of the work other than Source Code Form.
**1.7. “Larger Work”**
means a work that combines Covered Software with other material, in
a separate file or files, that is not Covered Software.
**1.8. “License”**
means this document.
**1.9. “Licensable”**
means having the right to grant, to the maximum extent possible,
whether at the time of the initial grant or subsequently, any and
all of the rights conveyed by this License.
**1.10. “Modifications”**
means any of the following:
* **(a)** any file in Source Code Form that results from an addition to,
deletion from, or modification of the contents of Covered
Software; or
* **(b)** any new file in Source Code Form that contains any Covered
Software.
**1.11. “Patent Claims” of a Contributor**
means any patent claim(s), including without limitation, method,
process, and apparatus claims, in any patent Licensable by such
Contributor that would be infringed, but for the grant of the
License, by the making, using, selling, offering for sale, having
made, import, or transfer of either its Contributions or its
Contributor Version.
**1.12. “Secondary License”**
means either the GNU General Public License, Version 2.0, the GNU
Lesser General Public License, Version 2.1, the GNU Affero General
Public License, Version 3.0, or any later versions of those
licenses.
**1.13. “Source Code Form”**
means the form of the work preferred for making modifications.
**1.14. “You” (or “Your”)**
means an individual or a legal entity exercising rights under this
License. For legal entities, “You” includes any entity that
controls, is controlled by, or is under common control with You. For
purposes of this definition, “control” means **(a)** the power, direct
or indirect, to cause the direction or management of such entity,
whether by contract or otherwise, or **(b)** ownership of more than
fifty percent (50%) of the outstanding shares or beneficial
ownership of such entity.
### 2. License Grants and Conditions
#### 2.1. Grants
Each Contributor hereby grants You a world-wide, royalty-free,
non-exclusive license:
* **(a)** under intellectual property rights (other than patent or trademark)
Licensable by such Contributor to use, reproduce, make available,
modify, display, perform, distribute, and otherwise exploit its
Contributions, either on an unmodified basis, with Modifications, or
as part of a Larger Work; and
* **(b)** under Patent Claims of such Contributor to make, use, sell, offer
for sale, have made, import, and otherwise transfer either its
Contributions or its Contributor Version.
#### 2.2. Effective Date
The licenses granted in Section 2.1 with respect to any Contribution
become effective for each Contribution on the date the Contributor first
distributes such Contribution.
#### 2.3. Limitations on Grant Scope
The licenses granted in this Section 2 are the only rights granted under
this License. No additional rights or licenses will be implied from the
distribution or licensing of Covered Software under this License.
Notwithstanding Section 2.1(b) above, no patent license is granted by a
Contributor:
* **(a)** for any code that a Contributor has removed from Covered Software;
or
* **(b)** for infringements caused by: **(i)** Your and any other third party's
modifications of Covered Software, or **(ii)** the combination of its
Contributions with other software (except as part of its Contributor
Version); or
* **(c)** under Patent Claims infringed by Covered Software in the absence of
its Contributions.
This License does not grant any rights in the trademarks, service marks,
or logos of any Contributor (except as may be necessary to comply with
the notice requirements in Section 3.4).
#### 2.4. Subsequent Licenses
No Contributor makes additional grants as a result of Your choice to
distribute the Covered Software under a subsequent version of this
License (see Section 10.2) or under the terms of a Secondary License (if
permitted under the terms of Section 3.3).
#### 2.5. Representation
Each Contributor represents that the Contributor believes its
Contributions are its original creation(s) or it has sufficient rights
to grant the rights to its Contributions conveyed by this License.
#### 2.6. Fair Use
This License is not intended to limit any rights You have under
applicable copyright doctrines of fair use, fair dealing, or other
equivalents.
#### 2.7. Conditions
Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted
in Section 2.1.
### 3. Responsibilities
#### 3.1. Distribution of Source Form
All distribution of Covered Software in Source Code Form, including any
Modifications that You create or to which You contribute, must be under
the terms of this License. You must inform recipients that the Source
Code Form of the Covered Software is governed by the terms of this
License, and how they can obtain a copy of this License. You may not
attempt to alter or restrict the recipients' rights in the Source Code
Form.
#### 3.2. Distribution of Executable Form
If You distribute Covered Software in Executable Form then:
* **(a)** such Covered Software must also be made available in Source Code
Form, as described in Section 3.1, and You must inform recipients of
the Executable Form how they can obtain a copy of such Source Code
Form by reasonable means in a timely manner, at a charge no more
than the cost of distribution to the recipient; and
* **(b)** You may distribute such Executable Form under the terms of this
License, or sublicense it under different terms, provided that the
license for the Executable Form does not attempt to limit or alter
the recipients' rights in the Source Code Form under this License.
#### 3.3. Distribution of a Larger Work
You may create and distribute a Larger Work under terms of Your choice,
provided that You also comply with the requirements of this License for
the Covered Software. If the Larger Work is a combination of Covered
Software with a work governed by one or more Secondary Licenses, and the
Covered Software is not Incompatible With Secondary Licenses, this
License permits You to additionally distribute such Covered Software
under the terms of such Secondary License(s), so that the recipient of
the Larger Work may, at their option, further distribute the Covered
Software under the terms of either this License or such Secondary
License(s).
#### 3.4. Notices
You may not remove or alter the substance of any license notices
(including copyright notices, patent notices, disclaimers of warranty,
or limitations of liability) contained within the Source Code Form of
the Covered Software, except that You may alter any license notices to
the extent required to remedy known factual inaccuracies.
#### 3.5. Application of Additional Terms
You may choose to offer, and to charge a fee for, warranty, support,
indemnity or liability obligations to one or more recipients of Covered
Software. However, You may do so only on Your own behalf, and not on
behalf of any Contributor. You must make it absolutely clear that any
such warranty, support, indemnity, or liability obligation is offered by
You alone, and You hereby agree to indemnify every Contributor for any
liability incurred by such Contributor as a result of warranty, support,
indemnity or liability terms You offer. You may include additional
disclaimers of warranty and limitations of liability specific to any
jurisdiction.
### 4. Inability to Comply Due to Statute or Regulation
If it is impossible for You to comply with any of the terms of this
License with respect to some or all of the Covered Software due to
statute, judicial order, or regulation then You must: **(a)** comply with
the terms of this License to the maximum extent possible; and **(b)**
describe the limitations and the code they affect. Such description must
be placed in a text file included with all distributions of the Covered
Software under this License. Except to the extent prohibited by statute
or regulation, such description must be sufficiently detailed for a
recipient of ordinary skill to be able to understand it.
### 5. Termination
**5.1.** The rights granted under this License will terminate automatically
if You fail to comply with any of its terms. However, if You become
compliant, then the rights granted under this License from a particular
Contributor are reinstated **(a)** provisionally, unless and until such
Contributor explicitly and finally terminates Your grants, and **(b)** on an
ongoing basis, if such Contributor fails to notify You of the
non-compliance by some reasonable means prior to 60 days after You have
come back into compliance. Moreover, Your grants from a particular
Contributor are reinstated on an ongoing basis if such Contributor
notifies You of the non-compliance by some reasonable means, this is the
first time You have received notice of non-compliance with this License
from such Contributor, and You become compliant prior to 30 days after
Your receipt of the notice.
**5.2.** If You initiate litigation against any entity by asserting a patent
infringement claim (excluding declaratory judgment actions,
counter-claims, and cross-claims) alleging that a Contributor Version
directly or indirectly infringes any patent, then the rights granted to
You by any and all Contributors for the Covered Software under Section
2.1 of this License shall terminate.
**5.3.** In the event of termination under Sections 5.1 or 5.2 above, all
end user license agreements (excluding distributors and resellers) which
have been validly granted by You or Your distributors under this License
prior to termination shall survive termination.
### 6. Disclaimer of Warranty
> Covered Software is provided under this License on an “as is”
> basis, without warranty of any kind, either expressed, implied, or
> statutory, including, without limitation, warranties that the
> Covered Software is free of defects, merchantable, fit for a
> particular purpose or non-infringing. The entire risk as to the
> quality and performance of the Covered Software is with You.
> Should any Covered Software prove defective in any respect, You
> (not any Contributor) assume the cost of any necessary servicing,
> repair, or correction. This disclaimer of warranty constitutes an
> essential part of this License. No use of any Covered Software is
> authorized under this License except under this disclaimer.
### 7. Limitation of Liability
> Under no circumstances and under no legal theory, whether tort
> (including negligence), contract, or otherwise, shall any
> Contributor, or anyone who distributes Covered Software as
> permitted above, be liable to You for any direct, indirect,
> special, incidental, or consequential damages of any character
> including, without limitation, damages for lost profits, loss of
> goodwill, work stoppage, computer failure or malfunction, or any
> and all other commercial damages or losses, even if such party
> shall have been informed of the possibility of such damages. This
> limitation of liability shall not apply to liability for death or
> personal injury resulting from such party's negligence to the
> extent applicable law prohibits such limitation. Some
> jurisdictions do not allow the exclusion or limitation of
> incidental or consequential damages, so this exclusion and
> limitation may not apply to You.
### 8. Litigation
Any litigation relating to this License may be brought only in the
courts of a jurisdiction where the defendant maintains its principal
place of business and such litigation shall be governed by laws of that
jurisdiction, without reference to its conflict-of-law provisions.
Nothing in this Section shall prevent a party's ability to bring
cross-claims or counter-claims.
### 9. Miscellaneous
This License represents the complete agreement concerning the subject
matter hereof. If any provision of this License is held to be
unenforceable, such provision shall be reformed only to the extent
necessary to make it enforceable. Any law or regulation which provides
that the language of a contract shall be construed against the drafter
shall not be used to construe this License against a Contributor.
### 10. Versions of the License
#### 10.1. New Versions
Mozilla Foundation is the license steward. Except as provided in Section
10.3, no one other than the license steward has the right to modify or
publish new versions of this License. Each version will be given a
distinguishing version number.
#### 10.2. Effect of New Versions
You may distribute the Covered Software under the terms of the version
of the License under which You originally received the Covered Software,
or under the terms of any subsequent version published by the license
steward.
#### 10.3. Modified Versions
If you create software not governed by this License, and you want to
create a new license for such software, you may create and use a
modified version of this License if you rename the license and remove
any references to the name of the license steward (except to note that
such modified license differs from this License).
#### 10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses
If You choose to distribute Source Code Form that is Incompatible With
Secondary Licenses under the terms of this version of the License, the
notice described in Exhibit B of this License must be attached.
## Exhibit A - Source Code Form License Notice
This Source Code Form is subject to the terms of the Mozilla Public
License, v. 2.0. If a copy of the MPL was not distributed with this
file, You can obtain one at http://mozilla.org/MPL/2.0/.
If it is not possible or desirable to put the notice in a particular
file, then You may include the notice in a location (such as a LICENSE
file in a relevant directory) where a recipient would be likely to look
for such a notice.
You may add additional accurate notices of copyright ownership.
## Exhibit B - “Incompatible With Secondary Licenses” Notice
This Source Code Form is "Incompatible With Secondary Licenses", as
defined by the Mozilla Public License, v. 2.0.


@@ -0,0 +1,214 @@
package derivationpath
import (
"fmt"
"io"
"strconv"
"strings"
)
type StartingPoint int
const (
tokenMaster = 0x6D // char m
tokenSeparator = 0x2F // char /
tokenHardened = 0x27 // char '
tokenDot = 0x2E // char .
hardenedStart = 0x80000000 // 2^31
)
const (
StartingPointMaster StartingPoint = iota + 1
StartingPointCurrent
StartingPointParent
)
type parseFunc = func() error
type decoder struct {
r *strings.Reader
f parseFunc
pos int
path []uint32
start StartingPoint
currentToken string
currentTokenHardened bool
}
func newDecoder(path string) *decoder {
d := &decoder{
r: strings.NewReader(path),
}
d.reset()
return d
}
func (d *decoder) reset() {
d.r.Seek(0, io.SeekStart)
d.pos = 0
d.start = StartingPointCurrent
d.f = d.parseStart
d.path = make([]uint32, 0)
d.resetCurrentToken()
}
func (d *decoder) resetCurrentToken() {
d.currentToken = ""
d.currentTokenHardened = false
}
func (d *decoder) parse() (StartingPoint, []uint32, error) {
for {
err := d.f()
if err != nil {
if err == io.EOF {
err = nil
} else {
err = fmt.Errorf("at position %d, %s", d.pos, err.Error())
}
return d.start, d.path, err
}
}
}
func (d *decoder) readByte() (byte, error) {
b, err := d.r.ReadByte()
if err != nil {
return b, err
}
d.pos++
return b, nil
}
func (d *decoder) unreadByte() error {
err := d.r.UnreadByte()
if err != nil {
return err
}
d.pos--
return nil
}
func (d *decoder) parseStart() error {
b, err := d.readByte()
if err != nil {
return err
}
if b == tokenMaster {
d.start = StartingPointMaster
d.f = d.parseSeparator
return nil
}
if b == tokenDot {
b2, err := d.readByte()
if err != nil {
return err
}
if b2 == tokenDot {
d.f = d.parseSeparator
d.start = StartingPointParent
return nil
}
d.f = d.parseSeparator
d.start = StartingPointCurrent
return d.unreadByte()
}
d.f = d.parseSegment
return d.unreadByte()
}
func (d *decoder) saveSegment() error {
if len(d.currentToken) > 0 {
i, err := strconv.ParseUint(d.currentToken, 10, 32)
if err != nil {
return err
}
if i >= hardenedStart {
d.pos -= len(d.currentToken) - 1
return fmt.Errorf("index must be lower than 2^31, got %d", i)
}
if d.currentTokenHardened {
i += hardenedStart
}
d.path = append(d.path, uint32(i))
}
d.f = d.parseSegment
d.resetCurrentToken()
return nil
}
func (d *decoder) parseSeparator() error {
b, err := d.readByte()
if err != nil {
return err
}
if b == tokenSeparator {
return d.saveSegment()
}
return fmt.Errorf("expected %s, got %s", string(tokenSeparator), string(b))
}
func (d *decoder) parseSegment() error {
b, err := d.readByte()
if err == io.EOF {
if len(d.currentToken) == 0 {
return fmt.Errorf("expected number, got EOF")
}
if newErr := d.saveSegment(); newErr != nil {
return newErr
}
return err
}
if err != nil {
return err
}
if len(d.currentToken) > 0 && b == tokenSeparator {
return d.saveSegment()
}
if len(d.currentToken) > 0 && b == tokenHardened {
d.currentTokenHardened = true
d.f = d.parseSeparator
return nil
}
if b < 0x30 || b > 0x39 {
return fmt.Errorf("expected number, got %s", string(b))
}
d.currentToken = fmt.Sprintf("%s%s", d.currentToken, string(b))
return nil
}
func Decode(str string) (StartingPoint, []uint32, error) {
d := newDecoder(str)
return d.parse()
}
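A minimal sketch of the decoder, assuming the vendored import path github.com/status-im/keycard-go/derivationpath (the file header above is missing; the path is inferred from the surrounding keycard-go files). A standard BIP-44 path is used as the illustrative input:
```go
package main

import (
	"fmt"
	"log"

	"github.com/status-im/keycard-go/derivationpath" // assumed import path
)

func main() {
	start, path, err := derivationpath.Decode("m/44'/60'/0'/0/0")
	if err != nil {
		log.Fatal(err)
	}
	// Hardened segments (') are offset by 2^31, so 44' decodes to 0x8000002C.
	fmt.Println(start == derivationpath.StartingPointMaster, path)
}
```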


@@ -0,0 +1,36 @@
package derivationpath
import (
"bytes"
"encoding/binary"
"fmt"
"strings"
)
func Encode(rawPath []uint32) string {
segments := []string{string(tokenMaster)}
for _, i := range rawPath {
suffix := ""
if i >= hardenedStart {
i = i - hardenedStart
suffix = string(tokenHardened)
}
segments = append(segments, fmt.Sprintf("%d%s", i, suffix))
}
return strings.Join(segments, string(tokenSeparator))
}
func EncodeFromBytes(data []byte) (string, error) {
buf := bytes.NewBuffer(data)
rawPath := make([]uint32, buf.Len()/4)
err := binary.Read(buf, binary.BigEndian, &rawPath)
if err != nil {
return "", err
}
return Encode(rawPath), nil
}
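And the reverse direction, under the same assumed import path, encoding raw indices back into the string form (indices at or above 2^31 are rendered with the hardened marker):
```go
package main

import (
	"fmt"

	"github.com/status-im/keycard-go/derivationpath" // assumed import path
)

func main() {
	// 0x8000002C is 44 + 2^31, i.e. the hardened segment 44'.
	raw := []uint32{0x8000002C, 0x8000003C, 0x80000000, 0, 0}
	fmt.Println(derivationpath.Encode(raw)) // m/44'/60'/0'/0/0
}
```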

13
vendor/github.com/status-im/markdown/.gitignore generated vendored Normal file

@@ -0,0 +1,13 @@
*.out
*.swp
*.8
*.6
_obj
_test*
markdown
tags
fuzz-workdir/
markdown-fuzz.zip
coverage.txt
testdata/*_got.md
testdata/*_ast.txt

7
vendor/github.com/status-im/markdown/.gitpod generated vendored Normal file

@@ -0,0 +1,7 @@
checkoutLocation: "src/github.com/gomarkdown/markdown"
workspaceLocation: "."
tasks:
- command: >
cd /workspace/src/github.com/gomarkdown/markdown &&
go get -v ./... &&
go test -c

17
vendor/github.com/status-im/markdown/.travis.yml generated vendored Normal file

@@ -0,0 +1,17 @@
dist: bionic
language: go
go:
- "1.12.x"
install:
- go build -v ./...
script:
- go test -v ./...
- go test -run=^$ -bench=BenchmarkReference -benchmem
- ./s/test_with_codecoverage.sh
- ./s/ci_fuzzit.sh
after_success:
- bash <(curl -s https://codecov.io/bash)

31
vendor/github.com/status-im/markdown/LICENSE.txt generated vendored Normal file

@@ -0,0 +1,31 @@
Markdown is distributed under the Simplified BSD License:
Copyright © 2011 Russ Ross
Copyright © 2018 Krzysztof Kowalczyk
Copyright © 2018 Authors
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following
disclaimer in the documentation and/or other materials provided with
the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.

325
vendor/github.com/status-im/markdown/README.md generated vendored Normal file

@@ -0,0 +1,325 @@
# Markdown Parser and HTML Renderer for Go
[![GoDoc](https://godoc.org/github.com/status-im/markdown?status.svg)](https://godoc.org/github.com/gomarkdown/markdown) [![codecov](https://codecov.io/gh/gomarkdown/markdown/branch/master/graph/badge.svg)](https://codecov.io/gh/gomarkdown/markdown)
Package `github.com/status-im/markdown` is a very fast Go library for parsing [Markdown](https://daringfireball.net/projects/markdown/) documents and rendering them to HTML.
It's fast and supports common extensions.
## Installation
go get -u github.com/status-im/markdown
API Docs:
- https://godoc.org/github.com/status-im/markdown : top level package
- https://godoc.org/github.com/status-im/markdown/ast : defines abstract syntax tree of parsed markdown document
- https://godoc.org/github.com/status-im/markdown/parser : parser
- https://godoc.org/github.com/status-im/markdown/html : html renderer
## Usage
To convert markdown text to HTML using reasonable defaults:
```go
md := []byte("## markdown document")
output := markdown.ToHTML(md, nil, nil)
```
## Customizing markdown parser
The Markdown format is loosely specified, and multiple extensions have been invented since the original specification was created.
The parser supports several [extensions](https://godoc.org/github.com/status-im/markdown/parser#Extensions).
The default parser uses the most common `parser.CommonExtensions`, but you can easily create a parser with a custom set of extensions:
```go
import (
"github.com/status-im/markdown"
"github.com/status-im/markdown/parser"
)
extensions := parser.CommonExtensions | parser.AutoHeadingIDs
parser := parser.NewWithExtensions(extensions)
md := []byte("markdown text")
html := markdown.ToHTML(md, parser, nil)
```
## Customizing HTML renderer
Similarly, the HTML renderer can be configured with different [options](https://godoc.org/github.com/status-im/markdown/html#RendererOptions).
Here's how to use a custom renderer:
```go
import (
"github.com/status-im/markdown"
"github.com/status-im/markdown/html"
)
htmlFlags := html.CommonFlags | html.HrefTargetBlank
opts := html.RendererOptions{Flags: htmlFlags}
renderer := html.NewRenderer(opts)
md := []byte("markdown text")
html := markdown.ToHTML(md, nil, renderer)
```
The HTML renderer also supports reusing most of the logic and overriding rendering of only specific nodes.
You can provide [RenderNodeFunc](https://godoc.org/github.com/status-im/markdown/html#RenderNodeFunc) in [RendererOptions](https://godoc.org/github.com/gomarkdown/markdown/html#RendererOptions).
The function is called for each node in the AST; you can implement custom rendering logic and tell the HTML renderer to skip rendering this node.
Here's the simplest example that drops all code blocks from the output:
````go
import (
"github.com/status-im/markdown"
"github.com/status-im/markdown/ast"
"github.com/status-im/markdown/html"
)
// return (ast.GoToNext, true) to tell html renderer to skip rendering this node
// (because you've rendered it)
func renderHookDropCodeBlock(w io.Writer, node ast.Node, entering bool) (ast.WalkStatus, bool) {
// skip all nodes that are not CodeBlock nodes
if _, ok := node.(*ast.CodeBlock); !ok {
return ast.GoToNext, false
}
// custom rendering logic for ast.CodeBlock. By doing nothing it won't be
// present in the output
return ast.GoToNext, true
}
opts := html.RendererOptions{
Flags: html.CommonFlags,
RenderNodeHook: renderHookDropCodeBlock,
}
renderer := html.NewRenderer(opts)
md := "test\n```\nthis code block will be dropped from output\n```\ntext"
html := markdown.ToHTML([]byte(md), nil, renderer)
````
## Sanitize untrusted content
We don't protect against malicious content. When dealing with user-provided
markdown, run the rendered HTML through an HTML sanitizer such as [Bluemonday](https://github.com/microcosm-cc/bluemonday).
Here's an example of simple usage with Bluemonday:
```go
import (
"github.com/microcosm-cc/bluemonday"
"github.com/status-im/markdown"
)
// ...
maybeUnsafeHTML := markdown.ToHTML(md, nil, nil)
html := bluemonday.UGCPolicy().SanitizeBytes(maybeUnsafeHTML)
```
## mdtohtml command-line tool
https://github.com/gomarkdown/mdtohtml is a command-line markdown-to-HTML
converter built using this library.
You can also use it as an example of how to use the library.
You can install it with:
go get -u github.com/gomarkdown/mdtohtml
To run: `mdtohtml input-file [output-file]`
## Features
- **Compatibility**. The Markdown v1.0.3 test suite passes with
the `--tidy` option. Without `--tidy`, the differences are
mostly in whitespace and entity escaping, where this package is
more consistent and cleaner.
- **Common extensions**, including table support, fenced code
blocks, autolinks, strikethroughs, non-strict emphasis, etc.
- **Safety**. Markdown is paranoid when parsing, making it safe
to feed untrusted user input without fear of bad things
happening. The test suite stress tests this and there are no
known inputs that make it crash. If you find one, please let me
know and send me the input that does it.
NOTE: "safety" in this context means _runtime safety only_. In order to
protect yourself against JavaScript injection in untrusted content, see
[this example](https://github.com/status-im/markdown#sanitize-untrusted-content).
- **Fast**. It is fast enough to render on-demand in
most web applications without having to cache the output.
- **Thread safety**. You can run multiple parsers in different
goroutines without ill effect. There is no dependence on global
shared state.
- **Minimal dependencies**. Only depends on standard library packages in Go.
- **Standards compliant**. Output successfully validates using the
W3C validation tool for HTML 4.01 and XHTML 1.0 Transitional.
## Extensions
In addition to the standard markdown syntax, this package
implements the following extensions:
- **Intra-word emphasis suppression**. The `_` character is
commonly used inside words when discussing code, so having
markdown interpret it as an emphasis command is usually the
wrong thing. We let you treat all emphasis markers as
normal characters when they occur inside a word.
- **Tables**. Tables can be created by drawing them in the input
using a simple syntax:
```
Name | Age
--------|------
Bob | 27
Alice | 23
```
Table footers are supported as well and can be added with equal signs (`=`):
```
Name | Age
--------|------
Bob | 27
Alice | 23
========|======
Total | 50
```
- **Fenced code blocks**. In addition to the normal 4-space
indentation to mark code blocks, you can explicitly mark them
and supply a language (to make syntax highlighting simple). Just
mark it like this:
```go
func getTrue() bool {
return true
}
```
You can use 3 or more backticks to mark the beginning of the
block, and the same number to mark the end of the block.
- **Definition lists**. A simple definition list is made of a single-line
term followed by a colon and the definition for that term.
Cat
: Fluffy animal everyone likes
Internet
: Vector of transmission for pictures of cats
Terms must be separated from the previous definition by a blank line.
- **Footnotes**. A marker in the text that will become a superscript number;
a footnote definition that will be placed in a list of footnotes at the
end of the document. A footnote looks like this:
This is a footnote.[^1]
[^1]: the footnote text.
- **Autolinking**. We can find URLs that have not been
explicitly marked as links and turn them into links.
- **Strikethrough**. Use two tildes (`~~`) to mark text that
should be crossed out.
- **Hard line breaks**. With this extension enabled newlines in the input
translate into line breaks in the output. This extension is off by default.
- **Non-blocking space**. With this extension enabled, spaces preceded by a backslash in the input
  translate into non-blocking spaces in the output. This extension is off by default.
- **Smart quotes**. Smartypants-style punctuation substitution is
supported, turning normal double- and single-quote marks into
curly quotes, etc.
- **LaTeX-style dash parsing** is an additional option, where `--`
is translated into `&ndash;`, and `---` is translated into
`&mdash;`. This differs from most smartypants processors, which
turn a single hyphen into an ndash and a double hyphen into an
mdash.
- **Smart fractions**, where anything that looks like a fraction
is translated into suitable HTML (instead of just a few special
cases like most smartypants processors). For example, `4/5`
becomes `<sup>4</sup>&frasl;<sub>5</sub>`, which renders as
<sup>4</sup>&frasl;<sub>5</sub>.
- **MathJax support** is an additional feature which is supported by
  many markdown editors. It translates inline math equations quoted by `$`
  and display math blocks quoted by `$$` into a MathJax-compatible format. The
  underscore `_` won't break LaTeX rendering within a math element any more.
```
$$
\left[ \begin{array}{a} a^l_1 \\ ⋮ \\ a^l_{d_l} \end{array}\right]
= \sigma(
\left[ \begin{matrix}
w^l_{1,1} & ⋯ & w^l_{1,d_{l-1}} \\
⋮ & ⋱ & ⋮ \\
w^l_{d_l,1} & ⋯ & w^l_{d_l,d_{l-1}} \\
\end{matrix}\right] ·
\left[ \begin{array}{x} a^{l-1}_1 \\ ⋮ \\ ⋮ \\ a^{l-1}_{d_{l-1}} \end{array}\right] +
\left[ \begin{array}{b} b^l_1 \\ ⋮ \\ b^l_{d_l} \end{array}\right])
$$
```
- **Ordered list start number**. With this extension enabled an ordered list will start with
  the number that was used to start it.
- **Super and subscript**. With this extension enabled, sequences between `^` are rendered as
  superscript and sequences between `~` as subscript. For example: H~2~O is a liquid, 2^10^ is 1024.
- **Block level attributes** allow setting attributes (ID, classes and key/value pairs) on block
  level elements. The attribute must be enclosed in braces and placed on the line before the
  element.
```
{#id3 .myclass fontsize="tiny"}
# Header 1
```
Will convert into `<h1 id="id3" class="myclass" fontsize="tiny">Header 1</h1>`.
- **Mmark support**, see <https://mmark.nl/syntax> for all new syntax elements this adds.
## Todo
- port https://github.com/russross/blackfriday/issues/348
- port [LaTeX output](https://github.com/Ambrevar/Blackfriday-LaTeX):
renders output as LaTeX.
- port https://github.com/shurcooL/github_flavored_markdown to markdown
- port [markdownfmt](https://github.com/shurcooL/markdownfmt): like gofmt,
but for markdown.
- More unit testing
- Improve unicode support. It does not understand all unicode
rules (about what constitutes a letter, a punctuation symbol,
etc.), so it may fail to detect word boundaries correctly in
some instances. It is safe on all utf-8 input.
## History
markdown is a fork of v2 of https://github.com/russross/blackfriday that is:
- actively maintained (sadly, as of Feb 2018 blackfriday had been inactive for 5 months, with many bugs and pull requests accumulated)
- refactored API (split into ast/parser/html sub-packages)
Blackfriday itself was based on C implementation [sundown](https://github.com/vmg/sundown) which in turn was based on [libsoldout](http://fossil.instinctive.eu/libsoldout/home).
## License
[Simplified BSD License](LICENSE.txt)

10
vendor/github.com/status-im/markdown/ast/attribute.go generated vendored Normal file

@@ -0,0 +1,10 @@
package ast
// An attribute can be attached to block elements. They are specified as
// {#id .class key="value"} where quotes for values are mandatory; multiple
// key/value pairs are separated by whitespace.
type Attribute struct {
ID []byte
Classes [][]byte
Attrs map[string][]byte
}

4
vendor/github.com/status-im/markdown/ast/doc.go generated vendored Normal file

@@ -0,0 +1,4 @@
/*
Package ast defines tree representation of a parsed markdown document.
*/
package ast

781
vendor/github.com/status-im/markdown/ast/node.go generated vendored Normal file

@@ -0,0 +1,781 @@
package ast
import "encoding/json"
// ListType contains bitwise or'ed flags for list and list item objects.
type ListType int
// These are the possible flag values for the ListItem renderer.
// Multiple flag values may be ORed together.
// These are mostly of interest if you are writing a new output format.
const (
ListTypeOrdered ListType = 1 << iota
ListTypeDefinition
ListTypeTerm
ListItemContainsBlock
ListItemBeginningOfList // TODO: figure out if this is of any use now
ListItemEndOfList
)
// CellAlignFlags holds a type of alignment in a table cell.
type CellAlignFlags int
// These are the possible flag values for the table cell renderer.
// Only a single one of these values will be used; they are not ORed together.
// These are mostly of interest if you are writing a new output format.
const (
TableAlignmentLeft CellAlignFlags = 1 << iota
TableAlignmentRight
TableAlignmentCenter = (TableAlignmentLeft | TableAlignmentRight)
)
func (a CellAlignFlags) String() string {
switch a {
case TableAlignmentLeft:
return "left"
case TableAlignmentRight:
return "right"
case TableAlignmentCenter:
return "center"
default:
return ""
}
}
// DocumentMatters holds the type of a {front,main,back}matter in the document
type DocumentMatters int
// These are all possible Document divisions.
const (
DocumentMatterNone DocumentMatters = iota
DocumentMatterFront
DocumentMatterMain
DocumentMatterBack
)
// CitationTypes holds the type of a citation, informative, normative or suppressed
type CitationTypes int
const (
CitationTypeNone CitationTypes = iota
CitationTypeSuppressed
CitationTypeInformative
CitationTypeNormative
)
// Node defines an ast node
type Node interface {
AsContainer() *Container
AsLeaf() *Leaf
GetParent() Node
SetParent(newParent Node)
GetChildren() []Node
SetChildren(newChildren []Node)
}
// Container is a type of node that can contain children
type Container struct {
Parent Node `json:"-"`
Children []Node
Literal []byte // Text contents of the leaf nodes
Content []byte // Markdown content of the block nodes
*Attribute // Block level attribute
}
func (c *Container) MarshalJSON() ([]byte, error) {
type ContainerJSON struct {
Children []Node `json:"children"`
Literal string `json:"literal"`
*Attribute
}
var c1 ContainerJSON
c1.Children = c.Children
c1.Literal = string(c.Literal)
c1.Attribute = c.Attribute
return json.Marshal(&c1)
}
// AsContainer returns itself as *Container
func (c *Container) AsContainer() *Container {
return c
}
// AsLeaf returns nil
func (c *Container) AsLeaf() *Leaf {
return nil
}
// GetParent returns parent node
func (c *Container) GetParent() Node {
return c.Parent
}
// SetParent sets the parent node
func (c *Container) SetParent(newParent Node) {
c.Parent = newParent
}
// GetChildren returns children nodes
func (c *Container) GetChildren() []Node {
return c.Children
}
// SetChildren sets children node
func (c *Container) SetChildren(newChildren []Node) {
c.Children = newChildren
}
// Leaf is a type of node that cannot have children
type Leaf struct {
Parent Node `json:"-"`
Literal []byte // Text contents of the leaf nodes
Content []byte // Markdown content of the block nodes
*Attribute // Block level attribute
}
func (c *Leaf) MarshalJSON() ([]byte, error) {
type LeafJSON struct {
Literal string `json:"literal"`
}
var c1 LeafJSON
c1.Literal = string(c.Literal)
return json.Marshal(&c1)
}
// AsContainer returns nil
func (l *Leaf) AsContainer() *Container {
return nil
}
// AsLeaf returns itself as *Leaf
func (l *Leaf) AsLeaf() *Leaf {
return l
}
// GetParent returns parent node
func (l *Leaf) GetParent() Node {
return l.Parent
}
// SetParent sets the parent node
func (l *Leaf) SetParent(newParent Node) {
l.Parent = newParent
}
// GetChildren returns nil because Leaf cannot have children
func (l *Leaf) GetChildren() []Node {
return nil
}
// SetChildren will panic because Leaf cannot have children
func (l *Leaf) SetChildren(newChildren []Node) {
panic("leaf node cannot have children")
}
// Document represents markdown document node, a root of ast
type Document struct {
Container
}
func (doc *Document) MarshalJSON() ([]byte, error) {
children := doc.GetChildren()
if len(children) != 0 {
return json.Marshal(children)
}
return []byte("[]"), nil
}
// DocumentMatter represents markdown node that signals a document
// division: frontmatter, mainmatter or backmatter.
type DocumentMatter struct {
Container
Matter DocumentMatters
}
// BlockQuote represents markdown block quote node
type BlockQuote struct {
Container
}
func (c *BlockQuote) MarshalJSON() ([]byte, error) {
type BlockQuoteJSON struct {
Type string `json:"type"`
Children []Node `json:"children"`
Literal string `json:"literal"`
*Attribute
}
var c1 BlockQuoteJSON
c1.Children = c.Children
c1.Literal = string(c.Literal)
c1.Type = "blockquote"
return json.Marshal(&c1)
}
// Aside represents a markdown aside node.
type Aside struct {
Container
}
// List represents markdown list node
type List struct {
Container
ListFlags ListType
Tight bool // Skip <p>s around list item data if true
BulletChar byte // '*', '+' or '-' in bullet lists
Delimiter byte // '.' or ')' after the number in ordered lists
Start int // for ordered lists this indicates the starting number if > 0
RefLink []byte // If not nil, turns this list item into a footnote item and triggers different rendering
IsFootnotesList bool // This is a list of footnotes
}
// ListItem represents markdown list item node
type ListItem struct {
Container
ListFlags ListType
Tight bool // Skip <p>s around list item data if true
BulletChar byte // '*', '+' or '-' in bullet lists
Delimiter byte // '.' or ')' after the number in ordered lists
RefLink []byte // If not nil, turns this list item into a footnote item and triggers different rendering
IsFootnotesList bool // This is a list of footnotes
}
// Paragraph represents markdown paragraph node
type Paragraph struct {
Container
}
func (c *Paragraph) MarshalJSON() ([]byte, error) {
type ParagraphJSON struct {
Type string `json:"type"`
Children []Node `json:"children"`
}
var c1 ParagraphJSON
c1.Children = c.Children
c1.Type = "paragraph"
return json.Marshal(&c1)
}
// Math represents markdown MathAjax inline node
type Math struct {
Leaf
}
// MathBlock represents markdown MathAjax block node
type MathBlock struct {
Container
}
// Heading represents markdown heading node
type Heading struct {
Container
Level int // This holds the heading level number
HeadingID string // This might hold heading ID, if present
IsTitleblock bool // Specifies whether it's a title block
IsSpecial bool // We are a special heading (starts with .#)
}
func (c *Heading) MarshalJSON() ([]byte, error) {
type HeadingJSON struct {
Type string `json:"type"`
Children []Node `json:"children"`
Literal string `json:"literal"`
Level int `json:"level"`
IsTitleblock bool `json:"isTitleBlock"`
*Attribute
}
var c1 HeadingJSON
c1.Children = c.Children
c1.Literal = string(c.Literal)
c1.Attribute = c.Attribute
c1.Type = "heading"
c1.Level = c.Level
c1.IsTitleblock = c.IsTitleblock
return json.Marshal(&c1)
}
// HorizontalRule represents markdown horizontal rule node
type HorizontalRule struct {
Leaf
}
// Emph represents markdown emphasis node
type Emph struct {
Leaf
}
func (c *Emph) MarshalJSON() ([]byte, error) {
type EmphJSON struct {
Type string `json:"type"`
Literal string `json:"literal"`
*Attribute
}
var c1 EmphJSON
c1.Literal = string(c.Literal)
c1.Attribute = c.Attribute
c1.Type = "emph"
return json.Marshal(&c1)
}
type StatusTag struct {
Leaf
}
func (c *StatusTag) MarshalJSON() ([]byte, error) {
type StatusTagJSON struct {
Type string `json:"type"`
Literal string `json:"literal"`
}
var c1 StatusTagJSON
c1.Literal = string(c.Literal)
c1.Type = "status-tag"
return json.Marshal(&c1)
}
type Mention struct {
Leaf
}
func (c *Mention) MarshalJSON() ([]byte, error) {
type MentionJSON struct {
Type string `json:"type"`
Literal string `json:"literal"`
}
var c1 MentionJSON
c1.Literal = string(c.Literal)
c1.Type = "mention"
return json.Marshal(&c1)
}
// Strong represents markdown strong node
type Strong struct {
Leaf
}
func (c *Strong) MarshalJSON() ([]byte, error) {
type StrongJSON struct {
Type string `json:"type"`
Literal string `json:"literal"`
*Attribute
}
var c1 StrongJSON
c1.Literal = string(c.Literal)
c1.Attribute = c.Attribute
c1.Type = "strong"
return json.Marshal(&c1)
}
// StrongEmph represents markdown strong emphasis node
type StrongEmph struct {
Leaf
}
func (c *StrongEmph) MarshalJSON() ([]byte, error) {
type StrongJSON struct {
Type string `json:"type"`
Literal string `json:"literal"`
*Attribute
}
var c1 StrongJSON
c1.Literal = string(c.Literal)
c1.Attribute = c.Attribute
c1.Type = "strong-emph"
return json.Marshal(&c1)
}
// Del represents markdown del node
type Del struct {
Leaf
}
func (c *Del) MarshalJSON() ([]byte, error) {
type StrongJSON struct {
Type string `json:"type"`
Literal string `json:"literal"`
*Attribute
}
var c1 StrongJSON
c1.Literal = string(c.Literal)
c1.Attribute = c.Attribute
c1.Type = "del"
return json.Marshal(&c1)
}
// Link represents markdown link node
type Link struct {
Container
Destination []byte // Destination is what goes into a href
Title []byte // Title is the tooltip thing that goes in a title attribute
NoteID int // NoteID contains a serial number of a footnote, zero if it's not a footnote
Footnote Node // If it's a footnote, this is a direct link to the footnote Node. Otherwise nil.
DeferredID []byte // If a deferred link this holds the original ID.
}
func (c *Link) MarshalJSON() ([]byte, error) {
type LinkJSON struct {
Type string `json:"type"`
Children []Node `json:"children"`
Literal string `json:"literal"`
Title string `json:"title"`
Destination string `json:"destination"`
*Attribute
}
var c1 LinkJSON
c1.Children = c.Children
c1.Literal = string(c.Literal)
c1.Attribute = c.Attribute
c1.Title = string(c.Title)
c1.Destination = string(c.Destination)
c1.Type = "link"
return json.Marshal(&c1)
}
// CrossReference is a reference node.
type CrossReference struct {
Container
Destination []byte // Destination is where the reference points to
}
// Citation is a citation node.
type Citation struct {
Leaf
Destination [][]byte // Destination is where the citation points to. Multiple ones are allowed.
Type []CitationTypes // 1:1 mapping of destination and citation type
Suffix [][]byte // Potential citation suffix, i.e. [@!RFC1035, p. 144]
}
// Image represents markdown image node
type Image struct {
Container
Destination []byte // Destination is what goes into a href
Title []byte // Title is the tooltip thing that goes in a title attribute
}
// Text represents markdown text node
type Text struct {
Leaf
}
// HTMLBlock represents markdown html node
type HTMLBlock struct {
Leaf
}
// CodeBlock represents markdown code block node
type CodeBlock struct {
Leaf
IsFenced bool // Specifies whether it's a fenced code block or an indented one
Info []byte // This holds the info string
FenceChar byte
FenceLength int
FenceOffset int
}
func (c *CodeBlock) MarshalJSON() ([]byte, error) {
type CodeBlockJSON struct {
Type string `json:"type"`
Literal string `json:"literal"`
Language string `json:"language,omitempty"`
*Attribute
}
var c1 CodeBlockJSON
c1.Literal = string(c.Literal)
c1.Type = "codeblock"
c1.Attribute = c.Attribute
c1.Language = string(c.Info)
return json.Marshal(&c1)
}
// Softbreak represents markdown softbreak node
// Note: not used currently
type Softbreak struct {
Leaf
}
// Hardbreak represents markdown hard break node
type Hardbreak struct {
Leaf
}
// NonBlockingSpace represents markdown non-blocking space node
type NonBlockingSpace struct {
Leaf
}
// Code represents markdown code node
type Code struct {
Leaf
}
func (c *Code) MarshalJSON() ([]byte, error) {
type CodeJSON struct {
Type string `json:"type"`
Literal string `json:"literal"`
}
var c1 CodeJSON
c1.Literal = string(c.Literal)
c1.Type = "code"
return json.Marshal(&c1)
}
// HTMLSpan represents markdown html span node
type HTMLSpan struct {
Leaf
}
// Table represents markdown table node
type Table struct {
Container
}
// TableCell represents markdown table cell node
type TableCell struct {
Container
IsHeader bool // This tells if it's under the header row
Align CellAlignFlags // This holds the value for align attribute
}
// TableHeader represents markdown table head node
type TableHeader struct {
Container
}
// TableBody represents markdown table body node
type TableBody struct {
Container
}
// TableRow represents markdown table row node
type TableRow struct {
Container
}
// TableFooter represents markdown table foot node
type TableFooter struct {
Container
}
// Caption represents a figure, code or quote caption
type Caption struct {
Container
}
// CaptionFigure is a node (blockquote or codeblock) that has a caption
type CaptionFigure struct {
Container
HeadingID string // This might hold heading ID, if present
}
// Callout is a node that can exist both in text (where it is an actual node) and in a code block.
type Callout struct {
Leaf
ID []byte // number of this callout
}
// Index is a node that contains an Index item and an optional subitem.
type Index struct {
Leaf
Primary bool
Item []byte
Subitem []byte
ID string // ID of the index
}
// Subscript is a subscript node
type Subscript struct {
Leaf
}
// Superscript is a superscript node
type Superscript struct {
Leaf
}
// Footnotes is a node that contains all footnotes
type Footnotes struct {
Container
}
func removeNodeFromArray(a []Node, node Node) []Node {
n := len(a)
for i := 0; i < n; i++ {
if a[i] == node {
return append(a[:i], a[i+1:]...)
}
}
return nil
}
// AppendChild appends child to children of parent
// It panics if either node is nil.
func AppendChild(parent Node, child Node) {
RemoveFromTree(child)
child.SetParent(parent)
newChildren := append(parent.GetChildren(), child)
parent.SetChildren(newChildren)
}
// RemoveFromTree removes this node from tree
func RemoveFromTree(n Node) {
if n.GetParent() == nil {
return
}
// important: don't clear n.Children if n has no parent
// we're called from AppendChild and that might happen on a node
// that accumulated Children but hasn't been inserted into the tree
n.SetChildren(nil)
p := n.GetParent()
newChildren := removeNodeFromArray(p.GetChildren(), n)
if newChildren != nil {
p.SetChildren(newChildren)
}
}
// GetLastChild returns last child of node n
// It's implemented as stand-alone function to keep Node interface small
func GetLastChild(n Node) Node {
a := n.GetChildren()
if len(a) > 0 {
return a[len(a)-1]
}
return nil
}
// GetFirstChild returns first child of node n
// It's implemented as stand-alone function to keep Node interface small
func GetFirstChild(n Node) Node {
a := n.GetChildren()
if len(a) > 0 {
return a[0]
}
return nil
}
// GetNextNode returns next sibling of node n (node after n)
// We can't make it part of Container or Leaf because we'd lose Node identity
func GetNextNode(n Node) Node {
parent := n.GetParent()
if parent == nil {
return nil
}
a := parent.GetChildren()
	n := len(a) - 1
	for i := 0; i < n; i++ {
if a[i] == n {
return a[i+1]
}
}
return nil
}
// GetPrevNode returns previous sibling of node n (node before n)
// We can't make it part of Container or Leaf because we'd lose Node identity
func GetPrevNode(n Node) Node {
parent := n.GetParent()
if parent == nil {
return nil
}
a := parent.GetChildren()
	n := len(a)
	for i := 1; i < n; i++ {
if a[i] == n {
return a[i-1]
}
}
return nil
}
// WalkStatus allows NodeVisitor to have some control over the tree traversal.
// It is returned from NodeVisitor and different values allow Node.Walk to
// decide which node to go to next.
type WalkStatus int
const (
// GoToNext is the default traversal of every node.
GoToNext WalkStatus = iota
// SkipChildren tells walker to skip all children of current node.
SkipChildren
// Terminate tells walker to terminate the traversal.
Terminate
)
// NodeVisitor is a callback to be called when traversing the syntax tree.
// Called twice for every node: once with entering=true when the branch is
// first visited, then with entering=false after all the children are done.
type NodeVisitor interface {
Visit(node Node, entering bool) WalkStatus
}
// NodeVisitorFunc casts a function to match NodeVisitor interface
type NodeVisitorFunc func(node Node, entering bool) WalkStatus
// Walk traverses tree recursively
func Walk(n Node, visitor NodeVisitor) WalkStatus {
isContainer := n.AsContainer() != nil
status := visitor.Visit(n, true) // entering
if status == Terminate {
// even if terminating, close container node
if isContainer {
visitor.Visit(n, false)
}
return status
}
if isContainer && status != SkipChildren {
children := n.GetChildren()
for _, n := range children {
status = Walk(n, visitor)
if status == Terminate {
return status
}
}
}
if isContainer {
status = visitor.Visit(n, false) // exiting
if status == Terminate {
return status
}
}
return GoToNext
}
// Visit calls visitor function
func (f NodeVisitorFunc) Visit(node Node, entering bool) WalkStatus {
return f(node, entering)
}
// WalkFunc is like Walk but accepts just a callback function
func WalkFunc(n Node, f NodeVisitorFunc) {
visitor := NodeVisitorFunc(f)
Walk(n, visitor)
}
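// Illustrative usage sketch (not part of the original file): count the Text
// nodes in a parsed document, where doc is assumed to be the root ast.Node.
//
//	count := 0
//	WalkFunc(doc, func(node Node, entering bool) WalkStatus {
//		if _, ok := node.(*Text); ok && entering {
//			count++
//		}
//		return GoToNext
//	})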

158
vendor/github.com/status-im/markdown/ast/print.go generated vendored Normal file

@@ -0,0 +1,158 @@
package ast
import (
"bytes"
"fmt"
"io"
"strings"
)
// Print is for debugging. It prints a string representation of parsed
// markdown doc (result of parser.Parse()) to dst.
//
// To make output readable, it shortens text output.
func Print(dst io.Writer, doc Node) {
PrintWithPrefix(dst, doc, " ")
}
// PrintWithPrefix is like Print but allows customizing the prefix used for
// indentation. By default it's 2 spaces. You can change it to e.g. a tab
// by passing "\t"
func PrintWithPrefix(w io.Writer, doc Node, prefix string) {
// for more compact output, don't print outer Document
if _, ok := doc.(*Document); ok {
for _, c := range doc.GetChildren() {
printRecur(w, c, prefix, 0)
}
} else {
printRecur(w, doc, prefix, 0)
}
}
// ToString is like Print but returns the result as a string
func ToString(doc Node) string {
var buf bytes.Buffer
Print(&buf, doc)
return buf.String()
}
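// Illustrative usage sketch (not part of the original file), where doc is
// assumed to be a parsed *Document:
//
//	fmt.Print(ToString(doc)) // one line per node, nested children indented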
func getContent(node Node) string {
if c := node.AsContainer(); c != nil {
return string(c.Literal)
}
leaf := node.AsLeaf()
return string(leaf.Literal)
}
func shortenString(s string, maxLen int) string {
	// for cleaner, one-line output, replace some white-space chars
// with their escaped version
s = strings.Replace(s, "\n", `\n`, -1)
s = strings.Replace(s, "\r", `\r`, -1)
s = strings.Replace(s, "\t", `\t`, -1)
if maxLen < 0 {
return s
}
if len(s) < maxLen {
return s
}
// add "..." to indicate truncation
return s[:maxLen-3] + "..."
}
// getNodeType returns a short name of the node's type, which excludes the package name
// and strips "()" from the end
func getNodeType(node Node) string {
s := fmt.Sprintf("%T", node)
s = strings.TrimSuffix(s, "()")
if idx := strings.Index(s, "."); idx != -1 {
return s[idx+1:]
}
return s
}
func printDefault(w io.Writer, indent string, typeName string, content string) {
content = strings.TrimSpace(content)
if len(content) > 0 {
fmt.Fprintf(w, "%s%s '%s'\n", indent, typeName, content)
} else {
fmt.Fprintf(w, "%s%s\n", indent, typeName)
}
}
func getListFlags(f ListType) string {
var s string
if f&ListTypeOrdered != 0 {
s += "ordered "
}
if f&ListTypeDefinition != 0 {
s += "definition "
}
if f&ListTypeTerm != 0 {
s += "term "
}
if f&ListItemContainsBlock != 0 {
s += "has_block "
}
if f&ListItemBeginningOfList != 0 {
s += "start "
}
if f&ListItemEndOfList != 0 {
s += "end "
}
s = strings.TrimSpace(s)
return s
}
func printRecur(w io.Writer, node Node, prefix string, depth int) {
if node == nil {
return
}
indent := strings.Repeat(prefix, depth)
content := shortenString(getContent(node), 40)
typeName := getNodeType(node)
switch v := node.(type) {
case *Link:
content := "url=" + string(v.Destination)
printDefault(w, indent, typeName, content)
case *StatusTag:
content := "tag=" + string(v.Literal)
printDefault(w, indent, typeName, content)
case *Image:
content := "url=" + string(v.Destination)
printDefault(w, indent, typeName, content)
case *List:
if v.Start > 1 {
content += fmt.Sprintf("start=%d ", v.Start)
}
if v.Tight {
content += "tight "
}
if v.IsFootnotesList {
content += "footnotes "
}
flags := getListFlags(v.ListFlags)
if len(flags) > 0 {
content += "flags=" + flags + " "
}
printDefault(w, indent, typeName, content)
case *ListItem:
if v.Tight {
content += "tight "
}
if v.IsFootnotesList {
content += "footnotes "
}
flags := getListFlags(v.ListFlags)
if len(flags) > 0 {
content += "flags=" + flags + " "
}
printDefault(w, indent, typeName, content)
default:
printDefault(w, indent, typeName, content)
}
for _, child := range node.GetChildren() {
printRecur(w, child, prefix, depth+1)
}
}


@@ -0,0 +1,27 @@
## Changes from blackfriday
This library is derived from blackfriday library. Here's a list of changes.
**Redesigned API**
- split into 3 separate packages: ast, parser and html (for the html renderer). This makes the API more manageable. It also separates e.g. parser options from renderer options
- changed how an AST node is represented: from a union-like representation (manually keeping track of the type of the node) to using interface{} (the Go way to combine an arbitrary value with its type)
**Allow re-using most of html renderer logic**
You can implement your own renderer by implementing `Renderer` interface.
Implementing a full renderer is a lot of work and often you just want to tweak the html rendering of a few node types.
I've added a way to hook `Renderer.Render` function in html renderer with a custom function that can take over rendering of specific nodes.
I use it myself to do syntax-highlighting of code snippets.
**Speed up go test**
Running `go test` was really slow (17 secs) because it did a poor man's version of fuzzing by feeding the parser all subsets of test strings in order to find panics
due to incorrect parsing logic.
I've moved that logic to `cmd/crashtest`, so that it can be run on CI but not slow down regular development.
Now `go test` is blazing fast.

8
vendor/github.com/status-im/markdown/codecov.yml generated vendored Normal file

@@ -0,0 +1,8 @@
coverage:
status:
project:
default:
# basic
target: 60%
threshold: 2%
base: auto

35
vendor/github.com/status-im/markdown/doc.go generated vendored Normal file

@@ -0,0 +1,35 @@
/*
Package markdown implements a markdown parser and an HTML renderer.
It parses markdown into AST format which can be serialized to HTML
(using html.Renderer) or possibly other formats (using alternate renderers).
Convert markdown to HTML
The simplest way to convert a markdown document to HTML:
md := []byte("## markdown document")
html := markdown.ToHTML(md, nil, nil)
Customizing parsing and HTML rendering
You can customize the parser and the HTML renderer:
import (
"github.com/status-im/markdown/parser"
"github.com/status-im/markdown/renderer"
"github.com/status-im/markdown"
)
extensions := parser.CommonExtensions | parser.AutoHeadingIDs
p := parser.NewWithExtensions(extensions)
htmlFlags := html.CommonFlags | html.HrefTargetBlank
opts := html.RendererOptions{Flags: htmlFlags}
renderer := html.NewRenderer(opts)
md := []byte("markdown text")
html := markdown.ToHTML(md, p, renderer)
For a cmd-line tool see https://github.com/gomarkdown/mdtohtml
*/
package markdown

9
vendor/github.com/status-im/markdown/fuzz.go generated vendored Normal file

@@ -0,0 +1,9 @@
// +build gofuzz
package markdown
// Fuzz is to be used by https://github.com/dvyukov/go-fuzz
func Fuzz(data []byte) int {
Parse(data, nil)
return 0
}

66
vendor/github.com/status-im/markdown/markdown.go generated vendored Normal file

@@ -0,0 +1,66 @@
package markdown
import (
"bytes"
"io"
"github.com/status-im/markdown/ast"
"github.com/status-im/markdown/parser"
)
// Renderer is an interface for implementing custom renderers.
type Renderer interface {
// RenderNode renders markdown node to w.
// It's called once for a leaf node.
// It's called twice for non-leaf nodes:
// * first with entering=true
// * then with entering=false
//
// Return value is a way to tell the calling walker to adjust its walk
// pattern: e.g. it can terminate the traversal by returning Terminate. Or it
// can ask the walker to skip a subtree of this node by returning SkipChildren.
// The typical behavior is to return GoToNext, which asks for the usual
// traversal to the next node.
RenderNode(w io.Writer, node ast.Node, entering bool) ast.WalkStatus
// RenderHeader is a method that allows the renderer to produce some
// content preceding the main body of the output document. The header is
// understood in the broad sense here. For example, the default HTML
// renderer will write not only the HTML document preamble, but also the
// table of contents if it was requested.
//
// The method will be passed an entire document tree, in case a particular
// implementation needs to inspect it to produce output.
//
// The output should be written to the supplied writer w. If your
// implementation has no header to write, supply an empty implementation.
RenderHeader(w io.Writer, ast ast.Node)
// RenderFooter is a symmetric counterpart of RenderHeader.
RenderFooter(w io.Writer, ast ast.Node)
}
// Parse parses a markdown document using the provided parser. If parser is nil,
// we use a parser configured with parser.CommonExtensions.
//
// It returns AST (abstract syntax tree) that can be converted to another
// format using Render function.
func Parse(markdown []byte, p *parser.Parser) ast.Node {
if p == nil {
p = parser.New()
}
return p.Parse(markdown)
}
// Render uses renderer to convert parsed markdown document into a different format.
//
// To convert to HTML, pass an html.Renderer.
func Render(doc ast.Node, renderer Renderer) []byte {
var buf bytes.Buffer
renderer.RenderHeader(&buf, doc)
ast.WalkFunc(doc, func(node ast.Node, entering bool) ast.WalkStatus {
return renderer.RenderNode(&buf, node, entering)
})
renderer.RenderFooter(&buf, doc)
return buf.Bytes()
}
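// Illustrative usage sketch (not part of the original file), assuming the
// html sub-package is imported as github.com/status-im/markdown/html:
//
//	doc := Parse([]byte("# title"), nil)
//	out := Render(doc, html.NewRenderer(html.RendererOptions{}))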

73
vendor/github.com/status-im/markdown/parser/aside.go generated vendored Normal file

@@ -0,0 +1,73 @@
package parser
import (
"bytes"
"github.com/status-im/markdown/ast"
)
// asidePrefix returns the aside prefix length
func (p *Parser) asidePrefix(data []byte) int {
i := 0
n := len(data)
for i < 3 && i < n && data[i] == ' ' {
i++
}
if i+1 < n && data[i] == 'A' && data[i+1] == '>' {
if i+2 < n && data[i+2] == ' ' {
return i + 3
}
return i + 2
}
return 0
}
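// Illustrative sketch (not part of the original file):
//
//	p.asidePrefix([]byte("A> text"))  // == 3: "A> " consumed
//	p.asidePrefix([]byte("  A>text")) // == 4: up to three leading spaces allowed
//	p.asidePrefix([]byte("B> text"))  // == 0: not an aside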
// an aside ends with at least one blank line
// followed by something without an aside prefix
func (p *Parser) terminateAside(data []byte, beg, end int) bool {
if p.isEmpty(data[beg:]) <= 0 {
return false
}
if end >= len(data) {
return true
}
return p.asidePrefix(data[end:]) == 0 && p.isEmpty(data[end:]) == 0
}
// aside parses an aside fragment
func (p *Parser) aside(data []byte) int {
var raw bytes.Buffer
beg, end := 0, 0
// identical to quote
for beg < len(data) {
end = beg
// Step over whole lines, collecting them. While doing that, check for
// fenced code and if one's found, incorporate it altogether,
		// regardless of any contents inside it
for end < len(data) && data[end] != '\n' {
if p.extensions&FencedCode != 0 {
if i := p.fencedCodeBlock(data[end:], false); i > 0 {
// -1 to compensate for the extra end++ after the loop:
end += i - 1
break
}
}
end++
}
end = skipCharN(data, end, '\n', 1)
if pre := p.asidePrefix(data[beg:]); pre > 0 {
// skip the prefix
beg += pre
} else if p.terminateAside(data, beg, end) {
break
}
// this line is part of the aside
raw.Write(data[beg:end])
beg = end
}
block := p.addBlock(&ast.Aside{})
p.block(raw.Bytes())
p.finalize(block)
return end
}


@@ -0,0 +1,116 @@
package parser
import (
"bytes"
"github.com/status-im/markdown/ast"
)
// attribute parses a (potential) block attribute and adds it to p.
func (p *Parser) attribute(data []byte) []byte {
if len(data) < 3 {
return data
}
i := 0
if data[i] != '{' {
return data
}
i++
// last character must be a } otherwise it's not an attribute
end := skipUntilChar(data, i, '\n')
if data[end-1] != '}' {
return data
}
i = skipSpace(data, i)
b := &ast.Attribute{Attrs: make(map[string][]byte)}
esc := false
quote := false
trail := 0
Loop:
for ; i < len(data); i++ {
switch data[i] {
case ' ', '\t', '\f', '\v':
if quote {
continue
}
chunk := data[trail+1 : i]
if len(chunk) == 0 {
trail = i
continue
}
switch {
case chunk[0] == '.':
b.Classes = append(b.Classes, chunk[1:])
case chunk[0] == '#':
b.ID = chunk[1:]
default:
k, v := keyValue(chunk)
if k != nil && v != nil {
b.Attrs[string(k)] = v
} else {
// this is illegal in an attribute
return data
}
}
trail = i
case '"':
if esc {
esc = !esc
continue
}
quote = !quote
case '\\':
esc = !esc
case '}':
if esc {
esc = !esc
continue
}
chunk := data[trail+1 : i]
if len(chunk) == 0 {
return data
}
switch {
case chunk[0] == '.':
b.Classes = append(b.Classes, chunk[1:])
case chunk[0] == '#':
b.ID = chunk[1:]
default:
k, v := keyValue(chunk)
if k != nil && v != nil {
b.Attrs[string(k)] = v
} else {
return data
}
}
i++
break Loop
default:
esc = false
}
}
p.attr = b
return data[i:]
}
// key="value" quotes are mandatory.
func keyValue(data []byte) ([]byte, []byte) {
chunk := bytes.SplitN(data, []byte{'='}, 2)
if len(chunk) != 2 {
return nil, nil
}
key := chunk[0]
value := chunk[1]
if len(value) < 3 || len(key) == 0 {
return nil, nil
}
if value[0] != '"' || value[len(value)-1] != '"' {
return key, nil
}
return key, value[1 : len(value)-1]
}
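// Illustrative sketch (not part of the original file):
//
//	keyValue([]byte(`class="wide"`)) // == ("class", "wide")
//	keyValue([]byte(`class=wide`))   // == ("class", nil): quotes are mandatory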

1415
vendor/github.com/status-im/markdown/parser/block.go generated vendored Normal file

File diff suppressed because it is too large

29
vendor/github.com/status-im/markdown/parser/callout.go generated vendored Normal file

@@ -0,0 +1,29 @@
package parser
import (
"bytes"
"strconv"
)
// IsCallout detects a callout in the following format: <<N>>, where N is an integer > 0.
func IsCallout(data []byte) (id []byte, consumed int) {
if !bytes.HasPrefix(data, []byte("<<")) {
return nil, 0
}
start := 2
end := bytes.Index(data[start:], []byte(">>"))
if end < 0 {
return nil, 0
}
b := data[start : start+end]
b = bytes.TrimSpace(b)
i, err := strconv.Atoi(string(b))
if err != nil {
return nil, 0
}
if i <= 0 {
return nil, 0
}
return b, start + end + 2 // 2 for >>
}
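// Illustrative sketch (not part of the original file):
//
//	IsCallout([]byte("<<3>>")) // == ([]byte("3"), 5)
//	IsCallout([]byte("<<0>>")) // == (nil, 0): N must be > 0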

70
vendor/github.com/status-im/markdown/parser/caption.go generated vendored Normal file

@@ -0,0 +1,70 @@
package parser
import (
"bytes"
)
// caption checks for a caption, it returns the caption data and a potential "headingID".
func (p *Parser) caption(data, caption []byte) ([]byte, string, int) {
if !bytes.HasPrefix(data, caption) {
return nil, "", 0
}
j := len(caption)
data = data[j:]
end := p.linesUntilEmpty(data)
data = data[:end]
id, start := captionID(data)
if id != "" {
return data[:start], id, end + j
}
return data, "", end + j
}
// linesUntilEmpty scans lines up to the first empty line.
func (p *Parser) linesUntilEmpty(data []byte) int {
line, i := 0, 0
for line < len(data) {
i++
// find the end of this line
for i < len(data) && data[i-1] != '\n' {
i++
}
if p.isEmpty(data[line:i]) == 0 {
line = i
continue
}
break
}
return i
}
// captionID checks if the caption *ends* in {#....}. If so the text after {# is taken to be
// the ID/anchor of the entire figure block.
func captionID(data []byte) (string, int) {
end := len(data)
j, k := 0, 0
// find start/end of heading id
for j = 0; j < end-1 && (data[j] != '{' || data[j+1] != '#'); j++ {
}
for k = j + 1; k < end && data[k] != '}'; k++ {
}
// remains must be whitespace.
for l := k + 1; l < end; l++ {
if !isSpace(data[l]) {
return "", 0
}
}
if j > 0 && k > 0 && j+2 < k {
return string(data[j+2 : k]), j
}
return "", 0
}


@@ -0,0 +1,86 @@
package parser
import (
"bytes"
"github.com/status-im/markdown/ast"
)
// citation parses a citation. In its simplest form, [@ref]; we allow multiple
// citations separated by semicolons and a sub reference inside ala pandoc: [@ref, p. 23].
// Each citation can have a modifier: !, ? or -, which mean:
//
// ! - normative
// ? - informative
// - - suppressed
//
// The suffix starts after a comma, we strip any whitespace before and after. If the output
// allows for it, this can be rendered.
func citation(p *Parser, data []byte, offset int) (int, ast.Node) {
// look for the matching closing bracket
i := offset + 1
for level := 1; level > 0 && i < len(data); i++ {
switch {
case data[i] == '\n':
// no newlines allowed.
return 0, nil
case data[i-1] == '\\':
continue
case data[i] == '[':
level++
case data[i] == ']':
level--
if level <= 0 {
i-- // compensate for extra i++ in for loop
}
}
}
if i >= len(data) {
return 0, nil
}
node := &ast.Citation{}
citations := bytes.Split(data[1:i], []byte(";"))
for _, citation := range citations {
var suffix []byte
citation = bytes.TrimSpace(citation)
j := 0
if citation[j] != '@' {
// not a citation, drop out entirely.
return 0, nil
}
if c := bytes.Index(citation, []byte(",")); c > 0 {
part := citation[:c]
suff := citation[c+1:]
part = bytes.TrimSpace(part)
suff = bytes.TrimSpace(suff)
citation = part
suffix = suff
}
citeType := ast.CitationTypeInformative
j = 1
switch citation[j] {
case '!':
citeType = ast.CitationTypeNormative
j++
case '?':
citeType = ast.CitationTypeInformative
j++
case '-':
citeType = ast.CitationTypeSuppressed
j++
}
node.Destination = append(node.Destination, citation[j:])
node.Type = append(node.Type, citeType)
node.Suffix = append(node.Suffix, suffix)
}
return i + 1, node
}
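// Illustrative sketch (not part of the original file):
//
//	consumed, node := citation(p, []byte("[@!RFC1035, p. 144]"), 0)
//	// consumed == 19, node.Type[0] == ast.CitationTypeNormative,
//	// node.Destination[0] == []byte("RFC1035"), node.Suffix[0] == []byte("p. 144")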

20
vendor/github.com/status-im/markdown/parser/esc.go generated vendored Normal file

@@ -0,0 +1,20 @@
package parser
// isEscape returns true if byte i is prefixed by an odd number of backslashes.
func isEscape(data []byte, i int) bool {
if i == 0 {
return false
}
if i == 1 {
return data[0] == '\\'
}
j := i - 1
for ; j >= 0; j-- {
if data[j] != '\\' {
break
}
}
j++
	// an odd number of backslashes means escape
return (i-j)%2 != 0
}
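// Illustrative sketch (not part of the original file):
//
//	isEscape([]byte(`\*`), 1)  // == true: one backslash escapes the '*'
//	isEscape([]byte(`\\*`), 2) // == false: the pair escapes the backslash itself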

119
vendor/github.com/status-im/markdown/parser/figures.go generated vendored Normal file

@@ -0,0 +1,119 @@
package parser
import (
"bytes"
"github.com/status-im/markdown/ast"
)
// sFigureLine checks if there's a figure line (e.g., !--- ) at the beginning of data,
// and returns the end index if so, or 0 otherwise.
func sFigureLine(data []byte, oldmarker string) (end int, marker string) {
i, size := 0, 0
n := len(data)
// skip up to three spaces
for i < n && i < 3 && data[i] == ' ' {
i++
}
// check for the marker characters: !
if i+1 >= n {
return 0, ""
}
if data[i] != '!' || data[i+1] != '-' {
return 0, ""
}
i++
c := data[i] // i.e. the -
// the whole line must be the same char or whitespace
for i < n && data[i] == c {
size++
i++
}
// the marker char must occur at least 3 times
if size < 3 {
return 0, ""
}
marker = string(data[i-size : i])
// if this is the end marker, it must match the beginning marker
if oldmarker != "" && marker != oldmarker {
return 0, ""
}
	// there is no syntax modifier, although it might be an idea to reuse this space for something
i = skipChar(data, i, ' ')
if i >= n || data[i] != '\n' {
if i == n {
return i, marker
}
return 0, ""
}
return i + 1, marker // Take newline into account.
}
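// Illustrative sketch (not part of the original file):
//
//	sFigureLine([]byte("!---\n"), "") // == (5, "---")
//	sFigureLine([]byte("!--\n"), "")  // == (0, ""): the marker char must occur at least 3 times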
// figureBlock returns the end index if data contains a figure block at the beginning,
// or 0 otherwise. It writes to out if doRender is true, otherwise it has no side effects.
// If doRender is true, a final newline is mandatory to recognize the figure block.
func (p *Parser) figureBlock(data []byte, doRender bool) int {
beg, marker := sFigureLine(data, "")
if beg == 0 || beg >= len(data) {
return 0
}
var raw bytes.Buffer
for {
// safe to assume beg < len(data)
		// check for the end of the figure block
figEnd, _ := sFigureLine(data[beg:], marker)
if figEnd != 0 {
beg += figEnd
break
}
// copy the current line
end := skipUntilChar(data, beg, '\n') + 1
// did we reach the end of the buffer without a closing marker?
if end >= len(data) {
return 0
}
// verbatim copy to the working buffer
if doRender {
raw.Write(data[beg:end])
}
beg = end
}
if !doRender {
return beg
}
figure := &ast.CaptionFigure{}
p.addBlock(figure)
p.block(raw.Bytes())
defer p.finalize(figure)
if captionContent, id, consumed := p.caption(data[beg:], []byte("Figure: ")); consumed > 0 {
caption := &ast.Caption{}
p.Inline(caption, captionContent)
figure.HeadingID = id
p.addChild(caption)
beg += consumed
}
p.finalize(figure)
return beg
}

129
vendor/github.com/status-im/markdown/parser/include.go generated vendored Normal file

@@ -0,0 +1,129 @@
package parser
import (
"bytes"
"path"
"path/filepath"
)
// isInclude parses {{...}}[...]: the {{...}} contains a path between the braces, and the
// optional [...] syntax contains an address that selects which lines to include. The address
// is treated as an opaque string and just given to readInclude.
func (p *Parser) isInclude(data []byte) (filename string, address []byte, consumed int) {
i := skipCharN(data, 0, ' ', 3) // start with up to 3 spaces
if len(data[i:]) < 3 {
return "", nil, 0
}
if data[i] != '{' || data[i+1] != '{' {
return "", nil, 0
}
start := i + 2
// find the end delimiter
i = skipUntilChar(data, i, '}')
if i+1 >= len(data) {
return "", nil, 0
}
end := i
i++
if data[i] != '}' {
return "", nil, 0
}
filename = string(data[start:end])
if i+1 < len(data) && data[i+1] == '[' { // potential address specification
start := i + 2
end = skipUntilChar(data, start, ']')
if end >= len(data) {
return "", nil, 0
}
address = data[start:end]
return filename, address, end + 1
}
return filename, address, i + 1
}
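// Illustrative sketch (not part of the original file):
//
//	p.isInclude([]byte("{{spec.md}}[3,8]")) // == ("spec.md", []byte("3,8"), 16)
//	p.isInclude([]byte("{{spec.md}}"))      // == ("spec.md", nil, 11)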
func (p *Parser) readInclude(from, file string, address []byte) []byte {
if p.Opts.ReadIncludeFn != nil {
return p.Opts.ReadIncludeFn(from, file, address)
}
return nil
}
// isCodeInclude parses <{{...}}, which is similar to isInclude; the returned bytes are, however, wrapped in a code block.
func (p *Parser) isCodeInclude(data []byte) (filename string, address []byte, consumed int) {
i := skipCharN(data, 0, ' ', 3) // start with up to 3 spaces
if len(data[i:]) < 3 {
return "", nil, 0
}
if data[i] != '<' {
return "", nil, 0
}
start := i
filename, address, consumed = p.isInclude(data[i+1:])
if consumed == 0 {
return "", nil, 0
}
return filename, address, start + consumed + 1
}
// readCodeInclude acts like readInclude except the returned bytes are wrapped in a fenced code block.
func (p *Parser) readCodeInclude(from, file string, address []byte) []byte {
data := p.readInclude(from, file, address)
if data == nil {
return nil
}
ext := path.Ext(file)
buf := &bytes.Buffer{}
buf.Write([]byte("```"))
if ext != "" { // starts with a dot
buf.WriteString(" " + ext[1:] + "\n")
} else {
buf.WriteByte('\n')
}
buf.Write(data)
buf.WriteString("```\n")
return buf.Bytes()
}
// incStack holds the current stack of chained includes. Each value is the containing
// path of the file being parsed.
type incStack struct {
stack []string
}
func newIncStack() *incStack {
return &incStack{stack: []string{}}
}
// Push pushes the directory of new onto the stack.
func (i *incStack) Push(new string) {
if path.IsAbs(new) {
i.stack = append(i.stack, path.Dir(new))
return
}
last := ""
if len(i.stack) > 0 {
last = i.stack[len(i.stack)-1]
}
i.stack = append(i.stack, path.Dir(filepath.Join(last, new)))
}
// Pop pops the last value.
func (i *incStack) Pop() {
if len(i.stack) == 0 {
return
}
i.stack = i.stack[:len(i.stack)-1]
}
func (i *incStack) Last() string {
if len(i.stack) == 0 {
return ""
}
return i.stack[len(i.stack)-1]
}

1348
vendor/github.com/status-im/markdown/parser/inline.go generated vendored Normal file

File diff suppressed because it is too large

36
vendor/github.com/status-im/markdown/parser/matter.go generated vendored Normal file

@@ -0,0 +1,36 @@
package parser
import (
"bytes"
"github.com/status-im/markdown/ast"
)
func (p *Parser) documentMatter(data []byte) int {
if data[0] != '{' {
return 0
}
consumed := 0
matter := ast.DocumentMatterNone
if bytes.HasPrefix(data, []byte("{frontmatter}")) {
consumed = len("{frontmatter}")
matter = ast.DocumentMatterFront
}
if bytes.HasPrefix(data, []byte("{mainmatter}")) {
consumed = len("{mainmatter}")
matter = ast.DocumentMatterMain
}
if bytes.HasPrefix(data, []byte("{backmatter}")) {
consumed = len("{backmatter}")
matter = ast.DocumentMatterBack
}
if consumed == 0 {
return 0
}
node := &ast.DocumentMatter{Matter: matter}
p.addBlock(node)
p.finalize(node)
return consumed
}
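// Illustrative sketch (not part of the original file):
//
//	p.documentMatter([]byte("{frontmatter}\n")) // == 13 and adds an ast.DocumentMatter node
//	p.documentMatter([]byte("{#id .class}"))    // == 0: not a matter division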

32
vendor/github.com/status-im/markdown/parser/options.go generated vendored Normal file

@@ -0,0 +1,32 @@
package parser
import (
"github.com/status-im/markdown/ast"
)
// Flags control optional behavior of parser.
type Flags int
// Options is a collection of supplementary parameters tweaking the behavior of various parts of the parser.
type Options struct {
ParserHook BlockFunc
ReadIncludeFn ReadIncludeFunc
Flags Flags // Flags allow customizing parser's behavior
}
// Parser configuration flags.
const (
FlagsNone Flags = 0
SkipFootnoteList Flags = 1 << iota // Skip adding the footnote list (regardless if they are parsed)
)
// BlockFunc allows registration of a parser function. If successful it
// returns an ast.Node, a buffer that should be parsed as a block and the number of bytes consumed.
type BlockFunc func(data []byte) (ast.Node, []byte, int)
// ReadIncludeFunc should read the file under path and return the read bytes.
// from will be set to the name of the current file being parsed. Initially
// this will be empty. address is the optional address specifier of which lines
// of the file to return. If this function is not set no data will be read.
type ReadIncludeFunc func(from, path string, address []byte) []byte

836
vendor/github.com/status-im/markdown/parser/parser.go generated vendored Normal file

@@ -0,0 +1,836 @@
/*
Package parser implements parser for markdown text that generates AST (abstract syntax tree).
*/
package parser
import (
"bytes"
"fmt"
"strings"
"unicode/utf8"
"github.com/status-im/markdown/ast"
)
// Extensions is a bitmask of enabled parser extensions.
type Extensions int
// Bit flags representing markdown parsing extensions.
// Use | (or) to specify multiple extensions.
const (
NoExtensions Extensions = 0
NoIntraEmphasis Extensions = 1 << iota // Ignore emphasis markers inside words
Tables // Parse tables
FencedCode // Parse fenced code blocks
Autolink // Detect embedded URLs that are not explicitly marked
Strikethrough // Strikethrough text using ~~test~~
LaxHTMLBlocks // Loosen up HTML block parsing rules
SpaceHeadings // Be strict about prefix heading rules
HardLineBreak // Translate newlines into line breaks
	NonBlockingSpace // Translate backslash-escaped spaces into non-blocking spaces
TabSizeEight // Expand tabs to eight spaces instead of four
Footnotes // Pandoc-style footnotes
NoEmptyLineBeforeBlock // No need to insert an empty line to start a (code, quote, ordered list, unordered list) block
HeadingIDs // specify heading IDs with {#id}
Titleblock // Titleblock ala pandoc
AutoHeadingIDs // Create the heading ID from the text
BackslashLineBreak // Translate trailing backslashes into line breaks
DefinitionLists // Parse definition lists
MathJax // Parse MathJax
OrderedListStart // Keep track of the first number used when starting an ordered list.
Attributes // Block Attributes
SuperSubscript // Super- and subscript support: 2^10^, H~2~O.
EmptyLinesBreakList // 2 empty lines break out of list
Includes // Support including other files.
Mmark // Support Mmark syntax, see https://mmark.nl/syntax
CommonExtensions Extensions = NoIntraEmphasis | Tables | FencedCode |
Autolink | Strikethrough | SpaceHeadings | HeadingIDs |
BackslashLineBreak | DefinitionLists | MathJax
)
// The size of a tab stop.
const (
tabSizeDefault = 4
tabSizeDouble = 8
)
// inlineParser is a handler called for each character that triggers a response when parsing inline data.
type inlineParser func(p *Parser, data []byte, offset int) (int, ast.Node)
// ReferenceOverrideFunc is expected to be called with a reference string and
// return either a valid Reference type that the reference string maps to or
// nil. If overridden is false, the default reference logic will be executed.
// See the documentation in Options for more details on use-case.
type ReferenceOverrideFunc func(reference string) (ref *Reference, overridden bool)
// Parser is a type that holds extensions and the runtime state used by
// Parse, and the renderer. You cannot use it directly; construct it with New.
type Parser struct {
// ReferenceOverride is an optional function callback that is called every
// time a reference is resolved. It can be set before starting parsing.
//
// In Markdown, the link reference syntax can be made to resolve a link to
// a reference instead of an inline URL, in one of the following ways:
//
// * [link text][refid]
// * [refid][]
//
// Usually, the refid is defined at the bottom of the Markdown document. If
// this override function is provided, the refid is passed to the override
// function first, before consulting the defined refids at the bottom. If
// the override function indicates an override did not occur, the refids at
// the bottom will be used to fill in the link details.
ReferenceOverride ReferenceOverrideFunc
Opts Options
// after parsing, this is AST root of parsed markdown text
Doc ast.Node
extensions Extensions
refs map[string]*reference
refsRecord map[string]struct{}
inlineCallback [256]inlineParser
nesting int
maxNesting int
insideLink bool
indexCnt int // incremented after every index
// Footnotes need to be ordered as well as available to quickly check for
// presence. If a ref is also a footnote, it's stored both in refs and here
// in notes. Slice is nil if footnotes not enabled.
notes []*reference
tip ast.Node // = doc
oldTip ast.Node
lastMatchedContainer ast.Node // = doc
allClosed bool
// Attributes are attached to block level elements.
attr *ast.Attribute
includeStack *incStack
}
// New creates a markdown parser with CommonExtensions.
//
// You can then call `doc := p.Parse(markdown)` to parse markdown document
// and `markdown.Render(doc, renderer)` to convert it to another format with
// a renderer.
func New() *Parser {
return NewWithExtensions(CommonExtensions)
}
// NewWithExtensions creates a markdown parser with given extensions.
func NewWithExtensions(extension Extensions) *Parser {
p := Parser{
refs: make(map[string]*reference),
refsRecord: make(map[string]struct{}),
maxNesting: 1,
insideLink: false,
Doc: &ast.Document{},
extensions: extension,
allClosed: true,
includeStack: newIncStack(),
}
p.tip = p.Doc
p.oldTip = p.Doc
p.lastMatchedContainer = p.Doc
p.inlineCallback[' '] = maybeLineBreak
p.inlineCallback['*'] = emphasis
p.inlineCallback['#'] = statusTag
p.inlineCallback['@'] = mention
p.inlineCallback['_'] = emphasis
if p.extensions&Strikethrough != 0 {
p.inlineCallback['~'] = emphasis
}
p.inlineCallback['`'] = codeSpan
p.inlineCallback['\n'] = lineBreak
p.inlineCallback['\\'] = escape
if p.extensions&Autolink != 0 {
p.inlineCallback['h'] = maybeAutoLink
p.inlineCallback['m'] = maybeAutoLink
p.inlineCallback['f'] = maybeAutoLink
p.inlineCallback['H'] = maybeAutoLink
p.inlineCallback['M'] = maybeAutoLink
p.inlineCallback['F'] = maybeAutoLink
}
return &p
}
func (p *Parser) getRef(refid string) (ref *reference, found bool) {
if p.ReferenceOverride != nil {
r, overridden := p.ReferenceOverride(refid)
if overridden {
if r == nil {
return nil, false
}
return &reference{
link: []byte(r.Link),
title: []byte(r.Title),
noteID: 0,
hasBlock: false,
text: []byte(r.Text)}, true
}
}
// refs are case insensitive
ref, found = p.refs[strings.ToLower(refid)]
return ref, found
}
func (p *Parser) isFootnote(ref *reference) bool {
_, ok := p.refsRecord[string(ref.link)]
return ok
}
func (p *Parser) finalize(block ast.Node) {
p.tip = block.GetParent()
}
func (p *Parser) addChild(node ast.Node) ast.Node {
for !canNodeContain(p.tip, node) {
p.finalize(p.tip)
}
ast.AppendChild(p.tip, node)
p.tip = node
return node
}
func canNodeContain(n ast.Node, v ast.Node) bool {
switch n.(type) {
case *ast.List:
return isListItem(v)
case *ast.Document, *ast.BlockQuote, *ast.Aside, *ast.ListItem, *ast.CaptionFigure:
return !isListItem(v)
case *ast.Table:
switch v.(type) {
case *ast.TableHeader, *ast.TableBody, *ast.TableFooter:
return true
default:
return false
}
case *ast.TableHeader, *ast.TableBody, *ast.TableFooter:
_, ok := v.(*ast.TableRow)
return ok
case *ast.TableRow:
_, ok := v.(*ast.TableCell)
return ok
}
return false
}
func (p *Parser) closeUnmatchedBlocks() {
if p.allClosed {
return
}
for p.oldTip != p.lastMatchedContainer {
parent := p.oldTip.GetParent()
p.finalize(p.oldTip)
p.oldTip = parent
}
p.allClosed = true
}
// Reference represents the details of a link.
// See the documentation in Options for more details on use-case.
type Reference struct {
// Link is usually the URL the reference points to.
Link string
// Title is the alternate text describing the link in more detail.
Title string
// Text is the optional text to override the ref with if the syntax used was
// [refid][]
Text string
}
// Parse generates an AST (abstract syntax tree) representing the markdown document.
//
// The result is the root of the tree, whose underlying type is *ast.Document.
//
// You can then convert the AST to HTML using html.Renderer, convert it to some
// other format using a custom renderer, or transform the tree.
func (p *Parser) Parse(input []byte) ast.Node {
p.block(input)
// Walk the tree and finish up some of the unfinished blocks
for p.tip != nil {
p.finalize(p.tip)
}
// Walk the tree again and process inline markdown in each block
ast.WalkFunc(p.Doc, func(node ast.Node, entering bool) ast.WalkStatus {
switch node.(type) {
case *ast.Paragraph:
p.Inline(node, node.AsContainer().Content)
node.AsContainer().Content = nil
}
return ast.GoToNext
})
return p.Doc
}
func (p *Parser) parseRefsToAST() {
if p.extensions&Footnotes == 0 || len(p.notes) == 0 {
return
}
p.tip = p.Doc
list := &ast.List{
IsFootnotesList: true,
ListFlags: ast.ListTypeOrdered,
}
p.addBlock(&ast.Footnotes{})
block := p.addBlock(list)
flags := ast.ListItemBeginningOfList
// Note: this loop is intentionally explicit, not range-form. This is
// because the body of the loop will append nested footnotes to p.notes and
// we need to process those late additions. Range form would only walk over
// the fixed initial set.
for i := 0; i < len(p.notes); i++ {
ref := p.notes[i]
p.addChild(ref.footnote)
block := ref.footnote
listItem := block.(*ast.ListItem)
listItem.ListFlags = flags | ast.ListTypeOrdered
listItem.RefLink = ref.link
if ref.hasBlock {
flags |= ast.ListItemContainsBlock
p.block(ref.title)
} else {
p.Inline(block, ref.title)
}
flags &^= ast.ListItemBeginningOfList | ast.ListItemContainsBlock
}
above := list.Parent
finalizeList(list)
p.tip = above
ast.WalkFunc(block, func(node ast.Node, entering bool) ast.WalkStatus {
switch node.(type) {
case *ast.Paragraph, *ast.Heading:
p.Inline(node, node.AsContainer().Content)
node.AsContainer().Content = nil
}
return ast.GoToNext
})
}
//
// Link references
//
// This section implements support for references that (usually) appear
// as footnotes in a document, and can be referenced anywhere in the document.
// The basic format is:
//
// [1]: http://www.google.com/ "Google"
// [2]: http://www.github.com/ "Github"
//
// Anywhere in the document, the reference can be linked by referring to its
// label, i.e., 1 and 2 in this example, as in:
//
// This library is hosted on [Github][2], a git hosting site.
//
// Actual footnotes as specified in Pandoc and supported by some other Markdown
// libraries such as php-markdown are also taken care of. They look like this:
//
// This sentence needs a bit of further explanation.[^note]
//
// [^note]: This is the explanation.
//
// Footnotes should be placed at the end of the document in an ordered list.
// Inline footnotes such as:
//
// Inline footnotes^[Not supported.] also exist.
//
// are not yet supported.
// reference holds all information necessary for a reference-style link or
// footnote.
//
// Consider this markdown with reference-style links:
//
// [link][ref]
//
// [ref]: /url/ "tooltip title"
//
// It will be ultimately converted to this HTML:
//
// <p><a href=\"/url/\" title=\"title\">link</a></p>
//
// And a reference structure will be populated as follows:
//
// p.refs["ref"] = &reference{
// link: "/url/",
// title: "tooltip title",
// }
//
// Alternatively, reference can contain information about a footnote. Consider
// this markdown:
//
// Text needing a footnote.[^a]
//
// [^a]: This is the note
//
// A reference structure will be populated as follows:
//
// p.refs["a"] = &reference{
// link: "a",
// title: "This is the note",
// noteID: <some positive int>,
// }
//
// TODO: As you can see, it begs for splitting into two dedicated structures
// for refs and for footnotes.
type reference struct {
link []byte
title []byte
noteID int // 0 if not a footnote ref
hasBlock bool
footnote ast.Node // a link to the Item node within a list of footnotes
text []byte // only gets populated by refOverride feature with Reference.Text
}
func (r *reference) String() string {
return fmt.Sprintf("{link: %q, title: %q, text: %q, noteID: %d, hasBlock: %v}",
r.link, r.title, r.text, r.noteID, r.hasBlock)
}
// Check whether or not data starts with a reference link.
// If so, it is parsed and stored in the list of references
// (in the render struct).
// Returns the number of bytes to skip to move past it,
// or zero if the first line is not a reference.
func isReference(p *Parser, data []byte, tabSize int) int {
// up to 3 optional leading spaces
if len(data) < 4 {
return 0
}
i := 0
for i < 3 && data[i] == ' ' {
i++
}
noteID := 0
// id part: anything but a newline between brackets
if data[i] != '[' {
return 0
}
i++
if p.extensions&Footnotes != 0 {
if i < len(data) && data[i] == '^' {
// we can set it to anything here because the proper noteIds will
// be assigned later during the second pass. It just has to be != 0
noteID = 1
i++
}
}
idOffset := i
for i < len(data) && data[i] != '\n' && data[i] != '\r' && data[i] != ']' {
i++
}
if i >= len(data) || data[i] != ']' {
return 0
}
idEnd := i
// footnotes can have an empty ID, like this: [^], but a reference cannot be
// empty like this: []. Break early if it's not a footnote and there's no ID
if noteID == 0 && idOffset == idEnd {
return 0
}
// spacer: colon (space | tab)* newline? (space | tab)*
i++
if i >= len(data) || data[i] != ':' {
return 0
}
i++
for i < len(data) && (data[i] == ' ' || data[i] == '\t') {
i++
}
if i < len(data) && (data[i] == '\n' || data[i] == '\r') {
i++
if i < len(data) && data[i] == '\n' && data[i-1] == '\r' {
i++
}
}
for i < len(data) && (data[i] == ' ' || data[i] == '\t') {
i++
}
if i >= len(data) {
return 0
}
var (
linkOffset, linkEnd int
titleOffset, titleEnd int
lineEnd int
raw []byte
hasBlock bool
)
if p.extensions&Footnotes != 0 && noteID != 0 {
linkOffset, linkEnd, raw, hasBlock = scanFootnote(p, data, i, tabSize)
lineEnd = linkEnd
} else {
linkOffset, linkEnd, titleOffset, titleEnd, lineEnd = scanLinkRef(p, data, i)
}
if lineEnd == 0 {
return 0
}
// a valid ref has been found
ref := &reference{
noteID: noteID,
hasBlock: hasBlock,
}
if noteID > 0 {
// reusing the link field for the id since footnotes don't have links
ref.link = data[idOffset:idEnd]
// if footnote, it's not really a title, it's the contained text
ref.title = raw
} else {
ref.link = data[linkOffset:linkEnd]
ref.title = data[titleOffset:titleEnd]
}
// id matches are case-insensitive
id := string(bytes.ToLower(data[idOffset:idEnd]))
p.refs[id] = ref
return lineEnd
}
func scanLinkRef(p *Parser, data []byte, i int) (linkOffset, linkEnd, titleOffset, titleEnd, lineEnd int) {
// link: whitespace-free sequence, optionally between angle brackets
if data[i] == '<' {
i++
}
linkOffset = i
for i < len(data) && data[i] != ' ' && data[i] != '\t' && data[i] != '\n' && data[i] != '\r' {
i++
}
linkEnd = i
if linkEnd < len(data) && data[linkOffset] == '<' && data[linkEnd-1] == '>' {
linkOffset++
linkEnd--
}
// optional spacer: (space | tab)* (newline | '\'' | '"' | '(' )
for i < len(data) && (data[i] == ' ' || data[i] == '\t') {
i++
}
if i < len(data) && data[i] != '\n' && data[i] != '\r' && data[i] != '\'' && data[i] != '"' && data[i] != '(' {
return
}
// compute end-of-line
if i >= len(data) || data[i] == '\r' || data[i] == '\n' {
lineEnd = i
}
if i+1 < len(data) && data[i] == '\r' && data[i+1] == '\n' {
lineEnd++
}
// optional (space|tab)* spacer after a newline
if lineEnd > 0 {
i = lineEnd + 1
for i < len(data) && (data[i] == ' ' || data[i] == '\t') {
i++
}
}
// optional title: any non-newline sequence enclosed in '"() alone on its line
if i+1 < len(data) && (data[i] == '\'' || data[i] == '"' || data[i] == '(') {
i++
titleOffset = i
// look for EOL
for i < len(data) && data[i] != '\n' && data[i] != '\r' {
i++
}
if i+1 < len(data) && data[i] == '\n' && data[i+1] == '\r' {
titleEnd = i + 1
} else {
titleEnd = i
}
// step back
i--
for i > titleOffset && (data[i] == ' ' || data[i] == '\t') {
i--
}
if i > titleOffset && (data[i] == '\'' || data[i] == '"' || data[i] == ')') {
lineEnd = titleEnd
titleEnd = i
}
}
return
}
// The first bit of this logic is the same as Parser.listItem, but the rest
// is much simpler. This function simply finds the entire block and shifts it
// over by one tab if it is indeed a block (just returns the line if it's not).
// blockEnd is the end of the section in the input buffer, and contents is the
// extracted text that was shifted over one tab. It will need to be rendered at
// the end of the document.
func scanFootnote(p *Parser, data []byte, i, indentSize int) (blockStart, blockEnd int, contents []byte, hasBlock bool) {
if i == 0 || len(data) == 0 {
return
}
// skip leading whitespace on first line
for i < len(data) && data[i] == ' ' {
i++
}
blockStart = i
// find the end of the line
blockEnd = i
for i < len(data) && data[i-1] != '\n' {
i++
}
// get working buffer
var raw bytes.Buffer
// put the first line into the working buffer
raw.Write(data[blockEnd:i])
blockEnd = i
// process the following lines
containsBlankLine := false
gatherLines:
for blockEnd < len(data) {
i++
// find the end of this line
for i < len(data) && data[i-1] != '\n' {
i++
}
// if it is an empty line, guess that it is part of this item
// and move on to the next line
if p.isEmpty(data[blockEnd:i]) > 0 {
containsBlankLine = true
blockEnd = i
continue
}
n := 0
if n = isIndented(data[blockEnd:i], indentSize); n == 0 {
// this is the end of the block.
// we don't want to include this last line in the index.
break gatherLines
}
// if there were blank lines before this one, insert a new one now
if containsBlankLine {
raw.WriteByte('\n')
containsBlankLine = false
}
// get rid of that first tab, write to buffer
raw.Write(data[blockEnd+n : i])
hasBlock = true
blockEnd = i
}
if data[blockEnd-1] != '\n' {
raw.WriteByte('\n')
}
contents = raw.Bytes()
return
}
// isPunctuation returns true if c is a punctuation symbol.
func isPunctuation(c byte) bool {
for _, r := range []byte("!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~") {
if c == r {
return true
}
}
return false
}
// isSpace returns true if c is a whitespace character
func isSpace(c byte) bool {
return c == ' ' || c == '\t' || c == '\n' || c == '\r' || c == '\f' || c == '\v'
}
// isLowerCaseLetter returns true if c is an ASCII lowercase letter
func isLowerCaseLetter(c byte) bool {
return (c >= 'a' && c <= 'z')
}
// isLetter returns true if c is an ASCII letter
func isLetter(c byte) bool {
return (c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z')
}
// isAlnum returns true if c is a digit or letter
// TODO: check when this is looking for ASCII alnum and when it should use unicode
func isAlnum(c byte) bool {
return (c >= '0' && c <= '9') || isLetter(c)
}
// isNum returns true if c is a digit
func isNum(c byte) bool {
return (c >= '0' && c <= '9')
}
func isValidStatusTagChar(c byte) bool {
return isNum(c) || isLowerCaseLetter(c) || c == '-' || c == '_'
}
func isValidTerminatingMentionChar(c byte) bool {
return isSpace(c) || c == '.' || c == ',' || c == ':' || c == ';' || c == '!' || c == '?'
}
func isValidPublicKeyChar(c byte) bool {
	// lowercase hex digit
	return (c >= '0' && c <= '9') || (c >= 'a' && c <= 'f')
}
func isValidCompressedPublicKeyChar(c byte) bool {
	// digits plus ASCII letters, excluding 'l', 'I' and 'O'
	const alphabet = "0123456789abcdefghijkmnopqrstuvwxyzABCDEFGHJKLMNPQRSTUVWXYZ"
	return strings.IndexByte(alphabet, c) >= 0
}
func isValidSystemTagChar(c byte) bool {
	return isNum(c)
}
// TODO: this is not used
// Replace tab characters with spaces, aligning to the next TAB_SIZE column.
// always ends output with a newline
func expandTabs(out *bytes.Buffer, line []byte, tabSize int) {
// first, check for common cases: no tabs, or only tabs at beginning of line
i, prefix := 0, 0
slowcase := false
for i = 0; i < len(line); i++ {
if line[i] == '\t' {
if prefix == i {
prefix++
} else {
slowcase = true
break
}
}
}
// no need to decode runes if all tabs are at the beginning of the line
if !slowcase {
for i = 0; i < prefix*tabSize; i++ {
out.WriteByte(' ')
}
out.Write(line[prefix:])
return
}
// the slow case: we need to count runes to figure out how
// many spaces to insert for each tab
column := 0
i = 0
for i < len(line) {
start := i
for i < len(line) && line[i] != '\t' {
_, size := utf8.DecodeRune(line[i:])
i += size
column++
}
if i > start {
out.Write(line[start:i])
}
if i >= len(line) {
break
}
for {
out.WriteByte(' ')
column++
if column%tabSize == 0 {
break
}
}
i++
}
}
// Find if a line counts as indented or not.
// Returns number of characters the indent is (0 = not indented).
func isIndented(data []byte, indentSize int) int {
if len(data) == 0 {
return 0
}
if data[0] == '\t' {
return 1
}
if len(data) < indentSize {
return 0
}
for i := 0; i < indentSize; i++ {
if data[i] != ' ' {
return 0
}
}
return indentSize
}
// Create a URL-safe slug for fragments
func slugify(in []byte) []byte {
if len(in) == 0 {
return in
}
out := make([]byte, 0, len(in))
sym := false
for _, ch := range in {
if isAlnum(ch) {
sym = false
out = append(out, ch)
} else if sym {
continue
} else {
out = append(out, '-')
sym = true
}
}
var a, b int
var ch byte
for a, ch = range out {
if ch != '-' {
break
}
}
for b = len(out) - 1; b > 0; b-- {
if out[b] != '-' {
break
}
}
return out[a : b+1]
}
func isListItem(d ast.Node) bool {
_, ok := d.(*ast.ListItem)
return ok
}
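A usage sketch assembled from the doc comments above (New, ReferenceOverride, Parse, ast.WalkFunc). It is not part of the vendored file, and `ast.Link` is an assumption carried over from upstream gomarkdown's ast package:

```go
package main

import (
	"fmt"

	"github.com/status-im/markdown/ast"
	"github.com/status-im/markdown/parser"
)

func main() {
	p := parser.New()
	// Resolve [text][refid] references from code before consulting the
	// refids defined at the bottom of the document.
	p.ReferenceOverride = func(refid string) (*parser.Reference, bool) {
		if refid == "home" {
			return &parser.Reference{Link: "https://status.im", Title: "Status"}, true
		}
		return nil, false // fall back to the document's own refids
	}
	doc := p.Parse([]byte("See the [project site][home]."))

	// Count the links the parser produced.
	n := 0
	ast.WalkFunc(doc, func(node ast.Node, entering bool) ast.WalkStatus {
		if _, ok := node.(*ast.Link); ok && entering {
			n++
		}
		return ast.GoToNext
	})
	fmt.Println("links:", n)
}
```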

89
vendor/github.com/status-im/markdown/parser/ref.go generated vendored Normal file

@@ -0,0 +1,89 @@
package parser
import (
"bytes"
"fmt"
"github.com/status-im/markdown/ast"
)
// parse '(#r)', where r does not contain spaces, as a cross reference; or
// (!item) / (!item, subitem) for an index, where (!!item) signals a primary entry.
func maybeShortRefOrIndex(p *Parser, data []byte, offset int) (int, ast.Node) {
if len(data[offset:]) < 4 {
return 0, nil
}
// short ref first
data = data[offset:]
i := 1
switch data[i] {
case '#': // cross ref
i++
Loop:
for i < len(data) {
c := data[i]
switch {
case c == ')':
break Loop
case !isAlnum(c):
if c == '_' || c == '-' || c == ':' {
i++
continue
}
i = 0
break Loop
}
i++
}
if i >= len(data) {
return 0, nil
}
if data[i] != ')' {
return 0, nil
}
id := data[2:i]
node := &ast.CrossReference{}
node.Destination = id
return i + 1, node
case '!': // index
i++
start := i
i = skipUntilChar(data, start, ')')
// did we reach the end of the buffer without a closing marker?
if i >= len(data) {
return 0, nil
}
if len(data[start:i]) < 1 {
return 0, nil
}
idx := &ast.Index{}
idx.ID = fmt.Sprintf("idxref:%d", p.indexCnt)
p.indexCnt++
idx.Primary = data[start] == '!'
buf := data[start:i]
if idx.Primary {
buf = buf[1:]
}
items := bytes.Split(buf, []byte(","))
switch len(items) {
case 1:
idx.Item = bytes.TrimSpace(items[0])
return i + 1, idx
case 2:
idx.Item = bytes.TrimSpace(items[0])
idx.Subitem = bytes.TrimSpace(items[1])
return i + 1, idx
}
}
return 0, nil
}
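Putting the two branches of `maybeShortRefOrIndex` together, the inline forms it accepts look like this (illustrative inputs):

```
(#sec-intro)     cross reference; the destination becomes "sec-intro"
(!gopher)        index entry
(!gopher, diet)  index entry with a subitem
(!!gopher)       primary index entry
```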

7
vendor/github.com/status-im/markdown/todo.md generated vendored Normal file

@@ -0,0 +1,7 @@
# Things to do
[ ] docs: add examples like https://godoc.org/github.com/dgrijalva/jwt-go (put in foo_example_test.go). Or see https://github.com/garyburd/redigo/blob/master/redis/zpop_example_test.go#L5 / https://godoc.org/github.com/garyburd/redigo/redis or https://godoc.org/github.com/go-redis/redis
[ ] figure out expandTabs and parser.TabSizeEight. Are those used?
[ ] SoftbreakData is not used

189
vendor/github.com/status-im/markdown/tracking-perf.md generated vendored Normal file

@@ -0,0 +1,189 @@
## Tracking perf changes
Initial performance:
```
goos: darwin
goarch: amd64
pkg: github.com/gomarkdown/markdown
BenchmarkEscapeHTML-8 2000000 823 ns/op 0 B/op 0 allocs/op
BenchmarkSmartDoubleQuotes-8 300000 5033 ns/op 9872 B/op 56 allocs/op
BenchmarkReferenceAmps-8 100000 19538 ns/op 26776 B/op 150 allocs/op
BenchmarkReferenceAutoLinks-8 100000 17574 ns/op 24544 B/op 132 allocs/op
BenchmarkReferenceBackslashEscapes-8 30000 50977 ns/op 76752 B/op 243 allocs/op
BenchmarkReferenceBlockquotesWithCodeBlocks-8 200000 8546 ns/op 12864 B/op 65 allocs/op
BenchmarkReferenceCodeBlocks-8 200000 9000 ns/op 14912 B/op 70 allocs/op
BenchmarkReferenceCodeSpans-8 200000 8856 ns/op 14992 B/op 69 allocs/op
BenchmarkReferenceHardWrappedPara-8 200000 6599 ns/op 11312 B/op 57 allocs/op
BenchmarkReferenceHorizontalRules-8 100000 15483 ns/op 23536 B/op 98 allocs/op
BenchmarkReferenceInlineHTMLAdvances-8 200000 6839 ns/op 12150 B/op 62 allocs/op
BenchmarkReferenceInlineHTMLSimple-8 100000 19940 ns/op 28488 B/op 117 allocs/op
BenchmarkReferenceInlineHTMLComments-8 200000 7455 ns/op 13440 B/op 64 allocs/op
BenchmarkReferenceLinksInline-8 100000 16425 ns/op 23664 B/op 147 allocs/op
BenchmarkReferenceLinksReference-8 30000 54895 ns/op 66464 B/op 416 allocs/op
BenchmarkReferenceLinksShortcut-8 100000 17647 ns/op 23776 B/op 158 allocs/op
BenchmarkReferenceLiterQuotesInTitles-8 200000 9367 ns/op 14832 B/op 95 allocs/op
BenchmarkReferenceMarkdownBasics-8 10000 129772 ns/op 130848 B/op 378 allocs/op
BenchmarkReferenceMarkdownSyntax-8 3000 502365 ns/op 461411 B/op 1411 allocs/op
BenchmarkReferenceNestedBlockquotes-8 200000 7028 ns/op 12688 B/op 64 allocs/op
BenchmarkReferenceOrderedAndUnorderedLists-8 20000 79686 ns/op 107520 B/op 374 allocs/op
BenchmarkReferenceStrongAndEm-8 200000 10020 ns/op 17792 B/op 78 allocs/op
BenchmarkReferenceTabs-8 200000 12025 ns/op 18224 B/op 81 allocs/op
BenchmarkReferenceTidyness-8 200000 8985 ns/op 14432 B/op 71 allocs/op
PASS
ok github.com/gomarkdown/markdown 45.375s
```
After switching to using interface{} for Node.Data:
```
BenchmarkEscapeHTML-8 2000000 929 ns/op 0 B/op 0 allocs/op
BenchmarkSmartDoubleQuotes-8 300000 5126 ns/op 9248 B/op 56 allocs/op
BenchmarkReferenceAmps-8 100000 19927 ns/op 17880 B/op 154 allocs/op
BenchmarkReferenceAutoLinks-8 100000 20732 ns/op 17360 B/op 141 allocs/op
BenchmarkReferenceBackslashEscapes-8 30000 50267 ns/op 38128 B/op 244 allocs/op
BenchmarkReferenceBlockquotesWithCodeBlocks-8 200000 8988 ns/op 10912 B/op 67 allocs/op
BenchmarkReferenceCodeBlocks-8 200000 8611 ns/op 12256 B/op 74 allocs/op
BenchmarkReferenceCodeSpans-8 200000 8256 ns/op 11248 B/op 69 allocs/op
BenchmarkReferenceHardWrappedPara-8 200000 6739 ns/op 9856 B/op 57 allocs/op
BenchmarkReferenceHorizontalRules-8 100000 15503 ns/op 15600 B/op 104 allocs/op
BenchmarkReferenceInlineHTMLAdvances-8 200000 6874 ns/op 10278 B/op 62 allocs/op
BenchmarkReferenceInlineHTMLSimple-8 100000 22271 ns/op 18552 B/op 121 allocs/op
BenchmarkReferenceInlineHTMLComments-8 200000 8315 ns/op 10736 B/op 64 allocs/op
BenchmarkReferenceLinksInline-8 100000 16155 ns/op 16912 B/op 152 allocs/op
BenchmarkReferenceLinksReference-8 30000 52387 ns/op 38192 B/op 445 allocs/op
BenchmarkReferenceLinksShortcut-8 100000 17111 ns/op 16592 B/op 167 allocs/op
BenchmarkReferenceLiterQuotesInTitles-8 200000 9164 ns/op 12048 B/op 97 allocs/op
BenchmarkReferenceMarkdownBasics-8 10000 129262 ns/op 87264 B/op 416 allocs/op
BenchmarkReferenceMarkdownSyntax-8 3000 496873 ns/op 293906 B/op 1559 allocs/op
BenchmarkReferenceNestedBlockquotes-8 200000 6854 ns/op 10192 B/op 64 allocs/op
BenchmarkReferenceOrderedAndUnorderedLists-8 20000 79633 ns/op 55024 B/op 447 allocs/op
BenchmarkReferenceStrongAndEm-8 200000 9637 ns/op 12176 B/op 78 allocs/op
BenchmarkReferenceTabs-8 100000 12164 ns/op 13776 B/op 87 allocs/op
BenchmarkReferenceTidyness-8 200000 8677 ns/op 11296 B/op 75 allocs/op
```
Not necessarily faster, but uses fewer bytes per op (though sometimes more allocs).
After tweaking the API:
```
$ ./s/run-bench.sh
go test -bench=. -test.benchmem
goos: darwin
goarch: amd64
pkg: github.com/gomarkdown/markdown
BenchmarkEscapeHTML-8 2000000 834 ns/op 0 B/op 0 allocs/op
BenchmarkSmartDoubleQuotes-8 300000 3486 ns/op 6160 B/op 27 allocs/op
BenchmarkReferenceAmps-8 100000 18158 ns/op 14792 B/op 125 allocs/op
BenchmarkReferenceAutoLinks-8 100000 16824 ns/op 14272 B/op 112 allocs/op
BenchmarkReferenceBackslashEscapes-8 30000 44066 ns/op 35040 B/op 215 allocs/op
BenchmarkReferenceBlockquotesWithCodeBlocks-8 200000 6868 ns/op 7824 B/op 38 allocs/op
BenchmarkReferenceCodeBlocks-8 200000 7157 ns/op 9168 B/op 45 allocs/op
BenchmarkReferenceCodeSpans-8 200000 6663 ns/op 8160 B/op 40 allocs/op
BenchmarkReferenceHardWrappedPara-8 300000 4821 ns/op 6768 B/op 28 allocs/op
BenchmarkReferenceHorizontalRules-8 100000 13033 ns/op 12512 B/op 75 allocs/op
BenchmarkReferenceInlineHTMLAdvances-8 300000 4998 ns/op 7190 B/op 33 allocs/op
BenchmarkReferenceInlineHTMLSimple-8 100000 17696 ns/op 15464 B/op 92 allocs/op
BenchmarkReferenceInlineHTMLComments-8 300000 5506 ns/op 7648 B/op 35 allocs/op
BenchmarkReferenceLinksInline-8 100000 14450 ns/op 13824 B/op 123 allocs/op
BenchmarkReferenceLinksReference-8 30000 52561 ns/op 35104 B/op 416 allocs/op
BenchmarkReferenceLinksShortcut-8 100000 15616 ns/op 13504 B/op 138 allocs/op
BenchmarkReferenceLiterQuotesInTitles-8 200000 7772 ns/op 8960 B/op 68 allocs/op
BenchmarkReferenceMarkdownBasics-8 10000 121436 ns/op 84176 B/op 387 allocs/op
BenchmarkReferenceMarkdownSyntax-8 3000 487404 ns/op 290818 B/op 1530 allocs/op
BenchmarkReferenceNestedBlockquotes-8 300000 5098 ns/op 7104 B/op 35 allocs/op
BenchmarkReferenceOrderedAndUnorderedLists-8 20000 74422 ns/op 51936 B/op 418 allocs/op
BenchmarkReferenceStrongAndEm-8 200000 7888 ns/op 9088 B/op 49 allocs/op
BenchmarkReferenceTabs-8 200000 10061 ns/op 10688 B/op 58 allocs/op
BenchmarkReferenceTidyness-8 200000 7152 ns/op 8208 B/op 46 allocs/op
ok github.com/gomarkdown/markdown 40.809s
```
After refactoring Renderer:
```
BenchmarkEscapeHTML-8 2000000 883 ns/op 0 B/op 0 allocs/op
BenchmarkSmartDoubleQuotes-8 300000 3717 ns/op 6208 B/op 29 allocs/op
BenchmarkReferenceAmps-8 100000 19135 ns/op 14680 B/op 123 allocs/op
BenchmarkReferenceAutoLinks-8 100000 17142 ns/op 14176 B/op 110 allocs/op
BenchmarkReferenceBackslashEscapes-8 30000 54616 ns/op 35088 B/op 217 allocs/op
BenchmarkReferenceBlockquotesWithCodeBlocks-8 200000 7993 ns/op 7872 B/op 40 allocs/op
BenchmarkReferenceCodeBlocks-8 200000 8285 ns/op 9216 B/op 47 allocs/op
BenchmarkReferenceCodeSpans-8 200000 7684 ns/op 8208 B/op 42 allocs/op
BenchmarkReferenceHardWrappedPara-8 200000 5595 ns/op 6816 B/op 30 allocs/op
BenchmarkReferenceHorizontalRules-8 100000 16444 ns/op 12560 B/op 77 allocs/op
BenchmarkReferenceInlineHTMLAdvances-8 200000 5415 ns/op 7238 B/op 35 allocs/op
BenchmarkReferenceInlineHTMLSimple-8 100000 19867 ns/op 15512 B/op 94 allocs/op
BenchmarkReferenceInlineHTMLComments-8 200000 6026 ns/op 7696 B/op 37 allocs/op
BenchmarkReferenceLinksInline-8 100000 14864 ns/op 13664 B/op 120 allocs/op
BenchmarkReferenceLinksReference-8 30000 52479 ns/op 34816 B/op 401 allocs/op
BenchmarkReferenceLinksShortcut-8 100000 15812 ns/op 13472 B/op 135 allocs/op
BenchmarkReferenceLiterQuotesInTitles-8 200000 7767 ns/op 8880 B/op 68 allocs/op
BenchmarkReferenceMarkdownBasics-8 10000 131065 ns/op 84048 B/op 386 allocs/op
BenchmarkReferenceMarkdownSyntax-8 2000 515604 ns/op 289953 B/op 1501 allocs/op
BenchmarkReferenceNestedBlockquotes-8 200000 5655 ns/op 7152 B/op 37 allocs/op
BenchmarkReferenceOrderedAndUnorderedLists-8 20000 84188 ns/op 51984 B/op 420 allocs/op
BenchmarkReferenceStrongAndEm-8 200000 8664 ns/op 9136 B/op 51 allocs/op
BenchmarkReferenceTabs-8 100000 11110 ns/op 10736 B/op 60 allocs/op
BenchmarkReferenceTidyness-8 200000 7628 ns/op 8256 B/op 48 allocs/op
ok github.com/gomarkdown/markdown 40.841s
```
After Node refactor to have Children array:
```
BenchmarkEscapeHTML-8 2000000 901 ns/op 0 B/op 0 allocs/op
BenchmarkSmartDoubleQuotes-8 300000 3905 ns/op 6224 B/op 31 allocs/op
BenchmarkReferenceAmps-8 100000 22216 ns/op 15560 B/op 157 allocs/op
BenchmarkReferenceAutoLinks-8 100000 20335 ns/op 14824 B/op 146 allocs/op
BenchmarkReferenceBackslashEscapes-8 20000 69174 ns/op 37392 B/op 316 allocs/op
BenchmarkReferenceBlockquotesWithCodeBlocks-8 200000 8443 ns/op 7968 B/op 48 allocs/op
BenchmarkReferenceCodeBlocks-8 200000 9250 ns/op 9392 B/op 58 allocs/op
BenchmarkReferenceCodeSpans-8 200000 8515 ns/op 8432 B/op 54 allocs/op
BenchmarkReferenceHardWrappedPara-8 200000 5738 ns/op 6856 B/op 34 allocs/op
BenchmarkReferenceHorizontalRules-8 100000 20864 ns/op 13648 B/op 93 allocs/op
BenchmarkReferenceInlineHTMLAdvances-8 200000 6187 ns/op 7310 B/op 40 allocs/op
BenchmarkReferenceInlineHTMLSimple-8 50000 23793 ns/op 16128 B/op 114 allocs/op
BenchmarkReferenceInlineHTMLComments-8 200000 7060 ns/op 7840 B/op 44 allocs/op
BenchmarkReferenceLinksInline-8 100000 18432 ns/op 14496 B/op 153 allocs/op
BenchmarkReferenceLinksReference-8 20000 67666 ns/op 37136 B/op 502 allocs/op
BenchmarkReferenceLinksShortcut-8 100000 19324 ns/op 13984 B/op 162 allocs/op
BenchmarkReferenceLiterQuotesInTitles-8 200000 8998 ns/op 9320 B/op 83 allocs/op
BenchmarkReferenceMarkdownBasics-8 10000 160908 ns/op 88152 B/op 518 allocs/op
BenchmarkReferenceMarkdownSyntax-8 2000 707160 ns/op 303801 B/op 2044 allocs/op
BenchmarkReferenceNestedBlockquotes-8 200000 6740 ns/op 7248 B/op 45 allocs/op
BenchmarkReferenceOrderedAndUnorderedLists-8 10000 115808 ns/op 55052 B/op 626 allocs/op
BenchmarkReferenceStrongAndEm-8 100000 10540 ns/op 9416 B/op 72 allocs/op
BenchmarkReferenceTabs-8 100000 13171 ns/op 10968 B/op 77 allocs/op
BenchmarkReferenceTidyness-8 200000 8903 ns/op 8404 B/op 62 allocs/op
PASS
ok github.com/gomarkdown/markdown 43.477s
```
It's slower (but opens up possibilities for further improvements).
After refactoring to make ast.Node a top-level thing.
```
BenchmarkEscapeHTML-8 2000000 829 ns/op 0 B/op 0 allocs/op
BenchmarkSmartDoubleQuotes-8 300000 3998 ns/op 6192 B/op 31 allocs/op
BenchmarkReferenceAmps-8 50000 27389 ns/op 15480 B/op 153 allocs/op
BenchmarkReferenceAutoLinks-8 50000 23106 ns/op 14656 B/op 137 allocs/op
BenchmarkReferenceBackslashEscapes-8 10000 112435 ns/op 36696 B/op 315 allocs/op
BenchmarkReferenceBlockquotesWithCodeBlocks-8 200000 9227 ns/op 7856 B/op 46 allocs/op
BenchmarkReferenceCodeBlocks-8 200000 10469 ns/op 9248 B/op 54 allocs/op
BenchmarkReferenceCodeSpans-8 200000 10522 ns/op 8368 B/op 54 allocs/op
BenchmarkReferenceHardWrappedPara-8 200000 6354 ns/op 6784 B/op 34 allocs/op
BenchmarkReferenceHorizontalRules-8 50000 32393 ns/op 13952 B/op 87 allocs/op
BenchmarkReferenceInlineHTMLAdvances-8 200000 6894 ns/op 7238 B/op 40 allocs/op
BenchmarkReferenceInlineHTMLSimple-8 50000 32942 ns/op 15864 B/op 110 allocs/op
BenchmarkReferenceInlineHTMLComments-8 200000 8181 ns/op 7776 B/op 44 allocs/op
BenchmarkReferenceLinksInline-8 100000 21679 ns/op 14400 B/op 148 allocs/op
BenchmarkReferenceLinksReference-8 20000 83928 ns/op 36688 B/op 473 allocs/op
BenchmarkReferenceLinksShortcut-8 100000 22053 ns/op 13872 B/op 153 allocs/op
BenchmarkReferenceLiterQuotesInTitles-8 100000 10784 ns/op 9296 B/op 81 allocs/op
BenchmarkReferenceMarkdownBasics-8 5000 237097 ns/op 87760 B/op 480 allocs/op
BenchmarkReferenceMarkdownSyntax-8 1000 1465402 ns/op 300769 B/op 1896 allocs/op
BenchmarkReferenceNestedBlockquotes-8 200000 7461 ns/op 7152 B/op 45 allocs/op
BenchmarkReferenceOrderedAndUnorderedLists-8 5000 212256 ns/op 53724 B/op 553 allocs/op
BenchmarkReferenceStrongAndEm-8 100000 13018 ns/op 9264 B/op 72 allocs/op
BenchmarkReferenceTabs-8 100000 15005 ns/op 10752 B/op 71 allocs/op
BenchmarkReferenceTidyness-8 200000 10308 ns/op 8292 B/op 58 allocs/op
PASS
ok github.com/gomarkdown/markdown 42.176s
```

13
vendor/github.com/status-im/migrate/v4/.dockerignore generated vendored Normal file

@@ -0,0 +1,13 @@
# Project
FAQ.md
README.md
LICENSE
Makefile
.gitignore
.travis.yml
CONTRIBUTING.md
MIGRATIONS.md
docker-deploy.sh
# Golang
testing

8
vendor/github.com/status-im/migrate/v4/.gitignore generated vendored Normal file

@@ -0,0 +1,8 @@
.DS_Store
cli/build
cli/cli
cli/migrate
.coverage
.godoc.pid
vendor/
.vscode/

27
vendor/github.com/status-im/migrate/v4/.golangci.yml generated vendored Normal file

@@ -0,0 +1,27 @@
run:
# timeout for analysis, e.g. 30s, 5m, default is 1m
timeout: 2m
linters:
enable:
#- golint
- interfacer
- unconvert
#- dupl
- goconst
- gofmt
- misspell
- maligned
- unparam
- nakedret
- prealloc
#- gosec
linters-settings:
misspell:
locale: US
issues:
max-same: 0
max-per-linter: 0
exclude-use-default: false
exclude:
# gosec: Duplicated errcheck checks
- G104

135
vendor/github.com/status-im/migrate/v4/.travis.yml generated vendored Normal file

@@ -0,0 +1,135 @@
language: go
sudo: required
matrix:
allow_failures:
- go: master
include:
# Supported versions of Go: https://golang.org/dl/
- go: "1.12.x"
- go: "1.13.x"
- go: master
go_import_path: github.com/golang-migrate/migrate
env:
global:
- GO111MODULE=on
- MIGRATE_TEST_CONTAINER_BOOT_TIMEOUT=60
- DOCKER_USERNAME=golangmigrate
- secure: "oSOznzUrgr5h45qW4PONkREpisPAt40tnM+KFWtS/Ggu5UI2Ie0CmyYXWuBjbt7B97a4yN9Qzmn8FxJHJ7kk+ABOi3muhkxeIhr6esXbzHhX/Jhv0mj1xkzX7KoVN9oHBz3cOI/QeRyEAO68xjDHNE2kby4RTT9VBt6TQUakKVkqI5qkqLBTADepCjVC+9XhxVxUNyeWKU8ormaUfJBjoNVoDlwXekUPnJenfmfZqXxUInvBCfUyp7Pq+kurBORmg4yc6qOlRYuK67Xw+i5xpjbZouNlXPk0rq7pPy5zjhmZQ3kImoFPvNMeKViDcI6kSIJKtjdhms9/g/6MgXS9HlL5kFy8tYKbsyiHnHB1BsvaLAKXctbUZFDPstgMPADfnad2kZXPrNqIhfWKZrGRWidawCYJ1sKKwYxLMKrtA0umqgMoL90MmBOELhuGmvMV0cFJB+zo+K2YWjEiMGd8xRb5mC5aAy0ZcCehO46jGtpr217EJmMF8Ywr7cFqM2Shg5U2jev9qUpYiXwmPnJKDuoT2ZHuHmPgFIkYiWC5yeJnnmG5bed1sKBp93AFrJX+1Rx5oC4BpNegewmBZKpOSwls/D1uMAeQK3dPmQHLsT6o2VBLfeDGr+zY0R85ywwPZCv00vGol02zYoTqN7eFqr6Qhjr/qx5K1nnxJdFK3Ts="
services:
- docker
cache:
directories:
- $GOPATH/pkg
before_install:
# Update docker to latest version: https://docs.travis-ci.com/user/docker/#installing-a-newer-docker-version
- curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add -
- sudo add-apt-repository "deb [arch=amd64] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable"
- sudo apt-get update
- sudo apt-get -y -o Dpkg::Options::="--force-confnew" install docker-ce
# Install golangci-lint
- curl -sfL https://install.goreleaser.com/github.com/golangci/golangci-lint.sh | sh -s -- -b $(go env GOPATH)/bin v1.20.0
- echo "TRAVIS_GO_VERSION=${TRAVIS_GO_VERSION}"
install:
- go get github.com/mattn/goveralls
script:
- golangci-lint run
- make test COVERAGE_DIR=/tmp/coverage
after_success:
- goveralls -service=travis-ci -coverprofile /tmp/coverage/combined.txt
- make list-external-deps > dependency_tree.txt && cat dependency_tree.txt
- make build-cli
- gem install --no-document fpm
- fpm -s dir -t deb -n migrate -v "$(git describe --tags 2>/dev/null | cut -c 2-)" --license MIT -m dhui@users.noreply.github.com --url https://github.com/golang-migrate/migrate --description='Database migrations' -a amd64 -p migrate.$(git describe --tags 2>/dev/null | cut -c 2-).deb --deb-no-default-config-files -f -C cli/build migrate.linux-amd64=/usr/local/bin/migrate
deploy:
- provider: releases
api_key:
secure: hWH1HLPpzpfA8pXQ93T1qKQVFSpQp0as/JLQ7D91jHuJ8p+RxVeqblDrR6HQY/95R/nyiE9GJmvUolSuw5h449LSrGxPtVWhdh6EnkxlQHlen5XeMhVjRjFV0sE9qGe8v7uAkiTfRO61ktTWHrEAvw5qpyqnNISodmZS78XIasPODQbNlzwINhWhDTHIjXGb4FpizYaL3OGCanrxfR9fQyCaqKGGBjRq3Mfq8U6Yd4mApmsE+uJxgaZV8K5zBqpkSzQRWhcVGNL5DuLsU3gfSJOo7kZeA2G71SHffH577dBoqtCZ4VFv169CoUZehLWCb+7XKJZmHXVujCURATSySLGUOPc6EoLFAn3YtsCA04mS4bZVo5FZPWVwfhjmkhtDR4f6wscKp7r1HsFHSOgm59QfETQdrn4MnZ44H2Jd39axqndn5DvK9EcZVjPHynOPnueXP2u6mTuUgh2VyyWBCDO3CNo0fGlo7VJI69IkIWNSD87K9cHZWYMClyKZkUzS+PmRAhHRYbVd+9ZjKOmnU36kUHNDG/ft1D4ogsY+rhVtXB4lgWDM5adri+EIScYdYnB1/pQexLBigcJY9uE7nQTR0U6QgVNYvun7uRNs40E0c4voSfmPdFO0FlOD2y1oQhnaXfWLbu9nMcTcs4RFGrcC7NzkUN4/WjG8s285V6w=
skip_cleanup: true
on:
go: "1.12.x"
repo: golang-migrate/migrate
tags: true
file:
- cli/build/migrate.linux-amd64.tar.gz
- cli/build/migrate.darwin-amd64.tar.gz
- cli/build/migrate.windows-amd64.exe.tar.gz
- cli/build/sha256sum.txt
- dependency_tree.txt
- provider: packagecloud
repository: migrate
username: golang-migrate
token:
secure: aICwu3gJ1sJ1QVCD3elpg+Jxzt4P+Zj1uoh5f0sOwnjDNIZ4FwUT1cMrWloP8P2KD0iyCOawuZER27o/kQ21oX2OxHvQbYPReA2znLm7lHzCmypAAOHPxpgnQ4rMGHHJXd+OsxtdclGs67c+EbdBfoRRbK400Qz/vjPJEDeH4mh02ZHC2nw4Nk/wV4jjBIkIt9dGEx6NgOA17FCMa3MaPHlHeFIzU7IfTlDHbS0mCCYbg/wafWBWcbGqtZLWAYtJDmfjrAStmDLdAX5J5PsB7taGSGPZHmPmpGoVgrKt/tb9Xz1rFBGslTpGROOiO4CiMAvkEKFn8mxrBGjfSBqp7Dp3eeSalKXB1DJAbEXx2sEbMcvmnoR9o43meaAn+ZRts8lRL8S/skBloe6Nk8bx3NlJCGB9WPK1G56b7c/fZnJxQbrCw6hxDfbZwm8S2YPviFTo/z1BfZDhRsL74reKsN2kgnGo2W/k38vvzIpsssQ9DHN1b0TLCxolCNPtQ7oHcQ1ohcjP2UgYXk0FhqDoL+9LQva/DU4N9sKH0UbAaqsMVSErLeG8A4aauuFcVrWRBaDYyTag4dQqzTulEy7iru2kDDIBgSQ1gMW/yoBOIPK4oi6MtbTf1X39fzXFLS1cDd3LW61yAu3YrbjAetpfx2frIvrRAiL9TxWA1gnrs5o=
dist: ubuntu/xenial
package_glob: '*.deb'
skip_cleanup: true
on:
go: "1.12.x"
repo: golang-migrate/migrate
tags: true
- provider: packagecloud
repository: migrate
username: golang-migrate
token:
secure: aICwu3gJ1sJ1QVCD3elpg+Jxzt4P+Zj1uoh5f0sOwnjDNIZ4FwUT1cMrWloP8P2KD0iyCOawuZER27o/kQ21oX2OxHvQbYPReA2znLm7lHzCmypAAOHPxpgnQ4rMGHHJXd+OsxtdclGs67c+EbdBfoRRbK400Qz/vjPJEDeH4mh02ZHC2nw4Nk/wV4jjBIkIt9dGEx6NgOA17FCMa3MaPHlHeFIzU7IfTlDHbS0mCCYbg/wafWBWcbGqtZLWAYtJDmfjrAStmDLdAX5J5PsB7taGSGPZHmPmpGoVgrKt/tb9Xz1rFBGslTpGROOiO4CiMAvkEKFn8mxrBGjfSBqp7Dp3eeSalKXB1DJAbEXx2sEbMcvmnoR9o43meaAn+ZRts8lRL8S/skBloe6Nk8bx3NlJCGB9WPK1G56b7c/fZnJxQbrCw6hxDfbZwm8S2YPviFTo/z1BfZDhRsL74reKsN2kgnGo2W/k38vvzIpsssQ9DHN1b0TLCxolCNPtQ7oHcQ1ohcjP2UgYXk0FhqDoL+9LQva/DU4N9sKH0UbAaqsMVSErLeG8A4aauuFcVrWRBaDYyTag4dQqzTulEy7iru2kDDIBgSQ1gMW/yoBOIPK4oi6MtbTf1X39fzXFLS1cDd3LW61yAu3YrbjAetpfx2frIvrRAiL9TxWA1gnrs5o=
dist: ubuntu/bionic
package_glob: '*.deb'
skip_cleanup: true
on:
go: "1.12.x"
repo: golang-migrate/migrate
tags: true
- provider: packagecloud
repository: migrate
username: golang-migrate
token:
secure: aICwu3gJ1sJ1QVCD3elpg+Jxzt4P+Zj1uoh5f0sOwnjDNIZ4FwUT1cMrWloP8P2KD0iyCOawuZER27o/kQ21oX2OxHvQbYPReA2znLm7lHzCmypAAOHPxpgnQ4rMGHHJXd+OsxtdclGs67c+EbdBfoRRbK400Qz/vjPJEDeH4mh02ZHC2nw4Nk/wV4jjBIkIt9dGEx6NgOA17FCMa3MaPHlHeFIzU7IfTlDHbS0mCCYbg/wafWBWcbGqtZLWAYtJDmfjrAStmDLdAX5J5PsB7taGSGPZHmPmpGoVgrKt/tb9Xz1rFBGslTpGROOiO4CiMAvkEKFn8mxrBGjfSBqp7Dp3eeSalKXB1DJAbEXx2sEbMcvmnoR9o43meaAn+ZRts8lRL8S/skBloe6Nk8bx3NlJCGB9WPK1G56b7c/fZnJxQbrCw6hxDfbZwm8S2YPviFTo/z1BfZDhRsL74reKsN2kgnGo2W/k38vvzIpsssQ9DHN1b0TLCxolCNPtQ7oHcQ1ohcjP2UgYXk0FhqDoL+9LQva/DU4N9sKH0UbAaqsMVSErLeG8A4aauuFcVrWRBaDYyTag4dQqzTulEy7iru2kDDIBgSQ1gMW/yoBOIPK4oi6MtbTf1X39fzXFLS1cDd3LW61yAu3YrbjAetpfx2frIvrRAiL9TxWA1gnrs5o=
dist: ubuntu/cosmic
package_glob: '*.deb'
skip_cleanup: true
on:
go: "1.12.x"
repo: golang-migrate/migrate
tags: true
- provider: packagecloud
repository: migrate
username: golang-migrate
token:
secure: aICwu3gJ1sJ1QVCD3elpg+Jxzt4P+Zj1uoh5f0sOwnjDNIZ4FwUT1cMrWloP8P2KD0iyCOawuZER27o/kQ21oX2OxHvQbYPReA2znLm7lHzCmypAAOHPxpgnQ4rMGHHJXd+OsxtdclGs67c+EbdBfoRRbK400Qz/vjPJEDeH4mh02ZHC2nw4Nk/wV4jjBIkIt9dGEx6NgOA17FCMa3MaPHlHeFIzU7IfTlDHbS0mCCYbg/wafWBWcbGqtZLWAYtJDmfjrAStmDLdAX5J5PsB7taGSGPZHmPmpGoVgrKt/tb9Xz1rFBGslTpGROOiO4CiMAvkEKFn8mxrBGjfSBqp7Dp3eeSalKXB1DJAbEXx2sEbMcvmnoR9o43meaAn+ZRts8lRL8S/skBloe6Nk8bx3NlJCGB9WPK1G56b7c/fZnJxQbrCw6hxDfbZwm8S2YPviFTo/z1BfZDhRsL74reKsN2kgnGo2W/k38vvzIpsssQ9DHN1b0TLCxolCNPtQ7oHcQ1ohcjP2UgYXk0FhqDoL+9LQva/DU4N9sKH0UbAaqsMVSErLeG8A4aauuFcVrWRBaDYyTag4dQqzTulEy7iru2kDDIBgSQ1gMW/yoBOIPK4oi6MtbTf1X39fzXFLS1cDd3LW61yAu3YrbjAetpfx2frIvrRAiL9TxWA1gnrs5o=
dist: debian/stretch
package_glob: '*.deb'
skip_cleanup: true
on:
go: "1.12.x"
repo: golang-migrate/migrate
tags: true
- provider: packagecloud
repository: migrate
username: golang-migrate
token:
secure: aICwu3gJ1sJ1QVCD3elpg+Jxzt4P+Zj1uoh5f0sOwnjDNIZ4FwUT1cMrWloP8P2KD0iyCOawuZER27o/kQ21oX2OxHvQbYPReA2znLm7lHzCmypAAOHPxpgnQ4rMGHHJXd+OsxtdclGs67c+EbdBfoRRbK400Qz/vjPJEDeH4mh02ZHC2nw4Nk/wV4jjBIkIt9dGEx6NgOA17FCMa3MaPHlHeFIzU7IfTlDHbS0mCCYbg/wafWBWcbGqtZLWAYtJDmfjrAStmDLdAX5J5PsB7taGSGPZHmPmpGoVgrKt/tb9Xz1rFBGslTpGROOiO4CiMAvkEKFn8mxrBGjfSBqp7Dp3eeSalKXB1DJAbEXx2sEbMcvmnoR9o43meaAn+ZRts8lRL8S/skBloe6Nk8bx3NlJCGB9WPK1G56b7c/fZnJxQbrCw6hxDfbZwm8S2YPviFTo/z1BfZDhRsL74reKsN2kgnGo2W/k38vvzIpsssQ9DHN1b0TLCxolCNPtQ7oHcQ1ohcjP2UgYXk0FhqDoL+9LQva/DU4N9sKH0UbAaqsMVSErLeG8A4aauuFcVrWRBaDYyTag4dQqzTulEy7iru2kDDIBgSQ1gMW/yoBOIPK4oi6MtbTf1X39fzXFLS1cDd3LW61yAu3YrbjAetpfx2frIvrRAiL9TxWA1gnrs5o=
dist: debian/buster
package_glob: '*.deb'
skip_cleanup: true
on:
go: "1.12.x"
repo: golang-migrate/migrate
tags: true
- provider: script
script: ./docker-deploy.sh
skip_cleanup: true
on:
go: "1.12.x"
repo: golang-migrate/migrate
tags: true

24
vendor/github.com/status-im/migrate/v4/CONTRIBUTING.md generated vendored Normal file

@@ -0,0 +1,24 @@
# Development, Testing and Contributing
1. Make sure you have a running Docker daemon
(Install for [MacOS](https://docs.docker.com/docker-for-mac/))
1. Use a version of Go that supports [modules](https://golang.org/cmd/go/#hdr-Modules__module_versions__and_more) (e.g. Go 1.11+)
1. Fork this repo and `git clone` somewhere to `$GOPATH/src/github.com/golang-migrate/migrate`
* Ensure that [Go modules are enabled](https://golang.org/cmd/go/#hdr-Preliminary_module_support) (e.g. your repo path or the `GO111MODULE` environment variable are set correctly)
1. Install [golangci-lint](https://github.com/golangci/golangci-lint#install)
1. Run the linter: `golangci-lint run`
1. Confirm tests are working: `make test-short`
1. Write awesome code ...
1. `make test` to run all tests against all database versions
1. Push code and open Pull Request
Some more helpful commands:
* You can specify which database/source tests to run:
`make test-short SOURCE='file go_bindata' DATABASE='postgres cassandra'`
* After `make test`, run `make html-coverage` which opens a shiny test coverage overview.
* `make build-cli` builds the CLI in directory `cli/build/`.
* `make list-external-deps` lists all external dependencies for each package
* `make docs && make open-docs` opens godoc in your browser, `make kill-docs` kills the godoc server.
Repeatedly call `make docs` to refresh the server.
* Set the `DOCKER_API_VERSION` environment variable to the latest supported version if you get errors regarding the docker client API version being too new.

23
vendor/github.com/status-im/migrate/v4/Dockerfile generated vendored Normal file

@@ -0,0 +1,23 @@
FROM golang:1.12-alpine3.10 AS downloader
ARG VERSION
RUN apk add --no-cache git gcc musl-dev
WORKDIR /go/src/github.com/golang-migrate/migrate
COPY . ./
ENV GO111MODULE=on
ENV DATABASES="postgres mysql redshift cassandra spanner cockroachdb clickhouse mongodb sqlserver"
ENV SOURCES="file go_bindata github github_ee aws_s3 google_cloud_storage godoc_vfs gitlab"
RUN go build -a -o build/migrate.linux-386 -ldflags="-s -w -X main.Version=${VERSION}" -tags "$DATABASES $SOURCES" ./cmd/migrate
FROM alpine:3.10
RUN apk add --no-cache ca-certificates
COPY --from=downloader /go/src/github.com/golang-migrate/migrate/build/migrate.linux-386 /migrate
ENTRYPOINT ["/migrate"]
CMD ["--help"]

76
vendor/github.com/status-im/migrate/v4/FAQ.md generated vendored Normal file

@@ -0,0 +1,76 @@
# FAQ
#### How is the code base structured?
```
/          package migrate (the heart of everything)
/cli       the CLI wrapper
/database  database driver and sub directories have the actual driver implementations
/source    source driver and sub directories have the actual driver implementations
```
#### Why is there no `source/driver.go:Last()`?
It's not needed. And unless the source has a "native" way to read a directory in reversed order,
it might be expensive to do a full directory scan in order to get the last element.
#### What is a NilMigration? NilVersion?
NilMigration defines a migration without a body. NilVersion is defined as const -1.
#### What is the difference between uint(version) and int(targetVersion)?
version refers to an existing migration version coming from a source and therefore can never be negative.
targetVersion can either be a version OR represent a NilVersion, which equals -1.
#### What's the difference between Next/Previous and Up/Down?
```
1_first_migration.up.extension next -> 2_second_migration.up.extension ...
1_first_migration.down.extension <- previous 2_second_migration.down.extension ...
```
#### Why two separate files (up and down) for a migration?
It makes all of our lives easier. No new markup/syntax to learn for users
and existing database utility tools continue to work as expected.
#### How many migrations can migrate handle?
Whatever the maximum positive signed integer value is for your platform.
For 32-bit platforms that would be 2,147,483,647 migrations. Migrate only keeps references to
the currently run and pre-fetched migrations in memory. Please note that some
source drivers need to build a full "directory" tree first, which increases
memory consumption.
#### Are the table tests in migrate_test.go bloated?
Yes and no. There are duplicate test cases for sure but they don't hurt here. In fact
the tests are very visual now and might help new users understand expected behaviors quickly.
Migrate from version x to y and y is the last migration? Just check out the test for
that particular case and know what's going on instantly.
#### What is Docker being used for?
Only for testing. See [testing/docker.go](testing/docker.go)
#### Why not just use docker-compose?
It doesn't give us enough runtime control for testing. We want to be able to bring up containers fast
and whenever we want, not just once at the beginning of all tests.
#### Can I maintain my driver in my own repository?
Yes, technically that's possible. We want to encourage you to contribute your driver to this repository, though.
The driver's functionality is dictated by migrate's interfaces. That means there should really
just be one driver for a database/source. We want to prevent a future where several drivers that do the exact same thing,
just implemented a bit differently, co-exist somewhere on GitHub. If users have to do research first to find the
"best" available driver for a database in order to get started, we would have failed as an open source community.
#### Can I mix multiple sources during a batch of migrations?
No.
#### What does "dirty" database mean?
Before a migration runs, each database sets a dirty flag. Execution stops if a migration fails and the dirty state persists,
which prevents attempts to run more migrations on top of a failed migration. You need to manually fix the error
and then "force" the expected version.
#### What happens if two programs try and update the database at the same time?
Database-specific locking features are used by *some* database drivers to prevent multiple instances of migrate from running migrations against
the same database at the same time. For example, the MySQL driver uses the `GET_LOCK` function, while the Postgres driver uses
the `pg_advisory_lock` function.
#### Do I need to create a table for tracking migration version used?
No, it is done automatically.
#### Can I use migrate with a non-Go project?
Yes, you can use the migrate CLI in a non-Go project, but there are probably other libraries/frameworks available that offer better test and deploy integrations in that language/framework.
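The "force" step mentioned in the dirty-database answer above is a single CLI call; a sketch using the CLI's `force` command (the version number 15 is illustrative):

```
migrate -path db/migrations -database "$DATABASE_URL" force 15
```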

43
vendor/github.com/status-im/migrate/v4/GETTING_STARTED.md generated vendored Normal file

@@ -0,0 +1,43 @@
# Getting started
Before you start, you should understand the concept of forward/up and reverse/down database migrations.
Configure a database for your application. Make sure that your database driver is supported [here](README.md#databases)
## Create migrations
Create some migrations using the migrate CLI. Here is an example:
```
migrate create -ext sql -dir db/migrations -seq create_users_table
```
Once you create your files, you should fill them.
**IMPORTANT:** In a project developed by more than one person there is a chance of migration inconsistency - e.g. two developers can create conflicting migrations, and the developer who created their migration later gets it merged to the repository first.
Developers and teams should keep an eye on such cases (especially during code review).
[Here](https://github.com/golang-migrate/migrate/issues/179#issuecomment-475821264) is the issue summary if you would like to read more.
Consider making your migrations idempotent - we can run the same SQL code twice in a row with the same result. This makes our migrations more robust. On the other hand, it gives slightly less control over the database schema - e.g. let's say you forgot to drop the table in the down migration. You run the down migration - the table is still there. When you run the up migration again - `CREATE TABLE` would return an error, helping you find the issue in the down migration, while `CREATE TABLE IF NOT EXISTS` would not. Use those conditions wisely.
In case you would like to run several commands/queries in one migration, you should wrap them in a transaction (if your database supports it).
This way, if one of the commands fails, the database will remain unchanged.
## Run migrations
Run your migrations through the CLI or your app and check if they applied the expected changes.
Just to give you an idea:
```
migrate -database YOUR_DATABASE_URL -path PATH_TO_YOUR_MIGRATIONS up
```
Just add the code to your app and you're ready to go!
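For the "add the code to your app" step, a minimal library-form sketch. The import paths follow this vendored fork, while `migrate.New`, `Up` and `ErrNoChange` are the upstream golang-migrate API:

```go
package main

import (
	"log"

	"github.com/status-im/migrate/v4"
	// Blank imports register the database and source drivers.
	_ "github.com/status-im/migrate/v4/database/postgres"
	_ "github.com/status-im/migrate/v4/source/file"
)

func main() {
	m, err := migrate.New(
		"file://db/migrations",
		"postgres://localhost:5432/app?sslmode=disable")
	if err != nil {
		log.Fatal(err)
	}
	// Apply all pending up migrations; ErrNoChange means we were already current.
	if err := m.Up(); err != nil && err != migrate.ErrNoChange {
		log.Fatal(err)
	}
}
```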
Before committing your migrations you should run them up, down, and then up again to see if the migrations work properly both ways.
(e.g. if you created a table in a migration but the reverse migration did not delete it, you will encounter an error when running the forward migration again)
It's also worth checking your migrations in a separate, containerized environment. You can find some tools at the end of this document.
**IMPORTANT:** If you would like to run multiple instances of your app on different machines be sure to use a database that supports locking when running migrations. Otherwise you may encounter issues.
## Further reading:
- [PostgreSQL tutorial](database/postgres/TUTORIAL.md)
- [Best practices](MIGRATIONS.md)
- [FAQ](FAQ.md)
- Tools for testing your migrations in a container:
- https://github.com/dhui/dktest
- https://github.com/ory/dockertest

28
vendor/github.com/status-im/migrate/v4/LICENSE generated vendored Normal file

@@ -0,0 +1,28 @@
The MIT License (MIT)
Original Work
Copyright (c) 2016 Matthias Kadenbach
https://github.com/mattes/migrate
Modified Work
Copyright (c) 2018 Dale Hui
https://github.com/golang-migrate/migrate
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.

86
vendor/github.com/status-im/migrate/v4/MIGRATIONS.md generated vendored Normal file

@@ -0,0 +1,86 @@
# Migrations
## Migration Filename Format
A single logical migration is represented as two separate migration files, one
to migrate "up" to the specified version from the previous version, and a second
to migrate back "down" to the previous version. These migrations can be provided
by any one of the supported [migration sources](./README.md#migration-sources).
The ordering and direction of the migration files is determined by the filenames
used for them. `migrate` expects the filenames of migrations to have the format:
{version}_{title}.up.{extension}
{version}_{title}.down.{extension}
The `title` of each migration is unused, and is only for readability. Similarly,
the `extension` of the migration files is not checked by the library, and should
be an appropriate format for the database in use (`.sql` for SQL variants, for
instance).
Versions of migrations may be represented as any 64-bit unsigned integer.
All migrations are applied upward in order of increasing version number, and
downward by decreasing version number.
Common versioning schemes include incrementing integers:
1_initialize_schema.down.sql
1_initialize_schema.up.sql
2_add_table.down.sql
2_add_table.up.sql
...
Or timestamps at an appropriate resolution:
1500360784_initialize_schema.down.sql
1500360784_initialize_schema.up.sql
1500445949_add_table.down.sql
1500445949_add_table.up.sql
...
But any scheme resulting in distinct, incrementing integers as versions is valid.
It is suggested that the version number of corresponding `up` and `down` migration
files be equivalent for clarity, but they are allowed to differ so long as the
relative ordering of the migrations is preserved.
The migration files are permitted to be "empty", in the event that a migration
is a no-op or is irreversible. It is recommended to still include both migration
files by making the whole migration file consist of a comment.
If your database does not support comments, then deleting the migration file will also work.
Note, an actual empty file (e.g. a 0 byte file) may cause issues with your database since migrate
will attempt to run an empty query. In this case, deleting the migration file will also work.
For the rationale behind this behavior see:
[#244 (comment)](https://github.com/golang-migrate/migrate/issues/244#issuecomment-510758270)
## Migration Content Format
The format of the migration files themselves varies between database systems.
Different databases have different semantics around schema changes and when and
how they are allowed to occur
(for instance, [if schema changes can occur within a transaction](https://wiki.postgresql.org/wiki/Transactional_DDL_in_PostgreSQL:_A_Competitive_Analysis)).
As such, the `migrate` library has little to no checking around the format of
migration sources. The migration files are generally processed directly by the
drivers as raw operations.
## Reversibility of Migrations
Best practice for writing schema migrations is that all migrations should be
reversible. It should, in theory, be possible to run migrations down and back up
through any and all versions, with the state being fully cleaned and recreated
by doing so.
By adhering to this recommended practice, development and deployment of new code
is cleaner and easier (cleaning database state for a new feature should be as
easy as migrating down to a prior version, and back up to the latest).
As opposed to some other migration libraries, `migrate` represents up and down
migrations as separate files. This prevents any non-standard file syntax from
being introduced which may result in unintended behavior or errors, depending
on what database is processing the file.
While it is technically possible for an up or down migration to exist on its own
without an equivalently versioned counterpart, it is strongly recommended to
always include a down migration which cleans up the state of the corresponding
up migration.

105
vendor/github.com/status-im/migrate/v4/Makefile generated vendored Normal file
View File

@@ -0,0 +1,105 @@
SOURCE ?= file go_bindata github github_ee aws_s3 google_cloud_storage godoc_vfs gitlab
DATABASE ?= postgres mysql redshift cassandra spanner cockroachdb clickhouse mongodb sqlserver
VERSION ?= $(shell git describe --tags 2>/dev/null | cut -c 2-)
TEST_FLAGS ?=
REPO_OWNER ?= $(shell cd .. && basename "$$(pwd)")
COVERAGE_DIR ?= .coverage
build-cli: clean
-mkdir ./cli/build
cd ./cmd/migrate && CGO_ENABLED=0 GOOS=linux GOARCH=amd64 go build -a -o ../../cli/build/migrate.linux-amd64 -ldflags='-X main.Version=$(VERSION) -extldflags "-static"' -tags '$(DATABASE) $(SOURCE)' .
cd ./cmd/migrate && CGO_ENABLED=0 GOOS=darwin GOARCH=amd64 go build -a -o ../../cli/build/migrate.darwin-amd64 -ldflags='-X main.Version=$(VERSION) -extldflags "-static"' -tags '$(DATABASE) $(SOURCE)' .
cd ./cmd/migrate && CGO_ENABLED=0 GOOS=windows GOARCH=amd64 go build -a -o ../../cli/build/migrate.windows-amd64.exe -ldflags='-X main.Version=$(VERSION) -extldflags "-static"' -tags '$(DATABASE) $(SOURCE)' .
cd ./cli/build && find . -name 'migrate*' | xargs -I{} tar czf {}.tar.gz {}
cd ./cli/build && shasum -a 256 * > sha256sum.txt
cat ./cli/build/sha256sum.txt
clean:
-rm -r ./cli/build
test-short:
make test-with-flags --ignore-errors TEST_FLAGS='-short'
test:
@-rm -r $(COVERAGE_DIR)
@mkdir $(COVERAGE_DIR)
make test-with-flags TEST_FLAGS='-v -race -covermode atomic -coverprofile $$(COVERAGE_DIR)/combined.txt -bench=. -benchmem -timeout 20m'
test-with-flags:
@echo SOURCE: $(SOURCE)
@echo DATABASE: $(DATABASE)
@go test $(TEST_FLAGS) ./...
kill-orphaned-docker-containers:
docker rm -f $(shell docker ps -aq --filter label=migrate_test)
html-coverage:
go tool cover -html=$(COVERAGE_DIR)/combined.txt
list-external-deps:
$(call external_deps,'.')
$(call external_deps,'./cli/...')
$(call external_deps,'./testing/...')
$(foreach v, $(SOURCE), $(call external_deps,'./source/$(v)/...'))
$(call external_deps,'./source/testing/...')
$(call external_deps,'./source/stub/...')
$(foreach v, $(DATABASE), $(call external_deps,'./database/$(v)/...'))
$(call external_deps,'./database/testing/...')
$(call external_deps,'./database/stub/...')
restore-import-paths:
find . -name '*.go' -type f -execdir sed -i '' s%\"github.com/$(REPO_OWNER)/migrate%\"github.com/mattes/migrate%g '{}' \;
rewrite-import-paths:
find . -name '*.go' -type f -execdir sed -i '' s%\"github.com/mattes/migrate%\"github.com/$(REPO_OWNER)/migrate%g '{}' \;
# example: fswatch -0 --exclude .godoc.pid --event Updated . | xargs -0 -n1 -I{} make docs
docs:
-make kill-docs
nohup godoc -play -http=127.0.0.1:6064 </dev/null >/dev/null 2>&1 & echo $$! > .godoc.pid
cat .godoc.pid
kill-docs:
@cat .godoc.pid
kill -9 $$(cat .godoc.pid)
rm .godoc.pid
open-docs:
open http://localhost:6064/pkg/github.com/$(REPO_OWNER)/migrate
# example: make release V=0.0.0
release:
git tag v$(V)
@read -p "Press enter to confirm and push to origin ..." && git push origin v$(V)
define external_deps
@echo '-- $(1)'; go list -f '{{join .Deps "\n"}}' $(1) | grep -v github.com/$(REPO_OWNER)/migrate | xargs go list -f '{{if not .Standard}}{{.ImportPath}}{{end}}'
endef
.PHONY: build-cli clean test-short test test-with-flags html-coverage \
restore-import-paths rewrite-import-paths list-external-deps release \
docs kill-docs open-docs kill-orphaned-docker-containers
SHELL = /bin/bash
RAND = $(shell echo $$RANDOM)

181
vendor/github.com/status-im/migrate/v4/README.md generated vendored Normal file
View File

@@ -0,0 +1,181 @@
[![Build Status](https://img.shields.io/travis/com/golang-migrate/migrate/master.svg)](https://travis-ci.com/golang-migrate/migrate)
[![GoDoc](https://godoc.org/github.com/golang-migrate/migrate?status.svg)](https://godoc.org/github.com/golang-migrate/migrate)
[![Coverage Status](https://img.shields.io/coveralls/github/golang-migrate/migrate/master.svg)](https://coveralls.io/github/golang-migrate/migrate?branch=master)
[![packagecloud.io](https://img.shields.io/badge/deb-packagecloud.io-844fec.svg)](https://packagecloud.io/golang-migrate/migrate?filter=debs)
[![Docker Pulls](https://img.shields.io/docker/pulls/migrate/migrate.svg)](https://hub.docker.com/r/migrate/migrate/)
![Supported Go Versions](https://img.shields.io/badge/Go-1.12%2C%201.13-lightgrey.svg)
[![GitHub Release](https://img.shields.io/github/release/golang-migrate/migrate.svg)](https://github.com/golang-migrate/migrate/releases)
[![Go Report Card](https://goreportcard.com/badge/github.com/golang-migrate/migrate)](https://goreportcard.com/report/github.com/golang-migrate/migrate)
# migrate
__Database migrations written in Go. Use as [CLI](#cli-usage) or import as [library](#use-in-your-go-project).__
* Migrate reads migrations from [sources](#migration-sources)
and applies them in correct order to a [database](#databases).
* Drivers are "dumb", migrate glues everything together and makes sure the logic is bulletproof.
(Keeps the drivers lightweight, too.)
* Database drivers don't assume things or try to correct user input. When in doubt, fail.
Forked from [mattes/migrate](https://github.com/mattes/migrate)
## Databases
Database drivers run migrations. [Add a new database?](database/driver.go)
* [PostgreSQL](database/postgres)
* [Redshift](database/redshift)
* [Ql](database/ql)
* [Cassandra](database/cassandra)
* [SQLite](database/sqlite3) ([todo #165](https://github.com/mattes/migrate/issues/165))
* [MySQL / MariaDB](database/mysql)
* [Neo4j](database/neo4j) ([todo #167](https://github.com/mattes/migrate/issues/167))
* [MongoDB](database/mongodb)
* [CrateDB](database/crate) ([todo #170](https://github.com/mattes/migrate/issues/170))
* [Shell](database/shell) ([todo #171](https://github.com/mattes/migrate/issues/171))
* [Google Cloud Spanner](database/spanner)
* [CockroachDB](database/cockroachdb)
* [ClickHouse](database/clickhouse)
* [Firebird](database/firebird) ([todo #49](https://github.com/golang-migrate/migrate/issues/49))
* [MS SQL Server](database/sqlserver)
### Database URLs
Database connection strings are specified via URLs. The URL format is driver dependent but generally has the form: `dbdriver://username:password@host:port/dbname?option1=true&option2=false`
Any [reserved URL characters](https://en.wikipedia.org/wiki/Percent-encoding#Percent-encoding_reserved_characters) need to be escaped. Note that the `%` character also [needs to be escaped](https://en.wikipedia.org/wiki/Percent-encoding#Percent-encoding_the_percent_character).
Explicitly, the following characters need to be escaped:
`!`, `#`, `$`, `%`, `&`, `'`, `(`, `)`, `*`, `+`, `,`, `/`, `:`, `;`, `=`, `?`, `@`, `[`, `]`
It's easiest to always run the URL parts of your DB connection URL (e.g. username, password, etc.) through a URL encoder. See the example Python snippets below:
```bash
$ python3 -c 'import urllib.parse; print(urllib.parse.quote(input("String to encode: "), ""))'
String to encode: FAKEpassword!#$%&'()*+,/:;=?@[]
FAKEpassword%21%23%24%25%26%27%28%29%2A%2B%2C%2F%3A%3B%3D%3F%40%5B%5D
$ python2 -c 'import urllib; print urllib.quote(raw_input("String to encode: "), "")'
String to encode: FAKEpassword!#$%&'()*+,/:;=?@[]
FAKEpassword%21%23%24%25%26%27%28%29%2A%2B%2C%2F%3A%3B%3D%3F%40%5B%5D
$
```
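The same escaping can be done in Go with the standard library's `net/url`; a small sketch (Go's userinfo escaping may leave a slightly different set of characters unescaped than the Python snippets above, but the resulting URL is equivalent):
```go
package main

import (
	"fmt"
	"net/url"
)

func main() {
	// url.UserPassword percent-encodes the userinfo section as needed.
	u := &url.URL{
		Scheme:   "postgres",
		User:     url.UserPassword("postgres", "FAKEpassword!#$%&'()*+,/:;=?@[]"),
		Host:     "localhost:5432",
		Path:     "/database",
		RawQuery: "sslmode=disable",
	}
	fmt.Println(u.String()) // prints the connection URL with the password escaped
}
```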
## Migration Sources
Source drivers read migrations from local or remote sources. [Add a new source?](source/driver.go)
* [Filesystem](source/file) - read from filesystem
* [Go-Bindata](source/go_bindata) - read from embedded binary data ([jteeuwen/go-bindata](https://github.com/jteeuwen/go-bindata))
* [Github](source/github) - read from remote Github repositories
* [Github Enterprise](source/github_ee) - read from remote Github Enterprise repositories
* [Gitlab](source/gitlab) - read from remote Gitlab repositories
* [AWS S3](source/aws_s3) - read from Amazon Web Services S3
* [Google Cloud Storage](source/google_cloud_storage) - read from Google Cloud Platform Storage
## CLI usage
* Simple wrapper around this library.
* Handles ctrl+c (SIGINT) gracefully.
* No config search paths, no config files, no magic ENV var injections.
__[CLI Documentation](cmd/migrate)__
### Basic usage
```bash
$ migrate -source file://path/to/migrations -database postgres://localhost:5432/database up 2
```
### Docker usage
```bash
$ docker run -v {{ migration dir }}:/migrations --network host migrate/migrate \
    -path=/migrations/ -database postgres://localhost:5432/database up 2
```
## Use in your Go project
* API is stable and frozen for this release (v3 & v4).
* Uses [Go modules](https://golang.org/cmd/go/#hdr-Modules__module_versions__and_more) to manage dependencies.
* To help prevent database corruption, it supports graceful stops via `GracefulStop chan bool` (see the signal-handling sketch below).
* Bring your own logger.
* Uses `io.Reader` streams internally for low memory overhead.
* Thread-safe and no goroutine leaks.
__[Go Documentation](https://godoc.org/github.com/golang-migrate/migrate)__
```go
import (
"log"
"github.com/golang-migrate/migrate/v4"
_ "github.com/golang-migrate/migrate/v4/database/postgres"
_ "github.com/golang-migrate/migrate/v4/source/github"
)
func main() {
m, err := migrate.New(
"github://mattes:personal-access-token@mattes/migrate_test",
"postgres://localhost:5432/database?sslmode=disable")
if err != nil {
log.Fatal(err)
}
if err := m.Steps(2); err != nil {
log.Fatal(err)
}
}
```
Want to use an existing database client?
```go
import (
"database/sql"
"log"
_ "github.com/lib/pq"
"github.com/golang-migrate/migrate/v4"
"github.com/golang-migrate/migrate/v4/database/postgres"
_ "github.com/golang-migrate/migrate/v4/source/file"
)
func main() {
db, err := sql.Open("postgres", "postgres://localhost:5432/database?sslmode=disable")
if err != nil {
log.Fatal(err)
}
driver, err := postgres.WithInstance(db, &postgres.Config{})
if err != nil {
log.Fatal(err)
}
m, err := migrate.NewWithDatabaseInstance(
"file:///migrations",
"postgres", driver)
if err != nil {
log.Fatal(err)
}
if err := m.Steps(2); err != nil {
log.Fatal(err)
}
}
```
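When embedding the library, the `GracefulStop` channel mentioned above can be wired to SIGINT by hand. A minimal sketch; the signal wiring here is our own, not part of the library:
```go
package main

import (
	"log"
	"os"
	"os/signal"

	"github.com/golang-migrate/migrate/v4"
	_ "github.com/golang-migrate/migrate/v4/database/postgres"
	_ "github.com/golang-migrate/migrate/v4/source/file"
)

func main() {
	m, err := migrate.New("file:///migrations", "postgres://localhost:5432/database?sslmode=disable")
	if err != nil {
		log.Fatal(err)
	}
	// Ask migrate to stop at the next safe point when SIGINT arrives.
	sig := make(chan os.Signal, 1)
	signal.Notify(sig, os.Interrupt)
	go func() {
		<-sig
		m.GracefulStop <- true
	}()
	if err := m.Up(); err != nil && err != migrate.ErrNoChange {
		log.Fatal(err)
	}
}
```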
## Getting started
Go to [getting started](GETTING_STARTED.md)
## Tutorials
- [PostgreSQL](database/postgres/TUTORIAL.md)
(more tutorials to come)
## Migration files
Each migration has an up and down migration. [Why?](FAQ.md#why-two-separate-files-up-and-down-for-a-migration)
```bash
1481574547_create_users_table.up.sql
1481574547_create_users_table.down.sql
```
[Best practices: How to write migrations.](MIGRATIONS.md)
## Versions
Version | Supported? | Import | Notes
--------|------------|--------|------
**master** | :white_check_mark: | `import "github.com/golang-migrate/migrate/v4"` | New features and bug fixes arrive here first
**v4** | :white_check_mark: | `import "github.com/golang-migrate/migrate/v4"` | Used for stable releases
**v3** | :x: | `import "github.com/golang-migrate/migrate"` (with package manager) or `import "gopkg.in/golang-migrate/migrate.v3"` (not recommended) | **DO NOT USE** - No longer supported
## Development and Contributing
Yes, please! [`Makefile`](Makefile) is your friend,
read the [development guide](CONTRIBUTING.md).
Also have a look at the [FAQ](FAQ.md).
---
Looking for alternatives? [https://awesome-go.com/#database](https://awesome-go.com/#database).

View File

@@ -0,0 +1,28 @@
# postgres
`postgres://user:password@host:port/dbname?query` (`postgresql://` works, too)
| URL Query | WithInstance Config | Description |
|------------|---------------------|-------------|
| `x-migrations-table` | `MigrationsTable` | Name of the migrations table |
| `dbname` | `DatabaseName` | The name of the database to connect to |
| `search_path` | | This variable specifies the order in which schemas are searched when an object is referenced by a simple name with no schema specified. |
| `user` | | The user to sign in as |
| `password` | | The user's password |
| `host` | | The host to connect to. Values that start with / are for unix domain sockets. (default is localhost) |
| `port` | | The port to bind to. (default is 5432) |
| `fallback_application_name` | | An application_name to fall back to if one isn't provided. |
| `connect_timeout` | | Maximum wait for connection, in seconds. Zero or not specified means wait indefinitely. |
| `sslcert` | | Cert file location. The file must contain PEM encoded data. |
| `sslkey` | | Key file location. The file must contain PEM encoded data. |
| `sslrootcert` | | The location of the root certificate file. The file must contain PEM encoded data. |
| `sslmode` | | Whether or not to use SSL (disable\|require\|verify-ca\|verify-full) |
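For example, a custom migrations table can be selected through the URL. A sketch with placeholder credentials; `x-migrations-table` is consumed by this driver, while the remaining parameters are passed through to the underlying `lib/pq` connection:
```go
package main

import (
	"log"

	"github.com/golang-migrate/migrate/v4"
	_ "github.com/golang-migrate/migrate/v4/database/postgres"
	_ "github.com/golang-migrate/migrate/v4/source/file"
)

func main() {
	m, err := migrate.New(
		"file://db/migrations",
		"postgres://user:password@localhost:5432/dbname?sslmode=disable&x-migrations-table=my_migrations")
	if err != nil {
		log.Fatal(err)
	}
	if err := m.Up(); err != nil && err != migrate.ErrNoChange {
		log.Fatal(err)
	}
}
```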
## Upgrading from v1
1. Write down the current migration version from schema_migrations
2. `DROP TABLE schema_migrations`
3. Wrap your existing migrations in transactions ([BEGIN/COMMIT](https://www.postgresql.org/docs/current/static/transaction-iso.html)) if you use multiple statements within one migration.
4. Download and install the latest migrate version.
5. Force the current migration version with `migrate force <current_version>`.

View File

@@ -0,0 +1,148 @@
# PostgreSQL tutorial for beginners
## Create/configure database
For the purpose of this tutorial let's create a PostgreSQL database called `example`.
Our user here is `postgres`, the password is `password`, and the host is `localhost`.
```
psql -h localhost -U postgres -w -c "create database example;"
```
When using the Migrate CLI we need to pass the database URL. Let's export it to a variable for convenience:
```
export POSTGRESQL_URL=postgres://postgres:password@localhost:5432/example?sslmode=disable
```
`sslmode=disable` means that the connection with our database will not be encrypted. Enabling it is left as an exercise.
You can find further description of database URLs [here](README.md#database-urls).
## Create migrations
Let's create a table called `users`:
```
migrate create -ext sql -dir db/migrations -seq create_users_table
```
If there were no errors, we should have two files available in the `db/migrations` folder:
- 000001_create_users_table.down.sql
- 000001_create_users_table.up.sql
Note the `sql` extension that we provided.
In the `.up.sql` file let's create the table:
```
CREATE TABLE IF NOT EXISTS users(
user_id serial PRIMARY KEY,
username VARCHAR (50) UNIQUE NOT NULL,
password VARCHAR (50) NOT NULL,
email VARCHAR (300) UNIQUE NOT NULL
);
```
And in the `.down.sql` let's delete it:
```
DROP TABLE IF EXISTS users;
```
By adding `IF EXISTS/IF NOT EXISTS` we are making the migrations idempotent - you can read more about idempotency in [getting started](GETTING_STARTED.md#create-migrations).
## Run migrations
```
migrate -database ${POSTGRESQL_URL} -path db/migrations up
```
Let's check if the table was created properly by running `psql example -c "\d users"`.
The output you should see:
```
Table "public.users"
Column | Type | Modifiers
----------+------------------------+---------------------------------------------------------
user_id | integer | not null default nextval('users_user_id_seq'::regclass)
username | character varying(50) | not null
password | character varying(50) | not null
email | character varying(300) | not null
Indexes:
"users_pkey" PRIMARY KEY, btree (user_id)
"users_email_key" UNIQUE CONSTRAINT, btree (email)
"users_username_key" UNIQUE CONSTRAINT, btree (username)
```
Great! Now let's check if running the reverse migration also works:
```
migrate -database ${POSTGRESQL_URL} -path db/migrations down
```
Make sure to check if your database changed as expected in this case as well.
## Database transactions
To demonstrate the use of database transactions, let's create another set of migrations by running:
```
migrate create -ext sql -dir db/migrations -seq add_mood_to_users
```
Again, it should create two migration files for us:
- 000002_add_mood_to_users.down.sql
- 000002_add_mood_to_users.up.sql
In Postgres, when we want our queries to run in a transaction, we need to wrap them with `BEGIN` and `COMMIT` commands.
In our example, we are going to add a column to our database that can only accept enumerable values or NULL.
Migration up:
```
BEGIN;
CREATE TYPE enum_mood AS ENUM (
'happy',
'sad',
'neutral'
);
ALTER TABLE users ADD COLUMN mood enum_mood;
COMMIT;
```
Migration down:
```
BEGIN;
ALTER TABLE users DROP COLUMN mood;
DROP TYPE enum_mood;
COMMIT;
```
Now we can run our new migration and check the database:
```
migrate -database ${POSTGRESQL_URL} -path db/migrations up
psql example -c "\d users"
```
Expected output:
```
Table "public.users"
Column | Type | Modifiers
----------+------------------------+---------------------------------------------------------
user_id | integer | not null default nextval('users_user_id_seq'::regclass)
username | character varying(50) | not null
password | character varying(50) | not null
email | character varying(300) | not null
mood | enum_mood |
Indexes:
"users_pkey" PRIMARY KEY, btree (user_id)
"users_email_key" UNIQUE CONSTRAINT, btree (email)
"users_username_key" UNIQUE CONSTRAINT, btree (username)
```
## Optional: Run migrations within your Go app
Here is a very simple app running migrations for the above configuration:
```
package main
import (
"log"
"github.com/golang-migrate/migrate/v4"
_ "github.com/golang-migrate/migrate/v4/database/postgres"
_ "github.com/golang-migrate/migrate/v4/source/file"
)
func main() {
m, err := migrate.New(
"file://db/migrations",
"postgres://postgres:password@localhost:5432/example?sslmode=disable")
if err != nil {
log.Fatal(err)
}
// migrate.ErrNoChange just means there was nothing new to apply.
if err := m.Up(); err != nil && err != migrate.ErrNoChange {
log.Fatal(err)
}
}
```
You can find more details [here](README.md#use-in-your-go-project).

View File

@@ -0,0 +1,362 @@
// +build go1.9
package postgres
import (
"context"
"database/sql"
"fmt"
"io"
"io/ioutil"
nurl "net/url"
"strconv"
"strings"
"github.com/golang-migrate/migrate/v4"
"github.com/golang-migrate/migrate/v4/database"
multierror "github.com/hashicorp/go-multierror"
"github.com/lib/pq"
)
func init() {
db := Postgres{}
database.Register("postgres", &db)
database.Register("postgresql", &db)
}
var DefaultMigrationsTable = "schema_migrations"
var (
ErrNilConfig = fmt.Errorf("no config")
ErrNoDatabaseName = fmt.Errorf("no database name")
ErrNoSchema = fmt.Errorf("no schema")
ErrDatabaseDirty = fmt.Errorf("database is dirty")
)
type Config struct {
MigrationsTable string
DatabaseName string
SchemaName string
}
type Postgres struct {
// Locking and unlocking need to use the same connection
conn *sql.Conn
db *sql.DB
isLocked bool
// Open and WithInstance need to guarantee that config is never nil
config *Config
}
func WithInstance(instance *sql.DB, config *Config) (database.Driver, error) {
if config == nil {
return nil, ErrNilConfig
}
if err := instance.Ping(); err != nil {
return nil, err
}
query := `SELECT CURRENT_DATABASE()`
var databaseName string
if err := instance.QueryRow(query).Scan(&databaseName); err != nil {
return nil, &database.Error{OrigErr: err, Query: []byte(query)}
}
if len(databaseName) == 0 {
return nil, ErrNoDatabaseName
}
config.DatabaseName = databaseName
query = `SELECT CURRENT_SCHEMA()`
var schemaName string
if err := instance.QueryRow(query).Scan(&schemaName); err != nil {
return nil, &database.Error{OrigErr: err, Query: []byte(query)}
}
if len(schemaName) == 0 {
return nil, ErrNoSchema
}
config.SchemaName = schemaName
if len(config.MigrationsTable) == 0 {
config.MigrationsTable = DefaultMigrationsTable
}
conn, err := instance.Conn(context.Background())
if err != nil {
return nil, err
}
px := &Postgres{
conn: conn,
db: instance,
config: config,
}
if err := px.ensureVersionTable(); err != nil {
return nil, err
}
return px, nil
}
func (p *Postgres) Open(url string) (database.Driver, error) {
purl, err := nurl.Parse(url)
if err != nil {
return nil, err
}
db, err := sql.Open("postgres", migrate.FilterCustomQuery(purl).String())
if err != nil {
return nil, err
}
migrationsTable := purl.Query().Get("x-migrations-table")
px, err := WithInstance(db, &Config{
DatabaseName: purl.Path,
MigrationsTable: migrationsTable,
})
if err != nil {
return nil, err
}
return px, nil
}
func (p *Postgres) Close() error {
connErr := p.conn.Close()
dbErr := p.db.Close()
if connErr != nil || dbErr != nil {
return fmt.Errorf("conn: %v, db: %v", connErr, dbErr)
}
return nil
}
// https://www.postgresql.org/docs/9.6/static/explicit-locking.html#ADVISORY-LOCKS
func (p *Postgres) Lock() error {
if p.isLocked {
return database.ErrLocked
}
aid, err := database.GenerateAdvisoryLockId(p.config.DatabaseName, p.config.SchemaName)
if err != nil {
return err
}
// This will wait indefinitely until the lock can be acquired.
query := `SELECT pg_advisory_lock($1)`
if _, err := p.conn.ExecContext(context.Background(), query, aid); err != nil {
return &database.Error{OrigErr: err, Err: "try lock failed", Query: []byte(query)}
}
p.isLocked = true
return nil
}
func (p *Postgres) Unlock() error {
if !p.isLocked {
return nil
}
aid, err := database.GenerateAdvisoryLockId(p.config.DatabaseName, p.config.SchemaName)
if err != nil {
return err
}
query := `SELECT pg_advisory_unlock($1)`
if _, err := p.conn.ExecContext(context.Background(), query, aid); err != nil {
return &database.Error{OrigErr: err, Query: []byte(query)}
}
p.isLocked = false
return nil
}
func (p *Postgres) Run(migration io.Reader) error {
migr, err := ioutil.ReadAll(migration)
if err != nil {
return err
}
// run migration
query := string(migr[:])
if _, err := p.conn.ExecContext(context.Background(), query); err != nil {
if pgErr, ok := err.(*pq.Error); ok {
var line uint
var col uint
var lineColOK bool
if pgErr.Position != "" {
if pos, err := strconv.ParseUint(pgErr.Position, 10, 64); err == nil {
line, col, lineColOK = computeLineFromPos(query, int(pos))
}
}
message := fmt.Sprintf("migration failed: %s", pgErr.Message)
if lineColOK {
message = fmt.Sprintf("%s (column %d)", message, col)
}
if pgErr.Detail != "" {
message = fmt.Sprintf("%s, %s", message, pgErr.Detail)
}
return database.Error{OrigErr: err, Err: message, Query: migr, Line: line}
}
return database.Error{OrigErr: err, Err: "migration failed", Query: migr}
}
return nil
}
func computeLineFromPos(s string, pos int) (line uint, col uint, ok bool) {
// replace crlf with lf
s = strings.Replace(s, "\r\n", "\n", -1)
// pg docs: pos uses index 1 for the first character, and positions are measured in characters not bytes
runes := []rune(s)
if pos > len(runes) {
return 0, 0, false
}
sel := runes[:pos]
line = uint(runesCount(sel, newLine) + 1)
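// col is the 1-based character offset since the last newline (pos itself is 1-based)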
col = uint(pos - 1 - runesLastIndex(sel, newLine))
return line, col, true
}
const newLine = '\n'
func runesCount(input []rune, target rune) int {
var count int
for _, r := range input {
if r == target {
count++
}
}
return count
}
func runesLastIndex(input []rune, target rune) int {
for i := len(input) - 1; i >= 0; i-- {
if input[i] == target {
return i
}
}
return -1
}
func (p *Postgres) SetVersion(version int, dirty bool) error {
tx, err := p.conn.BeginTx(context.Background(), &sql.TxOptions{})
if err != nil {
return &database.Error{OrigErr: err, Err: "transaction start failed"}
}
query := `TRUNCATE ` + pq.QuoteIdentifier(p.config.MigrationsTable)
if _, err := tx.Exec(query); err != nil {
if errRollback := tx.Rollback(); errRollback != nil {
err = multierror.Append(err, errRollback)
}
return &database.Error{OrigErr: err, Query: []byte(query)}
}
if version >= 0 {
query = `INSERT INTO ` + pq.QuoteIdentifier(p.config.MigrationsTable) + ` (version, dirty) VALUES ($1, $2)`
if _, err := tx.Exec(query, version, dirty); err != nil {
if errRollback := tx.Rollback(); errRollback != nil {
err = multierror.Append(err, errRollback)
}
return &database.Error{OrigErr: err, Query: []byte(query)}
}
}
if err := tx.Commit(); err != nil {
return &database.Error{OrigErr: err, Err: "transaction commit failed"}
}
return nil
}
func (p *Postgres) Version() (version int, dirty bool, err error) {
query := `SELECT version, dirty FROM ` + pq.QuoteIdentifier(p.config.MigrationsTable) + ` LIMIT 1`
err = p.conn.QueryRowContext(context.Background(), query).Scan(&version, &dirty)
switch {
case err == sql.ErrNoRows:
return database.NilVersion, false, nil
case err != nil:
if e, ok := err.(*pq.Error); ok {
if e.Code.Name() == "undefined_table" {
return database.NilVersion, false, nil
}
}
return 0, false, &database.Error{OrigErr: err, Query: []byte(query)}
default:
return version, dirty, nil
}
}
func (p *Postgres) Drop() (err error) {
// select all tables in current schema
query := `SELECT table_name FROM information_schema.tables WHERE table_schema=(SELECT current_schema()) AND table_type='BASE TABLE'`
tables, err := p.conn.QueryContext(context.Background(), query)
if err != nil {
return &database.Error{OrigErr: err, Query: []byte(query)}
}
defer func() {
if errClose := tables.Close(); errClose != nil {
err = multierror.Append(err, errClose)
}
}()
// delete one table after another
tableNames := make([]string, 0)
for tables.Next() {
var tableName string
if err := tables.Scan(&tableName); err != nil {
return err
}
if len(tableName) > 0 {
tableNames = append(tableNames, tableName)
}
}
if len(tableNames) > 0 {
// delete one by one ...
for _, t := range tableNames {
query = `DROP TABLE IF EXISTS ` + pq.QuoteIdentifier(t) + ` CASCADE`
if _, err := p.conn.ExecContext(context.Background(), query); err != nil {
return &database.Error{OrigErr: err, Query: []byte(query)}
}
}
}
return nil
}
// ensureVersionTable checks if versions table exists and, if not, creates it.
// Note that this function locks the database, which deviates from the usual
// convention of "caller locks" in the Postgres type.
func (p *Postgres) ensureVersionTable() (err error) {
if err = p.Lock(); err != nil {
return err
}
defer func() {
if e := p.Unlock(); e != nil {
if err == nil {
err = e
} else {
err = multierror.Append(err, e)
}
}
}()
query := `CREATE TABLE IF NOT EXISTS ` + pq.QuoteIdentifier(p.config.MigrationsTable) + ` (version bigint not null primary key, dirty boolean not null)`
if _, err = p.conn.ExecContext(context.Background(), query); err != nil {
return &database.Error{OrigErr: err, Query: []byte(query)}
}
return nil
}

View File

View File

@@ -0,0 +1,215 @@
package sqlcipher
import (
"database/sql"
"fmt"
"io"
"io/ioutil"
nurl "net/url"
"strings"
"github.com/golang-migrate/migrate/v4"
"github.com/golang-migrate/migrate/v4/database"
_ "github.com/mutecomm/go-sqlcipher/v4"
)
func init() {
database.Register("sqlite3", &Sqlite{})
}
var DefaultMigrationsTable = "schema_migrations"
var (
ErrDatabaseDirty = fmt.Errorf("database is dirty")
ErrNilConfig = fmt.Errorf("no config")
ErrNoDatabaseName = fmt.Errorf("no database name")
)
type Config struct {
MigrationsTable string
DatabaseName string
}
type Sqlite struct {
db *sql.DB
isLocked bool
config *Config
}
func WithInstance(instance *sql.DB, config *Config) (database.Driver, error) {
if config == nil {
return nil, ErrNilConfig
}
if err := instance.Ping(); err != nil {
return nil, err
}
if len(config.MigrationsTable) == 0 {
config.MigrationsTable = DefaultMigrationsTable
}
mx := &Sqlite{
db: instance,
config: config,
}
if err := mx.ensureVersionTable(); err != nil {
return nil, err
}
return mx, nil
}
func (m *Sqlite) ensureVersionTable() error {
query := fmt.Sprintf(`
CREATE TABLE IF NOT EXISTS %s (version uint64,dirty bool);
CREATE UNIQUE INDEX IF NOT EXISTS version_unique ON %s (version);
`, m.config.MigrationsTable, m.config.MigrationsTable)
if _, err := m.db.Exec(query); err != nil {
return err
}
return nil
}
func (m *Sqlite) Open(url string) (database.Driver, error) {
purl, err := nurl.Parse(url)
if err != nil {
return nil, err
}
dbfile := strings.Replace(migrate.FilterCustomQuery(purl).String(), "sqlite3://", "", 1)
db, err := sql.Open("sqlite3", dbfile)
if err != nil {
return nil, err
}
migrationsTable := purl.Query().Get("x-migrations-table")
if len(migrationsTable) == 0 {
migrationsTable = DefaultMigrationsTable
}
mx, err := WithInstance(db, &Config{
DatabaseName: purl.Path,
MigrationsTable: migrationsTable,
})
if err != nil {
return nil, err
}
return mx, nil
}
func (m *Sqlite) Close() error {
return m.db.Close()
}
func (m *Sqlite) Drop() error {
query := `SELECT name FROM sqlite_master WHERE type = 'table';`
tables, err := m.db.Query(query)
if err != nil {
return &database.Error{OrigErr: err, Query: []byte(query)}
}
defer tables.Close()
tableNames := make([]string, 0)
for tables.Next() {
var tableName string
if err := tables.Scan(&tableName); err != nil {
return err
}
if len(tableName) > 0 {
tableNames = append(tableNames, tableName)
}
}
if len(tableNames) > 0 {
for _, t := range tableNames {
query := "DROP TABLE " + t
err = m.executeQuery(query)
if err != nil {
return &database.Error{OrigErr: err, Query: []byte(query)}
}
}
if err := m.ensureVersionTable(); err != nil {
return err
}
query := "VACUUM"
_, err = m.db.Query(query)
if err != nil {
return &database.Error{OrigErr: err, Query: []byte(query)}
}
}
return nil
}
func (m *Sqlite) Lock() error {
if m.isLocked {
return database.ErrLocked
}
m.isLocked = true
return nil
}
func (m *Sqlite) Unlock() error {
if !m.isLocked {
return nil
}
m.isLocked = false
return nil
}
func (m *Sqlite) Run(migration io.Reader) error {
migr, err := ioutil.ReadAll(migration)
if err != nil {
return err
}
query := string(migr[:])
return m.executeQuery(query)
}
func (m *Sqlite) executeQuery(query string) error {
tx, err := m.db.Begin()
if err != nil {
return &database.Error{OrigErr: err, Err: "transaction start failed"}
}
if _, err := tx.Exec(query); err != nil {
tx.Rollback()
return &database.Error{OrigErr: err, Query: []byte(query)}
}
if err := tx.Commit(); err != nil {
return &database.Error{OrigErr: err, Err: "transaction commit failed"}
}
return nil
}
func (m *Sqlite) SetVersion(version int, dirty bool) error {
tx, err := m.db.Begin()
if err != nil {
return &database.Error{OrigErr: err, Err: "transaction start failed"}
}
query := "DELETE FROM " + m.config.MigrationsTable
if _, err := tx.Exec(query); err != nil {
return &database.Error{OrigErr: err, Query: []byte(query)}
}
if version >= 0 {
query := fmt.Sprintf(`INSERT INTO %s (version, dirty) VALUES (%d, '%t')`, m.config.MigrationsTable, version, dirty)
if _, err := tx.Exec(query); err != nil {
tx.Rollback()
return &database.Error{OrigErr: err, Query: []byte(query)}
}
}
if err := tx.Commit(); err != nil {
return &database.Error{OrigErr: err, Err: "transaction commit failed"}
}
return nil
}
func (m *Sqlite) Version() (version int, dirty bool, err error) {
query := "SELECT version, dirty FROM " + m.config.MigrationsTable + " LIMIT 1"
err = m.db.QueryRow(query).Scan(&version, &dirty)
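// note: any scan error (e.g. a missing migrations table) is treated as "no version yet" rather than returned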
if err != nil {
return database.NilVersion, false, nil
}
return version, dirty, nil
}

View File

@@ -0,0 +1,5 @@
#!/bin/bash
echo "$DOCKER_PASSWORD" | docker login -u "$DOCKER_USERNAME" --password-stdin && \
docker build --build-arg VERSION="$TRAVIS_TAG" . -t migrate/migrate -t migrate/migrate:"$TRAVIS_TAG" && \
docker push migrate/migrate:"$TRAVIS_TAG" && docker push migrate/migrate

View File

@@ -0,0 +1,25 @@
package url
import (
"errors"
"strings"
)
var errNoScheme = errors.New("no scheme")
var errEmptyURL = errors.New("URL cannot be empty")
// SchemeFromURL returns the scheme from a URL string.
func SchemeFromURL(url string) (string, error) {
if url == "" {
return "", errEmptyURL
}
i := strings.Index(url, ":")
// No : or : is the first character.
if i < 1 {
return "", errNoScheme
}
return url[0:i], nil
}

12
vendor/github.com/status-im/migrate/v4/log.go generated vendored Normal file
View File

@@ -0,0 +1,12 @@
package migrate
// Logger is an interface so you can pass in your own
// logging implementation.
type Logger interface {
// Printf is like fmt.Printf
Printf(format string, v ...interface{})
// Verbose should return true when verbose logging output is wanted
Verbose() bool
}

980
vendor/github.com/status-im/migrate/v4/migrate.go generated vendored Normal file
View File

@@ -0,0 +1,980 @@
// Package migrate reads migrations from sources and runs them against databases.
// Sources are defined by the `source.Driver` and databases by the `database.Driver`
// interface. The driver interfaces are kept "dumb", all migration logic is kept
// in this package.
package migrate
import (
"errors"
"fmt"
"os"
"sync"
"time"
"github.com/hashicorp/go-multierror"
"github.com/golang-migrate/migrate/v4/database"
iurl "github.com/status-im/migrate/v4/internal/url"
"github.com/golang-migrate/migrate/v4/source"
)
// DefaultPrefetchMigrations sets the number of migrations to pre-read
// from the source. This is helpful if the source is remote, but has little
// effect for a local source (i.e. file system).
// Please note that this setting has a major impact on the memory usage,
// since each pre-read migration is buffered in memory. See DefaultBufferSize.
var DefaultPrefetchMigrations = uint(10)
// DefaultLockTimeout sets the max time a database driver has to acquire a lock.
var DefaultLockTimeout = 15 * time.Second
var (
ErrNoChange = errors.New("no change")
ErrNilVersion = errors.New("no migration")
ErrInvalidVersion = errors.New("version must be >= -1")
ErrLocked = errors.New("database locked")
ErrLockTimeout = errors.New("timeout: can't acquire database lock")
)
// ErrShortLimit is an error returned when not enough migrations
// can be returned by a source for a given limit.
type ErrShortLimit struct {
Short uint
}
// Error implements the error interface.
func (e ErrShortLimit) Error() string {
return fmt.Sprintf("limit %v short", e.Short)
}
type ErrDirty struct {
Version int
}
func (e ErrDirty) Error() string {
return fmt.Sprintf("Dirty database version %v. Fix and force version.", e.Version)
}
type Migrate struct {
sourceName string
sourceDrv source.Driver
databaseName string
databaseDrv database.Driver
// Log accepts a Logger interface
Log Logger
// GracefulStop accepts `true` and will stop executing migrations
// as soon as possible at a safe break point, so that the database
// is not corrupted.
GracefulStop chan bool
isLockedMu *sync.Mutex
isGracefulStop bool
isLocked bool
// PrefetchMigrations defaults to DefaultPrefetchMigrations,
// but can be set per Migrate instance.
PrefetchMigrations uint
// LockTimeout defaults to DefaultLockTimeout,
// but can be set per Migrate instance.
LockTimeout time.Duration
}
// New returns a new Migrate instance from a source URL and a database URL.
// The URL scheme is defined by each driver.
func New(sourceURL, databaseURL string) (*Migrate, error) {
m := newCommon()
sourceName, err := iurl.SchemeFromURL(sourceURL)
if err != nil {
return nil, err
}
m.sourceName = sourceName
databaseName, err := iurl.SchemeFromURL(databaseURL)
if err != nil {
return nil, err
}
m.databaseName = databaseName
sourceDrv, err := source.Open(sourceURL)
if err != nil {
return nil, err
}
m.sourceDrv = sourceDrv
databaseDrv, err := database.Open(databaseURL)
if err != nil {
return nil, err
}
m.databaseDrv = databaseDrv
return m, nil
}
// NewWithDatabaseInstance returns a new Migrate instance from a source URL
// and an existing database instance. The source URL scheme is defined by each driver.
// Use any string that can serve as an identifier during logging as databaseName.
// You are responsible for closing the underlying database client if necessary.
func NewWithDatabaseInstance(sourceURL string, databaseName string, databaseInstance database.Driver) (*Migrate, error) {
m := newCommon()
sourceName, err := iurl.SchemeFromURL(sourceURL)
if err != nil {
return nil, err
}
m.sourceName = sourceName
m.databaseName = databaseName
sourceDrv, err := source.Open(sourceURL)
if err != nil {
return nil, err
}
m.sourceDrv = sourceDrv
m.databaseDrv = databaseInstance
return m, nil
}
// NewWithSourceInstance returns a new Migrate instance from an existing source instance
// and a database URL. The database URL scheme is defined by each driver.
// Use any string that can serve as an identifier during logging as sourceName.
// You are responsible for closing the underlying source client if necessary.
func NewWithSourceInstance(sourceName string, sourceInstance source.Driver, databaseURL string) (*Migrate, error) {
m := newCommon()
databaseName, err := iurl.SchemeFromURL(databaseURL)
if err != nil {
return nil, err
}
m.databaseName = databaseName
m.sourceName = sourceName
databaseDrv, err := database.Open(databaseURL)
if err != nil {
return nil, err
}
m.databaseDrv = databaseDrv
m.sourceDrv = sourceInstance
return m, nil
}
// NewWithInstance returns a new Migrate instance from an existing source and
// database instance. Use any string that can serve as an identifier during logging
// as sourceName and databaseName. You are responsible for closing down
// the underlying source and database client if necessary.
func NewWithInstance(sourceName string, sourceInstance source.Driver, databaseName string, databaseInstance database.Driver) (*Migrate, error) {
m := newCommon()
m.sourceName = sourceName
m.databaseName = databaseName
m.sourceDrv = sourceInstance
m.databaseDrv = databaseInstance
return m, nil
}
func newCommon() *Migrate {
return &Migrate{
GracefulStop: make(chan bool, 1),
PrefetchMigrations: DefaultPrefetchMigrations,
LockTimeout: DefaultLockTimeout,
isLockedMu: &sync.Mutex{},
}
}
// Close closes the source and the database.
func (m *Migrate) Close() (source error, database error) {
databaseSrvClose := make(chan error)
sourceSrvClose := make(chan error)
m.logVerbosePrintf("Closing source and database\n")
go func() {
databaseSrvClose <- m.databaseDrv.Close()
}()
go func() {
sourceSrvClose <- m.sourceDrv.Close()
}()
return <-sourceSrvClose, <-databaseSrvClose
}
// Migrate looks at the currently active migration version,
// then migrates either up or down to the specified version.
func (m *Migrate) Migrate(version uint) error {
if err := m.lock(); err != nil {
return err
}
curVersion, dirty, err := m.databaseDrv.Version()
if err != nil {
return m.unlockErr(err)
}
if dirty {
return m.unlockErr(ErrDirty{curVersion})
}
ret := make(chan interface{}, m.PrefetchMigrations)
go m.read(curVersion, int(version), ret)
return m.unlockErr(m.runMigrations(ret))
}
// Steps looks at the currently active migration version.
// It will migrate up if n > 0, and down if n < 0.
func (m *Migrate) Steps(n int) error {
if n == 0 {
return ErrNoChange
}
if err := m.lock(); err != nil {
return err
}
curVersion, dirty, err := m.databaseDrv.Version()
if err != nil {
return m.unlockErr(err)
}
if dirty {
return m.unlockErr(ErrDirty{curVersion})
}
ret := make(chan interface{}, m.PrefetchMigrations)
if n > 0 {
go m.readUp(curVersion, n, ret)
} else {
go m.readDown(curVersion, -n, ret)
}
return m.unlockErr(m.runMigrations(ret))
}
// Up looks at the currently active migration version
// and will migrate all the way up (applying all up migrations).
func (m *Migrate) Up() error {
if err := m.lock(); err != nil {
return err
}
curVersion, dirty, err := m.databaseDrv.Version()
if err != nil {
return m.unlockErr(err)
}
if dirty {
return m.unlockErr(ErrDirty{curVersion})
}
ret := make(chan interface{}, m.PrefetchMigrations)
go m.readUp(curVersion, -1, ret)
return m.unlockErr(m.runMigrations(ret))
}
// Down looks at the currently active migration version
// and will migrate all the way down (applying all down migrations).
func (m *Migrate) Down() error {
if err := m.lock(); err != nil {
return err
}
curVersion, dirty, err := m.databaseDrv.Version()
if err != nil {
return m.unlockErr(err)
}
if dirty {
return m.unlockErr(ErrDirty{curVersion})
}
ret := make(chan interface{}, m.PrefetchMigrations)
go m.readDown(curVersion, -1, ret)
return m.unlockErr(m.runMigrations(ret))
}
// Drop deletes everything in the database.
func (m *Migrate) Drop() error {
if err := m.lock(); err != nil {
return err
}
if err := m.databaseDrv.Drop(); err != nil {
return m.unlockErr(err)
}
return m.unlock()
}
// Run runs any migration provided by you against the database.
// It does not check any currently active version in database.
// Usually you don't need this function at all. Use Migrate,
// Steps, Up or Down instead.
func (m *Migrate) Run(migration ...*Migration) error {
if len(migration) == 0 {
return ErrNoChange
}
if err := m.lock(); err != nil {
return err
}
curVersion, dirty, err := m.databaseDrv.Version()
if err != nil {
return m.unlockErr(err)
}
if dirty {
return m.unlockErr(ErrDirty{curVersion})
}
ret := make(chan interface{}, m.PrefetchMigrations)
go func() {
defer close(ret)
for _, migr := range migration {
if m.PrefetchMigrations > 0 && migr.Body != nil {
m.logVerbosePrintf("Start buffering %v\n", migr.LogString())
} else {
m.logVerbosePrintf("Scheduled %v\n", migr.LogString())
}
ret <- migr
go func(migr *Migration) {
if err := migr.Buffer(); err != nil {
m.logErr(err)
}
}(migr)
}
}()
return m.unlockErr(m.runMigrations(ret))
}
// Force sets a migration version.
// It does not check any currently active version in database.
// It resets the dirty state to false.
func (m *Migrate) Force(version int) error {
if version < -1 {
return ErrInvalidVersion
}
if err := m.lock(); err != nil {
return err
}
if err := m.databaseDrv.SetVersion(version, false); err != nil {
return m.unlockErr(err)
}
return m.unlock()
}
// Version returns the currently active migration version.
// If no migration has been applied yet, it will return ErrNilVersion.
func (m *Migrate) Version() (version uint, dirty bool, err error) {
v, d, err := m.databaseDrv.Version()
if err != nil {
return 0, false, err
}
if v == database.NilVersion {
return 0, false, ErrNilVersion
}
return suint(v), d, nil
}
// read reads either up or down migrations from source `from` to `to`.
// Each migration is then written to the ret channel.
// If an error occurs during reading, that error is written to the ret channel, too.
// Once read is done reading it will close the ret channel.
func (m *Migrate) read(from int, to int, ret chan<- interface{}) {
defer close(ret)
// check if from version exists
if from >= 0 {
if err := m.versionExists(suint(from)); err != nil {
ret <- err
return
}
}
// check if to version exists
if to >= 0 {
if err := m.versionExists(suint(to)); err != nil {
ret <- err
return
}
}
// no change?
if from == to {
ret <- ErrNoChange
return
}
if from < to {
// it's going up
// apply first migration if from is nil version
if from == -1 {
firstVersion, err := m.sourceDrv.First()
if err != nil {
ret <- err
return
}
migr, err := m.newMigration(firstVersion, int(firstVersion))
if err != nil {
ret <- err
return
}
ret <- migr
go func() {
if err := migr.Buffer(); err != nil {
m.logErr(err)
}
}()
from = int(firstVersion)
}
// run until we reach target ...
for from < to {
if m.stop() {
return
}
next, err := m.sourceDrv.Next(suint(from))
if err != nil {
ret <- err
return
}
migr, err := m.newMigration(next, int(next))
if err != nil {
ret <- err
return
}
ret <- migr
go func() {
if err := migr.Buffer(); err != nil {
m.logErr(err)
}
}()
from = int(next)
}
} else {
// it's going down
// run until we reach target ...
for from > to && from >= 0 {
if m.stop() {
return
}
prev, err := m.sourceDrv.Prev(suint(from))
if os.IsNotExist(err) && to == -1 {
// apply nil migration
migr, err := m.newMigration(suint(from), -1)
if err != nil {
ret <- err
return
}
ret <- migr
go func() {
if err := migr.Buffer(); err != nil {
m.logErr(err)
}
}()
return
} else if err != nil {
ret <- err
return
}
migr, err := m.newMigration(suint(from), int(prev))
if err != nil {
ret <- err
return
}
ret <- migr
go func() {
if err := migr.Buffer(); err != nil {
m.logErr(err)
}
}()
from = int(prev)
}
}
}
// readUp reads up migrations from `from` limited by `limit`.
// limit can be -1, implying no limit and reading until there are no more migrations.
// Each migration is then written to the ret channel.
// If an error occurs during reading, that error is written to the ret channel, too.
// Once readUp is done reading it will close the ret channel.
func (m *Migrate) readUp(from int, limit int, ret chan<- interface{}) {
defer close(ret)
// check if from version exists
if from >= 0 {
if err := m.versionExists(suint(from)); err != nil {
ret <- err
return
}
}
if limit == 0 {
ret <- ErrNoChange
return
}
count := 0
for count < limit || limit == -1 {
if m.stop() {
return
}
// apply first migration if from is nil version
if from == -1 {
firstVersion, err := m.sourceDrv.First()
if err != nil {
ret <- err
return
}
migr, err := m.newMigration(firstVersion, int(firstVersion))
if err != nil {
ret <- err
return
}
ret <- migr
go func() {
if err := migr.Buffer(); err != nil {
m.logErr(err)
}
}()
from = int(firstVersion)
count++
continue
}
// apply next migration
next, err := m.sourceDrv.Next(suint(from))
if os.IsNotExist(err) {
// no limit, but no migrations applied?
if limit == -1 && count == 0 {
ret <- ErrNoChange
return
}
// no limit, reached end
if limit == -1 {
return
}
// reached end, and didn't apply any migrations
if limit > 0 && count == 0 {
ret <- os.ErrNotExist
return
}
// applied fewer migrations than the limit?
if count < limit {
ret <- ErrShortLimit{suint(limit - count)}
return
}
}
if err != nil {
ret <- err
return
}
migr, err := m.newMigration(next, int(next))
if err != nil {
ret <- err
return
}
ret <- migr
go func() {
if err := migr.Buffer(); err != nil {
m.logErr(err)
}
}()
from = int(next)
count++
}
}
// readDown reads down migrations from `from` limited by `limit`.
// limit can be -1, implying no limit and reading until there are no more migrations.
// Each migration is then written to the ret channel.
// If an error occurs during reading, that error is written to the ret channel, too.
// Once readDown is done reading it will close the ret channel.
func (m *Migrate) readDown(from int, limit int, ret chan<- interface{}) {
defer close(ret)
// check if from version exists
if from >= 0 {
if err := m.versionExists(suint(from)); err != nil {
ret <- err
return
}
}
if limit == 0 {
ret <- ErrNoChange
return
}
// no change if already at nil version
if from == -1 && limit == -1 {
ret <- ErrNoChange
return
}
// can't go over limit if already at nil version
if from == -1 && limit > 0 {
ret <- os.ErrNotExist
return
}
count := 0
for count < limit || limit == -1 {
if m.stop() {
return
}
prev, err := m.sourceDrv.Prev(suint(from))
if os.IsNotExist(err) {
// no limit or haven't reached limit, apply "first" migration
if limit == -1 || limit-count > 0 {
firstVersion, err := m.sourceDrv.First()
if err != nil {
ret <- err
return
}
migr, err := m.newMigration(firstVersion, -1)
if err != nil {
ret <- err
return
}
ret <- migr
go func() {
if err := migr.Buffer(); err != nil {
m.logErr(err)
}
}()
count++
}
if count < limit {
ret <- ErrShortLimit{suint(limit - count)}
}
return
}
if err != nil {
ret <- err
return
}
migr, err := m.newMigration(suint(from), int(prev))
if err != nil {
ret <- err
return
}
ret <- migr
go func() {
if err := migr.Buffer(); err != nil {
m.logErr(err)
}
}()
from = int(prev)
count++
}
}
// runMigrations reads *Migration and error from a channel. Any other type
// sent on this channel will result in a panic. Each migration is then
// proxied to the database driver and run against the database.
// Before running a newly received migration it will check if it's supposed
// to stop execution because it might have received a stop signal on the
// GracefulStop channel.
func (m *Migrate) runMigrations(ret <-chan interface{}) error {
for r := range ret {
if m.stop() {
return nil
}
switch r := r.(type) {
case error:
return r
case *Migration:
migr := r
// set version with dirty state
if err := m.databaseDrv.SetVersion(migr.TargetVersion, true); err != nil {
return err
}
if migr.Body != nil {
m.logVerbosePrintf("Read and execute %v\n", migr.LogString())
if err := m.databaseDrv.Run(migr.BufferedBody); err != nil {
return err
}
}
// set clean state
if err := m.databaseDrv.SetVersion(migr.TargetVersion, false); err != nil {
return err
}
endTime := time.Now()
readTime := migr.FinishedReading.Sub(migr.StartedBuffering)
runTime := endTime.Sub(migr.FinishedReading)
// log either verbose or normal
if m.Log != nil {
if m.Log.Verbose() {
m.logPrintf("Finished %v (read %v, ran %v)\n", migr.LogString(), readTime, runTime)
} else {
m.logPrintf("%v (%v)\n", migr.LogString(), readTime+runTime)
}
}
default:
return fmt.Errorf("unknown type: %T with value: %+v", r, r)
}
}
return nil
}
// versionExists checks the source if either the up or down migration for
// the specified migration version exists.
func (m *Migrate) versionExists(version uint) (result error) {
// try up migration first
up, _, err := m.sourceDrv.ReadUp(version)
if err == nil {
defer func() {
if errClose := up.Close(); errClose != nil {
result = multierror.Append(result, errClose)
}
}()
}
if os.IsExist(err) {
return nil
} else if !os.IsNotExist(err) {
return err
}
// then try down migration
down, _, err := m.sourceDrv.ReadDown(version)
if err == nil {
defer func() {
if errClose := down.Close(); errClose != nil {
result = multierror.Append(result, errClose)
}
}()
}
if os.IsExist(err) {
return nil
} else if !os.IsNotExist(err) {
return err
}
m.logErr(fmt.Errorf("no migration found for version %d", version))
return os.ErrNotExist
}
// stop returns true if no more migrations should be run against the database
// because a stop signal was received on the GracefulStop channel.
// Calls are cheap and this function is not blocking.
func (m *Migrate) stop() bool {
if m.isGracefulStop {
return true
}
select {
case <-m.GracefulStop:
m.isGracefulStop = true
return true
default:
return false
}
}
// newMigration is a helper func that returns a *Migration for the
// specified version and targetVersion.
func (m *Migrate) newMigration(version uint, targetVersion int) (*Migration, error) {
var migr *Migration
if targetVersion >= int(version) {
r, identifier, err := m.sourceDrv.ReadUp(version)
if os.IsNotExist(err) {
// create "empty" migration
migr, err = NewMigration(nil, "", version, targetVersion)
if err != nil {
return nil, err
}
} else if err != nil {
return nil, err
} else {
// create migration from up source
migr, err = NewMigration(r, identifier, version, targetVersion)
if err != nil {
return nil, err
}
}
} else {
r, identifier, err := m.sourceDrv.ReadDown(version)
if os.IsNotExist(err) {
// create "empty" migration
migr, err = NewMigration(nil, "", version, targetVersion)
if err != nil {
return nil, err
}
} else if err != nil {
return nil, err
} else {
// create migration from down source
migr, err = NewMigration(r, identifier, version, targetVersion)
if err != nil {
return nil, err
}
}
}
if m.PrefetchMigrations > 0 && migr.Body != nil {
m.logVerbosePrintf("Start buffering %v\n", migr.LogString())
} else {
m.logVerbosePrintf("Scheduled %v\n", migr.LogString())
}
return migr, nil
}
// lock is a thread safe helper function to lock the database.
// It should be called as late as possible when running migrations.
func (m *Migrate) lock() error {
m.isLockedMu.Lock()
defer m.isLockedMu.Unlock()
if m.isLocked {
return ErrLocked
}
// create done channel, used in the timeout goroutine
done := make(chan bool, 1)
defer func() {
done <- true
}()
// use errchan to signal error back to this context
errchan := make(chan error, 2)
// start timeout goroutine
timeout := time.After(m.LockTimeout)
go func() {
for {
select {
case <-done:
return
case <-timeout:
errchan <- ErrLockTimeout
return
}
}
}()
// now try to acquire the lock
go func() {
if err := m.databaseDrv.Lock(); err != nil {
errchan <- err
} else {
errchan <- nil
}
}()
// wait until we either receive ErrLockTimeout or error from Lock operation
err := <-errchan
if err == nil {
m.isLocked = true
}
return err
}
// unlock is a thread safe helper function to unlock the database.
// It should be called as early as possible when no more migrations are
// expected to be executed.
func (m *Migrate) unlock() error {
m.isLockedMu.Lock()
defer m.isLockedMu.Unlock()
if err := m.databaseDrv.Unlock(); err != nil {
// BUG: Can potentially create a deadlock. Add a timeout.
return err
}
m.isLocked = false
return nil
}
// unlockErr calls unlock and returns a combined error
// if a prevErr is not nil.
func (m *Migrate) unlockErr(prevErr error) error {
if err := m.unlock(); err != nil {
return multierror.Append(prevErr, err)
}
return prevErr
}
// logPrintf writes to m.Log if not nil
func (m *Migrate) logPrintf(format string, v ...interface{}) {
if m.Log != nil {
m.Log.Printf(format, v...)
}
}
// logVerbosePrintf writes to m.Log if not nil. Use for verbose logging output.
func (m *Migrate) logVerbosePrintf(format string, v ...interface{}) {
if m.Log != nil && m.Log.Verbose() {
m.Log.Printf(format, v...)
}
}
// logErr writes error to m.Log if not nil
func (m *Migrate) logErr(err error) {
if m.Log != nil {
m.Log.Printf("error: %v", err)
}
}

160
vendor/github.com/status-im/migrate/v4/migration.go generated vendored Normal file
View File

@@ -0,0 +1,160 @@
package migrate
import (
"bufio"
"fmt"
"io"
"time"
)
// DefaultBufferSize sets the in-memory buffer size (in bytes) for every
// pre-read migration (see DefaultPrefetchMigrations).
var DefaultBufferSize = uint(100000)
// Migration holds information about a migration.
// It is initially created from data coming from the source and then
// used when run against the database.
type Migration struct {
// Identifier can be any string to help identifying
// the migration in the source.
Identifier string
// Version is the version of this migration.
Version uint
// TargetVersion is the migration version after this migration
// has been applied to the database.
// Can be -1, implying that this is a NilVersion.
TargetVersion int
// Body holds an io.ReadCloser to the source.
Body io.ReadCloser
// BufferedBody holds a buffered io.Reader to the underlying Body.
BufferedBody io.Reader
// BufferSize defaults to DefaultBufferSize
BufferSize uint
// bufferWriter holds an io.WriteCloser and pipes to BufferedBody.
// It's a WriteCloser for flow control.
bufferWriter io.WriteCloser
// Scheduled is the time when the migration was scheduled/queued.
Scheduled time.Time
// StartedBuffering is the time when buffering of the migration source started.
StartedBuffering time.Time
// FinishedBuffering is the time when buffering of the migration source finished.
FinishedBuffering time.Time
// FinishedReading is the time when the migration source is fully read.
FinishedReading time.Time
// BytesRead holds the number of Bytes read from the migration source.
BytesRead int64
}
// NewMigration returns a new Migration and sets the body, identifier,
// version and targetVersion. Body can be nil, which turns this migration
// into a "NilMigration". If no identifier is provided, it will default to "<empty>".
// targetVersion can be -1, implying it is a NilVersion.
//
// What is a NilMigration?
// Usually each migration version coming from source is expected to have an
// Up and Down migration. This is not a hard requirement though, leading to
// a situation where only the Up or Down migration is present. So let's say
// the user wants to migrate up to a version that doesn't have the actual Up
// migration, in that case we still want to apply the version, but with an empty
// body. We are calling that a NilMigration, a migration with an empty body.
//
// What is a NilVersion?
// NilVersion is a const(-1). When running down migrations and we are at the
// last down migration, there is no next down migration, so the targetVersion should
// be nil. Nil in this case is represented by -1 (because the type is int).
func NewMigration(body io.ReadCloser, identifier string,
version uint, targetVersion int) (*Migration, error) {
tnow := time.Now()
m := &Migration{
Identifier: identifier,
Version: version,
TargetVersion: targetVersion,
Scheduled: tnow,
}
if body == nil {
if len(identifier) == 0 {
m.Identifier = "<empty>"
}
m.StartedBuffering = tnow
m.FinishedBuffering = tnow
m.FinishedReading = tnow
return m, nil
}
br, bw := io.Pipe()
m.Body = body // want to simulate low latency? newSlowReader(body)
m.BufferSize = DefaultBufferSize
m.BufferedBody = br
m.bufferWriter = bw
return m, nil
}
// String implements string.Stringer and is used in tests.
func (m *Migration) String() string {
return fmt.Sprintf("%v [%v=>%v]", m.Identifier, m.Version, m.TargetVersion)
}
// LogString returns a string describing this migration to humans.
func (m *Migration) LogString() string {
directionStr := "u"
if m.TargetVersion < int(m.Version) {
directionStr = "d"
}
return fmt.Sprintf("%v/%v %v", m.Version, directionStr, m.Identifier)
}
// Buffer buffers Body up to BufferSize.
// Calling this function blocks. Call with goroutine.
func (m *Migration) Buffer() error {
if m.Body == nil {
return nil
}
m.StartedBuffering = time.Now()
b := bufio.NewReaderSize(m.Body, int(m.BufferSize))
// start reading from body, peek won't move the read pointer though
// poor man's solution?
if _, err := b.Peek(int(m.BufferSize)); err != nil && err != io.EOF {
return err
}
m.FinishedBuffering = time.Now()
// write to bufferWriter; this will block until
// something starts reading from m.BufferedBody
n, err := b.WriteTo(m.bufferWriter)
if err != nil {
return err
}
m.FinishedReading = time.Now()
m.BytesRead = n
// close bufferWriter so the reader of BufferedBody knows that there is
// no more data coming
if err := m.bufferWriter.Close(); err != nil {
return err
}
// it's safe to close the Body too
if err := m.Body.Close(); err != nil {
return err
}
return nil
}
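
A minimal, hypothetical sketch of driving the type above: `Buffer` blocks while it pipes `Body` into `BufferedBody`, so it runs in its own goroutine while the consumer drains the buffered side. The migration body and version numbers here are made up.
```go
package main

import (
	"fmt"
	"io"
	"io/ioutil"
	"strings"

	migrate "github.com/status-im/migrate/v4"
)

func main() {
	body := ioutil.NopCloser(strings.NewReader("CREATE TABLE t (id INTEGER);"))
	m, err := migrate.NewMigration(body, "example", 1, 2)
	if err != nil {
		panic(err)
	}

	// Buffer blocks until BufferedBody is drained, so run it concurrently.
	go func() {
		if err := m.Buffer(); err != nil {
			fmt.Println("buffer error:", err)
		}
	}()

	// Reading BufferedBody unblocks Buffer's pipe writer.
	data, err := io.ReadAll(m.BufferedBody)
	if err != nil {
		panic(err)
	}
	fmt.Printf("%s => %s\n", m.LogString(), data)
}
```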

View File

@@ -0,0 +1,43 @@
# go_bindata
## Usage
### Read bindata with NewWithSourceInstance
```shell
go get -u github.com/jteeuwen/go-bindata/...
cd examples/migrations && go-bindata -pkg migrations .
```
```go
import (
"github.com/golang-migrate/migrate/v4"
"github.com/golang-migrate/migrate/v4/source/go_bindata"
"github.com/golang-migrate/migrate/v4/source/go_bindata/examples/migrations"
)
func main() {
// wrap assets into Resource
s := bindata.Resource(migrations.AssetNames(),
func(name string) ([]byte, error) {
return migrations.Asset(name)
})
d, err := bindata.WithInstance(s)
if err != nil {
// handle error
}
m, err := migrate.NewWithSourceInstance("go-bindata", d, "database://foobar")
if err != nil {
// handle error
}
m.Up() // run your migrations; handle its error too
}
```
### Read bindata with URL (todo)
This will restore the assets in a tmp directory and then
proxy to source/file. go-bindata must be in your `$PATH`.
```
migrate -source go-bindata://examples/migrations/bindata.go
```

View File

@@ -0,0 +1,119 @@
package bindata
import (
"bytes"
"fmt"
"io"
"io/ioutil"
"os"
"github.com/golang-migrate/migrate/v4/source"
)
type AssetFunc func(name string) ([]byte, error)
func Resource(names []string, afn AssetFunc) *AssetSource {
return &AssetSource{
Names: names,
AssetFunc: afn,
}
}
type AssetSource struct {
Names []string
AssetFunc AssetFunc
}
func init() {
source.Register("go-bindata", &Bindata{})
}
type Bindata struct {
path string
assetSource *AssetSource
migrations *source.Migrations
}
func (b *Bindata) Open(url string) (source.Driver, error) {
return nil, fmt.Errorf("not yet implemented")
}
var (
ErrNoAssetSource = fmt.Errorf("expects *AssetSource")
)
func WithInstance(instance interface{}) (source.Driver, error) {
if _, ok := instance.(*AssetSource); !ok {
return nil, ErrNoAssetSource
}
as := instance.(*AssetSource)
bn := &Bindata{
path: "<go-bindata>",
assetSource: as,
migrations: source.NewMigrations(),
}
for _, fi := range as.Names {
m, err := source.DefaultParse(fi)
if err != nil {
continue // ignore files that we can't parse
}
if !bn.migrations.Append(m) {
return nil, fmt.Errorf("unable to parse file %v", fi)
}
}
return bn, nil
}
func (b *Bindata) Close() error {
return nil
}
func (b *Bindata) First() (version uint, err error) {
if v, ok := b.migrations.First(); !ok {
return 0, &os.PathError{Op: "first", Path: b.path, Err: os.ErrNotExist}
} else {
return v, nil
}
}
func (b *Bindata) Prev(version uint) (prevVersion uint, err error) {
if v, ok := b.migrations.Prev(version); !ok {
return 0, &os.PathError{Op: fmt.Sprintf("prev for version %v", version), Path: b.path, Err: os.ErrNotExist}
} else {
return v, nil
}
}
func (b *Bindata) Next(version uint) (nextVersion uint, err error) {
if v, ok := b.migrations.Next(version); !ok {
return 0, &os.PathError{Op: fmt.Sprintf("next for version %v", version), Path: b.path, Err: os.ErrNotExist}
} else {
return v, nil
}
}
func (b *Bindata) ReadUp(version uint) (r io.ReadCloser, identifier string, err error) {
if m, ok := b.migrations.Up(version); ok {
body, err := b.assetSource.AssetFunc(m.Raw)
if err != nil {
return nil, "", err
}
return ioutil.NopCloser(bytes.NewReader(body)), m.Identifier, nil
}
return nil, "", &os.PathError{Op: fmt.Sprintf("read version %v", version), Path: b.path, Err: os.ErrNotExist}
}
func (b *Bindata) ReadDown(version uint) (r io.ReadCloser, identifier string, err error) {
if m, ok := b.migrations.Down(version); ok {
body, err := b.assetSource.AssetFunc(m.Raw)
if err != nil {
return nil, "", err
}
return ioutil.NopCloser(bytes.NewReader(body)), m.Identifier, nil
}
return nil, "", &os.PathError{Op: fmt.Sprintf("read version %v", version), Path: b.path, Err: os.ErrNotExist}
}

62
vendor/github.com/status-im/migrate/v4/util.go generated vendored Normal file
View File

@@ -0,0 +1,62 @@
package migrate
import (
"fmt"
nurl "net/url"
"strings"
)
// MultiError holds multiple errors.
//
// Deprecated: Use github.com/hashicorp/go-multierror instead
type MultiError struct {
Errs []error
}
// NewMultiError returns an error type holding multiple errors.
//
// Deprecated: Use github.com/hashicorp/go-multierror instead
//
func NewMultiError(errs ...error) MultiError {
compactErrs := make([]error, 0)
for _, e := range errs {
if e != nil {
compactErrs = append(compactErrs, e)
}
}
return MultiError{compactErrs}
}
// Error implements error. Multiple errors are concatenated with 'and's.
func (m MultiError) Error() string {
var strs = make([]string, 0)
for _, e := range m.Errs {
if len(e.Error()) > 0 {
strs = append(strs, e.Error())
}
}
return strings.Join(strs, " and ")
}
// suint safely converts int to uint
// see https://goo.gl/wEcqof
// see https://goo.gl/pai7Dr
func suint(n int) uint {
if n < 0 {
panic(fmt.Sprintf("suint(%v) expects input >= 0", n))
}
return uint(n)
}
// FilterCustomQuery filters all query values starting with `x-`
func FilterCustomQuery(u *nurl.URL) *nurl.URL {
ux := *u
vx := make(nurl.Values)
for k, v := range ux.Query() {
if len(k) <= 1 || (len(k) > 1 && k[0:2] != "x-") {
vx[k] = v
}
}
ux.RawQuery = vx.Encode()
return &ux
}
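
A small, hypothetical sketch of `FilterCustomQuery`; the URL and its parameters are made up:
```go
package main

import (
	"fmt"
	nurl "net/url"

	migrate "github.com/status-im/migrate/v4"
)

func main() {
	u, err := nurl.Parse("postgres://localhost:5432/db?sslmode=disable&x-migrations-table=schema_migrations")
	if err != nil {
		panic(err)
	}
	// Driver-specific x- parameters are stripped; everything else is kept.
	fmt.Println(migrate.FilterCustomQuery(u).String())
	// Output: postgres://localhost:5432/db?sslmode=disable
}
```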

21
vendor/github.com/status-im/mvds/LICENSE generated vendored Normal file
View File

@@ -0,0 +1,21 @@
MIT License
Copyright (c) 2019 Status
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

View File

@@ -0,0 +1,33 @@
package node
import (
"database/sql"
"github.com/status-im/mvds/state"
)
type epochSQLitePersistence struct {
db *sql.DB
}
func newEpochSQLitePersistence(db *sql.DB) *epochSQLitePersistence {
return &epochSQLitePersistence{db: db}
}
func (p *epochSQLitePersistence) Get(nodeID state.PeerID) (epoch int64, err error) {
row := p.db.QueryRow(`SELECT epoch FROM mvds_epoch WHERE peer_id = ?`, nodeID[:])
err = row.Scan(&epoch)
if err == sql.ErrNoRows {
err = nil
}
return
}
func (p *epochSQLitePersistence) Set(nodeID state.PeerID, epoch int64) error {
_, err := p.db.Exec(`
INSERT OR REPLACE INTO mvds_epoch (peer_id, epoch) VALUES (?, ?)`,
nodeID[:],
epoch,
)
return err
}
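
In-package usage is a simple round-trip; a hypothetical sketch, assuming `db` already has the `mvds_epoch` table from this package's migrations:
```go
// db is an *sql.DB; peer is a state.PeerID. Both are placeholders.
p := newEpochSQLitePersistence(db)
if err := p.Set(peer, 42); err != nil {
	return err
}
// Get returns 0 and a nil error when the peer has no stored epoch yet.
epoch, err := p.Get(peer)
if err != nil {
	return err
}
fmt.Println(epoch) // 42
```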

View File

@@ -0,0 +1,321 @@
// Code generated by go-bindata. DO NOT EDIT.
// sources:
// 1565345162_initial_schema.down.sql (23B)
// 1565345162_initial_schema.up.sql (86B)
// doc.go (377B)
package migrations
import (
"bytes"
"compress/gzip"
"crypto/sha256"
"fmt"
"io"
"os"
"path/filepath"
"strings"
"time"
)
func bindataRead(data []byte, name string) ([]byte, error) {
gz, err := gzip.NewReader(bytes.NewBuffer(data))
if err != nil {
return nil, fmt.Errorf("read %q: %w", name, err)
}
var buf bytes.Buffer
_, err = io.Copy(&buf, gz)
clErr := gz.Close()
if err != nil {
return nil, fmt.Errorf("read %q: %w", name, err)
}
if clErr != nil {
return nil, clErr
}
return buf.Bytes(), nil
}
type asset struct {
bytes []byte
info os.FileInfo
digest [sha256.Size]byte
}
type bindataFileInfo struct {
name string
size int64
mode os.FileMode
modTime time.Time
}
func (fi bindataFileInfo) Name() string {
return fi.name
}
func (fi bindataFileInfo) Size() int64 {
return fi.size
}
func (fi bindataFileInfo) Mode() os.FileMode {
return fi.mode
}
func (fi bindataFileInfo) ModTime() time.Time {
return fi.modTime
}
func (fi bindataFileInfo) IsDir() bool {
return false
}
func (fi bindataFileInfo) Sys() interface{} {
return nil
}
var __1565345162_initial_schemaDownSql = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x72\x09\xf2\x0f\x50\x08\x71\x74\xf2\x71\x55\xc8\x2d\x4b\x29\x8e\x4f\x2d\xc8\x4f\xce\xb0\xe6\x02\x04\x00\x00\xff\xff\xd3\x00\xf3\x23\x17\x00\x00\x00")
func _1565345162_initial_schemaDownSqlBytes() ([]byte, error) {
return bindataRead(
__1565345162_initial_schemaDownSql,
"1565345162_initial_schema.down.sql",
)
}
func _1565345162_initial_schemaDownSql() (*asset, error) {
bytes, err := _1565345162_initial_schemaDownSqlBytes()
if err != nil {
return nil, err
}
info := bindataFileInfo{name: "1565345162_initial_schema.down.sql", size: 23, mode: os.FileMode(0644), modTime: time.Unix(1704726726, 0)}
a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x7c, 0x69, 0xd2, 0x3, 0xea, 0x82, 0x7c, 0xb3, 0x44, 0x6c, 0xef, 0x64, 0x2c, 0x99, 0x62, 0xa2, 0x8b, 0x6f, 0x96, 0x4f, 0x34, 0x41, 0x87, 0xd5, 0x4e, 0x3, 0x7f, 0x4a, 0xd1, 0x91, 0x9, 0x99}}
return a, nil
}
var __1565345162_initial_schemaUpSql = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x72\x0e\x72\x75\x0c\x71\x55\x08\x71\x74\xf2\x71\x55\xc8\x2d\x4b\x29\x8e\x4f\x2d\xc8\x4f\xce\x50\xd0\xe0\x52\x50\x50\x50\x28\x48\x4d\x2d\x8a\xcf\x4c\x51\x70\xf2\xf1\x77\x52\x08\x08\xf2\xf4\x75\x0c\x8a\x54\xf0\x76\x8d\xd4\x01\xcb\x42\x54\x7a\xfa\x85\xb8\xba\xbb\x06\x29\xf8\xf9\x87\x28\xf8\x85\xfa\xf8\x70\x69\x5a\x73\x01\x02\x00\x00\xff\xff\x51\x96\x2d\xcb\x56\x00\x00\x00")
func _1565345162_initial_schemaUpSqlBytes() ([]byte, error) {
return bindataRead(
__1565345162_initial_schemaUpSql,
"1565345162_initial_schema.up.sql",
)
}
func _1565345162_initial_schemaUpSql() (*asset, error) {
bytes, err := _1565345162_initial_schemaUpSqlBytes()
if err != nil {
return nil, err
}
info := bindataFileInfo{name: "1565345162_initial_schema.up.sql", size: 86, mode: os.FileMode(0644), modTime: time.Unix(1704726726, 0)}
a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x78, 0x7c, 0xdd, 0x67, 0x61, 0x3e, 0x7f, 0xd4, 0xce, 0xb0, 0x17, 0xbe, 0x5a, 0xa7, 0x9e, 0x93, 0x34, 0xe8, 0xbb, 0x44, 0xfb, 0x88, 0xd6, 0x18, 0x6d, 0x9f, 0xb4, 0x22, 0xda, 0xbc, 0x87, 0x94}}
return a, nil
}
var _docGo = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x84\x8f\xbb\x6e\xc3\x30\x0c\x45\x77\x7f\xc5\x45\x96\x2c\xb5\xb4\x74\xea\xd6\xb1\x7b\x7f\x80\x91\x68\x89\x88\x1e\xae\x48\xe7\xf1\xf7\x85\xd3\x02\xcd\xd6\xf5\x00\xe7\xf0\xd2\x7b\x7c\x66\x51\x2c\x52\x18\xa2\x68\x1c\x58\x95\xc6\x1d\x27\x0e\xb4\x29\xe3\x90\xc4\xf2\x76\x72\xa1\x57\xaf\x46\xb6\xe9\x2c\xd5\x57\x49\x83\x8c\xfd\xe5\xf5\x30\x79\x8f\x40\xed\x68\xc8\xd4\x62\xe1\x47\x4b\xa1\x46\xc3\xa4\x25\x5c\xc5\x32\x08\xeb\xe0\x45\x6e\x0e\xef\x86\xc2\xa4\x06\xcb\x64\x47\x85\x65\x46\x20\xe5\x3d\xb3\xf4\x81\xd4\xe7\x93\xb4\x48\x46\x6e\x47\x1f\xcb\x13\xd9\x17\x06\x2a\x85\x23\x96\xd1\xeb\xc3\x55\xaa\x8c\x28\x83\x83\xf5\x71\x7f\x01\xa9\xb2\xa1\x51\x65\xdd\xfd\x4c\x17\x46\xeb\xbf\xe7\x41\x2d\xfe\xff\x11\xae\x7d\x9c\x15\xa4\xe0\xdb\xca\xc1\x38\xba\x69\x5a\x29\x9c\x29\x31\xf4\xab\x88\xf1\x34\x79\x9f\xfa\x5b\xe2\xc6\xbb\xf5\xbc\x71\x5e\xcf\x09\x3f\x35\xe9\x4d\x31\x77\x38\xe7\xff\x80\x4b\x1d\x6e\xfa\x0e\x00\x00\xff\xff\x9d\x60\x3d\x88\x79\x01\x00\x00")
func docGoBytes() ([]byte, error) {
return bindataRead(
_docGo,
"doc.go",
)
}
func docGo() (*asset, error) {
bytes, err := docGoBytes()
if err != nil {
return nil, err
}
info := bindataFileInfo{name: "doc.go", size: 377, mode: os.FileMode(0644), modTime: time.Unix(1704726726, 0)}
a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xef, 0xaf, 0xdf, 0xcf, 0x65, 0xae, 0x19, 0xfc, 0x9d, 0x29, 0xc1, 0x91, 0xaf, 0xb5, 0xd5, 0xb1, 0x56, 0xf3, 0xee, 0xa8, 0xba, 0x13, 0x65, 0xdb, 0xab, 0xcf, 0x4e, 0xac, 0x92, 0xe9, 0x60, 0xf1}}
return a, nil
}
// Asset loads and returns the asset for the given name.
// It returns an error if the asset could not be found or
// could not be loaded.
func Asset(name string) ([]byte, error) {
canonicalName := strings.Replace(name, "\\", "/", -1)
if f, ok := _bindata[canonicalName]; ok {
a, err := f()
if err != nil {
return nil, fmt.Errorf("Asset %s can't read by error: %v", name, err)
}
return a.bytes, nil
}
return nil, fmt.Errorf("Asset %s not found", name)
}
// AssetString returns the asset contents as a string (instead of a []byte).
func AssetString(name string) (string, error) {
data, err := Asset(name)
return string(data), err
}
// MustAsset is like Asset but panics when Asset would return an error.
// It simplifies safe initialization of global variables.
func MustAsset(name string) []byte {
a, err := Asset(name)
if err != nil {
panic("asset: Asset(" + name + "): " + err.Error())
}
return a
}
// MustAssetString is like AssetString but panics when Asset would return an
// error. It simplifies safe initialization of global variables.
func MustAssetString(name string) string {
return string(MustAsset(name))
}
// AssetInfo loads and returns the asset info for the given name.
// It returns an error if the asset could not be found or
// could not be loaded.
func AssetInfo(name string) (os.FileInfo, error) {
canonicalName := strings.Replace(name, "\\", "/", -1)
if f, ok := _bindata[canonicalName]; ok {
a, err := f()
if err != nil {
return nil, fmt.Errorf("AssetInfo %s can't read by error: %v", name, err)
}
return a.info, nil
}
return nil, fmt.Errorf("AssetInfo %s not found", name)
}
// AssetDigest returns the digest of the file with the given name. It returns an
// error if the asset could not be found or the digest could not be loaded.
func AssetDigest(name string) ([sha256.Size]byte, error) {
canonicalName := strings.Replace(name, "\\", "/", -1)
if f, ok := _bindata[canonicalName]; ok {
a, err := f()
if err != nil {
return [sha256.Size]byte{}, fmt.Errorf("AssetDigest %s can't be read: %v", name, err)
}
return a.digest, nil
}
return [sha256.Size]byte{}, fmt.Errorf("AssetDigest %s not found", name)
}
// Digests returns a map of all known files and their checksums.
func Digests() (map[string][sha256.Size]byte, error) {
mp := make(map[string][sha256.Size]byte, len(_bindata))
for name := range _bindata {
a, err := _bindata[name]()
if err != nil {
return nil, err
}
mp[name] = a.digest
}
return mp, nil
}
// AssetNames returns the names of the assets.
func AssetNames() []string {
names := make([]string, 0, len(_bindata))
for name := range _bindata {
names = append(names, name)
}
return names
}
// _bindata is a table, holding each asset generator, mapped to its name.
var _bindata = map[string]func() (*asset, error){
"1565345162_initial_schema.down.sql": _1565345162_initial_schemaDownSql,
"1565345162_initial_schema.up.sql": _1565345162_initial_schemaUpSql,
"doc.go": docGo,
}
// AssetDebug is true if the assets were built with the debug flag enabled.
const AssetDebug = false
// AssetDir returns the file names below a certain
// directory embedded in the file by go-bindata.
// For example if you run go-bindata on data/... and data contains the
// following hierarchy:
//
// data/
// foo.txt
// img/
// a.png
// b.png
//
// then AssetDir("data") would return []string{"foo.txt", "img"},
// AssetDir("data/img") would return []string{"a.png", "b.png"},
// AssetDir("foo.txt") and AssetDir("notexist") would return an error, and
// AssetDir("") will return []string{"data"}.
func AssetDir(name string) ([]string, error) {
node := _bintree
if len(name) != 0 {
canonicalName := strings.Replace(name, "\\", "/", -1)
pathList := strings.Split(canonicalName, "/")
for _, p := range pathList {
node = node.Children[p]
if node == nil {
return nil, fmt.Errorf("Asset %s not found", name)
}
}
}
if node.Func != nil {
return nil, fmt.Errorf("Asset %s not found", name)
}
rv := make([]string, 0, len(node.Children))
for childName := range node.Children {
rv = append(rv, childName)
}
return rv, nil
}
type bintree struct {
Func func() (*asset, error)
Children map[string]*bintree
}
var _bintree = &bintree{nil, map[string]*bintree{
"1565345162_initial_schema.down.sql": {_1565345162_initial_schemaDownSql, map[string]*bintree{}},
"1565345162_initial_schema.up.sql": {_1565345162_initial_schemaUpSql, map[string]*bintree{}},
"doc.go": {docGo, map[string]*bintree{}},
}}
// RestoreAsset restores an asset under the given directory.
func RestoreAsset(dir, name string) error {
data, err := Asset(name)
if err != nil {
return err
}
info, err := AssetInfo(name)
if err != nil {
return err
}
err = os.MkdirAll(_filePath(dir, filepath.Dir(name)), os.FileMode(0755))
if err != nil {
return err
}
err = os.WriteFile(_filePath(dir, name), data, info.Mode())
if err != nil {
return err
}
return os.Chtimes(_filePath(dir, name), info.ModTime(), info.ModTime())
}
// RestoreAssets restores an asset under the given directory recursively.
func RestoreAssets(dir, name string) error {
children, err := AssetDir(name)
// File
if err != nil {
return RestoreAsset(dir, name)
}
// Dir
for _, child := range children {
err = RestoreAssets(dir, filepath.Join(name, child))
if err != nil {
return err
}
}
return nil
}
func _filePath(dir, name string) string {
canonicalName := strings.Replace(name, "\\", "/", -1)
return filepath.Join(append([]string{dir}, strings.Split(canonicalName, "/")...)...)
}

562
vendor/github.com/status-im/mvds/node/node.go generated vendored Normal file
View File

@@ -0,0 +1,562 @@
// Package node contains node logic.
package node
// @todo this is a very rough implementation that needs cleanup
import (
"context"
"database/sql"
"encoding/hex"
"fmt"
"sync/atomic"
"time"
"go.uber.org/zap"
"github.com/status-im/mvds/peers"
"github.com/status-im/mvds/protobuf"
"github.com/status-im/mvds/state"
"github.com/status-im/mvds/store"
"github.com/status-im/mvds/transport"
)
// Mode represents the synchronization mode.
type Mode int
const (
INTERACTIVE Mode = iota
BATCH
)
// CalculateNextEpoch is a function used to calculate the next `SendEpoch` for a given message.
type CalculateNextEpoch func(count uint64, epoch int64) int64
// Node represents an MVDS node, it runs all the logic like sending and receiving protocol messages.
type Node struct {
// This needs to be declared first: https://github.com/golang/go/issues/9959
epoch int64
ctx context.Context
cancel context.CancelFunc
store store.MessageStore
transport transport.Transport
syncState state.SyncState
peers peers.Persistence
payloads payloads
nextEpoch CalculateNextEpoch
ID state.PeerID
epochPersistence *epochSQLitePersistence
mode Mode
subscription chan protobuf.Message
logger *zap.Logger
}
func NewPersistentNode(
db *sql.DB,
st transport.Transport,
id state.PeerID,
mode Mode,
nextEpoch CalculateNextEpoch,
logger *zap.Logger,
) (*Node, error) {
ctx, cancel := context.WithCancel(context.Background())
if logger == nil {
logger = zap.NewNop()
}
node := Node{
ID: id,
ctx: ctx,
cancel: cancel,
store: store.NewPersistentMessageStore(db),
transport: st,
peers: peers.NewSQLitePersistence(db),
syncState: state.NewPersistentSyncState(db),
payloads: newPayloads(),
epochPersistence: newEpochSQLitePersistence(db),
nextEpoch: nextEpoch,
logger: logger.With(zap.Namespace("mvds")),
mode: mode,
}
if currentEpoch, err := node.epochPersistence.Get(id); err != nil {
return nil, err
} else {
node.epoch = currentEpoch
}
return &node, nil
}
func NewEphemeralNode(
id state.PeerID,
t transport.Transport,
nextEpoch CalculateNextEpoch,
currentEpoch int64,
mode Mode,
logger *zap.Logger,
) *Node {
ctx, cancel := context.WithCancel(context.Background())
if logger == nil {
logger = zap.NewNop()
}
return &Node{
ID: id,
ctx: ctx,
cancel: cancel,
store: store.NewDummyStore(),
transport: t,
syncState: state.NewSyncState(),
peers: peers.NewMemoryPersistence(),
payloads: newPayloads(),
nextEpoch: nextEpoch,
epoch: currentEpoch,
logger: logger.With(zap.Namespace("mvds")),
mode: mode,
}
}
// NewNode returns a new node.
func NewNode(
ms store.MessageStore,
st transport.Transport,
ss state.SyncState,
nextEpoch CalculateNextEpoch,
currentEpoch int64,
id state.PeerID,
mode Mode,
pp peers.Persistence,
logger *zap.Logger,
) *Node {
ctx, cancel := context.WithCancel(context.Background())
if logger == nil {
logger = zap.NewNop()
}
return &Node{
ctx: ctx,
cancel: cancel,
store: ms,
transport: st,
syncState: ss,
peers: pp,
payloads: newPayloads(),
nextEpoch: nextEpoch,
ID: id,
epoch: currentEpoch,
logger: logger.With(zap.Namespace("mvds")),
mode: mode,
}
}
func (n *Node) CurrentEpoch() int64 {
return atomic.LoadInt64(&n.epoch)
}
// Start listens for new messages received by the node and sends out those required every epoch.
func (n *Node) Start(duration time.Duration) {
go func() {
for {
select {
case <-n.ctx.Done():
n.logger.Info("Watch stopped")
return
default:
p := n.transport.Watch()
go n.onPayload(p.Sender, p.Payload)
}
}
}()
go func() {
for {
select {
case <-n.ctx.Done():
n.logger.Info("Epoch processing stopped")
return
default:
n.logger.Debug("Epoch processing", zap.String("node", hex.EncodeToString(n.ID[:4])), zap.Int64("epoch", n.epoch))
time.Sleep(duration)
err := n.sendMessages()
if err != nil {
n.logger.Error("Error sending messages.", zap.Error(err))
}
atomic.AddInt64(&n.epoch, 1)
// When a persistent node is used, the epoch needs to be saved.
if n.epochPersistence != nil {
if err := n.epochPersistence.Set(n.ID, n.epoch); err != nil {
n.logger.Error("Failed to persisten epoch", zap.Error(err))
}
}
}
}
}()
}
// Stop message reading and epoch processing
func (n *Node) Stop() {
n.logger.Info("Stopping node")
n.Unsubscribe()
n.cancel()
}
// Subscribe subscribes to incoming messages.
func (n *Node) Subscribe() chan protobuf.Message {
n.subscription = make(chan protobuf.Message)
return n.subscription
}
// Unsubscribe closes the listening channels
func (n *Node) Unsubscribe() {
if n.subscription != nil {
close(n.subscription)
}
n.subscription = nil
}
// AppendMessage sends a message to a given group.
func (n *Node) AppendMessage(groupID state.GroupID, data []byte) (state.MessageID, error) {
m := protobuf.Message{
GroupId: groupID[:],
Timestamp: time.Now().Unix(),
Body: data,
}
id := m.ID()
peers, err := n.peers.GetByGroupID(groupID)
if err != nil {
return state.MessageID{}, fmt.Errorf("trying to send to unknown group %x", groupID[:4])
}
err = n.store.Add(&m)
if err != nil {
return state.MessageID{}, err
}
for _, p := range peers {
t := state.OFFER
if n.mode == BATCH {
t = state.MESSAGE
}
n.insertSyncState(&groupID, id, p, t)
}
n.logger.Debug("Sending message",
zap.String("node", hex.EncodeToString(n.ID[:4])),
zap.String("groupID", hex.EncodeToString(groupID[:4])),
zap.String("id", hex.EncodeToString(id[:4])))
// @todo think about a way to insta trigger send messages when send was selected, we don't wanna wait for ticks here
return id, nil
}
// RequestMessage adds a REQUEST record to the next payload for a given message ID.
func (n *Node) RequestMessage(group state.GroupID, id state.MessageID) error {
peers, err := n.peers.GetByGroupID(group)
if err != nil {
return fmt.Errorf("trying to request from an unknown group %x", group[:4])
}
for _, p := range peers {
exist, err := n.IsPeerInGroup(group, p)
if err != nil {
return err
}
if exist {
continue
}
n.insertSyncState(&group, id, p, state.REQUEST)
}
return nil
}
// AddPeer adds a peer to a specific group, making it a recipient of messages.
func (n *Node) AddPeer(group state.GroupID, id state.PeerID) error {
return n.peers.Add(group, id)
}
// IsPeerInGroup checks whether a peer is in the specified group.
func (n *Node) IsPeerInGroup(g state.GroupID, p state.PeerID) (bool, error) {
return n.peers.Exists(g, p)
}
func (n *Node) sendMessages() error {
err := n.syncState.Map(n.epoch, func(s state.State) state.State {
m := s.MessageID
p := s.PeerID
switch s.Type {
case state.OFFER:
n.payloads.AddOffers(p, m[:])
case state.REQUEST:
n.payloads.AddRequests(p, m[:])
n.logger.Debug("sending REQUEST",
zap.String("from", hex.EncodeToString(n.ID[:4])),
zap.String("to", hex.EncodeToString(p[:4])),
zap.String("messageID", hex.EncodeToString(m[:4])),
)
case state.MESSAGE:
g := *s.GroupID
// TODO: Handle errors
exist, err := n.IsPeerInGroup(g, p)
if err != nil {
return s
}
if !exist {
return s
}
msg, err := n.store.Get(m)
if err != nil {
n.logger.Error("Failed to retreive message",
zap.String("messageID", hex.EncodeToString(m[:4])),
zap.Error(err),
)
return s
}
n.payloads.AddMessages(p, msg)
n.logger.Debug("sending MESSAGE",
zap.String("groupID", hex.EncodeToString(g[:4])),
zap.String("from", hex.EncodeToString(n.ID[:4])),
zap.String("to", hex.EncodeToString(p[:4])),
zap.String("messageID", hex.EncodeToString(m[:4])),
)
}
return n.updateSendEpoch(s)
})
if err != nil {
n.logger.Error("error while mapping sync state", zap.Error(err))
return err
}
return n.payloads.MapAndClear(func(peer state.PeerID, payload *protobuf.Payload) error {
err := n.transport.Send(n.ID, peer, payload)
if err != nil {
n.logger.Error("error sending message", zap.Error(err))
return err
}
return nil
})
}
func (n *Node) onPayload(sender state.PeerID, payload *protobuf.Payload) {
// Acks, Requests and Offers are all arrays of bytes because protobuf doesn't allow type aliases; otherwise arrays of MessageIDs would be nicer.
if err := n.onAck(sender, payload.Acks); err != nil {
n.logger.Error("error processing acks", zap.Error(err))
}
if err := n.onRequest(sender, payload.Requests); err != nil {
n.logger.Error("error processing requests", zap.Error(err))
}
if err := n.onOffer(sender, payload.Offers); err != nil {
n.logger.Error("error processing offers", zap.Error(err))
}
messageIds := n.onMessages(sender, payload.Messages)
n.payloads.AddAcks(sender, messageIds)
}
func (n *Node) onOffer(sender state.PeerID, offers [][]byte) error {
for _, raw := range offers {
id := toMessageID(raw)
n.logger.Debug("OFFER received",
zap.String("from", hex.EncodeToString(sender[:4])),
zap.String("to", hex.EncodeToString(n.ID[:4])),
zap.String("messageID", hex.EncodeToString(id[:4])),
)
exist, err := n.store.Has(id)
// @todo maybe ack?
if err != nil {
return err
}
if exist {
continue
}
n.insertSyncState(nil, id, sender, state.REQUEST)
}
return nil
}
func (n *Node) onRequest(sender state.PeerID, requests [][]byte) error {
for _, raw := range requests {
id := toMessageID(raw)
n.logger.Debug("REQUEST received",
zap.String("from", hex.EncodeToString(sender[:4])),
zap.String("to", hex.EncodeToString(n.ID[:4])),
zap.String("messageID", hex.EncodeToString(id[:4])),
)
message, err := n.store.Get(id)
if err != nil {
return err
}
if message == nil {
n.logger.Error("message does not exist", zap.String("messageID", hex.EncodeToString(id[:4])))
continue
}
groupID := toGroupID(message.GroupId)
exist, err := n.IsPeerInGroup(groupID, sender)
if err != nil {
return err
}
if !exist {
n.logger.Error("peer is not in group",
zap.String("groupID", hex.EncodeToString(groupID[:4])),
zap.String("peer", hex.EncodeToString(sender[:4])),
)
continue
}
n.insertSyncState(&groupID, id, sender, state.MESSAGE)
}
return nil
}
func (n *Node) onAck(sender state.PeerID, acks [][]byte) error {
for _, ack := range acks {
id := toMessageID(ack)
err := n.syncState.Remove(id, sender)
if err != nil {
n.logger.Error("Error while removing sync state.", zap.Error(err))
return err
}
n.logger.Debug("ACK received",
zap.String("from", hex.EncodeToString(sender[:4])),
zap.String("to", hex.EncodeToString(n.ID[:4])),
zap.String("messageID", hex.EncodeToString(id[:4])),
)
}
return nil
}
func (n *Node) onMessages(sender state.PeerID, messages []*protobuf.Message) [][]byte {
a := make([][]byte, 0)
for _, m := range messages {
groupID := toGroupID(m.GroupId)
err := n.onMessage(sender, *m)
if err != nil {
n.logger.Error("Error processing message", zap.Error(err))
continue
}
id := m.ID()
n.logger.Debug("sending ACK",
zap.String("groupID", hex.EncodeToString(groupID[:4])),
zap.String("from", hex.EncodeToString(n.ID[:4])),
zap.String("", hex.EncodeToString(sender[:4])),
zap.String("messageID", hex.EncodeToString(id[:4])),
)
a = append(a, id[:])
}
return a
}
func (n *Node) onMessage(sender state.PeerID, msg protobuf.Message) error {
id := msg.ID()
groupID := toGroupID(msg.GroupId)
n.logger.Debug("MESSAGE received",
zap.String("from", hex.EncodeToString(sender[:4])),
zap.String("to", hex.EncodeToString(n.ID[:4])),
zap.String("messageID", hex.EncodeToString(id[:4])),
)
err := n.syncState.Remove(id, sender)
if err != nil && err != state.ErrStateNotFound {
return err
}
err = n.store.Add(&msg)
if err != nil {
// @todo process, should this function ever even have an error?
return err
}
peers, err := n.peers.GetByGroupID(groupID)
if err != nil {
return err
}
for _, peer := range peers {
if peer == sender {
continue
}
n.insertSyncState(&groupID, id, peer, state.OFFER)
}
if n.subscription != nil {
n.subscription <- msg
}
return nil
}
func (n *Node) insertSyncState(groupID *state.GroupID, messageID state.MessageID, peerID state.PeerID, t state.RecordType) {
s := state.State{
GroupID: groupID,
MessageID: messageID,
PeerID: peerID,
Type: t,
SendEpoch: n.epoch + 1,
}
err := n.syncState.Add(s)
if err != nil {
fields := []zap.Field{
zap.Error(err),
zap.String("messageID", hex.EncodeToString(messageID[:4])),
zap.String("peerID", hex.EncodeToString(peerID[:4])),
}
// groupID may be nil here (offers are requested without a known group),
// so only log it when present to avoid a nil dereference.
if groupID != nil {
fields = append(fields, zap.String("groupID", hex.EncodeToString(groupID[:4])))
}
n.logger.Error("error setting sync states", fields...)
}
}
func (n *Node) updateSendEpoch(s state.State) state.State {
s.SendCount += 1
s.SendEpoch = n.nextEpoch(s.SendCount, n.epoch)
return s
}
func toMessageID(b []byte) state.MessageID {
var id state.MessageID
copy(id[:], b)
return id
}
func toGroupID(b []byte) state.GroupID {
var id state.GroupID
copy(id[:], b)
return id
}
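
To make the lifecycle above concrete, here is a hypothetical wiring sketch for an ephemeral node. The transport `t`, the peer IDs and the group are placeholders, and the linear next-epoch function is only an illustration, not a recommended retransmission policy:
```go
// t satisfies transport.Transport; self and friend are state.PeerIDs;
// group is a state.GroupID. All of these are placeholders.
n := node.NewEphemeralNode(self, t, func(count uint64, epoch int64) int64 {
	return epoch + int64(count) // retry later each time a record stays unacked
}, 0, node.INTERACTIVE, nil)

sub := n.Subscribe()
go func() {
	for msg := range sub {
		fmt.Printf("received message in group %x\n", msg.GroupId[:4])
	}
}()

n.Start(300 * time.Millisecond) // epoch tick interval
defer n.Stop()

if err := n.AddPeer(group, friend); err != nil {
	// handle error
}
if _, err := n.AppendMessage(group, []byte("hello")); err != nil {
	// handle error
}
```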

96
vendor/github.com/status-im/mvds/node/payloads.go generated vendored Normal file
View File

@@ -0,0 +1,96 @@
package node
import (
"sync"
"github.com/status-im/mvds/protobuf"
"github.com/status-im/mvds/state"
)
type payloads struct {
sync.Mutex
payloads map[state.PeerID]*protobuf.Payload
}
// @todo check in all the functions below that we aren't duplicating stuff
func newPayloads() payloads {
return payloads{
payloads: make(map[state.PeerID]*protobuf.Payload),
}
}
func (p *payloads) AddOffers(peer state.PeerID, offers ...[]byte) {
p.Lock()
defer p.Unlock()
payload := p.get(peer)
payload.Offers = append(payload.Offers, offers...)
p.set(peer, payload)
}
func (p *payloads) AddAcks(peer state.PeerID, acks [][]byte) {
p.Lock()
defer p.Unlock()
payload := p.get(peer)
payload.Acks = append(payload.Acks, acks...)
p.set(peer, payload)
}
func (p *payloads) AddRequests(peer state.PeerID, request ...[]byte) {
p.Lock()
defer p.Unlock()
payload := p.get(peer)
payload.Requests = append(payload.Requests, request...)
p.set(peer, payload)
}
func (p *payloads) AddMessages(peer state.PeerID, messages ...*protobuf.Message) {
p.Lock()
defer p.Unlock()
payload := p.get(peer)
if payload.Messages == nil {
payload.Messages = make([]*protobuf.Message, 0)
}
payload.Messages = append(payload.Messages, messages...)
p.set(peer, payload)
}
func (p *payloads) MapAndClear(f func(state.PeerID, *protobuf.Payload) error) error {
p.Lock()
defer p.Unlock()
for peer, payload := range p.payloads {
err := f(peer, payload)
if err != nil {
return err
}
}
// TODO: this should only be called upon confirmation that the message has been sent
p.payloads = make(map[state.PeerID]*protobuf.Payload)
return nil
}
func (p *payloads) get(peer state.PeerID) *protobuf.Payload {
payload := p.payloads[peer]
if payload == nil {
return &protobuf.Payload{}
}
return payload
}
func (p *payloads) set(peer state.PeerID, payload *protobuf.Payload) {
p.payloads[peer] = payload
}
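
The type above is a simple accumulate-then-flush buffer; a hypothetical in-package sketch (`self`, `peer`, `tr` and the message IDs are placeholders):
```go
p := newPayloads()
p.AddOffers(peer, messageID[:])
p.AddAcks(peer, [][]byte{ackedID[:]})

// Flush everything queued per peer; the map is cleared afterwards,
// regardless of whether the sends actually arrived (see the TODO above).
if err := p.MapAndClear(func(to state.PeerID, payload *protobuf.Payload) error {
	return tr.Send(self, to, payload)
}); err != nil {
	// handle send error
}
```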

View File

@@ -0,0 +1,321 @@
// Code generated by go-bindata. DO NOT EDIT.
// sources:
// 1565249278_initial_schema.down.sql (23B)
// 1565249278_initial_schema.up.sql (140B)
// doc.go (377B)
package migrations
import (
"bytes"
"compress/gzip"
"crypto/sha256"
"fmt"
"io"
"os"
"path/filepath"
"strings"
"time"
)
func bindataRead(data []byte, name string) ([]byte, error) {
gz, err := gzip.NewReader(bytes.NewBuffer(data))
if err != nil {
return nil, fmt.Errorf("read %q: %w", name, err)
}
var buf bytes.Buffer
_, err = io.Copy(&buf, gz)
clErr := gz.Close()
if err != nil {
return nil, fmt.Errorf("read %q: %w", name, err)
}
if clErr != nil {
return nil, clErr
}
return buf.Bytes(), nil
}
type asset struct {
bytes []byte
info os.FileInfo
digest [sha256.Size]byte
}
type bindataFileInfo struct {
name string
size int64
mode os.FileMode
modTime time.Time
}
func (fi bindataFileInfo) Name() string {
return fi.name
}
func (fi bindataFileInfo) Size() int64 {
return fi.size
}
func (fi bindataFileInfo) Mode() os.FileMode {
return fi.mode
}
func (fi bindataFileInfo) ModTime() time.Time {
return fi.modTime
}
func (fi bindataFileInfo) IsDir() bool {
return false
}
func (fi bindataFileInfo) Sys() interface{} {
return nil
}
var __1565249278_initial_schemaDownSql = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x72\x09\xf2\x0f\x50\x08\x71\x74\xf2\x71\x55\xc8\x2d\x4b\x29\x8e\x2f\x48\x4d\x2d\x2a\xb6\xe6\x02\x04\x00\x00\xff\xff\x58\x44\x68\xf7\x17\x00\x00\x00")
func _1565249278_initial_schemaDownSqlBytes() ([]byte, error) {
return bindataRead(
__1565249278_initial_schemaDownSql,
"1565249278_initial_schema.down.sql",
)
}
func _1565249278_initial_schemaDownSql() (*asset, error) {
bytes, err := _1565249278_initial_schemaDownSqlBytes()
if err != nil {
return nil, err
}
info := bindataFileInfo{name: "1565249278_initial_schema.down.sql", size: 23, mode: os.FileMode(0644), modTime: time.Unix(1704726726, 0)}
a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x4, 0xfb, 0x5, 0x92, 0xf0, 0x93, 0xaa, 0x83, 0xb7, 0xdf, 0x66, 0xe2, 0x97, 0x53, 0x9d, 0x34, 0xd3, 0xca, 0x97, 0xd8, 0xe1, 0xed, 0xf0, 0x4a, 0x94, 0x1a, 0xb1, 0x8f, 0xcf, 0xc, 0xa4, 0x6}}
return a, nil
}
var __1565249278_initial_schemaUpSql = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x72\x0e\x72\x75\x0c\x71\x55\x08\x71\x74\xf2\x71\x55\xc8\x2d\x4b\x29\x8e\x2f\x48\x4d\x2d\x2a\x56\xd0\xe0\x52\x50\x50\x50\x48\x2f\xca\x2f\x2d\x88\xcf\x4c\x51\x70\xf2\xf1\x77\x52\xf0\xf3\x0f\x51\xf0\x0b\xf5\xf1\xd1\x01\xcb\x81\xd4\xe1\x90\x0a\x08\xf2\xf4\x75\x0c\x8a\x54\xf0\x76\x8d\x54\xd0\x80\x99\xa1\x03\xd3\xa1\xa9\xe0\xef\xa7\xe0\xec\xef\xe7\xe6\xe3\xe9\x1c\xa2\x10\xe4\x1a\xe0\xe3\xe8\xec\xca\xa5\x69\xcd\x05\x08\x00\x00\xff\xff\x67\x63\xbf\x36\x8c\x00\x00\x00")
func _1565249278_initial_schemaUpSqlBytes() ([]byte, error) {
return bindataRead(
__1565249278_initial_schemaUpSql,
"1565249278_initial_schema.up.sql",
)
}
func _1565249278_initial_schemaUpSql() (*asset, error) {
bytes, err := _1565249278_initial_schemaUpSqlBytes()
if err != nil {
return nil, err
}
info := bindataFileInfo{name: "1565249278_initial_schema.up.sql", size: 140, mode: os.FileMode(0644), modTime: time.Unix(1704726726, 0)}
a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x8a, 0xbc, 0x3a, 0x87, 0x12, 0x93, 0xeb, 0xb4, 0xcc, 0x42, 0x6e, 0xb2, 0x7d, 0xfa, 0x9a, 0xa8, 0x3f, 0xb, 0x6b, 0xa8, 0x2d, 0x8b, 0xde, 0x67, 0x2a, 0xa8, 0xa5, 0x42, 0xad, 0x27, 0x15, 0x7e}}
return a, nil
}
var _docGo = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x84\x8f\xbb\x6e\xc3\x30\x0c\x45\x77\x7f\xc5\x45\x96\x2c\xb5\xb4\x74\xea\xd6\xb1\x7b\x7f\x80\x91\x68\x89\x88\x1e\xae\x48\xe7\xf1\xf7\x85\xd3\x02\xcd\xd6\xf5\x00\xe7\xf0\xd2\x7b\x7c\x66\x51\x2c\x52\x18\xa2\x68\x1c\x58\x95\xc6\x1d\x27\x0e\xb4\x29\xe3\x90\xc4\xf2\x76\x72\xa1\x57\xaf\x46\xb6\xe9\x2c\xd5\x57\x49\x83\x8c\xfd\xe5\xf5\x30\x79\x8f\x40\xed\x68\xc8\xd4\x62\xe1\x47\x4b\xa1\x46\xc3\xa4\x25\x5c\xc5\x32\x08\xeb\xe0\x45\x6e\x0e\xef\x86\xc2\xa4\x06\xcb\x64\x47\x85\x65\x46\x20\xe5\x3d\xb3\xf4\x81\xd4\xe7\x93\xb4\x48\x46\x6e\x47\x1f\xcb\x13\xd9\x17\x06\x2a\x85\x23\x96\xd1\xeb\xc3\x55\xaa\x8c\x28\x83\x83\xf5\x71\x7f\x01\xa9\xb2\xa1\x51\x65\xdd\xfd\x4c\x17\x46\xeb\xbf\xe7\x41\x2d\xfe\xff\x11\xae\x7d\x9c\x15\xa4\xe0\xdb\xca\xc1\x38\xba\x69\x5a\x29\x9c\x29\x31\xf4\xab\x88\xf1\x34\x79\x9f\xfa\x5b\xe2\xc6\xbb\xf5\xbc\x71\x5e\xcf\x09\x3f\x35\xe9\x4d\x31\x77\x38\xe7\xff\x80\x4b\x1d\x6e\xfa\x0e\x00\x00\xff\xff\x9d\x60\x3d\x88\x79\x01\x00\x00")
func docGoBytes() ([]byte, error) {
return bindataRead(
_docGo,
"doc.go",
)
}
func docGo() (*asset, error) {
bytes, err := docGoBytes()
if err != nil {
return nil, err
}
info := bindataFileInfo{name: "doc.go", size: 377, mode: os.FileMode(0644), modTime: time.Unix(1704726726, 0)}
a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xef, 0xaf, 0xdf, 0xcf, 0x65, 0xae, 0x19, 0xfc, 0x9d, 0x29, 0xc1, 0x91, 0xaf, 0xb5, 0xd5, 0xb1, 0x56, 0xf3, 0xee, 0xa8, 0xba, 0x13, 0x65, 0xdb, 0xab, 0xcf, 0x4e, 0xac, 0x92, 0xe9, 0x60, 0xf1}}
return a, nil
}
// Asset loads and returns the asset for the given name.
// It returns an error if the asset could not be found or
// could not be loaded.
func Asset(name string) ([]byte, error) {
canonicalName := strings.Replace(name, "\\", "/", -1)
if f, ok := _bindata[canonicalName]; ok {
a, err := f()
if err != nil {
return nil, fmt.Errorf("Asset %s can't read by error: %v", name, err)
}
return a.bytes, nil
}
return nil, fmt.Errorf("Asset %s not found", name)
}
// AssetString returns the asset contents as a string (instead of a []byte).
func AssetString(name string) (string, error) {
data, err := Asset(name)
return string(data), err
}
// MustAsset is like Asset but panics when Asset would return an error.
// It simplifies safe initialization of global variables.
func MustAsset(name string) []byte {
a, err := Asset(name)
if err != nil {
panic("asset: Asset(" + name + "): " + err.Error())
}
return a
}
// MustAssetString is like AssetString but panics when Asset would return an
// error. It simplifies safe initialization of global variables.
func MustAssetString(name string) string {
return string(MustAsset(name))
}
// AssetInfo loads and returns the asset info for the given name.
// It returns an error if the asset could not be found or
// could not be loaded.
func AssetInfo(name string) (os.FileInfo, error) {
canonicalName := strings.Replace(name, "\\", "/", -1)
if f, ok := _bindata[canonicalName]; ok {
a, err := f()
if err != nil {
return nil, fmt.Errorf("AssetInfo %s can't read by error: %v", name, err)
}
return a.info, nil
}
return nil, fmt.Errorf("AssetInfo %s not found", name)
}
// AssetDigest returns the digest of the file with the given name. It returns an
// error if the asset could not be found or the digest could not be loaded.
func AssetDigest(name string) ([sha256.Size]byte, error) {
canonicalName := strings.Replace(name, "\\", "/", -1)
if f, ok := _bindata[canonicalName]; ok {
a, err := f()
if err != nil {
return [sha256.Size]byte{}, fmt.Errorf("AssetDigest %s can't be read: %v", name, err)
}
return a.digest, nil
}
return [sha256.Size]byte{}, fmt.Errorf("AssetDigest %s not found", name)
}
// Digests returns a map of all known files and their checksums.
func Digests() (map[string][sha256.Size]byte, error) {
mp := make(map[string][sha256.Size]byte, len(_bindata))
for name := range _bindata {
a, err := _bindata[name]()
if err != nil {
return nil, err
}
mp[name] = a.digest
}
return mp, nil
}
// AssetNames returns the names of the assets.
func AssetNames() []string {
names := make([]string, 0, len(_bindata))
for name := range _bindata {
names = append(names, name)
}
return names
}
// _bindata is a table, holding each asset generator, mapped to its name.
var _bindata = map[string]func() (*asset, error){
"1565249278_initial_schema.down.sql": _1565249278_initial_schemaDownSql,
"1565249278_initial_schema.up.sql": _1565249278_initial_schemaUpSql,
"doc.go": docGo,
}
// AssetDebug is true if the assets were built with the debug flag enabled.
const AssetDebug = false
// AssetDir returns the file names below a certain
// directory embedded in the file by go-bindata.
// For example if you run go-bindata on data/... and data contains the
// following hierarchy:
//
// data/
// foo.txt
// img/
// a.png
// b.png
//
// then AssetDir("data") would return []string{"foo.txt", "img"},
// AssetDir("data/img") would return []string{"a.png", "b.png"},
// AssetDir("foo.txt") and AssetDir("notexist") would return an error, and
// AssetDir("") will return []string{"data"}.
func AssetDir(name string) ([]string, error) {
node := _bintree
if len(name) != 0 {
canonicalName := strings.Replace(name, "\\", "/", -1)
pathList := strings.Split(canonicalName, "/")
for _, p := range pathList {
node = node.Children[p]
if node == nil {
return nil, fmt.Errorf("Asset %s not found", name)
}
}
}
if node.Func != nil {
return nil, fmt.Errorf("Asset %s not found", name)
}
rv := make([]string, 0, len(node.Children))
for childName := range node.Children {
rv = append(rv, childName)
}
return rv, nil
}
type bintree struct {
Func func() (*asset, error)
Children map[string]*bintree
}
var _bintree = &bintree{nil, map[string]*bintree{
"1565249278_initial_schema.down.sql": {_1565249278_initial_schemaDownSql, map[string]*bintree{}},
"1565249278_initial_schema.up.sql": {_1565249278_initial_schemaUpSql, map[string]*bintree{}},
"doc.go": {docGo, map[string]*bintree{}},
}}
// RestoreAsset restores an asset under the given directory.
func RestoreAsset(dir, name string) error {
data, err := Asset(name)
if err != nil {
return err
}
info, err := AssetInfo(name)
if err != nil {
return err
}
err = os.MkdirAll(_filePath(dir, filepath.Dir(name)), os.FileMode(0755))
if err != nil {
return err
}
err = os.WriteFile(_filePath(dir, name), data, info.Mode())
if err != nil {
return err
}
return os.Chtimes(_filePath(dir, name), info.ModTime(), info.ModTime())
}
// RestoreAssets restores an asset under the given directory recursively.
func RestoreAssets(dir, name string) error {
children, err := AssetDir(name)
// File
if err != nil {
return RestoreAsset(dir, name)
}
// Dir
for _, child := range children {
err = RestoreAssets(dir, filepath.Join(name, child))
if err != nil {
return err
}
}
return nil
}
func _filePath(dir, name string) string {
canonicalName := strings.Replace(name, "\\", "/", -1)
return filepath.Join(append([]string{dir}, strings.Split(canonicalName, "/")...)...)
}

11
vendor/github.com/status-im/mvds/peers/persistence.go generated vendored Normal file
View File

@@ -0,0 +1,11 @@
package peers
import (
"github.com/status-im/mvds/state"
)
type Persistence interface {
Add(state.GroupID, state.PeerID) error
GetByGroupID(group state.GroupID) ([]state.PeerID, error)
Exists(state.GroupID, state.PeerID) (bool, error)
}

View File

@@ -0,0 +1,32 @@
package peers
import "github.com/status-im/mvds/state"
type MemoryPersistence struct {
peers map[state.GroupID][]state.PeerID
}
func NewMemoryPersistence() *MemoryPersistence {
return &MemoryPersistence{
peers: make(map[state.GroupID][]state.PeerID),
}
}
func (p *MemoryPersistence) Add(groupID state.GroupID, peerID state.PeerID) error {
p.peers[groupID] = append(p.peers[groupID], peerID)
return nil
}
func (p *MemoryPersistence) Exists(groupID state.GroupID, peerID state.PeerID) (bool, error) {
for _, peer := range p.peers[groupID] {
if peer == peerID {
return true, nil
}
}
return false, nil
}
func (p *MemoryPersistence) GetByGroupID(groupID state.GroupID) ([]state.PeerID, error) {
return p.peers[groupID], nil
}

View File

@@ -0,0 +1,63 @@
package peers
import (
"database/sql"
"errors"
"github.com/status-im/mvds/state"
)
var (
ErrPeerNotFound = errors.New("peer not found")
)
type sqlitePersistence struct {
db *sql.DB
}
func NewSQLitePersistence(db *sql.DB) sqlitePersistence {
return sqlitePersistence{db: db}
}
func (p sqlitePersistence) Add(groupID state.GroupID, peerID state.PeerID) error {
_, err := p.db.Exec(`INSERT INTO mvds_peers (group_id, peer_id) VALUES (?, ?)`, groupID[:], peerID[:])
return err
}
func (p sqlitePersistence) Exists(groupID state.GroupID, peerID state.PeerID) (bool, error) {
var result bool
err := p.db.QueryRow(
`SELECT EXISTS(SELECT 1 FROM mvds_peers WHERE group_id = ? AND peer_id = ?)`,
groupID[:],
peerID[:],
).Scan(&result)
switch err {
case sql.ErrNoRows:
return false, ErrPeerNotFound
case nil:
return result, nil
default:
return false, err
}
}
func (p sqlitePersistence) GetByGroupID(groupID state.GroupID) (result []state.PeerID, err error) {
rows, err := p.db.Query(`SELECT peer_id FROM mvds_peers WHERE group_id = ?`, groupID[:])
if err != nil {
return nil, err
}
defer rows.Close()
for rows.Next() {
var (
peerIDBytes []byte
peerID state.PeerID
)
if err := rows.Scan(&peerIDBytes); err != nil {
return nil, err
}
copy(peerID[:], peerIDBytes)
result = append(result, peerID)
}
return
}

View File

@@ -0,0 +1,70 @@
package persistenceutil
import (
"github.com/pkg/errors"
"strings"
nodemigrations "github.com/status-im/mvds/node/migrations"
peersmigrations "github.com/status-im/mvds/peers/migrations"
statemigrations "github.com/status-im/mvds/state/migrations"
storemigrations "github.com/status-im/mvds/store/migrations"
)
type getter func(string) ([]byte, error)
type Migration struct {
Names []string
Getter func(name string) ([]byte, error)
}
func prepareMigrations(migrations []Migration) ([]string, getter, error) {
var allNames []string
nameToGetter := make(map[string]getter)
for _, m := range migrations {
for _, name := range m.Names {
if !validateName(name) {
continue
}
if _, ok := nameToGetter[name]; ok {
return nil, nil, errors.Errorf("migration with name %s already exists", name)
}
allNames = append(allNames, name)
nameToGetter[name] = m.Getter
}
}
return allNames, func(name string) ([]byte, error) {
getter, ok := nameToGetter[name]
if !ok {
return nil, errors.Errorf("no migration for name %s", name)
}
return getter(name)
}, nil
}
// DefaultMigrations is a collection of all mvds components migrations.
var DefaultMigrations = []Migration{
{
Names: nodemigrations.AssetNames(),
Getter: nodemigrations.Asset,
},
{
Names: peersmigrations.AssetNames(),
Getter: peersmigrations.Asset,
},
{
Names: statemigrations.AssetNames(),
Getter: statemigrations.Asset,
},
{
Names: storemigrations.AssetNames(),
Getter: storemigrations.Asset,
},
}
// validateName verifies that only *.sql files are taken into consideration.
func validateName(name string) bool {
return strings.HasSuffix(name, ".sql")
}

View File

@@ -0,0 +1,124 @@
package persistenceutil
import (
"database/sql"
"fmt"
"os"
"github.com/pkg/errors"
"github.com/status-im/migrate/v4"
"github.com/status-im/migrate/v4/database/sqlcipher"
bindata "github.com/status-im/migrate/v4/source/go_bindata"
)
// reducedKdfIterationsNumber is the reduced number of kdf iterations (for
// performance reasons) currently used for derivation of the database key:
// https://github.com/status-im/status-go/pull/1343
// https://notes.status.im/i8Y_l7ccTiOYq09HVgoFwA
const reducedKdfIterationsNumber = 3200
// MigrationConfig is a struct that allows to define bindata migrations.
type MigrationConfig struct {
AssetNames []string
AssetGetter func(name string) ([]byte, error)
}
// Migrate migrates a provided sqldb
func Migrate(db *sql.DB) error {
assetNames, assetGetter, err := prepareMigrations(DefaultMigrations)
if err != nil {
return err
}
return ApplyMigrations(db, assetNames, assetGetter)
}
// Open opens or initializes a new database for a given file path.
// MigrationConfig is optional; if provided, migrations are applied automatically.
func Open(path, key string, mc ...MigrationConfig) (*sql.DB, error) {
return open(path, key, reducedKdfIterationsNumber, mc)
}
// OpenWithIter opens a new database with a custom number of kdf iterations.
// A higher kdf iterations number makes the database slower to open.
func OpenWithIter(path, key string, kdfIter int, mc ...MigrationConfig) (*sql.DB, error) {
return open(path, key, kdfIter, mc)
}
func open(path string, key string, kdfIter int, configs []MigrationConfig) (*sql.DB, error) {
// Ensure the database file exists; close the handle immediately since
// sql.Open manages its own descriptors.
f, err := os.OpenFile(path, os.O_CREATE, 0644)
if err != nil {
return nil, err
}
if err := f.Close(); err != nil {
return nil, err
}
db, err := sql.Open("sqlite3", path)
if err != nil {
return nil, err
}
keyString := fmt.Sprintf("PRAGMA key = '%s'", key)
// Disable concurrent access as not supported by the driver
db.SetMaxOpenConns(1)
if _, err = db.Exec("PRAGMA foreign_keys=ON"); err != nil {
return nil, err
}
if _, err = db.Exec(keyString); err != nil {
return nil, err
}
kdfString := fmt.Sprintf("PRAGMA kdf_iter = '%d'", kdfIter)
if _, err = db.Exec(kdfString); err != nil {
return nil, err
}
// Apply all provided migrations.
for _, mc := range configs {
if err := ApplyMigrations(db, mc.AssetNames, mc.AssetGetter); err != nil {
return nil, err
}
}
return db, nil
}
// ApplyMigrations applies bindata migrations to the given *sql.DB.
// `assetNames` is a list of assets with migrations and `assetGetter` is responsible
// for returning the content of the asset with a given name.
func ApplyMigrations(db *sql.DB, assetNames []string, assetGetter func(name string) ([]byte, error)) error {
resources := bindata.Resource(
assetNames,
assetGetter,
)
source, err := bindata.WithInstance(resources)
if err != nil {
return errors.Wrap(err, "failed to create migration source")
}
driver, err := sqlcipher.WithInstance(db, &sqlcipher.Config{
MigrationsTable: "mvds_" + sqlcipher.DefaultMigrationsTable,
})
if err != nil {
return errors.Wrap(err, "failed to create driver")
}
m, err := migrate.NewWithInstance(
"go-bindata",
source,
"sqlcipher",
driver,
)
if err != nil {
return errors.Wrap(err, "failed to create migration instance")
}
// migrate.ErrNoChange is benign, and errors.Wrap(nil, ...) returns nil,
// so a clean Up also falls through to a nil return here.
if err = m.Up(); err != migrate.ErrNoChange {
return errors.Wrap(err, "failed to migrate")
}
return nil
}
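
Putting the helpers together, a hypothetical sketch of opening an encrypted store and applying the default mvds migrations. The import path, file path and key are assumptions, and an sqlcipher-capable "sqlite3" driver must be registered by the build:
```go
package main

import (
	persistenceutil "github.com/status-im/mvds/persistenceutil"
)

func main() {
	// Open creates the file if needed and applies the key/kdf PRAGMAs.
	db, err := persistenceutil.Open("/tmp/mvds.db", "s3cret-key")
	if err != nil {
		panic(err)
	}
	defer db.Close()

	// Apply the node, peers, state and store migrations in one pass.
	if err := persistenceutil.Migrate(db); err != nil {
		panic(err)
	}
}
```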

20
vendor/github.com/status-im/mvds/protobuf/messageid.go generated vendored Normal file
View File

@@ -0,0 +1,20 @@
package protobuf
import (
"crypto/sha256"
"encoding/binary"
"github.com/status-im/mvds/state"
)
// ID creates the MessageID for a Message
func (m Message) ID() state.MessageID {
t := make([]byte, 8)
binary.LittleEndian.PutUint64(t, uint64(m.Timestamp))
b := append([]byte("MESSAGE_ID"), m.GroupId[:]...)
b = append(b, t...)
b = append(b, m.Body...)
return sha256.Sum256(b)
}

7
vendor/github.com/status-im/mvds/protobuf/payload.go generated vendored Normal file
View File

@@ -0,0 +1,7 @@
package protobuf
// IsValid checks whether there are any known fields in the protobuf
// message.
func (m *Payload) IsValid() bool {
return len(m.Messages)+len(m.Acks)+len(m.Offers)+len(m.Requests)+len(m.GroupOffers) != 0
}

3
vendor/github.com/status-im/mvds/protobuf/service.go generated vendored Normal file
View File

@@ -0,0 +1,3 @@
package protobuf
//go:generate protoc --go_out=. ./sync.proto

338
vendor/github.com/status-im/mvds/protobuf/sync.pb.go generated vendored Normal file
View File

@@ -0,0 +1,338 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.29.1
// protoc v3.21.12
// source: sync.proto
package protobuf
import (
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
reflect "reflect"
sync "sync"
)
const (
// Verify that this generated code is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
// Verify that runtime/protoimpl is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
)
type Offer struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
GroupId []byte `protobuf:"bytes,1,opt,name=group_id,json=groupId,proto3" json:"group_id,omitempty"`
MessageIds [][]byte `protobuf:"bytes,2,rep,name=message_ids,json=messageIds,proto3" json:"message_ids,omitempty"`
}
func (x *Offer) Reset() {
*x = Offer{}
if protoimpl.UnsafeEnabled {
mi := &file_sync_proto_msgTypes[0]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *Offer) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*Offer) ProtoMessage() {}
func (x *Offer) ProtoReflect() protoreflect.Message {
mi := &file_sync_proto_msgTypes[0]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use Offer.ProtoReflect.Descriptor instead.
func (*Offer) Descriptor() ([]byte, []int) {
return file_sync_proto_rawDescGZIP(), []int{0}
}
func (x *Offer) GetGroupId() []byte {
if x != nil {
return x.GroupId
}
return nil
}
func (x *Offer) GetMessageIds() [][]byte {
if x != nil {
return x.MessageIds
}
return nil
}
type Payload struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
Acks [][]byte `protobuf:"bytes,5001,rep,name=acks,proto3" json:"acks,omitempty"`
Offers [][]byte `protobuf:"bytes,5002,rep,name=offers,proto3" json:"offers,omitempty"`
Requests [][]byte `protobuf:"bytes,5003,rep,name=requests,proto3" json:"requests,omitempty"`
Messages []*Message `protobuf:"bytes,5004,rep,name=messages,proto3" json:"messages,omitempty"`
GroupOffers []*Offer `protobuf:"bytes,5005,rep,name=group_offers,json=groupOffers,proto3" json:"group_offers,omitempty"`
}
func (x *Payload) Reset() {
*x = Payload{}
if protoimpl.UnsafeEnabled {
mi := &file_sync_proto_msgTypes[1]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *Payload) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*Payload) ProtoMessage() {}
func (x *Payload) ProtoReflect() protoreflect.Message {
mi := &file_sync_proto_msgTypes[1]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use Payload.ProtoReflect.Descriptor instead.
func (*Payload) Descriptor() ([]byte, []int) {
return file_sync_proto_rawDescGZIP(), []int{1}
}
func (x *Payload) GetAcks() [][]byte {
if x != nil {
return x.Acks
}
return nil
}
func (x *Payload) GetOffers() [][]byte {
if x != nil {
return x.Offers
}
return nil
}
func (x *Payload) GetRequests() [][]byte {
if x != nil {
return x.Requests
}
return nil
}
func (x *Payload) GetMessages() []*Message {
if x != nil {
return x.Messages
}
return nil
}
func (x *Payload) GetGroupOffers() []*Offer {
if x != nil {
return x.GroupOffers
}
return nil
}
type Message struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
GroupId []byte `protobuf:"bytes,6001,opt,name=group_id,json=groupId,proto3" json:"group_id,omitempty"`
Timestamp int64 `protobuf:"varint,6002,opt,name=timestamp,proto3" json:"timestamp,omitempty"`
Body []byte `protobuf:"bytes,6003,opt,name=body,proto3" json:"body,omitempty"`
}
func (x *Message) Reset() {
*x = Message{}
if protoimpl.UnsafeEnabled {
mi := &file_sync_proto_msgTypes[2]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *Message) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*Message) ProtoMessage() {}
func (x *Message) ProtoReflect() protoreflect.Message {
mi := &file_sync_proto_msgTypes[2]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use Message.ProtoReflect.Descriptor instead.
func (*Message) Descriptor() ([]byte, []int) {
return file_sync_proto_rawDescGZIP(), []int{2}
}
func (x *Message) GetGroupId() []byte {
if x != nil {
return x.GroupId
}
return nil
}
func (x *Message) GetTimestamp() int64 {
if x != nil {
return x.Timestamp
}
return 0
}
func (x *Message) GetBody() []byte {
if x != nil {
return x.Body
}
return nil
}
var File_sync_proto protoreflect.FileDescriptor
var file_sync_proto_rawDesc = []byte{
0x0a, 0x0a, 0x73, 0x79, 0x6e, 0x63, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x04, 0x6d, 0x76,
0x64, 0x73, 0x22, 0x43, 0x0a, 0x05, 0x4f, 0x66, 0x66, 0x65, 0x72, 0x12, 0x19, 0x0a, 0x08, 0x67,
0x72, 0x6f, 0x75, 0x70, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x67,
0x72, 0x6f, 0x75, 0x70, 0x49, 0x64, 0x12, 0x1f, 0x0a, 0x0b, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67,
0x65, 0x5f, 0x69, 0x64, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0c, 0x52, 0x0a, 0x6d, 0x65, 0x73,
0x73, 0x61, 0x67, 0x65, 0x49, 0x64, 0x73, 0x22, 0xb1, 0x01, 0x0a, 0x07, 0x50, 0x61, 0x79, 0x6c,
0x6f, 0x61, 0x64, 0x12, 0x13, 0x0a, 0x04, 0x61, 0x63, 0x6b, 0x73, 0x18, 0x89, 0x27, 0x20, 0x03,
0x28, 0x0c, 0x52, 0x04, 0x61, 0x63, 0x6b, 0x73, 0x12, 0x17, 0x0a, 0x06, 0x6f, 0x66, 0x66, 0x65,
0x72, 0x73, 0x18, 0x8a, 0x27, 0x20, 0x03, 0x28, 0x0c, 0x52, 0x06, 0x6f, 0x66, 0x66, 0x65, 0x72,
0x73, 0x12, 0x1b, 0x0a, 0x08, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x73, 0x18, 0x8b, 0x27,
0x20, 0x03, 0x28, 0x0c, 0x52, 0x08, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x73, 0x12, 0x2a,
0x0a, 0x08, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x73, 0x18, 0x8c, 0x27, 0x20, 0x03, 0x28,
0x0b, 0x32, 0x0d, 0x2e, 0x6d, 0x76, 0x64, 0x73, 0x2e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65,
0x52, 0x08, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x73, 0x12, 0x2f, 0x0a, 0x0c, 0x67, 0x72,
0x6f, 0x75, 0x70, 0x5f, 0x6f, 0x66, 0x66, 0x65, 0x72, 0x73, 0x18, 0x8d, 0x27, 0x20, 0x03, 0x28,
0x0b, 0x32, 0x0b, 0x2e, 0x6d, 0x76, 0x64, 0x73, 0x2e, 0x4f, 0x66, 0x66, 0x65, 0x72, 0x52, 0x0b,
0x67, 0x72, 0x6f, 0x75, 0x70, 0x4f, 0x66, 0x66, 0x65, 0x72, 0x73, 0x22, 0x59, 0x0a, 0x07, 0x4d,
0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x5f,
0x69, 0x64, 0x18, 0xf1, 0x2e, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x67, 0x72, 0x6f, 0x75, 0x70,
0x49, 0x64, 0x12, 0x1d, 0x0a, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x18,
0xf2, 0x2e, 0x20, 0x01, 0x28, 0x03, 0x52, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d,
0x70, 0x12, 0x13, 0x0a, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x18, 0xf3, 0x2e, 0x20, 0x01, 0x28, 0x0c,
0x52, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x42, 0x0d, 0x5a, 0x0b, 0x2e, 0x2f, 0x3b, 0x70, 0x72, 0x6f,
0x74, 0x6f, 0x62, 0x75, 0x66, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
}
var (
file_sync_proto_rawDescOnce sync.Once
file_sync_proto_rawDescData = file_sync_proto_rawDesc
)
func file_sync_proto_rawDescGZIP() []byte {
file_sync_proto_rawDescOnce.Do(func() {
file_sync_proto_rawDescData = protoimpl.X.CompressGZIP(file_sync_proto_rawDescData)
})
return file_sync_proto_rawDescData
}
var file_sync_proto_msgTypes = make([]protoimpl.MessageInfo, 3)
var file_sync_proto_goTypes = []interface{}{
(*Offer)(nil), // 0: mvds.Offer
(*Payload)(nil), // 1: mvds.Payload
(*Message)(nil), // 2: mvds.Message
}
var file_sync_proto_depIdxs = []int32{
2, // 0: mvds.Payload.messages:type_name -> mvds.Message
0, // 1: mvds.Payload.group_offers:type_name -> mvds.Offer
2, // [2:2] is the sub-list for method output_type
2, // [2:2] is the sub-list for method input_type
2, // [2:2] is the sub-list for extension type_name
2, // [2:2] is the sub-list for extension extendee
0, // [0:2] is the sub-list for field type_name
}
func init() { file_sync_proto_init() }
func file_sync_proto_init() {
if File_sync_proto != nil {
return
}
if !protoimpl.UnsafeEnabled {
file_sync_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*Offer); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
file_sync_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*Payload); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
file_sync_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*Message); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
}
type x struct{}
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
RawDescriptor: file_sync_proto_rawDesc,
NumEnums: 0,
NumMessages: 3,
NumExtensions: 0,
NumServices: 0,
},
GoTypes: file_sync_proto_goTypes,
DependencyIndexes: file_sync_proto_depIdxs,
MessageInfos: file_sync_proto_msgTypes,
}.Build()
File_sync_proto = out.File
file_sync_proto_rawDesc = nil
file_sync_proto_goTypes = nil
file_sync_proto_depIdxs = nil
}
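
A minimal round-trip sketch (not part of this diff), assuming the vendored import path github.com/status-im/mvds/protobuf and the standard google.golang.org/protobuf runtime that the generated code above already depends on:

package main

import (
	"fmt"

	"github.com/status-im/mvds/protobuf"
	"google.golang.org/protobuf/proto"
)

func main() {
	// Build a Payload from the generated struct types.
	p := &protobuf.Payload{
		Acks: [][]byte{{0xde, 0xad}},
		Messages: []*protobuf.Message{
			{GroupId: []byte("group-1"), Timestamp: 1699790978, Body: []byte("hello")},
		},
	}

	raw, err := proto.Marshal(p) // encodes with the field numbers declared in sync.proto
	if err != nil {
		panic(err)
	}

	var decoded protobuf.Payload
	if err := proto.Unmarshal(raw, &decoded); err != nil {
		panic(err)
	}
	fmt.Printf("acks=%d messages=%d\n", len(decoded.GetAcks()), len(decoded.GetMessages()))
}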

23
vendor/github.com/status-im/mvds/protobuf/sync.proto generated vendored Normal file
View File

@@ -0,0 +1,23 @@
syntax = "proto3";
package mvds;
option go_package = "./;protobuf";
message Offer {
bytes group_id = 1;
repeated bytes message_ids = 2;
}
message Payload {
repeated bytes acks = 5001;
repeated bytes offers = 5002;
repeated bytes requests = 5003;
repeated Message messages = 5004;
repeated Offer group_offers = 5005;
}
message Message {
bytes group_id = 6001;
int64 timestamp = 6002;
bytes body = 6003;
}

View File

@@ -0,0 +1,321 @@
// Code generated by go-bindata. DO NOT EDIT.
// sources:
// 1565341329_initial_schema.down.sql (24B)
// 1565341329_initial_schema.up.sql (294B)
// doc.go (377B)
package migrations
import (
"bytes"
"compress/gzip"
"crypto/sha256"
"fmt"
"io"
"os"
"path/filepath"
"strings"
"time"
)
func bindataRead(data []byte, name string) ([]byte, error) {
gz, err := gzip.NewReader(bytes.NewBuffer(data))
if err != nil {
return nil, fmt.Errorf("read %q: %w", name, err)
}
var buf bytes.Buffer
_, err = io.Copy(&buf, gz)
clErr := gz.Close()
if err != nil {
return nil, fmt.Errorf("read %q: %w", name, err)
}
if clErr != nil {
return nil, clErr // surface the gzip Close error; err is nil on this path
}
return buf.Bytes(), nil
}
type asset struct {
bytes []byte
info os.FileInfo
digest [sha256.Size]byte
}
type bindataFileInfo struct {
name string
size int64
mode os.FileMode
modTime time.Time
}
func (fi bindataFileInfo) Name() string {
return fi.name
}
func (fi bindataFileInfo) Size() int64 {
return fi.size
}
func (fi bindataFileInfo) Mode() os.FileMode {
return fi.mode
}
func (fi bindataFileInfo) ModTime() time.Time {
return fi.modTime
}
func (fi bindataFileInfo) IsDir() bool {
return false
}
func (fi bindataFileInfo) Sys() interface{} {
return nil
}
var __1565341329_initial_schemaDownSql = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x72\x09\xf2\x0f\x50\x08\x71\x74\xf2\x71\x55\xc8\x2d\x4b\x29\x8e\x2f\x2e\x49\x2c\x49\x2d\xb6\xe6\x02\x04\x00\x00\xff\xff\xb7\x43\xc1\xc1\x18\x00\x00\x00")
func _1565341329_initial_schemaDownSqlBytes() ([]byte, error) {
return bindataRead(
__1565341329_initial_schemaDownSql,
"1565341329_initial_schema.down.sql",
)
}
func _1565341329_initial_schemaDownSql() (*asset, error) {
bytes, err := _1565341329_initial_schemaDownSqlBytes()
if err != nil {
return nil, err
}
info := bindataFileInfo{name: "1565341329_initial_schema.down.sql", size: 24, mode: os.FileMode(0644), modTime: time.Unix(1704726726, 0)}
a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x20, 0x56, 0x1a, 0x0, 0xc5, 0x81, 0xb3, 0xeb, 0x2a, 0xae, 0xed, 0xbb, 0x68, 0x51, 0x68, 0xc7, 0xe3, 0x31, 0xe, 0x1, 0x3e, 0xd2, 0x85, 0x9e, 0x6d, 0x55, 0xad, 0x55, 0xd6, 0x2f, 0x29, 0xca}}
return a, nil
}
var __1565341329_initial_schemaUpSql = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x7c\x8f\xc1\x8a\x83\x30\x14\x45\xf7\xf9\x8a\xbb\x54\xf0\x0f\x5c\xe9\x4c\x18\x64\xd2\x58\x42\x0a\x75\x15\xc4\x3c\xac\x0b\x35\x98\x58\xda\xbf\x2f\xb4\x15\xa5\x60\xb7\xf7\x1c\x1e\xef\xfc\x28\x9e\x69\x0e\x9d\xe5\x82\xa3\xbf\x5a\x6f\x7c\xa8\x03\x79\x44\x0c\x00\xc2\xdd\x11\x0a\xa9\xf9\x1f\x57\x90\xa5\x86\x3c\x09\x91\x3c\x91\xa7\xc1\x9a\x66\x9c\x87\xf0\x4d\x20\x37\x36\x97\x1d\xa1\x9d\xc6\xd9\x99\xce\x22\x17\x65\xfe\x9a\x1c\xd1\xb4\x2c\x1f\x76\x4f\xde\xd7\x2d\xed\xd0\xa3\x2a\x0e\x99\xaa\xf0\xcf\x2b\x44\xab\x9a\x2c\x17\x63\x16\xa7\x8c\xbd\x6b\x0b\xf9\xcb\xcf\xe8\xec\xcd\x6c\x7e\x2c\xe5\xb6\x3f\x5a\x49\x9c\xb2\x47\x00\x00\x00\xff\xff\x5e\xe5\x72\x74\x26\x01\x00\x00")
func _1565341329_initial_schemaUpSqlBytes() ([]byte, error) {
return bindataRead(
__1565341329_initial_schemaUpSql,
"1565341329_initial_schema.up.sql",
)
}
func _1565341329_initial_schemaUpSql() (*asset, error) {
bytes, err := _1565341329_initial_schemaUpSqlBytes()
if err != nil {
return nil, err
}
info := bindataFileInfo{name: "1565341329_initial_schema.up.sql", size: 294, mode: os.FileMode(0644), modTime: time.Unix(1704726726, 0)}
a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x3e, 0xa5, 0x37, 0x9d, 0x3f, 0xf3, 0xc9, 0xc8, 0x12, 0x74, 0x79, 0x74, 0xff, 0xfd, 0xb1, 0x5f, 0x13, 0xaf, 0xf2, 0x50, 0x14, 0x9f, 0xdf, 0xc8, 0xc5, 0xa7, 0xc3, 0xf5, 0xa4, 0x8e, 0x8a, 0xf6}}
return a, nil
}
var _docGo = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x84\x8f\xbb\x6e\xc3\x30\x0c\x45\x77\x7f\xc5\x45\x96\x2c\xb5\xb4\x74\xea\xd6\xb1\x7b\x7f\x80\x91\x68\x89\x88\x1e\xae\x48\xe7\xf1\xf7\x85\xd3\x02\xcd\xd6\xf5\x00\xe7\xf0\xd2\x7b\x7c\x66\x51\x2c\x52\x18\xa2\x68\x1c\x58\x95\xc6\x1d\x27\x0e\xb4\x29\xe3\x90\xc4\xf2\x76\x72\xa1\x57\xaf\x46\xb6\xe9\x2c\xd5\x57\x49\x83\x8c\xfd\xe5\xf5\x30\x79\x8f\x40\xed\x68\xc8\xd4\x62\xe1\x47\x4b\xa1\x46\xc3\xa4\x25\x5c\xc5\x32\x08\xeb\xe0\x45\x6e\x0e\xef\x86\xc2\xa4\x06\xcb\x64\x47\x85\x65\x46\x20\xe5\x3d\xb3\xf4\x81\xd4\xe7\x93\xb4\x48\x46\x6e\x47\x1f\xcb\x13\xd9\x17\x06\x2a\x85\x23\x96\xd1\xeb\xc3\x55\xaa\x8c\x28\x83\x83\xf5\x71\x7f\x01\xa9\xb2\xa1\x51\x65\xdd\xfd\x4c\x17\x46\xeb\xbf\xe7\x41\x2d\xfe\xff\x11\xae\x7d\x9c\x15\xa4\xe0\xdb\xca\xc1\x38\xba\x69\x5a\x29\x9c\x29\x31\xf4\xab\x88\xf1\x34\x79\x9f\xfa\x5b\xe2\xc6\xbb\xf5\xbc\x71\x5e\xcf\x09\x3f\x35\xe9\x4d\x31\x77\x38\xe7\xff\x80\x4b\x1d\x6e\xfa\x0e\x00\x00\xff\xff\x9d\x60\x3d\x88\x79\x01\x00\x00")
func docGoBytes() ([]byte, error) {
return bindataRead(
_docGo,
"doc.go",
)
}
func docGo() (*asset, error) {
bytes, err := docGoBytes()
if err != nil {
return nil, err
}
info := bindataFileInfo{name: "doc.go", size: 377, mode: os.FileMode(0644), modTime: time.Unix(1704726726, 0)}
a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xef, 0xaf, 0xdf, 0xcf, 0x65, 0xae, 0x19, 0xfc, 0x9d, 0x29, 0xc1, 0x91, 0xaf, 0xb5, 0xd5, 0xb1, 0x56, 0xf3, 0xee, 0xa8, 0xba, 0x13, 0x65, 0xdb, 0xab, 0xcf, 0x4e, 0xac, 0x92, 0xe9, 0x60, 0xf1}}
return a, nil
}
// Asset loads and returns the asset for the given name.
// It returns an error if the asset could not be found or
// could not be loaded.
func Asset(name string) ([]byte, error) {
canonicalName := strings.Replace(name, "\\", "/", -1)
if f, ok := _bindata[canonicalName]; ok {
a, err := f()
if err != nil {
return nil, fmt.Errorf("Asset %s can't read by error: %v", name, err)
}
return a.bytes, nil
}
return nil, fmt.Errorf("Asset %s not found", name)
}
// AssetString returns the asset contents as a string (instead of a []byte).
func AssetString(name string) (string, error) {
data, err := Asset(name)
return string(data), err
}
// MustAsset is like Asset but panics when Asset would return an error.
// It simplifies safe initialization of global variables.
func MustAsset(name string) []byte {
a, err := Asset(name)
if err != nil {
panic("asset: Asset(" + name + "): " + err.Error())
}
return a
}
// MustAssetString is like AssetString but panics when Asset would return an
// error. It simplifies safe initialization of global variables.
func MustAssetString(name string) string {
return string(MustAsset(name))
}
// AssetInfo loads and returns the asset info for the given name.
// It returns an error if the asset could not be found or
// could not be loaded.
func AssetInfo(name string) (os.FileInfo, error) {
canonicalName := strings.Replace(name, "\\", "/", -1)
if f, ok := _bindata[canonicalName]; ok {
a, err := f()
if err != nil {
return nil, fmt.Errorf("AssetInfo %s can't read by error: %v", name, err)
}
return a.info, nil
}
return nil, fmt.Errorf("AssetInfo %s not found", name)
}
// AssetDigest returns the digest of the file with the given name. It returns an
// error if the asset could not be found or the digest could not be loaded.
func AssetDigest(name string) ([sha256.Size]byte, error) {
canonicalName := strings.Replace(name, "\\", "/", -1)
if f, ok := _bindata[canonicalName]; ok {
a, err := f()
if err != nil {
return [sha256.Size]byte{}, fmt.Errorf("AssetDigest %s can't read by error: %v", name, err)
}
return a.digest, nil
}
return [sha256.Size]byte{}, fmt.Errorf("AssetDigest %s not found", name)
}
// Digests returns a map of all known files and their checksums.
func Digests() (map[string][sha256.Size]byte, error) {
mp := make(map[string][sha256.Size]byte, len(_bindata))
for name := range _bindata {
a, err := _bindata[name]()
if err != nil {
return nil, err
}
mp[name] = a.digest
}
return mp, nil
}
// AssetNames returns the names of the assets.
func AssetNames() []string {
names := make([]string, 0, len(_bindata))
for name := range _bindata {
names = append(names, name)
}
return names
}
// _bindata is a table, holding each asset generator, mapped to its name.
var _bindata = map[string]func() (*asset, error){
"1565341329_initial_schema.down.sql": _1565341329_initial_schemaDownSql,
"1565341329_initial_schema.up.sql": _1565341329_initial_schemaUpSql,
"doc.go": docGo,
}
// AssetDebug is true if the assets were built with the debug flag enabled.
const AssetDebug = false
// AssetDir returns the file names below a certain
// directory embedded in the file by go-bindata.
// For example if you run go-bindata on data/... and data contains the
// following hierarchy:
//
// data/
// foo.txt
// img/
// a.png
// b.png
//
// then AssetDir("data") would return []string{"foo.txt", "img"},
// AssetDir("data/img") would return []string{"a.png", "b.png"},
// AssetDir("foo.txt") and AssetDir("notexist") would return an error, and
// AssetDir("") will return []string{"data"}.
func AssetDir(name string) ([]string, error) {
node := _bintree
if len(name) != 0 {
canonicalName := strings.Replace(name, "\\", "/", -1)
pathList := strings.Split(canonicalName, "/")
for _, p := range pathList {
node = node.Children[p]
if node == nil {
return nil, fmt.Errorf("Asset %s not found", name)
}
}
}
if node.Func != nil {
return nil, fmt.Errorf("Asset %s not found", name)
}
rv := make([]string, 0, len(node.Children))
for childName := range node.Children {
rv = append(rv, childName)
}
return rv, nil
}
type bintree struct {
Func func() (*asset, error)
Children map[string]*bintree
}
var _bintree = &bintree{nil, map[string]*bintree{
"1565341329_initial_schema.down.sql": {_1565341329_initial_schemaDownSql, map[string]*bintree{}},
"1565341329_initial_schema.up.sql": {_1565341329_initial_schemaUpSql, map[string]*bintree{}},
"doc.go": {docGo, map[string]*bintree{}},
}}
// RestoreAsset restores an asset under the given directory.
func RestoreAsset(dir, name string) error {
data, err := Asset(name)
if err != nil {
return err
}
info, err := AssetInfo(name)
if err != nil {
return err
}
err = os.MkdirAll(_filePath(dir, filepath.Dir(name)), os.FileMode(0755))
if err != nil {
return err
}
err = os.WriteFile(_filePath(dir, name), data, info.Mode())
if err != nil {
return err
}
return os.Chtimes(_filePath(dir, name), info.ModTime(), info.ModTime())
}
// RestoreAssets restores an asset under the given directory recursively.
func RestoreAssets(dir, name string) error {
children, err := AssetDir(name)
// File
if err != nil {
return RestoreAsset(dir, name)
}
// Dir
for _, child := range children {
err = RestoreAssets(dir, filepath.Join(name, child))
if err != nil {
return err
}
}
return nil
}
func _filePath(dir, name string) string {
canonicalName := strings.Replace(name, "\\", "/", -1)
return filepath.Join(append([]string{dir}, strings.Split(canonicalName, "/")...)...)
}
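
A small test sketch (hypothetical; written in the same package so no import path for the generated file has to be guessed) that exercises the accessors above and verifies each embedded asset against its recorded digest:

package migrations

import (
	"crypto/sha256"
	"testing"
)

func TestEmbeddedAssets(t *testing.T) {
	for _, name := range AssetNames() {
		data, err := Asset(name)
		if err != nil {
			t.Fatalf("Asset(%q): %v", name, err)
		}
		want, err := AssetDigest(name)
		if err != nil {
			t.Fatalf("AssetDigest(%q): %v", name, err)
		}
		if sha256.Sum256(data) != want {
			t.Errorf("digest mismatch for %q", name)
		}
	}
}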

4
vendor/github.com/status-im/mvds/state/peerid.go generated vendored Normal file
View File

@@ -0,0 +1,4 @@
package state
// PeerID is the ID for a specific peer.
type PeerID [65]byte

29
vendor/github.com/status-im/mvds/state/state.go generated vendored Normal file
View File

@@ -0,0 +1,29 @@
// Package state contains everything related to the synchronization state for MVDS.
package state
// RecordType is the type for a specific record, either `OFFER`, `REQUEST` or `MESSAGE`.
type RecordType int
const (
OFFER RecordType = iota
REQUEST
MESSAGE
)
// State is a struct used to store a record's [state](https://github.com/status-im/bigbrother-specs/blob/master/data_sync/mvds.md#state).
type State struct {
Type RecordType
SendCount uint64
SendEpoch int64
// GroupID is optional, thus nullable
GroupID *GroupID
PeerID PeerID
MessageID MessageID
}
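// SyncState is the storage interface for State records; this diff ships an
// in-memory and a SQLite-backed implementation.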
type SyncState interface {
Add(newState State) error
Remove(id MessageID, peer PeerID) error
All(epoch int64) ([]State, error)
Map(epoch int64, process func(State) State) error
}

61
vendor/github.com/status-im/mvds/state/state_memory.go generated vendored Normal file
View File

@@ -0,0 +1,61 @@
package state
import (
"sync"
)
type memorySyncState struct {
sync.Mutex
state []State
}
func NewSyncState() *memorySyncState {
return &memorySyncState{}
}
func (s *memorySyncState) Add(newState State) error {
s.Lock()
defer s.Unlock()
s.state = append(s.state, newState)
return nil
}
func (s *memorySyncState) Remove(id MessageID, peer PeerID) error {
s.Lock()
defer s.Unlock()
var newState []State
for _, state := range s.state {
if state.MessageID != id || state.PeerID != peer {
newState = append(newState, state)
}
}
s.state = newState
return nil
}
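// All returns every stored state; the in-memory implementation ignores the
// epoch argument (the SQLite version filters on send_epoch <= epoch).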
func (s *memorySyncState) All(_ int64) ([]State, error) {
s.Lock()
defer s.Unlock()
return s.state, nil
}
func (s *memorySyncState) Map(epoch int64, process func(State) State) error {
s.Lock()
defer s.Unlock()
for i, state := range s.state {
if state.SendEpoch > epoch {
continue
}
s.state[i] = process(state)
}
return nil
}
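
A usage sketch for the in-memory store (illustrative only, assuming the vendored import path github.com/status-im/mvds/state): Map visits records whose SendEpoch is due and replaces them with whatever the callback returns.

package main

import (
	"fmt"

	"github.com/status-im/mvds/state"
)

func main() {
	s := state.NewSyncState()

	_ = s.Add(state.State{
		Type:      state.OFFER,
		SendEpoch: 3,
		MessageID: state.MessageID{0x01},
	})

	// At epoch 5 the record is due: bump its send count and push it back.
	_ = s.Map(5, func(st state.State) state.State {
		st.SendCount++
		st.SendEpoch = 10
		return st
	})

	all, _ := s.All(5) // the memory store returns everything regardless of epoch
	fmt.Println(all[0].SendCount, all[0].SendEpoch) // 1 10
}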

166
vendor/github.com/status-im/mvds/state/state_sqlite.go generated vendored Normal file
View File

@@ -0,0 +1,166 @@
package state
import (
"database/sql"
"errors"
"log"
)
var (
ErrStateNotFound = errors.New("state not found")
)
// Verify that the SyncState interface is implemented.
var _ SyncState = (*sqliteSyncState)(nil)
type sqliteSyncState struct {
db *sql.DB
}
func NewPersistentSyncState(db *sql.DB) *sqliteSyncState {
return &sqliteSyncState{db: db}
}
func (p *sqliteSyncState) Add(newState State) error {
var groupIDBytes []byte
if newState.GroupID != nil {
groupIDBytes = newState.GroupID[:]
}
_, err := p.db.Exec(`
INSERT INTO mvds_states
(type, send_count, send_epoch, group_id, peer_id, message_id)
VALUES
(?, ?, ?, ?, ?, ?)`,
newState.Type,
newState.SendCount,
newState.SendEpoch,
groupIDBytes,
newState.PeerID[:],
newState.MessageID[:],
)
return err
}
func (p *sqliteSyncState) Remove(messageID MessageID, peerID PeerID) error {
result, err := p.db.Exec(
`DELETE FROM mvds_states WHERE message_id = ? AND peer_id = ?`,
messageID[:],
peerID[:],
)
if err != nil {
return err
}
if n, err := result.RowsAffected(); err != nil {
return err
} else if n == 0 {
return ErrStateNotFound
}
return nil
}
func (p *sqliteSyncState) All(epoch int64) ([]State, error) {
var result []State
rows, err := p.db.Query(`
SELECT
type, send_count, send_epoch, group_id, peer_id, message_id
FROM
mvds_states
WHERE
send_epoch <= ?
`, epoch)
if err != nil {
return nil, err
}
defer rows.Close()
for rows.Next() {
var (
state State
groupID, peerID, messageID []byte
)
err := rows.Scan(
&state.Type,
&state.SendCount,
&state.SendEpoch,
&groupID,
&peerID,
&messageID,
)
if err != nil {
return nil, err
}
if len(groupID) > 0 {
val := GroupID{}
copy(val[:], groupID)
state.GroupID = &val
}
copy(state.PeerID[:], peerID)
copy(state.MessageID[:], messageID)
result = append(result, state)
}
return result, nil
}
func (p *sqliteSyncState) Map(epoch int64, process func(State) State) error {
states, err := p.All(epoch)
if err != nil {
return err
}
var updated []State
for _, state := range states {
if err := invariant(state.SendEpoch <= epoch, "invalid state provided to process"); err != nil {
log.Printf("%v", err)
continue
}
newState := process(state)
if newState != state {
updated = append(updated, newState)
}
}
if len(updated) == 0 {
return nil
}
tx, err := p.db.Begin()
if err != nil {
return err
}
for _, state := range updated {
if err := updateInTx(tx, state); err != nil {
_ = tx.Rollback()
return err
}
}
return tx.Commit()
}
func updateInTx(tx *sql.Tx, state State) error {
_, err := tx.Exec(`
UPDATE mvds_states
SET
send_count = ?,
send_epoch = ?
WHERE
message_id = ? AND
peer_id = ?
`,
state.SendCount,
state.SendEpoch,
state.MessageID[:],
state.PeerID[:],
)
return err
}
func invariant(cond bool, message string) error {
if !cond {
return errors.New(message)
}
return nil
}
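
Wiring up the SQLite-backed store, as a sketch under two assumptions: the mattn/go-sqlite3 driver, and a hand-written mvds_states table that approximates the package's 1565341329_initial_schema.up.sql migration (embedded above only in compressed form).

package main

import (
	"database/sql"
	"log"

	_ "github.com/mattn/go-sqlite3" // assumed driver; any database/sql SQLite driver works

	"github.com/status-im/mvds/state"
)

func main() {
	db, err := sql.Open("sqlite3", ":memory:")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// Approximation of the real migration, reduced to the columns the
	// queries in state_sqlite.go touch.
	_, err = db.Exec(`CREATE TABLE mvds_states (
		type INTEGER NOT NULL,
		send_count INTEGER NOT NULL,
		send_epoch INTEGER NOT NULL,
		group_id BLOB,
		peer_id BLOB NOT NULL,
		message_id BLOB NOT NULL
	)`)
	if err != nil {
		log.Fatal(err)
	}

	s := state.NewPersistentSyncState(db)
	if err := s.Add(state.State{Type: state.MESSAGE, SendEpoch: 1}); err != nil {
		log.Fatal(err)
	}

	states, err := s.All(1)
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("stored %d state(s)", len(states))
}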

4
vendor/github.com/status-im/mvds/state/sync_types.go generated vendored Normal file
View File

@@ -0,0 +1,4 @@
package state
type MessageID [32]byte
type GroupID [32]byte

13
vendor/github.com/status-im/mvds/store/messagestore.go generated vendored Normal file
View File

@@ -0,0 +1,13 @@
// Package store contains everything storage related for MVDS.
package store
import (
"github.com/status-im/mvds/protobuf"
"github.com/status-im/mvds/state"
)
type MessageStore interface {
Has(id state.MessageID) (bool, error)
Get(id state.MessageID) (*protobuf.Message, error)
Add(message *protobuf.Message) error
}
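
The interface leaves ID derivation to the implementation; below is a minimal in-memory sketch, assuming (hypothetically, not confirmed by this diff) that a message's ID is the SHA-256 of its protobuf encoding.

package store

import (
	"crypto/sha256"
	"errors"
	"sync"

	"github.com/status-im/mvds/protobuf"
	"github.com/status-im/mvds/state"
	"google.golang.org/protobuf/proto"
)

type memoryStore struct {
	mu       sync.Mutex
	messages map[state.MessageID]*protobuf.Message
}

func newMemoryStore() *memoryStore {
	return &memoryStore{messages: make(map[state.MessageID]*protobuf.Message)}
}

func (s *memoryStore) Has(id state.MessageID) (bool, error) {
	s.mu.Lock()
	defer s.mu.Unlock()
	_, ok := s.messages[id]
	return ok, nil
}

func (s *memoryStore) Get(id state.MessageID) (*protobuf.Message, error) {
	s.mu.Lock()
	defer s.mu.Unlock()
	m, ok := s.messages[id]
	if !ok {
		return nil, errors.New("message does not exist")
	}
	return m, nil
}

func (s *memoryStore) Add(m *protobuf.Message) error {
	raw, err := proto.Marshal(m)
	if err != nil {
		return err
	}
	id := state.MessageID(sha256.Sum256(raw)) // hypothetical ID derivation
	s.mu.Lock()
	defer s.mu.Unlock()
	s.messages[id] = m
	return nil
}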

Some files were not shown because too many files have changed in this diff.