321 vendor/github.com/status-im/mvds/state/migrations/migrations.go generated vendored Normal file
@@ -0,0 +1,321 @@
// Code generated by go-bindata. DO NOT EDIT.
// sources:
// 1565341329_initial_schema.down.sql (24B)
// 1565341329_initial_schema.up.sql (294B)
// doc.go (377B)

package migrations

import (
	"bytes"
	"compress/gzip"
	"crypto/sha256"
	"fmt"
	"io"
	"os"
	"path/filepath"
	"strings"
	"time"
)

func bindataRead(data []byte, name string) ([]byte, error) {
	gz, err := gzip.NewReader(bytes.NewBuffer(data))
	if err != nil {
		return nil, fmt.Errorf("read %q: %w", name, err)
	}

	var buf bytes.Buffer
	_, err = io.Copy(&buf, gz)
	clErr := gz.Close()

	if err != nil {
		return nil, fmt.Errorf("read %q: %w", name, err)
	}
	if clErr != nil {
		return nil, err
	}

	return buf.Bytes(), nil
}

type asset struct {
	bytes  []byte
	info   os.FileInfo
	digest [sha256.Size]byte
}

type bindataFileInfo struct {
	name    string
	size    int64
	mode    os.FileMode
	modTime time.Time
}

func (fi bindataFileInfo) Name() string {
	return fi.name
}
func (fi bindataFileInfo) Size() int64 {
	return fi.size
}
func (fi bindataFileInfo) Mode() os.FileMode {
	return fi.mode
}
func (fi bindataFileInfo) ModTime() time.Time {
	return fi.modTime
}
func (fi bindataFileInfo) IsDir() bool {
	return false
}
func (fi bindataFileInfo) Sys() interface{} {
	return nil
}

var __1565341329_initial_schemaDownSql = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x72\x09\xf2\x0f\x50\x08\x71\x74\xf2\x71\x55\xc8\x2d\x4b\x29\x8e\x2f\x2e\x49\x2c\x49\x2d\xb6\xe6\x02\x04\x00\x00\xff\xff\xb7\x43\xc1\xc1\x18\x00\x00\x00")

func _1565341329_initial_schemaDownSqlBytes() ([]byte, error) {
	return bindataRead(
		__1565341329_initial_schemaDownSql,
		"1565341329_initial_schema.down.sql",
	)
}

func _1565341329_initial_schemaDownSql() (*asset, error) {
	bytes, err := _1565341329_initial_schemaDownSqlBytes()
	if err != nil {
		return nil, err
	}

	info := bindataFileInfo{name: "1565341329_initial_schema.down.sql", size: 24, mode: os.FileMode(0644), modTime: time.Unix(1704726726, 0)}
	a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x20, 0x56, 0x1a, 0x0, 0xc5, 0x81, 0xb3, 0xeb, 0x2a, 0xae, 0xed, 0xbb, 0x68, 0x51, 0x68, 0xc7, 0xe3, 0x31, 0xe, 0x1, 0x3e, 0xd2, 0x85, 0x9e, 0x6d, 0x55, 0xad, 0x55, 0xd6, 0x2f, 0x29, 0xca}}
	return a, nil
}

var __1565341329_initial_schemaUpSql = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x7c\x8f\xc1\x8a\x83\x30\x14\x45\xf7\xf9\x8a\xbb\x54\xf0\x0f\x5c\xe9\x4c\x18\x64\xd2\x58\x42\x0a\x75\x15\xc4\x3c\xac\x0b\x35\x98\x58\xda\xbf\x2f\xb4\x15\xa5\x60\xb7\xf7\x1c\x1e\xef\xfc\x28\x9e\x69\x0e\x9d\xe5\x82\xa3\xbf\x5a\x6f\x7c\xa8\x03\x79\x44\x0c\x00\xc2\xdd\x11\x0a\xa9\xf9\x1f\x57\x90\xa5\x86\x3c\x09\x91\x3c\x91\xa7\xc1\x9a\x66\x9c\x87\xf0\x4d\x20\x37\x36\x97\x1d\xa1\x9d\xc6\xd9\x99\xce\x22\x17\x65\xfe\x9a\x1c\xd1\xb4\x2c\x1f\x76\x4f\xde\xd7\x2d\xed\xd0\xa3\x2a\x0e\x99\xaa\xf0\xcf\x2b\x44\xab\x9a\x2c\x17\x63\x16\xa7\x8c\xbd\x6b\x0b\xf9\xcb\xcf\xe8\xec\xcd\x6c\x7e\x2c\xe5\xb6\x3f\x5a\x49\x9c\xb2\x47\x00\x00\x00\xff\xff\x5e\xe5\x72\x74\x26\x01\x00\x00")

func _1565341329_initial_schemaUpSqlBytes() ([]byte, error) {
	return bindataRead(
		__1565341329_initial_schemaUpSql,
		"1565341329_initial_schema.up.sql",
	)
}

func _1565341329_initial_schemaUpSql() (*asset, error) {
	bytes, err := _1565341329_initial_schemaUpSqlBytes()
	if err != nil {
		return nil, err
	}

	info := bindataFileInfo{name: "1565341329_initial_schema.up.sql", size: 294, mode: os.FileMode(0644), modTime: time.Unix(1704726726, 0)}
	a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x3e, 0xa5, 0x37, 0x9d, 0x3f, 0xf3, 0xc9, 0xc8, 0x12, 0x74, 0x79, 0x74, 0xff, 0xfd, 0xb1, 0x5f, 0x13, 0xaf, 0xf2, 0x50, 0x14, 0x9f, 0xdf, 0xc8, 0xc5, 0xa7, 0xc3, 0xf5, 0xa4, 0x8e, 0x8a, 0xf6}}
	return a, nil
}

var _docGo = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x84\x8f\xbb\x6e\xc3\x30\x0c\x45\x77\x7f\xc5\x45\x96\x2c\xb5\xb4\x74\xea\xd6\xb1\x7b\x7f\x80\x91\x68\x89\x88\x1e\xae\x48\xe7\xf1\xf7\x85\xd3\x02\xcd\xd6\xf5\x00\xe7\xf0\xd2\x7b\x7c\x66\x51\x2c\x52\x18\xa2\x68\x1c\x58\x95\xc6\x1d\x27\x0e\xb4\x29\xe3\x90\xc4\xf2\x76\x72\xa1\x57\xaf\x46\xb6\xe9\x2c\xd5\x57\x49\x83\x8c\xfd\xe5\xf5\x30\x79\x8f\x40\xed\x68\xc8\xd4\x62\xe1\x47\x4b\xa1\x46\xc3\xa4\x25\x5c\xc5\x32\x08\xeb\xe0\x45\x6e\x0e\xef\x86\xc2\xa4\x06\xcb\x64\x47\x85\x65\x46\x20\xe5\x3d\xb3\xf4\x81\xd4\xe7\x93\xb4\x48\x46\x6e\x47\x1f\xcb\x13\xd9\x17\x06\x2a\x85\x23\x96\xd1\xeb\xc3\x55\xaa\x8c\x28\x83\x83\xf5\x71\x7f\x01\xa9\xb2\xa1\x51\x65\xdd\xfd\x4c\x17\x46\xeb\xbf\xe7\x41\x2d\xfe\xff\x11\xae\x7d\x9c\x15\xa4\xe0\xdb\xca\xc1\x38\xba\x69\x5a\x29\x9c\x29\x31\xf4\xab\x88\xf1\x34\x79\x9f\xfa\x5b\xe2\xc6\xbb\xf5\xbc\x71\x5e\xcf\x09\x3f\x35\xe9\x4d\x31\x77\x38\xe7\xff\x80\x4b\x1d\x6e\xfa\x0e\x00\x00\xff\xff\x9d\x60\x3d\x88\x79\x01\x00\x00")

func docGoBytes() ([]byte, error) {
	return bindataRead(
		_docGo,
		"doc.go",
	)
}

func docGo() (*asset, error) {
	bytes, err := docGoBytes()
	if err != nil {
		return nil, err
	}

	info := bindataFileInfo{name: "doc.go", size: 377, mode: os.FileMode(0644), modTime: time.Unix(1704726726, 0)}
	a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xef, 0xaf, 0xdf, 0xcf, 0x65, 0xae, 0x19, 0xfc, 0x9d, 0x29, 0xc1, 0x91, 0xaf, 0xb5, 0xd5, 0xb1, 0x56, 0xf3, 0xee, 0xa8, 0xba, 0x13, 0x65, 0xdb, 0xab, 0xcf, 0x4e, 0xac, 0x92, 0xe9, 0x60, 0xf1}}
	return a, nil
}

// Asset loads and returns the asset for the given name.
// It returns an error if the asset could not be found or
// could not be loaded.
func Asset(name string) ([]byte, error) {
	canonicalName := strings.Replace(name, "\\", "/", -1)
	if f, ok := _bindata[canonicalName]; ok {
		a, err := f()
		if err != nil {
			return nil, fmt.Errorf("Asset %s can't read by error: %v", name, err)
		}
		return a.bytes, nil
	}
	return nil, fmt.Errorf("Asset %s not found", name)
}

// AssetString returns the asset contents as a string (instead of a []byte).
func AssetString(name string) (string, error) {
	data, err := Asset(name)
	return string(data), err
}

// MustAsset is like Asset but panics when Asset would return an error.
// It simplifies safe initialization of global variables.
func MustAsset(name string) []byte {
	a, err := Asset(name)
	if err != nil {
		panic("asset: Asset(" + name + "): " + err.Error())
	}

	return a
}

// MustAssetString is like AssetString but panics when Asset would return an
// error. It simplifies safe initialization of global variables.
func MustAssetString(name string) string {
	return string(MustAsset(name))
}

// AssetInfo loads and returns the asset info for the given name.
// It returns an error if the asset could not be found or
// could not be loaded.
func AssetInfo(name string) (os.FileInfo, error) {
	canonicalName := strings.Replace(name, "\\", "/", -1)
	if f, ok := _bindata[canonicalName]; ok {
		a, err := f()
		if err != nil {
			return nil, fmt.Errorf("AssetInfo %s can't read by error: %v", name, err)
		}
		return a.info, nil
	}
	return nil, fmt.Errorf("AssetInfo %s not found", name)
}

// AssetDigest returns the digest of the file with the given name. It returns an
// error if the asset could not be found or the digest could not be loaded.
func AssetDigest(name string) ([sha256.Size]byte, error) {
	canonicalName := strings.Replace(name, "\\", "/", -1)
	if f, ok := _bindata[canonicalName]; ok {
		a, err := f()
		if err != nil {
			return [sha256.Size]byte{}, fmt.Errorf("AssetDigest %s can't read by error: %v", name, err)
		}
		return a.digest, nil
	}
	return [sha256.Size]byte{}, fmt.Errorf("AssetDigest %s not found", name)
}

// Digests returns a map of all known files and their checksums.
func Digests() (map[string][sha256.Size]byte, error) {
	mp := make(map[string][sha256.Size]byte, len(_bindata))
	for name := range _bindata {
		a, err := _bindata[name]()
		if err != nil {
			return nil, err
		}
		mp[name] = a.digest
	}
	return mp, nil
}

// AssetNames returns the names of the assets.
func AssetNames() []string {
	names := make([]string, 0, len(_bindata))
	for name := range _bindata {
		names = append(names, name)
	}
	return names
}

// _bindata is a table, holding each asset generator, mapped to its name.
var _bindata = map[string]func() (*asset, error){
	"1565341329_initial_schema.down.sql": _1565341329_initial_schemaDownSql,
	"1565341329_initial_schema.up.sql":   _1565341329_initial_schemaUpSql,
	"doc.go":                             docGo,
}

// AssetDebug is true if the assets were built with the debug flag enabled.
const AssetDebug = false

// AssetDir returns the file names below a certain
// directory embedded in the file by go-bindata.
// For example if you run go-bindata on data/... and data contains the
// following hierarchy:
//
//   data/
//     foo.txt
//     img/
//       a.png
//       b.png
//
// then AssetDir("data") would return []string{"foo.txt", "img"},
// AssetDir("data/img") would return []string{"a.png", "b.png"},
// AssetDir("foo.txt") and AssetDir("notexist") would return an error, and
// AssetDir("") will return []string{"data"}.
func AssetDir(name string) ([]string, error) {
	node := _bintree
	if len(name) != 0 {
		canonicalName := strings.Replace(name, "\\", "/", -1)
		pathList := strings.Split(canonicalName, "/")
		for _, p := range pathList {
			node = node.Children[p]
			if node == nil {
				return nil, fmt.Errorf("Asset %s not found", name)
			}
		}
	}
	if node.Func != nil {
		return nil, fmt.Errorf("Asset %s not found", name)
	}
	rv := make([]string, 0, len(node.Children))
	for childName := range node.Children {
		rv = append(rv, childName)
	}
	return rv, nil
}

type bintree struct {
	Func     func() (*asset, error)
	Children map[string]*bintree
}

var _bintree = &bintree{nil, map[string]*bintree{
	"1565341329_initial_schema.down.sql": {_1565341329_initial_schemaDownSql, map[string]*bintree{}},
	"1565341329_initial_schema.up.sql":   {_1565341329_initial_schemaUpSql, map[string]*bintree{}},
	"doc.go":                             {docGo, map[string]*bintree{}},
}}

// RestoreAsset restores an asset under the given directory.
func RestoreAsset(dir, name string) error {
	data, err := Asset(name)
	if err != nil {
		return err
	}
	info, err := AssetInfo(name)
	if err != nil {
		return err
	}
	err = os.MkdirAll(_filePath(dir, filepath.Dir(name)), os.FileMode(0755))
	if err != nil {
		return err
	}
	err = os.WriteFile(_filePath(dir, name), data, info.Mode())
	if err != nil {
		return err
	}
	return os.Chtimes(_filePath(dir, name), info.ModTime(), info.ModTime())
}

// RestoreAssets restores an asset under the given directory recursively.
func RestoreAssets(dir, name string) error {
	children, err := AssetDir(name)
	// File
	if err != nil {
		return RestoreAsset(dir, name)
	}
	// Dir
	for _, child := range children {
		err = RestoreAssets(dir, filepath.Join(name, child))
		if err != nil {
			return err
		}
	}
	return nil
}

func _filePath(dir, name string) string {
	canonicalName := strings.Replace(name, "\\", "/", -1)
	return filepath.Join(append([]string{dir}, strings.Split(canonicalName, "/")...)...)
}
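The generated accessors above expose the embedded SQL migrations by name. A minimal usage sketch follows (not part of the vendored file; note that the loop order is unspecified because AssetNames iterates a map):

package main

import (
	"fmt"
	"log"

	"github.com/status-im/mvds/state/migrations"
)

func main() {
	// Enumerate the embedded assets and print each one's size, digest and contents.
	for _, name := range migrations.AssetNames() {
		data, err := migrations.Asset(name)
		if err != nil {
			log.Fatalf("read %s: %v", name, err)
		}
		digest, err := migrations.AssetDigest(name)
		if err != nil {
			log.Fatalf("digest %s: %v", name, err)
		}
		fmt.Printf("%s (%d bytes, sha256 %x)\n%s\n", name, len(data), digest, data)
	}
}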
4 vendor/github.com/status-im/mvds/state/peerid.go generated vendored Normal file
@@ -0,0 +1,4 @@
package state

// PeerID is the ID for a specific peer.
type PeerID [65]byte
29 vendor/github.com/status-im/mvds/state/state.go generated vendored Normal file
@@ -0,0 +1,29 @@
// Package state contains everything related to the synchronization state for MVDS.
package state

// RecordType is the type for a specific record, either `OFFER`, `REQUEST` or `MESSAGE`.
type RecordType int

const (
	OFFER RecordType = iota
	REQUEST
	MESSAGE
)

// State is a struct used to store a records [state](https://github.com/status-im/bigbrother-specs/blob/master/data_sync/mvds.md#state).
type State struct {
	Type      RecordType
	SendCount uint64
	SendEpoch int64
	// GroupID is optional, thus nullable
	GroupID   *GroupID
	PeerID    PeerID
	MessageID MessageID
}

type SyncState interface {
	Add(newState State) error
	Remove(id MessageID, peer PeerID) error
	All(epoch int64) ([]State, error)
	Map(epoch int64, process func(State) State) error
}
61 vendor/github.com/status-im/mvds/state/state_memory.go generated vendored Normal file
@@ -0,0 +1,61 @@
package state

import (
	"sync"
)

type memorySyncState struct {
	sync.Mutex

	state []State
}

func NewSyncState() *memorySyncState {
	return &memorySyncState{}
}

func (s *memorySyncState) Add(newState State) error {
	s.Lock()
	defer s.Unlock()

	s.state = append(s.state, newState)

	return nil
}

func (s *memorySyncState) Remove(id MessageID, peer PeerID) error {
	s.Lock()
	defer s.Unlock()
	var newState []State

	for _, state := range s.state {
		if state.MessageID != id || state.PeerID != peer {
			newState = append(newState, state)
		}
	}

	s.state = newState

	return nil
}

func (s *memorySyncState) All(_ int64) ([]State, error) {
	s.Lock()
	defer s.Unlock()
	return s.state, nil
}

func (s *memorySyncState) Map(epoch int64, process func(State) State) error {
	s.Lock()
	defer s.Unlock()

	for i, state := range s.state {
		if state.SendEpoch > epoch {
			continue
		}

		s.state[i] = process(state)
	}

	return nil
}
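The in-memory implementation above satisfies the SyncState interface defined in state.go. A rough usage sketch (not part of the vendored code; the IDs are zero-valued placeholders):

package main

import (
	"fmt"

	"github.com/status-im/mvds/state"
)

func main() {
	s := state.NewSyncState()

	// Zero-valued placeholder identifiers, purely for illustration.
	var (
		msg  state.MessageID
		peer state.PeerID
	)

	// Track an OFFER that becomes due at epoch 1.
	_ = s.Add(state.State{Type: state.OFFER, SendEpoch: 1, MessageID: msg, PeerID: peer})

	// Bump the send count of every record whose SendEpoch is not in the future.
	_ = s.Map(5, func(st state.State) state.State {
		st.SendCount++
		return st
	})

	all, _ := s.All(5)
	fmt.Println(len(all), all[0].SendCount) // 1 1
}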
166 vendor/github.com/status-im/mvds/state/state_sqlite.go generated vendored Normal file
@@ -0,0 +1,166 @@
package state

import (
	"database/sql"
	"errors"
	"log"
)

var (
	ErrStateNotFound = errors.New("state not found")
)

// Verify that SyncState interface is implemented.
var _ SyncState = (*sqliteSyncState)(nil)

type sqliteSyncState struct {
	db *sql.DB
}

func NewPersistentSyncState(db *sql.DB) *sqliteSyncState {
	return &sqliteSyncState{db: db}
}

func (p *sqliteSyncState) Add(newState State) error {
	var groupIDBytes []byte
	if newState.GroupID != nil {
		groupIDBytes = newState.GroupID[:]
	}

	_, err := p.db.Exec(`
		INSERT INTO mvds_states
			(type, send_count, send_epoch, group_id, peer_id, message_id)
		VALUES
			(?, ?, ?, ?, ?, ?)`,
		newState.Type,
		newState.SendCount,
		newState.SendEpoch,
		groupIDBytes,
		newState.PeerID[:],
		newState.MessageID[:],
	)
	return err
}

func (p *sqliteSyncState) Remove(messageID MessageID, peerID PeerID) error {
	result, err := p.db.Exec(
		`DELETE FROM mvds_states WHERE message_id = ? AND peer_id = ?`,
		messageID[:],
		peerID[:],
	)
	if err != nil {
		return err
	}
	if n, err := result.RowsAffected(); err != nil {
		return err
	} else if n == 0 {
		return ErrStateNotFound
	}
	return nil
}

func (p *sqliteSyncState) All(epoch int64) ([]State, error) {
	var result []State

	rows, err := p.db.Query(`
		SELECT
			type, send_count, send_epoch, group_id, peer_id, message_id
		FROM
			mvds_states
		WHERE
			send_epoch <= ?
	`, epoch)
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	for rows.Next() {
		var (
			state                      State
			groupID, peerID, messageID []byte
		)
		err := rows.Scan(
			&state.Type,
			&state.SendCount,
			&state.SendEpoch,
			&groupID,
			&peerID,
			&messageID,
		)
		if err != nil {
			return nil, err
		}
		if len(groupID) > 0 {
			val := GroupID{}
			copy(val[:], groupID)
			state.GroupID = &val
		}
		copy(state.PeerID[:], peerID)
		copy(state.MessageID[:], messageID)

		result = append(result, state)
	}

	return result, nil
}

func (p *sqliteSyncState) Map(epoch int64, process func(State) State) error {
	states, err := p.All(epoch)
	if err != nil {
		return err
	}

	var updated []State

	for _, state := range states {
		if err := invariant(state.SendEpoch <= epoch, "invalid state provided to process"); err != nil {
			log.Printf("%v", err)
			continue
		}
		newState := process(state)
		if newState != state {
			updated = append(updated, newState)
		}
	}

	if len(updated) == 0 {
		return nil
	}

	tx, err := p.db.Begin()
	if err != nil {
		return err
	}
	for _, state := range updated {
		if err := updateInTx(tx, state); err != nil {
			_ = tx.Rollback()
			return err
		}
	}
	return tx.Commit()
}

func updateInTx(tx *sql.Tx, state State) error {
	_, err := tx.Exec(`
		UPDATE mvds_states
		SET
			send_count = ?,
			send_epoch = ?
		WHERE
			message_id = ? AND
			peer_id = ?
	`,
		state.SendCount,
		state.SendEpoch,
		state.MessageID[:],
		state.PeerID[:],
	)
	return err
}

func invariant(cond bool, message string) error {
	if !cond {
		return errors.New(message)
	}
	return nil
}
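A hedged sketch of wiring the SQLite-backed implementation above, assuming the mattn/go-sqlite3 driver and assuming the bundled up-migration creates the mvds_states table the queries expect; the real project may apply migrations through a dedicated migration library instead:

package main

import (
	"database/sql"
	"log"

	_ "github.com/mattn/go-sqlite3" // assumed SQLite driver

	"github.com/status-im/mvds/state"
	"github.com/status-im/mvds/state/migrations"
)

func main() {
	db, err := sql.Open("sqlite3", "mvds.db")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// Create the schema from the embedded up-migration (assumption: it can be executed directly).
	up, err := migrations.Asset("1565341329_initial_schema.up.sql")
	if err != nil {
		log.Fatal(err)
	}
	if _, err := db.Exec(string(up)); err != nil {
		log.Fatal(err)
	}

	ss := state.NewPersistentSyncState(db)

	// Zero-valued placeholder identifiers, purely for illustration.
	var msg state.MessageID
	var peer state.PeerID
	if err := ss.Add(state.State{Type: state.MESSAGE, SendEpoch: 1, MessageID: msg, PeerID: peer}); err != nil {
		log.Fatal(err)
	}
	log.Println("state row inserted")
}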
4 vendor/github.com/status-im/mvds/state/sync_types.go generated vendored Normal file
@@ -0,0 +1,4 @@
package state

type MessageID [32]byte
type GroupID [32]byte