feat: Waku v2 bridge

Issue #12610
This commit is contained in:
Michal Iskierko
2023-11-12 13:29:38 +01:00
parent 56e7bd01ca
commit 6d31343205
6716 changed files with 1982502 additions and 5891 deletions

View File

@@ -0,0 +1,93 @@
package anonmetrics
import (
"crypto/ecdsa"
"fmt"
"github.com/davecgh/go-spew/spew"
"github.com/golang/protobuf/ptypes"
"github.com/status-im/status-go/appmetrics"
"github.com/status-im/status-go/eth-node/crypto"
"github.com/status-im/status-go/eth-node/types"
"github.com/status-im/status-go/protocol/protobuf"
)
// adaptProtoToModel converts a protobuf.AnonymousMetric into an
// appmetrics.AppMetric, translating the protobuf timestamp into a time.Time.
func adaptProtoToModel(pbAnonMetric *protobuf.AnonymousMetric) (*appmetrics.AppMetric, error) {
	createdAt, err := ptypes.Timestamp(pbAnonMetric.CreatedAt)
	if err != nil {
		return nil, err
	}
	metric := &appmetrics.AppMetric{
		MessageID:  pbAnonMetric.Id,
		Event:      appmetrics.AppMetricEventType(pbAnonMetric.Event),
		Value:      pbAnonMetric.Value,
		AppVersion: pbAnonMetric.AppVersion,
		OS:         pbAnonMetric.Os,
		SessionID:  pbAnonMetric.SessionId,
		CreatedAt:  createdAt,
	}
	return metric, nil
}
// adaptModelToProto converts an appmetrics.AppMetric into a
// protobuf.AnonymousMetric, deriving the metric ID from sendID.
func adaptModelToProto(modelAnonMetric appmetrics.AppMetric, sendID *ecdsa.PublicKey) (*protobuf.AnonymousMetric, error) {
	createdAt, err := ptypes.TimestampProto(modelAnonMetric.CreatedAt)
	if err != nil {
		return nil, err
	}
	pb := &protobuf.AnonymousMetric{
		Id:         generateProtoID(modelAnonMetric, sendID),
		Event:      string(modelAnonMetric.Event),
		Value:      modelAnonMetric.Value,
		AppVersion: modelAnonMetric.AppVersion,
		Os:         modelAnonMetric.OS,
		SessionId:  modelAnonMetric.SessionID,
		CreatedAt:  createdAt,
	}
	return pb, nil
}
// adaptModelsToProtoBatch converts a slice of appmetrics.AppMetric into a
// protobuf.AnonymousMetricBatch. It returns an error as soon as any single
// metric fails to convert.
func adaptModelsToProtoBatch(modelAnonMetrics []appmetrics.AppMetric, sendID *ecdsa.PublicKey) (*protobuf.AnonymousMetricBatch, error) {
	amb := &protobuf.AnonymousMetricBatch{
		// Pre-size: the batch always ends up with one entry per input metric,
		// avoiding repeated slice growth in the loop below.
		Metrics: make([]*protobuf.AnonymousMetric, 0, len(modelAnonMetrics)),
	}
	for _, m := range modelAnonMetrics {
		p, err := adaptModelToProto(m, sendID)
		if err != nil {
			return nil, err
		}
		amb.Metrics = append(amb.Metrics, p)
	}
	return amb, nil
}
// adaptProtoBatchToModels converts a protobuf.AnonymousMetricBatch into a
// slice of appmetrics.AppMetric. A nil batch yields a nil slice and no error.
func adaptProtoBatchToModels(protoBatch *protobuf.AnonymousMetricBatch) ([]*appmetrics.AppMetric, error) {
	if protoBatch == nil {
		return nil, nil
	}
	var metrics []*appmetrics.AppMetric
	for _, pb := range protoBatch.Metrics {
		metric, err := adaptProtoToModel(pb)
		if err != nil {
			return nil, err
		}
		metrics = append(metrics, metric)
	}
	return metrics, nil
}
// generateProtoID deterministically derives a metric ID by Keccak256-hashing
// the hex-encoded sender public key concatenated with a spew dump of the metric.
// NEEDED because we don't send individual metrics, we send only batches
func generateProtoID(modelAnonMetric appmetrics.AppMetric, sendID *ecdsa.PublicKey) string {
	senderHex := types.EncodeHex(crypto.FromECDSAPub(sendID))
	payload := senderHex + spew.Sdump(modelAnonMetric)
	return types.EncodeHex(crypto.Keccak256([]byte(payload)))
}

View File

@@ -0,0 +1,231 @@
package anonmetrics
import (
"context"
"crypto/ecdsa"
"errors"
"sync"
"time"
"github.com/golang/protobuf/proto"
"go.uber.org/zap"
"github.com/status-im/status-go/appmetrics"
"github.com/status-im/status-go/eth-node/crypto"
"github.com/status-im/status-go/protocol/common"
"github.com/status-im/status-go/protocol/protobuf"
)
const ActiveClientPhrase = "yes i am wanting the activation of the anon metrics client, please thank you lots thank you"
// ClientConfig holds the configuration for an anonymous metrics Client.
type ClientConfig struct {
	// ShouldSend presumably gates metric sending — it is not referenced in
	// this file; confirm at call sites.
	ShouldSend bool
	// SendAddress is the public key of the metrics server that batches are
	// sent to (see sendUnprocessedMetrics).
	SendAddress *ecdsa.PublicKey
	// Active must equal ActiveClientPhrase for the client to do any work.
	Active string
}
// Client periodically sends anonymous usage metrics from the local database
// to a remote metrics server, and prunes old metrics from the local database.
type Client struct {
	Config   *ClientConfig
	DB       *appmetrics.Database
	Identity *ecdsa.PrivateKey
	Logger   *zap.Logger

	// messageSender is a message processor used to send metric batch messages
	messageSender *common.MessageSender

	// IntervalInc yields the (Fibonacci-growing) wait, in seconds, between
	// successive send attempts in the main loop.
	IntervalInc *FibonacciIntervalIncrementer

	// mainLoopQuit signals the main loop goroutine to terminate when closed
	mainLoopQuit chan struct{}

	// deleteLoopQuit signals the delete loop goroutine to terminate when closed
	deleteLoopQuit chan struct{}

	// DBLock prevents deletion of DB items during mainloop
	DBLock sync.Mutex
}
// NewClient returns a Client wired to the given message sender, with its
// send-interval incrementer seeded at the start of the Fibonacci sequence.
// Config, DB, Identity and Logger must be populated by the caller.
func NewClient(sender *common.MessageSender) *Client {
	incrementer := &FibonacciIntervalIncrementer{Last: 0, Current: 1}
	return &Client{
		messageSender: sender,
		IntervalInc:   incrementer,
	}
}
// sendUnprocessedMetrics sends all unprocessed metrics in the database to the
// configured server address, one batch per session id, and marks successfully
// sent metrics as processed. No-op unless the client is explicitly activated.
func (c *Client) sendUnprocessedMetrics() {
	if c.Config.Active != ActiveClientPhrase {
		return
	}
	c.Logger.Debug("sendUnprocessedMetrics() triggered")
	// Hold the DB lock so the delete loop cannot remove rows mid-send.
	c.DBLock.Lock()
	defer c.DBLock.Unlock()
	// Get all unsent metrics grouped by session id
	uam, err := c.DB.GetUnprocessedGroupedBySession()
	if err != nil {
		c.Logger.Error("failed to get unprocessed messages grouped by session", zap.Error(err))
		// Fix: previously fell through after logging and ranged over a
		// possibly-nil map; bail out explicitly on a DB read error.
		return
	}
	c.Logger.Debug("unprocessed metrics from db", zap.Reflect("uam", uam))
	for session, batch := range uam {
		c.Logger.Debug("processing uam from session", zap.String("session", session))
		// Convert the metrics into protobuf
		amb, err := adaptModelsToProtoBatch(batch, &c.Identity.PublicKey)
		if err != nil {
			c.Logger.Error("failed to adapt models to protobuf batch", zap.Error(err))
			return
		}
		// Generate an ephemeral key per session id
		ephemeralKey, err := crypto.GenerateKey()
		if err != nil {
			c.Logger.Error("failed to generate an ephemeral key", zap.Error(err))
			return
		}
		// Prepare the protobuf message
		encodedMessage, err := proto.Marshal(amb)
		if err != nil {
			c.Logger.Error("failed to marshal protobuf", zap.Error(err))
			return
		}
		rawMessage := common.RawMessage{
			Payload:             encodedMessage,
			Sender:              ephemeralKey,
			SkipEncryptionLayer: true,
			SendOnPersonalTopic: true,
			MessageType:         protobuf.ApplicationMetadataMessage_ANONYMOUS_METRIC_BATCH,
		}
		c.Logger.Debug("rawMessage prepared from unprocessed anonymous metrics", zap.Reflect("rawMessage", rawMessage))
		// Send the metrics batch
		_, err = c.messageSender.SendPrivate(context.Background(), c.Config.SendAddress, &rawMessage)
		if err != nil {
			c.Logger.Error("failed to send metrics batch message", zap.Error(err))
			return
		}
		// Mark metrics as processed; a failure here is logged but does not
		// abort the remaining sessions (the batch was already sent).
		err = c.DB.SetToProcessed(batch)
		if err != nil {
			c.Logger.Error("failed to set metrics as processed in db", zap.Error(err))
		}
	}
}
// mainLoop repeatedly sends unprocessed metrics, sleeping a Fibonacci-growing
// number of seconds between rounds, until mainLoopQuit is closed. It returns
// immediately if the client is not explicitly activated.
func (c *Client) mainLoop() error {
	if c.Config.Active != ActiveClientPhrase {
		return nil
	}
	c.Logger.Debug("mainLoop() triggered")
	for {
		c.sendUnprocessedMetrics()
		interval := time.Duration(c.IntervalInc.Next()) * time.Second
		c.Logger.Debug("mainLoop() wait interval set", zap.Duration("waitFor", interval))
		select {
		case <-c.mainLoopQuit:
			return nil
		case <-time.After(interval):
		}
	}
}
// startMainLoop (re)starts the metrics-sending loop in its own goroutine,
// stopping any previously running loop first. No-op unless activated.
func (c *Client) startMainLoop() {
	if c.Config.Active != ActiveClientPhrase {
		return
	}
	c.Logger.Debug("startMainLoop() triggered")
	c.stopMainLoop()
	c.mainLoopQuit = make(chan struct{})
	go func() {
		c.Logger.Debug("startMainLoop() anonymous go routine triggered")
		if err := c.mainLoop(); err != nil {
			c.Logger.Error("main loop exited with an error", zap.Error(err))
		}
	}()
}
// deleteLoop hourly purges metrics older than one week from the local
// database, holding DBLock so deletion never races the send loop. It runs
// until deleteLoopQuit is closed.
func (c *Client) deleteLoop() error {
	// Give the main loop time to process any old messages before deleting.
	// Fix: the initial wait is now cancellable, so Stop() no longer leaves
	// this goroutine lingering for the full 10 seconds.
	select {
	case <-time.After(time.Second * 10):
	case <-c.deleteLoopQuit:
		return nil
	}
	for {
		// Wrapped in a func so the deferred unlock fires each iteration.
		func() {
			c.DBLock.Lock()
			defer c.DBLock.Unlock()
			oneWeekAgo := time.Now().Add(time.Hour * 24 * 7 * -1)
			err := c.DB.DeleteOlderThan(&oneWeekAgo)
			if err != nil {
				c.Logger.Error("failed to delete metrics older than given time",
					zap.Time("time given", oneWeekAgo),
					zap.Error(err))
			}
		}()
		select {
		case <-time.After(time.Hour):
		case <-c.deleteLoopQuit:
			return nil
		}
	}
}
// startDeleteLoop (re)starts the metric-pruning loop in its own goroutine,
// stopping any previously running instance first.
func (c *Client) startDeleteLoop() {
	c.stopDeleteLoop()
	c.deleteLoopQuit = make(chan struct{})
	go func() {
		if err := c.deleteLoop(); err != nil {
			c.Logger.Error("delete loop exited with an error", zap.Error(err))
		}
	}()
}
// Start launches the send and delete loops. It returns an error if the
// client was constructed without a message sender.
func (c *Client) Start() error {
	c.Logger.Debug("Main Start() triggered")
	if c.messageSender == nil {
		return errors.New("can't start, missing message processor")
	}
	c.startMainLoop()
	c.startDeleteLoop()
	return nil
}
// stopMainLoop signals the main loop goroutine to exit, if one is running,
// and clears the quit channel so the loop can be restarted later.
func (c *Client) stopMainLoop() {
	c.Logger.Debug("stopMainLoop() triggered")
	if c.mainLoopQuit != nil {
		// Fix: the debug message previously read "mainLoopQuit not set",
		// which is the opposite of the condition guarding this branch.
		c.Logger.Debug("mainLoopQuit set, attempting to close")
		close(c.mainLoopQuit)
		c.mainLoopQuit = nil
	}
}
// stopDeleteLoop signals the delete loop goroutine to exit, if one is
// running, and clears the quit channel so the loop can be restarted later.
func (c *Client) stopDeleteLoop() {
	if quit := c.deleteLoopQuit; quit != nil {
		c.deleteLoopQuit = nil
		close(quit)
	}
}
// Stop terminates both background loops. It always returns nil.
func (c *Client) Stop() error {
	c.stopMainLoop()
	c.stopDeleteLoop()
	return nil
}

View File

@@ -0,0 +1,15 @@
package anonmetrics
// FibonacciIntervalIncrementer yields successive Fibonacci numbers, used to
// grow the metrics send interval between rounds.
type FibonacciIntervalIncrementer struct {
	Last    int64
	Current int64
}

// Next advances the sequence one step and returns the new current value.
// Seeded with Last=0, Current=1 it produces 1, 2, 3, 5, 8, 13, ...
func (f *FibonacciIntervalIncrementer) Next() int64 {
	f.Last, f.Current = f.Current, f.Last+f.Current
	return f.Current
}

View File

@@ -0,0 +1,321 @@
// Code generated by go-bindata. DO NOT EDIT.
// sources:
// 1619446565_postgres_make_anon_metrics_table.down.sql (24B)
// 1619446565_postgres_make_anon_metrics_table.up.sql (443B)
// doc.go (380B)
package migrations
import (
"bytes"
"compress/gzip"
"crypto/sha256"
"fmt"
"io"
"os"
"path/filepath"
"strings"
"time"
)
// bindataRead gunzips the embedded asset data and returns the raw bytes.
// name is used only to annotate errors.
func bindataRead(data []byte, name string) ([]byte, error) {
	gz, err := gzip.NewReader(bytes.NewBuffer(data))
	if err != nil {
		return nil, fmt.Errorf("read %q: %w", name, err)
	}
	var buf bytes.Buffer
	_, err = io.Copy(&buf, gz)
	clErr := gz.Close()
	if err != nil {
		return nil, fmt.Errorf("read %q: %w", name, err)
	}
	if clErr != nil {
		// Fix: previously returned the (nil) copy error here, so a failing
		// Close — which is where gzip checksum errors surface — produced a
		// (nil, nil) result. Return the close error instead.
		return nil, clErr
	}
	return buf.Bytes(), nil
}
// asset holds one embedded file: its decompressed bytes, synthetic file
// metadata, and the SHA-256 digest of the contents.
type asset struct {
	bytes  []byte
	info   os.FileInfo
	digest [sha256.Size]byte
}
// bindataFileInfo implements os.FileInfo for an embedded asset, using the
// metadata recorded at code-generation time.
type bindataFileInfo struct {
	name    string
	size    int64
	mode    os.FileMode
	modTime time.Time
}

// Name returns the base name of the embedded file.
func (fi bindataFileInfo) Name() string {
	return fi.name
}

// Size returns the uncompressed length of the asset in bytes.
func (fi bindataFileInfo) Size() int64 {
	return fi.size
}

// Mode returns the file mode bits recorded at generation time.
func (fi bindataFileInfo) Mode() os.FileMode {
	return fi.mode
}

// ModTime returns the modification time recorded at generation time.
func (fi bindataFileInfo) ModTime() time.Time {
	return fi.modTime
}

// IsDir always reports false; embedded assets are regular files.
func (fi bindataFileInfo) IsDir() bool {
	return false
}

// Sys returns nil; there is no underlying data source.
func (fi bindataFileInfo) Sys() interface{} {
	return nil
}
// __1619446565_postgres_make_anon_metrics_tableDownSql is the gzip-compressed
// contents of the down migration (generated by go-bindata).
var __1619446565_postgres_make_anon_metrics_tableDownSql = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x72\x09\xf2\x0f\x50\x08\x71\x74\xf2\x71\x55\x48\x2c\x28\x88\xcf\x4d\x2d\x29\xca\x4c\x2e\xb6\xe6\x02\x04\x00\x00\xff\xff\x99\xa7\x42\x7d\x18\x00\x00\x00")

// _1619446565_postgres_make_anon_metrics_tableDownSqlBytes decompresses and
// returns the raw SQL of the down migration.
func _1619446565_postgres_make_anon_metrics_tableDownSqlBytes() ([]byte, error) {
	return bindataRead(
		__1619446565_postgres_make_anon_metrics_tableDownSql,
		"1619446565_postgres_make_anon_metrics_table.down.sql",
	)
}

// _1619446565_postgres_make_anon_metrics_tableDownSql wraps the down
// migration bytes in an asset with its recorded metadata and digest.
func _1619446565_postgres_make_anon_metrics_tableDownSql() (*asset, error) {
	bytes, err := _1619446565_postgres_make_anon_metrics_tableDownSqlBytes()
	if err != nil {
		return nil, err
	}
	info := bindataFileInfo{name: "1619446565_postgres_make_anon_metrics_table.down.sql", size: 24, mode: os.FileMode(0644), modTime: time.Unix(1704739012, 0)}
	a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x75, 0xea, 0x1, 0x74, 0xe6, 0xa3, 0x11, 0xd0, 0x86, 0x87, 0x7e, 0x31, 0xb4, 0x1a, 0x27, 0x5d, 0xda, 0x77, 0xa3, 0xf5, 0x1d, 0x88, 0x79, 0xcf, 0xd5, 0x95, 0x75, 0xd, 0x47, 0xa1, 0x90, 0x5}}
	return a, nil
}
// __1619446565_postgres_make_anon_metrics_tableUpSql is the gzip-compressed
// contents of the up migration (generated by go-bindata).
var __1619446565_postgres_make_anon_metrics_tableUpSql = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x74\x90\x4d\x6a\xc3\x30\x14\x84\xf7\x39\xc5\x2c\xdb\x10\xc8\x01\xba\x52\xdc\x17\xea\x56\xb6\x53\x59\x2e\x64\x65\x84\xfd\x30\x82\xfa\x07\x49\x75\xe9\xed\x8b\x1d\x92\x38\x81\xac\xbf\xef\x69\x46\x13\x29\x12\x9a\xa0\xc5\x4e\x12\xcc\x30\x94\x2d\x07\x67\x2b\x8f\xa7\x15\x00\xd8\x1a\x39\xa9\x58\x48\x1c\x54\x9c\x08\x75\xc4\x07\x1d\x37\xab\x99\x6d\xd7\x88\xbb\xaa\x6f\x6d\xd7\xe0\x74\x85\xda\x04\x83\xf5\x76\xc6\x2d\x7b\x6f\x1a\x2e\x6d\x8d\x2f\xa1\xa2\x37\xa1\x50\xa4\xf1\x67\x41\x48\x33\x8d\xb4\x90\x72\x33\x7b\x3c\x72\x17\x2e\xca\x2d\x1b\xcd\xf7\x0f\xe3\x3d\xcf\xd2\x3b\x30\x35\x1d\xd9\x79\xdb\x77\x0f\x4e\xfb\x81\x9d\x09\xb6\x6b\x4a\xff\xe7\x03\xb7\x0f\x34\xcf\x7e\x7a\x64\xd9\xf2\x56\xa8\x1c\x9b\xc0\x75\x69\x02\x74\x9c\x50\xae\x45\x72\x58\x28\xe7\x25\x54\xff\x3b\x8d\x60\x96\x0b\x0c\xae\xaf\xd8\x7b\xae\xb1\xcb\x32\x49\xe2\xfa\x09\xbc\xd2\x5e\x14\x52\x63\x2f\x64\x4e\xa7\x20\xc7\x15\xdb\xf1\x3e\xe9\x2c\x46\x85\x52\x94\xea\xf2\x42\x9e\x5f\xfe\x03\x00\x00\xff\xff\xee\x42\x32\x03\xbb\x01\x00\x00")

// _1619446565_postgres_make_anon_metrics_tableUpSqlBytes decompresses and
// returns the raw SQL of the up migration.
func _1619446565_postgres_make_anon_metrics_tableUpSqlBytes() ([]byte, error) {
	return bindataRead(
		__1619446565_postgres_make_anon_metrics_tableUpSql,
		"1619446565_postgres_make_anon_metrics_table.up.sql",
	)
}

// _1619446565_postgres_make_anon_metrics_tableUpSql wraps the up migration
// bytes in an asset with its recorded metadata and digest.
func _1619446565_postgres_make_anon_metrics_tableUpSql() (*asset, error) {
	bytes, err := _1619446565_postgres_make_anon_metrics_tableUpSqlBytes()
	if err != nil {
		return nil, err
	}
	info := bindataFileInfo{name: "1619446565_postgres_make_anon_metrics_table.up.sql", size: 443, mode: os.FileMode(0644), modTime: time.Unix(1704739012, 0)}
	a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xd5, 0xdc, 0x72, 0x28, 0x3c, 0xf6, 0x94, 0xb0, 0x47, 0x3d, 0xca, 0x55, 0x3d, 0xf7, 0x83, 0xb8, 0x7d, 0x2f, 0x1e, 0x98, 0xb7, 0xde, 0xa, 0xff, 0xa0, 0x52, 0x60, 0x83, 0x56, 0xc5, 0xd1, 0xa2}}
	return a, nil
}
// _docGo is the gzip-compressed contents of the package doc.go file
// (generated by go-bindata).
var _docGo = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x84\x8f\xbd\x6e\xf3\x30\x0c\x45\x77\x3f\xc5\x45\x96\x2c\x9f\xa5\xe5\x9b\xba\x75\xec\xde\x17\x60\xe4\x6b\x49\x88\x2d\x1a\x22\xf3\xf7\xf6\x85\xd3\x02\xcd\xd6\xf5\x00\xe7\xf0\x32\x46\x7c\x96\x6a\x98\xeb\x42\x54\x43\x63\xa2\x99\xf4\x07\x4e\x4c\x72\x31\xe2\x90\xab\x97\xcb\x29\x24\x5d\xa3\xb9\xf8\xc5\xc6\xba\xc6\xb5\xe6\x2e\xce\x78\xfd\x7f\x18\x62\x44\x92\x76\x74\x14\x69\xd3\xc2\x67\xcb\x60\x2e\xdd\x6b\xcb\xb8\x55\x2f\x10\x6c\x9d\x73\xbd\x07\xbc\x3b\x16\x8a\x39\xbc\x88\x1f\x0d\x5e\x88\x24\xc6\x3d\x33\x6b\x47\xd6\xf1\x54\xdb\x24\x2e\x61\x47\x1f\xf3\x0b\xd9\x17\x26\x59\x16\x4e\x98\xbb\xae\x4f\xd7\x64\x25\xa6\xda\x99\x5c\xfb\xe3\x1f\xc4\x8c\x8e\x26\x2b\x6d\xf7\x8b\x5c\x89\xa6\x3f\xe7\x21\x6d\xfa\xfb\x23\xdc\xb4\x9f\x0d\x62\xe0\x7d\x63\x72\x4e\x61\x18\x36\x49\x67\xc9\xc4\xa6\xe6\xb9\xd3\x86\x21\xc6\xac\x6f\x99\x8d\xbb\xf7\xba\x72\xdc\xce\x19\xdf\xbd\xaa\xcd\x30\x2a\x42\x88\xbf\x20\x64\x45\x88\xc3\x57\x00\x00\x00\xff\xff\xa9\xf1\x73\x83\x7c\x01\x00\x00")

// docGoBytes decompresses and returns the raw contents of doc.go.
func docGoBytes() ([]byte, error) {
	return bindataRead(
		_docGo,
		"doc.go",
	)
}

// docGo wraps the doc.go bytes in an asset with its recorded metadata
// and digest.
func docGo() (*asset, error) {
	bytes, err := docGoBytes()
	if err != nil {
		return nil, err
	}
	info := bindataFileInfo{name: "doc.go", size: 380, mode: os.FileMode(0644), modTime: time.Unix(1704739012, 0)}
	a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x49, 0x1, 0xd4, 0xd6, 0xc7, 0x44, 0xd4, 0xfd, 0x7b, 0x69, 0x1f, 0xe3, 0xe, 0x48, 0x14, 0x99, 0xf0, 0x8e, 0x43, 0xae, 0x54, 0x64, 0xa2, 0x8b, 0x82, 0x1c, 0x2b, 0xb, 0xec, 0xf5, 0xb3, 0xfc}}
	return a, nil
}
// Asset loads and returns the asset for the given name.
// It returns an error if the asset could not be found or
// could not be loaded.
func Asset(name string) ([]byte, error) {
	canonicalName := strings.Replace(name, "\\", "/", -1)
	f, ok := _bindata[canonicalName]
	if !ok {
		return nil, fmt.Errorf("Asset %s not found", name)
	}
	a, err := f()
	if err != nil {
		return nil, fmt.Errorf("Asset %s can't read by error: %v", name, err)
	}
	return a.bytes, nil
}
// AssetString returns the asset contents as a string (instead of a []byte).
// On error the returned string is empty and the error is Asset's error.
func AssetString(name string) (string, error) {
	data, err := Asset(name)
	return string(data), err
}
// MustAsset is like Asset but panics when Asset would return an error.
// It simplifies safe initialization of global variables.
func MustAsset(name string) []byte {
	a, err := Asset(name)
	if err == nil {
		return a
	}
	panic("asset: Asset(" + name + "): " + err.Error())
}
// MustAssetString is like AssetString but panics when Asset would return an
// error. It simplifies safe initialization of global variables.
func MustAssetString(name string) string {
	return string(MustAsset(name))
}
// AssetInfo loads and returns the asset info for the given name.
// It returns an error if the asset could not be found or
// could not be loaded.
func AssetInfo(name string) (os.FileInfo, error) {
	canonicalName := strings.Replace(name, "\\", "/", -1)
	f, ok := _bindata[canonicalName]
	if !ok {
		return nil, fmt.Errorf("AssetInfo %s not found", name)
	}
	a, err := f()
	if err != nil {
		return nil, fmt.Errorf("AssetInfo %s can't read by error: %v", name, err)
	}
	return a.info, nil
}
// AssetDigest returns the digest of the file with the given name. It returns an
// error if the asset could not be found or the digest could not be loaded.
func AssetDigest(name string) ([sha256.Size]byte, error) {
	canonicalName := strings.Replace(name, "\\", "/", -1)
	f, ok := _bindata[canonicalName]
	if !ok {
		return [sha256.Size]byte{}, fmt.Errorf("AssetDigest %s not found", name)
	}
	a, err := f()
	if err != nil {
		return [sha256.Size]byte{}, fmt.Errorf("AssetDigest %s can't read by error: %v", name, err)
	}
	return a.digest, nil
}
// Digests returns a map of all known files and their checksums.
func Digests() (map[string][sha256.Size]byte, error) {
	mp := make(map[string][sha256.Size]byte, len(_bindata))
	for name, load := range _bindata {
		a, err := load()
		if err != nil {
			return nil, err
		}
		mp[name] = a.digest
	}
	return mp, nil
}
// AssetNames returns the names of the assets.
// The order of the names is not guaranteed (map iteration order).
func AssetNames() []string {
	names := make([]string, len(_bindata))
	i := 0
	for name := range _bindata {
		names[i] = name
		i++
	}
	return names
}
// _bindata is a table, holding each asset generator, mapped to its name.
var _bindata = map[string]func() (*asset, error){
	"1619446565_postgres_make_anon_metrics_table.down.sql": _1619446565_postgres_make_anon_metrics_tableDownSql,
	"1619446565_postgres_make_anon_metrics_table.up.sql":   _1619446565_postgres_make_anon_metrics_tableUpSql,
	"doc.go": docGo,
}

// AssetDebug is true if the assets were built with the debug flag enabled.
const AssetDebug = false
// AssetDir returns the file names below a certain
// directory embedded in the file by go-bindata.
// For example if you run go-bindata on data/... and data contains the
// following hierarchy:
//
//	data/
//	  foo.txt
//	  img/
//	    a.png
//	    b.png
//
// then AssetDir("data") would return []string{"foo.txt", "img"},
// AssetDir("data/img") would return []string{"a.png", "b.png"},
// AssetDir("foo.txt") and AssetDir("notexist") would return an error, and
// AssetDir("") will return []string{"data"}.
func AssetDir(name string) ([]string, error) {
	node := _bintree
	if len(name) != 0 {
		canonicalName := strings.Replace(name, "\\", "/", -1)
		pathList := strings.Split(canonicalName, "/")
		// Walk the tree one path segment at a time.
		for _, p := range pathList {
			node = node.Children[p]
			if node == nil {
				return nil, fmt.Errorf("Asset %s not found", name)
			}
		}
	}
	// A non-nil Func means name refers to a file, not a directory.
	if node.Func != nil {
		return nil, fmt.Errorf("Asset %s not found", name)
	}
	rv := make([]string, 0, len(node.Children))
	for childName := range node.Children {
		rv = append(rv, childName)
	}
	return rv, nil
}
// bintree is a node of the asset directory tree: leaves carry a non-nil
// Func that loads the asset; interior nodes carry only Children.
type bintree struct {
	Func     func() (*asset, error)
	Children map[string]*bintree
}

// _bintree is the root of the asset directory tree used by AssetDir.
var _bintree = &bintree{nil, map[string]*bintree{
	"1619446565_postgres_make_anon_metrics_table.down.sql": {_1619446565_postgres_make_anon_metrics_tableDownSql, map[string]*bintree{}},
	"1619446565_postgres_make_anon_metrics_table.up.sql":   {_1619446565_postgres_make_anon_metrics_tableUpSql, map[string]*bintree{}},
	"doc.go": {docGo, map[string]*bintree{}},
}}
// RestoreAsset restores an asset under the given directory.
// It writes the asset's bytes with the recorded file mode and sets the
// file's modification time to the recorded one.
func RestoreAsset(dir, name string) error {
	data, err := Asset(name)
	if err != nil {
		return err
	}
	info, err := AssetInfo(name)
	if err != nil {
		return err
	}
	err = os.MkdirAll(_filePath(dir, filepath.Dir(name)), os.FileMode(0755))
	if err != nil {
		return err
	}
	err = os.WriteFile(_filePath(dir, name), data, info.Mode())
	if err != nil {
		return err
	}
	return os.Chtimes(_filePath(dir, name), info.ModTime(), info.ModTime())
}
// RestoreAssets restores an asset under the given directory recursively.
// If name is a file it is restored directly; if it is a directory every
// asset below it is restored.
func RestoreAssets(dir, name string) error {
	children, err := AssetDir(name)
	// File: AssetDir errors for file names, so fall back to a single restore.
	if err != nil {
		return RestoreAsset(dir, name)
	}
	// Dir
	for _, child := range children {
		err = RestoreAssets(dir, filepath.Join(name, child))
		if err != nil {
			return err
		}
	}
	return nil
}
// _filePath joins dir and the slash-canonicalized asset name into an
// OS-specific file path.
func _filePath(dir, name string) string {
	segments := strings.Split(strings.Replace(name, "\\", "/", -1), "/")
	return filepath.Join(append([]string{dir}, segments...)...)
}

View File

@@ -0,0 +1,189 @@
package anonmetrics
import (
"database/sql"
// Import postgres driver
_ "github.com/lib/pq"
"go.uber.org/zap"
"github.com/status-im/migrate/v4"
"github.com/status-im/migrate/v4/database/postgres"
bindata "github.com/status-im/migrate/v4/source/go_bindata"
"github.com/status-im/status-go/appmetrics"
"github.com/status-im/status-go/protocol/anonmetrics/migrations"
"github.com/status-im/status-go/protocol/protobuf"
)
const ActiveServerPhrase = "I was thinking that it would be a pretty nice idea if the server functionality was working now, I express gratitude in the anticipation"
// ServerConfig holds the configuration for the anonymous metrics Server.
type ServerConfig struct {
	// Enabled presumably toggles the server — it is not referenced in this
	// file; confirm at call sites.
	Enabled bool
	// PostgresURI is presumably the metrics DB connection string; NewServer
	// takes the URI as a parameter instead — confirm at call sites.
	PostgresURI string
	// Active must equal ActiveServerPhrase for the server to do any work.
	Active string
}
// Server receives anonymous metric batches and persists them to Postgres.
type Server struct {
	Config     *ServerConfig
	Logger     *zap.Logger
	PostgresDB *sql.DB
}
// NewServer opens a Postgres connection at postgresURI, applies the anon
// metrics migrations, and returns a Server wrapping the migrated DB.
//
// NOTE(review): Config and Logger are left nil here, yet StoreMetrics and
// GetAppMetrics dereference both — callers must set them before use; confirm
// at call sites.
func NewServer(postgresURI string) (*Server, error) {
	postgresMigration := bindata.Resource(migrations.AssetNames(), migrations.Asset)
	db, err := NewMigratedDB(postgresURI, postgresMigration)
	if err != nil {
		return nil, err
	}
	return &Server{
		PostgresDB: db,
	}, nil
}
// Stop closes the Postgres connection pool, if one was opened.
func (s *Server) Stop() error {
	if s.PostgresDB == nil {
		return nil
	}
	return s.PostgresDB.Close()
}
// StoreMetrics converts the received protobuf batch to model metrics and
// inserts them into Postgres inside a single transaction. Duplicate
// message_ids are silently skipped (ON CONFLICT DO NOTHING). No-op unless the
// server is explicitly activated. Returns the converted metrics on success.
func (s *Server) StoreMetrics(appMetricsBatch *protobuf.AnonymousMetricBatch) (appMetrics []*appmetrics.AppMetric, err error) {
	if s.Config.Active != ActiveServerPhrase {
		return nil, nil
	}
	s.Logger.Debug("StoreMetrics() triggered with payload",
		zap.Reflect("appMetricsBatch", appMetricsBatch))
	appMetrics, err = adaptProtoBatchToModels(appMetricsBatch)
	if err != nil {
		return
	}
	var (
		tx     *sql.Tx
		insert *sql.Stmt
	)
	// start txn
	tx, err = s.PostgresDB.Begin()
	if err != nil {
		return
	}
	// Commit on success, roll back on any error (reads the named err).
	defer func() {
		if err == nil {
			err = tx.Commit()
			return
		}
		_ = tx.Rollback()
	}()
	//noinspection ALL
	query := `INSERT INTO app_metrics (message_id, event, value, app_version, operating_system, session_id, created_at)
VALUES ($1, $2, $3, $4, $5, $6, $7)
ON CONFLICT (message_id) DO NOTHING;`
	insert, err = tx.Prepare(query)
	if err != nil {
		return
	}
	// Fix: close the prepared statement; it was previously leaked.
	defer func() { _ = insert.Close() }()
	for _, metric := range appMetrics {
		_, err = insert.Exec(
			metric.MessageID,
			metric.Event,
			metric.Value,
			metric.AppVersion,
			metric.OS,
			metric.SessionID,
			metric.CreatedAt,
		)
		if err != nil {
			return
		}
	}
	return
}
// getFromRows scans every row of a SELECT over app_metrics into
// appmetrics.AppMetric values. The column order must match the SELECT in
// GetAppMetrics.
func (s *Server) getFromRows(rows *sql.Rows) (appMetrics []appmetrics.AppMetric, err error) {
	for rows.Next() {
		metric := appmetrics.AppMetric{}
		err = rows.Scan(
			&metric.ID,
			&metric.MessageID,
			&metric.Event,
			&metric.Value,
			&metric.AppVersion,
			&metric.OS,
			&metric.SessionID,
			&metric.CreatedAt,
			&metric.Processed,
			&metric.ReceivedAt,
		)
		if err != nil {
			return nil, err
		}
		appMetrics = append(appMetrics, metric)
	}
	// Fix: surface any iteration error; previously a mid-iteration failure
	// silently returned a truncated result set.
	if err = rows.Err(); err != nil {
		return nil, err
	}
	return appMetrics, nil
}
// GetAppMetrics returns up to limit stored metrics starting at offset.
// No-op (nil, nil) unless the server is explicitly activated.
//
// NOTE(review): the query has no ORDER BY, so pagination across calls is not
// guaranteed to be stable — confirm whether callers rely on ordering.
func (s *Server) GetAppMetrics(limit int, offset int) ([]appmetrics.AppMetric, error) {
	if s.Config.Active != ActiveServerPhrase {
		return nil, nil
	}
	rows, err := s.PostgresDB.Query("SELECT id, message_id, event, value, app_version, operating_system, session_id, created_at, processed, received_at FROM app_metrics LIMIT $1 OFFSET $2", limit, offset)
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	return s.getFromRows(rows)
}
// NewMigratedDB opens a Postgres database at uri and applies the given
// go-bindata migrations, returning the ready-to-use handle.
func NewMigratedDB(uri string, migrationResource *bindata.AssetSource) (*sql.DB, error) {
	db, err := sql.Open("postgres", uri)
	if err != nil {
		return nil, err
	}
	if err := setup(db, migrationResource); err != nil {
		// Fix: close the handle on migration failure; it was previously
		// leaked when setup returned an error.
		_ = db.Close()
		return nil, err
	}
	return db, nil
}
// setup builds a migrator for d and runs all pending up-migrations.
func setup(d *sql.DB, migrationResource *bindata.AssetSource) error {
	m, err := MakeMigration(d, migrationResource)
	if err != nil {
		return err
	}
	// migrate.ErrNoChange just means the schema is already up to date.
	if err := m.Up(); err != nil && err != migrate.ErrNoChange {
		return err
	}
	return nil
}
// MakeMigration wires the go-bindata migration source and the Postgres
// driver for d into a migrate.Migrate instance, without running anything.
func MakeMigration(d *sql.DB, migrationResource *bindata.AssetSource) (*migrate.Migrate, error) {
	source, err := bindata.WithInstance(migrationResource)
	if err != nil {
		return nil, err
	}
	driver, err := postgres.WithInstance(d, &postgres.Config{})
	if err != nil {
		return nil, err
	}
	return migrate.NewWithInstance(
		"go-bindata",
		source,
		"postgres",
		driver)
}