390
vendor/github.com/status-im/status-go/services/wallet/transfer/block_dao.go
generated
vendored
Normal file
@@ -0,0 +1,390 @@
|
||||
package transfer
|
||||
|
||||
import (
|
||||
"database/sql"
|
||||
"math/big"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/log"
|
||||
"github.com/status-im/status-go/services/wallet/bigint"
|
||||
)
|
||||
|
||||
type BlocksRange struct {
|
||||
from *big.Int
|
||||
to *big.Int
|
||||
}
|
||||
|
||||
type Block struct {
|
||||
Number *big.Int
|
||||
Balance *big.Int
|
||||
Nonce *int64
|
||||
}
|
||||
|
||||
type BlockView struct {
|
||||
Address common.Address `json:"address"`
|
||||
Number *big.Int `json:"blockNumber"`
|
||||
Balance bigint.BigInt `json:"balance"`
|
||||
Nonce *int64 `json:"nonce"`
|
||||
}
|
||||
|
||||
func blocksToViews(blocks map[common.Address]*Block) []BlockView {
|
||||
blocksViews := []BlockView{}
|
||||
for address, block := range blocks {
|
||||
view := BlockView{
|
||||
Address: address,
|
||||
Number: block.Number,
|
||||
Balance: bigint.BigInt{Int: block.Balance},
|
||||
Nonce: block.Nonce,
|
||||
}
|
||||
blocksViews = append(blocksViews, view)
|
||||
}
|
||||
|
||||
return blocksViews
|
||||
}
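// Illustrative sketch, not part of the upstream file; the function name below is hypothetical.
// It shows how the per-account Block map is converted into the JSON-friendly BlockView slice.
func exampleBlocksToViews(addr common.Address) []BlockView {
nonce := int64(7)
blocks := map[common.Address]*Block{
addr: {Number: big.NewInt(123), Balance: big.NewInt(1000000000000000000), Nonce: &nonce},
}
// Each map entry becomes one BlockView with the address attached.
return blocksToViews(blocks)
}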
|
||||
|
||||
type BlockDAO struct {
|
||||
db *sql.DB
|
||||
}
|
||||
|
||||
// mergeBlocksRanges merges old block ranges where possible
|
||||
func (b *BlockDAO) mergeBlocksRanges(chainIDs []uint64, accounts []common.Address) error {
|
||||
for _, chainID := range chainIDs {
|
||||
for _, account := range accounts {
|
||||
err := b.mergeRanges(chainID, account)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (b *BlockDAO) mergeRanges(chainID uint64, account common.Address) (err error) {
|
||||
var (
|
||||
tx *sql.Tx
|
||||
)
|
||||
|
||||
ranges, err := b.getOldRanges(chainID, account)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
log.Info("merge old ranges", "account", account, "network", chainID, "ranges", len(ranges))
|
||||
|
||||
if len(ranges) <= 1 {
|
||||
return nil
|
||||
}
|
||||
|
||||
tx, err = b.db.Begin()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
defer func() {
|
||||
if err == nil {
|
||||
err = tx.Commit()
|
||||
return
|
||||
}
|
||||
_ = tx.Rollback()
|
||||
}()
|
||||
|
||||
newRanges, deletedRanges := getNewRanges(ranges)
|
||||
|
||||
for _, rangeToDelete := range deletedRanges {
|
||||
err = deleteRange(chainID, tx, account, rangeToDelete.from, rangeToDelete.to)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
for _, newRange := range newRanges {
|
||||
err = insertRange(chainID, tx, account, newRange.from, newRange.to)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (b *BlockDAO) insertRange(chainID uint64, account common.Address, from, to, balance *big.Int, nonce uint64) error {
|
||||
log.Debug("insert blocks range", "account", account, "network id", chainID, "from", from, "to", to, "balance", balance, "nonce", nonce)
|
||||
insert, err := b.db.Prepare("INSERT INTO blocks_ranges (network_id, address, blk_from, blk_to, balance, nonce) VALUES (?, ?, ?, ?, ?, ?)")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
_, err = insert.Exec(chainID, account, (*bigint.SQLBigInt)(from), (*bigint.SQLBigInt)(to), (*bigint.SQLBigIntBytes)(balance), &nonce)
|
||||
return err
|
||||
}
|
||||
|
||||
func (b *BlockDAO) getOldRanges(chainID uint64, account common.Address) ([]*BlocksRange, error) {
|
||||
query := `SELECT blk_from, blk_to FROM blocks_ranges
WHERE address = ?
AND network_id = ?
ORDER BY blk_from`
|
||||
|
||||
rows, err := b.db.Query(query, account, chainID)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer rows.Close()
|
||||
ranges := []*BlocksRange{}
|
||||
for rows.Next() {
|
||||
from := &big.Int{}
|
||||
to := &big.Int{}
|
||||
err = rows.Scan((*bigint.SQLBigInt)(from), (*bigint.SQLBigInt)(to))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
ranges = append(ranges, &BlocksRange{
|
||||
from: from,
|
||||
to: to,
|
||||
})
|
||||
}
|
||||
|
||||
return ranges, nil
|
||||
}
|
||||
|
||||
// GetBlocksToLoadByAddress gets unloaded blocks for a given address.
|
||||
func (b *BlockDAO) GetBlocksToLoadByAddress(chainID uint64, address common.Address, limit int) (rst []*big.Int, err error) {
|
||||
query := `SELECT blk_number FROM blocks
|
||||
WHERE address = ? AND network_id = ? AND loaded = 0
|
||||
ORDER BY blk_number DESC
|
||||
LIMIT ?`
|
||||
rows, err := b.db.Query(query, address, chainID, limit)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
defer rows.Close()
|
||||
for rows.Next() {
|
||||
block := &big.Int{}
|
||||
err = rows.Scan((*bigint.SQLBigInt)(block))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
rst = append(rst, block)
|
||||
}
|
||||
return rst, nil
|
||||
}
|
||||
|
||||
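// GetLastBlockByAddress returns the oldest block among the `limit` most recent blocks stored
// for the given address and chain, or nil when no blocks are stored.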
func (b *BlockDAO) GetLastBlockByAddress(chainID uint64, address common.Address, limit int) (rst *big.Int, err error) {
|
||||
query := `SELECT * FROM
|
||||
(SELECT blk_number FROM blocks WHERE address = ? AND network_id = ? ORDER BY blk_number DESC LIMIT ?)
|
||||
ORDER BY blk_number LIMIT 1`
|
||||
rows, err := b.db.Query(query, address, chainID, limit)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
defer rows.Close()
|
||||
|
||||
if rows.Next() {
|
||||
block := &big.Int{}
|
||||
err = rows.Scan((*bigint.SQLBigInt)(block))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return block, nil
|
||||
}
|
||||
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
func (b *BlockDAO) GetFirstSavedBlock(chainID uint64, address common.Address) (rst *DBHeader, err error) {
|
||||
query := `SELECT blk_number, blk_hash, loaded
|
||||
FROM blocks
|
||||
WHERE network_id = ? AND address = ?
|
||||
ORDER BY blk_number LIMIT 1`
|
||||
rows, err := b.db.Query(query, chainID, address)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
defer rows.Close()
|
||||
|
||||
if rows.Next() {
|
||||
header := &DBHeader{Hash: common.Hash{}, Number: new(big.Int)}
|
||||
err = rows.Scan((*bigint.SQLBigInt)(header.Number), &header.Hash, &header.Loaded)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return header, nil
|
||||
}
|
||||
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
func (b *BlockDAO) GetFirstKnownBlock(chainID uint64, address common.Address) (rst *big.Int, err error) {
|
||||
query := `SELECT blk_from FROM blocks_ranges
|
||||
WHERE address = ?
|
||||
AND network_id = ?
|
||||
ORDER BY blk_from
|
||||
LIMIT 1`
|
||||
|
||||
rows, err := b.db.Query(query, address, chainID)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
defer rows.Close()
|
||||
|
||||
if rows.Next() {
|
||||
block := &big.Int{}
|
||||
err = rows.Scan((*bigint.SQLBigInt)(block))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return block, nil
|
||||
}
|
||||
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
func (b *BlockDAO) GetLastKnownBlockByAddress(chainID uint64, address common.Address) (block *Block, err error) {
|
||||
query := `SELECT blk_to, balance, nonce FROM blocks_ranges
|
||||
WHERE address = ?
|
||||
AND network_id = ?
|
||||
ORDER BY blk_to DESC
|
||||
LIMIT 1`
|
||||
|
||||
rows, err := b.db.Query(query, address, chainID)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
defer rows.Close()
|
||||
|
||||
if rows.Next() {
|
||||
var nonce sql.NullInt64
|
||||
block = &Block{Number: &big.Int{}, Balance: &big.Int{}}
|
||||
err = rows.Scan((*bigint.SQLBigInt)(block.Number), (*bigint.SQLBigIntBytes)(block.Balance), &nonce)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if nonce.Valid {
|
||||
block.Nonce = &nonce.Int64
|
||||
}
|
||||
return block, nil
|
||||
}
|
||||
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
func (b *BlockDAO) getLastKnownBlocks(chainID uint64, addresses []common.Address) (map[common.Address]*Block, error) {
|
||||
result := map[common.Address]*Block{}
|
||||
for _, address := range addresses {
|
||||
block, err := b.GetLastKnownBlockByAddress(chainID, address)
if err != nil {
return nil, err
|
||||
}
|
||||
|
||||
if block != nil {
|
||||
result[address] = block
|
||||
}
|
||||
}
|
||||
|
||||
return result, nil
|
||||
}
|
||||
|
||||
// TODO: Remove the method below; it is used in one place and duplicates getLastKnownBlocks with a slight, unneeded difference
|
||||
func (b *BlockDAO) GetLastKnownBlockByAddresses(chainID uint64, addresses []common.Address) (map[common.Address]*Block, []common.Address, error) {
|
||||
res := map[common.Address]*Block{}
|
||||
accountsWithoutHistory := []common.Address{}
|
||||
for _, address := range addresses {
|
||||
block, err := b.GetLastKnownBlockByAddress(chainID, address)
|
||||
if err != nil {
|
||||
log.Info("Can't get last block", "error", err)
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
if block != nil {
|
||||
res[address] = block
|
||||
} else {
|
||||
accountsWithoutHistory = append(accountsWithoutHistory, address)
|
||||
}
|
||||
}
|
||||
|
||||
return res, accountsWithoutHistory, nil
|
||||
}
|
||||
|
||||
func getNewRanges(ranges []*BlocksRange) ([]*BlocksRange, []*BlocksRange) {
|
||||
initValue := big.NewInt(-1)
|
||||
prevFrom := big.NewInt(-1)
|
||||
prevTo := big.NewInt(-1)
|
||||
hasMergedRanges := false
|
||||
var newRanges []*BlocksRange
|
||||
var deletedRanges []*BlocksRange
|
||||
for idx, blocksRange := range ranges {
|
||||
if prevTo.Cmp(initValue) == 0 {
|
||||
prevTo = blocksRange.to
|
||||
prevFrom = blocksRange.from
|
||||
} else if prevTo.Cmp(blocksRange.from) >= 0 {
|
||||
hasMergedRanges = true
|
||||
deletedRanges = append(deletedRanges, ranges[idx-1])
|
||||
if prevTo.Cmp(blocksRange.to) <= 0 {
|
||||
prevTo = blocksRange.to
|
||||
}
|
||||
} else {
|
||||
if hasMergedRanges {
|
||||
deletedRanges = append(deletedRanges, ranges[idx-1])
|
||||
newRanges = append(newRanges, &BlocksRange{
|
||||
from: prevFrom,
|
||||
to: prevTo,
|
||||
})
|
||||
}
|
||||
log.Info("blocks ranges gap detected", "from", prevTo, "to", blocksRange.from)
|
||||
hasMergedRanges = false
|
||||
|
||||
prevFrom = blocksRange.from
|
||||
prevTo = blocksRange.to
|
||||
}
|
||||
}
|
||||
|
||||
if hasMergedRanges {
|
||||
deletedRanges = append(deletedRanges, ranges[len(ranges)-1])
|
||||
newRanges = append(newRanges, &BlocksRange{
|
||||
from: prevFrom,
|
||||
to: prevTo,
|
||||
})
|
||||
}
|
||||
|
||||
return newRanges, deletedRanges
|
||||
}
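// Illustrative sketch, not part of the upstream file; the function name below is hypothetical.
// It shows the merge behaviour of getNewRanges: overlapping ranges [10,30] and [20,50] produce
// a single merged range [10,50], and both originals are reported for deletion.
func exampleMergeOverlappingRanges() (newRanges, deletedRanges []*BlocksRange) {
ranges := []*BlocksRange{
{from: big.NewInt(10), to: big.NewInt(30)},
{from: big.NewInt(20), to: big.NewInt(50)},
}
// Expected: newRanges == [{10, 50}], deletedRanges == [{10, 30}, {20, 50}].
return getNewRanges(ranges)
}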
|
||||
|
||||
func deleteRange(chainID uint64, creator statementCreator, account common.Address, from *big.Int, to *big.Int) error {
|
||||
log.Info("delete blocks range", "account", account, "network", chainID, "from", from, "to", to)
|
||||
delete, err := creator.Prepare(`DELETE FROM blocks_ranges
|
||||
WHERE address = ?
|
||||
AND network_id = ?
|
||||
AND blk_from = ?
|
||||
AND blk_to = ?`)
|
||||
if err != nil {
|
||||
log.Info("some error", "error", err)
|
||||
return err
|
||||
}
|
||||
|
||||
_, err = delete.Exec(account, chainID, (*bigint.SQLBigInt)(from), (*bigint.SQLBigInt)(to))
|
||||
return err
|
||||
}
|
||||
|
||||
func deleteAllRanges(creator statementCreator, account common.Address) error {
|
||||
delete, err := creator.Prepare(`DELETE FROM blocks_ranges WHERE address = ?`)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
_, err = delete.Exec(account)
|
||||
return err
|
||||
}
|
||||
|
||||
func insertRange(chainID uint64, creator statementCreator, account common.Address, from *big.Int, to *big.Int) error {
|
||||
log.Info("insert blocks range", "account", account, "network", chainID, "from", from, "to", to)
|
||||
insert, err := creator.Prepare("INSERT INTO blocks_ranges (network_id, address, blk_from, blk_to) VALUES (?, ?, ?, ?)")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
_, err = insert.Exec(chainID, account, (*bigint.SQLBigInt)(from), (*bigint.SQLBigInt)(to))
|
||||
return err
|
||||
}
|
||||
194
vendor/github.com/status-im/status-go/services/wallet/transfer/block_ranges_sequential_dao.go
generated
vendored
Normal file
@@ -0,0 +1,194 @@
|
||||
package transfer
|
||||
|
||||
import (
|
||||
"database/sql"
|
||||
"math/big"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/log"
|
||||
"github.com/status-im/status-go/services/wallet/bigint"
|
||||
)
|
||||
|
||||
type BlockRangeDAOer interface {
|
||||
getBlockRange(chainID uint64, address common.Address) (blockRange *ethTokensBlockRanges, err error)
|
||||
upsertRange(chainID uint64, account common.Address, newBlockRange *ethTokensBlockRanges) (err error)
|
||||
updateTokenRange(chainID uint64, account common.Address, newBlockRange *BlockRange) (err error)
|
||||
upsertEthRange(chainID uint64, account common.Address, newBlockRange *BlockRange) (err error)
|
||||
}
|
||||
|
||||
type BlockRangeSequentialDAO struct {
|
||||
db *sql.DB
|
||||
}
|
||||
|
||||
type BlockRange struct {
|
||||
Start *big.Int // Block of first transfer
|
||||
FirstKnown *big.Int // Oldest scanned block
|
||||
LastKnown *big.Int // Last scanned block
|
||||
}
|
||||
|
||||
func NewBlockRange() *BlockRange {
|
||||
return &BlockRange{Start: &big.Int{}, FirstKnown: &big.Int{}, LastKnown: &big.Int{}}
|
||||
}
|
||||
|
||||
type ethTokensBlockRanges struct {
|
||||
eth *BlockRange
|
||||
tokens *BlockRange
|
||||
balanceCheckHash string
|
||||
}
|
||||
|
||||
func newEthTokensBlockRanges() *ethTokensBlockRanges {
|
||||
return &ethTokensBlockRanges{eth: NewBlockRange(), tokens: NewBlockRange()}
|
||||
}
|
||||
|
||||
func (b *BlockRangeSequentialDAO) getBlockRange(chainID uint64, address common.Address) (blockRange *ethTokensBlockRanges, err error) {
|
||||
query := `SELECT blk_start, blk_first, blk_last, token_blk_start, token_blk_first, token_blk_last, balance_check_hash FROM blocks_ranges_sequential
|
||||
WHERE address = ?
|
||||
AND network_id = ?`
|
||||
|
||||
rows, err := b.db.Query(query, address, chainID)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
defer rows.Close()
|
||||
|
||||
blockRange = &ethTokensBlockRanges{}
|
||||
if rows.Next() {
|
||||
blockRange = newEthTokensBlockRanges()
|
||||
err = rows.Scan((*bigint.SQLBigInt)(blockRange.eth.Start),
|
||||
(*bigint.SQLBigInt)(blockRange.eth.FirstKnown),
|
||||
(*bigint.SQLBigInt)(blockRange.eth.LastKnown),
|
||||
(*bigint.SQLBigInt)(blockRange.tokens.Start),
|
||||
(*bigint.SQLBigInt)(blockRange.tokens.FirstKnown),
|
||||
(*bigint.SQLBigInt)(blockRange.tokens.LastKnown),
|
||||
&blockRange.balanceCheckHash,
|
||||
)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return blockRange, nil
|
||||
}
|
||||
|
||||
return blockRange, nil
|
||||
}
|
||||
|
||||
func (b *BlockRangeSequentialDAO) deleteRange(account common.Address) error {
|
||||
log.Debug("delete blocks range", "account", account)
|
||||
delete, err := b.db.Prepare(`DELETE FROM blocks_ranges_sequential WHERE address = ?`)
|
||||
if err != nil {
|
||||
log.Error("Failed to prepare deletion of sequential block range", "error", err)
|
||||
return err
|
||||
}
|
||||
|
||||
_, err = delete.Exec(account)
|
||||
return err
|
||||
}
|
||||
|
||||
func (b *BlockRangeSequentialDAO) upsertRange(chainID uint64, account common.Address, newBlockRange *ethTokensBlockRanges) (err error) {
|
||||
ethTokensBlockRange, err := b.getBlockRange(chainID, account)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
ethBlockRange := prepareUpdatedBlockRange(ethTokensBlockRange.eth, newBlockRange.eth)
|
||||
tokensBlockRange := prepareUpdatedBlockRange(ethTokensBlockRange.tokens, newBlockRange.tokens)
|
||||
|
||||
log.Debug("update eth and tokens blocks range", "account", account, "chainID", chainID,
|
||||
"eth.start", ethBlockRange.Start, "eth.first", ethBlockRange.FirstKnown, "eth.last", ethBlockRange.LastKnown,
|
||||
"tokens.start", tokensBlockRange.Start, "tokens.first", ethBlockRange.FirstKnown, "eth.last", ethBlockRange.LastKnown, "hash", newBlockRange.balanceCheckHash)
|
||||
|
||||
upsert, err := b.db.Prepare(`REPLACE INTO blocks_ranges_sequential
|
||||
(network_id, address, blk_start, blk_first, blk_last, token_blk_start, token_blk_first, token_blk_last, balance_check_hash) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?)`)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
_, err = upsert.Exec(chainID, account, (*bigint.SQLBigInt)(ethBlockRange.Start), (*bigint.SQLBigInt)(ethBlockRange.FirstKnown), (*bigint.SQLBigInt)(ethBlockRange.LastKnown),
|
||||
(*bigint.SQLBigInt)(tokensBlockRange.Start), (*bigint.SQLBigInt)(tokensBlockRange.FirstKnown), (*bigint.SQLBigInt)(tokensBlockRange.LastKnown), newBlockRange.balanceCheckHash)
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
func (b *BlockRangeSequentialDAO) upsertEthRange(chainID uint64, account common.Address,
|
||||
newBlockRange *BlockRange) (err error) {
|
||||
|
||||
ethTokensBlockRange, err := b.getBlockRange(chainID, account)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
blockRange := prepareUpdatedBlockRange(ethTokensBlockRange.eth, newBlockRange)
|
||||
|
||||
log.Debug("update eth blocks range", "account", account, "chainID", chainID,
|
||||
"start", blockRange.Start, "first", blockRange.FirstKnown, "last", blockRange.LastKnown, "old hash", ethTokensBlockRange.balanceCheckHash)
|
||||
|
||||
upsert, err := b.db.Prepare(`REPLACE INTO blocks_ranges_sequential
|
||||
(network_id, address, blk_start, blk_first, blk_last, token_blk_start, token_blk_first, token_blk_last, balance_check_hash) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?)`)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if ethTokensBlockRange.tokens == nil {
|
||||
ethTokensBlockRange.tokens = NewBlockRange()
|
||||
}
|
||||
|
||||
_, err = upsert.Exec(chainID, account, (*bigint.SQLBigInt)(blockRange.Start), (*bigint.SQLBigInt)(blockRange.FirstKnown), (*bigint.SQLBigInt)(blockRange.LastKnown),
|
||||
(*bigint.SQLBigInt)(ethTokensBlockRange.tokens.Start), (*bigint.SQLBigInt)(ethTokensBlockRange.tokens.FirstKnown), (*bigint.SQLBigInt)(ethTokensBlockRange.tokens.LastKnown), ethTokensBlockRange.balanceCheckHash)
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
func (b *BlockRangeSequentialDAO) updateTokenRange(chainID uint64, account common.Address,
|
||||
newBlockRange *BlockRange) (err error) {
|
||||
|
||||
ethTokensBlockRange, err := b.getBlockRange(chainID, account)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
blockRange := prepareUpdatedBlockRange(ethTokensBlockRange.tokens, newBlockRange)
|
||||
|
||||
log.Debug("update tokens blocks range", "account", account, "chainID", chainID,
|
||||
"start", blockRange.Start, "first", blockRange.FirstKnown, "last", blockRange.LastKnown, "old hash", ethTokensBlockRange.balanceCheckHash)
|
||||
|
||||
update, err := b.db.Prepare(`UPDATE blocks_ranges_sequential SET token_blk_start = ?, token_blk_first = ?, token_blk_last = ? WHERE network_id = ? AND address = ?`)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
_, err = update.Exec((*bigint.SQLBigInt)(blockRange.Start), (*bigint.SQLBigInt)(blockRange.FirstKnown),
|
||||
(*bigint.SQLBigInt)(blockRange.LastKnown), chainID, account)
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
func prepareUpdatedBlockRange(blockRange, newBlockRange *BlockRange) *BlockRange {
|
||||
// Update existing range
|
||||
if blockRange != nil {
|
||||
if newBlockRange != nil {
|
||||
// Overwrite the start block if there was none or if the new one is greater. The start of
// history can only be refined towards a greater block, since no history can exist before the
// block considered the start of history; due to concurrent block range checks, a newer
// (greater) block matching the start-block criteria (nonce is zero, balances are equal) can be found.
|
||||
if newBlockRange.Start != nil && (blockRange.Start == nil || blockRange.Start.Cmp(newBlockRange.Start) < 0) {
|
||||
blockRange.Start = newBlockRange.Start
|
||||
}
|
||||
|
||||
// Overwrite the first known block if there was none or if the new one is older
|
||||
if (blockRange.FirstKnown == nil && newBlockRange.FirstKnown != nil) ||
|
||||
(blockRange.FirstKnown != nil && newBlockRange.FirstKnown != nil && blockRange.FirstKnown.Cmp(newBlockRange.FirstKnown) > 0) {
|
||||
blockRange.FirstKnown = newBlockRange.FirstKnown
|
||||
}
|
||||
|
||||
// Overwrite the last known block if there was none or if the new one is newer
|
||||
if (blockRange.LastKnown == nil && newBlockRange.LastKnown != nil) ||
|
||||
(blockRange.LastKnown != nil && newBlockRange.LastKnown != nil && blockRange.LastKnown.Cmp(newBlockRange.LastKnown) < 0) {
|
||||
blockRange.LastKnown = newBlockRange.LastKnown
|
||||
}
|
||||
}
|
||||
} else {
|
||||
blockRange = newBlockRange
|
||||
}
|
||||
|
||||
return blockRange
|
||||
}
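// Illustrative sketch, not part of the upstream file; the function name below is hypothetical.
// prepareUpdatedBlockRange keeps the greater Start, the older FirstKnown and the newer LastKnown,
// so merging {100, 80, 200} with {120, 60, 250} is expected to yield {120, 60, 250}.
func examplePrepareUpdatedBlockRange() *BlockRange {
existing := &BlockRange{Start: big.NewInt(100), FirstKnown: big.NewInt(80), LastKnown: big.NewInt(200)}
incoming := &BlockRange{Start: big.NewInt(120), FirstKnown: big.NewInt(60), LastKnown: big.NewInt(250)}
return prepareUpdatedBlockRange(existing, incoming)
}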
|
||||
224
vendor/github.com/status-im/status-go/services/wallet/transfer/bridge_identifier.go
generated
vendored
Normal file
@@ -0,0 +1,224 @@
|
||||
package transfer
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/hex"
|
||||
"fmt"
|
||||
"math/big"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/common/hexutil"
|
||||
"github.com/status-im/status-go/rpc/chain"
|
||||
w_common "github.com/status-im/status-go/services/wallet/common"
|
||||
"github.com/status-im/status-go/services/wallet/token"
|
||||
)
|
||||
|
||||
// TODO: Find a proper way to uniquely match Origin and Destination transactions (some sort of hash or unique ID).
// The current approach is not foolproof (for example, if multiple identical bridge operations are triggered
// at the same time).
// Recipient + Relayer + Data should match in both the Origin and Destination transactions.
|
||||
func getHopBridgeFromL1CrossTxID(recipient common.Address, relayer common.Address, logData []byte) string {
|
||||
return fmt.Sprintf("FromL1_%s_%s_%s", recipient.String(), relayer.String(), hex.EncodeToString(logData))
|
||||
}
|
||||
|
||||
func getHopBridgeFromL2CrossTxID(transferID *big.Int) string {
|
||||
return fmt.Sprintf("FromL2_0x%s", transferID.Text(16))
|
||||
}
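// Illustrative sketch, not part of the upstream file; the function name and values are hypothetical.
// Both legs of a bridge operation derive the same cross-transaction ID and are matched on it;
// for an L2 origin with transferID 42 the ID is "FromL2_0x2a".
func exampleL2CrossTxID() string {
return getHopBridgeFromL2CrossTxID(big.NewInt(42)) // "FromL2_0x2a"
}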
|
||||
|
||||
type originTxParams struct {
|
||||
fromNetworkID uint64
|
||||
fromTxHash common.Hash
|
||||
fromAddress common.Address
|
||||
fromAsset string
|
||||
fromAmount *big.Int
|
||||
toNetworkID uint64
|
||||
toAddress common.Address
|
||||
crossTxID string
|
||||
timestamp uint64
|
||||
}
|
||||
|
||||
func upsertHopBridgeOriginTx(ctx context.Context, transactionManager *TransactionManager, params originTxParams) (*MultiTransaction, error) {
|
||||
// Try to find "destination" half of the multiTx
|
||||
multiTx, err := transactionManager.GetBridgeDestinationMultiTransaction(ctx, params.toNetworkID, params.crossTxID)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if multiTx == nil {
|
||||
multiTx = &MultiTransaction{
|
||||
// Data from "origin" transaction
|
||||
FromNetworkID: params.fromNetworkID,
|
||||
FromTxHash: params.fromTxHash,
|
||||
FromAddress: params.fromAddress,
|
||||
FromAsset: params.fromAsset,
|
||||
FromAmount: (*hexutil.Big)(params.fromAmount),
|
||||
ToNetworkID: params.toNetworkID,
|
||||
ToAddress: params.toAddress,
|
||||
// To be replaced by "destination" transaction, need to be non-null
|
||||
ToAsset: params.fromAsset,
|
||||
ToAmount: (*hexutil.Big)(params.fromAmount),
|
||||
// Common data
|
||||
Type: MultiTransactionBridge,
|
||||
CrossTxID: params.crossTxID,
|
||||
Timestamp: params.timestamp,
|
||||
}
|
||||
|
||||
_, err := transactionManager.InsertMultiTransaction(multiTx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
} else {
|
||||
multiTx.FromNetworkID = params.fromNetworkID
|
||||
multiTx.FromTxHash = params.fromTxHash
|
||||
multiTx.FromAddress = params.fromAddress
|
||||
multiTx.FromAsset = params.fromAsset
|
||||
multiTx.FromAmount = (*hexutil.Big)(params.fromAmount)
|
||||
multiTx.Timestamp = params.timestamp
|
||||
|
||||
err := transactionManager.UpdateMultiTransaction(multiTx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
return multiTx, nil
|
||||
}
|
||||
|
||||
type destinationTxParams struct {
|
||||
toNetworkID uint64
|
||||
toTxHash common.Hash
|
||||
toAddress common.Address
|
||||
toAsset string
|
||||
toAmount *big.Int
|
||||
crossTxID string
|
||||
timestamp uint64
|
||||
}
|
||||
|
||||
func upsertHopBridgeDestinationTx(ctx context.Context, transactionManager *TransactionManager, params destinationTxParams) (*MultiTransaction, error) {
|
||||
// Try to find "origin" half of the multiTx
|
||||
multiTx, err := transactionManager.GetBridgeOriginMultiTransaction(ctx, params.toNetworkID, params.crossTxID)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if multiTx == nil {
|
||||
multiTx = &MultiTransaction{
|
||||
// To be replaced by "origin" transaction, need to be non-null
|
||||
FromAddress: params.toAddress,
|
||||
FromAsset: params.toAsset,
|
||||
FromAmount: (*hexutil.Big)(params.toAmount),
|
||||
// Data from "destination" transaction
|
||||
ToNetworkID: params.toNetworkID,
|
||||
ToTxHash: params.toTxHash,
|
||||
ToAddress: params.toAddress,
|
||||
ToAsset: params.toAsset,
|
||||
ToAmount: (*hexutil.Big)(params.toAmount),
|
||||
// Common data
|
||||
Type: MultiTransactionBridge,
|
||||
CrossTxID: params.crossTxID,
|
||||
Timestamp: params.timestamp,
|
||||
}
|
||||
|
||||
_, err := transactionManager.InsertMultiTransaction(multiTx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
} else {
|
||||
multiTx.ToTxHash = params.toTxHash
|
||||
multiTx.ToAsset = params.toAsset
|
||||
multiTx.ToAmount = (*hexutil.Big)(params.toAmount)
|
||||
multiTx.Timestamp = params.timestamp
|
||||
|
||||
err := transactionManager.UpdateMultiTransaction(multiTx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
return multiTx, nil
|
||||
}
|
||||
|
||||
func buildHopBridgeMultitransaction(ctx context.Context, client chain.ClientInterface, transactionManager *TransactionManager, tokenManager *token.Manager, subTx *Transfer) (*MultiTransaction, error) {
|
||||
// Identify if it's from/to transaction
|
||||
switch w_common.GetEventType(subTx.Log) {
|
||||
case w_common.HopBridgeTransferSentToL2EventType:
|
||||
// L1->L2 Origin transaction
|
||||
toChainID, recipient, relayer, fromAmount, err := w_common.ParseHopBridgeTransferSentToL2Log(subTx.Log)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
params := originTxParams{
|
||||
fromNetworkID: subTx.NetworkID,
|
||||
fromTxHash: subTx.Receipt.TxHash,
|
||||
fromAddress: subTx.From,
|
||||
fromAsset: "ETH",
|
||||
fromAmount: fromAmount,
|
||||
toNetworkID: toChainID,
|
||||
toAddress: recipient,
|
||||
crossTxID: getHopBridgeFromL1CrossTxID(recipient, relayer, subTx.Log.Data),
|
||||
timestamp: subTx.Timestamp,
|
||||
}
|
||||
|
||||
return upsertHopBridgeOriginTx(ctx, transactionManager, params)
|
||||
|
||||
case w_common.HopBridgeTransferFromL1CompletedEventType:
|
||||
// L1->L2 Destination transaction
|
||||
recipient, relayer, toAmount, err := w_common.ParseHopBridgeTransferFromL1CompletedLog(subTx.Log)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
params := destinationTxParams{
|
||||
toNetworkID: subTx.NetworkID,
|
||||
toTxHash: subTx.Receipt.TxHash,
|
||||
toAddress: recipient,
|
||||
toAsset: "ETH",
|
||||
toAmount: toAmount,
|
||||
crossTxID: getHopBridgeFromL1CrossTxID(recipient, relayer, subTx.Log.Data),
|
||||
timestamp: subTx.Timestamp,
|
||||
}
|
||||
|
||||
return upsertHopBridgeDestinationTx(ctx, transactionManager, params)
|
||||
|
||||
case w_common.HopBridgeTransferSentEventType:
|
||||
// L2->L1 / L2->L2 Origin transaction
|
||||
transferID, toChainID, recipient, fromAmount, _, _, _, _, _, err := w_common.ParseHopBridgeTransferSentLog(subTx.Log)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
params := originTxParams{
|
||||
fromNetworkID: subTx.NetworkID,
|
||||
fromTxHash: subTx.Receipt.TxHash,
|
||||
fromAddress: subTx.From,
|
||||
fromAsset: "ETH",
|
||||
fromAmount: fromAmount,
|
||||
toNetworkID: toChainID,
|
||||
toAddress: recipient,
|
||||
crossTxID: getHopBridgeFromL2CrossTxID(transferID),
|
||||
timestamp: subTx.Timestamp,
|
||||
}
|
||||
|
||||
return upsertHopBridgeOriginTx(ctx, transactionManager, params)
|
||||
|
||||
case w_common.HopBridgeWithdrawalBondedEventType:
|
||||
// L2->L1 / L2->L2 Destination transaction
|
||||
transferID, toAmount, err := w_common.ParseHopWithdrawalBondedLog(subTx.Log)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
params := destinationTxParams{
|
||||
toNetworkID: subTx.NetworkID,
|
||||
toTxHash: subTx.Receipt.TxHash,
|
||||
toAddress: subTx.Address,
|
||||
toAsset: "ETH",
|
||||
toAmount: toAmount,
|
||||
crossTxID: getHopBridgeFromL2CrossTxID(transferID),
|
||||
timestamp: subTx.Timestamp,
|
||||
}
|
||||
|
||||
return upsertHopBridgeDestinationTx(ctx, transactionManager, params)
|
||||
}
|
||||
return nil, nil
|
||||
}
|
||||
654
vendor/github.com/status-im/status-go/services/wallet/transfer/commands.go
generated
vendored
Normal file
@@ -0,0 +1,654 @@
|
||||
package transfer
|
||||
|
||||
import (
|
||||
"context"
|
||||
"database/sql"
|
||||
"math/big"
|
||||
"time"
|
||||
|
||||
"golang.org/x/exp/maps"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/core/types"
|
||||
"github.com/ethereum/go-ethereum/event"
|
||||
"github.com/ethereum/go-ethereum/log"
|
||||
|
||||
"github.com/status-im/status-go/rpc/chain"
|
||||
"github.com/status-im/status-go/services/wallet/async"
|
||||
"github.com/status-im/status-go/services/wallet/balance"
|
||||
w_common "github.com/status-im/status-go/services/wallet/common"
|
||||
"github.com/status-im/status-go/services/wallet/token"
|
||||
"github.com/status-im/status-go/services/wallet/walletevent"
|
||||
"github.com/status-im/status-go/transactions"
|
||||
)
|
||||
|
||||
const (
|
||||
// EventNewTransfers emitted when a new block is added to the canonical chain
EventNewTransfers walletevent.EventType = "new-transfers"
// EventFetchingRecentHistory emitted when fetching of the latest tx history is started
EventFetchingRecentHistory walletevent.EventType = "recent-history-fetching"
// EventRecentHistoryReady emitted when fetching of the latest tx history is finished
EventRecentHistoryReady walletevent.EventType = "recent-history-ready"
// EventFetchingHistoryError emitted when fetching of tx history failed
EventFetchingHistoryError walletevent.EventType = "fetching-history-error"
// EventNonArchivalNodeDetected emitted when a connection to a non-archival node is detected
EventNonArchivalNodeDetected walletevent.EventType = "non-archival-node-detected"
|
||||
|
||||
// Internal events emitted when different kinds of transfers are detected
|
||||
EventInternalETHTransferDetected walletevent.EventType = walletevent.InternalEventTypePrefix + "eth-transfer-detected"
|
||||
EventInternalERC20TransferDetected walletevent.EventType = walletevent.InternalEventTypePrefix + "erc20-transfer-detected"
|
||||
EventInternalERC721TransferDetected walletevent.EventType = walletevent.InternalEventTypePrefix + "erc721-transfer-detected"
|
||||
EventInternalERC1155TransferDetected walletevent.EventType = walletevent.InternalEventTypePrefix + "erc1155-transfer-detected"
|
||||
|
||||
numberOfBlocksCheckedPerIteration = 40
|
||||
noBlockLimit = 0
|
||||
)
|
||||
|
||||
var (
|
||||
// This will work only for the Binance testnet, as mainnet doesn't support
// archival requests.
|
||||
binanceChainErc20BatchSize = big.NewInt(5000)
|
||||
goerliErc20BatchSize = big.NewInt(100000)
|
||||
goerliErc20ArbitrumBatchSize = big.NewInt(10000)
|
||||
goerliErc20OptimismBatchSize = big.NewInt(10000)
|
||||
erc20BatchSize = big.NewInt(500000)
|
||||
binancChainID = uint64(56)
|
||||
goerliChainID = uint64(5)
|
||||
goerliArbitrumChainID = uint64(421613)
|
||||
goerliOptimismChainID = uint64(420)
|
||||
binanceTestChainID = uint64(97)
|
||||
|
||||
transfersRetryInterval = 5 * time.Second
|
||||
)
|
||||
|
||||
type ethHistoricalCommand struct {
|
||||
address common.Address
|
||||
chainClient chain.ClientInterface
|
||||
balanceCacher balance.Cacher
|
||||
feed *event.Feed
|
||||
foundHeaders []*DBHeader
|
||||
error error
|
||||
noLimit bool
|
||||
|
||||
from *Block
|
||||
to, resultingFrom, startBlock *big.Int
|
||||
threadLimit uint32
|
||||
}
|
||||
|
||||
type Transaction []*Transfer
|
||||
|
||||
func (c *ethHistoricalCommand) Command() async.Command {
|
||||
return async.FiniteCommand{
|
||||
Interval: 5 * time.Second,
|
||||
Runable: c.Run,
|
||||
}.Run
|
||||
}
|
||||
|
||||
func (c *ethHistoricalCommand) Run(ctx context.Context) (err error) {
|
||||
log.Debug("eth historical downloader start", "chainID", c.chainClient.NetworkID(), "address", c.address,
|
||||
"from", c.from.Number, "to", c.to, "noLimit", c.noLimit)
|
||||
|
||||
start := time.Now()
|
||||
if c.from.Number != nil && c.from.Balance != nil {
|
||||
c.balanceCacher.Cache().AddBalance(c.address, c.chainClient.NetworkID(), c.from.Number, c.from.Balance)
|
||||
}
|
||||
if c.from.Number != nil && c.from.Nonce != nil {
|
||||
c.balanceCacher.Cache().AddNonce(c.address, c.chainClient.NetworkID(), c.from.Number, c.from.Nonce)
|
||||
}
|
||||
from, headers, startBlock, err := findBlocksWithEthTransfers(ctx, c.chainClient,
|
||||
c.balanceCacher, c.address, c.from.Number, c.to, c.noLimit, c.threadLimit)
|
||||
|
||||
if err != nil {
|
||||
c.error = err
|
||||
log.Error("failed to find blocks with transfers", "error", err, "chainID", c.chainClient.NetworkID(),
|
||||
"address", c.address, "from", c.from.Number, "to", c.to)
|
||||
return nil
|
||||
}
|
||||
|
||||
c.foundHeaders = headers
|
||||
c.resultingFrom = from
|
||||
c.startBlock = startBlock
|
||||
|
||||
log.Debug("eth historical downloader finished successfully", "chain", c.chainClient.NetworkID(),
|
||||
"address", c.address, "from", from, "to", c.to, "total blocks", len(headers), "time", time.Since(start))
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
type erc20HistoricalCommand struct {
|
||||
erc20 BatchDownloader
|
||||
chainClient chain.ClientInterface
|
||||
feed *event.Feed
|
||||
|
||||
iterator *IterativeDownloader
|
||||
to *big.Int
|
||||
from *big.Int
|
||||
foundHeaders []*DBHeader
|
||||
}
|
||||
|
||||
func (c *erc20HistoricalCommand) Command() async.Command {
|
||||
return async.FiniteCommand{
|
||||
Interval: 5 * time.Second,
|
||||
Runable: c.Run,
|
||||
}.Run
|
||||
}
|
||||
|
||||
func getErc20BatchSize(chainID uint64) *big.Int {
|
||||
if isBinanceChain(chainID) {
|
||||
return binanceChainErc20BatchSize
|
||||
}
|
||||
|
||||
if chainID == goerliChainID {
|
||||
return goerliErc20BatchSize
|
||||
}
|
||||
|
||||
if chainID == goerliOptimismChainID {
|
||||
return goerliErc20OptimismBatchSize
|
||||
}
|
||||
|
||||
if chainID == goerliArbitrumChainID {
|
||||
return goerliErc20ArbitrumBatchSize
|
||||
}
|
||||
|
||||
return erc20BatchSize
|
||||
}
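// Illustrative sketch, not part of the upstream file; the function name below is hypothetical.
// Batch size selection depends on the chain: Goerli (5) uses 100000-block batches, while an
// unknown chain such as mainnet (1) falls back to the 500000-block default.
func exampleErc20BatchSizes() (goerli, fallback *big.Int) {
return getErc20BatchSize(goerliChainID), getErc20BatchSize(1)
}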
|
||||
|
||||
func (c *erc20HistoricalCommand) Run(ctx context.Context) (err error) {
|
||||
log.Debug("wallet historical downloader for erc20 transfers start", "chainID", c.chainClient.NetworkID(),
|
||||
"from", c.from, "to", c.to)
|
||||
|
||||
start := time.Now()
|
||||
if c.iterator == nil {
|
||||
c.iterator, err = SetupIterativeDownloader(
|
||||
c.chainClient,
|
||||
c.erc20, getErc20BatchSize(c.chainClient.NetworkID()), c.to, c.from)
|
||||
if err != nil {
|
||||
log.Error("failed to setup historical downloader for erc20")
|
||||
return err
|
||||
}
|
||||
}
|
||||
for !c.iterator.Finished() {
|
||||
headers, _, _, err := c.iterator.Next(ctx)
|
||||
if err != nil {
|
||||
log.Error("failed to get next batch", "error", err, "chainID", c.chainClient.NetworkID()) // TODO: stop inifinite command in case of an error that we can't fix like missing trie node
|
||||
return err
|
||||
}
|
||||
c.foundHeaders = append(c.foundHeaders, headers...)
|
||||
}
|
||||
log.Debug("wallet historical downloader for erc20 transfers finished", "chainID", c.chainClient.NetworkID(),
|
||||
"from", c.from, "to", c.to, "time", time.Since(start), "headers", len(c.foundHeaders))
|
||||
return nil
|
||||
}
|
||||
|
||||
type transfersCommand struct {
|
||||
db *Database
|
||||
blockDAO *BlockDAO
|
||||
eth *ETHDownloader
|
||||
blockNums []*big.Int
|
||||
address common.Address
|
||||
chainClient chain.ClientInterface
|
||||
blocksLimit int
|
||||
transactionManager *TransactionManager
|
||||
pendingTxManager *transactions.PendingTxTracker
|
||||
tokenManager *token.Manager
|
||||
feed *event.Feed
|
||||
|
||||
// result
|
||||
fetchedTransfers []Transfer
|
||||
}
|
||||
|
||||
func (c *transfersCommand) Runner(interval ...time.Duration) async.Runner {
|
||||
intvl := transfersRetryInterval
|
||||
if len(interval) > 0 {
|
||||
intvl = interval[0]
|
||||
}
|
||||
return async.FiniteCommandWithErrorCounter{
|
||||
FiniteCommand: async.FiniteCommand{
|
||||
Interval: intvl,
|
||||
Runable: c.Run,
|
||||
},
|
||||
ErrorCounter: async.NewErrorCounter(5, "transfersCommand"),
|
||||
}
|
||||
}
|
||||
|
||||
func (c *transfersCommand) Command(interval ...time.Duration) async.Command {
|
||||
return c.Runner(interval...).Run
|
||||
}
|
||||
|
||||
func (c *transfersCommand) Run(ctx context.Context) (err error) {
|
||||
// Take blocks from cache if available, ignoring the limit
|
||||
// If no blocks are available in cache, take blocks from DB respecting the limit
|
||||
// If no limit is set, take all blocks from DB
|
||||
log.Debug("start transfersCommand", "chain", c.chainClient.NetworkID(), "address", c.address, "blockNums", c.blockNums)
|
||||
startTs := time.Now()
|
||||
|
||||
for {
|
||||
blocks := c.blockNums
|
||||
if blocks == nil {
|
||||
blocks, _ = c.blockDAO.GetBlocksToLoadByAddress(c.chainClient.NetworkID(), c.address, numberOfBlocksCheckedPerIteration)
|
||||
}
|
||||
|
||||
for _, blockNum := range blocks {
|
||||
log.Debug("transfersCommand block start", "chain", c.chainClient.NetworkID(), "address", c.address, "block", blockNum)
|
||||
|
||||
allTransfers, err := c.eth.GetTransfersByNumber(ctx, blockNum)
|
||||
if err != nil {
|
||||
log.Error("getTransfersByBlocks error", "error", err)
|
||||
return err
|
||||
}
|
||||
|
||||
c.processUnknownErc20CommunityTransactions(ctx, allTransfers)
|
||||
|
||||
err = c.processMultiTransactions(ctx, allTransfers)
|
||||
if err != nil {
|
||||
log.Error("processMultiTransactions error", "error", err)
|
||||
return err
|
||||
}
|
||||
|
||||
if len(allTransfers) > 0 {
|
||||
err := c.saveAndConfirmPending(allTransfers, blockNum)
|
||||
if err != nil {
|
||||
log.Error("saveAndConfirmPending error", "error", err)
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
// If no transfers are found, that is suspicious, because the downloader reported this block as containing transfers
|
||||
log.Error("no transfers found in block", "chain", c.chainClient.NetworkID(), "address", c.address, "block", blockNum)
|
||||
|
||||
err = markBlocksAsLoaded(c.chainClient.NetworkID(), c.db.client, c.address, []*big.Int{blockNum})
|
||||
if err != nil {
|
||||
log.Error("Mark blocks loaded error", "error", err)
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
c.fetchedTransfers = append(c.fetchedTransfers, allTransfers...)
|
||||
|
||||
c.notifyOfNewTransfers(blockNum, allTransfers)
|
||||
c.notifyOfLatestTransfers(allTransfers, w_common.EthTransfer)
|
||||
c.notifyOfLatestTransfers(allTransfers, w_common.Erc20Transfer)
|
||||
c.notifyOfLatestTransfers(allTransfers, w_common.Erc721Transfer)
|
||||
c.notifyOfLatestTransfers(allTransfers, w_common.Erc1155Transfer)
|
||||
|
||||
log.Debug("transfersCommand block end", "chain", c.chainClient.NetworkID(), "address", c.address,
|
||||
"block", blockNum, "tranfers.len", len(allTransfers), "fetchedTransfers.len", len(c.fetchedTransfers))
|
||||
}
|
||||
|
||||
if c.blockNums != nil || len(blocks) == 0 ||
|
||||
(c.blocksLimit > noBlockLimit && len(blocks) >= c.blocksLimit) {
|
||||
log.Debug("loadTransfers breaking loop on block limits reached or 0 blocks", "chain", c.chainClient.NetworkID(),
|
||||
"address", c.address, "limit", c.blocksLimit, "blocks", len(blocks))
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
log.Debug("end transfersCommand", "chain", c.chainClient.NetworkID(), "address", c.address,
|
||||
"blocks.len", len(c.blockNums), "transfers.len", len(c.fetchedTransfers), "in", time.Since(startTs))
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// saveAndConfirmPending ensures that only the transaction whose owner (Address) is the sender is matched to the
// corresponding multi-transaction (by multi-transaction ID). This way, if the receiver is also in the list
// of accounts, the filter will discard the proper one.
|
||||
func (c *transfersCommand) saveAndConfirmPending(allTransfers []Transfer, blockNum *big.Int) error {
|
||||
tx, resErr := c.db.client.Begin()
|
||||
if resErr != nil {
|
||||
return resErr
|
||||
}
|
||||
notifyFunctions := c.confirmPendingTransactions(tx, allTransfers)
|
||||
defer func() {
|
||||
if resErr == nil {
|
||||
commitErr := tx.Commit()
|
||||
if commitErr != nil {
|
||||
log.Error("failed to commit", "error", commitErr)
|
||||
}
|
||||
for _, notify := range notifyFunctions {
|
||||
notify()
|
||||
}
|
||||
} else {
|
||||
rollbackErr := tx.Rollback()
|
||||
if rollbackErr != nil {
|
||||
log.Error("failed to rollback", "error", rollbackErr)
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
resErr = saveTransfersMarkBlocksLoaded(tx, c.chainClient.NetworkID(), c.address, allTransfers, []*big.Int{blockNum})
|
||||
if resErr != nil {
|
||||
log.Error("SaveTransfers error", "error", resErr)
|
||||
}
|
||||
|
||||
return resErr
|
||||
}
|
||||
|
||||
func (c *transfersCommand) confirmPendingTransactions(tx *sql.Tx, allTransfers []Transfer) (notifyFunctions []func()) {
|
||||
notifyFunctions = make([]func(), 0)
|
||||
|
||||
// Confirm all pending transactions that are included in this block
|
||||
for i, tr := range allTransfers {
|
||||
chainID := w_common.ChainID(tr.NetworkID)
|
||||
txHash := tr.Receipt.TxHash
|
||||
txType, mTID, err := transactions.GetOwnedPendingStatus(tx, chainID, txHash, tr.Address)
|
||||
if err == sql.ErrNoRows {
|
||||
if tr.MultiTransactionID > 0 {
|
||||
continue
|
||||
} else {
|
||||
// Outside transaction, already confirmed by another duplicate or not yet downloaded
|
||||
existingMTID, err := GetOwnedMultiTransactionID(tx, chainID, tr.ID, tr.Address)
|
||||
if err == sql.ErrNoRows || existingMTID == 0 {
|
||||
// Outside transaction, ignore it
|
||||
continue
|
||||
} else if err != nil {
|
||||
log.Warn("GetOwnedMultiTransactionID", "error", err)
|
||||
continue
|
||||
}
|
||||
mTID = w_common.NewAndSet(existingMTID)
|
||||
|
||||
}
|
||||
} else if err != nil {
|
||||
log.Warn("GetOwnedPendingStatus", "error", err)
|
||||
continue
|
||||
}
|
||||
|
||||
if mTID != nil {
|
||||
allTransfers[i].MultiTransactionID = MultiTransactionIDType(*mTID)
|
||||
}
|
||||
if txType != nil && *txType == transactions.WalletTransfer {
|
||||
notify, err := c.pendingTxManager.DeleteBySQLTx(tx, chainID, txHash)
|
||||
if err != nil && err != transactions.ErrStillPending {
|
||||
log.Error("DeleteBySqlTx error", "error", err)
|
||||
}
|
||||
notifyFunctions = append(notifyFunctions, notify)
|
||||
}
|
||||
}
|
||||
return notifyFunctions
|
||||
}
|
||||
|
||||
// Mark all subTxs of a given Tx with the same multiTxID
|
||||
func setMultiTxID(tx Transaction, multiTxID MultiTransactionIDType) {
|
||||
for _, subTx := range tx {
|
||||
subTx.MultiTransactionID = multiTxID
|
||||
}
|
||||
}
|
||||
|
||||
func (c *transfersCommand) markMultiTxTokensAsPreviouslyOwned(ctx context.Context, multiTransaction *MultiTransaction, ownerAddress common.Address) {
|
||||
if multiTransaction == nil {
|
||||
return
|
||||
}
|
||||
if len(multiTransaction.ToAsset) > 0 && multiTransaction.ToNetworkID > 0 {
|
||||
token := c.tokenManager.GetToken(multiTransaction.ToNetworkID, multiTransaction.ToAsset)
|
||||
_ = c.tokenManager.MarkAsPreviouslyOwnedToken(token, ownerAddress)
|
||||
}
|
||||
if len(multiTransaction.FromAsset) > 0 && multiTransaction.FromNetworkID > 0 {
|
||||
token := c.tokenManager.GetToken(multiTransaction.FromNetworkID, multiTransaction.FromAsset)
|
||||
_ = c.tokenManager.MarkAsPreviouslyOwnedToken(token, ownerAddress)
|
||||
}
|
||||
}
|
||||
|
||||
func (c *transfersCommand) checkAndProcessSwapMultiTx(ctx context.Context, tx Transaction) (bool, error) {
|
||||
for _, subTx := range tx {
|
||||
switch subTx.Type {
|
||||
// If the Tx contains any uniswapV2Swap/uniswapV3Swap subTx, generate a Swap multiTx
|
||||
case w_common.UniswapV2Swap, w_common.UniswapV3Swap:
|
||||
multiTransaction, err := buildUniswapSwapMultitransaction(ctx, c.chainClient, c.tokenManager, subTx)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
if multiTransaction != nil {
|
||||
id, err := c.transactionManager.InsertMultiTransaction(multiTransaction)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
setMultiTxID(tx, id)
|
||||
c.markMultiTxTokensAsPreviouslyOwned(ctx, multiTransaction, subTx.Address)
|
||||
return true, nil
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return false, nil
|
||||
}
|
||||
|
||||
func (c *transfersCommand) checkAndProcessBridgeMultiTx(ctx context.Context, tx Transaction) (bool, error) {
|
||||
for _, subTx := range tx {
|
||||
switch subTx.Type {
|
||||
// If the Tx contains any hopBridge subTx, create/update Bridge multiTx
|
||||
case w_common.HopBridgeFrom, w_common.HopBridgeTo:
|
||||
multiTransaction, err := buildHopBridgeMultitransaction(ctx, c.chainClient, c.transactionManager, c.tokenManager, subTx)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
if multiTransaction != nil {
|
||||
setMultiTxID(tx, MultiTransactionIDType(multiTransaction.ID))
|
||||
c.markMultiTxTokensAsPreviouslyOwned(ctx, multiTransaction, subTx.Address)
|
||||
return true, nil
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return false, nil
|
||||
}
|
||||
|
||||
func (c *transfersCommand) processUnknownErc20CommunityTransactions(ctx context.Context, allTransfers []Transfer) {
|
||||
for _, tx := range allTransfers {
|
||||
// To can be nil in case of erc20 contract creation
|
||||
if tx.Type == w_common.Erc20Transfer && tx.Transaction.To() != nil {
|
||||
// Find token in db or if this is a community token, find its metadata
|
||||
token := c.tokenManager.FindOrCreateTokenByAddress(ctx, tx.NetworkID, *tx.Transaction.To())
|
||||
if token != nil {
|
||||
if token.Verified || token.CommunityData != nil {
|
||||
_ = c.tokenManager.MarkAsPreviouslyOwnedToken(token, tx.Address)
|
||||
}
|
||||
if token.CommunityData != nil {
|
||||
go c.tokenManager.SignalCommunityTokenReceived(tx.Address, tx.ID, tx.Transaction.Value(), token)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (c *transfersCommand) processMultiTransactions(ctx context.Context, allTransfers []Transfer) error {
|
||||
txByTxHash := subTransactionListToTransactionsByTxHash(allTransfers)
|
||||
|
||||
// Detect / Generate multitransactions
|
||||
// Iterate over all detected transactions
|
||||
for _, tx := range txByTxHash {
|
||||
// Then check for a Swap transaction
|
||||
txProcessed, err := c.checkAndProcessSwapMultiTx(ctx, tx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if txProcessed {
|
||||
continue
|
||||
}
|
||||
|
||||
// Then check for a Bridge transaction
|
||||
_, err = c.checkAndProcessBridgeMultiTx(ctx, tx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *transfersCommand) notifyOfNewTransfers(blockNum *big.Int, transfers []Transfer) {
|
||||
if c.feed != nil {
|
||||
if len(transfers) > 0 {
|
||||
c.feed.Send(walletevent.Event{
|
||||
Type: EventNewTransfers,
|
||||
Accounts: []common.Address{c.address},
|
||||
ChainID: c.chainClient.NetworkID(),
|
||||
BlockNumber: blockNum,
|
||||
})
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func transferTypeToEventType(transferType w_common.Type) walletevent.EventType {
|
||||
switch transferType {
|
||||
case w_common.EthTransfer:
|
||||
return EventInternalETHTransferDetected
|
||||
case w_common.Erc20Transfer:
|
||||
return EventInternalERC20TransferDetected
|
||||
case w_common.Erc721Transfer:
|
||||
return EventInternalERC721TransferDetected
|
||||
case w_common.Erc1155Transfer:
|
||||
return EventInternalERC1155TransferDetected
|
||||
default:
|
||||
return ""
|
||||
}
|
||||
}
|
||||
|
||||
func (c *transfersCommand) notifyOfLatestTransfers(transfers []Transfer, transferType w_common.Type) {
|
||||
if c.feed != nil {
|
||||
eventTransfers := make([]Transfer, 0, len(transfers))
|
||||
latestTransferTimestamp := uint64(0)
|
||||
for _, transfer := range transfers {
|
||||
if transfer.Type == transferType {
|
||||
eventTransfers = append(eventTransfers, transfer)
|
||||
if transfer.Timestamp > latestTransferTimestamp {
|
||||
latestTransferTimestamp = transfer.Timestamp
|
||||
}
|
||||
}
|
||||
}
|
||||
if len(eventTransfers) > 0 {
|
||||
c.feed.Send(walletevent.Event{
|
||||
Type: transferTypeToEventType(transferType),
|
||||
Accounts: []common.Address{c.address},
|
||||
ChainID: c.chainClient.NetworkID(),
|
||||
At: int64(latestTransferTimestamp),
|
||||
EventParams: eventTransfers,
|
||||
})
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
type loadTransfersCommand struct {
|
||||
accounts []common.Address
|
||||
db *Database
|
||||
blockDAO *BlockDAO
|
||||
chainClient chain.ClientInterface
|
||||
blocksByAddress map[common.Address][]*big.Int
|
||||
transactionManager *TransactionManager
|
||||
pendingTxManager *transactions.PendingTxTracker
|
||||
blocksLimit int
|
||||
tokenManager *token.Manager
|
||||
feed *event.Feed
|
||||
}
|
||||
|
||||
func (c *loadTransfersCommand) Command() async.Command {
|
||||
return async.FiniteCommand{
|
||||
Interval: 5 * time.Second,
|
||||
Runable: c.Run,
|
||||
}.Run
|
||||
}
|
||||
|
||||
// This command always returns nil, even if there is an error in one of the commands.
|
||||
// `transferCommand`s retry until maxError, but this command doesn't retry.
|
||||
// In case some transfer is not loaded after max retries, it will be retried only after a restart of the app.
// Currently there is no mechanism to keep retrying until success. This should probably be implemented
// in `transferCommand` with exponential backoff instead of `loadTransfersCommand` (issue #4608).
|
||||
func (c *loadTransfersCommand) Run(parent context.Context) (err error) {
|
||||
return loadTransfers(parent, c.blockDAO, c.db, c.chainClient, c.blocksLimit, c.blocksByAddress,
|
||||
c.transactionManager, c.pendingTxManager, c.tokenManager, c.feed)
|
||||
}
|
||||
|
||||
func loadTransfers(ctx context.Context, blockDAO *BlockDAO, db *Database,
|
||||
chainClient chain.ClientInterface, blocksLimitPerAccount int, blocksByAddress map[common.Address][]*big.Int,
|
||||
transactionManager *TransactionManager, pendingTxManager *transactions.PendingTxTracker,
|
||||
tokenManager *token.Manager, feed *event.Feed) error {
|
||||
|
||||
log.Debug("loadTransfers start", "chain", chainClient.NetworkID(), "limit", blocksLimitPerAccount)
|
||||
|
||||
start := time.Now()
|
||||
group := async.NewGroup(ctx)
|
||||
|
||||
accounts := maps.Keys(blocksByAddress)
|
||||
for _, address := range accounts {
|
||||
transfers := &transfersCommand{
|
||||
db: db,
|
||||
blockDAO: blockDAO,
|
||||
chainClient: chainClient,
|
||||
address: address,
|
||||
eth: &ETHDownloader{
|
||||
chainClient: chainClient,
|
||||
accounts: []common.Address{address},
|
||||
signer: types.LatestSignerForChainID(chainClient.ToBigInt()),
|
||||
db: db,
|
||||
},
|
||||
blockNums: blocksByAddress[address],
|
||||
transactionManager: transactionManager,
|
||||
pendingTxManager: pendingTxManager,
|
||||
tokenManager: tokenManager,
|
||||
feed: feed,
|
||||
}
|
||||
group.Add(transfers.Command())
|
||||
}
|
||||
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
log.Debug("loadTransfers cancelled", "chain", chainClient.NetworkID(), "error", ctx.Err())
|
||||
case <-group.WaitAsync():
|
||||
log.Debug("loadTransfers finished for account", "in", time.Since(start), "chain", chainClient.NetworkID())
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func isBinanceChain(chainID uint64) bool {
|
||||
return chainID == binancChainID || chainID == binanceTestChainID
|
||||
}
|
||||
|
||||
// Ensure 1 DBHeader per Block Hash
|
||||
func uniqueHeaderPerBlockHash(allHeaders []*DBHeader) []*DBHeader {
|
||||
uniqHeadersByHash := map[common.Hash]*DBHeader{}
|
||||
for _, header := range allHeaders {
|
||||
uniqHeader, ok := uniqHeadersByHash[header.Hash]
|
||||
if ok {
|
||||
if len(header.PreloadedTransactions) > 0 {
|
||||
uniqHeader.PreloadedTransactions = append(uniqHeader.PreloadedTransactions, header.PreloadedTransactions...)
|
||||
}
|
||||
uniqHeadersByHash[header.Hash] = uniqHeader
|
||||
} else {
|
||||
uniqHeadersByHash[header.Hash] = header
|
||||
}
|
||||
}
|
||||
|
||||
uniqHeaders := []*DBHeader{}
|
||||
for _, header := range uniqHeadersByHash {
|
||||
uniqHeaders = append(uniqHeaders, header)
|
||||
}
|
||||
|
||||
return uniqHeaders
|
||||
}
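// Illustrative sketch, not part of the upstream file; the function name below is hypothetical.
// Two DBHeader entries that share a block hash collapse into a single header, with their
// preloaded transactions concatenated.
func exampleUniqueHeaders() int {
h := common.HexToHash("0x01")
headers := []*DBHeader{{Hash: h}, {Hash: h}}
return len(uniqueHeaderPerBlockHash(headers)) // 1
}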
|
||||
|
||||
// Organize subTransactions by Transaction Hash
|
||||
func subTransactionListToTransactionsByTxHash(subTransactions []Transfer) map[common.Hash]Transaction {
|
||||
rst := map[common.Hash]Transaction{}
|
||||
|
||||
for index := range subTransactions {
|
||||
subTx := &subTransactions[index]
|
||||
txHash := subTx.Transaction.Hash()
|
||||
|
||||
if _, ok := rst[txHash]; !ok {
|
||||
rst[txHash] = make([]*Transfer, 0)
|
||||
}
|
||||
rst[txHash] = append(rst[txHash], subTx)
|
||||
}
|
||||
|
||||
return rst
|
||||
}
|
||||
|
||||
func IsTransferDetectionEvent(ev walletevent.EventType) bool {
|
||||
if ev == EventInternalETHTransferDetected ||
|
||||
ev == EventInternalERC20TransferDetected ||
|
||||
ev == EventInternalERC721TransferDetected ||
|
||||
ev == EventInternalERC1155TransferDetected {
|
||||
return true
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
1261
vendor/github.com/status-im/status-go/services/wallet/transfer/commands_sequential.go
generated
vendored
Normal file
File diff suppressed because it is too large
260
vendor/github.com/status-im/status-go/services/wallet/transfer/concurrent.go
generated
vendored
Normal file
@@ -0,0 +1,260 @@
|
||||
package transfer
|
||||
|
||||
import (
|
||||
"context"
|
||||
"math/big"
|
||||
"sort"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/log"
|
||||
"github.com/status-im/status-go/services/wallet/async"
|
||||
"github.com/status-im/status-go/services/wallet/balance"
|
||||
)
|
||||
|
||||
const (
|
||||
NoThreadLimit uint32 = 0
|
||||
SequentialThreadLimit uint32 = 10
|
||||
)
|
||||
|
||||
// NewConcurrentDownloader creates ConcurrentDownloader instance.
|
||||
func NewConcurrentDownloader(ctx context.Context, limit uint32) *ConcurrentDownloader {
|
||||
runner := async.NewQueuedAtomicGroup(ctx, limit)
|
||||
result := &Result{}
|
||||
return &ConcurrentDownloader{runner, result}
|
||||
}
|
||||
|
||||
type ConcurrentDownloader struct {
|
||||
*async.QueuedAtomicGroup
|
||||
*Result
|
||||
}
|
||||
|
||||
type Result struct {
|
||||
mu sync.Mutex
|
||||
transfers []Transfer
|
||||
headers []*DBHeader
|
||||
blockRanges [][]*big.Int
|
||||
}
|
||||
|
||||
var errDownloaderStuck = errors.New("eth downloader is stuck")
|
||||
|
||||
func (r *Result) Push(transfers ...Transfer) {
|
||||
r.mu.Lock()
|
||||
defer r.mu.Unlock()
|
||||
r.transfers = append(r.transfers, transfers...)
|
||||
}
|
||||
|
||||
func (r *Result) Get() []Transfer {
|
||||
r.mu.Lock()
|
||||
defer r.mu.Unlock()
|
||||
rst := make([]Transfer, len(r.transfers))
|
||||
copy(rst, r.transfers)
|
||||
return rst
|
||||
}
|
||||
|
||||
func (r *Result) PushHeader(block *DBHeader) {
|
||||
r.mu.Lock()
|
||||
defer r.mu.Unlock()
|
||||
|
||||
r.headers = append(r.headers, block)
|
||||
}
|
||||
|
||||
func (r *Result) GetHeaders() []*DBHeader {
|
||||
r.mu.Lock()
|
||||
defer r.mu.Unlock()
|
||||
rst := make([]*DBHeader, len(r.headers))
|
||||
copy(rst, r.headers)
|
||||
return rst
|
||||
}
|
||||
|
||||
func (r *Result) PushRange(blockRange []*big.Int) {
|
||||
r.mu.Lock()
|
||||
defer r.mu.Unlock()
|
||||
|
||||
r.blockRanges = append(r.blockRanges, blockRange)
|
||||
}
|
||||
|
||||
func (r *Result) GetRanges() [][]*big.Int {
|
||||
r.mu.Lock()
|
||||
defer r.mu.Unlock()
|
||||
rst := make([][]*big.Int, len(r.blockRanges))
|
||||
copy(rst, r.blockRanges)
|
||||
r.blockRanges = [][]*big.Int{}
|
||||
|
||||
return rst
|
||||
}
|
||||
|
||||
// Downloader downloads transfers from single block using number.
|
||||
type Downloader interface {
|
||||
GetTransfersByNumber(context.Context, *big.Int) ([]Transfer, error)
|
||||
}
|
||||
|
||||
// Returns new block ranges that contain transfers, the block headers found to contain transfers, and the block where
|
||||
// the beginning of the transfers history was detected
|
||||
func checkRangesWithStartBlock(parent context.Context, client balance.Reader, cache balance.Cacher,
|
||||
account common.Address, ranges [][]*big.Int, threadLimit uint32, startBlock *big.Int) (
|
||||
resRanges [][]*big.Int, headers []*DBHeader, newStartBlock *big.Int, err error) {
|
||||
|
||||
log.Debug("start checkRanges", "account", account.Hex(), "ranges len", len(ranges), "startBlock", startBlock)
|
||||
|
||||
ctx, cancel := context.WithTimeout(parent, 30*time.Second)
|
||||
defer cancel()
|
||||
|
||||
c := NewConcurrentDownloader(ctx, threadLimit)
|
||||
|
||||
newStartBlock = startBlock
|
||||
|
||||
for _, blocksRange := range ranges {
|
||||
from := blocksRange[0]
|
||||
to := blocksRange[1]
|
||||
|
||||
log.Debug("check block range", "from", from, "to", to)
|
||||
|
||||
if startBlock != nil {
|
||||
if to.Cmp(newStartBlock) <= 0 {
|
||||
log.Debug("'to' block is less than 'start' block", "to", to, "startBlock", startBlock)
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
c.Add(func(ctx context.Context) error {
|
||||
if from.Cmp(to) >= 0 {
|
||||
log.Debug("'from' block is greater than or equal to 'to' block", "from", from, "to", to)
|
||||
return nil
|
||||
}
|
||||
log.Debug("eth transfers comparing blocks", "from", from, "to", to)
|
||||
|
||||
if startBlock != nil {
|
||||
if to.Cmp(startBlock) <= 0 {
|
||||
log.Debug("'to' block is less than 'start' block", "to", to, "startBlock", startBlock)
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
lb, err := cache.BalanceAt(ctx, client, account, from)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
hb, err := cache.BalanceAt(ctx, client, account, to)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if lb.Cmp(hb) == 0 {
|
||||
log.Debug("balances are equal", "from", from, "to", to, "lb", lb, "hb", hb)
|
||||
|
||||
hn, err := cache.NonceAt(ctx, client, account, to)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
// if nonce is zero in a newer block then there is no need to check an older one
|
||||
if *hn == 0 {
|
||||
log.Debug("zero nonce", "to", to)
|
||||
|
||||
if hb.Cmp(big.NewInt(0)) == 0 { // balance is 0 and nonce is 0, so we stop checking further; this will be the start block (even though the real one can be a later one)
|
||||
if startBlock != nil {
|
||||
if to.Cmp(newStartBlock) > 0 {
|
||||
log.Debug("found possible start block, we should not search back", "block", to)
|
||||
newStartBlock = to // increase newStartBlock if we found a new higher block
|
||||
}
|
||||
} else {
|
||||
newStartBlock = to
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
ln, err := cache.NonceAt(ctx, client, account, from)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if *ln == *hn {
|
||||
log.Debug("transaction count is also equal", "from", from, "to", to, "ln", *ln, "hn", *hn)
|
||||
return nil
|
||||
}
|
||||
}
|
||||
if new(big.Int).Sub(to, from).Cmp(one) == 0 {
|
||||
// WARNING: Block hash calculation from plain header returns a wrong value.
|
||||
header, err := client.HeaderByNumber(ctx, to)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
// Obtain block hash from first transaction
|
||||
firstTransaction, err := client.FullTransactionByBlockNumberAndIndex(ctx, to, 0)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
c.PushHeader(toDBHeader(header, *firstTransaction.BlockHash, account))
|
||||
return nil
|
||||
}
|
||||
mid := new(big.Int).Add(from, to)
|
||||
mid = mid.Div(mid, two)
|
||||
_, err = cache.BalanceAt(ctx, client, account, mid)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
log.Debug("balances are not equal", "from", from, "mid", mid, "to", to)
|
||||
|
||||
c.PushRange([]*big.Int{mid, to})
|
||||
c.PushRange([]*big.Int{from, mid})
|
||||
return nil
|
||||
})
|
||||
}
|
||||
|
||||
select {
|
||||
case <-c.WaitAsync():
|
||||
case <-ctx.Done():
|
||||
return nil, nil, nil, errDownloaderStuck
|
||||
}
|
||||
|
||||
if c.Error() != nil {
|
||||
return nil, nil, nil, errors.Wrap(c.Error(), "failed to download transfers using concurrent downloader")
|
||||
}
|
||||
|
||||
log.Debug("end checkRanges", "account", account.Hex(), "newStartBlock", newStartBlock)
|
||||
return c.GetRanges(), c.GetHeaders(), newStartBlock, nil
|
||||
}
|
||||
|
||||
func findBlocksWithEthTransfers(parent context.Context, client balance.Reader, cache balance.Cacher,
|
||||
account common.Address, low, high *big.Int, noLimit bool, threadLimit uint32) (
|
||||
from *big.Int, headers []*DBHeader, resStartBlock *big.Int, err error) {
|
||||
|
||||
ranges := [][]*big.Int{{low, high}}
|
||||
from = big.NewInt(low.Int64())
|
||||
headers = []*DBHeader{}
|
||||
var lvl = 1
|
||||
|
||||
for len(ranges) > 0 && lvl <= 30 {
|
||||
log.Debug("check blocks ranges", "lvl", lvl, "ranges len", len(ranges))
|
||||
lvl++
|
||||
// Check if there are transfers in the blocks within these ranges. To do that, the nonce and balance are checked;
|
||||
// the block ranges that contain transfers are returned
|
||||
newRanges, newHeaders, strtBlock, err := checkRangesWithStartBlock(parent, client, cache,
|
||||
account, ranges, threadLimit, resStartBlock)
|
||||
resStartBlock = strtBlock
|
||||
if err != nil {
|
||||
return nil, nil, nil, err
|
||||
}
|
||||
|
||||
headers = append(headers, newHeaders...)
|
||||
|
||||
if len(newRanges) > 0 {
|
||||
log.Debug("found new ranges", "account", account, "lvl", lvl, "new ranges len", len(newRanges))
|
||||
}
|
||||
if len(newRanges) > 60 && !noLimit {
|
||||
sort.SliceStable(newRanges, func(i, j int) bool {
|
||||
return newRanges[i][0].Cmp(newRanges[j][0]) == 1
|
||||
})
|
||||
|
||||
newRanges = newRanges[:60]
|
||||
from = newRanges[len(newRanges)-1][0]
|
||||
}
|
||||
|
||||
ranges = newRanges
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
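checkRangesWithStartBlock above narrows block ranges by comparing balance and nonce at the range edges: a range with no change is dropped, a changed range is split at its midpoint, and a range of width one is recorded as a block containing a transfer. The sketch below reproduces just that bisection in isolation; balanceAt is an assumed stand-in for balance.Cacher.BalanceAt, and the nonce check, thread limiting and timeouts of the vendored code are omitted, so this is illustrative only.

package main

import (
	"fmt"
	"math/big"
)

// findChangedBlocks bisects [from, to] and returns the blocks where the balance changes.
func findChangedBlocks(from, to *big.Int, balanceAt func(*big.Int) *big.Int) []*big.Int {
	var found []*big.Int
	ranges := [][2]*big.Int{{from, to}}
	one := big.NewInt(1)
	for len(ranges) > 0 {
		r := ranges[0]
		ranges = ranges[1:]
		lo, hi := r[0], r[1]
		if balanceAt(lo).Cmp(balanceAt(hi)) == 0 {
			continue // no balance change across this range, nothing to index
		}
		if new(big.Int).Sub(hi, lo).Cmp(one) == 0 {
			found = append(found, new(big.Int).Set(hi)) // single block left: it contains a transfer
			continue
		}
		mid := new(big.Int).Add(lo, hi)
		mid.Div(mid, big.NewInt(2))
		ranges = append(ranges, [2]*big.Int{lo, mid}, [2]*big.Int{mid, hi}) // split and keep searching
	}
	return found
}

func main() {
	// Assumed toy chain: the balance jumps between blocks 5 and 6.
	balanceAt := func(n *big.Int) *big.Int {
		if n.Cmp(big.NewInt(6)) >= 0 {
			return big.NewInt(100)
		}
		return big.NewInt(0)
	}
	fmt.Println(findChangedBlocks(big.NewInt(0), big.NewInt(16), balanceAt)) // [6]
}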
307
vendor/github.com/status-im/status-go/services/wallet/transfer/controller.go
generated
vendored
Normal file
@@ -0,0 +1,307 @@
|
||||
package transfer
|
||||
|
||||
import (
|
||||
"context"
|
||||
"database/sql"
|
||||
"fmt"
|
||||
"math/big"
|
||||
|
||||
"golang.org/x/exp/slices" // since 1.21, this is in the standard library
|
||||
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/core/types"
|
||||
"github.com/ethereum/go-ethereum/event"
|
||||
"github.com/ethereum/go-ethereum/log"
|
||||
statusaccounts "github.com/status-im/status-go/multiaccounts/accounts"
|
||||
"github.com/status-im/status-go/multiaccounts/settings"
|
||||
"github.com/status-im/status-go/rpc"
|
||||
"github.com/status-im/status-go/services/accounts/accountsevent"
|
||||
"github.com/status-im/status-go/services/wallet/balance"
|
||||
"github.com/status-im/status-go/services/wallet/blockchainstate"
|
||||
"github.com/status-im/status-go/services/wallet/token"
|
||||
"github.com/status-im/status-go/transactions"
|
||||
)
|
||||
|
||||
type Controller struct {
|
||||
db *Database
|
||||
accountsDB *statusaccounts.Database
|
||||
rpcClient *rpc.Client
|
||||
blockDAO *BlockDAO
|
||||
blockRangesSeqDAO *BlockRangeSequentialDAO
|
||||
reactor *Reactor
|
||||
accountFeed *event.Feed
|
||||
TransferFeed *event.Feed
|
||||
accWatcher *accountsevent.Watcher
|
||||
transactionManager *TransactionManager
|
||||
pendingTxManager *transactions.PendingTxTracker
|
||||
tokenManager *token.Manager
|
||||
balanceCacher balance.Cacher
|
||||
blockChainState *blockchainstate.BlockChainState
|
||||
}
|
||||
|
||||
func NewTransferController(db *sql.DB, accountsDB *statusaccounts.Database, rpcClient *rpc.Client, accountFeed *event.Feed, transferFeed *event.Feed,
|
||||
transactionManager *TransactionManager, pendingTxManager *transactions.PendingTxTracker, tokenManager *token.Manager,
|
||||
balanceCacher balance.Cacher, blockChainState *blockchainstate.BlockChainState) *Controller {
|
||||
|
||||
blockDAO := &BlockDAO{db}
|
||||
return &Controller{
|
||||
db: NewDB(db),
|
||||
accountsDB: accountsDB,
|
||||
blockDAO: blockDAO,
|
||||
blockRangesSeqDAO: &BlockRangeSequentialDAO{db},
|
||||
rpcClient: rpcClient,
|
||||
accountFeed: accountFeed,
|
||||
TransferFeed: transferFeed,
|
||||
transactionManager: transactionManager,
|
||||
pendingTxManager: pendingTxManager,
|
||||
tokenManager: tokenManager,
|
||||
balanceCacher: balanceCacher,
|
||||
blockChainState: blockChainState,
|
||||
}
|
||||
}
|
||||
|
||||
func (c *Controller) Start() {
|
||||
go func() { _ = c.cleanupAccountsLeftovers() }()
|
||||
}
|
||||
|
||||
func (c *Controller) Stop() {
|
||||
if c.reactor != nil {
|
||||
c.reactor.stop()
|
||||
}
|
||||
|
||||
if c.accWatcher != nil {
|
||||
c.accWatcher.Stop()
|
||||
c.accWatcher = nil
|
||||
}
|
||||
}
|
||||
|
||||
func sameChains(chainIDs1 []uint64, chainIDs2 []uint64) bool {
|
||||
if len(chainIDs1) != len(chainIDs2) {
|
||||
return false
|
||||
}
|
||||
|
||||
for _, chainID := range chainIDs1 {
|
||||
if !slices.Contains(chainIDs2, chainID) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
return true
|
||||
}
|
||||
|
||||
func (c *Controller) CheckRecentHistory(chainIDs []uint64, accounts []common.Address) error {
|
||||
if len(accounts) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
if len(chainIDs) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
err := c.blockDAO.mergeBlocksRanges(chainIDs, accounts)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
chainClients, err := c.rpcClient.EthClients(chainIDs)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if c.reactor != nil {
|
||||
if !sameChains(chainIDs, c.reactor.chainIDs) {
|
||||
err := c.reactor.restart(chainClients, accounts)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
multiaccSettings, err := c.accountsDB.GetSettings()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
omitHistory := multiaccSettings.OmitTransfersHistoryScan
|
||||
if omitHistory {
|
||||
err := c.accountsDB.SaveSettingField(settings.OmitTransfersHistoryScan, false)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
c.reactor = NewReactor(c.db, c.blockDAO, c.blockRangesSeqDAO, c.accountsDB, c.TransferFeed, c.transactionManager,
|
||||
c.pendingTxManager, c.tokenManager, c.balanceCacher, omitHistory, c.blockChainState)
|
||||
|
||||
err = c.reactor.start(chainClients, accounts)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
c.startAccountWatcher(chainIDs)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *Controller) startAccountWatcher(chainIDs []uint64) {
|
||||
if c.accWatcher == nil {
|
||||
c.accWatcher = accountsevent.NewWatcher(c.accountsDB, c.accountFeed, func(changedAddresses []common.Address, eventType accountsevent.EventType, currentAddresses []common.Address) {
|
||||
c.onAccountsChanged(changedAddresses, eventType, currentAddresses, chainIDs)
|
||||
})
|
||||
c.accWatcher.Start()
|
||||
}
|
||||
}
|
||||
|
||||
func (c *Controller) onAccountsChanged(changedAddresses []common.Address, eventType accountsevent.EventType, currentAddresses []common.Address, chainIDs []uint64) {
|
||||
if eventType == accountsevent.EventTypeRemoved {
|
||||
for _, address := range changedAddresses {
|
||||
c.cleanUpRemovedAccount(address)
|
||||
}
|
||||
}
|
||||
|
||||
if c.reactor == nil {
|
||||
log.Warn("reactor is not initialized")
|
||||
return
|
||||
}
|
||||
|
||||
if eventType == accountsevent.EventTypeAdded || eventType == accountsevent.EventTypeRemoved {
|
||||
log.Debug("list of accounts was changed from a previous version. reactor will be restarted", "new", currentAddresses)
|
||||
|
||||
chainClients, err := c.rpcClient.EthClients(chainIDs)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
err = c.reactor.restart(chainClients, currentAddresses)
|
||||
if err != nil {
|
||||
log.Error("failed to restart reactor with new accounts", "error", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Only used by status-mobile
|
||||
func (c *Controller) LoadTransferByHash(ctx context.Context, rpcClient *rpc.Client, address common.Address, hash common.Hash) error {
|
||||
chainClient, err := rpcClient.EthClient(rpcClient.UpstreamChainID)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
signer := types.LatestSignerForChainID(chainClient.ToBigInt())
|
||||
|
||||
transfer, err := getTransferByHash(ctx, chainClient, signer, address, hash)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
transfers := []Transfer{*transfer}
|
||||
|
||||
err = c.db.InsertBlock(rpcClient.UpstreamChainID, address, transfer.BlockNumber, transfer.BlockHash)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
tx, err := c.db.client.BeginTx(ctx, nil)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
blocks := []*big.Int{transfer.BlockNumber}
|
||||
err = saveTransfersMarkBlocksLoaded(tx, rpcClient.UpstreamChainID, address, transfers, blocks)
|
||||
if err != nil {
|
||||
rollErr := tx.Rollback()
|
||||
if rollErr != nil {
|
||||
return fmt.Errorf("failed to rollback transaction due to error: %v", err)
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *Controller) GetTransfersByAddress(ctx context.Context, chainID uint64, address common.Address, toBlock *big.Int,
|
||||
limit int64, fetchMore bool) ([]View, error) {
|
||||
|
||||
rst, err := c.reactor.getTransfersByAddress(ctx, chainID, address, toBlock, limit)
|
||||
if err != nil {
|
||||
log.Error("[WalletAPI:: GetTransfersByAddress] can't fetch transfers", "err", err)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return castToTransferViews(rst), nil
|
||||
}
|
||||
|
||||
func (c *Controller) GetTransfersForIdentities(ctx context.Context, identities []TransactionIdentity) ([]View, error) {
|
||||
rst, err := c.db.GetTransfersForIdentities(ctx, identities)
|
||||
if err != nil {
|
||||
log.Error("[transfer.Controller.GetTransfersForIdentities] DB err", err)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return castToTransferViews(rst), nil
|
||||
}
|
||||
|
||||
func (c *Controller) GetCachedBalances(ctx context.Context, chainID uint64, addresses []common.Address) ([]BlockView, error) {
|
||||
result, error := c.blockDAO.getLastKnownBlocks(chainID, addresses)
|
||||
if error != nil {
|
||||
return nil, error
|
||||
}
|
||||
|
||||
return blocksToViews(result), nil
|
||||
}
|
||||
|
||||
func (c *Controller) cleanUpRemovedAccount(address common.Address) {
|
||||
// Transfers will be deleted by foreign key constraint by cascade
|
||||
err := deleteBlocks(c.db.client, address)
|
||||
if err != nil {
|
||||
log.Error("Failed to delete blocks", "error", err)
|
||||
}
|
||||
err = deleteAllRanges(c.db.client, address)
|
||||
if err != nil {
|
||||
log.Error("Failed to delete old blocks ranges", "error", err)
|
||||
}
|
||||
|
||||
err = c.blockRangesSeqDAO.deleteRange(address)
|
||||
if err != nil {
|
||||
log.Error("Failed to delete blocks ranges sequential", "error", err)
|
||||
}
|
||||
}
|
||||
|
||||
func (c *Controller) cleanupAccountsLeftovers() error {
|
||||
// We clean up accounts that were deleted and soft removed
|
||||
accounts, err := c.accountsDB.GetWalletAddresses()
|
||||
if err != nil {
|
||||
log.Error("Failed to get accounts", "error", err)
|
||||
return err
|
||||
}
|
||||
|
||||
existingAddresses := make([]common.Address, len(accounts))
|
||||
for i, account := range accounts {
|
||||
existingAddresses[i] = (common.Address)(account)
|
||||
}
|
||||
|
||||
addressesInWalletDB, err := getAddresses(c.db.client)
|
||||
if err != nil {
|
||||
log.Error("Failed to get addresses from wallet db", "error", err)
|
||||
return err
|
||||
}
|
||||
|
||||
missing := findMissingItems(addressesInWalletDB, existingAddresses)
|
||||
for _, address := range missing {
|
||||
c.cleanUpRemovedAccount(address)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// find items from one slice that are not in another
|
||||
func findMissingItems(slice1 []common.Address, slice2 []common.Address) []common.Address {
|
||||
var missing []common.Address
|
||||
for _, item := range slice1 {
|
||||
if !slices.Contains(slice2, item) {
|
||||
missing = append(missing, item)
|
||||
}
|
||||
}
|
||||
return missing
|
||||
}
|
||||
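cleanupAccountsLeftovers derives the set of addresses still present in the wallet DB but no longer registered as wallet accounts, then runs cleanUpRemovedAccount on each. Below is a minimal map-based variant of the same set difference that findMissingItems computes with slices.Contains; plain string addresses stand in for common.Address, so this is an illustration of the idea, not the vendored helper.

package main

import "fmt"

// findMissing returns entries of inWalletDB that are absent from inAccountsDB.
func findMissing(inWalletDB, inAccountsDB []string) []string {
	known := make(map[string]struct{}, len(inAccountsDB))
	for _, a := range inAccountsDB {
		known[a] = struct{}{}
	}
	var missing []string
	for _, a := range inWalletDB {
		if _, ok := known[a]; !ok {
			missing = append(missing, a) // leftover: present in the wallet DB only
		}
	}
	return missing
}

func main() {
	walletDB := []string{"0x01", "0x02", "0x03"}
	accountsDB := []string{"0x01", "0x03"}
	fmt.Println(findMissing(walletDB, accountsDB)) // [0x02]
}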
609
vendor/github.com/status-im/status-go/services/wallet/transfer/database.go
generated
vendored
Normal file
@@ -0,0 +1,609 @@
|
||||
package transfer
|
||||
|
||||
import (
|
||||
"context"
|
||||
"database/sql"
|
||||
"database/sql/driver"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"math/big"
|
||||
"reflect"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/core/types"
|
||||
"github.com/ethereum/go-ethereum/log"
|
||||
|
||||
"github.com/status-im/status-go/services/wallet/bigint"
|
||||
w_common "github.com/status-im/status-go/services/wallet/common"
|
||||
"github.com/status-im/status-go/services/wallet/thirdparty"
|
||||
"github.com/status-im/status-go/sqlite"
|
||||
)
|
||||
|
||||
// DBHeader fields from header that are stored in database.
|
||||
type DBHeader struct {
|
||||
Number *big.Int
|
||||
Hash common.Hash
|
||||
Timestamp uint64
|
||||
PreloadedTransactions []*PreloadedTransaction
|
||||
Network uint64
|
||||
Address common.Address
|
||||
// Head is true if the block was a head at the time it was pulled from chain.
|
||||
Head bool
|
||||
// Loaded is true if transfers from this block have been already fetched
|
||||
Loaded bool
|
||||
}
|
||||
|
||||
func toDBHeader(header *types.Header, blockHash common.Hash, account common.Address) *DBHeader {
|
||||
return &DBHeader{
|
||||
Hash: blockHash,
|
||||
Number: header.Number,
|
||||
Timestamp: header.Time,
|
||||
Loaded: false,
|
||||
Address: account,
|
||||
}
|
||||
}
|
||||
|
||||
// SyncOption is used to specify that the application processed transfers for that block.
|
||||
type SyncOption uint
|
||||
|
||||
// JSONBlob type for marshaling/unmarshaling inner type to json.
|
||||
type JSONBlob struct {
|
||||
data interface{}
|
||||
}
|
||||
|
||||
// Scan implements interface.
|
||||
func (blob *JSONBlob) Scan(value interface{}) error {
|
||||
if value == nil || reflect.ValueOf(blob.data).IsNil() {
|
||||
return nil
|
||||
}
|
||||
bytes, ok := value.([]byte)
|
||||
if !ok {
|
||||
return errors.New("not a byte slice")
|
||||
}
|
||||
if len(bytes) == 0 {
|
||||
return nil
|
||||
}
|
||||
err := json.Unmarshal(bytes, blob.data)
|
||||
return err
|
||||
}
|
||||
|
||||
// Value implements interface.
|
||||
func (blob *JSONBlob) Value() (driver.Value, error) {
|
||||
if blob.data == nil || reflect.ValueOf(blob.data).IsNil() {
|
||||
return nil, nil
|
||||
}
|
||||
return json.Marshal(blob.data)
|
||||
}
|
||||
|
||||
func NewDB(client *sql.DB) *Database {
|
||||
return &Database{client: client}
|
||||
}
|
||||
|
||||
// Database sql wrapper for operations with wallet objects.
|
||||
type Database struct {
|
||||
client *sql.DB
|
||||
}
|
||||
|
||||
// Close closes database.
|
||||
func (db *Database) Close() error {
|
||||
return db.client.Close()
|
||||
}
|
||||
|
||||
func (db *Database) SaveBlocks(chainID uint64, headers []*DBHeader) (err error) {
|
||||
var (
|
||||
tx *sql.Tx
|
||||
)
|
||||
tx, err = db.client.Begin()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer func() {
|
||||
if err == nil {
|
||||
err = tx.Commit()
|
||||
return
|
||||
}
|
||||
_ = tx.Rollback()
|
||||
}()
|
||||
|
||||
err = insertBlocksWithTransactions(chainID, tx, headers)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
func saveTransfersMarkBlocksLoaded(creator statementCreator, chainID uint64, address common.Address, transfers []Transfer, blocks []*big.Int) (err error) {
|
||||
err = updateOrInsertTransfers(chainID, creator, transfers)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
err = markBlocksAsLoaded(chainID, creator, address, blocks)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
// GetTransfersInRange loads transfers for a given address between two blocks.
|
||||
func (db *Database) GetTransfersInRange(chainID uint64, address common.Address, start, end *big.Int) (rst []Transfer, err error) {
|
||||
query := newTransfersQuery().FilterNetwork(chainID).FilterAddress(address).FilterStart(start).FilterEnd(end).FilterLoaded(1)
|
||||
rows, err := db.client.Query(query.String(), query.Args()...)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
defer rows.Close()
|
||||
return query.TransferScan(rows)
|
||||
}
|
||||
|
||||
// GetTransfersByAddress loads transfers for a given address between two blocks.
|
||||
func (db *Database) GetTransfersByAddress(chainID uint64, address common.Address, toBlock *big.Int, limit int64) (rst []Transfer, err error) {
|
||||
query := newTransfersQuery().
|
||||
FilterNetwork(chainID).
|
||||
FilterAddress(address).
|
||||
FilterEnd(toBlock).
|
||||
FilterLoaded(1).
|
||||
SortByBlockNumberAndHash().
|
||||
Limit(limit)
|
||||
|
||||
rows, err := db.client.Query(query.String(), query.Args()...)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
defer rows.Close()
|
||||
return query.TransferScan(rows)
|
||||
}
|
||||
|
||||
// GetTransfersByAddressAndBlock loads transfers for a given address and block.
|
||||
func (db *Database) GetTransfersByAddressAndBlock(chainID uint64, address common.Address, block *big.Int, limit int64) (rst []Transfer, err error) {
|
||||
query := newTransfersQuery().
|
||||
FilterNetwork(chainID).
|
||||
FilterAddress(address).
|
||||
FilterBlockNumber(block).
|
||||
FilterLoaded(1).
|
||||
SortByBlockNumberAndHash().
|
||||
Limit(limit)
|
||||
|
||||
rows, err := db.client.Query(query.String(), query.Args()...)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
defer rows.Close()
|
||||
return query.TransferScan(rows)
|
||||
}
|
||||
|
||||
// GetTransfers loads transfers between two blocks.
|
||||
func (db *Database) GetTransfers(chainID uint64, start, end *big.Int) (rst []Transfer, err error) {
|
||||
query := newTransfersQuery().FilterNetwork(chainID).FilterStart(start).FilterEnd(end).FilterLoaded(1)
|
||||
rows, err := db.client.Query(query.String(), query.Args()...)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
defer rows.Close()
|
||||
return query.TransferScan(rows)
|
||||
}
|
||||
|
||||
func (db *Database) GetTransfersForIdentities(ctx context.Context, identities []TransactionIdentity) (rst []Transfer, err error) {
|
||||
query := newTransfersQuery()
|
||||
for _, identity := range identities {
|
||||
subQuery := newSubQuery()
|
||||
subQuery = subQuery.FilterNetwork(uint64(identity.ChainID)).FilterTransactionID(identity.Hash).FilterAddress(identity.Address)
|
||||
query.addSubQuery(subQuery, OrSeparator)
|
||||
}
|
||||
rows, err := db.client.QueryContext(ctx, query.String(), query.Args()...)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
defer rows.Close()
|
||||
return query.TransferScan(rows)
|
||||
}
|
||||
|
||||
func (db *Database) GetTransactionsToLoad(chainID uint64, address common.Address, blockNumber *big.Int) (rst []*PreloadedTransaction, err error) {
|
||||
query := newTransfersQueryForPreloadedTransactions().
|
||||
FilterNetwork(chainID).
|
||||
FilterLoaded(0)
|
||||
|
||||
if address != (common.Address{}) {
|
||||
query.FilterAddress(address)
|
||||
}
|
||||
|
||||
if blockNumber != nil {
|
||||
query.FilterBlockNumber(blockNumber)
|
||||
}
|
||||
|
||||
rows, err := db.client.Query(query.String(), query.Args()...)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
defer rows.Close()
|
||||
return query.PreloadedTransactionScan(rows)
|
||||
}
|
||||
|
||||
// statementCreator allows to pass transaction or database to use in consumer.
|
||||
type statementCreator interface {
|
||||
Prepare(query string) (*sql.Stmt, error)
|
||||
}
|
||||
|
||||
// Only used by status-mobile
|
||||
func (db *Database) InsertBlock(chainID uint64, account common.Address, blockNumber *big.Int, blockHash common.Hash) error {
|
||||
var (
|
||||
tx *sql.Tx
|
||||
)
|
||||
tx, err := db.client.Begin()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer func() {
|
||||
if err == nil {
|
||||
err = tx.Commit()
|
||||
return
|
||||
}
|
||||
_ = tx.Rollback()
|
||||
}()
|
||||
|
||||
blockDB := blockDBFields{
|
||||
chainID: chainID,
|
||||
account: account,
|
||||
blockNumber: blockNumber,
|
||||
blockHash: blockHash,
|
||||
}
|
||||
return insertBlockDBFields(tx, blockDB)
|
||||
}
|
||||
|
||||
type blockDBFields struct {
|
||||
chainID uint64
|
||||
account common.Address
|
||||
blockNumber *big.Int
|
||||
blockHash common.Hash
|
||||
}
|
||||
|
||||
func insertBlockDBFields(creator statementCreator, block blockDBFields) error {
|
||||
insert, err := creator.Prepare("INSERT OR IGNORE INTO blocks(network_id, address, blk_number, blk_hash, loaded) VALUES (?, ?, ?, ?, ?)")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
_, err = insert.Exec(block.chainID, block.account, (*bigint.SQLBigInt)(block.blockNumber), block.blockHash, true)
|
||||
return err
|
||||
}
|
||||
|
||||
func insertBlocksWithTransactions(chainID uint64, creator statementCreator, headers []*DBHeader) error {
|
||||
insert, err := creator.Prepare("INSERT OR IGNORE INTO blocks(network_id, address, blk_number, blk_hash, loaded) VALUES (?, ?, ?, ?, ?)")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
updateTx, err := creator.Prepare(`UPDATE transfers
|
||||
SET log = ?, log_index = ?
|
||||
WHERE network_id = ? AND address = ? AND hash = ?`)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
insertTx, err := creator.Prepare(`INSERT OR IGNORE
|
||||
INTO transfers (network_id, address, sender, hash, blk_number, blk_hash, type, timestamp, log, loaded, log_index, token_id, amount_padded128hex)
|
||||
VALUES (?, ?, ?, ?, ?, ?, ?, 0, ?, 0, ?, ?, ?)`)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
for _, header := range headers {
|
||||
_, err = insert.Exec(chainID, header.Address, (*bigint.SQLBigInt)(header.Number), header.Hash, header.Loaded)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
for _, transaction := range header.PreloadedTransactions {
|
||||
var logIndex *uint
|
||||
if transaction.Log != nil {
|
||||
logIndex = new(uint)
|
||||
*logIndex = transaction.Log.Index
|
||||
}
|
||||
res, err := updateTx.Exec(&JSONBlob{transaction.Log}, logIndex, chainID, header.Address, transaction.ID)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
affected, err := res.RowsAffected()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if affected > 0 {
|
||||
continue
|
||||
}
|
||||
|
||||
tokenID := (*bigint.SQLBigIntBytes)(transaction.TokenID)
|
||||
txValue := sqlite.BigIntToPadded128BitsStr(transaction.Value)
|
||||
// Is it correct to set the sender to the account address?
|
||||
_, err = insertTx.Exec(chainID, header.Address, header.Address, transaction.ID, (*bigint.SQLBigInt)(header.Number), header.Hash, transaction.Type, &JSONBlob{transaction.Log}, logIndex, tokenID, txValue)
|
||||
if err != nil {
|
||||
log.Error("error saving token transfer", "err", err)
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func updateOrInsertTransfers(chainID uint64, creator statementCreator, transfers []Transfer) error {
|
||||
txsDBFields := make([]transferDBFields, 0, len(transfers))
|
||||
for _, t := range transfers {
|
||||
var receiptType *uint8
|
||||
var txHash, blockHash *common.Hash
|
||||
var receiptStatus, cumulativeGasUsed, gasUsed *uint64
|
||||
var contractAddress *common.Address
|
||||
var transactionIndex, logIndex *uint
|
||||
|
||||
if t.Receipt != nil {
|
||||
receiptType = &t.Receipt.Type
|
||||
receiptStatus = &t.Receipt.Status
|
||||
txHash = &t.Receipt.TxHash
|
||||
if t.Log != nil {
|
||||
logIndex = new(uint)
|
||||
*logIndex = t.Log.Index
|
||||
}
|
||||
blockHash = &t.Receipt.BlockHash
|
||||
cumulativeGasUsed = &t.Receipt.CumulativeGasUsed
|
||||
contractAddress = &t.Receipt.ContractAddress
|
||||
gasUsed = &t.Receipt.GasUsed
|
||||
transactionIndex = &t.Receipt.TransactionIndex
|
||||
}
|
||||
|
||||
var txProtected *bool
|
||||
var txGas, txNonce, txSize *uint64
|
||||
var txGasPrice, txGasTipCap, txGasFeeCap *big.Int
|
||||
var txType *uint8
|
||||
var txValue *big.Int
|
||||
var tokenAddress *common.Address
|
||||
var tokenID *big.Int
|
||||
var txFrom *common.Address
|
||||
var txTo *common.Address
|
||||
if t.Transaction != nil {
|
||||
if t.Log != nil {
|
||||
_, tokenAddress, txFrom, txTo = w_common.ExtractTokenTransferData(t.Type, t.Log, t.Transaction)
|
||||
tokenID = t.TokenID
|
||||
// Zero tokenID can be used for ERC721 and ERC1155 transfers, but when serialized/deserialized it becomes nil
|
||||
// as 0 value of big.Int bytes is nil.
|
||||
if tokenID == nil && (t.Type == w_common.Erc721Transfer || t.Type == w_common.Erc1155Transfer) {
|
||||
tokenID = big.NewInt(0)
|
||||
}
|
||||
txValue = t.TokenValue
|
||||
} else {
|
||||
txValue = new(big.Int).Set(t.Transaction.Value())
|
||||
txFrom = &t.From
|
||||
txTo = t.Transaction.To()
|
||||
}
|
||||
|
||||
txType = new(uint8)
|
||||
*txType = t.Transaction.Type()
|
||||
txProtected = new(bool)
|
||||
*txProtected = t.Transaction.Protected()
|
||||
txGas = new(uint64)
|
||||
*txGas = t.Transaction.Gas()
|
||||
txGasPrice = t.Transaction.GasPrice()
|
||||
txGasTipCap = t.Transaction.GasTipCap()
|
||||
txGasFeeCap = t.Transaction.GasFeeCap()
|
||||
txNonce = new(uint64)
|
||||
*txNonce = t.Transaction.Nonce()
|
||||
txSize = new(uint64)
|
||||
*txSize = t.Transaction.Size()
|
||||
}
|
||||
|
||||
dbFields := transferDBFields{
|
||||
chainID: chainID,
|
||||
id: t.ID,
|
||||
blockHash: t.BlockHash,
|
||||
blockNumber: t.BlockNumber,
|
||||
timestamp: t.Timestamp,
|
||||
address: t.Address,
|
||||
transaction: t.Transaction,
|
||||
sender: t.From,
|
||||
receipt: t.Receipt,
|
||||
log: t.Log,
|
||||
transferType: t.Type,
|
||||
baseGasFees: t.BaseGasFees,
|
||||
multiTransactionID: t.MultiTransactionID,
|
||||
receiptStatus: receiptStatus,
|
||||
receiptType: receiptType,
|
||||
txHash: txHash,
|
||||
logIndex: logIndex,
|
||||
receiptBlockHash: blockHash,
|
||||
cumulativeGasUsed: cumulativeGasUsed,
|
||||
contractAddress: contractAddress,
|
||||
gasUsed: gasUsed,
|
||||
transactionIndex: transactionIndex,
|
||||
txType: txType,
|
||||
txProtected: txProtected,
|
||||
txGas: txGas,
|
||||
txGasPrice: txGasPrice,
|
||||
txGasTipCap: txGasTipCap,
|
||||
txGasFeeCap: txGasFeeCap,
|
||||
txValue: txValue,
|
||||
txNonce: txNonce,
|
||||
txSize: txSize,
|
||||
tokenAddress: tokenAddress,
|
||||
tokenID: tokenID,
|
||||
txFrom: txFrom,
|
||||
txTo: txTo,
|
||||
}
|
||||
txsDBFields = append(txsDBFields, dbFields)
|
||||
}
|
||||
|
||||
return updateOrInsertTransfersDBFields(creator, txsDBFields)
|
||||
}
|
||||
|
||||
type transferDBFields struct {
|
||||
chainID uint64
|
||||
id common.Hash
|
||||
blockHash common.Hash
|
||||
blockNumber *big.Int
|
||||
timestamp uint64
|
||||
address common.Address
|
||||
transaction *types.Transaction
|
||||
sender common.Address
|
||||
receipt *types.Receipt
|
||||
log *types.Log
|
||||
transferType w_common.Type
|
||||
baseGasFees string
|
||||
multiTransactionID MultiTransactionIDType
|
||||
receiptStatus *uint64
|
||||
receiptType *uint8
|
||||
txHash *common.Hash
|
||||
logIndex *uint
|
||||
receiptBlockHash *common.Hash
|
||||
cumulativeGasUsed *uint64
|
||||
contractAddress *common.Address
|
||||
gasUsed *uint64
|
||||
transactionIndex *uint
|
||||
txType *uint8
|
||||
txProtected *bool
|
||||
txGas *uint64
|
||||
txGasPrice *big.Int
|
||||
txGasTipCap *big.Int
|
||||
txGasFeeCap *big.Int
|
||||
txValue *big.Int
|
||||
txNonce *uint64
|
||||
txSize *uint64
|
||||
tokenAddress *common.Address
|
||||
tokenID *big.Int
|
||||
txFrom *common.Address
|
||||
txTo *common.Address
|
||||
}
|
||||
|
||||
func updateOrInsertTransfersDBFields(creator statementCreator, transfers []transferDBFields) error {
|
||||
insert, err := creator.Prepare(`INSERT OR REPLACE INTO transfers
|
||||
(network_id, hash, blk_hash, blk_number, timestamp, address, tx, sender, receipt, log, type, loaded, base_gas_fee, multi_transaction_id,
|
||||
status, receipt_type, tx_hash, log_index, block_hash, cumulative_gas_used, contract_address, gas_used, tx_index,
|
||||
tx_type, protected, gas_limit, gas_price_clamped64, gas_tip_cap_clamped64, gas_fee_cap_clamped64, amount_padded128hex, account_nonce, size, token_address, token_id, tx_from_address, tx_to_address)
|
||||
VALUES
|
||||
(?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, 1, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)`)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
for _, t := range transfers {
|
||||
txGasPrice := sqlite.BigIntToClampedInt64(t.txGasPrice)
|
||||
txGasTipCap := sqlite.BigIntToClampedInt64(t.txGasTipCap)
|
||||
txGasFeeCap := sqlite.BigIntToClampedInt64(t.txGasFeeCap)
|
||||
txValue := sqlite.BigIntToPadded128BitsStr(t.txValue)
|
||||
|
||||
_, err = insert.Exec(t.chainID, t.id, t.blockHash, (*bigint.SQLBigInt)(t.blockNumber), t.timestamp, t.address, &JSONBlob{t.transaction}, t.sender, &JSONBlob{t.receipt}, &JSONBlob{t.log}, t.transferType, t.baseGasFees, t.multiTransactionID,
|
||||
t.receiptStatus, t.receiptType, t.txHash, t.logIndex, t.receiptBlockHash, t.cumulativeGasUsed, t.contractAddress, t.gasUsed, t.transactionIndex,
|
||||
t.txType, t.txProtected, t.txGas, txGasPrice, txGasTipCap, txGasFeeCap, txValue, t.txNonce, t.txSize, t.tokenAddress, (*bigint.SQLBigIntBytes)(t.tokenID), t.txFrom, t.txTo)
|
||||
if err != nil {
|
||||
log.Error("can't save transfer", "b-hash", t.blockHash, "b-n", t.blockNumber, "a", t.address, "h", t.id)
|
||||
return err
|
||||
}
|
||||
|
||||
err = removeGasOnlyEthTransfer(creator, t)
|
||||
if err != nil {
|
||||
log.Error("can't remove gas only eth transfer", "b-hash", t.blockHash, "b-n", t.blockNumber, "a", t.address, "h", t.id, "err", err)
|
||||
// no return err, since it's not critical
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func removeGasOnlyEthTransfer(creator statementCreator, t transferDBFields) error {
|
||||
if t.transferType != w_common.EthTransfer {
|
||||
query, err := creator.Prepare(`DELETE FROM transfers WHERE tx_hash = ? AND address = ? AND network_id = ?
|
||||
AND account_nonce = ? AND type = 'eth' AND amount_padded128hex = '00000000000000000000000000000000'`)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
_, err = query.Exec(t.txHash, t.address, t.chainID, t.txNonce)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// markBlocksAsLoaded(chainID, tx, address, blockNumbers)
|
||||
// In case a block contains both ETH and token transfers, it will be marked as loaded on ETH transfer processing.
|
||||
// This is not a problem since for token transfers we have preloaded transactions, and the blocks' 'loaded' flag is needed
|
||||
// for ETH transfers only.
|
||||
func markBlocksAsLoaded(chainID uint64, creator statementCreator, address common.Address, blocks []*big.Int) error {
|
||||
update, err := creator.Prepare("UPDATE blocks SET loaded=? WHERE address=? AND blk_number=? AND network_id=?")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
for _, block := range blocks {
|
||||
_, err := update.Exec(true, address, (*bigint.SQLBigInt)(block), chainID)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// GetOwnedMultiTransactionID returns sql.ErrNoRows if no transaction is found for the given identity
|
||||
func GetOwnedMultiTransactionID(tx *sql.Tx, chainID w_common.ChainID, id common.Hash, address common.Address) (mTID int64, err error) {
|
||||
row := tx.QueryRow(`SELECT COALESCE(multi_transaction_id, 0) FROM transfers WHERE network_id = ? AND hash = ? AND address = ?`, chainID, id, address)
|
||||
err = row.Scan(&mTID)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
return mTID, nil
|
||||
}
|
||||
|
||||
func (db *Database) GetLatestCollectibleTransfer(address common.Address, id thirdparty.CollectibleUniqueID) (*Transfer, error) {
|
||||
query := newTransfersQuery().
|
||||
FilterAddress(address).
|
||||
FilterNetwork(uint64(id.ContractID.ChainID)).
|
||||
FilterTokenAddress(id.ContractID.Address).
|
||||
FilterTokenID(id.TokenID.Int).
|
||||
FilterLoaded(1).
|
||||
SortByTimestamp(false).
|
||||
Limit(1)
|
||||
rows, err := db.client.Query(query.String(), query.Args()...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer rows.Close()
|
||||
|
||||
transfers, err := query.TransferScan(rows)
|
||||
if err == sql.ErrNoRows || len(transfers) == 0 {
|
||||
return nil, nil
|
||||
} else if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &transfers[0], nil
|
||||
}
|
||||
|
||||
// Delete blocks for the given address
|
||||
// Transfers will be deleted by cascade
|
||||
func deleteBlocks(creator statementCreator, address common.Address) error {
|
||||
delete, err := creator.Prepare("DELETE FROM blocks WHERE address = ?")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
_, err = delete.Exec(address)
|
||||
return err
|
||||
}
|
||||
|
||||
func getAddresses(creator statementCreator) (rst []common.Address, err error) {
|
||||
stmt, err := creator.Prepare(`SELECT address FROM transfers UNION SELECT address FROM blocks UNION
|
||||
SELECT address FROM blocks_ranges_sequential UNION SELECT address FROM blocks_ranges`)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
rows, err := stmt.Query()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer rows.Close()
|
||||
|
||||
address := common.Address{}
|
||||
for rows.Next() {
|
||||
err = rows.Scan(&address)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
rst = append(rst, address)
|
||||
}
|
||||
|
||||
return rst, nil
|
||||
}
|
||||
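The JSONBlob wrapper above lets arbitrary structs (transactions, receipts, logs) round-trip through a single SQLite column as JSON by implementing sql.Scanner and driver.Valuer. The standalone sketch below mirrors that pattern under simplified assumptions: it drops the reflection-based nil check, uses a made-up receipt struct, and simulates the write/read cycle without a real database, so it is illustrative rather than the vendored type.

package main

import (
	"database/sql/driver"
	"encoding/json"
	"errors"
	"fmt"
)

// jsonBlob stores any JSON-serializable value in a BLOB/TEXT column.
type jsonBlob struct {
	data interface{}
}

// Scan implements sql.Scanner: it unmarshals the stored JSON bytes into data.
func (b *jsonBlob) Scan(value interface{}) error {
	if value == nil {
		return nil
	}
	raw, ok := value.([]byte)
	if !ok {
		return errors.New("not a byte slice")
	}
	if len(raw) == 0 {
		return nil
	}
	return json.Unmarshal(raw, b.data)
}

// Value implements driver.Valuer: it marshals data to JSON bytes for storage.
func (b *jsonBlob) Value() (driver.Value, error) {
	if b.data == nil {
		return nil, nil
	}
	return json.Marshal(b.data)
}

// receipt is a hypothetical payload; the vendored code stores types.Receipt and friends.
type receipt struct {
	Status  uint64 `json:"status"`
	GasUsed uint64 `json:"gasUsed"`
}

func main() {
	// Simulate a write followed by a read without a real database.
	stored, _ := (&jsonBlob{receipt{Status: 1, GasUsed: 21000}}).Value()
	var decoded receipt
	_ = (&jsonBlob{&decoded}).Scan(stored)
	fmt.Printf("%+v\n", decoded) // {Status:1 GasUsed:21000}
}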
615
vendor/github.com/status-im/status-go/services/wallet/transfer/downloader.go
generated
vendored
Normal file
@@ -0,0 +1,615 @@
|
||||
package transfer
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"math/big"
|
||||
"time"
|
||||
|
||||
"golang.org/x/exp/slices" // since 1.21, this is in the standard library
|
||||
|
||||
"github.com/ethereum/go-ethereum"
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/core"
|
||||
"github.com/ethereum/go-ethereum/core/types"
|
||||
"github.com/ethereum/go-ethereum/log"
|
||||
|
||||
"github.com/status-im/status-go/rpc/chain"
|
||||
w_common "github.com/status-im/status-go/services/wallet/common"
|
||||
)
|
||||
|
||||
var (
|
||||
zero = big.NewInt(0)
|
||||
one = big.NewInt(1)
|
||||
two = big.NewInt(2)
|
||||
)
|
||||
|
||||
// Partial transaction info obtained by ERC20Downloader.
|
||||
// A PreloadedTransaction represents a Transaction which contains one
|
||||
// ERC20/ERC721/ERC1155 transfer event.
|
||||
// To be converted into one Transfer object post-indexing.
|
||||
type PreloadedTransaction struct {
|
||||
Type w_common.Type `json:"type"`
|
||||
ID common.Hash `json:"-"`
|
||||
Address common.Address `json:"address"`
|
||||
// Log that was used to generate preloaded transaction.
|
||||
Log *types.Log `json:"log"`
|
||||
TokenID *big.Int `json:"tokenId"`
|
||||
Value *big.Int `json:"value"`
|
||||
}
|
||||
|
||||
// Transfer stores information about transfer.
|
||||
// A Transfer represents a plain ETH transfer or some token activity inside a Transaction
|
||||
// Since ERC1155 transfers can contain multiple tokens, a single Transfer represents a single token transfer,
|
||||
// which means ERC1155 batch transfers will be represented by multiple Transfer objects.
|
||||
type Transfer struct {
|
||||
Type w_common.Type `json:"type"`
|
||||
ID common.Hash `json:"-"`
|
||||
Address common.Address `json:"address"`
|
||||
BlockNumber *big.Int `json:"blockNumber"`
|
||||
BlockHash common.Hash `json:"blockhash"`
|
||||
Timestamp uint64 `json:"timestamp"`
|
||||
Transaction *types.Transaction `json:"transaction"`
|
||||
Loaded bool
|
||||
NetworkID uint64
|
||||
// From is derived from tx signature in order to offload this computation from UI component.
|
||||
From common.Address `json:"from"`
|
||||
Receipt *types.Receipt `json:"receipt"`
|
||||
// Log that was used to generate erc20 transfer. Nil for eth transfer.
|
||||
Log *types.Log `json:"log"`
|
||||
// TokenID is the id of the transferred token. Nil for eth transfer.
|
||||
TokenID *big.Int `json:"tokenId"`
|
||||
// TokenValue is the value of the token transfer. Nil for eth transfer.
|
||||
TokenValue *big.Int `json:"tokenValue"`
|
||||
BaseGasFees string
|
||||
// Internal field that is used to track multi-transaction transfers.
|
||||
MultiTransactionID MultiTransactionIDType `json:"multi_transaction_id"`
|
||||
}
|
||||
|
||||
// ETHDownloader downloads regular eth transfers and tokens transfers.
|
||||
type ETHDownloader struct {
|
||||
chainClient chain.ClientInterface
|
||||
accounts []common.Address
|
||||
signer types.Signer
|
||||
db *Database
|
||||
}
|
||||
|
||||
var errLogsDownloaderStuck = errors.New("logs downloader stuck")
|
||||
|
||||
func (d *ETHDownloader) GetTransfersByNumber(ctx context.Context, number *big.Int) ([]Transfer, error) {
|
||||
blk, err := d.chainClient.BlockByNumber(ctx, number)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
rst, err := d.getTransfersInBlock(ctx, blk, d.accounts)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return rst, err
|
||||
}
|
||||
|
||||
// Only used by status-mobile
|
||||
func getTransferByHash(ctx context.Context, client chain.ClientInterface, signer types.Signer, address common.Address, hash common.Hash) (*Transfer, error) {
|
||||
transaction, _, err := client.TransactionByHash(ctx, hash)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
receipt, err := client.TransactionReceipt(ctx, hash)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
eventType, transactionLog := w_common.GetFirstEvent(receipt.Logs)
|
||||
transactionType := w_common.EventTypeToSubtransactionType(eventType)
|
||||
|
||||
from, err := types.Sender(signer, transaction)
|
||||
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
baseGasFee, err := client.GetBaseFeeFromBlock(big.NewInt(int64(transactionLog.BlockNumber)))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
transfer := &Transfer{
|
||||
Type: transactionType,
|
||||
ID: hash,
|
||||
Address: address,
|
||||
BlockNumber: receipt.BlockNumber,
|
||||
BlockHash: receipt.BlockHash,
|
||||
Timestamp: uint64(time.Now().Unix()),
|
||||
Transaction: transaction,
|
||||
From: from,
|
||||
Receipt: receipt,
|
||||
Log: transactionLog,
|
||||
BaseGasFees: baseGasFee,
|
||||
}
|
||||
|
||||
return transfer, nil
|
||||
}
|
||||
|
||||
func (d *ETHDownloader) getTransfersInBlock(ctx context.Context, blk *types.Block, accounts []common.Address) ([]Transfer, error) {
|
||||
startTs := time.Now()
|
||||
|
||||
rst := make([]Transfer, 0, len(blk.Transactions()))
|
||||
|
||||
receiptsByAddressAndTxHash := make(map[common.Address]map[common.Hash]*types.Receipt)
|
||||
txsByAddressAndTxHash := make(map[common.Address]map[common.Hash]*types.Transaction)
|
||||
|
||||
addReceiptToCache := func(address common.Address, txHash common.Hash, receipt *types.Receipt) {
|
||||
if receiptsByAddressAndTxHash[address] == nil {
|
||||
receiptsByAddressAndTxHash[address] = make(map[common.Hash]*types.Receipt)
|
||||
}
|
||||
receiptsByAddressAndTxHash[address][txHash] = receipt
|
||||
}
|
||||
|
||||
addTxToCache := func(address common.Address, txHash common.Hash, tx *types.Transaction) {
|
||||
if txsByAddressAndTxHash[address] == nil {
|
||||
txsByAddressAndTxHash[address] = make(map[common.Hash]*types.Transaction)
|
||||
}
|
||||
txsByAddressAndTxHash[address][txHash] = tx
|
||||
}
|
||||
|
||||
getReceiptFromCache := func(address common.Address, txHash common.Hash) *types.Receipt {
|
||||
if receiptsByAddressAndTxHash[address] == nil {
|
||||
return nil
|
||||
}
|
||||
return receiptsByAddressAndTxHash[address][txHash]
|
||||
}
|
||||
|
||||
getTxFromCache := func(address common.Address, txHash common.Hash) *types.Transaction {
|
||||
if txsByAddressAndTxHash[address] == nil {
|
||||
return nil
|
||||
}
|
||||
return txsByAddressAndTxHash[address][txHash]
|
||||
}
|
||||
|
||||
getReceipt := func(address common.Address, txHash common.Hash) (receipt *types.Receipt, err error) {
|
||||
receipt = getReceiptFromCache(address, txHash)
|
||||
if receipt == nil {
|
||||
receipt, err = d.fetchTransactionReceipt(ctx, txHash)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
addReceiptToCache(address, txHash, receipt)
|
||||
}
|
||||
return receipt, nil
|
||||
}
|
||||
|
||||
getTx := func(address common.Address, txHash common.Hash) (tx *types.Transaction, err error) {
|
||||
tx = getTxFromCache(address, txHash)
|
||||
if tx == nil {
|
||||
tx, err = d.fetchTransaction(ctx, txHash)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
addTxToCache(address, txHash, tx)
|
||||
}
|
||||
return tx, nil
|
||||
}
|
||||
|
||||
for _, address := range accounts {
|
||||
// During block discovery, we should have populated the DB with 1 item per transfer log containing
|
||||
// erc20/erc721/erc1155 transfers.
|
||||
// ID is a hash of the tx hash and the log index. log_index is unique per ERC20/721 tx, but not per ERC1155 tx.
|
||||
transactionsToLoad, err := d.db.GetTransactionsToLoad(d.chainClient.NetworkID(), address, blk.Number())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
areSubTxsCheckedForTxHash := make(map[common.Hash]bool)
|
||||
|
||||
log.Debug("getTransfersInBlock", "block", blk.Number(), "transactionsToLoad", len(transactionsToLoad))
|
||||
|
||||
for _, t := range transactionsToLoad {
|
||||
receipt, err := getReceipt(address, t.Log.TxHash)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
tx, err := getTx(address, t.Log.TxHash)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
subtransactions, err := d.subTransactionsFromPreloaded(t, tx, receipt, blk)
|
||||
if err != nil {
|
||||
log.Error("can't fetch subTxs for erc20/erc721/erc1155 transfer", "error", err)
|
||||
return nil, err
|
||||
}
|
||||
rst = append(rst, subtransactions...)
|
||||
areSubTxsCheckedForTxHash[t.Log.TxHash] = true
|
||||
}
|
||||
|
||||
for _, tx := range blk.Transactions() {
|
||||
// Skip dummy blob transactions, as they are not supported by us
|
||||
if tx.Type() == types.BlobTxType {
|
||||
continue
|
||||
}
|
||||
if tx.ChainId().Cmp(big.NewInt(0)) != 0 && tx.ChainId().Cmp(d.chainClient.ToBigInt()) != 0 {
|
||||
log.Info("chain id mismatch", "tx hash", tx.Hash(), "tx chain id", tx.ChainId(), "expected chain id", d.chainClient.NetworkID())
|
||||
continue
|
||||
}
|
||||
from, err := types.Sender(d.signer, tx)
|
||||
|
||||
if err != nil {
|
||||
if err == core.ErrTxTypeNotSupported {
|
||||
log.Error("Tx Type not supported", "tx chain id", tx.ChainId(), "type", tx.Type(), "error", err)
|
||||
continue
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
|
||||
isPlainTransfer := from == address || (tx.To() != nil && *tx.To() == address)
|
||||
mustCheckSubTxs := false
|
||||
|
||||
if !isPlainTransfer {
|
||||
// We might miss some subTransactions of interest for some transaction types. We need to check if we
|
||||
// find the address in the transaction data.
|
||||
switch tx.Type() {
|
||||
case types.DynamicFeeTxType, types.OptimismDepositTxType, types.ArbitrumDepositTxType, types.ArbitrumRetryTxType:
|
||||
mustCheckSubTxs = !areSubTxsCheckedForTxHash[tx.Hash()] && w_common.TxDataContainsAddress(tx.Type(), tx.Data(), address)
|
||||
}
|
||||
}
|
||||
|
||||
if isPlainTransfer || mustCheckSubTxs {
|
||||
receipt, err := getReceipt(address, tx.Hash())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Since we've already got the receipt, check for subTxs of
|
||||
// interest in case we haven't already.
|
||||
if !areSubTxsCheckedForTxHash[tx.Hash()] {
|
||||
subtransactions, err := d.subTransactionsFromTransactionData(address, from, tx, receipt, blk)
|
||||
if err != nil {
|
||||
log.Error("can't fetch subTxs for eth transfer", "error", err)
|
||||
return nil, err
|
||||
}
|
||||
rst = append(rst, subtransactions...)
|
||||
areSubTxsCheckedForTxHash[tx.Hash()] = true
|
||||
}
|
||||
|
||||
// If it's a plain ETH transfer, add it to the list
|
||||
if isPlainTransfer {
|
||||
rst = append(rst, Transfer{
|
||||
Type: w_common.EthTransfer,
|
||||
NetworkID: tx.ChainId().Uint64(),
|
||||
ID: tx.Hash(),
|
||||
Address: address,
|
||||
BlockNumber: blk.Number(),
|
||||
BlockHash: receipt.BlockHash,
|
||||
Timestamp: blk.Time(),
|
||||
Transaction: tx,
|
||||
From: from,
|
||||
Receipt: receipt,
|
||||
Log: nil,
|
||||
BaseGasFees: blk.BaseFee().String(),
|
||||
MultiTransactionID: NoMultiTransactionID})
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
log.Debug("getTransfersInBlock found", "block", blk.Number(), "len", len(rst), "time", time.Since(startTs))
|
||||
// TODO(dshulyak) test that balance difference was covered by transactions
|
||||
return rst, nil
|
||||
}
|
||||
|
||||
// NewERC20TransfersDownloader returns new instance.
|
||||
func NewERC20TransfersDownloader(client chain.ClientInterface, accounts []common.Address, signer types.Signer, incomingOnly bool) *ERC20TransfersDownloader {
|
||||
signature := w_common.GetEventSignatureHash(w_common.Erc20_721TransferEventSignature)
|
||||
|
||||
return &ERC20TransfersDownloader{
|
||||
client: client,
|
||||
accounts: accounts,
|
||||
signature: signature,
|
||||
incomingOnly: incomingOnly,
|
||||
signatureErc1155Single: w_common.GetEventSignatureHash(w_common.Erc1155TransferSingleEventSignature),
|
||||
signatureErc1155Batch: w_common.GetEventSignatureHash(w_common.Erc1155TransferBatchEventSignature),
|
||||
signer: signer,
|
||||
}
|
||||
}
|
||||
|
||||
// ERC20TransfersDownloader is a downloader for erc20 and erc721 tokens transfers.
|
||||
// Since both transaction types share the same signature, both will be assigned
|
||||
// type Erc20Transfer. Until the downloader gets refactored and a migration of the
|
||||
// database gets implemented, differentiation between erc20 and erc721 will be handled
|
||||
// in the controller.
|
||||
type ERC20TransfersDownloader struct {
|
||||
client chain.ClientInterface
|
||||
accounts []common.Address
|
||||
incomingOnly bool
|
||||
|
||||
// hash of the Transfer event signature
|
||||
signature common.Hash
|
||||
signatureErc1155Single common.Hash
|
||||
signatureErc1155Batch common.Hash
|
||||
|
||||
// signer is used to derive tx sender from tx signature
|
||||
signer types.Signer
|
||||
}
|
||||
|
||||
func topicFromAddressSlice(addresses []common.Address) []common.Hash {
|
||||
rst := make([]common.Hash, len(addresses))
|
||||
for i, address := range addresses {
|
||||
rst[i] = common.BytesToHash(address.Bytes())
|
||||
}
|
||||
return rst
|
||||
}
|
||||
|
||||
func (d *ERC20TransfersDownloader) inboundTopics(addresses []common.Address) [][]common.Hash {
|
||||
return [][]common.Hash{{d.signature}, {}, topicFromAddressSlice(addresses)}
|
||||
}
|
||||
|
||||
func (d *ERC20TransfersDownloader) outboundTopics(addresses []common.Address) [][]common.Hash {
|
||||
return [][]common.Hash{{d.signature}, topicFromAddressSlice(addresses), {}}
|
||||
}
|
||||
|
||||
func (d *ERC20TransfersDownloader) inboundERC20OutboundERC1155Topics(addresses []common.Address) [][]common.Hash {
|
||||
return [][]common.Hash{{d.signature, d.signatureErc1155Single, d.signatureErc1155Batch}, {}, topicFromAddressSlice(addresses)}
|
||||
}
|
||||
|
||||
func (d *ERC20TransfersDownloader) inboundTopicsERC1155(addresses []common.Address) [][]common.Hash {
|
||||
return [][]common.Hash{{d.signatureErc1155Single, d.signatureErc1155Batch}, {}, {}, topicFromAddressSlice(addresses)}
|
||||
}
|
||||
|
||||
func (d *ETHDownloader) fetchTransactionReceipt(parent context.Context, txHash common.Hash) (*types.Receipt, error) {
|
||||
ctx, cancel := context.WithTimeout(parent, 3*time.Second)
|
||||
receipt, err := d.chainClient.TransactionReceipt(ctx, txHash)
|
||||
cancel()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return receipt, nil
|
||||
}
|
||||
|
||||
func (d *ETHDownloader) fetchTransaction(parent context.Context, txHash common.Hash) (*types.Transaction, error) {
|
||||
ctx, cancel := context.WithTimeout(parent, 3*time.Second)
|
||||
tx, _, err := d.chainClient.TransactionByHash(ctx, txHash) // TODO Save on requests by checking in the DB first
|
||||
cancel()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return tx, nil
|
||||
}
|
||||
|
||||
func (d *ETHDownloader) subTransactionsFromPreloaded(preloadedTx *PreloadedTransaction, tx *types.Transaction, receipt *types.Receipt, blk *types.Block) ([]Transfer, error) {
|
||||
log.Debug("subTransactionsFromPreloaded start", "txHash", tx.Hash().Hex(), "address", preloadedTx.Address, "tokenID", preloadedTx.TokenID, "value", preloadedTx.Value)
|
||||
address := preloadedTx.Address
|
||||
txLog := preloadedTx.Log
|
||||
|
||||
rst := make([]Transfer, 0, 1)
|
||||
|
||||
from, err := types.Sender(d.signer, tx)
|
||||
if err != nil {
|
||||
if err == core.ErrTxTypeNotSupported {
|
||||
return nil, nil
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
|
||||
eventType := w_common.GetEventType(preloadedTx.Log)
|
||||
// Only add ERC20/ERC721/ERC1155 transfers from/to the given account
|
||||
// from/to matching is already handled by getLogs filter
|
||||
switch eventType {
|
||||
case w_common.Erc20TransferEventType,
|
||||
w_common.Erc721TransferEventType,
|
||||
w_common.Erc1155TransferSingleEventType, w_common.Erc1155TransferBatchEventType:
|
||||
log.Debug("subTransactionsFromPreloaded transfer", "eventType", eventType, "logIdx", txLog.Index, "txHash", tx.Hash().Hex(), "address", address.Hex(), "tokenID", preloadedTx.TokenID, "value", preloadedTx.Value, "baseFee", blk.BaseFee().String())
|
||||
|
||||
transfer := Transfer{
|
||||
Type: w_common.EventTypeToSubtransactionType(eventType),
|
||||
ID: preloadedTx.ID,
|
||||
Address: address,
|
||||
BlockNumber: new(big.Int).SetUint64(txLog.BlockNumber),
|
||||
BlockHash: txLog.BlockHash,
|
||||
Loaded: true,
|
||||
NetworkID: d.signer.ChainID().Uint64(),
|
||||
From: from,
|
||||
Log: txLog,
|
||||
TokenID: preloadedTx.TokenID,
|
||||
TokenValue: preloadedTx.Value,
|
||||
BaseGasFees: blk.BaseFee().String(),
|
||||
Transaction: tx,
|
||||
Receipt: receipt,
|
||||
Timestamp: blk.Time(),
|
||||
MultiTransactionID: NoMultiTransactionID,
|
||||
}
|
||||
|
||||
rst = append(rst, transfer)
|
||||
}
|
||||
|
||||
log.Debug("subTransactionsFromPreloaded end", "txHash", tx.Hash().Hex(), "address", address.Hex(), "tokenID", preloadedTx.TokenID, "value", preloadedTx.Value)
|
||||
return rst, nil
|
||||
}
|
||||
|
||||
func (d *ETHDownloader) subTransactionsFromTransactionData(address, from common.Address, tx *types.Transaction, receipt *types.Receipt, blk *types.Block) ([]Transfer, error) {
|
||||
log.Debug("subTransactionsFromTransactionData start", "txHash", tx.Hash().Hex(), "address", address)
|
||||
|
||||
rst := make([]Transfer, 0, 1)
|
||||
|
||||
for _, txLog := range receipt.Logs {
|
||||
eventType := w_common.GetEventType(txLog)
|
||||
switch eventType {
|
||||
case w_common.UniswapV2SwapEventType, w_common.UniswapV3SwapEventType,
|
||||
w_common.HopBridgeTransferSentToL2EventType, w_common.HopBridgeTransferFromL1CompletedEventType,
|
||||
w_common.HopBridgeWithdrawalBondedEventType, w_common.HopBridgeTransferSentEventType:
|
||||
transfer := Transfer{
|
||||
Type: w_common.EventTypeToSubtransactionType(eventType),
|
||||
ID: w_common.GetLogSubTxID(*txLog),
|
||||
Address: address,
|
||||
BlockNumber: new(big.Int).SetUint64(txLog.BlockNumber),
|
||||
BlockHash: txLog.BlockHash,
|
||||
Loaded: true,
|
||||
NetworkID: d.signer.ChainID().Uint64(),
|
||||
From: from,
|
||||
Log: txLog,
|
||||
BaseGasFees: blk.BaseFee().String(),
|
||||
Transaction: tx,
|
||||
Receipt: receipt,
|
||||
Timestamp: blk.Time(),
|
||||
MultiTransactionID: NoMultiTransactionID,
|
||||
}
|
||||
|
||||
rst = append(rst, transfer)
|
||||
}
|
||||
}
|
||||
|
||||
log.Debug("subTransactionsFromTransactionData end", "txHash", tx.Hash().Hex(), "address", address.Hex())
|
||||
return rst, nil
|
||||
}
|
||||
|
||||
func (d *ERC20TransfersDownloader) blocksFromLogs(parent context.Context, logs []types.Log) ([]*DBHeader, error) {
|
||||
concurrent := NewConcurrentDownloader(parent, NoThreadLimit)
|
||||
|
||||
for i := range logs {
|
||||
l := logs[i]
|
||||
|
||||
if l.Removed {
|
||||
continue
|
||||
}
|
||||
|
||||
var address common.Address
|
||||
from, to, txIDs, tokenIDs, values, err := w_common.ParseTransferLog(l)
|
||||
if err != nil {
|
||||
log.Error("failed to parse transfer log", "log", l, "address", d.accounts, "error", err)
|
||||
continue
|
||||
}
|
||||
|
||||
// Double check provider returned the correct log
|
||||
if slices.Contains(d.accounts, from) {
|
||||
address = from
|
||||
} else if slices.Contains(d.accounts, to) {
|
||||
address = to
|
||||
} else {
|
||||
log.Error("from/to address mismatch", "log", l, "addresses", d.accounts)
|
||||
continue
|
||||
}
|
||||
|
||||
eventType := w_common.GetEventType(&l)
|
||||
logType := w_common.EventTypeToSubtransactionType(eventType)
|
||||
|
||||
for i, txID := range txIDs {
|
||||
log.Debug("block from logs", "block", l.BlockNumber, "log", l, "logType", logType, "txID", txID)
|
||||
|
||||
// For ERC20 there is no tokenID, so we use nil
|
||||
var tokenID *big.Int
|
||||
if len(tokenIDs) > i {
|
||||
tokenID = tokenIDs[i]
|
||||
}
|
||||
|
||||
header := &DBHeader{
|
||||
Number: big.NewInt(int64(l.BlockNumber)),
|
||||
Hash: l.BlockHash,
|
||||
Address: address,
|
||||
PreloadedTransactions: []*PreloadedTransaction{{
|
||||
ID: txID,
|
||||
Type: logType,
|
||||
Log: &l,
|
||||
TokenID: tokenID,
|
||||
Value: values[i],
|
||||
}},
|
||||
Loaded: false,
|
||||
}
|
||||
|
||||
concurrent.Add(func(ctx context.Context) error {
|
||||
concurrent.PushHeader(header)
|
||||
return nil
|
||||
})
|
||||
}
|
||||
}
|
||||
select {
|
||||
case <-concurrent.WaitAsync():
|
||||
case <-parent.Done():
|
||||
return nil, errLogsDownloaderStuck
|
||||
}
|
||||
return concurrent.GetHeaders(), concurrent.Error()
|
||||
}
|
||||
|
||||
// GetHeadersInRange returns headers for transfers found between two blocks.
// As a data point: fetching logs for 100000 blocks took 1.144686979s with 249 events in the result set.
|
||||
func (d *ERC20TransfersDownloader) GetHeadersInRange(parent context.Context, from, to *big.Int) ([]*DBHeader, error) {
|
||||
start := time.Now()
|
||||
log.Debug("get erc20 transfers in range start", "chainID", d.client.NetworkID(), "from", from, "to", to)
|
||||
headers := []*DBHeader{}
|
||||
ctx := context.Background()
|
||||
var err error
|
||||
outbound := []types.Log{}
|
||||
var inboundOrMixed []types.Log // inbound ERC20 or outbound ERC1155 share the same signature for our purposes
|
||||
if !d.incomingOnly {
|
||||
outbound, err = d.client.FilterLogs(ctx, ethereum.FilterQuery{
|
||||
FromBlock: from,
|
||||
ToBlock: to,
|
||||
Topics: d.outboundTopics(d.accounts),
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
inboundOrMixed, err = d.client.FilterLogs(ctx, ethereum.FilterQuery{
|
||||
FromBlock: from,
|
||||
ToBlock: to,
|
||||
Topics: d.inboundERC20OutboundERC1155Topics(d.accounts),
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
} else {
|
||||
inboundOrMixed, err = d.client.FilterLogs(ctx, ethereum.FilterQuery{
|
||||
FromBlock: from,
|
||||
ToBlock: to,
|
||||
Topics: d.inboundTopics(d.accounts),
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
inbound1155, err := d.client.FilterLogs(ctx, ethereum.FilterQuery{
|
||||
FromBlock: from,
|
||||
ToBlock: to,
|
||||
Topics: d.inboundTopicsERC1155(d.accounts),
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
logs := concatLogs(outbound, inboundOrMixed, inbound1155)
|
||||
|
||||
if len(logs) == 0 {
|
||||
log.Debug("no logs found for account")
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
rst, err := d.blocksFromLogs(parent, logs)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if len(rst) == 0 {
|
||||
log.Warn("no headers found in logs for account", "chainID", d.client.NetworkID(), "addresses", d.accounts, "from", from, "to", to)
|
||||
} else {
|
||||
headers = append(headers, rst...)
|
||||
log.Debug("found erc20 transfers for account", "chainID", d.client.NetworkID(), "addresses", d.accounts,
|
||||
"from", from, "to", to, "headers", len(headers))
|
||||
}
|
||||
|
||||
log.Debug("get erc20 transfers in range end", "chainID", d.client.NetworkID(),
|
||||
"from", from, "to", to, "headers", len(headers), "took", time.Since(start))
|
||||
return headers, nil
|
||||
}
|
||||
|
||||
func concatLogs(slices ...[]types.Log) []types.Log {
|
||||
var totalLen int
|
||||
for _, s := range slices {
|
||||
totalLen += len(s)
|
||||
}
|
||||
tmp := make([]types.Log, totalLen)
|
||||
var i int
|
||||
for _, s := range slices {
|
||||
i += copy(tmp[i:], s)
|
||||
}
|
||||
|
||||
return tmp
|
||||
}
|
||||
82
vendor/github.com/status-im/status-go/services/wallet/transfer/iterative.go
generated
vendored
Normal file
@@ -0,0 +1,82 @@
|
||||
package transfer

import (
	"context"
	"errors"
	"math/big"

	"github.com/ethereum/go-ethereum/log"
)

// SetupIterativeDownloader configures IterativeDownloader with last known synced block.
func SetupIterativeDownloader(
	client HeaderReader, downloader BatchDownloader, size *big.Int, to *big.Int, from *big.Int) (*IterativeDownloader, error) {

	if to == nil || from == nil {
		return nil, errors.New("to or from cannot be nil")
	}

	log.Debug("iterative downloader", "from", from, "to", to, "size", size)
	d := &IterativeDownloader{
		client:     client,
		batchSize:  size,
		downloader: downloader,
		from:       from,
		to:         to,
	}
	return d, nil
}

// BatchDownloader is the interface for loading transfers in batches over a specified range of blocks.
type BatchDownloader interface {
	GetHeadersInRange(ctx context.Context, from, to *big.Int) ([]*DBHeader, error)
}

// IterativeDownloader downloads batches of transfers in a specified size.
type IterativeDownloader struct {
	client HeaderReader

	batchSize *big.Int

	downloader BatchDownloader

	from, to *big.Int
	previous *big.Int
}

// Finished returns true when the remaining range is empty, i.e. from equals to.
func (d *IterativeDownloader) Finished() bool {
	return d.from.Cmp(d.to) == 0
}

// Header returns the last synced block number.
func (d *IterativeDownloader) Header() *big.Int {
	return d.previous
}

// Next moves closer to the end on every new iteration.
func (d *IterativeDownloader) Next(parent context.Context) ([]*DBHeader, *big.Int, *big.Int, error) {
	to := d.to
	from := new(big.Int).Sub(to, d.batchSize)
	// clamp the lower bound so we never go below the configured starting block
	if from.Cmp(d.from) == -1 {
		from = d.from
	}
	headers, err := d.downloader.GetHeadersInRange(parent, from, to)
	log.Debug("load erc20 transfers in range", "from", from, "to", to, "batchSize", d.batchSize)
	if err != nil {
		log.Error("failed to get transfer in between two blocks", "from", from, "to", to, "error", err)
		return nil, nil, nil, err
	}

	d.previous, d.to = d.to, from
	return headers, d.from, to, nil
}

// Revert rolls back the progress of the last step. Should be used if the application
// failed to process the downloaded transfers, for example failed to persist them.
func (d *IterativeDownloader) Revert() {
	if d.previous != nil {
		d.from = d.previous
	}
}
|
||||
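A minimal sketch (not part of the vendored sources) of how the iterative downloader above is typically driven; the wrapper function, the persist callback and the use of DefaultNodeBlockChunkSize as the batch size are assumptions for illustration.

// Illustrative only: walk a block range in fixed-size batches, retrying a
// batch via Revert when persisting its headers fails.
func fetchIteratively(ctx context.Context, client HeaderReader, erc20 BatchDownloader,
	from, to *big.Int, persist func([]*DBHeader) error) error {
	it, err := SetupIterativeDownloader(client, erc20, big.NewInt(DefaultNodeBlockChunkSize), to, from)
	if err != nil {
		return err
	}
	for !it.Finished() {
		headers, _, _, err := it.Next(ctx)
		if err != nil {
			return err
		}
		if err := persist(headers); err != nil {
			it.Revert() // roll the range back so the failed batch is retried
			return err
		}
	}
	return nil
}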
272
vendor/github.com/status-im/status-go/services/wallet/transfer/query.go
generated
vendored
Normal file
@@ -0,0 +1,272 @@
|
||||
package transfer
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"database/sql"
|
||||
"fmt"
|
||||
"math/big"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/core/types"
|
||||
"github.com/status-im/status-go/services/wallet/bigint"
|
||||
w_common "github.com/status-im/status-go/services/wallet/common"
|
||||
)
|
||||
|
||||
const baseTransfersQuery = "SELECT hash, type, blk_hash, blk_number, timestamp, address, tx, sender, receipt, log, network_id, base_gas_fee, COALESCE(multi_transaction_id, 0) %s FROM transfers"
|
||||
const preloadedTransfersQuery = "SELECT hash, type, address, log, token_id, amount_padded128hex FROM transfers"
|
||||
|
||||
type transfersQuery struct {
|
||||
buf *bytes.Buffer
|
||||
args []interface{}
|
||||
whereAdded bool
|
||||
subQuery bool
|
||||
}
|
||||
|
||||
func newTransfersQuery() *transfersQuery {
|
||||
newQuery := newEmptyQuery()
|
||||
transfersQueryString := fmt.Sprintf(baseTransfersQuery, "")
|
||||
newQuery.buf.WriteString(transfersQueryString)
|
||||
return newQuery
|
||||
}
|
||||
|
||||
func newTransfersQueryForPreloadedTransactions() *transfersQuery {
|
||||
newQuery := newEmptyQuery()
|
||||
newQuery.buf.WriteString(preloadedTransfersQuery)
|
||||
return newQuery
|
||||
}
|
||||
|
||||
func newSubQuery() *transfersQuery {
|
||||
newQuery := newEmptyQuery()
|
||||
newQuery.subQuery = true
|
||||
return newQuery
|
||||
}
|
||||
|
||||
func newEmptyQuery() *transfersQuery {
|
||||
buf := bytes.NewBuffer(nil)
|
||||
return &transfersQuery{buf: buf}
|
||||
}
|
||||
|
||||
func (q *transfersQuery) addWhereSeparator(separator SeparatorType) {
|
||||
if !q.whereAdded {
|
||||
if !q.subQuery {
|
||||
q.buf.WriteString(" WHERE")
|
||||
}
|
||||
q.whereAdded = true
|
||||
} else if separator == OrSeparator {
|
||||
q.buf.WriteString(" OR")
|
||||
} else if separator == AndSeparator {
|
||||
q.buf.WriteString(" AND")
|
||||
} else if separator != NoSeparator {
|
||||
panic("Unknown separator. Need to handle current SeparatorType value")
|
||||
}
|
||||
}
|
||||
|
||||
type SeparatorType int
|
||||
|
||||
// Beware: please update addWhereSeparator if changing this enum
|
||||
const (
|
||||
NoSeparator SeparatorType = iota + 1
|
||||
OrSeparator
|
||||
AndSeparator
|
||||
)
|
||||
|
||||
// addSubQuery adds where clause formed as: WHERE/<separator> (<subQuery>)
|
||||
func (q *transfersQuery) addSubQuery(subQuery *transfersQuery, separator SeparatorType) *transfersQuery {
|
||||
q.addWhereSeparator(separator)
|
||||
q.buf.WriteString(" (")
|
||||
q.buf.Write(subQuery.buf.Bytes())
|
||||
q.buf.WriteString(")")
|
||||
q.args = append(q.args, subQuery.args...)
|
||||
return q
|
||||
}
|
||||
|
||||
func (q *transfersQuery) FilterStart(start *big.Int) *transfersQuery {
|
||||
if start != nil {
|
||||
q.addWhereSeparator(AndSeparator)
|
||||
q.buf.WriteString(" blk_number >= ?")
|
||||
q.args = append(q.args, (*bigint.SQLBigInt)(start))
|
||||
}
|
||||
return q
|
||||
}
|
||||
|
||||
func (q *transfersQuery) FilterEnd(end *big.Int) *transfersQuery {
|
||||
if end != nil {
|
||||
q.addWhereSeparator(AndSeparator)
|
||||
q.buf.WriteString(" blk_number <= ?")
|
||||
q.args = append(q.args, (*bigint.SQLBigInt)(end))
|
||||
}
|
||||
return q
|
||||
}
|
||||
|
||||
func (q *transfersQuery) FilterLoaded(loaded int) *transfersQuery {
|
||||
q.addWhereSeparator(AndSeparator)
|
||||
q.buf.WriteString(" loaded = ? ")
|
||||
q.args = append(q.args, loaded)
|
||||
|
||||
return q
|
||||
}
|
||||
|
||||
func (q *transfersQuery) FilterNetwork(network uint64) *transfersQuery {
|
||||
q.addWhereSeparator(AndSeparator)
|
||||
q.buf.WriteString(" network_id = ?")
|
||||
q.args = append(q.args, network)
|
||||
return q
|
||||
}
|
||||
|
||||
func (q *transfersQuery) FilterAddress(address common.Address) *transfersQuery {
|
||||
q.addWhereSeparator(AndSeparator)
|
||||
q.buf.WriteString(" address = ?")
|
||||
q.args = append(q.args, address)
|
||||
return q
|
||||
}
|
||||
|
||||
func (q *transfersQuery) FilterTransactionID(hash common.Hash) *transfersQuery {
|
||||
q.addWhereSeparator(AndSeparator)
|
||||
q.buf.WriteString(" hash = ?")
|
||||
q.args = append(q.args, hash)
|
||||
return q
|
||||
}
|
||||
|
||||
func (q *transfersQuery) FilterTransactionHash(hash common.Hash) *transfersQuery {
|
||||
q.addWhereSeparator(AndSeparator)
|
||||
q.buf.WriteString(" tx_hash = ?")
|
||||
q.args = append(q.args, hash)
|
||||
return q
|
||||
}
|
||||
|
||||
func (q *transfersQuery) FilterBlockHash(blockHash common.Hash) *transfersQuery {
|
||||
q.addWhereSeparator(AndSeparator)
|
||||
q.buf.WriteString(" blk_hash = ?")
|
||||
q.args = append(q.args, blockHash)
|
||||
return q
|
||||
}
|
||||
|
||||
func (q *transfersQuery) FilterBlockNumber(blockNumber *big.Int) *transfersQuery {
|
||||
q.addWhereSeparator(AndSeparator)
|
||||
q.buf.WriteString(" blk_number = ?")
|
||||
q.args = append(q.args, (*bigint.SQLBigInt)(blockNumber))
|
||||
return q
|
||||
}
|
||||
|
||||
func ascendingString(ascending bool) string {
|
||||
if ascending {
|
||||
return "ASC"
|
||||
}
|
||||
return "DESC"
|
||||
}
|
||||
|
||||
func (q *transfersQuery) SortByBlockNumberAndHash() *transfersQuery {
|
||||
q.buf.WriteString(" ORDER BY blk_number DESC, hash ASC ")
|
||||
return q
|
||||
}
|
||||
|
||||
func (q *transfersQuery) SortByTimestamp(ascending bool) *transfersQuery {
|
||||
q.buf.WriteString(fmt.Sprintf(" ORDER BY timestamp %s ", ascendingString(ascending)))
|
||||
return q
|
||||
}
|
||||
|
||||
func (q *transfersQuery) Limit(pageSize int64) *transfersQuery {
|
||||
q.buf.WriteString(" LIMIT ?")
|
||||
q.args = append(q.args, pageSize)
|
||||
return q
|
||||
}
|
||||
|
||||
func (q *transfersQuery) FilterType(txType w_common.Type) *transfersQuery {
|
||||
q.addWhereSeparator(AndSeparator)
|
||||
q.buf.WriteString(" type = ?")
|
||||
q.args = append(q.args, txType)
|
||||
return q
|
||||
}
|
||||
|
||||
func (q *transfersQuery) FilterTokenAddress(address common.Address) *transfersQuery {
|
||||
q.addWhereSeparator(AndSeparator)
|
||||
q.buf.WriteString(" token_address = ?")
|
||||
q.args = append(q.args, address)
|
||||
return q
|
||||
}
|
||||
|
||||
func (q *transfersQuery) FilterTokenID(tokenID *big.Int) *transfersQuery {
|
||||
q.addWhereSeparator(AndSeparator)
|
||||
q.buf.WriteString(" token_id = ?")
|
||||
q.args = append(q.args, (*bigint.SQLBigIntBytes)(tokenID))
|
||||
return q
|
||||
}
|
||||
|
||||
func (q *transfersQuery) String() string {
|
||||
return q.buf.String()
|
||||
}
|
||||
|
||||
func (q *transfersQuery) Args() []interface{} {
|
||||
return q.args
|
||||
}
|
||||
|
||||
func (q *transfersQuery) TransferScan(rows *sql.Rows) (rst []Transfer, err error) {
|
||||
for rows.Next() {
|
||||
transfer := Transfer{
|
||||
BlockNumber: &big.Int{},
|
||||
Transaction: &types.Transaction{},
|
||||
Receipt: &types.Receipt{},
|
||||
Log: &types.Log{},
|
||||
}
|
||||
err = rows.Scan(
|
||||
&transfer.ID, &transfer.Type, &transfer.BlockHash,
|
||||
(*bigint.SQLBigInt)(transfer.BlockNumber), &transfer.Timestamp, &transfer.Address,
|
||||
&JSONBlob{transfer.Transaction}, &transfer.From, &JSONBlob{transfer.Receipt}, &JSONBlob{transfer.Log}, &transfer.NetworkID, &transfer.BaseGasFees, &transfer.MultiTransactionID)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
rst = append(rst, transfer)
|
||||
}
|
||||
|
||||
return rst, nil
|
||||
}
|
||||
|
||||
func (q *transfersQuery) PreloadedTransactionScan(rows *sql.Rows) (rst []*PreloadedTransaction, err error) {
|
||||
transfers := make([]Transfer, 0)
|
||||
for rows.Next() {
|
||||
transfer := Transfer{
|
||||
Log: &types.Log{},
|
||||
}
|
||||
tokenValue := sql.NullString{}
|
||||
tokenID := sql.RawBytes{}
|
||||
err = rows.Scan(
|
||||
&transfer.ID, &transfer.Type,
|
||||
&transfer.Address,
|
||||
&JSONBlob{transfer.Log},
|
||||
&tokenID, &tokenValue)
|
||||
|
||||
if len(tokenID) > 0 {
|
||||
transfer.TokenID = new(big.Int).SetBytes(tokenID)
|
||||
}
|
||||
|
||||
if tokenValue.Valid {
|
||||
var ok bool
|
||||
transfer.TokenValue, ok = new(big.Int).SetString(tokenValue.String, 16)
|
||||
if !ok {
|
||||
panic("failed to parse token value")
|
||||
}
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
transfers = append(transfers, transfer)
|
||||
}
|
||||
|
||||
rst = make([]*PreloadedTransaction, 0, len(transfers))
|
||||
|
||||
for _, transfer := range transfers {
|
||||
preloadedTransaction := &PreloadedTransaction{
|
||||
ID: transfer.ID,
|
||||
Type: transfer.Type,
|
||||
Address: transfer.Address,
|
||||
Log: transfer.Log,
|
||||
TokenID: transfer.TokenID,
|
||||
Value: transfer.TokenValue,
|
||||
}
|
||||
|
||||
rst = append(rst, preloadedTransaction)
|
||||
}
|
||||
|
||||
return rst, nil
|
||||
}
|
||||
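A sketch (assumed, not from the vendored sources) of how the builder above is composed and executed; the wrapper function and this particular filter combination are illustrative, only the transfersQuery methods come from this file.

// Illustrative only: build a filtered, ordered, limited query over the
// transfers table and scan the matching rows.
func queryTransfers(db *sql.DB, chainID uint64, address common.Address, toBlock *big.Int, limit int64) ([]Transfer, error) {
	query := newTransfersQuery().
		FilterNetwork(chainID).
		FilterAddress(address).
		FilterEnd(toBlock).
		FilterLoaded(1).
		SortByBlockNumberAndHash().
		Limit(limit)

	rows, err := db.Query(query.String(), query.Args()...)
	if err != nil {
		return nil, err
	}
	defer rows.Close()

	return query.TransferScan(rows)
}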
136
vendor/github.com/status-im/status-go/services/wallet/transfer/reactor.go
generated
vendored
Normal file
@@ -0,0 +1,136 @@
|
||||
package transfer
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"math/big"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/core/types"
|
||||
"github.com/ethereum/go-ethereum/event"
|
||||
"github.com/status-im/status-go/multiaccounts/accounts"
|
||||
"github.com/status-im/status-go/rpc/chain"
|
||||
"github.com/status-im/status-go/services/wallet/balance"
|
||||
"github.com/status-im/status-go/services/wallet/blockchainstate"
|
||||
"github.com/status-im/status-go/services/wallet/token"
|
||||
"github.com/status-im/status-go/transactions"
|
||||
)
|
||||
|
||||
const (
|
||||
ReactorNotStarted string = "reactor not started"
|
||||
|
||||
NonArchivalNodeBlockChunkSize = 100
|
||||
DefaultNodeBlockChunkSize = 100000
|
||||
)
|
||||
|
||||
var errAlreadyRunning = errors.New("already running")
|
||||
|
||||
type FetchStrategyType int32
|
||||
|
||||
const (
|
||||
SequentialFetchStrategyType FetchStrategyType = iota
|
||||
)
|
||||
|
||||
// HeaderReader is the interface for reading headers by block number or hash.
|
||||
type HeaderReader interface {
|
||||
HeaderByHash(ctx context.Context, hash common.Hash) (*types.Header, error)
|
||||
HeaderByNumber(ctx context.Context, number *big.Int) (*types.Header, error)
|
||||
}
|
||||
|
||||
type HistoryFetcher interface {
|
||||
start() error
|
||||
stop()
|
||||
kind() FetchStrategyType
|
||||
|
||||
getTransfersByAddress(ctx context.Context, chainID uint64, address common.Address, toBlock *big.Int,
|
||||
limit int64) ([]Transfer, error)
|
||||
}
|
||||
|
||||
// Reactor listens to new blocks and stores transfers into the database.
|
||||
type Reactor struct {
|
||||
db *Database
|
||||
blockDAO *BlockDAO
|
||||
blockRangesSeqDAO *BlockRangeSequentialDAO
|
||||
accountsDB *accounts.Database
|
||||
feed *event.Feed
|
||||
transactionManager *TransactionManager
|
||||
pendingTxManager *transactions.PendingTxTracker
|
||||
tokenManager *token.Manager
|
||||
strategy HistoryFetcher
|
||||
balanceCacher balance.Cacher
|
||||
omitHistory bool
|
||||
blockChainState *blockchainstate.BlockChainState
|
||||
chainIDs []uint64
|
||||
}
|
||||
|
||||
func NewReactor(db *Database, blockDAO *BlockDAO, blockRangesSeqDAO *BlockRangeSequentialDAO, accountsDB *accounts.Database, feed *event.Feed, tm *TransactionManager,
|
||||
pendingTxManager *transactions.PendingTxTracker, tokenManager *token.Manager,
|
||||
balanceCacher balance.Cacher, omitHistory bool, blockChainState *blockchainstate.BlockChainState) *Reactor {
|
||||
return &Reactor{
|
||||
db: db,
|
||||
accountsDB: accountsDB,
|
||||
blockDAO: blockDAO,
|
||||
blockRangesSeqDAO: blockRangesSeqDAO,
|
||||
feed: feed,
|
||||
transactionManager: tm,
|
||||
pendingTxManager: pendingTxManager,
|
||||
tokenManager: tokenManager,
|
||||
balanceCacher: balanceCacher,
|
||||
omitHistory: omitHistory,
|
||||
blockChainState: blockChainState,
|
||||
}
|
||||
}
|
||||
|
||||
// start runs the reactor loop in the background.
|
||||
func (r *Reactor) start(chainClients map[uint64]chain.ClientInterface, accounts []common.Address) error {
|
||||
chainIDs := []uint64{}
|
||||
for _, client := range chainClients {
|
||||
chainIDs = append(chainIDs, client.NetworkID())
|
||||
}
|
||||
r.chainIDs = chainIDs
|
||||
r.strategy = r.createFetchStrategy(chainClients, accounts)
|
||||
return r.strategy.start()
|
||||
}
|
||||
|
||||
// stop stops the reactor loop and waits until it exits.
|
||||
func (r *Reactor) stop() {
|
||||
if r.strategy != nil {
|
||||
r.strategy.stop()
|
||||
}
|
||||
}
|
||||
|
||||
func (r *Reactor) restart(chainClients map[uint64]chain.ClientInterface, accounts []common.Address) error {
|
||||
|
||||
r.stop()
|
||||
return r.start(chainClients, accounts)
|
||||
}
|
||||
|
||||
func (r *Reactor) createFetchStrategy(chainClients map[uint64]chain.ClientInterface,
|
||||
accounts []common.Address) HistoryFetcher {
|
||||
|
||||
return NewSequentialFetchStrategy(
|
||||
r.db,
|
||||
r.blockDAO,
|
||||
r.blockRangesSeqDAO,
|
||||
r.accountsDB,
|
||||
r.feed,
|
||||
r.transactionManager,
|
||||
r.pendingTxManager,
|
||||
r.tokenManager,
|
||||
chainClients,
|
||||
accounts,
|
||||
r.balanceCacher,
|
||||
r.omitHistory,
|
||||
r.blockChainState,
|
||||
)
|
||||
}
|
||||
|
||||
func (r *Reactor) getTransfersByAddress(ctx context.Context, chainID uint64, address common.Address, toBlock *big.Int,
|
||||
limit int64) ([]Transfer, error) {
|
||||
|
||||
if r.strategy != nil {
|
||||
return r.strategy.getTransfersByAddress(ctx, chainID, address, toBlock, limit)
|
||||
}
|
||||
|
||||
return nil, errors.New(ReactorNotStarted)
|
||||
}
|
||||
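A short, package-internal sketch of the Reactor lifecycle above; the caller function is an assumption for illustration.

// Illustrative only: start the reactor for a set of chains and accounts and
// restart it when the watched accounts change.
func runReactor(r *Reactor, chainClients map[uint64]chain.ClientInterface, watched []common.Address) error {
	if err := r.start(chainClients, watched); err != nil {
		return err
	}
	// ... later, when accounts are added or removed:
	return r.restart(chainClients, watched)
}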
126
vendor/github.com/status-im/status-go/services/wallet/transfer/sequential_fetch_strategy.go
generated
vendored
Normal file
@@ -0,0 +1,126 @@
|
||||
package transfer
|
||||
|
||||
import (
|
||||
"context"
|
||||
"math/big"
|
||||
"sync"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/event"
|
||||
"github.com/ethereum/go-ethereum/log"
|
||||
"github.com/status-im/status-go/multiaccounts/accounts"
|
||||
"github.com/status-im/status-go/rpc/chain"
|
||||
"github.com/status-im/status-go/services/wallet/async"
|
||||
"github.com/status-im/status-go/services/wallet/balance"
|
||||
"github.com/status-im/status-go/services/wallet/blockchainstate"
|
||||
"github.com/status-im/status-go/services/wallet/token"
|
||||
"github.com/status-im/status-go/services/wallet/walletevent"
|
||||
"github.com/status-im/status-go/transactions"
|
||||
)
|
||||
|
||||
func NewSequentialFetchStrategy(db *Database, blockDAO *BlockDAO, blockRangesSeqDAO *BlockRangeSequentialDAO, accountsDB *accounts.Database, feed *event.Feed,
|
||||
transactionManager *TransactionManager, pendingTxManager *transactions.PendingTxTracker,
|
||||
tokenManager *token.Manager,
|
||||
chainClients map[uint64]chain.ClientInterface,
|
||||
accounts []common.Address,
|
||||
balanceCacher balance.Cacher,
|
||||
omitHistory bool,
|
||||
blockChainState *blockchainstate.BlockChainState,
|
||||
) *SequentialFetchStrategy {
|
||||
|
||||
return &SequentialFetchStrategy{
|
||||
db: db,
|
||||
blockDAO: blockDAO,
|
||||
blockRangesSeqDAO: blockRangesSeqDAO,
|
||||
accountsDB: accountsDB,
|
||||
feed: feed,
|
||||
transactionManager: transactionManager,
|
||||
pendingTxManager: pendingTxManager,
|
||||
tokenManager: tokenManager,
|
||||
chainClients: chainClients,
|
||||
accounts: accounts,
|
||||
balanceCacher: balanceCacher,
|
||||
omitHistory: omitHistory,
|
||||
blockChainState: blockChainState,
|
||||
}
|
||||
}
|
||||
|
||||
type SequentialFetchStrategy struct {
|
||||
db *Database
|
||||
blockDAO *BlockDAO
|
||||
blockRangesSeqDAO *BlockRangeSequentialDAO
|
||||
accountsDB *accounts.Database
|
||||
feed *event.Feed
|
||||
mu sync.Mutex
|
||||
group *async.Group
|
||||
transactionManager *TransactionManager
|
||||
pendingTxManager *transactions.PendingTxTracker
|
||||
tokenManager *token.Manager
|
||||
chainClients map[uint64]chain.ClientInterface
|
||||
accounts []common.Address
|
||||
balanceCacher balance.Cacher
|
||||
omitHistory bool
|
||||
blockChainState *blockchainstate.BlockChainState
|
||||
}
|
||||
|
||||
func (s *SequentialFetchStrategy) newCommand(chainClient chain.ClientInterface,
|
||||
accounts []common.Address) async.Commander {
|
||||
|
||||
return newLoadBlocksAndTransfersCommand(accounts, s.db, s.accountsDB, s.blockDAO, s.blockRangesSeqDAO, chainClient, s.feed,
|
||||
s.transactionManager, s.pendingTxManager, s.tokenManager, s.balanceCacher, s.omitHistory, s.blockChainState)
|
||||
}
|
||||
|
||||
func (s *SequentialFetchStrategy) start() error {
|
||||
s.mu.Lock()
|
||||
defer s.mu.Unlock()
|
||||
|
||||
if s.group != nil {
|
||||
return errAlreadyRunning
|
||||
}
|
||||
s.group = async.NewGroup(context.Background())
|
||||
|
||||
if s.feed != nil {
|
||||
s.feed.Send(walletevent.Event{
|
||||
Type: EventFetchingRecentHistory,
|
||||
Accounts: s.accounts,
|
||||
})
|
||||
}
|
||||
|
||||
for _, chainClient := range s.chainClients {
|
||||
ctl := s.newCommand(chainClient, s.accounts)
|
||||
s.group.Add(ctl.Command())
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// stop stops the fetch strategy loop and waits until it exits.
|
||||
func (s *SequentialFetchStrategy) stop() {
|
||||
s.mu.Lock()
|
||||
defer s.mu.Unlock()
|
||||
if s.group == nil {
|
||||
return
|
||||
}
|
||||
s.group.Stop()
|
||||
s.group.Wait()
|
||||
s.group = nil
|
||||
}
|
||||
|
||||
func (s *SequentialFetchStrategy) kind() FetchStrategyType {
|
||||
return SequentialFetchStrategyType
|
||||
}
|
||||
|
||||
func (s *SequentialFetchStrategy) getTransfersByAddress(ctx context.Context, chainID uint64, address common.Address, toBlock *big.Int,
|
||||
limit int64) ([]Transfer, error) {
|
||||
|
||||
log.Debug("[WalletAPI:: GetTransfersByAddress] get transfers for an address", "address", address,
|
||||
"chainID", chainID, "toBlock", toBlock, "limit", limit)
|
||||
|
||||
rst, err := s.db.GetTransfersByAddress(chainID, address, toBlock, limit)
|
||||
if err != nil {
|
||||
log.Error("[WalletAPI:: GetTransfersByAddress] can't fetch transfers", "err", err)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return rst, nil
|
||||
}
|
||||
262
vendor/github.com/status-im/status-go/services/wallet/transfer/swap_identifier.go
generated
vendored
Normal file
@@ -0,0 +1,262 @@
|
||||
package transfer
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"math/big"
|
||||
|
||||
"github.com/ethereum/go-ethereum/accounts/abi/bind"
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/common/hexutil"
|
||||
"github.com/ethereum/go-ethereum/core/types"
|
||||
|
||||
uniswapv2 "github.com/status-im/status-go/contracts/uniswapV2"
|
||||
uniswapv3 "github.com/status-im/status-go/contracts/uniswapV3"
|
||||
"github.com/status-im/status-go/rpc/chain"
|
||||
w_common "github.com/status-im/status-go/services/wallet/common"
|
||||
"github.com/status-im/status-go/services/wallet/token"
|
||||
)
|
||||
|
||||
const ETHSymbol string = "ETH"
|
||||
const WETHSymbol string = "WETH"
|
||||
|
||||
func fetchUniswapV2PairInfo(ctx context.Context, client chain.ClientInterface, pairAddress common.Address) (*common.Address, *common.Address, error) {
|
||||
caller, err := uniswapv2.NewUniswapv2Caller(pairAddress, client)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
token0Address, err := caller.Token0(&bind.CallOpts{
|
||||
Context: ctx,
|
||||
})
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
token1Address, err := caller.Token1(&bind.CallOpts{
|
||||
Context: ctx,
|
||||
})
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
return &token0Address, &token1Address, nil
|
||||
}
|
||||
|
||||
func fetchUniswapV3PoolInfo(ctx context.Context, client chain.ClientInterface, poolAddress common.Address) (*common.Address, *common.Address, error) {
|
||||
caller, err := uniswapv3.NewUniswapv3Caller(poolAddress, client)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
token0Address, err := caller.Token0(&bind.CallOpts{
|
||||
Context: ctx,
|
||||
})
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
token1Address, err := caller.Token1(&bind.CallOpts{
|
||||
Context: ctx,
|
||||
})
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
return &token0Address, &token1Address, nil
|
||||
}
|
||||
|
||||
func identifyUniswapV2Asset(tokenManager *token.Manager, chainID uint64, amount0 *big.Int, contractAddress0 common.Address, amount1 *big.Int, contractAddress1 common.Address) (token *token.Token, amount *big.Int, err error) {
|
||||
// Either amount0 or amount1 should be 0
|
||||
if amount1.Sign() == 0 && amount0.Sign() != 0 {
|
||||
token = tokenManager.FindTokenByAddress(chainID, contractAddress0)
|
||||
if token == nil {
|
||||
err = fmt.Errorf("couldn't find symbol for token0 %v", contractAddress0)
|
||||
return
|
||||
}
|
||||
amount = amount0
|
||||
} else if amount0.Sign() == 0 && amount1.Sign() != 0 {
|
||||
token = tokenManager.FindTokenByAddress(chainID, contractAddress1)
|
||||
if token == nil {
|
||||
err = fmt.Errorf("couldn't find symbol for token1 %v", contractAddress1)
|
||||
return
|
||||
}
|
||||
amount = amount1
|
||||
} else {
|
||||
err = fmt.Errorf("couldn't identify token %v %v %v %v", contractAddress0, amount0, contractAddress1, amount1)
|
||||
return
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
func fetchUniswapV2Info(ctx context.Context, client chain.ClientInterface, tokenManager *token.Manager, log *types.Log) (fromAsset string, fromAmount *hexutil.Big, toAsset string, toAmount *hexutil.Big, err error) {
|
||||
pairAddress, _, _, amount0In, amount1In, amount0Out, amount1Out, err := w_common.ParseUniswapV2Log(log)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
token0ContractAddress, token1ContractAddress, err := fetchUniswapV2PairInfo(ctx, client, pairAddress)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
fromToken, fromAmountInt, err := identifyUniswapV2Asset(tokenManager, client.NetworkID(), amount0In, *token0ContractAddress, amount1In, *token1ContractAddress)
|
||||
if err != nil {
|
||||
// "Soft" error, allow to continue with unknown asset
|
||||
fromAsset = ""
|
||||
fromAmount = (*hexutil.Big)(big.NewInt(0))
|
||||
} else {
|
||||
fromAsset = fromToken.Symbol
|
||||
fromAmount = (*hexutil.Big)(fromAmountInt)
|
||||
}
|
||||
|
||||
toToken, toAmountInt, err := identifyUniswapV2Asset(tokenManager, client.NetworkID(), amount0Out, *token0ContractAddress, amount1Out, *token1ContractAddress)
|
||||
if err != nil {
|
||||
// "Soft" error, allow to continue with unknown asset
|
||||
toAsset = ""
|
||||
toAmount = (*hexutil.Big)(big.NewInt(0))
|
||||
} else {
|
||||
toAsset = toToken.Symbol
|
||||
toAmount = (*hexutil.Big)(toAmountInt)
|
||||
}
|
||||
|
||||
err = nil
|
||||
return
|
||||
}
|
||||
|
||||
func identifyUniswapV3Assets(tokenManager *token.Manager, chainID uint64, amount0 *big.Int, contractAddress0 common.Address, amount1 *big.Int, contractAddress1 common.Address) (fromToken *token.Token, fromAmount *big.Int, toToken *token.Token, toAmount *big.Int, err error) {
|
||||
token0 := tokenManager.FindTokenByAddress(chainID, contractAddress0)
|
||||
if token0 == nil {
|
||||
err = fmt.Errorf("couldn't find symbol for token0 %v", contractAddress0)
|
||||
return
|
||||
}
|
||||
|
||||
token1 := tokenManager.FindTokenByAddress(chainID, contractAddress1)
|
||||
if token1 == nil {
|
||||
err = fmt.Errorf("couldn't find symbol for token1 %v", contractAddress1)
|
||||
return
|
||||
}
|
||||
|
||||
// amount0 and amount1 are the balance deltas of the pool
|
||||
// The positive amount is how much the sender spent
|
||||
// The negative amount is how much the recipient got
|
||||
if amount0.Sign() > 0 && amount1.Sign() < 0 {
|
||||
fromToken = token0
|
||||
fromAmount = amount0
|
||||
toToken = token1
|
||||
toAmount = new(big.Int).Neg(amount1)
|
||||
} else if amount0.Sign() < 0 && amount1.Sign() > 0 {
|
||||
fromToken = token1
|
||||
fromAmount = amount1
|
||||
toToken = token0
|
||||
toAmount = new(big.Int).Neg(amount0)
|
||||
} else {
|
||||
err = fmt.Errorf("couldn't identify tokens %v %v %v %v", contractAddress0, amount0, contractAddress1, amount1)
|
||||
return
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
func fetchUniswapV3Info(ctx context.Context, client chain.ClientInterface, tokenManager *token.Manager, log *types.Log) (fromAsset string, fromAmount *hexutil.Big, toAsset string, toAmount *hexutil.Big, err error) {
|
||||
poolAddress, _, _, amount0, amount1, err := w_common.ParseUniswapV3Log(log)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
token0ContractAddress, token1ContractAddress, err := fetchUniswapV3PoolInfo(ctx, client, poolAddress)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
fromToken, fromAmountInt, toToken, toAmountInt, err := identifyUniswapV3Assets(tokenManager, client.NetworkID(), amount0, *token0ContractAddress, amount1, *token1ContractAddress)
|
||||
if err != nil {
|
||||
// "Soft" error, allow to continue with unknown asset
|
||||
err = nil
|
||||
fromAsset = ""
|
||||
fromAmount = (*hexutil.Big)(big.NewInt(0))
|
||||
toAsset = ""
|
||||
toAmount = (*hexutil.Big)(big.NewInt(0))
|
||||
} else {
|
||||
fromAsset = fromToken.Symbol
|
||||
fromAmount = (*hexutil.Big)(fromAmountInt)
|
||||
toAsset = toToken.Symbol
|
||||
toAmount = (*hexutil.Big)(toAmountInt)
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
func fetchUniswapInfo(ctx context.Context, client chain.ClientInterface, tokenManager *token.Manager, log *types.Log, logType w_common.EventType) (fromAsset string, fromAmount *hexutil.Big, toAsset string, toAmount *hexutil.Big, err error) {
|
||||
switch logType {
|
||||
case w_common.UniswapV2SwapEventType:
|
||||
return fetchUniswapV2Info(ctx, client, tokenManager, log)
|
||||
case w_common.UniswapV3SwapEventType:
|
||||
return fetchUniswapV3Info(ctx, client, tokenManager, log)
|
||||
}
|
||||
err = fmt.Errorf("wrong log type %s", logType)
|
||||
return
|
||||
}
|
||||
|
||||
// Build a Swap multitransaction from a list containing one or several uniswapV2/uniswapV3 subTxs
|
||||
// We only care about the first and last swap to identify the input/output token and amounts
|
||||
func buildUniswapSwapMultitransaction(ctx context.Context, client chain.ClientInterface, tokenManager *token.Manager, transfer *Transfer) (*MultiTransaction, error) {
|
||||
multiTransaction := MultiTransaction{
|
||||
Type: MultiTransactionSwap,
|
||||
FromNetworkID: transfer.NetworkID,
|
||||
FromTxHash: transfer.Receipt.TxHash,
|
||||
FromAddress: transfer.Address,
|
||||
ToNetworkID: transfer.NetworkID,
|
||||
ToTxHash: transfer.Receipt.TxHash,
|
||||
ToAddress: transfer.Address,
|
||||
Timestamp: transfer.Timestamp,
|
||||
}
|
||||
|
||||
var firstSwapLog, lastSwapLog *types.Log
|
||||
var firstSwapLogType, lastSwapLogType w_common.EventType
|
||||
hasWETHDepositLog := false
|
||||
hasWETHWithdrawalLog := false
|
||||
|
||||
for _, ethlog := range transfer.Receipt.Logs {
|
||||
logType := w_common.GetEventType(ethlog)
|
||||
switch logType {
|
||||
case w_common.WETHDepositEventType:
|
||||
hasWETHDepositLog = true
|
||||
case w_common.WETHWithdrawalEventType:
|
||||
hasWETHWithdrawalLog = true
|
||||
case w_common.UniswapV2SwapEventType, w_common.UniswapV3SwapEventType:
|
||||
if firstSwapLog == nil {
|
||||
firstSwapLog = ethlog
|
||||
firstSwapLogType = logType
|
||||
}
|
||||
lastSwapLog = ethlog
|
||||
lastSwapLogType = logType
|
||||
}
|
||||
}
|
||||
|
||||
var err error
|
||||
|
||||
multiTransaction.FromAsset, multiTransaction.FromAmount, multiTransaction.ToAsset, multiTransaction.ToAmount, err = fetchUniswapInfo(ctx, client, tokenManager, firstSwapLog, firstSwapLogType)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if firstSwapLog != lastSwapLog {
|
||||
_, _, multiTransaction.ToAsset, multiTransaction.ToAmount, err = fetchUniswapInfo(ctx, client, tokenManager, lastSwapLog, lastSwapLogType)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
// WETH and ETH have the same decimals value, so there is no need to adjust the From/To amounts
|
||||
if multiTransaction.FromAsset == WETHSymbol && hasWETHDepositLog {
|
||||
multiTransaction.FromAsset = ETHSymbol
|
||||
}
|
||||
|
||||
if multiTransaction.ToAsset == WETHSymbol && hasWETHWithdrawalLog {
|
||||
multiTransaction.ToAsset = ETHSymbol
|
||||
}
|
||||
|
||||
return &multiTransaction, nil
|
||||
}
|
||||
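A tiny worked example (the values are made up) of the sign convention read by identifyUniswapV3Assets above: the positive pool delta is the token paid in, the negated negative delta is the token received.

package main

import (
	"fmt"
	"math/big"
)

// Illustrative only: interpreting Uniswap V3 swap balance deltas.
func main() {
	amount0 := big.NewInt(1_000_000_000)            // +1000 token0 units (e.g. USDC, 6 decimals) paid into the pool
	amount1 := big.NewInt(-500_000_000_000_000_000) // -0.5 token1 units (e.g. WETH, 18 decimals) paid out

	if amount0.Sign() > 0 && amount1.Sign() < 0 {
		paidIn, received := amount0, new(big.Int).Neg(amount1)
		fmt.Println("swap: paid", paidIn, "of token0 for", received, "of token1")
	}
}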
391
vendor/github.com/status-im/status-go/services/wallet/transfer/testutils.go
generated
vendored
Normal file
@@ -0,0 +1,391 @@
|
||||
package transfer
|
||||
|
||||
import (
|
||||
"database/sql"
|
||||
"fmt"
|
||||
"math/big"
|
||||
"testing"
|
||||
|
||||
eth_common "github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/common/hexutil"
|
||||
"github.com/ethereum/go-ethereum/core/types"
|
||||
|
||||
"github.com/status-im/status-go/services/wallet/bigint"
|
||||
"github.com/status-im/status-go/services/wallet/common"
|
||||
"github.com/status-im/status-go/services/wallet/testutils"
|
||||
"github.com/status-im/status-go/services/wallet/token"
|
||||
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
type TestTransaction struct {
|
||||
Hash eth_common.Hash
|
||||
ChainID common.ChainID
|
||||
From eth_common.Address // [sender]
|
||||
Timestamp int64
|
||||
BlkNumber int64
|
||||
Success bool
|
||||
Nonce uint64
|
||||
Contract eth_common.Address
|
||||
MultiTransactionID MultiTransactionIDType
|
||||
}
|
||||
|
||||
type TestTransfer struct {
|
||||
TestTransaction
|
||||
To eth_common.Address // [address]
|
||||
Value int64
|
||||
Token *token.Token
|
||||
}
|
||||
|
||||
type TestMultiTransaction struct {
|
||||
MultiTransactionID MultiTransactionIDType
|
||||
MultiTransactionType MultiTransactionType
|
||||
FromAddress eth_common.Address
|
||||
ToAddress eth_common.Address
|
||||
FromToken string
|
||||
ToToken string
|
||||
FromAmount int64
|
||||
ToAmount int64
|
||||
Timestamp int64
|
||||
FromNetworkID *uint64
|
||||
ToNetworkID *uint64
|
||||
}
|
||||
|
||||
func SeedToToken(seed int) *token.Token {
|
||||
tokenIndex := seed % len(TestTokens)
|
||||
return TestTokens[tokenIndex]
|
||||
}
|
||||
|
||||
func TestTrToToken(t *testing.T, tt *TestTransaction) (token *token.Token, isNative bool) {
|
||||
// Sanity check that none of the markers changed; they should all be equal to the seed
|
||||
require.Equal(t, tt.Timestamp, tt.BlkNumber)
|
||||
|
||||
tokenIndex := int(tt.Timestamp) % len(TestTokens)
|
||||
isNative = testutils.SliceContains(NativeTokenIndices, tokenIndex)
|
||||
|
||||
return TestTokens[tokenIndex], isNative
|
||||
}
|
||||
|
||||
func generateTestTransaction(seed int) TestTransaction {
|
||||
token := SeedToToken(seed)
|
||||
return TestTransaction{
|
||||
Hash: eth_common.HexToHash(fmt.Sprintf("0x1%d", seed)),
|
||||
ChainID: common.ChainID(token.ChainID),
|
||||
From: eth_common.HexToAddress(fmt.Sprintf("0x2%d", seed)),
|
||||
Timestamp: int64(seed),
|
||||
BlkNumber: int64(seed),
|
||||
Success: true,
|
||||
Nonce: uint64(seed),
|
||||
// In practice this is last20Bytes(Keccak256(RLP(From, nonce)))
|
||||
Contract: eth_common.HexToAddress(fmt.Sprintf("0x4%d", seed)),
|
||||
MultiTransactionID: NoMultiTransactionID,
|
||||
}
|
||||
}
|
||||
|
||||
func generateTestTransfer(seed int) TestTransfer {
|
||||
tokenIndex := seed % len(TestTokens)
|
||||
token := TestTokens[tokenIndex]
|
||||
return TestTransfer{
|
||||
TestTransaction: generateTestTransaction(seed),
|
||||
To: eth_common.HexToAddress(fmt.Sprintf("0x3%d", seed)),
|
||||
Value: int64(seed),
|
||||
Token: token,
|
||||
}
|
||||
}
|
||||
|
||||
func GenerateTestSendMultiTransaction(tr TestTransfer) TestMultiTransaction {
|
||||
return TestMultiTransaction{
|
||||
MultiTransactionType: MultiTransactionSend,
|
||||
FromAddress: tr.From,
|
||||
ToAddress: tr.To,
|
||||
FromToken: tr.Token.Symbol,
|
||||
ToToken: tr.Token.Symbol,
|
||||
FromAmount: tr.Value,
|
||||
ToAmount: 0,
|
||||
Timestamp: tr.Timestamp,
|
||||
}
|
||||
}
|
||||
|
||||
func GenerateTestSwapMultiTransaction(tr TestTransfer, toToken string, toAmount int64) TestMultiTransaction {
|
||||
return TestMultiTransaction{
|
||||
MultiTransactionType: MultiTransactionSwap,
|
||||
FromAddress: tr.From,
|
||||
ToAddress: tr.To,
|
||||
FromToken: tr.Token.Symbol,
|
||||
ToToken: toToken,
|
||||
FromAmount: tr.Value,
|
||||
ToAmount: toAmount,
|
||||
Timestamp: tr.Timestamp,
|
||||
}
|
||||
}
|
||||
|
||||
func GenerateTestBridgeMultiTransaction(fromTr, toTr TestTransfer) TestMultiTransaction {
|
||||
return TestMultiTransaction{
|
||||
MultiTransactionType: MultiTransactionBridge,
|
||||
FromAddress: fromTr.From,
|
||||
ToAddress: toTr.To,
|
||||
FromToken: fromTr.Token.Symbol,
|
||||
ToToken: toTr.Token.Symbol,
|
||||
FromAmount: fromTr.Value,
|
||||
ToAmount: toTr.Value,
|
||||
Timestamp: fromTr.Timestamp,
|
||||
}
|
||||
}
|
||||
|
||||
// GenerateTestTransfers generates transfers whose tokens are picked by index into TestTokens, rolling over
// when count exceeds len(TestTokens).
|
||||
func GenerateTestTransfers(tb testing.TB, db *sql.DB, firstStartIndex int, count int) (result []TestTransfer, fromAddresses, toAddresses []eth_common.Address) {
|
||||
for i := firstStartIndex; i < (firstStartIndex + count); i++ {
|
||||
tr := generateTestTransfer(i)
|
||||
fromAddresses = append(fromAddresses, tr.From)
|
||||
toAddresses = append(toAddresses, tr.To)
|
||||
result = append(result, tr)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
type TestCollectible struct {
|
||||
TokenAddress eth_common.Address
|
||||
TokenID *big.Int
|
||||
ChainID common.ChainID
|
||||
}
|
||||
|
||||
var TestCollectibles = []TestCollectible{
|
||||
TestCollectible{
|
||||
TokenAddress: eth_common.HexToAddress("0x97a04fda4d97c6e3547d66b572e29f4a4ff40392"),
|
||||
TokenID: big.NewInt(1),
|
||||
ChainID: 1,
|
||||
},
|
||||
TestCollectible{ // Same token ID as above but different address
|
||||
TokenAddress: eth_common.HexToAddress("0x2cec8879915cdbd80c88d8b1416aa9413a24ddfa"),
|
||||
TokenID: big.NewInt(1),
|
||||
ChainID: 1,
|
||||
},
|
||||
TestCollectible{
|
||||
TokenAddress: eth_common.HexToAddress("0x1dea7a3e04849840c0eb15fd26a55f6c40c4a69b"),
|
||||
TokenID: big.NewInt(11),
|
||||
ChainID: 5,
|
||||
},
|
||||
TestCollectible{ // Same address as above but different token ID
|
||||
TokenAddress: eth_common.HexToAddress("0x1dea7a3e04849840c0eb15fd26a55f6c40c4a69b"),
|
||||
TokenID: big.NewInt(12),
|
||||
ChainID: 5,
|
||||
},
|
||||
}
|
||||
|
||||
var EthMainnet = token.Token{
|
||||
Address: eth_common.HexToAddress("0x"),
|
||||
Name: "Ether",
|
||||
Symbol: "ETH",
|
||||
ChainID: 1,
|
||||
}
|
||||
|
||||
var EthGoerli = token.Token{
|
||||
Address: eth_common.HexToAddress("0x"),
|
||||
Name: "Ether",
|
||||
Symbol: "ETH",
|
||||
ChainID: 5,
|
||||
}
|
||||
|
||||
var EthOptimism = token.Token{
|
||||
Address: eth_common.HexToAddress("0x"),
|
||||
Name: "Ether",
|
||||
Symbol: "ETH",
|
||||
ChainID: 10,
|
||||
}
|
||||
|
||||
var UsdcMainnet = token.Token{
|
||||
Address: eth_common.HexToAddress("0xa0b86991c6218b36c1d19d4a2e9eb0ce3606eb48"),
|
||||
Name: "USD Coin",
|
||||
Symbol: "USDC",
|
||||
ChainID: 1,
|
||||
}
|
||||
|
||||
var UsdcGoerli = token.Token{
|
||||
Address: eth_common.HexToAddress("0x98339d8c260052b7ad81c28c16c0b98420f2b46a"),
|
||||
Name: "USD Coin",
|
||||
Symbol: "USDC",
|
||||
ChainID: 5,
|
||||
}
|
||||
|
||||
var UsdcOptimism = token.Token{
|
||||
Address: eth_common.HexToAddress("0x7f5c764cbc14f9669b88837ca1490cca17c31607"),
|
||||
Name: "USD Coin",
|
||||
Symbol: "USDC",
|
||||
ChainID: 10,
|
||||
}
|
||||
|
||||
var SntMainnet = token.Token{
|
||||
Address: eth_common.HexToAddress("0x744d70fdbe2ba4cf95131626614a1763df805b9e"),
|
||||
Name: "Status Network Token",
|
||||
Symbol: "SNT",
|
||||
ChainID: 1,
|
||||
}
|
||||
|
||||
var DaiMainnet = token.Token{
|
||||
Address: eth_common.HexToAddress("0xf2edF1c091f683E3fb452497d9a98A49cBA84666"),
|
||||
Name: "DAI Stablecoin",
|
||||
Symbol: "DAI",
|
||||
ChainID: 5,
|
||||
}
|
||||
|
||||
var DaiGoerli = token.Token{
|
||||
Address: eth_common.HexToAddress("0xf2edF1c091f683E3fb452497d9a98A49cBA84666"),
|
||||
Name: "DAI Stablecoin",
|
||||
Symbol: "DAI",
|
||||
ChainID: 5,
|
||||
}
|
||||
|
||||
// TestTokens contains ETH/Mainnet, ETH/Goerli, ETH/Optimism, USDC/Mainnet, USDC/Goerli, USDC/Optimism, SNT/Mainnet, DAI/Mainnet, DAI/Goerli
|
||||
var TestTokens = []*token.Token{
|
||||
&EthMainnet, &EthGoerli, &EthOptimism, &UsdcMainnet, &UsdcGoerli, &UsdcOptimism, &SntMainnet, &DaiMainnet, &DaiGoerli,
|
||||
}
|
||||
|
||||
func LookupTokenIdentity(chainID uint64, address eth_common.Address, native bool) *token.Token {
|
||||
for _, token := range TestTokens {
|
||||
if token.ChainID == chainID && token.Address == address && token.IsNative() == native {
|
||||
return token
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
var NativeTokenIndices = []int{0, 1, 2}
|
||||
|
||||
func InsertTestTransfer(tb testing.TB, db *sql.DB, address eth_common.Address, tr *TestTransfer) {
|
||||
token := TestTokens[int(tr.Timestamp)%len(TestTokens)]
|
||||
InsertTestTransferWithOptions(tb, db, address, tr, &TestTransferOptions{
|
||||
TokenAddress: token.Address,
|
||||
})
|
||||
}
|
||||
|
||||
type TestTransferOptions struct {
|
||||
TokenAddress eth_common.Address
|
||||
TokenID *big.Int
|
||||
NullifyAddresses []eth_common.Address
|
||||
Tx *types.Transaction
|
||||
Receipt *types.Receipt
|
||||
}
|
||||
|
||||
func GenerateTxField(data []byte) *types.Transaction {
|
||||
return types.NewTx(&types.DynamicFeeTx{
|
||||
Data: data,
|
||||
})
|
||||
}
|
||||
|
||||
func InsertTestTransferWithOptions(tb testing.TB, db *sql.DB, address eth_common.Address, tr *TestTransfer, opt *TestTransferOptions) {
|
||||
var (
|
||||
tx *sql.Tx
|
||||
)
|
||||
tx, err := db.Begin()
|
||||
require.NoError(tb, err)
|
||||
defer func() {
|
||||
if err == nil {
|
||||
err = tx.Commit()
|
||||
return
|
||||
}
|
||||
_ = tx.Rollback()
|
||||
}()
|
||||
|
||||
blkHash := eth_common.HexToHash("4")
|
||||
|
||||
block := blockDBFields{
|
||||
chainID: uint64(tr.ChainID),
|
||||
account: address,
|
||||
blockNumber: big.NewInt(tr.BlkNumber),
|
||||
blockHash: blkHash,
|
||||
}
|
||||
|
||||
// Respect `FOREIGN KEY(network_id,address,blk_hash)` of `transfers` table
|
||||
err = insertBlockDBFields(tx, block)
|
||||
require.NoError(tb, err)
|
||||
|
||||
receiptStatus := uint64(0)
|
||||
if tr.Success {
|
||||
receiptStatus = 1
|
||||
}
|
||||
|
||||
tokenType := "eth"
|
||||
if (opt.TokenAddress != eth_common.Address{}) {
|
||||
if opt.TokenID == nil {
|
||||
tokenType = "erc20"
|
||||
} else {
|
||||
tokenType = "erc721"
|
||||
}
|
||||
}
|
||||
|
||||
// Workaround to simulate writing of NULL values for addresses
|
||||
txTo := &tr.To
|
||||
txFrom := &tr.From
|
||||
for i := 0; i < len(opt.NullifyAddresses); i++ {
|
||||
if opt.NullifyAddresses[i] == tr.To {
|
||||
txTo = nil
|
||||
}
|
||||
if opt.NullifyAddresses[i] == tr.From {
|
||||
txFrom = nil
|
||||
}
|
||||
}
|
||||
|
||||
transfer := transferDBFields{
|
||||
chainID: uint64(tr.ChainID),
|
||||
id: tr.Hash,
|
||||
txHash: &tr.Hash,
|
||||
address: address,
|
||||
blockHash: blkHash,
|
||||
blockNumber: big.NewInt(tr.BlkNumber),
|
||||
sender: tr.From,
|
||||
transferType: common.Type(tokenType),
|
||||
timestamp: uint64(tr.Timestamp),
|
||||
multiTransactionID: tr.MultiTransactionID,
|
||||
baseGasFees: "0x0",
|
||||
receiptStatus: &receiptStatus,
|
||||
txValue: big.NewInt(tr.Value),
|
||||
txFrom: txFrom,
|
||||
txTo: txTo,
|
||||
txNonce: &tr.Nonce,
|
||||
tokenAddress: &opt.TokenAddress,
|
||||
contractAddress: &tr.Contract,
|
||||
tokenID: opt.TokenID,
|
||||
transaction: opt.Tx,
|
||||
receipt: opt.Receipt,
|
||||
}
|
||||
err = updateOrInsertTransfersDBFields(tx, []transferDBFields{transfer})
|
||||
require.NoError(tb, err)
|
||||
}
|
||||
|
||||
func InsertTestPendingTransaction(tb testing.TB, db *sql.DB, tr *TestTransfer) {
|
||||
_, err := db.Exec(`
|
||||
INSERT INTO pending_transactions (network_id, hash, timestamp, from_address, to_address,
|
||||
symbol, gas_price, gas_limit, value, data, type, additional_data, multi_transaction_id
|
||||
) VALUES (?, ?, ?, ?, ?, 'ETH', 0, 0, ?, '', 'eth', '', ?)`,
|
||||
tr.ChainID, tr.Hash, tr.Timestamp, tr.From, tr.To, (*bigint.SQLBigIntBytes)(big.NewInt(tr.Value)), tr.MultiTransactionID)
|
||||
require.NoError(tb, err)
|
||||
}
|
||||
|
||||
func InsertTestMultiTransaction(tb testing.TB, db *sql.DB, tr *TestMultiTransaction) MultiTransactionIDType {
|
||||
fromTokenType := tr.FromToken
|
||||
if tr.FromToken == "" {
|
||||
fromTokenType = testutils.EthSymbol
|
||||
}
|
||||
toTokenType := tr.ToToken
|
||||
if tr.ToToken == "" {
|
||||
toTokenType = testutils.EthSymbol
|
||||
}
|
||||
fromAmount := (*hexutil.Big)(big.NewInt(tr.FromAmount))
|
||||
toAmount := (*hexutil.Big)(big.NewInt(tr.ToAmount))
|
||||
|
||||
result, err := db.Exec(`
|
||||
INSERT INTO multi_transactions (from_address, from_asset, from_amount, to_address, to_asset, to_amount, type, timestamp, from_network_id, to_network_id
|
||||
) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)`,
|
||||
tr.FromAddress, fromTokenType, fromAmount.String(), tr.ToAddress, toTokenType, toAmount.String(), tr.MultiTransactionType, tr.Timestamp, tr.FromNetworkID, tr.ToNetworkID)
|
||||
require.NoError(tb, err)
|
||||
rowID, err := result.LastInsertId()
|
||||
require.NoError(tb, err)
|
||||
tr.MultiTransactionID = MultiTransactionIDType(rowID)
|
||||
return tr.MultiTransactionID
|
||||
}
|
||||
|
||||
// For use in tests outside the package only
|
||||
func SaveTransfersMarkBlocksLoaded(database *Database, chainID uint64, address eth_common.Address, transfers []Transfer, blocks []*big.Int) error {
|
||||
return saveTransfersMarkBlocksLoaded(database.client, chainID, address, transfers, blocks)
|
||||
}
|
||||
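A sketch (the test name and the setupTestDB helper are assumptions) showing how the helpers above are typically combined to seed a wallet database in tests.

// Illustrative only: generate deterministic transfers, insert them, and group
// the first one into a Send multi-transaction.
func TestSeedWalletDB(t *testing.T) {
	db := setupTestDB(t) // hypothetical helper returning a migrated *sql.DB

	transfers, _, _ := GenerateTestTransfers(t, db, 1, 4)
	for i := range transfers {
		InsertTestTransfer(t, db, transfers[i].To, &transfers[i])
	}

	mt := GenerateTestSendMultiTransaction(transfers[0])
	transfers[0].MultiTransactionID = InsertTestMultiTransaction(t, db, &mt)
	require.NotEqual(t, NoMultiTransactionID, transfers[0].MultiTransactionID)
}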
246
vendor/github.com/status-im/status-go/services/wallet/transfer/transaction_manager.go
generated
vendored
Normal file
@@ -0,0 +1,246 @@
|
||||
package transfer
|
||||
|
||||
import (
|
||||
"database/sql"
|
||||
"fmt"
|
||||
"math/big"
|
||||
|
||||
ethTypes "github.com/ethereum/go-ethereum/core/types"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/common/hexutil"
|
||||
"github.com/ethereum/go-ethereum/event"
|
||||
"github.com/status-im/status-go/account"
|
||||
"github.com/status-im/status-go/eth-node/crypto"
|
||||
"github.com/status-im/status-go/eth-node/types"
|
||||
"github.com/status-im/status-go/multiaccounts/accounts"
|
||||
"github.com/status-im/status-go/params"
|
||||
"github.com/status-im/status-go/services/wallet/bridge"
|
||||
wallet_common "github.com/status-im/status-go/services/wallet/common"
|
||||
"github.com/status-im/status-go/services/wallet/walletevent"
|
||||
"github.com/status-im/status-go/transactions"
|
||||
)
|
||||
|
||||
type MultiTransactionIDType int64
|
||||
|
||||
const (
|
||||
NoMultiTransactionID = MultiTransactionIDType(0)
|
||||
|
||||
// EventMTTransactionUpdate is emitted when a multi-transaction is updated (added or deleted)
|
||||
EventMTTransactionUpdate walletevent.EventType = "multi-transaction-update"
|
||||
)
|
||||
|
||||
type SignatureDetails struct {
|
||||
R string `json:"r"`
|
||||
S string `json:"s"`
|
||||
V string `json:"v"`
|
||||
}
|
||||
|
||||
type TransactionDescription struct {
|
||||
chainID uint64
|
||||
builtTx *ethTypes.Transaction
|
||||
signature []byte
|
||||
}
|
||||
|
||||
type TransactionManager struct {
|
||||
db *sql.DB
|
||||
gethManager *account.GethManager
|
||||
transactor *transactions.Transactor
|
||||
config *params.NodeConfig
|
||||
accountsDB *accounts.Database
|
||||
pendingTracker *transactions.PendingTxTracker
|
||||
eventFeed *event.Feed
|
||||
|
||||
multiTransactionForKeycardSigning *MultiTransaction
|
||||
transactionsBridgeData []*bridge.TransactionBridge
|
||||
transactionsForKeycardSingning map[common.Hash]*TransactionDescription
|
||||
}
|
||||
|
||||
func NewTransactionManager(
|
||||
db *sql.DB,
|
||||
gethManager *account.GethManager,
|
||||
transactor *transactions.Transactor,
|
||||
config *params.NodeConfig,
|
||||
accountsDB *accounts.Database,
|
||||
pendingTxManager *transactions.PendingTxTracker,
|
||||
eventFeed *event.Feed,
|
||||
) *TransactionManager {
|
||||
return &TransactionManager{
|
||||
db: db,
|
||||
gethManager: gethManager,
|
||||
transactor: transactor,
|
||||
config: config,
|
||||
accountsDB: accountsDB,
|
||||
pendingTracker: pendingTxManager,
|
||||
eventFeed: eventFeed,
|
||||
}
|
||||
}
|
||||
|
||||
var (
|
||||
emptyHash = common.Hash{}
|
||||
)
|
||||
|
||||
type MultiTransactionType uint8
|
||||
|
||||
const (
|
||||
MultiTransactionSend = iota
|
||||
MultiTransactionSwap
|
||||
MultiTransactionBridge
|
||||
)
|
||||
|
||||
type MultiTransaction struct {
|
||||
ID uint `json:"id"`
|
||||
Timestamp uint64 `json:"timestamp"`
|
||||
FromNetworkID uint64 `json:"fromNetworkID"`
|
||||
ToNetworkID uint64 `json:"toNetworkID"`
|
||||
FromTxHash common.Hash `json:"fromTxHash"`
|
||||
ToTxHash common.Hash `json:"toTxHash"`
|
||||
FromAddress common.Address `json:"fromAddress"`
|
||||
ToAddress common.Address `json:"toAddress"`
|
||||
FromAsset string `json:"fromAsset"`
|
||||
ToAsset string `json:"toAsset"`
|
||||
FromAmount *hexutil.Big `json:"fromAmount"`
|
||||
ToAmount *hexutil.Big `json:"toAmount"`
|
||||
Type MultiTransactionType `json:"type"`
|
||||
CrossTxID string
|
||||
}
|
||||
|
||||
type MultiTransactionCommand struct {
|
||||
FromAddress common.Address `json:"fromAddress"`
|
||||
ToAddress common.Address `json:"toAddress"`
|
||||
FromAsset string `json:"fromAsset"`
|
||||
ToAsset string `json:"toAsset"`
|
||||
FromAmount *hexutil.Big `json:"fromAmount"`
|
||||
Type MultiTransactionType `json:"type"`
|
||||
}
|
||||
|
||||
type MultiTransactionCommandResult struct {
|
||||
ID int64 `json:"id"`
|
||||
Hashes map[uint64][]types.Hash `json:"hashes"`
|
||||
}
|
||||
|
||||
type TransactionIdentity struct {
|
||||
ChainID wallet_common.ChainID `json:"chainId"`
|
||||
Hash common.Hash `json:"hash"`
|
||||
Address common.Address `json:"address"`
|
||||
}
|
||||
|
||||
type TxResponse struct {
|
||||
KeyUID string `json:"keyUid,omitempty"`
|
||||
Address types.Address `json:"address,omitempty"`
|
||||
AddressPath string `json:"addressPath,omitempty"`
|
||||
SignOnKeycard bool `json:"signOnKeycard,omitempty"`
|
||||
ChainID uint64 `json:"chainId,omitempty"`
|
||||
MessageToSign interface{} `json:"messageToSign,omitempty"`
|
||||
TxArgs transactions.SendTxArgs `json:"txArgs,omitempty"`
|
||||
RawTx string `json:"rawTx,omitempty"`
|
||||
TxHash common.Hash `json:"txHash,omitempty"`
|
||||
}
|
||||
|
||||
func (tm *TransactionManager) SignMessage(message types.HexBytes, address common.Address, password string) (string, error) {
	selectedAccount, err := tm.gethManager.VerifyAccountPassword(tm.config.KeyStoreDir, address.Hex(), password)
	if err != nil {
		return "", err
	}

	signature, err := crypto.Sign(message[:], selectedAccount.PrivateKey)

	return types.EncodeHex(signature), err
}

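// A hypothetical usage sketch for SignMessage (digest, accountAddress and password
// are caller-provided names; crypto.Sign operates on a 32-byte digest, so the
// message passed in should already be hashed):
//
//	sig, err := tm.SignMessage(types.HexBytes(digest[:]), accountAddress, password)
//	// sig is a hex-encoded 65-byte [R || S || V] signature when err is nil.
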
func (tm *TransactionManager) BuildTransaction(chainID uint64, sendArgs transactions.SendTxArgs) (response *TxResponse, err error) {
	account, err := tm.accountsDB.GetAccountByAddress(sendArgs.From)
	if err != nil {
		return nil, fmt.Errorf("failed to resolve account: %w", err)
	}

	kp, err := tm.accountsDB.GetKeypairByKeyUID(account.KeyUID)
	if err != nil {
		return nil, err
	}

	txBeingSigned, err := tm.transactor.ValidateAndBuildTransaction(chainID, sendArgs)
	if err != nil {
		return nil, err
	}

	// Set potentially missing fields that were filled in while building the transaction
	if sendArgs.Value == nil {
		value := hexutil.Big(*txBeingSigned.Value())
		sendArgs.Value = &value
	}
	if sendArgs.Nonce == nil {
		nonce := hexutil.Uint64(txBeingSigned.Nonce())
		sendArgs.Nonce = &nonce
	}
	if sendArgs.Gas == nil {
		gas := hexutil.Uint64(txBeingSigned.Gas())
		sendArgs.Gas = &gas
	}
	if sendArgs.GasPrice == nil {
		gasPrice := hexutil.Big(*txBeingSigned.GasPrice())
		sendArgs.GasPrice = &gasPrice
	}

	if sendArgs.IsDynamicFeeTx() {
		if sendArgs.MaxPriorityFeePerGas == nil {
			maxPriorityFeePerGas := hexutil.Big(*txBeingSigned.GasTipCap())
			sendArgs.MaxPriorityFeePerGas = &maxPriorityFeePerGas
		}
		if sendArgs.MaxFeePerGas == nil {
			maxFeePerGas := hexutil.Big(*txBeingSigned.GasFeeCap())
			sendArgs.MaxFeePerGas = &maxFeePerGas
		}
	}

	signer := ethTypes.NewLondonSigner(new(big.Int).SetUint64(chainID))

	return &TxResponse{
		KeyUID:        account.KeyUID,
		Address:       account.Address,
		AddressPath:   account.Path,
		SignOnKeycard: kp.MigratedToKeycard(),
		ChainID:       chainID,
		MessageToSign: signer.Hash(txBeingSigned),
		TxArgs:        sendArgs,
	}, nil
}

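// Note: BuildTransaction does not sign or send anything. TxResponse.MessageToSign
// carries the London-signer hash of the prepared transaction and SignOnKeycard tells
// the client whether that hash must be signed on a keycard; the resulting 65-byte
// signature is then fed back through BuildRawTransaction or
// SendTransactionWithSignature below, together with the returned TxArgs.
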
func (tm *TransactionManager) BuildRawTransaction(chainID uint64, sendArgs transactions.SendTxArgs, signature []byte) (response *TxResponse, err error) {
	tx, err := tm.transactor.BuildTransactionWithSignature(chainID, sendArgs, signature)
	if err != nil {
		return nil, err
	}

	data, err := tx.MarshalBinary()
	if err != nil {
		return nil, err
	}

	return &TxResponse{
		ChainID: chainID,
		TxArgs:  sendArgs,
		RawTx:   types.EncodeHex(data),
		TxHash:  tx.Hash(),
	}, nil
}

func (tm *TransactionManager) SendTransactionWithSignature(chainID uint64, txType transactions.PendingTrxType, sendArgs transactions.SendTxArgs, signature []byte) (hash types.Hash, err error) {
	hash, err = tm.transactor.BuildTransactionAndSendWithSignature(chainID, sendArgs, signature)
	if err != nil {
		return hash, err
	}

	err = tm.pendingTracker.TrackPendingTransaction(
		wallet_common.ChainID(chainID),
		common.Hash(hash),
		common.Address(sendArgs.From),
		txType,
		transactions.AutoDelete,
	)
	if err != nil {
		return hash, err
	}

	return hash, nil
}

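// A rough sketch of the external-signing round trip defined above, assuming a
// caller-provided signFn that signs the 32-byte hash and returns a 65-byte
// [R || S || V] signature, and a txType of type transactions.PendingTrxType:
//
//	resp, _ := tm.BuildTransaction(chainID, sendArgs)
//	sig, _ := signFn(resp.MessageToSign.(common.Hash))
//	txHash, err := tm.SendTransactionWithSignature(chainID, txType, resp.TxArgs, sig)
//
// On success the transaction is also registered with the pending transactions
// tracker so it can be followed until it is confirmed.
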
515
vendor/github.com/status-im/status-go/services/wallet/transfer/transaction_manager_multitransaction.go
generated
vendored
Normal file
@@ -0,0 +1,515 @@
package transfer

import (
	"context"
	"database/sql"
	"encoding/hex"
	"errors"
	"fmt"
	"math/big"
	"strings"
	"time"

	ethTypes "github.com/ethereum/go-ethereum/core/types"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/common/hexutil"
	"github.com/ethereum/go-ethereum/event"
	"github.com/ethereum/go-ethereum/log"
	"github.com/status-im/status-go/account"
	"github.com/status-im/status-go/eth-node/crypto"
	"github.com/status-im/status-go/eth-node/types"
	"github.com/status-im/status-go/services/wallet/bigint"
	"github.com/status-im/status-go/services/wallet/bridge"
	wallet_common "github.com/status-im/status-go/services/wallet/common"
	"github.com/status-im/status-go/services/wallet/walletevent"
	"github.com/status-im/status-go/signal"
	"github.com/status-im/status-go/transactions"
)

const multiTransactionColumns = "from_network_id, from_tx_hash, from_address, from_asset, from_amount, to_network_id, to_tx_hash, to_address, to_asset, to_amount, type, cross_tx_id, timestamp"
const selectMultiTransactionColumns = "COALESCE(from_network_id, 0), from_tx_hash, from_address, from_asset, from_amount, COALESCE(to_network_id, 0), to_tx_hash, to_address, to_asset, to_amount, type, cross_tx_id, timestamp"

func rowsToMultiTransactions(rows *sql.Rows) ([]*MultiTransaction, error) {
	var multiTransactions []*MultiTransaction
	for rows.Next() {
		multiTransaction := &MultiTransaction{}
		var fromAmountDB, toAmountDB sql.NullString
		var fromTxHash, toTxHash sql.RawBytes
		err := rows.Scan(
			&multiTransaction.ID,
			&multiTransaction.FromNetworkID,
			&fromTxHash,
			&multiTransaction.FromAddress,
			&multiTransaction.FromAsset,
			&fromAmountDB,
			&multiTransaction.ToNetworkID,
			&toTxHash,
			&multiTransaction.ToAddress,
			&multiTransaction.ToAsset,
			&toAmountDB,
			&multiTransaction.Type,
			&multiTransaction.CrossTxID,
			&multiTransaction.Timestamp,
		)
		if err != nil {
			return nil, err
		}
		if len(fromTxHash) > 0 {
			multiTransaction.FromTxHash = common.BytesToHash(fromTxHash)
		}
		if len(toTxHash) > 0 {
			multiTransaction.ToTxHash = common.BytesToHash(toTxHash)
		}

		if fromAmountDB.Valid {
			multiTransaction.FromAmount = new(hexutil.Big)
			if _, ok := (*big.Int)(multiTransaction.FromAmount).SetString(fromAmountDB.String, 0); !ok {
				return nil, errors.New("failed to convert fromAmountDB.String to big.Int: " + fromAmountDB.String)
			}
		}

		if toAmountDB.Valid {
			multiTransaction.ToAmount = new(hexutil.Big)
			if _, ok := (*big.Int)(multiTransaction.ToAmount).SetString(toAmountDB.String, 0); !ok {
				return nil, errors.New("failed to convert toAmountDB.String to big.Int: " + toAmountDB.String)
			}
		}

		multiTransactions = append(multiTransactions, multiTransaction)
	}

	return multiTransactions, rows.Err()
}

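// Note on the storage format decoded above: from/to amounts are persisted as
// strings and re-parsed with big.Int.SetString(s, 0), so both decimal and
// 0x-prefixed values round-trip; tx hashes are stored as raw bytes and may be
// NULL, which is why they are only converted when non-empty.
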
func getMultiTransactionTimestamp(multiTransaction *MultiTransaction) uint64 {
	if multiTransaction.Timestamp != 0 {
		return multiTransaction.Timestamp
	}
	return uint64(time.Now().Unix())
}

// insertMultiTransaction inserts a multi transaction into the database and updates multi-transaction ID and timestamp
func insertMultiTransaction(db *sql.DB, multiTransaction *MultiTransaction) (MultiTransactionIDType, error) {
	insert, err := db.Prepare(fmt.Sprintf(`INSERT INTO multi_transactions (%s)
											VALUES(?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)`, multiTransactionColumns))
	if err != nil {
		return NoMultiTransactionID, err
	}
	defer insert.Close()

	timestamp := getMultiTransactionTimestamp(multiTransaction)
	result, err := insert.Exec(
		multiTransaction.FromNetworkID,
		multiTransaction.FromTxHash,
		multiTransaction.FromAddress,
		multiTransaction.FromAsset,
		multiTransaction.FromAmount.String(),
		multiTransaction.ToNetworkID,
		multiTransaction.ToTxHash,
		multiTransaction.ToAddress,
		multiTransaction.ToAsset,
		multiTransaction.ToAmount.String(),
		multiTransaction.Type,
		multiTransaction.CrossTxID,
		timestamp,
	)
	if err != nil {
		return NoMultiTransactionID, err
	}
	multiTransactionID, err := result.LastInsertId()

	multiTransaction.Timestamp = timestamp
	multiTransaction.ID = uint(multiTransactionID)

	return MultiTransactionIDType(multiTransactionID), err
}

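// The Exec argument order above must stay in sync with multiTransactionColumns;
// when the caller does not set a timestamp, the current Unix time is used, and the
// auto-assigned rowid returned by LastInsertId becomes the MultiTransactionIDType.
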
func (tm *TransactionManager) InsertMultiTransaction(multiTransaction *MultiTransaction) (MultiTransactionIDType, error) {
	return tm.insertMultiTransactionAndNotify(tm.db, multiTransaction, nil)
}

func (tm *TransactionManager) insertMultiTransactionAndNotify(db *sql.DB, multiTransaction *MultiTransaction, chainIDs []uint64) (MultiTransactionIDType, error) {
	id, err := insertMultiTransaction(db, multiTransaction)
	if err == nil {
		publishMultiTransactionUpdatedEvent(db, multiTransaction, tm.eventFeed, chainIDs)
	}
	return id, err
}

// publishMultiTransactionUpdatedEvent notifies listeners of a new multi transaction (used in activity history)
func publishMultiTransactionUpdatedEvent(db *sql.DB, multiTransaction *MultiTransaction, eventFeed *event.Feed, chainIDs []uint64) {
	publishFn := func(chainID uint64) {
		eventFeed.Send(walletevent.Event{
			Type:     EventMTTransactionUpdate,
			ChainID:  chainID,
			Accounts: []common.Address{multiTransaction.FromAddress, multiTransaction.ToAddress},
			At:       int64(multiTransaction.Timestamp),
		})
	}
	if len(chainIDs) > 0 {
		for _, chainID := range chainIDs {
			publishFn(chainID)
		}
	} else {
		publishFn(0)
	}
}

func updateMultiTransaction(db *sql.DB, multiTransaction *MultiTransaction) error {
	if MultiTransactionIDType(multiTransaction.ID) == NoMultiTransactionID {
		return fmt.Errorf("no multitransaction ID")
	}

	update, err := db.Prepare(fmt.Sprintf(`REPLACE INTO multi_transactions (rowid, %s)
											VALUES(?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)`, multiTransactionColumns))
	if err != nil {
		return err
	}
	defer update.Close()

	timestamp := getMultiTransactionTimestamp(multiTransaction)
	_, err = update.Exec(
		multiTransaction.ID,
		multiTransaction.FromNetworkID,
		multiTransaction.FromTxHash,
		multiTransaction.FromAddress,
		multiTransaction.FromAsset,
		multiTransaction.FromAmount.String(),
		multiTransaction.ToNetworkID,
		multiTransaction.ToTxHash,
		multiTransaction.ToAddress,
		multiTransaction.ToAsset,
		multiTransaction.ToAmount.String(),
		multiTransaction.Type,
		multiTransaction.CrossTxID,
		timestamp,
	)
	return err
}

func (tm *TransactionManager) UpdateMultiTransaction(multiTransaction *MultiTransaction) error {
	return updateMultiTransaction(tm.db, multiTransaction)
}

// In the case of a keycard account, the password should be empty
func (tm *TransactionManager) CreateMultiTransactionFromCommand(ctx context.Context, command *MultiTransactionCommand,
	data []*bridge.TransactionBridge, bridges map[string]bridge.Bridge, password string) (*MultiTransactionCommandResult, error) {

	multiTransaction := multiTransactionFromCommand(command)

	chainIDs := make([]uint64, 0, len(data))
	for _, tx := range data {
		chainIDs = append(chainIDs, tx.ChainID)
	}
	if multiTransaction.Type == MultiTransactionSend && multiTransaction.FromNetworkID == 0 && len(chainIDs) == 1 {
		multiTransaction.FromNetworkID = chainIDs[0]
	}
	multiTransactionID, err := tm.insertMultiTransactionAndNotify(tm.db, multiTransaction, chainIDs)
	if err != nil {
		return nil, err
	}

	multiTransaction.ID = uint(multiTransactionID)
	if password == "" {
		acc, err := tm.accountsDB.GetAccountByAddress(types.Address(multiTransaction.FromAddress))
		if err != nil {
			return nil, err
		}

		kp, err := tm.accountsDB.GetKeypairByKeyUID(acc.KeyUID)
		if err != nil {
			return nil, err
		}

		if !kp.MigratedToKeycard() {
			return nil, fmt.Errorf("account being used is not migrated to a keycard, password is required")
		}

		tm.multiTransactionForKeycardSigning = multiTransaction
		tm.transactionsBridgeData = data
		hashes, err := tm.buildTransactions(bridges)
		if err != nil {
			return nil, err
		}

		signal.SendTransactionsForSigningEvent(hashes)

		return nil, nil
	}

	hashes, err := tm.sendTransactions(multiTransaction, data, bridges, password)
	if err != nil {
		return nil, err
	}

	err = tm.storePendingTransactions(multiTransaction, hashes, data)
	if err != nil {
		return nil, err
	}

	return &MultiTransactionCommandResult{
		ID:     int64(multiTransactionID),
		Hashes: hashes,
	}, nil
}

func (tm *TransactionManager) ProceedWithTransactionsSignatures(ctx context.Context, signatures map[string]SignatureDetails) (*MultiTransactionCommandResult, error) {
	if tm.multiTransactionForKeycardSigning == nil {
		return nil, errors.New("no multi transaction to proceed with")
	}
	if len(tm.transactionsBridgeData) == 0 {
		return nil, errors.New("no transactions bridge data to proceed with")
	}
	if len(tm.transactionsForKeycardSingning) == 0 {
		return nil, errors.New("no transactions to proceed with")
	}
	if len(signatures) != len(tm.transactionsForKeycardSingning) {
		return nil, errors.New("not all transactions have been signed")
	}

	// check that all transactions have been signed and assemble the 65-byte [R || S || V] signatures
	for hash, desc := range tm.transactionsForKeycardSingning {
		sigDetails, ok := signatures[hash.String()]
		if !ok {
			return nil, fmt.Errorf("missing signature for transaction %s", hash)
		}

		rBytes, _ := hex.DecodeString(sigDetails.R)
		sBytes, _ := hex.DecodeString(sigDetails.S)
		vByte := byte(0)
		if sigDetails.V == "01" {
			vByte = 1
		}

		desc.signature = make([]byte, crypto.SignatureLength)
		copy(desc.signature[32-len(rBytes):32], rBytes)
		copy(desc.signature[64-len(sBytes):64], sBytes)
		desc.signature[64] = vByte
	}

	// send transactions
	hashes := make(map[uint64][]types.Hash)
	for _, desc := range tm.transactionsForKeycardSingning {
		hash, err := tm.transactor.AddSignatureToTransactionAndSend(desc.chainID, desc.builtTx, desc.signature)
		if err != nil {
			return nil, err
		}
		hashes[desc.chainID] = append(hashes[desc.chainID], hash)
	}

	err := tm.storePendingTransactions(tm.multiTransactionForKeycardSigning, hashes, tm.transactionsBridgeData)
	if err != nil {
		return nil, err
	}

	return &MultiTransactionCommandResult{
		ID:     int64(tm.multiTransactionForKeycardSigning.ID),
		Hashes: hashes,
	}, nil
}

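// Keycard flow implemented by the two methods above: CreateMultiTransactionFromCommand
// is called with an empty password, builds the transactions, emits
// signal.SendTransactionsForSigningEvent with their signer hashes and returns a nil
// result; the client then signs each hash on the keycard and calls
// ProceedWithTransactionsSignatures with hex-encoded R, S and V components, which are
// reassembled into 65-byte [R(32) || S(32) || V(1)] signatures before the
// transactions are sent and tracked as pending.
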
func (tm *TransactionManager) storePendingTransactions(multiTransaction *MultiTransaction,
	hashes map[uint64][]types.Hash, data []*bridge.TransactionBridge) error {

	txs := createPendingTransactions(hashes, data, multiTransaction)
	for _, tx := range txs {
		err := tm.pendingTracker.StoreAndTrackPendingTx(tx)
		if err != nil {
			return err
		}
	}
	return nil
}

func createPendingTransactions(hashes map[uint64][]types.Hash, data []*bridge.TransactionBridge,
	multiTransaction *MultiTransaction) []*transactions.PendingTransaction {

	txs := make([]*transactions.PendingTransaction, 0)
	for _, tx := range data {
		for _, hash := range hashes[tx.ChainID] {
			pendingTransaction := &transactions.PendingTransaction{
				Hash:               common.Hash(hash),
				Timestamp:          uint64(time.Now().Unix()),
				Value:              bigint.BigInt{Int: multiTransaction.FromAmount.ToInt()},
				From:               common.Address(tx.From()),
				To:                 common.Address(tx.To()),
				Data:               tx.Data().String(),
				Type:               transactions.WalletTransfer,
				ChainID:            wallet_common.ChainID(tx.ChainID),
				MultiTransactionID: int64(multiTransaction.ID),
				Symbol:             multiTransaction.FromAsset,
				AutoDelete:         new(bool),
			}
			// Transaction downloader will delete pending transaction as soon as it is confirmed
			*pendingTransaction.AutoDelete = false
			txs = append(txs, pendingTransaction)
		}
	}
	return txs
}

func multiTransactionFromCommand(command *MultiTransactionCommand) *MultiTransaction {

	log.Info("Creating multi transaction", "command", command)

	multiTransaction := &MultiTransaction{
		FromAddress: command.FromAddress,
		ToAddress:   command.ToAddress,
		FromAsset:   command.FromAsset,
		ToAsset:     command.ToAsset,
		FromAmount:  command.FromAmount,
		ToAmount:    new(hexutil.Big),
		Type:        command.Type,
	}

	return multiTransaction
}

func (tm *TransactionManager) buildTransactions(bridges map[string]bridge.Bridge) ([]string, error) {
	tm.transactionsForKeycardSingning = make(map[common.Hash]*TransactionDescription)
	var hashes []string
	for _, bridgeTx := range tm.transactionsBridgeData {
		builtTx, err := bridges[bridgeTx.BridgeName].BuildTransaction(bridgeTx)
		if err != nil {
			return hashes, err
		}

		signer := ethTypes.NewLondonSigner(big.NewInt(int64(bridgeTx.ChainID)))
		txHash := signer.Hash(builtTx)

		tm.transactionsForKeycardSingning[txHash] = &TransactionDescription{
			chainID: bridgeTx.ChainID,
			builtTx: builtTx,
		}

		hashes = append(hashes, txHash.String())
	}

	return hashes, nil
}

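// The strings returned by buildTransactions are the London-signer hashes of the
// still-unsigned bridge transactions; each hash also keys
// transactionsForKeycardSingning so that the signature received later can be
// attached to the matching built transaction.
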
func (tm *TransactionManager) sendTransactions(multiTransaction *MultiTransaction,
	data []*bridge.TransactionBridge, bridges map[string]bridge.Bridge, password string) (
	map[uint64][]types.Hash, error) {

	log.Info("Making transactions", "multiTransaction", multiTransaction)

	selectedAccount, err := tm.getVerifiedWalletAccount(multiTransaction.FromAddress.Hex(), password)
	if err != nil {
		return nil, err
	}

	hashes := make(map[uint64][]types.Hash)
	for _, tx := range data {
		hash, err := bridges[tx.BridgeName].Send(tx, selectedAccount)
		if err != nil {
			return nil, err
		}
		hashes[tx.ChainID] = append(hashes[tx.ChainID], hash)
	}
	return hashes, nil
}

func (tm *TransactionManager) GetMultiTransactions(ctx context.Context, ids []MultiTransactionIDType) ([]*MultiTransaction, error) {
	placeholders := make([]string, len(ids))
	args := make([]interface{}, len(ids))
	for i, v := range ids {
		placeholders[i] = "?"
		args[i] = v
	}

	stmt, err := tm.db.Prepare(fmt.Sprintf(`SELECT rowid, %s
											FROM multi_transactions
											WHERE rowid in (%s)`,
		selectMultiTransactionColumns,
		strings.Join(placeholders, ",")))
	if err != nil {
		return nil, err
	}
	defer stmt.Close()

	rows, err := stmt.Query(args...)
	if err != nil {
		return nil, err
	}
	defer rows.Close()

	return rowsToMultiTransactions(rows)
}

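// GetMultiTransactions builds one "?" placeholder per requested ID, so for three
// IDs the query expands to "SELECT rowid, ... WHERE rowid in (?,?,?)", and the
// resulting rows are decoded by rowsToMultiTransactions above.
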
func (tm *TransactionManager) getBridgeMultiTransactions(ctx context.Context, toChainID uint64, crossTxID string) ([]*MultiTransaction, error) {
	stmt, err := tm.db.Prepare(fmt.Sprintf(`SELECT rowid, %s
											FROM multi_transactions
											WHERE type=? AND to_network_id=? AND cross_tx_id=?`,
		multiTransactionColumns))
	if err != nil {
		return nil, err
	}
	defer stmt.Close()

	rows, err := stmt.Query(MultiTransactionBridge, toChainID, crossTxID)
	if err != nil {
		return nil, err
	}
	defer rows.Close()

	return rowsToMultiTransactions(rows)
}

func (tm *TransactionManager) GetBridgeOriginMultiTransaction(ctx context.Context, toChainID uint64, crossTxID string) (*MultiTransaction, error) {
	multiTxs, err := tm.getBridgeMultiTransactions(ctx, toChainID, crossTxID)
	if err != nil {
		return nil, err
	}

	for _, multiTx := range multiTxs {
		// Origin MultiTxs will have a missing "ToTxHash"
		if multiTx.ToTxHash == emptyHash {
			return multiTx, nil
		}
	}

	return nil, nil
}

func (tm *TransactionManager) GetBridgeDestinationMultiTransaction(ctx context.Context, toChainID uint64, crossTxID string) (*MultiTransaction, error) {
	multiTxs, err := tm.getBridgeMultiTransactions(ctx, toChainID, crossTxID)
	if err != nil {
		return nil, err
	}

	for _, multiTx := range multiTxs {
		// Destination MultiTxs will have a missing "FromTxHash"
		if multiTx.FromTxHash == emptyHash {
			return multiTx, nil
		}
	}

	return nil, nil
}

func (tm *TransactionManager) getVerifiedWalletAccount(address, password string) (*account.SelectedExtKey, error) {
	exists, err := tm.accountsDB.AddressExists(types.HexToAddress(address))
	if err != nil {
		log.Error("failed to query db for a given address", "address", address, "error", err)
		return nil, err
	}

	if !exists {
		log.Error("failed to get a selected account", "err", transactions.ErrInvalidTxSender)
		return nil, transactions.ErrAccountDoesntExist
	}

	key, err := tm.gethManager.VerifyAccountPassword(tm.config.KeyStoreDir, address, password)
	if err != nil {
		log.Error("failed to verify account", "account", address, "error", err)
		return nil, err
	}

	return &account.SelectedExtKey{
		Address:    key.Address,
		AccountKey: key,
	}, nil
}

118
vendor/github.com/status-im/status-go/services/wallet/transfer/view.go
generated
vendored
Normal file
@@ -0,0 +1,118 @@
package transfer

import (
	"math/big"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/common/hexutil"
	w_common "github.com/status-im/status-go/services/wallet/common"
)

// View stores only fields used by a client and ensures that all relevant fields are
// encoded in hex.
type View struct {
	ID                   common.Hash    `json:"id"`
	Type                 w_common.Type  `json:"type"`
	Address              common.Address `json:"address"`
	BlockNumber          *hexutil.Big   `json:"blockNumber"`
	BlockHash            common.Hash    `json:"blockhash"`
	Timestamp            hexutil.Uint64 `json:"timestamp"`
	GasPrice             *hexutil.Big   `json:"gasPrice"`
	MaxFeePerGas         *hexutil.Big   `json:"maxFeePerGas"`
	MaxPriorityFeePerGas *hexutil.Big   `json:"maxPriorityFeePerGas"`
	EffectiveTip         *hexutil.Big   `json:"effectiveTip"`
	EffectiveGasPrice    *hexutil.Big   `json:"effectiveGasPrice"`
	GasLimit             hexutil.Uint64 `json:"gasLimit"`
	GasUsed              hexutil.Uint64 `json:"gasUsed"`
	Nonce                hexutil.Uint64 `json:"nonce"`
	TxStatus             hexutil.Uint64 `json:"txStatus"`
	Input                hexutil.Bytes  `json:"input"`
	TxHash               common.Hash    `json:"txHash"`
	Value                *hexutil.Big   `json:"value"`   // Only used for Type EthTransfer and Erc20Transfer
	TokenID              *hexutil.Big   `json:"tokenId"` // Only used for Type Erc721Transfer
	From                 common.Address `json:"from"`
	To                   common.Address `json:"to"`
	Contract             common.Address `json:"contract"`
	NetworkID            uint64         `json:"networkId"`
	MultiTransactionID   int64          `json:"multiTransactionID"`
	BaseGasFees          string         `json:"base_gas_fee"`
}

func castToTransferViews(transfers []Transfer) []View {
	views := make([]View, 0, len(transfers))
	for _, tx := range transfers {
		switch tx.Type {
		case w_common.EthTransfer, w_common.Erc20Transfer, w_common.Erc721Transfer:
			view := CastToTransferView(tx)
			views = append(views, view)
		}
	}
	return views
}

func CastToTransferView(t Transfer) View {
	view := View{}
	view.ID = t.ID
	view.Type = getFixedTransferType(t)
	view.Address = t.Address
	view.BlockNumber = (*hexutil.Big)(t.BlockNumber)
	view.BlockHash = t.BlockHash
	view.Timestamp = hexutil.Uint64(t.Timestamp)
	view.GasPrice = (*hexutil.Big)(t.Transaction.GasPrice())
	if t.BaseGasFees != "" {
		baseFee := new(big.Int)
		baseFee.SetString(t.BaseGasFees[2:], 16)
		tip := t.Transaction.EffectiveGasTipValue(baseFee)

		view.EffectiveTip = (*hexutil.Big)(tip)
		price := new(big.Int).Add(baseFee, tip)
		view.EffectiveGasPrice = (*hexutil.Big)(price)
	}
	view.MaxFeePerGas = (*hexutil.Big)(t.Transaction.GasFeeCap())
	view.MaxPriorityFeePerGas = (*hexutil.Big)(t.Transaction.GasTipCap())
	view.GasLimit = hexutil.Uint64(t.Transaction.Gas())
	view.GasUsed = hexutil.Uint64(t.Receipt.GasUsed)
	view.BaseGasFees = t.BaseGasFees
	view.Nonce = hexutil.Uint64(t.Transaction.Nonce())
	view.TxStatus = hexutil.Uint64(t.Receipt.Status)
	view.Input = hexutil.Bytes(t.Transaction.Data())
	view.TxHash = t.Transaction.Hash()
	view.NetworkID = t.NetworkID

	value := new(hexutil.Big)
	tokenID := new(hexutil.Big)

	switch view.Type {
	case w_common.EthTransfer:
		view.From = t.From
		if t.Transaction.To() != nil {
			view.To = *t.Transaction.To()
		}
		value = (*hexutil.Big)(t.Transaction.Value())
		view.Contract = t.Receipt.ContractAddress
	case w_common.Erc20Transfer:
		view.Contract = t.Log.Address
		from, to, valueInt := w_common.ParseErc20TransferLog(t.Log)
		view.From, view.To, value = from, to, (*hexutil.Big)(valueInt)
	case w_common.Erc721Transfer:
		view.Contract = t.Log.Address
		from, to, tokenIDInt := w_common.ParseErc721TransferLog(t.Log)
		view.From, view.To, tokenID = from, to, (*hexutil.Big)(tokenIDInt)
	}

	view.MultiTransactionID = int64(t.MultiTransactionID)
	view.Value = value
	view.TokenID = tokenID

	return view
}

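// Depending on the (fixed) transfer type, either Value or TokenID is populated:
// plain ETH transfers take the value from the transaction itself, while ERC-20
// amounts and ERC-721 token IDs are parsed out of the emitted Transfer log; the
// other field keeps its zero value.
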
func getFixedTransferType(tx Transfer) w_common.Type {
	// erc721 transfers share the event signature with erc20 ones, so they both used to be categorized as erc20
	// by the Downloader. We fix this here since they might be mis-categorized in the db.
	if tx.Type == w_common.Erc20Transfer {
		eventType := w_common.GetEventType(tx.Log)
		return w_common.EventTypeToSubtransactionType(eventType)
	}
	return tx.Type
}