Add wind/unwind support to tbc (#159)
* Add function to determine canonical chain

* Refactor tests to simplify calling code

* Add name concept to block so that we can easily match blocks and geometry

* Proper exit tests so that we can run multiple fake nodes and tbc clients

* Create odd geometries to hit corner cases

* Detect direction

* I think this fixes the unknown tip issue; code needs to be cleaned up but should go in for backup

* Determine direction and call wind/unwind function (a short sketch of this flow appears after the changed-files summary below)

* Flip direction logic around; while it was correct it was difficult to reason about

* Add linear test

* Add not linear error to check against

* unindex transactions

* cache parsed tx's in mined block for easy reference

* Flesh out mustHave to check all cached tx's exist

* ensure iterator does not step out of bounds; it does :(

* Fix silly reversals

* Make this somewhat work

* ugh go vet being dumb

* Fixup names

* Do not store coinbase in spent transaction cache

* Bring back spent transaction cache test now that we no longer store the coinbase transaction

* Collapse IsLinear function and add callers for the various indexers

* Add utxo unwind

* Attempt at utxo unwind

* tbc: clean up unnecessary use of fmt.Errorf

* XXX

* maybe fix utxo unwind

* try to spend some coinbase tx's

* oops add new tx to mempool

* rebase main fallout

* Add a separate tx signing test so that we can make this into a function that can be called from various spots in the code; this is too brittle to embed

* Remove cruft

* tbc: value of direction is never used (SA4006)

* tbc: add missing err check in TestBlockHeaderEncodeDecode

* tbc: replace loop with peers = append(peers, moreSeeds...) (S1011)

* tbc: remove unused (*Server).blocksMissing function

* tbcd/level: remove unused bytes2Block function

* tbcd/level: remove unused bytes2Header function

* tbcd/level: fix types being nested in one type block

* Create generic transaction from another transaction; lots of help from max

* Oops, print hex for scripthash

* Add a non forking test

* Working unwind!!! help from max and joshua

* Make a note that genesis does not produce a spent output

* mostly working forking unwind tests

* Keep track of balances

* disable broken balance checks

* Note a bug in unwinding utxos

* Note an odd bug

* wait for server to shut down

* wait for port to be open

* do not error if context is canceled

* do not panic on cancel

* tbc: handle err returned by btcutil.NewAddressPubKeyHash in test

* tbc: use errors.New when formatting is not needed

* tbc: improve some spacing, code flow and readability

* tbc: attempt to improve impossible case for direction in TxIndexer

* tbc: remove unnecessary space before : in errors

* tbc: attempt to improve impossible case for direction in UtxoIndexer

* large wtf

* Fix off by one

* remove wait groups, add listener.close, more timing for mine and send

* add longer timing

* The big Spend->Spent rename

* Fix hemictl help

* Bunch of cleanup

* more fixes

* Couple more fixes, lots of help from joshuasing

* Update database/tbcd/database.go

Co-authored-by: Joshua Sing <[email protected]>

---------

Co-authored-by: Joshua Sing <[email protected]>
Co-authored-by: ClaytonNorthey92 <[email protected]>
3 people authored Jul 22, 2024
1 parent 5c764a0 commit f09d4e5
Showing 11 changed files with 2,048 additions and 817 deletions.
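
Before the per-file diffs, here is a minimal sketch of the wind/unwind decision described in the commit message above: an indexer compares the currently indexed tip against the target block and picks a direction, +1 to wind forward or -1 to unwind, reporting a "not linear" error when neither block is an ancestor of the other. The block type and helpers below are invented for illustration and are not the tbc code; only the +1 (wind) / -1 (unwind) convention and the not-linear error mirror this commit.

// direction.go - hypothetical sketch; block, isAncestor and main are
// stand-ins, only the +1/-1 convention and the not-linear error mirror
// this commit.
package main

import (
	"errors"
	"fmt"
)

type block struct {
	height uint64
	parent *block
}

// isAncestor reports whether a is an ancestor of (or equal to) b.
func isAncestor(a, b *block) bool {
	for x := b; x != nil; x = x.parent {
		if x == a {
			return true
		}
	}
	return false
}

var errNotLinear = errors.New("not linear")

// direction decides how to move the index from the current tip to target:
// +1 winds forward, -1 unwinds, 0 means there is nothing to do.
func direction(tip, target *block) (int, error) {
	switch {
	case tip == target:
		return 0, nil
	case isAncestor(tip, target):
		return 1, nil // target extends the indexed chain: wind
	case isAncestor(target, tip):
		return -1, nil // index is ahead of target: unwind
	default:
		// Tip and target are on different forks; unwind to the common
		// ancestor first, then wind up the other branch.
		return 0, fmt.Errorf("%w: %d -> %d", errNotLinear, tip.height, target.height)
	}
}

func main() {
	g := &block{height: 0}
	b1 := &block{height: 1, parent: g}
	b2 := &block{height: 2, parent: b1}
	fmt.Println(direction(b1, b2)) // 1 <nil>  (wind)
	fmt.Println(direction(b2, g))  // -1 <nil> (unwind)
}

The chosen direction is then threaded into the direction-aware BlockTxUpdate and BlockUtxoUpdate calls shown in the database/tbcd diffs below.
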
29 changes: 22 additions & 7 deletions cmd/hemictl/hemictl.go
@@ -206,7 +206,7 @@ func tbcdb() error {
return fmt.Errorf("new server: %w", err)
}
// Open db.
err = s.DBOpen(ctx)
err = s.DBOpen(ctx) // XXX kill this and verify all reversed hashes as parameters
if err != nil {
return fmt.Errorf("db open: %w", err)
}
@@ -398,7 +398,8 @@ func tbcdb() error {
fmt.Println("\tdumpoutputs <prefix>")
fmt.Println("\thelp")
fmt.Println("\tscripthashbyoutpoint [txid] [index]")
fmt.Println("\tspendoutputsbytxid [txid] [index]")
fmt.Println("\tspentoutputsbytxid <txid>")
fmt.Println("\ttxbyid <hash>")
fmt.Println("\ttxindex <height> <count> <maxcache>")
fmt.Println("\tutxoindex <height> <count> <maxcache>")
fmt.Println("\tutxosbyscripthash [hash]")
@@ -460,15 +461,31 @@ func tbcdb() error {
var revTxId [32]byte
copy(revTxId[:], chtxid[:])

bh, err := s.DB().BlocksByTxId(ctx, revTxId)
bh, err := s.DB().BlocksByTxId(ctx, revTxId[:])
if err != nil {
return fmt.Errorf("block by txid: %w", err)
}
for k := range bh {
fmt.Printf("%v\n", bh[k])
}

case "spendoutputsbytxid":
case "txbyid":
txid := args["txid"]
if txid == "" {
return errors.New("txid: must be set")
}
chtxid, err := chainhash.NewHashFromStr(txid)
if err != nil {
return fmt.Errorf("chainhash: %w", err)
}

tx, err := s.TxById(ctx, chtxid)
if err != nil {
return fmt.Errorf("block by txid: %w", err)
}
fmt.Printf("%v\n", spew.Sdump(tx))

case "spentoutputsbytxid":
txid := args["txid"]
if txid == "" {
return errors.New("txid: must be set")
@@ -477,10 +494,8 @@ func tbcdb() error {
if err != nil {
return fmt.Errorf("chainhash: %w", err)
}
var revTxId [32]byte
copy(revTxId[:], chtxid[:])

si, err := s.DB().SpendOutputsByTxId(ctx, revTxId)
si, err := s.SpentOutputsByTxId(ctx, chtxid)
if err != nil {
return fmt.Errorf("spend outputs by txid: %w", err)
}
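
To make the new hemictl subcommands easier to follow, here is a hedged sketch of the same lookups as a single helper: it assumes an already-opened tbc server exposing the TxById and SpentOutputsByTxId methods used above, and the import paths are guesses based on the repository layout rather than verified ones.

// Hypothetical helper mirroring the new txbyid and spentoutputsbytxid
// subcommands: look a transaction up by its id and list the inputs that
// spend its outputs. Import paths are assumed, not taken from this diff.
package example

import (
	"context"
	"fmt"

	"github.com/btcsuite/btcd/chaincfg/chainhash"
	"github.com/davecgh/go-spew/spew"

	"github.com/hemilabs/heminetwork/service/tbc"
)

func dumpTx(ctx context.Context, s *tbc.Server, txid string) error {
	ch, err := chainhash.NewHashFromStr(txid)
	if err != nil {
		return fmt.Errorf("chainhash: %w", err)
	}

	// Fetch the transaction itself.
	tx, err := s.TxById(ctx, ch)
	if err != nil {
		return fmt.Errorf("tx by id: %w", err)
	}
	fmt.Printf("%v\n", spew.Sdump(tx))

	// List the inputs that spend this transaction's outputs.
	spent, err := s.SpentOutputsByTxId(ctx, ch)
	if err != nil {
		return fmt.Errorf("spent outputs by txid: %w", err)
	}
	for _, si := range spent {
		fmt.Printf("%v\n", si)
	}
	return nil
}
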
19 changes: 7 additions & 12 deletions database/level/level.go
@@ -44,18 +44,14 @@ func init() {
loggo.ConfigureLoggers(logLevel)
}

type (
Pool map[string]*leveldb.DB
Database struct {
mtx sync.RWMutex
wg sync.WaitGroup // Wait group for notification handler exit
type Pool map[string]*leveldb.DB

pool Pool // database pool
type Database struct {
mtx sync.RWMutex
pool Pool // database pool

ntfn map[database.NotificationName]int // Notification handlers
home string // leveld toplevel database directory
}
)
home string // leveld toplevel database directory
}

var _ database.Database = (*Database)(nil)

@@ -68,8 +64,7 @@ func (l *Database) Close() error {

var errSeen error // XXX return last error for now
for k, v := range l.pool {
err := v.Close()
if err != nil {
if err := v.Close(); err != nil {
// do continue, leveldb does not like unfresh shutdowns
log.Errorf("close %v: %v", k, err)
errSeen = err
45 changes: 38 additions & 7 deletions database/tbcd/database.go
@@ -65,10 +65,10 @@ type Database interface {
BlockByHash(ctx context.Context, hash []byte) (*Block, error)

// Transactions
BlockUtxoUpdate(ctx context.Context, utxos map[Outpoint]CacheOutput) error
BlockTxUpdate(ctx context.Context, txs map[TxKey]*TxValue) error
BlocksByTxId(ctx context.Context, txId TxId) ([]BlockHash, error)
SpendOutputsByTxId(ctx context.Context, txId TxId) ([]SpendInfo, error)
BlockUtxoUpdate(ctx context.Context, direction int, utxos map[Outpoint]CacheOutput) error
BlockTxUpdate(ctx context.Context, direction int, txs map[TxKey]*TxValue) error
BlocksByTxId(ctx context.Context, txId []byte) ([]BlockHash, error)
SpentOutputsByTxId(ctx context.Context, txId []byte) ([]SpentInfo, error)

// Peer manager
PeersStats(ctx context.Context) (int, int) // good, bad count
@@ -82,6 +82,12 @@ type Database interface {
UtxosByScriptHash(ctx context.Context, sh ScriptHash, start uint64, count uint64) ([]Utxo, error)
}

// XXX there exist various types in this file that need to be reevaluated.
// Such as BlockHash, ScriptHash etc. They exist for convenience reasons but
// it may be worth to switch to chainhash and btcd.OutPoint etc. This does need
// thought because we have composites that are needed for the code to function
// properly.

// BlockHeader contains the first 80 raw bytes of a bitcoin block plus its
// location information (hash+height) and the cumulative difficulty.
type BlockHeader struct {
@@ -142,7 +148,7 @@ type BlockIdentifier struct {
Hash database.ByteArray
}

type SpendInfo struct {
type SpentInfo struct {
BlockHash BlockHash
TxId TxId
InputIndex uint32
@@ -199,7 +205,7 @@ type CacheOutput [32 + 8 + 4]byte // script_hash + value + out_idx
// String reutrns pretty printable CacheOutput. Hash is not reversed since it is an
// opaque pointer. It prints satoshis@script_hash:output_index
func (c CacheOutput) String() string {
return fmt.Sprintf("%d @ %v:%d", binary.BigEndian.Uint64(c[32:40]),
return fmt.Sprintf("%d @ %x:%d", binary.BigEndian.Uint64(c[32:40]),
c[0:32], binary.BigEndian.Uint32(c[40:]))
}

@@ -314,6 +320,11 @@ func (t TxId) String() string {
return hex.EncodeToString(rev[:])
}

func (t TxId) Hash() *chainhash.Hash {
h, _ := chainhash.NewHash(t[:])
return h
}

func NewTxId(x [32]byte) (txId TxId) {
copy(txId[:], x[:])
return
@@ -340,6 +351,11 @@ func (bh BlockHash) String() string {
return hex.EncodeToString(rev[:])
}

func (bh BlockHash) Hash() *chainhash.Hash {
h, _ := chainhash.NewHash(bh[:])
return h
}

func NewBlockHash(x [32]byte) (blockHash BlockHash) {
copy(blockHash[:], x[:])
return
@@ -378,7 +394,7 @@ func NewScriptHashFromBytes(x []byte) (scriptHash ScriptHash, err error) {

// Spent Transaction:
//
// s + txin.PrevOutPoint.Hash + txin.PrevOutPoint.Index + blockhash = txid + txin_index + blockhash | [1 + 32 + 4 + 32] = [32 + 4]
// s + txin.PrevOutPoint.Hash + txin.PrevOutPoint.Index + blockhash = txid + txin_index | [1 + 32 + 4 + 32] = [32 + 4]
//
// Transaction ID to Block mapping:
//
@@ -414,6 +430,21 @@ func NewTxMapping(txId, blockHash *chainhash.Hash) (txKey TxKey) {
return txKey
}

func TxIdBlockHashFromTxKey(txKey TxKey) (*TxId, *BlockHash, error) {
if txKey[0] != 't' {
return nil, nil, fmt.Errorf("invalid magic 0x%02x", txKey[0])
}
txId, err := NewTxIdFromBytes(txKey[1:33])
if err != nil {
return nil, nil, fmt.Errorf("invalid tx id: %w", err)
}
blockHash, err := NewBlockHashFromBytes(txKey[33:65])
if err != nil {
return nil, nil, fmt.Errorf("invalid block hash: %w", err)
}
return &txId, &blockHash, nil
}

// Helper functions

// B2H converts a raw block header to a wire block header structure.
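
The key layouts in the comments above pack a one-byte magic with fixed-width fields: 't' + txid + blockhash for the transaction-to-block mapping, and 's' + previous outpoint txid + previous outpoint index + blockhash for a spent output, whose value holds the spending txid and input index. The standalone sketch below illustrates those widths with plain byte slices; it does not use the real TxKey type, and the big-endian index encoding is an assumption carried over from the value encoding used elsewhere in this diff.

// Illustrative key layouts from the comments above (not the real tbcd types):
//
//   't' + txid + blockhash                          -> tx-id-to-block key (1 + 32 + 32)
//   's' + prevout txid + prevout index + blockhash  -> spent-transaction key (1 + 32 + 4 + 32)
package main

import (
	"encoding/binary"
	"fmt"
)

func txToBlockKey(txid, blockHash [32]byte) []byte {
	key := make([]byte, 0, 1+32+32)
	key = append(key, 't')
	key = append(key, txid[:]...)
	key = append(key, blockHash[:]...)
	return key
}

func spentKey(prevTxid [32]byte, prevIndex uint32, blockHash [32]byte) []byte {
	key := make([]byte, 0, 1+32+4+32)
	key = append(key, 's')
	key = append(key, prevTxid[:]...)
	var idx [4]byte
	binary.BigEndian.PutUint32(idx[:], prevIndex) // byte order assumed
	key = append(key, idx[:]...)
	key = append(key, blockHash[:]...)
	return key
}

func main() {
	var txid, bh [32]byte
	fmt.Printf("t key: %d bytes, s key: %d bytes\n",
		len(txToBlockKey(txid, bh)), len(spentKey(txid, 1, bh)))
}

Because the spent key starts with the spent transaction's id, SpentOutputsByTxId can find every spending input with a single prefix scan, as in the level.go changes below.
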
56 changes: 36 additions & 20 deletions database/tbcd/level/level.go
@@ -312,7 +312,9 @@ func (l *ldb) BlockHeaderGenesisInsert(ctx context.Context, bh [80]byte) error {

hhKey := heightHashToKey(0, bhash[:])
hhBatch.Put(hhKey, []byte{})
ebh := encodeBlockHeader(0, bh, new(big.Int))
cdiff := big.NewInt(0)
cdiff = new(big.Int).Add(cdiff, blockchain.CalcWork(wbh.Bits))
ebh := encodeBlockHeader(0, bh, cdiff)
bhBatch.Put(bhash[:], ebh[:])

bhBatch.Put([]byte(bhsCanonicalTipKey), ebh[:])
@@ -724,7 +726,7 @@ func (l *ldb) BlockByHash(ctx context.Context, hash []byte) (*tbcd.Block, error)
}, nil
}

func (l *ldb) BlocksByTxId(ctx context.Context, txId tbcd.TxId) ([]tbcd.BlockHash, error) {
func (l *ldb) BlocksByTxId(ctx context.Context, txId []byte) ([]tbcd.BlockHash, error) {
log.Tracef("BlocksByTxId")
defer log.Tracef("BlocksByTxId exit")

@@ -746,29 +748,29 @@ func (l *ldb) BlocksByTxId(ctx context.Context, txId tbcd.TxId) ([]tbcd.BlockHash, error) {
return nil, fmt.Errorf("blocks by id iterator: %w", err)
}
if len(blocks) == 0 {
ch, _ := chainhash.NewHash(txId[:])
return nil, database.NotFoundError(fmt.Sprintf("tx not found: %v", ch))
ctxid, _ := chainhash.NewHash(txId)
return nil, database.NotFoundError(fmt.Sprintf("tx not found: %v", ctxid))
}

return blocks, nil
}

func (l *ldb) SpendOutputsByTxId(ctx context.Context, txId tbcd.TxId) ([]tbcd.SpendInfo, error) {
log.Tracef("SpendOutputByOutpoint")
defer log.Tracef("SpendOutputByOutpoint exit")
func (l *ldb) SpentOutputsByTxId(ctx context.Context, txId []byte) ([]tbcd.SpentInfo, error) {
log.Tracef("SpentOutputByOutpoint")
defer log.Tracef("SpentOutputByOutpoint exit")

si := make([]tbcd.SpendInfo, 0, 2)
si := make([]tbcd.SpentInfo, 0, 2)
txDB := l.pool[level.TransactionsDB]
var key [1 + 32]byte
key[0] = 's'
copy(key[1:], txId[:])
it := txDB.NewIterator(&util.Range{Start: key[:]}, nil)
defer it.Release()
for it.Next() {
if !bytes.Equal(it.Key()[1:33], key[1:33]) {
break
if !bytes.Equal(it.Key()[:33], key[:]) {
continue
}
var s tbcd.SpendInfo
var s tbcd.SpentInfo
copy(s.TxId[:], it.Value()[0:32])
copy(s.BlockHash[:], it.Key()[37:])
s.InputIndex = binary.BigEndian.Uint32(it.Value()[32:36])
@@ -778,8 +780,7 @@ func (l *ldb) SpendOutputsByTxId(ctx context.Context, txId tbcd.TxId) ([]tbcd.SpendInfo, error) {
return nil, fmt.Errorf("blocks by id iterator: %w", err)
}
if len(si) == 0 {
ch, _ := chainhash.NewHash(txId[:])
return nil, database.NotFoundError(fmt.Sprintf("not found %v", ch))
return nil, database.NotFoundError(fmt.Sprintf("not found %v", txId))
}

return si, nil
@@ -856,10 +857,14 @@ func (l *ldb) UtxosByScriptHash(ctx context.Context, sh tbcd.ScriptHash, start uint64, count uint64) ([]tbcd.Utxo, error) {
return utxos, nil
}

func (l *ldb) BlockUtxoUpdate(ctx context.Context, utxos map[tbcd.Outpoint]tbcd.CacheOutput) error {
func (l *ldb) BlockUtxoUpdate(ctx context.Context, direction int, utxos map[tbcd.Outpoint]tbcd.CacheOutput) error {
log.Tracef("BlockUtxoUpdate")
defer log.Tracef("BlockUtxoUpdate exit")

if !(direction == 1 || direction == -1) {
return fmt.Errorf("invalid direction: %v", direction)
}

// outputs
outsTx, outsCommit, outsDiscard, err := l.startTransaction(level.OutputsDB)
if err != nil {
@@ -877,6 +882,8 @@ func (l *ldb) BlockUtxoUpdate(ctx context.Context, utxos map[tbcd.Outpoint]tbcd.CacheOutput) error {
copy(hop[33:65], op.TxId())
copy(hop[65:], utxo.OutputIndexBytes())

// The cache is updated in a way that makes the direction
// irrelevant.
if utxo.IsDelete() {
// Delete balance and utxos
outsBatch.Delete(op[:][:])
Expand All @@ -886,6 +893,7 @@ func (l *ldb) BlockUtxoUpdate(ctx context.Context, utxos map[tbcd.Outpoint]tbcd.
outsBatch.Put(op[:], utxo.ScriptHashSlice())
outsBatch.Put(hop[:], utxo.ValueBytes())
}

// XXX this probably should be done by the caller but we do it
// here to lower memory pressure as large gobs of data are
// written to disk.
Expand All @@ -905,10 +913,14 @@ func (l *ldb) BlockUtxoUpdate(ctx context.Context, utxos map[tbcd.Outpoint]tbcd.
return nil
}

func (l *ldb) BlockTxUpdate(ctx context.Context, txs map[tbcd.TxKey]*tbcd.TxValue) error {
func (l *ldb) BlockTxUpdate(ctx context.Context, direction int, txs map[tbcd.TxKey]*tbcd.TxValue) error {
log.Tracef("BlockTxUpdate")
defer log.Tracef("BlockTxUpdate exit")

if !(direction == 1 || direction == -1) {
return fmt.Errorf("invalid direction: %v", direction)
}

// transactions
txsTx, txsCommit, txsDiscard, err := l.startTransaction(level.TransactionsDB)
if err != nil {
@@ -931,12 +943,16 @@ func (l *ldb) BlockTxUpdate(ctx context.Context, txs map[tbcd.TxKey]*tbcd.TxValue) error {
default:
return fmt.Errorf("invalid cache entry: %v", spew.Sdump(k))
}
switch direction {
case -1:
txsBatch.Delete(key)
case 1:
txsBatch.Put(key, value)
}

txsBatch.Put(key, value)
// log.Infof("%v:%v", spew.Sdump(key), spew.Sdump(value))
// // XXX this probably should be done by the caller but we do it
// // here to lower memory pressure as large gobs of data are
// // written to disk.
// XXX this probably should be done by the caller but we do it
// here to lower memory pressure as large gobs of data are
// written to disk.
delete(txs, k)
}

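
Finally, a hedged sketch of how an indexer might drive the new direction-aware signatures; the applyBlock helper, its cache parameters, and the tbcd import path are placeholders, only the BlockTxUpdate and BlockUtxoUpdate signatures come from this diff.

// Hypothetical caller of the direction-aware update methods added in this
// diff: direction 1 while winding new blocks into the index, -1 while
// unwinding them back out. Import path assumed from the repository layout.
package example

import (
	"context"
	"fmt"

	"github.com/hemilabs/heminetwork/database/tbcd"
)

func applyBlock(ctx context.Context, db tbcd.Database,
	txCache map[tbcd.TxKey]*tbcd.TxValue,
	utxoCache map[tbcd.Outpoint]tbcd.CacheOutput,
	unwind bool,
) error {
	direction := 1
	if unwind {
		direction = -1
	}
	// Both calls drain their cache map as entries are written (or removed,
	// when unwinding) to keep memory pressure down, per the XXX notes above.
	if err := db.BlockTxUpdate(ctx, direction, txCache); err != nil {
		return fmt.Errorf("block tx update: %w", err)
	}
	if err := db.BlockUtxoUpdate(ctx, direction, utxoCache); err != nil {
		return fmt.Errorf("block utxo update: %w", err)
	}
	return nil
}

On a wind the transaction batch Puts entries; on an unwind the same keys are Deleted, which is exactly what the direction switch at the end of BlockTxUpdate above does.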