From 88047e707e2db8522e2ad77c5f849e55bc94cd10 Mon Sep 17 00:00:00 2001 From: Marco Peereboom Date: Tue, 16 Apr 2024 15:43:43 +0100 Subject: [PATCH] Add tbcd, a small bitcoin daemon that participates on bitcoin p2p (#50) TBC is a Bitcoin indexer which Hemi embeds inside the Hemi virtual machine for hVM Bitcoin Interoperability. Features: * Syncs the entire Bitcoin blockchain (headers and blocks) over P2P * Indexes UTXOs and transactions * Supports indexing up to specific height to ensure deterministic indexer state across multiple Hemi nodes Indexer State Supported Queries: * Headers by height/hash and for tip * Height of header hash * Balance by address/scripthash * UTXOs by address/scripthash * Transactions by TxID * Block(s) containing transaction * Scripthash from Outpoint (TxID + output index) Co-authored-by: Joshua Sing Co-authored-by: John C. Vernaleo Co-authored-by: ClaytonNorthey92 Co-authored-by: Max Sanchez --- .github/workflows/go.yml | 1 + .gitignore | 3 + Makefile | 20 +- api/api.go | 2 +- api/tbcapi/tbcapi.go | 249 +++ cmd/btctool/bdf/bdf.go | 231 +++ cmd/btctool/blockstream/blockstream.go | 97 ++ cmd/btctool/btctool.go | 694 ++++++++ cmd/btctool/btctool/btctool.go | 37 + cmd/btctool/httpclient/httpclient.go | 50 + cmd/hemictl/hemictl.go | 523 +++++- cmd/tbcd/README.md | 56 + cmd/tbcd/tbcd.go | 178 ++ database/bfgd/postgres/postgres.go | 1 + database/database.go | 23 +- database/database_test.go | 31 + database/level/level.go | 211 +++ database/postgres/postgres.go | 2 + database/tbcd/TESTS.md | 11 + database/tbcd/database.go | 371 ++++ database/tbcd/database_ext_test.go | 5 + database/tbcd/level/level.go | 875 ++++++++++ database/tbcd/level/level_test.go | 419 +++++ e2e/docker-compose.yml | 1 + go.mod | 8 +- go.sum | 22 +- service/tbc/crawler.go | 444 +++++ service/tbc/crawler_test.go | 125 ++ service/tbc/peer.go | 202 +++ service/tbc/rpc.go | 559 ++++++ service/tbc/tbc.go | 1777 ++++++++++++++++++++ service/tbc/tbc_test.go | 2150 
++++++++++++++++++++++++ service/tbc/ulimit_darwin.go | 65 + service/tbc/ulimit_linux.go | 69 + service/tbc/ulimit_other.go | 13 + 35 files changed, 9503 insertions(+), 22 deletions(-) create mode 100644 api/tbcapi/tbcapi.go create mode 100644 cmd/btctool/bdf/bdf.go create mode 100644 cmd/btctool/blockstream/blockstream.go create mode 100644 cmd/btctool/btctool.go create mode 100644 cmd/btctool/btctool/btctool.go create mode 100644 cmd/btctool/httpclient/httpclient.go create mode 100644 cmd/tbcd/README.md create mode 100644 cmd/tbcd/tbcd.go create mode 100644 database/level/level.go create mode 100644 database/tbcd/TESTS.md create mode 100644 database/tbcd/database.go create mode 100644 database/tbcd/database_ext_test.go create mode 100644 database/tbcd/level/level.go create mode 100644 database/tbcd/level/level_test.go create mode 100644 service/tbc/crawler.go create mode 100644 service/tbc/crawler_test.go create mode 100644 service/tbc/peer.go create mode 100644 service/tbc/rpc.go create mode 100644 service/tbc/tbc.go create mode 100644 service/tbc/tbc_test.go create mode 100644 service/tbc/ulimit_darwin.go create mode 100644 service/tbc/ulimit_linux.go create mode 100644 service/tbc/ulimit_other.go diff --git a/.github/workflows/go.yml b/.github/workflows/go.yml index 98c3e4784..0ae46ef9a 100644 --- a/.github/workflows/go.yml +++ b/.github/workflows/go.yml @@ -60,6 +60,7 @@ jobs: if: (success() || failure()) && steps.deps.outcome == 'success' env: PGTESTURI: "postgres://postgres:postgres@localhost:5432/postgres?sslmode=disable" + HEMI_DOCKER_TESTS: "1" run: | make git diff --exit-code diff --git a/.gitignore b/.gitignore index 70257fde4..ef04addd3 100644 --- a/.gitignore +++ b/.gitignore @@ -4,6 +4,9 @@ /pkg/ /.gocache/ +## Tests +/service/tbc/.testleveldb/ + ### Common editors ## Vim # Swap diff --git a/Makefile b/Makefile index 7621b5aff..b0c26cb8c 100644 --- a/Makefile +++ b/Makefile @@ -20,24 +20,28 @@ project = heminetwork version = $(shell git describe 
--tags 2>/dev/null || echo "v0.0.0") cmds = \ - bfgd \ - bssd \ - extool \ - keygen \ - popmd \ - hemictl + bfgd \ + bssd \ + extool \ + hemictl \ + keygen \ + popmd \ + tbcd .PHONY: all clean clean-dist deps $(cmds) build install lint lint-deps tidy race test vulncheck \ vulncheck-deps dist archive sources checksums networktest all: lint tidy test build install -clean: clean-dist +clean: clean-dist clean-test rm -rf $(GOBIN) $(GOCACHE) $(GOPKG) clean-dist: rm -rf $(DIST) +clean-test: + rm -rf $(PROJECTPATH)/service/tbc/.testleveldb/ + deps: lint-deps vulncheck-deps go mod download go mod verify @@ -54,7 +58,7 @@ lint: $(shell go env GOPATH)/bin/goimports -local github.com/hemilabs/heminetwork -w -l . $(shell go env GOPATH)/bin/gofumpt -w -l . $(shell go env GOPATH)/bin/addlicense -c "Hemi Labs, Inc." -f $(PROJECTPATH)/license_header.txt \ - -ignore "{.idea,.vscode}/**" -ignore ".github/release.yml" -ignore ".github/ISSUE_TEMPLATE/**" -v . + -ignore "{.idea,.vscode}/**" -ignore ".github/release.yml" -ignore ".github/ISSUE_TEMPLATE/**" . go vet ./... lint-deps: diff --git a/api/api.go b/api/api.go index fcacf075e..beaa85062 100644 --- a/api/api.go +++ b/api/api.go @@ -11,7 +11,7 @@ import ( "strings" ) -// hexDecode decodes a string that may be prefixed with " and/or 0x. Thus +// hexDecode decodes a string that may be prefixed with " and/or 0x. Thus, // "0x00" and 0x00 or 00 are all valid hex encodings. If length is provided the // decoded size must exactly match. The length parameter will be ignored if it // is less than 0. diff --git a/api/tbcapi/tbcapi.go b/api/tbcapi/tbcapi.go new file mode 100644 index 000000000..1b7e06e82 --- /dev/null +++ b/api/tbcapi/tbcapi.go @@ -0,0 +1,249 @@ +// Copyright (c) 2024 Hemi Labs, Inc. +// Use of this source code is governed by the MIT License, +// which can be found in the LICENSE file. 
+ +package tbcapi + +import ( + "context" + "fmt" + "maps" + "reflect" + + "github.com/hemilabs/heminetwork/api" + "github.com/hemilabs/heminetwork/api/protocol" +) + +const ( + APIVersion = 1 + + CmdPingRequest = "tbcapi-ping-request" + CmdPingResponse = "tbcapi-ping-response" + + CmdBlockHeadersByHeightRawRequest = "tbcapi-block-headers-by-height-raw-request" + CmdBlockHeadersByHeightRawResponse = "tbcapi-block-headers-by-height-raw-response" + + CmdBlockHeadersByHeightRequest = "tbcapi-block-headers-by-height-request" + CmdBlockHeadersByHeightResponse = "tbcapi-block-headers-by-height-response" + + CmdBlockHeadersBestRawRequest = "tbcapi-block-headers-best-raw-request" + CmdBlockHeadersBestRawResponse = "tbcapi-block-headers-best-raw-response" + + CmdBlockHeadersBestRequest = "tbcapi-block-headers-best-request" + CmdBlockHeadersBestResponse = "tbcapi-block-headers-best-response" + + CmdBalanceByAddressRequest = "tbcapi-balance-by-address-request" + CmdBalanceByAddressResponse = "tbcapi-balance-by-address-response" + + CmdUtxosByAddressRawRequest = "tbcapi-utxos-by-address-raw-request" + CmdUtxosByAddressRawResponse = "tbcapi-utxos-by-address-raw-response" + + CmdUtxosByAddressRequest = "tbcapi-utxos-by-address-request" + CmdUtxosByAddressResponse = "tbcapi-utxos-by-address-response" + + CmdTxByIdRawRequest = "tbcapi-tx-by-id-raw-request" + CmdTxByIdRawResponse = "tbcapi-tx-by-id-raw-response" + + CmdTxByIdRequest = "tbcapi-tx-by-id-request" + CmdTxByIdResponse = "tbcapi-tx-by-id-response" +) + +var ( + APIVersionRoute = fmt.Sprintf("v%d", APIVersion) + RouteWebsocket = fmt.Sprintf("/%s/ws", APIVersionRoute) + + DefaultListen = "localhost:8082" + DefaultURL = fmt.Sprintf("ws://%s/%s", DefaultListen, RouteWebsocket) +) + +type ( + PingRequest protocol.PingRequest + PingResponse protocol.PingResponse +) + +type BlockHeader struct { + Version int32 `json:"version"` + PrevHash string `json:"prev_hash"` + MerkleRoot string `json:"merkle_root"` + Timestamp int64 
`json:"timestamp"` + Bits string `json:"bits"` + Nonce uint32 `json:"nonce"` +} + +type BlockHeadersByHeightRawRequest struct { + Height uint32 `json:"height"` +} + +type BlockHeadersByHeightRawResponse struct { + BlockHeaders []api.ByteSlice `json:"block_headers"` + Error *protocol.Error `json:"error,omitempty"` +} + +type BlockHeadersByHeightRequest struct { + Height uint32 `json:"height"` +} + +type BlockHeadersByHeightResponse struct { + BlockHeaders []*BlockHeader `json:"block_headers"` + Error *protocol.Error `json:"error,omitempty"` +} + +type BlockHeadersBestRawRequest struct{} + +type BlockHeadersBestRawResponse struct { + Height uint64 `json:"height"` + BlockHeaders []api.ByteSlice `json:"block_headers"` + Error *protocol.Error `json:"error,omitempty"` +} + +type BlockHeadersBestRequest struct{} + +type BlockHeadersBestResponse struct { + Height uint64 `json:"height"` + BlockHeaders []*BlockHeader `json:"block_headers"` + Error *protocol.Error `json:"error,omitempty"` +} + +type BalanceByAddressRequest struct { + Address string `json:"address"` +} + +type BalanceByAddressResponse struct { + Balance uint64 `json:"balance"` + Error *protocol.Error `json:"error,omitempty"` +} + +type UtxosByAddressRawRequest struct { + Address string `json:"address"` + Start uint `json:"start"` + Count uint `json:"count"` +} + +type UtxosByAddressRawResponse struct { + Utxos []api.ByteSlice `json:"utxos"` + Error *protocol.Error `json:"error,omitempty"` +} + +type UtxosByAddressRequest struct { + Address string `json:"address"` + Start uint `json:"start"` + Count uint `json:"count"` +} + +type Utxo struct { + TxId api.ByteSlice `json:"tx_id"` + Value uint64 `json:"value"` + OutIndex uint32 `json:"out_index"` +} + +type UtxosByAddressResponse struct { + Utxos []Utxo `json:"utxos"` + Error *protocol.Error `json:"error,omitempty"` +} + +type TxByIdRawRequest struct { + TxId api.ByteSlice `json:"tx_id"` +} + +type TxByIdRawResponse struct { + Tx api.ByteSlice `json:"tx"` + Error 
*protocol.Error `json:"error,omitempty"` +} + +type TxByIdRequest struct { + TxId api.ByteSlice `json:"tx_id"` +} + +type TxByIdResponse struct { + Tx Tx `json:"tx"` + Error *protocol.Error `json:"error,omitempty"` +} + +type OutPoint struct { + Hash api.ByteSlice `json:"hash"` + Index uint32 `json:"index"` +} + +type TxWitness []api.ByteSlice + +type TxIn struct { + PreviousOutPoint OutPoint `json:"outpoint"` + SignatureScript api.ByteSlice `json:"signature_script"` + Witness TxWitness `json:"tx_witness"` + Sequence uint32 `json:"sequence"` +} + +type TxOut struct { + Value int64 `json:"value"` + PkScript api.ByteSlice `json:"pk_script"` +} + +type Tx struct { + Version int32 `json:"version"` + LockTime uint32 `json:"lock_time"` + TxIn []*TxIn `json:"tx_in"` + TxOut []*TxOut `json:"tx_out"` +} + +var commands = map[protocol.Command]reflect.Type{ + CmdPingRequest: reflect.TypeOf(PingRequest{}), + CmdPingResponse: reflect.TypeOf(PingResponse{}), + CmdBlockHeadersByHeightRawRequest: reflect.TypeOf(BlockHeadersByHeightRawRequest{}), + CmdBlockHeadersByHeightRawResponse: reflect.TypeOf(BlockHeadersByHeightRawResponse{}), + CmdBlockHeadersByHeightRequest: reflect.TypeOf(BlockHeadersByHeightRequest{}), + CmdBlockHeadersByHeightResponse: reflect.TypeOf(BlockHeadersByHeightResponse{}), + CmdBlockHeadersBestRawRequest: reflect.TypeOf(BlockHeadersBestRawRequest{}), + CmdBlockHeadersBestRawResponse: reflect.TypeOf(BlockHeadersBestRawResponse{}), + CmdBlockHeadersBestRequest: reflect.TypeOf(BlockHeadersBestRequest{}), + CmdBlockHeadersBestResponse: reflect.TypeOf(BlockHeadersBestResponse{}), + CmdBalanceByAddressRequest: reflect.TypeOf(BalanceByAddressRequest{}), + CmdBalanceByAddressResponse: reflect.TypeOf(BalanceByAddressResponse{}), + CmdUtxosByAddressRawRequest: reflect.TypeOf(UtxosByAddressRawRequest{}), + CmdUtxosByAddressRawResponse: reflect.TypeOf(UtxosByAddressRawResponse{}), + CmdUtxosByAddressRequest: reflect.TypeOf(UtxosByAddressRequest{}), + 
CmdUtxosByAddressResponse: reflect.TypeOf(UtxosByAddressResponse{}), + CmdTxByIdRawRequest: reflect.TypeOf(TxByIdRawRequest{}), + CmdTxByIdRawResponse: reflect.TypeOf(TxByIdRawResponse{}), + CmdTxByIdRequest: reflect.TypeOf(TxByIdRequest{}), + CmdTxByIdResponse: reflect.TypeOf(TxByIdResponse{}), +} + +type tbcAPI struct{} + +func (a *tbcAPI) Commands() map[protocol.Command]reflect.Type { + return commands +} + +func APICommands() map[protocol.Command]reflect.Type { + return maps.Clone(commands) +} + +// Write is the low level primitive of a protocol Write. One should generally +// not use this function and use WriteConn and Call instead. +func Write(ctx context.Context, c protocol.APIConn, id string, payload any) error { + return protocol.Write(ctx, c, &tbcAPI{}, id, payload) +} + +// Read is the low level primitive of a protocol Read. One should generally +// not use this function and use ReadConn instead. +func Read(ctx context.Context, c protocol.APIConn) (protocol.Command, string, any, error) { + return protocol.Read(ctx, c, &tbcAPI{}) +} + +// Call is a blocking call. One should use ReadConn when using Call or else the +// completion will end up in the Read instead of being completed as expected. +func Call(ctx context.Context, c *protocol.Conn, payload any) (protocol.Command, string, any, error) { + return c.Call(ctx, &tbcAPI{}, payload) +} + +// WriteConn writes to Conn. It is equivalent to Write but exists for symmetry +// reasons. +func WriteConn(ctx context.Context, c *protocol.Conn, id string, payload any) error { + return c.Write(ctx, &tbcAPI{}, id, payload) +} + +// ReadConn reads from Conn and performs callbacks. One should use ReadConn over +// Read when mixing Write, WriteConn and Call. 
+func ReadConn(ctx context.Context, c *protocol.Conn) (protocol.Command, string, any, error) { + return c.Read(ctx, &tbcAPI{}) +} diff --git a/cmd/btctool/bdf/bdf.go b/cmd/btctool/bdf/bdf.go new file mode 100644 index 000000000..4bc92af31 --- /dev/null +++ b/cmd/btctool/bdf/bdf.go @@ -0,0 +1,231 @@ +// Copyright (c) 2024 Hemi Labs, Inc. +// Use of this source code is governed by the MIT License, +// which can be found in the LICENSE file. + +package bdf + +import ( + "bytes" + "encoding/hex" + "encoding/json" + "errors" + "fmt" + "io/fs" + "os" + "path/filepath" + "sync" + + "github.com/btcsuite/btcd/wire" + "github.com/juju/loggo" +) + +// XXX add tests + +var ( + log = loggo.GetLogger("bdf") + dfm sync.RWMutex +) + +const ( + DefaultNet = "testnet" + HeightFilename = "height" +) + +var DefaultDataDir = filepath.Join(DefaultNet, "bitcoin_headers") + +type LastHeight struct { + Height int `json:"height"` + Hash string `json:"hash"` +} + +type Header struct { + Height int `json:"height"` + Header string `json:"header"` +} + +func ReadHeight(filename string) (int, string, error) { + dfm.RLock() + defer dfm.RUnlock() + + f, err := os.Open(filename) + if err != nil { + return 0, "", err + } + d := json.NewDecoder(f) + var lh LastHeight + err = d.Decode(&lh) + if err != nil { + f.Close() + return 0, "", err + } + err = f.Close() + if err != nil { + return 0, "", err + } + return lh.Height, lh.Hash, nil +} + +func ReadHeader(filename string) (*wire.BlockHeader, int, error) { + dfm.RLock() + defer dfm.RUnlock() + + f, err := os.Open(filename) + if err != nil { + return nil, 0, err + } + d := json.NewDecoder(f) + var h Header + err = d.Decode(&h) + if err != nil { + f.Close() + return nil, 0, err + } + err = f.Close() + if err != nil { + return nil, 0, err + } + dh, err := hex.DecodeString(h.Header) + if err != nil { + return nil, 0, err + } + if len(dh) != 80 { + return nil, 0, err + } + var wbh wire.BlockHeader + err = wbh.Deserialize(bytes.NewReader(dh)) + if err != nil 
{ + return nil, 0, err + } + return &wbh, h.Height, nil +} + +func Header2Bytes(wbh *wire.BlockHeader) ([]byte, error) { + var b bytes.Buffer + err := wbh.Serialize(&b) + if err != nil { + return nil, err + } + return b.Bytes(), nil +} + +func Header2ExactBytes(wbh *wire.BlockHeader, header *[80]byte) error { + b, err := Header2Bytes(wbh) + if err != nil { + return err + } + if len(b) != 80 { + return fmt.Errorf("should not happen length %v", len(b)) + } + copy(header[:], b) + return nil +} + +func Header2Hex(wbh *wire.BlockHeader) (string, error) { + b, err := Header2Bytes(wbh) + if err != nil { + return "", err + } + return hex.EncodeToString(b), nil +} + +func Hex2Header(header string) (*wire.BlockHeader, error) { + blockHeader, err := hex.DecodeString(header) + if err != nil { + return nil, fmt.Errorf("DecodeString: %v", err) + } + var bh wire.BlockHeader + err = bh.Deserialize(bytes.NewReader(blockHeader)) + if err != nil { + return nil, fmt.Errorf("Deserialize: %v", err) + } + return &bh, nil +} + +// writeHeight reads the latest height and overwrites it if the provided height +// is higher. 
Poor man's fork resolution :-) +func writeHeight(height int, hash, dir string) error { + log.Tracef("WriteHeight %v %v", height, hash) + defer log.Tracef("WriteHeight exit") + + var lh LastHeight + filename := filepath.Join(dir, HeightFilename) + f, err := os.Open(filename) + if err != nil { + if errors.Is(err, fs.ErrNotExist) { + // do nothing + defer f.Close() + } else { + return fmt.Errorf("Open: %v", err) + } + } else { + defer f.Close() + d := json.NewDecoder(f) + err = d.Decode(&lh) + if err != nil { + return fmt.Errorf("%v corrupt: %v", filename, err) + } + } + if lh.Height > height { + log.Tracef("not overwriting height: %v > %v", lh.Height, height) + return nil + } + fw, err := os.OpenFile(filename, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0o600) + if err != nil { + return fmt.Errorf("OpenFile: %v", err) + } + e := json.NewEncoder(fw) + lh.Height = height + lh.Hash = hash + err = e.Encode(lh) + if err != nil { + return fmt.Errorf("Encode: %v", err) + } + return fw.Close() +} + +// writeHeader writes a header. We pass the hash to verify that the header is correct.
+func writeHeader(height int, hash, header, dir string) error { + filename := filepath.Join(dir, hash) + overwrite := false + if !overwrite { + _, err := os.Stat(filename) + if err == nil { + return fmt.Errorf("caught up at height: %v", height) + } + } + f, err := os.Create(filename) + if err != nil { + return fmt.Errorf("Create: %v", err) + } + e := json.NewEncoder(f) + err = e.Encode(Header{ + Height: height, + Header: header, + }) + if err != nil { + f.Close() + return fmt.Errorf("Encode: %v", err) + } + + return f.Close() +} + +func WriteHeader(height int, header, dir string) error { + bh, err := Hex2Header(header) + if err != nil { + return fmt.Errorf("Hex2Header: %v", err) + } + + dfm.Lock() + defer dfm.Unlock() + + err = writeHeader(height, bh.BlockHash().String(), header, dir) + if err != nil { + return fmt.Errorf("writeHeader: %v", err) + } + err = writeHeight(height, bh.BlockHash().String(), dir) + if err != nil { + return fmt.Errorf("writeHeight: %v", err) + } + return nil +} diff --git a/cmd/btctool/blockstream/blockstream.go b/cmd/btctool/blockstream/blockstream.go new file mode 100644 index 000000000..839d1e8b4 --- /dev/null +++ b/cmd/btctool/blockstream/blockstream.go @@ -0,0 +1,97 @@ +// Copyright (c) 2024 Hemi Labs, Inc. +// Use of this source code is governed by the MIT License, +// which can be found in the LICENSE file. 
+ +package blockstream + +import ( + "context" + "encoding/hex" + "fmt" + "strconv" + + "github.com/hemilabs/heminetwork/cmd/btctool/httpclient" +) + +var ( + bsTestnetURL = "https://blockstream.info/testnet/api" // XXX wrap in structure + bsMainnetURL = "https://blockstream.info/api" // XXX wrap in structure + bsURL = bsTestnetURL // XXX wrap in structure +) + +type TBlock struct { + ID string `json:"id"` + Height uint `json:"height"` + Version uint `json:"version"` + Timestamp int64 `json:"timestamp"` + TxCount uint `json:"tx_count"` + Size uint `json:"size"` + Weight uint `json:"weight"` + MerkleRoot string `json:"merkle_root"` + PreviousBlockHash string `json:"previousblockhash"` + MedianTime int64 `json:"mediantime"` + Nonce uint `json:"nonce"` + Bits uint `json:"bits"` + Difficulty uint `json:"difficulty"` +} + +func Tip(ctx context.Context) (int, error) { + b, err := httpclient.Request(ctx, "GET", bsURL+"/blocks/tip/height", nil) + if err != nil { + return 0, fmt.Errorf("request: %v", err) + } + height, err := strconv.ParseInt(string(b), 10, 64) + if err != nil { + return 0, fmt.Errorf("ParseUint: %v", err) + } + + return int(height), nil +} + +func BlockHeader(ctx context.Context, hash string) (string, error) { + bh, err := httpclient.Request(ctx, "GET", bsURL+"/block/"+hash+"/header", nil) + if err != nil { + return "", fmt.Errorf("request: %v", err) + } + _, err = hex.DecodeString(string(bh)) + if err != nil { + return "", fmt.Errorf("DecodeString: %v", err) + } + return string(bh), nil +} + +func BlockHeightHash(ctx context.Context, height string) (string, error) { + bh, err := httpclient.Request(ctx, "GET", bsURL+"/block-height/"+height, nil) + if err != nil { + return "", fmt.Errorf("request: %v", err) + } + _, err = hex.DecodeString(string(bh)) + if err != nil { + return "", fmt.Errorf("DecodeString: %v", err) + } + return string(bh), nil +} + +func Block(ctx context.Context, hash string, raw bool) (string, error) { + suffix := "" + if raw { + suffix 
= "/raw" + } + b, err := httpclient.Request(ctx, "GET", bsURL+"/block/"+hash+suffix, nil) + if err != nil { + return "", fmt.Errorf("request: %v", err) + } + if raw { + return hex.EncodeToString(b), nil + } + return string(b), nil +} + +func BlockBytes(ctx context.Context, hash string) ([]byte, error) { + suffix := "/raw" + b, err := httpclient.Request(ctx, "GET", bsURL+"/block/"+hash+suffix, nil) + if err != nil { + return nil, fmt.Errorf("request: %v", err) + } + return b, nil +} diff --git a/cmd/btctool/btctool.go b/cmd/btctool/btctool.go new file mode 100644 index 000000000..271553571 --- /dev/null +++ b/cmd/btctool/btctool.go @@ -0,0 +1,694 @@ +// Copyright (c) 2024 Hemi Labs, Inc. +// Use of this source code is governed by the MIT License, +// which can be found in the LICENSE file. + +package main // XXX wrap in structure + +import ( + "context" + "crypto/sha256" + "encoding/hex" + "flag" + "fmt" + "math/rand" + "net" + "os" + "path/filepath" + "strconv" + "strings" + "sync" + "time" + + "github.com/btcsuite/btcd/btcutil" + "github.com/btcsuite/btcd/chaincfg" + "github.com/btcsuite/btcd/chaincfg/chainhash" + "github.com/btcsuite/btcd/txscript" + "github.com/btcsuite/btcd/wire" + "github.com/davecgh/go-spew/spew" + "github.com/juju/loggo" + "github.com/mitchellh/go-homedir" + + "github.com/hemilabs/heminetwork/cmd/btctool/bdf" + "github.com/hemilabs/heminetwork/cmd/btctool/blockstream" + "github.com/hemilabs/heminetwork/cmd/btctool/btctool" +) + +var log = loggo.GetLogger("bdf") + +func parseBlockFromHex(blk string) (*btcutil.Block, error) { + eb, err := hex.DecodeString(strings.Trim(blk, "\n")) + if err != nil { + return nil, err + } + + // decode + b, err := btcutil.NewBlockFromBytes(eb) + if err != nil { + return nil, err + } + + return b, nil +} + +func parseBlock(ctx context.Context, filename string) (*btcutil.Block, error) { + heb, err := os.ReadFile(filename) + if err != nil { + return nil, err + } + eb, err := 
hex.DecodeString(strings.Trim(string(heb), "\n")) + if err != nil { + return nil, err + } + fmt.Printf("len %v\n", len(eb)) + + // decode + b, err := btcutil.NewBlockFromBytes(eb) + if err != nil { + return nil, err + } + + return b, nil +} + +type peer struct { + mtx sync.RWMutex + address string + conn net.Conn + + protocolVersion uint32 + network wire.BitcoinNet + + remoteVersion *wire.MsgVersion + addrV2 bool +} + +func NewPeer(network wire.BitcoinNet, address string) (*peer, error) { + return &peer{ + protocolVersion: wire.ProtocolVersion, + network: network, + address: address, + }, nil +} + +func (p *peer) connect(ctx context.Context) error { + p.mtx.Lock() + if p.conn != nil { + p.mtx.Unlock() + return fmt.Errorf("already open") + } + p.mtx.Unlock() + // XXX this races + + d := net.Dialer{} + conn, err := d.DialContext(ctx, "tcp", p.address) + if err != nil { + return err + } + p.mtx.Lock() + p.conn = conn + p.mtx.Unlock() + + return nil +} + +func (p *peer) close() error { + p.mtx.Lock() + defer p.mtx.Unlock() + if p.conn != nil { + return p.conn.Close() + } + return fmt.Errorf("already closed") +} + +func (p *peer) write(msg wire.Message) error { + _, err := wire.WriteMessageWithEncodingN(p.conn, msg, p.protocolVersion, + p.network, wire.LatestEncoding) + return err +} + +func (p *peer) read() (wire.Message, error) { + _, msg, _, err := wire.ReadMessageWithEncodingN(p.conn, p.protocolVersion, + p.network, wire.LatestEncoding) + return msg, err +} + +func (p *peer) handshake(ctx context.Context) error { + // 1. send our version + // 2. receive version + // 3. send sendaddrv2 + // 4. send verack + // 5. receive sendaddrv2, verack or ignore + + us := &wire.NetAddress{Timestamp: time.Now()} + them := &wire.NetAddress{Timestamp: time.Now()} + msg := wire.NewMsgVersion(us, them, uint64(rand.Int63()), 0) + err := p.write(msg) + if err != nil { + return fmt.Errorf("could not write version message: %v", err) + } + + // 2. 
receive version + rmsg, err := p.read() + if err != nil { + return fmt.Errorf("could not read version message: %v", err) + } + v, ok := rmsg.(*wire.MsgVersion) + if !ok { + return fmt.Errorf("expected version message") + } + p.remoteVersion = v + + // 3. send sendaddrv2 + if v.ProtocolVersion >= 70016 { + err = p.write(wire.NewMsgSendAddrV2()) + if err != nil { + return fmt.Errorf("could not send sendaddrv2: %v", err) + } + } + + // 4. send verack + err = p.write(wire.NewMsgVerAck()) + if err != nil { + return fmt.Errorf("could not send verack: %v", err) + } + + for count := 0; count < 3; count++ { + msg, err := p.read() + if err == wire.ErrUnknownMessage { + continue + } else if err != nil { + return err + } + + switch msg.(type) { + case *wire.MsgVerAck: + return nil + case *wire.MsgSendAddrV2: + p.addrV2 = true + continue + default: + return fmt.Errorf("unexpected message type: %T", msg) + } + } + + return fmt.Errorf("handshake failed") +} + +func handlePing(p *peer, msg *wire.MsgPing) { + fmt.Printf("ping %v\n", msg.Nonce) + pong := wire.NewMsgPong(msg.Nonce) + err := p.write(pong) + if err != nil { + fmt.Printf("could not write pong message: %v", err) + return + } + fmt.Printf("wrote pong %v\n", pong.Nonce) +} + +func downloadBlock(p *peer, height int, hash chainhash.Hash) error { + fmt.Printf("get block at %v: %v\n", height, hash) + + getData := wire.NewMsgGetData() + getData.InvList = append(getData.InvList, + &wire.InvVect{ + Type: wire.InvTypeBlock, + Hash: hash, + }) + err := p.write(getData) + if err != nil { + return fmt.Errorf("could not write get block message: %v", err) + } + fmt.Printf("wrote get block %v\n", hash) + + return nil +} + +func handleInv(p *peer, msg *wire.MsgInv) { + fmt.Printf("inv: %v\n", len(msg.InvList)) + + for k := range msg.InvList { + switch msg.InvList[k].Type { + case wire.InvTypeBlock: + fmt.Printf("height %v hash %v\n", k+1, msg.InvList[k].Hash) + err := downloadBlock(p, k+1, msg.InvList[k].Hash) + if err != nil { + 
fmt.Printf("download block at %v: %v\n", k+1, err) + } + default: + fmt.Printf("skipping inv type: %v\n", msg.InvList[k].Type) + } + } +} + +func handleBlock(p *peer, msg *wire.MsgBlock) { + fmt.Printf("handle block: %v txs %v\n", msg.Header.BlockHash(), + len(msg.Transactions)) +} + +func btcConnect(ctx context.Context, btcNet string) error { + //ips, err := net.LookupIP("seed.bitcoin.sipa.be") + //if err != nil { + // return err + //} + + mainnetPort := "8333" + testnetPort := "18333" + var ( + port string + wireNet wire.BitcoinNet + chainParams *chaincfg.Params + ) + switch btcNet { + case "mainnet": + port = mainnetPort + wireNet = wire.MainNet + chainParams = &chaincfg.MainNetParams + case "testnet", "testnet3": + port = testnetPort + wireNet = wire.TestNet3 + chainParams = &chaincfg.TestNet3Params + default: + return fmt.Errorf("invalid network: %v", btcNet) + } + + p, err := NewPeer(wireNet, "140.238.169.133"+port) + if err != nil { + return fmt.Errorf("new peer: %v", err) + } + + err = p.connect(ctx) + if err != nil { + return fmt.Errorf("connect: %v", err) + } + + err = p.handshake(ctx) + if err != nil { + return fmt.Errorf("connect: %v", err) + } + + fmt.Printf("handshake complete with: %v\n", p.address) + + // send ibd start using get blocks + fmt.Printf("genesis hash: %v\n", chainParams.GenesisHash) + getBlocks := wire.NewMsgGetBlocks(chainParams.GenesisHash) + err = p.write(getBlocks) + if err != nil { + fmt.Printf("could not write getBlocks message: %v", err) + } + + verbose := false + for { + // see if we were interrupted + select { + case <-ctx.Done(): + return ctx.Err() + default: + } + + msg, err := p.read() + if err == wire.ErrUnknownMessage { + // skip unknown + continue + } else if err != nil { + return fmt.Errorf("read: %w", err) + } + + if verbose { + spew.Dump(msg) + } + + switch m := msg.(type) { + case *wire.MsgPing: + go handlePing(p, m) + + case *wire.MsgInv: + go handleInv(p, m) + + case *wire.MsgBlock: + go handleBlock(p, m) + + 
default: + fmt.Printf("unhandled message type: %T\n", msg) + } + } + + //fmt.Printf("waiting for exit\n") + //<-ctx.Done() + //return nil + + //peers := make(map[string]*peer, len(ips)) + //ips = []net.IP{ + // net.ParseIP("140.238.169.133"), + // // net.ParseIP("84.250.91.34"), + // // net.ParseIP("3.14.15.90"), + // // net.ParseIP("104.182.210.230"), + //} + //for _, ip := range ips { + // address := ip.To4() + // if address == nil { + // continue + // } + // // XXX this does not test for link local and other exclusions + + // // Should be an IPv4 address here + // ma := fmt.Sprintf("%v:%v", address, port) + // p := &peer{address: ma} + // peers[ma] = p + + // // connect + // go func(pp *peer) { + // err := pp.connect(ctx) + // if err != nil { + // fmt.Printf("err: %v\n", err) + // } else { + // fmt.Printf("connected: %v\n", pp.address) + + // pver := wire.ProtocolVersion + + // // write ver + // me := &wire.NetAddress{ + // Timestamp: time.Now(), + // Services: wire.SFNodeNetwork, + // // IP: net.ParseIP("193.218.159.178"), + // // Port: 18333, + + // } + // // spew.Dump(pp.conn.LocalAddr()) + // // theirIP := pp.conn.RemoteAddr().String() + // you := &wire.NetAddress{ + // Timestamp: time.Now(), + // // IP: ips[0], + // // Port: 18333, + // // Services: wire.SFNodeNetwork, + // } + // // spew.Dump(me) + // // spew.Dump(theirIP) + // wmsg := wire.NewMsgVersion(me, you, uint64(rand.Int63()), 0) + // wmsg.Services = wire.SFNodeNetwork + // wmsg.DisableRelayTx = true + // spew.Dump(wmsg) + // n, err := wire.WriteMessageWithEncodingN(pp.conn, wmsg, pver, wireNet, wire.LatestEncoding) + // if err != nil { + // fmt.Printf("write error: %v\n", err) + // return + // } + // fmt.Printf("write n NewMsgVersion: %v\n", n) + + // n, rmsg, rawPayload, err := wire.ReadMessageWithEncodingN(pp.conn, pver, wireNet, wire.LatestEncoding) + // fmt.Printf("read n %T: %v\n", rmsg, n) + // if err != nil { + // fmt.Printf("read error: %v\n", err) + // return + + // } + // _ = rawPayload 
+ // fmt.Printf("%v\n", spew.Sdump(rmsg)) + // // fmt.Printf("%v\n", spew.Sdump(rawPayload)) + // v := rmsg.(*wire.MsgVersion) + // if v.ProtocolVersion >= 70016 { + // fmt.Printf("sendaddrv2\n") + // sendAddrMsg := wire.NewMsgSendAddrV2() + // n, err := wire.WriteMessageWithEncodingN(pp.conn, sendAddrMsg, pver, wireNet, wire.LatestEncoding) + // if err != nil { + // fmt.Printf("write error: %v\n", err) + // return + // } + // fmt.Printf("write n MsgSendAddrV2: %v\n", n) + // } + + // // send verack + // verack := wire.NewMsgVerAck() + // n, err = wire.WriteMessageWithEncodingN(pp.conn, verack, pver, wireNet, wire.LatestEncoding) + // if err != nil { + // fmt.Printf("write error: %v\n", err) + // return + // } + // fmt.Printf("write n MsgVerAck: %v\n", n) + + // for { + // // read what comes back + // n, rmsg, rawPayload, err = wire.ReadMessageWithEncodingN(pp.conn, pver, wireNet, wire.LatestEncoding) + // fmt.Printf("read n %T: %v\n", rmsg, n) + // if err != nil { + // fmt.Printf("read error continue: %v\n", err) + // // XXX exit if eof + // continue + + // } + // _ = rawPayload + // fmt.Printf("%v\n", spew.Sdump(rmsg)) + // } + // } + // }(p) + //} + + //<-ctx.Done() + + // return nil +} + +func StoreBlockHeaders(ctx context.Context, endHeight, blockCount int, dir string) error { + for h := 0; h < blockCount; h++ { + height := endHeight - blockCount + h + 1 + hash, err := btctool.GetAndStoreBlockHeader(ctx, height, dir) + if err != nil { + return err + } + fmt.Printf("%v: %v\n", height, hash) + } + return nil +} + +func parseArgs(args []string) (string, map[string]string, error) { + if len(args) < 1 { + flag.Usage() + return "", nil, fmt.Errorf("action required") + } + + action := args[0] + parsed := make(map[string]string, 10) + + for _, v := range args[1:] { + s := strings.Split(v, "=") + if len(s) != 2 { + return "", nil, fmt.Errorf("invalid argument: %v", v) + } + if len(s[0]) == 0 || len(s[1]) == 0 { + return "", nil, fmt.Errorf("expected a=b, got %v", v) + 
} + parsed[s[0]] = s[1] + } + + return action, parsed, nil +} + +func addressToScript(addr string) (btcutil.Address, error) { + return btcutil.DecodeAddress(addr, &chaincfg.TestNet3Params) +} + +func init() { +} + +func _main() error { + flag.Usage = func() { + f := flag.CommandLine.Output() + fmt.Fprintf(f, "Usage of %v \n", os.Args[0]) + fmt.Fprintf(f, "Flags:\n") + flag.PrintDefaults() + fmt.Fprintf(f, "Actions:\n") + fmt.Fprintf(f, " block [json=bool] [wire=bool] - retrieve block for hash\n") + fmt.Fprintf(f, " blockheader - retrieve blockheader for hash\n") + fmt.Fprintf(f, " blockheighthash - block hash at height\n") + fmt.Fprintf(f, " storeblockheaders [start=int] [count=int] - store block headers\n") + fmt.Fprintf(f, " tip - retrieve tip height\n") + } + + //var ( + // endHeight, blockCount int + // downloadDir string + //) + //flag.IntVar(&endHeight, "startblock", -1, "Height to start downloading, negative means start at current max height") + //flag.IntVar(&blockCount, "count", -1024, "number of blocks to download, negative goes backwards from height") + //flag.StringVar(&downloadDir, "downloaddir", "", "Directory to download block header and data to. 
Leave empty to dump to stdout.") + flag.Parse() + + err := loggo.ConfigureLoggers("info") // XXX make flag + if err != nil { + return fmt.Errorf("ConfigureLoggers: %v", err) + } + + ctx, cancel := context.WithCancel(context.Background()) + _ = cancel + + action, args, err := parseArgs(flag.Args()) + if err != nil { + return err + } + + switch action { + case "standardscript": + address := args["address"] + if address == "" { + return fmt.Errorf("address: must be set") + } + var ( + a btcutil.Address + h []byte + sh [32]byte + ) + a, err = addressToScript(address) + h, err = txscript.PayToAddrScript(a) + sh = sha256.Sum256(h) + spew.Dump(a) + spew.Dump(h) + spew.Dump(sh) + + case "block": + raw := true + + wireSpew := false + wireSet := args["wire"] + if wireSet == "1" || strings.ToLower(wireSet) == "true" { + wireSpew = true + } + + jsonSet := args["json"] + if jsonSet == "1" || strings.ToLower(jsonSet) == "true" { + raw = false + if wireSpew { + return fmt.Errorf("wire and json may not be both set") + } + } + hash := args["hash"] + if hash == "" { + return fmt.Errorf("hash: must be set") + } + var b string + b, err = blockstream.Block(ctx, hash, raw) + if err == nil { + if wireSpew { + //eb, err := hex.DecodeString(strings.Trim(b, "\n")) + //if err != nil { + // return err + //} + //fmt.Printf("%v", spew.Sdump(eb)) + + blk, err := parseBlockFromHex(b) + if err != nil { + return err + } + fmt.Printf("%v", spew.Sdump(blk.MsgBlock())) + } else { + fmt.Printf("%v\n", b) + } + } + case "blockheader": + hash := args["hash"] + if hash == "" { + return fmt.Errorf("hash: must be set") + } + var bh string + bh, err = blockstream.BlockHeader(ctx, hash) + if err == nil { + fmt.Printf("%v\n", bh) + } + case "blockheighthash": + height := args["height"] + if height == "" { + return fmt.Errorf("height: must be set") + } + var bh string + bh, err = blockstream.BlockHeightHash(ctx, height) + if err == nil { + fmt.Printf("%v\n", bh) + } + case "tip": + var height int + height, err 
= blockstream.Tip(ctx) + if err == nil { + fmt.Printf("%v\n", height) + } + + case "p2p": + err = btcConnect(ctx, "testnet3") + + case "parseblock": + filename := args["filename"] + if filename == "" { + return fmt.Errorf("filename: must be set") + } + var block *btcutil.Block + block, err = parseBlock(ctx, filename) + if err == nil { + spew.Dump(block) + } + + case "storeblockheaders": + // XXX remove + kill bfd + downloadDir := filepath.Join("~/.mocksicle", bdf.DefaultDataDir) + downloadDir, err = homedir.Expand(downloadDir) + if err != nil { + return fmt.Errorf("invalid directory: %v", err) + } + + err = os.MkdirAll(downloadDir, 0o700) + if err != nil { + return fmt.Errorf("MkdirAll: %v", err) + } + + blockCount := int(1024) + count := args["count"] + if count != "" { + bc, err := strconv.ParseInt(count, 10, 64) + if err != nil { + return fmt.Errorf("count: %v", err) + } + if bc < 0 { + return fmt.Errorf("count must not be negative: %v", bc) + } + blockCount = int(bc) + } + + // Where do we end + var endHeight int + end := args["end"] + if end == "" { + endHeight, err = blockstream.Tip(ctx) + if err != nil { + return fmt.Errorf("tip: %v", err) + } + } else { + e, err := strconv.ParseInt(end, 10, 64) + if err != nil { + return fmt.Errorf("end: %v", err) + } + if e < 0 { + bh, err := blockstream.Tip(ctx) + if err != nil { + return fmt.Errorf("tip: %v", err) + } + e = int64(bh) + e + if e < 0 { + return fmt.Errorf("end height must not be "+ + "negative: %v", e) + } + fmt.Printf("tip at %v, downloading to %v\n", bh, e) + } + endHeight = int(e) + } + err = StoreBlockHeaders(ctx, endHeight, blockCount, downloadDir) + default: + return fmt.Errorf("invalid action: %v", os.Args[1]) + } + + return err +} + +func main() { + err := _main() + if err != nil { + fmt.Fprintf(os.Stderr, "%v\n", err) + os.Exit(1) + } + return +} diff --git a/cmd/btctool/btctool/btctool.go b/cmd/btctool/btctool/btctool.go new file mode 100644 index 000000000..24caf5ce0 --- /dev/null +++ 
b/cmd/btctool/btctool/btctool.go @@ -0,0 +1,37 @@ +// Copyright (c) 2024 Hemi Labs, Inc. +// Use of this source code is governed by the MIT License, +// which can be found in the LICENSE file. + +package btctool + +import ( + "context" + "fmt" + + "github.com/juju/loggo" + + "github.com/hemilabs/heminetwork/cmd/btctool/bdf" + "github.com/hemilabs/heminetwork/cmd/btctool/blockstream" +) + +var log = loggo.GetLogger("btctool") + +func GetAndStoreBlockHeader(ctx context.Context, height int, dir string) (string, error) { + hash, err := blockstream.BlockHeightHash(ctx, fmt.Sprintf("%v", height)) + if err != nil { + return "", fmt.Errorf("BlockHeightHash %v: %v", height, err) + } + + header, err := blockstream.BlockHeader(ctx, hash) + if err != nil { + return "", fmt.Errorf("BlockHeader %v: %v", hash, err) + } + + // Write header + err = bdf.WriteHeader(height, header, dir) + if err != nil { + return "", fmt.Errorf("WriteHeight: %v", err) + } + + return hash, nil +} diff --git a/cmd/btctool/httpclient/httpclient.go b/cmd/btctool/httpclient/httpclient.go new file mode 100644 index 000000000..5e4b6d0b8 --- /dev/null +++ b/cmd/btctool/httpclient/httpclient.go @@ -0,0 +1,50 @@ +// Copyright (c) 2024 Hemi Labs, Inc. +// Use of this source code is governed by the MIT License, +// which can be found in the LICENSE file. 
+ +package httpclient + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "io" + "net/http" + "time" +) + +func Request(ctx context.Context, method, url string, body any) ([]byte, error) { + var r io.Reader + if body != nil { + b, err := json.Marshal(body) + if err != nil { + return nil, fmt.Errorf("marshal body: %v", err) + } + r = bytes.NewReader(b) + } + c := &http.Client{} + for retry := 1; ; retry++ { + req, err := http.NewRequestWithContext(ctx, method, url, r) + if err != nil { + return nil, fmt.Errorf("NewRequestWithContext: %v", err) + } + resp, err := c.Do(req) + if err != nil { + return nil, fmt.Errorf("Do: %v", err) + } + defer resp.Body.Close() + + switch resp.StatusCode { + case http.StatusOK: + case http.StatusTooManyRequests: + time.Sleep(time.Duration(retry) * time.Second) + continue + default: + return nil, fmt.Errorf("%v %v %v %v", method, url, + resp.StatusCode, http.StatusText(resp.StatusCode)) + } + + return io.ReadAll(resp.Body) + } +} diff --git a/cmd/hemictl/hemictl.go b/cmd/hemictl/hemictl.go index 8995594b5..8006c6719 100644 --- a/cmd/hemictl/hemictl.go +++ b/cmd/hemictl/hemictl.go @@ -7,6 +7,9 @@ package main import ( "bytes" "context" + "crypto/sha256" + "encoding/binary" + "encoding/hex" "encoding/json" "errors" "flag" @@ -18,19 +21,31 @@ import ( "reflect" "regexp" "sort" + "strconv" "strings" "sync" "time" + "github.com/btcsuite/btcd/btcutil" + "github.com/btcsuite/btcd/chaincfg" + "github.com/btcsuite/btcd/chaincfg/chainhash" + "github.com/btcsuite/btcd/txscript" "github.com/davecgh/go-spew/spew" "github.com/juju/loggo" "github.com/mitchellh/go-homedir" + "github.com/syndtr/goleveldb/leveldb/util" "github.com/hemilabs/heminetwork/api/bfgapi" "github.com/hemilabs/heminetwork/api/bssapi" "github.com/hemilabs/heminetwork/api/protocol" + "github.com/hemilabs/heminetwork/api/tbcapi" "github.com/hemilabs/heminetwork/config" + "github.com/hemilabs/heminetwork/database" "github.com/hemilabs/heminetwork/database/bfgd/postgres" + 
ldb "github.com/hemilabs/heminetwork/database/level" + "github.com/hemilabs/heminetwork/database/tbcd" + "github.com/hemilabs/heminetwork/database/tbcd/level" + "github.com/hemilabs/heminetwork/service/tbc" "github.com/hemilabs/heminetwork/version" ) @@ -85,6 +100,16 @@ func handleBFGWebsocketReadUnauth(ctx context.Context, conn *protocol.Conn) { } } +// handleTBCWebsocketRead discards all reads but has to exist in order to +// be able to use tbcapi.Call. +func handleTBCWebsocketRead(ctx context.Context, conn *protocol.Conn) { + for { + if _, _, _, err := tbcapi.ReadConn(ctx, conn); err != nil { + return + } + } +} + func bfgdb() error { ctx, cancel := context.WithTimeout(context.Background(), callTimeout) defer cancel() @@ -138,6 +163,492 @@ func bfgdb() error { return nil } +func parseArgs(args []string) (string, map[string]string, error) { + if len(args) < 1 { + flag.Usage() + return "", nil, fmt.Errorf("action required") + } + + action := args[0] + parsed := make(map[string]string, 10) + + for _, v := range args[1:] { + s := strings.Split(v, "=") + if len(s) != 2 { + return "", nil, fmt.Errorf("invalid argument: %v", v) + } + if len(s[0]) == 0 || len(s[1]) == 0 { + return "", nil, fmt.Errorf("expected a=b, got %v", v) + } + parsed[s[0]] = s[1] + } + + return action, parsed, nil +} + +func tbcdb() error { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + action, args, err := parseArgs(flag.Args()[1:]) + if err != nil { + return err + } + + // special commands + //switch action { + //case "crossreference": + // return crossReference(ctx) + //} + + // create fake service to call crawler + cfg := tbc.NewDefaultConfig() + cfg.LevelDBHome = "~/.tbcd" + cfg.Network = "testnet3" + s, err := tbc.NewServer(cfg) + if err != nil { + return fmt.Errorf("new server: %w", err) + } + // Open db. 
+ err = s.DBOpen(ctx) + if err != nil { + return fmt.Errorf("db open: %w", err) + } + defer func() { + err := s.DBClose() + if err != nil { + fmt.Fprintf(os.Stderr, "db close: %v\n", err) + os.Exit(1) + } + }() + + // commands + switch action { + case "blockheaderbyhash": + hash := args["hash"] + if hash == "" { + return fmt.Errorf("hash: must be set") + } + ch, err := chainhash.NewHashFromStr(hash) + if err != nil { + return fmt.Errorf("chainhash: %w", err) + } + bh, err := s.DB().BlockHeaderByHash(ctx, ch[:]) + if err != nil { + return fmt.Errorf("block header by hash: %w", err) + } + fmt.Printf("hash : %v\n", bh) + fmt.Printf("height: %v\n", bh.Height) + + case "blockheadersbest": + bhs, err := s.DB().BlockHeadersBest(ctx) + if err != nil { + return fmt.Errorf("block headers best: %w", err) + } + for k := range bhs { + fmt.Printf("hash (%v): %v\n", k, bhs[k]) + fmt.Printf("height (%v): %v\n", k, bhs[k].Height) + } + + case "blockheadersbyheight": + height := args["height"] + if height == "" { + return fmt.Errorf("height: must be set") + } + h, err := strconv.ParseUint(height, 10, 64) + if err != nil { + return fmt.Errorf("parse uint: %w", err) + } + bh, err := s.DB().BlockHeadersByHeight(ctx, h) + if err != nil { + return fmt.Errorf("block header by height: %w", err) + } + spew.Dump(bh) + + // case "blockheadersinsert": + + case "blocksmissing": + count := args["count"] + c, err := strconv.ParseUint(count, 10, 64) + if len(count) > 0 && err != nil { + return fmt.Errorf("parse uint: %w", err) + } + if c == 0 { + c = 1 + } + bh, err := s.DB().BlocksMissing(ctx, int(c)) + if err != nil { + return fmt.Errorf("block header by height: %w", err) + } + spew.Dump(bh) + + // case "blockinsert": + + case "blockbyhash": + hash := args["hash"] + if hash == "" { + return fmt.Errorf("hash: must be set") + } + ch, err := chainhash.NewHashFromStr(hash) + if err != nil { + return fmt.Errorf("chainhash: %w", err) + } + b, err := s.DB().BlockByHash(ctx, ch[:]) + if err != nil { + 
return fmt.Errorf("block by hash: %w", err) + } + spew.Dump(b) + + case "deletemetadata": + key := args["key"] + if key == "" { + return fmt.Errorf("key: must be set") + } + + s.DBClose() + + levelDBHome := "~/.tbcd" // XXX + network := "testnet3" + db, err := level.New(ctx, filepath.Join(levelDBHome, network)) + if err != nil { + return err + } + defer db.Close() + pool := db.DB() + mdDB := pool[ldb.MetadataDB] + err = mdDB.Delete([]byte(key), nil) + if err != nil { + return err + } + + case "dumpmetadata": + s.DBClose() + + levelDBHome := "~/.tbcd" // XXX + network := "testnet3" + db, err := level.New(ctx, filepath.Join(levelDBHome, network)) + if err != nil { + return err + } + defer db.Close() + pool := db.DB() + mdDB := pool[ldb.MetadataDB] + it := mdDB.NewIterator(nil, nil) + defer it.Release() + for it.Next() { + fmt.Printf("metadata key %vvalue %v", spew.Sdump(it.Key()), spew.Sdump(it.Value())) + } + + case "dumpoutputs": + s.DBClose() + + levelDBHome := "~/.tbcd" // XXX + network := "testnet3" + db, err := level.New(ctx, filepath.Join(levelDBHome, network)) + if err != nil { + return err + } + defer db.Close() + prefix := args["prefix"] + if len(prefix) > 1 { + return fmt.Errorf("prefix must be one byte") + } else if len(prefix) == 1 && !(prefix[0] == 'h' || prefix[0] == 'u') { + return fmt.Errorf("prefix must be h or u") + } + pool := db.DB() + outsDB := pool[ldb.OutputsDB] + it := outsDB.NewIterator(&util.Range{Start: []byte(prefix)}, nil) + defer it.Release() + for it.Next() { + fmt.Printf("outputs key %vvalue %v", spew.Sdump(it.Key()), spew.Sdump(it.Value())) + } + + case "feesbyheight": + height := args["height"] + if height == "" { + return fmt.Errorf("height: must be set") + } + h, err := strconv.ParseInt(height, 10, 64) + if err != nil { + return fmt.Errorf("parse uint: %w", err) + } + count := args["count"] + c, err := strconv.ParseInt(count, 10, 64) + if len(count) > 0 && err != nil { + return fmt.Errorf("parse uint: %w", err) + } + if c == 0 { + 
c = 1 + } + bh, err := s.FeesAtHeight(ctx, h, c) + if err != nil { + return fmt.Errorf("fees by height: %w", err) + } + spew.Dump(bh) + + case "help", "h": + fmt.Printf("tbcd db manipulator commands:\n") + fmt.Printf("\tbalancebyscripthash [hash]\n") + fmt.Printf("\tblockbyhash [hash]\n") + fmt.Printf("\tblockheaderbyhash [hash]\n") + fmt.Printf("\tblockheadersbest\n") + fmt.Printf("\tblockheadersbyheight [height]\n") + fmt.Printf("\tblocksbytxid [hash]\n") + fmt.Printf("\tblocksmissing [count]\n") + fmt.Printf("\tdeletemetadata\n") + fmt.Printf("\tdumpmetadata\n") + fmt.Printf("\tdumpoutputs \n") + fmt.Printf("\thelp\n") + fmt.Printf("\tscripthashbyoutpoint [txid] [index]\n") + fmt.Printf("\tspendoutputsbytxid [txid] [index]\n") + fmt.Printf("\ttxindex \n") + fmt.Printf("\tutxoindex \n") + fmt.Printf("\tutxosbyscripthash [hash]\n") + + case "utxoindex": + var h, c, mc uint64 + height := args["height"] + if height == "" { + // Get height from db + he, err := s.DB().MetadataGet(ctx, tbc.UtxoIndexHeightKey) + if err != nil { + if !errors.Is(err, database.ErrNotFound) { + return fmt.Errorf("metadata %v: %w", + string(tbc.UtxoIndexHeightKey), err) + } + he = make([]byte, 8) + } + h = binary.BigEndian.Uint64(he) + } else if h, err = strconv.ParseUint(height, 10, 64); err != nil { + return fmt.Errorf("height: %w", err) + } + count := args["count"] + if count == "" { + c = 0 + } else if c, err = strconv.ParseUint(count, 10, 64); err != nil { + return fmt.Errorf("count: %w", err) + } + maxCache := args["maxcache"] + if maxCache != "" { + if mc, err = strconv.ParseUint(maxCache, 10, 64); err != nil { + return fmt.Errorf("maxCache: %w", err) + } + cfg.MaxCachedTxs = int(mc) + } + err = s.UtxoIndexer(ctx, h, c) + if err != nil { + return fmt.Errorf("indexer: %w", err) + } + + case "txindex": + var h, c, mc uint64 + height := args["height"] + if height == "" { + // Get height from db + he, err := s.DB().MetadataGet(ctx, tbc.TxIndexHeightKey) + if err != nil { + if 
!errors.Is(err, database.ErrNotFound) { + return fmt.Errorf("metadata %v: %w", + string(tbc.TxIndexHeightKey), err) + } + he = make([]byte, 8) + } + h = binary.BigEndian.Uint64(he) + } else if h, err = strconv.ParseUint(height, 10, 64); err != nil { + return fmt.Errorf("height: %w", err) + } + count := args["count"] + if count == "" { + c = 0 + } else if c, err = strconv.ParseUint(count, 10, 64); err != nil { + return fmt.Errorf("count: %w", err) + } + maxCache := args["maxcache"] + if maxCache != "" { + if mc, err = strconv.ParseUint(maxCache, 10, 64); err != nil { + return fmt.Errorf("maxCache: %w", err) + } + cfg.MaxCachedTxs = int(mc) + } + err = s.TxIndexer(ctx, h, c) + if err != nil { + return fmt.Errorf("indexer: %w", err) + } + + case "blocksbytxid": + txid := args["txid"] + if txid == "" { + return fmt.Errorf("txid: must be set") + } + chtxid, err := chainhash.NewHashFromStr(txid) + if err != nil { + return fmt.Errorf("chainhash: %w", err) + } + var revTxId [32]byte + copy(revTxId[:], chtxid[:]) + + bh, err := s.DB().BlocksByTxId(ctx, revTxId) + if err != nil { + return fmt.Errorf("block by txid: %w", err) + } + for k := range bh { + fmt.Printf("%v\n", bh[k]) + } + + case "spendoutputsbytxid": + txid := args["txid"] + if txid == "" { + return fmt.Errorf("txid: must be set") + } + chtxid, err := chainhash.NewHashFromStr(txid) + if err != nil { + return fmt.Errorf("chainhash: %w", err) + } + var revTxId [32]byte + copy(revTxId[:], chtxid[:]) + + si, err := s.DB().SpendOutputsByTxId(ctx, revTxId) + if err != nil { + return fmt.Errorf("spend outputs by txid: %w", err) + } + for k := range si { + fmt.Printf("%v\n", si[k]) + } + + case "scripthashbyoutpoint": + txid := args["txid"] + if txid == "" { + return fmt.Errorf("txid: must be set") + } + chtxid, err := chainhash.NewHashFromStr(txid) + if err != nil { + return fmt.Errorf("chainhash: %w", err) + } + var revTxId [32]byte + copy(revTxId[:], chtxid[:]) + + index := args["index"] + if index == "" { + return 
fmt.Errorf("index: must be set") + } + idx, err := strconv.ParseUint(index, 10, 32) + if err != nil { + return fmt.Errorf("index: %w", err) + } + op := tbcd.NewOutpoint(revTxId, uint32(idx)) + sh, err := s.DB().ScriptHashByOutpoint(ctx, op) + if err != nil { + return fmt.Errorf("block by hash: %w", err) + } + spew.Dump(sh) + + case "balancebyscripthash": + address := args["address"] + hash := args["hash"] + if address == "" && hash == "" { + return fmt.Errorf("hash or address: must be set") + } else if address != "" && hash != "" { + return fmt.Errorf("hash or address: both set") + } + + var hh [32]byte + if hash != "" { + h, err := hex.DecodeString(hash) + if err != nil { + return fmt.Errorf("decode hex: %w", err) + } + copy(hh[:], h) + } + if address != "" { + // XXX set params + a, err := btcutil.DecodeAddress(address, &chaincfg.TestNet3Params) + if err != nil { + return err + } + h, err := txscript.PayToAddrScript(a) + if err != nil { + return err + } + sh := sha256.Sum256(h) + copy(hh[:], sh[:]) + } + + balance, err := s.DB().BalanceByScriptHash(ctx, hh) + if err != nil { + return fmt.Errorf("block by hash: %w", err) + } + spew.Dump(balance) + + case "utxosbyscripthash": + address := args["address"] + hash := args["hash"] + count := args["count"] + start := args["start"] + + if address == "" && hash == "" { + return fmt.Errorf("hash or address: must be set") + } else if address != "" && hash != "" { + return fmt.Errorf("hash or address: both set") + } + + if count == "" { + count = "100" + } + + if start == "" { + start = "0" + } + + countNum, err := strconv.ParseUint(count, 10, 64) + if err != nil { + return err + } + + startNum, err := strconv.ParseUint(start, 10, 64) + if err != nil { + return err + } + + var hh [32]byte + if hash != "" { + h, err := hex.DecodeString(hash) + if err != nil { + return fmt.Errorf("decode hex: %w", err) + } + copy(hh[:], h) + } + if address != "" { + // XXX set params + a, err := btcutil.DecodeAddress(address, 
&chaincfg.TestNet3Params) + if err != nil { + return err + } + h, err := txscript.PayToAddrScript(a) + if err != nil { + return err + } + sh := sha256.Sum256(h) + copy(hh[:], sh[:]) + } + + utxos, err := s.DB().UtxosByScriptHash(ctx, hh, startNum, countNum) + if err != nil { + return fmt.Errorf("block by hash: %w", err) + } + var balance uint64 + for k := range utxos { + fmt.Printf("%v\n", utxos[k]) + balance += utxos[k].Value() + } + fmt.Printf("utxos: %v total: %v\n", len(utxos), balance) + + default: + return fmt.Errorf("invalid action: %v", action) + } + + return nil +} + type bssClient struct { wg *sync.WaitGroup bssURL string @@ -265,7 +776,7 @@ func bssLong(ctx context.Context) error { func client(which string) error { log.Debugf("client %v", which) - defer log.Debugf("client exit", which) + defer log.Debugf("client %v exit", which) ctx, cancel := context.WithCancel(context.Background()) defer cancel() @@ -305,6 +816,9 @@ func init() { for k, v := range bfgapi.APICommands() { allCommands[string(k)] = v } + for k, v := range tbcapi.APICommands() { + allCommands[string(k)] = v + } sortedCommands = make([]string, 0, len(allCommands)) for k := range allCommands { @@ -321,6 +835,7 @@ func usage() { fmt.Fprintf(os.Stderr, "\tbss-client long connection to bss\n") fmt.Fprintf(os.Stderr, "\thelp (this help)\n") fmt.Fprintf(os.Stderr, "\thelp-verbose JSON print RPC default request/response\n") + fmt.Fprintf(os.Stderr, "\ttbcdb datase open (tbcd must not be running)\n") fmt.Fprintf(os.Stderr, "Environment:\n") config.Help(os.Stderr, cm) fmt.Fprintf(os.Stderr, "Commands:\n") @@ -389,6 +904,8 @@ func _main() error { case "help-verbose": helpVerbose() return nil + case "tbcdb": + return tbcdb() } // Deal with generic commands @@ -411,6 +928,10 @@ func _main() error { u = bfgapi.DefaultPrivateURL callHandler = handleBFGWebsocketReadUnauth call = bfgapi.Call // XXX yuck + case strings.HasPrefix(cmd, "tbcapi"): + u = tbcapi.DefaultURL + callHandler = handleTBCWebsocketRead + 
call = tbcapi.Call // XXX yuck? default: return fmt.Errorf("can't derive URL from command: %v", cmd) } diff --git a/cmd/tbcd/README.md b/cmd/tbcd/README.md new file mode 100644 index 000000000..a529863a7 --- /dev/null +++ b/cmd/tbcd/README.md @@ -0,0 +1,56 @@ +# tbcd + +## Hemi Tiny Bitcoin Daemon + +tbcd is a very minimal bitcoin block downloader and indexer meant for embedding in other applications that require access to bitcoin data (blocks and txes). + +tbcd requires sufficient disk space for a full download of bitcoin block data on a fast (preferably ssd or better) disk. + +tbcd is built with the heminetwork Makefile. To build standalone (requires `go 1.21+`), type: + +``` sh +cd heminetwork/cmd/tbcd +go build +``` + +On some linux systems you may need to increase the number of open files allowed (particularly with slower disks) and the maximum stack size. If you run into open file or OOM errors, in the shell you are going to run tbcd, run: + +```sh +ulimit -n 8192 +ulimit -s 8192 +``` + +You can confirm these settings with: + +```sh +ulimit -a +``` + +For a full list of options: + +``` sh +./bin/tbcd --help +``` + +You can change the file storage with: + +``` sh +export TBC_LEVELDB_HOME=/path/to/files +``` + +Specify the network with: + +``` sh +export TBC_NETWORK=mainnet +``` + +Then run with: + +``` sh +./bin/tbcd +``` + +### License + +This project is licensed under the [MIT License](../../LICENSE). + diff --git a/cmd/tbcd/tbcd.go b/cmd/tbcd/tbcd.go new file mode 100644 index 000000000..ceb585063 --- /dev/null +++ b/cmd/tbcd/tbcd.go @@ -0,0 +1,178 @@ +// Copyright (c) 2024 Hemi Labs, Inc. +// Use of this source code is governed by the MIT License, +// which can be found in the LICENSE file.
+ +package main + +import ( + "context" + "fmt" + "os" + "os/signal" + + "github.com/juju/loggo" + + "github.com/hemilabs/heminetwork/api/tbcapi" + "github.com/hemilabs/heminetwork/config" + "github.com/hemilabs/heminetwork/service/tbc" + "github.com/hemilabs/heminetwork/version" +) + +const ( + daemonName = "tbcd" + defaultLogLevel = daemonName + "=INFO;tbc=INFO;level=INFO" + defaultNetwork = "testnet3" // XXX make this mainnet + defaultHome = "~/." + daemonName +) + +var ( + log = loggo.GetLogger(daemonName) + welcome = fmt.Sprintf("Hemi Tiny Bitcoin Daemon: v%v", version.String()) + + cfg = tbc.NewDefaultConfig() + cm = config.CfgMap{ + "TBC_ADDRESS": config.Config{ + Value: &cfg.ListenAddress, + DefaultValue: tbcapi.DefaultListen, + Help: "address port to listen on", + Print: config.PrintAll, + }, + "TBC_AUTO_INDEX": config.Config{ + Value: &cfg.AutoIndex, + DefaultValue: true, + Help: "enable auto utxo and tx indexes", + Print: config.PrintAll, + }, + "TBC_BLOCK_SANITY": config.Config{ + Value: &cfg.BlockSanity, + DefaultValue: false, + Help: "enable/disable block sanity checks before inserting", + Print: config.PrintAll, + }, + "TBC_LEVELDB_HOME": config.Config{ + Value: &cfg.LevelDBHome, + DefaultValue: defaultHome, + Help: "data directory for leveldb", + Print: config.PrintAll, + }, + "TBC_LOG_LEVEL": config.Config{ + Value: &cfg.LogLevel, + DefaultValue: defaultLogLevel, + Help: "loglevel for various packages; INFO, DEBUG and TRACE", + Print: config.PrintAll, + }, + "TBC_MAX_CACHED_TXS": config.Config{ + Value: &cfg.MaxCachedTxs, + DefaultValue: 1000000, + Help: "maximum cached utxos and/or txs during indexing", + Print: config.PrintAll, + }, + "TBC_NETWORK": config.Config{ + Value: &cfg.Network, + DefaultValue: defaultNetwork, + Help: "bitcoin network; mainnet or testnet3", + Print: config.PrintAll, + }, + "TBC_PROMETHEUS_ADDRESS": config.Config{ + Value: &cfg.PrometheusListenAddress, + DefaultValue: "", + Help: "address and port tbcd prometheus listens 
on", + Print: config.PrintAll, + }, + } +) + +func HandleSignals(ctx context.Context, cancel context.CancelFunc, callback func(os.Signal)) { + signalChan := make(chan os.Signal, 1) + signal.Notify(signalChan, os.Interrupt) + signal.Notify(signalChan, os.Kill) + defer func() { + signal.Stop(signalChan) + cancel() + }() + + select { + case <-ctx.Done(): + case s := <-signalChan: // First signal, cancel context. + if callback != nil { + callback(s) // Do whatever caller wants first. + cancel() + } + } + <-signalChan // Second signal, hard exit. + os.Exit(2) +} + +func _main() error { + // Parse configuration from environment + if err := config.Parse(cm); err != nil { + return err + } + + loggo.ConfigureLoggers(cfg.LogLevel) + log.Infof("%v", welcome) + + pc := config.PrintableConfig(cm) + for k := range pc { + log.Infof("%v", pc[k]) + } + + ctx, cancel := context.WithCancel(context.Background()) + go HandleSignals(ctx, cancel, func(s os.Signal) { + log.Infof("tbc service received signal: %s", s) + }) + + server, err := tbc.NewServer(cfg) + if err != nil { + return fmt.Errorf("Failed to create tbc server: %v", err) + } + // XXX remove, this is an illustration of calling the direct API of server + // go func() { + // for { + // select { + // case <-ctx.Done(): + // return + // case <-time.After(2 * time.Second): + // } + + // log.Infof("synced: %v", spew.Sdump(server.Synced(ctx))) + // hashS := "000000001a4c2c64beded987790ab0c00675b4bc467cd3574ad455b1397c967c" + // ch, err := chainhash.NewHashFromStr(hashS) + // if err != nil { + // panic(err) + // } + // bh, height, err := server.BlockHeaderByHash(ctx, ch) + // if err != nil { + // panic(err) + // } + // log.Infof("height %v hash %v%v", height, bh.BlockHash(), spew.Sdump(bh)) + + // bhbh, err := server.BlockHeadersByHeight(ctx, height) + // if err != nil { + // panic(err) + // } + // log.Infof("height %v headers %v", height, spew.Sdump(bhbh)) + // } + //}() + if err := server.Run(ctx); err != context.Canceled { + 
return fmt.Errorf("tbc server terminated: %v", err) + } + + return nil +} + +func main() { + if len(os.Args) != 1 { + fmt.Fprintf(os.Stderr, "%v\n", welcome) + fmt.Fprintf(os.Stderr, "Usage:\n") + fmt.Fprintf(os.Stderr, "\thelp (this help)\n") + fmt.Fprintf(os.Stderr, "Environment:\n") + config.Help(os.Stderr, cm) + os.Exit(1) + } + + if err := _main(); err != nil { + log.Errorf("%v", err) + os.Exit(1) + } +} diff --git a/database/bfgd/postgres/postgres.go b/database/bfgd/postgres/postgres.go index bc8b1bd05..043d062f4 100644 --- a/database/bfgd/postgres/postgres.go +++ b/database/bfgd/postgres/postgres.go @@ -275,6 +275,7 @@ func (p *pgdb) L2KeystonesMostRecentN(ctx context.Context, n uint32) ([]bfgd.L2K func (p *pgdb) BtcBlockInsert(ctx context.Context, bb *bfgd.BtcBlock) error { log.Tracef("BtcBlockInsert") defer log.Tracef("BtcBlockInsert exit") + const qBtcBlockInsert = ` INSERT INTO btc_blocks (hash, header, height) VALUES ($1, $2, $3) diff --git a/database/database.go b/database/database.go index bddaeb83c..a77df1f12 100644 --- a/database/database.go +++ b/database/database.go @@ -57,10 +57,22 @@ func (ve ValidationError) Is(target error) bool { return ok } +type ZeroRowsError string + +func (ze ZeroRowsError) Error() string { + return string(ze) +} + +func (ze ZeroRowsError) Is(target error) bool { + _, ok := target.(ZeroRowsError) + return ok +} + var ( ErrDuplicate = DuplicateError("duplicate") ErrNotFound = NotFoundError("not found") ErrValidation = ValidationError("validation") + ErrZeroRows = ZeroRowsError("zero rows affected") ) // ByteArray is a type that corresponds to BYTEA in a database. 
It supports @@ -72,11 +84,11 @@ func (ba ByteArray) String() string { return hex.EncodeToString([]byte(ba)) } -func (ba *ByteArray) MarshalJSON() ([]byte, error) { - if *ba == nil { +func (ba ByteArray) MarshalJSON() ([]byte, error) { + if ba == nil { return []byte("null"), nil } - return []byte(fmt.Sprintf("\"\\\\x%s\"", hex.EncodeToString([]byte(*ba)))), nil + return []byte(fmt.Sprintf("\"\\\\x%s\"", hex.EncodeToString([]byte(ba)))), nil } func (ba *ByteArray) UnmarshalJSON(data []byte) error { @@ -156,7 +168,7 @@ func (bi *BigInt) SetUint64(val uint64) *BigInt { return bi } -func (bi *BigInt) MarshalJSON() ([]byte, error) { +func (bi BigInt) MarshalJSON() ([]byte, error) { if bi.Int == nil { return []byte("null"), nil } @@ -214,9 +226,8 @@ type Timestamp struct { const timestampFormat = `2006-01-02T15:04:05.999999999` -// NewTimestamp returns a Timestamp initialized with the given time. func NewTimestamp(time time.Time) Timestamp { - return Timestamp{Time: time} + return Timestamp{Time: time.Round(0).UTC()} } func (ts Timestamp) MarshalJSON() ([]byte, error) { diff --git a/database/database_test.go b/database/database_test.go index afd131978..80b5ce2a4 100644 --- a/database/database_test.go +++ b/database/database_test.go @@ -6,11 +6,42 @@ package database import ( "bytes" + "encoding/json" "fmt" + "reflect" "testing" "time" + + "github.com/davecgh/go-spew/spew" ) +func TestStructByteArrayJSON(t *testing.T) { + type X struct { + Y BigInt + Ts Timestamp + MyByteArray ByteArray + } + + y := NewBigIntZero().SetUint64(15) + x := X{ + Y: *y, + Ts: NewTimestamp(time.Now()), + MyByteArray: []byte{0x01, 0x02}, + } + jx, err := json.Marshal(x) + if err != nil { + t.Fatal(err) + } + var xx X + err = json.Unmarshal(jx, &xx) + if err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(x, xx) { + t.Fatalf("not equal %v%v", spew.Sdump(x), spew.Sdump(xx)) + } +} + func TestByteArrayJSON(t *testing.T) { tests := []struct { data []byte diff --git a/database/level/level.go 
b/database/level/level.go new file mode 100644 index 000000000..f226b2a79 --- /dev/null +++ b/database/level/level.go @@ -0,0 +1,211 @@ +// Copyright (c) 2024 Hemi Labs, Inc. +// Use of this source code is governed by the MIT License, +// which can be found in the LICENSE file. + +package level + +import ( + "context" + "encoding/binary" + "errors" + "fmt" + "io/fs" + "os" + "path/filepath" + "sync" + + "github.com/juju/loggo" + "github.com/mitchellh/go-homedir" + "github.com/syndtr/goleveldb/leveldb" + "github.com/syndtr/goleveldb/leveldb/opt" + + "github.com/hemilabs/heminetwork/database" +) + +const ( + logLevel = "INFO" + + BlockHeadersDB = "blockheaders" + BlocksMissingDB = "blocksmissing" + BlocksDB = "blocks" + MetadataDB = "metadata" + HeightHashDB = "heighthash" + PeersDB = "peers" + OutputsDB = "outputs" + TransactionsDB = "transactions" + + versionKey = "version" + databaseVersion = 1 +) + +var log = loggo.GetLogger("level") + +func init() { + loggo.ConfigureLoggers(logLevel) +} + +type ( + Pool map[string]*leveldb.DB + Database struct { + mtx sync.RWMutex + wg sync.WaitGroup // Wait group for notification handler exit + + pool Pool // database pool + + ntfn map[database.NotificationName]int // Notification handlers + home string // leveld toplevel database directory + } +) + +var _ database.Database = (*Database)(nil) + +func (l *Database) Close() error { + log.Tracef("Close") + defer log.Tracef("Close exit") + + l.mtx.Lock() + defer l.mtx.Unlock() + + var errSeen error // XXX return last error for now + for k, v := range l.pool { + err := v.Close() + if err != nil { + // do continue, leveldb does not like unfresh shutdowns + log.Errorf("close %v: %v", k, err) + errSeen = err + } + } + + return errSeen +} + +func (l *Database) DB() Pool { + log.Tracef("DB") + defer log.Tracef("DB exit") + + return l.pool +} + +func (l *Database) RegisterNotification(ctx context.Context, n database.NotificationName, f database.NotificationCallback, payload any) error { + 
log.Tracef("RegisterNotification") + defer log.Tracef("RegisterNotification exit") + + return fmt.Errorf("RegisterNotification") +} + +func (l *Database) UnregisterNotification(n database.NotificationName) error { + log.Tracef("UnregisterNotification") + defer log.Tracef("UnregisterNotification exit") + + return fmt.Errorf("UnregisterNotification") +} + +func (l *Database) openDB(name string, options *opt.Options) error { + l.mtx.Lock() + defer l.mtx.Unlock() + + bhs := filepath.Join(l.home, name) + bhsDB, err := leveldb.OpenFile(bhs, options) + if err != nil { + return fmt.Errorf("leveldb open %v: %w", name, err) + } + l.pool[name] = bhsDB + + return nil +} + +func (l *Database) Version(ctx context.Context) (int, error) { + mdDB := l.pool[MetadataDB] + value, err := mdDB.Get([]byte(versionKey), nil) + if err != nil { + return -1, fmt.Errorf("version: %w", err) + } + var dbVersion uint64 + dbVersion = binary.BigEndian.Uint64(value) + + return int(dbVersion), nil +} + +func New(ctx context.Context, home string, version int) (*Database, error) { + log.Tracef("New") + defer log.Tracef("New exit") + + h, err := homedir.Expand(home) + if err != nil { + return nil, fmt.Errorf("home dir: %w", err) + } + err = os.MkdirAll(h, 0o0700) + if err != nil { + return nil, fmt.Errorf("mkdir: %w", err) + } + + l := &Database{ + home: h, + pool: make(Pool), + } + + unwind := true + defer func() { + if unwind { + log.Errorf("new unwind exited with: %v", l.Close()) + } + }() + + // Peers table + err = l.openDB(BlockHeadersDB, nil) + if err != nil { + return nil, fmt.Errorf("leveldb %v: %w", BlockHeadersDB, err) + } + err = l.openDB(BlocksDB, nil) + if err != nil { + return nil, fmt.Errorf("leveldb %v: %w", BlocksDB, err) + } + err = l.openDB(BlocksMissingDB, nil) + if err != nil { + return nil, fmt.Errorf("leveldb %v: %w", BlocksMissingDB, err) + } + err = l.openDB(HeightHashDB, nil) + if err != nil { + return nil, fmt.Errorf("leveldb %v: %w", HeightHashDB, err) + } + err = 
l.openDB(PeersDB, nil) + if err != nil { + return nil, fmt.Errorf("leveldb %v: %w", PeersDB, err) + } + err = l.openDB(OutputsDB, nil) + if err != nil { + return nil, fmt.Errorf("leveldb %v: %w", OutputsDB, err) + } + err = l.openDB(TransactionsDB, nil) + if err != nil { + return nil, fmt.Errorf("leveldb %v: %w", TransactionsDB, err) + } + + // Treat metadata special so that we can insert some stuff. + err = l.openDB(MetadataDB, &opt.Options{ErrorIfMissing: true}) + if errors.Is(err, fs.ErrNotExist) { + err = l.openDB(MetadataDB, &opt.Options{ErrorIfMissing: false}) + if err != nil { + return nil, fmt.Errorf("leveldb initial %v: %w", MetadataDB, err) + } + versionData := make([]byte, 8) + binary.BigEndian.PutUint64(versionData, databaseVersion) + err = l.pool[MetadataDB].Put([]byte(versionKey), versionData, nil) + } + // Check metadata error + if err != nil { + return nil, fmt.Errorf("leveldb %v: %w", MetadataDB, err) + } + dbVersion, err := l.Version(ctx) + if err != nil { + return nil, err + } + if dbVersion != version { + return nil, fmt.Errorf("invalid version: wanted %v got %v", + dbVersion, version) + } + + unwind = false + + return l, nil +} diff --git a/database/postgres/postgres.go b/database/postgres/postgres.go index 54d3200b7..7e4f8daed 100644 --- a/database/postgres/postgres.go +++ b/database/postgres/postgres.go @@ -50,6 +50,8 @@ type Database struct { pool *sql.DB } +var _ database.Database = (*Database)(nil) + // Connect connects to a postgres database. This is only used in tests. func Connect(ctx context.Context, uri string) (*sql.DB, error) { pool, err := sql.Open("postgres", uri) diff --git a/database/tbcd/TESTS.md b/database/tbcd/TESTS.md new file mode 100644 index 000000000..f7d9962b8 --- /dev/null +++ b/database/tbcd/TESTS.md @@ -0,0 +1,11 @@ +## Running extended tests + +Create a user that has CREATEDB privilege. 
+``` +sudo -u postgres psql -c "CREATE ROLE tbcdtest WITH LOGIN PASSWORD 'password' NOSUPERUSER CREATEDB;" +``` + +run tests: +``` +PGTESTURI="postgres://tbcdtest:password@localhost/postgres" go test -v ./... +``` diff --git a/database/tbcd/database.go b/database/tbcd/database.go new file mode 100644 index 000000000..5375e20b3 --- /dev/null +++ b/database/tbcd/database.go @@ -0,0 +1,371 @@ +// Copyright (c) 2024 Hemi Labs, Inc. +// Use of this source code is governed by the MIT License, +// which can be found in the LICENSE file. + +package tbcd + +import ( + "bytes" + "context" + "encoding/binary" + "encoding/hex" + "fmt" + "time" + + "github.com/btcsuite/btcd/chaincfg/chainhash" + "github.com/btcsuite/btcd/wire" + + "github.com/hemilabs/heminetwork/database" +) + +type Database interface { + database.Database + + // Metadata + Version(ctx context.Context) (int, error) + MetadataGet(ctx context.Context, key []byte) ([]byte, error) + MetadataPut(ctx context.Context, key, value []byte) error + + // Block header + BlockHeaderByHash(ctx context.Context, hash []byte) (*BlockHeader, error) + BlockHeadersBest(ctx context.Context) ([]BlockHeader, error) + BlockHeadersByHeight(ctx context.Context, height uint64) ([]BlockHeader, error) + BlockHeadersInsert(ctx context.Context, bhs []BlockHeader) error + + // Block + BlocksMissing(ctx context.Context, count int) ([]BlockIdentifier, error) + BlockInsert(ctx context.Context, b *Block) (int64, error) + // XXX replace BlockInsert with plural version + // BlocksInsert(ctx context.Context, bs []*Block) (int64, error) + BlockByHash(ctx context.Context, hash []byte) (*Block, error) + + // Transactions + BlockUtxoUpdate(ctx context.Context, utxos map[Outpoint]CacheOutput) error + BlockTxUpdate(ctx context.Context, txs map[TxKey]*TxValue) error + BlocksByTxId(ctx context.Context, txId TxId) ([]BlockHash, error) + SpendOutputsByTxId(ctx context.Context, txId TxId) ([]SpendInfo, error) + + // Peer manager + PeersStats(ctx 
context.Context) (int, int) // good, bad count + PeersInsert(ctx context.Context, peers []Peer) error // insert or update + PeerDelete(ctx context.Context, host, port string) error // remove peer + PeersRandom(ctx context.Context, count int) ([]Peer, error) + + // ScriptHash returns the sha256 of PkScript for the provided outpoint. + BalanceByScriptHash(ctx context.Context, sh ScriptHash) (uint64, error) + ScriptHashByOutpoint(ctx context.Context, op Outpoint) (*ScriptHash, error) + UtxosByScriptHash(ctx context.Context, sh ScriptHash, start uint64, count uint64) ([]Utxo, error) +} + +// BlockHeader contains the first 80 raw bytes of a bitcoin block and its +// location information (hash+height). +type BlockHeader struct { + Hash database.ByteArray + Height uint64 + Header database.ByteArray +} + +func (bh BlockHeader) String() string { + ch, _ := chainhash.NewHash(bh.Hash) + return ch.String() +} + +func (bh BlockHeader) Timestamp() time.Time { + var wbh wire.BlockHeader + err := wbh.Deserialize(bytes.NewReader(bh.Header)) + if err != nil { + return time.Time{} + } + return wbh.Timestamp +} + +func (bh BlockHeader) Wire() (*wire.BlockHeader, error) { + var wbh wire.BlockHeader + err := wbh.Deserialize(bytes.NewReader(bh.Header)) + if err != nil { + return nil, fmt.Errorf("deserialize: %w", err) + } + return &wbh, nil +} + +// Block contains a raw bitcoin block and its corresponding hash. +type Block struct { + Hash database.ByteArray + Block database.ByteArray +} + +// BlockIdentifier uniquely identifies a block using it's hash and height. +type BlockIdentifier struct { + Height uint64 + Hash database.ByteArray +} + +type SpendInfo struct { + BlockHash BlockHash + TxId TxId + InputIndex uint32 +} + +// Peer +type Peer struct { + Host string + Port string + LastAt database.Timestamp `deep:"-"` // Last time connected + CreatedAt database.Timestamp `deep:"-"` +} + +// XXX we can probably save a bunch of bcopy if we construct the key directly +// for the db. 
Peek at the s + t cache which does this. + +// Outpoint is a bitcoin structure that points to a transaction in a block. It +// is expressed as an array of bytes in order to pack it as dense as possible +// for memory conservation reasons. +type Outpoint [37]byte // Outpoint Tx id + +// String returns a reversed pretty printed outpoint. +func (o Outpoint) String() string { + hash, _ := chainhash.NewHash(o[1:33]) + return fmt.Sprintf("%s:%d", hash, binary.BigEndian.Uint32(o[33:])) +} + +func (o Outpoint) TxId() []byte { + return o[1:33] +} + +func (o Outpoint) TxIndex() uint32 { + return binary.BigEndian.Uint32(o[33:]) +} + +func (o Outpoint) TxIndexBytes() []byte { + return o[33:] +} + +func NewOutpoint(txid [32]byte, index uint32) (op Outpoint) { + op[0] = 'u' // match leveldb cache so that we preven a bunch of bcopy + copy(op[1:33], txid[:]) + binary.BigEndian.PutUint32(op[33:], index) + return +} + +// CacheOutput is a densely packed representation of a bitcoin UTXo. The fields are +// script_hash + value + out_index. It is packed for +// memory conservation reasons. +type CacheOutput [32 + 8 + 4]byte // script_hash + value + out_idx + +// String reutrns pretty printable CacheOutput. Hash is not reversed since it is an +// opaque pointer. 
It prints satoshis@script_hash:output_index +func (c CacheOutput) String() string { + return fmt.Sprintf("%d @ %v:%d", binary.BigEndian.Uint64(c[32:40]), + c[0:32], binary.BigEndian.Uint32(c[40:])) +} + +func (c CacheOutput) ScriptHash() (hash [32]byte) { + copy(hash[:], c[0:32]) + return +} + +func (c CacheOutput) ScriptHashSlice() []byte { + return c[0:32] +} + +func (c CacheOutput) Value() uint64 { + return binary.BigEndian.Uint64(c[32:40]) +} + +func (c CacheOutput) ValueBytes() []byte { + return c[32:40] +} + +func (c CacheOutput) OutputIndex() uint32 { + return binary.BigEndian.Uint32(c[40:]) +} + +func (c CacheOutput) OutputIndexBytes() []byte { + return c[40:44] +} + +func (c CacheOutput) Equal(x CacheOutput) bool { + return bytes.Equal(c[:], x[:]) +} + +// DeleteUtxo is the max uint64 value which is used as a sentinel to indicate +// that a utxo should be reaped. The remaining fields must remain untouched +// since they are part of the lookup key of the utxo balance. +var DeleteUtxo = [8]byte{0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff} + +func (c CacheOutput) IsDelete() bool { + return bytes.Equal(c[32:40], DeleteUtxo[:]) +} + +func NewCacheOutput(hash [32]byte, value uint64, outIndex uint32) (co CacheOutput) { + copy(co[0:32], hash[:]) // scripthash + binary.BigEndian.PutUint64(co[32:40], value) + binary.BigEndian.PutUint32(co[40:], outIndex) + return +} + +func NewDeleteCacheOutput(hash [32]byte, outIndex uint32) (co CacheOutput) { + copy(co[0:32], hash[:]) // scripthash or txid + copy(co[32:40], DeleteUtxo[:]) + binary.BigEndian.PutUint32(co[40:], outIndex) + return +} + +// Utxo packs a transaction id, the value and the out index. +type Utxo [32 + 8 + 4]byte // tx_id + value + out_idx + +// String reutrns pretty printable CacheOutput. Hash is not reversed since it is an +// opaque pointer. 
It prints satoshis@script_hash:output_index +func (u Utxo) String() string { + ch, _ := chainhash.NewHash(u[0:32]) + return fmt.Sprintf("%d @ %v:%d", binary.BigEndian.Uint64(u[32:40]), + ch, binary.BigEndian.Uint32(u[40:])) +} + +func (u Utxo) ScriptHash() (hash [32]byte) { + copy(hash[:], u[0:32]) + return +} + +func (u Utxo) ScriptHashSlice() []byte { + return u[0:32] +} + +func (u Utxo) Value() uint64 { + return binary.BigEndian.Uint64(u[32:40]) +} + +func (u Utxo) ValueBytes() []byte { + return u[32:40] +} + +func (u Utxo) OutputIndex() uint32 { + return binary.BigEndian.Uint32(u[40:]) +} + +func (u Utxo) OutputIndexBytes() []byte { + return u[40:44] +} + +func (u Utxo) Equal(x CacheOutput) bool { + return bytes.Equal(u[:], x[:]) +} + +func NewUtxo(hash [32]byte, value uint64, outIndex uint32) (u Utxo) { + copy(u[0:32], hash[:]) // txid + binary.BigEndian.PutUint64(u[32:40], value) + binary.BigEndian.PutUint32(u[40:], outIndex) + return +} + +// TxId is a bitcoin transaction id. The underlying slice is reversed, only +// when using the stringer does it apear in human readable format. +type TxId [32]byte + +func (t TxId) String() string { + var rev [32]byte + for k := range t { + rev[32-k-1] = t[k] + } + return hex.EncodeToString(rev[:]) +} + +func NewTxId(x [32]byte) (txId TxId) { + copy(txId[:], x[:]) + return +} + +func NewTxIdFromBytes(x []byte) (txId TxId, err error) { + if len(x) != 32 { + err = fmt.Errorf("invalid transaction hash length") + return + } + copy(txId[:], x[:]) + return +} + +// BlockHash is a bitcoin transaction id. The underlying slice is reversed, only +// when using the stringer does it apear in human readable format. 
+type BlockHash [32]byte + +func (bh BlockHash) String() string { + var rev [32]byte + for k := range bh { + rev[32-k-1] = bh[k] + } + return hex.EncodeToString(rev[:]) +} + +func NewBlockHash(x [32]byte) (blockHash BlockHash) { + copy(blockHash[:], x[:]) + return +} + +func NewBlockHashFromBytes(x []byte) (blockHash BlockHash, err error) { + if len(x) != 32 { + err = fmt.Errorf("invalid block hash length") + return + } + copy(blockHash[:], x[:]) + return +} + +// ScriptHash is a bitcoin transaction id. The underlying slice is reversed, only +// when using the stringer does it apear in human readable format. +type ScriptHash [32]byte + +func (bh ScriptHash) String() string { + return hex.EncodeToString(bh[:]) +} + +func NewScriptHash(x [32]byte) (scriptHash ScriptHash) { + copy(scriptHash[:], x[:]) + return +} + +func NewScriptHashFromBytes(x []byte) (scriptHash ScriptHash, err error) { + if len(x) != 32 { + err = fmt.Errorf("invalid script hash length") + return + } + copy(scriptHash[:], x[:]) + return +} + +// Spent Transaction: +// +// s + txin.PrevOutPoint.Hash + txin.PrevOutPoint.Index + blockhash = txid + txin_index + blockhash | [1 + 32 + 4 + 32] = [32 + 4] +// +// Transaction ID to Block mapping: +// +// t + txid + blockhash = nil | [1 + 32 + 32] = nil +type ( + TxKey [69]byte // Allocate max sized key, the prefix byte determines the lengths + TxValue [36]byte // allocate max sized value +) + +// NewTxSpent returns a TxKey and TxValue that maps a spent transaction to a +// location in a block. 
+func NewTxSpent(blockHash, txId, inPrevHash *chainhash.Hash, inPrevIndex, txInIndex uint32) (txKey TxKey, txValue TxValue) { + // Construct key + txKey[0] = 's' + copy(txKey[1:33], inPrevHash[:]) + binary.BigEndian.PutUint32(txKey[33:37], inPrevIndex) + copy(txKey[37:], blockHash[:]) + + // Construct value + copy(txValue[0:], txId[:]) + binary.BigEndian.PutUint32(txValue[32:36], txInIndex) + + return txKey, txValue +} + +// NewTxMapping returns a TxKey and TxValue that maps a tx id to a block hash. +func NewTxMapping(txId, blockHash *chainhash.Hash) (txKey TxKey) { + // Construct key + txKey[0] = 't' + copy(txKey[1:33], txId[:]) + copy(txKey[33:], blockHash[:]) + + return txKey +} diff --git a/database/tbcd/database_ext_test.go b/database/tbcd/database_ext_test.go new file mode 100644 index 000000000..696b55e8e --- /dev/null +++ b/database/tbcd/database_ext_test.go @@ -0,0 +1,5 @@ +// Copyright (c) 2024 Hemi Labs, Inc. +// Use of this source code is governed by the MIT License, +// which can be found in the LICENSE file. + +package tbcd_test diff --git a/database/tbcd/level/level.go b/database/tbcd/level/level.go new file mode 100644 index 000000000..b25fc531c --- /dev/null +++ b/database/tbcd/level/level.go @@ -0,0 +1,875 @@ +// Copyright (c) 2024 Hemi Labs, Inc. +// Use of this source code is governed by the MIT License, +// which can be found in the LICENSE file. 
+ +package level + +import ( + "bytes" + "context" + "encoding/binary" + "errors" + "fmt" + "net" + "sync" + "time" + + "github.com/btcsuite/btcd/chaincfg/chainhash" + "github.com/davecgh/go-spew/spew" + "github.com/juju/loggo" + "github.com/syndtr/goleveldb/leveldb" + "github.com/syndtr/goleveldb/leveldb/util" + + "github.com/hemilabs/heminetwork/database" + "github.com/hemilabs/heminetwork/database/level" + "github.com/hemilabs/heminetwork/database/tbcd" +) + +// Locking order: +// +// BlockHeaders +// BlocksMissing +// HeightHash +// Blocks +// +// Balances +// Utxos + +const ( + ldbVersion = 1 + + logLevel = "INFO" + verbose = false + + bhsLastKey = "last" + + minPeersRequired = 64 // minimum number of peers in good map before cache is purged +) + +type IteratorError error + +var log = loggo.GetLogger("level") + +var ErrIterator = IteratorError(errors.New("iteration error")) + +func init() { + loggo.ConfigureLoggers(logLevel) +} + +type ldb struct { + mtx sync.Mutex + blocksMissingCacheEnabled bool // XXX verify this code in tests + blocksMissingCache map[string]*cacheEntry // XXX purge and manages cache size + + // maybe remove this because it eats a bit of memory + peersGood map[string]struct{} + peersBad map[string]struct{} + + *level.Database + pool level.Pool +} + +var _ tbcd.Database = (*ldb)(nil) + +func New(ctx context.Context, home string) (*ldb, error) { + log.Tracef("New") + defer log.Tracef("New exit") + + ld, err := level.New(ctx, home, ldbVersion) + if err != nil { + return nil, err + } + log.Debugf("tbcdb database version: %v", ldbVersion) + l := &ldb{ + Database: ld, + pool: ld.DB(), + blocksMissingCacheEnabled: true, // XXX make setting + blocksMissingCache: make(map[string]*cacheEntry, 1024), + peersGood: make(map[string]struct{}, 1000), + peersBad: make(map[string]struct{}, 1000), + } + + return l, nil +} + +type ( + discardFunc func() + commitFunc func() error +) + +func (l *ldb) startTransaction(db string) (*leveldb.Transaction, commitFunc, 
discardFunc, error) { + bhsDB := l.pool[db] + tx, err := bhsDB.OpenTransaction() + if err != nil { + return nil, nil, nil, fmt.Errorf("%v open tansaction: %w", db, err) + } + d := true + discard := &d + df := func() { + if *discard { + log.Debugf("discarding transaction: %v", db) + tx.Discard() + } + } + cf := func() error { + err = tx.Commit() + if err != nil { + return fmt.Errorf("%v discard: %w", db, err) + } + *discard = false + return nil + } + + return tx, cf, df, nil +} + +func (l *ldb) MetadataGet(ctx context.Context, key []byte) ([]byte, error) { + log.Tracef("MetadataGet") + defer log.Tracef("MetadataGet exit") + + mdDB := l.pool[level.MetadataDB] + v, err := mdDB.Get(key, nil) + if err == leveldb.ErrNotFound { + return nil, database.NotFoundError(fmt.Sprintf("key not found: %v", + string(key))) + } + return v, err +} + +func (l *ldb) MetadataPut(ctx context.Context, key, value []byte) error { + log.Tracef("MetadataPut") + defer log.Tracef("MetadataPut exit") + + mdDB := l.pool[level.MetadataDB] + return mdDB.Put(key, value, nil) +} + +func (l *ldb) BlockHeaderByHash(ctx context.Context, hash []byte) (*tbcd.BlockHeader, error) { + log.Tracef("BlockHeaderByHash") + defer log.Tracef("BlockHeaderByHash exit") + + // It stands to reason that this code does not need a trasaction. The + // caller code will either receive or not receice an answer. It does + // not seem likely to be racing higher up in the stack. 
+ + bhsDB := l.pool[level.BlockHeadersDB] + ebh, err := bhsDB.Get(hash, nil) + if err != nil { + if err == leveldb.ErrNotFound { + return nil, database.NotFoundError(fmt.Sprintf("block header not found: %x", hash)) + } + return nil, fmt.Errorf("block header get: %w", err) + } + return decodeBlockHeader(hash, ebh), nil +} + +func (l *ldb) BlockHeadersByHeight(ctx context.Context, height uint64) ([]tbcd.BlockHeader, error) { + log.Tracef("BlockHeadersByHeight") + defer log.Tracef("BlockHeadersByHeight exit") + + bhs := make([]tbcd.BlockHeader, 0, 4) + start := make([]byte, 8) + binary.BigEndian.PutUint64(start, height) + limit := make([]byte, 8) + binary.BigEndian.PutUint64(limit, height+2) + + hhDB := l.pool[level.HeightHashDB] + it := hhDB.NewIterator(&util.Range{Start: start, Limit: limit}, nil) + defer it.Release() + for it.Next() { + fh, hash := keyToHeightHash(it.Key()) + if fh != height { + // all done + break + } + bh, err := l.BlockHeaderByHash(ctx, hash) + if err != nil { + return nil, fmt.Errorf("headers by height: %v", err) + } + bhs = append(bhs, *bh) + } + if len(bhs) == 0 { + return nil, database.NotFoundError(fmt.Sprintf("not found")) + } + return bhs, nil +} + +func (l *ldb) BlockHeadersBest(ctx context.Context) ([]tbcd.BlockHeader, error) { + log.Tracef("BlockHeadersBest") + defer log.Tracef("BlockHeadersBest exit") + + // This function is a bit of a crapshoot. It will receive many calls + // and thus it is racing by definition. Avoid the lock and let the + // caller serialize the response. + + // XXX this code does not handle multiple "best" block headers. + + bhsDB := l.pool[level.BlockHeadersDB] + // Get last record + ebh, err := bhsDB.Get([]byte(bhsLastKey), nil) + if err != nil { + if err == leveldb.ErrNotFound { + return []tbcd.BlockHeader{}, nil + } + return nil, fmt.Errorf("block headers best: %w", err) + } + + // Convert height to hash, cheat because we know where height lives in ebh. 
+ return l.BlockHeadersByHeight(ctx, binary.BigEndian.Uint64(ebh[0:8])) +} + +// heightHashToKey generates a sortable key from height and hash. With this key +// we can iterate over the block headers table and see what block records are +// missing. +func heightHashToKey(height uint64, hash []byte) []byte { + if len(hash) != chainhash.HashSize { + panic(fmt.Sprintf("invalid hash size: %v", len(hash))) + } + key := make([]byte, 8+1+chainhash.HashSize) + binary.BigEndian.PutUint64(key[0:8], height) + copy(key[9:], hash) + return key +} + +// keyToHeightHash reverses the process of heightHashToKey. +func keyToHeightHash(key []byte) (uint64, []byte) { + if len(key) != 8+1+chainhash.HashSize { + panic(fmt.Sprintf("invalid key size: %v", len(key))) + } + hash := make([]byte, chainhash.HashSize) // must make copy! + copy(hash, key[9:]) + return binary.BigEndian.Uint64(key[0:8]), hash +} + +// encodeBlockHeader encodes a database block header as [height,header] or +// [8+80] bytes. The hash is the leveldb table key. +func encodeBlockHeader(bh *tbcd.BlockHeader) (ebhr [88]byte) { + binary.BigEndian.PutUint64(ebhr[0:8], bh.Height) + copy(ebhr[8:], bh.Header[:]) + return +} + +// decodeBlockHeader reverse the process of encodeBlockHeader. The hash must be +// passed in but that is fine because it is the leveldb lookup key. +func decodeBlockHeader(hashSlice []byte, ebh []byte) *tbcd.BlockHeader { + // copy the values to prevent slicing reentrancy problems. 
+ var ( + hash [32]byte + header [80]byte + ) + copy(hash[:], hashSlice) + copy(header[:], ebh[8:]) + return &tbcd.BlockHeader{ + Hash: hash[:], + Height: binary.BigEndian.Uint64(ebh[0:8]), + Header: header[:], + } +} + +func (l *ldb) BlockHeadersInsert(ctx context.Context, bhs []tbcd.BlockHeader) error { + log.Tracef("BlockHeadersInsert") + defer log.Tracef("BlockHeadersInsert exit") + + if len(bhs) == 0 { + return fmt.Errorf("block headers insert: no block headers to insert") + } + + // block headers + bhsTx, bhsCommit, bhsDiscard, err := l.startTransaction(level.BlockHeadersDB) + if err != nil { + return fmt.Errorf("block headers open transaction: %w", err) + } + defer bhsDiscard() + + // Make sure we are not inserting the same blocks + has, err := bhsTx.Has(bhs[0].Hash, nil) + if err != nil { + return fmt.Errorf("block headers insert has: %v", err) + } + if has { + return database.DuplicateError("block headers insert duplicate") + } + + // blocks missing + bmTx, bmCommit, bmDiscard, err := l.startTransaction(level.BlocksMissingDB) + if err != nil { + return fmt.Errorf("blocks missing open transaction: %w", err) + } + defer bmDiscard() + + // height hash + hhTx, hhCommit, hhDiscard, err := l.startTransaction(level.HeightHashDB) + if err != nil { + return fmt.Errorf("height hash open transaction: %w", err) + } + defer hhDiscard() + + // Insert missing blocks and block headers + var lastRecord []byte + hhBatch := new(leveldb.Batch) + bmBatch := new(leveldb.Batch) + bhsBatch := new(leveldb.Batch) + for k := range bhs { + hhKey := heightHashToKey(bhs[k].Height, bhs[k].Hash[:]) + // Height 0 is genesis, we do not want a missing block record for that. + if bhs[k].Height != 0 { + // Insert a synthesized height_hash key that serves as + // an index to see which blocks are missing. + bmBatch.Put(hhKey, []byte{}) + } + + // Store height_hash for future reference + hhBatch.Put(hhKey, []byte{}) + + // XXX reason about pre encoding. 
Due to the caller code being + // heavily reentrant the odds are not good that encoding would + // only happens once. The downside is that this encoding + // happens in the database transaction and is thus locked. + + // Encode block header as [hash][height,header] or [32][8+80] bytes + ebh := encodeBlockHeader(&bhs[k]) + bhsBatch.Put(bhs[k].Hash, ebh[:]) + lastRecord = ebh[:] + } + + // Insert last height into block headers XXX this does not deal with forks + bhsBatch.Put([]byte(bhsLastKey), lastRecord) + + // Write height hash batch + err = hhTx.Write(hhBatch, nil) + if err != nil { + return fmt.Errorf("height hash batch: %w", err) + } + + // Write missing blocks batch + err = bmTx.Write(bmBatch, nil) + if err != nil { + return fmt.Errorf("blocks missing batch: %w", err) + } + + // Write block headers batch + err = bhsTx.Write(bhsBatch, nil) + if err != nil { + return fmt.Errorf("block headers insert: %w", err) + } + + // height hash commit + err = hhCommit() + if err != nil { + return fmt.Errorf("height hash commit: %w", err) + } + + // blocks missing commit + err = bmCommit() + if err != nil { + return fmt.Errorf("blocks missing commit: %w", err) + } + + // block headers commit + err = bhsCommit() + if err != nil { + return fmt.Errorf("block headers commit: %w", err) + } + + return nil +} + +type cacheEntry struct { + height uint64 + timestamp time.Time +} + +// XXX return hash and height only +func (l *ldb) BlocksMissing(ctx context.Context, count int) ([]tbcd.BlockIdentifier, error) { + log.Tracef("BlocksMissing") + defer log.Tracef("BlocksMissing exit") + + // This is a read only call and it can be run without a transaction. + // False positives may be returned to the caller and it should mostly + // handle that. If a block is inserted multiple time it will be silently + // ignored. 
+ + var blockCacheLen, x int + bmDB := l.pool[level.BlocksMissingDB] + bis := make([]tbcd.BlockIdentifier, 0, count) + it := bmDB.NewIterator(nil, nil) + defer it.Release() + for it.Next() { + bh := tbcd.BlockIdentifier{} + bh.Height, bh.Hash = keyToHeightHash(it.Key()) + bis = append(bis, bh) + + // cache the reply + if l.blocksMissingCacheEnabled { + l.mtx.Lock() + // XXX we MUST bind this map but for now let it be piggy + if _, ok := l.blocksMissingCache[string(bh.Hash)]; !ok { + l.blocksMissingCache[string(bh.Hash)] = &cacheEntry{ + height: bh.Height, + timestamp: time.Now(), + } + } + blockCacheLen = len(l.blocksMissingCache) + l.mtx.Unlock() + } + // if blockCacheLen >= 128 { + // log.Tracef("max cache %v", blockCacheLen) + // break + // } + + x++ + if x >= count { + break + } + } + + log.Debugf("BlocksMissing returning %v cached %v", len(bis), blockCacheLen) + + return bis, nil +} + +func (l *ldb) BlockInsert(ctx context.Context, b *tbcd.Block) (int64, error) { + log.Tracef("BlockInsert") + defer log.Tracef("BlockInsert exit") + + // Try cache first + var ce *cacheEntry + if l.blocksMissingCacheEnabled { + // XXX explain here why using string(b.Hash) is acceptable + l.mtx.Lock() + ce = l.blocksMissingCache[string(b.Hash)] + l.mtx.Unlock() + + defer func() { + // purge cache as well + l.mtx.Lock() + delete(l.blocksMissingCache, string(b.Hash)) + bmcl := len(l.blocksMissingCache) + l.mtx.Unlock() + // XXX string b.Hash is shit + log.Debugf("BlockInsert cached %v", bmcl) + }() + } + + // Determine block height either from cache or the database. + var bh *tbcd.BlockHeader + + // If cache entry is not found grab it from the database. 
+ if ce == nil { + // Open the block headers database transaction + bhsDB := l.pool[level.BlockHeadersDB] + ebh, err := bhsDB.Get(b.Hash, nil) + if err != nil { + if err == leveldb.ErrNotFound { + return -1, database.NotFoundError(fmt.Sprintf( + "block insert block header not found: %v", + b.Hash)) + } + return -1, fmt.Errorf("block insert block header: %w", err) + } + // XXX only do the big endian decoding here!, less bcopy + bh = decodeBlockHeader(b.Hash, ebh) + } else { + bh = &tbcd.BlockHeader{ + Height: ce.height, + Hash: b.Hash, + } + } + + // Insert block without transaction, if it succeeds and the missing + // does not it will be simply redone. + bDB := l.pool[level.BlocksDB] + has, err := bDB.Has(b.Hash, nil) + if err != nil { + return -1, fmt.Errorf("block insert has: %v", err) + } + if !has { + // Insert block since we do not have it yet + err = bDB.Put(b.Hash, b.Block, nil) + if err != nil { + return -1, fmt.Errorf("blocks insert put: %v", err) + } + } + + // It's possible to remove the transaction for bm without a transaction + // as well since the only risk would be duplicate work. Reason about + // this some more. + + // Remove block identifier from blocks missing + key := heightHashToKey(bh.Height, bh.Hash) + bmDB := l.pool[level.BlocksMissingDB] + err = bmDB.Delete(key, nil) + if err != nil { + // Ignore not found + if err == leveldb.ErrNotFound { + log.Errorf("block insert delete from missing: %v", err) + } else { + return -1, fmt.Errorf("block insert delete from missing: %v", err) + } + } + // XXX think about Height type; why are we forced to mix types? 
+ return int64(bh.Height), nil +} + +func (l *ldb) BlockByHash(ctx context.Context, hash []byte) (*tbcd.Block, error) { + log.Tracef("BlockByHash") + defer log.Tracef("BlockByHash exit") + + bDB := l.pool[level.BlocksDB] + eb, err := bDB.Get(hash, nil) + if err != nil { + if err == leveldb.ErrNotFound { + ch, _ := chainhash.NewHash(hash) + return nil, database.NotFoundError(fmt.Sprintf("block not found: %v", ch)) + } + return nil, fmt.Errorf("block get: %w", err) + } + return &tbcd.Block{ + Hash: hash, + Block: eb, + }, nil +} + +func (l *ldb) BlocksByTxId(ctx context.Context, txId tbcd.TxId) ([]tbcd.BlockHash, error) { + log.Tracef("BlocksByTxId") + defer log.Tracef("BlocksByTxId exit") + + blocks := make([]tbcd.BlockHash, 0, 2) + txDB := l.pool[level.TransactionsDB] + var txid [33]byte + txid[0] = 't' + copy(txid[1:], txId[:]) + it := txDB.NewIterator(util.BytesPrefix(txid[:]), nil) + defer it.Release() + for it.Next() { + block, err := tbcd.NewBlockHashFromBytes(it.Key()[33:]) + if err != nil { + return nil, err + } + blocks = append(blocks, block) + } + if err := it.Error(); err != nil { + return nil, fmt.Errorf("blocks by id iterator: %w", err) + } + if len(blocks) == 0 { + ch, _ := chainhash.NewHash(txId[:]) + return nil, database.NotFoundError(fmt.Sprintf("tx not found: %v", ch)) + } + + return blocks, nil +} + +func (l *ldb) SpendOutputsByTxId(ctx context.Context, txId tbcd.TxId) ([]tbcd.SpendInfo, error) { + log.Tracef("SpendOutputByOutpoint") + defer log.Tracef("SpendOutputByOutpoint exit") + + si := make([]tbcd.SpendInfo, 0, 2) + txDB := l.pool[level.TransactionsDB] + var key [1 + 32]byte + key[0] = 's' + copy(key[1:], txId[:]) + it := txDB.NewIterator(&util.Range{Start: key[:]}, nil) + defer it.Release() + for it.Next() { + if !bytes.Equal(it.Key()[1:33], key[1:33]) { + break + } + var s tbcd.SpendInfo + copy(s.TxId[:], it.Value()[0:32]) + copy(s.BlockHash[:], it.Key()[37:]) + s.InputIndex = binary.BigEndian.Uint32(it.Value()[32:36]) + si = append(si, 
s) + } + if err := it.Error(); err != nil { + return nil, fmt.Errorf("blocks by id iterator: %w", err) + } + if len(si) == 0 { + ch, _ := chainhash.NewHash(txId[:]) + return nil, database.NotFoundError(fmt.Sprintf("not found %v", ch)) + } + + return si, nil +} + +func (l *ldb) ScriptHashByOutpoint(ctx context.Context, op tbcd.Outpoint) (*tbcd.ScriptHash, error) { + log.Tracef("ScriptHashByOutpoint") + defer log.Tracef("ScriptHashByOutpoint exit") + + var uop [37]byte // 'u' tx_id idx + uop[0] = 'u' + copy(uop[1:], op[:]) + + uDB := l.pool[level.OutputsDB] + scriptHash, err := uDB.Get(uop[:], nil) + if err != nil { + return nil, fmt.Errorf("script hash by outpoint: %w", err) + } + + sh, err := tbcd.NewScriptHashFromBytes(scriptHash) + return &sh, err +} + +func (l *ldb) BalanceByScriptHash(ctx context.Context, sh tbcd.ScriptHash) (uint64, error) { + log.Tracef("BalanceByScriptHash") + defer log.Tracef("BalanceByScriptHash exit") + + var ( + start [33]byte + balance uint64 + ) + start[0] = 'h' + copy(start[1:], sh[:]) + oDB := l.pool[level.OutputsDB] + it := oDB.NewIterator(util.BytesPrefix(start[:]), nil) + defer it.Release() + for it.Next() { + balance += binary.BigEndian.Uint64(it.Value()) + } + if err := it.Error(); err != nil { + return 0, IteratorError(err) + } + + return balance, nil +} + +func (l *ldb) UtxosByScriptHash(ctx context.Context, sh tbcd.ScriptHash, start uint64, count uint64) ([]tbcd.Utxo, error) { + log.Tracef("UtxosByScriptHash") + defer log.Tracef("UtxosByScriptHash exit") + + var prefix [33]byte + utxos := make([]tbcd.Utxo, 0, 32) + prefix[0] = 'h' + copy(prefix[1:], sh[:]) + oDB := l.pool[level.OutputsDB] + it := oDB.NewIterator(util.BytesPrefix(prefix[:]), nil) + defer it.Release() + skip := start + for it.Next() { + if skip > 0 { + skip-- + continue + } + index := binary.BigEndian.Uint32(it.Key()[65:]) + value := binary.BigEndian.Uint64(it.Value()) + var txId tbcd.TxId + copy(txId[:], it.Key()[33:65]) + utxos = append(utxos, 
tbcd.NewUtxo(txId, value, index)) + + if len(utxos) >= int(count) { + break + } + } + if err := it.Error(); err != nil { + return nil, IteratorError(err) + } + + return utxos, nil +} + +func (l *ldb) BlockUtxoUpdate(ctx context.Context, utxos map[tbcd.Outpoint]tbcd.CacheOutput) error { + log.Tracef("BlockUtxoUpdate") + defer log.Tracef("BlockUtxoUpdate exit") + + // outputs + outsTx, outsCommit, outsDiscard, err := l.startTransaction(level.OutputsDB) + if err != nil { + return fmt.Errorf("outputs open db transaction: %w", err) + } + defer outsDiscard() + + outsBatch := new(leveldb.Batch) + for op, utxo := range utxos { + // op is already 'u' tx_id idx + + var hop [69]byte // 'h' script_hash tx_id tx_output_idx + hop[0] = 'h' + copy(hop[1:33], utxo.ScriptHashSlice()) + copy(hop[33:65], op.TxId()) + copy(hop[65:], utxo.OutputIndexBytes()) + + if utxo.IsDelete() { + // Delete balance and utxos + outsBatch.Delete(op[:][:]) + outsBatch.Delete(hop[:]) + } else { + // Add utxo to balance and utxos + outsBatch.Put(op[:], utxo.ScriptHashSlice()) + outsBatch.Put(hop[:], utxo.ValueBytes()) + } + // XXX this probably should be done by the caller but we do it + // here to lower memory pressure as large gobs of data are + // written to disk. 
+ delete(utxos, op) + } + + // Write outputs batch + err = outsTx.Write(outsBatch, nil) + if err != nil { + return fmt.Errorf("outputs insert: %w", err) + } + + // outputs commit + err = outsCommit() + if err != nil { + return fmt.Errorf("outputs commit: %w", err) + } + + return nil +} + +func (l *ldb) BlockTxUpdate(ctx context.Context, txs map[tbcd.TxKey]*tbcd.TxValue) error { + log.Tracef("BlockTxUpdate") + defer log.Tracef("BlockTxUpdate exit") + + // transactions + txsTx, txsCommit, txsDiscard, err := l.startTransaction(level.TransactionsDB) + if err != nil { + return fmt.Errorf("transactions open db transaction: %w", err) + } + defer txsDiscard() + + txsBatch := new(leveldb.Batch) + for k, v := range txs { + // cache is being emptied so we can slice it here. + var key, value []byte + switch k[0] { + case 't': + key = k[0:65] + value = nil + + case 's': + key = k[:] + value = v[:] + default: + return fmt.Errorf("invalid cache entry: %v", spew.Sdump(k)) + } + + txsBatch.Put(key, value) + // log.Infof("%v:%v", spew.Sdump(key), spew.Sdump(value)) + // // XXX this probably should be done by the caller but we do it + // // here to lower memory pressure as large gobs of data are + // // written to disk. 
+ delete(txs, k) + } + + // Write transactions batch + err = txsTx.Write(txsBatch, nil) + if err != nil { + return fmt.Errorf("transactions insert: %w", err) + } + + // transactions commit + err = txsCommit() + if err != nil { + return fmt.Errorf("transactions commit: %w", err) + } + + return nil +} + +func (l *ldb) PeersStats(ctx context.Context) (int, int) { + log.Tracef("PeersInsert") + defer log.Tracef("PeersInsert exit") + + l.mtx.Lock() + defer l.mtx.Unlock() + return len(l.peersGood), len(l.peersBad) +} + +func (l *ldb) PeersInsert(ctx context.Context, peers []tbcd.Peer) error { + log.Tracef("PeersInsert") + defer log.Tracef("PeersInsert exit") + + l.mtx.Lock() + for k := range peers { + p := peers[k] + a := net.JoinHostPort(p.Host, p.Port) + if len(a) < 7 { + // 0.0.0.0 + continue + } + if _, ok := l.peersBad[a]; ok { + // Skip bad peers + continue + } + if _, ok := l.peersGood[a]; ok { + // Not strictly needed to skip but this os working pseudode code + continue + } + + l.peersGood[a] = struct{}{} + } + allGoodPeers := len(l.peersGood) + allBadPeers := len(l.peersBad) + l.mtx.Unlock() + + log.Debugf("PeersInsert exit %v good %v bad %v", + len(peers), allGoodPeers, allBadPeers) + + return nil +} + +func (l *ldb) PeerDelete(ctx context.Context, host, port string) error { + log.Tracef("PeerDelete") + defer log.Tracef("PeerDelete exit") + + a := net.JoinHostPort(host, port) + if len(a) < 7 { + // 0.0.0.0 + return nil + } + + l.mtx.Lock() + if _, ok := l.peersGood[a]; ok { + delete(l.peersGood, a) + l.peersBad[a] = struct{}{} + } + + // Crude hammer to reset good/bad state of peers + if len(l.peersGood) < minPeersRequired { + // Kill all peers to force caller to reseed. This happens when + // network is down for a while and all peers are moved into + // bad map. 
+ l.peersGood = make(map[string]struct{}, 1000) + l.peersBad = make(map[string]struct{}, 1000) + log.Tracef("peer cache purged") + } + + allGoodPeers := len(l.peersGood) + allBadPeers := len(l.peersBad) + + l.mtx.Unlock() + + log.Debugf("PeerDelete exit good %v bad %v", allGoodPeers, allBadPeers) + + return nil +} + +func (l *ldb) PeersRandom(ctx context.Context, count int) ([]tbcd.Peer, error) { + log.Tracef("PeersRandom") + + x := 0 + peers := make([]tbcd.Peer, 0, count) + + l.mtx.Lock() + allGoodPeers := len(l.peersGood) + allBadPeers := len(l.peersBad) + for k := range l.peersGood { + h, p, err := net.SplitHostPort(k) + if err != nil { + continue + } + peers = append(peers, tbcd.Peer{Host: h, Port: p}) + x++ + if x >= count { + break + } + } + l.mtx.Unlock() + + log.Debugf("PeersRandom exit %v (good %v bad %v)", len(peers), + allGoodPeers, allBadPeers) + + // XXX For now return peers in order and let the stack above deal with it. + return peers, nil +} diff --git a/database/tbcd/level/level_test.go b/database/tbcd/level/level_test.go new file mode 100644 index 000000000..2058ed75a --- /dev/null +++ b/database/tbcd/level/level_test.go @@ -0,0 +1,419 @@ +// Copyright (c) 2024 Hemi Labs, Inc. +// Use of this source code is governed by the MIT License, +// which can be found in the LICENSE file. 
package level

import (
	"bytes"
	"context"
	"crypto/rand"
	"encoding/binary"
	"fmt"
	"io"
	"os"
	"reflect"
	"sort"
	"testing"

	"github.com/btcsuite/btcd/chaincfg"
	"github.com/btcsuite/btcd/chaincfg/chainhash"
	"github.com/btcsuite/btcd/wire"
	"github.com/davecgh/go-spew/spew"
	"github.com/juju/loggo"

	"github.com/hemilabs/heminetwork/database"
	"github.com/hemilabs/heminetwork/database/tbcd"
)

// bytes2Block deserializes a wire-encoded block.
func bytes2Block(block []byte) (*wire.MsgBlock, error) {
	var b wire.MsgBlock
	err := b.Deserialize(bytes.NewReader(block))
	if err != nil {
		return nil, fmt.Errorf("Deserialize: %v", err)
	}
	return &b, nil
}

// bytes2Header deserializes a wire-encoded block header.
func bytes2Header(header []byte) (*wire.BlockHeader, error) {
	var bh wire.BlockHeader
	err := bh.Deserialize(bytes.NewReader(header))
	if err != nil {
		return nil, fmt.Errorf("Deserialize: %v", err)
	}
	return &bh, nil
}

// h2b is the panic-on-error convenience form of header2Bytes, for use
// in test fixtures.
func h2b(wbh *wire.BlockHeader) []byte {
	hb, err := header2Bytes(wbh)
	if err != nil {
		panic(err)
	}
	return hb
}

// header2Bytes serializes a block header to its wire encoding.
func header2Bytes(wbh *wire.BlockHeader) ([]byte, error) {
	var b bytes.Buffer
	err := wbh.Serialize(&b)
	if err != nil {
		return nil, err
	}
	return b.Bytes(), nil
}

// random returns a variable number of random bytes.
func random(n int) []byte {
	buffer := make([]byte, n)
	_, err := io.ReadFull(rand.Reader, buffer)
	if err != nil {
		panic(err)
	}
	return buffer
}

// TestEncodeDecodeBlockHeader round-trips a block header through
// encodeBlockHeader/decodeBlockHeader and requires an exact match.
func TestEncodeDecodeBlockHeader(t *testing.T) {
	cp := chaincfg.TestNet3Params
	genesisBH := cp.GenesisBlock.Header
	genesisHash := cp.GenesisHash

	bh := tbcd.BlockHeader{
		Hash:   genesisHash[:],
		Height: 0x1122334455667788, // non-zero so height decoding is actually exercised
		Header: h2b(&genesisBH),
	}
	t.Logf("%v", spew.Sdump(bh))
	er := encodeBlockHeader(&bh)
	dr := decodeBlockHeader(bh.Hash, er[:])
	if !reflect.DeepEqual(bh, *dr) {
		t.Fatalf("encode decode block header wanted %v got %v",
			spew.Sdump(bh), spew.Sdump(*dr))
	}
}

// TestKey round-trips a (height, hash) pair through heightHashToKey and
// keyToHeightHash.
func TestKey(t *testing.T) {
	height := uint64(0xffeeddcc11223344)
	hv := []byte{1, 3, 3, 7}
	hash := chainhash.DoubleHashH(hv)
	key := heightHashToKey(height, hash[:])

	heightO, hashO := keyToHeightHash(key)
	if height != heightO {
		t.Fatalf("invalid height wanted %v got %v", height, heightO)
	}
	if !bytes.Equal(hash[:], hashO) {
		t.Fatalf("invalid hash wanted %v got %v",
			spew.Sdump(hash), spew.Sdump(hashO))
	}

	t.Logf("height %x", height)
	t.Logf("key %v", spew.Sdump(key))
	t.Logf("%v%v", spew.Sdump(hash[:]), spew.Sdump(hashO))
}

// ByteSlice implements sort.Interface over [][]byte using
// lexicographic byte comparison.
type ByteSlice [][]byte

func (x ByteSlice) Len() int           { return len(x) }
func (x ByteSlice) Less(i, j int) bool { return bytes.Compare(x[i], x[j]) == -1 }
func (x ByteSlice) Swap(i, j int)      { x[i], x[j] = x[j], x[i] }

// TestKeyOrder verifies that heightHashToKey produces keys whose byte
// order matches height order — required for leveldb range iteration.
func TestKeyOrder(t *testing.T) {
	// Create slice in reverse order
	count := uint64(10)
	keys := make(ByteSlice, count)
	for i := uint64(0); i < count; i++ {
		b := make([]byte, 8)
		binary.BigEndian.PutUint64(b, i)
		hash := chainhash.DoubleHashH(b)
		keys[count-1-i] = heightHashToKey(i, hash[:])
	}
	log.Infof("%v", spew.Sdump(keys))

	// Now sort
	sort.Sort(keys)
	log.Infof("%v", spew.Sdump(keys))

	// After sorting byte-wise, keys must come back in height order with
	// their original hashes intact.
	for i := uint64(0); i < count; i++ {
		height, hash := keyToHeightHash(keys[i])
		if i != height {
			t.Fatalf("invalid height wanted %v got %v", i, height)
		}

		b := make([]byte, 8)
		binary.BigEndian.PutUint64(b, i)
		expectedHash := chainhash.DoubleHashH(b)
		if !bytes.Equal(expectedHash[:], hash) {
			t.Fatalf("invalid hash wanted %x got %x", expectedHash, hash)
		}
	}
}

// TestLevelDB exercises a fresh database end-to-end: insert genesis,
// insert fake headers, drive the blocks-missing table, then insert the
// blocks and confirm the missing table drains.
func TestLevelDB(t *testing.T) {
	// Missing blocks
	// 1 000 000 000

	loggo.ConfigureLoggers("INFO")

	dir, err := os.MkdirTemp("", "leveldbtest")
	if err != nil {
		t.Fatal(err)
	}
	defer os.RemoveAll(dir)

	ctx := context.Background()
	ldb, err := New(ctx, dir)
	if err != nil {
		t.Fatal(err)
	}
	defer func() {
		err := ldb.Close()
		if err != nil {
			t.Fatalf("close: %v", err)
		}
	}()

	// Create fake blockchain somewhat resembling tbc calls

	// Insert genesis
	cp := &chaincfg.TestNet3Params
	gbh, err := header2Bytes(&cp.GenesisBlock.Header)
	if err != nil {
		t.Fatal(err)
	}

	// Insert genesis
	tgbh := tbcd.BlockHeader{
		Height: 0,
		Hash:   cp.GenesisHash[:],
		Header: gbh,
	}
	err = ldb.BlockHeadersInsert(ctx, []tbcd.BlockHeader{tgbh})
	if err != nil {
		t.Fatalf("block headers insert: %v", err)
	}

	missing, err := ldb.BlocksMissing(ctx, 16)
	if err != nil {
		t.Fatalf("block headers missing: %v", err)
	}

	if len(missing) != 0 {
		t.Fatal("genesis should not be returned")
	}

	// Insert fake block headers
	count := uint64(64)
	bhs := make([]tbcd.BlockHeader, 0, count+1)
	bhs = append(bhs, tgbh) // need genesis for prevhash
	for i := uint64(1); i < count; i++ {
		bits := uint32(i + 4567)
		nonce := uint32(i + 1337)
		prevHash, err := chainhash.NewHash(bhs[i-1].Hash[:])
		if err != nil {
			t.Fatalf("prevhash %v", err)
		}
		merkleHash := chainhash.DoubleHashH(prevHash[:])
		wbh := wire.NewBlockHeader(1, prevHash, &merkleHash, bits, nonce)
		blockHash := wbh.BlockHash()
		t.Logf("height %v prev %v", i, prevHash)
		bhs = append(bhs, tbcd.BlockHeader{
			Height: i,
			Hash:   database.ByteArray(blockHash[:]),
			Header:
h2b(wbh), + }) + } + t.Logf("%v", spew.Sdump(bhs)) + // Insert missing blocks + err = ldb.BlockHeadersInsert(ctx, bhs[1:]) // skip genesis insert + if err != nil { + t.Fatalf("block headers insert: %v", err) + } + + expectedMissingBH := 16 + missing, err = ldb.BlocksMissing(ctx, expectedMissingBH) + if err != nil { + t.Fatalf("block headers missing: %v", err) + } + t.Logf("%v", spew.Sdump(missing)) + + if len(missing) != min(expectedMissingBH, int(count-1)) { + t.Fatalf("%v %v %v", len(missing), expectedMissingBH, count) + } + + // Start at height 1 + height := uint64(1) + for k := range missing { + if height != bhs[height].Height { + t.Fatalf("unexpected internal height wanted %v got %v", + height, bhs[height].Height) + } + if bhs[height].Height != missing[k].Height { + t.Fatalf("unexpected missing height wanted %v got %v", + bhs[height].Height, missing[k].Height) + } + if !bytes.Equal(bhs[height].Hash, missing[k].Hash) { + t.Fatalf("unexpected missing hash wanted %v got %v", + bhs[height].Hash, missing[k].Hash) + } + + height++ + } + + // Insert missing blocks + for i := uint64(1); i < count; i++ { + b := tbcd.Block{ + Hash: bhs[i].Hash, + Block: []byte{'i', 'a', 'm', 'b', 'l', 'o', 'c', 'k'}, + } + insertedHeight, err := ldb.BlockInsert(ctx, &b) + if err != nil { + t.Fatal(err) + } + log.Infof("inserted height: %v", insertedHeight) + } + + // Ensure blocks missing table is updated + missing, err = ldb.BlocksMissing(ctx, expectedMissingBH) + if err != nil { + t.Fatalf("block headers missing: %v", err) + } + if len(missing) != 0 { + t.Fatalf("expected missing table to be empty: %v", spew.Sdump(missing)) + } + if len(ldb.blocksMissingCache) != 0 { + t.Fatalf("expected missing blocks cache to be empty: %v", + spew.Sdump(ldb.blocksMissingCache)) + } +} + +//func TestBitcoinBits(t *testing.T) { +// // Decode block +// block381 := 
`01000000c5b9489065fa7e1ac4facc51a5a0ccc2111911609f43386ebe7ca1d200000000a0db3bbb22a2a8441d84dbe335c24959ea3d3d6e91bf67e66bbcb0d7e0a9c4836a834a4dffff001d041813660201000000010000000000000000000000000000000000000000000000000000000000000000ffffffff0e046a834a4d017e062f503253482fffffffff0100f2052a01000000232103dac3fb8de40965f42fb4afb3baa07d3304bc2aa28cfc25f12b52f1523681451dac00000000010000001518b50db063333a3261b9b41e887b4aa5b69becdc9967550507c120e22a764967000000004a493046022100e49de3c89180769db346145cdda48323ddecc2af0041293432528767b18407650221009f7878deb054e4f9c0e6aecbe6de15f5d829041c11f7952d33e96c76ada1258b01ffffffff322948a4806acfeca2b32248d0e183c8eb09d5e5ef48adf33777307635414cc0000000004a493046022100ba88d34e4d4fd85ab5e4d77cb74f71c87a24235bcbe39cf4334633f70ff27233022100b5aa1b96bab59457d3d837473de1e4f9f89ba3ee39964463952271c5b4140fa001ffffffffcf330295467623ec1378dc6fa312103ad8a210b3e1351f2f4b6a57ac43fcd472000000004a493046022100b21560dfda52352c4416c1e48496659ea3d29e4e25706a991986864210bc759e0221009c1e45af6e2eba0883a862442d85a2b48c3395e35a4276f535cd70d45a971c7401ffffffffeeed0f4d975db8f66788f809ecf8c351d19ff5805276ef31983bc5682548342d0000000049483045022100e02cc0b4bf8a126807b1577819944c1bb13e8f4028cf7df0a0729013d511b071022010a1bcdefca334588939f9fe40e0d8607588191684fce0f46180a139305b8b4001ffffffffc8ac0a2fb1c01e0e0a5339d296eb072b2b9f9cb1d410a1fdd69a2c797094dda50000000049483045022016ba8f50d7f30be7e4a68c3d50368d577e2ef6c8b60842725ae636b2985776fc022100bb39d47d1955ffca47920d743bcd6f05b31ea2bf3dc7ede225eb4c901126b48901fffffffff1b03cf0680b9ef33fd311f6bbc6db3f1c164f9341f48a02df1905cec4ce241b000000004948304502201dbbfabc47f6da84ceedbc92b792d4a8ef632f0bddf7ebfad5ca21f3731f450502210098751ccf37fd97ff82446486d4c1d62860c2080a1128ea5ddb0d30bfde3cd7a801ffffffff1fe6898ac074a680fe7458ff87a03956db73a880d2ace6539efcc43002bd97ed000000004a493046022100f8a83fadb06af9c0cc730f17ae47fe7a09cada9eae623b8dd86bf365ef0e20480221009a10b073b2a8b313d975f801213efdf12b94141d7b6a8e98de3b0c67ee1cef4c01ffffffff6fd85
c0213cfe9863573596a4d5f1509ac41a91b572e6c1bdafe46d9249a5fa4000000004a493046022100f3e98f3e76cc0f533b0e1cccd82650b704e31e3e7e62bf81bb474cf2add58ebf022100f77003eec814a3336cc305b8461cf3ccb19b1f18f06f66208ed31c3e468466ed01ffffffff9e93a056a6515e7916fc049578708d188c2146d3c12638acac92e0b72e076edd000000004a4930460221008ee8d7348aed82a8d074753ab4c8dbdd28a668da821269c4cd0c5c253738cab7022100b06a0208d60af1be6303dd883fd05f964a42f7de317761641ec1158944f52b6b01ffffffff0ecc7b73d8cd5d377d74d836bb6e3473478554a923154286ddaf6985948fd9d300000000494830450221008159ed783bc717ff5a6502cd87a8d8144fae74c6fc6943a5a38da7170203cb3802207e31577a576bc01510cb2280f918a371f63eee44cd2b4490c0994d261787916e01ffffffff78966e9f0a2d4452ab2418249fa6fb1a325a04f039d015899141a82aa5a6c05c000000004847304402206655b13198e413ac8f1aa8926d4617560758cf8b5045afdfc9116da0873ed89802205db55cf3f398467bfc6997f68c881e5f2a7225293ebbd2af40d15df6de4ef87701ffffffff69f2096bbede7015fee2fb307f7d7dd084641b7f4af5c3074dc7b2b6df03277c000000004a493046022100c9199296673a1beae598a6d2348ef13ad1b9f15eebaa825d2282adf017cbb5f0022100b54934e40ff0194a53dcaa9d017c36a93dbb53aa45fe21ab93b07fbb58570d5501ffffffff3c11b146d43fd62ec36b733942a52ba0c352c95a3f078808a38d080898cb83300000000048473044022004c64773b9e6a17cfca7ff583be650104c0538940289b2da8f8bebbd32e486b302200174d8f0938a0f9eeab4c4b137581e032f06d4740e3b0ad9d0423a0a8de65af101ffffffff59ac3c37adfa89b9a907ef9d485c57262e9283e1eb96069c2de04369ef1b3c7600000000494830450220306f3ac72de9dbeb1ec139e4e89cc3b3b9bcb63747bf0e165fcfc773f3669832022100c00a16800f16bf1c71ac6c2989b42d974b0ec2f3e3671325fb2cae52a1c569d801ffffffffb4bbecee818dd986e5ab82f36dbd5ccc29ab134614e304c0a397e14082fe7bb7000000004a493046022100ed68e0303052b41ffd80c1e905cee5547e92422d43b73e473a615e4a47146bb5022100ecab3f92c62477350753b4efea19d608fcce15b1b2c38fbe905e9d1f9ad7631f01ffffffff7546bbac9ae1c8980da6e8c154b368eb4df305b6f3f27ff38f195a13c9ee0484000000004948304502202288566af2b68b6982d1244e293ea3d7c156a425329b7f61b272e4deec317bea022100d9739976b442d35c32830cb
2c105e0d7275f7efaa99eaeea4b24a553267a31fc01ffffffffd15854d1e5ba349daf72089f470b24557a2be25105b7831a3f18a62fb8bab677000000004948304502206e3a23075e0248ea8cabc7c875b4cfd9f036c1c4f358a00ec152fc96d1cb6cf8022100d34c018815f63c65f5364061369382b31d579cd6d8a4afe9ec1f03ba66d7717801ffffffffdf686a7f31c2c1de6a608553b26d6336434719fa45428eb3df59bbef75ce9e7e000000004948304502200a22a24a8f817a2f24d3f8c2670f3cb25cd389ce25e0d45eeb0aea08563c5c9802210081ff14edb230a44e5b52e35f573676096a937fc27cc830b153b229b92cac75c101ffffffffd226fea91b99c5a31a034d340f647b722e50950c96a876eb96569efaeaf3b227000000004a4930460221009684e60a7fd61362d0dad79858044aa4a7b878b3f0bd432e384fe4c7e6c90bde0221009883e4f739cffe574bac5bed0a4e69708433973a2490d9415d303614fc31be4701fffffffff640c60ea438dc020048599869836f5323ef47477ee17caddf076ed428898f7100000000494830450220028eb7617dc161a282512c81975d41a1594c05f34cb26fb759682bf784da7071022100a0913abea7229b3c465a4fa32dc861f72ef684e8dd3f19aac5f0f74ea39c03cf01ffffffffd59d2a49b1883c6f7ac68a9d2649dc0dde3f0205e19d8fdaf8065381f9ba61cc000000004a4930460221009f5b27dfd397423a04cab52ee6e8215e290e9666309f0f59f5bc5f6c207d3639022100f5a79133db2cc786140aeee0bf7c8a81adca6071928e8210f1c9f0c653e2f04201ffffffff0240195e29010000001976a914944a7d4b3a8d3a5ecf19dfdfd8dcc18c6f1487dd88acc0c01e49170000001976a91432040178c5cf81cb200ab99af1131f187745b51588ac00000000` +// +// bb, err := hex.DecodeString(block381) +// if err != nil { +// t.Fatal(err) +// } +// // decode +// b, err := btcutil.NewBlockFromBytes(bb) +// if err != nil { +// t.Fatal(err) +// } +// txs := b.Transactions() +// chainParams := &chaincfg.TestNet3Params +// for k := range txs { +// tx := txs[k] +// t.Logf("tx %v %v", tx.Index(), tx.Hash()) +// if blockchain.IsCoinBase(tx) { +// t.Logf("coinbase! 
%v", spew.Sdump(tx.MsgTx())) +// } +// for kk := range tx.MsgTx().TxOut { +// scriptClass, _, _, err := txscript.ExtractPkScriptAddrs( +// tx.MsgTx().TxOut[kk].PkScript, &chaincfg.TestNet3Params, +// ) +// t.Logf("---- %v", spew.Sdump(scriptClass)) +// +// p, err := txscript.ParsePkScript(tx.MsgTx().TxOut[kk].PkScript) +// if err != nil { +// t.Logf("ERROR: %v %v", kk, err) +// continue +// } else { +// t.Logf("tx %v", spew.Sdump(p)) +// } +// a, err := p.Address(chainParams) +// if err != nil { +// t.Logf("ERROR address: %v %v", kk, err) +// } else { +// t.Logf("tx address %v", spew.Sdump(a)) +// } +// } +// } +//} +// +//func TestDumpIndex(t *testing.T) { +// levelDBHome := "~/.tbcd" +// network := "testnet3" +// +// ctx, cancel := context.WithCancel(context.Background()) +// defer cancel() +// +// // Open existing DB +// db, err := New(ctx, filepath.Join(levelDBHome, network)) +// if err != nil { +// t.Fatal(err) +// } +// defer func() { +// err := db.Close() +// if err != nil { +// t.Fatalf("close: %v", err) +// } +// }() +// +// outsDB := db.pool[level.OutputsDB] +// it := outsDB.NewIterator(nil, nil) +// defer it.Release() +// for it.Next() { +// t.Logf("outputs key %vvalue %v", spew.Sdump(it.Key()), spew.Sdump(it.Value())) +// } +// +// bsDB := db.pool[level.BalancesDB] +// bsIt := bsDB.NewIterator(&util.Range{Start: nil, Limit: nil}, nil) +// defer bsIt.Release() +// for bsIt.Next() { +// t.Logf("balances key %vvalue %v", spew.Sdump(bsIt.Key()), spew.Sdump(bsIt.Value())) +// } +//} +// +//func TestIndex(t *testing.T) { +// // t.Skip() +// +// // start block +// levelDBHome := "~/.tbcd" +// network := "testnet3" +// +// ctx, cancel := context.WithCancel(context.Background()) +// defer cancel() +// +// // Open existing DB +// db, err := New(ctx, filepath.Join(levelDBHome, network)) +// if err != nil { +// t.Fatal(err) +// } +// defer func() { +// err := db.Close() +// if err != nil { +// t.Fatalf("close: %v", err) +// } +// }() +// +// startHeight := 
uint64(0) +// count := uint64(10) // block 381 is the first to spend transactions +// start := time.Now() +// log.Infof("Starting to index to height %v at %v", startHeight, start) +// elapsed := time.Now() +// for height := startHeight; height < startHeight+count; height++ { +// bhs, err := db.BlockHeadersByHeight(ctx, height) +// if err != nil { +// t.Fatalf("block headers by height %v: %v", height, err) +// } +// t.Logf("%v", bhs) +// _ = elapsed +// //b, err := db.BlockByHash(ctx, bhs[0].Hash) +// //if err != nil { +// // t.Fatalf("block by hash %v: %v", height, err) +// //} +// //bh, btxs, err := tbcd.BlockTxs(&chaincfg.TestNet3Params, b.Block) +// //if err != nil { +// // t.Fatalf("block transactions %v: %v", height, err) +// //} +// //err = db.BlockTxUpdate(ctx, bh[:], btxs) +// //if err != nil { +// // // t.Fatalf("%v", spew.Sdump(btxs)) +// // t.Fatalf("block utxos %v: %v", height, err) +// //} +// //if height%1000 == 0 { +// // log.Infof("height %v %v", height, time.Now().Sub(elapsed)) +// // elapsed = time.Now() +// //} +// } +// log.Infof("Ending index height %v took %v", count, time.Now().Sub(start)) +//} diff --git a/e2e/docker-compose.yml b/e2e/docker-compose.yml index f1c5d0a34..da102f422 100644 --- a/e2e/docker-compose.yml +++ b/e2e/docker-compose.yml @@ -17,6 +17,7 @@ services: - '-txindex=1' ports: - 18443:18443 + - 18444:18444 volumes: - type: tmpfs target: /bitcoin/.bitcoin diff --git a/go.mod b/go.mod index 4d67133db..e0fd38a80 100644 --- a/go.mod +++ b/go.mod @@ -10,6 +10,7 @@ require ( github.com/decred/dcrd/dcrec/secp256k1/v4 v4.2.0 github.com/docker/docker v25.0.5+incompatible github.com/docker/go-connections v0.5.0 + github.com/dustin/go-humanize v1.0.1 github.com/ethereum/go-ethereum v1.13.5 github.com/go-test/deep v1.1.0 github.com/juju/loggo v1.0.0 @@ -18,7 +19,9 @@ require ( github.com/phayes/freeport v0.0.0-20220201140144-74d24b5ae9f5 github.com/prometheus/client_golang v1.18.0 github.com/sethvargo/go-retry v0.2.4 + 
github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 github.com/testcontainers/testcontainers-go v0.28.0 + golang.org/x/sys v0.17.0 nhooyr.io/websocket v1.8.10 ) @@ -44,6 +47,7 @@ require ( github.com/go-ole/go-ole v1.3.0 // indirect github.com/gogo/protobuf v1.3.2 // indirect github.com/golang/protobuf v1.5.3 // indirect + github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb // indirect github.com/google/uuid v1.6.0 // indirect github.com/holiman/uint256 v1.2.3 // indirect github.com/klauspost/compress v1.17.7 // indirect @@ -61,6 +65,7 @@ require ( github.com/prometheus/client_model v0.6.0 // indirect github.com/prometheus/common v0.47.0 // indirect github.com/prometheus/procfs v0.12.0 // indirect + github.com/rogpeppe/go-internal v1.12.0 // indirect github.com/shirou/gopsutil/v3 v3.24.1 // indirect github.com/shoenig/go-m1cpu v0.1.6 // indirect github.com/sirupsen/logrus v1.9.3 // indirect @@ -74,8 +79,7 @@ require ( golang.org/x/crypto v0.19.0 // indirect golang.org/x/exp v0.0.0-20240213143201-ec583247a57a // indirect golang.org/x/mod v0.15.0 // indirect - golang.org/x/sys v0.17.0 // indirect - golang.org/x/tools v0.18.0 // indirect + golang.org/x/tools v0.18.1-0.20240311201521-78fbdeb61842 // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20240221002015-b0ce06bbee7c // indirect google.golang.org/grpc v1.62.0 // indirect google.golang.org/protobuf v1.33.0 // indirect diff --git a/go.sum b/go.sum index 8bc236abb..df0b9c070 100644 --- a/go.sum +++ b/go.sum @@ -69,12 +69,16 @@ github.com/docker/go-connections v0.5.0 h1:USnMq7hx7gwdVZq1L49hLXaFtUdTADjXGp+uj github.com/docker/go-connections v0.5.0/go.mod h1:ov60Kzw0kKElRwhNs9UlUHAE/F9Fe6GLaXnqyDdmEXc= github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= +github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY= +github.com/dustin/go-humanize 
v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto= github.com/ethereum/go-ethereum v1.13.5 h1:U6TCRciCqZRe4FPXmy1sMGxTfuk8P7u2UoinF3VbaFk= github.com/ethereum/go-ethereum v1.13.5/go.mod h1:yMTu38GSuyxaYzQMViqNmQ1s3cE84abZexQmTgenWk0= github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= +github.com/fsnotify/fsnotify v1.6.0 h1:n+5WquG0fcWoWp6xPWfHdbskMCQaFnG6PfBrh1Ky4HY= +github.com/fsnotify/fsnotify v1.6.0/go.mod h1:sl3t1tCWJFWoRz9R8WJCbQihKKwmorjAbSClcnxKAGw= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/logr v1.4.1 h1:pKouT5E8xu9zeFC39JXRDukb6JFQPXM5p5I91188VAQ= github.com/go-logr/logr v1.4.1/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= @@ -98,6 +102,8 @@ github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaS github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb h1:PBC98N2aIaM3XXiurYmW7fx4GZkL8feAMVq7nEjURHk= +github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= @@ -151,14 +157,17 @@ github.com/moby/term v0.5.0 h1:xt8Q1nalod/v7BqbG21f8mQPqH+xAaC9C3N3wfWbVP0= github.com/moby/term 
v0.5.0/go.mod h1:8FzsFHVUBGZdbDsJw/ot+X+d5HLUbvklYLJ9uGfcI3Y= github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A= github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc= +github.com/nxadm/tail v1.4.4 h1:DQuhQpB1tVlglWS2hLQ5OV6B5r8aGxSrPc5Qo6uTN78= github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= +github.com/onsi/ginkgo v1.14.0 h1:2mOpI4JVVPBN+WQRa0WKH2eXR+Ey+uK4n7Zj0aYpIQA= github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY= github.com/onsi/gomega v1.4.1/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= +github.com/onsi/gomega v1.10.1 h1:o0+MgICZLuZ7xjH7Vx6zS/zcu93/BEp1VwkIW1mEXCE= github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= @@ -181,8 +190,8 @@ github.com/prometheus/common v0.47.0 h1:p5Cz0FNHo7SnWOmWmoRozVcjEp0bIVU8cV7OShpj github.com/prometheus/common v0.47.0/go.mod h1:0/KsvlIEfPQCQ5I2iNSAWKPZziNCvRs5EC6ILDTlAPc= github.com/prometheus/procfs v0.12.0 h1:jluTpSng7V9hY0O2R9DzzJHYb2xULk9VTR1V1R/k6Bo= github.com/prometheus/procfs v0.12.0/go.mod h1:pcuDEFsWDnvcgNzo4EEweacyhjeA9Zk3cnaOZAZEfOo= -github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= -github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog= 
+github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8= +github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= github.com/sethvargo/go-retry v0.2.4 h1:T+jHEQy/zKJf5s95UkguisicE0zuF9y7+/vgz08Ocec= github.com/sethvargo/go-retry v0.2.4/go.mod h1:1afjQuvh7s4gflMObvjLPaWgluLLyhA1wmVZ6KLpICw= github.com/shirou/gopsutil/v3 v3.24.1 h1:R3t6ondCEvmARp3wxODhXMTLC/klMa87h2PHUw5m7QI= @@ -201,6 +210,7 @@ github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/ github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= +github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 h1:epCh84lMvA70Z7CTTCmYQn2CKbY8j86K7/FAIr141uY= github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7/go.mod h1:q4W45IWZaF22tdD+VEXcAWRA037jwmWEB5VWYORlTpc= github.com/testcontainers/testcontainers-go v0.28.0 h1:1HLm9qm+J5VikzFDYhOd+Zw12NtOl+8drH2E8nTY1r8= github.com/testcontainers/testcontainers-go v0.28.0/go.mod h1:COlDpUXbwW3owtpMkEB1zo9gwb1CoKVKlyrVPejF4AU= @@ -290,11 +300,12 @@ golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGm golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.18.0 h1:k8NLag8AGHnn+PHbl7g43CtqZAwG60vZkLqgyZgIHgQ= -golang.org/x/tools v0.18.0/go.mod h1:GL7B4CwcLLeo59yx/9UWWuNOW1n3VZ4f5axWfML7Lcg= +golang.org/x/tools v0.18.1-0.20240311201521-78fbdeb61842 h1:No0LMXYFkp3j4oEsPdtY8LUQz33gu79Rm9DE+izMeGQ= +golang.org/x/tools 
v0.18.1-0.20240311201521-78fbdeb61842/go.mod h1:GL7B4CwcLLeo59yx/9UWWuNOW1n3VZ4f5axWfML7Lcg= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= google.golang.org/genproto v0.0.0-20240213162025-012b6fc9bca9 h1:9+tzLLstTlPTRyJTh+ah5wIMsBW5c4tQwGTN3thOW9Y= google.golang.org/genproto/googleapis/api v0.0.0-20240123012728-ef4313101c80 h1:Lj5rbfG876hIAYFjqiJnPHfhXbv+nzTWfm04Fg/XSVU= @@ -318,10 +329,13 @@ gopkg.in/check.v1 v1.0.0-20160105164936-4f90aeace3a2/go.mod h1:Co6ibVJAznAaIkqp8 gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= +gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 
v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/service/tbc/crawler.go b/service/tbc/crawler.go new file mode 100644 index 000000000..3e2287dd6 --- /dev/null +++ b/service/tbc/crawler.go @@ -0,0 +1,444 @@ +// Copyright (c) 2024 Hemi Labs, Inc. +// Use of this source code is governed by the MIT License, +// which can be found in the LICENSE file. + +package tbc + +import ( + "context" + "crypto/sha256" + "encoding/binary" + "errors" + "fmt" + "runtime" + "sync" + "time" + + "github.com/btcsuite/btcd/btcutil" + "github.com/btcsuite/btcd/chaincfg" + "github.com/btcsuite/btcd/chaincfg/chainhash" + "github.com/btcsuite/btcd/txscript" + "github.com/dustin/go-humanize" + + "github.com/hemilabs/heminetwork/database" + "github.com/hemilabs/heminetwork/database/tbcd" +) + +var ( + UtxoIndexHeightKey = []byte("utxoindexheight") // last indexed utxo height key + TxIndexHeightKey = []byte("txindexheight") // last indexed tx height key +) + +func logMemStats() { + var mem runtime.MemStats + runtime.ReadMemStats(&mem) + + // Go memory statistics are hard to interpret but the following list is + // an aproximation: + // Alloc is currently allocated memory + // TotalAlloc is all memory allocated over time + // Sys is basicaly a peak memory use + log.Infof("Alloc = %v, TotalAlloc = %v, Sys = %v, NumGC = %v\n", + humanize.IBytes(mem.Alloc), + humanize.IBytes(mem.TotalAlloc), + humanize.IBytes(mem.Sys), + mem.NumGC) +} + +func processUtxos(cp *chaincfg.Params, txs []*btcutil.Tx, utxos map[tbcd.Outpoint]tbcd.CacheOutput) error { + for idx, tx := range txs { + for _, txIn := range tx.MsgTx().TxIn { + if idx == 0 { + // Skip coinbase inputs + continue + } + op := tbcd.NewOutpoint(txIn.PreviousOutPoint.Hash, + txIn.PreviousOutPoint.Index) + if utxo, ok := utxos[op]; ok && !utxo.IsDelete() { + delete(utxos, op) + continue + } + } + for outIndex, txOut := range tx.MsgTx().TxOut { + if 
txscript.IsUnspendable(txOut.PkScript) { + continue + } + utxos[tbcd.NewOutpoint(*tx.Hash(), uint32(outIndex))] = tbcd.NewCacheOutput( + sha256.Sum256(txOut.PkScript), + uint64(txOut.Value), + uint32(outIndex)) + } + } + // log.Infof("%v", spew.Sdump(utxos)) + return nil +} + +func (s *Server) fetchOP(ctx context.Context, w *sync.WaitGroup, op tbcd.Outpoint, utxos map[tbcd.Outpoint]tbcd.CacheOutput) { + defer w.Done() + + pkScript, err := s.db.ScriptHashByOutpoint(ctx, op) + if err != nil { + // This happens when a transaction is created and spent in the + // same block. + // XXX this is probably too loud but log for investigation and + // remove later. + log.Debugf("db missing pkscript: %v", op) + return + } + s.mtx.Lock() + utxos[op] = tbcd.NewDeleteCacheOutput(*pkScript, op.TxIndex()) + s.mtx.Unlock() +} + +func (s *Server) fixupCache(ctx context.Context, b *btcutil.Block, utxos map[tbcd.Outpoint]tbcd.CacheOutput) error { + w := new(sync.WaitGroup) + txs := b.Transactions() + for idx, tx := range txs { + for _, txIn := range tx.MsgTx().TxIn { + if idx == 0 { + // Skip coinbase inputs + continue + } + op := tbcd.NewOutpoint(txIn.PreviousOutPoint.Hash, + txIn.PreviousOutPoint.Index) + s.mtx.Lock() + if _, ok := utxos[op]; ok { + s.mtx.Unlock() + continue + } + s.mtx.Unlock() + + // utxo not found, retrieve pkscript from database. 
+ w.Add(1) + go s.fetchOP(ctx, w, op, utxos) + } + } + + w.Wait() + + return nil +} + +func (s *Server) indexUtxosInBlocks(ctx context.Context, startHeight, maxHeight uint64, utxos map[tbcd.Outpoint]tbcd.CacheOutput) (int, error) { + log.Tracef("indexUtxoBlocks") + defer log.Tracef("indexUtxoBlocks exit") + + circuitBreaker := false + if maxHeight != 0 { + circuitBreaker = true + } + + utxosPercentage := 95 // flush cache at >95% capacity + blocksProcessed := 0 + for height := startHeight; ; height++ { + bhs, err := s.db.BlockHeadersByHeight(ctx, height) + if err != nil { + if errors.Is(err, database.ErrNotFound) { + log.Infof("No more blocks at: %v", height) + break + } + return 0, fmt.Errorf("block headers by height %v: %v", height, err) + } + eb, err := s.db.BlockByHash(ctx, bhs[0].Hash) + if err != nil { + return 0, fmt.Errorf("block by hash %v: %v", height, err) + } + b, err := btcutil.NewBlockFromBytes(eb.Block) + if err != nil { + ch, _ := chainhash.NewHash(bhs[0].Hash) + return 0, fmt.Errorf("could not decode block %v %v: %v", + height, ch, err) + } + + // fixupCache is executed in parallel meaning that the utxos + // map must be locked as it is being processed. + err = s.fixupCache(ctx, b, utxos) + if err != nil { + return 0, fmt.Errorf("parse block %v: %v", height, err) + } + // At this point we can lockless since it is all single + // threaded again. 
+ err = processUtxos(s.chainParams, b.Transactions(), utxos) + if err != nil { + return 0, fmt.Errorf("process utxos %v: %v", height, err) + } + + blocksProcessed++ + + // Try not to overshoot the cache to prevent costly allocations + cp := len(utxos) * 100 / s.cfg.MaxCachedTxs + if height%10000 == 0 || cp > utxosPercentage || blocksProcessed == 1 { + log.Infof("Utxo indexer height: %v utxo cache %v%%", height, cp) + } + if cp > utxosPercentage { + // Set utxosMax to the largest utxo capacity seen + s.cfg.MaxCachedTxs = max(len(utxos), s.cfg.MaxCachedTxs) + // Flush + break + } + + // If set we may have to exit early + if circuitBreaker { + if height >= maxHeight-1 { + break + } + } + } + + return blocksProcessed, nil +} + +func (s *Server) UtxoIndexer(ctx context.Context, height, count uint64) error { + log.Tracef("UtxoIndexer") + defer log.Tracef("UtxoIndexer exit") + + var maxHeight uint64 + circuitBreaker := false + if count != 0 { + circuitBreaker = true + maxHeight = height + count + } + + // Allocate here so that we don't waste space when not indexing. + utxos := make(map[tbcd.Outpoint]tbcd.CacheOutput, s.cfg.MaxCachedTxs) + defer clear(utxos) + + log.Infof("Start indexing UTxos at height %v count %v", height, count) + for { + start := time.Now() + blocksProcessed, err := s.indexUtxosInBlocks(ctx, height, maxHeight, utxos) + if err != nil { + return fmt.Errorf("index blocks: %w", err) + } + if blocksProcessed == 0 { + return nil + } + utxosCached := len(utxos) + log.Infof("Utxo indexer blocks processed %v in %v utxos cached %v cache unused %v avg tx/blk %v", + blocksProcessed, time.Now().Sub(start), utxosCached, + s.cfg.MaxCachedTxs-utxosCached, utxosCached/blocksProcessed) + + start = time.Now() + err = s.db.BlockUtxoUpdate(ctx, utxos) + if err != nil { + return fmt.Errorf("block tx update: %w", err) + } + // leveldb does all kinds of allocations, force GC to lower + // memory preassure. 
+ logMemStats() + runtime.GC() + + log.Infof("Flushing utxos complete %v took %v", + utxosCached, time.Now().Sub(start)) + + height += uint64(blocksProcessed) + + // Record height in metadata + var dbHeight [8]byte + binary.BigEndian.PutUint64(dbHeight[:], height) + err = s.db.MetadataPut(ctx, UtxoIndexHeightKey, dbHeight[:]) + if err != nil { + return fmt.Errorf("metadata utxo height: %w", err) + } + + // If set we may have to exit early + if circuitBreaker { + log.Infof("Indexed utxos to height: %v", height-1) + if height >= maxHeight { + return nil + } + } + } +} + +func processTxs(cp *chaincfg.Params, blockHash *chainhash.Hash, txs []*btcutil.Tx, txsCache map[tbcd.TxKey]*tbcd.TxValue) error { + for _, tx := range txs { + // cache txid <-> block + txsCache[tbcd.NewTxMapping(tx.Hash(), blockHash)] = nil + + // cache spent transactions + for txInIdx, txIn := range tx.MsgTx().TxIn { + txk, txv := tbcd.NewTxSpent( + blockHash, + tx.Hash(), + &txIn.PreviousOutPoint.Hash, + txIn.PreviousOutPoint.Index, + uint32(txInIdx)) + txsCache[txk] = &txv + } + } + return nil +} + +func (s *Server) indexTxsInBlocks(ctx context.Context, startHeight, maxHeight uint64, txs map[tbcd.TxKey]*tbcd.TxValue) (int, error) { + log.Tracef("indexTxsInBlocks") + defer log.Tracef("indexTxsInBlocks exit") + + circuitBreaker := false + if maxHeight != 0 { + circuitBreaker = true + } + + txsPercentage := 95 // flush cache at >95% capacity + blocksProcessed := 0 + for height := startHeight; ; height++ { + bhs, err := s.db.BlockHeadersByHeight(ctx, height) + if err != nil { + if errors.Is(err, database.ErrNotFound) { + log.Infof("No more blocks at: %v", height) + break + } + return 0, fmt.Errorf("block headers by height %v: %v", height, err) + } + eb, err := s.db.BlockByHash(ctx, bhs[0].Hash) + if err != nil { + return 0, fmt.Errorf("block by hash %v: %v", height, err) + } + b, err := btcutil.NewBlockFromBytes(eb.Block) + if err != nil { + ch, _ := chainhash.NewHash(bhs[0].Hash) + return 0, 
fmt.Errorf("could not decode block %v %v: %v", + height, ch, err) + } + + err = processTxs(s.chainParams, b.Hash(), b.Transactions(), txs) + if err != nil { + return 0, fmt.Errorf("process txs %v: %v", height, err) + } + + blocksProcessed++ + + // Try not to overshoot the cache to prevent costly allocations + cp := len(txs) * 100 / s.cfg.MaxCachedTxs + if height%10000 == 0 || cp > txsPercentage || blocksProcessed == 1 { + log.Infof("Tx indexer height: %v tx cache %v%%", height, cp) + } + if cp > txsPercentage { + // Set txsMax to the largest tx capacity seen + s.cfg.MaxCachedTxs = max(len(txs), s.cfg.MaxCachedTxs) + // Flush + break + } + + // If set we may have to exit early + if circuitBreaker { + if height >= maxHeight-1 { + break + } + } + } + + return blocksProcessed, nil +} + +// TxIndexer starts indexing at start height for count blocks. If count is 0 +// the indexers will index to tip. It does NOT verify that the provided start +// height is correct. This is the version of the function that has no training +// wheels and is meant for internal use only. +func (s *Server) TxIndexer(ctx context.Context, height, count uint64) error { + log.Tracef("TxIndexer") + defer log.Tracef("TxIndexer exit") + + var maxHeight uint64 + circuitBreaker := false + if count != 0 { + circuitBreaker = true + maxHeight = height + count + } + + // Allocate here so that we don't waste space when not indexing. 
+ txs := make(map[tbcd.TxKey]*tbcd.TxValue, s.cfg.MaxCachedTxs) + // log.Infof("max %v %v", s.cfg.MaxCachedTxs, s.cfg.MaxCachedTxs*(105)) + // return nil + defer clear(txs) + + log.Infof("Start indexing transactions at height %v count %v", height, count) + for { + start := time.Now() + blocksProcessed, err := s.indexTxsInBlocks(ctx, height, maxHeight, txs) + if err != nil { + return fmt.Errorf("index blocks: %w", err) + } + if blocksProcessed == 0 { + return nil + } + txsCached := len(txs) + log.Infof("Tx indexer blocks processed %v in %v transactions cached %v cache unused %v avg tx/blk %v", + blocksProcessed, time.Now().Sub(start), txsCached, + s.cfg.MaxCachedTxs-txsCached, txsCached/blocksProcessed) + + start = time.Now() + err = s.db.BlockTxUpdate(ctx, txs) + if err != nil { + return fmt.Errorf("block tx update: %w", err) + } + // leveldb does all kinds of allocations, force GC to lower + // memory preassure. + logMemStats() + runtime.GC() + + log.Infof("Flushing txs complete %v took %v", + txsCached, time.Now().Sub(start)) + + height += uint64(blocksProcessed) + + // Record height in metadata + var dbHeight [8]byte + binary.BigEndian.PutUint64(dbHeight[:], height) + err = s.db.MetadataPut(ctx, TxIndexHeightKey, dbHeight[:]) + if err != nil { + return fmt.Errorf("metadata tx height: %w", err) + } + + // If set we may have to exit early + if circuitBreaker { + log.Infof("Indexed transactions to height: %v", height-1) + if height >= maxHeight { + return nil + } + } + } +} + +// SyncIndexersToHeight tries to move the various indexers to the suplied +// height (inclusive). 
+func (s *Server) SyncIndexersToHeight(ctx context.Context, height uint64) error { + log.Tracef("SyncIndexersToHeight") + defer log.Tracef("SyncIndexersToHeight exit") + + // Outputs index + uhBE, err := s.db.MetadataGet(ctx, UtxoIndexHeightKey) + if err != nil { + if !errors.Is(err, database.ErrNotFound) { + return fmt.Errorf("utxo indexer metadata get: %w", err) + } + uhBE = make([]byte, 8) + } + heightUtxo := binary.BigEndian.Uint64(uhBE) + countUtxo := int64(height) - int64(heightUtxo) + if countUtxo >= 0 { + err := s.UtxoIndexer(ctx, heightUtxo, uint64(countUtxo+1)) + if err != nil { + return fmt.Errorf("utxo indexer: %w", err) + } + } + + // Transactions index + thBE, err := s.db.MetadataGet(ctx, TxIndexHeightKey) + if err != nil { + if !errors.Is(err, database.ErrNotFound) { + return fmt.Errorf("tx indexer metadata get: %w", err) + } + thBE = make([]byte, 8) + } + heightTx := binary.BigEndian.Uint64(thBE) + countTx := int64(height) - int64(heightTx) + if countTx >= 0 { + err := s.TxIndexer(ctx, heightTx, uint64(countTx+1)) + if err != nil { + return fmt.Errorf("tx indexer: %w", err) + } + } + + return nil +} diff --git a/service/tbc/crawler_test.go b/service/tbc/crawler_test.go new file mode 100644 index 000000000..44e3d0357 --- /dev/null +++ b/service/tbc/crawler_test.go @@ -0,0 +1,125 @@ +// Copyright (c) 2024 Hemi Labs, Inc. +// Use of this source code is governed by the MIT License, +// which can be found in the LICENSE file. + +package tbc + +import ( + "testing" + + "github.com/hemilabs/heminetwork/database/tbcd" +) + +//func TestIndex(t *testing.T) { +// t.Skip() +// logLevel := "INFO" +// loggo.ConfigureLoggers(logLevel) +// s, err := NewServer(&Config{ +// Network: "testnet3", +// }) +// if err != nil { +// t.Fatal(err) +// } +// +// ctx, cancel := context.WithCancel(context.Background()) +// defer cancel() +// +// // Open db. 
+// s.cfg.LevelDBHome = "~/.tbcd" +// s.db, err = level.New(ctx, filepath.Join(s.cfg.LevelDBHome, s.cfg.Network)) +// if err != nil { +// t.Fatal(err) +// } +// defer s.db.Close() +// +// start := time.Now() +// err = s.indexer(ctx) +// if err != nil { +// t.Fatal(err) +// } +// t.Logf("done at %v", time.Now().Sub(start)) +//} +// +//func TestUtxo(t *testing.T) { +// t.Skip() +// +// dc := &spew.ConfigState{ +// DisableMethods: true, +// } +// +// utxos := make(map[tbcd.Outpoint]tbcd.Utxo, 100) +// hash := sha256.Sum256([]byte("Hello, world!")) +// index := uint32(1) +// op := tbcd.NewOutpoint(hash, index) +// hash2 := sha256.Sum256([]byte("Hello, world!2")) +// op2 := tbcd.NewOutpoint(hash2, index) +// utxo := tbcd.Utxo{} +// utxos[op] = utxo +// utxos[op2] = utxo +// t.Logf("%v", dc.Sdump(utxos)) +// t.Logf("%v", len(op.String())) +// +// for k := range utxos { +// t.Logf("%T", k) +// } +// +// type myt [2]byte +// var m myt +// m[0] = 1 +// t.Logf("%T", m) +// t.Logf("%x", m) +// t.Logf("%v", len(m)) +// +// var mx myt +// mx[0] = 2 +// mm := make(map[myt]int) +// mm[m] = 1234 +// mm[mx] = 5678 +// t.Logf("%v", dc.Sdump(mm)) +// +// t.Logf("%v", spew.Sdump(utxos)) +//} + +// Test the various mapsizes +// run with go test -v -bench . -benchmem -run=BenchmarkMap +func allocateMap(size int) map[tbcd.Outpoint]tbcd.Utxo { + m := make(map[tbcd.Outpoint]tbcd.Utxo, size) + for i := 0; i < size; i++ { + m[tbcd.Outpoint{}] = tbcd.Utxo{} + } + return m +} + +func BenchmarkMap10(b *testing.B) { + for i := 0; i < b.N; i++ { + allocateMap(10) + } +} + +func BenchmarkMap100(b *testing.B) { + for i := 0; i < b.N; i++ { + allocateMap(100) + } +} + +func BenchmarkMap10000(b *testing.B) { + for i := 0; i < b.N; i++ { + allocateMap(10000) + } +} + +func BenchmarkMap100000(b *testing.B) { + for i := 0; i < b.N; i++ { + allocateMap(100000) + } +} + +// BenchmarkMap1000000 seems to indicate that 1 million utxos use about +// 182714418 bytes which is about 174MB on linux/arm64. 
+// Or, about 183 per cache entry. 100 bytes for the key and value (36+44) and +// 83 in overhead. +func BenchmarkMap1000000(b *testing.B) { + for i := 0; i < b.N; i++ { + allocateMap(1e6) + } +} diff --git a/service/tbc/peer.go b/service/tbc/peer.go new file mode 100644 index 000000000..2648418a7 --- /dev/null +++ b/service/tbc/peer.go @@ -0,0 +1,202 @@ +// Copyright (c) 2024 Hemi Labs, Inc. +// Use of this source code is governed by the MIT License, +// which can be found in the LICENSE file. + +package tbc + +import ( + "context" + "fmt" + "math/rand" + "net" + "sync" + "time" + + "github.com/btcsuite/btcd/wire" +) + +// XXX wire could use some contexts, + +func writeTimeout(timeout time.Duration, conn net.Conn, msg wire.Message, pver uint32, btcnet wire.BitcoinNet) error { + conn.SetWriteDeadline(time.Now().Add(timeout)) + _, err := wire.WriteMessageWithEncodingN(conn, msg, pver, btcnet, + wire.LatestEncoding) + return err +} + +func readTimeout(timeout time.Duration, conn net.Conn, pver uint32, btcnet wire.BitcoinNet) (wire.Message, error) { + conn.SetReadDeadline(time.Now().Add(timeout)) + _, msg, _, err := wire.ReadMessageWithEncodingN(conn, pver, btcnet, + wire.LatestEncoding) + return msg, err +} + +type peer struct { + mtx sync.RWMutex + isDialing bool + conn net.Conn + connected time.Time + + address string + + protocolVersion uint32 + network wire.BitcoinNet + + remoteVersion *wire.MsgVersion + addrV2 bool +} + +func NewPeer(network wire.BitcoinNet, address string) (*peer, error) { + // XXX parse address and return failure if it's wrong + return &peer{ + protocolVersion: wire.ProtocolVersion, + network: network, + address: address, + }, nil +} + +func (p *peer) String() string { + return p.address +} + +func (p *peer) write(timeout time.Duration, msg wire.Message) error { + p.conn.SetWriteDeadline(time.Now().Add(timeout)) + _, err := wire.WriteMessageWithEncodingN(p.conn, msg, p.protocolVersion, + p.network, wire.LatestEncoding) + return err +} + +func 
(p *peer) read() (wire.Message, error) { + // XXX contexts would be nice + p.conn.SetReadDeadline(time.Time{}) // never timeout on reads + _, msg, _, err := wire.ReadMessageWithEncodingN(p.conn, p.protocolVersion, + p.network, wire.LatestEncoding) + return msg, err +} + +func (p *peer) handshake(ctx context.Context, conn net.Conn) error { + log.Tracef("handshake %v -> %v", conn.LocalAddr(), conn.RemoteAddr()) + defer log.Tracef("handshake exit %v -> %v", conn.LocalAddr(), conn.RemoteAddr()) + + // 1. send our version + // 2. receive version + // 3. send sendaddrv2 + // 4. send verack + // 5. receive sendaddrv2, verack or ignore + + defaultHandshakeTimeout := 5 * time.Second + us := &wire.NetAddress{Timestamp: time.Now()} + them := &wire.NetAddress{Timestamp: time.Now()} + msg := wire.NewMsgVersion(us, them, uint64(rand.Int63()), 0) + err := writeTimeout(defaultHandshakeTimeout, conn, msg, p.protocolVersion, p.network) + if err != nil { + return fmt.Errorf("could not write version message: %w", err) + } + + // 2. receive version + rmsg, err := readTimeout(defaultHandshakeTimeout, conn, p.protocolVersion, p.network) + if err != nil { + return fmt.Errorf("could not read version message: %w", err) + } + + v, ok := rmsg.(*wire.MsgVersion) + if !ok { + return fmt.Errorf("expected version message") + } + p.remoteVersion = v + + // 3. send sendaddrv2 + if v.ProtocolVersion >= 70016 { + err = writeTimeout(defaultHandshakeTimeout, conn, wire.NewMsgSendAddrV2(), p.protocolVersion, p.network) + if err != nil { + return fmt.Errorf("could not send sendaddrv2: %w", err) + } + } + + // 4. 
send verack + err = writeTimeout(defaultHandshakeTimeout, conn, wire.NewMsgVerAck(), p.protocolVersion, p.network) + if err != nil { + return fmt.Errorf("could not send verack: %w", err) + } + + for count := 0; count < 3; count++ { + msg, err := readTimeout(defaultHandshakeTimeout, conn, p.protocolVersion, p.network) + if err == wire.ErrUnknownMessage { + continue + } else if err != nil { + return fmt.Errorf("handshake read: %w", err) + } + + switch msg.(type) { + case *wire.MsgVerAck: + return nil + case *wire.MsgSendAddrV2: + p.addrV2 = true + continue + default: + return fmt.Errorf("unexpected message type: %T", msg) + } + } + + return fmt.Errorf("handshake failed") +} + +func (p *peer) connect(ctx context.Context) error { + log.Tracef("connect %v", p.address) // not locked but ok + defer log.Tracef("connect exit %v", p.address) + + p.mtx.Lock() + if p.isDialing { + p.mtx.Unlock() + return fmt.Errorf("already dialing %v", p.address) + } + if p.conn != nil { + p.mtx.Unlock() + return fmt.Errorf("already open %v", p.address) + } + p.isDialing = true + p.mtx.Unlock() + + d := net.Dialer{ + Timeout: 5 * time.Second, + KeepAlive: 9 * time.Second, + } + + log.Debugf("dialing %s", p.address) + conn, err := d.DialContext(ctx, "tcp", p.address) + if err != nil { + return fmt.Errorf("dial %v: %w", p.address, err) + } + log.Debugf("done dialing %s", p.address) + + err = p.handshake(ctx, conn) + if err != nil { + return fmt.Errorf("handshake %v: %w", p.address, err) + } + + p.mtx.Lock() + p.conn = conn + p.isDialing = false + p.connected = time.Now() + p.mtx.Unlock() + + return nil +} + +func (p *peer) close() error { + log.Tracef("close") + defer log.Tracef("close exit") + + p.mtx.Lock() + defer p.mtx.Unlock() + if p.conn != nil { + return p.conn.Close() + } + return fmt.Errorf("already closed") +} + +func (p *peer) isConnected() bool { + p.mtx.Lock() + defer p.mtx.Unlock() + return !p.isDialing +} diff --git a/service/tbc/rpc.go b/service/tbc/rpc.go new file mode 100644 
index 000000000..1d593292c --- /dev/null +++ b/service/tbc/rpc.go @@ -0,0 +1,559 @@ +// Copyright (c) 2024 Hemi Labs, Inc. +// Use of this source code is governed by the MIT License, +// which can be found in the LICENSE file. + +package tbc + +import ( + "context" + "crypto/rand" + "encoding/hex" + "errors" + "fmt" + "io" + "net/http" + "sync" + "time" + + "github.com/btcsuite/btcd/wire" + "github.com/davecgh/go-spew/spew" + "nhooyr.io/websocket" + + "github.com/hemilabs/heminetwork/api" + "github.com/hemilabs/heminetwork/api/protocol" + "github.com/hemilabs/heminetwork/api/tbcapi" + "github.com/hemilabs/heminetwork/database" + "github.com/hemilabs/heminetwork/database/tbcd/level" +) + +type tbcWs struct { + wg sync.WaitGroup + addr string + conn *protocol.WSConn + sessionID string + requestContext context.Context +} + +func (s *Server) handleWebsocketRead(ctx context.Context, ws *tbcWs) { + defer ws.wg.Done() + + log.Tracef("handleWebsocketRead: %v", ws.addr) + defer log.Tracef("handleWebsocketRead exit: %v", ws.addr) + + for { + cmd, id, payload, err := tbcapi.Read(ctx, ws.conn) + if err != nil { + var ce websocket.CloseError + if errors.As(err, &ce) { + log.Tracef("handleWebsocketRead: %v", err) + return + } + if errors.Is(err, io.EOF) { + log.Tracef("handleWebsocketRead: EOF") + return + } + + log.Errorf("handleWebsocketRead: %v", err) + return + } + + switch cmd { + case tbcapi.CmdPingRequest: + err = s.handlePingRequest(ctx, ws, payload, id) + case tbcapi.CmdBlockHeadersByHeightRequest: + handler := func(ctx context.Context) (any, error) { + req := payload.(*tbcapi.BlockHeadersByHeightRequest) + return s.handleBlockHeadersByHeightRequest(ctx, req) + } + + go s.handleRequest(ctx, ws, id, cmd, handler) + case tbcapi.CmdBlockHeadersByHeightRawRequest: + handler := func(ctx context.Context) (any, error) { + req := payload.(*tbcapi.BlockHeadersByHeightRawRequest) + return s.handleBlockHeadersByHeightRawRequest(ctx, req) + } + + go s.handleRequest(ctx, ws, id, 
cmd, handler) + case tbcapi.CmdBlockHeadersBestRawRequest: + handler := func(ctx context.Context) (any, error) { + req := payload.(*tbcapi.BlockHeadersBestRawRequest) + return s.handleBlockHeadersBestRawRequest(ctx, req) + } + + go s.handleRequest(ctx, ws, id, cmd, handler) + case tbcapi.CmdBlockHeadersBestRequest: + handler := func(ctx context.Context) (any, error) { + req := payload.(*tbcapi.BlockHeadersBestRequest) + return s.handleBlockHeadersBestRequest(ctx, req) + } + + go s.handleRequest(ctx, ws, id, cmd, handler) + case tbcapi.CmdBalanceByAddressRequest: + handler := func(ctx context.Context) (any, error) { + req := payload.(*tbcapi.BalanceByAddressRequest) + return s.handleBalanceByAddressRequest(ctx, req) + } + + go s.handleRequest(ctx, ws, id, cmd, handler) + case tbcapi.CmdUtxosByAddressRawRequest: + handler := func(ctx context.Context) (any, error) { + req := payload.(*tbcapi.UtxosByAddressRawRequest) + return s.handleUtxosByAddressRawRequest(ctx, req) + } + + go s.handleRequest(ctx, ws, id, cmd, handler) + case tbcapi.CmdUtxosByAddressRequest: + handler := func(ctx context.Context) (any, error) { + req := payload.(*tbcapi.UtxosByAddressRequest) + return s.handleUtxosByAddressRequest(ctx, req) + } + + go s.handleRequest(ctx, ws, id, cmd, handler) + case tbcapi.CmdTxByIdRequest: + handler := func(ctx context.Context) (any, error) { + req := payload.(*tbcapi.TxByIdRequest) + return s.handleTxByIdRequest(ctx, req) + } + + go s.handleRequest(ctx, ws, id, cmd, handler) + case tbcapi.CmdTxByIdRawRequest: + handler := func(ctx context.Context) (any, error) { + req := payload.(*tbcapi.TxByIdRawRequest) + return s.handleTxByIdRawRequest(ctx, req) + } + + go s.handleRequest(ctx, ws, id, cmd, handler) + default: + err = fmt.Errorf("unknown command: %v", cmd) + } + + // Command failed + if err != nil { + log.Errorf("handleWebsocketRead %s %s %s: %v", + ws.addr, cmd, id, err) + return + } + } +} + +func (s *Server) handleRequest(ctx context.Context, ws *tbcWs, id 
string, cmd protocol.Command, handler func(ctx context.Context) (any, error)) { + log.Tracef("handleRequest: %s: %s", ws.addr, cmd) + defer log.Tracef("handleRequest exit: %s: %s", ws.addr, cmd) + + ctx, cancel := context.WithTimeout(ctx, s.requestTimeout) + defer cancel() + + // TODO(joshuasing): add rate limiting? + + res, err := handler(ctx) + if err != nil { + log.Errorf("Failed to handle %s request for %s: %v", cmd, ws.addr, err) + } + + if res == nil { + return + } + + // XXX: spew.Sdump should only be called when the log level is enabled. + log.Debugf("Responding to %s request with %v", cmd, spew.Sdump(res)) + + if err = tbcapi.Write(ctx, ws.conn, id, res); err != nil { + log.Errorf("Failed to handle %s request for %s: protocol write failed: %v", + cmd, ws.addr, err) + } + + // Request processed successfully + s.cmdsProcessed.Inc() +} + +func (s *Server) handlePingRequest(ctx context.Context, ws *tbcWs, payload any, id string) error { + log.Tracef("handlePingRequest: %v", ws.addr) + defer log.Tracef("handlePingRequest exit: %v", ws.addr) + + p, ok := payload.(*tbcapi.PingRequest) + if !ok { + return fmt.Errorf("invalid payload type: %T", payload) + } + + res := &tbcapi.PingResponse{ + OriginTimestamp: p.Timestamp, + Timestamp: time.Now().Unix(), + } + + // XXX: spew.Sdump should only be called when the log level is enabled. 
+ log.Tracef("responding with %v", spew.Sdump(res)) + + if err := tbcapi.Write(ctx, ws.conn, id, res); err != nil { + return fmt.Errorf("handlePingRequest write: %v %v", + ws.addr, err) + } + + // Ping request processed successfully + s.cmdsProcessed.Inc() + return nil +} + +func (s *Server) handleBlockHeadersByHeightRequest(ctx context.Context, req *tbcapi.BlockHeadersByHeightRequest) (any, error) { + log.Tracef("handleBtcBlockHeadersByHeightRequest") + defer log.Tracef("handleBtcBlockHeadersByHeightRequest exit") + + wireBlockHeaders, err := s.BlockHeadersByHeight(ctx, uint64(req.Height)) + if err != nil { + if errors.Is(err, database.ErrNotFound) { + return &tbcapi.BlockHeadersByHeightResponse{ + Error: protocol.RequestErrorf("block headers not found at height %d", req.Height), + }, nil + } + + e := protocol.NewInternalError(err) + return &tbcapi.BlockHeadersByHeightResponse{ + Error: e.ProtocolError(), + }, e + } + + return &tbcapi.BlockHeadersByHeightResponse{ + BlockHeaders: wireBlockHeadersToTBC(wireBlockHeaders), + }, nil +} + +func (s *Server) handleBlockHeadersByHeightRawRequest(ctx context.Context, req *tbcapi.BlockHeadersByHeightRawRequest) (any, error) { + log.Tracef("handleBtcBlockHeadersByHeightRawRequest") + defer log.Tracef("handleBtcBlockHeadersByHeightRawRequest exit") + + rawBlockHeaders, err := s.RawBlockHeadersByHeight(ctx, uint64(req.Height)) + if err != nil { + if errors.Is(err, database.ErrNotFound) { + return &tbcapi.BlockHeadersByHeightRawResponse{ + Error: protocol.RequestErrorf("block headers not found at height %d", req.Height), + }, nil + } + + e := protocol.NewInternalError(err) + return &tbcapi.BlockHeadersByHeightRawResponse{ + Error: e.ProtocolError(), + }, e + } + + return &tbcapi.BlockHeadersByHeightRawResponse{ + BlockHeaders: rawBlockHeaders, + }, nil +} + +func (s *Server) handleBlockHeadersBestRawRequest(ctx context.Context, _ *tbcapi.BlockHeadersBestRawRequest) (any, error) { + log.Tracef("handleBlockHeadersBestRawRequest") 
+ defer log.Tracef("handleBlockHeadersBestRawRequest exit") + + height, blockHeaders, err := s.RawBlockHeadersBest(ctx) + if err != nil { + e := protocol.NewInternalError(err) + return &tbcapi.BlockHeadersBestRawResponse{ + Error: e.ProtocolError(), + }, e + } + + return &tbcapi.BlockHeadersBestRawResponse{ + Height: height, + BlockHeaders: blockHeaders, + }, nil +} + +func (s *Server) handleBlockHeadersBestRequest(ctx context.Context, _ *tbcapi.BlockHeadersBestRequest) (any, error) { + log.Tracef("handleBlockHeadersBestRequest") + defer log.Tracef("handleBlockHeadersBestRequest exit") + + height, blockHeaders, err := s.BlockHeadersBest(ctx) + if err != nil { + e := protocol.NewInternalError(err) + return &tbcapi.BlockHeadersBestResponse{ + Error: e.ProtocolError(), + }, e + } + + return &tbcapi.BlockHeadersBestResponse{ + Height: height, + BlockHeaders: wireBlockHeadersToTBC(blockHeaders), + }, nil +} + +func (s *Server) handleBalanceByAddressRequest(ctx context.Context, req *tbcapi.BalanceByAddressRequest) (any, error) { + log.Tracef("handleBalanceByAddressRequest") + defer log.Tracef("handleBalanceByAddressRequest exit") + + balance, err := s.BalanceByAddress(ctx, req.Address) + if err != nil { + e := protocol.NewInternalError(err) + return &tbcapi.BalanceByAddressResponse{ + Error: e.ProtocolError(), + }, e + } + + return &tbcapi.BalanceByAddressResponse{ + Balance: balance, + }, nil +} + +func (s *Server) handleUtxosByAddressRawRequest(ctx context.Context, req *tbcapi.UtxosByAddressRawRequest) (any, error) { + log.Tracef("handleUtxosByAddressRawRequest") + defer log.Tracef("handleUtxosByAddressRawRequest exit") + + utxos, err := s.UtxosByAddress(ctx, req.Address, uint64(req.Start), uint64(req.Count)) + if err != nil { + if errors.Is(err, level.ErrIterator) { + e := protocol.NewInternalError(err) + return &tbcapi.UtxosByAddressRawResponse{ + Error: e.ProtocolError(), + }, err + } + + return &tbcapi.UtxosByAddressRawResponse{ + Error: 
protocol.RequestErrorf("error getting utxos for address: %s", req.Address), + }, nil + } + + var responseUtxos []api.ByteSlice + for _, utxo := range utxos { + responseUtxos = append(responseUtxos, utxo[:]) + } + + return &tbcapi.UtxosByAddressRawResponse{ + Utxos: responseUtxos, + }, nil +} + +func (s *Server) handleUtxosByAddressRequest(ctx context.Context, req *tbcapi.UtxosByAddressRequest) (any, error) { + log.Tracef("handleUtxosByAddressRequest") + defer log.Tracef("handleUtxosByAddressRequest exit") + + utxos, err := s.UtxosByAddress(ctx, req.Address, uint64(req.Start), uint64(req.Count)) + if err != nil { + if errors.Is(err, level.ErrIterator) { + e := protocol.NewInternalError(err) + return &tbcapi.UtxosByAddressResponse{ + Error: e.ProtocolError(), + }, e + } + + return &tbcapi.UtxosByAddressResponse{ + Error: protocol.RequestErrorf("error getting utxos for address: %s", req.Address), + }, nil + } + + var responseUtxos []tbcapi.Utxo + for _, utxo := range utxos { + responseUtxos = append(responseUtxos, tbcapi.Utxo{ + TxId: api.ByteSlice(utxo.ScriptHashSlice()), + Value: utxo.Value(), + OutIndex: utxo.OutputIndex(), + }) + } + + return &tbcapi.UtxosByAddressResponse{ + Utxos: responseUtxos, + }, nil +} + +func (s *Server) handleTxByIdRawRequest(ctx context.Context, req *tbcapi.TxByIdRawRequest) (any, error) { + log.Tracef("handleTxByIdRawRequest") + defer log.Tracef("handleTxByIdRawRequest exit") + + if len(req.TxId) != 32 { + responseErr := protocol.RequestErrorf("invalid tx id") + return &tbcapi.TxByIdRawResponse{ + Error: responseErr, + }, nil + } + + tx, err := s.TxById(ctx, [32]byte(req.TxId)) + if err != nil { + if errors.Is(err, database.ErrNotFound) { + responseErr := protocol.RequestErrorf("tx not found: %s", hex.EncodeToString(req.TxId)) + return &tbcapi.TxByIdRawResponse{ + Error: responseErr, + }, nil + } + + responseErr := protocol.NewInternalError(err) + return &tbcapi.TxByIdRawResponse{ + Error: responseErr.ProtocolError(), + }, responseErr + 
} + + b, err := tx2Bytes(tx) + if err != nil { + e := protocol.NewInternalError(err) + return &tbcapi.TxByIdRawResponse{ + Error: e.ProtocolError(), + }, e + } + + return &tbcapi.TxByIdRawResponse{ + Tx: b, + }, nil +} + +func (s *Server) handleTxByIdRequest(ctx context.Context, req *tbcapi.TxByIdRequest) (any, error) { + log.Tracef("handleTxByIdRequest") + defer log.Tracef("handleTxByIdRequest exit") + + if len(req.TxId) != 32 { + responseErr := protocol.RequestErrorf("invalid tx id") + return &tbcapi.TxByIdResponse{ + Error: responseErr, + }, nil + } + + tx, err := s.TxById(ctx, [32]byte(req.TxId)) + if err != nil { + if errors.Is(err, database.ErrNotFound) { + responseErr := protocol.RequestErrorf("not found: %s", hex.EncodeToString(req.TxId)) + return &tbcapi.TxByIdResponse{ + Error: responseErr, + }, nil + } + + responseErr := protocol.NewInternalError(err) + return &tbcapi.TxByIdResponse{ + Error: responseErr.ProtocolError(), + }, responseErr + } + + return &tbcapi.TxByIdResponse{ + Tx: *wireTxToTbcapiTx(tx), + }, nil +} + +func (s *Server) handleWebsocket(w http.ResponseWriter, r *http.Request) { + log.Tracef("handleWebsocket: %v", r.RemoteAddr) + defer log.Tracef("handleWebsocket exit: %v", r.RemoteAddr) + + conn, err := websocket.Accept(w, r, &websocket.AcceptOptions{ + CompressionMode: websocket.CompressionContextTakeover, + }) + if err != nil { + log.Errorf("Failed to accept websocket connection for %s: %v", + r.RemoteAddr, err) + return + } + defer conn.Close(websocket.StatusNormalClosure, "") // Force close connection + + ws := &tbcWs{ + addr: r.RemoteAddr, + conn: protocol.NewWSConn(conn), + requestContext: r.Context(), + } + + if ws.sessionID, err = s.newSession(ws); err != nil { + log.Errorf("An error occurred while creating session: %v", err) + return + } + defer s.deleteSession(ws.sessionID) + + ws.wg.Add(1) + go s.handleWebsocketRead(r.Context(), ws) + + // Always ping, required by protocol. 
+ ping := &tbcapi.PingRequest{ + Timestamp: time.Now().Unix(), + } + + log.Tracef("Responding with %v", spew.Sdump(ping)) + if err = tbcapi.Write(r.Context(), ws.conn, "0", ping); err != nil { + log.Errorf("Write ping: %v", err) + } + + log.Infof("Connection from %v", r.RemoteAddr) + + // Wait for termination + ws.wg.Wait() + + log.Infof("Connection terminated from %v", r.RemoteAddr) +} + +func (s *Server) newSession(ws *tbcWs) (string, error) { + for { + // Create random hexadecimal string to use as an ID + id, err := randHexId(16) + if err != nil { + return "", fmt.Errorf("generate session id: %w", err) + } + + // Ensure the key is not already in use, if it is then try again. + s.mtx.Lock() + if _, ok := s.sessions[id]; ok { + s.mtx.Unlock() + continue + } + s.sessions[id] = ws + s.mtx.Unlock() + + return id, nil + } +} + +func (s *Server) deleteSession(id string) { + s.mtx.Lock() + _, ok := s.sessions[id] + delete(s.sessions, id) + s.mtx.Unlock() + + if !ok { + log.Errorf("id not found in sessions %s", id) + } +} + +func randHexId(length int) (string, error) { + b := make([]byte, length) + if _, err := rand.Read(b); err != nil { + return "", fmt.Errorf("read random bytes: %w", err) + } + return hex.EncodeToString(b), nil +} + +func wireBlockHeadersToTBC(w []*wire.BlockHeader) []*tbcapi.BlockHeader { + blockHeaders := make([]*tbcapi.BlockHeader, len(w)) + for i, bh := range w { + blockHeaders[i] = &tbcapi.BlockHeader{ + Version: bh.Version, + PrevHash: bh.PrevBlock.String(), + MerkleRoot: bh.MerkleRoot.String(), + Timestamp: bh.Timestamp.Unix(), + Bits: fmt.Sprintf("%x", bh.Bits), + Nonce: bh.Nonce, + } + } + return blockHeaders +} + +func wireTxToTbcapiTx(w *wire.MsgTx) *tbcapi.Tx { + a := &tbcapi.Tx{ + Version: w.Version, + LockTime: w.LockTime, + TxIn: []*tbcapi.TxIn{}, + TxOut: []*tbcapi.TxOut{}, + } + + for _, v := range w.TxIn { + a.TxIn = append(a.TxIn, &tbcapi.TxIn{ + Sequence: v.Sequence, + SignatureScript: api.ByteSlice(v.SignatureScript), + 
PreviousOutPoint: tbcapi.OutPoint{ + Hash: api.ByteSlice(v.PreviousOutPoint.Hash[:]), + Index: v.PreviousOutPoint.Index, + }, + }) + + for _, b := range v.Witness { + a.TxIn[len(a.TxIn)-1].Witness = append(a.TxIn[len(a.TxIn)-1].Witness, + api.ByteSlice(b)) + } + } + + for _, v := range w.TxOut { + a.TxOut = append(a.TxOut, &tbcapi.TxOut{ + Value: v.Value, + PkScript: api.ByteSlice(v.PkScript), + }) + } + + return a +} diff --git a/service/tbc/tbc.go b/service/tbc/tbc.go new file mode 100644 index 000000000..a9ead495b --- /dev/null +++ b/service/tbc/tbc.go @@ -0,0 +1,1777 @@ +// Copyright (c) 2024 Hemi Labs, Inc. +// Use of this source code is governed by the MIT License, +// which can be found in the LICENSE file. + +package tbc + +import ( + "bytes" + "context" + "crypto/sha256" + "encoding/binary" + "encoding/hex" + "errors" + "fmt" + "math/rand" + "net" + "net/http" + "path/filepath" + "slices" + "strconv" + "sync" + "time" + + "github.com/btcsuite/btcd/blockchain" + "github.com/btcsuite/btcd/btcutil" + "github.com/btcsuite/btcd/chaincfg" + "github.com/btcsuite/btcd/chaincfg/chainhash" + "github.com/btcsuite/btcd/txscript" + "github.com/btcsuite/btcd/wire" + "github.com/davecgh/go-spew/spew" + "github.com/dustin/go-humanize" + "github.com/juju/loggo" + "github.com/prometheus/client_golang/prometheus" + "github.com/syndtr/goleveldb/leveldb" + + "github.com/hemilabs/heminetwork/api" + "github.com/hemilabs/heminetwork/api/tbcapi" + "github.com/hemilabs/heminetwork/database" + "github.com/hemilabs/heminetwork/database/tbcd" + "github.com/hemilabs/heminetwork/database/tbcd/level" + "github.com/hemilabs/heminetwork/service/deucalion" +) + +const ( + logLevel = "INFO" + + promSubsystem = "tbc_service" // Prometheus + + mainnetPort = "8333" + testnetPort = "18333" + localnetPort = "18444" + + defaultPeersWanted = 64 + defaultPendingBlocks = 128 // 128 * ~4MB max memory use + + defaultMaxCachedTxs = 1e6 // dual purpose cache, max key 69, max value 36 + + networkLocalnet 
= "localnet" // XXX this needs to be rethought + + defaultCmdTimeout = 4 * time.Second +) + +var ( + testnetSeeds = []string{ + "testnet-seed.bitcoin.jonasschnelli.ch", + "seed.tbtc.petertodd.org", + "seed.testnet.bitcoin.sprovoost.nl", + "testnet-seed.bluematt.me", + } + mainnetSeeds = []string{ + "seed.bitcoin.sipa.be", + "dnsseed.bluematt.me", + "dnsseed.bitcoin.dashjr.org", + "seed.bitcoinstats.com", + "seed.bitnodes.io", + "seed.bitcoin.jonasschnelli.ch", + } +) + +var log = loggo.GetLogger("tbc") + +func init() { + loggo.ConfigureLoggers(logLevel) + rand.Seed(time.Now().UnixNano()) // used for seeding, ok to be math.rand +} + +func tx2Bytes(tx *wire.MsgTx) ([]byte, error) { + var b bytes.Buffer + if err := tx.Serialize(&b); err != nil { + return nil, err + } + + return b.Bytes(), nil +} + +func bytes2Tx(b []byte) (*wire.MsgTx, error) { + var w wire.MsgTx + if err := w.Deserialize(bytes.NewReader(b)); err != nil { + return nil, err + } + + return &w, nil +} + +func header2Bytes(wbh *wire.BlockHeader) ([]byte, error) { + var b bytes.Buffer + err := wbh.Serialize(&b) + if err != nil { + return nil, err + } + return b.Bytes(), nil +} + +func h2b(wbh *wire.BlockHeader) []byte { + hb, err := header2Bytes(wbh) + if err != nil { + panic(err) + } + return hb +} + +func bytes2Header(header []byte) (*wire.BlockHeader, error) { + var bh wire.BlockHeader + err := bh.Deserialize(bytes.NewReader(header)) + if err != nil { + return nil, fmt.Errorf("Deserialize: %v", err) + } + return &bh, nil +} + +func headerTime(header []byte) *time.Time { + h, err := bytes2Header(header) + if err != nil { + return nil + } + return &h.Timestamp +} + +func hashEqual(h1 chainhash.Hash, h2 chainhash.Hash) bool { + // Fuck you chainhash package + return h1.IsEqual(&h2) +} + +func sliceChainHash(ch chainhash.Hash) []byte { + // Fuck you chainhash package + return ch[:] +} + +type blockPeer struct { + expire time.Time // when does this command expire + peer string // who was handling it +} + 
+type Config struct { + AutoIndex bool + BlockSanity bool + RegtestPort string + LevelDBHome string + ListenAddress string + LogLevel string + MaxCachedTxs int + Network string + PrometheusListenAddress string +} + +func NewDefaultConfig() *Config { + return &Config{ + ListenAddress: tbcapi.DefaultListen, + LogLevel: logLevel, + MaxCachedTxs: defaultMaxCachedTxs, + } +} + +type Server struct { + mtx sync.RWMutex + wg sync.WaitGroup + + cfg *Config + + // stats + printTime time.Time + blocksSize uint64 // cumulative block size written + blocksInserted map[string]struct{} + blocksDuplicate int + + // bitcoin network + wireNet wire.BitcoinNet + chainParams *chaincfg.Params + timeSource blockchain.MedianTimeSource + port string + seeds []string + + peers map[string]*peer // active but not necessarily connected + blocks map[string]*blockPeer // outstanding block downloads [hash]when/where + + // IBD hints + lastBlockHeader tbcd.BlockHeader + + // reentrancy flags for the indexers + utxoIndexerRunning bool + txIndexerRunning bool + + db tbcd.Database + + // Prometheus + isRunning bool + cmdsProcessed prometheus.Counter + + // WebSockets + sessions map[string]*tbcWs + requestTimeout time.Duration + + // ignoreUlimit will explicitly not check ulimit settings on the host + // machine, this is useful for very small datasets/chains + ignoreUlimit bool +} + +func NewServer(cfg *Config) (*Server, error) { + if cfg == nil { + cfg = NewDefaultConfig() + } + defaultRequestTimeout := 10 * time.Second // XXX: make config option? 
+ s := &Server{ + cfg: cfg, + printTime: time.Now().Add(10 * time.Second), + blocks: make(map[string]*blockPeer, defaultPendingBlocks), + peers: make(map[string]*peer, defaultPeersWanted), + blocksInserted: make(map[string]struct{}, 8192), // stats + timeSource: blockchain.NewMedianTime(), + cmdsProcessed: prometheus.NewCounter(prometheus.CounterOpts{ + Subsystem: promSubsystem, + Name: "rpc_calls_total", + Help: "The total number of successful RPC commands", + }), + sessions: make(map[string]*tbcWs), + requestTimeout: defaultRequestTimeout, + } + + // We could use a PGURI verification here. + + switch cfg.Network { + case "mainnet": + s.port = mainnetPort + s.wireNet = wire.MainNet + s.chainParams = &chaincfg.MainNetParams + s.seeds = mainnetSeeds + case "testnet3": + s.port = testnetPort + s.wireNet = wire.TestNet3 + s.chainParams = &chaincfg.TestNet3Params + s.seeds = testnetSeeds + case networkLocalnet: + s.port = localnetPort + s.wireNet = wire.TestNet + s.chainParams = &chaincfg.RegressionNetParams + default: + return nil, fmt.Errorf("invalid network: %v", cfg.Network) + } + + return s, nil +} + +// DB exports the underlying database. This should only be used in tests. +func (s *Server) DB() tbcd.Database { + return s.db +} + +// blockPeerExpire removes expired block downloads from the cache and returns +// the number of used cache slots. Lock must be held. 
+func (s *Server) blockPeerExpire() int { + log.Tracef("blockPeerExpire exit") + defer log.Tracef("blockPeerExpire exit") + + now := time.Now() + for k, v := range s.blocks { + if v == nil { + // not assigned a peer yet + continue + } + if !now.After(v.expire) { + continue + } + + // mark block as unassigned but do not give up cache slot + s.blocks[k] = nil + log.Infof("expired block: %v", k) // XXX maybe remove but add to stats + + // kill peer as well since it is slow + if p := s.peers[v.peer]; p != nil && p.conn != nil { + p.conn.Close() // this will tear down peer + } + } + return len(s.blocks) +} + +func (s *Server) getHeaders(ctx context.Context, p *peer, lastHeaderHash []byte) error { + bh, err := bytes2Header(lastHeaderHash) + if err != nil { + return fmt.Errorf("invalid header: %v", err) + } + hash := bh.BlockHash() + ghs := wire.NewMsgGetHeaders() + ghs.AddBlockLocatorHash(&hash) + err = p.write(defaultCmdTimeout, ghs) + if err != nil { + return fmt.Errorf("write get headers: %v", err) + } + + return nil +} + +func (s *Server) seed(pctx context.Context, peersWanted int) ([]tbcd.Peer, error) { + log.Tracef("seed") + defer log.Tracef("seed exit") + + peers, err := s.db.PeersRandom(pctx, peersWanted) + if err != nil { + return nil, fmt.Errorf("peers random: %v", err) + } + // return peers from db first + if len(peers) >= peersWanted { + return peers, nil + } + + // Seed + resolver := &net.Resolver{} + ctx, cancel := context.WithTimeout(pctx, 15*time.Second) + defer cancel() + + errorsSeen := 0 + var addrs []net.IP + for k := range s.seeds { + ips, err := resolver.LookupIP(ctx, "ip", s.seeds[k]) + if err != nil { + log.Errorf("lookup: %v", err) + errorsSeen++ + continue + } + addrs = append(addrs, ips...) 
+ } + if errorsSeen == len(s.seeds) { + return nil, fmt.Errorf("could not seed") + } + + // insert into peers table + for k := range addrs { + peers = append(peers, tbcd.Peer{ + Host: addrs[k].String(), + Port: s.port, + }) + } + + // return fake peers but don't save them to the database + return peers, nil +} + +func (s *Server) seedForever(ctx context.Context, peersWanted int) ([]tbcd.Peer, error) { + log.Tracef("seedForever") + defer log.Tracef("seedForever") + + minW := 5 + maxW := 59 + for { + holdOff := time.Duration(minW+rand.Intn(maxW-minW)) * time.Second + var em string + peers, err := s.seed(ctx, peersWanted) + if err != nil { + em = fmt.Sprintf("seed error: %v, retrying in %v", err, holdOff) + } else if peers != nil && len(peers) == 0 { + em = fmt.Sprintf("no peers found, retrying in %v", holdOff) + } else { + // great success! + return peers, nil + } + log.Errorf("%v", em) + + select { + case <-ctx.Done(): + return nil, ctx.Err() + case <-time.After(holdOff): + } + } +} + +func (s *Server) peerAdd(p *peer) { + log.Tracef("peerAdd: %v", p.address) + s.mtx.Lock() + s.peers[p.address] = p + s.mtx.Unlock() +} + +func (s *Server) peerDelete(address string) { + log.Tracef("peerDelete: %v", address) + s.mtx.Lock() + delete(s.peers, address) + s.mtx.Unlock() +} + +func (s *Server) peersLen() int { + s.mtx.Lock() + defer s.mtx.Unlock() + return len(s.peers) +} + +func (s *Server) peerManager(ctx context.Context) error { + log.Tracef("peerManager") + defer log.Tracef("peerManager exit") + + // Channel for peering signals + peersWanted := defaultPeersWanted + peerC := make(chan string, peersWanted) + + log.Infof("Peer manager connecting to %v peers", peersWanted) + seeds, err := s.seedForever(ctx, peersWanted) + if err != nil { + // context canceled + return fmt.Errorf("seed: %w", err) + } + if len(seeds) == 0 { + // should not happen + return fmt.Errorf("no seeds found") + } + + // Add a ticker that times out every 27 seconds regardless of what is + // going on. 
This will be nice and jittery and detect bad peers + periodically. + loopTimeout := 27 * time.Second + loopTicker := time.NewTicker(loopTimeout) + + x := 0 + for { + peersActive := s.peersLen() + log.Debugf("peerManager active %v wanted %v", peersActive, peersWanted) + if peersActive < peersWanted { + // XXX we may want to make peers play along with waitgroup + + // Connect peer + for i := 0; i < peersWanted-peersActive; i++ { + address := net.JoinHostPort(seeds[x].Host, seeds[x].Port) + peer, err := NewPeer(s.wireNet, address) + if err != nil { + // This really should not happen + log.Errorf("new peer: %v", err) + continue + } + s.peerAdd(peer) + + go s.peerConnect(ctx, peerC, peer) + + x++ + if x >= len(seeds) { + // XXX duplicate code from above + seeds, err = s.seedForever(ctx, peersWanted) + if err != nil { + // Context canceled + return fmt.Errorf("seed: %w", err) + } + if len(seeds) == 0 { + // should not happen + return fmt.Errorf("no seeds found") + } + x = 0 + } + } + } + + // Unfortunately we need a timer here to restart the loop. The + // error is a laptop goes to sleep, all peers disconnect, RSTs + // are not seen by sleeping laptop, laptop wakes up. Now the + // expiration timers are all expired but not noticed by the + // laptop.
+ select { + case <-ctx.Done(): + return ctx.Err() + case address := <-peerC: + // peer exited, connect to new one + s.peerDelete(address) + log.Debugf("peer exited: %v", address) + case <-loopTicker.C: + log.Debugf("pinging active peers: %v", s.peersLen()) + go s.pingAllPeers(ctx) + loopTicker.Reset(loopTimeout) + } + } +} + +func (s *Server) localPeerManager(ctx context.Context) error { + log.Tracef("localPeerManager") + defer log.Tracef("localPeerManager exit") + + peersWanted := 1 + peerC := make(chan string, peersWanted) + address := net.JoinHostPort("127.0.0.1", s.port) + peer, err := NewPeer(s.wireNet, address) + if err != nil { + return fmt.Errorf("new peer: %w", err) + } + + log.Infof("Local peer manager connecting to %v peers", peersWanted) + + for { + s.peerAdd(peer) + go s.peerConnect(ctx, peerC, peer) + + select { + case <-ctx.Done(): + return ctx.Err() + case address := <-peerC: + s.peerDelete(address) + log.Infof("peer exited: %v", address) + } + + // hold off on reconnect + select { + case <-ctx.Done(): + return ctx.Err() + case <-time.After(10 * time.Second): + log.Infof("peer exited: %v", "hold of timeout") + } + } +} + +func (s *Server) startPeerManager(ctx context.Context) error { + log.Tracef("startPeerManager") + defer log.Tracef("startPeerManager exit") + + switch s.cfg.Network { + case networkLocalnet: + return s.localPeerManager(ctx) + } + return s.peerManager(ctx) +} + +func (s *Server) pingAllPeers(ctx context.Context) { + log.Tracef("pingAllPeers") + defer log.Tracef("pingAllPeers exit") + + // XXX reason and explain why this cannot be reentrant + s.mtx.Lock() + defer s.mtx.Unlock() + + for _, p := range s.peers { + select { + case <-ctx.Done(): + return + default: + } + if p.conn == nil { + continue + } + + // We don't really care about the response. We just want to + // write to the connection to make it fail if the other side + // went away. 
+ log.Debugf("Pinging: %v", p) + err := p.write(defaultCmdTimeout, wire.NewMsgPing(uint64(time.Now().Unix()))) + if err != nil { + log.Errorf("ping %v: %v", p, err) + } + } +} + +func (s *Server) peerConnect(ctx context.Context, peerC chan string, p *peer) { + log.Tracef("peerConnect %v", p) + defer func() { + select { + case peerC <- p.String(): + default: + log.Tracef("could not signal peer channel: %v", p) + } + log.Tracef("peerConnect exit %v", p) + }() + + tctx, cancel := context.WithTimeout(ctx, 5*time.Second) + defer cancel() + err := p.connect(tctx) + if err != nil { + go func(pp *peer) { + // Remove from database; it's ok to be aggressive if it + // failed with no route to host or failed with i/o + // timeout or invalid network (ipv4/ipv6). + // + // This does have the side-effect of draining the peer + // table during network outages but that is ok. The + // peers table will be rebuild based on DNS seeds. + host, port, err := net.SplitHostPort(pp.String()) + if err != nil { + log.Errorf("split host port: %v", err) + return + } + err = s.db.PeerDelete(ctx, host, port) + if err != nil { + log.Errorf("peer delete (%v): %v", pp, err) + } else { + log.Debugf("Peer delete: %v", pp) + } + }(p) + log.Debugf("connect: %v", err) + return + } + defer func() { + err := p.close() + if err != nil { + log.Errorf("peer disconnect: %v %v", p, err) + } + }() + + _ = p.write(defaultCmdTimeout, wire.NewMsgSendHeaders()) // Ask peer to send headers + _ = p.write(defaultCmdTimeout, wire.NewMsgGetAddr()) // Try to get network information + + log.Debugf("Peer connected: %v", p) + + // Pretend we are always in IBD. + // + // This obviously will put a pressure on the internet connection and + // database because each and every peer is racing at start of day. As + // multiple answers come in the insert of the headers fails or + // succeeds. If it fails no more headers will be requested from that + // peer. 
+ bhs, err := s.db.BlockHeadersBest(ctx) + if err != nil { + log.Errorf("block headers best: %v", err) + // database is closed, nothing we can do, return here to avoid below + // panic + if errors.Is(err, leveldb.ErrClosed) { + return + } + } + if len(bhs) != 1 { + // XXX fix multiple tips + panic(len(bhs)) + } + log.Debugf("block header best hash: %s", bhs[0].Hash) + + err = s.getHeaders(ctx, p, bhs[0].Header) + if err != nil { + // This should not happen + log.Errorf("get headers: %v", err) + return + } + + // XXX kickstart block download, should happen in getHeaders + + verbose := false + for { + // See if we were interrupted, for the love of pete add ctx to wire + select { + case <-ctx.Done(): + return + default: + } + + msg, err := p.read() + if err == wire.ErrUnknownMessage { + // skip unknown + continue + } else if err != nil { + log.Debugf("peer read %v: %v", p, err) + return + } + + if verbose { + spew.Dump(msg) + } + + // XXX send wire message to pool reader + switch m := msg.(type) { + case *wire.MsgAddr: + go s.handleAddr(ctx, p, m) + + case *wire.MsgAddrV2: + go s.handleAddrV2(ctx, p, m) + + case *wire.MsgBlock: + go s.handleBlock(ctx, p, m) + + case *wire.MsgFeeFilter: + // XXX shut up + + case *wire.MsgInv: + go s.handleInv(ctx, p, m) + + case *wire.MsgHeaders: + go s.handleHeaders(ctx, p, m) + + case *wire.MsgPing: + go s.handlePing(ctx, p, m) + default: + log.Tracef("unhandled message type %v: %T\n", p, msg) + } + } +} + +func (s *Server) running() bool { + s.mtx.RLock() + defer s.mtx.RUnlock() + return s.isRunning +} + +func (s *Server) testAndSetRunning(b bool) bool { + s.mtx.Lock() + defer s.mtx.Unlock() + old := s.isRunning + s.isRunning = b + return old != s.isRunning +} + +func (s *Server) promRunning() float64 { + r := s.running() + if r { + return 1 + } + return 0 +} + +// blksMissing checks the block cache and the database and returns true if all +// blocks have not been downloaded. This function must be called with the lock +// held. 
+func (s *Server) blksMissing(ctx context.Context) bool { + // Do cheap memory check first + if len(s.blocks) != 0 { + return true + } + + // Do expensive database check + bm, err := s.db.BlocksMissing(ctx, 1) + if err != nil { + log.Errorf("blocks missing: %v", err) + return true // this is really kind of terminal + } + return len(bm) > 0 +} + +// blocksMissing checks the block cache and the database and returns true if all +// blocks have not been downloaded. +func (s *Server) blocksMissing(ctx context.Context) bool { + s.mtx.Lock() + defer s.mtx.Unlock() + + return s.blksMissing(ctx) +} + +func (s *Server) handleAddr(ctx context.Context, p *peer, msg *wire.MsgAddr) { + log.Tracef("handleAddr (%v): %v", p, len(msg.AddrList)) + defer log.Tracef("handleAddr exit (%v)", p) + + peers := make([]tbcd.Peer, 0, len(msg.AddrList)) + for k := range msg.AddrList { + peers = append(peers, tbcd.Peer{ + Host: msg.AddrList[k].IP.String(), + Port: strconv.Itoa(int(msg.AddrList[k].Port)), + }) + } + err := s.db.PeersInsert(ctx, peers) + // Don't log insert 0, its a dup. + if err != nil && !database.ErrZeroRows.Is(err) { + log.Errorf("%v", err) + } +} + +func (s *Server) handleAddrV2(ctx context.Context, p *peer, msg *wire.MsgAddrV2) { + log.Tracef("handleAddrV2 (%v): %v", p, len(msg.AddrList)) + defer log.Tracef("handleAddrV2 exit (%v)", p) + + peers := make([]tbcd.Peer, 0, len(msg.AddrList)) + for k := range msg.AddrList { + peers = append(peers, tbcd.Peer{ + Host: msg.AddrList[k].Addr.String(), + Port: strconv.Itoa(int(msg.AddrList[k].Port)), + }) + } + err := s.db.PeersInsert(ctx, peers) + // Don't log insert 0, its a dup. 
+ if err != nil && !database.ErrZeroRows.Is(err) { + log.Errorf("%v", err) + } +} + +func (s *Server) handlePing(ctx context.Context, p *peer, msg *wire.MsgPing) { + log.Tracef("handlePing %v", p.address) + defer log.Tracef("handlePing exit %v", p.address) + + pong := wire.NewMsgPong(msg.Nonce) + err := p.write(defaultCmdTimeout, pong) + if err != nil { + log.Errorf("could not write pong message %v: %v", p.address, err) + return + } + log.Tracef("handlePing %v: pong %v", p.address, pong.Nonce) +} + +func (s *Server) handleInv(ctx context.Context, p *peer, msg *wire.MsgInv) { + log.Tracef("handleInv (%v)", p) + defer log.Tracef("handleInv exit (%v)", p) + + var bis []tbcd.BlockIdentifier + for k := range msg.InvList { + switch msg.InvList[k].Type { + case wire.InvTypeBlock: + + // XXX height is missing here, looks right but assert + // that this isn't broken. + log.Infof("handleInv: block %v", msg.InvList[k].Hash) + + bis = append(bis, tbcd.BlockIdentifier{ + Hash: msg.InvList[k].Hash[:], // fake out + }) + log.Infof("handleInv: block %v", msg.InvList[k].Hash) + case wire.InvTypeTx: + // XXX silence mempool for now + return + default: + log.Infof("handleInv: skipping inv type %v", msg.InvList[k].Type) + return + } + } + + // XXX This happens during block header download, we should not react + // Probably move into the invtype switch + log.Infof("download blocks if we like them") + // if len(bis) > 0 { + // s.mtx.Lock() + // defer s.mtx.Unlock() + // err := s.downloadBlocks(ctx, bis) + // if err != nil { + // log.Errorf("download blocks: %v", err) + // return + // } + // } +} + +func (s *Server) txIndexer(ctx context.Context) { + log.Tracef("txIndexer") + defer log.Tracef("txIndexer exit") + + if !s.cfg.AutoIndex { + return + } + + // only one txIndexer may run at any given time + s.mtx.Lock() + if s.txIndexerRunning { + s.mtx.Unlock() + return + } + s.txIndexerRunning = true + s.mtx.Unlock() + + // mark txIndexer not running on exit + defer func() { + s.mtx.Lock() + 
s.txIndexerRunning = false + s.mtx.Unlock() + }() + + if s.blocksMissing(ctx) { + return + } + + // Get height from db + he, err := s.db.MetadataGet(ctx, TxIndexHeightKey) + if err != nil { + if !errors.Is(err, database.ErrNotFound) { + log.Errorf("tx indexer metadata get: %v", err) + return + } + he = make([]byte, 8) + } + h := binary.BigEndian.Uint64(he) + + // Skip txIndexer if we are at best block height. This is a bit racy. + bhs, err := s.db.BlockHeadersBest(ctx) + if err != nil { + log.Errorf("utxo indexer block headers best: %v", err) + return + } + if len(bhs) != 1 { + log.Errorf("utxo indexer block headers best: unsuported fork") + return + } + + if bhs[0].Height != h-1 { + err = s.TxIndexer(ctx, h, 0) + if err != nil { + log.Errorf("tx indexer: %v", err) + return + } + } +} + +func (s *Server) utxoIndexer(ctx context.Context) { + log.Tracef("utxoIndexer") + defer log.Tracef("utxoIndexer exit") + + if !s.cfg.AutoIndex { + return + } + + // only one utxoIndexer may run at any given time + s.mtx.Lock() + if s.utxoIndexerRunning { + s.mtx.Unlock() + return + } + s.utxoIndexerRunning = true + s.mtx.Unlock() + + // mark utxoIndexer not running on exit + defer func() { + s.mtx.Lock() + s.utxoIndexerRunning = false + s.mtx.Unlock() + }() + + // exit if we aren't synced + if s.blocksMissing(ctx) { + return + } + + // Index all utxos + + // Get height from db + he, err := s.db.MetadataGet(ctx, UtxoIndexHeightKey) + if err != nil { + if !errors.Is(err, database.ErrNotFound) { + log.Errorf("utxo indexer metadata get: %v", err) + return + } + he = make([]byte, 8) + } + h := binary.BigEndian.Uint64(he) + + // Skip UtxoIndex if we are at best block height. This is a bit racy. 
+ bhs, err := s.db.BlockHeadersBest(ctx) + if err != nil { + log.Errorf("utxo indexer block headers best: %v", err) + return + } + if len(bhs) != 1 { + log.Errorf("utxo indexer block headers best: unsuported fork") + return + } + + if bhs[0].Height != h-1 { + err = s.UtxoIndexer(ctx, h, 0) + if err != nil { + log.Errorf("utxo indexer: %v", err) + return + } + } + + // When utxo sync completes kick off tx sync + go s.txIndexer(ctx) +} + +func (s *Server) downloadBlock(ctx context.Context, p *peer, ch *chainhash.Hash) { + log.Tracef("downloadBlock") + defer log.Tracef("downloadBlock exit") + + getData := wire.NewMsgGetData() + getData.InvList = append(getData.InvList, + &wire.InvVect{ + Type: wire.InvTypeBlock, + Hash: *ch, + }) + + s.mtx.Lock() + defer s.mtx.Unlock() + err := p.write(defaultCmdTimeout, getData) + if err != nil { + // peer dead, make sure it is reaped + log.Errorf("write %v: %v", p, err) + p.close() + } +} + +func (s *Server) downloadBlocks(ctx context.Context) { + log.Tracef("downloadBlocks") + defer log.Tracef("downloadBlocks exit") + + now := time.Now() + + defer func() { + // if we are complete we need to kick off utxo sync + go s.utxoIndexer(ctx) + }() + + s.mtx.Lock() + defer s.mtx.Unlock() + + for k, v := range s.blocks { + if v != nil && now.After(v.expire) { + // kill peer as well since it is slow + if p := s.peers[v.peer]; p != nil && p.conn != nil { + p.conn.Close() // this will tear down peer + } + + // block expired, download block + s.blocks[k] = nil + v = nil // this will redownload the block + } + if v != nil { + // block already being downloaded and is not expired + continue + } + // unassigned slot, download block + for _, peer := range s.peers { + if peer.conn == nil { + // Not connected yet + continue + } + ch, err := chainhash.NewHashFromStr(k) + if err != nil { + // really should not happen + log.Errorf("download blocks hash: %v", err) + delete(s.blocks, k) + continue + } + + // sufficiently validated, record in cache + 
s.blocks[k] = &blockPeer{ + expire: time.Now().Add(37 * time.Second), // XXX make variable? + peer: peer.String(), + } + + go s.downloadBlock(ctx, peer, ch) + + break + } + } +} + +func (s *Server) syncBlocks(ctx context.Context) { + log.Tracef("syncBlocks") + defer log.Tracef("syncBlocks exit") + + // regardless of cache being full or no more missing blocks kick the + // downloader just to make sure we are making forward progress. + defer func() { + go s.downloadBlocks(ctx) + }() + + // Hold lock to fill blocks cache + s.mtx.Lock() + defer s.mtx.Unlock() + + // Deal with expired block downloads + used := s.blockPeerExpire() + want := defaultPendingBlocks - used + if want <= 0 { + return + } + + bm, err := s.db.BlocksMissing(ctx, want) + if err != nil { + log.Errorf("blocks missing: %v", err) + return + } + for k := range bm { + bi := bm[k] + hash, _ := chainhash.NewHash(bi.Hash[:]) + hashS := hash.String() + if _, ok := s.blocks[hashS]; ok { + continue + } + s.blocks[hashS] = nil // pending block + } +} + +func (s *Server) handleHeaders(ctx context.Context, p *peer, msg *wire.MsgHeaders) { + log.Tracef("handleHeaders %v", p) + defer log.Tracef("handleHeaders exit %v", p) + + log.Debugf("handleHeaders (%v): %v", p, len(msg.Headers)) + + if len(msg.Headers) == 0 { + // This may signify the end of IBD but isn't 100%. We can fart + // around with mean block time to determine if this peer is + // just behind or if we are nominally where we should be. This + // test will never be 100% accurate. + + s.mtx.Lock() + lastBH := s.lastBlockHeader.Timestamp() + s.mtx.Unlock() + if time.Now().Sub(lastBH) > 6*s.chainParams.TargetTimePerBlock { + log.Infof("peer not synced: %v", p) + return + } + + go s.syncBlocks(ctx) + + return + } + + // This code works because duplicate blockheaders are rejected later on + // but only after a somewhat expensive parameter setup and database + // call. 
+ // + // There really is no good way of determining if we can escape the + // expensive calls so we just eat it. + + // Make sure we can connect these headers in database + dbpbh, err := s.db.BlockHeaderByHash(ctx, msg.Headers[0].PrevBlock[:]) + if err != nil { + log.Errorf("handle headers no previous block header: %v", + msg.Headers[0].BlockHash()) + return + } + pbh, err := bytes2Header(dbpbh.Header) + if err != nil { + log.Errorf("invalid block header: %v", err) + return + } + + // Construct insert list and nominally validate headers + headers := make([]tbcd.BlockHeader, 0, len(msg.Headers)) + height := dbpbh.Height + 1 + for k := range msg.Headers { + if !hashEqual(msg.Headers[k].PrevBlock, pbh.BlockHash()) { + log.Errorf("cannot connect %v at height %v", + msg.Headers[k].PrevBlock, height) + return + } + + headers = append(headers, tbcd.BlockHeader{ + Hash: sliceChainHash(msg.Headers[k].BlockHash()), + Height: height, + Header: h2b(msg.Headers[k]), + }) + + pbh = msg.Headers[k] + height++ + } + + if len(headers) > 0 { + err := s.db.BlockHeadersInsert(ctx, headers) + if err != nil { + // This ends the race between peers during IBD. + if !database.ErrDuplicate.Is(err) { + log.Errorf("block headers insert: %v", err) + } + return + } + + // If we get here try to store the last blockheader that was + // inserted. This may race so we have to take the mutex and + // check height. 
+ lbh := headers[len(headers)-1] + + s.mtx.Lock() + if lbh.Height > s.lastBlockHeader.Height { + s.lastBlockHeader = lbh + } + s.mtx.Unlock() + + log.Infof("Inserted %v block headers height %v", + len(headers), lbh.Height) + + // Ask for next batch of headers + err = s.getHeaders(ctx, p, lbh.Header) + if err != nil { + log.Errorf("get headers: %v", err) + return + } + } +} + +func (s *Server) handleBlock(ctx context.Context, p *peer, msg *wire.MsgBlock) { + log.Tracef("handleBlock (%v)", p) + defer log.Tracef("handleBlock exit (%v)", p) + + block := btcutil.NewBlock(msg) + bhs := block.Hash().String() + bb, err := block.Bytes() // XXX we should not being doing this twice but requires a modification to the wire package + if err != nil { + log.Errorf("block bytes %v: %v", block.Hash(), err) + return + } + b := &tbcd.Block{ + Hash: sliceChainHash(*block.Hash()), + Block: bb, + } + + if s.cfg.BlockSanity { + err = blockchain.CheckBlockSanity(block, s.chainParams.PowLimit, + s.timeSource) + if err != nil { + log.Errorf("Unable to validate block hash %v: %v", bhs, err) + return + } + + // Contextual check of block + // + // We do want these checks however we download the blockchain + // out of order this we will have to do something clever for + // prevNode. 
+ // + // header := &block.MsgBlock().Header + // flags := blockchain.BFNone + // err := blockchain.CheckBlockHeaderContext(header, prevNode, flags, bctxt, false) + // if err != nil { + // log.Errorf("Unable to validate context of block hash %v: %v", bhs, err) + // return + // } + } + + height, err := s.db.BlockInsert(ctx, b) + if err != nil { + log.Errorf("block insert %v: %v", bhs, err) + } else { + log.Infof("Insert block %v at %v txs %v %v", bhs, height, + len(msg.Transactions), msg.Header.Timestamp) + } + + // Whatever happens,, delete from cache and potentially try again + var ( + printStats bool + blocksSize uint64 + blocksInserted int + blocksDuplicate int // keep track of this until have less of them + delta time.Duration + + // blocks pending + blocksPending int + + // peers + goodPeers int + badPeers int + activePeers int + connectedPeers int + ) + s.mtx.Lock() + delete(s.blocks, bhs) // remove block from cache regardless of insert result + + // Stats + if err == nil { + s.blocksSize += uint64(len(b.Block) + len(b.Hash)) + if _, ok := s.blocksInserted[bhs]; ok { + s.blocksDuplicate++ + } else { + s.blocksInserted[bhs] = struct{}{} + } + } + now := time.Now() + if now.After(s.printTime) { + printStats = true + + blocksSize = s.blocksSize + blocksInserted = len(s.blocksInserted) + blocksDuplicate = s.blocksDuplicate + // This is super awkward but prevents calculating N inserts * + // time.Before(10*time.Second). + delta = now.Sub(s.printTime.Add(-10 * time.Second)) + + s.blocksSize = 0 + s.blocksInserted = make(map[string]struct{}, 8192) + s.blocksDuplicate = 0 + s.printTime = now.Add(10 * time.Second) + + // Grab pending block cache stats + blocksPending = len(s.blocks) + + // Grab some peer stats as well + activePeers = len(s.peers) + goodPeers, badPeers = s.db.PeersStats(ctx) + // Gonna take it right into the Danger Zone! 
(double mutex) + for _, peer := range s.peers { + if peer.isConnected() { + connectedPeers++ + } + } + } + s.mtx.Unlock() + + if printStats { + // XXX this counts errors somehow after ibd, probably because + // duplicate blocks are downloaded when an inv comes in. + log.Infof("Inserted %v blocks (%v, %v duplicates) in the last %v", + blocksInserted, humanize.Bytes(blocksSize), blocksDuplicate, delta) + log.Infof("Pending blocks %v/%v active peers %v connected peers %v "+ + "good peers %v bad peers %v", + blocksPending, defaultPendingBlocks, activePeers, connectedPeers, + goodPeers, badPeers) + } + + // kick cache + go s.syncBlocks(ctx) +} + +func (s *Server) insertGenesis(ctx context.Context) ([]tbcd.BlockHeader, error) { + log.Tracef("insertGenesis") + defer log.Tracef("insertGenesis exit") + + // We really should be inserting the block first but block insert + // verifies that a block header exists. + log.Infof("Inserting genesis block and header: %v", s.chainParams.GenesisHash) + gbh, err := header2Bytes(&s.chainParams.GenesisBlock.Header) + if err != nil { + return nil, fmt.Errorf("serialize genesis block header: %v", err) + } + + genesisBlockHeader := &tbcd.BlockHeader{ + Height: 0, + Hash: s.chainParams.GenesisHash[:], + Header: gbh, + } + err = s.db.BlockHeadersInsert(ctx, []tbcd.BlockHeader{*genesisBlockHeader}) + if err != nil { + return nil, fmt.Errorf("genesis block header insert: %v", err) + } + + log.Debugf("Inserting genesis block") + gb, err := btcutil.NewBlock(s.chainParams.GenesisBlock).Bytes() + if err != nil { + return nil, fmt.Errorf("genesis block encode: %v", err) + } + _, err = s.db.BlockInsert(ctx, &tbcd.Block{ + Hash: s.chainParams.GenesisHash[:], + Block: gb, + }) + if err != nil { + return nil, fmt.Errorf("genesis block insert: %v", err) + } + + return []tbcd.BlockHeader{*genesisBlockHeader}, nil +} + +// + +func (s *Server) BlockHeaderByHash(ctx context.Context, hash *chainhash.Hash) (*wire.BlockHeader, uint64, error) { + 
log.Tracef("BlockHeaderByHash") + defer log.Tracef("BlockHeaderByHash exit") + + bh, err := s.db.BlockHeaderByHash(ctx, hash[:]) + if err != nil { + return nil, 0, fmt.Errorf("db block header by hash: %w", err) + } + bhw, err := bytes2Header(bh.Header) + if err != nil { + return nil, 0, fmt.Errorf("bytes to header: %w", err) + } + return bhw, bh.Height, nil +} + +func (s *Server) blockHeadersByHeight(ctx context.Context, height uint64) ([]tbcd.BlockHeader, error) { + log.Tracef("blockHeadersByHeight") + defer log.Tracef("blockHeadersByHeight exit") + + bhs, err := s.db.BlockHeadersByHeight(ctx, height) + if err != nil { + return nil, fmt.Errorf("db block header by height: %w", err) + } + + return bhs, nil +} + +func (s *Server) RawBlockHeadersByHeight(ctx context.Context, height uint64) ([]api.ByteSlice, error) { + log.Tracef("RawBlockHeadersByHeight") + defer log.Tracef("RawBlockHeadersByHeight exit") + + bhs, err := s.blockHeadersByHeight(ctx, height) + if err != nil { + return nil, err + } + + var headers []api.ByteSlice + for _, bh := range bhs { + headers = append(headers, []byte(bh.Header)) + } + + return headers, nil +} + +func (s *Server) BlockHeadersByHeight(ctx context.Context, height uint64) ([]*wire.BlockHeader, error) { + log.Tracef("BlockHeadersByHeight") + defer log.Tracef("BlockHeadersByHeight exit") + + blockHeaders, err := s.blockHeadersByHeight(ctx, height) + if err != nil { + return nil, err + } + + wireBlockHeaders := make([]*wire.BlockHeader, 0, len(blockHeaders)) + for _, bh := range blockHeaders { + w, err := bh.Wire() + if err != nil { + return nil, err + } + wireBlockHeaders = append(wireBlockHeaders, w) + } + return wireBlockHeaders, nil +} + +// RawBlockHeadersBest returns the raw headers for the best known blocks. 
+func (s *Server) RawBlockHeadersBest(ctx context.Context) (uint64, []api.ByteSlice, error) { + log.Tracef("RawBlockHeadersBest") + defer log.Tracef("RawBlockHeadersBest exit") + + bhs, err := s.db.BlockHeadersBest(ctx) + if err != nil { + return 0, nil, err + } + + var height uint64 + if len(bhs) > 0 { + height = bhs[0].Height + } + + var headers []api.ByteSlice + for _, bh := range bhs { + headers = append(headers, []byte(bh.Header)) + } + + return height, headers, nil +} + +// BlockHeadersBest returns the headers for the best known blocks. +func (s *Server) BlockHeadersBest(ctx context.Context) (uint64, []*wire.BlockHeader, error) { + log.Tracef("BlockHeadersBest") + defer log.Tracef("BlockHeadersBest exit") + + blockHeaders, err := s.db.BlockHeadersBest(ctx) + if err != nil { + return 0, nil, err + } + + var height uint64 + if len(blockHeaders) > 0 { + height = blockHeaders[0].Height + } + + wireBlockHeaders := make([]*wire.BlockHeader, 0, len(blockHeaders)) + for _, bh := range blockHeaders { + w, err := bh.Wire() + if err != nil { + return 0, nil, err + } + wireBlockHeaders = append(wireBlockHeaders, w) + } + + return height, wireBlockHeaders, nil +} + +func (s *Server) BalanceByAddress(ctx context.Context, encodedAddress string) (uint64, error) { + addr, err := btcutil.DecodeAddress(encodedAddress, s.chainParams) + if err != nil { + return 0, err + } + + script, err := txscript.PayToAddrScript(addr) + if err != nil { + return 0, err + } + + scriptHash := sha256.Sum256(script) + + balance, err := s.db.BalanceByScriptHash(ctx, scriptHash) + if err != nil { + return 0, err + } + + return balance, nil +} + +func (s *Server) UtxosByAddress(ctx context.Context, encodedAddress string, start uint64, count uint64) ([]tbcd.Utxo, error) { + addr, err := btcutil.DecodeAddress(encodedAddress, s.chainParams) + if err != nil { + return nil, err + } + + script, err := txscript.PayToAddrScript(addr) + if err != nil { + return nil, err + } + + scriptHash := 
sha256.Sum256(script) + + utxos, err := s.db.UtxosByScriptHash(ctx, scriptHash, start, count) + if err != nil { + return nil, err + } + return utxos, nil +} + +func (s *Server) TxById(ctx context.Context, txId tbcd.TxId) (*wire.MsgTx, error) { + blockHashes, err := s.db.BlocksByTxId(ctx, txId) + if err != nil { + return nil, err + } + + // chain hash stores the bytes in reverse order + revTxId := bytes.Clone(txId[:]) + slices.Reverse(revTxId) + ch, err := chainhash.NewHashFromStr(hex.EncodeToString(revTxId[:])) + if err != nil { + return nil, err + } + + for _, blockHash := range blockHashes { + block, err := s.db.BlockByHash(ctx, blockHash[:]) + if err != nil { + return nil, err + } + + parsedBlock, err := btcutil.NewBlockFromBytes(block.Block) + if err != nil { + return nil, err + } + + for _, tx := range parsedBlock.Transactions() { + if tx.Hash().IsEqual(ch) { + return tx.MsgTx(), nil + } + } + } + + return nil, database.ErrNotFound +} + +func feesFromTransactions(txs []*btcutil.Tx) error { + for idx, tx := range txs { + for _, txIn := range tx.MsgTx().TxIn { + if idx == 0 { + // Skip coinbase inputs + continue + } + _ = txIn + } + for outIndex, txOut := range tx.MsgTx().TxOut { + if txscript.IsUnspendable(txOut.PkScript) { + continue + } + _ = outIndex + } + } + + return nil +} + +func (s *Server) FeesAtHeight(ctx context.Context, height, count int64) (uint64, error) { + log.Tracef("FeesAtHeight") + defer log.Tracef("FeesAtHeight exit") + + if height-count < 0 { + return 0, fmt.Errorf("height - count is less than 0") + } + var fees uint64 + for i := int64(0); i < int64(count); i++ { + log.Infof("%v", uint64(height-i)) + bhs, err := s.db.BlockHeadersByHeight(ctx, uint64(height-i)) + if err != nil { + return 0, fmt.Errorf("headers by height: %w", err) + } + if len(bhs) != 1 { + return 0, fmt.Errorf("too many block headers: %v", len(bhs)) + } + be, err := s.db.BlockByHash(ctx, bhs[0].Hash) + if err != nil { + return 0, fmt.Errorf("block by hash: %w", err) + } + 
b, err := btcutil.NewBlockFromBytes(be.Block) + if err != nil { + ch, _ := chainhash.NewHash(bhs[0].Hash) + return 0, fmt.Errorf("could not decode block %v %v: %v", + height, ch, err) + } + + // walk block tx' + err = feesFromTransactions(b.Transactions()) + if err != nil { + return 0, fmt.Errorf("fees from transactions %v %v: %v", + height, b.Hash(), err) + } + } + + return fees, fmt.Errorf("not yet") +} + +type SyncInfo struct { + Synced bool // True when all indexing is caught up + BlockHeaderHeight uint64 // last block header height + UtxoHeight uint64 // last indexed utxo block height + TxHeight uint64 // last indexed tx block height +} + +func (s *Server) Synced(ctx context.Context) (si SyncInfo) { + s.mtx.Lock() + defer s.mtx.Unlock() + si.BlockHeaderHeight = s.lastBlockHeader.Height + + // These values are cached in leveldb so it is ok to call with mutex + // held. + // + // Note that index heights are start indexing values thus they are off + // by one from the last block height seen. + uh, err := s.db.MetadataGet(ctx, UtxoIndexHeightKey) + if err == nil { + si.UtxoHeight = binary.BigEndian.Uint64(uh) - 1 + } + th, err := s.db.MetadataGet(ctx, TxIndexHeightKey) + if err == nil { + si.TxHeight = binary.BigEndian.Uint64(th) - 1 + } + if si.UtxoHeight == si.TxHeight && si.UtxoHeight == si.BlockHeaderHeight && + !s.blksMissing(ctx) { + si.Synced = true + } + return +} + +// DBOpen opens the underlying server database. It has been put in its own +// function to make it available during tests and hemictl. +func (s *Server) DBOpen(ctx context.Context) error { + log.Tracef("DBOpen") + defer log.Tracef("DBOpen exit") + + // This should have been verified but let's not make assumptions. + switch s.cfg.Network { + case "testnet3": + case "mainnet": + case networkLocalnet: // XXX why is this here?, this breaks the filepath.Join + default: + return fmt.Errorf("unsupported network: %v", s.cfg.Network) + } + + // Open db. 
+ var err error + s.db, err = level.New(ctx, filepath.Join(s.cfg.LevelDBHome, s.cfg.Network)) + if err != nil { + return fmt.Errorf("open level database: %v", err) + } + + return nil +} + +func (s *Server) DBClose() error { + log.Tracef("DBClose") + defer log.Tracef("DBClose") + + return s.db.Close() +} + +func (s *Server) Run(pctx context.Context) error { + log.Tracef("Run") + defer log.Tracef("Run exit") + + if !s.testAndSetRunning(true) { + return fmt.Errorf("tbc already running") + } + defer s.testAndSetRunning(false) + + // We need a lot of open files and memory for the indexes. Best effort + // to echo to the user what the ulimits are. + if s.ignoreUlimit { + log.Warningf("ignoring ulimit requirements") + } else if ulimitSupported { + if err := verifyUlimits(); err != nil { + return fmt.Errorf("verify ulimits: %w", err) + } + } else { + log.Errorf("This architecture does not supported ulimit verification. " + + "Consult the README for minimum values.") + } + + ctx, cancel := context.WithCancel(pctx) + defer cancel() + + err := s.DBOpen(ctx) + if err != nil { + return fmt.Errorf("Failed to open level database: %w", err) + } + defer func() { + err := s.DBClose() + if err != nil { + log.Errorf("db close: %v", err) + } + }() + + // Find out where IBD is at + bhs, err := s.db.BlockHeadersBest(ctx) + if err != nil { + return fmt.Errorf("block headers best: %v", err) + } + // No entries means we are at genesis + if len(bhs) == 0 { + bhs, err = s.insertGenesis(ctx) + if err != nil { + return fmt.Errorf("insert genesis: %v", err) + } + bhs, err = s.db.BlockHeadersBest(ctx) + if err != nil { + return err + } + } else if len(bhs) > 1 { + return fmt.Errorf("blockheaders best: unsupported fork") + } + s.lastBlockHeader = bhs[0] // Prime last seen block header + log.Infof("Starting block headers sync at height: %v time %v", + bhs[0].Height, bhs[0].Timestamp()) + + // HTTP server + mux := http.NewServeMux() + log.Infof("handle (tbc): %s", tbcapi.RouteWebsocket) + 
mux.HandleFunc(tbcapi.RouteWebsocket, s.handleWebsocket) + + httpServer := &http.Server{ + Addr: s.cfg.ListenAddress, + Handler: mux, + BaseContext: func(_ net.Listener) context.Context { return ctx }, + } + httpErrCh := make(chan error) + go func() { + log.Infof("Listening: %s", s.cfg.ListenAddress) + httpErrCh <- httpServer.ListenAndServe() + }() + defer func() { + if err = httpServer.Shutdown(ctx); err != nil { + log.Errorf("http server exit: %v", err) + return + } + log.Infof("RPC server shutdown cleanly") + }() + + // Prometheus + if s.cfg.PrometheusListenAddress != "" { + d, err := deucalion.New(&deucalion.Config{ + ListenAddress: s.cfg.PrometheusListenAddress, + }) + if err != nil { + return fmt.Errorf("failed to create server: %w", err) + } + cs := []prometheus.Collector{ + s.cmdsProcessed, + prometheus.NewGaugeFunc(prometheus.GaugeOpts{ + Subsystem: promSubsystem, + Name: "running", + Help: "Is tbc service running.", + }, s.promRunning), + } + s.wg.Add(1) + go func() { + defer s.wg.Done() + if err := d.Run(ctx, cs); err != context.Canceled { + log.Errorf("prometheus terminated with error: %v", err) + return + } + log.Infof("prometheus clean shutdown") + }() + } + + errC := make(chan error) + s.wg.Add(1) + go func() { + defer s.wg.Done() + err := s.startPeerManager(ctx) + if err != nil { + select { + case errC <- err: + default: + } + } + }() + + select { + case <-ctx.Done(): + err = ctx.Err() + case err = <-errC: + case err = <-httpErrCh: + } + cancel() + + log.Infof("tbc service shutting down") + s.wg.Wait() + log.Infof("tbc service clean shutdown") + + return err +} diff --git a/service/tbc/tbc_test.go b/service/tbc/tbc_test.go new file mode 100644 index 000000000..349d0c9da --- /dev/null +++ b/service/tbc/tbc_test.go @@ -0,0 +1,2150 @@ +// Copyright (c) 2024 Hemi Labs, Inc. +// Use of this source code is governed by the MIT License, +// which can be found in the LICENSE file. 
+ +package tbc + +import ( + "context" + "encoding/hex" + "encoding/json" + "fmt" + "io" + "os" + "slices" + "strconv" + "strings" + "testing" + "time" + + "github.com/btcsuite/btcd/btcutil" + "github.com/btcsuite/btcd/chaincfg" + "github.com/btcsuite/btcd/chaincfg/chainhash" + "github.com/btcsuite/btcd/wire" + "github.com/davecgh/go-spew/spew" + "github.com/docker/docker/api/types/container" + "github.com/docker/go-connections/nat" + "github.com/go-test/deep" + "github.com/phayes/freeport" + "github.com/testcontainers/testcontainers-go" + "github.com/testcontainers/testcontainers-go/wait" + "nhooyr.io/websocket" + "nhooyr.io/websocket/wsjson" + + "github.com/hemilabs/heminetwork/api" + "github.com/hemilabs/heminetwork/api/protocol" + "github.com/hemilabs/heminetwork/api/tbcapi" + "github.com/hemilabs/heminetwork/bitcoin" + "github.com/hemilabs/heminetwork/database/tbcd" +) + +const ( + privateKey = "72a2c41c84147325ce3c0f37697ef1e670c7169063dda89be9995c3c5219740f" + levelDbHome = ".testleveldb" +) + +type StdoutLogConsumer struct { + Name string // name of service +} + +func (t *StdoutLogConsumer) Accept(l testcontainers.Log) { + fmt.Printf("%s: %s", t.Name, string(l.Content)) +} + +func skipIfNoDocker(t *testing.T) { + envValue := os.Getenv("HEMI_DOCKER_TESTS") + val, err := strconv.ParseBool(envValue) + if envValue != "" && err != nil { + t.Fatal(err) + } + + if !val { + t.Skip("skipping docker tests") + } +} + +func TestBlockHeadersByHeightRaw(t *testing.T) { + skipIfNoDocker(t) + + ctx, cancel := context.WithTimeout(context.Background(), 1*time.Minute) + defer cancel() + + bitcoindContainer, mappedPeerPort := createBitcoindWithInitialBlocks(ctx, t, 100, "") + defer func() { + if err := bitcoindContainer.Terminate(ctx); err != nil { + panic(err) + } + }() + _, tbcUrl := createTbcServer(ctx, t, mappedPeerPort) + + c, _, err := websocket.Dial(ctx, tbcUrl, nil) + if err != nil { + t.Fatal(err) + } + defer c.CloseNow() + + assertPing(ctx, t, c, 
tbcapi.CmdPingRequest) + + tws := &tbcWs{ + conn: protocol.NewWSConn(c), + } + + var lastErr error + var response tbcapi.BlockHeadersByHeightRawResponse + for { + select { + case <-time.After(1 * time.Second): + case <-ctx.Done(): + t.Fatal(ctx.Err()) + } + lastErr = nil + err = tbcapi.Write(ctx, tws.conn, "someid", tbcapi.BlockHeadersByHeightRawRequest{ + Height: 55, + }) + if err != nil { + lastErr = err + continue + } + + var v protocol.Message + err = wsjson.Read(ctx, c, &v) + if err != nil { + lastErr = err + continue + } + + if v.Header.Command == tbcapi.CmdBlockHeadersByHeightRawResponse { + if err := json.Unmarshal(v.Payload, &response); err != nil { + t.Fatal(err) + } + break + } else { + lastErr = fmt.Errorf("received unexpected command: %s", v.Header.Command) + } + } + + if lastErr != nil { + t.Fatal(lastErr) + } + + bh, err := bytes2Header(response.BlockHeaders[0]) + if err != nil { + t.Fatal(err) + } + + t.Logf(spew.Sdump(bh)) + + if response.Error != nil { + t.Errorf("got unwanted error: %v", response.Error) + } + + cliBlockHeader := bitcoindBlockAtHeight(ctx, t, bitcoindContainer, 55) + expected := cliBlockHeaderToRaw(t, cliBlockHeader) + if diff := deep.Equal(expected, response.BlockHeaders); len(diff) > 0 { + t.Errorf("unexpected diff: %s", diff) + } +} + +func TestBlockHeadersByHeight(t *testing.T) { + skipIfNoDocker(t) + + ctx, cancel := context.WithTimeout(context.Background(), 1*time.Minute) + defer cancel() + + bitcoindContainer, mappedPeerPort := createBitcoindWithInitialBlocks(ctx, t, 100, "") + defer func() { + if err := bitcoindContainer.Terminate(ctx); err != nil { + panic(err) + } + }() + + _, tbcUrl := createTbcServer(ctx, t, mappedPeerPort) + + c, _, err := websocket.Dial(ctx, tbcUrl, nil) + if err != nil { + t.Fatal(err) + } + defer c.CloseNow() + + assertPing(ctx, t, c, tbcapi.CmdPingRequest) + + tws := &tbcWs{ + conn: protocol.NewWSConn(c), + } + + var lastErr error + var response tbcapi.BlockHeadersByHeightResponse + for { + select 
{ + case <-time.After(1 * time.Second): + case <-ctx.Done(): + t.Fatal(ctx.Err()) + } + lastErr = nil + err = tbcapi.Write(ctx, tws.conn, "someid", tbcapi.BlockHeadersByHeightRequest{ + Height: 55, + }) + if err != nil { + lastErr = err + continue + } + + var v protocol.Message + err = wsjson.Read(ctx, c, &v) + if err != nil { + lastErr = err + continue + } + + if v.Header.Command == tbcapi.CmdBlockHeadersByHeightResponse { + if err := json.Unmarshal(v.Payload, &response); err != nil { + t.Fatal(err) + } + break + } else { + lastErr = fmt.Errorf("received unexpected command: %s", v.Header.Command) + } + + } + + if lastErr != nil { + t.Fatal(lastErr) + } + + if response.Error != nil { + t.Errorf("got unwanted error: %v", response.Error) + } + + cliBlockHeader := bitcoindBlockAtHeight(ctx, t, bitcoindContainer, 55) + expected := cliBlockHeaderToTBC(t, cliBlockHeader) + if diff := deep.Equal(expected, response.BlockHeaders); len(diff) > 0 { + t.Errorf("unexpected diff: %s", diff) + } +} + +func TestBlockHeadersByHeightDoesNotExist(t *testing.T) { + skipIfNoDocker(t) + + ctx, cancel := context.WithTimeout(context.Background(), 1*time.Minute) + defer cancel() + + bitcoindContainer, mappedPeerPort := createBitcoindWithInitialBlocks(ctx, t, 100, "") + defer func() { + if err := bitcoindContainer.Terminate(ctx); err != nil { + panic(err) + } + }() + + _, tbcUrl := createTbcServer(ctx, t, mappedPeerPort) + + c, _, err := websocket.Dial(ctx, tbcUrl, nil) + if err != nil { + t.Fatal(err) + } + defer c.CloseNow() + + assertPing(ctx, t, c, tbcapi.CmdPingRequest) + + tws := &tbcWs{ + conn: protocol.NewWSConn(c), + } + + var lastErr error + var response tbcapi.BlockHeadersByHeightResponse + for { + select { + case <-time.After(1 * time.Second): + case <-ctx.Done(): + t.Fatal(ctx.Err()) + } + lastErr = nil + err = tbcapi.Write(ctx, tws.conn, "someid", tbcapi.BlockHeadersByHeightRequest{ + Height: 550, + }) + if err != nil { + lastErr = err + continue + } + + var v protocol.Message 
+ err = wsjson.Read(ctx, c, &v) + if err != nil { + lastErr = err + continue + } + + if v.Header.Command == tbcapi.CmdBlockHeadersByHeightResponse { + if err := json.Unmarshal(v.Payload, &response); err != nil { + t.Fatal(err) + } + break + } else { + lastErr = fmt.Errorf("received unexpected command: %s", v.Header.Command) + } + + } + + if lastErr != nil { + t.Fatal(lastErr) + } + + if response.Error.Message != "block headers not found at height 550" { + t.Fatalf("unexpected error message: %s", response.Error.Message) + } +} + +func TestBlockHeadersBestRaw(t *testing.T) { + skipIfNoDocker(t) + + ctx, cancel := context.WithTimeout(context.Background(), 1*time.Minute) + defer cancel() + + bitcoindContainer, mappedPeerPort := createBitcoindWithInitialBlocks(ctx, t, 50, "") + defer func() { + if err := bitcoindContainer.Terminate(ctx); err != nil { + panic(err) + } + }() + + _, tbcUrl := createTbcServer(ctx, t, mappedPeerPort) + + c, _, err := websocket.Dial(ctx, tbcUrl, nil) + if err != nil { + t.Fatal(err) + } + defer c.CloseNow() + + assertPing(ctx, t, c, tbcapi.CmdPingRequest) + + tws := &tbcWs{ + conn: protocol.NewWSConn(c), + } + + var lastErr error + var response tbcapi.BlockHeadersBestRawResponse + for { + select { + case <-time.After(1 * time.Second): + case <-ctx.Done(): + t.Fatal(ctx.Err()) + } + lastErr = nil + err = tbcapi.Write(ctx, tws.conn, "someid", tbcapi.BlockHeadersBestRawRequest{}) + if err != nil { + lastErr = err + continue + } + + var v protocol.Message + err = wsjson.Read(ctx, c, &v) + if err != nil { + lastErr = err + continue + } + + if v.Header.Command == tbcapi.CmdBlockHeadersBestRawResponse { + if err := json.Unmarshal(v.Payload, &response); err != nil { + t.Fatal(err) + } + break + } else { + lastErr = fmt.Errorf("received unexpected command: %s", v.Header.Command) + } + } + + if lastErr != nil { + t.Fatal(lastErr) + } + + bh, err := bytes2Header(response.BlockHeaders[0]) + if err != nil { + t.Fatal(err) + } + + t.Logf(spew.Sdump(bh)) + 
+ if response.Error != nil { + t.Errorf("got unwanted error: %v", response.Error) + } + + cliBlockHeader := bitcoindBestBlock(ctx, t, bitcoindContainer) + expected := cliBlockHeaderToRaw(t, cliBlockHeader) + if diff := deep.Equal(expected, response.BlockHeaders); len(diff) > 0 { + t.Errorf("unexpected diff: %s", diff) + } +} + +func TestBtcBlockHeadersBest(t *testing.T) { + skipIfNoDocker(t) + + ctx, cancel := context.WithTimeout(context.Background(), 1*time.Minute) + defer cancel() + + bitcoindContainer, mappedPeerPort := createBitcoindWithInitialBlocks(ctx, t, 100, "") + defer func() { + if err := bitcoindContainer.Terminate(ctx); err != nil { + panic(err) + } + }() + + _, tbcUrl := createTbcServer(ctx, t, mappedPeerPort) + + c, _, err := websocket.Dial(ctx, tbcUrl, nil) + if err != nil { + t.Fatal(err) + } + defer c.CloseNow() + + assertPing(ctx, t, c, tbcapi.CmdPingRequest) + + tws := &tbcWs{ + conn: protocol.NewWSConn(c), + } + + var lastErr error + var response tbcapi.BlockHeadersBestResponse + for { + select { + case <-time.After(1 * time.Second): + case <-ctx.Done(): + t.Fatal(ctx.Err()) + } + lastErr = nil + err = tbcapi.Write(ctx, tws.conn, "someid", tbcapi.BlockHeadersBestRequest{}) + if err != nil { + lastErr = err + continue + } + + var v protocol.Message + err = wsjson.Read(ctx, c, &v) + if err != nil { + lastErr = err + continue + } + + if v.Header.Command == tbcapi.CmdBlockHeadersBestResponse { + if err := json.Unmarshal(v.Payload, &response); err != nil { + t.Fatal(err) + } + break + } else { + lastErr = fmt.Errorf("received unexpected command: %s", v.Header.Command) + } + + } + + if lastErr != nil { + t.Fatal(lastErr) + } + + if response.Error != nil { + t.Errorf("got unwanted error: %v", response.Error) + } + + cliBlockHeader := bitcoindBestBlock(ctx, t, bitcoindContainer) + expected := cliBlockHeaderToTBC(t, cliBlockHeader) + if diff := deep.Equal(expected, response.BlockHeaders); len(diff) > 0 { + t.Errorf("unexpected diff: %s", diff) + } +} + 
+func TestServerBlockHeadersBest(t *testing.T) { + skipIfNoDocker(t) + + ctx, cancel := context.WithTimeout(context.Background(), 1*time.Minute) + defer cancel() + + blocks := uint64(100) + bitcoindContainer, mappedPeerPort := createBitcoindWithInitialBlocks(ctx, t, blocks, "") + defer func() { + if err := bitcoindContainer.Terminate(ctx); err != nil { + panic(err) + } + }() + + tbcServer, _ := createTbcServer(ctx, t, mappedPeerPort) + + select { + case <-time.After(1 * time.Second): + case <-ctx.Done(): + t.Fatal(ctx.Err()) + } + + height, bhs, err := tbcServer.BlockHeadersBest(ctx) + if err != nil { + t.Errorf("BlockHeadersBest() err = %v, want nil", err) + } + + if l := len(bhs); l != 1 { + t.Errorf("BlockHeadersBest() block len = %d, want 1", l) + } + + if height != blocks { + t.Errorf("BlockHeadersBest() height = %d, want %d", height, blocks) + } +} + +func TestBalanceByAddress(t *testing.T) { + skipIfNoDocker(t) + + type testTableItem struct { + name string + address func() string + doNotGenerate bool + } + + testTable := []testTableItem{ + { + name: "Pay to public key hash", + address: func() string { + _, _, address, err := bitcoin.KeysAndAddressFromHexString( + privateKey, + &chaincfg.RegressionNetParams, + ) + if err != nil { + t.Fatal(err) + } + + return address.EncodeAddress() + }, + }, + { + name: "Pay to script hash", + address: func() string { + address, err := btcutil.NewAddressScriptHash([]byte("blahblahscripthash"), &chaincfg.RegressionNetParams) + if err != nil { + t.Fatal(err) + } + + return address.EncodeAddress() + }, + }, + { + name: "Pay to witness public key hash", + address: func() string { + address, err := btcutil.NewAddressWitnessPubKeyHash([]byte("blahblahwitnesspublickeyhash")[:20], &chaincfg.RegressionNetParams) + if err != nil { + t.Fatal(err) + } + + return address.EncodeAddress() + }, + }, + { + name: "Pay to witness script hash", + address: func() string { + address, err := 
btcutil.NewAddressWitnessScriptHash([]byte("blahblahwitnessscripthashblahblahblah")[:32], &chaincfg.RegressionNetParams) + if err != nil { + t.Fatal(err) + } + + return address.EncodeAddress() + }, + }, + { + name: "Pay to taproot", + address: func() string { + address, err := btcutil.NewAddressTaproot([]byte("blahblahwtaprootblahblahblahblah")[:32], &chaincfg.RegressionNetParams) + if err != nil { + t.Fatal(err) + } + + return address.EncodeAddress() + }, + }, + { + name: "no balance", + address: func() string { + address, err := btcutil.NewAddressTaproot([]byte("blahblahwtaprootblahblahblahblah")[:32], &chaincfg.RegressionNetParams) + if err != nil { + t.Fatal(err) + } + + return address.EncodeAddress() + }, + doNotGenerate: true, + }, + } + + for _, tti := range testTable { + t.Run(tti.name, func(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), 1*time.Minute) + defer cancel() + + initialBlocks := 0 + if !tti.doNotGenerate { + initialBlocks = 4 + } + + bitcoindContainer, mappedPeerPort := createBitcoindWithInitialBlocks(ctx, t, uint64(initialBlocks), tti.address()) + defer func() { + if err := bitcoindContainer.Terminate(ctx); err != nil { + panic(err) + } + }() + + // generate to another address to ensure it's not included in our query + someOtherAddress, err := btcutil.NewAddressScriptHash([]byte("blahblahotherscripthash"), &chaincfg.RegressionNetParams) + if err != nil { + t.Fatal(err) + } + _, err = runBitcoinCommand( + ctx, + t, + bitcoindContainer, + []string{ + "bitcoin-cli", + "-regtest=1", + "generatetoaddress", + "3", + someOtherAddress.EncodeAddress(), + }) + if err != nil { + t.Fatal(err) + } + + tbcServer, tbcUrl := createTbcServer(ctx, t, mappedPeerPort) + + c, _, err := websocket.Dial(ctx, tbcUrl, nil) + if err != nil { + t.Fatal(err) + } + defer c.CloseNow() + + assertPing(ctx, t, c, tbcapi.CmdPingRequest) + + tws := &tbcWs{ + conn: protocol.NewWSConn(c), + } + + var lastErr error + var response 
tbcapi.BalanceByAddressResponse + for { + select { + case <-time.After(1 * time.Second): + case <-ctx.Done(): + t.Fatal(ctx.Err()) + } + err = tbcServer.UtxoIndexer(ctx, 0, 1000) + if err != nil { + t.Fatal(err) + } + lastErr = nil + err = tbcapi.Write(ctx, tws.conn, "someid", tbcapi.BalanceByAddressRequest{ + Address: tti.address(), + }) + if err != nil { + lastErr = err + continue + } + + var v protocol.Message + err = wsjson.Read(ctx, c, &v) + if err != nil { + lastErr = err + continue + } + + if v.Header.Command == tbcapi.CmdBalanceByAddressResponse { + if err := json.Unmarshal(v.Payload, &response); err != nil { + t.Fatal(err) + } + + var pricePerBlock uint64 = 50 * 100000000 + var blocks uint64 = 4 + var expectedBalance uint64 = 0 + if !tti.doNotGenerate { + expectedBalance = pricePerBlock * blocks + } + + expected := tbcapi.BalanceByAddressResponse{ + Balance: expectedBalance, + Error: nil, + } + if diff := deep.Equal(expected, response); len(diff) > 0 { + if response.Error != nil { + t.Error(response.Error.Message) + } + t.Logf("unexpected diff: %s", diff) + + // there is a chance we just haven't finished indexing + // the blocks and txs, retry until timeout + continue + } + break + } else { + lastErr = fmt.Errorf("received unexpected command: %s", v.Header.Command) + } + + } + + if lastErr != nil { + t.Fatal(lastErr) + } + }) + } +} + +func TestUtxosByAddressRaw(t *testing.T) { + skipIfNoDocker(t) + + type testTableItem struct { + name string + address func() string + doNotGenerate bool + limit uint64 + start uint64 + } + + testTable := []testTableItem{ + { + name: "Pay to public key hash", + address: func() string { + _, _, address, err := bitcoin.KeysAndAddressFromHexString( + privateKey, + &chaincfg.RegressionNetParams, + ) + if err != nil { + t.Fatal(err) + } + + return address.EncodeAddress() + }, + limit: 10, + }, + { + name: "Pay to script hash", + address: func() string { + address, err := btcutil.NewAddressScriptHash([]byte("blahblahscripthash"), 
&chaincfg.RegressionNetParams) + if err != nil { + t.Fatal(err) + } + + return address.EncodeAddress() + }, + limit: 10, + }, + { + name: "Pay to witness public key hash", + address: func() string { + address, err := btcutil.NewAddressWitnessPubKeyHash([]byte("blahblahwitnesspublickeyhash")[:20], &chaincfg.RegressionNetParams) + if err != nil { + t.Fatal(err) + } + + return address.EncodeAddress() + }, + limit: 10, + }, + { + name: "Pay to witness script hash", + address: func() string { + address, err := btcutil.NewAddressWitnessScriptHash([]byte("blahblahwitnessscripthashblahblahblah")[:32], &chaincfg.RegressionNetParams) + if err != nil { + t.Fatal(err) + } + + return address.EncodeAddress() + }, + limit: 10, + }, + { + name: "Pay to taproot", + address: func() string { + address, err := btcutil.NewAddressTaproot([]byte("blahblahwtaprootblahblahblahblah")[:32], &chaincfg.RegressionNetParams) + if err != nil { + t.Fatal(err) + } + + return address.EncodeAddress() + }, + limit: 10, + }, + { + name: "no balance", + address: func() string { + address, err := btcutil.NewAddressTaproot([]byte("blahblahwtaprootblahblahblahblah")[:32], &chaincfg.RegressionNetParams) + if err != nil { + t.Fatal(err) + } + + return address.EncodeAddress() + }, + doNotGenerate: true, + limit: 10, + }, + { + name: "small limit", + address: func() string { + address, err := btcutil.NewAddressTaproot([]byte("blahblahwtaprootblahblahblahblahsmalllimit")[:32], &chaincfg.RegressionNetParams) + if err != nil { + t.Fatal(err) + } + + return address.EncodeAddress() + }, + limit: 2, + }, + { + name: "offset", + address: func() string { + address, err := btcutil.NewAddressTaproot([]byte("blahblahwtaprootblahblahblahblahsmalllimit")[:32], &chaincfg.RegressionNetParams) + if err != nil { + t.Fatal(err) + } + + return address.EncodeAddress() + }, + start: 3, + limit: 10, + }, + } + + for _, tti := range testTable { + t.Run(tti.name, func(t *testing.T) { + ctx, cancel := 
context.WithTimeout(context.Background(), 1*time.Minute) + defer cancel() + + var bitcoindContainer testcontainers.Container + var mappedPeerPort nat.Port + initialBlocks := 0 + if !tti.doNotGenerate { + initialBlocks = 4 + } + bitcoindContainer, mappedPeerPort = createBitcoindWithInitialBlocks(ctx, t, uint64(initialBlocks), tti.address()) + defer func() { + if err := bitcoindContainer.Terminate(ctx); err != nil { + panic(err) + } + }() + + // generate to another address to ensure it's not included in our query + someOtherAddress, err := btcutil.NewAddressScriptHash([]byte("blahblahotherscripthash"), &chaincfg.RegressionNetParams) + if err != nil { + t.Fatal(err) + } + _, err = runBitcoinCommand( + ctx, + t, + bitcoindContainer, + []string{ + "bitcoin-cli", + "-regtest=1", + "generatetoaddress", + "3", + someOtherAddress.EncodeAddress(), + }) + if err != nil { + t.Fatal(err) + } + + tbcServer, tbcUrl := createTbcServer(ctx, t, mappedPeerPort) + + c, _, err := websocket.Dial(ctx, tbcUrl, nil) + if err != nil { + t.Fatal(err) + } + defer c.CloseNow() + + assertPing(ctx, t, c, tbcapi.CmdPingRequest) + + tws := &tbcWs{ + conn: protocol.NewWSConn(c), + } + + var lastErr error + var response tbcapi.UtxosByAddressRawResponse + for { + select { + case <-time.After(1 * time.Second): + case <-ctx.Done(): + t.Fatal(ctx.Err()) + } + err = tbcServer.UtxoIndexer(ctx, 0, 1000) + if err != nil { + t.Fatal(err) + } + lastErr = nil + err = tbcapi.Write(ctx, tws.conn, "someid", tbcapi.UtxosByAddressRawRequest{ + Address: tti.address(), + Start: uint(tti.start), + Count: uint(tti.limit), + }) + if err != nil { + lastErr = err + continue + } + + var v protocol.Message + err = wsjson.Read(ctx, c, &v) + if err != nil { + lastErr = err + continue + } + + if v.Header.Command == tbcapi.CmdUtxosByAddressRawResponse { + if err := json.Unmarshal(v.Payload, &response); err != nil { + t.Fatal(err) + } + + // we generated 4 blocks to this address previously, therefore + // there should be 4 utxos 
+ expectedCount := 4 - tti.start + if tti.limit < uint64(expectedCount) { + expectedCount = tti.limit + } + + if !tti.doNotGenerate && len(response.Utxos) != int(expectedCount) { + t.Fatalf("should have %d utxos, received: %d", expectedCount, len(response.Utxos)) + } else if tti.doNotGenerate && len(response.Utxos) != 0 { + t.Fatalf("did not generate any blocks for address, should not have utxos") + } + break + } else { + lastErr = fmt.Errorf("received unexpected command: %s", v.Header.Command) + } + + } + + if lastErr != nil { + t.Fatal(lastErr) + } + }) + } +} + +func TestUtxosByAddress(t *testing.T) { + skipIfNoDocker(t) + + type testTableItem struct { + name string + address func() string + doNotGenerate bool + limit uint64 + start uint64 + } + + testTable := []testTableItem{ + { + name: "Pay to public key hash", + address: func() string { + _, _, address, err := bitcoin.KeysAndAddressFromHexString( + privateKey, + &chaincfg.RegressionNetParams, + ) + if err != nil { + t.Fatal(err) + } + + return address.EncodeAddress() + }, + limit: 10, + }, + { + name: "Pay to script hash", + address: func() string { + address, err := btcutil.NewAddressScriptHash([]byte("blahblahscripthash"), &chaincfg.RegressionNetParams) + if err != nil { + t.Fatal(err) + } + + return address.EncodeAddress() + }, + limit: 10, + }, + { + name: "Pay to witness public key hash", + address: func() string { + address, err := btcutil.NewAddressWitnessPubKeyHash([]byte("blahblahwitnesspublickeyhash")[:20], &chaincfg.RegressionNetParams) + if err != nil { + t.Fatal(err) + } + + return address.EncodeAddress() + }, + limit: 10, + }, + { + name: "Pay to witness script hash", + address: func() string { + address, err := btcutil.NewAddressWitnessScriptHash([]byte("blahblahwitnessscripthashblahblahblah")[:32], &chaincfg.RegressionNetParams) + if err != nil { + t.Fatal(err) + } + + return address.EncodeAddress() + }, + limit: 10, + }, + { + name: "Pay to taproot", + address: func() string { + address, err 
:= btcutil.NewAddressTaproot([]byte("blahblahwtaprootblahblahblahblah")[:32], &chaincfg.RegressionNetParams) + if err != nil { + t.Fatal(err) + } + + return address.EncodeAddress() + }, + limit: 10, + }, + { + name: "no balance", + address: func() string { + address, err := btcutil.NewAddressTaproot([]byte("blahblahwtaprootblahblahblahblah")[:32], &chaincfg.RegressionNetParams) + if err != nil { + t.Fatal(err) + } + + return address.EncodeAddress() + }, + doNotGenerate: true, + limit: 10, + }, + { + name: "small limit", + address: func() string { + address, err := btcutil.NewAddressTaproot([]byte("blahblahwtaprootblahblahblahblahsmalllimit")[:32], &chaincfg.RegressionNetParams) + if err != nil { + t.Fatal(err) + } + + return address.EncodeAddress() + }, + limit: 2, + }, + { + name: "offset", + address: func() string { + address, err := btcutil.NewAddressTaproot([]byte("blahblahwtaprootblahblahblahblahsmalllimit")[:32], &chaincfg.RegressionNetParams) + if err != nil { + t.Fatal(err) + } + + return address.EncodeAddress() + }, + start: 3, + limit: 10, + }, + } + + for _, tti := range testTable { + t.Run(tti.name, func(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), 1*time.Minute) + defer cancel() + + var bitcoindContainer testcontainers.Container + var mappedPeerPort nat.Port + initialBlocks := 0 + if !tti.doNotGenerate { + initialBlocks = 4 + } + bitcoindContainer, mappedPeerPort = createBitcoindWithInitialBlocks(ctx, t, uint64(initialBlocks), tti.address()) + defer func() { + if err := bitcoindContainer.Terminate(ctx); err != nil { + panic(err) + } + }() + + // generate to another address to ensure it's not included in our query + someOtherAddress, err := btcutil.NewAddressScriptHash([]byte("blahblahotherscripthash"), &chaincfg.RegressionNetParams) + if err != nil { + t.Fatal(err) + } + _, err = runBitcoinCommand( + ctx, + t, + bitcoindContainer, + []string{ + "bitcoin-cli", + "-regtest=1", + "generatetoaddress", + "3", + 
someOtherAddress.EncodeAddress(), + }) + if err != nil { + t.Fatal(err) + } + + tbcServer, tbcUrl := createTbcServer(ctx, t, mappedPeerPort) + + c, _, err := websocket.Dial(ctx, tbcUrl, nil) + if err != nil { + t.Fatal(err) + } + defer c.CloseNow() + + assertPing(ctx, t, c, tbcapi.CmdPingRequest) + + tws := &tbcWs{ + conn: protocol.NewWSConn(c), + } + + var lastErr error + var response tbcapi.UtxosByAddressResponse + for { + select { + case <-time.After(1 * time.Second): + case <-ctx.Done(): + t.Fatal(ctx.Err()) + } + err = tbcServer.UtxoIndexer(ctx, 0, 1000) + if err != nil { + t.Fatal(err) + } + lastErr = nil + err = tbcapi.Write(ctx, tws.conn, "someid", tbcapi.UtxosByAddressRequest{ + Address: tti.address(), + Start: uint(tti.start), + Count: uint(tti.limit), + }) + if err != nil { + lastErr = err + continue + } + + var v protocol.Message + err = wsjson.Read(ctx, c, &v) + if err != nil { + lastErr = err + continue + } + + if v.Header.Command == tbcapi.CmdUtxosByAddressResponse { + if err := json.Unmarshal(v.Payload, &response); err != nil { + t.Fatal(err) + } + + // we generated 4 blocks to this address previously, therefore + // there should be 4 utxos + expectedCount := 4 - tti.start + if tti.limit < uint64(expectedCount) { + expectedCount = tti.limit + } + + if !tti.doNotGenerate && len(response.Utxos) != int(expectedCount) { + t.Fatalf("should have %d utxos, received: %d", expectedCount, len(response.Utxos)) + } else if tti.doNotGenerate && len(response.Utxos) != 0 { + t.Fatalf("did not generate any blocks for address, should not have utxos") + } + break + } else { + lastErr = fmt.Errorf("received unexpected command: %s", v.Header.Command) + } + + } + + if lastErr != nil { + t.Fatal(lastErr) + } + }) + } +} + +func TestTxByIdRaw(t *testing.T) { + skipIfNoDocker(t) + ctx, cancel := context.WithTimeout(context.Background(), 1*time.Minute) + defer cancel() + + _, _, address, err := bitcoin.KeysAndAddressFromHexString( + privateKey, + 
&chaincfg.RegressionNetParams, + ) + if err != nil { + t.Fatal(err) + } + + bitcoindContainer, mappedPeerPort := createBitcoindWithInitialBlocks(ctx, t, 4, address.String()) + defer func() { + if err := bitcoindContainer.Terminate(ctx); err != nil { + panic(err) + } + }() + + tbcServer, tbcUrl := createTbcServer(ctx, t, mappedPeerPort) + + c, _, err := websocket.Dial(ctx, tbcUrl, nil) + if err != nil { + t.Fatal(err) + } + defer c.CloseNow() + + assertPing(ctx, t, c, tbcapi.CmdPingRequest) + + tws := &tbcWs{ + conn: protocol.NewWSConn(c), + } + + var lastErr error + var response tbcapi.TxByIdRawResponse + for { + select { + case <-time.After(1 * time.Second): + case <-ctx.Done(): + t.Fatal(ctx.Err()) + } + err = tbcServer.TxIndexer(ctx, 0, 1000) + if err != nil { + t.Fatal(err) + } + lastErr = nil + txId := getRandomTxId(ctx, t, bitcoindContainer) + txIdBytes, err := hex.DecodeString(txId) + if err != nil { + t.Fatal(err) + } + + slices.Reverse(txIdBytes) + + err = tbcapi.Write(ctx, tws.conn, "someid", tbcapi.TxByIdRawRequest{ + TxId: txIdBytes, + }) + if err != nil { + lastErr = err + continue + } + + var v protocol.Message + err = wsjson.Read(ctx, c, &v) + if err != nil { + lastErr = err + continue + } + + if v.Header.Command == tbcapi.CmdTxByIdRawResponse { + if err := json.Unmarshal(v.Payload, &response); err != nil { + t.Fatal(err) + } + + if response.Error != nil { + t.Fatal(response.Error.Message) + } + + // XXX - write a better test than this, we should be able to compare + // against bitcoin-cli response fields + + // did we get the tx and can we parse it? + tx, err := bytes2Tx(response.Tx) + if err != nil { + t.Fatal(err) + } + + // is the hash equal to what we queried for? 
+ if tx.TxHash().String() != txId { + t.Fatalf("id mismatch: %s != %s", tx.TxHash().String(), txId) + } + + break + } else { + lastErr = fmt.Errorf("received unexpected command: %s", v.Header.Command) + } + + } + + if lastErr != nil { + t.Fatal(lastErr) + } +} + +func TestTxByIdRawInvalid(t *testing.T) { + skipIfNoDocker(t) + ctx, cancel := context.WithTimeout(context.Background(), 1*time.Minute) + defer cancel() + _, _, address, err := bitcoin.KeysAndAddressFromHexString( + privateKey, + &chaincfg.RegressionNetParams, + ) + if err != nil { + t.Fatal(err) + } + + bitcoindContainer, mappedPeerPort := createBitcoindWithInitialBlocks(ctx, t, 4, address.String()) + defer func() { + if err := bitcoindContainer.Terminate(ctx); err != nil { + panic(err) + } + }() + + tbcServer, tbcUrl := createTbcServer(ctx, t, mappedPeerPort) + + c, _, err := websocket.Dial(ctx, tbcUrl, nil) + if err != nil { + t.Fatal(err) + } + defer c.CloseNow() + + assertPing(ctx, t, c, tbcapi.CmdPingRequest) + + tws := &tbcWs{ + conn: protocol.NewWSConn(c), + } + + var lastErr error + var response tbcapi.TxByIdRawResponse + for { + select { + case <-time.After(1 * time.Second): + case <-ctx.Done(): + t.Fatal(ctx.Err()) + } + err = tbcServer.TxIndexer(ctx, 0, 1000) + if err != nil { + t.Fatal(err) + } + lastErr = nil + txId := getRandomTxId(ctx, t, bitcoindContainer) + txIdBytes, err := hex.DecodeString(txId) + if err != nil { + t.Fatal(err) + } + + txIdBytes[0]++ + + slices.Reverse(txIdBytes) + + err = tbcapi.Write(ctx, tws.conn, "someid", tbcapi.TxByIdRawRequest{ + TxId: txIdBytes, + }) + if err != nil { + lastErr = err + continue + } + + var v protocol.Message + err = wsjson.Read(ctx, c, &v) + if err != nil { + lastErr = err + continue + } + + if v.Header.Command == tbcapi.CmdTxByIdRawResponse { + if err := json.Unmarshal(v.Payload, &response); err != nil { + t.Fatal(err) + } + + if response.Error == nil { + t.Fatal("expecting error") + } + + if response.Error != nil { + if 
!strings.Contains(response.Error.Message, "not found:") { + t.Fatalf("incorrect error found %s", response.Error.Message) + } + } + + break + } else { + lastErr = fmt.Errorf("received unexpected command: %s", v.Header.Command) + } + + } + + if lastErr != nil { + t.Fatal(lastErr) + } +} + +func TestTxByIdRawNotFound(t *testing.T) { + skipIfNoDocker(t) + ctx, cancel := context.WithTimeout(context.Background(), 1*time.Minute) + defer cancel() + bitcoindContainer, mappedPeerPort := createBitcoindWithInitialBlocks(ctx, t, 0, "") + defer func() { + if err := bitcoindContainer.Terminate(ctx); err != nil { + panic(err) + } + }() + + _, _, address, err := bitcoin.KeysAndAddressFromHexString( + privateKey, + &chaincfg.RegressionNetParams, + ) + if err != nil { + t.Fatal(err) + } + + _, err = runBitcoinCommand( + ctx, + t, + bitcoindContainer, + []string{ + "bitcoin-cli", + "-regtest=1", + "generatetoaddress", + "4", + address.EncodeAddress(), + }) + if err != nil { + t.Fatal(err) + } + + tbcServer, tbcUrl := createTbcServer(ctx, t, mappedPeerPort) + + c, _, err := websocket.Dial(ctx, tbcUrl, nil) + if err != nil { + t.Fatal(err) + } + defer c.CloseNow() + + assertPing(ctx, t, c, tbcapi.CmdPingRequest) + + tws := &tbcWs{ + conn: protocol.NewWSConn(c), + } + + var lastErr error + var response tbcapi.TxByIdRawResponse + for { + select { + case <-time.After(1 * time.Second): + case <-ctx.Done(): + t.Fatal(ctx.Err()) + } + err = tbcServer.TxIndexer(ctx, 0, 1000) + if err != nil { + t.Fatal(err) + } + lastErr = nil + txId := getRandomTxId(ctx, t, bitcoindContainer) + txIdBytes, err := hex.DecodeString(txId) + if err != nil { + t.Fatal(err) + } + + txIdBytes = append(txIdBytes, 8) + + slices.Reverse(txIdBytes) + + err = tbcapi.Write(ctx, tws.conn, "someid", tbcapi.TxByIdRawRequest{ + TxId: txIdBytes, + }) + if err != nil { + lastErr = err + continue + } + + var v protocol.Message + err = wsjson.Read(ctx, c, &v) + if err != nil { + lastErr = err + continue + } + + if v.Header.Command 
== tbcapi.CmdTxByIdRawResponse { + if err := json.Unmarshal(v.Payload, &response); err != nil { + t.Fatal(err) + } + + if response.Error == nil { + t.Fatal("expecting error") + } + + if response.Error != nil { + if !strings.Contains(response.Error.Message, "invalid tx id") { + t.Fatalf("incorrect error found: %s", response.Error.Message) + } + } + + break + } else { + lastErr = fmt.Errorf("received unexpected command: %s", v.Header.Command) + } + + } + + if lastErr != nil { + t.Fatal(lastErr) + } +} + +func TestTxById(t *testing.T) { + skipIfNoDocker(t) + ctx, cancel := context.WithTimeout(context.Background(), 1*time.Minute) + defer cancel() + + _, _, address, err := bitcoin.KeysAndAddressFromHexString( + privateKey, + &chaincfg.RegressionNetParams, + ) + if err != nil { + t.Fatal(err) + } + + bitcoindContainer, mappedPeerPort := createBitcoindWithInitialBlocks(ctx, t, 4, address.String()) + defer func() { + if err := bitcoindContainer.Terminate(ctx); err != nil { + panic(err) + } + }() + + tbcServer, tbcUrl := createTbcServer(ctx, t, mappedPeerPort) + + c, _, err := websocket.Dial(ctx, tbcUrl, nil) + if err != nil { + t.Fatal(err) + } + defer c.CloseNow() + + assertPing(ctx, t, c, tbcapi.CmdPingRequest) + + tws := &tbcWs{ + conn: protocol.NewWSConn(c), + } + + var lastErr error + var response tbcapi.TxByIdResponse + for { + select { + case <-time.After(1 * time.Second): + case <-ctx.Done(): + t.Fatal(ctx.Err()) + } + err = tbcServer.TxIndexer(ctx, 0, 1000) + if err != nil { + t.Fatal(err) + } + lastErr = nil + txId := getRandomTxId(ctx, t, bitcoindContainer) + txIdBytes, err := hex.DecodeString(txId) + if err != nil { + t.Fatal(err) + } + + slices.Reverse(txIdBytes) + + err = tbcapi.Write(ctx, tws.conn, "someid", tbcapi.TxByIdRequest{ + TxId: txIdBytes, + }) + if err != nil { + lastErr = err + continue + } + + var v protocol.Message + err = wsjson.Read(ctx, c, &v) + if err != nil { + lastErr = err + continue + } + + if v.Header.Command == tbcapi.CmdTxByIdResponse 
{ + if err := json.Unmarshal(v.Payload, &response); err != nil { + t.Fatal(err) + } + + if response.Error != nil { + t.Fatal(response.Error.Message) + } + + tx, err := tbcServer.TxById(ctx, tbcd.TxId(txIdBytes)) + if err != nil { + t.Fatal(err) + } + + w := wireTxToTbcapiTx(tx) + + if diff := deep.Equal(w, &response.Tx); len(diff) > 0 { + t.Fatal(diff) + } + + break + } else { + lastErr = fmt.Errorf("received unexpected command: %s", v.Header.Command) + } + + } + + if lastErr != nil { + t.Fatal(lastErr) + } +} + +func TestTxByIdInvalid(t *testing.T) { + skipIfNoDocker(t) + ctx, cancel := context.WithTimeout(context.Background(), 1*time.Minute) + defer cancel() + _, _, address, err := bitcoin.KeysAndAddressFromHexString( + privateKey, + &chaincfg.RegressionNetParams, + ) + if err != nil { + t.Fatal(err) + } + + bitcoindContainer, mappedPeerPort := createBitcoindWithInitialBlocks(ctx, t, 4, address.String()) + defer func() { + if err := bitcoindContainer.Terminate(ctx); err != nil { + panic(err) + } + }() + + tbcServer, tbcUrl := createTbcServer(ctx, t, mappedPeerPort) + + c, _, err := websocket.Dial(ctx, tbcUrl, nil) + if err != nil { + t.Fatal(err) + } + defer c.CloseNow() + + assertPing(ctx, t, c, tbcapi.CmdPingRequest) + + tws := &tbcWs{ + conn: protocol.NewWSConn(c), + } + + var lastErr error + var response tbcapi.TxByIdResponse + for { + select { + case <-time.After(1 * time.Second): + case <-ctx.Done(): + t.Fatal(ctx.Err()) + } + err = tbcServer.TxIndexer(ctx, 0, 1000) + if err != nil { + t.Fatal(err) + } + lastErr = nil + txId := getRandomTxId(ctx, t, bitcoindContainer) + txIdBytes, err := hex.DecodeString(txId) + if err != nil { + t.Fatal(err) + } + + txIdBytes[0]++ + + slices.Reverse(txIdBytes) + + err = tbcapi.Write(ctx, tws.conn, "someid", tbcapi.TxByIdRequest{ + TxId: txIdBytes, + }) + if err != nil { + lastErr = err + continue + } + + var v protocol.Message + err = wsjson.Read(ctx, c, &v) + if err != nil { + lastErr = err + continue + } + + if 
v.Header.Command == tbcapi.CmdTxByIdResponse { + if err := json.Unmarshal(v.Payload, &response); err != nil { + t.Fatal(err) + } + + if response.Error == nil { + t.Fatal("expecting error") + } + + if response.Error != nil { + if !strings.Contains(response.Error.Message, "not found:") { + t.Fatalf("incorrect error found %s", response.Error.Message) + } + } + + break + } else { + lastErr = fmt.Errorf("received unexpected command: %s", v.Header.Command) + } + + } + + if lastErr != nil { + t.Fatal(lastErr) + } +} + +func TestTxByIdNotFound(t *testing.T) { + skipIfNoDocker(t) + ctx, cancel := context.WithTimeout(context.Background(), 1*time.Minute) + defer cancel() + bitcoindContainer, mappedPeerPort := createBitcoindWithInitialBlocks(ctx, t, 0, "") + defer func() { + if err := bitcoindContainer.Terminate(ctx); err != nil { + panic(err) + } + }() + + _, _, address, err := bitcoin.KeysAndAddressFromHexString( + privateKey, + &chaincfg.RegressionNetParams, + ) + if err != nil { + t.Fatal(err) + } + + _, err = runBitcoinCommand( + ctx, + t, + bitcoindContainer, + []string{ + "bitcoin-cli", + "-regtest=1", + "generatetoaddress", + "4", + address.EncodeAddress(), + }) + if err != nil { + t.Fatal(err) + } + + tbcServer, tbcUrl := createTbcServer(ctx, t, mappedPeerPort) + + c, _, err := websocket.Dial(ctx, tbcUrl, nil) + if err != nil { + t.Fatal(err) + } + defer c.CloseNow() + + assertPing(ctx, t, c, tbcapi.CmdPingRequest) + + tws := &tbcWs{ + conn: protocol.NewWSConn(c), + } + + var lastErr error + var response tbcapi.TxByIdResponse + for { + select { + case <-time.After(1 * time.Second): + case <-ctx.Done(): + t.Fatal(ctx.Err()) + } + err = tbcServer.TxIndexer(ctx, 0, 1000) + if err != nil { + t.Fatal(err) + } + lastErr = nil + txId := getRandomTxId(ctx, t, bitcoindContainer) + txIdBytes, err := hex.DecodeString(txId) + if err != nil { + t.Fatal(err) + } + + txIdBytes = append(txIdBytes, 8) + + slices.Reverse(txIdBytes) + + err = tbcapi.Write(ctx, tws.conn, "someid", 
tbcapi.TxByIdRequest{ + TxId: txIdBytes, + }) + if err != nil { + lastErr = err + continue + } + + var v protocol.Message + err = wsjson.Read(ctx, c, &v) + if err != nil { + lastErr = err + continue + } + + if v.Header.Command == tbcapi.CmdTxByIdResponse { + if err := json.Unmarshal(v.Payload, &response); err != nil { + t.Fatal(err) + } + + if response.Error == nil { + t.Fatal("expecting error") + } + + if response.Error != nil { + if !strings.Contains(response.Error.Message, "invalid tx id") { + t.Fatalf("incorrect error found: %s", response.Error.Message) + } + } + + break + } else { + lastErr = fmt.Errorf("received unexpected command: %s", v.Header.Command) + } + + } + + if lastErr != nil { + t.Fatal(lastErr) + } +} + +func createBitcoind(ctx context.Context, t *testing.T) testcontainers.Container { + id, err := randHexId(6) + if err != nil { + t.Fatal("failed to generate random id:", err) + } + + name := fmt.Sprintf("bitcoind-%s", id) + req := testcontainers.ContainerRequest{ + Image: "kylemanna/bitcoind", + Cmd: []string{"bitcoind", "-regtest=1", "-debug=1", "-rpcallowip=0.0.0.0/0", "-rpcbind=0.0.0.0:18443", "-txindex=1", "-noonion", "-listenonion=0"}, + ExposedPorts: []string{"18443", "18444"}, + WaitingFor: wait.ForLog("dnsseed thread exit").WithPollInterval(1 * time.Second), + LogConsumerCfg: &testcontainers.LogConsumerConfig{ + Consumers: []testcontainers.LogConsumer{&StdoutLogConsumer{ + Name: name, + }}, + }, + Name: name, + HostConfigModifier: func(hostConfig *container.HostConfig) { + hostConfig.PortBindings = nat.PortMap{ + "18443/tcp": []nat.PortBinding{ + { + HostPort: "18443", + }, + }, + "18444/tcp": []nat.PortBinding{ + { + HostPort: "18444", + }, + }, + } + }, + } + bitcoindContainer, err := testcontainers.GenericContainer(ctx, testcontainers.GenericContainerRequest{ + ContainerRequest: req, + Started: true, + }) + if err != nil { + t.Fatal(err) + } + + return bitcoindContainer +} + +func runBitcoinCommand(ctx context.Context, t *testing.T, 
bitcoindContainer testcontainers.Container, cmd []string) (string, error) { + exitCode, result, err := bitcoindContainer.Exec(ctx, cmd) + if err != nil { + return "", err + } + + buf := new(strings.Builder) + _, err = io.Copy(buf, result) + if err != nil { + return "", err + } + t.Logf(buf.String()) + + if exitCode != 0 { + return "", fmt.Errorf("error code received: %d", exitCode) + } + + // first 8 bytes are header, there is also a newline character at the end of the response + return buf.String()[8 : len(buf.String())-1], nil +} + +func getRandomTxId(ctx context.Context, t *testing.T, bitcoindContainer testcontainers.Container) string { + blockHash, err := runBitcoinCommand( + ctx, + t, + bitcoindContainer, + []string{ + "bitcoin-cli", + "-regtest=1", + "getblockhash", + fmt.Sprintf("%d", 1), + }) + if err != nil { + t.Fatal(err) + } + + blockJson, err := runBitcoinCommand( + ctx, + t, + bitcoindContainer, + []string{ + "bitcoin-cli", + "-regtest=1", + "getblock", + blockHash, + }) + if err != nil { + t.Fatal(err) + } + + var parsed struct { + Tx []string `json:"tx"` + } + if err := json.Unmarshal([]byte(blockJson), &parsed); err != nil { + t.Fatal(err) + } + + if len(parsed.Tx) == 0 { + t.Fatal("was expecting at least 1 transaction") + } + + return parsed.Tx[0] +} + +func getEndpointWithRetries(ctx context.Context, container testcontainers.Container, retries int) (string, error) { + backoff := 500 * time.Millisecond + var lastError error + for i := 0; i < retries; i++ { + endpoint, err := container.Endpoint(ctx, "") + if err != nil { + lastError = err + time.Sleep(backoff) + backoff = backoff * 2 + continue + } + return endpoint, nil + } + + return "", lastError +} + +func nextPort() int { + ports, err := freeport.GetFreePorts(1000) + if err != nil && err != context.Canceled { + panic(err) + } + + return ports[time.Now().Unix()%int64(len(ports))] +} + +func createTbcServer(ctx context.Context, t *testing.T, mappedPeerPort nat.Port) (*Server, string) { + wd, err 
:= os.Getwd() + if err != nil { + t.Fatal(err) + } + + home := fmt.Sprintf("%s/%s", wd, levelDbHome) + + if err := os.RemoveAll(home); err != nil { + t.Fatal(err) + } + tcbListenAddress := fmt.Sprintf(":%d", nextPort()) + + cfg := NewDefaultConfig() + cfg.LevelDBHome = home + cfg.Network = networkLocalnet + cfg.RegtestPort = mappedPeerPort.Port() + cfg.ListenAddress = tcbListenAddress + tbcServer, err := NewServer(cfg) + if err != nil { + t.Fatal(err) + } + + tbcServer.ignoreUlimit = true + + go func() { + err := tbcServer.Run(ctx) + if err != nil && err != context.Canceled { + panic(err) + } + }() + + // let tbc index + select { + case <-time.After(1 * time.Second): + case <-ctx.Done(): + t.Fatal(ctx.Err()) + } + + tbcUrl := fmt.Sprintf("http://localhost%s%s", tcbListenAddress, tbcapi.RouteWebsocket) + err = EnsureCanConnect(t, tbcUrl, 5*time.Second) + if err != nil { + t.Fatalf("could not connect to %s: %s", tbcUrl, err.Error()) + } + + return tbcServer, tbcUrl +} + +func EnsureCanConnect(t *testing.T, url string, timeout time.Duration) error { + ctx, cancel := context.WithTimeout(context.Background(), timeout) + defer cancel() + + t.Logf("connecting to %s", url) + + var err error + + doneCh := make(chan bool) + go func() { + for { + c, _, err := websocket.Dial(ctx, url, nil) + if err != nil { + time.Sleep(1 * time.Second) + continue + } + c.CloseNow() + doneCh <- true + } + }() + + select { + case <-doneCh: + case <-ctx.Done(): + return fmt.Errorf("timed out trying to reach WS server in tests, last error: %s", err) + } + + return nil +} + +func assertPing(ctx context.Context, t *testing.T, c *websocket.Conn, cmd protocol.Command) { + var v protocol.Message + err := wsjson.Read(ctx, c, &v) + if err != nil { + t.Fatal(err) + } + + if v.Header.Command != cmd { + t.Fatalf("unexpected command: %s", v.Header.Command) + } +} + +// BtcCliBlockHeader represents the block header structure used by bitcoin-cli. 
+type BtcCliBlockHeader struct { + Hash string `json:"hash"` + Confirmations int `json:"confirmations"` + Height uint32 `json:"height"` + Version uint64 `json:"version"` + VersionHex string `json:"versionHex"` + MerkleRoot string `json:"merkleroot"` + Time uint64 `json:"time"` + MedianTime uint64 `json:"mediantime"` + Nonce uint64 `json:"nonce"` + Bits string `json:"bits"` + Difficulty float64 `json:"difficulty"` + Chainwork string `json:"chainwork"` + NTx uint64 `json:"nTx"` + PreviousBlockHash string `json:"previousblockhash"` + NextBlockHash string `json:"nextblockhash"` +} + +// cliBlockHeaderToWire converts a bitcoin-cli block header to the +// [wire.BlockHeader] representation of the block header. +func cliBlockHeaderToWire(t *testing.T, header *BtcCliBlockHeader) *wire.BlockHeader { + prevBlockHash, err := chainhash.NewHashFromStr(header.PreviousBlockHash) + if err != nil { + t.Fatal(fmt.Errorf("convert prevBlockHash to chainhash: %w", err)) + } + merkleRoot, err := chainhash.NewHashFromStr(header.MerkleRoot) + if err != nil { + t.Fatal(fmt.Errorf("convert merkleRoot to chainhash: %w", err)) + } + bits, err := strconv.ParseUint(header.Bits, 16, 64) + if err != nil { + t.Fatal(fmt.Errorf("parse bits as uint: %w", err)) + } + + blockHeader := wire.NewBlockHeader( + int32(header.Version), + prevBlockHash, + merkleRoot, + uint32(bits), + uint32(header.Nonce), + ) + blockHeader.Timestamp = time.Unix(int64(header.Time), 0) + return blockHeader +} + +// cliBlockHeaderToRaw converts a bitcoin-cli block header to a slice containing +// the raw byte representation of the block header. 
+func cliBlockHeaderToRaw(t *testing.T, cliBlockHeader *BtcCliBlockHeader) []api.ByteSlice { + blockHeader := cliBlockHeaderToWire(t, cliBlockHeader) + t.Logf(spew.Sdump(blockHeader)) + + bytes, err := header2Bytes(blockHeader) + if err != nil { + t.Fatal(fmt.Errorf("header to bytes: %w", err)) + } + + return []api.ByteSlice{bytes} +} + +// cliBlockHeaderToTBC converts a bitcoin-cli block header to a slice containing +// the [tbcapi.BlockHeader] representation of the block header. +func cliBlockHeaderToTBC(t *testing.T, btcCliBlockHeader *BtcCliBlockHeader) []*tbcapi.BlockHeader { + blockHeader := cliBlockHeaderToWire(t, btcCliBlockHeader) + t.Logf(spew.Sdump(blockHeader)) + return wireBlockHeadersToTBC([]*wire.BlockHeader{blockHeader}) +} + +func bitcoindBlockAtHeight(ctx context.Context, t *testing.T, bitcoindContainer testcontainers.Container, height uint64) *BtcCliBlockHeader { + blockHash, err := runBitcoinCommand(ctx, t, bitcoindContainer, []string{ + "bitcoin-cli", + "-regtest=1", + "getblockhash", + fmt.Sprintf("%d", height), + }) + if err != nil { + t.Fatal(fmt.Errorf("bitcoin-cli getblockhash %d: %w", height, err)) + } + + return bitcoindBlockByHash(ctx, t, bitcoindContainer, blockHash) +} + +func bitcoindBestBlock(ctx context.Context, t *testing.T, bitcoindContainer testcontainers.Container) *BtcCliBlockHeader { + blockHash, err := runBitcoinCommand(ctx, t, bitcoindContainer, []string{ + "bitcoin-cli", + "-regtest=1", + "getbestblockhash", + }) + if err != nil { + t.Fatal(fmt.Errorf("bitcoin-cli getbestblockhash: %w", err)) + } + + return bitcoindBlockByHash(ctx, t, bitcoindContainer, blockHash) +} + +func bitcoindBlockByHash(ctx context.Context, t *testing.T, bitcoindContainer testcontainers.Container, blockHash string) *BtcCliBlockHeader { + blockHeaderJson, err := runBitcoinCommand( + ctx, t, bitcoindContainer, + []string{ + "bitcoin-cli", + "-regtest=1", + "getblockheader", + blockHash, + }) + if err != nil { + t.Fatal(fmt.Errorf("bitcoin-cli 
getblockheader: %w", err)) + } + + var btcCliBlockHeader BtcCliBlockHeader + if err = json.Unmarshal([]byte(blockHeaderJson), &btcCliBlockHeader); err != nil { + t.Fatal(fmt.Errorf("unmarshal json output: %w", err)) + } + + return &btcCliBlockHeader +} + +func createBitcoindWithInitialBlocks(ctx context.Context, t *testing.T, blocks uint64, overrideAddress string) (testcontainers.Container, nat.Port) { + t.Helper() + + bitcoindContainer := createBitcoind(ctx, t) + + _, _, btcAddress, err := bitcoin.KeysAndAddressFromHexString( + privateKey, + &chaincfg.RegressionNetParams, + ) + if err != nil { + t.Fatal(err) + } + + var address string + if overrideAddress != "" { + address = overrideAddress + } else { + address = btcAddress.EncodeAddress() + } + + _, err = runBitcoinCommand( + ctx, + t, + bitcoindContainer, + []string{ + "bitcoin-cli", + "-regtest=1", + "generatetoaddress", + strconv.FormatUint(blocks, 10), + address, + }) + if err != nil { + t.Fatal(err) + } + + return bitcoindContainer, nat.Port(localnetPort) +} diff --git a/service/tbc/ulimit_darwin.go b/service/tbc/ulimit_darwin.go new file mode 100644 index 000000000..8889dc0bc --- /dev/null +++ b/service/tbc/ulimit_darwin.go @@ -0,0 +1,65 @@ +// Copyright (c) 2024 Hemi Labs, Inc. +// Use of this source code is governed by the MIT License, +// which can be found in the LICENSE file. 
+ +//go:build darwin + +package tbc + +import ( + "fmt" + + "golang.org/x/sys/unix" +) + +var ( + resources = []int{ + unix.RLIMIT_AS, + unix.RLIMIT_MEMLOCK, + unix.RLIMIT_NOFILE, + unix.RLIMIT_NPROC, + } + resourceName = map[int]string{ + unix.RLIMIT_AS: "memory", + unix.RLIMIT_MEMLOCK: "lockedmem", + unix.RLIMIT_NOFILE: "nofiles", + unix.RLIMIT_NPROC: "processes", + } + resourceWant = map[int]unix.Rlimit{ + unix.RLIMIT_AS: {Cur: unix.RLIM_INFINITY, Max: unix.RLIM_INFINITY}, + unix.RLIMIT_MEMLOCK: {Cur: 775258112, Max: 775258112}, + unix.RLIMIT_NOFILE: {Cur: 16384, Max: 16384}, + unix.RLIMIT_NPROC: {Cur: 2666, Max: 2666}, + } +) + +const ulimitSupported = true + +func verifyUlimits() error { + var p int + for k, resource := range resources { + var limit unix.Rlimit + if err := unix.Getrlimit(resource, &limit); err != nil { + return fmt.Errorf("ulimit %v: %w", k, err) + } + + // Make sure it is a reasonable value + limitRequired := resourceWant[resource] + if limitRequired.Cur > limit.Cur || limitRequired.Max > limit.Max { + return fmt.Errorf("ulimit %v: limit too low got %v, need %v", + resourceName[resource], limit.Max, limitRequired.Max) + } + + // Echo to user + if err := unix.Getrlimit(resource, &limit); err != nil { + return fmt.Errorf("ulimit %v: %w", k, err) + } + if p == 0 { + log.Infof("%-16v %-22v %-22v", "set resource", "current", "minumum") + p++ + } + log.Infof("%-16v: %-22v %-22v", resourceName[resource], limit.Cur, + limitRequired.Max) + } + return nil +} diff --git a/service/tbc/ulimit_linux.go b/service/tbc/ulimit_linux.go new file mode 100644 index 000000000..5b0414493 --- /dev/null +++ b/service/tbc/ulimit_linux.go @@ -0,0 +1,69 @@ +// Copyright (c) 2024 Hemi Labs, Inc. +// Use of this source code is governed by the MIT License, +// which can be found in the LICENSE file. 
+ +//go:build linux + +package tbc + +import ( + "fmt" + "math" + + "golang.org/x/sys/unix" +) + +var ( + resources = []int{ + unix.RLIMIT_AS, + unix.RLIMIT_MEMLOCK, + unix.RLIMIT_NOFILE, + unix.RLIMIT_NPROC, + unix.RLIMIT_RSS, + } + resourceName = map[int]string{ + unix.RLIMIT_AS: "memory", + unix.RLIMIT_MEMLOCK: "lockedmem", + unix.RLIMIT_NOFILE: "nofiles", + unix.RLIMIT_NPROC: "processes", + unix.RLIMIT_RSS: "rss", + } + resourceWant = map[int]unix.Rlimit{ + unix.RLIMIT_AS: {Cur: unix.RLIM_INFINITY, Max: unix.RLIM_INFINITY}, + unix.RLIMIT_MEMLOCK: {Cur: 775258112, Max: 775258112}, + unix.RLIMIT_NOFILE: {Cur: 16384, Max: 16384}, + unix.RLIMIT_NPROC: {Cur: 4196, Max: 4196}, + unix.RLIMIT_RSS: {Cur: math.MaxUint64, Max: math.MaxUint64}, + } +) + +const ulimitSupported = true + +func verifyUlimits() error { + var p int + for k, resource := range resources { + var limit unix.Rlimit + if err := unix.Getrlimit(resource, &limit); err != nil { + return fmt.Errorf("ulimit %v: %w", k, err) + } + + // Make sure it is a reasonable value + limitRequired := resourceWant[resource] + if limitRequired.Cur > limit.Cur || limitRequired.Max > limit.Max { + return fmt.Errorf("ulimit %v: limit too low got %v, need %v", + resourceName[resource], limit.Max, limitRequired.Max) + } + + // Echo to user + if err := unix.Getrlimit(resource, &limit); err != nil { + return fmt.Errorf("ulimit %v: %w", k, err) + } + if p == 0 { + log.Infof("%-16v %-22v %-22v", "set resource", "current", "minumum") + p++ + } + log.Infof("%-16v: %-22v %-22v", resourceName[resource], limit.Cur, + limitRequired.Max) + } + return nil +} diff --git a/service/tbc/ulimit_other.go b/service/tbc/ulimit_other.go new file mode 100644 index 000000000..ecafcd1d2 --- /dev/null +++ b/service/tbc/ulimit_other.go @@ -0,0 +1,13 @@ +// Copyright (c) 2024 Hemi Labs, Inc. +// Use of this source code is governed by the MIT License, +// which can be found in the LICENSE file. 
+ +//go:build !linux && !darwin + +package tbc + +const ulimitSupported = false + +func verifyUlimits() error { + return nil +}