Merge pull request #992 from lightninglabs/marshal_batch_fixes
tapgarden: list batches correctly after asset transfer
Roasbeef authored Jul 10, 2024
2 parents 3e07a5b + 62b431e commit 5236dea
Showing 15 changed files with 869 additions and 228 deletions.
91 changes: 90 additions & 1 deletion itest/assets_test.go
@@ -5,6 +5,8 @@ import (
"context"
"crypto/tls"
"net/http"
"slices"
"strings"
"time"

"github.com/btcsuite/btcd/btcec/v2"
@@ -24,6 +26,7 @@ import (
"github.com/stretchr/testify/require"
"golang.org/x/exp/maps"
"golang.org/x/net/http2"
"google.golang.org/protobuf/proto"
)

var (
@@ -438,7 +441,6 @@ func testMintAssetsWithTapscriptSibling(t *harnessTest) {
rpcIssuableAssets := MintAssetsConfirmBatch(
t.t, t.lndHarness.Miner.Client, t.tapd, issuableAssets,
)

AssertAssetBalances(t.t, t.tapd, rpcSimpleAssets, rpcIssuableAssets)

// Filter the managed UTXOs to select the genesis UTXO with the
@@ -528,3 +530,90 @@ func testMintAssetsWithTapscriptSibling(t *harnessTest) {
t.lndHarness.MineBlocksAndAssertNumTxes(1, 1)
t.lndHarness.AssertNumUTXOsWithConf(t.lndHarness.Bob, 1, 1, 1)
}

// testMintBatchAndTransfer tests that we can mint a batch of assets, observe
// the finalized batch state, and observe the same batch state after a transfer
// of an asset from the batch.
func testMintBatchAndTransfer(t *harnessTest) {
ctxb := context.Background()
rpcSimpleAssets := MintAssetsConfirmBatch(
t.t, t.lndHarness.Miner.Client, t.tapd, simpleAssets,
)

// List the batch right after minting.
originalBatches, err := t.tapd.ListBatches(
ctxb, &mintrpc.ListBatchRequest{},
)
require.NoError(t.t, err)

// We'll make a second node now that'll be the receiver of all the
// assets made above.
secondTapd := setupTapdHarness(
t.t, t, t.lndHarness.Bob, t.universeServer,
)
defer func() {
require.NoError(t.t, secondTapd.stop(!*noDelete))
}()

// In order to force a split, we don't try to send the full first asset.
a := rpcSimpleAssets[0]
addr, events := NewAddrWithEventStream(
t.t, secondTapd, &taprpc.NewAddrRequest{
AssetId: a.AssetGenesis.AssetId,
Amt: a.Amount - 1,
AssetVersion: a.Version,
},
)

AssertAddrCreated(t.t, secondTapd, a, addr)

sendResp, sendEvents := sendAssetsToAddr(t, t.tapd, addr)
sendRespJSON, err := formatProtoJSON(sendResp)
require.NoError(t.t, err)

t.Logf("Got response from sending assets: %v", sendRespJSON)

// Make sure that eventually we see a single event for the
// address.
AssertAddrEvent(t.t, secondTapd, addr, 1, statusDetected)

// Mine a block to make sure the events are marked as confirmed.
MineBlocks(t.t, t.lndHarness.Miner.Client, 1, 1)

// Eventually the event should be marked as confirmed.
AssertAddrEvent(t.t, secondTapd, addr, 1, statusConfirmed)

// Make sure we have imported and finalized all proofs.
AssertNonInteractiveRecvComplete(t.t, secondTapd, 1)
AssertSendEventsComplete(t.t, addr.ScriptKey, sendEvents)

// Make sure the receiver has received all events in order for
// the address.
AssertReceiveEvents(t.t, addr, events)

afterBatches, err := t.tapd.ListBatches(
ctxb, &mintrpc.ListBatchRequest{},
)
require.NoError(t.t, err)

// The batch listed after the transfer should be identical to the batch
// listed before the transfer.
require.Equal(
t.t, len(originalBatches.Batches), len(afterBatches.Batches),
)

originalBatch := originalBatches.Batches[0].Batch
afterBatch := afterBatches.Batches[0].Batch

// Sort the assets from the listed batch before comparison.
slices.SortFunc(originalBatch.Assets,
func(a, b *mintrpc.PendingAsset) int {
return strings.Compare(a.Name, b.Name)
})
slices.SortFunc(afterBatch.Assets,
func(a, b *mintrpc.PendingAsset) int {
return strings.Compare(a.Name, b.Name)
})

require.True(t.t, proto.Equal(originalBatch, afterBatch))
}
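
The comparison at the end of testMintBatchAndTransfer sorts the pending assets by name and then uses proto.Equal rather than require.Equal, since the order of assets returned by ListBatches isn't guaranteed to be stable across the transfer, and generated protobuf messages carry internal state that makes reflect-based equality unreliable. A minimal standalone sketch of that comparison pattern (not part of this commit), using wrapperspb.StringValue as a stand-in for the real mintrpc batch messages:

package main

import (
	"fmt"

	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/types/known/wrapperspb"
)

func main() {
	// Two messages with the same semantic content.
	a := wrapperspb.String("batch")
	b := wrapperspb.String("batch")

	// proto.Equal compares the message contents field by field, ignoring
	// any internal bookkeeping the generated types carry.
	fmt.Println(proto.Equal(a, b)) // true
}
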
4 changes: 4 additions & 0 deletions itest/test_list_on_test.go
@@ -13,6 +13,10 @@ var testCases = []*testCase{
name: "mint batch resume",
test: testMintBatchResume,
},
{
name: "mint batch and transfer",
test: testMintBatchAndTransfer,
},
{
name: "asset meta validation",
test: testAssetMeta,
112 changes: 111 additions & 1 deletion proof/archive.go
@@ -145,6 +145,13 @@ type Archiver interface {
// specific fields need to be set in the Locator (e.g. the OutPoint).
FetchProof(ctx context.Context, id Locator) (Blob, error)

// FetchIssuanceProof fetches the issuance proof for an asset, given the
// anchor point of the issuance (NOT the genesis point for the asset).
//
// If a proof cannot be found, then ErrProofNotFound should be returned.
FetchIssuanceProof(ctx context.Context, id asset.ID,
anchorOutpoint wire.OutPoint) (Blob, error)

// HasProof returns true if the proof for the given locator exists. This
// is intended to be a performance optimized lookup compared to fetching
// a proof and checking for ErrProofNotFound.
@@ -385,6 +392,7 @@ func lookupProofFilePath(rootPath string, loc Locator) (string, error) {
assetID := hex.EncodeToString(loc.AssetID[:])
scriptKey := hex.EncodeToString(loc.ScriptKey.SerializeCompressed())

// TODO(jhb): Check for correct file suffix and truncated outpoint?
searchPattern := filepath.Join(rootPath, assetID, scriptKey+"*")
matches, err := filepath.Glob(searchPattern)
if err != nil {
@@ -529,6 +537,78 @@ func (f *FileArchiver) FetchProof(_ context.Context, id Locator) (Blob, error) {
return proofFile, nil
}

// FetchIssuanceProof fetches the issuance proof for an asset, given the
// anchor point of the issuance (NOT the genesis point for the asset).
//
// If a proof cannot be found, then ErrProofNotFound should be returned.
//
// NOTE: This implements the Archiver interface.
func (f *FileArchiver) FetchIssuanceProof(ctx context.Context, id asset.ID,
anchorOutpoint wire.OutPoint) (Blob, error) {

// Construct a pattern to search for the issuance proof file. We'll
// leave the script key unspecified, as we don't know what the script
// key was at genesis.
assetID := hex.EncodeToString(id[:])
scriptKeyGlob := strings.Repeat("?", 2*btcec.PubKeyBytesLenCompressed)
truncatedHash := anchorOutpoint.Hash.String()[:outpointTruncateLength]

fileName := fmt.Sprintf("%s-%s-%d.%s",
scriptKeyGlob, truncatedHash, anchorOutpoint.Index,
TaprootAssetsFileEnding)

searchPattern := filepath.Join(f.proofPath, assetID, fileName)
matches, err := filepath.Glob(searchPattern)
if err != nil {
return nil, fmt.Errorf("error listing proof files: %w", err)
}
if len(matches) == 0 {
return nil, ErrProofNotFound
}

// We expect exactly one matching proof for a specific asset ID and
// outpoint. However, the proof file path uses the truncated outpoint,
// so an asset transfer with a collision in the first half of the TXID
// could also match. We can filter out such proof files by size: the
// issuance proof file contains only the genesis proof, so it will be
// smaller than any proof file that was extended by later transfers.
proofFiles := make([]Blob, 0, len(matches))
for _, path := range matches {
proofFile, err := os.ReadFile(path)

switch {
case os.IsNotExist(err):
return nil, ErrProofNotFound

case err != nil:
return nil, fmt.Errorf("unable to find proof: %w", err)
}

proofFiles = append(proofFiles, proofFile)
}

switch {
// No proofs were read.
case len(proofFiles) == 0:
return nil, ErrProofNotFound

// Exactly one proof, we'll return it.
case len(proofFiles) == 1:
return proofFiles[0], nil

// Multiple proofs, return the smallest one.
default:
minProofIdx := 0
minProofSize := len(proofFiles[minProofIdx])
for idx, proof := range proofFiles {
if len(proof) < minProofSize {
minProofSize = len(proof)
minProofIdx = idx
}
}

return proofFiles[minProofIdx], nil
}
}

// HasProof returns true if the proof for the given locator exists. This is
// intended to be a performance optimized lookup compared to fetching a proof
// and checking for ErrProofNotFound.
@@ -704,10 +784,13 @@ func (f *FileArchiver) RemoveSubscriber(
return f.eventDistributor.RemoveSubscriber(subscriber)
}

// A compile-time interface to ensure FileArchiver meets the NotifyArchiver
// A compile-time assertion to ensure FileArchiver meets the NotifyArchiver
// interface.
var _ NotifyArchiver = (*FileArchiver)(nil)

// A compile-time assertion to ensure FileArchiver meets the Archiver interface.
var _ Archiver = (*FileArchiver)(nil)

// MultiArchiver is an archive of archives. It contains several archives and
// attempts to use them either as a look-aside cache, or a write through cache
// for all incoming requests.
@@ -763,6 +846,33 @@ func (m *MultiArchiver) FetchProof(ctx context.Context,
return nil, ErrProofNotFound
}

// FetchIssuanceProof fetches the issuance proof for an asset, given the
// anchor point of the issuance (NOT the genesis point for the asset).
func (m *MultiArchiver) FetchIssuanceProof(ctx context.Context,
id asset.ID, anchorOutpoint wire.OutPoint) (Blob, error) {

// Iterate through all our active backends and try to see if at least
// one of them contains the proof. Either one of them will have the
// proof, or we'll return an error back to the user.
for _, archive := range m.backends {
proof, err := archive.FetchIssuanceProof(
ctx, id, anchorOutpoint,
)

switch {
case errors.Is(err, ErrProofNotFound):
continue

case err != nil:
return nil, err
}

return proof, nil
}

return nil, ErrProofNotFound
}

// HasProof returns true if the proof for the given locator exists. This is
// intended to be a performance optimized lookup compared to fetching a proof
// and checking for ErrProofNotFound. The multi archiver only considers a proof
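
To make the file lookup in FileArchiver.FetchIssuanceProof concrete, here is a small standalone sketch (not from this commit) of the glob pattern it builds, with hypothetical values for the proof directory, file suffix, and TXID truncation length; the real code uses the package's TaprootAssetsFileEnding and outpointTruncateLength constants. Each "?" in a filepath.Glob pattern matches exactly one character, so the repeated "?" run stands in for any hex-encoded 33-byte compressed script key.

package main

import (
	"fmt"
	"path/filepath"
	"strings"
)

func main() {
	// Hypothetical stand-ins for the values the archiver derives from the
	// asset ID and anchor outpoint.
	assetID := strings.Repeat("ab", 32)       // 32-byte asset ID, hex encoded
	scriptKeyGlob := strings.Repeat("?", 66)  // any 33-byte compressed pubkey, hex encoded
	truncatedHash := strings.Repeat("cd", 16) // truncated anchor TXID prefix
	outputIndex := uint32(1)
	fileEnding := "proof" // placeholder for TaprootAssetsFileEnding

	fileName := fmt.Sprintf("%s-%s-%d.%s",
		scriptKeyGlob, truncatedHash, outputIndex, fileEnding)

	pattern := filepath.Join("/tmp/proofs", assetID, fileName)

	// filepath.Glob returns every file under the asset's directory whose
	// name matches the pattern; the caller then filters multiple matches
	// by size, as FetchIssuanceProof does above.
	matches, err := filepath.Glob(pattern)
	fmt.Println(pattern, len(matches), err)
}
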
50 changes: 5 additions & 45 deletions proof/courier_test.go
@@ -3,7 +3,6 @@ package proof
import (
"bytes"
"context"
"fmt"
"testing"

"github.com/lightninglabs/taproot-assets/asset"
@@ -12,52 +11,10 @@ import (
"github.com/stretchr/testify/require"
)

type mockProofArchive struct {
proofs map[Locator]Blob
}

func newMockProofArchive() *mockProofArchive {
return &mockProofArchive{
proofs: make(map[Locator]Blob),
}
}

func (m *mockProofArchive) FetchProof(ctx context.Context,
id Locator) (Blob, error) {

proof, ok := m.proofs[id]
if !ok {
return nil, ErrProofNotFound
}

return proof, nil
}

func (m *mockProofArchive) HasProof(ctx context.Context,
id Locator) (bool, error) {

_, ok := m.proofs[id]

return ok, nil
}

func (m *mockProofArchive) FetchProofs(ctx context.Context,
id asset.ID) ([]*AnnotatedProof, error) {

return nil, fmt.Errorf("not implemented")
}

func (m *mockProofArchive) ImportProofs(context.Context, HeaderVerifier,
MerkleVerifier, GroupVerifier, ChainLookupGenerator, bool,
...*AnnotatedProof) error {

return fmt.Errorf("not implemented")
}

// TestUniverseRpcCourierLocalArchiveShortCut tests that the local archive is
// used as a shortcut to fetch a proof if it's available.
func TestUniverseRpcCourierLocalArchiveShortCut(t *testing.T) {
localArchive := newMockProofArchive()
localArchive := NewMockProofArchive()

testBlocks := readTestData(t)
oddTxBlock := testBlocks[0]
@@ -79,7 +36,10 @@ func TestUniverseRpcCourierLocalArchiveShortCut(t *testing.T) {
ScriptKey: *proof.Asset.ScriptKey.PubKey,
OutPoint: fn.Ptr(proof.OutPoint()),
}
localArchive.proofs[locator] = proofBlob
locHash, err := locator.Hash()
require.NoError(t, err)

localArchive.proofs.Store(locHash, proofBlob)

courier := &UniverseRpcCourier{
recipient: Recipient{},
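
The test above now relies on a shared NewMockProofArchive whose proofs field is keyed by the locator's hash and written to with Store, which reads like a sync.Map. A rough standalone sketch of that shape (not from this commit), with simplified stand-ins for the real proof.Locator and proof.Blob types, whose actual mock may differ in detail:

package main

import (
	"crypto/sha256"
	"fmt"
	"sync"
)

type blob []byte

type locator struct {
	assetID   [32]byte
	scriptKey [33]byte
}

// hash derives a stable, comparable map key from the locator fields.
func (l locator) hash() [32]byte {
	h := sha256.New()
	h.Write(l.assetID[:])
	h.Write(l.scriptKey[:])

	var out [32]byte
	copy(out[:], h.Sum(nil))
	return out
}

type mockProofArchive struct {
	proofs sync.Map // [32]byte -> blob
}

func (m *mockProofArchive) store(loc locator, b blob) {
	m.proofs.Store(loc.hash(), b)
}

func (m *mockProofArchive) fetch(loc locator) (blob, bool) {
	val, ok := m.proofs.Load(loc.hash())
	if !ok {
		return nil, false
	}

	return val.(blob), true
}

func main() {
	var archive mockProofArchive

	loc := locator{}
	archive.store(loc, blob{0x01, 0x02})

	proof, ok := archive.fetch(loc)
	fmt.Println(ok, len(proof))
}
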
