
Commit 52f1748

Merge pull request lightninglabs#805 from lightninglabs/prepare-refactor-part-2

[psbt saga part 2/4]: preparation: refactor send logic, make passive assets independent
guggero authored Mar 5, 2024
2 parents 4fbfc09 + 4035264 commit 52f1748
Showing 44 changed files with 1,673 additions and 1,638 deletions.
5 changes: 4 additions & 1 deletion itest/assertions.go
@@ -598,7 +598,10 @@ func VerifyProofBlob(t *testing.T, tapClient taprpc.TaprootAssetsClient,
return nil
}

snapshot, err := f.Verify(ctxt, headerVerifier, groupVerifier)
snapshot, err := f.Verify(
ctxt, headerVerifier, proof.DefaultMerkleVerifier,
groupVerifier,
)
require.NoError(t, err)

return f, snapshot
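The itest change above shows the central API shift in this commit: File.Verify now takes an explicit merkle verifier between the header and group verifiers. The sketch below illustrates the new argument order from a caller's perspective; the helper name is illustrative, while proof.DefaultMerkleVerifier and the verifier types are taken straight from the diff.

```go
package verifysketch

import (
	"context"

	"github.com/lightninglabs/taproot-assets/proof"
)

// verifyProofFile is an illustrative caller of the post-refactor Verify
// signature: header verifier, then merkle verifier, then group verifier.
func verifyProofFile(ctx context.Context, f *proof.File,
	headerVerifier proof.HeaderVerifier,
	groupVerifier proof.GroupVerifier) (*proof.AssetSnapshot, error) {

	// The merkle verifier must now be passed explicitly; production code
	// uses proof.DefaultMerkleVerifier, tests can use the mock added in
	// proof/mock.go further down.
	return f.Verify(
		ctx, headerVerifier, proof.DefaultMerkleVerifier,
		groupVerifier,
	)
}
```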
8 changes: 4 additions & 4 deletions itest/burn_test.go
@@ -146,7 +146,7 @@ func testBurnAssets(t *harnessTest) {
AssertAssetOutboundTransferWithOutputs(
t.t, minerClient, t.tapd, burnResp.BurnTransfer,
simpleAssetGen.AssetId,
[]uint64{outputAmounts[2] - burnAmt, burnAmt}, 1, 2, 2, true,
[]uint64{burnAmt, outputAmounts[2] - burnAmt}, 1, 2, 2, true,
)

// We'll now assert that the burned asset has the correct state.
@@ -230,7 +230,7 @@ func testBurnAssets(t *harnessTest) {
AssertAssetOutboundTransferWithOutputs(
t.t, minerClient, t.tapd, burnResp.BurnTransfer,
simpleAssetGen.AssetId,
[]uint64{changeAmt, multiBurnAmt}, 4, 5, 2, true,
[]uint64{multiBurnAmt, changeAmt}, 4, 5, 2, true,
)

// Our final asset balance should be reduced by both successful burn
@@ -270,7 +270,7 @@ func testBurnAssets(t *harnessTest) {
AssertAssetOutboundTransferWithOutputs(
t.t, minerClient, t.tapd, burnResp.BurnTransfer,
simpleGroupGen.AssetId,
[]uint64{simpleGroup.Amount - burnAmt, burnAmt}, 5, 6, 2, true,
[]uint64{burnAmt, simpleGroup.Amount - burnAmt}, 5, 6, 2, true,
)
AssertBalanceByID(
t.t, t.tapd, simpleGroupGen.AssetId, simpleGroup.Amount-burnAmt,
@@ -383,7 +383,7 @@ func testBurnGroupedAssets(t *harnessTest) {
// Assert that the asset burn transfer occurred correctly.
AssertAssetOutboundTransferWithOutputs(
t.t, miner, t.tapd, burnResp.BurnTransfer,
burnAssetID, []uint64{postBurnAmt, burnAmt}, 0, 1, 2, true,
burnAssetID, []uint64{burnAmt, postBurnAmt}, 0, 1, 2, true,
)

// Ensure that the burnt asset has the correct state.
15 changes: 2 additions & 13 deletions itest/psbt_test.go
@@ -133,11 +133,8 @@ func testPsbtScriptHashLockSend(t *harnessTest) {
genInfo.AssetId, []uint64{numUnits / 2, numUnits / 2}, 0, 1,
)

// This is an interactive/PSBT based transfer, so we do need to manually
// send the proof from the sender to the receiver because the proof
// courier address gets lost in the address->PSBT conversion.
_ = sendProof(t, bob, alice, sendResp, aliceAddr.ScriptKey, genInfo)
AssertNonInteractiveRecvComplete(t.t, alice, 1)
AssertAddrEvent(t.t, alice, aliceAddr, 1, statusCompleted)

aliceAssets, err := alice.ListAssets(ctxb, &taprpc.ListAssetRequest{
WithWitness: true,
@@ -259,11 +256,8 @@ func testPsbtScriptCheckSigSend(t *harnessTest) {
genInfo.AssetId, []uint64{numUnits / 2, numUnits / 2}, 0, 1,
)

// This is an interactive/PSBT based transfer, so we do need to manually
// send the proof from the sender to the receiver because the proof
// courier address gets lost in the address->PSBT conversion.
_ = sendProof(t, bob, alice, sendResp, aliceAddr.ScriptKey, genInfo)
AssertNonInteractiveRecvComplete(t.t, alice, 1)
AssertAddrEvent(t.t, alice, aliceAddr, 1, statusCompleted)

aliceAssets, err := alice.ListAssets(ctxb, &taprpc.ListAssetRequest{
WithWitness: true,
@@ -424,11 +418,6 @@ func runPsbtInteractiveFullValueSendTest(ctxt context.Context, t *harnessTest,

numOutputs := 1
amounts := []uint64{fullAmt}
if i == 0 {
// Account for the passive asset in the first transfer.
numOutputs = 2
amounts = []uint64{fullAmt, 0}
}
ConfirmAndAssertOutboundTransferWithOutputs(
t.t, t.lndHarness.Miner.Client, sender,
sendResp, genInfo.AssetId, amounts, i/2, (i/2)+1,
7 changes: 4 additions & 3 deletions proof/append.go
@@ -39,8 +39,8 @@ type TransitionParams struct {
// the proof for. This method returns both the encoded full provenance (proof
// chain) and the added latest proof.
func AppendTransition(blob Blob, params *TransitionParams,
headerVerifier HeaderVerifier, groupVerifier GroupVerifier) (Blob,
*Proof, error) {
headerVerifier HeaderVerifier, merkleVerifier MerkleVerifier,
groupVerifier GroupVerifier) (Blob, *Proof, error) {

// Decode the proof blob into a proper file structure first.
f := NewEmptyFile(V0)
@@ -78,7 +78,8 @@ func AppendTransition(blob Blob, params *TransitionParams,
if err := f.AppendProof(*newProof); err != nil {
return nil, nil, fmt.Errorf("error appending proof: %w", err)
}
if _, err := f.Verify(ctx, headerVerifier, groupVerifier); err != nil {
_, err = f.Verify(ctx, headerVerifier, merkleVerifier, groupVerifier)
if err != nil {
return nil, nil, fmt.Errorf("error verifying proof: %w", err)
}

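AppendTransition picks up the same parameter, slotted between the header and group verifiers, and still returns both the extended provenance blob and the newly added proof. Below is a short sketch of the new call shape using the exported mocks from proof/mock.go; the blob and params are assumed to be prepared elsewhere, as in the test file that follows.

```go
package appendsketch

import "github.com/lightninglabs/taproot-assets/proof"

// appendWithMerkleCheck is an illustrative wrapper around the post-refactor
// AppendTransition signature. Real callers would pass the production
// verifiers rather than the mocks.
func appendWithMerkleCheck(prevBlob proof.Blob,
	params *proof.TransitionParams) (proof.Blob, *proof.Proof, error) {

	// The merkle verifier is now a required argument, placed between the
	// header and group verifiers.
	return proof.AppendTransition(
		prevBlob, params, proof.MockHeaderVerifier,
		proof.MockMerkleVerifier, proof.MockGroupVerifier,
	)
}
```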
11 changes: 6 additions & 5 deletions proof/append_test.go
@@ -206,7 +206,7 @@ func runAppendTransitionTest(t *testing.T, assetType asset.Type, amt uint64,
// Append the new transition to the genesis blob.
transitionBlob, transitionProof, err := AppendTransition(
genesisBlob, transitionParams, MockHeaderVerifier,
MockGroupVerifier,
MockMerkleVerifier, MockGroupVerifier,
)
require.NoError(t, err)
require.Greater(t, len(transitionBlob), len(genesisBlob))
@@ -399,7 +399,7 @@ func runAppendTransitionTest(t *testing.T, assetType asset.Type, amt uint64,

split1Blob, split1Proof, err := AppendTransition(
transitionBlob, split1Params, MockHeaderVerifier,
MockGroupVerifier,
MockMerkleVerifier, MockGroupVerifier,
)
require.NoError(t, err)
require.Greater(t, len(split1Blob), len(transitionBlob))
@@ -441,7 +441,7 @@ func runAppendTransitionTest(t *testing.T, assetType asset.Type, amt uint64,

split2Blob, split2Proof, err := AppendTransition(
transitionBlob, split2Params, MockHeaderVerifier,
MockGroupVerifier,
MockMerkleVerifier, MockGroupVerifier,
)
require.NoError(t, err)
require.Greater(t, len(split2Blob), len(transitionBlob))
@@ -484,7 +484,7 @@ func runAppendTransitionTest(t *testing.T, assetType asset.Type, amt uint64,

split3Blob, split3Proof, err := AppendTransition(
transitionBlob, split3Params, MockHeaderVerifier,
MockGroupVerifier,
MockMerkleVerifier, MockGroupVerifier,
)
require.NoError(t, err)
require.Greater(t, len(split3Blob), len(transitionBlob))
@@ -544,7 +544,8 @@ func verifyBlob(t testing.TB, blob Blob) *AssetSnapshot {
require.NoError(t, f.Decode(bytes.NewReader(blob)))

finalSnapshot, err := f.Verify(
context.Background(), MockHeaderVerifier, MockGroupVerifier,
context.Background(), MockHeaderVerifier, MockMerkleVerifier,
MockGroupVerifier,
)
require.NoError(t, err)

22 changes: 13 additions & 9 deletions proof/archive.go
@@ -161,8 +161,8 @@ type Archiver interface {
// already be present, and we just update (replace) it with the new
// proof.
ImportProofs(ctx context.Context, headerVerifier HeaderVerifier,
groupVerifier GroupVerifier, replace bool,
proofs ...*AnnotatedProof) error
merkleVerifier MerkleVerifier, groupVerifier GroupVerifier,
replace bool, proofs ...*AnnotatedProof) error
}

// NotifyArchiver is an Archiver that also allows callers to subscribe to
@@ -625,7 +625,7 @@ func (f *FileArchiver) FetchProofs(_ context.Context,
//
// NOTE: This implements the Archiver interface.
func (f *FileArchiver) ImportProofs(_ context.Context,
_ HeaderVerifier, _ GroupVerifier, replace bool,
_ HeaderVerifier, _ MerkleVerifier, _ GroupVerifier, replace bool,
proofs ...*AnnotatedProof) error {

for _, proof := range proofs {
@@ -792,8 +792,9 @@ func (m *MultiArchiver) FetchProofs(ctx context.Context,
// outpoint of the first state transition will be used as the Genesis point.
// The final resting place of the asset will be used as the script key itself.
func (m *MultiArchiver) ImportProofs(ctx context.Context,
headerVerifier HeaderVerifier, groupVerifier GroupVerifier,
replace bool, proofs ...*AnnotatedProof) error {
headerVerifier HeaderVerifier, merkleVerifier MerkleVerifier,
groupVerifier GroupVerifier, replace bool,
proofs ...*AnnotatedProof) error {

// Before we import the proofs into the archive, we want to make sure
// that they're all valid. Along the way, we may augment the locator
@@ -802,7 +803,7 @@ func (m *MultiArchiver) ImportProofs(ctx context.Context,
// First, we'll decode and then also verify the proof.
finalStateTransition, err := m.proofVerifier.Verify(
c, bytes.NewReader(proof.Blob), headerVerifier,
groupVerifier,
merkleVerifier, groupVerifier,
)
if err != nil {
return fmt.Errorf("unable to verify proof: %w", err)
@@ -842,7 +843,8 @@ func (m *MultiArchiver) ImportProofs(ctx context.Context,
// to import each proof our archive backends.
for _, archive := range m.backends {
err := archive.ImportProofs(
ctx, headerVerifier, groupVerifier, replace, proofs...,
ctx, headerVerifier, merkleVerifier, groupVerifier,
replace, proofs...,
)
if err != nil {
return err
@@ -909,7 +911,8 @@ var _ NotifyArchiver = (*MultiArchiver)(nil)
// assets of the same ID. This is useful when we want to update the proof with a
// new one after a re-org.
func ReplaceProofInBlob(ctx context.Context, p *Proof, archive Archiver,
headerVerifier HeaderVerifier, groupVerifier GroupVerifier) error {
headerVerifier HeaderVerifier, merkleVerifier MerkleVerifier,
groupVerifier GroupVerifier) error {

// This is a bit of a hacky part. If we have a chain of transactions
// that were re-organized, we can't verify the whole chain until all of
@@ -987,7 +990,8 @@ func ReplaceProofInBlob(ctx context.Context, p *Proof, archive Archiver,
Blob: buf.Bytes(),
}
err = archive.ImportProofs(
ctx, headerVerifier, groupVerifier, true, directProof,
ctx, headerVerifier, merkleVerifier, groupVerifier,
true, directProof,
)
if err != nil {
return fmt.Errorf("unable to import updated proof: %w",
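The Archiver interface and both of its implementations follow the same pattern: the merkle verifier sits between the header and group verifiers, ahead of the replace flag and the variadic proofs. Below is a caller-side sketch under that signature; the archive and annotated proof are assumed to exist, and the mocks stand in for the production verifiers.

```go
package archivesketch

import (
	"context"
	"fmt"

	"github.com/lightninglabs/taproot-assets/proof"
)

// importOne is an illustrative call of the post-refactor ImportProofs
// signature against any proof.Archiver implementation.
func importOne(ctx context.Context, archive proof.Archiver,
	p *proof.AnnotatedProof) error {

	// replace=false: the proof is expected to be new rather than an
	// update of an already stored proof.
	err := archive.ImportProofs(
		ctx, proof.MockHeaderVerifier, proof.MockMerkleVerifier,
		proof.MockGroupVerifier, false, p,
	)
	if err != nil {
		return fmt.Errorf("unable to import proof: %w", err)
	}

	return nil
}
```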
27 changes: 15 additions & 12 deletions proof/archive_test.go
@@ -62,16 +62,16 @@ func TestFileArchiverProofCollision(t *testing.T) {
blob2 = []byte("this is the second blob")
)
err = fileArchive.ImportProofs(
ctx, MockHeaderVerifier, MockGroupVerifier, false,
&AnnotatedProof{
ctx, MockHeaderVerifier, MockMerkleVerifier, MockGroupVerifier,
false, &AnnotatedProof{
Locator: locator1,
Blob: blob1,
},
)
require.NoError(t, err)
err = fileArchive.ImportProofs(
ctx, MockHeaderVerifier, MockGroupVerifier, false,
&AnnotatedProof{
ctx, MockHeaderVerifier, MockMerkleVerifier, MockGroupVerifier,
false, &AnnotatedProof{
Locator: locator2,
Blob: blob2,
},
@@ -190,7 +190,8 @@ func TestFileArchiver(t *testing.T) {

err = archive.ImportProofs(
ctx, MockHeaderVerifier,
MockGroupVerifier, false, proof,
MockMerkleVerifier, MockGroupVerifier,
false, proof,
)

if testCase.expectedStoreError != nil {
@@ -323,14 +324,16 @@ func TestMigrateOldFileNames(t *testing.T) {
// We should be able to import a new proof, and it should be stored
// under the new naming scheme.
proof6 := RandProof(t, genesis2, scriptKey2, oddTxBlock, 2, 1)
err = fileArchive.ImportProofs(nil, nil, nil, false, &AnnotatedProof{
Locator: Locator{
AssetID: fn.Ptr(proof6.Asset.ID()),
ScriptKey: *proof6.Asset.ScriptKey.PubKey,
OutPoint: fn.Ptr(proof6.OutPoint()),
err = fileArchive.ImportProofs(
nil, nil, nil, nil, false, &AnnotatedProof{
Locator: Locator{
AssetID: fn.Ptr(proof6.Asset.ID()),
ScriptKey: *proof6.Asset.ScriptKey.PubKey,
OutPoint: fn.Ptr(proof6.OutPoint()),
},
Blob: toFileBlob(proof6),
},
Blob: toFileBlob(proof6),
})
)
require.NoError(t, err)
assertProofAtNewName(proof6)
}
5 changes: 2 additions & 3 deletions proof/courier_test.go
@@ -47,9 +47,8 @@ func (m *mockProofArchive) FetchProofs(ctx context.Context,
return nil, fmt.Errorf("not implemented")
}

func (m *mockProofArchive) ImportProofs(ctx context.Context,
headerVerifier HeaderVerifier, groupVerifier GroupVerifier,
replace bool, proofs ...*AnnotatedProof) error {
func (m *mockProofArchive) ImportProofs(context.Context, HeaderVerifier,
MerkleVerifier, GroupVerifier, bool, ...*AnnotatedProof) error {

return fmt.Errorf("not implemented")
}
7 changes: 5 additions & 2 deletions proof/mint.go
@@ -230,7 +230,8 @@ func WithSiblingPreimage(
// serialized proof files, which proves the creation/existence of each of the
// assets within the batch.
func NewMintingBlobs(params *MintParams, headerVerifier HeaderVerifier,
groupVerifier GroupVerifier, anchorVerifier GroupAnchorVerifier,
merkleVerifier MerkleVerifier, groupVerifier GroupVerifier,
anchorVerifier GroupAnchorVerifier,
blobOpts ...MintingBlobOption) (AssetProofs, error) {

opts := defaultMintingBlobOpts()
@@ -256,7 +257,9 @@ func NewMintingBlobs(params *MintParams, headerVerifier HeaderVerifier,
for key := range proofs {
proof := proofs[key]

_, err := proof.Verify(ctx, nil, headerVerifier, groupVerifier)
_, err := proof.Verify(
ctx, nil, headerVerifier, merkleVerifier, groupVerifier,
)
if err != nil {
return nil, fmt.Errorf("invalid proof file generated: "+
"%w", err)
4 changes: 2 additions & 2 deletions proof/mint_test.go
@@ -127,8 +127,8 @@ func TestNewMintingBlobs(t *testing.T) {
}},
},
GenesisPoint: genesisTx.TxIn[0].PreviousOutPoint,
}, MockHeaderVerifier, MockGroupVerifier, MockGroupAnchorVerifier,
WithAssetMetaReveals(metaReveals),
}, MockHeaderVerifier, MockMerkleVerifier, MockGroupVerifier,
MockGroupAnchorVerifier, WithAssetMetaReveals(metaReveals),
)
require.NoError(t, err)
}
10 changes: 7 additions & 3 deletions proof/mock.go
@@ -156,9 +156,8 @@ func NewMockVerifier(t *testing.T) *MockVerifier {
}
}

func (m *MockVerifier) Verify(_ context.Context, _ io.Reader,
headerVerifier HeaderVerifier,
groupVerifier GroupVerifier) (*AssetSnapshot, error) {
func (m *MockVerifier) Verify(context.Context, io.Reader,
HeaderVerifier, MerkleVerifier, GroupVerifier) (*AssetSnapshot, error) {

return &AssetSnapshot{
Asset: &asset.Asset{
@@ -180,6 +179,11 @@ func MockHeaderVerifier(header wire.BlockHeader, height uint32) error {
return nil
}

// MockMerkleVerifier is a mock verifier which approves of all merkle proofs.
func MockMerkleVerifier(*wire.MsgTx, *TxMerkleProof, [32]byte) error {
return nil
}

// MockGroupVerifier is a mock verifier which approves of all group keys.
//
// Group key verification usually involves having imported the group anchor
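MockMerkleVerifier approves every merkle inclusion proof, which keeps unit tests independent of real block data. The sketch below adds a hypothetical failing counterpart and compile-time checks that both match the exported MerkleVerifier function type; errNoMerkleData and failingMerkleVerifier are illustrative names, not part of the repository.

```go
package mocksketch

import (
	"errors"

	"github.com/btcsuite/btcd/wire"
	"github.com/lightninglabs/taproot-assets/proof"
)

// errNoMerkleData is a hypothetical sentinel returned by the failing mock.
var errNoMerkleData = errors.New("no merkle proof data")

// failingMerkleVerifier rejects every proof, useful when a test wants to
// assert that merkle verification failures are surfaced to the caller.
func failingMerkleVerifier(*wire.MsgTx, *proof.TxMerkleProof,
	[32]byte) error {

	return errNoMerkleData
}

// Both the permissive mock and the failing variant share the shape of
// proof.MerkleVerifier: anchor transaction, merkle inclusion proof, and the
// block's merkle root.
var (
	_ proof.MerkleVerifier = proof.MockMerkleVerifier
	_ proof.MerkleVerifier = failingMerkleVerifier
)
```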