diff --git a/asset/generators.go b/asset/generators.go
index 228b66637..2af47d0a1 100644
--- a/asset/generators.go
+++ b/asset/generators.go
@@ -111,7 +111,21 @@ var (
             ScriptKey: SerializedKeyGen.Draw(t, "script_key"),
         }
     })
-    GenesisGen = rapid.Make[Genesis]()
+    GenesisGen = rapid.Custom(func(t *rapid.T) Genesis {
+        return Genesis{
+            FirstPrevOut: OutPointGen.Draw(t, "first_prev_out"),
+            Tag: rapid.StringN(
+                -1, -1, MaxAssetNameLength,
+            ).Draw(t, "tag"),
+            MetaHash: rapid.Make[[32]byte]().Draw(
+                t, "meta_hash",
+            ),
+            OutputIndex: rapid.Uint32().Draw(t, "output_index"),
+            Type: Type(rapid.IntRange(0, 1).Draw(
+                t, "asset_type"),
+            ),
+        }
+    })
     SplitRootGen = rapid.Custom(func(t *rapid.T) mssmt.BranchNode {
         return *mssmt.NewComputedBranch(
             mssmt.NodeHash(HashBytesGen.Draw(t, "split_root_hash")),
diff --git a/tapchannel/aux_funding_controller.go b/tapchannel/aux_funding_controller.go
index 3a298501d..7f826642c 100644
--- a/tapchannel/aux_funding_controller.go
+++ b/tapchannel/aux_funding_controller.go
@@ -74,6 +74,12 @@ const (
     // maxNumHTLCsPerParty is the maximum number of HTLCs that can be added
     // by a single party to a channel.
     maxNumHTLCsPerParty = maxNumHTLCs / 2
+
+    // proofChunkSize is the chunk size of proofs, in the case that a proof
+    // is too large to be sent in a single message. Since the max lnwire
+    // message is 64k bytes, we leave some breathing room for the chunk
+    // metadata.
+    proofChunkSize = 60_000
 )

 // ErrorReporter is used to report an error back to the caller and/or peer that
@@ -420,7 +426,9 @@ type pendingAssetFunding struct {
     fundingAckChan chan bool

     fundingFinalizedSignal chan struct{}
-    finalizedCloseOnce sync.Once
+
+    finalizedCloseOnce sync.Once
+    inputProofChunks   map[chainhash.Hash][]cmsg.ProofChunk
 }

 // addInputProof adds a new proof to the set of proofs that'll be used to fund
@@ -461,6 +469,37 @@ func (p *pendingAssetFunding) addToFundingCommitment(a *asset.Asset) error {
     return p.fundingAssetCommitment.Merge(newCommitment)
 }

+// addInputProofChunk adds a new proof chunk to the set of proof chunks that'll
+// be processed. If this is the last chunk for this proof, then the fully
+// assembled proof is returned.
+func (p *pendingAssetFunding) addInputProofChunk(chunk cmsg.ProofChunk,
+) lfn.Result[lfn.Option[proof.Proof]] {
+
+    type ret = proof.Proof
+
+    // Collect this proof chunk with the rest of the proofs.
+    chunkID := chunk.ChunkSumID.Val
+
+    proofChunks := p.inputProofChunks[chunkID]
+    proofChunks = append(proofChunks, chunk)
+    p.inputProofChunks[chunkID] = proofChunks
+
+    // If this isn't the last chunk, then we can just return None and exit.
+    if !chunk.Last.Val {
+        return lfn.Ok(lfn.None[ret]())
+    }
+
+    // Otherwise, this is the last chunk, so we'll extract all the chunks
+    // and assemble the final proof.
+    finalProof, err := cmsg.AssembleProofChunks(proofChunks)
+    if err != nil {
+        return lfn.Errf[lfn.Option[ret]]("unable to "+
+            "assemble proof chunks: %w", err)
+    }
+
+    return lfn.Ok(lfn.Some(*finalProof))
+}
+
 // newCommitBlobAndLeaves creates a new commitment blob that'll be stored in
 // the channel state for the specified party.
func newCommitBlobAndLeaves(pendingFunding *pendingAssetFunding, @@ -699,6 +738,9 @@ func (f *fundingFlowIndex) fromMsg(chainParams *address.ChainParams, amt: assetProof.Amt().UnwrapOr(0), fundingAckChan: make(chan bool, 1), fundingFinalizedSignal: make(chan struct{}), + inputProofChunks: make( + map[chainhash.Hash][]cmsg.ProofChunk, + ), } (*f)[pid] = assetFunding } @@ -827,15 +869,33 @@ func (f *FundingController) sendInputOwnershipProofs(peerPub btcec.PublicKey, log.Tracef("Sending input ownership proof to remote party: %x", proofBytes) - inputProof := cmsg.NewTxAssetInputProof( - fundingState.pid, *fundingState.inputProofs[i], - ) + inputProof := fundingState.inputProofs[i] + inputAsset := inputProof.Asset - // Finally, we'll send the proof to the remote peer. - err := f.cfg.PeerMessenger.SendMessage(ctx, peerPub, inputProof) + // For each proof, we'll chunk them up optimistically to make + // sure we'll never exceed the upper message limit. + proofChunks, err := cmsg.CreateProofChunks( + *inputProof, proofChunkSize, + ) if err != nil { - return fmt.Errorf("unable to send proof to peer: %w", - err) + return fmt.Errorf("unable to create proof "+ + "chunks: %w", err) + } + + for _, proofChunk := range proofChunks { + inputProof := cmsg.NewTxAssetInputProof( + fundingState.pid, inputAsset.ID(), + inputAsset.Amount, proofChunk, + ) + + // Finally, we'll send the proof to the remote peer. + err := f.cfg.PeerMessenger.SendMessage( + ctx, peerPub, inputProof, + ) + if err != nil { + return fmt.Errorf("unable to send "+ + "proof to peer: %w", err) + } } } @@ -1295,9 +1355,27 @@ func (f *FundingController) processFundingMsg(ctx context.Context, // This is input proof, so we'll verify the challenge witness, then // store the proof. case *cmsg.TxAssetInputProof: + // By default, we'll get chunks of the proof sent to us. So + // we'll add this set to the chunks, then proceed but only if we + // have all the chunks. + finalProof, err := assetFunding.addInputProofChunk( + assetProof.ProofChunk.Val, + ).Unpack() + if err != nil { + return tempPID, fmt.Errorf("unable to add input proof "+ + "chunk: %w", err) + } + + // If there's no final proof yet, we can just return early. + if finalProof.IsNone() { + return tempPID, nil + } + + // Otherwise, we have all the proofs we need. + // // Before we proceed, we'll make sure that we already know of // the genesis proof for the incoming asset. - _, err := f.cfg.AssetSyncer.QueryAssetInfo( + _, err = f.cfg.AssetSyncer.QueryAssetInfo( ctx, assetProof.AssetID.Val, ) if err != nil { @@ -1305,35 +1383,39 @@ func (f *FundingController) processFundingMsg(ctx context.Context, "proof for asset_id=%v: %w", assetProof.AssetID.Val, err) } + err = lfn.MapOptionZ(finalProof, func(p proof.Proof) error { + log.Infof("Validating input proof, prev_out=%v", + p.OutPoint()) + + l, err := f.cfg.ChainBridge.GenProofChainLookup(&p) + if err != nil { + return fmt.Errorf("unable to create proof "+ + "lookup: %w", err) + } - p := assetProof.Proof.Val - log.Infof("Validating input proof, prev_out=%v", p.OutPoint()) + // Next, we'll validate this proof to make sure that the + // initiator is actually able to spend these outputs in + // the funding transaction. 
+ _, err = p.Verify( + ctx, nil, f.cfg.HeaderVerifier, + proof.DefaultMerkleVerifier, + f.cfg.GroupVerifier, l, + ) + if err != nil { + return fmt.Errorf("unable to verify "+ + "ownership proof: %w", err) + } - l, err := f.cfg.ChainBridge.GenProofChainLookup(&p) - if err != nil { - return tempPID, fmt.Errorf("unable to create proof "+ - "lookup: %w", err) - } + // Now that we know the proof is valid, we'll add it to + // the funding state. + assetFunding.addInputProof(&p) - // Next, we'll validate this proof to make sure that the - // initiator is actually able to spend these outputs in the - // funding transaction. - _, err = p.Verify( - ctx, nil, f.cfg.HeaderVerifier, - proof.DefaultMerkleVerifier, - f.cfg.GroupVerifier, l, - ) + return nil + }) if err != nil { - return tempPID, fmt.Errorf("unable to verify "+ - "ownership proof: %w", err) + return tempPID, err } - // Now that we know the proof is valid, we'll add it to the - // funding state. - assetFunding.addInputProof( - &assetProof.Proof.Val, - ) - // This is an output proof, so now we should be able to verify the // asset funding output with witness intact. case *cmsg.TxAssetOutputProof: diff --git a/tapchannelmsg/records_test.go b/tapchannelmsg/records_test.go index 024ac356c..71efe7d77 100644 --- a/tapchannelmsg/records_test.go +++ b/tapchannelmsg/records_test.go @@ -534,3 +534,24 @@ func TestContractResolution(t *testing.T) { require.Equal(t, testRes, newRes) }) } + +// TestProofChunk tests encoding and decoding of the ProofChunk TLV blob. +func TestProofChunk(t *testing.T) { + t.Parallel() + + rapid.Check(t, func(r *rapid.T) { + proofChunk := NewProofChunk( + rapid.Make[[32]byte]().Draw(r, "chunk_sum"), + rapid.SliceOf(rapid.Byte()).Draw(r, "chunk_data"), + rapid.Bool().Draw(r, "chunk_offset"), + ) + + var b bytes.Buffer + require.NoError(t, proofChunk.Encode(&b)) + + var newChunk ProofChunk + require.NoError(t, newChunk.Decode(&b)) + + require.Equal(t, proofChunk, newChunk) + }) +} diff --git a/tapchannelmsg/testdata/rapid/TestProofChunkRoundTripProperty/TestProofChunkRoundTripProperty-20241217204719-3582.fail b/tapchannelmsg/testdata/rapid/TestProofChunkRoundTripProperty/TestProofChunkRoundTripProperty-20241217204719-3582.fail new file mode 100644 index 000000000..a59065065 --- /dev/null +++ b/tapchannelmsg/testdata/rapid/TestProofChunkRoundTripProperty/TestProofChunkRoundTripProperty-20241217204719-3582.fail @@ -0,0 +1,390 @@ +# 2024/12/17 20:47:19.611791 [TestProofChunkRoundTripProperty] [rapid] draw randGen: asset.Genesis{FirstPrevOut:wire.OutPoint{Hash:chainhash.Hash{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}, Index:0x0}, Tag:"À᪾𐐀᪾AAႠAAAA\ufeffᾈAAAA\ufeffAÀAÀ᪾AA𐄷DžAAᾈA𞤡ᛮA", MetaHash:[32]uint8{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}, OutputIndex:0x0, Type:0x0} +# 2024/12/17 20:47:19.611811 [TestProofChunkRoundTripProperty] [rapid] draw assetType: 0 +# 2024/12/17 20:47:19.611823 [TestProofChunkRoundTripProperty] [rapid] draw scriptKey: &secp256k1.PublicKey{x:secp256k1.FieldVal{n:[10]uint32{0x2f81798, 0xa056c5, 0x28d959f, 0x36cb738, 0x3029bfc, 0x3a1c2c1, 0x206295c, 0x2eeb156, 0x27ef9dc, 0x1e6f99}}, y:secp256k1.FieldVal{n:[10]uint32{0x310d4b8, 0x1f423fe, 0x14199c4, 0x1229a15, 0xfd17b4, 0x384422a, 0x24fbfc0, 0x3119576, 0x27726a3, 0x120eb6}}} +# 2024/12/17 20:47:19.612743 
[TestProofChunkRoundTripProperty] [rapid] draw chunkSize: 1 +# 2024/12/17 20:47:19.612983 [TestProofChunkRoundTripProperty] +# Error Trace: /Users/roasbeef/gocode/src/github.com/lightninglabs/taproot-assets/tapchannelmsg/wire_msgs_test.go:160 +# /Users/roasbeef/gocode/pkg/mod/pgregory.net/rapid@v1.1.0/engine.go:368 +# /Users/roasbeef/gocode/pkg/mod/pgregory.net/rapid@v1.1.0/engine.go:377 +# /Users/roasbeef/gocode/pkg/mod/pgregory.net/rapid@v1.1.0/engine.go:203 +# /Users/roasbeef/gocode/pkg/mod/pgregory.net/rapid@v1.1.0/engine.go:118 +# /Users/roasbeef/gocode/src/github.com/lightninglabs/taproot-assets/tapchannelmsg/wire_msgs_test.go:129 +# Error: Received unexpected error: +# unable to decode full proof: bytes: too large: 65 +# Test: TestProofChunkRoundTripProperty +# +v0.4.8#8254848599916273006 +0x0 +0x0 +0x0 +0x0 +0x0 +0x0 +0x0 +0x0 +0x0 +0x0 +0x0 +0x0 +0x0 +0x0 +0x0 +0x0 +0x0 +0x0 +0x0 +0x0 +0x0 +0x0 +0x0 +0x0 +0x0 +0x0 +0x0 +0x0 +0x0 +0x0 +0x0 +0x0 +0x0 +0x0 +0x0 +0x0 +0x0 +0x0 +0x0 +0x0 +0x0 +0x0 +0x0 +0x0 +0x0 +0x0 +0x0 +0x0 +0x0 +0x0 +0x0 +0x0 +0x0 +0x0 +0x0 +0x0 +0x0 +0x0 +0x0 +0x0 +0x0 +0x0 +0x0 +0x0 +0x0 +0x0 +0x5555555555555 +0x14 +0xb908c4741e25e +0x1a +0x5555555555555 +0x22 +0x38e38e38e38e4 +0x2 +0x5555555555555 +0x14 +0x15923fd3e7c085 +0x467 +0x5555555555555 +0x22 +0x38e38e38e38e4 +0x2 +0x5555555555555 +0x0 +0x0 +0x0 +0x5555555555555 +0x0 +0x0 +0x0 +0x5555555555555 +0x14 +0x12f32b2c9e3807 +0x1d4 +0x5555555555555 +0x0 +0x0 +0x0 +0x5555555555555 +0x0 +0x0 +0x0 +0x5555555555555 +0x0 +0x0 +0x0 +0x5555555555555 +0x0 +0x0 +0x0 +0x5555555555555 +0x0 +0xe3e05a098777c +0x26 +0x5555555555555 +0x16 +0x6b74f03291620 +0x4 +0x5555555555555 +0x0 +0x0 +0x0 +0x5555555555555 +0x0 +0x0 +0x0 +0x5555555555555 +0x0 +0x0 +0x0 +0x5555555555555 +0x0 +0x0 +0x0 +0x5555555555555 +0x0 +0xe3e05a098777c +0x26 +0x5555555555555 +0x0 +0x0 +0x0 +0x5555555555555 +0x14 +0xb908c4741e25e +0x1a +0x5555555555555 +0x0 +0x0 +0x0 +0x5555555555555 +0x14 +0xb908c4741e25e +0x1a +0x5555555555555 +0x22 +0x38e38e38e38e4 +0x2 +0x5555555555555 +0x0 +0x0 +0x0 +0x5555555555555 +0x0 +0x0 +0x0 +0x5555555555555 +0x20 +0x16502e2e6c40bf +0xaab +0x5555555555555 +0x16 +0x0 +0x0 +0x5555555555555 +0x0 +0x0 +0x0 +0x5555555555555 +0x0 +0x0 +0x0 +0x5555555555555 +0x16 +0x6b74f03291620 +0x4 +0x5555555555555 +0x0 +0x0 +0x0 +0x5555555555555 +0x14 +0x1f0285a09b9d2d +0x0 +0x5555555555555 +0x1a +0x0 +0x0 +0x5555555555555 +0x0 +0x0 +0x0 +0x0 +0x0 +0x0 +0x0 +0x0 +0x0 +0x0 +0x0 +0x0 +0x0 +0x0 +0x0 +0x0 +0x0 +0x0 +0x0 +0x0 +0x0 +0x0 +0x0 +0x0 +0x0 +0x0 +0x0 +0x0 +0x0 +0x0 +0x0 +0x0 +0x0 +0x0 +0x0 +0x0 +0x0 +0x0 +0x0 +0x0 +0x0 +0x0 +0x0 +0x0 +0x0 +0x0 +0x0 +0x0 +0x0 +0x0 +0x0 +0x0 +0x0 +0x0 +0x0 +0x0 +0x0 +0x0 +0x0 +0x0 +0x0 +0x0 +0x0 +0x0 +0x0 +0x0 +0x0 +0x0 +0x0 +0x0 +0x0 +0x0 +0x0 +0x0 +0x0 +0x0 +0x0 +0x0 +0x0 +0x0 +0x0 +0x0 +0x0 +0x0 +0x0 +0x0 +0x0 +0x0 +0x0 +0x0 +0x0 +0x0 +0x0 +0x0 +0x0 +0x0 +0x0 +0x0 +0x0 +0x0 +0x0 +0x0 +0x0 +0x0 +0x0 +0x0 +0x0 +0x0 +0x0 +0x0 +0x0 +0x0 +0x0 +0x0 +0x0 +0x0 +0x0 +0x0 +0x0 +0x0 +0x0 +0x0 +0x0 +0x0 +0x0 +0x0 +0x0 +0x0 +0x0 +0x0 +0x0 +0x0 +0x0 +0x0 +0x0 +0x0 +0x0 +0x0 +0x0 +0x0 +0x0 +0x0 +0x0 +0x0 +0x0 +0x0 +0x0 +0x0 +0x0 +0x0 +0x0 +0x0 +0x0 +0x0 +0x0 +0x0 +0x0 +0x0 +0x0 +0x0 +0x0 +0x0 +0x0 +0x0 +0x0 +0x0 +0x0 +0x0 +0x0 +0x0 +0x1 +0x0 +0x0 +0x0 +0x0 \ No newline at end of file diff --git a/tapchannelmsg/wire_msgs.go b/tapchannelmsg/wire_msgs.go index 2a3f468d3..a90fed8f6 100644 --- a/tapchannelmsg/wire_msgs.go +++ b/tapchannelmsg/wire_msgs.go @@ -2,6 +2,8 @@ package tapchannelmsg import ( "bytes" + "crypto/sha256" + 
"fmt" "io" "github.com/lightninglabs/taproot-assets/asset" @@ -44,6 +46,26 @@ const ( AssetFundingAckType = TapChannelMessageTypeOffset + 3 ) +var ( + // ErrChunkSize is returned when the chunk size is invalid. + ErrChunkSize = fmt.Errorf("chunk size must be positive") + + // ErrInvalidChunk is returned when the wrong number of chunks is + // created. + ErrImproperChunks = fmt.Errorf("improper number of chunks") + + // ErrChunkDistUniformity is returned when the chunk distribution is not + // the same over all chunks. + ErrChunkDistUniformity = fmt.Errorf("chunk distribution is not uniform") + + // ErrChunkDigestMismatch is returned when the chunk digest sum does not + // match one encoded in the message. + ErrChunkDigestMismatch = fmt.Errorf("chunk digest mismatch") + + // ErrNoChunks is returned when no chunks are provided. + ErrNoChunks = fmt.Errorf("no chunks") +) + // AssetFundingMsg is an interface that represents a message that is sent // during the asset funding process. type AssetFundingMsg interface { @@ -56,6 +78,204 @@ type AssetFundingMsg interface { Amt() fn.Option[uint64] } +// ProofChunk contains a chunk of a proof that would be too large to send as a +// single message. +type ProofChunk struct { + // ChunkSumID is a digest sum over the final proof including all chunks. + // This is used to identify which chunk belongs to which proofs, and can + // be used to verify the integrity of the final proof. + ChunkSumID tlv.RecordT[tlv.TlvType0, [sha256.Size]byte] + + // Chunk is a chunk of the proof. + Chunk tlv.RecordT[tlv.TlvType1, []byte] + + // Last indicates whether this is the last chunk in the proof. + Last tlv.RecordT[tlv.TlvType2, bool] +} + +// Encode writes the message using the given io.Writer. +func (p *ProofChunk) Encode(w io.Writer) error { + stream, err := tlv.NewStream( + p.ChunkSumID.Record(), p.Chunk.Record(), p.Last.Record(), + ) + if err != nil { + return err + } + + return stream.Encode(w) +} + +// Decode reads the message using the given io.Reader. +func (p *ProofChunk) Decode(r io.Reader) error { + stream, err := tlv.NewStream( + p.ChunkSumID.Record(), p.Chunk.Record(), p.Last.Record(), + ) + if err != nil { + return err + } + + return stream.Decode(r) +} + +// eProofChunk is a tlv encoding function for the ProofChunk type. +func eProofChunk(w io.Writer, val interface{}, _ *[8]byte) error { + if v, ok := val.(*ProofChunk); ok { + return v.Encode(w) + } + + return tlv.NewTypeForEncodingErr(val, "*ProofChunk") +} + +// dProofChunk is a tlv decoding function for the ProofChunk type. +func dProofChunk(r io.Reader, val interface{}, _ *[8]byte, l uint64) error { + if v, ok := val.(*ProofChunk); ok { + return v.Decode(r) + } + + return tlv.NewTypeForDecodingErr(val, "*ProofChunk", l, l) +} + +// Record returns the tlv record of the proof chunk. +func (p *ProofChunk) Record() tlv.Record { + sizeFunc := func() uint64 { + var buf bytes.Buffer + err := p.Encode(&buf) + if err != nil { + panic(err) + } + return uint64(len(buf.Bytes())) + } + + return tlv.MakeDynamicRecord( + 0, p, sizeFunc, eProofChunk, dProofChunk, + ) +} + +// NewProofChunk creates a new ProofChunk message. +func NewProofChunk(sum [32]byte, chunk []byte, last bool) ProofChunk { + return ProofChunk{ + ChunkSumID: tlv.NewPrimitiveRecord[tlv.TlvType0](sum), + Chunk: tlv.NewPrimitiveRecord[tlv.TlvType1](chunk), + Last: tlv.NewPrimitiveRecord[tlv.TlvType2](last), + } +} + +// CreateProofChunks creates a list of proof chunks from a single proof, given a +// desired chunk size. 
+func CreateProofChunks(wholeProof proof.Proof, + chunkSize int) ([]ProofChunk, error) { + + // The chunk size must be positive. + if chunkSize <= 0 { + return nil, fmt.Errorf("%w: chunk size is %v", + ErrChunkSize, chunkSize) + } + + // First, we'll encode the entire proof into a buffer so we can chunk it + // up. + var proofBuf bytes.Buffer + err := wholeProof.Encode(&proofBuf) + if err != nil { + return nil, err + } + + // We'll also obtain the hash digest of the proof as well. + proofDigest := sha256.Sum256(proofBuf.Bytes()) + + // If the length of the proof is below the chunk size, then we can + // return just a single proof. + if proofBuf.Len() <= chunkSize { + return []ProofChunk{ + NewProofChunk(proofDigest, proofBuf.Bytes(), true), + }, nil + } + + numExpectedChunks := proofBuf.Len() / chunkSize + if proofBuf.Len()%chunkSize != 0 { + numExpectedChunks++ + } + + proofSize := proofBuf.Len() + + // Otherwise, we'll need to chunk up the proof into multiple chunks. + var chunks []ProofChunk + for i := 0; i < proofSize; i += chunkSize { + // If this is the last chunk, then we'll set the last flag to + // true. + last := i+chunkSize >= proofBuf.Len() + + // We'll slice out the next chunk of the proof. + chunk := proofBuf.Next(chunkSize) + + // With the chunk obtained, we'll create a new proof chunk and + // add it to our list of chunks. + chunks = append( + chunks, NewProofChunk(proofDigest, chunk, last), + ) + } + + // Verify that we created the correct number of chunks. + if len(chunks) != numExpectedChunks { + return nil, fmt.Errorf("%w: expected %v chunks, got %v", + ErrImproperChunks, numExpectedChunks, len(chunks)) + } + + return chunks, nil +} + +// AssembleProofChunks assembles a list of proof chunks into a single proof. +func AssembleProofChunks(chunks []ProofChunk) (*proof.Proof, error) { + // We must have at least a single chunk. + if len(chunks) == 0 { + return nil, ErrNoChunks + } + + // First, we'll iterate over all the chunks to ensure that they all have + // the same digest sum. + var proofDigest [sha256.Size]byte + for i, chunk := range chunks { + // If this is the first chunk, then we'll record the digest sum. + if i == 0 { + proofDigest = chunk.ChunkSumID.Val + } + + // If the digest sum of this chunk doesn't match the one we + // recorded, then we'll return an error. + if proofDigest != chunk.ChunkSumID.Val { + return nil, fmt.Errorf("%w: digest sum mismatch at "+ + "chunk %v", ErrChunkDistUniformity, i) + } + } + + // With the digest sum validated, we'll now concatenate all the chunks + // together to obtain the full proof. + var proofBuf bytes.Buffer + for _, chunk := range chunks { + _, err := proofBuf.Write(chunk.Chunk.Val) + if err != nil { + return nil, fmt.Errorf("unable to write chunk: %w", err) + } + } + + // Before we decode the full proof, we'll ensure that the proof digest + // matches up. + fullProofDigest := sha256.Sum256(proofBuf.Bytes()) + if proofDigest != fullProofDigest { + return nil, fmt.Errorf("%w: digest sum mismatch: expected %x, "+ + "got %x", ErrChunkDigestMismatch, proofDigest, + fullProofDigest) + } + + // Finally, we'll decode the full proof from the buffer. + var fullProof proof.Proof + err := fullProof.Decode(&proofBuf) + if err != nil { + return nil, fmt.Errorf("unable to decode full proof: %w", err) + } + + return &fullProof, nil +} + // TxAssetInputProof is sent by the initiator of a channel funding request to // prove to the upcoming responder that they are the owner of an asset input. 
// @@ -72,23 +292,21 @@ type TxAssetInputProof struct { // Amount is the amount of the asset that this output represents. Amount tlv.RecordT[tlv.TlvType2, uint64] - // Proof is the last transition proof that proves this output was - // committed to in the Bitcoin transaction that anchors this asset - // output. - Proof tlv.RecordT[tlv.TlvType3, proof.Proof] + // ProofChunk is a set of proof chunks for the last transition proof + // that proves this output was committed to in the Bitcoin transaction + // that anchors this asset output. + ProofChunk tlv.RecordT[tlv.TlvType3, ProofChunk] } // NewTxAssetInputProof creates a new TxAssetInputProof message. -func NewTxAssetInputProof(pid funding.PendingChanID, - p proof.Proof) *TxAssetInputProof { +func NewTxAssetInputProof(pid funding.PendingChanID, assetID asset.ID, + amt uint64, chunk ProofChunk) *TxAssetInputProof { return &TxAssetInputProof{ PendingChanID: tlv.NewPrimitiveRecord[tlv.TlvType0](pid), - AssetID: tlv.NewRecordT[tlv.TlvType1](p.Asset.ID()), - Amount: tlv.NewPrimitiveRecord[tlv.TlvType2]( - p.Asset.Amount, - ), - Proof: tlv.NewRecordT[tlv.TlvType3](p), + AssetID: tlv.NewRecordT[tlv.TlvType1](assetID), + Amount: tlv.NewPrimitiveRecord[tlv.TlvType2](amt), + ProofChunk: tlv.NewRecordT[tlv.TlvType3](chunk), } } @@ -103,7 +321,7 @@ func (t *TxAssetInputProof) Decode(r io.Reader, _ uint32) error { t.PendingChanID.Record(), t.AssetID.Record(), t.Amount.Record(), - t.Proof.Record(), + t.ProofChunk.Record(), ) if err != nil { return err @@ -119,7 +337,7 @@ func (t *TxAssetInputProof) Encode(w *bytes.Buffer, _ uint32) error { t.PendingChanID.Record(), t.AssetID.Record(), t.Amount.Record(), - t.Proof.Record(), + t.ProofChunk.Record(), ) if err != nil { return err @@ -237,6 +455,8 @@ type AssetFundingCreated struct { // funding output to be able to create the aux funding+commitment // blobs. FundingOutputs tlv.RecordT[tlv.TlvType1, AssetOutputListRecord] + + // TODO(roasbeef): need to chunk this?? } // NewAssetFundingCreated creates a new AssetFundingCreated message. diff --git a/tapchannelmsg/wire_msgs_test.go b/tapchannelmsg/wire_msgs_test.go index ceb52601e..2a3672269 100644 --- a/tapchannelmsg/wire_msgs_test.go +++ b/tapchannelmsg/wire_msgs_test.go @@ -2,6 +2,7 @@ package tapchannelmsg import ( "bytes" + "crypto/sha256" "encoding/hex" "os" "strings" @@ -12,6 +13,7 @@ import ( "github.com/lightninglabs/taproot-assets/internal/test" "github.com/lightninglabs/taproot-assets/proof" "github.com/stretchr/testify/require" + "pgregory.net/rapid" ) // TestAssetFundingMsg tests encoding and decoding of the AssetFundingMsg @@ -44,6 +46,9 @@ func TestAssetFundingMsg(t *testing.T) { randProof, err := proof.Decode(proofBytes) require.NoError(t, err) + proofChunks, err := CreateProofChunks(*randProof, 100) + require.NoError(t, err) + testCases := []struct { name string msg AssetFundingMsg @@ -52,7 +57,8 @@ func TestAssetFundingMsg(t *testing.T) { { name: "TxAssetInputProof", msg: NewTxAssetInputProof( - [32]byte{1}, *randProof, + [32]byte{1}, randProof.Asset.ID(), + randProof.Asset.Amount, proofChunks[0], ), empty: func() AssetFundingMsg { return &TxAssetInputProof{} @@ -97,3 +103,158 @@ func TestAssetFundingMsg(t *testing.T) { }) } } + +// loadOddTxBlock loads a block from a file containing a hex-encoded block. 
+func loadOddTxBlock(t *testing.T, fileName string) wire.MsgBlock {
+    oddTxBlockHex, err := os.ReadFile(fileName)
+    require.NoError(t, err)
+
+    oddTxBlockBytes, err := hex.DecodeString(
+        strings.TrimSpace(string(oddTxBlockHex)),
+    )
+    require.NoError(t, err)
+
+    var oddTxBlock wire.MsgBlock
+    err = oddTxBlock.Deserialize(bytes.NewReader(oddTxBlockBytes))
+    require.NoError(t, err)
+
+    return oddTxBlock
+}
+
+// TestProofChunkRoundTripProperty tests that proof chunks can be split and then
+// reassembled without losing any information.
+func TestProofChunkRoundTripProperty(t *testing.T) {
+    oddTxBlock := loadOddTxBlock(t, oddTxBlockHexFileName)
+
+    rapid.Check(t, func(r *rapid.T) {
+        // Make sure the asset type is normal or collectible.
+        randGen := asset.GenesisGen.Draw(r, "randGen")
+        randGen.Type = asset.Type(
+            rapid.IntRange(0, 1).Draw(r, "assetType"),
+        )
+
+        scriptKey := asset.PubKeyGen.Draw(r, "scriptKey")
+
+        originalRandProof := proof.RandProof(
+            t, randGen, scriptKey, oddTxBlock, 0, 1,
+        )
+
+        // Encode the original proof.
+        var origBuf bytes.Buffer
+        err := originalRandProof.Encode(&origBuf)
+        require.NoError(r, err)
+
+        // Randomize the chunk size. We keep it at 100 or more so the
+        // chunk size is always positive.
+        chunkSize := rapid.IntRange(
+            100, origBuf.Len()+50,
+        ).Draw(r, "chunkSize")
+
+        chunks, err := CreateProofChunks(originalRandProof, chunkSize)
+        require.NoError(r, err)
+
+        // The last chunk should have the Last attribute set.
+        require.True(r, chunks[len(chunks)-1].Last.Val)
+
+        reconstructedProof, err := AssembleProofChunks(chunks)
+        require.NoError(r, err)
+
+        // Encode the reconstructed proof for comparison.
+        var reconBuf bytes.Buffer
+        err = reconstructedProof.Encode(&reconBuf)
+        require.NoError(r, err)
+
+        // Ensure original and reconstructed proofs match.
+        require.Equal(r, origBuf.Bytes(), reconBuf.Bytes(),
+            "reconstructed proof does not match the original")
+    })
+}
+
+// TestProofChunkErrorCases tests error cases for proof chunking and
+// assembly. This will try invalid chunk sizes and also corrupt chunks.
+func TestProofChunkErrorCases(t *testing.T) {
+    oddTxBlock := loadOddTxBlock(t, oddTxBlockHexFileName)
+
+    rapid.Check(t, func(r *rapid.T) {
+        // Make sure the asset type is normal or collectible.
+        randGen := asset.GenesisGen.Draw(r, "randGen")
+        randGen.Type = asset.Type(
+            rapid.IntRange(0, 1).Draw(r, "assetType"),
+        )
+
+        scriptKey := asset.PubKeyGen.Draw(r, "scriptKey")
+
+        originalRandProof := proof.RandProof(
+            t, randGen, scriptKey, oddTxBlock, 0, 1,
+        )
+
+        var origBuf bytes.Buffer
+        err := originalRandProof.Encode(&origBuf)
+        require.NoError(r, err)
+
+        // We'll try some invalid chunk sizes to trigger errors:
+        invalidChunkSize := rapid.IntRange(-10, 0).Draw(
+            r, "invalidChunkSize",
+        )
+        _, err = CreateProofChunks(originalRandProof, invalidChunkSize)
+
+        // If the chunk size is invalid, we should get an error.
+        if invalidChunkSize <= 0 {
+            require.ErrorIs(r, err, ErrChunkSize, "Expected an "+
+                "error for non-positive chunk size")
+        }
+
+        // We'll now test for invalid chunking. To start, we'll make a
+        // valid set of proof chunks.
+        chunkSize := rapid.IntRange(100, origBuf.Len()+10).Draw(
+            r, "chunkSize",
+        )
+        chunks, err := CreateProofChunks(originalRandProof, chunkSize)
+        require.NoError(r, err)
+
+        // We'll modify the chunk digest to trigger an error.
+        if len(chunks) > 1 {
+            // Corrupt the digest in one chunk.
+            badChunkIndex := rapid.IntRange(0, len(chunks)-1).Draw(
+                r, "badChunkIndex",
+            )
+            chunks[badChunkIndex].ChunkSumID.Val = sha256.Sum256(
+                []byte("corruption"),
+            )
+
+            _, err := AssembleProofChunks(chunks)
+            require.ErrorIs(
+                r, err, ErrChunkDistUniformity,
+                "expected error due to mismatched chunk digest",
+            )
+        }
+
+        // Obtain a new set of chunks.
+        chunks, err = CreateProofChunks(originalRandProof, chunkSize)
+        require.NoError(r, err)
+
+        // Next, we'll corrupt the chunk data itself so reassembly fails.
+        if len(chunks) > 0 {
+            // Drop the chunk payload so the reassembled bytes no
+            // longer match the recorded digest.
+            badChunkIndex := rapid.IntRange(0, len(chunks)-1).Draw(
+                r, "badChunkIndex2",
+            )
+            chunks[badChunkIndex].Chunk.Val = nil
+
+            _, err := AssembleProofChunks(chunks)
+            require.ErrorIs(
+                r, err, ErrChunkDigestMismatch,
+                "expected error due to chunk digest "+
+                    "mismatch, got %v", err,
+            )
+        }
+
+        // We'll also test the case of an empty set of chunks.
+        _, err = AssembleProofChunks([]ProofChunk{})
+        require.ErrorIs(
+            r, err, ErrNoChunks,
+            "expected error assembling from empty chunk list",
+        )
+    })
+}
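As a usage illustration (not part of the patch), the following is a minimal, hedged sketch of how the new CreateProofChunks and AssembleProofChunks helpers compose end to end. The package name chunksketch and the helper chunkAndReassemble are hypothetical; only the tapchannelmsg and proof APIs shown in the diff above are assumed.

// Illustrative sketch only: split a decoded proof into wire-sized chunks and
// rebuild it, checking the round trip is lossless. The sender side of the
// funding flow would instead wrap each chunk in a TxAssetInputProof message.
package chunksketch

import (
    "bytes"
    "fmt"

    "github.com/lightninglabs/taproot-assets/proof"
    cmsg "github.com/lightninglabs/taproot-assets/tapchannelmsg"
)

// chunkAndReassemble is a hypothetical helper showing the chunking round trip.
func chunkAndReassemble(p proof.Proof, chunkSize int) error {
    // Split the proof into chunks whose payload is at most chunkSize bytes;
    // the final chunk carries the Last flag.
    chunks, err := cmsg.CreateProofChunks(p, chunkSize)
    if err != nil {
        return fmt.Errorf("unable to create chunks: %w", err)
    }

    // Reassemble the chunks into a single proof. This also verifies that all
    // chunks share the same digest and that the reassembled bytes hash to it.
    rebuilt, err := cmsg.AssembleProofChunks(chunks)
    if err != nil {
        return fmt.Errorf("unable to assemble chunks: %w", err)
    }

    // Sanity check: the rebuilt proof encodes to the same bytes as the
    // original.
    var origBuf, newBuf bytes.Buffer
    if err := p.Encode(&origBuf); err != nil {
        return err
    }
    if err := rebuilt.Encode(&newBuf); err != nil {
        return err
    }
    if !bytes.Equal(origBuf.Bytes(), newBuf.Bytes()) {
        return fmt.Errorf("round trip mismatch")
    }

    return nil
}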
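Along the same lines, here is a hedged sketch of the receiver-side pattern that the funding controller's inputProofChunks map implements: buffer chunks keyed by ChunkSumID and assemble only once the Last flag is seen. The chunkCollector type is hypothetical and deliberately simplified (no lfn.Result/lfn.Option wrapping and no locking).

// Illustrative sketch only: a simplified, non-concurrent accumulator that
// mirrors the receiver-side bookkeeping done by the funding controller.
package chunksketch

import (
    "crypto/sha256"

    "github.com/lightninglabs/taproot-assets/proof"
    cmsg "github.com/lightninglabs/taproot-assets/tapchannelmsg"
)

// chunkCollector groups incoming chunks by their ChunkSumID digest and only
// assembles a proof once the chunk flagged as Last arrives for that digest.
type chunkCollector struct {
    pending map[[sha256.Size]byte][]cmsg.ProofChunk
}

func newChunkCollector() *chunkCollector {
    return &chunkCollector{
        pending: make(map[[sha256.Size]byte][]cmsg.ProofChunk),
    }
}

// addChunk buffers a chunk and, if it completes its proof, returns the fully
// assembled proof. A nil proof with a nil error means more chunks are needed.
func (c *chunkCollector) addChunk(chunk cmsg.ProofChunk) (*proof.Proof, error) {
    id := chunk.ChunkSumID.Val
    c.pending[id] = append(c.pending[id], chunk)

    // Not the last chunk yet, so keep waiting.
    if !chunk.Last.Val {
        return nil, nil
    }

    // The final chunk arrived, so stitch everything back together and drop
    // the buffered state for this digest.
    fullProof, err := cmsg.AssembleProofChunks(c.pending[id])
    if err != nil {
        return nil, err
    }
    delete(c.pending, id)

    return fullProof, nil
}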