diff --git a/components/debugapi/blocks.go b/components/debugapi/blocks.go
index 4f9a07abb..948aa76e0 100644
--- a/components/debugapi/blocks.go
+++ b/components/debugapi/blocks.go
@@ -16,7 +16,13 @@ func getSlotBlockIDs(index iotago.SlotIndex) (*BlockChangesResponse, error) {
 	}
 
 	includedBlocks := make([]string, 0)
-	tangleTree := ads.NewSet[iotago.Identifier](mapdb.NewMapDB(), iotago.BlockID.Bytes, iotago.BlockIDFromBytes)
+	tangleTree := ads.NewSet[iotago.Identifier](
+		mapdb.NewMapDB(),
+		iotago.Identifier.Bytes,
+		iotago.IdentifierFromBytes,
+		iotago.BlockID.Bytes,
+		iotago.BlockIDFromBytes,
+	)
 
 	_ = blocksForSlot.StreamKeys(func(blockID iotago.BlockID) error {
 		includedBlocks = append(includedBlocks, blockID.String())
diff --git a/components/debugapi/transactions.go b/components/debugapi/transactions.go
index 6724032c9..8a325e98c 100644
--- a/components/debugapi/transactions.go
+++ b/components/debugapi/transactions.go
@@ -21,7 +21,13 @@ func storeTransactionsPerSlot(scd *notarization.SlotCommittedDetails) error {
 	if err != nil {
 		return ierrors.Wrapf(err, "failed to retrieve state diff for slot %d", slot)
 	}
-	mutationsTree := ads.NewSet[iotago.Identifier](mapdb.NewMapDB(), iotago.TransactionID.Bytes, iotago.TransactionIDFromBytes)
+	mutationsTree := ads.NewSet[iotago.Identifier](
+		mapdb.NewMapDB(),
+		iotago.Identifier.Bytes,
+		iotago.IdentifierFromBytes,
+		iotago.TransactionID.Bytes,
+		iotago.TransactionIDFromBytes,
+	)
 	tcs := &TransactionsChangesResponse{
 		Index:                slot,
 		IncludedTransactions: make([]string, 0),
diff --git a/components/inx/server_blocks.go b/components/inx/server_blocks.go
index 0565e94b8..07a0a8ed3 100644
--- a/components/inx/server_blocks.go
+++ b/components/inx/server_blocks.go
@@ -12,6 +12,7 @@ import (
 	"github.com/iotaledger/hive.go/runtime/workerpool"
 	inx "github.com/iotaledger/inx/go"
 	"github.com/iotaledger/iota-core/pkg/blockhandler"
+	"github.com/iotaledger/iota-core/pkg/model"
 	"github.com/iotaledger/iota-core/pkg/protocol/engine/blocks"
 	iotago "github.com/iotaledger/iota.go/v4"
 )
@@ -123,6 +124,33 @@ func (s *Server) ListenToConfirmedBlocks(_ *inx.NoParams, srv inx.INX_ListenToCo
 	return ctx.Err()
 }
 
+func (s *Server) ReadAcceptedBlocks(slot *inx.SlotIndex, srv inx.INX_ReadAcceptedBlocksServer) error {
+	blocksStore, err := deps.Protocol.MainEngineInstance().Storage.Blocks(slot.Unwrap())
+	if err != nil {
+		return status.Errorf(codes.InvalidArgument, "failed to get blocks: %s", err.Error())
+	}
+
+	if err := blocksStore.ForEachBlockInSlot(func(block *model.Block) error {
+		metadata, err := getINXBlockMetadata(block.ID())
+		if err != nil {
+			return err
+		}
+
+		payload := &inx.BlockWithMetadata{
+			Metadata: metadata,
+			Block: &inx.RawBlock{
+				Data: block.Data(),
+			},
+		}
+
+		return srv.Send(payload)
+	}); err != nil {
+		return status.Errorf(codes.Internal, "failed to iterate blocks: %s", err.Error())
+	}
+
+	return nil
+}
+
 func (s *Server) SubmitBlock(ctx context.Context, rawBlock *inx.RawBlock) (*inx.BlockId, error) {
 	block, err := rawBlock.UnwrapBlock(deps.Protocol)
 	if err != nil {
diff --git a/components/inx/server_commitments.go b/components/inx/server_commitments.go
index b16713f48..a88448dd8 100644
--- a/components/inx/server_commitments.go
+++ b/components/inx/server_commitments.go
@@ -8,6 +8,7 @@ import (
 
 	"github.com/iotaledger/hive.go/ierrors"
 	"github.com/iotaledger/hive.go/kvstore"
+	"github.com/iotaledger/hive.go/runtime/workerpool"
 	inx "github.com/iotaledger/inx/go"
 	"github.com/iotaledger/iota-core/pkg/model"
 	iotago "github.com/iotaledger/iota.go/v4"
@@ -22,6 +23,135 @@ func inxCommitment(commitment *model.Commitment) *inx.Commitment {
 	}
 }
 
+func (s *Server) ListenToCommitments(req *inx.SlotRangeRequest, srv inx.INX_ListenToCommitmentsServer) error {
+	createCommitmentPayloadForSlotAndSend := func(slot iotago.SlotIndex) error {
+		commitment, err := deps.Protocol.MainEngineInstance().Storage.Commitments().Load(slot)
+		if err != nil {
+			if ierrors.Is(err, kvstore.ErrKeyNotFound) {
+				return status.Errorf(codes.NotFound, "commitment slot %d not found", slot)
+			}
+
+			return err
+		}
+
+		if err := srv.Send(inxCommitment(commitment)); err != nil {
+			return ierrors.Errorf("send error: %w", err)
+		}
+
+		return nil
+	}
+
+	sendSlotsRange := func(startSlot iotago.SlotIndex, endSlot iotago.SlotIndex) error {
+		for currentSlot := startSlot; currentSlot <= endSlot; currentSlot++ {
+			if err := createCommitmentPayloadForSlotAndSend(currentSlot); err != nil {
+				return err
+			}
+		}
+
+		return nil
+	}
+
+	// if a startSlot is given, we send all available commitments including the start slot.
+	// if an endSlot is given, we send all available commitments up to and including min(latestCommitmentSlot, endSlot).
+	// if no startSlot is given, but an endSlot, we don't send previous commitments.
+	sendPreviousSlots := func(startSlot iotago.SlotIndex, endSlot iotago.SlotIndex) (iotago.SlotIndex, error) {
+		if startSlot == 0 {
+			// no need to send previous commitments
+			return 0, nil
+		}
+
+		latestCommitment := deps.Protocol.MainEngineInstance().SyncManager.LatestCommitment()
+
+		if startSlot > latestCommitment.Slot() {
+			// no need to send previous commitments
+			return 0, nil
+		}
+
+		// Stream all available commitments first
+		prunedEpoch, hasPruned := deps.Protocol.MainEngineInstance().SyncManager.LastPrunedEpoch()
+		if hasPruned && startSlot <= deps.Protocol.CommittedAPI().TimeProvider().EpochEnd(prunedEpoch) {
+			return 0, status.Errorf(codes.InvalidArgument, "given startSlot %d is older than the current pruningSlot %d", startSlot, deps.Protocol.CommittedAPI().TimeProvider().EpochEnd(prunedEpoch))
+		}
+
+		if endSlot == 0 || endSlot > latestCommitment.Slot() {
+			endSlot = latestCommitment.Slot()
+		}
+
+		if err := sendSlotsRange(startSlot, endSlot); err != nil {
+			return 0, err
+		}
+
+		return endSlot, nil
+	}
+
+	stream := &streamRange{
+		start: iotago.SlotIndex(req.GetStartSlot()),
+		end:   iotago.SlotIndex(req.GetEndSlot()),
+	}
+
+	var err error
+	stream.lastSent, err = sendPreviousSlots(stream.start, stream.end)
+	if err != nil {
+		return err
+	}
+
+	if stream.isBounded() && stream.lastSent >= stream.end {
+		// We are done sending, so close the stream
+		return nil
+	}
+
+	catchUpFunc := func(start iotago.SlotIndex, end iotago.SlotIndex) error {
+		err := sendSlotsRange(start, end)
+		if err != nil {
+			err := ierrors.Errorf("sendSlotsRange error: %w", err)
+			Component.LogError(err.Error())
+
+			return err
+		}
+
+		return nil
+	}
+
+	sendFunc := func(_ iotago.SlotIndex, payload *inx.Commitment) error {
+		if err := srv.Send(payload); err != nil {
+			err := ierrors.Errorf("send error: %w", err)
+			Component.LogError(err.Error())
+
+			return err
+		}
+
+		return nil
+	}
+
+	var innerErr error
+	ctx, cancel := context.WithCancel(Component.Daemon().ContextStopped())
+
+	wp := workerpool.New("ListenToCommitments", workerpool.WithWorkerCount(workerCount)).Start()
+
+	unhook := deps.Protocol.Events.Engine.Notarization.LatestCommitmentUpdated.Hook(func(commitment *model.Commitment) {
+		done, err := handleRangedSend1(commitment.Slot(), inxCommitment(commitment), stream, catchUpFunc, sendFunc)
+		switch {
+		case err != nil:
+			innerErr = err
+			cancel()
+
+		case done:
+			cancel()
+		}
+	}).Unhook
+
+	<-ctx.Done()
+	unhook()
+
+	// We need to wait until all tasks are done, otherwise we might call
+	// "SendMsg" and "CloseSend" in parallel on the grpc stream, which is
+	// not safe according to the grpc docs.
+	wp.Shutdown()
+	wp.ShutdownComplete.Wait()
+
+	return innerErr
+}
+
 func (s *Server) ForceCommitUntil(_ context.Context, slot *inx.SlotIndex) (*inx.NoParams, error) {
 	err := deps.Protocol.MainEngineInstance().Notarization.ForceCommitUntil(slot.Unwrap())
 	if err != nil {
diff --git a/go.mod b/go.mod
index a8d306e68..821a76207 100644
--- a/go.mod
+++ b/go.mod
@@ -10,22 +10,22 @@ require (
 	github.com/google/uuid v1.4.0
 	github.com/gorilla/websocket v1.5.0
 	github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0
-	github.com/iotaledger/hive.go/ads v0.0.0-20231107225803-f89acd088c10
-	github.com/iotaledger/hive.go/app v0.0.0-20231107225803-f89acd088c10
-	github.com/iotaledger/hive.go/constraints v0.0.0-20231107225803-f89acd088c10
-	github.com/iotaledger/hive.go/core v1.0.0-rc.3.0.20231107225803-f89acd088c10
-	github.com/iotaledger/hive.go/crypto v0.0.0-20231107225803-f89acd088c10
-	github.com/iotaledger/hive.go/ds v0.0.0-20231107225803-f89acd088c10
-	github.com/iotaledger/hive.go/ierrors v0.0.0-20231107225803-f89acd088c10
-	github.com/iotaledger/hive.go/kvstore v0.0.0-20231107225803-f89acd088c10
-	github.com/iotaledger/hive.go/lo v0.0.0-20231107225803-f89acd088c10
-	github.com/iotaledger/hive.go/logger v0.0.0-20231107225803-f89acd088c10
-	github.com/iotaledger/hive.go/runtime v0.0.0-20231107225803-f89acd088c10
-	github.com/iotaledger/hive.go/serializer/v2 v2.0.0-rc.1.0.20231107225803-f89acd088c10
-	github.com/iotaledger/hive.go/stringify v0.0.0-20231107225803-f89acd088c10
-	github.com/iotaledger/inx-app v1.0.0-rc.3.0.20231031135002-4c79ea5193f5
-	github.com/iotaledger/inx/go v1.0.0-rc.2.0.20231031134131-b6ad918dc1ac
-	github.com/iotaledger/iota.go/v4 v4.0.0-20231102113728-20b8d01e826e
+	github.com/iotaledger/hive.go/ads v0.0.0-20231108050255-98e0fa35e936
+	github.com/iotaledger/hive.go/app v0.0.0-20231108050255-98e0fa35e936
+	github.com/iotaledger/hive.go/constraints v0.0.0-20231108050255-98e0fa35e936
+	github.com/iotaledger/hive.go/core v1.0.0-rc.3.0.20231108050255-98e0fa35e936
+	github.com/iotaledger/hive.go/crypto v0.0.0-20231108050255-98e0fa35e936
+	github.com/iotaledger/hive.go/ds v0.0.0-20231108050255-98e0fa35e936
+	github.com/iotaledger/hive.go/ierrors v0.0.0-20231108050255-98e0fa35e936
+	github.com/iotaledger/hive.go/kvstore v0.0.0-20231108050255-98e0fa35e936
+	github.com/iotaledger/hive.go/lo v0.0.0-20231108050255-98e0fa35e936
+	github.com/iotaledger/hive.go/logger v0.0.0-20231108050255-98e0fa35e936
+	github.com/iotaledger/hive.go/runtime v0.0.0-20231108050255-98e0fa35e936
+	github.com/iotaledger/hive.go/serializer/v2 v2.0.0-rc.1.0.20231108050255-98e0fa35e936
+	github.com/iotaledger/hive.go/stringify v0.0.0-20231108050255-98e0fa35e936
+	github.com/iotaledger/inx-app v1.0.0-rc.3.0.20231108104504-1445f545de82
+	github.com/iotaledger/inx/go v1.0.0-rc.2.0.20231108104322-f301c3573998
+	github.com/iotaledger/iota.go/v4 v4.0.0-20231108103955-bf75d703d8aa
 	github.com/labstack/echo/v4 v4.11.2
 	github.com/labstack/gommon v0.4.0
 	github.com/libp2p/go-libp2p v0.32.0
@@ -33,7 +33,6 @@ require (
 	github.com/mr-tron/base58 v1.2.0
 	github.com/multiformats/go-multiaddr v0.12.0
 	github.com/multiformats/go-varint v0.0.7
-	github.com/orcaman/writerseeker v0.0.0-20200621085525-1d3f536ff85e
github.com/otiai10/copy v1.14.0 github.com/prometheus/client_golang v1.17.0 github.com/spf13/pflag v1.0.5 @@ -43,6 +42,7 @@ require ( go.uber.org/atomic v1.11.0 go.uber.org/dig v1.17.1 golang.org/x/crypto v0.14.0 + golang.org/x/exp v0.0.0-20231006140011-7918f672742d google.golang.org/grpc v1.59.0 google.golang.org/protobuf v1.31.0 ) @@ -89,7 +89,7 @@ require ( github.com/huin/goupnp v1.3.0 // indirect github.com/iancoleman/orderedmap v0.3.0 // indirect github.com/iotaledger/grocksdb v1.7.5-0.20230220105546-5162e18885c7 // indirect - github.com/iotaledger/hive.go/log v0.0.0-20231107225803-f89acd088c10 // indirect + github.com/iotaledger/hive.go/log v0.0.0-20231108050255-98e0fa35e936 // indirect github.com/ipfs/boxo v0.13.1 // indirect github.com/ipfs/go-cid v0.4.1 // indirect github.com/ipfs/go-datastore v0.6.0 // indirect @@ -169,7 +169,6 @@ require ( go.uber.org/mock v0.3.0 // indirect go.uber.org/multierr v1.11.0 // indirect go.uber.org/zap v1.26.0 // indirect - golang.org/x/exp v0.0.0-20231006140011-7918f672742d // indirect golang.org/x/image v0.13.0 // indirect golang.org/x/mod v0.13.0 // indirect golang.org/x/net v0.17.0 // indirect diff --git a/go.sum b/go.sum index 4b7298c9b..a5a51a465 100644 --- a/go.sum +++ b/go.sum @@ -275,40 +275,40 @@ github.com/iancoleman/orderedmap v0.3.0/go.mod h1:XuLcCUkdL5owUCQeF2Ue9uuw1EptkJ github.com/ianlancetaylor/demangle v0.0.0-20210905161508-09a460cdf81d/go.mod h1:aYm2/VgdVmcIU8iMfdMvDMsRAQjcfZSKFby6HOFvi/w= github.com/iotaledger/grocksdb v1.7.5-0.20230220105546-5162e18885c7 h1:dTrD7X2PTNgli6EbS4tV9qu3QAm/kBU3XaYZV2xdzys= github.com/iotaledger/grocksdb v1.7.5-0.20230220105546-5162e18885c7/go.mod h1:ZRdPu684P0fQ1z8sXz4dj9H5LWHhz4a9oCtvjunkSrw= -github.com/iotaledger/hive.go/ads v0.0.0-20231107225803-f89acd088c10 h1:M24zuxsCGccvksoanDZEjc8K3tWFyw7aZ2sbQK740pE= -github.com/iotaledger/hive.go/ads v0.0.0-20231107225803-f89acd088c10/go.mod h1:IFh0gDfeMgZtfCo+5afK59IDR4xXh+cTR9YtLnZPcbY= -github.com/iotaledger/hive.go/app v0.0.0-20231107225803-f89acd088c10 h1:wsUsKHP9meQsr1UPYASpN+QRa2NlWyhDbt0R310NccM= -github.com/iotaledger/hive.go/app v0.0.0-20231107225803-f89acd088c10/go.mod h1:8ZbIKR84oQd/3iQ5eeT7xpudO9/ytzXP7veIYnk7Orc= -github.com/iotaledger/hive.go/constraints v0.0.0-20231107225803-f89acd088c10 h1:CJ9nehCDKqFo3sJLMnybx0/AvmdXq6dau5qFr+pivUc= -github.com/iotaledger/hive.go/constraints v0.0.0-20231107225803-f89acd088c10/go.mod h1:dOBOM2s4se3HcWefPe8sQLUalGXJ8yVXw58oK8jke3s= -github.com/iotaledger/hive.go/core v1.0.0-rc.3.0.20231107225803-f89acd088c10 h1:FMassldB6buYv8nsfELSkKzR3mj326YNmLy4DNY+20o= -github.com/iotaledger/hive.go/core v1.0.0-rc.3.0.20231107225803-f89acd088c10/go.mod h1:Mc+ACqBGPxrPMIPUBOm6/HL0J6m0iVMwjtIEKW3uow8= -github.com/iotaledger/hive.go/crypto v0.0.0-20231107225803-f89acd088c10 h1:sGxsehUXmhWW5Vv9PBwuW1mlW2Npdb2yMonZgolVzHs= -github.com/iotaledger/hive.go/crypto v0.0.0-20231107225803-f89acd088c10/go.mod h1:h3o6okvMSEK3KOX6pOp3yq1h9ohTkTfo6X8MzEadeb0= -github.com/iotaledger/hive.go/ds v0.0.0-20231107225803-f89acd088c10 h1:NufkzT29n9OconEE6+8HMoCkW+MXiznGn+HxWrNPy1o= -github.com/iotaledger/hive.go/ds v0.0.0-20231107225803-f89acd088c10/go.mod h1:3XkUSKfHaVxGbT0XAvjNlVYqPzhfLTGhDtdNA5UBPco= -github.com/iotaledger/hive.go/ierrors v0.0.0-20231107225803-f89acd088c10 h1:M43fs0ybJXyVGnN55xG2OrMmXqbpYGudnU8zIA7NNL4= -github.com/iotaledger/hive.go/ierrors v0.0.0-20231107225803-f89acd088c10/go.mod h1:HcE8B5lP96enc/OALTb2/rIIi+yOLouRoHOKRclKmC8= -github.com/iotaledger/hive.go/kvstore v0.0.0-20231107225803-f89acd088c10 
h1:nGffY7n3mxUFtWrKgbvNx7jYu2lGkeu01hp+8aLaOk8= -github.com/iotaledger/hive.go/kvstore v0.0.0-20231107225803-f89acd088c10/go.mod h1:O/U3jtiUDeqqM0MZQFu2UPqS9fUm0C5hNISxlmg/thE= -github.com/iotaledger/hive.go/lo v0.0.0-20231107225803-f89acd088c10 h1:ME7iE4yKMYEvfkFvOPswAWsZaq7mLkKiGN88K1X1OBg= -github.com/iotaledger/hive.go/lo v0.0.0-20231107225803-f89acd088c10/go.mod h1:s4kzx9QY1MVWHJralj+3q5kI0eARtrJhphYD/iBbPfo= -github.com/iotaledger/hive.go/log v0.0.0-20231107225803-f89acd088c10 h1:yhDHLCtdpLSiv/kDDLDkJZcJispd1OUAWIYF7RXFQi4= -github.com/iotaledger/hive.go/log v0.0.0-20231107225803-f89acd088c10/go.mod h1:JvokzmpmFZPDskMlUqqjgHtD8usVJU4nAY/TNMGge8M= -github.com/iotaledger/hive.go/logger v0.0.0-20231107225803-f89acd088c10 h1:ajaTrqlYEjVbkIu2RTN+GKrQnbbbjoAFea2wLgj2B+c= -github.com/iotaledger/hive.go/logger v0.0.0-20231107225803-f89acd088c10/go.mod h1:aBfAfIB2GO/IblhYt5ipCbyeL9bXSNeAwtYVA3hZaHg= -github.com/iotaledger/hive.go/runtime v0.0.0-20231107225803-f89acd088c10 h1:5tPaO+hxPTBp5J7Ap2oIqzHEXmYbrh5Rfh4y2l5KaQQ= -github.com/iotaledger/hive.go/runtime v0.0.0-20231107225803-f89acd088c10/go.mod h1:jRw8yFipiPaqmTPHh7hTcxAP9u6pjRGpByS3REJKkbY= -github.com/iotaledger/hive.go/serializer/v2 v2.0.0-rc.1.0.20231107225803-f89acd088c10 h1:KahaknpEVnJCgyaawYzRVR0rcX2/iCXiUXHvSjlMqEA= -github.com/iotaledger/hive.go/serializer/v2 v2.0.0-rc.1.0.20231107225803-f89acd088c10/go.mod h1:SdK26z8/VhWtxaqCuQrufm80SELgowQPmu9T/8eUQ8g= -github.com/iotaledger/hive.go/stringify v0.0.0-20231107225803-f89acd088c10 h1:1BQfYB6hxWGTjrv70SP3xxThxTV8t1AqEVQRRr5dTJI= -github.com/iotaledger/hive.go/stringify v0.0.0-20231107225803-f89acd088c10/go.mod h1:FTo/UWzNYgnQ082GI9QVM9HFDERqf9rw9RivNpqrnTs= -github.com/iotaledger/inx-app v1.0.0-rc.3.0.20231031135002-4c79ea5193f5 h1:17JDzMKTMXKF3xys6gPURRddkZhg1LY+xwfhbr/sVqg= -github.com/iotaledger/inx-app v1.0.0-rc.3.0.20231031135002-4c79ea5193f5/go.mod h1:LsJvoBUVVnY7tkwwByCVtAwmp5bFXdyJNGU/+KVQJVM= -github.com/iotaledger/inx/go v1.0.0-rc.2.0.20231031134131-b6ad918dc1ac h1:c7R33+TQGMYP6pvLUQQaqpdDFl+GZbhAcfGMI0285fo= -github.com/iotaledger/inx/go v1.0.0-rc.2.0.20231031134131-b6ad918dc1ac/go.mod h1:qPuMUvCTaghsnYRDnRoRuztTyEKFlmi2S7gb44rH7WM= -github.com/iotaledger/iota.go/v4 v4.0.0-20231102113728-20b8d01e826e h1:ZYRC1MHn/ghsqtjIpYGTxLQrh5n5eUmC0/YWnJiTRhk= -github.com/iotaledger/iota.go/v4 v4.0.0-20231102113728-20b8d01e826e/go.mod h1:jqbLYq4a/FwuiPBqFfkAwwxU8vs3+kReRq2/tyX5qRA= +github.com/iotaledger/hive.go/ads v0.0.0-20231108050255-98e0fa35e936 h1:2r4FgIGdc2lHcIbXiUFCCVq4+B0oZk9t6Z0SSLjrzCE= +github.com/iotaledger/hive.go/ads v0.0.0-20231108050255-98e0fa35e936/go.mod h1:gbUvr01B5ha530GnNm8K2OsHXOd2BtzBYOMxyTX3iDg= +github.com/iotaledger/hive.go/app v0.0.0-20231108050255-98e0fa35e936 h1:SnmQt9GxrWIvpW7pgQS049x1b8T+lQutTQbo35FImug= +github.com/iotaledger/hive.go/app v0.0.0-20231108050255-98e0fa35e936/go.mod h1:+riYmeLApkLlj4+EpuJpEJAsj/KGfD7cqLGy7oTsPOM= +github.com/iotaledger/hive.go/constraints v0.0.0-20231108050255-98e0fa35e936 h1:qkq0Wz+Y3J8QYRLd0fwTgHuur/A3k7d82BxOKSfvk8c= +github.com/iotaledger/hive.go/constraints v0.0.0-20231108050255-98e0fa35e936/go.mod h1:dOBOM2s4se3HcWefPe8sQLUalGXJ8yVXw58oK8jke3s= +github.com/iotaledger/hive.go/core v1.0.0-rc.3.0.20231108050255-98e0fa35e936 h1:GtsYwcCqRomhMo190TPxBrOzs6YnVmqkmQgT/lJrJRo= +github.com/iotaledger/hive.go/core v1.0.0-rc.3.0.20231108050255-98e0fa35e936/go.mod h1:CdixkrB7VdQzEDlVuwsxPtsiJL/WXrQgz3PELIqlLko= +github.com/iotaledger/hive.go/crypto v0.0.0-20231108050255-98e0fa35e936 h1:Xeb4w0g0Kv2ZjdCZQqz8oiqAU5qAy8OXG8kGTXSPzuY= 
+github.com/iotaledger/hive.go/crypto v0.0.0-20231108050255-98e0fa35e936/go.mod h1:OQ9EVTTQT1mkO/16BgwSIyQlAhEg+Cptud/yutevWsI= +github.com/iotaledger/hive.go/ds v0.0.0-20231108050255-98e0fa35e936 h1:NtQLSS0Lq5qg/w5nbMpXrlQpmcK3KiOaQmgZWoRc4mM= +github.com/iotaledger/hive.go/ds v0.0.0-20231108050255-98e0fa35e936/go.mod h1:JE8cbZSvzbB5TrwXibg6M0B7ck35YxF30ItHBzQRlgc= +github.com/iotaledger/hive.go/ierrors v0.0.0-20231108050255-98e0fa35e936 h1:o5S4KUAwToOLXoYYRj9ZgqeDsFv1VRM4+Mni0Tdj2Ck= +github.com/iotaledger/hive.go/ierrors v0.0.0-20231108050255-98e0fa35e936/go.mod h1:HcE8B5lP96enc/OALTb2/rIIi+yOLouRoHOKRclKmC8= +github.com/iotaledger/hive.go/kvstore v0.0.0-20231108050255-98e0fa35e936 h1:kXKJQ8UvbA8kI0Jx0EnlXbwDeZFY8pEX0Q6KaOPsYlQ= +github.com/iotaledger/hive.go/kvstore v0.0.0-20231108050255-98e0fa35e936/go.mod h1:ytfKoHr/nF8u0y0G4mamfG0yjFtJiJVk0kgjnPOtsSY= +github.com/iotaledger/hive.go/lo v0.0.0-20231108050255-98e0fa35e936 h1:coXPklQ7JgqTXIUXh3b4OHml1VIvI8x7pQsjsES/u/s= +github.com/iotaledger/hive.go/lo v0.0.0-20231108050255-98e0fa35e936/go.mod h1:6Ee7i6b4tuTHuRYnPP8VUb0wr9XFI5qlqtnttBd9jRg= +github.com/iotaledger/hive.go/log v0.0.0-20231108050255-98e0fa35e936 h1:VBvGnsVwqhoT9zMyMIlK5fPmz6fsbiPZOwdU1E8WU7o= +github.com/iotaledger/hive.go/log v0.0.0-20231108050255-98e0fa35e936/go.mod h1:vzO4/wRkEJDEZb/9fD10oKU9k1bj4qLir2Uhl5U1FkM= +github.com/iotaledger/hive.go/logger v0.0.0-20231108050255-98e0fa35e936 h1:05EbTaladbyo7mD8yBaWYJh9P8u/TUTmrjVmcUjoW8A= +github.com/iotaledger/hive.go/logger v0.0.0-20231108050255-98e0fa35e936/go.mod h1:w1psHM2MuKsen1WdsPKrpqElYH7ZOQ+YdQIgJZg4HTo= +github.com/iotaledger/hive.go/runtime v0.0.0-20231108050255-98e0fa35e936 h1:XbC1fmY87UJ/yMs8U2YqlUdJsqb0Xqj/ZYQKlZ7AUG8= +github.com/iotaledger/hive.go/runtime v0.0.0-20231108050255-98e0fa35e936/go.mod h1:DrZPvUvLarK8C2qb+3H2vdypp/MuhpQmB3iMJbDCr/Q= +github.com/iotaledger/hive.go/serializer/v2 v2.0.0-rc.1.0.20231108050255-98e0fa35e936 h1:LXhLW2cN9bQYoHQsgmJRb/jiRBRU5s2rLoCNjZfgHdg= +github.com/iotaledger/hive.go/serializer/v2 v2.0.0-rc.1.0.20231108050255-98e0fa35e936/go.mod h1:FoH3T6yKlZJp8xm8K+zsQiibSynp32v21CpWx8xkek8= +github.com/iotaledger/hive.go/stringify v0.0.0-20231108050255-98e0fa35e936 h1:Y4HgL5gm9S27usg5M2t6wi1BSdCxVorM62lwnpKuMd4= +github.com/iotaledger/hive.go/stringify v0.0.0-20231108050255-98e0fa35e936/go.mod h1:FTo/UWzNYgnQ082GI9QVM9HFDERqf9rw9RivNpqrnTs= +github.com/iotaledger/inx-app v1.0.0-rc.3.0.20231108104504-1445f545de82 h1:FdM1lxUKgENO3oOlF5blVqmjER44mLIHGpavyUOY5JI= +github.com/iotaledger/inx-app v1.0.0-rc.3.0.20231108104504-1445f545de82/go.mod h1:HVxkGPraMDTRudfG9AFN7Ga9gijp6skXB9TKNBc4KgI= +github.com/iotaledger/inx/go v1.0.0-rc.2.0.20231108104322-f301c3573998 h1:KkC0SaWrjSMg897r2DDosJYALFfLadFST3Fvoaxg7hw= +github.com/iotaledger/inx/go v1.0.0-rc.2.0.20231108104322-f301c3573998/go.mod h1:c+lBG3vgt2rgXHeOncK8hMllMwihTAtVbu790NslW2w= +github.com/iotaledger/iota.go/v4 v4.0.0-20231108103955-bf75d703d8aa h1:A2nadmSbmn62f6wtrqvv/TCCF2sDiiwyDnl6brbRo1E= +github.com/iotaledger/iota.go/v4 v4.0.0-20231108103955-bf75d703d8aa/go.mod h1:8iDORW4/e4NztyAGqjW07uSMjbhs7snbxw+81IWOczY= github.com/ipfs/boxo v0.13.1 h1:nQ5oQzcMZR3oL41REJDcTbrvDvuZh3J9ckc9+ILeRQI= github.com/ipfs/boxo v0.13.1/go.mod h1:btrtHy0lmO1ODMECbbEY1pxNtrLilvKSYLoGQt1yYCk= github.com/ipfs/go-cid v0.4.1 h1:A/T3qGvxi4kpKWWcPC/PgbvDA2bjVLO7n4UeVwnbs/s= @@ -507,8 +507,6 @@ github.com/opencontainers/runtime-spec v1.1.0/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/ github.com/opentracing/opentracing-go v1.2.0 
h1:uEJPy/1a5RIPAJ0Ov+OIO8OxWu77jEv+1B0VhjKrZUs= github.com/opentracing/opentracing-go v1.2.0/go.mod h1:GxEUsuufX4nBwe+T+Wl9TAgYrxe9dPLANfrWvHYVTgc= github.com/openzipkin/zipkin-go v0.1.1/go.mod h1:NtoC/o8u3JlF1lSlyPNswIbeQH9bJTmOf0Erfk+hxe8= -github.com/orcaman/writerseeker v0.0.0-20200621085525-1d3f536ff85e h1:s2RNOM/IGdY0Y6qfTeUKhDawdHDpK9RGBdx80qN4Ttw= -github.com/orcaman/writerseeker v0.0.0-20200621085525-1d3f536ff85e/go.mod h1:nBdnFKj15wFbf94Rwfq4m30eAcyY9V/IyKAGQFtqkW0= github.com/otiai10/copy v1.14.0 h1:dCI/t1iTdYGtkvCuBG2BgR6KZa83PTclw4U5n2wAllU= github.com/otiai10/copy v1.14.0/go.mod h1:ECfuL02W+/FkTWZWgQqXPWZgW9oeKCSQ5qVfSc4qc4w= github.com/otiai10/mint v1.5.1 h1:XaPLeE+9vGbuyEHem1JNk3bYc7KKqyI/na0/mLd/Kks= diff --git a/pkg/core/account/accounts.go b/pkg/core/account/accounts.go index 770a72de1..7cfdfa3bd 100644 --- a/pkg/core/account/accounts.go +++ b/pkg/core/account/accounts.go @@ -1,8 +1,6 @@ package account import ( - "bytes" - "encoding/binary" "io" "sync/atomic" @@ -10,7 +8,8 @@ import ( "github.com/iotaledger/hive.go/ds/shrinkingmap" "github.com/iotaledger/hive.go/ierrors" "github.com/iotaledger/hive.go/runtime/syncutils" - "github.com/iotaledger/hive.go/serializer/v2/marshalutil" + "github.com/iotaledger/hive.go/serializer/v2" + "github.com/iotaledger/hive.go/serializer/v2/stream" iotago "github.com/iotaledger/iota.go/v4" ) @@ -27,14 +26,9 @@ type Accounts struct { // NewAccounts creates a new Weights instance. func NewAccounts() *Accounts { - a := new(Accounts) - a.initialize() - - return a -} - -func (a *Accounts) initialize() { - a.accountPools = shrinkingmap.New[iotago.AccountID, *Pool]() + return &Accounts{ + accountPools: shrinkingmap.New[iotago.AccountID, *Pool](), + } } func (a *Accounts) Has(id iotago.AccountID) bool { @@ -137,91 +131,80 @@ func (a *Accounts) SelectCommittee(members ...iotago.AccountID) *SeatedAccounts } func AccountsFromBytes(b []byte) (*Accounts, int, error) { - return AccountsFromReader(bytes.NewReader(b)) -} - -func AccountsFromReader(readSeeker io.ReadSeeker) (*Accounts, int, error) { - a := new(Accounts) - n, err := a.readFromReadSeeker(readSeeker) + reader := stream.NewByteReader(b) - return a, n, err -} - -func (a *Accounts) readFromReadSeeker(reader io.ReadSeeker) (n int, err error) { - a.mutex.Lock() - defer a.mutex.Unlock() - - a.initialize() - - var accountCount uint32 - if err = binary.Read(reader, binary.LittleEndian, &accountCount); err != nil { - return n, ierrors.Wrap(err, "unable to read accounts count") + a, err := AccountsFromReader(reader) + if err != nil { + return nil, 0, ierrors.Wrap(err, "unable to read accounts from bytes") } - n += 4 - - for i := uint32(0); i < accountCount; i++ { - var accountID iotago.AccountID - if _, err = io.ReadFull(reader, accountID[:]); err != nil { - return 0, ierrors.Wrap(err, "unable to read accountID") - } - n += iotago.AccountIDLength + return a, reader.BytesRead(), nil +} - poolBytes := make([]byte, poolBytesLength) - if _, err = io.ReadFull(reader, poolBytes); err != nil { - return 0, ierrors.Wrap(err, "unable to read pool bytes") - } - n += poolBytesLength +func AccountsFromReader(reader io.Reader) (*Accounts, error) { + a := NewAccounts() - pool, c, err := PoolFromBytes(poolBytes) + if err := stream.ReadCollection(reader, serializer.SeriLengthPrefixTypeAsUint32, func(i int) error { + accountID, err := stream.Read[iotago.AccountID](reader) if err != nil { - return 0, ierrors.Wrap(err, "failed to parse pool") + return ierrors.Wrapf(err, "unable to read accountID at index %d", i) } - if c 
!= poolBytesLength { - return 0, ierrors.Wrap(err, "invalid pool bytes length") + pool, err := stream.ReadObject(reader, poolBytesLength, PoolFromBytes) + if err != nil { + return ierrors.Wrapf(err, "unable to read pool at index %d", i) } if err := a.setWithoutLocking(accountID, pool); err != nil { - return 0, ierrors.Wrapf(err, "failed to set pool for account %s", accountID.String()) + return ierrors.Wrapf(err, "failed to set pool for account %s", accountID.String()) } + + return nil + }); err != nil { + return nil, ierrors.Wrap(err, "failed to read account data") } - var reused bool - if err = binary.Read(reader, binary.LittleEndian, &reused); err != nil { - return n, ierrors.Wrap(err, "unable to read reused flag") + reused, err := stream.Read[bool](reader) + if err != nil { + return nil, ierrors.Wrap(err, "failed to read reused flag") } + a.reused.Store(reused) - n++ - return n, nil + return a, nil } -func (a *Accounts) Bytes() (bytes []byte, err error) { +func (a *Accounts) Bytes() ([]byte, error) { a.mutex.RLock() defer a.mutex.RUnlock() - m := marshalutil.New() + byteBuffer := stream.NewByteBuffer() - m.WriteUint32(uint32(a.accountPools.Size())) - var innerErr error - a.ForEach(func(id iotago.AccountID, pool *Pool) bool { - m.WriteBytes(id[:]) - poolBytes, err := pool.Bytes() - if err != nil { - innerErr = err - return false - } - m.WriteBytes(poolBytes) + if err := stream.WriteCollection(byteBuffer, serializer.SeriLengthPrefixTypeAsUint32, func() (elementsCount int, err error) { + var innerErr error + a.ForEach(func(id iotago.AccountID, pool *Pool) bool { + if innerErr = stream.Write(byteBuffer, id); innerErr != nil { + return false + } - return true - }) + if innerErr = stream.WriteObject(byteBuffer, pool, (*Pool).Bytes); innerErr != nil { + return false + } - m.WriteBool(a.reused.Load()) + return true + }) + if innerErr != nil { + return 0, innerErr + } + + return a.accountPools.Size(), nil + }); err != nil { + return nil, ierrors.Wrap(err, "failed to write accounts") + } - if innerErr != nil { - return nil, innerErr + if err := stream.Write(byteBuffer, a.reused.Load()); err != nil { + return nil, ierrors.Wrap(err, "failed to write reused flag") } - return m.Bytes(), nil + return byteBuffer.Bytes() } diff --git a/pkg/core/account/accounts_test.go b/pkg/core/account/accounts_test.go index ffc7dbe8e..49639141d 100644 --- a/pkg/core/account/accounts_test.go +++ b/pkg/core/account/accounts_test.go @@ -99,7 +99,7 @@ func TestAccounts(t *testing.T) { require.Equal(t, accounts, accounts2) // check "AccountsFromReader" - accounts3, _, err := account.AccountsFromReader(bytes.NewReader(accountBytes)) + accounts3, err := account.AccountsFromReader(bytes.NewReader(accountBytes)) require.NoError(t, err) // check if the new account is the same diff --git a/pkg/core/account/pool.go b/pkg/core/account/pool.go index 5815420db..44827eb11 100644 --- a/pkg/core/account/pool.go +++ b/pkg/core/account/pool.go @@ -2,11 +2,12 @@ package account import ( "github.com/iotaledger/hive.go/ierrors" - "github.com/iotaledger/hive.go/serializer/v2/marshalutil" + "github.com/iotaledger/hive.go/serializer/v2" + "github.com/iotaledger/hive.go/serializer/v2/stream" iotago "github.com/iotaledger/iota.go/v4" ) -const poolBytesLength = 3 * marshalutil.Uint64Size +const poolBytesLength = 3 * serializer.UInt64ByteSize // Pool represents all the data we need for a given validator and epoch to calculate its rewards data. 
type Pool struct { @@ -19,33 +20,35 @@ type Pool struct { func PoolFromBytes(bytes []byte) (*Pool, int, error) { p := new(Pool) - m := marshalutil.New(bytes) - poolStake, err := m.ReadUint64() - if err != nil { - return nil, m.ReadOffset(), ierrors.Wrap(err, "failed to parse pool stake") - } - p.PoolStake = iotago.BaseToken(poolStake) - validatorStake, err := m.ReadUint64() - if err != nil { - return nil, m.ReadOffset(), ierrors.Wrap(err, "failed to parse validator stake") - } - p.ValidatorStake = iotago.BaseToken(validatorStake) + var err error + byteReader := stream.NewByteReader(bytes) - fixedCost, err := m.ReadUint64() - if err != nil { - return nil, m.ReadOffset(), ierrors.Wrap(err, "failed to parse fixed cost") + if p.PoolStake, err = stream.Read[iotago.BaseToken](byteReader); err != nil { + return nil, 0, ierrors.Wrap(err, "failed to read PoolStake") + } + if p.ValidatorStake, err = stream.Read[iotago.BaseToken](byteReader); err != nil { + return nil, 0, ierrors.Wrap(err, "failed to read ValidatorStake") + } + if p.FixedCost, err = stream.Read[iotago.Mana](byteReader); err != nil { + return nil, 0, ierrors.Wrap(err, "failed to read FixedCost") } - p.FixedCost = iotago.Mana(fixedCost) - return p, m.ReadOffset(), nil + return p, byteReader.BytesRead(), nil } -func (p *Pool) Bytes() (bytes []byte, err error) { - m := marshalutil.New() - m.WriteUint64(uint64(p.PoolStake)) - m.WriteUint64(uint64(p.ValidatorStake)) - m.WriteUint64(uint64(p.FixedCost)) +func (p *Pool) Bytes() ([]byte, error) { + byteBuffer := stream.NewByteBuffer(poolBytesLength) + + if err := stream.Write(byteBuffer, p.PoolStake); err != nil { + return nil, ierrors.Wrap(err, "failed to write PoolStake") + } + if err := stream.Write(byteBuffer, p.ValidatorStake); err != nil { + return nil, ierrors.Wrap(err, "failed to write ValidatorStake") + } + if err := stream.Write(byteBuffer, p.FixedCost); err != nil { + return nil, ierrors.Wrap(err, "failed to write FixedCost") + } - return m.Bytes(), nil + return byteBuffer.Bytes() } diff --git a/pkg/model/account_diff.go b/pkg/model/account_diff.go index fadef8c74..80647fb5e 100644 --- a/pkg/model/account_diff.go +++ b/pkg/model/account_diff.go @@ -1,15 +1,10 @@ package model import ( - "bytes" - "context" - "encoding/binary" "io" "github.com/iotaledger/hive.go/ierrors" "github.com/iotaledger/hive.go/lo" - "github.com/iotaledger/hive.go/serializer/v2" - "github.com/iotaledger/hive.go/serializer/v2/marshalutil" "github.com/iotaledger/hive.go/serializer/v2/stream" iotago "github.com/iotaledger/iota.go/v4" ) @@ -18,7 +13,7 @@ import ( type AccountDiff struct { BICChange iotago.BlockIssuanceCredits - PreviousUpdatedTime iotago.SlotIndex + PreviousUpdatedSlot iotago.SlotIndex NewExpirySlot iotago.SlotIndex PreviousExpirySlot iotago.SlotIndex @@ -44,7 +39,7 @@ type AccountDiff struct { func NewAccountDiff() *AccountDiff { return &AccountDiff{ BICChange: 0, - PreviousUpdatedTime: 0, + PreviousUpdatedSlot: 0, NewExpirySlot: 0, PreviousExpirySlot: 0, NewOutputID: iotago.EmptyOutputID, @@ -60,37 +55,10 @@ func NewAccountDiff() *AccountDiff { } } -func (d AccountDiff) Bytes() ([]byte, error) { - m := marshalutil.New() - - m.WriteInt64(int64(d.BICChange)) - m.WriteUint32(uint32(d.PreviousUpdatedTime)) - m.WriteUint32(uint32(d.NewExpirySlot)) - m.WriteUint32(uint32(d.PreviousExpirySlot)) - m.WriteBytes(lo.PanicOnErr(d.NewOutputID.Bytes())) - m.WriteBytes(lo.PanicOnErr(d.PreviousOutputID.Bytes())) - - if err := writeBlockIssuerKeys(m, d.BlockIssuerKeysAdded); err != nil { - return nil, err - } - 
if err := writeBlockIssuerKeys(m, d.BlockIssuerKeysRemoved); err != nil { - return nil, err - } - - m.WriteInt64(d.ValidatorStakeChange) - m.WriteInt64(d.DelegationStakeChange) - m.WriteInt64(d.FixedCostChange) - m.WriteUint64(uint64(d.StakeEndEpochChange)) - m.WriteBytes(lo.PanicOnErr(d.NewLatestSupportedVersionAndHash.Bytes())) - m.WriteBytes(lo.PanicOnErr(d.PrevLatestSupportedVersionAndHash.Bytes())) - - return m.Bytes(), nil -} - func (d *AccountDiff) Clone() *AccountDiff { return &AccountDiff{ BICChange: d.BICChange, - PreviousUpdatedTime: d.PreviousUpdatedTime, + PreviousUpdatedSlot: d.PreviousUpdatedSlot, NewExpirySlot: d.NewExpirySlot, PreviousExpirySlot: d.PreviousExpirySlot, NewOutputID: d.NewOutputID, @@ -106,131 +74,113 @@ func (d *AccountDiff) Clone() *AccountDiff { } } -func (d *AccountDiff) FromBytes(b []byte) (int, error) { - return d.readFromReadSeeker(bytes.NewReader(b)) -} - -func (d *AccountDiff) FromReader(readSeeker io.ReadSeeker) error { - return lo.Return2(d.readFromReadSeeker(readSeeker)) -} +func (d *AccountDiff) Bytes() ([]byte, error) { + byteBuffer := stream.NewByteBuffer() -func (d *AccountDiff) readFromReadSeeker(reader io.ReadSeeker) (offset int, err error) { - if err = binary.Read(reader, binary.LittleEndian, &d.BICChange); err != nil { - return offset, ierrors.Wrap(err, "unable to read account BIC balance value in the diff") + if err := stream.Write(byteBuffer, d.BICChange); err != nil { + return nil, ierrors.Wrap(err, "unable to write BICChange value in the diff") } - offset += 8 - - if err = binary.Read(reader, binary.LittleEndian, &d.PreviousUpdatedTime); err != nil { - return offset, ierrors.Wrap(err, "unable to read previous updated time in the diff") + if err := stream.Write(byteBuffer, d.PreviousUpdatedSlot); err != nil { + return nil, ierrors.Wrap(err, "unable to write PreviousUpdatedSlot in the diff") } - offset += iotago.SlotIndexLength - - if err = binary.Read(reader, binary.LittleEndian, &d.NewExpirySlot); err != nil { - return offset, ierrors.Wrap(err, "unable to read new expiry slot in the diff") + if err := stream.Write(byteBuffer, d.NewExpirySlot); err != nil { + return nil, ierrors.Wrap(err, "unable to write NewExpirySlot in the diff") } - offset += iotago.SlotIndexLength - - if err = binary.Read(reader, binary.LittleEndian, &d.PreviousExpirySlot); err != nil { - return offset, ierrors.Wrap(err, "unable to read previous expiry slot in the diff") + if err := stream.Write(byteBuffer, d.PreviousExpirySlot); err != nil { + return nil, ierrors.Wrap(err, "unable to write PreviousExpirySlot in the diff") } - offset += iotago.SlotIndexLength - - if err = binary.Read(reader, binary.LittleEndian, &d.NewOutputID); err != nil { - return offset, ierrors.Wrap(err, "unable to read new outputID in the diff") + if err := stream.Write(byteBuffer, d.NewOutputID); err != nil { + return nil, ierrors.Wrap(err, "unable to write NewOutputID in the diff") } - offset += iotago.OutputIDLength - - if err = binary.Read(reader, binary.LittleEndian, &d.PreviousOutputID); err != nil { - return offset, ierrors.Wrap(err, "unable to read previous outputID in the diff") + if err := stream.Write(byteBuffer, d.PreviousOutputID); err != nil { + return nil, ierrors.Wrap(err, "unable to write PreviousOutputID in the diff") } - offset += iotago.OutputIDLength - keysAdded, bytesRead, err := readBlockIssuerKeys(reader) - if err != nil { - return offset, ierrors.Wrap(err, "unable to read added blockIssuerKeys in the diff") + if err := stream.WriteObject(byteBuffer, 
d.BlockIssuerKeysAdded, iotago.BlockIssuerKeys.Bytes); err != nil { + return nil, ierrors.Wrap(err, "unable to write added blockIssuerKeys in the diff") + } + if err := stream.WriteObject(byteBuffer, d.BlockIssuerKeysRemoved, iotago.BlockIssuerKeys.Bytes); err != nil { + return nil, ierrors.Wrap(err, "unable to write removed blockIssuerKeys in the diff") } - offset += bytesRead - - d.BlockIssuerKeysAdded = keysAdded - keysRemoved, bytesRead, err := readBlockIssuerKeys(reader) - if err != nil { - return offset, ierrors.Wrap(err, "unable to read removed blockIssuerKeys in the diff") + if err := stream.Write(byteBuffer, d.ValidatorStakeChange); err != nil { + return nil, ierrors.Wrap(err, "unable to write ValidatorStakeChange in the diff") + } + if err := stream.Write(byteBuffer, d.DelegationStakeChange); err != nil { + return nil, ierrors.Wrap(err, "unable to write DelegationStakeChange in the diff") + } + if err := stream.Write(byteBuffer, d.FixedCostChange); err != nil { + return nil, ierrors.Wrap(err, "unable to write FixedCostChange in the diff") + } + if err := stream.Write(byteBuffer, d.StakeEndEpochChange); err != nil { + return nil, ierrors.Wrap(err, "unable to write StakeEndEpochChange in the diff") + } + if err := stream.WriteObject(byteBuffer, d.NewLatestSupportedVersionAndHash, VersionAndHash.Bytes); err != nil { + return nil, ierrors.Wrap(err, "unable to write NewLatestSupportedVersionAndHash in the diff") + } + if err := stream.WriteObject(byteBuffer, d.PrevLatestSupportedVersionAndHash, VersionAndHash.Bytes); err != nil { + return nil, ierrors.Wrap(err, "unable to write PrevLatestSupportedVersionAndHash in the diff") } - offset += bytesRead - d.BlockIssuerKeysRemoved = keysRemoved + return byteBuffer.Bytes() +} - if err = binary.Read(reader, binary.LittleEndian, &d.ValidatorStakeChange); err != nil { - return offset, ierrors.Wrap(err, "unable to read validator stake change in the diff") - } - offset += 8 +func AccountDiffFromReader(reader io.ReadSeeker) (*AccountDiff, error) { + var err error + d := NewAccountDiff() - if err = binary.Read(reader, binary.LittleEndian, &d.DelegationStakeChange); err != nil { - return offset, ierrors.Wrap(err, "unable to read delegation stake change in the diff") + if d.BICChange, err = stream.Read[iotago.BlockIssuanceCredits](reader); err != nil { + return nil, ierrors.Wrap(err, "unable to read account BIC balance value in the diff") } - offset += 8 - - if err = binary.Read(reader, binary.LittleEndian, &d.FixedCostChange); err != nil { - return offset, ierrors.Wrap(err, "unable to read fixed cost change in the diff") + if d.PreviousUpdatedSlot, err = stream.Read[iotago.SlotIndex](reader); err != nil { + return nil, ierrors.Wrap(err, "unable to read previous updated time in the diff") } - offset += 8 - - if err = binary.Read(reader, binary.LittleEndian, &d.StakeEndEpochChange); err != nil { - return offset, ierrors.Wrap(err, "unable to read new stake end epoch in the diff") + if d.NewExpirySlot, err = stream.Read[iotago.SlotIndex](reader); err != nil { + return nil, ierrors.Wrap(err, "unable to read new expiry slot in the diff") } - offset += 8 - - newVersionAndHashBytes := make([]byte, VersionAndHashSize) - if err = binary.Read(reader, binary.LittleEndian, newVersionAndHashBytes); err != nil { - return offset, ierrors.Wrap(err, "unable to read new version and hash bytes in the diff") + if d.PreviousExpirySlot, err = stream.Read[iotago.SlotIndex](reader); err != nil { + return nil, ierrors.Wrap(err, "unable to read previous expiry slot in the 
diff") + } + if d.NewOutputID, err = stream.Read[iotago.OutputID](reader); err != nil { + return nil, ierrors.Wrap(err, "unable to read new outputID in the diff") } - d.NewLatestSupportedVersionAndHash, _, err = VersionAndHashFromBytes(newVersionAndHashBytes) - if err != nil { - return offset, ierrors.Wrap(err, "unable to parse new version and hash bytes in the diff") + if d.PreviousOutputID, err = stream.Read[iotago.OutputID](reader); err != nil { + return nil, ierrors.Wrap(err, "unable to read previous outputID in the diff") } - offset += len(newVersionAndHashBytes) - prevVersionAndHashBytes := make([]byte, VersionAndHashSize) - if err = binary.Read(reader, binary.LittleEndian, prevVersionAndHashBytes); err != nil { - return offset, ierrors.Wrap(err, "unable to read prev version and hash bytes in the diff") + if d.BlockIssuerKeysAdded, err = stream.ReadObjectFromReader(reader, iotago.BlockIssuerKeysFromReader); err != nil { + return nil, ierrors.Wrap(err, "unable to read added blockIssuerKeys in the diff") } - d.PrevLatestSupportedVersionAndHash, _, err = VersionAndHashFromBytes(prevVersionAndHashBytes) - if err != nil { - return offset, ierrors.Wrap(err, "unable to parse prev version and hash bytes in the diff") + if d.BlockIssuerKeysRemoved, err = stream.ReadObjectFromReader(reader, iotago.BlockIssuerKeysFromReader); err != nil { + return nil, ierrors.Wrap(err, "unable to read removed blockIssuerKeys in the diff") } - offset += len(prevVersionAndHashBytes) - - return offset, nil -} -func writeBlockIssuerKeys(m *marshalutil.MarshalUtil, blockIssuerKeys iotago.BlockIssuerKeys) error { - blockIssuerKeysBytes, err := iotago.CommonSerixAPI().Encode(context.TODO(), blockIssuerKeys) - if err != nil { - return ierrors.Wrap(err, "unable to encode blockIssuerKeys in the diff") + if d.ValidatorStakeChange, err = stream.Read[int64](reader); err != nil { + return nil, ierrors.Wrap(err, "unable to read validator stake change in the diff") + } + if d.DelegationStakeChange, err = stream.Read[int64](reader); err != nil { + return nil, ierrors.Wrap(err, "unable to read delegation stake change in the diff") + } + if d.FixedCostChange, err = stream.Read[int64](reader); err != nil { + return nil, ierrors.Wrap(err, "unable to read fixed cost change in the diff") + } + if d.StakeEndEpochChange, err = stream.Read[int64](reader); err != nil { + return nil, ierrors.Wrap(err, "unable to read new stake end epoch in the diff") + } + if d.NewLatestSupportedVersionAndHash, err = stream.ReadObject(reader, VersionAndHashSize, VersionAndHashFromBytes); err != nil { + return nil, ierrors.Wrap(err, "unable to read new latest supported version and hash in the diff") + } + if d.PrevLatestSupportedVersionAndHash, err = stream.ReadObject(reader, VersionAndHashSize, VersionAndHashFromBytes); err != nil { + return nil, ierrors.Wrap(err, "unable to read prev latest supported version and hash in the diff") } - m.WriteUint64(uint64(len(blockIssuerKeysBytes))) - m.WriteBytes(blockIssuerKeysBytes) - - return nil + return d, nil } -func readBlockIssuerKeys(reader io.ReadSeeker) (iotago.BlockIssuerKeys, int, error) { - var bytesConsumed int - - blockIssuerKeysBytes, err := stream.ReadBlob(reader) - if err != nil { - return nil, bytesConsumed, ierrors.Wrap(err, "unable to read blockIssuerKeysBytes in the diff") - } +func AccountDiffFromBytes(b []byte) (*AccountDiff, int, error) { + reader := stream.NewByteReader(b) - bytesConsumed += serializer.UInt64ByteSize // add the blob size - bytesConsumed += len(blockIssuerKeysBytes) - - var 
blockIssuerKeys iotago.BlockIssuerKeys - if _, err := iotago.CommonSerixAPI().Decode(context.TODO(), blockIssuerKeysBytes, &blockIssuerKeys); err != nil { - return nil, bytesConsumed, ierrors.Wrap(err, "unable to decode blockIssuerKeys in the diff") - } + a, err := AccountDiffFromReader(reader) - return blockIssuerKeys, bytesConsumed, nil + return a, reader.BytesRead(), err } diff --git a/pkg/model/block.go b/pkg/model/block.go index a41c491c0..3ada4793f 100644 --- a/pkg/model/block.go +++ b/pkg/model/block.go @@ -77,6 +77,10 @@ func (blk *Block) ID() iotago.BlockID { return blk.blockID } +func (blk *Block) SlotCommitmentID() iotago.CommitmentID { + return blk.block.Header.SlotCommitmentID +} + func (blk *Block) Data() []byte { return blk.data } diff --git a/pkg/model/commitment.go b/pkg/model/commitment.go index cf17c8c4f..f80f41824 100644 --- a/pkg/model/commitment.go +++ b/pkg/model/commitment.go @@ -21,7 +21,7 @@ type Commitment struct { } func NewEmptyCommitment(api iotago.API) *Commitment { - emptyCommitment := iotago.NewEmptyCommitment(api.ProtocolParameters().Version()) + emptyCommitment := iotago.NewEmptyCommitment(api) emptyCommitment.ReferenceManaCost = api.ProtocolParameters().CongestionControlParameters().MinReferenceManaCost return lo.PanicOnErr(CommitmentFromCommitment(emptyCommitment, api)) @@ -50,28 +50,39 @@ func CommitmentFromCommitment(iotaCommitment *iotago.Commitment, api iotago.API, return newCommitment(commitmentID, iotaCommitment, data, api) } -func CommitmentFromBytes(data []byte, apiProvider iotago.APIProvider, opts ...serix.Option) (*Commitment, error) { - version, _, err := iotago.VersionFromBytes(data) - if err != nil { - return nil, ierrors.Wrap(err, "failed to determine version") - } - - apiForVersion, err := apiProvider.APIForVersion(version) - if err != nil { - return nil, ierrors.Wrapf(err, "failed to get API for version %d", version) +func CommitmentFromBytes(apiProvider iotago.APIProvider) func([]byte) (*Commitment, int, error) { + return func(bytes []byte) (*Commitment, int, error) { + totalBytesRead := 0 + + // We read the version byte here to determine the API to use, but then we decode the entire commitment again. + // Thus, we don't count the version byte as read bytes. 
+ version, _, err := iotago.VersionFromBytes(bytes) + if err != nil { + return nil, 0, ierrors.Wrap(err, "failed to determine version") + } + + apiForVersion, err := apiProvider.APIForVersion(version) + if err != nil { + return nil, 0, ierrors.Wrapf(err, "failed to get API for version %d", version) + } + + iotaCommitment := new(iotago.Commitment) + if totalBytesRead, err = apiForVersion.Decode(bytes, iotaCommitment, serix.WithValidation()); err != nil { + return nil, 0, ierrors.Wrap(err, "failed to decode commitment") + } + + commitmentID, err := iotaCommitment.ID() + if err != nil { + return nil, 0, ierrors.Wrap(err, "failed to determine commitment ID") + } + + commitment, err := newCommitment(commitmentID, iotaCommitment, bytes, apiForVersion) + if err != nil { + return nil, 0, ierrors.Wrap(err, "failed to create commitment") + } + + return commitment, totalBytesRead, nil } - - iotaCommitment := new(iotago.Commitment) - if _, err := apiForVersion.Decode(data, iotaCommitment, opts...); err != nil { - return nil, err - } - - commitmentID, err := iotaCommitment.ID() - if err != nil { - return nil, err - } - - return newCommitment(commitmentID, iotaCommitment, data, apiForVersion) } func (c *Commitment) ID() iotago.CommitmentID { @@ -102,6 +113,10 @@ func (c *Commitment) Data() []byte { return c.data } +func (c *Commitment) Bytes() ([]byte, error) { + return c.data, nil +} + func (c *Commitment) Commitment() *iotago.Commitment { return c.commitment } diff --git a/pkg/model/poolstats.go b/pkg/model/poolstats.go index b8ce0a5c0..eada06d32 100644 --- a/pkg/model/poolstats.go +++ b/pkg/model/poolstats.go @@ -1,8 +1,10 @@ package model import ( + "io" + "github.com/iotaledger/hive.go/ierrors" - "github.com/iotaledger/hive.go/serializer/v2/marshalutil" + "github.com/iotaledger/hive.go/serializer/v2/stream" iotago "github.com/iotaledger/iota.go/v4" ) @@ -13,36 +15,48 @@ type PoolsStats struct { ProfitMargin uint64 } -func PoolsStatsFromBytes(bytes []byte) (*PoolsStats, int, error) { +func PoolStatsFromReader(reader io.ReadSeeker) (*PoolsStats, error) { p := new(PoolsStats) - m := marshalutil.New(bytes) - totalStake, err := m.ReadUint64() - if err != nil { - return nil, m.ReadOffset(), ierrors.Wrap(err, "failed to parse total stake") - } - p.TotalStake = iotago.BaseToken(totalStake) - totalValidatorStake, err := m.ReadUint64() - if err != nil { - return nil, m.ReadOffset(), ierrors.Wrap(err, "failed to parse total validator stake") + var err error + if p.TotalStake, err = stream.Read[iotago.BaseToken](reader); err != nil { + return nil, ierrors.Wrap(err, "failed to read TotalStake") + } + if p.TotalValidatorStake, err = stream.Read[iotago.BaseToken](reader); err != nil { + return nil, ierrors.Wrap(err, "failed to read TotalValidatorStake") + } + if p.ProfitMargin, err = stream.Read[uint64](reader); err != nil { + return nil, ierrors.Wrap(err, "failed to read ProfitMargin") } - p.TotalValidatorStake = iotago.BaseToken(totalValidatorStake) - p.ProfitMargin, err = m.ReadUint64() + return p, nil +} + +func PoolsStatsFromBytes(bytes []byte) (*PoolsStats, int, error) { + byteReader := stream.NewByteReader(bytes) + + p, err := PoolStatsFromReader(byteReader) if err != nil { - return nil, m.ReadOffset(), ierrors.Wrap(err, "failed to parse profit margin") + return nil, 0, ierrors.Wrap(err, "failed to parse PoolStats") } - return p, m.ReadOffset(), nil + return p, byteReader.BytesRead(), nil } func (p *PoolsStats) Bytes() ([]byte, error) { - m := marshalutil.New() - m.WriteUint64(uint64(p.TotalStake)) - 
m.WriteUint64(uint64(p.TotalValidatorStake)) - m.WriteUint64(p.ProfitMargin) + byteBuffer := stream.NewByteBuffer() + + if err := stream.Write(byteBuffer, p.TotalStake); err != nil { + return nil, ierrors.Wrap(err, "failed to write TotalStake") + } + if err := stream.Write(byteBuffer, p.TotalValidatorStake); err != nil { + return nil, ierrors.Wrap(err, "failed to write TotalValidatorStake") + } + if err := stream.Write(byteBuffer, p.ProfitMargin); err != nil { + return nil, ierrors.Wrap(err, "failed to write ProfitMargin") + } - return m.Bytes(), nil + return byteBuffer.Bytes() } type PoolRewards struct { @@ -54,36 +68,46 @@ type PoolRewards struct { FixedCost iotago.Mana } -func PoolRewardsFromBytes(bytes []byte) (*PoolRewards, int, error) { +func PoolRewardsFromReader(reader io.ReadSeeker) (*PoolRewards, error) { + var err error p := new(PoolRewards) - m := marshalutil.New(bytes) - poolStake, err := m.ReadUint64() - if err != nil { - return nil, m.ReadOffset(), ierrors.Wrap(err, "failed to parse pool stake") + if p.PoolStake, err = stream.Read[iotago.BaseToken](reader); err != nil { + return nil, ierrors.Wrap(err, "failed to read PoolStake") } - p.PoolStake = iotago.BaseToken(poolStake) - - poolRewards, err := m.ReadUint64() - if err != nil { - return nil, m.ReadOffset(), ierrors.Wrap(err, "failed to parse pool rewards") + if p.PoolRewards, err = stream.Read[iotago.Mana](reader); err != nil { + return nil, ierrors.Wrap(err, "failed to read PoolRewards") + } + if p.FixedCost, err = stream.Read[iotago.Mana](reader); err != nil { + return nil, ierrors.Wrap(err, "failed to read FixedCost") } - p.PoolRewards = iotago.Mana(poolRewards) - fixedCost, err := m.ReadUint64() + return p, nil +} + +func PoolRewardsFromBytes(bytes []byte) (*PoolRewards, int, error) { + byteReader := stream.NewByteReader(bytes) + + p, err := PoolRewardsFromReader(byteReader) if err != nil { - return nil, m.ReadOffset(), ierrors.Wrap(err, "failed to parse fixed cost") + return nil, 0, ierrors.Wrap(err, "failed to parse PoolRewards") } - p.FixedCost = iotago.Mana(fixedCost) - return p, m.ReadOffset(), nil + return p, byteReader.BytesRead(), nil } func (p *PoolRewards) Bytes() ([]byte, error) { - m := marshalutil.New() - m.WriteUint64(uint64(p.PoolStake)) - m.WriteUint64(uint64(p.PoolRewards)) - m.WriteUint64(uint64(p.FixedCost)) + byteBuffer := stream.NewByteBuffer() + + if err := stream.Write(byteBuffer, p.PoolStake); err != nil { + return nil, ierrors.Wrap(err, "failed to write PoolStake") + } + if err := stream.Write(byteBuffer, p.PoolRewards); err != nil { + return nil, ierrors.Wrap(err, "failed to write PoolRewards") + } + if err := stream.Write(byteBuffer, p.FixedCost); err != nil { + return nil, ierrors.Wrap(err, "failed to write FixedCost") + } - return m.Bytes(), nil + return byteBuffer.Bytes() } diff --git a/pkg/model/validator_performance.go b/pkg/model/validator_performance.go index bd72a1be1..c121cfc23 100644 --- a/pkg/model/validator_performance.go +++ b/pkg/model/validator_performance.go @@ -1,37 +1,68 @@ package model import ( - iotago "github.com/iotaledger/iota.go/v4" + "io" + + "github.com/iotaledger/hive.go/ierrors" + "github.com/iotaledger/hive.go/serializer/v2/stream" ) type ValidatorPerformance struct { // works if ValidatorBlocksPerSlot is less than 32 because we use it as bit vector - SlotActivityVector uint32 `serix:"0"` + SlotActivityVector uint32 // can be uint8 because max count per slot is maximally ValidatorBlocksPerSlot + 1 - BlockIssuedCount uint8 `serix:"1"` - 
HighestSupportedVersionAndHash VersionAndHash `serix:"2"` + BlocksIssuedCount uint8 + HighestSupportedVersionAndHash VersionAndHash } func NewValidatorPerformance() *ValidatorPerformance { return &ValidatorPerformance{ SlotActivityVector: 0, - BlockIssuedCount: 0, + BlocksIssuedCount: 0, HighestSupportedVersionAndHash: VersionAndHash{}, } } -func ValidatorPerformanceFromBytes(decodeAPI iotago.API) func([]byte) (*ValidatorPerformance, int, error) { - return func(bytes []byte) (*ValidatorPerformance, int, error) { - validatorPerformance := new(ValidatorPerformance) - consumedBytes, err := decodeAPI.Decode(bytes, validatorPerformance) - if err != nil { - return nil, 0, err - } +func ValidatorPerformanceFromBytes(bytes []byte) (*ValidatorPerformance, int, error) { + byteReader := stream.NewByteReader(bytes) + + v, err := ValidatorPerformanceFromReader(byteReader) + if err != nil { + return nil, 0, ierrors.Wrap(err, "failed to parse ValidatorPerformance") + } - return validatorPerformance, consumedBytes, nil + return v, byteReader.BytesRead(), nil +} + +func ValidatorPerformanceFromReader(reader io.ReadSeeker) (*ValidatorPerformance, error) { + var err error + v := NewValidatorPerformance() + + if v.SlotActivityVector, err = stream.Read[uint32](reader); err != nil { + return nil, ierrors.Wrap(err, "failed to read SlotActivityVector") + } + if v.BlocksIssuedCount, err = stream.Read[uint8](reader); err != nil { + return nil, ierrors.Wrap(err, "failed to read BlocksIssuedCount") } + if v.HighestSupportedVersionAndHash, err = stream.ReadObject(reader, VersionAndHashSize, VersionAndHashFromBytes); err != nil { + return nil, ierrors.Wrap(err, "failed to read HighestSupportedVersionAndHash") + } + + return v, nil } -func (p *ValidatorPerformance) Bytes(api iotago.API) ([]byte, error) { - return api.Encode(p) +func (p *ValidatorPerformance) Bytes() ([]byte, error) { + byteBuffer := stream.NewByteBuffer() + + if err := stream.Write(byteBuffer, p.SlotActivityVector); err != nil { + return nil, ierrors.Wrap(err, "failed to write SlotActivityVector") + } + if err := stream.Write(byteBuffer, p.BlocksIssuedCount); err != nil { + return nil, ierrors.Wrap(err, "failed to write BlocksIssuedCount") + } + if err := stream.WriteObject(byteBuffer, p.HighestSupportedVersionAndHash, VersionAndHash.Bytes); err != nil { + return nil, ierrors.Wrap(err, "failed to write HighestSupportedVersionAndHash") + } + + return byteBuffer.Bytes() } diff --git a/pkg/model/version_and_hash.go b/pkg/model/version_and_hash.go index 8475cc04c..46747799d 100644 --- a/pkg/model/version_and_hash.go +++ b/pkg/model/version_and_hash.go @@ -7,7 +7,7 @@ import ( iotago "github.com/iotaledger/iota.go/v4" ) -const VersionAndHashSize = iotago.IdentifierLength + iotago.VersionLength +const VersionAndHashSize = iotago.VersionLength + iotago.IdentifierLength type VersionAndHash struct { Version iotago.Version `serix:"0"` diff --git a/pkg/network/peer.go b/pkg/network/peer.go index 4b5b0a298..18554f10d 100644 --- a/pkg/network/peer.go +++ b/pkg/network/peer.go @@ -10,7 +10,8 @@ import ( "github.com/iotaledger/hive.go/crypto/ed25519" "github.com/iotaledger/hive.go/ierrors" - "github.com/iotaledger/hive.go/serializer/v2/marshalutil" + "github.com/iotaledger/hive.go/serializer/v2" + "github.com/iotaledger/hive.go/serializer/v2/stream" ) const DefaultReconnectInterval = 5 * time.Second @@ -78,17 +79,29 @@ func (p *Peer) SetConnStatus(cs ConnectionStatus) { } func (p *Peer) Bytes() ([]byte, error) { - m := marshalutil.New() - 
m.WriteUint64(uint64(len(p.ID))) - m.WriteBytes([]byte(p.ID)) - m.WriteUint8(uint8(len(p.PeerAddresses))) - for _, addr := range p.PeerAddresses { - addrBytes := addr.Bytes() - m.WriteUint64(uint64(len(addrBytes))) - m.WriteBytes(addrBytes) + byteBuffer := stream.NewByteBuffer() + + if err := stream.WriteObjectWithSize(byteBuffer, p.ID, serializer.SeriLengthPrefixTypeAsUint16, func(id peer.ID) ([]byte, error) { + return []byte(id), nil + }); err != nil { + return nil, ierrors.Wrap(err, "failed to write peer ID") + } + + if err := stream.WriteCollection(byteBuffer, serializer.SeriLengthPrefixTypeAsByte, func() (elementsCount int, err error) { + for _, addr := range p.PeerAddresses { + if err = stream.WriteObjectWithSize(byteBuffer, addr, serializer.SeriLengthPrefixTypeAsUint16, func(m multiaddr.Multiaddr) ([]byte, error) { + return m.Bytes(), nil + }); err != nil { + return 0, ierrors.Wrap(err, "failed to write peer address") + } + } + + return len(p.PeerAddresses), nil + }); err != nil { + return nil, ierrors.Wrap(err, "failed to write peer addresses") } - return m.Bytes(), nil + return byteBuffer.Bytes() } func (p *Peer) String() string { @@ -97,46 +110,48 @@ func (p *Peer) String() string { // peerFromBytes parses a peer from a byte slice. func peerFromBytes(bytes []byte) (*Peer, error) { - m := marshalutil.New(bytes) - idLen, err := m.ReadUint64() - if err != nil { - return nil, err - } - idBytes, err := m.ReadBytes(int(idLen)) - if err != nil { - return nil, err - } - id := peer.ID(idBytes) - - peer := &Peer{ - ID: id, + p := &Peer{ PeerAddresses: make([]multiaddr.Multiaddr, 0), ConnStatus: &atomic.Value{}, RemoveCh: make(chan struct{}), DoneCh: make(chan struct{}), } - peer.SetConnStatus(ConnStatusDisconnected) + var err error + byteReader := stream.NewByteReader(bytes) - peerAddrLen, err := m.ReadUint8() - if err != nil { - return nil, err - } - for i := 0; i < int(peerAddrLen); i++ { - addrLen, err := m.ReadUint64() - if err != nil { - return nil, err - } - addrBytes, err := m.ReadBytes(int(addrLen)) + if p.ID, err = stream.ReadObjectWithSize(byteReader, serializer.SeriLengthPrefixTypeAsUint16, func(bytes []byte) (peer.ID, int, error) { + id, err := peer.IDFromBytes(bytes) if err != nil { - return nil, err + return "", 0, ierrors.Wrap(err, "failed to parse peerID") } - addr, err := multiaddr.NewMultiaddrBytes(addrBytes) + + return id, len(bytes), nil + }); err != nil { + return nil, ierrors.Wrap(err, "failed to read peer ID") + } + + p.SetConnStatus(ConnStatusDisconnected) + + if err = stream.ReadCollection(byteReader, serializer.SeriLengthPrefixTypeAsByte, func(i int) error { + addr, err := stream.ReadObjectWithSize(byteReader, serializer.SeriLengthPrefixTypeAsUint16, func(bytes []byte) (multiaddr.Multiaddr, int, error) { + m, err := multiaddr.NewMultiaddrBytes(bytes) + if err != nil { + return nil, 0, ierrors.Wrap(err, "failed to parse peer address") + } + + return m, len(bytes), nil + }) if err != nil { - return nil, err + return ierrors.Wrap(err, "failed to read peer address") } - peer.PeerAddresses = append(peer.PeerAddresses, addr) + + p.PeerAddresses = append(p.PeerAddresses, addr) + + return nil + }); err != nil { + return nil, ierrors.Wrap(err, "failed to read peer addresses") } - return peer, nil + return p, nil } diff --git a/pkg/network/protocols/core/events.go b/pkg/network/protocols/core/events.go index 8577388f6..4aa157fc1 100644 --- a/pkg/network/protocols/core/events.go +++ b/pkg/network/protocols/core/events.go @@ -17,7 +17,7 @@ type Events struct { 
AttestationsReceived *event.Event4[*model.Commitment, []*iotago.Attestation, *merklehasher.Proof[iotago.Identifier], peer.ID] AttestationsRequestReceived *event.Event2[iotago.CommitmentID, peer.ID] WarpSyncRequestReceived *event.Event2[iotago.CommitmentID, peer.ID] - WarpSyncResponseReceived *event.Event6[iotago.CommitmentID, iotago.BlockIDs, *merklehasher.Proof[iotago.Identifier], iotago.TransactionIDs, *merklehasher.Proof[iotago.Identifier], peer.ID] + WarpSyncResponseReceived *event.Event6[iotago.CommitmentID, map[iotago.CommitmentID]iotago.BlockIDs, *merklehasher.Proof[iotago.Identifier], iotago.TransactionIDs, *merklehasher.Proof[iotago.Identifier], peer.ID] Error *event.Event2[error, peer.ID] event.Group[Events, *Events] @@ -33,7 +33,7 @@ var NewEvents = event.CreateGroupConstructor(func() (newEvents *Events) { AttestationsReceived: event.New4[*model.Commitment, []*iotago.Attestation, *merklehasher.Proof[iotago.Identifier], peer.ID](), AttestationsRequestReceived: event.New2[iotago.CommitmentID, peer.ID](), WarpSyncRequestReceived: event.New2[iotago.CommitmentID, peer.ID](), - WarpSyncResponseReceived: event.New6[iotago.CommitmentID, iotago.BlockIDs, *merklehasher.Proof[iotago.Identifier], iotago.TransactionIDs, *merklehasher.Proof[iotago.Identifier], peer.ID](), + WarpSyncResponseReceived: event.New6[iotago.CommitmentID, map[iotago.CommitmentID]iotago.BlockIDs, *merklehasher.Proof[iotago.Identifier], iotago.TransactionIDs, *merklehasher.Proof[iotago.Identifier], peer.ID](), Error: event.New2[error, peer.ID](), } }) diff --git a/pkg/network/protocols/core/models/message.pb.go b/pkg/network/protocols/core/models/message.pb.go index 66dbe6a16..11faf96f1 100644 --- a/pkg/network/protocols/core/models/message.pb.go +++ b/pkg/network/protocols/core/models/message.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.30.0 -// protoc v4.23.4 +// protoc-gen-go v1.31.0 +// protoc v4.24.4 // source: pkg/network/protocols/core/models/message.proto package models @@ -535,11 +535,8 @@ type WarpSyncResponse struct { sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - CommitmentId []byte `protobuf:"bytes,1,opt,name=commitment_id,json=commitmentId,proto3" json:"commitment_id,omitempty"` - BlockIds []byte `protobuf:"bytes,2,opt,name=block_ids,json=blockIds,proto3" json:"block_ids,omitempty"` - TangleMerkleProof []byte `protobuf:"bytes,3,opt,name=tangle_merkle_proof,json=tangleMerkleProof,proto3" json:"tangle_merkle_proof,omitempty"` - TransactionIds []byte `protobuf:"bytes,4,opt,name=transaction_ids,json=transactionIds,proto3" json:"transaction_ids,omitempty"` - MutationsMerkleProof []byte `protobuf:"bytes,5,opt,name=mutations_merkle_proof,json=mutationsMerkleProof,proto3" json:"mutations_merkle_proof,omitempty"` + CommitmentId []byte `protobuf:"bytes,1,opt,name=commitment_id,json=commitmentId,proto3" json:"commitment_id,omitempty"` + Payload []byte `protobuf:"bytes,2,opt,name=payload,proto3" json:"payload,omitempty"` } func (x *WarpSyncResponse) Reset() { @@ -581,30 +578,9 @@ func (x *WarpSyncResponse) GetCommitmentId() []byte { return nil } -func (x *WarpSyncResponse) GetBlockIds() []byte { - if x != nil { - return x.BlockIds - } - return nil -} - -func (x *WarpSyncResponse) GetTangleMerkleProof() []byte { - if x != nil { - return x.TangleMerkleProof - } - return nil -} - -func (x *WarpSyncResponse) GetTransactionIds() []byte { - if x != nil { - return x.TransactionIds - } - return nil -} - -func (x *WarpSyncResponse) GetMutationsMerkleProof() []byte { +func (x *WarpSyncResponse) GetPayload() []byte { if x != nil { - return x.MutationsMerkleProof + return x.Payload } return nil } @@ -676,26 +652,17 @@ var file_pkg_network_protocols_core_models_message_proto_rawDesc = []byte{ 0x0a, 0x0f, 0x57, 0x61, 0x72, 0x70, 0x53, 0x79, 0x6e, 0x63, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x23, 0x0a, 0x0d, 0x63, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x6d, 0x65, 0x6e, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0c, 0x63, 0x6f, 0x6d, 0x6d, 0x69, 0x74, - 0x6d, 0x65, 0x6e, 0x74, 0x49, 0x64, 0x22, 0xe3, 0x01, 0x0a, 0x10, 0x57, 0x61, 0x72, 0x70, 0x53, - 0x79, 0x6e, 0x63, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x63, - 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x6d, 0x65, 0x6e, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x0c, 0x52, 0x0c, 0x63, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x6d, 0x65, 0x6e, 0x74, 0x49, 0x64, - 0x12, 0x1b, 0x0a, 0x09, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x5f, 0x69, 0x64, 0x73, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x0c, 0x52, 0x08, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x49, 0x64, 0x73, 0x12, 0x2e, 0x0a, - 0x13, 0x74, 0x61, 0x6e, 0x67, 0x6c, 0x65, 0x5f, 0x6d, 0x65, 0x72, 0x6b, 0x6c, 0x65, 0x5f, 0x70, - 0x72, 0x6f, 0x6f, 0x66, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x11, 0x74, 0x61, 0x6e, 0x67, - 0x6c, 0x65, 0x4d, 0x65, 0x72, 0x6b, 0x6c, 0x65, 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x12, 0x27, 0x0a, - 0x0f, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x64, 0x73, - 0x18, 0x04, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0e, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, - 0x69, 0x6f, 0x6e, 0x49, 0x64, 0x73, 0x12, 0x34, 0x0a, 0x16, 0x6d, 0x75, 0x74, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x73, 0x5f, 0x6d, 0x65, 0x72, 0x6b, 0x6c, 0x65, 0x5f, 0x70, 0x72, 0x6f, 0x6f, 0x66, - 0x18, 0x05, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x14, 
0x6d, 0x75, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x73, 0x4d, 0x65, 0x72, 0x6b, 0x6c, 0x65, 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x42, 0x43, 0x5a, 0x41, - 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x69, 0x6f, 0x74, 0x61, 0x6c, - 0x65, 0x64, 0x67, 0x65, 0x72, 0x2f, 0x69, 0x6f, 0x74, 0x61, 0x2d, 0x63, 0x6f, 0x72, 0x65, 0x2f, - 0x70, 0x6b, 0x67, 0x2f, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x2f, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x63, 0x6f, 0x6c, 0x73, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x6d, 0x6f, 0x64, 0x65, 0x6c, - 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x6d, 0x65, 0x6e, 0x74, 0x49, 0x64, 0x22, 0x51, 0x0a, 0x10, 0x57, 0x61, 0x72, 0x70, 0x53, 0x79, + 0x6e, 0x63, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x63, 0x6f, + 0x6d, 0x6d, 0x69, 0x74, 0x6d, 0x65, 0x6e, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x0c, 0x52, 0x0c, 0x63, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x6d, 0x65, 0x6e, 0x74, 0x49, 0x64, 0x12, + 0x18, 0x0a, 0x07, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, + 0x52, 0x07, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x42, 0x43, 0x5a, 0x41, 0x67, 0x69, 0x74, + 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x69, 0x6f, 0x74, 0x61, 0x6c, 0x65, 0x64, 0x67, + 0x65, 0x72, 0x2f, 0x69, 0x6f, 0x74, 0x61, 0x2d, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x70, 0x6b, 0x67, + 0x2f, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, + 0x6c, 0x73, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x6d, 0x6f, 0x64, 0x65, 0x6c, 0x73, 0x62, 0x06, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( diff --git a/pkg/network/protocols/core/models/message.proto b/pkg/network/protocols/core/models/message.proto index 11cfa0860..88b588189 100644 --- a/pkg/network/protocols/core/models/message.proto +++ b/pkg/network/protocols/core/models/message.proto @@ -49,8 +49,5 @@ message WarpSyncRequest { message WarpSyncResponse { bytes commitment_id = 1; - bytes block_ids = 2; - bytes tangle_merkle_proof = 3; - bytes transaction_ids = 4; - bytes mutations_merkle_proof = 5; + bytes payload = 2; } diff --git a/pkg/network/protocols/core/protocol.go b/pkg/network/protocols/core/protocol.go index 0ced4c663..6b6d21228 100644 --- a/pkg/network/protocols/core/protocol.go +++ b/pkg/network/protocols/core/protocol.go @@ -1,8 +1,6 @@ package core import ( - "encoding/binary" - "github.com/libp2p/go-libp2p/core/peer" "google.golang.org/protobuf/proto" @@ -14,8 +12,8 @@ import ( "github.com/iotaledger/hive.go/runtime/options" "github.com/iotaledger/hive.go/runtime/syncutils" "github.com/iotaledger/hive.go/runtime/workerpool" - "github.com/iotaledger/hive.go/serializer/v2/marshalutil" - "github.com/iotaledger/hive.go/serializer/v2/serix" + "github.com/iotaledger/hive.go/serializer/v2" + "github.com/iotaledger/hive.go/serializer/v2/stream" "github.com/iotaledger/iota-core/pkg/model" "github.com/iotaledger/iota-core/pkg/network" nwmodels "github.com/iotaledger/iota-core/pkg/network/protocols/core/models" @@ -73,16 +71,23 @@ func (p *Protocol) SendSlotCommitment(cm *model.Commitment, to ...peer.ID) { } func (p *Protocol) SendAttestations(cm *model.Commitment, attestations []*iotago.Attestation, merkleProof *merklehasher.Proof[iotago.Identifier], to ...peer.ID) { - encodedAttestations := marshalutil.New() - encodedAttestations.WriteUint32(uint32(len(attestations))) - for _, att := range attestations { - iotagoAPI := lo.PanicOnErr(p.apiProvider.APIForVersion(att.Header.ProtocolVersion)) - 
encodedAttestations.WriteBytes(lo.PanicOnErr(iotagoAPI.Encode(att))) + byteBuffer := stream.NewByteBuffer() + + if err := stream.WriteCollection(byteBuffer, serializer.SeriLengthPrefixTypeAsUint32, func() (elementsCount int, err error) { + for _, att := range attestations { + if err = stream.WriteObjectWithSize(byteBuffer, att, serializer.SeriLengthPrefixTypeAsUint16, (*iotago.Attestation).Bytes); err != nil { + return 0, ierrors.Wrapf(err, "failed to write attestation %v", att) + } + } + + return len(attestations), nil + }); err != nil { + panic(err) } p.network.Send(&nwmodels.Packet{Body: &nwmodels.Packet_Attestations{Attestations: &nwmodels.Attestations{ Commitment: cm.Data(), - Attestations: encodedAttestations.Bytes(), + Attestations: lo.PanicOnErr(byteBuffer.Bytes()), MerkleProof: lo.PanicOnErr(merkleProof.Bytes()), }}}, to...) } @@ -127,7 +132,7 @@ func (p *Protocol) handlePacket(nbr peer.ID, packet proto.Message) (err error) { case *nwmodels.Packet_WarpSyncRequest: p.handleWarpSyncRequest(packetBody.WarpSyncRequest.GetCommitmentId(), nbr) case *nwmodels.Packet_WarpSyncResponse: - p.handleWarpSyncResponse(packetBody.WarpSyncResponse.GetCommitmentId(), packetBody.WarpSyncResponse.GetBlockIds(), packetBody.WarpSyncResponse.GetTangleMerkleProof(), packetBody.WarpSyncResponse.GetTransactionIds(), packetBody.WarpSyncResponse.GetMutationsMerkleProof(), nbr) + p.handleWarpSyncResponse(packetBody.WarpSyncResponse.GetCommitmentId(), packetBody.WarpSyncResponse.GetPayload(), nbr) default: return ierrors.Errorf("unsupported packet; packet=%+v, packetBody=%T-%+v", packet, packetBody, packetBody) } @@ -172,7 +177,7 @@ func (p *Protocol) onBlockRequest(idBytes []byte, id peer.ID) { } func (p *Protocol) onSlotCommitment(commitmentBytes []byte, id peer.ID) { - receivedCommitment, err := model.CommitmentFromBytes(commitmentBytes, p.apiProvider, serix.WithValidation()) + receivedCommitment, err := lo.DropCount(model.CommitmentFromBytes(p.apiProvider)(commitmentBytes)) if err != nil { p.Events.Error.Trigger(ierrors.Wrap(err, "failed to deserialize slot commitment"), id) return } @@ -193,35 +198,38 @@ func (p *Protocol) onSlotCommitmentRequest(idBytes []byte, id peer.ID) { } func (p *Protocol) onAttestations(commitmentBytes []byte, attestationsBytes []byte, merkleProof []byte, id peer.ID) { - cm, err := model.CommitmentFromBytes(commitmentBytes, p.apiProvider, serix.WithValidation()) + cm, err := lo.DropCount(model.CommitmentFromBytes(p.apiProvider)(commitmentBytes)) if err != nil { p.Events.Error.Trigger(ierrors.Wrap(err, "failed to deserialize commitment"), id) return } - if len(attestationsBytes) < 4 { - p.Events.Error.Trigger(ierrors.Errorf("failed to deserialize attestations, invalid attestation count"), id) + reader := stream.NewByteReader(attestationsBytes) + + attestationsCount, err := stream.PeekSize(reader, serializer.SeriLengthPrefixTypeAsUint32) + if err != nil { + p.Events.Error.Trigger(ierrors.Errorf("failed to peek attestations count"), id) return } - attestationCount := binary.LittleEndian.Uint32(attestationsBytes[0:4]) - readOffset := 4 - attestations := make([]*iotago.Attestation, attestationCount) - for i := uint32(0); i < attestationCount; i++ { - attestation, consumed, err := iotago.AttestationFromBytes(p.apiProvider)(attestationsBytes[readOffset:]) + attestations := make([]*iotago.Attestation, attestationsCount) + if err := stream.ReadCollection(reader, serializer.SeriLengthPrefixTypeAsUint32, func(i int) error { + attestations[i], err = stream.ReadObjectWithSize(reader, 
serializer.SeriLengthPrefixTypeAsUint16, iotago.AttestationFromBytes(p.apiProvider)) if err != nil { - p.Events.Error.Trigger(ierrors.Wrap(err, "failed to deserialize attestations"), id) - - return + return ierrors.Wrapf(err, "failed to deserialize attestation %d", i) } - readOffset += consumed - attestations[i] = attestation + return nil + }); err != nil { + p.Events.Error.Trigger(ierrors.Wrap(err, "failed to deserialize attestations"), id) + + return } - if readOffset != len(attestationsBytes) { - p.Events.Error.Trigger(ierrors.Errorf("failed to deserialize attestations: %d bytes remaining", len(attestationsBytes)-readOffset), id) + + if reader.BytesRead() != len(attestationsBytes) { + p.Events.Error.Trigger(ierrors.Errorf("failed to deserialize attestations: %d bytes remaining", len(attestationsBytes)-reader.BytesRead()), id) return } diff --git a/pkg/network/protocols/core/warp_sync.go b/pkg/network/protocols/core/warp_sync.go index ae9206edc..ace474241 100644 --- a/pkg/network/protocols/core/warp_sync.go +++ b/pkg/network/protocols/core/warp_sync.go @@ -11,6 +11,13 @@ import ( "github.com/iotaledger/iota.go/v4/merklehasher" ) +type WarpSyncPayload struct { + BlockIDsBySlotCommitmentID map[iotago.CommitmentID]iotago.BlockIDs `serix:"0,lengthPrefixType=uint32"` + TangleMerkleProof *merklehasher.Proof[iotago.Identifier] `serix:"1"` + TransactionIDs iotago.TransactionIDs `serix:"2"` + MutationsMerkleProof *merklehasher.Proof[iotago.Identifier] `serix:"3"` +} + func (p *Protocol) SendWarpSyncRequest(id iotago.CommitmentID, to ...peer.ID) { p.network.Send(&nwmodels.Packet{Body: &nwmodels.Packet_WarpSyncRequest{ WarpSyncRequest: &nwmodels.WarpSyncRequest{ @@ -19,16 +26,20 @@ func (p *Protocol) SendWarpSyncRequest(id iotago.CommitmentID, to ...peer.ID) { }}, to...) } -func (p *Protocol) SendWarpSyncResponse(id iotago.CommitmentID, blockIDs iotago.BlockIDs, tangleMerkleProof *merklehasher.Proof[iotago.Identifier], transactionIDs iotago.TransactionIDs, mutationsMerkleProof *merklehasher.Proof[iotago.Identifier], to ...peer.ID) { +func (p *Protocol) SendWarpSyncResponse(id iotago.CommitmentID, blockIDsBySlotCommitmentID map[iotago.CommitmentID]iotago.BlockIDs, tangleMerkleProof *merklehasher.Proof[iotago.Identifier], transactionIDs iotago.TransactionIDs, mutationsMerkleProof *merklehasher.Proof[iotago.Identifier], to ...peer.ID) { serializer := p.apiProvider.APIForSlot(id.Slot()) + payload := &WarpSyncPayload{ + BlockIDsBySlotCommitmentID: blockIDsBySlotCommitmentID, + TangleMerkleProof: tangleMerkleProof, + TransactionIDs: transactionIDs, + MutationsMerkleProof: mutationsMerkleProof, + } + p.network.Send(&nwmodels.Packet{Body: &nwmodels.Packet_WarpSyncResponse{ WarpSyncResponse: &nwmodels.WarpSyncResponse{ - CommitmentId: lo.PanicOnErr(id.Bytes()), - BlockIds: lo.PanicOnErr(serializer.Encode(blockIDs)), - TangleMerkleProof: lo.PanicOnErr(tangleMerkleProof.Bytes()), - TransactionIds: lo.PanicOnErr(serializer.Encode(transactionIDs)), - MutationsMerkleProof: lo.PanicOnErr(mutationsMerkleProof.Bytes()), + CommitmentId: lo.PanicOnErr(id.Bytes()), + Payload: lo.PanicOnErr(serializer.Encode(payload)), }, }}, to...) 
} @@ -46,7 +57,7 @@ func (p *Protocol) handleWarpSyncRequest(commitmentIDBytes []byte, id peer.ID) { }) } -func (p *Protocol) handleWarpSyncResponse(commitmentIDBytes []byte, blockIDsBytes []byte, tangleMerkleProofBytes []byte, transactionIDsBytes []byte, mutationProofBytes []byte, id peer.ID) { +func (p *Protocol) handleWarpSyncResponse(commitmentIDBytes []byte, payloadBytes []byte, id peer.ID) { p.workerPool.Submit(func() { commitmentID, _, err := iotago.CommitmentIDFromBytes(commitmentIDBytes) if err != nil { @@ -55,34 +66,13 @@ func (p *Protocol) handleWarpSyncResponse(commitmentIDBytes []byte, blockIDsByte return } - var blockIDs iotago.BlockIDs - if _, err = p.apiProvider.APIForSlot(commitmentID.Slot()).Decode(blockIDsBytes, &blockIDs, serix.WithValidation()); err != nil { - p.Events.Error.Trigger(ierrors.Wrap(err, "failed to deserialize block ids"), id) - - return - } - - tangleMerkleProof, _, err := merklehasher.ProofFromBytes[iotago.Identifier](tangleMerkleProofBytes) - if err != nil { - p.Events.Error.Trigger(ierrors.Wrapf(err, "failed to deserialize merkle proof when receiving waprsync response for commitment %s", commitmentID), id) - - return - } - - var transactionIDs iotago.TransactionIDs - if _, err = p.apiProvider.APIForSlot(commitmentID.Slot()).Decode(transactionIDsBytes, &transactionIDs, serix.WithValidation()); err != nil { - p.Events.Error.Trigger(ierrors.Wrap(err, "failed to deserialize transaction ids"), id) - - return - } - - mutationProof, _, err := merklehasher.ProofFromBytes[iotago.Identifier](mutationProofBytes) - if err != nil { - p.Events.Error.Trigger(ierrors.Wrapf(err, "failed to deserialize merkle proof when receiving waprsync response for commitment %s", commitmentID), id) + payload := new(WarpSyncPayload) + if _, err = p.apiProvider.APIForSlot(commitmentID.Slot()).Decode(payloadBytes, payload, serix.WithValidation()); err != nil { + p.Events.Error.Trigger(ierrors.Wrap(err, "failed to deserialize payload"), id) return } - p.Events.WarpSyncResponseReceived.Trigger(commitmentID, blockIDs, tangleMerkleProof, transactionIDs, mutationProof, id) + p.Events.WarpSyncResponseReceived.Trigger(commitmentID, payload.BlockIDsBySlotCommitmentID, payload.TangleMerkleProof, payload.TransactionIDs, payload.MutationsMerkleProof, id) }) } diff --git a/pkg/protocol/block_dispatcher.go b/pkg/protocol/block_dispatcher.go index 15b2fcfa3..0a79a0d51 100644 --- a/pkg/protocol/block_dispatcher.go +++ b/pkg/protocol/block_dispatcher.go @@ -154,9 +154,9 @@ func (b *BlockDispatcher) initNetworkConnection() { }, b.warpSyncWorkers) }) - b.protocol.Events.Network.WarpSyncResponseReceived.Hook(func(commitmentID iotago.CommitmentID, blockIDs iotago.BlockIDs, tangleMerkleProof *merklehasher.Proof[iotago.Identifier], transactionIDs iotago.TransactionIDs, mutationMerkleProof *merklehasher.Proof[iotago.Identifier], src peer.ID) { + b.protocol.Events.Network.WarpSyncResponseReceived.Hook(func(commitmentID iotago.CommitmentID, blockIDsBySlotCommitmentID map[iotago.CommitmentID]iotago.BlockIDs, tangleMerkleProof *merklehasher.Proof[iotago.Identifier], transactionIDs iotago.TransactionIDs, mutationMerkleProof *merklehasher.Proof[iotago.Identifier], src peer.ID) { b.runTask(func() { - b.protocol.HandleError(b.processWarpSyncResponse(commitmentID, blockIDs, tangleMerkleProof, transactionIDs, mutationMerkleProof, src)) + b.protocol.HandleError(b.processWarpSyncResponse(commitmentID, blockIDsBySlotCommitmentID, tangleMerkleProof, transactionIDs, mutationMerkleProof, src)) }, b.warpSyncWorkers) }) } @@ 
-177,7 +177,7 @@ func (b *BlockDispatcher) processWarpSyncRequest(commitmentID iotago.CommitmentI return ierrors.Wrapf(err, "commitment ID mismatch: %s != %s", commitment.ID(), commitmentID) } - blockIDs, err := committedSlot.BlockIDs() + blocksIDsByCommitmentID, err := committedSlot.BlocksIDsBySlotCommitmentID() if err != nil { return ierrors.Wrapf(err, "failed to get block IDs from slot %d", commitmentID.Slot()) } @@ -192,13 +192,13 @@ func (b *BlockDispatcher) processWarpSyncRequest(commitmentID iotago.CommitmentI return ierrors.Wrapf(err, "failed to get roots from slot %d", commitmentID.Slot()) } - b.protocol.networkProtocol.SendWarpSyncResponse(commitmentID, blockIDs, roots.TangleProof(), transactionIDs, roots.MutationProof(), src) + b.protocol.networkProtocol.SendWarpSyncResponse(commitmentID, blocksIDsByCommitmentID, roots.TangleProof(), transactionIDs, roots.MutationProof(), src) return nil } // processWarpSyncResponse processes a WarpSync response. -func (b *BlockDispatcher) processWarpSyncResponse(commitmentID iotago.CommitmentID, blockIDs iotago.BlockIDs, tangleMerkleProof *merklehasher.Proof[iotago.Identifier], transactionIDs iotago.TransactionIDs, mutationMerkleProof *merklehasher.Proof[iotago.Identifier], _ peer.ID) error { +func (b *BlockDispatcher) processWarpSyncResponse(commitmentID iotago.CommitmentID, blockIDsBySlotCommitmentID map[iotago.CommitmentID]iotago.BlockIDs, tangleMerkleProof *merklehasher.Proof[iotago.Identifier], transactionIDs iotago.TransactionIDs, mutationMerkleProof *merklehasher.Proof[iotago.Identifier], _ peer.ID) error { if b.processedWarpSyncRequests.Has(commitmentID) { return nil } @@ -220,7 +220,20 @@ func (b *BlockDispatcher) processWarpSyncResponse(commitmentID iotago.Commitment return nil } - acceptedBlocks := ads.NewSet[iotago.Identifier, iotago.BlockID](mapdb.NewMapDB(), iotago.BlockID.Bytes, iotago.BlockIDFromBytes) + // Flatten all blockIDs into a single slice. + var blockIDs iotago.BlockIDs + for _, ids := range blockIDsBySlotCommitmentID { + blockIDs = append(blockIDs, ids...) + } + + acceptedBlocks := ads.NewSet[iotago.Identifier, iotago.BlockID]( + mapdb.NewMapDB(), + iotago.Identifier.Bytes, + iotago.IdentifierFromBytes, + iotago.BlockID.Bytes, + iotago.BlockIDFromBytes, + ) + for _, blockID := range blockIDs { _ = acceptedBlocks.Add(blockID) // a mapdb can newer return an error } @@ -229,7 +242,14 @@ func (b *BlockDispatcher) processWarpSyncResponse(commitmentID iotago.Commitment return ierrors.Errorf("failed to verify tangle merkle proof for %s", commitmentID) } - acceptedTransactionIDs := ads.NewSet[iotago.Identifier, iotago.TransactionID](mapdb.NewMapDB(), iotago.TransactionID.Bytes, iotago.TransactionIDFromBytes) + acceptedTransactionIDs := ads.NewSet[iotago.Identifier, iotago.TransactionID]( + mapdb.NewMapDB(), + iotago.Identifier.Bytes, + iotago.IdentifierFromBytes, + iotago.TransactionID.Bytes, + iotago.TransactionIDFromBytes, + ) + for _, transactionID := range transactionIDs { _ = acceptedTransactionIDs.Add(transactionID) // a mapdb can never return an error } @@ -326,19 +346,21 @@ func (b *BlockDispatcher) processWarpSyncResponse(commitmentID iotago.Commitment return nil } - for _, blockID := range blockIDs { - block, _ := targetEngine.BlockDAG.GetOrRequestBlock(blockID) - if block == nil { // this should never happen as we're requesting the blocks for this slot so it can't be evicted. 
- b.protocol.HandleError(ierrors.Errorf("failed to get block %s", blockID)) - continue - } + for slotCommitmentID, blockIDsForCommitment := range blockIDsBySlotCommitmentID { + for _, blockID := range blockIDsForCommitment { + block, _ := targetEngine.BlockDAG.GetOrRequestBlock(blockID) + if block == nil { // this should never happen as we're requesting the blocks for this slot, so they can't be evicted. + b.protocol.HandleError(ierrors.Errorf("failed to get block %s", blockID)) + continue + } - // We need to make sure that we add all blocks as root blocks because we don't know which blocks are root blocks without - // blocks from future slots. We're committing the current slot which then leads to the eviction of the blocks from the - // block cache and thus if not root blocks no block in the next slot can become solid. - targetEngine.EvictionState.AddRootBlock(block.ID(), block.SlotCommitmentID()) + // We need to make sure that we add all blocks as root blocks because we don't know which blocks are root blocks without + // blocks from future slots. We're committing the current slot, which then leads to the eviction of the blocks from the + // block cache, and thus, if they are not root blocks, no block in the next slot can become solid. + targetEngine.EvictionState.AddRootBlock(blockID, slotCommitmentID) - block.Booked().OnUpdate(blockBookedFunc) + block.Booked().OnUpdate(blockBookedFunc) + } } return nil diff --git a/pkg/protocol/commitment_verifier.go b/pkg/protocol/commitment_verifier.go index 7289fd86d..2172c92e7 100644 --- a/pkg/protocol/commitment_verifier.go +++ b/pkg/protocol/commitment_verifier.go @@ -41,31 +41,12 @@ func NewCommitmentVerifier(mainEngine *engine.Engine, lastCommonCommitmentBefore func (c *CommitmentVerifier) verifyCommitment(commitment *model.Commitment, attestations []*iotago.Attestation, merkleProof *merklehasher.Proof[iotago.Identifier]) (blockIDsFromAttestations iotago.BlockIDs, cumulativeWeight uint64, err error) { // 1. Verify that the provided attestations are indeed the ones that were included in the commitment. 
tree := ads.NewMap[iotago.Identifier](mapdb.NewMapDB(), + iotago.Identifier.Bytes, + iotago.IdentifierFromBytes, iotago.AccountID.Bytes, iotago.AccountIDFromBytes, - func(attestation *iotago.Attestation) ([]byte, error) { - apiForVersion, err := c.engine.APIForVersion(attestation.Header.ProtocolVersion) - if err != nil { - return nil, ierrors.Wrapf(err, "failed to get API for version %d", attestation.Header.ProtocolVersion) - } - - return apiForVersion.Encode(attestation) - }, - func(bytes []byte) (*iotago.Attestation, int, error) { - version, _, err := iotago.VersionFromBytes(bytes) - if err != nil { - return nil, 0, ierrors.Wrap(err, "failed to determine version") - } - - a := new(iotago.Attestation) - apiForVersion, err := c.engine.APIForVersion(version) - if err != nil { - return nil, 0, ierrors.Wrapf(err, "failed to get API for version %d", version) - } - n, err := apiForVersion.Decode(bytes, a) - - return a, n, err - }, + (*iotago.Attestation).Bytes, + iotago.AttestationFromBytes(c.engine), ) for _, att := range attestations { diff --git a/pkg/protocol/engine/accounts/accounts.go b/pkg/protocol/engine/accounts/accounts.go index 403f569c0..8acd50a53 100644 --- a/pkg/protocol/engine/accounts/accounts.go +++ b/pkg/protocol/engine/accounts/accounts.go @@ -1,15 +1,11 @@ package accounts import ( - "bytes" - "encoding/binary" "io" - "github.com/iotaledger/hive.go/crypto/ed25519" "github.com/iotaledger/hive.go/ierrors" - "github.com/iotaledger/hive.go/lo" "github.com/iotaledger/hive.go/runtime/options" - "github.com/iotaledger/hive.go/serializer/v2/marshalutil" + "github.com/iotaledger/hive.go/serializer/v2/stream" "github.com/iotaledger/iota-core/pkg/model" iotago "github.com/iotaledger/iota.go/v4" ) @@ -64,7 +60,7 @@ func (a *AccountData) Clone() *AccountData { ID: a.ID, Credits: &BlockIssuanceCredits{ Value: a.Credits.Value, - UpdateTime: a.Credits.UpdateTime, + UpdateSlot: a.Credits.UpdateSlot, }, ExpirySlot: a.ExpirySlot, OutputID: a.OutputID, @@ -78,138 +74,96 @@ func (a *AccountData) Clone() *AccountData { } } -func (a *AccountData) FromBytes(b []byte) (int, error) { - return a.readFromReadSeeker(bytes.NewReader(b)) -} - -func (a *AccountData) FromReader(readSeeker io.ReadSeeker) error { - return lo.Return2(a.readFromReadSeeker(readSeeker)) -} - -func (a *AccountData) readFromReadSeeker(reader io.ReadSeeker) (int, error) { - var bytesConsumed int - - bytesRead, err := io.ReadFull(reader, a.ID[:]) +func AccountDataFromReader(reader io.ReadSeeker) (*AccountData, error) { + accountID, err := stream.Read[iotago.AccountID](reader) if err != nil { - return bytesConsumed, ierrors.Wrap(err, "unable to read accountID") + return nil, ierrors.Wrap(err, "unable to read accountID") } - bytesConsumed += bytesRead - - a.Credits = &BlockIssuanceCredits{} + a := NewAccountData(accountID) - if err := binary.Read(reader, binary.LittleEndian, &a.Credits.Value); err != nil { - return bytesConsumed, ierrors.Wrapf(err, "unable to read account balance value for accountID %s", a.ID) + if a.Credits, err = stream.ReadObject(reader, BlockIssuanceCreditsBytesLength, BlockIssuanceCreditsFromBytes); err != nil { + return nil, ierrors.Wrap(err, "unable to read credits") } - bytesConsumed += 8 - - if err := binary.Read(reader, binary.LittleEndian, &a.Credits.UpdateTime); err != nil { - return bytesConsumed, ierrors.Wrapf(err, "unable to read updatedTime for account balance for accountID %s", a.ID) + if a.ExpirySlot, err = stream.Read[iotago.SlotIndex](reader); err != nil { + return nil, ierrors.Wrap(err, "unable to 
read expiry slot") } - bytesConsumed += iotago.SlotIndexLength - - if err := binary.Read(reader, binary.LittleEndian, &a.ExpirySlot); err != nil { - return bytesConsumed, ierrors.Wrapf(err, "unable to read expiry slot for accountID %s", a.ID) - } - bytesConsumed += iotago.SlotIndexLength - - if err := binary.Read(reader, binary.LittleEndian, &a.OutputID); err != nil { - return bytesConsumed, ierrors.Wrapf(err, "unable to read outputID for accountID %s", a.ID) + if a.OutputID, err = stream.Read[iotago.OutputID](reader); err != nil { + return nil, ierrors.Wrap(err, "unable to read outputID") } - bytesConsumed += iotago.OutputIDLength - var blockIssuerKeyCount uint8 - if err := binary.Read(reader, binary.LittleEndian, &blockIssuerKeyCount); err != nil { - return bytesConsumed, ierrors.Wrapf(err, "unable to read blockIssuerKeyCount count for accountID %s", a.ID) + if a.BlockIssuerKeys, err = stream.ReadObjectFromReader(reader, iotago.BlockIssuerKeysFromReader); err != nil { + return nil, ierrors.Wrap(err, "unable to read block issuer keys") } - bytesConsumed++ - a.BlockIssuerKeys = iotago.NewBlockIssuerKeys() - for i := uint8(0); i < blockIssuerKeyCount; i++ { - var blockIssuerKeyType iotago.BlockIssuerKeyType - if err := binary.Read(reader, binary.LittleEndian, &blockIssuerKeyType); err != nil { - return bytesConsumed, ierrors.Wrapf(err, "unable to read block issuer key type for accountID %s", a.ID) - } - bytesConsumed++ - - switch blockIssuerKeyType { - case iotago.BlockIssuerKeyEd25519PublicKey: - var ed25519PublicKey ed25519.PublicKey - bytesRead, err = io.ReadFull(reader, ed25519PublicKey[:]) - if err != nil { - return bytesConsumed, ierrors.Wrapf(err, "unable to read public key index %d for accountID %s", i, a.ID) - } - bytesConsumed += bytesRead - a.BlockIssuerKeys.Add(iotago.Ed25519PublicKeyBlockIssuerKeyFromPublicKey(ed25519PublicKey)) - case iotago.BlockIssuerKeyPublicKeyHash: - var implicitAccountCreationAddress iotago.ImplicitAccountCreationAddress - bytesRead, err = io.ReadFull(reader, implicitAccountCreationAddress[:]) - if err != nil { - return bytesConsumed, ierrors.Wrapf(err, "unable to read address %d for accountID %s", i, a.ID) - } - bytesConsumed += bytesRead - a.BlockIssuerKeys.Add(iotago.Ed25519PublicKeyHashBlockIssuerKeyFromImplicitAccountCreationAddress(&implicitAccountCreationAddress)) - default: - return bytesConsumed, ierrors.Wrapf(err, "unsupported block issuer key type %d for accountID %s at offset %d", blockIssuerKeyType, a.ID, i) - } + if a.ValidatorStake, err = stream.Read[iotago.BaseToken](reader); err != nil { + return nil, ierrors.Wrap(err, "unable to read validator stake") } - if err := binary.Read(reader, binary.LittleEndian, &(a.ValidatorStake)); err != nil { - return bytesConsumed, ierrors.Wrapf(err, "unable to read validator stake for accountID %s", a.ID) + if a.DelegationStake, err = stream.Read[iotago.BaseToken](reader); err != nil { + return nil, ierrors.Wrap(err, "unable to read delegation stake") } - bytesConsumed += iotago.BaseTokenSize - if err := binary.Read(reader, binary.LittleEndian, &(a.DelegationStake)); err != nil { - return bytesConsumed, ierrors.Wrapf(err, "unable to read delegation stake for accountID %s", a.ID) + if a.FixedCost, err = stream.Read[iotago.Mana](reader); err != nil { + return nil, ierrors.Wrap(err, "unable to read fixed cost") } - bytesConsumed += iotago.BaseTokenSize - if err := binary.Read(reader, binary.LittleEndian, &(a.FixedCost)); err != nil { - return bytesConsumed, ierrors.Wrapf(err, "unable to read fixed cost for 
accountID %s", a.ID) + if a.StakeEndEpoch, err = stream.Read[iotago.EpochIndex](reader); err != nil { + return nil, ierrors.Wrap(err, "unable to read stake end epoch") } - bytesConsumed += iotago.ManaSize - if err := binary.Read(reader, binary.LittleEndian, &(a.StakeEndEpoch)); err != nil { - return bytesConsumed, ierrors.Wrapf(err, "unable to read stake end epoch for accountID %s", a.ID) + if a.LatestSupportedProtocolVersionAndHash, err = stream.ReadObject(reader, model.VersionAndHashSize, model.VersionAndHashFromBytes); err != nil { + return nil, ierrors.Wrap(err, "unable to read latest supported protocol version and hash") } - bytesConsumed += iotago.EpochIndexLength - versionAndHashBytes := make([]byte, model.VersionAndHashSize) - if err := binary.Read(reader, binary.LittleEndian, versionAndHashBytes); err != nil { - return bytesConsumed, ierrors.Wrapf(err, "unable to read latest supported protocol version for accountID %s", a.ID) - } + return a, nil +} - if a.LatestSupportedProtocolVersionAndHash, _, err = model.VersionAndHashFromBytes(versionAndHashBytes[:]); err != nil { - return 0, err - } +func AccountDataFromBytes(b []byte) (*AccountData, int, error) { + reader := stream.NewByteReader(b) - bytesConsumed += len(versionAndHashBytes) + a, err := AccountDataFromReader(reader) - return bytesConsumed, nil + return a, reader.BytesRead(), err } -func (a AccountData) Bytes() ([]byte, error) { - idBytes, err := a.ID.Bytes() - if err != nil { - return nil, ierrors.Wrap(err, "failed to marshal account id") +func (a *AccountData) Bytes() ([]byte, error) { + byteBuffer := stream.NewByteBuffer() + + if err := stream.Write(byteBuffer, a.ID); err != nil { + return nil, ierrors.Wrap(err, "failed to write AccountID") } - m := marshalutil.New() - m.WriteBytes(idBytes) - m.WriteBytes(lo.PanicOnErr(a.Credits.Bytes())) - m.WriteUint32(uint32(a.ExpirySlot)) - m.WriteBytes(lo.PanicOnErr(a.OutputID.Bytes())) - m.WriteByte(byte(len(a.BlockIssuerKeys))) - for _, key := range a.BlockIssuerKeys { - m.WriteBytes(key.Bytes()) + if err := stream.WriteObject(byteBuffer, a.Credits, (*BlockIssuanceCredits).Bytes); err != nil { + return nil, ierrors.Wrap(err, "failed to write Credits") + } + if err := stream.Write(byteBuffer, a.ExpirySlot); err != nil { + return nil, ierrors.Wrap(err, "failed to write ExpirySlot") + } + if err := stream.Write(byteBuffer, a.OutputID); err != nil { + return nil, ierrors.Wrap(err, "failed to write OutputID") } - m.WriteUint64(uint64(a.ValidatorStake)) - m.WriteUint64(uint64(a.DelegationStake)) - m.WriteUint64(uint64(a.FixedCost)) - m.WriteUint32(uint32(a.StakeEndEpoch)) - m.WriteBytes(lo.PanicOnErr(a.LatestSupportedProtocolVersionAndHash.Bytes())) + if err := stream.WriteObject(byteBuffer, a.BlockIssuerKeys, iotago.BlockIssuerKeys.Bytes); err != nil { + return nil, ierrors.Wrap(err, "failed to write BlockIssuerKeys") + } + + if err := stream.Write(byteBuffer, a.ValidatorStake); err != nil { + return nil, ierrors.Wrap(err, "failed to write ValidatorStake") + } + if err := stream.Write(byteBuffer, a.DelegationStake); err != nil { + return nil, ierrors.Wrap(err, "failed to write DelegationStake") + } + if err := stream.Write(byteBuffer, a.FixedCost); err != nil { + return nil, ierrors.Wrap(err, "failed to write FixedCost") + } + if err := stream.Write(byteBuffer, a.StakeEndEpoch); err != nil { + return nil, ierrors.Wrap(err, "failed to write StakeEndEpoch") + } + if err := stream.WriteObject(byteBuffer, a.LatestSupportedProtocolVersionAndHash, model.VersionAndHash.Bytes); err != nil { + 
return nil, ierrors.Wrap(err, "failed to write LatestSupportedProtocolVersionAndHash") + } - return m.Bytes(), nil + return byteBuffer.Bytes() } func WithCredits(credits *BlockIssuanceCredits) options.Option[AccountData] { diff --git a/pkg/protocol/engine/accounts/accountsledger/manager.go b/pkg/protocol/engine/accounts/accountsledger/manager.go index 0d47ec95a..729295032 100644 --- a/pkg/protocol/engine/accounts/accountsledger/manager.go +++ b/pkg/protocol/engine/accounts/accountsledger/manager.go @@ -60,15 +60,13 @@ func New( blockBurns: shrinkingmap.New[iotago.SlotIndex, ds.Set[iotago.BlockID]](), latestSupportedVersionSignals: memstorage.NewIndexedStorage[iotago.SlotIndex, iotago.AccountID, *model.SignaledBlock](), accountsTree: ads.NewMap[iotago.Identifier](accountsStore, + iotago.Identifier.Bytes, + iotago.IdentifierFromBytes, iotago.AccountID.Bytes, iotago.AccountIDFromBytes, (*accounts.AccountData).Bytes, - func(bytes []byte) (object *accounts.AccountData, consumed int, err error) { - a := new(accounts.AccountData) - consumed, err = a.FromBytes(bytes) - - return a, consumed, err - }), + accounts.AccountDataFromBytes, + ), block: blockFunc, slotDiff: slotDiffFunc, } @@ -379,7 +377,7 @@ func (m *Manager) rollbackAccountTo(accountData *accounts.AccountData, targetSlo } // update the account data with the diff - accountData.Credits.Update(-diffChange.BICChange, diffChange.PreviousUpdatedTime) + accountData.Credits.Update(-diffChange.BICChange, diffChange.PreviousUpdatedSlot) // update the expiry slot of the account if it was changed if diffChange.PreviousExpirySlot != diffChange.NewExpirySlot { accountData.ExpirySlot = diffChange.PreviousExpirySlot @@ -444,7 +442,7 @@ func (m *Manager) preserveDestroyedAccountData(accountID iotago.AccountID) (acco slotDiff.PreviousExpirySlot = accountData.ExpirySlot slotDiff.NewOutputID = iotago.EmptyOutputID slotDiff.PreviousOutputID = accountData.OutputID - slotDiff.PreviousUpdatedTime = accountData.Credits.UpdateTime + slotDiff.PreviousUpdatedSlot = accountData.Credits.UpdateSlot slotDiff.BlockIssuerKeysRemoved = accountData.BlockIssuerKeys.Clone() slotDiff.ValidatorStakeChange = -int64(accountData.ValidatorStake) @@ -519,7 +517,7 @@ func (m *Manager) commitAccountTree(slot iotago.SlotIndex, accountDiffChanges ma if diffChange.BICChange != 0 || !exists { // decay the credits to the current slot if the account exists if exists { - decayedPreviousCredits, err := m.apiProvider.APIForSlot(slot).ManaDecayProvider().ManaWithDecay(iotago.Mana(accountData.Credits.Value), accountData.Credits.UpdateTime, slot) + decayedPreviousCredits, err := m.apiProvider.APIForSlot(slot).ManaDecayProvider().ManaWithDecay(iotago.Mana(accountData.Credits.Value), accountData.Credits.UpdateSlot, slot) if err != nil { return ierrors.Wrapf(err, "can't retrieve account, could not decay credits for account (%s) in slot (%d)", accountData.ID, slot) } diff --git a/pkg/protocol/engine/accounts/accountsledger/snapshot.go b/pkg/protocol/engine/accounts/accountsledger/snapshot.go index 8f8d565c4..de3dbe89a 100644 --- a/pkg/protocol/engine/accounts/accountsledger/snapshot.go +++ b/pkg/protocol/engine/accounts/accountsledger/snapshot.go @@ -1,14 +1,14 @@ package accountsledger import ( - "encoding/binary" "io" "github.com/iotaledger/hive.go/ierrors" "github.com/iotaledger/hive.go/lo" + "github.com/iotaledger/hive.go/serializer/v2" + "github.com/iotaledger/hive.go/serializer/v2/stream" "github.com/iotaledger/iota-core/pkg/model" "github.com/iotaledger/iota-core/pkg/protocol/engine/accounts" 
- "github.com/iotaledger/iota-core/pkg/utils" iotago "github.com/iotaledger/iota.go/v4" ) @@ -16,24 +16,23 @@ func (m *Manager) Import(reader io.ReadSeeker) error { m.mutex.Lock() defer m.mutex.Unlock() - var accountCount uint64 - var slotDiffCount uint64 - - // The number of accounts contained within this snapshot. - if err := binary.Read(reader, binary.LittleEndian, &accountCount); err != nil { - return ierrors.Wrap(err, "unable to read account count") - } + // populate the account tree, account tree should be empty at this point + if err := stream.ReadCollection(reader, serializer.SeriLengthPrefixTypeAsUint64, func(i int) error { + accountData, err := stream.ReadObjectFromReader(reader, accounts.AccountDataFromReader) + if err != nil { + return ierrors.Wrapf(err, "unable to read account data at index %d", i) + } - // The number of slot diffs contained within this snapshot. - if err := binary.Read(reader, binary.LittleEndian, &slotDiffCount); err != nil { - return ierrors.Wrap(err, "unable to read slot diffs count") - } + if err := m.accountsTree.Set(accountData.ID, accountData); err != nil { + return ierrors.Wrapf(err, "unable to set account %s", accountData.ID) + } - if err := m.importAccountTree(reader, accountCount); err != nil { - return ierrors.Wrap(err, "unable to import account tree") + return nil + }); err != nil { + return ierrors.Wrap(err, "failed to read account data") } - if err := m.readSlotDiffs(reader, slotDiffCount); err != nil { + if err := m.readSlotDiffs(reader); err != nil { return ierrors.Wrap(err, "unable to import slot diffs") } @@ -44,64 +43,42 @@ func (m *Manager) Export(writer io.WriteSeeker, targetIndex iotago.SlotIndex) er m.mutex.Lock() defer m.mutex.Unlock() - var accountCount uint64 - var slotDiffsCount uint64 - - pWriter := utils.NewPositionedWriter(writer) - - if err := pWriter.WriteValue("accounts count", accountCount, true); err != nil { - return ierrors.Wrap(err, "unable to write accounts count") - } - - if err := pWriter.WriteValue("slot diffs count", slotDiffsCount, true); err != nil { - return ierrors.Wrap(err, "unable to write slot diffs count") - } - - accountCount, err := m.exportAccountTree(pWriter, targetIndex) - if err != nil { - return ierrors.Wrapf(err, "unable to export account for target index %d", targetIndex) - } - - if err = pWriter.WriteValueAtBookmark("accounts count", accountCount); err != nil { - return ierrors.Wrap(err, "unable to write accounts count") - } - - if slotDiffsCount, err = m.writeSlotDiffs(pWriter, targetIndex); err != nil { - return ierrors.Wrapf(err, "unable to export slot diffs for target index %d", targetIndex) - } + if err := stream.WriteCollection(writer, serializer.SeriLengthPrefixTypeAsUint64, func() (int, error) { + elements, err := m.exportAccountTree(writer, targetIndex) + if err != nil { + return 0, ierrors.Wrap(err, "can't write account tree") + } - if err = pWriter.WriteValueAtBookmark("slot diffs count", slotDiffsCount); err != nil { - return ierrors.Wrap(err, "unable to write slot diffs count") + return elements, nil + }); err != nil { + return ierrors.Wrapf(err, "unable to export accounts for slot %d", targetIndex) } - return nil -} - -func (m *Manager) importAccountTree(reader io.ReadSeeker, accountCount uint64) error { - // populate the account tree, account tree should be empty at this point - for i := uint64(0); i < accountCount; i++ { - accountData := &accounts.AccountData{} - if err := accountData.FromReader(reader); err != nil { - return ierrors.Wrap(err, "unable to read account data") + if err 
:= stream.WriteCollection(writer, serializer.SeriLengthPrefixTypeAsUint64, func() (elementsCount int, err error) { + elementsCount, err = m.writeSlotDiffs(writer, targetIndex) + if err != nil { + return 0, ierrors.Wrap(err, "can't write slot diffs") } - if err := m.accountsTree.Set(accountData.ID, accountData); err != nil { - return ierrors.Wrapf(err, "unable to set account %s", accountData.ID) - } + return elementsCount, nil + }); err != nil { + return ierrors.Wrapf(err, "unable to export slot diffs for slot %d", targetIndex) } return nil } // exportAccountTree exports the AccountTree at a certain target slot, returning the total amount of exported accounts. -func (m *Manager) exportAccountTree(pWriter *utils.PositionedWriter, targetIndex iotago.SlotIndex) (accountCount uint64, err error) { - if err = m.accountsTree.Stream(func(accountID iotago.AccountID, accountData *accounts.AccountData) error { - if _, err = m.rollbackAccountTo(accountData, targetIndex); err != nil { +func (m *Manager) exportAccountTree(writer io.WriteSeeker, targetIndex iotago.SlotIndex) (int, error) { + var accountCount int + + if err := m.accountsTree.Stream(func(accountID iotago.AccountID, accountData *accounts.AccountData) error { + if _, err := m.rollbackAccountTo(accountData, targetIndex); err != nil { return ierrors.Wrapf(err, "unable to rollback account %s", accountID) } - if err = writeAccountData(pWriter, accountData); err != nil { - return ierrors.Wrapf(err, "unable to write data for account %s", accountID) + if err := stream.WriteObject(writer, accountData, (*accounts.AccountData).Bytes); err != nil { + return ierrors.Wrapf(err, "unable to write account %s", accountID) } accountCount++ @@ -112,17 +89,18 @@ func (m *Manager) exportAccountTree(pWriter *utils.PositionedWriter, targetIndex } // we might have entries that were destroyed, that are present in diffs but not in the tree from the latestCommittedIndex we streamed above - recreatedAccountsCount, err := m.recreateDestroyedAccounts(pWriter, targetIndex) + recreatedAccountsCount, err := m.recreateDestroyedAccounts(writer, targetIndex) return accountCount + recreatedAccountsCount, err } -func (m *Manager) recreateDestroyedAccounts(pWriter *utils.PositionedWriter, targetSlot iotago.SlotIndex) (recreatedAccountsCount uint64, err error) { +func (m *Manager) recreateDestroyedAccounts(writer io.WriteSeeker, targetSlot iotago.SlotIndex) (int, error) { + var recreatedAccountsCount int destroyedAccounts := make(map[iotago.AccountID]*accounts.AccountData) for slot := m.latestCommittedSlot; slot > targetSlot; slot-- { // it should be impossible that `m.slotDiff(slot)` returns an error, because it is impossible to export a pruned slot - err = lo.PanicOnErr(m.slotDiff(slot)).StreamDestroyed(func(accountID iotago.AccountID) bool { + err := lo.PanicOnErr(m.slotDiff(slot)).StreamDestroyed(func(accountID iotago.AccountID) bool { // actual data will be filled in by rollbackAccountTo accountData := accounts.NewAccountData(accountID) @@ -143,136 +121,127 @@ func (m *Manager) recreateDestroyedAccounts(pWriter *utils.PositionedWriter, tar return 0, ierrors.Errorf("account %s was not destroyed", accountID) } - if err = writeAccountData(pWriter, accountData); err != nil { - return 0, ierrors.Wrapf(err, "unable to write account %s to snapshot", accountID) + if err := stream.WriteObject(writer, accountData, (*accounts.AccountData).Bytes); err != nil { + return 0, ierrors.Wrapf(err, "unable to write account %s", accountID) } } return recreatedAccountsCount, nil } -func 
writeAccountData(writer *utils.PositionedWriter, accountData *accounts.AccountData) error { - accountBytes, err := accountData.Bytes() - if err != nil { - return ierrors.Wrapf(err, "unable to get account data snapshot bytes for accountID %s", accountData.ID) - } - - if err = writer.WriteBytes(accountBytes); err != nil { - return ierrors.Wrapf(err, "unable to write account data for accountID %s", accountData.ID) - } - - return nil -} - -func (m *Manager) readSlotDiffs(reader io.ReadSeeker, slotDiffCount uint64) error { - for i := uint64(0); i < slotDiffCount; i++ { - var slot iotago.SlotIndex - var accountsInDiffCount uint64 - - if err := binary.Read(reader, binary.LittleEndian, &slot); err != nil { - return ierrors.Wrap(err, "unable to read slot index") - } - - if err := binary.Read(reader, binary.LittleEndian, &accountsInDiffCount); err != nil { - return ierrors.Wrap(err, "unable to read accounts in diff count") - } - if accountsInDiffCount == 0 { - continue - } - - diffStore, err := m.slotDiff(slot) +func (m *Manager) readSlotDiffs(reader io.ReadSeeker) error { + // Read all the slots. + if err := stream.ReadCollection(reader, serializer.SeriLengthPrefixTypeAsUint64, func(i int) error { + slot, err := stream.Read[iotago.SlotIndex](reader) if err != nil { - return ierrors.Errorf("unable to import account slot diffs for slot %d", slot) + return ierrors.Wrapf(err, "unable to read slot index at index %d", i) } - for j := uint64(0); j < accountsInDiffCount; j++ { - var accountID iotago.AccountID - if _, err := io.ReadFull(reader, accountID[:]); err != nil { + // Read all the slot diffs within each slot. + if err := stream.ReadCollection(reader, serializer.SeriLengthPrefixTypeAsUint64, func(j int) error { + diffStore, err := m.slotDiff(slot) + if err != nil { + return ierrors.Wrapf(err, "unable to get account diff storage for slot %d", slot) + } + + accountID, err := stream.Read[iotago.AccountID](reader) + if err != nil { return ierrors.Wrapf(err, "unable to read accountID for index %d", j) } - var destroyed bool - if err := binary.Read(reader, binary.LittleEndian, &destroyed); err != nil { + destroyed, err := stream.Read[bool](reader) + if err != nil { return ierrors.Wrapf(err, "unable to read destroyed flag for accountID %s", accountID) } - accountDiff := model.NewAccountDiff() + var accountDiff *model.AccountDiff if !destroyed { - if err := accountDiff.FromReader(reader); err != nil { + if accountDiff, err = stream.ReadObjectFromReader(reader, model.AccountDiffFromReader); err != nil { return ierrors.Wrapf(err, "unable to read account diff for accountID %s", accountID) } + } else { + accountDiff = model.NewAccountDiff() } if err := diffStore.Store(accountID, accountDiff, destroyed); err != nil { return ierrors.Wrapf(err, "unable to store slot diff for accountID %s", accountID) } + + return nil + }); err != nil { + return ierrors.Wrapf(err, "unable to read accounts in diff count at index %d", i) } + + return nil + }); err != nil { + return ierrors.Wrap(err, "failed to read slot diffs") } return nil } -func (m *Manager) writeSlotDiffs(pWriter *utils.PositionedWriter, targetSlot iotago.SlotIndex) (slotDiffsCount uint64, err error) { +func (m *Manager) writeSlotDiffs(writer io.WriteSeeker, targetSlot iotago.SlotIndex) (int, error) { + var slotDiffsCount int + // write slot diffs until being able to reach targetSlot, where the exported tree is at slot := iotago.SlotIndex(1) maxCommittableAge := m.apiProvider.APIForSlot(targetSlot).ProtocolParameters().MaxCommittableAge() - if targetSlot > 
maxCommittableAge { slot = targetSlot - maxCommittableAge } for ; slot <= targetSlot; slot++ { - var accountsInDiffCount uint64 - - // The index of the slot diffs. - if err = pWriter.WriteValue("slot index", slot); err != nil { - return 0, err - } + var accountsInDiffCount int - // The number of account entries within this slot diff. - if err = pWriter.WriteValue("inDiff accounts count", accountsInDiffCount, true); err != nil { - return 0, err + if err := stream.Write(writer, slot); err != nil { + return 0, ierrors.Wrapf(err, "unable to write slot %d", slot) } - slotDiffsCount++ - - var innerErr error slotDiffs, err := m.slotDiff(slot) if err != nil { // if slot is already pruned, then don't write anything continue } - if err = slotDiffs.Stream(func(accountID iotago.AccountID, accountDiff *model.AccountDiff, destroyed bool) bool { - if err = pWriter.WriteBytes(lo.PanicOnErr(accountID.Bytes())); err != nil { - innerErr = ierrors.Wrapf(err, "unable to write accountID for account %s", accountID) - } + if err = stream.WriteCollection(writer, serializer.SeriLengthPrefixTypeAsUint64, func() (int, error) { + var innerErr error - if err = pWriter.WriteValue("destroyed flag", destroyed); err != nil { - innerErr = ierrors.Wrapf(err, "unable to write destroyed flag for account %s", accountID) - } + if err = slotDiffs.Stream(func(accountID iotago.AccountID, accountDiff *model.AccountDiff, destroyed bool) bool { - if !destroyed { - if err = pWriter.WriteBytes(lo.PanicOnErr(accountDiff.Bytes())); err != nil { - innerErr = ierrors.Wrapf(err, "unable to write account diff for account %s", accountID) + if err = stream.Write(writer, accountID); err != nil { + innerErr = ierrors.Wrapf(err, "unable to write accountID for account %s", accountID) + return false + } + + if err = stream.Write(writer, destroyed); err != nil { + innerErr = ierrors.Wrapf(err, "unable to write destroyed flag for account %s", accountID) + return false + } + + if !destroyed { + if err = stream.WriteObject(writer, accountDiff, (*model.AccountDiff).Bytes); err != nil { + innerErr = ierrors.Wrapf(err, "unable to write account diff for account %s", accountID) + return false + } } + + accountsInDiffCount++ + + return true + }); err != nil { + return 0, ierrors.Wrapf(err, "unable to stream slot diff for index %d", slot) } - accountsInDiffCount++ + if innerErr != nil { + return 0, ierrors.Wrapf(innerErr, "unable to stream slot diff for index %d", slot) + } - return true + return accountsInDiffCount, nil }); err != nil { - return 0, ierrors.Wrapf(err, "unable to stream slot diff for index %d", slot) + return 0, ierrors.Wrapf(err, "unable to write slot diff %d", slot) } - if innerErr != nil { - return 0, ierrors.Wrapf(innerErr, "unable to write slot diff for index %d", slot) - } - - // The number of diffs contained within this slot. 
- if err = pWriter.WriteValueAtBookmark("inDiff accounts count", accountsInDiffCount); err != nil { - return 0, err - } + slotDiffsCount++ } return slotDiffsCount, nil diff --git a/pkg/protocol/engine/accounts/accountsledger/snapshot_test.go b/pkg/protocol/engine/accounts/accountsledger/snapshot_test.go index 89b8f4058..c94f5d06c 100644 --- a/pkg/protocol/engine/accounts/accountsledger/snapshot_test.go +++ b/pkg/protocol/engine/accounts/accountsledger/snapshot_test.go @@ -3,9 +3,9 @@ package accountsledger_test import ( "testing" - "github.com/orcaman/writerseeker" "github.com/stretchr/testify/require" + "github.com/iotaledger/hive.go/serializer/v2/stream" "github.com/iotaledger/iota-core/pkg/model" iotago "github.com/iotaledger/iota.go/v4" "github.com/iotaledger/iota.go/v4/tpkg" @@ -176,15 +176,15 @@ func TestManager_Import_Export(t *testing.T) { }, }) - //// Export and import the account ledger into new manager for the latest slot. + // Export and import the account ledger into new manager for the latest slot. { - writer := &writerseeker.WriterSeeker{} + writer := stream.NewByteBuffer() err := ts.Instance.Export(writer, iotago.SlotIndex(3)) require.NoError(t, err) ts.Instance = ts.initAccountLedger() - err = ts.Instance.Import(writer.BytesReader()) + err = ts.Instance.Import(writer.Reader()) require.NoError(t, err) ts.Instance.SetLatestCommittedSlot(3) @@ -193,13 +193,13 @@ func TestManager_Import_Export(t *testing.T) { // Export and import for pre-latest slot. { - writer := &writerseeker.WriterSeeker{} + writer := stream.NewByteBuffer() err := ts.Instance.Export(writer, iotago.SlotIndex(2)) require.NoError(t, err) ts.Instance = ts.initAccountLedger() - err = ts.Instance.Import(writer.BytesReader()) + err = ts.Instance.Import(writer.Reader()) require.NoError(t, err) ts.Instance.SetLatestCommittedSlot(2) diff --git a/pkg/protocol/engine/accounts/accountsledger/testsuite_test.go b/pkg/protocol/engine/accounts/accountsledger/testsuite_test.go index 64d7f8152..3decfe1cb 100644 --- a/pkg/protocol/engine/accounts/accountsledger/testsuite_test.go +++ b/pkg/protocol/engine/accounts/accountsledger/testsuite_test.go @@ -131,7 +131,7 @@ func (t *TestSuite) ApplySlotActions(slot iotago.SlotIndex, rmc iotago.Mana, act BICChange: iotago.BlockIssuanceCredits(action.TotalAllotments), // manager takes AccountDiff only with allotments filled in when applyDiff is triggered BlockIssuerKeysAdded: t.BlockIssuerKeys(action.AddedKeys, true), BlockIssuerKeysRemoved: t.BlockIssuerKeys(action.RemovedKeys, true), - PreviousUpdatedTime: prevAccountFields.BICUpdatedAt, + PreviousUpdatedSlot: prevAccountFields.BICUpdatedAt, NewExpirySlot: prevAccountFields.ExpirySlot, DelegationStakeChange: action.DelegationStakeChange, @@ -265,7 +265,7 @@ func (t *TestSuite) assertDiff(slot iotago.SlotIndex, accountID iotago.AccountID expectedAccountDiff := accountsSlotBuildData.SlotDiff[accountID] require.Equal(t.T, expectedAccountDiff.PreviousOutputID, actualDiff.PreviousOutputID) - require.Equal(t.T, expectedAccountDiff.PreviousUpdatedTime, actualDiff.PreviousUpdatedTime) + require.Equal(t.T, expectedAccountDiff.PreviousUpdatedSlot, actualDiff.PreviousUpdatedSlot) require.Equal(t.T, expectedAccountDiff.NewExpirySlot, actualDiff.NewExpirySlot) require.Equal(t.T, expectedAccountDiff.PreviousExpirySlot, actualDiff.PreviousExpirySlot) diff --git a/pkg/protocol/engine/accounts/credits.go b/pkg/protocol/engine/accounts/credits.go index a4df3e5b8..3d04118c3 100644 --- a/pkg/protocol/engine/accounts/credits.go +++ 
b/pkg/protocol/engine/accounts/credits.go @@ -1,49 +1,64 @@ package accounts import ( - "github.com/iotaledger/hive.go/lo" - "github.com/iotaledger/hive.go/serializer/v2/marshalutil" + "github.com/iotaledger/hive.go/ierrors" + "github.com/iotaledger/hive.go/serializer/v2" + "github.com/iotaledger/hive.go/serializer/v2/stream" iotago "github.com/iotaledger/iota.go/v4" ) +const BlockIssuanceCreditsBytesLength = serializer.Int64ByteSize + iotago.SlotIndexLength + // BlockIssuanceCredits is a weight annotated with the slot it was last updated in. type BlockIssuanceCredits struct { Value iotago.BlockIssuanceCredits - UpdateTime iotago.SlotIndex + UpdateSlot iotago.SlotIndex } // NewBlockIssuanceCredits creates a new Credits instance. func NewBlockIssuanceCredits(value iotago.BlockIssuanceCredits, updateTime iotago.SlotIndex) (newCredits *BlockIssuanceCredits) { return &BlockIssuanceCredits{ Value: value, - UpdateTime: updateTime, + UpdateSlot: updateTime, } } // Bytes returns a serialized version of the Credits. -func (c BlockIssuanceCredits) Bytes() ([]byte, error) { - m := marshalutil.New() +func (c *BlockIssuanceCredits) Bytes() ([]byte, error) { + byteBuffer := stream.NewByteBuffer() + + if err := stream.Write(byteBuffer, c.Value); err != nil { + return nil, ierrors.Wrap(err, "failed to write Value") + } - m.WriteInt64(int64(c.Value)) - m.WriteUint32(uint32(c.UpdateTime)) + if err := stream.Write(byteBuffer, c.UpdateSlot); err != nil { + return nil, ierrors.Wrap(err, "failed to write UpdateSlot") + } - return m.Bytes(), nil + return byteBuffer.Bytes() } -// FromBytes parses a serialized version of the Credits. -func (c *BlockIssuanceCredits) FromBytes(bytes []byte) (int, error) { - m := marshalutil.New(bytes) +func BlockIssuanceCreditsFromBytes(bytes []byte) (*BlockIssuanceCredits, int, error) { + c := new(BlockIssuanceCredits) - c.Value = iotago.BlockIssuanceCredits(lo.PanicOnErr(m.ReadInt64())) - c.UpdateTime = iotago.SlotIndex(lo.PanicOnErr(m.ReadUint32())) + var err error + byteReader := stream.NewByteReader(bytes) + + if c.Value, err = stream.Read[iotago.BlockIssuanceCredits](byteReader); err != nil { + return nil, 0, ierrors.Wrap(err, "failed to read Value") + } + + if c.UpdateSlot, err = stream.Read[iotago.SlotIndex](byteReader); err != nil { + return nil, 0, ierrors.Wrap(err, "failed to read UpdateSlot") + } - return m.ReadOffset(), nil + return c, byteReader.BytesRead(), nil } // Update updates the Credits increasing Value and updateTime. 
-func (c *BlockIssuanceCredits) Update(change iotago.BlockIssuanceCredits, updateTime ...iotago.SlotIndex) { +func (c *BlockIssuanceCredits) Update(change iotago.BlockIssuanceCredits, updateSlot ...iotago.SlotIndex) { c.Value += change - if len(updateTime) > 0 { - c.UpdateTime = updateTime[0] + if len(updateSlot) > 0 { + c.UpdateSlot = updateSlot[0] } } diff --git a/pkg/protocol/engine/accounts/mana/manager.go b/pkg/protocol/engine/accounts/mana/manager.go index b53432329..d9c6ae32f 100644 --- a/pkg/protocol/engine/accounts/mana/manager.go +++ b/pkg/protocol/engine/accounts/mana/manager.go @@ -222,5 +222,5 @@ func (m *Manager) getBIC(accountID iotago.AccountID, slot iotago.SlotIndex) (bic return 0, 0, nil } - return iotago.Mana(accountBIC.Credits.Value), accountBIC.Credits.UpdateTime, nil + return iotago.Mana(accountBIC.Credits.Value), accountBIC.Credits.UpdateSlot, nil } diff --git a/pkg/protocol/engine/accounts/mana/manager_test.go b/pkg/protocol/engine/accounts/mana/manager_test.go index f4d58dd62..767367087 100644 --- a/pkg/protocol/engine/accounts/mana/manager_test.go +++ b/pkg/protocol/engine/accounts/mana/manager_test.go @@ -72,7 +72,7 @@ func TestManager_GetManaOnAccountOverflow(t *testing.T) { ID: id, Credits: &accounts.BlockIssuanceCredits{ Value: iotago.MaxBlockIssuanceCredits/2 + iotago.MaxBlockIssuanceCredits/4, - UpdateTime: 1, + UpdateSlot: 1, }, ExpirySlot: iotago.MaxSlotIndex, OutputID: iotago.OutputID{}, diff --git a/pkg/protocol/engine/attestation/slotattestation/snapshot.go b/pkg/protocol/engine/attestation/slotattestation/snapshot.go index ec2ae69fd..0b4b3c14e 100644 --- a/pkg/protocol/engine/attestation/slotattestation/snapshot.go +++ b/pkg/protocol/engine/attestation/slotattestation/snapshot.go @@ -4,6 +4,7 @@ import ( "io" "github.com/iotaledger/hive.go/ierrors" + "github.com/iotaledger/hive.go/serializer/v2" "github.com/iotaledger/hive.go/serializer/v2/stream" iotago "github.com/iotaledger/iota.go/v4" ) @@ -13,28 +14,14 @@ func (m *Manager) Import(reader io.ReadSeeker) error { defer m.commitmentMutex.Unlock() var attestations []*iotago.Attestation - if err := stream.ReadCollection(reader, func(i int) error { - attestationBytes, err := stream.ReadBlob(reader) - if err != nil { - return ierrors.Wrap(err, "failed to read attestation") - } - - version, _, err := iotago.VersionFromBytes(attestationBytes) - if err != nil { - return ierrors.Wrap(err, "failed to determine version") - } + if err := stream.ReadCollection(reader, serializer.SeriLengthPrefixTypeAsUint32, func(i int) error { - apiForVersion, err := m.apiProvider.APIForVersion(version) + attestation, err := stream.ReadObjectWithSize[*iotago.Attestation](reader, serializer.SeriLengthPrefixTypeAsUint16, iotago.AttestationFromBytes(m.apiProvider)) if err != nil { - return ierrors.Wrapf(err, "failed to get API for version %d", version) + return ierrors.Wrapf(err, "failed to read attestation %d", i) } - importedAttestation := new(iotago.Attestation) - if _, err = apiForVersion.Decode(attestationBytes, importedAttestation); err != nil { - return ierrors.Wrapf(err, "failed to decode attestation %d", i) - } - - attestations = append(attestations, importedAttestation) + attestations = append(attestations, attestation) return nil }); err != nil { @@ -88,23 +75,14 @@ func (m *Manager) Export(writer io.WriteSeeker, targetSlot iotago.SlotIndex) err return ierrors.Wrapf(err, "failed to stream attestations of slot %d", targetSlot) } - if err = stream.WriteCollection(writer, func() (uint64, error) { + if err = 
stream.WriteCollection(writer, serializer.SeriLengthPrefixTypeAsUint32, func() (int, error) { for _, a := range attestations { - apiForVersion, err := m.apiProvider.APIForVersion(a.Header.ProtocolVersion) - if err != nil { - return 0, ierrors.Wrapf(err, "failed to get API for version %d", a.Header.ProtocolVersion) - } - bytes, err := apiForVersion.Encode(a) - if err != nil { - return 0, ierrors.Wrapf(err, "failed to encode attestation %v", a) - } - - if writeErr := stream.WriteBlob(writer, bytes); writeErr != nil { - return 0, ierrors.Wrapf(writeErr, "failed to write attestation %v", a) + if err := stream.WriteObjectWithSize(writer, a, serializer.SeriLengthPrefixTypeAsUint16, (*iotago.Attestation).Bytes); err != nil { + return 0, ierrors.Wrapf(err, "failed to write attestation %v", a) } } - return uint64(len(attestations)), nil + return len(attestations), nil }); err != nil { return ierrors.Wrapf(err, "failed to write attestations of slot %d", targetSlot) } diff --git a/pkg/protocol/engine/attestation/slotattestation/storage.go b/pkg/protocol/engine/attestation/slotattestation/storage.go index 6f74e60c0..55e9b5c6b 100644 --- a/pkg/protocol/engine/attestation/slotattestation/storage.go +++ b/pkg/protocol/engine/attestation/slotattestation/storage.go @@ -72,20 +72,11 @@ func (m *Manager) trackerStorage(index iotago.SlotIndex) (*kvstore.TypedStore[io return nil, ierrors.Wrapf(err, "failed to get extended realm for tracker of slot %d", index) } - api := m.apiProvider.APIForSlot(index) - return kvstore.NewTypedStore[iotago.AccountID, *iotago.Attestation](trackerStorage, iotago.AccountID.Bytes, iotago.AccountIDFromBytes, - func(v *iotago.Attestation) ([]byte, error) { - return api.Encode(v) - }, - func(bytes []byte) (object *iotago.Attestation, consumed int, err error) { - attestation := new(iotago.Attestation) - consumed, err = api.Decode(bytes, attestation) - - return attestation, consumed, err - }, + (*iotago.Attestation).Bytes, + iotago.AttestationFromBytes(m.apiProvider), ), nil } @@ -99,7 +90,10 @@ func (m *Manager) attestationsForSlot(index iotago.SlotIndex) (ads.Map[iotago.Id return nil, ierrors.Wrapf(err, "failed to get extended realm for attestations of slot %d", index) } - return ads.NewMap[iotago.Identifier](attestationsStorage, + return ads.NewMap[iotago.Identifier]( + attestationsStorage, + iotago.Identifier.Bytes, + iotago.IdentifierFromBytes, iotago.AccountID.Bytes, iotago.AccountIDFromBytes, (*iotago.Attestation).Bytes, diff --git a/pkg/protocol/engine/attestation/slotattestation/testframework_test.go b/pkg/protocol/engine/attestation/slotattestation/testframework_test.go index 1214dd6b3..07fa7abe1 100644 --- a/pkg/protocol/engine/attestation/slotattestation/testframework_test.go +++ b/pkg/protocol/engine/attestation/slotattestation/testframework_test.go @@ -150,7 +150,10 @@ func (t *TestFramework) AssertCommit(slot iotago.SlotIndex, expectedCW uint64, e require.EqualValues(t.test, expectedCW, cw) - expectedTree := ads.NewMap[iotago.Identifier](mapdb.NewMapDB(), + expectedTree := ads.NewMap[iotago.Identifier]( + mapdb.NewMapDB(), + iotago.Identifier.Bytes, + iotago.IdentifierFromBytes, iotago.AccountID.Bytes, iotago.AccountIDFromBytes, (*iotago.Attestation).Bytes, diff --git a/pkg/protocol/engine/blocks/block.go b/pkg/protocol/engine/blocks/block.go index 10df6cbb5..88ce9192e 100644 --- a/pkg/protocol/engine/blocks/block.go +++ b/pkg/protocol/engine/blocks/block.go @@ -247,7 +247,7 @@ func (b *Block) SlotCommitmentID() iotago.CommitmentID { return b.rootBlock.commitmentID } - 
return b.modelBlock.ProtocolBlock().Header.SlotCommitmentID + return b.modelBlock.SlotCommitmentID() } // IsMissing returns a flag that indicates if the underlying Block data hasn't been stored, yet. diff --git a/pkg/protocol/engine/committed_slot_api.go b/pkg/protocol/engine/committed_slot_api.go index edb604814..2a2871b24 100644 --- a/pkg/protocol/engine/committed_slot_api.go +++ b/pkg/protocol/engine/committed_slot_api.go @@ -44,7 +44,7 @@ func (c *CommittedSlotAPI) Roots() (committedRoots *iotago.Roots, err error) { return nil, ierrors.Errorf("no roots storage for slot %d", c.CommitmentID) } - roots, err := rootsStorage.Load(c.CommitmentID) + roots, _, err := rootsStorage.Load(c.CommitmentID) if err != nil { return nil, ierrors.Wrapf(err, "failed to load roots for slot %d", c.CommitmentID) } @@ -52,10 +52,10 @@ func (c *CommittedSlotAPI) Roots() (committedRoots *iotago.Roots, err error) { return roots, nil } -// BlockIDs returns the accepted block IDs of the slot. -func (c *CommittedSlotAPI) BlockIDs() (blockIDs iotago.BlockIDs, err error) { +// BlocksIDsBySlotCommitmentID returns the accepted block IDs of the slot grouped by their SlotCommitmentID. +func (c *CommittedSlotAPI) BlocksIDsBySlotCommitmentID() (map[iotago.CommitmentID]iotago.BlockIDs, error) { if c.engine.Storage.Settings().LatestCommitment().Slot() < c.CommitmentID.Slot() { - return blockIDs, ierrors.Errorf("slot %d is not committed yet", c.CommitmentID) + return nil, ierrors.Errorf("slot %d is not committed yet", c.CommitmentID) } store, err := c.engine.Storage.Blocks(c.CommitmentID.Slot()) @@ -63,14 +63,15 @@ func (c *CommittedSlotAPI) BlockIDs() (blockIDs iotago.BlockIDs, err error) { return nil, ierrors.Errorf("failed to get block store of slot index %d", c.CommitmentID.Slot()) } + blockIDsBySlotCommitmentID := make(map[iotago.CommitmentID]iotago.BlockIDs) if err := store.ForEachBlockInSlot(func(block *model.Block) error { - blockIDs = append(blockIDs, block.ID()) + blockIDsBySlotCommitmentID[block.SlotCommitmentID()] = append(blockIDsBySlotCommitmentID[block.SlotCommitmentID()], block.ID()) return nil }); err != nil { return nil, ierrors.Wrapf(err, "failed to iterate over blocks of slot %d", c.CommitmentID.Slot()) } - return blockIDs, nil + return blockIDsBySlotCommitmentID, nil } func (c *CommittedSlotAPI) TransactionIDs() (iotago.TransactionIDs, error) { @@ -83,7 +84,13 @@ func (c *CommittedSlotAPI) TransactionIDs() (iotago.TransactionIDs, error) { return nil, ierrors.Errorf("failed to get mutations of slot index %d", c.CommitmentID.Slot()) } - set := ads.NewSet[iotago.Identifier](store, iotago.TransactionID.Bytes, iotago.TransactionIDFromBytes) + set := ads.NewSet[iotago.Identifier]( + store, + iotago.Identifier.Bytes, + iotago.IdentifierFromBytes, + iotago.TransactionID.Bytes, + iotago.TransactionIDFromBytes, + ) transactionIDs := make(iotago.TransactionIDs, 0, set.Size()) if err = set.Stream(func(txID iotago.TransactionID) error { diff --git a/pkg/protocol/engine/consensus/blockgadget/testframework_test.go b/pkg/protocol/engine/consensus/blockgadget/testframework_test.go index a2d7371bc..c8b17ebed 100644 --- a/pkg/protocol/engine/consensus/blockgadget/testframework_test.go +++ b/pkg/protocol/engine/consensus/blockgadget/testframework_test.go @@ -53,6 +53,8 @@ func NewTestFramework(test *testing.T) *TestFramework { iotago.CommitmentID.Bytes, iotago.CommitmentIDFromBytes, ), nil + }, func() iotago.BlockID { + return tpkg.TestAPI.ProtocolParameters().GenesisBlockID() }) t.blockCache = blocks.New(evictionState, 
api.SingleVersionProvider(tpkg.TestAPI)) @@ -63,7 +65,7 @@ func NewTestFramework(test *testing.T) *TestFramework { t.Events = instance.Events() t.Instance = instance - genesisBlock := blocks.NewRootBlock(iotago.EmptyBlockID, iotago.NewEmptyCommitment(tpkg.TestAPI.Version()).MustID(), time.Unix(tpkg.TestAPI.TimeProvider().GenesisUnixTime(), 0)) + genesisBlock := blocks.NewRootBlock(tpkg.TestAPI.ProtocolParameters().GenesisBlockID(), iotago.NewEmptyCommitment(tpkg.TestAPI).MustID(), time.Unix(tpkg.TestAPI.TimeProvider().GenesisUnixTime(), 0)) t.blocks.Set("Genesis", genesisBlock) genesisBlock.ID().RegisterAlias("Genesis") evictionState.AddRootBlock(genesisBlock.ID(), genesisBlock.SlotCommitmentID()) diff --git a/pkg/protocol/engine/engine.go b/pkg/protocol/engine/engine.go index 1cee3527b..089d8d5cc 100644 --- a/pkg/protocol/engine/engine.go +++ b/pkg/protocol/engine/engine.go @@ -114,7 +114,7 @@ func New( &Engine{ Events: NewEvents(), Storage: storageInstance, - EvictionState: eviction.NewState(storageInstance.LatestNonEmptySlot(), storageInstance.RootBlocks), + EvictionState: eviction.NewState(storageInstance.LatestNonEmptySlot(), storageInstance.RootBlocks, storageInstance.GenesisRootBlockID), Workers: workers, errorHandler: errorHandler, @@ -183,7 +183,7 @@ func New( } // Only mark any pruning indexes if we loaded a non-genesis snapshot - if e.Storage.Settings().LatestFinalizedSlot() > 0 { + if e.Storage.Settings().LatestFinalizedSlot() > e.Storage.GenesisRootBlockID().Slot() { if _, _, err := e.Storage.PruneByDepth(1); err != nil { if !ierrors.Is(err, database.ErrNoPruningNeeded) && !ierrors.Is(err, database.ErrEpochPruned) { @@ -199,7 +199,6 @@ func New( } else { // Restore from Disk e.Storage.RestoreFromDisk() - e.EvictionState.PopulateFromStorage(e.Storage.Settings().LatestCommitment().Slot()) if err := e.Attestations.RestoreFromDisk(); err != nil { panic(ierrors.Wrap(err, "failed to restore attestations from disk")) @@ -488,11 +487,14 @@ func (e *Engine) setupPruning() { // chain beyond a window based on eviction, which in turn is based on acceptance. In case of a partition, this behavior is // clearly not desired. 
func (e *Engine) EarliestRootCommitment(lastFinalizedSlot iotago.SlotIndex) (earliestCommitment *model.Commitment) { - maxCommittableAge := e.APIForSlot(lastFinalizedSlot).ProtocolParameters().MaxCommittableAge() + api := e.APIForSlot(lastFinalizedSlot) + + genesisSlot := api.ProtocolParameters().GenesisSlot() + maxCommittableAge := api.ProtocolParameters().MaxCommittableAge() var earliestRootCommitmentSlot iotago.SlotIndex - if lastFinalizedSlot <= maxCommittableAge { - earliestRootCommitmentSlot = 0 + if lastFinalizedSlot <= genesisSlot+maxCommittableAge { + earliestRootCommitmentSlot = genesisSlot } else { earliestRootCommitmentSlot = lastFinalizedSlot - maxCommittableAge } diff --git a/pkg/protocol/engine/eviction/state.go b/pkg/protocol/engine/eviction/state.go index 104af2c3e..935bd15a2 100644 --- a/pkg/protocol/engine/eviction/state.go +++ b/pkg/protocol/engine/eviction/state.go @@ -3,13 +3,12 @@ package eviction import ( "io" - "github.com/iotaledger/hive.go/core/memstorage" - "github.com/iotaledger/hive.go/ds/ringbuffer" "github.com/iotaledger/hive.go/ierrors" "github.com/iotaledger/hive.go/kvstore" "github.com/iotaledger/hive.go/lo" "github.com/iotaledger/hive.go/runtime/options" "github.com/iotaledger/hive.go/runtime/syncutils" + "github.com/iotaledger/hive.go/serializer/v2" "github.com/iotaledger/hive.go/serializer/v2/stream" "github.com/iotaledger/iota-core/pkg/storage/prunable/slotstore" iotago "github.com/iotaledger/iota.go/v4" @@ -21,24 +20,23 @@ const latestNonEmptySlotKey = 1 type State struct { Events *Events - rootBlocks *memstorage.IndexedStorage[iotago.SlotIndex, iotago.BlockID, iotago.CommitmentID] - latestRootBlocks *ringbuffer.RingBuffer[iotago.BlockID] rootBlockStorageFunc func(iotago.SlotIndex) (*slotstore.Store[iotago.BlockID, iotago.CommitmentID], error) lastEvictedSlot iotago.SlotIndex latestNonEmptyStore kvstore.KVStore evictionMutex syncutils.RWMutex + genesisRootBlockFunc func() iotago.BlockID + optsRootBlocksEvictionDelay iotago.SlotIndex } // NewState creates a new eviction State. -func NewState(latestNonEmptyStore kvstore.KVStore, rootBlockStorageFunc func(iotago.SlotIndex) (*slotstore.Store[iotago.BlockID, iotago.CommitmentID], error), opts ...options.Option[State]) (state *State) { +func NewState(latestNonEmptyStore kvstore.KVStore, rootBlockStorageFunc func(iotago.SlotIndex) (*slotstore.Store[iotago.BlockID, iotago.CommitmentID], error), genesisRootBlockFunc func() iotago.BlockID, opts ...options.Option[State]) (state *State) { return options.Apply(&State{ Events: NewEvents(), - rootBlocks: memstorage.NewIndexedStorage[iotago.SlotIndex, iotago.BlockID, iotago.CommitmentID](), - latestRootBlocks: ringbuffer.NewRingBuffer[iotago.BlockID](8), rootBlockStorageFunc: rootBlockStorageFunc, latestNonEmptyStore: latestNonEmptyStore, + genesisRootBlockFunc: genesisRootBlockFunc, optsRootBlocksEvictionDelay: 3, }, opts) } @@ -54,8 +52,12 @@ func (s *State) AdvanceActiveWindowToIndex(slot iotago.SlotIndex) { if delayedIndex, shouldEvictRootBlocks := s.delayedBlockEvictionThreshold(slot); shouldEvictRootBlocks { // Remember the last slot outside our cache window that has root blocks. 
- if evictedSlot := s.rootBlocks.Evict(delayedIndex); evictedSlot != nil && evictedSlot.Size() > 0 { - s.setLatestNonEmptySlot(delayedIndex) + if storage, err := s.rootBlockStorageFunc(delayedIndex); err == nil { + _ = storage.StreamKeys(func(_ iotago.BlockID) error { + s.setLatestNonEmptySlot(delayedIndex) + + return ierrors.New("stop iteration") + }) } } @@ -86,15 +88,17 @@ func (s *State) ActiveRootBlocks() map[iotago.BlockID]iotago.CommitmentID { activeRootBlocks := make(map[iotago.BlockID]iotago.CommitmentID) startSlot, endSlot := s.activeIndexRange() for slot := startSlot; slot <= endSlot; slot++ { - storage := s.rootBlocks.Get(slot) - if storage == nil { + // We assume the cache is always populated for the latest slots. + storage, err := s.rootBlockStorageFunc(slot) + // Slot too old, it was pruned. + if err != nil { continue } - storage.ForEach(func(id iotago.BlockID, commitmentID iotago.CommitmentID) bool { + _ = storage.Stream(func(id iotago.BlockID, commitmentID iotago.CommitmentID) error { activeRootBlocks[id] = commitmentID - return true + return nil }) } @@ -111,13 +115,9 @@ func (s *State) AddRootBlock(id iotago.BlockID, commitmentID iotago.CommitmentID return } - if s.rootBlocks.Get(id.Slot(), true).Set(id, commitmentID) { - if err := lo.PanicOnErr(s.rootBlockStorageFunc(id.Slot())).Store(id, commitmentID); err != nil { - panic(ierrors.Wrapf(err, "failed to store root block %s", id)) - } + if err := lo.PanicOnErr(s.rootBlockStorageFunc(id.Slot())).Store(id, commitmentID); err != nil { + panic(ierrors.Wrapf(err, "failed to store root block %s", id)) } - - s.latestRootBlocks.Add(id) } // RemoveRootBlock removes a solid entry points from the map. @@ -125,10 +125,8 @@ func (s *State) RemoveRootBlock(id iotago.BlockID) { s.evictionMutex.RLock() defer s.evictionMutex.RUnlock() - if rootBlocks := s.rootBlocks.Get(id.Slot()); rootBlocks != nil && rootBlocks.Delete(id) { - if err := lo.PanicOnErr(s.rootBlockStorageFunc(id.Slot())).Delete(id); err != nil { - panic(err) - } + if err := lo.PanicOnErr(s.rootBlockStorageFunc(id.Slot())).Delete(id); err != nil { + panic(err) } } @@ -141,9 +139,12 @@ func (s *State) IsRootBlock(id iotago.BlockID) (has bool) { return false } - slotBlocks := s.rootBlocks.Get(id.Slot(), false) + storage, err := s.rootBlockStorageFunc(id.Slot()) + if err != nil { + return false + } - return slotBlocks != nil && slotBlocks.Has(id) + return lo.PanicOnErr(storage.Has(id)) } // RootBlockCommitmentID returns the commitmentID if it is a known root block. @@ -155,22 +156,18 @@ func (s *State) RootBlockCommitmentID(id iotago.BlockID) (commitmentID iotago.Co return iotago.CommitmentID{}, false } - slotBlocks := s.rootBlocks.Get(id.Slot(), false) - if slotBlocks == nil { - return iotago.CommitmentID{}, false + storage, err := s.rootBlockStorageFunc(id.Slot()) + if err != nil { + return iotago.EmptyCommitmentID, false } - return slotBlocks.Get(id) -} - -// LatestRootBlocks returns the latest root blocks. -func (s *State) LatestRootBlocks() iotago.BlockIDs { - rootBlocks := s.latestRootBlocks.ToSlice() - if len(rootBlocks) == 0 { - return iotago.BlockIDs{iotago.EmptyBlockID} + // This returns an empty value for commitmentID in case the key is not found. + commitmentID, exists, err = storage.Load(id) + if err != nil { + panic(ierrors.Wrapf(err, "failed to load root block %s", id)) } - return rootBlocks + return commitmentID, exists } // Export exports the root blocks to the given writer.
@@ -183,20 +180,21 @@ func (s *State) Export(writer io.WriteSeeker, lowerTarget iotago.SlotIndex, targ start, _ := s.delayedBlockEvictionThreshold(lowerTarget) - latestNonEmptySlot := iotago.SlotIndex(0) + genesisSlot := s.genesisRootBlockFunc().Slot() + latestNonEmptySlot := genesisSlot - if err := stream.WriteCollection(writer, func() (elementsCount uint64, err error) { + if err := stream.WriteCollection(writer, serializer.SeriLengthPrefixTypeAsUint32, func() (elementsCount int, err error) { for currentSlot := start; currentSlot <= targetSlot; currentSlot++ { storage, err := s.rootBlockStorageFunc(currentSlot) if err != nil { continue } if err = storage.StreamBytes(func(rootBlockIDBytes []byte, commitmentIDBytes []byte) (err error) { - if err = stream.WriteBlob(writer, rootBlockIDBytes); err != nil { + if err = stream.WriteBytes(writer, rootBlockIDBytes); err != nil { return ierrors.Wrapf(err, "failed to write root block ID %s", rootBlockIDBytes) } - if err = stream.WriteBlob(writer, commitmentIDBytes); err != nil { + if err = stream.WriteBytes(writer, commitmentIDBytes); err != nil { return ierrors.Wrapf(err, "failed to write root block's %s commitment %s", rootBlockIDBytes, commitmentIDBytes) } @@ -218,10 +216,10 @@ func (s *State) Export(writer io.WriteSeeker, lowerTarget iotago.SlotIndex, targ if latestNonEmptySlot > s.optsRootBlocksEvictionDelay { latestNonEmptySlot -= s.optsRootBlocksEvictionDelay } else { - latestNonEmptySlot = 0 + latestNonEmptySlot = genesisSlot } - if err := stream.WriteSerializable(writer, latestNonEmptySlot, iotago.SlotIndexLength); err != nil { + if err := stream.Write(writer, latestNonEmptySlot); err != nil { return ierrors.Wrap(err, "failed to write latest non empty slot") } @@ -230,32 +228,19 @@ func (s *State) Export(writer io.WriteSeeker, lowerTarget iotago.SlotIndex, targ // Import imports the root blocks from the given reader. 
func (s *State) Import(reader io.ReadSeeker) error { - if err := stream.ReadCollection(reader, func(i int) error { - - blockIDBytes, err := stream.ReadBlob(reader) + if err := stream.ReadCollection(reader, serializer.SeriLengthPrefixTypeAsUint32, func(i int) error { + rootBlockID, err := stream.Read[iotago.BlockID](reader) if err != nil { return ierrors.Wrapf(err, "failed to read root block id %d", i) } - rootBlockID, _, err := iotago.BlockIDFromBytes(blockIDBytes) + commitmentID, err := stream.Read[iotago.CommitmentID](reader) if err != nil { - return ierrors.Wrapf(err, "failed to parse root block id %d", i) + return ierrors.Wrapf(err, "failed to read root block's %s commitment id %d", rootBlockID, i) } - commitmentIDBytes, err := stream.ReadBlob(reader) - if err != nil { - return ierrors.Wrapf(err, "failed to read root block's %s commitment id", rootBlockID) - } - - commitmentID, _, err := iotago.CommitmentIDFromBytes(commitmentIDBytes) - if err != nil { - return ierrors.Wrapf(err, "failed to parse root block's %s commitment id", rootBlockID) - } - - if s.rootBlocks.Get(rootBlockID.Slot(), true).Set(rootBlockID, commitmentID) { - if err := lo.PanicOnErr(s.rootBlockStorageFunc(rootBlockID.Slot())).Store(rootBlockID, commitmentID); err != nil { - panic(ierrors.Wrapf(err, "failed to store root block %s", rootBlockID)) - } + if err := lo.PanicOnErr(s.rootBlockStorageFunc(rootBlockID.Slot())).Store(rootBlockID, commitmentID); err != nil { + panic(ierrors.Wrapf(err, "failed to store root block %s", rootBlockID)) } return nil @@ -263,16 +248,11 @@ func (s *State) Import(reader io.ReadSeeker) error { return ierrors.Wrap(err, "failed to read root blocks") } - latestNonEmptySlotBytes, err := stream.ReadBytes(reader, iotago.SlotIndexLength) + latestNonEmptySlot, err := stream.Read[iotago.SlotIndex](reader) if err != nil { return ierrors.Wrap(err, "failed to read latest non empty slot") } - latestNonEmptySlot, _, err := iotago.SlotIndexFromBytes(latestNonEmptySlotBytes) - if err != nil { - return ierrors.Wrap(err, "failed to parse latest non empty slot") - } - s.setLatestNonEmptySlot(latestNonEmptySlot) return nil @@ -283,7 +263,8 @@ func (s *State) Rollback(lowerTarget, targetIndex iotago.SlotIndex) error { defer s.evictionMutex.RUnlock() start, _ := s.delayedBlockEvictionThreshold(lowerTarget) - latestNonEmptySlot := iotago.SlotIndex(0) + genesisSlot := s.genesisRootBlockFunc().Slot() + latestNonEmptySlot := genesisSlot for currentSlot := start; currentSlot <= targetIndex; currentSlot++ { _, err := s.rootBlockStorageFunc(currentSlot) @@ -297,7 +278,7 @@ func (s *State) Rollback(lowerTarget, targetIndex iotago.SlotIndex) error { if latestNonEmptySlot > s.optsRootBlocksEvictionDelay { latestNonEmptySlot -= s.optsRootBlocksEvictionDelay } else { - latestNonEmptySlot = 0 + latestNonEmptySlot = genesisSlot } if err := s.latestNonEmptyStore.Set([]byte{latestNonEmptySlotKey}, latestNonEmptySlot.MustBytes()); err != nil { @@ -307,22 +288,6 @@ func (s *State) Rollback(lowerTarget, targetIndex iotago.SlotIndex) error { return nil } -// PopulateFromStorage populates the root blocks from the storage. 
-func (s *State) PopulateFromStorage(latestCommitmentSlot iotago.SlotIndex) { - for slot := lo.Return1(s.delayedBlockEvictionThreshold(latestCommitmentSlot)); slot <= latestCommitmentSlot; slot++ { - storedRootBlocks, err := s.rootBlockStorageFunc(slot) - if err != nil { - continue - } - - _ = storedRootBlocks.Stream(func(id iotago.BlockID, commitmentID iotago.CommitmentID) error { - s.AddRootBlock(id, commitmentID) - - return nil - }) - } -} - // latestNonEmptySlot returns the latest slot that contains a rootblock. func (s *State) latestNonEmptySlot() iotago.SlotIndex { latestNonEmptySlotBytes, err := s.latestNonEmptyStore.Get([]byte{latestNonEmptySlotKey}) @@ -371,21 +336,33 @@ func (s *State) withinActiveIndexRange(slot iotago.SlotIndex) bool { // delayedBlockEvictionThreshold returns the slot index that is the threshold for delayed rootblocks eviction. func (s *State) delayedBlockEvictionThreshold(slot iotago.SlotIndex) (thresholdSlot iotago.SlotIndex, shouldEvict bool) { - if slot < s.optsRootBlocksEvictionDelay { - return 0, false + genesisSlot := s.genesisRootBlockFunc().Slot() + if slot < genesisSlot+s.optsRootBlocksEvictionDelay { + return genesisSlot, false } + var rootBlockInWindow bool // Check if we have root blocks up to the eviction point. for ; slot >= s.lastEvictedSlot; slot-- { - if rb := s.rootBlocks.Get(slot); rb != nil { - if rb.Size() > 0 { - if slot >= s.optsRootBlocksEvictionDelay { - return slot - s.optsRootBlocksEvictionDelay, true - } - - return 0, false + storage := lo.PanicOnErr(s.rootBlockStorageFunc(slot)) + + _ = storage.StreamKeys(func(_ iotago.BlockID) error { + if slot >= s.optsRootBlocksEvictionDelay { + thresholdSlot = slot - s.optsRootBlocksEvictionDelay + shouldEvict = true + } else { + thresholdSlot = genesisSlot + shouldEvict = false } - } + + rootBlockInWindow = true + + return ierrors.New("stop iteration") + }) + } + + if rootBlockInWindow { + return thresholdSlot, shouldEvict } // If we didn't find any root blocks, we have to fallback to the latestNonEmptySlot before the eviction point. @@ -393,7 +370,7 @@ func (s *State) delayedBlockEvictionThreshold(slot iotago.SlotIndex) (thresholdS return latestNonEmptySlot - s.optsRootBlocksEvictionDelay, true } - return 0, false + return genesisSlot, false } // WithRootBlocksEvictionDelay sets the time since confirmation threshold. 
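With the in-memory root-block cache and the latestRootBlocks ring buffer removed, the eviction state now reads and writes root blocks directly through the per-slot store and derives its lower bound from the genesis-block callback. A minimal round-trip sketch of the new snapshot path, assuming Export's (writer, lowerTarget, targetSlot) signature from the hunk header above and the NewState wiring exercised in state_test.go; the function and its parameters below are illustrative only and not part of this change:

    package example

    import (
    	"github.com/iotaledger/hive.go/ierrors"
    	"github.com/iotaledger/hive.go/kvstore/mapdb"
    	"github.com/iotaledger/hive.go/serializer/v2/stream"
    	"github.com/iotaledger/iota-core/pkg/protocol/engine/eviction"
    	"github.com/iotaledger/iota-core/pkg/storage/prunable/slotstore"
    	iotago "github.com/iotaledger/iota.go/v4"
    )

    // roundTripEvictionState exports the root blocks of an existing eviction state into an
    // in-memory stream.ByteBuffer and imports them into a freshly wired state.
    func roundTripEvictionState(
    	state *eviction.State,
    	rootBlockStorageFunc func(iotago.SlotIndex) (*slotstore.Store[iotago.BlockID, iotago.CommitmentID], error),
    	genesisBlockIDFunc func() iotago.BlockID,
    	lowerTarget, targetSlot iotago.SlotIndex,
    ) (*eviction.State, error) {
    	buffer := stream.NewByteBuffer()
    	if err := state.Export(buffer, lowerTarget, targetSlot); err != nil {
    		return nil, ierrors.Wrap(err, "failed to export root blocks")
    	}

    	// The restored state persists root blocks through rootBlockStorageFunc and falls back
    	// to the genesis block ID when no root blocks are known yet.
    	restored := eviction.NewState(mapdb.NewMapDB(), rootBlockStorageFunc, genesisBlockIDFunc)
    	if err := restored.Import(buffer.Reader()); err != nil {
    		return nil, ierrors.Wrap(err, "failed to import root blocks")
    	}

    	return restored, nil
    }
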
diff --git a/pkg/protocol/engine/eviction/state_test.go b/pkg/protocol/engine/eviction/state_test.go index 2787eb4b8..0bad76f27 100644 --- a/pkg/protocol/engine/eviction/state_test.go +++ b/pkg/protocol/engine/eviction/state_test.go @@ -22,20 +22,22 @@ func TestState_RootBlocks(t *testing.T) { Directory: t.TempDir(), }, api.SingleVersionProvider(tpkg.TestAPI), errorHandler) - ts := NewTestFramework(t, prunableStorage, eviction.NewState(mapdb.NewMapDB(), prunableStorage.RootBlocks, eviction.WithRootBlocksEvictionDelay(3))) - ts.CreateAndAddRootBlock("Genesis", 0, iotago.NewEmptyCommitment(tpkg.TestAPI.Version()).MustID()) + ts := NewTestFramework(t, prunableStorage, eviction.NewState(mapdb.NewMapDB(), prunableStorage.RootBlocks, func() iotago.BlockID { + return tpkg.TestAPI.ProtocolParameters().GenesisBlockID() + }, eviction.WithRootBlocksEvictionDelay(3))) + ts.CreateAndAddRootBlock("Genesis", 0, iotago.NewEmptyCommitment(tpkg.TestAPI).MustID()) ts.RequireActiveRootBlocks("Genesis") ts.RequireLastEvictedSlot(0) ts.Instance.Initialize(0) - ts.CreateAndAddRootBlock("Root1.0", 1, iotago.NewEmptyCommitment(tpkg.TestAPI.Version()).MustID()) - ts.CreateAndAddRootBlock("Root1.1", 1, iotago.NewEmptyCommitment(tpkg.TestAPI.Version()).MustID()) - ts.CreateAndAddRootBlock("Root2.0", 2, iotago.NewEmptyCommitment(tpkg.TestAPI.Version()).MustID()) - ts.CreateAndAddRootBlock("Root3.0", 3, iotago.NewEmptyCommitment(tpkg.TestAPI.Version()).MustID()) - ts.CreateAndAddRootBlock("Root4.0", 4, iotago.NewEmptyCommitment(tpkg.TestAPI.Version()).MustID()) - ts.CreateAndAddRootBlock("Root4.1", 4, iotago.NewEmptyCommitment(tpkg.TestAPI.Version()).MustID()) - ts.CreateAndAddRootBlock("Root5.0", 5, iotago.NewEmptyCommitment(tpkg.TestAPI.Version()).MustID()) + ts.CreateAndAddRootBlock("Root1.0", 1, iotago.NewEmptyCommitment(tpkg.TestAPI).MustID()) + ts.CreateAndAddRootBlock("Root1.1", 1, iotago.NewEmptyCommitment(tpkg.TestAPI).MustID()) + ts.CreateAndAddRootBlock("Root2.0", 2, iotago.NewEmptyCommitment(tpkg.TestAPI).MustID()) + ts.CreateAndAddRootBlock("Root3.0", 3, iotago.NewEmptyCommitment(tpkg.TestAPI).MustID()) + ts.CreateAndAddRootBlock("Root4.0", 4, iotago.NewEmptyCommitment(tpkg.TestAPI).MustID()) + ts.CreateAndAddRootBlock("Root4.1", 4, iotago.NewEmptyCommitment(tpkg.TestAPI).MustID()) + ts.CreateAndAddRootBlock("Root5.0", 5, iotago.NewEmptyCommitment(tpkg.TestAPI).MustID()) ts.RequireActiveRootBlocks("Genesis") ts.RequireLastEvictedSlot(0) diff --git a/pkg/protocol/engine/eviction/testframework_test.go b/pkg/protocol/engine/eviction/testframework_test.go index 00b70e598..33d097036 100644 --- a/pkg/protocol/engine/eviction/testframework_test.go +++ b/pkg/protocol/engine/eviction/testframework_test.go @@ -97,8 +97,9 @@ func (t *TestFramework) RequireStorageRootBlocks(expected ...string) { rootBlockStorage, err := t.prunableStorage.RootBlocks(blockID.Slot()) require.NoError(t.Testing, err) - loadedCommitmentID, err := rootBlockStorage.Load(blockID) + loadedCommitmentID, exists, err := rootBlockStorage.Load(blockID) require.NoError(t.Testing, err) + require.True(t.Testing, exists) require.Equal(t.Testing, commitmentID, loadedCommitmentID) } } diff --git a/pkg/protocol/engine/ledger/ledger/ledger.go b/pkg/protocol/engine/ledger/ledger/ledger.go index 31632f082..0b9207f36 100644 --- a/pkg/protocol/engine/ledger/ledger/ledger.go +++ b/pkg/protocol/engine/ledger/ledger/ledger.go @@ -180,11 +180,14 @@ func (l *Ledger) CommitSlot(slot iotago.SlotIndex) (stateRoot iotago.Identifier, // Update the Accounts ledger // 
first, get the RMC corresponding to this slot - maxCommittableAge := l.apiProvider.APIForSlot(slot).ProtocolParameters().MaxCommittableAge() - rmcIndex, _ := safemath.SafeSub(slot, maxCommittableAge) - rmcForSlot, err := l.rmcManager.RMC(rmcIndex) + protocolParams := l.apiProvider.APIForSlot(slot).ProtocolParameters() + rmcSlot, _ := safemath.SafeSub(slot, protocolParams.MaxCommittableAge()) // We can safely ignore the underflow error and use the default 0 return value + if rmcSlot < protocolParams.GenesisSlot() { + rmcSlot = protocolParams.GenesisSlot() + } + rmcForSlot, err := l.rmcManager.RMC(rmcSlot) if err != nil { - return iotago.Identifier{}, iotago.Identifier{}, iotago.Identifier{}, ierrors.Errorf("failed to get RMC for slot %d: %w", slot, err) + return iotago.Identifier{}, iotago.Identifier{}, iotago.Identifier{}, ierrors.Errorf("ledger failed to get RMC for slot %d: %w", rmcSlot, err) } if err = l.accountsLedger.ApplyDiff(slot, rmcForSlot, accountDiffs, destroyedAccounts); err != nil { return iotago.Identifier{}, iotago.Identifier{}, iotago.Identifier{}, ierrors.Errorf("failed to apply diff to Accounts ledger for slot %d: %w", slot, err) @@ -656,7 +659,7 @@ func (l *Ledger) processStateDiffTransactions(stateDiff mempool.StateDiff) (spen } accountDiff.BICChange += iotago.BlockIssuanceCredits(allotment.Mana) - accountDiff.PreviousUpdatedTime = accountData.Credits.UpdateTime + accountDiff.PreviousUpdatedSlot = accountData.Credits.UpdateSlot // we are not transitioning the allotted account, so the new and previous expiry slots are the same accountDiff.PreviousExpirySlot = accountData.ExpirySlot diff --git a/pkg/protocol/engine/mempool/v1/state_diff.go b/pkg/protocol/engine/mempool/v1/state_diff.go index 3fe5bb794..f5cbf214b 100644 --- a/pkg/protocol/engine/mempool/v1/state_diff.go +++ b/pkg/protocol/engine/mempool/v1/state_diff.go @@ -31,7 +31,13 @@ func NewStateDiff(slot iotago.SlotIndex, kv kvstore.KVStore) *StateDiff { createdOutputs: shrinkingmap.New[mempool.StateID, mempool.StateMetadata](), executedTransactions: orderedmap.New[iotago.TransactionID, mempool.TransactionMetadata](), stateUsageCounters: shrinkingmap.New[mempool.StateID, int](), - mutations: ads.NewSet[iotago.Identifier](kv, iotago.TransactionID.Bytes, iotago.TransactionIDFromBytes), + mutations: ads.NewSet[iotago.Identifier]( + kv, + iotago.Identifier.Bytes, + iotago.IdentifierFromBytes, + iotago.TransactionID.Bytes, + iotago.TransactionIDFromBytes, + ), } } diff --git a/pkg/protocol/engine/notarization/slotnotarization/slotmutations.go b/pkg/protocol/engine/notarization/slotnotarization/slotmutations.go index 8e60dd53f..08aa10799 100644 --- a/pkg/protocol/engine/notarization/slotnotarization/slotmutations.go +++ b/pkg/protocol/engine/notarization/slotnotarization/slotmutations.go @@ -76,7 +76,13 @@ func (m *SlotMutations) Reset(index iotago.SlotIndex) { func (m *SlotMutations) AcceptedBlocks(index iotago.SlotIndex, createIfMissing ...bool) ads.Set[iotago.Identifier, iotago.BlockID] { if len(createIfMissing) > 0 && createIfMissing[0] { return lo.Return1(m.acceptedBlocksBySlot.GetOrCreate(index, func() ads.Set[iotago.Identifier, iotago.BlockID] { - return ads.NewSet[iotago.Identifier](mapdb.NewMapDB(), iotago.BlockID.Bytes, iotago.BlockIDFromBytes) + return ads.NewSet[iotago.Identifier]( + mapdb.NewMapDB(), + iotago.Identifier.Bytes, + iotago.IdentifierFromBytes, + iotago.BlockID.Bytes, + iotago.BlockIDFromBytes, + ) })) } diff --git a/pkg/protocol/engine/tipselection/v1/provider.go 
b/pkg/protocol/engine/tipselection/v1/provider.go index ad9bb1d1b..f26180628 100644 --- a/pkg/protocol/engine/tipselection/v1/provider.go +++ b/pkg/protocol/engine/tipselection/v1/provider.go @@ -4,12 +4,14 @@ import ( "math" "time" + "github.com/iotaledger/hive.go/lo" "github.com/iotaledger/hive.go/runtime/module" "github.com/iotaledger/hive.go/runtime/options" "github.com/iotaledger/iota-core/pkg/protocol/engine" "github.com/iotaledger/iota-core/pkg/protocol/engine/blocks" "github.com/iotaledger/iota-core/pkg/protocol/engine/tipmanager" "github.com/iotaledger/iota-core/pkg/protocol/engine/tipselection" + iotago "github.com/iotaledger/iota.go/v4" ) // NewProvider creates a new TipSelection provider, that can be used to inject the component into an engine. @@ -20,7 +22,7 @@ func NewProvider(opts ...options.Option[TipSelection]) module.Provider[*engine.E e.HookConstructed(func() { // wait for submodules to be constructed (so all of their properties are available) module.OnAllConstructed(func() { - t.Construct(e.TipManager, e.Ledger.ConflictDAG(), e.Ledger.MemPool().TransactionMetadata, e.EvictionState.LatestRootBlocks, DynamicLivenessThreshold(e.SybilProtection.SeatManager().OnlineCommittee().Size)) + t.Construct(e.TipManager, e.Ledger.ConflictDAG(), e.Ledger.MemPool().TransactionMetadata, func() iotago.BlockIDs { return lo.Keys(e.EvictionState.ActiveRootBlocks()) }, DynamicLivenessThreshold(e.SybilProtection.SeatManager().OnlineCommittee().Size)) e.Events.AcceptedBlockProcessed.Hook(func(block *blocks.Block) { t.SetAcceptanceTime(block.IssuingTime()) diff --git a/pkg/protocol/engine/tipselection/v1/tip_selection.go b/pkg/protocol/engine/tipselection/v1/tip_selection.go index ee7b7fad4..a4e180b86 100644 --- a/pkg/protocol/engine/tipselection/v1/tip_selection.go +++ b/pkg/protocol/engine/tipselection/v1/tip_selection.go @@ -3,6 +3,8 @@ package tipselectionv1 import ( "time" + "golang.org/x/exp/slices" + "github.com/iotaledger/hive.go/ds" "github.com/iotaledger/hive.go/ds/reactive" "github.com/iotaledger/hive.go/ierrors" @@ -120,6 +122,17 @@ func (t *TipSelection) SelectTips(amount int) (references model.ParentReferences }, amount); len(references[iotago.StrongParentType]) == 0 { rootBlocks := t.rootBlocks() + // Sort the rootBlocks in descending order according to their slot. 
+ slices.SortFunc(rootBlocks, func(i, j iotago.BlockID) int { + if i.Slot() == j.Slot() { + return 0 + } else if i.Slot() < j.Slot() { + return 1 + } + + return -1 + }) + references[iotago.StrongParentType] = rootBlocks[:lo.Min(len(rootBlocks), t.optMaxStrongParents)] } diff --git a/pkg/protocol/engine/upgrade/signalingupgradeorchestrator/snapshot.go b/pkg/protocol/engine/upgrade/signalingupgradeorchestrator/snapshot.go index 3d31872ea..f4b6a1926 100644 --- a/pkg/protocol/engine/upgrade/signalingupgradeorchestrator/snapshot.go +++ b/pkg/protocol/engine/upgrade/signalingupgradeorchestrator/snapshot.go @@ -4,6 +4,7 @@ import ( "io" "github.com/iotaledger/hive.go/ierrors" + "github.com/iotaledger/hive.go/serializer/v2" "github.com/iotaledger/hive.go/serializer/v2/stream" "github.com/iotaledger/iota-core/pkg/core/account" "github.com/iotaledger/iota-core/pkg/model" @@ -21,13 +22,13 @@ func (o *Orchestrator) Import(reader io.ReadSeeker) error { o.lastCommittedSlot = slot upgradeSignalMap := make(map[account.SeatIndex]*model.SignaledBlock) - if err := stream.ReadCollection(reader, func(i int) error { + if err := stream.ReadCollection(reader, serializer.SeriLengthPrefixTypeAsUint32, func(i int) error { seat, err := stream.Read[account.SeatIndex](reader) if err != nil { return ierrors.Wrap(err, "failed to read seat") } - signaledBlock, err := stream.ReadFunc(reader, model.SignaledBlockFromBytesFunc(o.apiProvider.APIForSlot(slot))) + signaledBlock, err := stream.ReadObjectWithSize(reader, serializer.SeriLengthPrefixTypeAsUint16, model.SignaledBlockFromBytesFunc(o.apiProvider.APIForSlot(slot))) if err != nil { return ierrors.Wrap(err, "failed to read signaled block") } @@ -55,13 +56,13 @@ func (o *Orchestrator) Import(reader io.ReadSeeker) error { latestSignals.Set(seat, signaledBlock) } - if err := stream.ReadCollection(reader, func(i int) error { + if err := stream.ReadCollection(reader, serializer.SeriLengthPrefixTypeAsByte, func(i int) error { epoch, err := stream.Read[iotago.EpochIndex](reader) if err != nil { return ierrors.Wrap(err, "failed to read epoch") } - versionAndHash, err := stream.ReadFunc(reader, model.VersionAndHashFromBytes) + versionAndHash, err := stream.ReadObject(reader, model.VersionAndHashSize, model.VersionAndHashFromBytes) if err != nil { return ierrors.Wrap(err, "failed to read versionAndHash") } @@ -87,19 +88,19 @@ func (o *Orchestrator) Export(writer io.WriteSeeker, targetSlot iotago.SlotIndex } // Export the upgrade signals for the target slot. Since these are rolled forward exporting the last slot is sufficient. 
- if err := stream.WriteCollection(writer, func() (elementsCount uint64, err error) { - var exportedCount uint64 + if err := stream.WriteCollection(writer, serializer.SeriLengthPrefixTypeAsUint32, func() (elementsCount int, err error) { + var exportedCount int upgradeSignals, err := o.upgradeSignalsPerSlotFunc(targetSlot) if err != nil { return 0, ierrors.Wrapf(err, "failed to get upgrade signals for target slot %d", targetSlot) } if err := upgradeSignals.StreamBytes(func(seatBytes []byte, signaledBlockBytes []byte) error { - if err := stream.Write(writer, seatBytes); err != nil { + if err := stream.WriteBytes(writer, seatBytes); err != nil { return ierrors.Wrap(err, "failed to write seat") } - if err := stream.WriteBlob(writer, signaledBlockBytes); err != nil { + if err := stream.WriteBytesWithSize(writer, signaledBlockBytes, serializer.SeriLengthPrefixTypeAsUint16); err != nil { return ierrors.Wrap(err, "failed to write signaled block") } @@ -116,8 +117,8 @@ func (o *Orchestrator) Export(writer io.WriteSeeker, targetSlot iotago.SlotIndex } // Export the successfully signaled epochs for the signaling window. - if err := stream.WriteCollection(writer, func() (elementsCount uint64, err error) { - var exportedCount uint64 + if err := stream.WriteCollection(writer, serializer.SeriLengthPrefixTypeAsByte, func() (elementsCount int, err error) { + var exportedCount int apiForSlot := o.apiProvider.APIForSlot(targetSlot) currentEpoch := apiForSlot.TimeProvider().EpochFromSlot(targetSlot) @@ -137,7 +138,7 @@ func (o *Orchestrator) Export(writer io.WriteSeeker, targetSlot iotago.SlotIndex if err := stream.Write(writer, epoch); err != nil { return 0, ierrors.Wrapf(err, "failed to write epoch %d", epoch) } - if err := stream.WriteSerializable(writer, versionAndHash); err != nil { + if err := stream.WriteObject(writer, versionAndHash, model.VersionAndHash.Bytes); err != nil { return 0, ierrors.Wrapf(err, "failed to write versionAndHash for epoch %d", epoch) } diff --git a/pkg/protocol/engine/utxoledger/database_prefixes.go b/pkg/protocol/engine/utxoledger/database_prefixes.go index 12dcd559e..8ff909c73 100644 --- a/pkg/protocol/engine/utxoledger/database_prefixes.go +++ b/pkg/protocol/engine/utxoledger/database_prefixes.go @@ -36,7 +36,7 @@ const ( 1 byte + 34 bytes Value: - BlockID + iotago.SlotIndex + TransactionCreationSlot (time.Time) + iotago.Output.Serialized() + BlockID + iotago.SlotIndex + TransactionCreationSlot (time.Slot) + iotago.Output.Serialized() 40 bytes + 4 bytes + 8 byte s + 1 byte type + X bytes Spent Output: @@ -46,7 +46,7 @@ const ( 1 byte + 34 bytes Value: - TargetTransactionID (iotago.SignedTransactionID) + TransactionAcceptedSlotIndex (iotago.SlotIndex) + TransactionCreationSlot (time.Time) + TargetTransactionID (iotago.SignedTransactionID) + TransactionAcceptedSlotIndex (iotago.SlotIndex) + TransactionCreationSlot (time.Slot) 32 bytes + 8 bytes + 8 bytes Unspent Output: diff --git a/pkg/protocol/engine/utxoledger/manager.go b/pkg/protocol/engine/utxoledger/manager.go index 83fd376f5..6fc850cab 100644 --- a/pkg/protocol/engine/utxoledger/manager.go +++ b/pkg/protocol/engine/utxoledger/manager.go @@ -2,13 +2,13 @@ package utxoledger import ( "crypto/sha256" - "encoding/binary" "github.com/iotaledger/hive.go/ads" "github.com/iotaledger/hive.go/ierrors" "github.com/iotaledger/hive.go/kvstore" "github.com/iotaledger/hive.go/lo" "github.com/iotaledger/hive.go/runtime/syncutils" + "github.com/iotaledger/hive.go/serializer/v2/stream" iotago "github.com/iotaledger/iota.go/v4" ) @@ -28,6 
+28,8 @@ func New(store kvstore.KVStore, apiProvider iotago.APIProvider) *Manager { return &Manager{ store: store, stateTree: ads.NewMap[iotago.Identifier](lo.PanicOnErr(store.WithExtendedRealm(kvstore.Realm{StoreKeyPrefixStateTree})), + iotago.Identifier.Bytes, + iotago.IdentifierFromBytes, iotago.OutputID.Bytes, iotago.OutputIDFromBytes, (*stateTreeMetadata).Bytes, @@ -130,8 +132,8 @@ func (m *Manager) ReadLedgerIndexWithoutLocking() (iotago.SlotIndex, error) { value, err := m.store.Get([]byte{StoreKeyPrefixLedgerSlotIndex}) if err != nil { if ierrors.Is(err, kvstore.ErrKeyNotFound) { - // there is no ledger milestone yet => return 0 - return 0, nil + // there is no ledger milestone yet => return genesis slot + return m.apiProvider.CommittedAPI().ProtocolParameters().GenesisSlot(), nil } return 0, ierrors.Errorf("failed to load ledger milestone index: %w", err) @@ -368,7 +370,8 @@ func (m *Manager) LedgerStateSHA256Sum() ([]byte, error) { if err != nil { return nil, err } - if err := binary.Write(ledgerStateHash, binary.LittleEndian, ledgerSlot); err != nil { + + if err := stream.Write(ledgerStateHash, ledgerSlot); err != nil { return nil, err } @@ -384,22 +387,16 @@ func (m *Manager) LedgerStateSHA256Sum() ([]byte, error) { return nil, err } - if _, err := ledgerStateHash.Write(output.outputID[:]); err != nil { + if err := stream.Write(ledgerStateHash, outputID); err != nil { return nil, err } - if _, err := ledgerStateHash.Write(output.KVStorableValue()); err != nil { + if err := stream.WriteBytes(ledgerStateHash, output.KVStorableValue()); err != nil { return nil, err } } - // Add root of the state tree - stateTreeBytes, err := m.StateTreeRoot().Bytes() - if err != nil { - return nil, err - } - - if _, err := ledgerStateHash.Write(stateTreeBytes); err != nil { + if err := stream.Write(ledgerStateHash, m.StateTreeRoot()); err != nil { return nil, err } diff --git a/pkg/protocol/engine/utxoledger/marshalutils_helper.go b/pkg/protocol/engine/utxoledger/marshalutils_helper.go deleted file mode 100644 index b19e747fc..000000000 --- a/pkg/protocol/engine/utxoledger/marshalutils_helper.go +++ /dev/null @@ -1,43 +0,0 @@ -package utxoledger - -import ( - "github.com/iotaledger/hive.go/lo" - "github.com/iotaledger/hive.go/serializer/v2/marshalutil" - iotago "github.com/iotaledger/iota.go/v4" -) - -func ParseOutputID(ms *marshalutil.MarshalUtil) (iotago.OutputID, error) { - bytes, err := ms.ReadBytes(iotago.OutputIDLength) - if err != nil { - return iotago.EmptyOutputID, err - } - - return iotago.OutputID(bytes), nil -} - -func parseTransactionID(ms *marshalutil.MarshalUtil) (iotago.TransactionID, error) { - bytes, err := ms.ReadBytes(iotago.TransactionIDLength) - if err != nil { - return iotago.EmptyTransactionID, err - } - - return iotago.TransactionID(bytes), nil -} - -func ParseBlockID(ms *marshalutil.MarshalUtil) (iotago.BlockID, error) { - bytes, err := ms.ReadBytes(iotago.BlockIDLength) - if err != nil { - return iotago.EmptyBlockID, err - } - - return iotago.BlockID(bytes), nil -} - -func parseSlotIndex(ms *marshalutil.MarshalUtil) (iotago.SlotIndex, error) { - bytes, err := ms.ReadBytes(iotago.SlotIndexLength) - if err != nil { - return 0, err - } - - return lo.DropCount(iotago.SlotIndexFromBytes(bytes)) -} diff --git a/pkg/protocol/engine/utxoledger/output.go b/pkg/protocol/engine/utxoledger/output.go index 3b6b5e396..0194b65c1 100644 --- a/pkg/protocol/engine/utxoledger/output.go +++ b/pkg/protocol/engine/utxoledger/output.go @@ -8,7 +8,7 @@ import ( 
"github.com/iotaledger/hive.go/kvstore" "github.com/iotaledger/hive.go/lo" "github.com/iotaledger/hive.go/serializer/v2" - "github.com/iotaledger/hive.go/serializer/v2/marshalutil" + "github.com/iotaledger/hive.go/serializer/v2/stream" iotago "github.com/iotaledger/iota.go/v4" ) @@ -193,11 +193,13 @@ func (o *Output) CopyWithBlockIDAndSlotBooked(blockID iotago.BlockID, slotBooked // - kvStorable func outputStorageKeyForOutputID(outputID iotago.OutputID) []byte { - ms := marshalutil.New(iotago.OutputIDLength + 1) - ms.WriteByte(StoreKeyPrefixOutput) // 1 byte - ms.WriteBytes(outputID[:]) // iotago.OutputIDLength bytes + byteBuffer := stream.NewByteBuffer(iotago.OutputIDLength + serializer.OneByte) - return ms.Bytes() + // There can't be any errors. + _ = stream.Write(byteBuffer, StoreKeyPrefixOutput) + _ = stream.Write(byteBuffer, outputID) + + return lo.PanicOnErr(byteBuffer.Bytes()) } func (o *Output) KVStorableKey() (key []byte) { @@ -205,68 +207,41 @@ func (o *Output) KVStorableKey() (key []byte) { } func (o *Output) KVStorableValue() (value []byte) { - ms := marshalutil.New() - ms.WriteBytes(o.blockID[:]) // BlockIDLength bytes - ms.WriteBytes(o.slotBooked.MustBytes()) // 4 bytes - - ms.WriteUint32(uint32(len(o.encodedOutput))) // 4 bytes - ms.WriteBytes(o.encodedOutput) + byteBuffer := stream.NewByteBuffer() - ms.WriteUint32(uint32(len(o.encodedProof))) // 4 bytes - ms.WriteBytes(o.encodedProof) + // There can't be any errors. + _ = stream.Write(byteBuffer, o.blockID) + _ = stream.Write(byteBuffer, o.slotBooked) + _ = stream.WriteBytesWithSize(byteBuffer, o.encodedOutput, serializer.SeriLengthPrefixTypeAsUint32) + _ = stream.WriteBytesWithSize(byteBuffer, o.encodedProof, serializer.SeriLengthPrefixTypeAsUint32) - return ms.Bytes() + return lo.PanicOnErr(byteBuffer.Bytes()) } func (o *Output) kvStorableLoad(_ *Manager, key []byte, value []byte) error { - // Parse key - keyUtil := marshalutil.New(key) - - // Read prefix output - _, err := keyUtil.ReadByte() - if err != nil { - return err - } - - // Read OutputID - if o.outputID, err = ParseOutputID(keyUtil); err != nil { - return err - } + var err error - // Parse value - valueUtil := marshalutil.New(value) + keyReader := stream.NewByteReader(key) - // Read BlockID - if o.blockID, err = ParseBlockID(valueUtil); err != nil { - return err + if _, err = stream.Read[byte](keyReader); err != nil { + return ierrors.Wrap(err, "unable to read prefix") } - - // Read Slot - o.slotBooked, err = parseSlotIndex(valueUtil) - if err != nil { - return err + if o.outputID, err = stream.Read[iotago.OutputID](keyReader); err != nil { + return ierrors.Wrap(err, "unable to read outputID") } - // Read Output - outputLen, err := valueUtil.ReadUint32() - if err != nil { - return err + valueReader := stream.NewByteReader(value) + if o.blockID, err = stream.Read[iotago.BlockID](valueReader); err != nil { + return ierrors.Wrap(err, "unable to read blockID") } - - o.encodedOutput, err = valueUtil.ReadBytes(int(outputLen)) - if err != nil { - return err + if o.slotBooked, err = stream.Read[iotago.SlotIndex](valueReader); err != nil { + return ierrors.Wrap(err, "unable to read slotBooked") } - - // Read Output proof - proofLen, err := valueUtil.ReadUint32() - if err != nil { - return err + if o.encodedOutput, err = stream.ReadBytesWithSize(valueReader, serializer.SeriLengthPrefixTypeAsUint32); err != nil { + return ierrors.Wrap(err, "unable to read encodedOutput") } - - o.encodedProof, err = valueUtil.ReadBytes(int(proofLen)) - if err != nil { - return err + if 
o.encodedProof, err = stream.ReadBytesWithSize(valueReader, serializer.SeriLengthPrefixTypeAsUint32); err != nil { + return ierrors.Wrap(err, "unable to read encodedProof") } return nil diff --git a/pkg/protocol/engine/utxoledger/slot_diff.go b/pkg/protocol/engine/utxoledger/slot_diff.go index dd54bd492..be304e321 100644 --- a/pkg/protocol/engine/utxoledger/slot_diff.go +++ b/pkg/protocol/engine/utxoledger/slot_diff.go @@ -2,12 +2,13 @@ package utxoledger import ( "crypto/sha256" - "encoding/binary" "sort" "github.com/iotaledger/hive.go/ierrors" "github.com/iotaledger/hive.go/kvstore" - "github.com/iotaledger/hive.go/serializer/v2/marshalutil" + "github.com/iotaledger/hive.go/lo" + "github.com/iotaledger/hive.go/serializer/v2" + "github.com/iotaledger/hive.go/serializer/v2/stream" iotago "github.com/iotaledger/iota.go/v4" ) @@ -21,12 +22,14 @@ type SlotDiff struct { Spents Spents } -func slotDiffKeyForIndex(index iotago.SlotIndex) []byte { - m := marshalutil.New(iotago.SlotIndexLength + 1) - m.WriteByte(StoreKeyPrefixSlotDiffs) - m.WriteBytes(index.MustBytes()) +func slotDiffKeyForIndex(slot iotago.SlotIndex) []byte { + byteBuffer := stream.NewByteBuffer(serializer.OneByte + iotago.SlotIndexLength) - return m.Bytes() + // There can't be any errors. + _ = stream.Write(byteBuffer, StoreKeyPrefixSlotDiffs) + _ = stream.Write(byteBuffer, slot) + + return lo.PanicOnErr(byteBuffer.Bytes()) } func (sd *SlotDiff) KVStorableKey() []byte { @@ -34,40 +37,48 @@ func (sd *SlotDiff) KVStorableKey() []byte { } func (sd *SlotDiff) KVStorableValue() []byte { - m := marshalutil.New() + byteBuffer := stream.NewByteBuffer() - m.WriteUint32(uint32(len(sd.Outputs))) - for _, output := range sd.sortedOutputs() { - m.WriteBytes(output.outputID[:]) - } + // There can't be any errors. + _ = stream.WriteCollection(byteBuffer, serializer.SeriLengthPrefixTypeAsUint32, func() (elementsCount int, err error) { + for _, output := range sd.sortedOutputs() { + _ = stream.Write(byteBuffer, output.outputID) + } - m.WriteUint32(uint32(len(sd.Spents))) - for _, spent := range sd.sortedSpents() { - m.WriteBytes(spent.output.outputID[:]) - } + return len(sd.Outputs), nil + }) - return m.Bytes() + _ = stream.WriteCollection(byteBuffer, serializer.SeriLengthPrefixTypeAsUint32, func() (elementsCount int, err error) { + for _, spent := range sd.sortedSpents() { + _ = stream.Write(byteBuffer, spent.output.outputID) + } + + return len(sd.Spents), nil + }) + + return lo.PanicOnErr(byteBuffer.Bytes()) } // note that this method relies on the data being available within other "tables". 
func (sd *SlotDiff) kvStorableLoad(manager *Manager, key []byte, value []byte) error { - slot, _, err := iotago.SlotIndexFromBytes(key[1:]) - if err != nil { + var err error + + if sd.Slot, _, err = iotago.SlotIndexFromBytes(key[1:]); err != nil { return err } - marshalUtil := marshalutil.New(value) + byteReader := stream.NewByteReader(value) - outputCount, err := marshalUtil.ReadUint32() + outputsCount, err := stream.PeekSize(byteReader, serializer.SeriLengthPrefixTypeAsUint32) if err != nil { - return err + return ierrors.Wrap(err, "unable to peek outputs count") } - outputs := make(Outputs, int(outputCount)) - for i := 0; i < int(outputCount); i++ { - var outputID iotago.OutputID - if outputID, err = ParseOutputID(marshalUtil); err != nil { - return err + outputs := make(Outputs, outputsCount) + if err = stream.ReadCollection(byteReader, serializer.SeriLengthPrefixTypeAsUint32, func(i int) error { + outputID, err := stream.Read[iotago.OutputID](byteReader) + if err != nil { + return ierrors.Wrap(err, "unable to read outputID") } output, err := manager.ReadOutputByOutputIDWithoutLocking(outputID) @@ -76,18 +87,22 @@ func (sd *SlotDiff) kvStorableLoad(manager *Manager, key []byte, value []byte) e } outputs[i] = output + + return nil + }); err != nil { + return ierrors.Wrapf(err, "unable to read slot diff outputs") } - spentCount, err := marshalUtil.ReadUint32() + spentsCount, err := stream.PeekSize(byteReader, serializer.SeriLengthPrefixTypeAsUint32) if err != nil { - return err + return ierrors.Wrap(err, "unable to peek spents count") } - spents := make(Spents, spentCount) - for i := 0; i < int(spentCount); i++ { - var outputID iotago.OutputID - if outputID, err = ParseOutputID(marshalUtil); err != nil { - return err + spents := make(Spents, spentsCount) + if err = stream.ReadCollection(byteReader, serializer.SeriLengthPrefixTypeAsUint32, func(i int) error { + outputID, err := stream.Read[iotago.OutputID](byteReader) + if err != nil { + return ierrors.Wrap(err, "unable to read outputID") } spent, err := manager.ReadSpentForOutputIDWithoutLocking(outputID) @@ -96,9 +111,12 @@ func (sd *SlotDiff) kvStorableLoad(manager *Manager, key []byte, value []byte) e } spents[i] = spent + + return nil + }); err != nil { + return ierrors.Wrapf(err, "unable to read slot diff spents") } - sd.Slot = slot sd.Outputs = outputs sd.Spents = spents @@ -127,11 +145,11 @@ func (sd *SlotDiff) sortedSpents() LexicalOrderedSpents { func (sd *SlotDiff) SHA256Sum() ([]byte, error) { sdDiffHash := sha256.New() - if err := binary.Write(sdDiffHash, binary.LittleEndian, sd.KVStorableKey()); err != nil { + if err := stream.WriteBytes(sdDiffHash, sd.KVStorableKey()); err != nil { return nil, ierrors.Errorf("unable to serialize slot diff: %w", err) } - if err := binary.Write(sdDiffHash, binary.LittleEndian, sd.KVStorableValue()); err != nil { + if err := stream.WriteBytes(sdDiffHash, sd.KVStorableValue()); err != nil { return nil, ierrors.Errorf("unable to serialize slot diff: %w", err) } diff --git a/pkg/protocol/engine/utxoledger/snapshot.go b/pkg/protocol/engine/utxoledger/snapshot.go index 719a69b30..a6af13e95 100644 --- a/pkg/protocol/engine/utxoledger/snapshot.go +++ b/pkg/protocol/engine/utxoledger/snapshot.go @@ -1,87 +1,76 @@ package utxoledger import ( - "encoding/binary" "io" "github.com/iotaledger/hive.go/ierrors" - "github.com/iotaledger/hive.go/serializer/v2/marshalutil" + "github.com/iotaledger/hive.go/serializer/v2" + "github.com/iotaledger/hive.go/serializer/v2/byteutils" 
"github.com/iotaledger/hive.go/serializer/v2/serix" - "github.com/iotaledger/iota-core/pkg/utils" + "github.com/iotaledger/hive.go/serializer/v2/stream" iotago "github.com/iotaledger/iota.go/v4" ) // Helpers to serialize/deserialize into/from snapshots func (o *Output) SnapshotBytes() []byte { - m := marshalutil.New() - m.WriteBytes(o.outputID[:]) - m.WriteBytes(o.blockID[:]) - m.WriteUint32(uint32(o.slotBooked)) - m.WriteUint32(uint32(len(o.encodedOutput))) - m.WriteBytes(o.encodedOutput) - m.WriteUint32(uint32(len(o.encodedProof))) - m.WriteBytes(o.encodedProof) - - return m.Bytes() + return byteutils.ConcatBytes(o.outputID[:], o.KVStorableValue()) } func OutputFromSnapshotReader(reader io.ReadSeeker, apiProvider iotago.APIProvider) (*Output, error) { - outputID := iotago.OutputID{} - if _, err := io.ReadFull(reader, outputID[:]); err != nil { - return nil, ierrors.Errorf("unable to read LS output ID: %w", err) + outputID, err := stream.Read[iotago.OutputID](reader) + if err != nil { + return nil, ierrors.Wrap(err, "unable to read LS output ID") } - blockID := iotago.BlockID{} - if _, err := io.ReadFull(reader, blockID[:]); err != nil { - return nil, ierrors.Errorf("unable to read LS block ID: %w", err) + blockID, err := stream.Read[iotago.BlockID](reader) + if err != nil { + return nil, ierrors.Wrap(err, "unable to read LS block ID") } - var slotBooked iotago.SlotIndex - if err := binary.Read(reader, binary.LittleEndian, &slotBooked); err != nil { - return nil, ierrors.Errorf("unable to read LS output milestone index booked: %w", err) + slotBooked, err := stream.Read[iotago.SlotIndex](reader) + if err != nil { + return nil, ierrors.Wrap(err, "unable to read LS output slot booked") } - var outputLength uint32 - if err := binary.Read(reader, binary.LittleEndian, &outputLength); err != nil { - return nil, ierrors.Errorf("unable to read LS output length: %w", err) - } + var outputBytes []byte + output, err := stream.ReadObjectWithSize(reader, serializer.SeriLengthPrefixTypeAsUint32, func(bytes []byte) (iotago.TxEssenceOutput, int, error) { + outputBytes = bytes - outputBytes := make([]byte, outputLength) - if _, err := io.ReadFull(reader, outputBytes); err != nil { - return nil, ierrors.Errorf("unable to read LS output bytes: %w", err) - } + var o iotago.TxEssenceOutput + readBytes, err := apiProvider.APIForSlot(blockID.Slot()).Decode(bytes, &o, serix.WithValidation()) + if err != nil { + return nil, 0, ierrors.Wrap(err, "invalid LS output address") + } - var output iotago.TxEssenceOutput - if _, err := apiProvider.APIForSlot(blockID.Slot()).Decode(outputBytes, &output, serix.WithValidation()); err != nil { - return nil, ierrors.Errorf("invalid LS output address: %w", err) + return o, readBytes, nil + }) + if err != nil { + return nil, ierrors.Wrap(err, "unable to read LS output") } - var proofLength uint32 - if err := binary.Read(reader, binary.LittleEndian, &proofLength); err != nil { - return nil, ierrors.Errorf("unable to read LS output proof length: %w", err) - } + var proofBytes []byte + proof, err := stream.ReadObjectWithSize(reader, serializer.SeriLengthPrefixTypeAsUint32, func(bytes []byte) (*iotago.OutputIDProof, int, error) { + proofBytes = bytes - proofBytes := make([]byte, proofLength) - if _, err := io.ReadFull(reader, proofBytes); err != nil { - return nil, ierrors.Errorf("unable to read LS output proof bytes: %w", err) - } + proof, readBytes, err := iotago.OutputIDProofFromBytes(apiProvider.APIForSlot(blockID.Slot()))(proofBytes) + if err != nil { + return nil, 0, 
ierrors.Wrap(err, "invalid LS output proof") + } - proof, _, err := iotago.OutputIDProofFromBytes(apiProvider.APIForSlot(blockID.Slot()))(proofBytes) + return proof, readBytes, nil + }) if err != nil { - return nil, ierrors.Errorf("invalid LS output proof: %w", err) + return nil, ierrors.Wrap(err, "unable to read LS output proof") } return NewOutput(apiProvider, outputID, blockID, slotBooked, output, outputBytes, proof, proofBytes), nil } func (s *Spent) SnapshotBytes() []byte { - m := marshalutil.New() - m.WriteBytes(s.Output().SnapshotBytes()) - m.WriteBytes(s.transactionIDSpent[:]) - // we don't need to write indexSpent because this info is available in the milestoneDiff that consumes the output - return m.Bytes() + + return byteutils.ConcatBytes(s.Output().SnapshotBytes(), s.transactionIDSpent[:]) } func SpentFromSnapshotReader(reader io.ReadSeeker, apiProvider iotago.APIProvider, indexSpent iotago.SlotIndex) (*Spent, error) { @@ -90,84 +79,89 @@ func SpentFromSnapshotReader(reader io.ReadSeeker, apiProvider iotago.APIProvide return nil, err } - transactionIDSpent := iotago.TransactionID{} - if _, err := io.ReadFull(reader, transactionIDSpent[:]); err != nil { - return nil, ierrors.Errorf("unable to read LS transaction ID spent: %w", err) + transactionIDSpent, err := stream.Read[iotago.TransactionID](reader) + if err != nil { + return nil, ierrors.Wrap(err, "unable to read LS transaction ID spent") } return NewSpent(output, transactionIDSpent, indexSpent), nil } func ReadSlotDiffToSnapshotReader(reader io.ReadSeeker, apiProvider iotago.APIProvider) (*SlotDiff, error) { + var err error slotDiff := &SlotDiff{} - var diffIndex iotago.SlotIndex - if err := binary.Read(reader, binary.LittleEndian, &diffIndex); err != nil { - return nil, ierrors.Errorf("unable to read slot diff index: %w", err) + if slotDiff.Slot, err = stream.Read[iotago.SlotIndex](reader); err != nil { + return nil, ierrors.Wrap(err, "unable to read slot diff index") } - slotDiff.Slot = diffIndex - var createdCount uint64 - if err := binary.Read(reader, binary.LittleEndian, &createdCount); err != nil { - return nil, ierrors.Errorf("unable to read slot diff created count: %w", err) + createdCount, err := stream.PeekSize(reader, serializer.SeriLengthPrefixTypeAsUint32) + if err != nil { + return nil, ierrors.Wrap(err, "unable to peek slot diff created count") } - slotDiff.Outputs = make(Outputs, createdCount) - for i := uint64(0); i < createdCount; i++ { - var err error + if err := stream.ReadCollection(reader, serializer.SeriLengthPrefixTypeAsUint32, func(i int) error { slotDiff.Outputs[i], err = OutputFromSnapshotReader(reader, apiProvider) if err != nil { - return nil, ierrors.Errorf("unable to read slot diff output: %w", err) + return ierrors.Wrap(err, "unable to read slot diff output") } - } - var consumedCount uint64 - if err := binary.Read(reader, binary.LittleEndian, &consumedCount); err != nil { - return nil, ierrors.Errorf("unable to read slot diff consumed count: %w", err) + return nil + }); err != nil { + return nil, ierrors.Wrap(err, "unable to read slot diff created collection") } + consumedCount, err := stream.PeekSize(reader, serializer.SeriLengthPrefixTypeAsUint32) + if err != nil { + return nil, ierrors.Wrap(err, "unable to peek slot diff consumed count") + } slotDiff.Spents = make(Spents, consumedCount) - for i := uint64(0); i < consumedCount; i++ { - var err error + if err := stream.ReadCollection(reader, serializer.SeriLengthPrefixTypeAsUint32, func(i int) error { slotDiff.Spents[i], err = 
SpentFromSnapshotReader(reader, apiProvider, slotDiff.Slot) if err != nil { - return nil, ierrors.Errorf("unable to read slot diff spent: %w", err) + return ierrors.Wrap(err, "unable to read slot diff spent") } + + return nil + }); err != nil { + return nil, ierrors.Wrap(err, "unable to read slot diff consumed collection") } return slotDiff, nil } -func WriteSlotDiffToSnapshotWriter(writer io.WriteSeeker, diff *SlotDiff) (written int64, err error) { - var totalBytesWritten int64 - - if err := utils.WriteValueFunc(writer, diff.Slot.MustBytes(), &totalBytesWritten); err != nil { - return 0, ierrors.Wrap(err, "unable to write slot diff index") +func WriteSlotDiffToSnapshotWriter(writer io.WriteSeeker, diff *SlotDiff) error { + if err := stream.Write(writer, diff.Slot); err != nil { + return ierrors.Wrap(err, "unable to write slot diff index") } - if err := utils.WriteValueFunc(writer, uint64(len(diff.Outputs)), &totalBytesWritten); err != nil { - return 0, ierrors.Wrap(err, "unable to write slot diff created count") - } - - for _, output := range diff.sortedOutputs() { - if err := utils.WriteBytesFunc(writer, output.SnapshotBytes(), &totalBytesWritten); err != nil { - return 0, ierrors.Wrap(err, "unable to write slot diff created output") + if err := stream.WriteCollection(writer, serializer.SeriLengthPrefixTypeAsUint32, func() (elementsCount int, err error) { + for _, output := range diff.sortedOutputs() { + if err := stream.WriteBytes(writer, output.SnapshotBytes()); err != nil { + return 0, ierrors.Wrap(err, "unable to write slot diff created output") + } } - } - if err := utils.WriteValueFunc(writer, uint64(len(diff.Spents)), &totalBytesWritten); err != nil { - return 0, ierrors.Wrap(err, "unable to write slot diff consumed count") + return len(diff.Outputs), nil + }); err != nil { + return ierrors.Wrap(err, "unable to write slot diff created collection") } - for _, spent := range diff.sortedSpents() { - if err := utils.WriteBytesFunc(writer, spent.SnapshotBytes(), &totalBytesWritten); err != nil { - return 0, ierrors.Wrap(err, "unable to write slot diff created output") + if err := stream.WriteCollection(writer, serializer.SeriLengthPrefixTypeAsUint32, func() (elementsCount int, err error) { + for _, spent := range diff.sortedSpents() { + if err := stream.WriteBytes(writer, spent.SnapshotBytes()); err != nil { + return 0, ierrors.Wrap(err, "unable to write slot diff spent output") + } } + + return len(diff.Spents), nil + }); err != nil { + return ierrors.Wrap(err, "unable to write slot diff spent collection") } - return totalBytesWritten, nil + return nil } // Import imports the ledger state from the given reader. 
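// Illustrative sketch, not part of the patch: the snapshot writer/reader pair above
// (WriteSlotDiffToSnapshotWriter / ReadSlotDiffToSnapshotReader) follows one pattern from
// hive.go's serializer/v2/stream package -- WriteCollection emits a length prefix followed
// by the elements, and the reading side calls PeekSize to pre-allocate before ReadCollection
// consumes the same prefix. The snippet below shows that round trip for a plain slice of
// iotago.SlotIndex values; the function and variable names are invented for the example and
// the exact stream signatures are assumed to match their usage elsewhere in this diff.
package example

import (
	"github.com/iotaledger/hive.go/ierrors"
	"github.com/iotaledger/hive.go/serializer/v2"
	"github.com/iotaledger/hive.go/serializer/v2/stream"
	iotago "github.com/iotaledger/iota.go/v4"
)

func roundTripSlots(slots []iotago.SlotIndex) ([]iotago.SlotIndex, error) {
	buffer := stream.NewByteBuffer()

	// Write the collection: the closure writes the elements and reports how many it wrote,
	// so the length prefix matches what actually ended up in the stream.
	if err := stream.WriteCollection(buffer, serializer.SeriLengthPrefixTypeAsUint32, func() (int, error) {
		for _, slot := range slots {
			if err := stream.Write(buffer, slot); err != nil {
				return 0, ierrors.Wrap(err, "unable to write slot")
			}
		}

		return len(slots), nil
	}); err != nil {
		return nil, ierrors.Wrap(err, "unable to write slot collection")
	}

	reader := buffer.Reader()

	// Peek the element count without consuming it, so the target slice can be pre-allocated
	// (the same reason the slot diff reader peeks before calling ReadCollection).
	count, err := stream.PeekSize(reader, serializer.SeriLengthPrefixTypeAsUint32)
	if err != nil {
		return nil, ierrors.Wrap(err, "unable to peek slot count")
	}

	readSlots := make([]iotago.SlotIndex, count)
	if err := stream.ReadCollection(reader, serializer.SeriLengthPrefixTypeAsUint32, func(i int) error {
		slot, err := stream.Read[iotago.SlotIndex](reader)
		if err != nil {
			return ierrors.Wrap(err, "unable to read slot")
		}
		readSlots[i] = slot

		return nil
	}); err != nil {
		return nil, ierrors.Wrap(err, "unable to read slot collection")
	}

	return readSlots, nil
}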
@@ -175,40 +169,33 @@ func (m *Manager) Import(reader io.ReadSeeker) error { m.WriteLockLedger() defer m.WriteUnlockLedger() - var snapshotLedgerIndex iotago.SlotIndex - if err := binary.Read(reader, binary.LittleEndian, &snapshotLedgerIndex); err != nil { - return ierrors.Errorf("unable to read LS ledger index: %w", err) + snapshotLedgerIndex, err := stream.Read[iotago.SlotIndex](reader) + if err != nil { + return ierrors.Wrap(err, "unable to read LS ledger index") } - if err := m.StoreLedgerIndexWithoutLocking(snapshotLedgerIndex); err != nil { return err } - var outputCount uint64 - if err := binary.Read(reader, binary.LittleEndian, &outputCount); err != nil { - return ierrors.Errorf("unable to read LS output count: %w", err) - } - - var slotDiffCount uint64 - if err := binary.Read(reader, binary.LittleEndian, &slotDiffCount); err != nil { - return ierrors.Errorf("unable to read LS slot diff count: %w", err) - } - - for i := uint64(0); i < outputCount; i++ { + if err := stream.ReadCollection(reader, serializer.SeriLengthPrefixTypeAsUint64, func(i int) error { output, err := OutputFromSnapshotReader(reader, m.apiProvider) if err != nil { - return ierrors.Errorf("at pos %d: %w", i, err) + return ierrors.Wrapf(err, "at pos %d", i) } if err := m.importUnspentOutputWithoutLocking(output); err != nil { - return err + return ierrors.Wrap(err, "unable to import LS output") } + + return nil + }); err != nil { + return ierrors.Wrap(err, "unable to read LS output collection") } - for i := uint64(0); i < slotDiffCount; i++ { + if err := stream.ReadCollection(reader, serializer.SeriLengthPrefixTypeAsUint32, func(i int) error { slotDiff, err := ReadSlotDiffToSnapshotReader(reader, m.apiProvider) if err != nil { - return err + return ierrors.Wrapf(err, "unable to read LS slot diff at index %d", i) } if slotDiff.Slot != snapshotLedgerIndex-iotago.SlotIndex(i) { @@ -216,8 +203,12 @@ func (m *Manager) Import(reader io.ReadSeeker) error { } if err := m.RollbackDiffWithoutLocking(slotDiff.Slot, slotDiff.Outputs, slotDiff.Spents); err != nil { - return err + return ierrors.Wrapf(err, "unable to rollback LS slot diff at index %d", i) } + + return nil + }); err != nil { + return ierrors.Wrap(err, "unable to read LS slot diff collection") } if err := m.stateTree.Commit(); err != nil { @@ -236,83 +227,55 @@ func (m *Manager) Export(writer io.WriteSeeker, targetIndex iotago.SlotIndex) er if err != nil { return err } - if err := utils.WriteValueFunc(writer, ledgerIndex); err != nil { - return ierrors.Wrap(err, "unable to write ledger index") - } - - var relativeCountersPosition int64 - - var outputCount uint64 - var slotDiffCount uint64 - - // Outputs Count - // The amount of UTXOs contained within this snapshot. - if err := utils.WriteValueFunc(writer, outputCount, &relativeCountersPosition); err != nil { - return ierrors.Wrap(err, "unable to write outputs count") - } - - // Slot Diffs Count - // The amount of slot diffs contained within this snapshot. 
- if err := utils.WriteValueFunc(writer, slotDiffCount, &relativeCountersPosition); err != nil { - return ierrors.Wrap(err, "unable to write slot diffs count") - } - // Get all UTXOs and sort them by outputID - outputIDs, err := m.UnspentOutputsIDs(ReadLockLedger(false)) - if err != nil { - return ierrors.Wrap(err, "error while retrieving unspent outputIDs") + if err := stream.Write(writer, ledgerIndex); err != nil { + return ierrors.Wrap(err, "unable to write ledger index") } - for _, outputID := range outputIDs.RemoveDupsAndSort() { - output, err := m.ReadOutputByOutputIDWithoutLocking(outputID) + if err := stream.WriteCollection(writer, serializer.SeriLengthPrefixTypeAsUint64, func() (int, error) { + // Get all UTXOs and sort them by outputID + outputIDs, err := m.UnspentOutputsIDs(ReadLockLedger(false)) if err != nil { - return ierrors.Wrapf(err, "error while retrieving output %s", outputID) + return 0, ierrors.Wrap(err, "error while retrieving unspent outputIDs") } - if err := utils.WriteBytesFunc(writer, output.SnapshotBytes(), &relativeCountersPosition); err != nil { - return ierrors.Wrap(err, "unable to write output ID") - } + var outputCount int + for _, outputID := range outputIDs.RemoveDupsAndSort() { + output, err := m.ReadOutputByOutputIDWithoutLocking(outputID) + if err != nil { + return 0, ierrors.Wrapf(err, "error while retrieving output %s", outputID) + } - outputCount++ - } + if err := stream.WriteBytes(writer, output.SnapshotBytes()); err != nil { + return 0, ierrors.Wrapf(err, "unable to write output with ID %s", outputID) + } - for diffIndex := ledgerIndex; diffIndex > targetIndex; diffIndex-- { - slotDiff, err := m.SlotDiffWithoutLocking(diffIndex) - if err != nil { - return ierrors.Wrapf(err, "error while retrieving slot diffs for slot %s", diffIndex) + outputCount++ } - written, err := WriteSlotDiffToSnapshotWriter(writer, slotDiff) - if err != nil { - return ierrors.Wrapf(err, "error while writing slot diffs for slot %s", diffIndex) - } - - relativeCountersPosition += written - slotDiffCount++ + return outputCount, nil + }); err != nil { + return ierrors.Wrap(err, "unable to write unspent output collection") } - // seek back to the file position of the counters - if _, err := writer.Seek(-relativeCountersPosition, io.SeekCurrent); err != nil { - return ierrors.Errorf("unable to seek to LS counter placeholders: %w", err) - } + if err := stream.WriteCollection(writer, serializer.SeriLengthPrefixTypeAsUint32, func() (int, error) { + var slotDiffCount int + for diffIndex := ledgerIndex; diffIndex > targetIndex; diffIndex-- { + slotDiff, err := m.SlotDiffWithoutLocking(diffIndex) + if err != nil { + return 0, ierrors.Wrapf(err, "error while retrieving slot diffs for slot %s", diffIndex) + } - var countersSize int64 + if err := WriteSlotDiffToSnapshotWriter(writer, slotDiff); err != nil { + return 0, ierrors.Wrapf(err, "error while writing slot diffs for slot %s", diffIndex) + } - // Outputs Count - // The amount of UTXOs contained within this snapshot. - if err := utils.WriteValueFunc(writer, outputCount, &countersSize); err != nil { - return ierrors.Wrap(err, "unable to write outputs count") - } - - // Slot Diffs Count - // The amount of slot diffs contained within this snapshot.
- if err := utils.WriteValueFunc(writer, slotDiffCount, &countersSize); err != nil { - return ierrors.Wrap(err, "unable to write slot diffs count") - } + slotDiffCount++ + } - // seek back to the last write position - if _, err := writer.Seek(relativeCountersPosition-countersSize, io.SeekCurrent); err != nil { - return ierrors.Errorf("unable to seek to LS last written position: %w", err) + return slotDiffCount, nil + }); err != nil { + return ierrors.Wrap(err, "unable to write slot diff collection") } return nil diff --git a/pkg/protocol/engine/utxoledger/snapshot_test.go b/pkg/protocol/engine/utxoledger/snapshot_test.go index 5e174887a..4f24112bc 100644 --- a/pkg/protocol/engine/utxoledger/snapshot_test.go +++ b/pkg/protocol/engine/utxoledger/snapshot_test.go @@ -5,12 +5,12 @@ import ( "encoding/binary" "testing" - "github.com/orcaman/writerseeker" "github.com/stretchr/testify/require" "github.com/iotaledger/hive.go/kvstore" "github.com/iotaledger/hive.go/kvstore/mapdb" "github.com/iotaledger/hive.go/lo" + "github.com/iotaledger/hive.go/serializer/v2/stream" "github.com/iotaledger/iota-core/pkg/protocol/engine/utxoledger" "github.com/iotaledger/iota-core/pkg/protocol/engine/utxoledger/tpkg" "github.com/iotaledger/iota-core/pkg/utils" @@ -151,13 +151,11 @@ func TestReadSlotDiffToSnapshotReader(t *testing.T) { }, } - writer := &writerseeker.WriterSeeker{} - written, err := utxoledger.WriteSlotDiffToSnapshotWriter(writer, slotDiff) + writer := stream.NewByteBuffer() + err := utxoledger.WriteSlotDiffToSnapshotWriter(writer, slotDiff) require.NoError(t, err) - require.Equal(t, int64(writer.BytesReader().Len()), written) - - reader := writer.BytesReader() + reader := writer.Reader() readSlotDiff, err := utxoledger.ReadSlotDiffToSnapshotReader(reader, api.SingleVersionProvider(iotago_tpkg.TestAPI)) require.NoError(t, err) @@ -181,21 +179,19 @@ func TestWriteSlotDiffToSnapshotWriter(t *testing.T) { }, } - writer := &writerseeker.WriterSeeker{} - written, err := utxoledger.WriteSlotDiffToSnapshotWriter(writer, slotDiff) + writer := stream.NewByteBuffer() + err := utxoledger.WriteSlotDiffToSnapshotWriter(writer, slotDiff) require.NoError(t, err) - require.Equal(t, int64(writer.BytesReader().Len()), written) - - reader := writer.BytesReader() + reader := writer.Reader() var readSlot iotago.SlotIndex require.NoError(t, binary.Read(reader, binary.LittleEndian, &readSlot)) require.Equal(t, slot, readSlot) - var createdCount uint64 + var createdCount uint32 require.NoError(t, binary.Read(reader, binary.LittleEndian, &createdCount)) - require.Equal(t, uint64(len(slotDiff.Outputs)), createdCount) + require.Equal(t, uint32(len(slotDiff.Outputs)), createdCount) var snapshotOutputs utxoledger.Outputs for i := 0; i < len(slotDiff.Outputs); i++ { @@ -206,9 +202,9 @@ func TestWriteSlotDiffToSnapshotWriter(t *testing.T) { tpkg.EqualOutputs(t, slotDiff.Outputs, snapshotOutputs) - var consumedCount uint64 + var consumedCount uint32 require.NoError(t, binary.Read(reader, binary.LittleEndian, &consumedCount)) - require.Equal(t, uint64(len(slotDiff.Spents)), consumedCount) + require.Equal(t, uint32(len(slotDiff.Spents)), consumedCount) var snapshotSpents utxoledger.Spents for i := 0; i < len(slotDiff.Spents); i++ { @@ -271,10 +267,10 @@ func TestManager_Import(t *testing.T) { // Test exporting and importing at the current slot 2 { - writer := &writerseeker.WriterSeeker{} + writer := stream.NewByteBuffer() require.NoError(t, manager.Export(writer, 2)) - reader := writer.BytesReader() + reader := writer.Reader() 
importedSlot2 := utxoledger.New(mapdb.NewMapDB(), api.SingleVersionProvider(iotago_tpkg.TestAPI)) require.NoError(t, importedSlot2.Import(reader)) @@ -285,10 +281,10 @@ func TestManager_Import(t *testing.T) { // Test exporting and importing at slot 1 { - writer := &writerseeker.WriterSeeker{} + writer := stream.NewByteBuffer() require.NoError(t, manager.Export(writer, 1)) - reader := writer.BytesReader() + reader := writer.Reader() importedSlot1 := utxoledger.New(mapdb.NewMapDB(), api.SingleVersionProvider(iotago_tpkg.TestAPI)) require.NoError(t, importedSlot1.Import(reader)) @@ -302,10 +298,10 @@ func TestManager_Import(t *testing.T) { // Test exporting and importing at slot 0 { - writer := &writerseeker.WriterSeeker{} + writer := stream.NewByteBuffer() require.NoError(t, manager.Export(writer, 0)) - reader := writer.BytesReader() + reader := writer.Reader() importedSlot0 := utxoledger.New(mapdb.NewMapDB(), api.SingleVersionProvider(iotago_tpkg.TestAPI)) require.NoError(t, importedSlot0.Import(reader)) @@ -358,10 +354,10 @@ func TestManager_Export(t *testing.T) { // Test exporting at the current slot 2 { - writer := &writerseeker.WriterSeeker{} + writer := stream.NewByteBuffer() require.NoError(t, manager.Export(writer, 2)) - reader := writer.BytesReader() + reader := writer.Reader() var snapshotLedgerSlot iotago.SlotIndex require.NoError(t, binary.Read(reader, binary.LittleEndian, &snapshotLedgerSlot)) @@ -371,10 +367,6 @@ func TestManager_Export(t *testing.T) { require.NoError(t, binary.Read(reader, binary.LittleEndian, &outputCount)) require.Equal(t, uint64(8), outputCount) - var slotDiffCount uint64 - require.NoError(t, binary.Read(reader, binary.LittleEndian, &slotDiffCount)) - require.Equal(t, uint64(0), slotDiffCount) - var snapshotOutputs utxoledger.Outputs for i := uint64(0); i < outputCount; i++ { output, err := utxoledger.OutputFromSnapshotReader(reader, api.SingleVersionProvider(iotago_tpkg.TestAPI)) @@ -387,14 +379,18 @@ func TestManager_Export(t *testing.T) { require.NoError(t, err) tpkg.EqualOutputs(t, unspentOutputs, snapshotOutputs) + + var slotDiffCount uint32 + require.NoError(t, binary.Read(reader, binary.LittleEndian, &slotDiffCount)) + require.Equal(t, uint32(0), slotDiffCount) } // Test exporting at slot 1 { - writer := &writerseeker.WriterSeeker{} + writer := stream.NewByteBuffer() require.NoError(t, manager.Export(writer, 1)) - reader := writer.BytesReader() + reader := writer.Reader() var snapshotLedgerSlot iotago.SlotIndex require.NoError(t, binary.Read(reader, binary.LittleEndian, &snapshotLedgerSlot)) @@ -404,10 +400,6 @@ func TestManager_Export(t *testing.T) { require.NoError(t, binary.Read(reader, binary.LittleEndian, &outputCount)) require.Equal(t, uint64(8), outputCount) - var slotDiffCount uint64 - require.NoError(t, binary.Read(reader, binary.LittleEndian, &slotDiffCount)) - require.Equal(t, uint64(1), slotDiffCount) - var snapshotOutputs utxoledger.Outputs for i := uint64(0); i < outputCount; i++ { output, err := utxoledger.OutputFromSnapshotReader(reader, api.SingleVersionProvider(iotago_tpkg.TestAPI)) @@ -420,7 +412,11 @@ func TestManager_Export(t *testing.T) { tpkg.EqualOutputs(t, unspentOutputs, snapshotOutputs) - for i := uint64(0); i < slotDiffCount; i++ { + var slotDiffCount uint32 + require.NoError(t, binary.Read(reader, binary.LittleEndian, &slotDiffCount)) + require.Equal(t, uint32(1), slotDiffCount) + + for i := uint32(0); i < slotDiffCount; i++ { diff, err := utxoledger.ReadSlotDiffToSnapshotReader(reader, 
api.SingleVersionProvider(iotago_tpkg.TestAPI)) require.NoError(t, err) require.Equal(t, snapshotLedgerSlot-iotago.SlotIndex(i), diff.Slot) @@ -429,10 +425,10 @@ func TestManager_Export(t *testing.T) { // Test exporting at slot 0 { - writer := &writerseeker.WriterSeeker{} + writer := stream.NewByteBuffer() require.NoError(t, manager.Export(writer, 0)) - reader := writer.BytesReader() + reader := writer.Reader() var snapshotLedgerSlot iotago.SlotIndex require.NoError(t, binary.Read(reader, binary.LittleEndian, &snapshotLedgerSlot)) @@ -442,10 +438,6 @@ func TestManager_Export(t *testing.T) { require.NoError(t, binary.Read(reader, binary.LittleEndian, &outputCount)) require.Equal(t, uint64(8), outputCount) - var slotDiffCount uint64 - require.NoError(t, binary.Read(reader, binary.LittleEndian, &slotDiffCount)) - require.Equal(t, uint64(2), slotDiffCount) - var snapshotOutputs utxoledger.Outputs for i := uint64(0); i < outputCount; i++ { output, err := utxoledger.OutputFromSnapshotReader(reader, api.SingleVersionProvider(iotago_tpkg.TestAPI)) @@ -458,7 +450,11 @@ func TestManager_Export(t *testing.T) { tpkg.EqualOutputs(t, unspentOutputs, snapshotOutputs) - for i := uint64(0); i < slotDiffCount; i++ { + var slotDiffCount uint32 + require.NoError(t, binary.Read(reader, binary.LittleEndian, &slotDiffCount)) + require.Equal(t, uint32(2), slotDiffCount) + + for i := uint32(0); i < slotDiffCount; i++ { diff, err := utxoledger.ReadSlotDiffToSnapshotReader(reader, api.SingleVersionProvider(iotago_tpkg.TestAPI)) require.NoError(t, err) require.Equal(t, snapshotLedgerSlot-iotago.SlotIndex(i), diff.Slot) diff --git a/pkg/protocol/engine/utxoledger/spent.go b/pkg/protocol/engine/utxoledger/spent.go index cdb41baeb..1b1a0a888 100644 --- a/pkg/protocol/engine/utxoledger/spent.go +++ b/pkg/protocol/engine/utxoledger/spent.go @@ -3,8 +3,11 @@ package utxoledger import ( "bytes" + "github.com/iotaledger/hive.go/ierrors" "github.com/iotaledger/hive.go/kvstore" - "github.com/iotaledger/hive.go/serializer/v2/marshalutil" + "github.com/iotaledger/hive.go/lo" + "github.com/iotaledger/hive.go/serializer/v2" + "github.com/iotaledger/hive.go/serializer/v2/stream" iotago "github.com/iotaledger/iota.go/v4" ) @@ -84,11 +87,13 @@ func NewSpent(output *Output, transactionIDSpent iotago.TransactionID, slotSpent } func spentStorageKeyForOutputID(outputID iotago.OutputID) []byte { - ms := marshalutil.New(iotago.OutputIDLength + 1) - ms.WriteByte(StoreKeyPrefixOutputSpent) // 1 byte - ms.WriteBytes(outputID[:]) // iotago.OutputIDLength bytes + byteBuffer := stream.NewByteBuffer(iotago.OutputIDLength + serializer.OneByte) - return ms.Bytes() + // There can't be any errors. + _ = stream.Write(byteBuffer, StoreKeyPrefixOutputSpent) // 1 byte + _ = stream.Write(byteBuffer, outputID) + + return lo.PanicOnErr(byteBuffer.Bytes()) } func (s *Spent) KVStorableKey() (key []byte) { @@ -96,40 +101,33 @@ func (s *Spent) KVStorableKey() (key []byte) { } func (s *Spent) KVStorableValue() (value []byte) { - ms := marshalutil.New(iotago.TransactionIDLength + iotago.SlotIndexLength) - ms.WriteBytes(s.transactionIDSpent[:]) // iotago.TransactionIDLength bytes - ms.WriteBytes(s.slotSpent.MustBytes()) // iotago.SlotIndexLength bytes + byteBuffer := stream.NewByteBuffer(iotago.TransactionIDLength + iotago.SlotIndexLength) + + // There can't be any errors. 
+ _ = stream.Write(byteBuffer, s.transactionIDSpent) + _ = stream.Write(byteBuffer, s.slotSpent) - return ms.Bytes() + return lo.PanicOnErr(byteBuffer.Bytes()) } func (s *Spent) kvStorableLoad(_ *Manager, key []byte, value []byte) error { - // Parse key - keyUtil := marshalutil.New(key) + var err error + keyReader := stream.NewByteReader(key) - // Read prefix output - _, err := keyUtil.ReadByte() - if err != nil { - return err + if _, err = stream.Read[byte](keyReader); err != nil { + return ierrors.Wrap(err, "unable to read prefix") } - - // Read OutputID - if s.outputID, err = ParseOutputID(keyUtil); err != nil { - return err + if s.outputID, err = stream.Read[iotago.OutputID](keyReader); err != nil { + return ierrors.Wrap(err, "unable to read outputID") } - // Parse value - valueUtil := marshalutil.New(value) + valueReader := stream.NewByteReader(value) - // Read transaction ID - if s.transactionIDSpent, err = parseTransactionID(valueUtil); err != nil { - return err + if s.transactionIDSpent, err = stream.Read[iotago.TransactionID](valueReader); err != nil { + return ierrors.Wrap(err, "unable to read transactionIDSpent") } - - // Read slot index spent index - s.slotSpent, err = parseSlotIndex(valueUtil) - if err != nil { - return err + if s.slotSpent, err = stream.Read[iotago.SlotIndex](valueReader); err != nil { + return ierrors.Wrap(err, "unable to read slotSpent") } return nil diff --git a/pkg/protocol/engine/utxoledger/spent_status.go b/pkg/protocol/engine/utxoledger/spent_status.go index 2a2bb6fc4..bfd34409e 100644 --- a/pkg/protocol/engine/utxoledger/spent_status.go +++ b/pkg/protocol/engine/utxoledger/spent_status.go @@ -2,7 +2,9 @@ package utxoledger import ( "github.com/iotaledger/hive.go/kvstore" - "github.com/iotaledger/hive.go/serializer/v2/marshalutil" + "github.com/iotaledger/hive.go/lo" + "github.com/iotaledger/hive.go/serializer/v2" + "github.com/iotaledger/hive.go/serializer/v2/stream" iotago "github.com/iotaledger/iota.go/v4" ) @@ -17,11 +19,13 @@ type OutputConsumer func(output *Output) bool type LookupKey []byte func lookupKeyUnspentOutput(outputID iotago.OutputID) LookupKey { - ms := marshalutil.New(iotago.OutputIDLength + 1) - ms.WriteByte(StoreKeyPrefixOutputUnspent) // 1 byte - ms.WriteBytes(outputID[:]) // iotago.OutputIDLength bytes + byteBuffer := stream.NewByteBuffer(serializer.OneByte + iotago.OutputIDLength) - return ms.Bytes() + // There can't be any errors. + _ = stream.Write(byteBuffer, StoreKeyPrefixOutputUnspent) + _ = stream.Write(byteBuffer, outputID) + + return lo.PanicOnErr(byteBuffer.Bytes()) } func (o *Output) UnspentLookupKey() LookupKey { @@ -29,14 +33,10 @@ func (o *Output) UnspentLookupKey() LookupKey { } func outputIDFromDatabaseKey(key LookupKey) (iotago.OutputID, error) { - ms := marshalutil.New([]byte(key)) - - // prefix - if _, err := ms.ReadByte(); err != nil { - return iotago.OutputID{}, err - } + // Skip 1 byte prefix. 
+ outputID, _, err := iotago.OutputIDFromBytes(key[1:]) - return ParseOutputID(ms) + return outputID, err } func markAsUnspent(output *Output, mutations kvstore.BatchedMutations) error { diff --git a/pkg/protocol/engine/utxoledger/state_tree.go b/pkg/protocol/engine/utxoledger/state_tree.go index 06f04fab5..1e5f3af3f 100644 --- a/pkg/protocol/engine/utxoledger/state_tree.go +++ b/pkg/protocol/engine/utxoledger/state_tree.go @@ -6,17 +6,16 @@ import ( "github.com/iotaledger/hive.go/ads" "github.com/iotaledger/hive.go/ierrors" "github.com/iotaledger/hive.go/kvstore/mapdb" - "github.com/iotaledger/hive.go/serializer/v2/marshalutil" iotago "github.com/iotaledger/iota.go/v4" ) type stateTreeMetadata struct { - Time iotago.SlotIndex + Slot iotago.SlotIndex } func newStateMetadata(output *Output) *stateTreeMetadata { return &stateTreeMetadata{ - Time: output.SlotCreated(), + Slot: output.SlotCreated(), } } @@ -25,7 +24,7 @@ func stateMetadataFromBytes(b []byte) (*stateTreeMetadata, int, error) { var err error var n int - s.Time, n, err = iotago.SlotIndexFromBytes(b) + s.Slot, n, err = iotago.SlotIndexFromBytes(b) if err != nil { return nil, 0, err } @@ -34,10 +33,7 @@ func stateMetadataFromBytes(b []byte) (*stateTreeMetadata, int, error) { } func (s *stateTreeMetadata) Bytes() ([]byte, error) { - ms := marshalutil.New(iotago.SlotIndexLength) - ms.WriteBytes(s.Time.MustBytes()) - - return ms.Bytes(), nil + return s.Slot.Bytes() } func (m *Manager) StateTreeRoot() iotago.Identifier { @@ -46,6 +42,8 @@ func (m *Manager) StateTreeRoot() iotago.Identifier { func (m *Manager) CheckStateTree() bool { comparisonTree := ads.NewMap[iotago.Identifier](mapdb.NewMapDB(), + iotago.Identifier.Bytes, + iotago.IdentifierFromBytes, iotago.OutputID.Bytes, iotago.OutputIDFromBytes, (*stateTreeMetadata).Bytes, diff --git a/pkg/protocol/enginemanager/enginemanager.go b/pkg/protocol/enginemanager/enginemanager.go index a36b01175..a1ade86d4 100644 --- a/pkg/protocol/enginemanager/enginemanager.go +++ b/pkg/protocol/enginemanager/enginemanager.go @@ -280,7 +280,7 @@ func (e *EngineManager) rollbackStorage(newStorage *storage.Storage, slot iotago return ierrors.Wrap(err, "failed to rollback commitments") } // Create temporary components and rollback their permanent state, which will be reflected on disk. 
- evictionState := eviction.NewState(newStorage.LatestNonEmptySlot(), newStorage.RootBlocks) + evictionState := eviction.NewState(newStorage.LatestNonEmptySlot(), newStorage.RootBlocks, newStorage.GenesisRootBlockID) evictionState.Initialize(latestCommitment.Slot()) blockCache := blocks.New(evictionState, newStorage.Settings().APIProvider()) diff --git a/pkg/protocol/protocol_fork.go b/pkg/protocol/protocol_fork.go index 1425276cd..458ee4ccc 100644 --- a/pkg/protocol/protocol_fork.go +++ b/pkg/protocol/protocol_fork.go @@ -46,7 +46,7 @@ func (p *Protocol) processAttestationsRequest(commitmentID iotago.CommitmentID, p.HandleError(ierrors.Errorf("failed to get roots storage for commitment %s", commitmentID)) return } - roots, err := rootsStorage.Load(commitmentID) + roots, _, err := rootsStorage.Load(commitmentID) if err != nil { p.HandleError(ierrors.Wrapf(err, "failed to load roots for commitment %s", commitmentID)) return diff --git a/pkg/protocol/sybilprotection/sybilprotectionv1/performance/performance.go b/pkg/protocol/sybilprotection/sybilprotectionv1/performance/performance.go index f5f024a56..6c855d4e4 100644 --- a/pkg/protocol/sybilprotection/sybilprotectionv1/performance/performance.go +++ b/pkg/protocol/sybilprotection/sybilprotectionv1/performance/performance.go @@ -232,11 +232,16 @@ func (t *Tracker) ApplyEpoch(epoch iotago.EpochIndex, committee *account.Account continue } - validatorPerformance, err := validatorSlotPerformances.Load(accountID) + validatorPerformance, exists, err := validatorSlotPerformances.Load(accountID) if err != nil { panic(ierrors.Wrapf(err, "failed to load performance factor for account %s", accountID)) } + // key not found + if !exists { + validatorPerformance = model.NewValidatorPerformance() + } + validatorPerformances = append(validatorPerformances, validatorPerformance) } pf := t.aggregatePerformanceFactors(validatorPerformances, epoch) @@ -296,7 +301,7 @@ func (t *Tracker) aggregatePerformanceFactors(slotActivityVector []*model.Valida // we reward not only total number of blocks issued, but also regularity based on block timestamp slotPerformanceFactor := bits.OnesCount32(pf.SlotActivityVector) - if pf.BlockIssuedCount > protoParamsForEpoch.ValidationBlocksPerSlot() { + if pf.BlocksIssuedCount > protoParamsForEpoch.ValidationBlocksPerSlot() { // we harshly punish validators that issue any blocks more than allowed return 0 @@ -326,7 +331,7 @@ func (t *Tracker) trackCommitteeMemberPerformance(validationBlock *iotago.Valida return } - validatorPerformance, err := validatorPerformances.Load(block.ProtocolBlock().Header.IssuerID) + validatorPerformance, exists, err := validatorPerformances.Load(block.ProtocolBlock().Header.IssuerID) if err != nil { t.errHandler(ierrors.Errorf("failed to load performance factor for account %s", block.ProtocolBlock().Header.IssuerID)) @@ -334,7 +339,7 @@ func (t *Tracker) trackCommitteeMemberPerformance(validationBlock *iotago.Valida } // key not found - if validatorPerformance == nil { + if !exists { validatorPerformance = model.NewValidatorPerformance() } @@ -345,8 +350,8 @@ func (t *Tracker) trackCommitteeMemberPerformance(validationBlock *iotago.Valida // we restrict the number up to ValidatorBlocksPerSlot + 1 to know later if the validator issued more blocks than allowed and be able to punish for it // also it can fint into uint8 - if validatorPerformance.BlockIssuedCount < apiForSlot.ProtocolParameters().ValidationBlocksPerSlot()+1 { - validatorPerformance.BlockIssuedCount++ + if 
validatorPerformance.BlocksIssuedCount < apiForSlot.ProtocolParameters().ValidationBlocksPerSlot()+1 { + validatorPerformance.BlocksIssuedCount++ } validatorPerformance.HighestSupportedVersionAndHash = model.VersionAndHash{ diff --git a/pkg/protocol/sybilprotection/sybilprotectionv1/performance/rewards.go b/pkg/protocol/sybilprotection/sybilprotectionv1/performance/rewards.go index a2dbdeaad..498d00d99 100644 --- a/pkg/protocol/sybilprotection/sybilprotectionv1/performance/rewards.go +++ b/pkg/protocol/sybilprotection/sybilprotectionv1/performance/rewards.go @@ -190,6 +190,8 @@ func (t *Tracker) rewardsMap(epoch iotago.EpochIndex) (ads.Map[iotago.Identifier } return ads.NewMap[iotago.Identifier](kv, + iotago.Identifier.Bytes, + iotago.IdentifierFromBytes, iotago.AccountID.Bytes, iotago.AccountIDFromBytes, (*model.PoolRewards).Bytes, diff --git a/pkg/protocol/sybilprotection/sybilprotectionv1/performance/snapshot.go b/pkg/protocol/sybilprotection/sybilprotectionv1/performance/snapshot.go index cf02cd05f..585aff575 100644 --- a/pkg/protocol/sybilprotection/sybilprotectionv1/performance/snapshot.go +++ b/pkg/protocol/sybilprotection/sybilprotectionv1/performance/snapshot.go @@ -1,14 +1,14 @@ package performance import ( - "encoding/binary" "io" "github.com/iotaledger/hive.go/ierrors" "github.com/iotaledger/hive.go/lo" + "github.com/iotaledger/hive.go/serializer/v2" + "github.com/iotaledger/hive.go/serializer/v2/stream" "github.com/iotaledger/iota-core/pkg/core/account" "github.com/iotaledger/iota-core/pkg/model" - "github.com/iotaledger/iota-core/pkg/utils" iotago "github.com/iotaledger/iota.go/v4" ) @@ -46,26 +46,25 @@ func (t *Tracker) Export(writer io.WriteSeeker, targetSlotIndex iotago.SlotIndex timeProvider := t.apiProvider.APIForSlot(targetSlotIndex).TimeProvider() targetEpoch := timeProvider.EpochFromSlot(targetSlotIndex) - positionedWriter := utils.NewPositionedWriter(writer) // if the target index is the last slot of the epoch, the epoch was committed if timeProvider.EpochEnd(targetEpoch) != targetSlotIndex { targetEpoch-- } - if err := t.exportPerformanceFactor(positionedWriter, timeProvider.EpochStart(targetEpoch+1), targetSlotIndex); err != nil { + if err := t.exportPerformanceFactor(writer, timeProvider.EpochStart(targetEpoch+1), targetSlotIndex); err != nil { return ierrors.Wrap(err, "unable to export performance factor") } - if err := t.exportPoolRewards(positionedWriter, targetEpoch); err != nil { + if err := t.exportPoolRewards(writer, targetEpoch); err != nil { return ierrors.Wrap(err, "unable to export pool rewards") } - if err := t.exportPoolsStats(positionedWriter, targetEpoch); err != nil { + if err := t.exportPoolsStats(writer, targetEpoch); err != nil { return ierrors.Wrap(err, "unable to export pool stats") } - if err := t.exportCommittees(positionedWriter, targetSlotIndex); err != nil { + if err := t.exportCommittees(writer, targetSlotIndex); err != nil { return ierrors.Wrap(err, "unable to export committees") } @@ -73,20 +72,10 @@ func (t *Tracker) Export(writer io.WriteSeeker, targetSlotIndex iotago.SlotIndex } func (t *Tracker) importPerformanceFactor(reader io.ReadSeeker) error { - var slotCount uint64 - if err := binary.Read(reader, binary.LittleEndian, &slotCount); err != nil { - return ierrors.Wrap(err, "unable to read slot count") - } - - for i := uint64(0); i < slotCount; i++ { - var slot iotago.SlotIndex - if err := binary.Read(reader, binary.LittleEndian, &slot); err != nil { - return ierrors.Wrap(err, "unable to read slot index") - } - - var accountsCount 
uint64 - if err := binary.Read(reader, binary.LittleEndian, &accountsCount); err != nil { - return ierrors.Wrapf(err, "unable to read accounts count for slot index %d", slot) + if err := stream.ReadCollection(reader, serializer.SeriLengthPrefixTypeAsUint32, func(i int) error { + slot, err := stream.Read[iotago.SlotIndex](reader) + if err != nil { + return ierrors.Wrapf(err, "unable to read slot index at index %d", i) } performanceFactors, err := t.validatorPerformancesFunc(slot) @@ -94,36 +83,37 @@ func (t *Tracker) importPerformanceFactor(reader io.ReadSeeker) error { return ierrors.Wrapf(err, "unable to get performance factors for slot index %d", slot) } - for j := uint64(0); j < accountsCount; j++ { - var accountID iotago.AccountID - if err = binary.Read(reader, binary.LittleEndian, &accountID); err != nil { - return ierrors.Wrapf(err, "unable to read account id for the slot index %d", slot) + if err := stream.ReadCollection(reader, serializer.SeriLengthPrefixTypeAsUint64, func(j int) error { + accountID, err := stream.Read[iotago.AccountID](reader) + if err != nil { + return ierrors.Wrapf(err, "unable to read account id at index %d", j) } - - var performanceFactor model.ValidatorPerformance - if err = binary.Read(reader, binary.LittleEndian, &performanceFactor); err != nil { - return ierrors.Wrapf(err, "unable to read performance factor for account %s and slot index %d", accountID, slot) + performanceFactor, err := stream.ReadObjectFromReader(reader, model.ValidatorPerformanceFromReader) + if err != nil { + return ierrors.Wrapf(err, "unable to read performance factor for account %s and slot %d", accountID, slot) } - - if err = performanceFactors.Store(accountID, &performanceFactor); err != nil { + if err = performanceFactors.Store(accountID, performanceFactor); err != nil { return ierrors.Wrapf(err, "unable to store performance factor for account %s and slot index %d", accountID, slot) } + + return nil + }); err != nil { + return ierrors.Wrapf(err, "unable to read performance factors for slot %d", slot) } + + return nil + }); err != nil { + return ierrors.Wrap(err, "unable to read performance factors collection") } return nil } func (t *Tracker) importPoolRewards(reader io.ReadSeeker) error { - var epochCount uint64 - if err := binary.Read(reader, binary.LittleEndian, &epochCount); err != nil { - return ierrors.Wrap(err, "unable to read epoch count") - } - - for i := uint64(0); i < epochCount; i++ { - var epoch iotago.EpochIndex - if err := binary.Read(reader, binary.LittleEndian, &epoch); err != nil { - return ierrors.Wrap(err, "unable to read epoch index") + if err := stream.ReadCollection(reader, serializer.SeriLengthPrefixTypeAsUint32, func(int) error { + epoch, err := stream.Read[iotago.EpochIndex](reader) + if err != nil { + return ierrors.Wrap(err, "unable to read epoch") } rewardsTree, err := t.rewardsMap(epoch) @@ -131,307 +121,280 @@ func (t *Tracker) importPoolRewards(reader io.ReadSeeker) error { return ierrors.Wrapf(err, "unable to get rewards tree for epoch index %d", epoch) } - var accountsCount uint64 - if err = binary.Read(reader, binary.LittleEndian, &accountsCount); err != nil { - return ierrors.Wrapf(err, "unable to read accounts count for epoch index %d", epoch) - } - - for j := uint64(0); j < accountsCount; j++ { - var accountID iotago.AccountID - if err = binary.Read(reader, binary.LittleEndian, &accountID); err != nil { - return ierrors.Wrapf(err, "unable to read account id for the epoch index %d", epoch) + if err := stream.ReadCollection(reader, 
serializer.SeriLengthPrefixTypeAsUint64, func(int) error { + accountID, err := stream.Read[iotago.AccountID](reader) + if err != nil { + return ierrors.Wrap(err, "unable to read account id") } - var reward model.PoolRewards - if err = binary.Read(reader, binary.LittleEndian, &reward); err != nil { + reward, err := stream.ReadObjectFromReader(reader, model.PoolRewardsFromReader) + if err != nil { return ierrors.Wrapf(err, "unable to read reward for account %s and epoch index %d", accountID, epoch) } - if err = rewardsTree.Set(accountID, &reward); err != nil { + if err = rewardsTree.Set(accountID, reward); err != nil { return ierrors.Wrapf(err, "unable to set reward for account %s and epoch index %d", accountID, epoch) } + + return nil + }); err != nil { + return ierrors.Wrapf(err, "unable to read rewards collection for epoch %d", epoch) } - if err = rewardsTree.Commit(); err != nil { + if err := rewardsTree.Commit(); err != nil { return ierrors.Wrapf(err, "unable to commit rewards for epoch index %d", epoch) } + + return nil + }); err != nil { + return ierrors.Wrap(err, "unable to read pool rewards collection") } return nil } func (t *Tracker) importPoolsStats(reader io.ReadSeeker) error { - var epochCount uint64 - if err := binary.Read(reader, binary.LittleEndian, &epochCount); err != nil { - return ierrors.Wrap(err, "unable to read epoch count") - } - - for i := uint64(0); i < epochCount; i++ { - var epoch iotago.EpochIndex - if err := binary.Read(reader, binary.LittleEndian, &epoch); err != nil { - return ierrors.Wrap(err, "unable to read epoch index") + if err := stream.ReadCollection(reader, serializer.SeriLengthPrefixTypeAsUint32, func(int) error { + epoch, err := stream.Read[iotago.EpochIndex](reader) + if err != nil { + return ierrors.Wrap(err, "unable to read epoch") } - var poolStats model.PoolsStats - if err := binary.Read(reader, binary.LittleEndian, &poolStats); err != nil { - return ierrors.Wrapf(err, "unable to read pool stats for epoch index %d", epoch) + poolStats, err := stream.ReadObjectFromReader(reader, model.PoolStatsFromReader) + if err != nil { + return ierrors.Wrapf(err, "unable to read pool stats for epoch %d", epoch) } - if err := t.poolStatsStore.Store(epoch, &poolStats); err != nil { + if err := t.poolStatsStore.Store(epoch, poolStats); err != nil { return ierrors.Wrapf(err, "unable to store pool stats for the epoch index %d", epoch) } + + return nil + }); err != nil { + return ierrors.Wrap(err, "unable to read pool stats collection") } return nil } func (t *Tracker) importCommittees(reader io.ReadSeeker) error { - var epochCount uint64 - if err := binary.Read(reader, binary.LittleEndian, &epochCount); err != nil { - return ierrors.Wrap(err, "unable to read committees epoch count") - } - for i := uint64(0); i < epochCount; i++ { - var epoch iotago.EpochIndex - if err := binary.Read(reader, binary.LittleEndian, &epoch); err != nil { + if err := stream.ReadCollection(reader, serializer.SeriLengthPrefixTypeAsUint32, func(int) error { + epoch, err := stream.Read[iotago.EpochIndex](reader) + if err != nil { return ierrors.Wrap(err, "unable to read epoch index") } - committee, _, err := account.AccountsFromReader(reader) + committee, err := account.AccountsFromReader(reader) if err != nil { - return ierrors.Wrapf(err, "unable to read committee for the epoch index %d", epoch) + return ierrors.Wrapf(err, "unable to read committee for the epoch %d", epoch) } if err = t.committeeStore.Store(epoch, committee); err != nil { return ierrors.Wrap(err, "unable to store 
committee") } + + return nil + }); err != nil { + return ierrors.Wrap(err, "unable to read committees collection") } return nil } -func (t *Tracker) exportPerformanceFactor(pWriter *utils.PositionedWriter, startSlot, targetSlot iotago.SlotIndex) error { +func (t *Tracker) exportPerformanceFactor(writer io.WriteSeeker, startSlot, targetSlot iotago.SlotIndex) error { t.performanceFactorsMutex.RLock() defer t.performanceFactorsMutex.RUnlock() - var slotCount uint64 - if err := pWriter.WriteValue("pf slot count", slotCount, true); err != nil { - return ierrors.Wrap(err, "unable to write pf slot count") - } + if err := stream.WriteCollection(writer, serializer.SeriLengthPrefixTypeAsUint32, func() (int, error) { + var slotCount int - for currentSlot := startSlot; currentSlot <= targetSlot; currentSlot++ { - if err := pWriter.WriteValue("slot index", currentSlot); err != nil { - return ierrors.Wrapf(err, "unable to write slot index %d", currentSlot) - } + for currentSlot := startSlot; currentSlot <= targetSlot; currentSlot++ { + if err := stream.Write(writer, currentSlot); err != nil { + return 0, ierrors.Wrapf(err, "unable to write slot index %d", currentSlot) + } - var accountsCount uint64 - if err := pWriter.WriteValue("pf account count", accountsCount, true); err != nil { - return ierrors.Wrapf(err, "unable to write pf accounts count for slot index %d", currentSlot) - } + if err := stream.WriteCollection(writer, serializer.SeriLengthPrefixTypeAsUint64, func() (int, error) { + var accountsCount int - performanceFactors, err := t.validatorPerformancesFunc(currentSlot) - if err != nil { - return ierrors.Wrapf(err, "unable to get performance factors for slot index %d", currentSlot) - } + performanceFactors, err := t.validatorPerformancesFunc(currentSlot) + if err != nil { + return 0, ierrors.Wrapf(err, "unable to get performance factors for slot index %d", currentSlot) + } - if err = performanceFactors.Stream(func(accountID iotago.AccountID, pf *model.ValidatorPerformance) error { - if err = pWriter.WriteValue("account id", accountID); err != nil { - return ierrors.Wrapf(err, "unable to write account id %s for slot %d", accountID, currentSlot) - } + if err = performanceFactors.Stream(func(accountID iotago.AccountID, pf *model.ValidatorPerformance) error { + if err := stream.Write(writer, accountID); err != nil { + return ierrors.Wrapf(err, "unable to write account id %s for slot %d", accountID, currentSlot) + } - bytes, err := t.apiProvider.APIForSlot(currentSlot).Encode(pf) - if err != nil { - return ierrors.Wrapf(err, "unable to encode performance factor for accountID %s and slot index %d", accountID, currentSlot) - } + if err := stream.WriteObject(writer, pf, (*model.ValidatorPerformance).Bytes); err != nil { + return ierrors.Wrapf(err, "unable to write performance factor for accountID %s and slot index %d", accountID, currentSlot) + } - if err = pWriter.WriteBytes(bytes); err != nil { - return ierrors.Wrapf(err, "unable to write performance factor for accountID %s and slot index %d", accountID, currentSlot) - } + accountsCount++ - accountsCount++ + return nil + }); err != nil { + return 0, ierrors.Wrapf(err, "unable to write performance factors for slot index %d", currentSlot) + } - return nil - }); err != nil { - return ierrors.Wrapf(err, "unable to write performance factors for slot index %d", currentSlot) - } + return accountsCount, nil + }); err != nil { + return 0, ierrors.Wrapf(err, "unable to write accounts for slot %d", currentSlot) + } - if err = pWriter.WriteValueAtBookmark("pf 
account count", accountsCount); err != nil { - return ierrors.Wrap(err, "unable to write pf accounts count") + slotCount++ } - slotCount++ - } - - if err := pWriter.WriteValueAtBookmark("pf slot count", slotCount); err != nil { - return ierrors.Wrap(err, "unable to write pf slot count at bookmarked position") + return slotCount, nil + }); err != nil { + return ierrors.Wrap(err, "unable to write slot count") } return nil } -func (t *Tracker) exportPoolRewards(pWriter *utils.PositionedWriter, targetEpoch iotago.EpochIndex) error { +func (t *Tracker) exportPoolRewards(writer io.WriteSeeker, targetEpoch iotago.EpochIndex) error { // export all stored pools // in theory we could save the epoch count only once, because stats and rewards should be the same length - var epochCount uint64 - if err := pWriter.WriteValue("pool rewards epoch count", epochCount, true); err != nil { - return ierrors.Wrap(err, "unable to write epoch count") - } - for epoch := targetEpoch; epoch > iotago.EpochIndex(lo.Max(0, int(targetEpoch)-daysInYear)); epoch-- { - rewardsMap, err := t.rewardsMap(epoch) - if err != nil { - return ierrors.Wrapf(err, "unable to get rewards tree for epoch index %d", epoch) - } - // if the map was not present in storage we can skip this epoch and the previous ones, as we never stored any rewards - if !rewardsMap.WasRestoredFromStorage() { - break - } - - if err = pWriter.WriteValue("epoch index", epoch); err != nil { - return ierrors.Wrapf(err, "unable to write epoch index for epoch index %d", epoch) - } - - var accountCount uint64 - if err = pWriter.WriteValue("pool rewards account count", accountCount, true); err != nil { - return ierrors.Wrapf(err, "unable to write account count for epoch index %d", epoch) - } + if err := stream.WriteCollection(writer, serializer.SeriLengthPrefixTypeAsUint32, func() (int, error) { + var epochCount int - if err = rewardsMap.Stream(func(key iotago.AccountID, value *model.PoolRewards) error { - if err = pWriter.WriteValue("account id", key); err != nil { - return ierrors.Wrapf(err, "unable to write account id for epoch index %d and accountID %s", epoch, key) + for epoch := targetEpoch; epoch > iotago.EpochIndex(lo.Max(0, int(targetEpoch)-daysInYear)); epoch-- { + rewardsMap, err := t.rewardsMap(epoch) + if err != nil { + return 0, ierrors.Wrapf(err, "unable to get rewards tree for epoch %d", epoch) + } + // if the map was not present in storage we can skip this epoch and the previous ones, as we never stored any rewards + if !rewardsMap.WasRestoredFromStorage() { + break } - if err = pWriter.WriteValue("account rewards", value); err != nil { - return ierrors.Wrapf(err, "unable to write account rewards for epoch index %d and accountID %s", epoch, key) + if err := stream.Write(writer, epoch); err != nil { + return 0, ierrors.Wrapf(err, "unable to write epoch index for epoch index %d", epoch) } - accountCount++ + if err := stream.WriteCollection(writer, serializer.SeriLengthPrefixTypeAsUint64, func() (int, error) { + var accountCount int - return nil - }); err != nil { - return ierrors.Wrapf(err, "unable to stream rewards for epoch index %d", epoch) - } + if err = rewardsMap.Stream(func(key iotago.AccountID, value *model.PoolRewards) error { + if err := stream.Write(writer, key); err != nil { + return ierrors.Wrapf(err, "unable to write account id for epoch %d and accountID %s", epoch, key) + } - if err = pWriter.WriteValueAtBookmark("pool rewards account count", accountCount); err != nil { - return ierrors.Wrapf(err, "unable to write account count for epoch 
index %d at bookmarked position", epoch) - } + if err := stream.WriteObject(writer, value, (*model.PoolRewards).Bytes); err != nil { + return ierrors.Wrapf(err, "unable to write account rewards for epoch index %d and accountID %s", epoch, key) + } - epochCount++ - } + accountCount++ - if err := pWriter.WriteValueAtBookmark("pool rewards epoch count", epochCount); err != nil { - return ierrors.Wrap(err, "unable to write epoch count at bookmarked position") + return nil + }); err != nil { + return 0, ierrors.Wrapf(err, "unable to stream rewards for epoch index %d", epoch) + } + + return accountCount, nil + }); err != nil { + return 0, ierrors.Wrapf(err, "unable to write rewards for epoch index %d", epoch) + } + + epochCount++ + } + + return epochCount, nil + }); err != nil { + return ierrors.Wrap(err, "unable to write pool rewards collection") } return nil } -func (t *Tracker) exportPoolsStats(pWriter *utils.PositionedWriter, targetEpoch iotago.EpochIndex) error { - var epochCount uint64 - if err := pWriter.WriteValue("pools stats epoch count", epochCount, true); err != nil { - return ierrors.Wrap(err, "unable to write epoch count") - } - // export all stored pools - var innerErr error - if err := t.poolStatsStore.StreamBytes(func(key []byte, value []byte) error { - epoch, _, err := iotago.EpochIndexFromBytes(key) - if err != nil { - innerErr = err +func (t *Tracker) exportPoolsStats(writer io.WriteSeeker, targetEpoch iotago.EpochIndex) error { + if err := stream.WriteCollection(writer, serializer.SeriLengthPrefixTypeAsUint32, func() (int, error) { + var epochCount int - return innerErr - } + // export all stored pools + if err := t.poolStatsStore.StreamBytes(func(key []byte, value []byte) error { + epoch, _, err := iotago.EpochIndexFromBytes(key) + if err != nil { + return err + } - if epoch > targetEpoch { - // continue - return nil - } - if err := pWriter.WriteBytes(key); err != nil { - innerErr = ierrors.Wrapf(err, "unable to write epoch index %d", epoch) + if epoch > targetEpoch { + // continue + return nil + } - return innerErr - } + if err := stream.WriteBytes(writer, key); err != nil { + return ierrors.Wrapf(err, "unable to write epoch index %d", epoch) + } + + if err := stream.WriteBytes(writer, value); err != nil { + return ierrors.Wrapf(err, "unable to write pools stats for epoch %d", epoch) + } - if err := pWriter.WriteBytes(value); err != nil { - innerErr = ierrors.Wrapf(err, "unable to write pools stats for epoch %d", epoch) + epochCount++ - return innerErr + return nil + }); err != nil { + return 0, ierrors.Wrap(err, "unable to iterate over pools stats") } - epochCount++ - - return nil + return epochCount, nil }); err != nil { - return ierrors.Wrap(err, "unable to iterate over pools stats") - } else if innerErr != nil { - return ierrors.Wrap(innerErr, "error while iterating over pools stats") - } - if err := pWriter.WriteValueAtBookmark("pools stats epoch count", epochCount); err != nil { - return ierrors.Wrap(err, "unable to write stats epoch count at bookmarked position") + return ierrors.Wrap(err, "unable to write pool stats collection") } return nil } -func (t *Tracker) exportCommittees(pWriter *utils.PositionedWriter, targetSlot iotago.SlotIndex) error { - var epochCount uint64 - if err := pWriter.WriteValue("committees epoch count", epochCount, true); err != nil { - return ierrors.Wrap(err, "unable to write committees epoch count") - } - +func (t *Tracker) exportCommittees(writer io.WriteSeeker, targetSlot iotago.SlotIndex) error { apiForSlot := 
t.apiProvider.APIForSlot(targetSlot) epochFromTargetSlot := apiForSlot.TimeProvider().EpochFromSlot(targetSlot) pointOfNoReturn := apiForSlot.TimeProvider().EpochEnd(epochFromTargetSlot) - apiForSlot.ProtocolParameters().MaxCommittableAge() - var innerErr error - err := t.committeeStore.StreamBytes(func(epochBytes []byte, committeeBytes []byte) error { - epoch, _, err := iotago.EpochIndexFromBytes(epochBytes) - if err != nil { - innerErr = err - - return innerErr - } + if err := stream.WriteCollection(writer, serializer.SeriLengthPrefixTypeAsUint32, func() (int, error) { + var epochCount int - // We have a committee for an epoch higher than the targetSlot - // 1. we trust the point of no return, we export the committee for the next epoch - // 2. if we don't trust the point-of-no-return - // - we were able to rotate a committee, then we export it - // - we were not able to rotate a committee (reused), then we don't export it - if epoch > epochFromTargetSlot && targetSlot < pointOfNoReturn { - committee, _, err := account.AccountsFromBytes(committeeBytes) + if err := t.committeeStore.StreamBytes(func(epochBytes []byte, committeeBytes []byte) error { + epoch, _, err := iotago.EpochIndexFromBytes(epochBytes) if err != nil { - innerErr = ierrors.Wrapf(err, "failed to parse committee bytes for epoch %d", epoch) - - return innerErr + return err } - if committee.IsReused() { - return nil + + // We have a committee for an epoch higher than the targetSlot + // 1. we trust the point of no return, we export the committee for the next epoch + // 2. if we don't trust the point-of-no-return + // - we were able to rotate a committee, then we export it + // - we were not able to rotate a committee (reused), then we don't export it + if epoch > epochFromTargetSlot && targetSlot < pointOfNoReturn { + committee, _, err := account.AccountsFromBytes(committeeBytes) + if err != nil { + return ierrors.Wrapf(err, "failed to parse committee bytes for epoch %d", epoch) + } + if committee.IsReused() { + return nil + } } - } - if err := pWriter.WriteBytes(epochBytes); err != nil { - innerErr = ierrors.Wrap(err, "unable to write epoch index") + if err := stream.WriteBytes(writer, epochBytes); err != nil { + return ierrors.Wrapf(err, "unable to write epoch index %d", epoch) + } + if err := stream.WriteBytes(writer, committeeBytes); err != nil { + return ierrors.Wrapf(err, "unable to write committee for epoch %d", epoch) + } - return innerErr - } - if err := pWriter.WriteBytes(committeeBytes); err != nil { - innerErr = ierrors.Wrap(err, "unable to write epoch committee") + epochCount++ - return innerErr + return nil + }); err != nil { + return 0, ierrors.Wrap(err, "unable to iterate over committee base store") } - epochCount++ - - return nil - }) - if err != nil { - return ierrors.Wrapf(err, "unable to iterate over committee base store: %w", innerErr) - } - if innerErr != nil { - return ierrors.Wrap(err, "error while iterating over committee base store") - } - - if err = pWriter.WriteValueAtBookmark("committees epoch count", epochCount); err != nil { - return ierrors.Wrap(err, "unable to write committee epoch count at bookmarked position") + return epochCount, nil + }); err != nil { + return ierrors.Wrap(err, "unable to write committees collection") } return nil diff --git a/pkg/protocol/sybilprotection/sybilprotectionv1/performance/snapshot_test.go b/pkg/protocol/sybilprotection/sybilprotectionv1/performance/snapshot_test.go index 2f5ccc8c8..d74d8c2bd 100644 --- 
a/pkg/protocol/sybilprotection/sybilprotectionv1/performance/snapshot_test.go +++ b/pkg/protocol/sybilprotection/sybilprotectionv1/performance/snapshot_test.go @@ -3,9 +3,9 @@ package performance import ( "testing" - "github.com/orcaman/writerseeker" "github.com/stretchr/testify/require" + "github.com/iotaledger/hive.go/serializer/v2/stream" iotago "github.com/iotaledger/iota.go/v4" ) @@ -37,7 +37,7 @@ func TestManager_Import_Export(t *testing.T) { } { - writer := &writerseeker.WriterSeeker{} + writer := stream.NewByteBuffer() delegatorRewardBeforeImport, validatorRewardBeforeImport := ts.calculateExpectedRewards(epochsCount, epochActions) // export two full epochs @@ -47,14 +47,14 @@ func TestManager_Import_Export(t *testing.T) { ts.InitPerformanceTracker() - err = ts.Instance.Import(writer.BytesReader()) + err = ts.Instance.Import(writer.Reader()) require.NoError(t, err) delegatorRewardAfterImport, validatorRewardAfterImport := ts.calculateExpectedRewards(epochsCount, epochActions) require.Equal(t, delegatorRewardBeforeImport, delegatorRewardAfterImport) require.Equal(t, validatorRewardBeforeImport, validatorRewardAfterImport) } { - writer := &writerseeker.WriterSeeker{} + writer := stream.NewByteBuffer() delegatorRewardBeforeImport, validatorRewardBeforeImport := ts.calculateExpectedRewards(epochsCount, epochActions) // export at the beginning of epoch 2, skip epoch 3 at all @@ -64,7 +64,7 @@ func TestManager_Import_Export(t *testing.T) { ts.InitPerformanceTracker() - err = ts.Instance.Import(writer.BytesReader()) + err = ts.Instance.Import(writer.Reader()) require.NoError(t, err) delegatorRewardAfterImport, validatorRewardAfterImport := ts.calculateExpectedRewards(epochsCount, epochActions) diff --git a/pkg/protocol/sybilprotection/sybilprotectionv1/performance/testsuite_test.go b/pkg/protocol/sybilprotection/sybilprotectionv1/performance/testsuite_test.go index 62c5c1e48..9c535a137 100644 --- a/pkg/protocol/sybilprotection/sybilprotectionv1/performance/testsuite_test.go +++ b/pkg/protocol/sybilprotection/sybilprotectionv1/performance/testsuite_test.go @@ -41,7 +41,7 @@ func NewTestSuite(t *testing.T) *TestSuite { epochStats: make(map[iotago.EpochIndex]*model.PoolsStats), api: iotago.V3API( iotago.NewV3ProtocolParameters( - iotago.WithTimeProviderOptions(time.Now().Unix(), 10, 3), + iotago.WithTimeProviderOptions(0, time.Now().Unix(), 10, 3), iotago.WithRewardsOptions(8, 8, 11, 1154, 2, 1), ), ), @@ -61,10 +61,8 @@ func (t *TestSuite) InitPerformanceTracker() { p := slotstore.NewStore(slot, prunableStores[slot], iotago.AccountID.Bytes, iotago.AccountIDFromBytes, - func(s *model.ValidatorPerformance) ([]byte, error) { - return s.Bytes(t.api) - }, - model.ValidatorPerformanceFromBytes(t.api), + (*model.ValidatorPerformance).Bytes, + model.ValidatorPerformanceFromBytes, ) return p, nil diff --git a/pkg/protocol/sybilprotection/sybilprotectionv1/sybilprotection.go b/pkg/protocol/sybilprotection/sybilprotectionv1/sybilprotection.go index fd3ff4c70..320f1e5cb 100644 --- a/pkg/protocol/sybilprotection/sybilprotectionv1/sybilprotection.go +++ b/pkg/protocol/sybilprotection/sybilprotectionv1/sybilprotection.go @@ -226,6 +226,8 @@ func (o *SybilProtection) committeeRoot(targetCommitteeEpoch iotago.EpochIndex) committeeTree := ads.NewSet[iotago.Identifier]( mapdb.NewMapDB(), + iotago.Identifier.Bytes, + iotago.IdentifierFromBytes, iotago.AccountID.Bytes, iotago.AccountIDFromBytes, ) diff --git a/pkg/storage/permanent/commitments.go b/pkg/storage/permanent/commitments.go index abc4b59c2..1a9bd1fd9 
100644 --- a/pkg/storage/permanent/commitments.go +++ b/pkg/storage/permanent/commitments.go @@ -6,11 +6,16 @@ import ( "github.com/iotaledger/hive.go/ierrors" "github.com/iotaledger/hive.go/kvstore" "github.com/iotaledger/hive.go/lo" + "github.com/iotaledger/hive.go/serializer/v2" "github.com/iotaledger/hive.go/serializer/v2/stream" "github.com/iotaledger/iota-core/pkg/model" iotago "github.com/iotaledger/iota.go/v4" ) +var ( + ErrCommitmentBeforeGenesis = ierrors.New("commitment is before genesis") +) + type Commitments struct { apiProvider iotago.APIProvider store *kvstore.TypedStore[iotago.SlotIndex, *model.Commitment] @@ -22,13 +27,8 @@ func NewCommitments(store kvstore.KVStore, apiProvider iotago.APIProvider) *Comm store: kvstore.NewTypedStore(store, iotago.SlotIndex.Bytes, iotago.SlotIndexFromBytes, - func(c *model.Commitment) ([]byte, error) { - return c.Data(), nil - }, - func(bytes []byte) (*model.Commitment, int, error) { - c, err := model.CommitmentFromBytes(bytes, apiProvider) - return c, len(bytes), err - }, + (*model.Commitment).Bytes, + model.CommitmentFromBytes(apiProvider), ), } } @@ -38,18 +38,24 @@ func (c *Commitments) Store(commitment *model.Commitment) error { } func (c *Commitments) Load(slot iotago.SlotIndex) (commitment *model.Commitment, err error) { + genesisSlot := c.apiProvider.CommittedAPI().ProtocolParameters().GenesisSlot() + if slot < genesisSlot { + return nil, ierrors.Wrapf(ErrCommitmentBeforeGenesis, "slot %d is before genesis slot %d", slot, genesisSlot) + } + return c.store.Get(slot) } func (c *Commitments) Export(writer io.WriteSeeker, targetSlot iotago.SlotIndex) (err error) { - if err := stream.WriteCollection(writer, func() (elementsCount uint64, err error) { - var count uint64 - for slot := iotago.SlotIndex(0); slot <= targetSlot; slot++ { + if err := stream.WriteCollection(writer, serializer.SeriLengthPrefixTypeAsUint32, func() (elementsCount int, err error) { + var count int + for slot := c.apiProvider.CommittedAPI().ProtocolParameters().GenesisSlot(); slot <= targetSlot; slot++ { commitmentBytes, err := c.store.KVStore().Get(lo.PanicOnErr(slot.Bytes())) if err != nil { return 0, ierrors.Wrapf(err, "failed to load commitment for slot %d", slot) } - if err := stream.WriteBlob(writer, commitmentBytes); err != nil { + + if err := stream.WriteBytesWithSize(writer, commitmentBytes, serializer.SeriLengthPrefixTypeAsUint16); err != nil { return 0, ierrors.Wrapf(err, "failed to write commitment for slot %d", slot) } @@ -65,17 +71,12 @@ func (c *Commitments) Export(writer io.WriteSeeker, targetSlot iotago.SlotIndex) } func (c *Commitments) Import(reader io.ReadSeeker) (err error) { - if err := stream.ReadCollection(reader, func(i int) error { - commitmentBytes, err := stream.ReadBlob(reader) + if err := stream.ReadCollection(reader, serializer.SeriLengthPrefixTypeAsUint32, func(i int) error { + commitment, err := stream.ReadObjectWithSize[*model.Commitment](reader, serializer.SeriLengthPrefixTypeAsUint16, model.CommitmentFromBytes(c.apiProvider)) if err != nil { return ierrors.Wrapf(err, "failed to read commitment at index %d", i) } - commitment, err := model.CommitmentFromBytes(commitmentBytes, c.apiProvider) - if err != nil { - return ierrors.Wrapf(err, "failed to parse commitment at index %d", i) - } - if err := c.Store(commitment); err != nil { return ierrors.Wrapf(err, "failed to store commitment at index %d", i) } diff --git a/pkg/storage/permanent/settings.go b/pkg/storage/permanent/settings.go index d1ad5fe82..c5cf892af 100644 --- 
a/pkg/storage/permanent/settings.go +++ b/pkg/storage/permanent/settings.go @@ -10,6 +10,7 @@ import ( "github.com/iotaledger/hive.go/lo" "github.com/iotaledger/hive.go/runtime/options" "github.com/iotaledger/hive.go/runtime/syncutils" + "github.com/iotaledger/hive.go/serializer/v2" "github.com/iotaledger/hive.go/serializer/v2/byteutils" "github.com/iotaledger/hive.go/serializer/v2/stream" "github.com/iotaledger/hive.go/stringify" @@ -65,17 +66,8 @@ func NewSettings(store kvstore.KVStore, opts ...options.Option[api.EpochBasedPro storeLatestCommitment: kvstore.NewTypedValue( store, []byte{latestCommitmentKey}, - func(commitment *model.Commitment) ([]byte, error) { - return commitment.Data(), nil - }, - func(bytes []byte) (*model.Commitment, int, error) { - commitment, err := model.CommitmentFromBytes(bytes, apiProvider) - if err != nil { - return nil, 0, err - } - - return commitment, len(bytes), nil - }, + (*model.Commitment).Bytes, + model.CommitmentFromBytes(apiProvider), ), storeLatestFinalizedSlot: kvstore.NewTypedValue( store, @@ -299,7 +291,7 @@ func (s *Settings) latestFinalizedSlot() iotago.SlotIndex { latestFinalizedSlot, err := s.storeLatestFinalizedSlot.Get() if err != nil { if ierrors.Is(err, kvstore.ErrKeyNotFound) { - return 0 + return s.apiProvider.CommittedAPI().ProtocolParameters().GenesisSlot() } panic(err) } @@ -346,7 +338,7 @@ func (s *Settings) Export(writer io.WriteSeeker, targetCommitment *iotago.Commit commitmentBytes = s.LatestCommitment().Data() } - if err := stream.WriteBlob(writer, commitmentBytes); err != nil { + if err := stream.WriteBytesWithSize(writer, commitmentBytes, serializer.SeriLengthPrefixTypeAsUint16); err != nil { return ierrors.Wrap(err, "failed to write commitment") } @@ -358,8 +350,8 @@ func (s *Settings) Export(writer io.WriteSeeker, targetCommitment *iotago.Commit defer s.mutex.RUnlock() // Export protocol versions - if err := stream.WriteCollection(writer, func() (uint64, error) { - var count uint64 + if err := stream.WriteCollection(writer, serializer.SeriLengthPrefixTypeAsUint16, func() (int, error) { + var count int var innerErr error if err := s.storeProtocolVersionEpochMapping.Iterate(kvstore.EmptyPrefix, func(version iotago.Version, epoch iotago.EpochIndex) bool { @@ -391,8 +383,8 @@ func (s *Settings) Export(writer io.WriteSeeker, targetCommitment *iotago.Commit // TODO: rollback future protocol parameters if it was added after targetCommitment.Slot() // Export future protocol parameters - if err := stream.WriteCollection(writer, func() (uint64, error) { - var count uint64 + if err := stream.WriteCollection(writer, serializer.SeriLengthPrefixTypeAsUint16, func() (int, error) { + var count int var innerErr error if err := s.storeFutureProtocolParameters.Iterate(kvstore.EmptyPrefix, func(version iotago.Version, tuple *types.Tuple[iotago.EpochIndex, iotago.Identifier]) bool { @@ -428,8 +420,8 @@ func (s *Settings) Export(writer io.WriteSeeker, targetCommitment *iotago.Commit } // Export protocol parameters: we only export the parameters up until the current active ones. 
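// Aside: the Export/Import refactor in commitments.go above and the matching changes in
// settings.go replace stream.WriteBlob/ReadBlob and raw uint64 element counts with explicitly
// length-prefixed collections. The sketch below is illustrative only (the function name and the
// [][]byte payload are made up, not part of this change set); the stream and serializer calls are
// used exactly as they appear in this diff, assuming imports of hive.go's serializer/v2 and
// serializer/v2/stream packages.
func lengthPrefixedRoundTrip(blobs [][]byte) error {
	buffer := stream.NewByteBuffer()

	// The element count is now written with an explicit length-prefix type instead of a uint64.
	if err := stream.WriteCollection(buffer, serializer.SeriLengthPrefixTypeAsUint32, func() (int, error) {
		for _, blob := range blobs {
			// Each element additionally carries its own size prefix.
			if err := stream.WriteBytesWithSize(buffer, blob, serializer.SeriLengthPrefixTypeAsUint16); err != nil {
				return 0, err
			}
		}

		return len(blobs), nil
	}); err != nil {
		return err
	}

	// Reading mirrors the writing side: the same prefix types must be passed back in.
	reader := buffer.Reader()

	return stream.ReadCollection(reader, serializer.SeriLengthPrefixTypeAsUint32, func(i int) error {
		_, err := stream.ReadBytesWithSize(reader, serializer.SeriLengthPrefixTypeAsUint16)

		return err
	})
}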
- if err := stream.WriteCollection(writer, func() (uint64, error) { - var paramsCount uint64 + if err := stream.WriteCollection(writer, serializer.SeriLengthPrefixTypeAsUint16, func() (int, error) { + var paramsCount int var innerErr error if err := s.storeProtocolParameters.KVStore().Iterate(kvstore.EmptyPrefix, func(key kvstore.Key, value kvstore.Value) bool { @@ -444,7 +436,7 @@ func (s *Settings) Export(writer io.WriteSeeker, targetCommitment *iotago.Commit return true } - if err := stream.WriteBlob(writer, value); err != nil { + if err := stream.WriteBytesWithSize(writer, value, serializer.SeriLengthPrefixTypeAsUint32); err != nil { innerErr = err return false } @@ -467,7 +459,7 @@ func (s *Settings) Export(writer io.WriteSeeker, targetCommitment *iotago.Commit } func (s *Settings) Import(reader io.ReadSeeker) (err error) { - commitmentBytes, err := stream.ReadBlob(reader) + commitmentBytes, err := stream.ReadBytesWithSize(reader, serializer.SeriLengthPrefixTypeAsUint16) if err != nil { return ierrors.Wrap(err, "failed to read commitment") } @@ -482,7 +474,7 @@ func (s *Settings) Import(reader io.ReadSeeker) (err error) { } // Read protocol version epoch mapping - if err := stream.ReadCollection(reader, func(i int) error { + if err := stream.ReadCollection(reader, serializer.SeriLengthPrefixTypeAsUint16, func(i int) error { version, err := stream.Read[iotago.Version](reader) if err != nil { return ierrors.Wrap(err, "failed to parse version") @@ -504,7 +496,7 @@ func (s *Settings) Import(reader io.ReadSeeker) (err error) { } // Read future protocol parameters - if err := stream.ReadCollection(reader, func(i int) error { + if err := stream.ReadCollection(reader, serializer.SeriLengthPrefixTypeAsUint16, func(i int) error { version, err := stream.Read[iotago.Version](reader) if err != nil { return ierrors.Wrap(err, "failed to parse version") @@ -530,8 +522,8 @@ func (s *Settings) Import(reader io.ReadSeeker) (err error) { } // Read protocol parameters - if err := stream.ReadCollection(reader, func(i int) error { - paramsBytes, err := stream.ReadBlob(reader) + if err := stream.ReadCollection(reader, serializer.SeriLengthPrefixTypeAsUint16, func(i int) error { + paramsBytes, err := stream.ReadBytesWithSize(reader, serializer.SeriLengthPrefixTypeAsUint32) if err != nil { return ierrors.Wrapf(err, "failed to read protocol parameters bytes at index %d", i) } @@ -550,7 +542,7 @@ func (s *Settings) Import(reader io.ReadSeeker) (err error) { } // Now that we parsed the protocol parameters, we can parse the commitment since there will be an API available - commitment, err := model.CommitmentFromBytes(commitmentBytes, s.apiProvider) + commitment, err := lo.DropCount(model.CommitmentFromBytes(s.apiProvider)(commitmentBytes)) if err != nil { return ierrors.Wrap(err, "failed to parse commitment") } diff --git a/pkg/storage/prunable/prunable_slot.go b/pkg/storage/prunable/prunable_slot.go index 0549278b8..9297c5977 100644 --- a/pkg/storage/prunable/prunable_slot.go +++ b/pkg/storage/prunable/prunable_slot.go @@ -82,15 +82,11 @@ func (p *Prunable) ValidatorPerformances(slot iotago.SlotIndex) (*slotstore.Stor return nil, ierrors.Wrapf(database.ErrEpochPruned, "could not get performance factors with slot %d", slot) } - apiForSlot := p.apiProvider.APIForSlot(slot) - return slotstore.NewStore(slot, kv, iotago.AccountID.Bytes, iotago.AccountIDFromBytes, - func(s *model.ValidatorPerformance) ([]byte, error) { - return s.Bytes(apiForSlot) - }, - model.ValidatorPerformanceFromBytes(apiForSlot), + 
(*model.ValidatorPerformance).Bytes, + model.ValidatorPerformanceFromBytes, ), nil } diff --git a/pkg/storage/prunable/slotstore/accountdiffs.go b/pkg/storage/prunable/slotstore/accountdiffs.go index 8aa889fa4..e04eb5242 100644 --- a/pkg/storage/prunable/slotstore/accountdiffs.go +++ b/pkg/storage/prunable/slotstore/accountdiffs.go @@ -31,19 +31,14 @@ func NewAccountDiffs(slot iotago.SlotIndex, store kvstore.KVStore, api iotago.AP iotago.AccountID.Bytes, iotago.AccountIDFromBytes, (*model.AccountDiff).Bytes, - func(bytes []byte) (object *model.AccountDiff, consumed int, err error) { - diff := new(model.AccountDiff) - n, err := diff.FromBytes(bytes) - - return diff, n, err - }), + model.AccountDiffFromBytes, + ), destroyedAccounts: kvstore.NewTypedStore[iotago.AccountID, types.Empty](lo.PanicOnErr(store.WithExtendedRealm(kvstore.Realm{destroyedAccountsPrefix})), iotago.AccountID.Bytes, iotago.AccountIDFromBytes, types.Empty.Bytes, - func(bytes []byte) (object types.Empty, consumed int, err error) { - return types.Void, 0, nil - }), + types.EmptyFromBytes, + ), } } diff --git a/pkg/storage/prunable/slotstore/retainer.go b/pkg/storage/prunable/slotstore/retainer.go index 27a46c5d3..646ed7913 100644 --- a/pkg/storage/prunable/slotstore/retainer.go +++ b/pkg/storage/prunable/slotstore/retainer.go @@ -4,7 +4,7 @@ import ( "github.com/iotaledger/hive.go/ierrors" "github.com/iotaledger/hive.go/kvstore" "github.com/iotaledger/hive.go/lo" - "github.com/iotaledger/hive.go/serializer/v2/marshalutil" + "github.com/iotaledger/hive.go/serializer/v2/stream" iotago "github.com/iotaledger/iota.go/v4" "github.com/iotaledger/iota.go/v4/nodeclient/apimodels" ) @@ -20,28 +20,32 @@ type BlockRetainerData struct { } func (b *BlockRetainerData) Bytes() ([]byte, error) { - marshalUtil := marshalutil.New(2) - marshalUtil.WriteUint8(uint8(b.State)) - marshalUtil.WriteUint8(uint8(b.FailureReason)) + byteBuffer := stream.NewByteBuffer(2) - return marshalUtil.Bytes(), nil + if err := stream.Write(byteBuffer, b.State); err != nil { + return nil, ierrors.Wrap(err, "failed to write block state") + } + if err := stream.Write(byteBuffer, b.FailureReason); err != nil { + return nil, ierrors.Wrap(err, "failed to write block failure reason") + } + + return byteBuffer.Bytes() } -func (b *BlockRetainerData) FromBytes(bytes []byte) (int, error) { - marshalUtil := marshalutil.New(bytes) - state, err := marshalUtil.ReadUint8() - if err != nil { - return 0, err - } - b.State = apimodels.BlockState(state) +func BlockRetainerDataFromBytes(bytes []byte) (*BlockRetainerData, int, error) { + byteReader := stream.NewByteReader(bytes) - reason, err := marshalUtil.ReadUint8() - if err != nil { - return 0, err + var err error + b := new(BlockRetainerData) + + if b.State, err = stream.Read[apimodels.BlockState](byteReader); err != nil { + return nil, 0, ierrors.Wrap(err, "failed to read block state") + } + if b.FailureReason, err = stream.Read[apimodels.BlockFailureReason](byteReader); err != nil { + return nil, 0, ierrors.Wrap(err, "failed to read block failure reason") } - b.FailureReason = apimodels.BlockFailureReason(reason) - return marshalUtil.ReadOffset(), nil + return b, byteReader.BytesRead(), nil } type TransactionRetainerData struct { @@ -50,28 +54,32 @@ type TransactionRetainerData struct { } func (t *TransactionRetainerData) Bytes() ([]byte, error) { - marshalUtil := marshalutil.New(2) - marshalUtil.WriteUint8(uint8(t.State)) - marshalUtil.WriteUint8(uint8(t.FailureReason)) + byteBuffer := stream.NewByteBuffer(2) + + if err := 
stream.Write(byteBuffer, t.State); err != nil { + return nil, ierrors.Wrap(err, "failed to write transaction state") + } + if err := stream.Write(byteBuffer, t.FailureReason); err != nil { + return nil, ierrors.Wrap(err, "failed to write transaction failure reason") + } - return marshalUtil.Bytes(), nil + return byteBuffer.Bytes() } -func (t *TransactionRetainerData) FromBytes(bytes []byte) (int, error) { - marshalUtil := marshalutil.New(bytes) - state, err := marshalUtil.ReadUint8() - if err != nil { - return 0, err - } - t.State = apimodels.TransactionState(state) +func TransactionRetainerDataFromBytes(bytes []byte) (*TransactionRetainerData, int, error) { + byteReader := stream.NewByteReader(bytes) - reason, err := marshalUtil.ReadUint8() - if err != nil { - return 0, err + var err error + t := new(TransactionRetainerData) + + if t.State, err = stream.Read[apimodels.TransactionState](byteReader); err != nil { + return nil, 0, ierrors.Wrap(err, "failed to read transaction state") + } + if t.FailureReason, err = stream.Read[apimodels.TransactionFailureReason](byteReader); err != nil { + return nil, 0, ierrors.Wrap(err, "failed to read transaction failure reason") } - t.FailureReason = apimodels.TransactionFailureReason(reason) - return marshalUtil.ReadOffset(), nil + return t, byteReader.BytesRead(), nil } type Retainer struct { @@ -88,23 +96,13 @@ func NewRetainer(slot iotago.SlotIndex, store kvstore.KVStore) (newRetainer *Ret iotago.BlockID.Bytes, iotago.BlockIDFromBytes, (*BlockRetainerData).Bytes, - func(bytes []byte) (*BlockRetainerData, int, error) { - b := new(BlockRetainerData) - c, err := b.FromBytes(bytes) - - return b, c, err - }, + BlockRetainerDataFromBytes, ), transactionStore: kvstore.NewTypedStore(lo.PanicOnErr(store.WithExtendedRealm(kvstore.Realm{transactionStorePrefix})), iotago.BlockID.Bytes, iotago.BlockIDFromBytes, (*TransactionRetainerData).Bytes, - func(bytes []byte) (*TransactionRetainerData, int, error) { - t := new(TransactionRetainerData) - c, err := t.FromBytes(bytes) - - return t, c, err - }, + TransactionRetainerDataFromBytes, ), } } diff --git a/pkg/storage/prunable/slotstore/store.go b/pkg/storage/prunable/slotstore/store.go index d6a571245..97a72e8b9 100644 --- a/pkg/storage/prunable/slotstore/store.go +++ b/pkg/storage/prunable/slotstore/store.go @@ -25,18 +25,18 @@ func NewStore[K, V any]( } } -func (s *Store[K, V]) Load(key K) (V, error) { - value, err := s.kv.Get(key) +func (s *Store[K, V]) Load(key K) (value V, exists bool, err error) { + value, err = s.kv.Get(key) if err != nil { var zeroValue V if ierrors.Is(err, kvstore.ErrKeyNotFound) { - return zeroValue, nil + return zeroValue, false, nil } - return zeroValue, ierrors.Wrapf(err, "failed to get value for key %v", key) + return zeroValue, false, ierrors.Wrapf(err, "failed to get value for key %v", key) } - return value, nil + return value, true, nil } func (s *Store[K, V]) Store(key K, value V) error { diff --git a/pkg/storage/storage_prunable.go b/pkg/storage/storage_prunable.go index 5403484d9..657582904 100644 --- a/pkg/storage/storage_prunable.go +++ b/pkg/storage/storage_prunable.go @@ -41,6 +41,10 @@ func (s *Storage) RootBlocks(slot iotago.SlotIndex) (*slotstore.Store[iotago.Blo return s.prunable.RootBlocks(slot) } +func (s *Storage) GenesisRootBlockID() iotago.BlockID { + return s.Settings().APIProvider().CommittedAPI().ProtocolParameters().GenesisBlockID() +} + func (s *Storage) Mutations(slot iotago.SlotIndex) (kvstore.KVStore, error) { return s.prunable.Mutations(slot) } diff --git 
a/pkg/tests/accounts_test.go b/pkg/tests/accounts_test.go index 99886583d..0cbf224e5 100644 --- a/pkg/tests/accounts_test.go +++ b/pkg/tests/accounts_test.go @@ -30,6 +30,7 @@ func Test_TransitionAndDestroyAccount(t *testing.T) { }), testsuite.WithProtocolParametersOptions( iotago.WithTimeProviderOptions( + 0, testsuite.GenesisTimeWithOffsetBySlots(200, testsuite.DefaultSlotDurationInSeconds), testsuite.DefaultSlotDurationInSeconds, 8, @@ -100,7 +101,7 @@ func Test_TransitionAndDestroyAccount(t *testing.T) { ) // default block issuer issues a block containing the transaction in slot 1. - genesisCommitment := iotago.NewEmptyCommitment(ts.API.ProtocolParameters().Version()) + genesisCommitment := iotago.NewEmptyCommitment(ts.API) genesisCommitment.ReferenceManaCost = ts.API.ProtocolParameters().CongestionControlParameters().MinReferenceManaCost block1 := ts.IssueBasicBlockAtSlotWithOptions("block1", block1Slot, ts.DefaultWallet(), tx1, mock.WithSlotCommitment(genesisCommitment)) latestParent := ts.CommitUntilSlot(ts.BlockID("block1").Slot(), block1) @@ -108,7 +109,7 @@ func Test_TransitionAndDestroyAccount(t *testing.T) { // assert diff of the genesis account, it should have a new output ID, new expiry slot and a new block issuer key. ts.AssertAccountDiff(genesisAccountOutput.AccountID, block1Slot, &model.AccountDiff{ BICChange: 0, - PreviousUpdatedTime: 0, + PreviousUpdatedSlot: 0, PreviousExpirySlot: iotago.MaxSlotIndex, NewExpirySlot: newExpirySlot, NewOutputID: ts.DefaultWallet().Output("TX1:0").OutputID(), @@ -139,7 +140,7 @@ func Test_TransitionAndDestroyAccount(t *testing.T) { // assert diff of the destroyed account. ts.AssertAccountDiff(genesisAccountOutput.AccountID, block2Slot, &model.AccountDiff{ BICChange: -iotago.BlockIssuanceCredits(123), - PreviousUpdatedTime: 0, + PreviousUpdatedSlot: 0, NewExpirySlot: 0, PreviousExpirySlot: newExpirySlot, NewOutputID: iotago.EmptyOutputID, @@ -157,6 +158,7 @@ func Test_StakeDelegateAndDelayedClaim(t *testing.T) { ts := testsuite.NewTestSuite(t, testsuite.WithProtocolParametersOptions( iotago.WithTimeProviderOptions( + 0, testsuite.GenesisTimeWithOffsetBySlots(100, testsuite.DefaultSlotDurationInSeconds), testsuite.DefaultSlotDurationInSeconds, 8, @@ -219,7 +221,7 @@ func Test_StakeDelegateAndDelayedClaim(t *testing.T) { mock.WithAccountAmount(mock.MinIssuerAccountAmount), ) - genesisCommitment := iotago.NewEmptyCommitment(ts.API.ProtocolParameters().Version()) + genesisCommitment := iotago.NewEmptyCommitment(ts.API) genesisCommitment.ReferenceManaCost = ts.API.ProtocolParameters().CongestionControlParameters().MinReferenceManaCost block1 := ts.IssueBasicBlockAtSlotWithOptions("block1", block1Slot, ts.DefaultWallet(), tx1) latestParent := ts.CommitUntilSlot(block1Slot, block1) @@ -229,7 +231,7 @@ func Test_StakeDelegateAndDelayedClaim(t *testing.T) { ts.AssertAccountDiff(newAccountOutput.AccountID, block1Slot, &model.AccountDiff{ BICChange: 0, - PreviousUpdatedTime: 0, + PreviousUpdatedSlot: 0, NewExpirySlot: newAccountExpirySlot, PreviousExpirySlot: 0, NewOutputID: newAccount.OutputID(), @@ -271,7 +273,7 @@ func Test_StakeDelegateAndDelayedClaim(t *testing.T) { ts.AssertAccountDiff(newAccountOutput.AccountID, block2Slot, &model.AccountDiff{ BICChange: 0, - PreviousUpdatedTime: 0, + PreviousUpdatedSlot: 0, NewOutputID: iotago.EmptyOutputID, PreviousOutputID: iotago.EmptyOutputID, BlockIssuerKeysAdded: iotago.NewBlockIssuerKeys(), @@ -304,7 +306,7 @@ func Test_StakeDelegateAndDelayedClaim(t *testing.T) { // Transitioning to delayed claiming 
effectively removes the delegation, so we expect a negative delegation stake change. ts.AssertAccountDiff(newAccountOutput.AccountID, block3Slot, &model.AccountDiff{ BICChange: 0, - PreviousUpdatedTime: 0, + PreviousUpdatedSlot: 0, NewOutputID: iotago.EmptyOutputID, PreviousOutputID: iotago.EmptyOutputID, BlockIssuerKeysAdded: iotago.NewBlockIssuerKeys(), @@ -332,6 +334,7 @@ func Test_ImplicitAccounts(t *testing.T) { ts := testsuite.NewTestSuite(t, testsuite.WithProtocolParametersOptions( iotago.WithTimeProviderOptions( + 0, testsuite.GenesisTimeWithOffsetBySlots(100, testsuite.DefaultSlotDurationInSeconds), testsuite.DefaultSlotDurationInSeconds, 8, @@ -429,7 +432,7 @@ func Test_ImplicitAccounts(t *testing.T) { // the implicit account should now have been transitioned to a full account in the accounts ledger. ts.AssertAccountDiff(implicitAccountID, block2Slot, &model.AccountDiff{ BICChange: allotted - burned, - PreviousUpdatedTime: block1Slot, + PreviousUpdatedSlot: block1Slot, NewOutputID: fullAccountOutputID, PreviousOutputID: implicitAccountOutputID, PreviousExpirySlot: iotago.MaxSlotIndex, diff --git a/pkg/tests/booker_test.go b/pkg/tests/booker_test.go index 8a9e2e576..9608d8f57 100644 --- a/pkg/tests/booker_test.go +++ b/pkg/tests/booker_test.go @@ -229,6 +229,7 @@ func Test_SpendRejectedCommittedRace(t *testing.T) { ts := testsuite.NewTestSuite(t, testsuite.WithProtocolParametersOptions( iotago.WithTimeProviderOptions( + 0, testsuite.GenesisTimeWithOffsetBySlots(20, testsuite.DefaultSlotDurationInSeconds), testsuite.DefaultSlotDurationInSeconds, testsuite.DefaultSlotsPerEpochExponent, @@ -498,6 +499,7 @@ func Test_SpendPendingCommittedRace(t *testing.T) { ts := testsuite.NewTestSuite(t, testsuite.WithProtocolParametersOptions( iotago.WithTimeProviderOptions( + 0, testsuite.GenesisTimeWithOffsetBySlots(20, testsuite.DefaultSlotDurationInSeconds), testsuite.DefaultSlotDurationInSeconds, testsuite.DefaultSlotsPerEpochExponent, diff --git a/pkg/tests/committee_rotation_test.go b/pkg/tests/committee_rotation_test.go index 375a3c98f..835df5931 100644 --- a/pkg/tests/committee_rotation_test.go +++ b/pkg/tests/committee_rotation_test.go @@ -16,6 +16,7 @@ func Test_TopStakersRotation(t *testing.T) { ts := testsuite.NewTestSuite(t, testsuite.WithProtocolParametersOptions( iotago.WithTimeProviderOptions( + 0, testsuite.GenesisTimeWithOffsetBySlots(1000, testsuite.DefaultSlotDurationInSeconds), testsuite.DefaultSlotDurationInSeconds, 4, diff --git a/pkg/tests/confirmation_state_test.go b/pkg/tests/confirmation_state_test.go index c874dd400..63219ee69 100644 --- a/pkg/tests/confirmation_state_test.go +++ b/pkg/tests/confirmation_state_test.go @@ -20,6 +20,7 @@ func TestConfirmationFlags(t *testing.T) { // TODO: remove this opt and use a proper value when refactoring the test with scheduler testsuite.WithProtocolParametersOptions( iotago.WithTimeProviderOptions( + 0, testsuite.GenesisTimeWithOffsetBySlots(100, testsuite.DefaultSlotDurationInSeconds), testsuite.DefaultSlotDurationInSeconds, testsuite.DefaultSlotsPerEpochExponent, @@ -98,7 +99,7 @@ func TestConfirmationFlags(t *testing.T) { }) // Verify that nodes have the expected states. 
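// Aside: the tests in this change set all pass a new leading argument to
// iotago.WithTimeProviderOptions and build the empty genesis commitment from the full API rather
// than just the protocol version. A minimal sketch of the updated bootstrap, assuming a `time`
// import; the concrete values are illustrative, and the leading argument appears to be the
// genesis slot (it is 0 at every call site here and matches the GenesisSlot()/GenesisBlockID()
// accessors used elsewhere in this diff).
api := iotago.V3API(iotago.NewV3ProtocolParameters(
	// Inferred order: genesis slot, genesis unix time, slot duration in seconds, slots-per-epoch exponent.
	iotago.WithTimeProviderOptions(0, time.Now().Unix(), 10, 3),
))

// The empty commitment now takes the API, presumably so it can start at the configured genesis
// slot instead of always at slot 0.
genesisCommitment := iotago.NewEmptyCommitment(api)
genesisCommitment.ReferenceManaCost = api.ProtocolParameters().CongestionControlParameters().MinReferenceManaCost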
- genesisCommitment := iotago.NewEmptyCommitment(ts.API.ProtocolParameters().Version()) + genesisCommitment := iotago.NewEmptyCommitment(ts.API) genesisCommitment.ReferenceManaCost = ts.API.ProtocolParameters().CongestionControlParameters().MinReferenceManaCost ts.AssertNodeState(ts.Nodes(), testsuite.WithSnapshotImported(true), diff --git a/pkg/tests/loss_of_acceptance_test.go b/pkg/tests/loss_of_acceptance_test.go index a8bb02537..961adfbcd 100644 --- a/pkg/tests/loss_of_acceptance_test.go +++ b/pkg/tests/loss_of_acceptance_test.go @@ -17,6 +17,7 @@ func TestLossOfAcceptanceFromGenesis(t *testing.T) { ts := testsuite.NewTestSuite(t, testsuite.WithProtocolParametersOptions( iotago.WithTimeProviderOptions( + 0, testsuite.GenesisTimeWithOffsetBySlots(100, testsuite.DefaultSlotDurationInSeconds), testsuite.DefaultSlotDurationInSeconds, 3, @@ -102,6 +103,7 @@ func TestLossOfAcceptanceFromSnapshot(t *testing.T) { ts := testsuite.NewTestSuite(t, testsuite.WithProtocolParametersOptions( iotago.WithTimeProviderOptions( + 0, testsuite.GenesisTimeWithOffsetBySlots(100, testsuite.DefaultSlotDurationInSeconds), testsuite.DefaultSlotDurationInSeconds, 3, @@ -196,6 +198,7 @@ func TestLossOfAcceptanceWithRestartFromDisk(t *testing.T) { ts := testsuite.NewTestSuite(t, testsuite.WithProtocolParametersOptions( iotago.WithTimeProviderOptions( + 0, testsuite.GenesisTimeWithOffsetBySlots(100, testsuite.DefaultSlotDurationInSeconds), testsuite.DefaultSlotDurationInSeconds, 3, diff --git a/pkg/tests/protocol_engine_rollback_test.go b/pkg/tests/protocol_engine_rollback_test.go index e99339b19..0de8dc8f6 100644 --- a/pkg/tests/protocol_engine_rollback_test.go +++ b/pkg/tests/protocol_engine_rollback_test.go @@ -29,6 +29,7 @@ func TestProtocol_EngineRollbackFinalization(t *testing.T) { ts := testsuite.NewTestSuite(t, testsuite.WithProtocolParametersOptions( iotago.WithTimeProviderOptions( + 0, testsuite.GenesisTimeWithOffsetBySlots(1000, testsuite.DefaultSlotDurationInSeconds), testsuite.DefaultSlotDurationInSeconds, 3, @@ -115,7 +116,7 @@ func TestProtocol_EngineRollbackFinalization(t *testing.T) { } { - genesisCommitment := iotago.NewEmptyCommitment(ts.API.ProtocolParameters().Version()) + genesisCommitment := iotago.NewEmptyCommitment(ts.API) genesisCommitment.ReferenceManaCost = ts.API.ProtocolParameters().CongestionControlParameters().MinReferenceManaCost ts.AssertNodeState(ts.Nodes(), testsuite.WithSnapshotImported(true), @@ -215,6 +216,7 @@ func TestProtocol_EngineRollbackNoFinalization(t *testing.T) { ts := testsuite.NewTestSuite(t, testsuite.WithProtocolParametersOptions( iotago.WithTimeProviderOptions( + 0, testsuite.GenesisTimeWithOffsetBySlots(1000, testsuite.DefaultSlotDurationInSeconds), testsuite.DefaultSlotDurationInSeconds, 3, @@ -307,7 +309,7 @@ func TestProtocol_EngineRollbackNoFinalization(t *testing.T) { } { - genesisCommitment := iotago.NewEmptyCommitment(ts.API.ProtocolParameters().Version()) + genesisCommitment := iotago.NewEmptyCommitment(ts.API) genesisCommitment.ReferenceManaCost = ts.API.ProtocolParameters().CongestionControlParameters().MinReferenceManaCost ts.AssertNodeState(ts.Nodes(), testsuite.WithSnapshotImported(true), @@ -414,6 +416,7 @@ func TestProtocol_EngineRollbackNoFinalizationLastSlot(t *testing.T) { ts := testsuite.NewTestSuite(t, testsuite.WithProtocolParametersOptions( iotago.WithTimeProviderOptions( + 0, testsuite.GenesisTimeWithOffsetBySlots(1000, testsuite.DefaultSlotDurationInSeconds), testsuite.DefaultSlotDurationInSeconds, 3, @@ -506,7 +509,7 @@ func 
TestProtocol_EngineRollbackNoFinalizationLastSlot(t *testing.T) { } { - genesisCommitment := iotago.NewEmptyCommitment(ts.API.ProtocolParameters().Version()) + genesisCommitment := iotago.NewEmptyCommitment(ts.API) genesisCommitment.ReferenceManaCost = ts.API.ProtocolParameters().CongestionControlParameters().MinReferenceManaCost ts.AssertNodeState(ts.Nodes(), testsuite.WithSnapshotImported(true), @@ -613,6 +616,7 @@ func TestProtocol_EngineRollbackNoFinalizationBeforePointOfNoReturn(t *testing.T ts := testsuite.NewTestSuite(t, testsuite.WithProtocolParametersOptions( iotago.WithTimeProviderOptions( + 0, testsuite.GenesisTimeWithOffsetBySlots(1000, testsuite.DefaultSlotDurationInSeconds), testsuite.DefaultSlotDurationInSeconds, 3, @@ -705,7 +709,7 @@ func TestProtocol_EngineRollbackNoFinalizationBeforePointOfNoReturn(t *testing.T } { - genesisCommitment := iotago.NewEmptyCommitment(ts.API.ProtocolParameters().Version()) + genesisCommitment := iotago.NewEmptyCommitment(ts.API) genesisCommitment.ReferenceManaCost = ts.API.ProtocolParameters().CongestionControlParameters().MinReferenceManaCost ts.AssertNodeState(ts.Nodes(), testsuite.WithSnapshotImported(true), diff --git a/pkg/tests/protocol_engine_switching_test.go b/pkg/tests/protocol_engine_switching_test.go index aa6bf7834..b56f185ed 100644 --- a/pkg/tests/protocol_engine_switching_test.go +++ b/pkg/tests/protocol_engine_switching_test.go @@ -30,6 +30,7 @@ func TestProtocol_EngineSwitching(t *testing.T) { ts := testsuite.NewTestSuite(t, testsuite.WithProtocolParametersOptions( iotago.WithTimeProviderOptions( + 0, testsuite.GenesisTimeWithOffsetBySlots(1000, testsuite.DefaultSlotDurationInSeconds), testsuite.DefaultSlotDurationInSeconds, 3, @@ -143,7 +144,7 @@ func TestProtocol_EngineSwitching(t *testing.T) { // Verify that nodes have the expected states. { - genesisCommitment := iotago.NewEmptyCommitment(ts.API.ProtocolParameters().Version()) + genesisCommitment := iotago.NewEmptyCommitment(ts.API) genesisCommitment.ReferenceManaCost = ts.API.ProtocolParameters().CongestionControlParameters().MinReferenceManaCost ts.AssertNodeState(ts.Nodes(), testsuite.WithSnapshotImported(true), diff --git a/pkg/tests/protocol_startup_test.go b/pkg/tests/protocol_startup_test.go index 11b6672d1..8d1e502ed 100644 --- a/pkg/tests/protocol_startup_test.go +++ b/pkg/tests/protocol_startup_test.go @@ -24,6 +24,7 @@ func Test_BookInCommittedSlot(t *testing.T) { ts := testsuite.NewTestSuite(t, testsuite.WithProtocolParametersOptions( iotago.WithTimeProviderOptions( + 0, testsuite.GenesisTimeWithOffsetBySlots(1000, testsuite.DefaultSlotDurationInSeconds), testsuite.DefaultSlotDurationInSeconds, 3, @@ -62,7 +63,7 @@ func Test_BookInCommittedSlot(t *testing.T) { } // Verify that nodes have the expected states. 
- genesisCommitment := iotago.NewEmptyCommitment(ts.API.ProtocolParameters().Version()) + genesisCommitment := iotago.NewEmptyCommitment(ts.API) genesisCommitment.ReferenceManaCost = ts.API.ProtocolParameters().CongestionControlParameters().MinReferenceManaCost ts.AssertNodeState(ts.Nodes(), testsuite.WithSnapshotImported(true), @@ -123,6 +124,7 @@ func Test_StartNodeFromSnapshotAndDisk(t *testing.T) { ts := testsuite.NewTestSuite(t, testsuite.WithProtocolParametersOptions( iotago.WithTimeProviderOptions( + 0, testsuite.GenesisTimeWithOffsetBySlots(1000, testsuite.DefaultSlotDurationInSeconds), testsuite.DefaultSlotDurationInSeconds, 3, @@ -173,7 +175,7 @@ func Test_StartNodeFromSnapshotAndDisk(t *testing.T) { } // Verify that nodes have the expected states. - genesisCommitment := iotago.NewEmptyCommitment(ts.API.ProtocolParameters().Version()) + genesisCommitment := iotago.NewEmptyCommitment(ts.API) genesisCommitment.ReferenceManaCost = ts.API.ProtocolParameters().CongestionControlParameters().MinReferenceManaCost ts.AssertNodeState(ts.Nodes(), testsuite.WithSnapshotImported(true), diff --git a/pkg/tests/upgrade_signaling_test.go b/pkg/tests/upgrade_signaling_test.go index 8f5ed29f3..3d69028c4 100644 --- a/pkg/tests/upgrade_signaling_test.go +++ b/pkg/tests/upgrade_signaling_test.go @@ -31,6 +31,7 @@ func Test_Upgrade_Signaling(t *testing.T) { ts := testsuite.NewTestSuite(t, testsuite.WithProtocolParametersOptions( iotago.WithTimeProviderOptions( + 0, testsuite.GenesisTimeWithOffsetBySlots(1000, testsuite.DefaultSlotDurationInSeconds), testsuite.DefaultSlotDurationInSeconds, 3, @@ -132,7 +133,7 @@ func Test_Upgrade_Signaling(t *testing.T) { ts.AssertAccountData(&accounts.AccountData{ ID: ts.Node("nodeA").Validator.AccountID, - Credits: &accounts.BlockIssuanceCredits{Value: iotago.MaxBlockIssuanceCredits / 2, UpdateTime: 0}, + Credits: &accounts.BlockIssuanceCredits{Value: iotago.MaxBlockIssuanceCredits / 2, UpdateSlot: 0}, ExpirySlot: iotago.MaxSlotIndex, OutputID: ts.AccountOutput("Genesis:1").OutputID(), BlockIssuerKeys: iotago.NewBlockIssuerKeys(iotago.Ed25519PublicKeyBlockIssuerKeyFromPublicKey(ed25519.PublicKey(ts.Node("nodeA").Validator.PublicKey))), @@ -145,7 +146,7 @@ func Test_Upgrade_Signaling(t *testing.T) { ts.AssertAccountData(&accounts.AccountData{ ID: wallet.BlockIssuer.AccountID, - Credits: &accounts.BlockIssuanceCredits{Value: iotago.MaxBlockIssuanceCredits / 2, UpdateTime: 0}, + Credits: &accounts.BlockIssuanceCredits{Value: iotago.MaxBlockIssuanceCredits / 2, UpdateSlot: 0}, ExpirySlot: iotago.MaxSlotIndex, OutputID: ts.AccountOutput("Genesis:5").OutputID(), BlockIssuerKeys: iotago.NewBlockIssuerKeys(iotago.Ed25519PublicKeyBlockIssuerKeyFromPublicKey(ed25519.PublicKey(wallet.BlockIssuer.PublicKey))), @@ -166,7 +167,7 @@ func Test_Upgrade_Signaling(t *testing.T) { // check account data before all nodes set the current version ts.AssertAccountData(&accounts.AccountData{ ID: ts.Node("nodeA").Validator.AccountID, - Credits: &accounts.BlockIssuanceCredits{Value: iotago.MaxBlockIssuanceCredits / 2, UpdateTime: 0}, + Credits: &accounts.BlockIssuanceCredits{Value: iotago.MaxBlockIssuanceCredits / 2, UpdateSlot: 0}, ExpirySlot: iotago.MaxSlotIndex, OutputID: ts.AccountOutput("Genesis:1").OutputID(), BlockIssuerKeys: iotago.NewBlockIssuerKeys(iotago.Ed25519PublicKeyBlockIssuerKeyFromPublicKey(ed25519.PublicKey(ts.Node("nodeA").Validator.PublicKey))), @@ -179,7 +180,7 @@ func Test_Upgrade_Signaling(t *testing.T) { ts.AssertAccountData(&accounts.AccountData{ ID: 
ts.Node("nodeD").Validator.AccountID, - Credits: &accounts.BlockIssuanceCredits{Value: iotago.MaxBlockIssuanceCredits / 2, UpdateTime: 0}, + Credits: &accounts.BlockIssuanceCredits{Value: iotago.MaxBlockIssuanceCredits / 2, UpdateSlot: 0}, ExpirySlot: iotago.MaxSlotIndex, OutputID: ts.AccountOutput("Genesis:4").OutputID(), BlockIssuerKeys: iotago.NewBlockIssuerKeys(iotago.Ed25519PublicKeyBlockIssuerKeyFromPublicKey(ed25519.PublicKey(ts.Node("nodeD").Validator.PublicKey))), @@ -200,7 +201,7 @@ func Test_Upgrade_Signaling(t *testing.T) { ts.AssertAccountData(&accounts.AccountData{ ID: ts.Node("nodeA").Validator.AccountID, - Credits: &accounts.BlockIssuanceCredits{Value: iotago.MaxBlockIssuanceCredits / 2, UpdateTime: 0}, + Credits: &accounts.BlockIssuanceCredits{Value: iotago.MaxBlockIssuanceCredits / 2, UpdateSlot: 0}, ExpirySlot: iotago.MaxSlotIndex, OutputID: ts.AccountOutput("Genesis:1").OutputID(), BlockIssuerKeys: iotago.NewBlockIssuerKeys(iotago.Ed25519PublicKeyBlockIssuerKeyFromPublicKey(ed25519.PublicKey(ts.Node("nodeA").Validator.PublicKey))), @@ -365,7 +366,7 @@ func Test_Upgrade_Signaling(t *testing.T) { // check account data at the end of the test ts.AssertAccountData(&accounts.AccountData{ ID: ts.Node("nodeA").Validator.AccountID, - Credits: &accounts.BlockIssuanceCredits{Value: iotago.MaxBlockIssuanceCredits / 2, UpdateTime: 0}, + Credits: &accounts.BlockIssuanceCredits{Value: iotago.MaxBlockIssuanceCredits / 2, UpdateSlot: 0}, ExpirySlot: iotago.MaxSlotIndex, OutputID: ts.AccountOutput("Genesis:1").OutputID(), BlockIssuerKeys: iotago.NewBlockIssuerKeys(iotago.Ed25519PublicKeyBlockIssuerKeyFromPublicKey(ed25519.PublicKey(ts.Node("nodeA").Validator.PublicKey))), @@ -378,7 +379,7 @@ func Test_Upgrade_Signaling(t *testing.T) { ts.AssertAccountData(&accounts.AccountData{ ID: ts.Node("nodeD").Validator.AccountID, - Credits: &accounts.BlockIssuanceCredits{Value: iotago.MaxBlockIssuanceCredits / 2, UpdateTime: 0}, + Credits: &accounts.BlockIssuanceCredits{Value: iotago.MaxBlockIssuanceCredits / 2, UpdateSlot: 0}, ExpirySlot: iotago.MaxSlotIndex, OutputID: ts.AccountOutput("Genesis:4").OutputID(), BlockIssuerKeys: iotago.NewBlockIssuerKeys(iotago.Ed25519PublicKeyBlockIssuerKeyFromPublicKey(ed25519.PublicKey(ts.Node("nodeD").Validator.PublicKey))), diff --git a/pkg/testsuite/accounts.go b/pkg/testsuite/accounts.go index b3683761a..715276ef4 100644 --- a/pkg/testsuite/accounts.go +++ b/pkg/testsuite/accounts.go @@ -29,8 +29,8 @@ func (t *TestSuite) AssertAccountData(accountData *accounts.AccountData, nodes . 
return ierrors.Errorf("AssertAccountData: %s: accountID %s expected credits value %d, got %d", node.Name, accountData.ID, accountData.Credits.Value, actualAccountData.Credits.Value) } - if accountData.Credits.UpdateTime != actualAccountData.Credits.UpdateTime { - return ierrors.Errorf("AssertAccountData: %s: accountID %s expected credits update time %d, got %d", node.Name, accountData.ID, accountData.Credits.UpdateTime, actualAccountData.Credits.UpdateTime) + if accountData.Credits.UpdateSlot != actualAccountData.Credits.UpdateSlot { + return ierrors.Errorf("AssertAccountData: %s: accountID %s expected credits update time %d, got %d", node.Name, accountData.ID, accountData.Credits.UpdateSlot, actualAccountData.Credits.UpdateSlot) } if accountData.OutputID != actualAccountData.OutputID { @@ -98,8 +98,8 @@ func (t *TestSuite) AssertAccountDiff(accountID iotago.AccountID, index iotago.S return ierrors.Errorf("AssertAccountDiff: %s: expected change %d but actual %d for account %s at slot %d", node.Name, accountDiff.BICChange, actualAccountDiff.BICChange, accountID, index) } - if accountDiff.PreviousUpdatedTime != actualAccountDiff.PreviousUpdatedTime { - return ierrors.Errorf("AssertAccountDiff: %s: expected previous updated time %d but actual %d for account %s at slot %d", node.Name, accountDiff.PreviousUpdatedTime, actualAccountDiff.PreviousUpdatedTime, accountID, index) + if accountDiff.PreviousUpdatedSlot != actualAccountDiff.PreviousUpdatedSlot { + return ierrors.Errorf("AssertAccountDiff: %s: expected previous updated time %d but actual %d for account %s at slot %d", node.Name, accountDiff.PreviousUpdatedSlot, actualAccountDiff.PreviousUpdatedSlot, accountID, index) } if accountDiff.NewExpirySlot != actualAccountDiff.NewExpirySlot { diff --git a/pkg/testsuite/mock/node.go b/pkg/testsuite/mock/node.go index b9bde387f..0c6efdbe7 100644 --- a/pkg/testsuite/mock/node.go +++ b/pkg/testsuite/mock/node.go @@ -182,7 +182,7 @@ func (n *Node) hookLogging(failOnBlockFiltered bool) { fmt.Printf("%s > Network.AttestationsRequestReceived: from %s %s\n", n.Name, source, id) }) - events.Network.WarpSyncResponseReceived.Hook(func(id iotago.CommitmentID, ds iotago.BlockIDs, m *merklehasher.Proof[iotago.Identifier], ds2 iotago.TransactionIDs, m2 *merklehasher.Proof[iotago.Identifier], id2 peer.ID) { + events.Network.WarpSyncResponseReceived.Hook(func(id iotago.CommitmentID, ds map[iotago.CommitmentID]iotago.BlockIDs, m *merklehasher.Proof[iotago.Identifier], ds2 iotago.TransactionIDs, m2 *merklehasher.Proof[iotago.Identifier], id2 peer.ID) { fmt.Printf("%s > Network.WarpSyncResponseReceived: from %s %s\n", n.Name, id2, id) }) @@ -334,8 +334,9 @@ func (n *Node) attachEngineLogsWithName(failOnBlockFiltered bool, instance *engi rootsStorage, err := instance.Storage.Roots(details.Commitment.ID().Slot()) require.NoError(n.Testing, err, "roots storage for slot %d not found", details.Commitment.Slot()) - roots, err := rootsStorage.Load(details.Commitment.ID()) + roots, exists, err := rootsStorage.Load(details.Commitment.ID()) require.NoError(n.Testing, err) + require.True(n.Testing, exists) attestationBlockIDs := make([]iotago.BlockID, 0) tree, err := instance.Attestations.GetMap(details.Commitment.Slot()) diff --git a/pkg/testsuite/snapshotcreator/options.go b/pkg/testsuite/snapshotcreator/options.go index 85aa1f439..20addc3f2 100644 --- a/pkg/testsuite/snapshotcreator/options.go +++ b/pkg/testsuite/snapshotcreator/options.go @@ -20,6 +20,9 @@ type Options struct { // ProtocolParameters provides the protocol 
parameters used for the network. ProtocolParameters iotago.ProtocolParameters + // AddGenesisRootBlock defines whether a Genesis root block should be added. + AddGenesisRootBlock bool + // RootBlocks define the initial blocks to which new blocks can attach to. RootBlocks map[iotago.BlockID]iotago.CommitmentID @@ -84,6 +87,13 @@ func WithRootBlocks(rootBlocks map[iotago.BlockID]iotago.CommitmentID) options.O } } +// WithAddGenesisRootBlock define whether a Genesis root block should be added. +func WithAddGenesisRootBlock(add bool) options.Option[Options] { + return func(m *Options) { + m.AddGenesisRootBlock = add + } +} + // WithGenesisKeyManager defines the seed used to generate keypair that can spend Genesis outputs. func WithGenesisKeyManager(keyManager *mock.KeyManager) options.Option[Options] { return func(m *Options) { diff --git a/pkg/testsuite/snapshotcreator/snapshotcreator.go b/pkg/testsuite/snapshotcreator/snapshotcreator.go index c09f9d622..52e546f34 100644 --- a/pkg/testsuite/snapshotcreator/snapshotcreator.go +++ b/pkg/testsuite/snapshotcreator/snapshotcreator.go @@ -66,7 +66,8 @@ func CreateSnapshot(opts ...options.Option[Options]) error { } api := s.Settings().APIProvider().CommittedAPI() - if err := s.Commitments().Store(model.NewEmptyCommitment(api)); err != nil { + genesisCommitment := model.NewEmptyCommitment(api) + if err := s.Commitments().Store(genesisCommitment); err != nil { return ierrors.Wrap(err, "failed to store empty commitment") } @@ -82,7 +83,7 @@ func CreateSnapshot(opts ...options.Option[Options]) error { accountID := blake2b.Sum256(ed25519PubKey[:]) committeeAccountsData = append(committeeAccountsData, &accounts.AccountData{ ID: accountID, - Credits: &accounts.BlockIssuanceCredits{Value: snapshotAccountDetails.BlockIssuanceCredits, UpdateTime: 0}, + Credits: &accounts.BlockIssuanceCredits{Value: snapshotAccountDetails.BlockIssuanceCredits, UpdateSlot: 0}, ExpirySlot: snapshotAccountDetails.ExpirySlot, OutputID: iotago.OutputID{}, BlockIssuerKeys: iotago.BlockIssuerKeys{snapshotAccountDetails.IssuerKey}, @@ -120,6 +121,10 @@ func CreateSnapshot(opts ...options.Option[Options]) error { ) defer engineInstance.Shutdown() + if opt.AddGenesisRootBlock { + engineInstance.EvictionState.AddRootBlock(api.ProtocolParameters().GenesisBlockID(), genesisCommitment.ID()) + } + for blockID, commitmentID := range opt.RootBlocks { engineInstance.EvictionState.AddRootBlock(blockID, commitmentID) } @@ -164,7 +169,7 @@ func CreateSnapshot(opts ...options.Option[Options]) error { return err } - utxoOutput := utxoledger.CreateOutput(engineInstance, outputID, iotago.EmptyBlockID, GenesisTransactionCreationSlot, output, proof) + utxoOutput := utxoledger.CreateOutput(engineInstance, outputID, api.ProtocolParameters().GenesisBlockID(), GenesisTransactionCreationSlot, output, proof) if err := engineInstance.Ledger.AddGenesisUnspentOutput(utxoOutput); err != nil { return err } diff --git a/pkg/testsuite/storage_commitments.go b/pkg/testsuite/storage_commitments.go index 407682a9c..970b14a7a 100644 --- a/pkg/testsuite/storage_commitments.go +++ b/pkg/testsuite/storage_commitments.go @@ -58,7 +58,7 @@ func (t *TestSuite) AssertEqualStoredCommitmentAtIndex(index iotago.SlotIndex, n }) } -func (t *TestSuite) AssertStorageCommitmentBlocks(slot iotago.SlotIndex, expectedBlocks iotago.BlockIDs, nodes ...*mock.Node) { +func (t *TestSuite) AssertStorageCommitmentBlocks(slot iotago.SlotIndex, expectedBlocksBySlotCommitmentID map[iotago.CommitmentID]iotago.BlockIDs, nodes ...*mock.Node) { 
mustNodes(nodes) t.Eventually(func() error { @@ -73,13 +73,21 @@ func (t *TestSuite) AssertStorageCommitmentBlocks(slot iotago.SlotIndex, expecte return ierrors.Wrapf(err, "AssertStorageCommitmentBlocks: %s: error getting committed slot for commitment: %s", node.Name, storedCommitment.ID()) } - committedBlocks, err := committedSlot.BlockIDs() + committedBlocksBySlotCommitmentID, err := committedSlot.BlocksIDsBySlotCommitmentID() if err != nil { return ierrors.Wrapf(err, "AssertStorageCommitmentBlocks: %s: error getting committed blocks for slot: %d", node.Name, slot) } - if !assert.Equal(t.fakeTesting, committedBlocks, expectedBlocks) { - return ierrors.Errorf("AssertStorageCommitmentBlocks: %s: expected %s, got %s", node.Name, expectedBlocks, committedBlocks) + if len(committedBlocksBySlotCommitmentID) == 0 { + committedBlocksBySlotCommitmentID = nil + } + + if len(expectedBlocksBySlotCommitmentID) == 0 { + expectedBlocksBySlotCommitmentID = nil + } + + if !assert.Equal(t.fakeTesting, committedBlocksBySlotCommitmentID, expectedBlocksBySlotCommitmentID) { + return ierrors.Errorf("AssertStorageCommitmentBlocks: %s: expected %s, got %s", node.Name, expectedBlocksBySlotCommitmentID, committedBlocksBySlotCommitmentID) } } diff --git a/pkg/testsuite/storage_rootblocks.go b/pkg/testsuite/storage_rootblocks.go index 8cb868bfd..60b721ddf 100644 --- a/pkg/testsuite/storage_rootblocks.go +++ b/pkg/testsuite/storage_rootblocks.go @@ -17,11 +17,15 @@ func (t *TestSuite) AssertStorageRootBlocks(blocks []*blocks.Block, nodes ...*mo return ierrors.Errorf("AssertStorageRootBlocks: %s: error loading root blocks for %s: %v", node.Name, block.ID().Slot(), err) } - loadedCommitmentID, err := storage.Load(block.ID()) + loadedCommitmentID, exists, err := storage.Load(block.ID()) if err != nil { return ierrors.Wrapf(err, "AssertStorageRootBlocks: %s: failed to load root block %s", node.Name, block.ID()) } + if !exists { + return ierrors.Errorf("AssertStorageRootBlocks: %s: root block %s does not exist", node.Name, block.ID()) + } + if block.SlotCommitmentID() != loadedCommitmentID { return ierrors.Errorf("AssertStorageRootBlocks: %s: expected slot commitment %s, got %s for block %s", node.Name, block.SlotCommitmentID(), loadedCommitmentID, block.ID()) } diff --git a/pkg/testsuite/testsuite.go b/pkg/testsuite/testsuite.go index 794702d79..f7c5f0b11 100644 --- a/pkg/testsuite/testsuite.go +++ b/pkg/testsuite/testsuite.go @@ -90,6 +90,7 @@ func NewTestSuite(testingT *testing.T, opts ...options.Option[TestSuite]) *TestS iotago.WithStakingOptions(1, 100, 1), iotago.WithTimeProviderOptions( + 0, GenesisTimeWithOffsetBySlots(0, DefaultSlotDurationInSeconds), DefaultSlotDurationInSeconds, DefaultSlotsPerEpochExponent, @@ -116,7 +117,7 @@ func NewTestSuite(testingT *testing.T, opts ...options.Option[TestSuite]) *TestS t.ProtocolParameterOptions = append(defaultProtocolParameters, t.ProtocolParameterOptions...) 
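// Aside: the snapshot creator above gained an explicit option for seeding a genesis root block
// (WithAddGenesisRootBlock), which makes CreateSnapshot register GenesisBlockID() against the
// empty genesis commitment. A minimal usage sketch, assuming a snapshotPath string and an api
// iotago.API that are not part of this change set:
if err := snapshotcreator.CreateSnapshot(
	snapshotcreator.WithFilePath(snapshotPath),
	snapshotcreator.WithProtocolParameters(api.ProtocolParameters()),
	snapshotcreator.WithAddGenesisRootBlock(true),
); err != nil {
	panic(err)
}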
t.API = iotago.V3API(iotago.NewV3ProtocolParameters(t.ProtocolParameterOptions...)) - genesisBlock := blocks.NewRootBlock(iotago.EmptyBlockID, iotago.NewEmptyCommitment(t.API.ProtocolParameters().Version()).MustID(), time.Unix(t.API.ProtocolParameters().GenesisUnixTimestamp(), 0)) + genesisBlock := blocks.NewRootBlock(t.API.ProtocolParameters().GenesisBlockID(), iotago.NewEmptyCommitment(t.API).MustID(), time.Unix(t.API.ProtocolParameters().GenesisUnixTimestamp(), 0)) t.RegisterBlock("Genesis", genesisBlock) t.snapshotPath = t.Directory.Path("genesis_snapshot.bin") @@ -125,7 +126,7 @@ func NewTestSuite(testingT *testing.T, opts ...options.Option[TestSuite]) *TestS snapshotcreator.WithFilePath(t.snapshotPath), snapshotcreator.WithProtocolParameters(t.API.ProtocolParameters()), snapshotcreator.WithRootBlocks(map[iotago.BlockID]iotago.CommitmentID{ - iotago.EmptyBlockID: iotago.NewEmptyCommitment(t.API.ProtocolParameters().Version()).MustID(), + t.API.ProtocolParameters().GenesisBlockID(): iotago.NewEmptyCommitment(t.API).MustID(), }), } t.optsSnapshotOptions = append(defaultSnapshotOptions, t.optsSnapshotOptions...) diff --git a/pkg/utils/ioutils.go b/pkg/utils/ioutils.go deleted file mode 100644 index 5c7d810b4..000000000 --- a/pkg/utils/ioutils.go +++ /dev/null @@ -1,98 +0,0 @@ -package utils - -import ( - "encoding/binary" - "io" - - "github.com/iotaledger/hive.go/ierrors" -) - -func increaseOffsets(amount int64, offsets ...*int64) { - for _, offset := range offsets { - *offset += amount - } -} - -func WriteValueFunc(writeSeeker io.WriteSeeker, value any, offsetsToIncrease ...*int64) error { - length := binary.Size(value) - if length == -1 { - return ierrors.New("unable to determine length of value") - } - - if err := binary.Write(writeSeeker, binary.LittleEndian, value); err != nil { - return ierrors.Wrap(err, "unable to write value") - } - - increaseOffsets(int64(length), offsetsToIncrease...) - - return nil -} - -func WriteBytesFunc(writeSeeker io.WriteSeeker, bytes []byte, offsetsToIncrease ...*int64) error { - length, err := writeSeeker.Write(bytes) - if err != nil { - return ierrors.Wrap(err, "unable to write bytes") - } - - increaseOffsets(int64(length), offsetsToIncrease...) 
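// Aside: the pkg/utils helpers being deleted here existed mainly for this bookmark-and-backpatch
// pattern: reserve space for a count, write the elements, then seek back and overwrite the
// placeholder. The serializer/v2/stream collection helpers used throughout this change set appear
// to cover that use case now. A stdlib-only sketch of the pattern for context (the function name
// is illustrative; it assumes `io` and `encoding/binary` imports):
func writeBackpatchedCount(w io.WriteSeeker, elements [][]byte) error {
	// Remember where the count lives and write a placeholder for it.
	countPos, err := w.Seek(0, io.SeekCurrent)
	if err != nil {
		return err
	}
	if err := binary.Write(w, binary.LittleEndian, uint32(0)); err != nil {
		return err
	}

	var count uint32
	for _, element := range elements {
		if _, err := w.Write(element); err != nil {
			return err
		}
		count++
	}

	// Seek back, overwrite the placeholder with the real count, then restore the write position.
	endPos, err := w.Seek(0, io.SeekCurrent)
	if err != nil {
		return err
	}
	if _, err := w.Seek(countPos, io.SeekStart); err != nil {
		return err
	}
	if err := binary.Write(w, binary.LittleEndian, count); err != nil {
		return err
	}
	_, err = w.Seek(endPos, io.SeekStart)

	return err
}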
- - return nil -} - -type PositionedWriter struct { - bookmarks map[string]int64 - writer io.WriteSeeker -} - -func NewPositionedWriter(writer io.WriteSeeker) *PositionedWriter { - p := &PositionedWriter{ - bookmarks: make(map[string]int64), - writer: writer, - } - - return p -} - -func (p *PositionedWriter) WriteBytes(bytes []byte) error { - return WriteBytesFunc(p.writer, bytes) -} - -func (p *PositionedWriter) WriteValue(name string, value interface{}, saveBookmark ...bool) error { - if len(saveBookmark) > 0 && saveBookmark[0] { - currentPosition, err := p.writer.Seek(0, io.SeekCurrent) - if err != nil { - return err - } - p.bookmarks[name] = currentPosition - } - if err := WriteValueFunc(p.writer, value); err != nil { - return ierrors.Wrapf(err, "unable to write value %s", name) - } - - return nil -} - -func (p *PositionedWriter) WriteValueAtBookmark(name string, value interface{}) error { - bookmarkPosition, ok := p.bookmarks[name] - if !ok { - return ierrors.Errorf("unable to find saved position for bookmark %s", name) - } - originalPosition, err := p.writer.Seek(0, io.SeekCurrent) - if err != nil { - return ierrors.Wrap(err, "unable to obtain current seek position") - } - if bookmarkPosition >= originalPosition { - return ierrors.Errorf("cannot write into the future, current write position %d is greater than or equal to the bookmark position %d", originalPosition, bookmarkPosition) - } - if _, err := p.writer.Seek(bookmarkPosition, io.SeekStart); err != nil { - return ierrors.Wrapf(err, "unable to seek back to bookmark %s position", name) - } - if err := WriteValueFunc(p.writer, value); err != nil { - return ierrors.Wrapf(err, "unable to write value %s", name) - } - if _, err := p.writer.Seek(originalPosition, io.SeekStart); err != nil { - return ierrors.Wrap(err, "unable to seek to original position") - } - - return nil -} diff --git a/tools/docker-network/run.sh b/tools/docker-network/run.sh index 74090cc05..a87b02f98 100755 --- a/tools/docker-network/run.sh +++ b/tools/docker-network/run.sh @@ -48,7 +48,7 @@ docker run --rm \ -e GOCACHE="/go-cache" \ -e GOMODCACHE="/go-mod-cache" \ -w "/workspace/tools/genesis-snapshot" \ - golang:1.21 go run -tags=rocksdb . --config docker --seed 7R1itJx5hVuo9w9hjg5cwKFmek4HMSoBDgJZN8hKGxih + golang:1.21-bookworm go run -tags=rocksdb . --config docker --seed 7R1itJx5hVuo9w9hjg5cwKFmek4HMSoBDgJZN8hKGxih # Move and set permissions for the .snapshot file if ! 
mv -f ../genesis-snapshot/*.snapshot .; then
diff --git a/tools/gendoc/go.mod b/tools/gendoc/go.mod
index 45fda2a00..97dd12ea0 100644
--- a/tools/gendoc/go.mod
+++ b/tools/gendoc/go.mod
@@ -5,7 +5,7 @@ go 1.21
 replace github.com/iotaledger/iota-core => ../../
 
 require (
-	github.com/iotaledger/hive.go/app v0.0.0-20231107225803-f89acd088c10
+	github.com/iotaledger/hive.go/app v0.0.0-20231108050255-98e0fa35e936
 	github.com/iotaledger/hive.go/apputils v0.0.0-20230829152614-7afc7a4d89b3
 	github.com/iotaledger/iota-core v0.0.0-00010101000000-000000000000
 )
@@ -57,22 +57,22 @@ require (
 	github.com/huin/goupnp v1.3.0 // indirect
 	github.com/iancoleman/orderedmap v0.3.0 // indirect
 	github.com/iotaledger/grocksdb v1.7.5-0.20230220105546-5162e18885c7 // indirect
-	github.com/iotaledger/hive.go/ads v0.0.0-20231107225803-f89acd088c10 // indirect
-	github.com/iotaledger/hive.go/constraints v0.0.0-20231107225803-f89acd088c10 // indirect
-	github.com/iotaledger/hive.go/core v1.0.0-rc.3.0.20231107225803-f89acd088c10 // indirect
-	github.com/iotaledger/hive.go/crypto v0.0.0-20231107225803-f89acd088c10 // indirect
-	github.com/iotaledger/hive.go/ds v0.0.0-20231107225803-f89acd088c10 // indirect
-	github.com/iotaledger/hive.go/ierrors v0.0.0-20231107225803-f89acd088c10 // indirect
-	github.com/iotaledger/hive.go/kvstore v0.0.0-20231107225803-f89acd088c10 // indirect
-	github.com/iotaledger/hive.go/lo v0.0.0-20231107225803-f89acd088c10 // indirect
-	github.com/iotaledger/hive.go/log v0.0.0-20231107225803-f89acd088c10 // indirect
-	github.com/iotaledger/hive.go/logger v0.0.0-20231107225803-f89acd088c10 // indirect
-	github.com/iotaledger/hive.go/runtime v0.0.0-20231107225803-f89acd088c10 // indirect
-	github.com/iotaledger/hive.go/serializer/v2 v2.0.0-rc.1.0.20231107225803-f89acd088c10 // indirect
-	github.com/iotaledger/hive.go/stringify v0.0.0-20231107225803-f89acd088c10 // indirect
-	github.com/iotaledger/inx-app v1.0.0-rc.3.0.20231031135002-4c79ea5193f5 // indirect
-	github.com/iotaledger/inx/go v1.0.0-rc.2.0.20231031134131-b6ad918dc1ac // indirect
-	github.com/iotaledger/iota.go/v4 v4.0.0-20231102113728-20b8d01e826e // indirect
+	github.com/iotaledger/hive.go/ads v0.0.0-20231108050255-98e0fa35e936 // indirect
+	github.com/iotaledger/hive.go/constraints v0.0.0-20231108050255-98e0fa35e936 // indirect
+	github.com/iotaledger/hive.go/core v1.0.0-rc.3.0.20231108050255-98e0fa35e936 // indirect
+	github.com/iotaledger/hive.go/crypto v0.0.0-20231108050255-98e0fa35e936 // indirect
+	github.com/iotaledger/hive.go/ds v0.0.0-20231108050255-98e0fa35e936 // indirect
+	github.com/iotaledger/hive.go/ierrors v0.0.0-20231108050255-98e0fa35e936 // indirect
+	github.com/iotaledger/hive.go/kvstore v0.0.0-20231108050255-98e0fa35e936 // indirect
+	github.com/iotaledger/hive.go/lo v0.0.0-20231108050255-98e0fa35e936 // indirect
+	github.com/iotaledger/hive.go/log v0.0.0-20231108050255-98e0fa35e936 // indirect
+	github.com/iotaledger/hive.go/logger v0.0.0-20231108050255-98e0fa35e936 // indirect
+	github.com/iotaledger/hive.go/runtime v0.0.0-20231108050255-98e0fa35e936 // indirect
+	github.com/iotaledger/hive.go/serializer/v2 v2.0.0-rc.1.0.20231108050255-98e0fa35e936 // indirect
+	github.com/iotaledger/hive.go/stringify v0.0.0-20231108050255-98e0fa35e936 // indirect
+	github.com/iotaledger/inx-app v1.0.0-rc.3.0.20231108104504-1445f545de82 // indirect
+	github.com/iotaledger/inx/go v1.0.0-rc.2.0.20231108104322-f301c3573998 // indirect
+	github.com/iotaledger/iota.go/v4 v4.0.0-20231108103955-bf75d703d8aa // indirect
 	github.com/ipfs/boxo v0.13.1 // indirect
 	github.com/ipfs/go-cid v0.4.1 // indirect
 	github.com/ipfs/go-datastore v0.6.0 // indirect
diff --git a/tools/gendoc/go.sum b/tools/gendoc/go.sum
index d57ca1682..0eec52286 100644
--- a/tools/gendoc/go.sum
+++ b/tools/gendoc/go.sum
@@ -277,42 +277,42 @@ github.com/iancoleman/orderedmap v0.3.0/go.mod h1:XuLcCUkdL5owUCQeF2Ue9uuw1EptkJ
 github.com/ianlancetaylor/demangle v0.0.0-20210905161508-09a460cdf81d/go.mod h1:aYm2/VgdVmcIU8iMfdMvDMsRAQjcfZSKFby6HOFvi/w=
 github.com/iotaledger/grocksdb v1.7.5-0.20230220105546-5162e18885c7 h1:dTrD7X2PTNgli6EbS4tV9qu3QAm/kBU3XaYZV2xdzys=
 github.com/iotaledger/grocksdb v1.7.5-0.20230220105546-5162e18885c7/go.mod h1:ZRdPu684P0fQ1z8sXz4dj9H5LWHhz4a9oCtvjunkSrw=
-github.com/iotaledger/hive.go/ads v0.0.0-20231107225803-f89acd088c10 h1:M24zuxsCGccvksoanDZEjc8K3tWFyw7aZ2sbQK740pE=
-github.com/iotaledger/hive.go/ads v0.0.0-20231107225803-f89acd088c10/go.mod h1:IFh0gDfeMgZtfCo+5afK59IDR4xXh+cTR9YtLnZPcbY=
-github.com/iotaledger/hive.go/app v0.0.0-20231107225803-f89acd088c10 h1:wsUsKHP9meQsr1UPYASpN+QRa2NlWyhDbt0R310NccM=
-github.com/iotaledger/hive.go/app v0.0.0-20231107225803-f89acd088c10/go.mod h1:8ZbIKR84oQd/3iQ5eeT7xpudO9/ytzXP7veIYnk7Orc=
+github.com/iotaledger/hive.go/ads v0.0.0-20231108050255-98e0fa35e936 h1:2r4FgIGdc2lHcIbXiUFCCVq4+B0oZk9t6Z0SSLjrzCE=
+github.com/iotaledger/hive.go/ads v0.0.0-20231108050255-98e0fa35e936/go.mod h1:gbUvr01B5ha530GnNm8K2OsHXOd2BtzBYOMxyTX3iDg=
+github.com/iotaledger/hive.go/app v0.0.0-20231108050255-98e0fa35e936 h1:SnmQt9GxrWIvpW7pgQS049x1b8T+lQutTQbo35FImug=
+github.com/iotaledger/hive.go/app v0.0.0-20231108050255-98e0fa35e936/go.mod h1:+riYmeLApkLlj4+EpuJpEJAsj/KGfD7cqLGy7oTsPOM=
 github.com/iotaledger/hive.go/apputils v0.0.0-20230829152614-7afc7a4d89b3 h1:4aVJTc0KS77uEw0Tny4r0n1ORwcbAQDECaCclgf/6lE=
 github.com/iotaledger/hive.go/apputils v0.0.0-20230829152614-7afc7a4d89b3/go.mod h1:TZeAqieDu+xDOZp2e9+S+8pZp1PrfgcwLUnxmd8IgLU=
-github.com/iotaledger/hive.go/constraints v0.0.0-20231107225803-f89acd088c10 h1:CJ9nehCDKqFo3sJLMnybx0/AvmdXq6dau5qFr+pivUc=
-github.com/iotaledger/hive.go/constraints v0.0.0-20231107225803-f89acd088c10/go.mod h1:dOBOM2s4se3HcWefPe8sQLUalGXJ8yVXw58oK8jke3s=
-github.com/iotaledger/hive.go/core v1.0.0-rc.3.0.20231107225803-f89acd088c10 h1:FMassldB6buYv8nsfELSkKzR3mj326YNmLy4DNY+20o=
-github.com/iotaledger/hive.go/core v1.0.0-rc.3.0.20231107225803-f89acd088c10/go.mod h1:Mc+ACqBGPxrPMIPUBOm6/HL0J6m0iVMwjtIEKW3uow8=
-github.com/iotaledger/hive.go/crypto v0.0.0-20231107225803-f89acd088c10 h1:sGxsehUXmhWW5Vv9PBwuW1mlW2Npdb2yMonZgolVzHs=
-github.com/iotaledger/hive.go/crypto v0.0.0-20231107225803-f89acd088c10/go.mod h1:h3o6okvMSEK3KOX6pOp3yq1h9ohTkTfo6X8MzEadeb0=
-github.com/iotaledger/hive.go/ds v0.0.0-20231107225803-f89acd088c10 h1:NufkzT29n9OconEE6+8HMoCkW+MXiznGn+HxWrNPy1o=
-github.com/iotaledger/hive.go/ds v0.0.0-20231107225803-f89acd088c10/go.mod h1:3XkUSKfHaVxGbT0XAvjNlVYqPzhfLTGhDtdNA5UBPco=
-github.com/iotaledger/hive.go/ierrors v0.0.0-20231107225803-f89acd088c10 h1:M43fs0ybJXyVGnN55xG2OrMmXqbpYGudnU8zIA7NNL4=
-github.com/iotaledger/hive.go/ierrors v0.0.0-20231107225803-f89acd088c10/go.mod h1:HcE8B5lP96enc/OALTb2/rIIi+yOLouRoHOKRclKmC8=
-github.com/iotaledger/hive.go/kvstore v0.0.0-20231107225803-f89acd088c10 h1:nGffY7n3mxUFtWrKgbvNx7jYu2lGkeu01hp+8aLaOk8=
-github.com/iotaledger/hive.go/kvstore v0.0.0-20231107225803-f89acd088c10/go.mod h1:O/U3jtiUDeqqM0MZQFu2UPqS9fUm0C5hNISxlmg/thE=
-github.com/iotaledger/hive.go/lo v0.0.0-20231107225803-f89acd088c10 h1:ME7iE4yKMYEvfkFvOPswAWsZaq7mLkKiGN88K1X1OBg=
-github.com/iotaledger/hive.go/lo v0.0.0-20231107225803-f89acd088c10/go.mod h1:s4kzx9QY1MVWHJralj+3q5kI0eARtrJhphYD/iBbPfo=
-github.com/iotaledger/hive.go/log v0.0.0-20231107225803-f89acd088c10 h1:yhDHLCtdpLSiv/kDDLDkJZcJispd1OUAWIYF7RXFQi4=
-github.com/iotaledger/hive.go/log v0.0.0-20231107225803-f89acd088c10/go.mod h1:JvokzmpmFZPDskMlUqqjgHtD8usVJU4nAY/TNMGge8M=
-github.com/iotaledger/hive.go/logger v0.0.0-20231107225803-f89acd088c10 h1:ajaTrqlYEjVbkIu2RTN+GKrQnbbbjoAFea2wLgj2B+c=
-github.com/iotaledger/hive.go/logger v0.0.0-20231107225803-f89acd088c10/go.mod h1:aBfAfIB2GO/IblhYt5ipCbyeL9bXSNeAwtYVA3hZaHg=
-github.com/iotaledger/hive.go/runtime v0.0.0-20231107225803-f89acd088c10 h1:5tPaO+hxPTBp5J7Ap2oIqzHEXmYbrh5Rfh4y2l5KaQQ=
-github.com/iotaledger/hive.go/runtime v0.0.0-20231107225803-f89acd088c10/go.mod h1:jRw8yFipiPaqmTPHh7hTcxAP9u6pjRGpByS3REJKkbY=
-github.com/iotaledger/hive.go/serializer/v2 v2.0.0-rc.1.0.20231107225803-f89acd088c10 h1:KahaknpEVnJCgyaawYzRVR0rcX2/iCXiUXHvSjlMqEA=
-github.com/iotaledger/hive.go/serializer/v2 v2.0.0-rc.1.0.20231107225803-f89acd088c10/go.mod h1:SdK26z8/VhWtxaqCuQrufm80SELgowQPmu9T/8eUQ8g=
-github.com/iotaledger/hive.go/stringify v0.0.0-20231107225803-f89acd088c10 h1:1BQfYB6hxWGTjrv70SP3xxThxTV8t1AqEVQRRr5dTJI=
-github.com/iotaledger/hive.go/stringify v0.0.0-20231107225803-f89acd088c10/go.mod h1:FTo/UWzNYgnQ082GI9QVM9HFDERqf9rw9RivNpqrnTs=
-github.com/iotaledger/inx-app v1.0.0-rc.3.0.20231031135002-4c79ea5193f5 h1:17JDzMKTMXKF3xys6gPURRddkZhg1LY+xwfhbr/sVqg=
-github.com/iotaledger/inx-app v1.0.0-rc.3.0.20231031135002-4c79ea5193f5/go.mod h1:LsJvoBUVVnY7tkwwByCVtAwmp5bFXdyJNGU/+KVQJVM=
-github.com/iotaledger/inx/go v1.0.0-rc.2.0.20231031134131-b6ad918dc1ac h1:c7R33+TQGMYP6pvLUQQaqpdDFl+GZbhAcfGMI0285fo=
-github.com/iotaledger/inx/go v1.0.0-rc.2.0.20231031134131-b6ad918dc1ac/go.mod h1:qPuMUvCTaghsnYRDnRoRuztTyEKFlmi2S7gb44rH7WM=
-github.com/iotaledger/iota.go/v4 v4.0.0-20231102113728-20b8d01e826e h1:ZYRC1MHn/ghsqtjIpYGTxLQrh5n5eUmC0/YWnJiTRhk=
-github.com/iotaledger/iota.go/v4 v4.0.0-20231102113728-20b8d01e826e/go.mod h1:jqbLYq4a/FwuiPBqFfkAwwxU8vs3+kReRq2/tyX5qRA=
+github.com/iotaledger/hive.go/constraints v0.0.0-20231108050255-98e0fa35e936 h1:qkq0Wz+Y3J8QYRLd0fwTgHuur/A3k7d82BxOKSfvk8c=
+github.com/iotaledger/hive.go/constraints v0.0.0-20231108050255-98e0fa35e936/go.mod h1:dOBOM2s4se3HcWefPe8sQLUalGXJ8yVXw58oK8jke3s=
+github.com/iotaledger/hive.go/core v1.0.0-rc.3.0.20231108050255-98e0fa35e936 h1:GtsYwcCqRomhMo190TPxBrOzs6YnVmqkmQgT/lJrJRo=
+github.com/iotaledger/hive.go/core v1.0.0-rc.3.0.20231108050255-98e0fa35e936/go.mod h1:CdixkrB7VdQzEDlVuwsxPtsiJL/WXrQgz3PELIqlLko=
+github.com/iotaledger/hive.go/crypto v0.0.0-20231108050255-98e0fa35e936 h1:Xeb4w0g0Kv2ZjdCZQqz8oiqAU5qAy8OXG8kGTXSPzuY=
+github.com/iotaledger/hive.go/crypto v0.0.0-20231108050255-98e0fa35e936/go.mod h1:OQ9EVTTQT1mkO/16BgwSIyQlAhEg+Cptud/yutevWsI=
+github.com/iotaledger/hive.go/ds v0.0.0-20231108050255-98e0fa35e936 h1:NtQLSS0Lq5qg/w5nbMpXrlQpmcK3KiOaQmgZWoRc4mM=
+github.com/iotaledger/hive.go/ds v0.0.0-20231108050255-98e0fa35e936/go.mod h1:JE8cbZSvzbB5TrwXibg6M0B7ck35YxF30ItHBzQRlgc=
+github.com/iotaledger/hive.go/ierrors v0.0.0-20231108050255-98e0fa35e936 h1:o5S4KUAwToOLXoYYRj9ZgqeDsFv1VRM4+Mni0Tdj2Ck=
+github.com/iotaledger/hive.go/ierrors v0.0.0-20231108050255-98e0fa35e936/go.mod h1:HcE8B5lP96enc/OALTb2/rIIi+yOLouRoHOKRclKmC8=
+github.com/iotaledger/hive.go/kvstore v0.0.0-20231108050255-98e0fa35e936 h1:kXKJQ8UvbA8kI0Jx0EnlXbwDeZFY8pEX0Q6KaOPsYlQ=
+github.com/iotaledger/hive.go/kvstore v0.0.0-20231108050255-98e0fa35e936/go.mod h1:ytfKoHr/nF8u0y0G4mamfG0yjFtJiJVk0kgjnPOtsSY=
+github.com/iotaledger/hive.go/lo v0.0.0-20231108050255-98e0fa35e936 h1:coXPklQ7JgqTXIUXh3b4OHml1VIvI8x7pQsjsES/u/s=
+github.com/iotaledger/hive.go/lo v0.0.0-20231108050255-98e0fa35e936/go.mod h1:6Ee7i6b4tuTHuRYnPP8VUb0wr9XFI5qlqtnttBd9jRg=
+github.com/iotaledger/hive.go/log v0.0.0-20231108050255-98e0fa35e936 h1:VBvGnsVwqhoT9zMyMIlK5fPmz6fsbiPZOwdU1E8WU7o=
+github.com/iotaledger/hive.go/log v0.0.0-20231108050255-98e0fa35e936/go.mod h1:vzO4/wRkEJDEZb/9fD10oKU9k1bj4qLir2Uhl5U1FkM=
+github.com/iotaledger/hive.go/logger v0.0.0-20231108050255-98e0fa35e936 h1:05EbTaladbyo7mD8yBaWYJh9P8u/TUTmrjVmcUjoW8A=
+github.com/iotaledger/hive.go/logger v0.0.0-20231108050255-98e0fa35e936/go.mod h1:w1psHM2MuKsen1WdsPKrpqElYH7ZOQ+YdQIgJZg4HTo=
+github.com/iotaledger/hive.go/runtime v0.0.0-20231108050255-98e0fa35e936 h1:XbC1fmY87UJ/yMs8U2YqlUdJsqb0Xqj/ZYQKlZ7AUG8=
+github.com/iotaledger/hive.go/runtime v0.0.0-20231108050255-98e0fa35e936/go.mod h1:DrZPvUvLarK8C2qb+3H2vdypp/MuhpQmB3iMJbDCr/Q=
+github.com/iotaledger/hive.go/serializer/v2 v2.0.0-rc.1.0.20231108050255-98e0fa35e936 h1:LXhLW2cN9bQYoHQsgmJRb/jiRBRU5s2rLoCNjZfgHdg=
+github.com/iotaledger/hive.go/serializer/v2 v2.0.0-rc.1.0.20231108050255-98e0fa35e936/go.mod h1:FoH3T6yKlZJp8xm8K+zsQiibSynp32v21CpWx8xkek8=
+github.com/iotaledger/hive.go/stringify v0.0.0-20231108050255-98e0fa35e936 h1:Y4HgL5gm9S27usg5M2t6wi1BSdCxVorM62lwnpKuMd4=
+github.com/iotaledger/hive.go/stringify v0.0.0-20231108050255-98e0fa35e936/go.mod h1:FTo/UWzNYgnQ082GI9QVM9HFDERqf9rw9RivNpqrnTs=
+github.com/iotaledger/inx-app v1.0.0-rc.3.0.20231108104504-1445f545de82 h1:FdM1lxUKgENO3oOlF5blVqmjER44mLIHGpavyUOY5JI=
+github.com/iotaledger/inx-app v1.0.0-rc.3.0.20231108104504-1445f545de82/go.mod h1:HVxkGPraMDTRudfG9AFN7Ga9gijp6skXB9TKNBc4KgI=
+github.com/iotaledger/inx/go v1.0.0-rc.2.0.20231108104322-f301c3573998 h1:KkC0SaWrjSMg897r2DDosJYALFfLadFST3Fvoaxg7hw=
+github.com/iotaledger/inx/go v1.0.0-rc.2.0.20231108104322-f301c3573998/go.mod h1:c+lBG3vgt2rgXHeOncK8hMllMwihTAtVbu790NslW2w=
+github.com/iotaledger/iota.go/v4 v4.0.0-20231108103955-bf75d703d8aa h1:A2nadmSbmn62f6wtrqvv/TCCF2sDiiwyDnl6brbRo1E=
+github.com/iotaledger/iota.go/v4 v4.0.0-20231108103955-bf75d703d8aa/go.mod h1:8iDORW4/e4NztyAGqjW07uSMjbhs7snbxw+81IWOczY=
 github.com/ipfs/boxo v0.13.1 h1:nQ5oQzcMZR3oL41REJDcTbrvDvuZh3J9ckc9+ILeRQI=
 github.com/ipfs/boxo v0.13.1/go.mod h1:btrtHy0lmO1ODMECbbEY1pxNtrLilvKSYLoGQt1yYCk=
 github.com/ipfs/go-cid v0.4.1 h1:A/T3qGvxi4kpKWWcPC/PgbvDA2bjVLO7n4UeVwnbs/s=
@@ -511,8 +511,6 @@ github.com/opencontainers/runtime-spec v1.1.0/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/
 github.com/opentracing/opentracing-go v1.2.0 h1:uEJPy/1a5RIPAJ0Ov+OIO8OxWu77jEv+1B0VhjKrZUs=
 github.com/opentracing/opentracing-go v1.2.0/go.mod h1:GxEUsuufX4nBwe+T+Wl9TAgYrxe9dPLANfrWvHYVTgc=
 github.com/openzipkin/zipkin-go v0.1.1/go.mod h1:NtoC/o8u3JlF1lSlyPNswIbeQH9bJTmOf0Erfk+hxe8=
-github.com/orcaman/writerseeker v0.0.0-20200621085525-1d3f536ff85e h1:s2RNOM/IGdY0Y6qfTeUKhDawdHDpK9RGBdx80qN4Ttw=
-github.com/orcaman/writerseeker v0.0.0-20200621085525-1d3f536ff85e/go.mod h1:nBdnFKj15wFbf94Rwfq4m30eAcyY9V/IyKAGQFtqkW0=
 github.com/otiai10/copy v1.14.0 h1:dCI/t1iTdYGtkvCuBG2BgR6KZa83PTclw4U5n2wAllU=
 github.com/otiai10/copy v1.14.0/go.mod h1:ECfuL02W+/FkTWZWgQqXPWZgW9oeKCSQ5qVfSc4qc4w=
 github.com/otiai10/mint v1.5.1 h1:XaPLeE+9vGbuyEHem1JNk3bYc7KKqyI/na0/mLd/Kks=
diff --git a/tools/genesis-snapshot/go.mod b/tools/genesis-snapshot/go.mod
index 2035452c7..dbc3a759d 100644
--- a/tools/genesis-snapshot/go.mod
+++ b/tools/genesis-snapshot/go.mod
@@ -5,12 +5,12 @@ go 1.21
 replace github.com/iotaledger/iota-core => ../../
 
 require (
-	github.com/iotaledger/hive.go/crypto v0.0.0-20231107225803-f89acd088c10
-	github.com/iotaledger/hive.go/ierrors v0.0.0-20231107225803-f89acd088c10
-	github.com/iotaledger/hive.go/lo v0.0.0-20231107225803-f89acd088c10
-	github.com/iotaledger/hive.go/runtime v0.0.0-20231107225803-f89acd088c10
+	github.com/iotaledger/hive.go/crypto v0.0.0-20231108050255-98e0fa35e936
+	github.com/iotaledger/hive.go/ierrors v0.0.0-20231108050255-98e0fa35e936
+	github.com/iotaledger/hive.go/lo v0.0.0-20231108050255-98e0fa35e936
+	github.com/iotaledger/hive.go/runtime v0.0.0-20231108050255-98e0fa35e936
 	github.com/iotaledger/iota-core v0.0.0-00010101000000-000000000000
-	github.com/iotaledger/iota.go/v4 v4.0.0-20231102113728-20b8d01e826e
+	github.com/iotaledger/iota.go/v4 v4.0.0-20231108103955-bf75d703d8aa
 	github.com/mr-tron/base58 v1.2.0
 	github.com/spf13/pflag v1.0.5
 	golang.org/x/crypto v0.14.0
@@ -26,14 +26,14 @@ require (
 	github.com/holiman/uint256 v1.2.3 // indirect
 	github.com/iancoleman/orderedmap v0.3.0 // indirect
 	github.com/iotaledger/grocksdb v1.7.5-0.20230220105546-5162e18885c7 // indirect
-	github.com/iotaledger/hive.go/ads v0.0.0-20231107225803-f89acd088c10 // indirect
-	github.com/iotaledger/hive.go/constraints v0.0.0-20231107225803-f89acd088c10 // indirect
-	github.com/iotaledger/hive.go/core v1.0.0-rc.3.0.20231107225803-f89acd088c10 // indirect
-	github.com/iotaledger/hive.go/ds v0.0.0-20231107225803-f89acd088c10 // indirect
-	github.com/iotaledger/hive.go/kvstore v0.0.0-20231107225803-f89acd088c10 // indirect
-	github.com/iotaledger/hive.go/log v0.0.0-20231107225803-f89acd088c10 // indirect
-	github.com/iotaledger/hive.go/serializer/v2 v2.0.0-rc.1.0.20231107225803-f89acd088c10 // indirect
-	github.com/iotaledger/hive.go/stringify v0.0.0-20231107225803-f89acd088c10 // indirect
+	github.com/iotaledger/hive.go/ads v0.0.0-20231108050255-98e0fa35e936 // indirect
+	github.com/iotaledger/hive.go/constraints v0.0.0-20231108050255-98e0fa35e936 // indirect
+	github.com/iotaledger/hive.go/core v1.0.0-rc.3.0.20231108050255-98e0fa35e936 // indirect
+	github.com/iotaledger/hive.go/ds v0.0.0-20231108050255-98e0fa35e936 // indirect
+	github.com/iotaledger/hive.go/kvstore v0.0.0-20231108050255-98e0fa35e936 // indirect
+	github.com/iotaledger/hive.go/log v0.0.0-20231108050255-98e0fa35e936 // indirect
+	github.com/iotaledger/hive.go/serializer/v2 v2.0.0-rc.1.0.20231108050255-98e0fa35e936 // indirect
+	github.com/iotaledger/hive.go/stringify v0.0.0-20231108050255-98e0fa35e936 // indirect
 	github.com/ipfs/go-cid v0.4.1 // indirect
 	github.com/klauspost/cpuid/v2 v2.2.5 // indirect
 	github.com/kr/text v0.2.0 // indirect
diff --git a/tools/genesis-snapshot/go.sum b/tools/genesis-snapshot/go.sum
index 12920f76f..8f8630e48 100644
--- a/tools/genesis-snapshot/go.sum
+++ b/tools/genesis-snapshot/go.sum
@@ -28,32 +28,32 @@ github.com/iancoleman/orderedmap v0.3.0 h1:5cbR2grmZR/DiVt+VJopEhtVs9YGInGIxAoMJ
 github.com/iancoleman/orderedmap v0.3.0/go.mod h1:XuLcCUkdL5owUCQeF2Ue9uuw1EptkJDkXXS7VoV7XGE=
 github.com/iotaledger/grocksdb v1.7.5-0.20230220105546-5162e18885c7 h1:dTrD7X2PTNgli6EbS4tV9qu3QAm/kBU3XaYZV2xdzys=
 github.com/iotaledger/grocksdb v1.7.5-0.20230220105546-5162e18885c7/go.mod h1:ZRdPu684P0fQ1z8sXz4dj9H5LWHhz4a9oCtvjunkSrw=
-github.com/iotaledger/hive.go/ads v0.0.0-20231107225803-f89acd088c10 h1:M24zuxsCGccvksoanDZEjc8K3tWFyw7aZ2sbQK740pE=
-github.com/iotaledger/hive.go/ads v0.0.0-20231107225803-f89acd088c10/go.mod h1:IFh0gDfeMgZtfCo+5afK59IDR4xXh+cTR9YtLnZPcbY=
-github.com/iotaledger/hive.go/constraints v0.0.0-20231107225803-f89acd088c10 h1:CJ9nehCDKqFo3sJLMnybx0/AvmdXq6dau5qFr+pivUc=
-github.com/iotaledger/hive.go/constraints v0.0.0-20231107225803-f89acd088c10/go.mod h1:dOBOM2s4se3HcWefPe8sQLUalGXJ8yVXw58oK8jke3s=
-github.com/iotaledger/hive.go/core v1.0.0-rc.3.0.20231107225803-f89acd088c10 h1:FMassldB6buYv8nsfELSkKzR3mj326YNmLy4DNY+20o=
-github.com/iotaledger/hive.go/core v1.0.0-rc.3.0.20231107225803-f89acd088c10/go.mod h1:Mc+ACqBGPxrPMIPUBOm6/HL0J6m0iVMwjtIEKW3uow8=
-github.com/iotaledger/hive.go/crypto v0.0.0-20231107225803-f89acd088c10 h1:sGxsehUXmhWW5Vv9PBwuW1mlW2Npdb2yMonZgolVzHs=
-github.com/iotaledger/hive.go/crypto v0.0.0-20231107225803-f89acd088c10/go.mod h1:h3o6okvMSEK3KOX6pOp3yq1h9ohTkTfo6X8MzEadeb0=
-github.com/iotaledger/hive.go/ds v0.0.0-20231107225803-f89acd088c10 h1:NufkzT29n9OconEE6+8HMoCkW+MXiznGn+HxWrNPy1o=
-github.com/iotaledger/hive.go/ds v0.0.0-20231107225803-f89acd088c10/go.mod h1:3XkUSKfHaVxGbT0XAvjNlVYqPzhfLTGhDtdNA5UBPco=
-github.com/iotaledger/hive.go/ierrors v0.0.0-20231107225803-f89acd088c10 h1:M43fs0ybJXyVGnN55xG2OrMmXqbpYGudnU8zIA7NNL4=
-github.com/iotaledger/hive.go/ierrors v0.0.0-20231107225803-f89acd088c10/go.mod h1:HcE8B5lP96enc/OALTb2/rIIi+yOLouRoHOKRclKmC8=
-github.com/iotaledger/hive.go/kvstore v0.0.0-20231107225803-f89acd088c10 h1:nGffY7n3mxUFtWrKgbvNx7jYu2lGkeu01hp+8aLaOk8=
-github.com/iotaledger/hive.go/kvstore v0.0.0-20231107225803-f89acd088c10/go.mod h1:O/U3jtiUDeqqM0MZQFu2UPqS9fUm0C5hNISxlmg/thE=
-github.com/iotaledger/hive.go/lo v0.0.0-20231107225803-f89acd088c10 h1:ME7iE4yKMYEvfkFvOPswAWsZaq7mLkKiGN88K1X1OBg=
-github.com/iotaledger/hive.go/lo v0.0.0-20231107225803-f89acd088c10/go.mod h1:s4kzx9QY1MVWHJralj+3q5kI0eARtrJhphYD/iBbPfo=
-github.com/iotaledger/hive.go/log v0.0.0-20231107225803-f89acd088c10 h1:yhDHLCtdpLSiv/kDDLDkJZcJispd1OUAWIYF7RXFQi4=
-github.com/iotaledger/hive.go/log v0.0.0-20231107225803-f89acd088c10/go.mod h1:JvokzmpmFZPDskMlUqqjgHtD8usVJU4nAY/TNMGge8M=
-github.com/iotaledger/hive.go/runtime v0.0.0-20231107225803-f89acd088c10 h1:5tPaO+hxPTBp5J7Ap2oIqzHEXmYbrh5Rfh4y2l5KaQQ=
-github.com/iotaledger/hive.go/runtime v0.0.0-20231107225803-f89acd088c10/go.mod h1:jRw8yFipiPaqmTPHh7hTcxAP9u6pjRGpByS3REJKkbY=
-github.com/iotaledger/hive.go/serializer/v2 v2.0.0-rc.1.0.20231107225803-f89acd088c10 h1:KahaknpEVnJCgyaawYzRVR0rcX2/iCXiUXHvSjlMqEA=
-github.com/iotaledger/hive.go/serializer/v2 v2.0.0-rc.1.0.20231107225803-f89acd088c10/go.mod h1:SdK26z8/VhWtxaqCuQrufm80SELgowQPmu9T/8eUQ8g=
-github.com/iotaledger/hive.go/stringify v0.0.0-20231107225803-f89acd088c10 h1:1BQfYB6hxWGTjrv70SP3xxThxTV8t1AqEVQRRr5dTJI=
-github.com/iotaledger/hive.go/stringify v0.0.0-20231107225803-f89acd088c10/go.mod h1:FTo/UWzNYgnQ082GI9QVM9HFDERqf9rw9RivNpqrnTs=
-github.com/iotaledger/iota.go/v4 v4.0.0-20231102113728-20b8d01e826e h1:ZYRC1MHn/ghsqtjIpYGTxLQrh5n5eUmC0/YWnJiTRhk=
-github.com/iotaledger/iota.go/v4 v4.0.0-20231102113728-20b8d01e826e/go.mod h1:jqbLYq4a/FwuiPBqFfkAwwxU8vs3+kReRq2/tyX5qRA=
+github.com/iotaledger/hive.go/ads v0.0.0-20231108050255-98e0fa35e936 h1:2r4FgIGdc2lHcIbXiUFCCVq4+B0oZk9t6Z0SSLjrzCE=
+github.com/iotaledger/hive.go/ads v0.0.0-20231108050255-98e0fa35e936/go.mod h1:gbUvr01B5ha530GnNm8K2OsHXOd2BtzBYOMxyTX3iDg=
+github.com/iotaledger/hive.go/constraints v0.0.0-20231108050255-98e0fa35e936 h1:qkq0Wz+Y3J8QYRLd0fwTgHuur/A3k7d82BxOKSfvk8c=
+github.com/iotaledger/hive.go/constraints v0.0.0-20231108050255-98e0fa35e936/go.mod h1:dOBOM2s4se3HcWefPe8sQLUalGXJ8yVXw58oK8jke3s=
+github.com/iotaledger/hive.go/core v1.0.0-rc.3.0.20231108050255-98e0fa35e936 h1:GtsYwcCqRomhMo190TPxBrOzs6YnVmqkmQgT/lJrJRo=
+github.com/iotaledger/hive.go/core v1.0.0-rc.3.0.20231108050255-98e0fa35e936/go.mod h1:CdixkrB7VdQzEDlVuwsxPtsiJL/WXrQgz3PELIqlLko=
+github.com/iotaledger/hive.go/crypto v0.0.0-20231108050255-98e0fa35e936 h1:Xeb4w0g0Kv2ZjdCZQqz8oiqAU5qAy8OXG8kGTXSPzuY=
+github.com/iotaledger/hive.go/crypto v0.0.0-20231108050255-98e0fa35e936/go.mod h1:OQ9EVTTQT1mkO/16BgwSIyQlAhEg+Cptud/yutevWsI=
+github.com/iotaledger/hive.go/ds v0.0.0-20231108050255-98e0fa35e936 h1:NtQLSS0Lq5qg/w5nbMpXrlQpmcK3KiOaQmgZWoRc4mM=
+github.com/iotaledger/hive.go/ds v0.0.0-20231108050255-98e0fa35e936/go.mod h1:JE8cbZSvzbB5TrwXibg6M0B7ck35YxF30ItHBzQRlgc=
+github.com/iotaledger/hive.go/ierrors v0.0.0-20231108050255-98e0fa35e936 h1:o5S4KUAwToOLXoYYRj9ZgqeDsFv1VRM4+Mni0Tdj2Ck=
+github.com/iotaledger/hive.go/ierrors v0.0.0-20231108050255-98e0fa35e936/go.mod h1:HcE8B5lP96enc/OALTb2/rIIi+yOLouRoHOKRclKmC8=
+github.com/iotaledger/hive.go/kvstore v0.0.0-20231108050255-98e0fa35e936 h1:kXKJQ8UvbA8kI0Jx0EnlXbwDeZFY8pEX0Q6KaOPsYlQ=
+github.com/iotaledger/hive.go/kvstore v0.0.0-20231108050255-98e0fa35e936/go.mod h1:ytfKoHr/nF8u0y0G4mamfG0yjFtJiJVk0kgjnPOtsSY=
+github.com/iotaledger/hive.go/lo v0.0.0-20231108050255-98e0fa35e936 h1:coXPklQ7JgqTXIUXh3b4OHml1VIvI8x7pQsjsES/u/s=
+github.com/iotaledger/hive.go/lo v0.0.0-20231108050255-98e0fa35e936/go.mod h1:6Ee7i6b4tuTHuRYnPP8VUb0wr9XFI5qlqtnttBd9jRg=
+github.com/iotaledger/hive.go/log v0.0.0-20231108050255-98e0fa35e936 h1:VBvGnsVwqhoT9zMyMIlK5fPmz6fsbiPZOwdU1E8WU7o=
+github.com/iotaledger/hive.go/log v0.0.0-20231108050255-98e0fa35e936/go.mod h1:vzO4/wRkEJDEZb/9fD10oKU9k1bj4qLir2Uhl5U1FkM=
+github.com/iotaledger/hive.go/runtime v0.0.0-20231108050255-98e0fa35e936 h1:XbC1fmY87UJ/yMs8U2YqlUdJsqb0Xqj/ZYQKlZ7AUG8=
+github.com/iotaledger/hive.go/runtime v0.0.0-20231108050255-98e0fa35e936/go.mod h1:DrZPvUvLarK8C2qb+3H2vdypp/MuhpQmB3iMJbDCr/Q=
+github.com/iotaledger/hive.go/serializer/v2 v2.0.0-rc.1.0.20231108050255-98e0fa35e936 h1:LXhLW2cN9bQYoHQsgmJRb/jiRBRU5s2rLoCNjZfgHdg=
+github.com/iotaledger/hive.go/serializer/v2 v2.0.0-rc.1.0.20231108050255-98e0fa35e936/go.mod h1:FoH3T6yKlZJp8xm8K+zsQiibSynp32v21CpWx8xkek8=
+github.com/iotaledger/hive.go/stringify v0.0.0-20231108050255-98e0fa35e936 h1:Y4HgL5gm9S27usg5M2t6wi1BSdCxVorM62lwnpKuMd4=
+github.com/iotaledger/hive.go/stringify v0.0.0-20231108050255-98e0fa35e936/go.mod h1:FTo/UWzNYgnQ082GI9QVM9HFDERqf9rw9RivNpqrnTs=
+github.com/iotaledger/iota.go/v4 v4.0.0-20231108103955-bf75d703d8aa h1:A2nadmSbmn62f6wtrqvv/TCCF2sDiiwyDnl6brbRo1E=
+github.com/iotaledger/iota.go/v4 v4.0.0-20231108103955-bf75d703d8aa/go.mod h1:8iDORW4/e4NztyAGqjW07uSMjbhs7snbxw+81IWOczY=
 github.com/ipfs/go-cid v0.4.1 h1:A/T3qGvxi4kpKWWcPC/PgbvDA2bjVLO7n4UeVwnbs/s=
 github.com/ipfs/go-cid v0.4.1/go.mod h1:uQHwDeX4c6CtyrFwdqyhpNcxVewur1M7l7fNU7LKwZk=
 github.com/klauspost/cpuid/v2 v2.2.5 h1:0E5MSMDEoAulmXNFquVs//DdoomxaoTY1kUhbc/qbZg=
@@ -84,8 +84,6 @@ github.com/multiformats/go-multihash v0.2.3 h1:7Lyc8XfX/IY2jWb/gI7JP+o7JEq9hOa7B
 github.com/multiformats/go-multihash v0.2.3/go.mod h1:dXgKXCXjBzdscBLk9JkjINiEsCKRVch90MdaGiKsvSM=
 github.com/multiformats/go-varint v0.0.7 h1:sWSGR+f/eu5ABZA2ZpYKBILXTTs9JWpdEM/nEGOHFS8=
 github.com/multiformats/go-varint v0.0.7/go.mod h1:r8PUYw/fD/SjBCiKOoDlGF6QawOELpZAu9eioSos/OU=
-github.com/orcaman/writerseeker v0.0.0-20200621085525-1d3f536ff85e h1:s2RNOM/IGdY0Y6qfTeUKhDawdHDpK9RGBdx80qN4Ttw=
-github.com/orcaman/writerseeker v0.0.0-20200621085525-1d3f536ff85e/go.mod h1:nBdnFKj15wFbf94Rwfq4m30eAcyY9V/IyKAGQFtqkW0=
 github.com/otiai10/copy v1.14.0 h1:dCI/t1iTdYGtkvCuBG2BgR6KZa83PTclw4U5n2wAllU=
 github.com/otiai10/copy v1.14.0/go.mod h1:ECfuL02W+/FkTWZWgQqXPWZgW9oeKCSQ5qVfSc4qc4w=
 github.com/otiai10/mint v1.5.1 h1:XaPLeE+9vGbuyEHem1JNk3bYc7KKqyI/na0/mLd/Kks=
diff --git a/tools/genesis-snapshot/presets/presets.go b/tools/genesis-snapshot/presets/presets.go
index 315aebac9..5c81cdaa7 100644
--- a/tools/genesis-snapshot/presets/presets.go
+++ b/tools/genesis-snapshot/presets/presets.go
@@ -22,16 +22,14 @@ var Base = []options.Option[snapshotcreator.Options]{
 		iotago.NewV3ProtocolParameters(
 			iotago.WithNetworkOptions("default", "rms"),
 			iotago.WithSupplyOptions(4_600_000_000_000_000, 100, 1, 10, 100, 100, 100),
-			iotago.WithTimeProviderOptions(1696841745, 10, 13),
+			iotago.WithTimeProviderOptions(0, 1696841745, 10, 13),
 			iotago.WithLivenessOptions(30, 30, 7, 14, 30),
 			// increase/decrease threshold = fraction * slotDurationInSeconds * schedulerRate
 			iotago.WithCongestionControlOptions(500, 500, 500, 800000, 500000, 100000, 1000, 100),
 			iotago.WithWorkScoreOptions(25, 1, 100, 50, 10, 10, 50, 1, 10, 250),
 		),
 	),
-	snapshotcreator.WithRootBlocks(map[iotago.BlockID]iotago.CommitmentID{
-		iotago.EmptyBlockID: iotago.NewEmptyCommitment(3).MustID(),
-	}),
+	snapshotcreator.WithAddGenesisRootBlock(true),
 }
 
 var Docker = []options.Option[snapshotcreator.Options]{
@@ -135,7 +133,7 @@ var Docker = []options.Option[snapshotcreator.Options]{
 		iotago.NewV3ProtocolParameters(
 			iotago.WithNetworkOptions("docker", "rms"),
 			iotago.WithSupplyOptions(4_600_000_000_000_000, 1, 1, 10, 100, 100, 100),
-			iotago.WithTimeProviderOptions(time.Now().Unix(), 10, 13),
+			iotago.WithTimeProviderOptions(0, time.Now().Unix(), 10, 13),
 			iotago.WithLivenessOptions(30, 30, 7, 14, 30),
 			// increase/decrease threshold = fraction * slotDurationInSeconds * schedulerRate
 			iotago.WithCongestionControlOptions(500, 500, 500, 800000, 500000, 100000, 1000, 100),
@@ -244,7 +242,7 @@ var Feature = []options.Option[snapshotcreator.Options]{
 		iotago.NewV3ProtocolParameters(
 			iotago.WithNetworkOptions("feature", "rms"),
 			iotago.WithSupplyOptions(4_600_000_000_000_000, 100, 1, 10, 100, 100, 100),
-			iotago.WithTimeProviderOptions(time.Now().Unix(), 10, 13),
+			iotago.WithTimeProviderOptions(666666, time.Now().Unix(), 10, 13),
 			iotago.WithLivenessOptions(30, 30, 10, 20, 30),
 			// increase/decrease threshold = fraction * slotDurationInSeconds * schedulerRate
 			iotago.WithCongestionControlOptions(500, 500, 500, 800000, 500000, 100000, 1000, 100),
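Note on the presets.go hunks above: iotago.WithTimeProviderOptions now takes an additional leading argument ahead of the genesis Unix timestamp (0 for the Base and Docker presets, 666666 for Feature), and the explicit snapshotcreator.WithRootBlocks map is replaced by snapshotcreator.WithAddGenesisRootBlock(true). A minimal sketch of the new call pattern follows; the meaning of the leading argument (assumed here to be a genesis slot) and the snapshotcreator import path are assumptions, not confirmed by this changeset.

// Sketch only, not part of the diff. Assumes the first argument to
// WithTimeProviderOptions is a genesis slot and that snapshotcreator lives at
// the import path below.
package presets

import (
	"time"

	"github.com/iotaledger/hive.go/runtime/options"
	"github.com/iotaledger/iota-core/pkg/testsuite/snapshotcreator" // assumed import path
	iotago "github.com/iotaledger/iota.go/v4"
)

// Protocol parameters built with the new four-argument form, mirroring the
// Docker preset above: leading 0 (assumed genesis slot), genesis time set to
// time.Now(), and the same 10 / 13 values used in the hunks above.
var exampleProtocolParams = iotago.NewV3ProtocolParameters(
	iotago.WithNetworkOptions("docker", "rms"),
	iotago.WithTimeProviderOptions(0, time.Now().Unix(), 10, 13),
)

// Snapshot options using the new flag instead of an explicit root-block map:
// the snapshot creator derives the genesis root block itself.
var exampleSnapshotOptions = []options.Option[snapshotcreator.Options]{
	snapshotcreator.WithAddGenesisRootBlock(true),
}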