diff --git a/.github/workflows/feature-network-deploy.yml b/.github/workflows/feature-network-deploy.yml index 812411683..3a09e46d1 100644 --- a/.github/workflows/feature-network-deploy.yml +++ b/.github/workflows/feature-network-deploy.yml @@ -46,6 +46,25 @@ jobs: cache-from: type=local,src=/tmp/.buildx-cache cache-to: type=local,mode=max,dest=/tmp/.buildx-cache-new + - uses: actions/setup-go@v4 + with: + go-version-file: 'tools/genesis-snapshot/go.mod' + cache: false + + - name: Print Go version + run: go version + + - name: Generate genesis snapshot + working-directory: tools/genesis-snapshot + run: go run -tags=rocksdb . --config feature --seed 7R1itJx5hVuo9w9hjg5cwKFmek4HMSoBDgJZN8hKGxih --filename genesis-snapshot.bin + + - name: Upload snapshot + id: upload-snapshot + run: | + SNAPSHOT_URL=$(curl -T ./tools/genesis-snapshot/genesis-snapshot.bin https://transfer.sh) + echo "Snapshot URL: $SNAPSHOT_URL" + echo "snapshot_url=$SNAPSHOT_URL" >> $GITHUB_OUTPUT + - # Temp fix # https://github.com/docker/build-push-action/issues/252 # https://github.com/moby/buildkit/issues/1896 @@ -70,7 +89,7 @@ jobs: - name: Ansible deploy env: CUSTOM_SNAPSHOT_URL: '${{ github.event.inputs.snapshotUrl }}' - DEFAULT_SNAPSHOT_URL: 'https://0x0.st/HywH.bin' + DEFAULT_SNAPSHOT_URL: '${{ steps.upload-snapshot.outputs.snapshot_url }}' NETWORK_ENVIRONMENT: '${{ secrets.NETWORK_ENVIRONMENT }}' IOTA_CORE_DOCKER_IMAGE_REPO: 'iotaledger/iota-core' IOTA_CORE_DOCKER_IMAGE_TAG: 'feature' diff --git a/components/inx/server_blocks.go b/components/inx/server_blocks.go index 07a0a8ed3..6f95a99ee 100644 --- a/components/inx/server_blocks.go +++ b/components/inx/server_blocks.go @@ -46,6 +46,12 @@ func (s *Server) ListenToBlocks(_ *inx.NoParams, srv inx.INX_ListenToBlocksServe unhook := deps.Protocol.Events.Engine.Booker.BlockBooked.Hook(func(block *blocks.Block) { payload := inx.NewBlockWithBytes(block.ID(), block.ModelBlock().Data()) + + if ctx.Err() != nil { + // context is done, so we don't need to send the payload + return + } + if err := srv.Send(payload); err != nil { Component.LogErrorf("send error: %v", err) cancel() @@ -74,6 +80,13 @@ func (s *Server) ListenToAcceptedBlocks(_ *inx.NoParams, srv inx.INX_ListenToAcc if err != nil { Component.LogErrorf("get block metadata error: %v", err) cancel() + + return + } + + if ctx.Err() != nil { + // context is done, so we don't need to send the payload + return } if err := srv.Send(payload); err != nil { @@ -104,6 +117,13 @@ func (s *Server) ListenToConfirmedBlocks(_ *inx.NoParams, srv inx.INX_ListenToCo if err != nil { Component.LogErrorf("get block metadata error: %v", err) cancel() + + return + } + + if ctx.Err() != nil { + // context is done, so we don't need to send the payload + return } if err := srv.Send(payload); err != nil { diff --git a/components/inx/server_node.go b/components/inx/server_node.go index 0039cad2b..959e412ac 100644 --- a/components/inx/server_node.go +++ b/components/inx/server_node.go @@ -7,30 +7,30 @@ import ( "github.com/iotaledger/hive.go/runtime/event" "github.com/iotaledger/hive.go/runtime/workerpool" inx "github.com/iotaledger/inx/go" + "github.com/iotaledger/iota-core/pkg/model" "github.com/iotaledger/iota-core/pkg/protocol/engine/syncmanager" - iotago "github.com/iotaledger/iota.go/v4" ) func inxNodeStatus(status *syncmanager.SyncStatus) *inx.NodeStatus { - finalizedCommitmentID := iotago.EmptyCommitmentID + var finalizedCommitment *model.Commitment // HasPruned is false when a node just started from a snapshot and keeps data of the 
LastPrunedEpoch, thus still need // to send finalized commitment. if !status.HasPruned || status.LatestFinalizedSlot > deps.Protocol.CommittedAPI().TimeProvider().EpochEnd(status.LastPrunedEpoch) { - finalizedCommitment, err := deps.Protocol.MainEngineInstance().Storage.Commitments().Load(status.LatestFinalizedSlot) + var err error + finalizedCommitment, err = deps.Protocol.MainEngineInstance().Storage.Commitments().Load(status.LatestFinalizedSlot) if err != nil { return nil } - finalizedCommitmentID = finalizedCommitment.ID() } return &inx.NodeStatus{ - IsHealthy: status.NodeSynced, - IsBootstrapped: status.NodeBootstrapped, - LastAcceptedBlockSlot: uint32(status.LastAcceptedBlockSlot), - LastConfirmedBlockSlot: uint32(status.LastConfirmedBlockSlot), - LatestCommitment: inxCommitment(status.LatestCommitment), - LatestFinalizedCommitmentId: inx.NewCommitmentId(finalizedCommitmentID), - PruningEpoch: uint32(status.LastPrunedEpoch), + IsHealthy: status.NodeSynced, + IsBootstrapped: status.NodeBootstrapped, + LastAcceptedBlockSlot: uint32(status.LastAcceptedBlockSlot), + LastConfirmedBlockSlot: uint32(status.LastConfirmedBlockSlot), + LatestCommitment: inxCommitment(status.LatestCommitment), + LatestFinalizedCommitment: inxCommitment(finalizedCommitment), + PruningEpoch: uint32(status.LastPrunedEpoch), } } @@ -112,12 +112,11 @@ func (s *Server) ReadNodeConfiguration(context.Context, *inx.NoParams) (*inx.Nod return &inx.NodeConfiguration{ BaseToken: &inx.BaseToken{ - Name: deps.BaseToken.Name, - TickerSymbol: deps.BaseToken.TickerSymbol, - Unit: deps.BaseToken.Unit, - Subunit: deps.BaseToken.Subunit, - Decimals: deps.BaseToken.Decimals, - UseMetricPrefix: deps.BaseToken.UseMetricPrefix, + Name: deps.BaseToken.Name, + TickerSymbol: deps.BaseToken.TickerSymbol, + Unit: deps.BaseToken.Unit, + Subunit: deps.BaseToken.Subunit, + Decimals: deps.BaseToken.Decimals, }, ProtocolParameters: protoParams, }, nil diff --git a/components/inx/server_utxo.go b/components/inx/server_utxo.go index bc11ae9e0..d1e87561e 100644 --- a/components/inx/server_utxo.go +++ b/components/inx/server_utxo.go @@ -12,6 +12,7 @@ import ( "github.com/iotaledger/hive.go/runtime/workerpool" inx "github.com/iotaledger/inx/go" "github.com/iotaledger/iota-core/pkg/protocol/engine/mempool" + "github.com/iotaledger/iota-core/pkg/protocol/engine/notarization" "github.com/iotaledger/iota-core/pkg/protocol/engine/utxoledger" iotago "github.com/iotaledger/iota.go/v4" ) @@ -29,7 +30,7 @@ func NewLedgerOutput(o *utxoledger.Output) (*inx.LedgerOutput, error) { } includedSlot := o.SlotBooked() - if includedSlot <= latestCommitment.Slot() { + if includedSlot > 0 && includedSlot <= latestCommitment.Slot() { includedCommitment, err := deps.Protocol.MainEngineInstance().Storage.Commitments().Load(includedSlot) if err != nil { return nil, ierrors.Wrapf(err, "failed to load commitment with slot: %d", includedSlot) @@ -54,7 +55,7 @@ func NewLedgerSpent(s *utxoledger.Spent) (*inx.LedgerSpent, error) { latestCommitment := deps.Protocol.MainEngineInstance().SyncManager.LatestCommitment() spentSlot := s.SlotSpent() - if spentSlot <= latestCommitment.Slot() { + if spentSlot > 0 && spentSlot <= latestCommitment.Slot() { spentCommitment, err := deps.Protocol.MainEngineInstance().Storage.Commitments().Load(spentSlot) if err != nil { return nil, ierrors.Wrapf(err, "failed to load commitment with slot: %d", spentSlot) @@ -65,11 +66,11 @@ func NewLedgerSpent(s *utxoledger.Spent) (*inx.LedgerSpent, error) { return l, nil } -func NewLedgerUpdateBatchBegin(slot 
iotago.SlotIndex, newOutputsCount int, newSpentsCount int) *inx.LedgerUpdate { +func NewLedgerUpdateBatchBegin(commitmentID iotago.CommitmentID, newOutputsCount int, newSpentsCount int) *inx.LedgerUpdate { return &inx.LedgerUpdate{ Op: &inx.LedgerUpdate_BatchMarker{ BatchMarker: &inx.LedgerUpdate_Marker{ - Slot: uint32(slot), + CommitmentId: inx.NewCommitmentId(commitmentID), MarkerType: inx.LedgerUpdate_Marker_BEGIN, CreatedCount: uint32(newOutputsCount), ConsumedCount: uint32(newSpentsCount), @@ -78,11 +79,11 @@ func NewLedgerUpdateBatchBegin(slot iotago.SlotIndex, newOutputsCount int, newSp } } -func NewLedgerUpdateBatchEnd(slot iotago.SlotIndex, newOutputsCount int, newSpentsCount int) *inx.LedgerUpdate { +func NewLedgerUpdateBatchEnd(commitmentID iotago.CommitmentID, newOutputsCount int, newSpentsCount int) *inx.LedgerUpdate { return &inx.LedgerUpdate{ Op: &inx.LedgerUpdate_BatchMarker{ BatchMarker: &inx.LedgerUpdate_Marker{ - Slot: uint32(slot), + CommitmentId: inx.NewCommitmentId(commitmentID), MarkerType: inx.LedgerUpdate_Marker_END, CreatedCount: uint32(newOutputsCount), ConsumedCount: uint32(newSpentsCount), @@ -191,8 +192,13 @@ func (s *Server) ReadUnspentOutputs(_ *inx.NoParams, srv inx.INX_ReadUnspentOutp func (s *Server) ListenToLedgerUpdates(req *inx.SlotRangeRequest, srv inx.INX_ListenToLedgerUpdatesServer) error { createLedgerUpdatePayloadAndSend := func(slot iotago.SlotIndex, outputs utxoledger.Outputs, spents utxoledger.Spents) error { + commitment, err := deps.Protocol.MainEngineInstance().Storage.Commitments().Load(slot) + if err != nil { + return status.Errorf(codes.NotFound, "commitment for slot %d not found", slot) + } + // Send Begin - if err := srv.Send(NewLedgerUpdateBatchBegin(slot, len(outputs), len(spents))); err != nil { + if err := srv.Send(NewLedgerUpdateBatchBegin(commitment.ID(), len(outputs), len(spents))); err != nil { return fmt.Errorf("send error: %w", err) } @@ -221,7 +227,7 @@ func (s *Server) ListenToLedgerUpdates(req *inx.SlotRangeRequest, srv inx.INX_Li } // Send End - if err := srv.Send(NewLedgerUpdateBatchEnd(slot, len(outputs), len(spents))); err != nil { + if err := srv.Send(NewLedgerUpdateBatchEnd(commitment.ID(), len(outputs), len(spents))); err != nil { return fmt.Errorf("send error: %w", err) } @@ -317,8 +323,8 @@ func (s *Server) ListenToLedgerUpdates(req *inx.SlotRangeRequest, srv inx.INX_Li wp := workerpool.New("ListenToLedgerUpdates", workerpool.WithWorkerCount(workerCount)).Start() - unhook := deps.Protocol.Events.Engine.Ledger.StateDiffApplied.Hook(func(slot iotago.SlotIndex, newOutputs utxoledger.Outputs, newSpents utxoledger.Spents) { - done, err := handleRangedSend2(slot, newOutputs, newSpents, stream, catchUpFunc, sendFunc) + unhook := deps.Protocol.Events.Engine.Notarization.SlotCommitted.Hook(func(scd *notarization.SlotCommittedDetails) { + done, err := handleRangedSend2(scd.Commitment.Slot(), scd.OutputsCreated, scd.OutputsConsumed, stream, catchUpFunc, sendFunc) switch { case err != nil: innerErr = err @@ -353,7 +359,8 @@ func (s *Server) ListenToAcceptedTransactions(_ *inx.NoParams, srv inx.INX_Liste if err := transactionMetadata.Inputs().ForEach(func(stateMetadata mempool.StateMetadata) error { spentOutput, ok := stateMetadata.State().(*utxoledger.Output) if !ok { - return ierrors.Errorf("unexpected state metadata type: %T", stateMetadata.State()) + // not an Output, so we don't need to send it (could be MockedState, Commitment, BlockIssuanceCreditInput, RewardInput, etc.) 
+ return nil } inxSpent, err := NewLedgerSpent(utxoledger.NewSpent(spentOutput, transactionMetadata.ID(), slot)) @@ -366,13 +373,16 @@ func (s *Server) ListenToAcceptedTransactions(_ *inx.NoParams, srv inx.INX_Liste }); err != nil { Component.LogErrorf("error creating payload: %v", err) cancel() + + return } var created []*inx.LedgerOutput if err := transactionMetadata.Outputs().ForEach(func(stateMetadata mempool.StateMetadata) error { output, ok := stateMetadata.State().(*utxoledger.Output) if !ok { - return ierrors.Errorf("unexpected state metadata type: %T", stateMetadata.State()) + // not an Output, so we don't need to send it (could be MockedState, Commitment, BlockIssuanceCreditInput, RewardInput, etc.) + return nil } inxOutput, err := NewLedgerOutput(output) @@ -385,6 +395,8 @@ func (s *Server) ListenToAcceptedTransactions(_ *inx.NoParams, srv inx.INX_Liste }); err != nil { Component.LogErrorf("error creating payload: %v", err) cancel() + + return } payload := &inx.AcceptedTransaction{ @@ -393,6 +405,12 @@ func (s *Server) ListenToAcceptedTransactions(_ *inx.NoParams, srv inx.INX_Liste Consumed: consumed, Created: created, } + + if ctx.Err() != nil { + // context is done, so we don't need to send the payload + return + } + if err := srv.Send(payload); err != nil { Component.LogErrorf("send error: %v", err) cancel() diff --git a/components/protocol/params.go b/components/protocol/params.go index 596a83a16..36fbc3d5e 100644 --- a/components/protocol/params.go +++ b/components/protocol/params.go @@ -37,8 +37,6 @@ type BaseToken struct { Subunit string `default:"glow" usage:"the base token subunit"` // the base token amount of decimals Decimals uint32 `default:"6" usage:"the base token amount of decimals"` - // the base token uses the metric prefix - UseMetricPrefix bool `default:"false" usage:"the base token uses the metric prefix"` } // ParametersDatabase contains the definition of configuration parameters used by the storage layer. 
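Note on the INX stream changes above: ListenToBlocks, ListenToAcceptedBlocks, ListenToConfirmedBlocks (and, further down, ListenToAcceptedTransactions) all converge on the same guard, namely checking the stream context before sending and cancelling the subscription on a failed send. Below is a minimal, self-contained sketch of that pattern; sendGuarded, the example package name, and the function-typed parameters are hypothetical stand-ins for the srv.Send, cancel and Component.LogErrorf calls in the diff, not part of the inx API.

package example // hypothetical package, for illustration only

import (
	"context"
	"log"
)

// sendGuarded sketches the guard added in server_blocks.go and server_utxo.go:
// skip the send once the stream context is done, and cancel the listener when
// Send fails so the surrounding handler can unhook the event subscription.
func sendGuarded[T any](ctx context.Context, cancel context.CancelFunc, send func(T) error, payload T) {
	if ctx.Err() != nil {
		// context is done, so we don't need to send the payload
		return
	}

	if err := send(payload); err != nil {
		log.Printf("send error: %v", err)
		cancel()
	}
}

In the handlers themselves these checks are inlined per event hook, with Component.LogErrorf in place of the standard logger.
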
diff --git a/components/restapi/core/accounts.go b/components/restapi/core/accounts.go index 155773f03..381c0105b 100644 --- a/components/restapi/core/accounts.go +++ b/components/restapi/core/accounts.go @@ -117,7 +117,7 @@ func validatorByAccountID(c echo.Context) (*apimodels.ValidatorResponse, error) } return &apimodels.ValidatorResponse{ - AccountID: accountID, + AddressBech32: accountID.ToAddress().Bech32(deps.Protocol.CommittedAPI().ProtocolParameters().Bech32HRP()), PoolStake: accountData.ValidatorStake + accountData.DelegationStake, ValidatorStake: accountData.ValidatorStake, StakingEpochEnd: accountData.StakeEndEpoch, @@ -239,7 +239,7 @@ func selectedCommittee(c echo.Context) (*apimodels.CommitteeResponse, error) { committee := make([]*apimodels.CommitteeMemberResponse, 0, accounts.Size()) accounts.ForEach(func(accountID iotago.AccountID, seat *account.Pool) bool { committee = append(committee, &apimodels.CommitteeMemberResponse{ - AccountID: accountID, + AddressBech32: accountID.ToAddress().Bech32(deps.Protocol.CommittedAPI().ProtocolParameters().Bech32HRP()), PoolStake: seat.PoolStake, ValidatorStake: seat.ValidatorStake, FixedCost: seat.FixedCost, diff --git a/components/restapi/core/blocks.go b/components/restapi/core/blocks.go index 2b58599ea..0764cf329 100644 --- a/components/restapi/core/blocks.go +++ b/components/restapi/core/blocks.go @@ -12,7 +12,7 @@ import ( "github.com/iotaledger/iota.go/v4/nodeclient/apimodels" ) -func blockByID(c echo.Context) (*model.Block, error) { +func blockByID(c echo.Context) (*iotago.Block, error) { blockID, err := httpserver.ParseBlockIDParam(c, restapi.ParameterBlockID) if err != nil { return nil, ierrors.Wrapf(err, "failed to parse block ID %s", c.Param(restapi.ParameterBlockID)) @@ -23,7 +23,7 @@ func blockByID(c echo.Context) (*model.Block, error) { return nil, ierrors.Wrapf(echo.ErrNotFound, "block not found: %s", blockID.ToHex()) } - return block, nil + return block.ProtocolBlock(), nil } func blockMetadataByBlockID(blockID iotago.BlockID) (*apimodels.BlockMetadataResponse, error) { @@ -44,6 +44,28 @@ func blockMetadataByID(c echo.Context) (*apimodels.BlockMetadataResponse, error) return blockMetadataByBlockID(blockID) } +func blockWithMetadataByID(c echo.Context) (*apimodels.BlockWithMetadataResponse, error) { + blockID, err := httpserver.ParseBlockIDParam(c, restapi.ParameterBlockID) + if err != nil { + return nil, ierrors.Wrapf(err, "failed to parse block ID %s", c.Param(restapi.ParameterBlockID)) + } + + block, exists := deps.Protocol.MainEngineInstance().Block(blockID) + if !exists { + return nil, ierrors.Wrapf(echo.ErrNotFound, "block not found: %s", blockID.ToHex()) + } + + blockMetadata, err := blockMetadataByBlockID(blockID) + if err != nil { + return nil, err + } + + return &apimodels.BlockWithMetadataResponse{ + Block: block.ProtocolBlock(), + Metadata: blockMetadata, + }, nil +} + func blockIssuanceBySlot(slotIndex iotago.SlotIndex) (*apimodels.IssuanceBlockHeaderResponse, error) { references := deps.Protocol.MainEngineInstance().TipSelection.SelectTips(iotago.BasicBlockMaxParents) diff --git a/components/restapi/core/commitment.go b/components/restapi/core/commitment.go index 4ff0f581d..48f2598d2 100644 --- a/components/restapi/core/commitment.go +++ b/components/restapi/core/commitment.go @@ -34,6 +34,11 @@ func getUTXOChanges(slot iotago.SlotIndex) (*apimodels.UTXOChangesResponse, erro return nil, ierrors.Wrapf(echo.ErrInternalServerError, "failed to get slot diffs %d: %s", slot, err) } + commitment, err := 
deps.Protocol.MainEngineInstance().Storage.Commitments().Load(diffs.Slot) + if err != nil { + return nil, ierrors.Wrapf(echo.ErrInternalServerError, "failed to load commitment %d: %s", diffs.Slot, err) + } + createdOutputs := make(iotago.OutputIDs, len(diffs.Outputs)) consumedOutputs := make(iotago.OutputIDs, len(diffs.Spents)) @@ -46,7 +51,7 @@ func getUTXOChanges(slot iotago.SlotIndex) (*apimodels.UTXOChangesResponse, erro } return &apimodels.UTXOChangesResponse{ - Slot: slot, + CommitmentID: commitment.ID(), CreatedOutputs: createdOutputs, ConsumedOutputs: consumedOutputs, }, nil diff --git a/components/restapi/core/component.go b/components/restapi/core/component.go index 8361b8d8f..6785dc462 100644 --- a/components/restapi/core/component.go +++ b/components/restapi/core/component.go @@ -44,6 +44,12 @@ const ( // MIMEApplicationVendorIOTASerializerV2 => bytes. RouteBlockMetadata = "/blocks/:" + restapipkg.ParameterBlockID + "/metadata" + // RouteBlockWithMetadata is the route for getting a block, together with its metadata by its blockID. + // GET returns the block and metadata. + // MIMEApplicationJSON => json. + // MIMEApplicationVendorIOTASerializerV2 => bytes. + RouteBlockWithMetadata = "/blocks/:" + restapipkg.ParameterBlockID + "/full" + // RouteBlocks is the route for sending new blocks. // POST creates a single new block and returns the new block ID. // The block is parsed based on the given type in the request "Content-Type" header. @@ -181,12 +187,12 @@ func configure() error { }) routeGroup.GET(RouteBlock, func(c echo.Context) error { - block, err := blockByID(c) + resp, err := blockByID(c) if err != nil { return err } - return responseByHeader(c, block.ProtocolBlock()) + return responseByHeader(c, resp) }) routeGroup.GET(RouteBlockMetadata, func(c echo.Context) error { @@ -198,6 +204,15 @@ func configure() error { return responseByHeader(c, resp) }, checkNodeSynced()) + routeGroup.GET(RouteBlockWithMetadata, func(c echo.Context) error { + resp, err := blockWithMetadataByID(c) + if err != nil { + return err + } + + return responseByHeader(c, resp) + }, checkNodeSynced()) + routeGroup.POST(RouteBlocks, func(c echo.Context) error { resp, err := sendBlock(c) if err != nil { @@ -276,7 +291,7 @@ func configure() error { }) routeGroup.GET(RouteOutput, func(c echo.Context) error { - resp, err := getOutput(c) + resp, err := outputByID(c) if err != nil { return err } @@ -285,7 +300,7 @@ func configure() error { }) routeGroup.GET(RouteOutputMetadata, func(c echo.Context) error { - resp, err := getOutputMetadata(c) + resp, err := outputMetadataByID(c) if err != nil { return err } @@ -294,7 +309,7 @@ func configure() error { }) routeGroup.GET(RouteOutputWithMetadata, func(c echo.Context) error { - resp, err := getOutputWithMetadata(c) + resp, err := outputWithMetadataByID(c) if err != nil { return err } diff --git a/components/restapi/core/node.go b/components/restapi/core/node.go index 927c58515..c6b829c87 100644 --- a/components/restapi/core/node.go +++ b/components/restapi/core/node.go @@ -49,12 +49,11 @@ func info() *apimodels.InfoResponse { }, ProtocolParameters: protocolParameters(), BaseToken: &apimodels.InfoResBaseToken{ - Name: deps.BaseToken.Name, - TickerSymbol: deps.BaseToken.TickerSymbol, - Unit: deps.BaseToken.Unit, - Subunit: deps.BaseToken.Subunit, - Decimals: deps.BaseToken.Decimals, - UseMetricPrefix: deps.BaseToken.UseMetricPrefix, + Name: deps.BaseToken.Name, + TickerSymbol: deps.BaseToken.TickerSymbol, + Unit: deps.BaseToken.Unit, + Subunit: deps.BaseToken.Subunit, 
+ Decimals: deps.BaseToken.Decimals, }, Features: features, } diff --git a/components/restapi/core/utxo.go b/components/restapi/core/utxo.go index bdf62c89c..2fd9a7fad 100644 --- a/components/restapi/core/utxo.go +++ b/components/restapi/core/utxo.go @@ -10,7 +10,7 @@ import ( "github.com/iotaledger/iota.go/v4/nodeclient/apimodels" ) -func getOutput(c echo.Context) (*apimodels.OutputResponse, error) { +func outputByID(c echo.Context) (*apimodels.OutputResponse, error) { outputID, err := httpserver.ParseOutputIDParam(c, restapipkg.ParameterOutputID) if err != nil { return nil, ierrors.Wrapf(err, "failed to parse output ID %s", c.Param(restapipkg.ParameterOutputID)) @@ -27,7 +27,7 @@ func getOutput(c echo.Context) (*apimodels.OutputResponse, error) { }, nil } -func getOutputMetadata(c echo.Context) (*apimodels.OutputMetadata, error) { +func outputMetadataByID(c echo.Context) (*apimodels.OutputMetadata, error) { outputID, err := httpserver.ParseOutputIDParam(c, restapipkg.ParameterOutputID) if err != nil { return nil, ierrors.Wrapf(err, "failed to parse output ID %s", c.Param(restapipkg.ParameterOutputID)) @@ -45,7 +45,7 @@ func getOutputMetadata(c echo.Context) (*apimodels.OutputMetadata, error) { return newOutputMetadataResponse(output) } -func getOutputWithMetadata(c echo.Context) (*apimodels.OutputWithMetadataResponse, error) { +func outputWithMetadataByID(c echo.Context) (*apimodels.OutputWithMetadataResponse, error) { outputID, err := httpserver.ParseOutputIDParam(c, restapipkg.ParameterOutputID) if err != nil { return nil, ierrors.Wrapf(err, "failed to parse output ID %s", c.Param(restapipkg.ParameterOutputID)) diff --git a/config_defaults.json b/config_defaults.json index f89cc121a..2b6d4e5e1 100644 --- a/config_defaults.json +++ b/config_defaults.json @@ -112,8 +112,7 @@ "tickerSymbol": "SMR", "unit": "SMR", "subunit": "glow", - "decimals": 6, - "useMetricPrefix": false + "decimals": 6 } }, "dashboard": { diff --git a/documentation/docs/references/configuration.md b/documentation/docs/references/configuration.md index b6b3d58e2..08782cf56 100644 --- a/documentation/docs/references/configuration.md +++ b/documentation/docs/references/configuration.md @@ -341,14 +341,13 @@ Example: ### BaseToken -| Name | Description | Type | Default value | -| --------------- | ------------------------------------- | ------- | ------------- | -| name | The base token name | string | "Shimmer" | -| tickerSymbol | The base token ticker symbol | string | "SMR" | -| unit | The base token unit | string | "SMR" | -| subunit | The base token subunit | string | "glow" | -| decimals | The base token amount of decimals | uint | 6 | -| useMetricPrefix | The base token uses the metric prefix | boolean | false | +| Name | Description | Type | Default value | +| ------------ | --------------------------------- | ------ | ------------- | +| name | The base token name | string | "Shimmer" | +| tickerSymbol | The base token ticker symbol | string | "SMR" | +| unit | The base token unit | string | "SMR" | +| subunit | The base token subunit | string | "glow" | +| decimals | The base token amount of decimals | uint | 6 | Example: @@ -368,8 +367,7 @@ Example: "tickerSymbol": "SMR", "unit": "SMR", "subunit": "glow", - "decimals": 6, - "useMetricPrefix": false + "decimals": 6 } } } diff --git a/go.mod b/go.mod index 4e3018cb5..783007690 100644 --- a/go.mod +++ b/go.mod @@ -11,21 +11,21 @@ require ( github.com/gorilla/websocket v1.5.1 github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 github.com/iotaledger/hive.go/ads 
v0.0.0-20231110191152-7135670285dc - github.com/iotaledger/hive.go/app v0.0.0-20231110191152-7135670285dc - github.com/iotaledger/hive.go/constraints v0.0.0-20231110191152-7135670285dc - github.com/iotaledger/hive.go/core v1.0.0-rc.3.0.20231110191152-7135670285dc - github.com/iotaledger/hive.go/crypto v0.0.0-20231110191152-7135670285dc - github.com/iotaledger/hive.go/ds v0.0.0-20231110191152-7135670285dc - github.com/iotaledger/hive.go/ierrors v0.0.0-20231110191152-7135670285dc + github.com/iotaledger/hive.go/app v0.0.0-20231113110812-4ca2b6cc9a42 + github.com/iotaledger/hive.go/constraints v0.0.0-20231113110812-4ca2b6cc9a42 + github.com/iotaledger/hive.go/core v1.0.0-rc.3.0.20231113110812-4ca2b6cc9a42 + github.com/iotaledger/hive.go/crypto v0.0.0-20231113110812-4ca2b6cc9a42 + github.com/iotaledger/hive.go/ds v0.0.0-20231113110812-4ca2b6cc9a42 + github.com/iotaledger/hive.go/ierrors v0.0.0-20231113110812-4ca2b6cc9a42 github.com/iotaledger/hive.go/kvstore v0.0.0-20231110191152-7135670285dc - github.com/iotaledger/hive.go/lo v0.0.0-20231110191152-7135670285dc - github.com/iotaledger/hive.go/logger v0.0.0-20231110191152-7135670285dc - github.com/iotaledger/hive.go/runtime v0.0.0-20231110191152-7135670285dc - github.com/iotaledger/hive.go/serializer/v2 v2.0.0-rc.1.0.20231110191152-7135670285dc - github.com/iotaledger/hive.go/stringify v0.0.0-20231110191152-7135670285dc - github.com/iotaledger/inx-app v1.0.0-rc.3.0.20231110132801-e38d9fbdd467 - github.com/iotaledger/inx/go v1.0.0-rc.2.0.20231110132251-8abdb05cce43 - github.com/iotaledger/iota.go/v4 v4.0.0-20231110131407-263d0662856b + github.com/iotaledger/hive.go/lo v0.0.0-20231113110812-4ca2b6cc9a42 + github.com/iotaledger/hive.go/logger v0.0.0-20231113110812-4ca2b6cc9a42 + github.com/iotaledger/hive.go/runtime v0.0.0-20231113110812-4ca2b6cc9a42 + github.com/iotaledger/hive.go/serializer/v2 v2.0.0-rc.1.0.20231113110812-4ca2b6cc9a42 + github.com/iotaledger/hive.go/stringify v0.0.0-20231113110812-4ca2b6cc9a42 + github.com/iotaledger/inx-app v1.0.0-rc.3.0.20231120094046-1308e2a5e072 + github.com/iotaledger/inx/go v1.0.0-rc.2.0.20231120082637-ccd5b8465251 + github.com/iotaledger/iota.go/v4 v4.0.0-20231120063545-80c263f28140 github.com/labstack/echo/v4 v4.11.3 github.com/labstack/gommon v0.4.1 github.com/libp2p/go-libp2p v0.32.0 @@ -62,7 +62,7 @@ require ( github.com/dustin/go-humanize v1.0.1 // indirect github.com/eclipse/paho.mqtt.golang v1.4.3 // indirect github.com/elastic/gosigar v0.14.2 // indirect - github.com/ethereum/go-ethereum v1.13.4 // indirect + github.com/ethereum/go-ethereum v1.13.5 // indirect github.com/fatih/structs v1.1.0 // indirect github.com/felixge/fgprof v0.9.3 // indirect github.com/fjl/memsize v0.0.2 // indirect diff --git a/go.sum b/go.sum index eea324bb8..d8a8e6585 100644 --- a/go.sum +++ b/go.sum @@ -98,8 +98,8 @@ github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.m github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= -github.com/ethereum/go-ethereum v1.13.4 h1:25HJnaWVg3q1O7Z62LaaI6S9wVq8QCw3K88g8wEzrcM= -github.com/ethereum/go-ethereum v1.13.4/go.mod h1:I0U5VewuuTzvBtVzKo7b3hJzDhXOUtn9mJW7SsIPB0Q= +github.com/ethereum/go-ethereum v1.13.5 h1:U6TCRciCqZRe4FPXmy1sMGxTfuk8P7u2UoinF3VbaFk= 
+github.com/ethereum/go-ethereum v1.13.5/go.mod h1:yMTu38GSuyxaYzQMViqNmQ1s3cE84abZexQmTgenWk0= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU= github.com/fatih/structs v1.1.0 h1:Q7juDM0QtcnhCpeyLGQKyg4TOIghuNXrkL32pHAUMxo= @@ -277,38 +277,38 @@ github.com/iotaledger/grocksdb v1.7.5-0.20230220105546-5162e18885c7 h1:dTrD7X2PT github.com/iotaledger/grocksdb v1.7.5-0.20230220105546-5162e18885c7/go.mod h1:ZRdPu684P0fQ1z8sXz4dj9H5LWHhz4a9oCtvjunkSrw= github.com/iotaledger/hive.go/ads v0.0.0-20231110191152-7135670285dc h1:PsArE43UkLymmDy9r7n42Yd1pv1iq4FwSx3iv2Mo+vc= github.com/iotaledger/hive.go/ads v0.0.0-20231110191152-7135670285dc/go.mod h1:gbUvr01B5ha530GnNm8K2OsHXOd2BtzBYOMxyTX3iDg= -github.com/iotaledger/hive.go/app v0.0.0-20231110191152-7135670285dc h1:jMbElktKULtS8pA8MK5i5BTbOy+dtwAOGmVSZ5x6J2s= -github.com/iotaledger/hive.go/app v0.0.0-20231110191152-7135670285dc/go.mod h1:+riYmeLApkLlj4+EpuJpEJAsj/KGfD7cqLGy7oTsPOM= -github.com/iotaledger/hive.go/constraints v0.0.0-20231110191152-7135670285dc h1:qeE5T8LXGjKaFduWCt06CXsUTkhfHNx6hOD5xYP31QU= -github.com/iotaledger/hive.go/constraints v0.0.0-20231110191152-7135670285dc/go.mod h1:dOBOM2s4se3HcWefPe8sQLUalGXJ8yVXw58oK8jke3s= -github.com/iotaledger/hive.go/core v1.0.0-rc.3.0.20231110191152-7135670285dc h1:dyguf5k/eVGyv94ISm/FDtInOktce6koo+QtJvAPUT8= -github.com/iotaledger/hive.go/core v1.0.0-rc.3.0.20231110191152-7135670285dc/go.mod h1:CdixkrB7VdQzEDlVuwsxPtsiJL/WXrQgz3PELIqlLko= -github.com/iotaledger/hive.go/crypto v0.0.0-20231110191152-7135670285dc h1:3wT7e5fRdDnnomkM6xPD110BCFz66MaXKxYUvLFuYkc= -github.com/iotaledger/hive.go/crypto v0.0.0-20231110191152-7135670285dc/go.mod h1:OQ9EVTTQT1mkO/16BgwSIyQlAhEg+Cptud/yutevWsI= -github.com/iotaledger/hive.go/ds v0.0.0-20231110191152-7135670285dc h1:YQUKGFcOBGKSrok++Er5SZTtQx0UHTRgH4cvlHVOiwc= -github.com/iotaledger/hive.go/ds v0.0.0-20231110191152-7135670285dc/go.mod h1:JE8cbZSvzbB5TrwXibg6M0B7ck35YxF30ItHBzQRlgc= -github.com/iotaledger/hive.go/ierrors v0.0.0-20231110191152-7135670285dc h1:sNFIiT+gEE6UlftfiBdrsUBIJtnhV6EpwVRw2YpbhUc= -github.com/iotaledger/hive.go/ierrors v0.0.0-20231110191152-7135670285dc/go.mod h1:HcE8B5lP96enc/OALTb2/rIIi+yOLouRoHOKRclKmC8= +github.com/iotaledger/hive.go/app v0.0.0-20231113110812-4ca2b6cc9a42 h1:K6VF23FOqHTRdk5OzsuBkYlGV008SZgKYqNwb0bp3rk= +github.com/iotaledger/hive.go/app v0.0.0-20231113110812-4ca2b6cc9a42/go.mod h1:+riYmeLApkLlj4+EpuJpEJAsj/KGfD7cqLGy7oTsPOM= +github.com/iotaledger/hive.go/constraints v0.0.0-20231113110812-4ca2b6cc9a42 h1:+PyLPZhRHy+Negjpuj0CSLaObpErEH7yI6HB2z5N6b0= +github.com/iotaledger/hive.go/constraints v0.0.0-20231113110812-4ca2b6cc9a42/go.mod h1:dOBOM2s4se3HcWefPe8sQLUalGXJ8yVXw58oK8jke3s= +github.com/iotaledger/hive.go/core v1.0.0-rc.3.0.20231113110812-4ca2b6cc9a42 h1:3dW4gz0Vr9BogN826HRTp0OFlbngjhWcVPUfDhJ57Yw= +github.com/iotaledger/hive.go/core v1.0.0-rc.3.0.20231113110812-4ca2b6cc9a42/go.mod h1:CdixkrB7VdQzEDlVuwsxPtsiJL/WXrQgz3PELIqlLko= +github.com/iotaledger/hive.go/crypto v0.0.0-20231113110812-4ca2b6cc9a42 h1:t6EKe+O7XAmbe07cVHuM/3aBLEbVIY4D6yefANB4PUA= +github.com/iotaledger/hive.go/crypto v0.0.0-20231113110812-4ca2b6cc9a42/go.mod h1:OQ9EVTTQT1mkO/16BgwSIyQlAhEg+Cptud/yutevWsI= +github.com/iotaledger/hive.go/ds v0.0.0-20231113110812-4ca2b6cc9a42 h1:QZiMlDxmikF64zimWQunTrsEGOK9ydRahUAz2I46JAk= +github.com/iotaledger/hive.go/ds v0.0.0-20231113110812-4ca2b6cc9a42/go.mod 
h1:JE8cbZSvzbB5TrwXibg6M0B7ck35YxF30ItHBzQRlgc= +github.com/iotaledger/hive.go/ierrors v0.0.0-20231113110812-4ca2b6cc9a42 h1:gxlZ4zL6EfLyqT0+hIFV3WVE0FrPVgV5cQdyn36vPXQ= +github.com/iotaledger/hive.go/ierrors v0.0.0-20231113110812-4ca2b6cc9a42/go.mod h1:HcE8B5lP96enc/OALTb2/rIIi+yOLouRoHOKRclKmC8= github.com/iotaledger/hive.go/kvstore v0.0.0-20231110191152-7135670285dc h1:3fsqfM2NqfhrewVdlKT3MHcXxVNvUCSP7P32il1ypa0= github.com/iotaledger/hive.go/kvstore v0.0.0-20231110191152-7135670285dc/go.mod h1:ytfKoHr/nF8u0y0G4mamfG0yjFtJiJVk0kgjnPOtsSY= -github.com/iotaledger/hive.go/lo v0.0.0-20231110191152-7135670285dc h1:OrQBscQTsAzAJGwVs7qlPgczbvufsbENkOYRmyM+CF4= -github.com/iotaledger/hive.go/lo v0.0.0-20231110191152-7135670285dc/go.mod h1:6Ee7i6b4tuTHuRYnPP8VUb0wr9XFI5qlqtnttBd9jRg= +github.com/iotaledger/hive.go/lo v0.0.0-20231113110812-4ca2b6cc9a42 h1:kcHkWyURZDVqO80OmJo5Z+wTJB6H+s52WAnU575vX0o= +github.com/iotaledger/hive.go/lo v0.0.0-20231113110812-4ca2b6cc9a42/go.mod h1:6Ee7i6b4tuTHuRYnPP8VUb0wr9XFI5qlqtnttBd9jRg= github.com/iotaledger/hive.go/log v0.0.0-20231110191152-7135670285dc h1:joYrsSZuVG3DfAQR9iS3qjnMExJ0qNp2+369sxb1Y4g= github.com/iotaledger/hive.go/log v0.0.0-20231110191152-7135670285dc/go.mod h1:vzO4/wRkEJDEZb/9fD10oKU9k1bj4qLir2Uhl5U1FkM= -github.com/iotaledger/hive.go/logger v0.0.0-20231110191152-7135670285dc h1:p4K5bCNRVmbzVXZUa53Hg8s6gCW+tYjhG1f3C+1F044= -github.com/iotaledger/hive.go/logger v0.0.0-20231110191152-7135670285dc/go.mod h1:w1psHM2MuKsen1WdsPKrpqElYH7ZOQ+YdQIgJZg4HTo= -github.com/iotaledger/hive.go/runtime v0.0.0-20231110191152-7135670285dc h1:dN9VYzV53oz2TlHHGtRtqaGvMDvFRW0Uh433z13k6+E= -github.com/iotaledger/hive.go/runtime v0.0.0-20231110191152-7135670285dc/go.mod h1:DrZPvUvLarK8C2qb+3H2vdypp/MuhpQmB3iMJbDCr/Q= -github.com/iotaledger/hive.go/serializer/v2 v2.0.0-rc.1.0.20231110191152-7135670285dc h1:/DIsAs3PWCNkHoLXR2+uW34VAvZvfiCCJYA/rczfnmw= -github.com/iotaledger/hive.go/serializer/v2 v2.0.0-rc.1.0.20231110191152-7135670285dc/go.mod h1:FoH3T6yKlZJp8xm8K+zsQiibSynp32v21CpWx8xkek8= -github.com/iotaledger/hive.go/stringify v0.0.0-20231110191152-7135670285dc h1:Dp9sOvU2B7xoyX28bYZgUUDAIqMCBhsmK2vWhIgDyWE= -github.com/iotaledger/hive.go/stringify v0.0.0-20231110191152-7135670285dc/go.mod h1:FTo/UWzNYgnQ082GI9QVM9HFDERqf9rw9RivNpqrnTs= -github.com/iotaledger/inx-app v1.0.0-rc.3.0.20231110132801-e38d9fbdd467 h1:2FNiPAUbHOJ+mLI1aU81QaoitbkebxJWUEylPdnC2Lc= -github.com/iotaledger/inx-app v1.0.0-rc.3.0.20231110132801-e38d9fbdd467/go.mod h1:bXOm6f+0zP19Ku/ozcSWZQiJb9ge9X7gg1TEcpRexUQ= -github.com/iotaledger/inx/go v1.0.0-rc.2.0.20231110132251-8abdb05cce43 h1:Rs1vQypwaWvs+BqQWoGu6ToVl2F8eSErJabd5lmO4Pw= -github.com/iotaledger/inx/go v1.0.0-rc.2.0.20231110132251-8abdb05cce43/go.mod h1:MvgF3pUPvdH/xIfrgdURFlpTyvnRWgcBMaTQb0GEKf0= -github.com/iotaledger/iota.go/v4 v4.0.0-20231110131407-263d0662856b h1:eU9vrxmXr1rMs67BsIWrfmEK+IjIsOnbl2XTlTtNIls= -github.com/iotaledger/iota.go/v4 v4.0.0-20231110131407-263d0662856b/go.mod h1:1CUJKGvkOUGXakxFZGAagEQDX9qYyhzIElmUHCHo9RM= +github.com/iotaledger/hive.go/logger v0.0.0-20231113110812-4ca2b6cc9a42 h1:uD99UbTtBM5SIP9N3c/3BBLtb0frGYFsZ2lS8Zxtqr4= +github.com/iotaledger/hive.go/logger v0.0.0-20231113110812-4ca2b6cc9a42/go.mod h1:w1psHM2MuKsen1WdsPKrpqElYH7ZOQ+YdQIgJZg4HTo= +github.com/iotaledger/hive.go/runtime v0.0.0-20231113110812-4ca2b6cc9a42 h1:hpR++ME3Y3CcxA431Zg0PgcCJUNkbBqjNXxR/bs+NdI= +github.com/iotaledger/hive.go/runtime v0.0.0-20231113110812-4ca2b6cc9a42/go.mod h1:DrZPvUvLarK8C2qb+3H2vdypp/MuhpQmB3iMJbDCr/Q= 
+github.com/iotaledger/hive.go/serializer/v2 v2.0.0-rc.1.0.20231113110812-4ca2b6cc9a42 h1:hepsnGvaS39azq80GV8DT9HlexoO/RqJbyiW5FXZ0HQ= +github.com/iotaledger/hive.go/serializer/v2 v2.0.0-rc.1.0.20231113110812-4ca2b6cc9a42/go.mod h1:FoH3T6yKlZJp8xm8K+zsQiibSynp32v21CpWx8xkek8= +github.com/iotaledger/hive.go/stringify v0.0.0-20231113110812-4ca2b6cc9a42 h1:9c7NiX2cnNPHR9UNWINDqNkolupXiDF3543pR6KLwIg= +github.com/iotaledger/hive.go/stringify v0.0.0-20231113110812-4ca2b6cc9a42/go.mod h1:FTo/UWzNYgnQ082GI9QVM9HFDERqf9rw9RivNpqrnTs= +github.com/iotaledger/inx-app v1.0.0-rc.3.0.20231120094046-1308e2a5e072 h1:xbaW2dnDZy0ThcEcdK7ir3b+ynBXsn0R14lgxiFVuB0= +github.com/iotaledger/inx-app v1.0.0-rc.3.0.20231120094046-1308e2a5e072/go.mod h1:iFiY6UukYeL8D3N1mtg4jh/9lxTBhzG0QgtD+w0gpps= +github.com/iotaledger/inx/go v1.0.0-rc.2.0.20231120082637-ccd5b8465251 h1:bYGO8jXNXJNMGPG9etGW7WXfLbRU9ofx1xdd29/sS9M= +github.com/iotaledger/inx/go v1.0.0-rc.2.0.20231120082637-ccd5b8465251/go.mod h1:chzj8FDIeXHIh3D52QTZ7imADlzdkhg7o7E2Qr85MJ8= +github.com/iotaledger/iota.go/v4 v4.0.0-20231120063545-80c263f28140 h1:8zHRYT1KADR9bOLUg7Ia4XA3StBHzV4Tb2Qtp42KLN8= +github.com/iotaledger/iota.go/v4 v4.0.0-20231120063545-80c263f28140/go.mod h1:1CUJKGvkOUGXakxFZGAagEQDX9qYyhzIElmUHCHo9RM= github.com/ipfs/boxo v0.13.1 h1:nQ5oQzcMZR3oL41REJDcTbrvDvuZh3J9ckc9+ILeRQI= github.com/ipfs/boxo v0.13.1/go.mod h1:btrtHy0lmO1ODMECbbEY1pxNtrLilvKSYLoGQt1yYCk= github.com/ipfs/go-cid v0.4.1 h1:A/T3qGvxi4kpKWWcPC/PgbvDA2bjVLO7n4UeVwnbs/s= diff --git a/pkg/protocol/engine/attestation/slotattestation/manager.go b/pkg/protocol/engine/attestation/slotattestation/manager.go index ad73fde40..7cf4d45bf 100644 --- a/pkg/protocol/engine/attestation/slotattestation/manager.go +++ b/pkg/protocol/engine/attestation/slotattestation/manager.go @@ -329,8 +329,10 @@ func (m *Manager) Reset() { } func (m *Manager) computeAttestationCommitmentOffset(slot iotago.SlotIndex) (cutoffSlot iotago.SlotIndex, isValid bool) { - if slot < m.apiProvider.APIForSlot(slot).ProtocolParameters().MaxCommittableAge() { - return 0, false + protocolParams := m.apiProvider.APIForSlot(slot).ProtocolParameters() + + if slot < protocolParams.GenesisSlot()+protocolParams.MaxCommittableAge() { + return protocolParams.GenesisSlot(), false } return slot - m.apiProvider.APIForSlot(slot).ProtocolParameters().MaxCommittableAge(), true diff --git a/pkg/protocol/engine/attestation/slotattestation/snapshot.go b/pkg/protocol/engine/attestation/slotattestation/snapshot.go index 0b4b3c14e..af034322c 100644 --- a/pkg/protocol/engine/attestation/slotattestation/snapshot.go +++ b/pkg/protocol/engine/attestation/slotattestation/snapshot.go @@ -15,7 +15,6 @@ func (m *Manager) Import(reader io.ReadSeeker) error { var attestations []*iotago.Attestation if err := stream.ReadCollection(reader, serializer.SeriLengthPrefixTypeAsUint32, func(i int) error { - attestation, err := stream.ReadObjectWithSize[*iotago.Attestation](reader, serializer.SeriLengthPrefixTypeAsUint16, iotago.AttestationFromBytes(m.apiProvider)) if err != nil { return ierrors.Wrapf(err, "failed to read attestation %d", i) @@ -51,7 +50,9 @@ func (m *Manager) Export(writer io.WriteSeeker, targetSlot iotago.SlotIndex) err } if _, isValid := m.computeAttestationCommitmentOffset(targetSlot); !isValid { - if err := stream.Write(writer, uint64(0)); err != nil { + if err := stream.WriteCollection(writer, serializer.SeriLengthPrefixTypeAsUint32, func() (int, error) { + return 0, nil + }); err != nil { return ierrors.Wrap(err, "failed to write 0 
attestation count") } diff --git a/pkg/protocol/engine/congestioncontrol/scheduler/drr/scheduler.go b/pkg/protocol/engine/congestioncontrol/scheduler/drr/scheduler.go index 6b4a6458b..f4bba78bf 100644 --- a/pkg/protocol/engine/congestioncontrol/scheduler/drr/scheduler.go +++ b/pkg/protocol/engine/congestioncontrol/scheduler/drr/scheduler.go @@ -143,6 +143,8 @@ func (s *Scheduler) Shutdown() { s.bufferMutex.Lock() defer s.bufferMutex.Unlock() + s.TriggerShutdown() + // validator workers need to be shut down first, otherwise they will hang on the shutdown channel. s.validatorBuffer.buffer.ForEach(func(accountID iotago.AccountID, validatorQueue *ValidatorQueue) bool { s.shutdownValidatorQueue(validatorQueue) @@ -152,9 +154,10 @@ func (s *Scheduler) Shutdown() { s.validatorBuffer.Clear() close(s.shutdownSignal) - s.TriggerStopped() s.workersWg.Wait() + + s.TriggerStopped() } // Start starts the scheduler. diff --git a/pkg/protocol/engine/congestioncontrol/scheduler/passthrough/scheduler.go b/pkg/protocol/engine/congestioncontrol/scheduler/passthrough/scheduler.go index f85b9fd21..2b8f9183e 100644 --- a/pkg/protocol/engine/congestioncontrol/scheduler/passthrough/scheduler.go +++ b/pkg/protocol/engine/congestioncontrol/scheduler/passthrough/scheduler.go @@ -36,6 +36,8 @@ func New() *Scheduler { } func (s *Scheduler) Shutdown() { + s.TriggerShutdown() + s.TriggerStopped() } func (s *Scheduler) IsBlockIssuerReady(_ iotago.AccountID, _ ...*blocks.Block) bool { diff --git a/pkg/protocol/engine/consensus/blockgadget/thresholdblockgadget/confirmation_ratification.go b/pkg/protocol/engine/consensus/blockgadget/thresholdblockgadget/confirmation_ratification.go index a86de875d..fc66ca885 100644 --- a/pkg/protocol/engine/consensus/blockgadget/thresholdblockgadget/confirmation_ratification.go +++ b/pkg/protocol/engine/consensus/blockgadget/thresholdblockgadget/confirmation_ratification.go @@ -51,7 +51,7 @@ func (g *Gadget) trackConfirmationRatifierWeight(votingBlock *blocks.Block) { func (g *Gadget) shouldConfirm(block *blocks.Block) bool { blockSeats := len(block.ConfirmationRatifiers()) - totalCommitteeSeats := g.seatManager.SeatCount() + totalCommitteeSeats := g.seatManager.SeatCountInSlot(block.ID().Slot()) return votes.IsThresholdReached(blockSeats, totalCommitteeSeats, g.optsConfirmationThreshold) } diff --git a/pkg/protocol/engine/consensus/blockgadget/thresholdblockgadget/witness_weight.go b/pkg/protocol/engine/consensus/blockgadget/thresholdblockgadget/witness_weight.go index 5faa0f703..3934e82d3 100644 --- a/pkg/protocol/engine/consensus/blockgadget/thresholdblockgadget/witness_weight.go +++ b/pkg/protocol/engine/consensus/blockgadget/thresholdblockgadget/witness_weight.go @@ -87,7 +87,7 @@ func (g *Gadget) TrackWitnessWeight(votingBlock *blocks.Block) { } func (g *Gadget) shouldPreAcceptAndPreConfirm(block *blocks.Block) (preAccept bool, preConfirm bool) { - committeeTotalSeats := g.seatManager.SeatCount() + committeeTotalSeats := g.seatManager.SeatCountInSlot(block.ID().Slot()) blockSeats := len(block.Witnesses()) onlineCommitteeTotalSeats := g.seatManager.OnlineCommittee().Size() diff --git a/pkg/protocol/engine/consensus/slotgadget/totalweightslotgadget/gadget.go b/pkg/protocol/engine/consensus/slotgadget/totalweightslotgadget/gadget.go index 78e09984f..d882ed1de 100644 --- a/pkg/protocol/engine/consensus/slotgadget/totalweightslotgadget/gadget.go +++ b/pkg/protocol/engine/consensus/slotgadget/totalweightslotgadget/gadget.go @@ -120,9 +120,9 @@ func (g *Gadget) trackVotes(block *blocks.Block) 
{ } func (g *Gadget) refreshSlotFinalization(tracker *slottracker.SlotTracker, previousLatestSlotIndex iotago.SlotIndex, newLatestSlotIndex iotago.SlotIndex) (finalizedSlots []iotago.SlotIndex) { - committeeTotalSeats := g.seatManager.SeatCount() for i := lo.Max(g.lastFinalizedSlot, previousLatestSlotIndex) + 1; i <= newLatestSlotIndex; i++ { + committeeTotalSeats := g.seatManager.SeatCountInSlot(i) attestorsTotalSeats := len(tracker.Voters(i)) if !votes.IsThresholdReached(attestorsTotalSeats, committeeTotalSeats, g.optsSlotFinalizationThreshold) { diff --git a/pkg/protocol/engine/ledger/events.go b/pkg/protocol/engine/ledger/events.go index ad30d98c0..5549bfcc9 100644 --- a/pkg/protocol/engine/ledger/events.go +++ b/pkg/protocol/engine/ledger/events.go @@ -2,12 +2,10 @@ package ledger import ( "github.com/iotaledger/hive.go/runtime/event" - "github.com/iotaledger/iota-core/pkg/protocol/engine/utxoledger" iotago "github.com/iotaledger/iota.go/v4" ) type Events struct { - StateDiffApplied *event.Event3[iotago.SlotIndex, utxoledger.Outputs, utxoledger.Spents] AccountCreated *event.Event1[iotago.AccountID] AccountDestroyed *event.Event1[iotago.AccountID] @@ -17,7 +15,6 @@ type Events struct { // NewEvents contains the constructor of the Events object (it is generated by a generic factory). var NewEvents = event.CreateGroupConstructor(func() (newEvents *Events) { return &Events{ - StateDiffApplied: event.New3[iotago.SlotIndex, utxoledger.Outputs, utxoledger.Spents](), AccountCreated: event.New1[iotago.AccountID](), AccountDestroyed: event.New1[iotago.AccountID](), } diff --git a/pkg/protocol/engine/ledger/ledger.go b/pkg/protocol/engine/ledger/ledger.go index d0ac06856..3c2a1f971 100644 --- a/pkg/protocol/engine/ledger/ledger.go +++ b/pkg/protocol/engine/ledger/ledger.go @@ -37,7 +37,7 @@ type Ledger interface { ManaManager() *mana.Manager RMCManager() *rmc.Manager - CommitSlot(slot iotago.SlotIndex) (stateRoot, mutationRoot, accountRoot iotago.Identifier, err error) + CommitSlot(slot iotago.SlotIndex) (stateRoot, mutationRoot, accountRoot iotago.Identifier, created utxoledger.Outputs, consumed utxoledger.Spents, err error) Import(reader io.ReadSeeker) error Export(writer io.WriteSeeker, targetSlot iotago.SlotIndex) error diff --git a/pkg/protocol/engine/ledger/ledger/ledger.go b/pkg/protocol/engine/ledger/ledger/ledger.go index 6d262a077..710926e5f 100644 --- a/pkg/protocol/engine/ledger/ledger/ledger.go +++ b/pkg/protocol/engine/ledger/ledger/ledger.go @@ -138,10 +138,10 @@ func (l *Ledger) AttachTransaction(block *blocks.Block) (attachedTransaction mem return nil, false } -func (l *Ledger) CommitSlot(slot iotago.SlotIndex) (stateRoot iotago.Identifier, mutationRoot iotago.Identifier, accountRoot iotago.Identifier, err error) { +func (l *Ledger) CommitSlot(slot iotago.SlotIndex) (stateRoot iotago.Identifier, mutationRoot iotago.Identifier, accountRoot iotago.Identifier, created utxoledger.Outputs, consumed utxoledger.Spents, err error) { ledgerIndex, err := l.utxoLedger.ReadLedgerSlot() if err != nil { - return iotago.Identifier{}, iotago.Identifier{}, iotago.Identifier{}, err + return iotago.Identifier{}, iotago.Identifier{}, iotago.Identifier{}, nil, nil, err } if slot != ledgerIndex+1 { @@ -150,7 +150,7 @@ func (l *Ledger) CommitSlot(slot iotago.SlotIndex) (stateRoot iotago.Identifier, stateDiff, err := l.memPool.StateDiff(slot) if err != nil { - return iotago.Identifier{}, iotago.Identifier{}, iotago.Identifier{}, ierrors.Errorf("failed to retrieve state diff for slot %d: %w", slot, err) 
+ return iotago.Identifier{}, iotago.Identifier{}, iotago.Identifier{}, nil, nil, ierrors.Errorf("failed to retrieve state diff for slot %d: %w", slot, err) } // collect outputs and allotments from the "uncompacted" stateDiff @@ -158,7 +158,7 @@ func (l *Ledger) CommitSlot(slot iotago.SlotIndex) (stateRoot iotago.Identifier, // and retrieve intermediate outputs to show to the user spends, outputs, accountDiffs, err := l.processStateDiffTransactions(stateDiff) if err != nil { - return iotago.Identifier{}, iotago.Identifier{}, iotago.Identifier{}, ierrors.Errorf("failed to process state diff transactions in slot %d: %w", slot, err) + return iotago.Identifier{}, iotago.Identifier{}, iotago.Identifier{}, nil, nil, ierrors.Errorf("failed to process state diff transactions in slot %d: %w", slot, err) } // Now we process the collected account changes, for that we consume the "compacted" state diff to get the overall @@ -167,7 +167,7 @@ func (l *Ledger) CommitSlot(slot iotago.SlotIndex) (stateRoot iotago.Identifier, // output side createdAccounts, consumedAccounts, destroyedAccounts, err := l.processCreatedAndConsumedAccountOutputs(stateDiff, accountDiffs) if err != nil { - return iotago.Identifier{}, iotago.Identifier{}, iotago.Identifier{}, ierrors.Errorf("failed to process outputs consumed and created in slot %d: %w", slot, err) + return iotago.Identifier{}, iotago.Identifier{}, iotago.Identifier{}, nil, nil, ierrors.Errorf("failed to process outputs consumed and created in slot %d: %w", slot, err) } l.prepareAccountDiffs(accountDiffs, slot, consumedAccounts, createdAccounts) @@ -175,7 +175,7 @@ func (l *Ledger) CommitSlot(slot iotago.SlotIndex) (stateRoot iotago.Identifier, // Commit the changes // Update the UTXO ledger if err = l.utxoLedger.ApplyDiff(slot, outputs, spends); err != nil { - return iotago.Identifier{}, iotago.Identifier{}, iotago.Identifier{}, ierrors.Errorf("failed to apply diff to UTXO ledger for slot %d: %w", slot, err) + return iotago.Identifier{}, iotago.Identifier{}, iotago.Identifier{}, nil, nil, ierrors.Errorf("failed to apply diff to UTXO ledger for slot %d: %w", slot, err) } // Update the Accounts ledger @@ -187,15 +187,15 @@ func (l *Ledger) CommitSlot(slot iotago.SlotIndex) (stateRoot iotago.Identifier, } rmcForSlot, err := l.rmcManager.RMC(rmcSlot) if err != nil { - return iotago.Identifier{}, iotago.Identifier{}, iotago.Identifier{}, ierrors.Errorf("ledger failed to get RMC for slot %d: %w", rmcSlot, err) + return iotago.Identifier{}, iotago.Identifier{}, iotago.Identifier{}, nil, nil, ierrors.Errorf("ledger failed to get RMC for slot %d: %w", rmcSlot, err) } if err = l.accountsLedger.ApplyDiff(slot, rmcForSlot, accountDiffs, destroyedAccounts); err != nil { - return iotago.Identifier{}, iotago.Identifier{}, iotago.Identifier{}, ierrors.Errorf("failed to apply diff to Accounts ledger for slot %d: %w", slot, err) + return iotago.Identifier{}, iotago.Identifier{}, iotago.Identifier{}, nil, nil, ierrors.Errorf("failed to apply diff to Accounts ledger for slot %d: %w", slot, err) } // Update the mana manager's cache if err = l.manaManager.ApplyDiff(slot, destroyedAccounts, createdAccounts, accountDiffs); err != nil { - return iotago.Identifier{}, iotago.Identifier{}, iotago.Identifier{}, ierrors.Errorf("failed to apply diff to mana manager for slot %d: %w", slot, err) + return iotago.Identifier{}, iotago.Identifier{}, iotago.Identifier{}, nil, nil, ierrors.Errorf("failed to apply diff to mana manager for slot %d: %w", slot, err) } // Mark each transaction as committed 
so the mempool can evict it @@ -204,9 +204,7 @@ func (l *Ledger) CommitSlot(slot iotago.SlotIndex) (stateRoot iotago.Identifier, return true }) - l.events.StateDiffApplied.Trigger(slot, outputs, spends) - - return l.utxoLedger.StateTreeRoot(), stateDiff.Mutations().Root(), l.accountsLedger.AccountsTreeRoot(), nil + return l.utxoLedger.StateTreeRoot(), stateDiff.Mutations().Root(), l.accountsLedger.AccountsTreeRoot(), outputs, spends, nil } func (l *Ledger) AddAccount(output *utxoledger.Output, blockIssuanceCredits iotago.BlockIssuanceCredits) error { diff --git a/pkg/protocol/engine/notarization/events.go b/pkg/protocol/engine/notarization/events.go index 082c75760..81aaa5e8a 100644 --- a/pkg/protocol/engine/notarization/events.go +++ b/pkg/protocol/engine/notarization/events.go @@ -4,6 +4,7 @@ import ( "github.com/iotaledger/hive.go/ads" "github.com/iotaledger/hive.go/runtime/event" "github.com/iotaledger/iota-core/pkg/model" + "github.com/iotaledger/iota-core/pkg/protocol/engine/utxoledger" iotago "github.com/iotaledger/iota.go/v4" ) @@ -28,4 +29,6 @@ type SlotCommittedDetails struct { Commitment *model.Commitment AcceptedBlocks ads.Set[iotago.Identifier, iotago.BlockID] ActiveValidatorsCount int + OutputsCreated utxoledger.Outputs + OutputsConsumed utxoledger.Spents } diff --git a/pkg/protocol/engine/notarization/slotnotarization/manager.go b/pkg/protocol/engine/notarization/slotnotarization/manager.go index 6dd1c36c5..0b033661b 100644 --- a/pkg/protocol/engine/notarization/slotnotarization/manager.go +++ b/pkg/protocol/engine/notarization/slotnotarization/manager.go @@ -192,7 +192,7 @@ func (m *Manager) createCommitment(slot iotago.SlotIndex) (*model.Commitment, er return nil, ierrors.Wrap(err, "failed to commit attestations") } - stateRoot, mutationRoot, accountRoot, err := m.ledger.CommitSlot(slot) + stateRoot, mutationRoot, accountRoot, created, consumed, err := m.ledger.CommitSlot(slot) if err != nil { return nil, ierrors.Wrap(err, "failed to commit ledger") } @@ -255,6 +255,8 @@ func (m *Manager) createCommitment(slot iotago.SlotIndex) (*model.Commitment, er Commitment: newModelCommitment, AcceptedBlocks: acceptedBlocks, ActiveValidatorsCount: 0, + OutputsCreated: created, + OutputsConsumed: consumed, }) if err = m.storage.Settings().SetLatestCommitment(newModelCommitment); err != nil { diff --git a/pkg/protocol/engine/upgrade/signalingupgradeorchestrator/orchestrator.go b/pkg/protocol/engine/upgrade/signalingupgradeorchestrator/orchestrator.go index cd771ec0e..a2e0f5167 100644 --- a/pkg/protocol/engine/upgrade/signalingupgradeorchestrator/orchestrator.go +++ b/pkg/protocol/engine/upgrade/signalingupgradeorchestrator/orchestrator.go @@ -263,7 +263,7 @@ func (o *Orchestrator) tryUpgrade(currentEpoch iotago.EpochIndex, lastSlotInEpoc } // Check whether the threshold for version was reached. 
- totalSeatCount := o.seatManager.SeatCount() + totalSeatCount := o.seatManager.SeatCountInEpoch(currentEpoch) if !votes.IsThresholdReached(mostSupporters, totalSeatCount, votes.SuperMajority) { return } diff --git a/pkg/protocol/sybilprotection/seatmanager/mock/mockseatmanager.go b/pkg/protocol/sybilprotection/seatmanager/mock/mockseatmanager.go index 7e90c5bc4..021bc1374 100644 --- a/pkg/protocol/sybilprotection/seatmanager/mock/mockseatmanager.go +++ b/pkg/protocol/sybilprotection/seatmanager/mock/mockseatmanager.go @@ -159,7 +159,10 @@ func (m *ManualPOA) OnlineCommittee() ds.Set[account.SeatIndex] { return m.online } -func (m *ManualPOA) SeatCount() int { +func (m *ManualPOA) SeatCountInSlot(_ iotago.SlotIndex) int { + return m.committee.SeatCount() +} +func (m *ManualPOA) SeatCountInEpoch(_ iotago.EpochIndex) int { return m.committee.SeatCount() } diff --git a/pkg/protocol/sybilprotection/seatmanager/poa/poa.go b/pkg/protocol/sybilprotection/seatmanager/poa/poa.go index f5c113c76..6b9c22133 100644 --- a/pkg/protocol/sybilprotection/seatmanager/poa/poa.go +++ b/pkg/protocol/sybilprotection/seatmanager/poa/poa.go @@ -146,7 +146,14 @@ func (s *SeatManager) OnlineCommittee() ds.Set[account.SeatIndex] { return s.activityTracker.OnlineCommittee() } -func (s *SeatManager) SeatCount() int { +func (s *SeatManager) SeatCountInSlot(_ iotago.SlotIndex) int { + s.committeeMutex.RLock() + defer s.committeeMutex.RUnlock() + + return s.committee.SeatCount() +} + +func (s *SeatManager) SeatCountInEpoch(_ iotago.EpochIndex) int { s.committeeMutex.RLock() defer s.committeeMutex.RUnlock() @@ -166,9 +173,11 @@ func (s *SeatManager) InitializeCommittee(epoch iotago.EpochIndex, activityTime return ierrors.Wrapf(err, "failed to load PoA committee for epoch %d", epoch) } - s.committee = committeeAccounts.SelectCommittee(committeeAccounts.IDs()...) + committeeAccountsIDs := committeeAccounts.IDs() + s.committee = committeeAccounts.SelectCommittee(committeeAccountsIDs...) - onlineValidators := committeeAccounts.IDs() + // Set validators that are part of the committee as active. + onlineValidators := committeeAccountsIDs if len(s.optsOnlineCommitteeStartup) > 0 { onlineValidators = s.optsOnlineCommitteeStartup } diff --git a/pkg/protocol/sybilprotection/seatmanager/seatmanager.go b/pkg/protocol/sybilprotection/seatmanager/seatmanager.go index fe87322c4..38c443e17 100644 --- a/pkg/protocol/sybilprotection/seatmanager/seatmanager.go +++ b/pkg/protocol/sybilprotection/seatmanager/seatmanager.go @@ -33,7 +33,9 @@ type SeatManager interface { OnlineCommittee() ds.Set[account.SeatIndex] // SeatCount returns the number of seats in the SeatManager. - SeatCount() int + SeatCountInSlot(slot iotago.SlotIndex) int + + SeatCountInEpoch(epoch iotago.EpochIndex) int // Interface embeds the required methods of the module.Interface. 
module.Interface diff --git a/pkg/protocol/sybilprotection/seatmanager/topstakers/options.go b/pkg/protocol/sybilprotection/seatmanager/topstakers/options.go index 4f190f182..b6d6fffe4 100644 --- a/pkg/protocol/sybilprotection/seatmanager/topstakers/options.go +++ b/pkg/protocol/sybilprotection/seatmanager/topstakers/options.go @@ -19,9 +19,3 @@ func WithOnlineCommitteeStartup(optsOnlineCommittee ...iotago.AccountID) options p.optsOnlineCommitteeStartup = optsOnlineCommittee } } - -func WithSeatCount(optsSeatCount uint32) options.Option[SeatManager] { - return func(p *SeatManager) { - p.optsSeatCount = optsSeatCount - } -} diff --git a/pkg/protocol/sybilprotection/seatmanager/topstakers/topstakers.go b/pkg/protocol/sybilprotection/seatmanager/topstakers/topstakers.go index 7183d6aca..a56772599 100644 --- a/pkg/protocol/sybilprotection/seatmanager/topstakers/topstakers.go +++ b/pkg/protocol/sybilprotection/seatmanager/topstakers/topstakers.go @@ -7,6 +7,7 @@ import ( "github.com/iotaledger/hive.go/ds" "github.com/iotaledger/hive.go/ierrors" + "github.com/iotaledger/hive.go/lo" "github.com/iotaledger/hive.go/runtime/module" "github.com/iotaledger/hive.go/runtime/options" "github.com/iotaledger/hive.go/runtime/syncutils" @@ -30,7 +31,6 @@ type SeatManager struct { committeeMutex syncutils.RWMutex activityTracker activitytracker.ActivityTracker - optsSeatCount uint32 optsActivityWindow time.Duration optsOnlineCommitteeStartup []iotago.AccountID @@ -85,42 +85,21 @@ func (s *SeatManager) RotateCommittee(epoch iotago.EpochIndex, candidates accoun s.committeeMutex.Lock() defer s.committeeMutex.Unlock() - // If there are fewer candidates than required for epoch 0, then the previous committee cannot be copied. - if len(candidates) < s.SeatCount() && epoch == 0 { - return nil, ierrors.Errorf("at least %d candidates are required for committee in epoch 0, got %d", s.SeatCount(), len(candidates)) + if len(candidates) == 0 { + return nil, ierrors.New("candidates must not be empty") } - // If there are fewer candidates than required, then re-use the previous committee. - if len(candidates) < s.SeatCount() { - // TODO: what if staking period of a committee member ends in the next epoch? 
- committee, exists := s.committeeInEpoch(epoch - 1) - if !exists { - return nil, ierrors.Errorf("cannot re-use previous committee from epoch %d as it does not exist", epoch-1) - } - - accounts, err := committee.Accounts() - if err != nil { - return nil, ierrors.Wrapf(err, "error while getting accounts from committee for epoch %d", epoch-1) - } - - if err := s.committeeStore.Store(epoch, accounts); err != nil { - return nil, ierrors.Wrapf(err, "error while storing committee for epoch %d", epoch) - } - - return committee, nil - } - - committee, err := s.selectNewCommittee(candidates) + committee, err := s.selectNewCommittee(epoch, candidates) if err != nil { return nil, ierrors.Wrap(err, "error while selecting new committee") } - accounts, err := committee.Accounts() + committeeAccounts, err := committee.Accounts() if err != nil { - return nil, ierrors.Wrapf(err, "error while getting accounts for newly selected committee for epoch %d", epoch) + return nil, ierrors.Wrapf(err, "error while getting committeeAccounts for newly selected committee for epoch %d", epoch) } - if err := s.committeeStore.Store(epoch, accounts); err != nil { + if err := s.committeeStore.Store(epoch, committeeAccounts); err != nil { return nil, ierrors.Wrapf(err, "error while storing committee for epoch %d", epoch) } @@ -161,8 +140,22 @@ func (s *SeatManager) OnlineCommittee() ds.Set[account.SeatIndex] { return s.activityTracker.OnlineCommittee() } -func (s *SeatManager) SeatCount() int { - return int(s.optsSeatCount) +func (s *SeatManager) SeatCountInSlot(slot iotago.SlotIndex) int { + epoch := s.apiProvider.APIForSlot(slot).TimeProvider().EpochFromSlot(slot) + + return s.SeatCountInEpoch(epoch) +} + +func (s *SeatManager) SeatCountInEpoch(epoch iotago.EpochIndex) int { + s.committeeMutex.RLock() + defer s.committeeMutex.RUnlock() + + // TODO: this function is a hot path as it is called for every single block. Maybe accessing the storage is too slow. + if committee, exists := s.committeeInEpoch(epoch); exists { + return committee.SeatCount() + } + + return int(s.apiProvider.APIForEpoch(epoch).ProtocolParameters().TargetCommitteeSize()) } func (s *SeatManager) Shutdown() { @@ -202,8 +195,8 @@ func (s *SeatManager) SetCommittee(epoch iotago.EpochIndex, validators *account. s.committeeMutex.Lock() defer s.committeeMutex.Unlock() - if validators.Size() != int(s.optsSeatCount) { - return ierrors.Errorf("invalid number of validators: %d, expected: %d", validators.Size(), s.optsSeatCount) + if validators.Size() == 0 { + return ierrors.New("committee must not be empty") } err := s.committeeStore.Store(epoch, validators) @@ -214,7 +207,7 @@ func (s *SeatManager) SetCommittee(epoch iotago.EpochIndex, validators *account. return nil } -func (s *SeatManager) selectNewCommittee(candidates accounts.AccountsData) (*account.SeatedAccounts, error) { +func (s *SeatManager) selectNewCommittee(epoch iotago.EpochIndex, candidates accounts.AccountsData) (*account.SeatedAccounts, error) { sort.Slice(candidates, func(i int, j int) bool { // Prioritize the candidate that has a larger pool stake. if candidates[i].ValidatorStake+candidates[i].DelegationStake != candidates[j].ValidatorStake+candidates[j].DelegationStake { @@ -240,10 +233,14 @@ func (s *SeatManager) selectNewCommittee(candidates accounts.AccountsData) (*acc return bytes.Compare(candidates[i].ID[:], candidates[j].ID[:]) > 0 }) + // We try to select up to targetCommitteeSize candidates to be part of the committee. 
If there are fewer candidates + // than required, then we select all of them and the committee size will be smaller than targetCommitteeSize. + committeeSize := lo.Min(len(candidates), int(s.apiProvider.APIForEpoch(epoch).ProtocolParameters().TargetCommitteeSize())) + // Create new Accounts instance that only included validators selected to be part of the committee. newCommitteeAccounts := account.NewAccounts() - for _, candidateData := range candidates[:s.optsSeatCount] { + for _, candidateData := range candidates[:committeeSize] { if err := newCommitteeAccounts.Set(candidateData.ID, &account.Pool{ PoolStake: candidateData.ValidatorStake + candidateData.DelegationStake, ValidatorStake: candidateData.ValidatorStake, diff --git a/pkg/protocol/sybilprotection/seatmanager/topstakers/topstakers_test.go b/pkg/protocol/sybilprotection/seatmanager/topstakers/topstakers_test.go index dded90610..83141789c 100644 --- a/pkg/protocol/sybilprotection/seatmanager/topstakers/topstakers_test.go +++ b/pkg/protocol/sybilprotection/seatmanager/topstakers/topstakers_test.go @@ -1,6 +1,7 @@ package topstakers import ( + "fmt" "testing" "time" @@ -21,17 +22,28 @@ import ( ) func TestTopStakers_InitializeCommittee(t *testing.T) { + var testAPI = iotago.V3API( + iotago.NewV3ProtocolParameters( + iotago.WithNetworkOptions("TestJungle", "tgl"), + iotago.WithSupplyOptions(2_779_530_283_277_761, 0, 0, 0, 0, 0, 0), + iotago.WithWorkScoreOptions(0, 1, 0, 0, 0, 0, 0, 0, 0, 0), // all zero except block offset gives all blocks workscore = 1 + iotago.WithTargetCommitteeSize(3), + ), + ) + committeeStore := epochstore.NewStore(kvstore.Realm{}, mapdb.NewMapDB(), 0, (*account.Accounts).Bytes, account.AccountsFromBytes) topStakersSeatManager := &SeatManager{ - apiProvider: api.SingleVersionProvider(tpkg.TestAPI), + apiProvider: api.SingleVersionProvider(testAPI), committeeStore: committeeStore, events: seatmanager.NewEvents(), activityTracker: activitytrackerv1.NewActivityTracker(time.Second * 30), - - optsSeatCount: 3, } + // Try setting an empty committee. + err := topStakersSeatManager.SetCommittee(0, account.NewAccounts()) + require.Error(t, err) + // Create committee for epoch 0 initialCommittee := account.NewAccounts() for i := 0; i < 3; i++ { @@ -43,176 +55,265 @@ func TestTopStakers_InitializeCommittee(t *testing.T) { t.Fatal(err) } } - // Try setting committee that is too small - should return an error. - err := topStakersSeatManager.SetCommittee(0, initialCommittee) + + // Set committee for epoch 0. + err = topStakersSeatManager.SetCommittee(0, initialCommittee) require.NoError(t, err) weightedSeats, exists := topStakersSeatManager.CommitteeInEpoch(0) require.True(t, exists) initialCommitteeAccountIDs := initialCommittee.IDs() - // Make sure that the online committee is handled correctly. + // Online committee should be empty. require.True(t, topStakersSeatManager.OnlineCommittee().IsEmpty()) + // After initialization, the online committee should contain the seats of the initial committee. 
require.NoError(t, topStakersSeatManager.InitializeCommittee(0, time.Time{})) - assertOnlineCommittee(t, topStakersSeatManager.OnlineCommittee(), lo.Return1(weightedSeats.GetSeat(initialCommitteeAccountIDs[0])), lo.Return1(weightedSeats.GetSeat(initialCommitteeAccountIDs[2])), lo.Return1(weightedSeats.GetSeat(initialCommitteeAccountIDs[2]))) + assertOnlineCommittee(t, topStakersSeatManager.OnlineCommittee(), + lo.Return1(weightedSeats.GetSeat(initialCommitteeAccountIDs[0])), + lo.Return1(weightedSeats.GetSeat(initialCommitteeAccountIDs[2])), + lo.Return1(weightedSeats.GetSeat(initialCommitteeAccountIDs[2])), + ) } func TestTopStakers_RotateCommittee(t *testing.T) { + var testAPI = iotago.V3API( + iotago.NewV3ProtocolParameters( + iotago.WithNetworkOptions("TestJungle", "tgl"), + iotago.WithSupplyOptions(2_779_530_283_277_761, 0, 0, 0, 0, 0, 0), + iotago.WithWorkScoreOptions(0, 1, 0, 0, 0, 0, 0, 0, 0, 0), // all zero except block offset gives all blocks workscore = 1 + iotago.WithTargetCommitteeSize(10), + ), + ) + committeeStore := epochstore.NewStore(kvstore.Realm{}, mapdb.NewMapDB(), 0, (*account.Accounts).Bytes, account.AccountsFromBytes) - topStakersSeatManager := &SeatManager{ - apiProvider: api.SingleVersionProvider(tpkg.TestAPI), + s := &SeatManager{ + apiProvider: api.SingleVersionProvider(testAPI), committeeStore: committeeStore, events: seatmanager.NewEvents(), activityTracker: activitytrackerv1.NewActivityTracker(time.Second * 30), - - optsSeatCount: 3, } // Committee should not exist because it was never set. - _, exists := topStakersSeatManager.CommitteeInSlot(10) + _, exists := s.CommitteeInSlot(10) require.False(t, exists) - _, exists = topStakersSeatManager.CommitteeInEpoch(0) + _, exists = s.CommitteeInEpoch(0) require.False(t, exists) + var committeeInEpoch0 *account.SeatedAccounts + var committeeInEpoch0IDs []iotago.AccountID + expectedCommitteeInEpoch0 := account.NewAccounts() + // Create committee for epoch 0 - initialCommittee := account.NewAccounts() - require.NoError(t, initialCommittee.Set(tpkg.RandAccountID(), &account.Pool{ - PoolStake: 1900, - ValidatorStake: 900, - FixedCost: 11, - })) - - require.NoError(t, initialCommittee.Set(tpkg.RandAccountID(), &account.Pool{ - PoolStake: 1900, - ValidatorStake: 900, - FixedCost: 11, - })) - - // Try setting committee that is too small - should return an error. - err := topStakersSeatManager.SetCommittee(0, initialCommittee) - require.Error(t, err) + { + addCommitteeMember(t, expectedCommitteeInEpoch0, &account.Pool{PoolStake: 1900, ValidatorStake: 900, FixedCost: 11}) + addCommitteeMember(t, expectedCommitteeInEpoch0, &account.Pool{PoolStake: 1900, ValidatorStake: 900, FixedCost: 11}) + addCommitteeMember(t, expectedCommitteeInEpoch0, &account.Pool{PoolStake: 1900, ValidatorStake: 900, FixedCost: 11}) - require.NoError(t, initialCommittee.Set(tpkg.RandAccountID(), &account.Pool{ - PoolStake: 1900, - ValidatorStake: 900, - FixedCost: 11, - })) + // We should be able to set a committee with only 3 members for epoch 0 (this could be set e.g. via the snapshot). + err := s.SetCommittee(0, expectedCommitteeInEpoch0) + require.NoError(t, err) - // Set committee with the correct size - err = topStakersSeatManager.SetCommittee(0, initialCommittee) - require.NoError(t, err) - weightedSeats, exists := topStakersSeatManager.CommitteeInEpoch(0) - require.True(t, exists) - initialCommitteeAccountIDs := initialCommittee.IDs() + // Make sure that the online committee is handled correctly. 
+ { + committeeInEpoch0, exists = s.CommitteeInEpoch(0) + require.True(t, exists) + committeeInEpoch0IDs = expectedCommitteeInEpoch0.IDs() - // Make sure that the online committee is handled correctly. - require.True(t, topStakersSeatManager.OnlineCommittee().IsEmpty()) + require.True(t, s.OnlineCommittee().IsEmpty()) - topStakersSeatManager.activityTracker.MarkSeatActive(lo.Return1(weightedSeats.GetSeat(initialCommitteeAccountIDs[0])), initialCommitteeAccountIDs[0], tpkg.TestAPI.TimeProvider().SlotStartTime(1)) - assertOnlineCommittee(t, topStakersSeatManager.OnlineCommittee(), lo.Return1(weightedSeats.GetSeat(initialCommitteeAccountIDs[0]))) + s.activityTracker.MarkSeatActive(lo.Return1(committeeInEpoch0.GetSeat(committeeInEpoch0IDs[0])), committeeInEpoch0IDs[0], testAPI.TimeProvider().SlotStartTime(1)) + assertOnlineCommittee(t, s.OnlineCommittee(), lo.Return1(committeeInEpoch0.GetSeat(committeeInEpoch0IDs[0]))) - topStakersSeatManager.activityTracker.MarkSeatActive(lo.Return1(weightedSeats.GetSeat(initialCommitteeAccountIDs[1])), initialCommitteeAccountIDs[1], tpkg.TestAPI.TimeProvider().SlotStartTime(2)) - assertOnlineCommittee(t, topStakersSeatManager.OnlineCommittee(), lo.Return1(weightedSeats.GetSeat(initialCommitteeAccountIDs[0])), lo.Return1(weightedSeats.GetSeat(initialCommitteeAccountIDs[1]))) + s.activityTracker.MarkSeatActive(lo.Return1(committeeInEpoch0.GetSeat(committeeInEpoch0IDs[1])), committeeInEpoch0IDs[1], testAPI.TimeProvider().SlotStartTime(2)) + assertOnlineCommittee(t, s.OnlineCommittee(), lo.Return1(committeeInEpoch0.GetSeat(committeeInEpoch0IDs[0])), lo.Return1(committeeInEpoch0.GetSeat(committeeInEpoch0IDs[1]))) - topStakersSeatManager.activityTracker.MarkSeatActive(lo.Return1(weightedSeats.GetSeat(initialCommitteeAccountIDs[2])), initialCommitteeAccountIDs[2], tpkg.TestAPI.TimeProvider().SlotStartTime(3)) - assertOnlineCommittee(t, topStakersSeatManager.OnlineCommittee(), lo.Return1(weightedSeats.GetSeat(initialCommitteeAccountIDs[0])), lo.Return1(weightedSeats.GetSeat(initialCommitteeAccountIDs[1])), lo.Return1(weightedSeats.GetSeat(initialCommitteeAccountIDs[2]))) + s.activityTracker.MarkSeatActive(lo.Return1(committeeInEpoch0.GetSeat(committeeInEpoch0IDs[2])), committeeInEpoch0IDs[2], testAPI.TimeProvider().SlotStartTime(3)) + assertOnlineCommittee(t, s.OnlineCommittee(), lo.Return1(committeeInEpoch0.GetSeat(committeeInEpoch0IDs[0])), lo.Return1(committeeInEpoch0.GetSeat(committeeInEpoch0IDs[1])), lo.Return1(committeeInEpoch0.GetSeat(committeeInEpoch0IDs[2]))) - // Make sure that after a period of inactivity, the inactive seats are marked as offline. - topStakersSeatManager.activityTracker.MarkSeatActive(lo.Return1(weightedSeats.GetSeat(initialCommitteeAccountIDs[2])), initialCommitteeAccountIDs[2], tpkg.TestAPI.TimeProvider().SlotEndTime(7)) - assertOnlineCommittee(t, topStakersSeatManager.OnlineCommittee(), lo.Return1(weightedSeats.GetSeat(initialCommitteeAccountIDs[2]))) + // Make sure that after a period of inactivity, the inactive seats are marked as offline. + s.activityTracker.MarkSeatActive(lo.Return1(committeeInEpoch0.GetSeat(committeeInEpoch0IDs[2])), committeeInEpoch0IDs[2], testAPI.TimeProvider().SlotEndTime(7)) + assertOnlineCommittee(t, s.OnlineCommittee(), lo.Return1(committeeInEpoch0.GetSeat(committeeInEpoch0IDs[2]))) + } - // Make sure that the committee was assigned to the correct epoch. - _, exists = topStakersSeatManager.CommitteeInEpoch(1) - require.False(t, exists) + // Make sure that the committee was assigned to the correct epoch. 
+ _, exists = s.CommitteeInEpoch(1) + require.False(t, exists) - // Make sure that the committee members match the expected ones. - committee, exists := topStakersSeatManager.CommitteeInEpoch(0) - require.True(t, exists) - assertCommittee(t, initialCommittee, committee) + // Make sure that the committee members match the expected ones. + assertCommitteeInEpoch(t, s, testAPI, 0, expectedCommitteeInEpoch0) - committee, exists = topStakersSeatManager.CommitteeInSlot(3) - require.True(t, exists) - assertCommittee(t, initialCommittee, committee) - - // Design candidate list and expected committee members. - accountsContext := make(accounts.AccountsData, 0) - expectedCommittee := account.NewAccounts() - numCandidates := 10 - - // Add some candidates that have the same fields to test sorting by secondary fields. - candidate1ID := tpkg.RandAccountID() - accountsContext = append(accountsContext, &accounts.AccountData{ - ID: candidate1ID, - ValidatorStake: 399, - DelegationStake: 800 - 399, - FixedCost: 3, - StakeEndEpoch: iotago.MaxEpochIndex, - }) - - candidate2ID := tpkg.RandAccountID() - accountsContext = append(accountsContext, &accounts.AccountData{ - ID: candidate2ID, - ValidatorStake: 399, - DelegationStake: 800 - 399, - FixedCost: 3, - StakeEndEpoch: iotago.MaxEpochIndex, - }) - - for i := 1; i <= numCandidates; i++ { - candidateAccountID := tpkg.RandAccountID() - candidatePool := &account.Pool{ - PoolStake: iotago.BaseToken(i * 100), - ValidatorStake: iotago.BaseToken(i * 50), - FixedCost: iotago.Mana(i), + // Make sure that the committee size is correct for this epoch + assertCommitteeSizeInEpoch(t, s, testAPI, 0, 3) + } + + expectedCommitteeInEpoch1 := account.NewAccounts() + // Design candidate list and expected committee members for epoch 1. + { + epoch := iotago.EpochIndex(1) + accountsData := make(accounts.AccountsData, 0) + numCandidates := 15 + expectedCommitteeSize := testAPI.ProtocolParameters().TargetCommitteeSize() + require.EqualValues(t, expectedCommitteeSize, s.SeatCountInEpoch(epoch)) + + s.SeatCountInEpoch(epoch) + + // Add some candidates that have the same fields to test sorting by secondary fields. 
+ { + candidate0ID := tpkg.RandAccountID() + candidate0ID.RegisterAlias("candidate0") + accountsData = append(accountsData, &accounts.AccountData{ + ID: candidate0ID, + ValidatorStake: 100, + DelegationStake: 800 - 399, + FixedCost: 3, + StakeEndEpoch: iotago.MaxEpochIndex, + }) + + candidate1ID := tpkg.RandAccountID() + candidate1ID.RegisterAlias("candidate1") + accountsData = append(accountsData, &accounts.AccountData{ + ID: candidate1ID, + ValidatorStake: 100, + DelegationStake: 800 - 399, + FixedCost: 3, + StakeEndEpoch: iotago.MaxEpochIndex, + }) } - accountsContext = append(accountsContext, &accounts.AccountData{ - ID: candidateAccountID, - ValidatorStake: iotago.BaseToken(i * 50), - DelegationStake: iotago.BaseToken(i*100) - iotago.BaseToken(i*50), - FixedCost: tpkg.RandMana(iotago.MaxMana), - StakeEndEpoch: tpkg.RandEpoch(), - }) - if i+topStakersSeatManager.SeatCount() > numCandidates { - expectedCommittee.Set(candidateAccountID, candidatePool) + for i := 2; i <= numCandidates; i++ { + candidateAccountID := tpkg.RandAccountID() + candidateAccountID.RegisterAlias(fmt.Sprintf("candidate%d", i)) + candidatePool := &account.Pool{ + PoolStake: iotago.BaseToken(i * 100), + ValidatorStake: iotago.BaseToken(i * 50), + FixedCost: iotago.Mana(i), + } + accountsData = append(accountsData, &accounts.AccountData{ + ID: candidateAccountID, + ValidatorStake: iotago.BaseToken(i * 50), + DelegationStake: iotago.BaseToken(i*100) - iotago.BaseToken(i*50), + FixedCost: tpkg.RandMana(iotago.MaxMana), + StakeEndEpoch: tpkg.RandEpoch(), + }) + + if i+int(expectedCommitteeSize) > numCandidates { + require.NoError(t, expectedCommitteeInEpoch1.Set(candidateAccountID, candidatePool)) + } } + + // Rotate the committee and make sure that the returned committee matches the expected. + rotatedCommitteeInEpoch1, err := s.RotateCommittee(epoch, accountsData) + require.NoError(t, err) + assertCommittee(t, expectedCommitteeInEpoch1, rotatedCommitteeInEpoch1) + + // Make sure that after committee rotation, the online committee is not changed. + assertOnlineCommittee(t, s.OnlineCommittee(), lo.Return1(committeeInEpoch0.GetSeat(committeeInEpoch0IDs[2]))) + + committeeInEpoch1Accounts, err := rotatedCommitteeInEpoch1.Accounts() + require.NoError(t, err) + newCommitteeMemberIDs := committeeInEpoch1Accounts.IDs() + + // A new committee member appears online and makes the previously active committee seat inactive. + s.activityTracker.MarkSeatActive(lo.Return1(committeeInEpoch0.GetSeat(newCommitteeMemberIDs[0])), newCommitteeMemberIDs[0], testAPI.TimeProvider().SlotEndTime(14)) + assertOnlineCommittee(t, s.OnlineCommittee(), lo.Return1(committeeInEpoch0.GetSeat(newCommitteeMemberIDs[0]))) + + // Make sure that the committee retrieved from the committee store matches the expected. + assertCommitteeInEpoch(t, s, testAPI, 1, expectedCommitteeInEpoch1) + assertCommitteeSizeInEpoch(t, s, testAPI, 1, 10) + + // Make sure that the previous committee was not modified and is still accessible. + assertCommitteeInEpoch(t, s, testAPI, 0, expectedCommitteeInEpoch0) + assertCommitteeSizeInEpoch(t, s, testAPI, 0, 3) } - // Rotate the committee and make sure that the returned committee matches the expected. - newCommittee, err := topStakersSeatManager.RotateCommittee(1, accountsContext) - require.NoError(t, err) - assertCommittee(t, expectedCommittee, newCommittee) + // Rotate committee again with fewer candidates than the target committee size. 
+ expectedCommitteeInEpoch2 := account.NewAccounts() + { + epoch := iotago.EpochIndex(2) + accountsData := make(accounts.AccountsData, 0) + + candidate0ID := tpkg.RandAccountID() + candidate0ID.RegisterAlias("candidate0-epoch2") + accountsData = append(accountsData, &accounts.AccountData{ + ID: candidate0ID, + ValidatorStake: 100, + DelegationStake: 800 - 399, + FixedCost: 3, + StakeEndEpoch: iotago.MaxEpochIndex, + }) + require.NoError(t, expectedCommitteeInEpoch2.Set(candidate0ID, &account.Pool{PoolStake: 1900, ValidatorStake: 900, FixedCost: 11})) - // Make sure that after committee rotation, the online committee is not changed. - assertOnlineCommittee(t, topStakersSeatManager.OnlineCommittee(), lo.Return1(weightedSeats.GetSeat(initialCommitteeAccountIDs[2]))) + // Rotate the committee and make sure that the returned committee matches the expected. + rotatedCommitteeInEpoch2, err := s.RotateCommittee(epoch, accountsData) + require.NoError(t, err) + assertCommittee(t, expectedCommitteeInEpoch2, rotatedCommitteeInEpoch2) - accounts, err := newCommittee.Accounts() - require.NoError(t, err) - newCommitteeMemberIDs := accounts.IDs() + assertCommitteeInEpoch(t, s, testAPI, 2, expectedCommitteeInEpoch2) + assertCommitteeSizeInEpoch(t, s, testAPI, 2, 1) - // A new committee member appears online and makes the previously active committee seat inactive. - topStakersSeatManager.activityTracker.MarkSeatActive(lo.Return1(weightedSeats.GetSeat(newCommitteeMemberIDs[0])), newCommitteeMemberIDs[0], tpkg.TestAPI.TimeProvider().SlotEndTime(14)) - assertOnlineCommittee(t, topStakersSeatManager.OnlineCommittee(), lo.Return1(weightedSeats.GetSeat(newCommitteeMemberIDs[0]))) + // Make sure that the committee retrieved from the committee store matches the expected. + assertCommitteeInEpoch(t, s, testAPI, 1, expectedCommitteeInEpoch1) + assertCommitteeSizeInEpoch(t, s, testAPI, 1, 10) - // Make sure that the committee retrieved from the committee store matches the expected. - committee, exists = topStakersSeatManager.CommitteeInEpoch(1) - require.True(t, exists) - assertCommittee(t, expectedCommittee, committee) + // Make sure that the previous committee was not modified and is still accessible. + assertCommitteeInEpoch(t, s, testAPI, 0, expectedCommitteeInEpoch0) + assertCommitteeSizeInEpoch(t, s, testAPI, 0, 3) + } + + // Try to rotate committee with no candidates. Instead, set reuse of committee. + { + epoch := iotago.EpochIndex(3) + accountsData := make(accounts.AccountsData, 0) + + _, err := s.RotateCommittee(epoch, accountsData) + require.Error(t, err) - committee, exists = topStakersSeatManager.CommitteeInSlot(tpkg.TestAPI.TimeProvider().EpochStart(1)) + // Set reuse of committee manually. + expectedCommitteeInEpoch2.SetReused() + err = s.SetCommittee(epoch, expectedCommitteeInEpoch2) + require.NoError(t, err) + + assertCommitteeInEpoch(t, s, testAPI, 3, expectedCommitteeInEpoch2) + assertCommitteeSizeInEpoch(t, s, testAPI, 3, 1) + + assertCommitteeInEpoch(t, s, testAPI, 2, expectedCommitteeInEpoch2) + assertCommitteeSizeInEpoch(t, s, testAPI, 2, 1) + + // Make sure that the committee retrieved from the committee store matches the expected (with reused flag set). 
+ loadedCommittee, err := s.committeeStore.Load(epoch) + require.NoError(t, err) + require.True(t, loadedCommittee.IsReused()) + assertCommittee(t, expectedCommitteeInEpoch2, loadedCommittee.SelectCommittee(loadedCommittee.IDs()...)) + } +} + +func addCommitteeMember(t *testing.T, committee *account.Accounts, pool *account.Pool) iotago.AccountID { + accountID := tpkg.RandAccountID() + require.NoError(t, committee.Set(accountID, pool)) + + return accountID +} + +func assertCommitteeSizeInEpoch(t *testing.T, seatManager *SeatManager, testAPI iotago.API, epoch iotago.EpochIndex, expectedCommitteeSize int) { + require.Equal(t, expectedCommitteeSize, seatManager.SeatCountInEpoch(epoch)) + require.Equal(t, expectedCommitteeSize, seatManager.SeatCountInSlot(testAPI.TimeProvider().EpochStart(epoch))) + require.Equal(t, expectedCommitteeSize, seatManager.SeatCountInSlot(testAPI.TimeProvider().EpochEnd(epoch))) +} + +func assertCommitteeInEpoch(t *testing.T, seatManager *SeatManager, testAPI iotago.API, epoch iotago.EpochIndex, expectedCommittee *account.Accounts) { + committee, exists := seatManager.CommitteeInEpoch(epoch) require.True(t, exists) assertCommittee(t, expectedCommittee, committee) - // Make sure that the previous committee was not modified and is still accessible. - committee, exists = topStakersSeatManager.CommitteeInEpoch(0) + committee, exists = seatManager.CommitteeInSlot(testAPI.TimeProvider().EpochStart(epoch)) require.True(t, exists) - assertCommittee(t, initialCommittee, committee) + assertCommittee(t, expectedCommittee, committee) - committee, exists = topStakersSeatManager.CommitteeInSlot(tpkg.TestAPI.TimeProvider().EpochEnd(0)) + committee, exists = seatManager.CommitteeInSlot(testAPI.TimeProvider().EpochEnd(epoch)) require.True(t, exists) - assertCommittee(t, initialCommittee, committee) + assertCommittee(t, expectedCommittee, committee) } func assertCommittee(t *testing.T, expectedCommittee *account.Accounts, actualCommittee *account.SeatedAccounts) { diff --git a/pkg/protocol/sybilprotection/sybilprotectionv1/performance/performance.go b/pkg/protocol/sybilprotection/sybilprotectionv1/performance/performance.go index 6c855d4e4..9e11f42c7 100644 --- a/pkg/protocol/sybilprotection/sybilprotectionv1/performance/performance.go +++ b/pkg/protocol/sybilprotection/sybilprotectionv1/performance/performance.go @@ -34,7 +34,16 @@ type Tracker struct { mutex syncutils.RWMutex } -func NewTracker(rewardsStorePerEpochFunc func(epoch iotago.EpochIndex) (kvstore.KVStore, error), poolStatsStore *epochstore.Store[*model.PoolsStats], committeeStore *epochstore.Store[*account.Accounts], committeeCandidatesInEpochFunc func(epoch iotago.EpochIndex) (kvstore.KVStore, error), validatorPerformancesFunc func(slot iotago.SlotIndex) (*slotstore.Store[iotago.AccountID, *model.ValidatorPerformance], error), latestAppliedEpoch iotago.EpochIndex, apiProvider iotago.APIProvider, errHandler func(error)) *Tracker { +func NewTracker( + rewardsStorePerEpochFunc func(epoch iotago.EpochIndex) (kvstore.KVStore, error), + poolStatsStore *epochstore.Store[*model.PoolsStats], + committeeStore *epochstore.Store[*account.Accounts], + committeeCandidatesInEpochFunc func(epoch iotago.EpochIndex) (kvstore.KVStore, error), + validatorPerformancesFunc func(slot iotago.SlotIndex) (*slotstore.Store[iotago.AccountID, *model.ValidatorPerformance], error), + latestAppliedEpoch iotago.EpochIndex, + apiProvider iotago.APIProvider, + errHandler func(error), +) *Tracker { return &Tracker{ nextEpochCommitteeCandidates: 
shrinkingmap.New[iotago.AccountID, iotago.SlotIndex](), rewardsStorePerEpochFunc: rewardsStorePerEpochFunc, @@ -80,12 +89,12 @@ func (t *Tracker) TrackCandidateBlock(block *blocks.Block) { t.mutex.Lock() defer t.mutex.Unlock() - blockEpoch := t.apiProvider.APIForSlot(block.ID().Slot()).TimeProvider().EpochFromSlot(block.ID().Slot()) - if block.Payload().PayloadType() != iotago.PayloadCandidacyAnnouncement { return } + blockEpoch := t.apiProvider.APIForSlot(block.ID().Slot()).TimeProvider().EpochFromSlot(block.ID().Slot()) + var rollback bool t.nextEpochCommitteeCandidates.Compute(block.ProtocolBlock().Header.IssuerID, func(currentValue iotago.SlotIndex, exists bool) iotago.SlotIndex { if !exists || currentValue > block.ID().Slot() { @@ -125,6 +134,7 @@ func (t *Tracker) TrackCandidateBlock(block *blocks.Block) { } +// EligibleValidatorCandidates returns the eligible validator candidates registered in the given epoch for the next epoch. func (t *Tracker) EligibleValidatorCandidates(epoch iotago.EpochIndex) (ds.Set[iotago.AccountID], error) { t.mutex.RLock() defer t.mutex.RUnlock() @@ -132,7 +142,7 @@ func (t *Tracker) EligibleValidatorCandidates(epoch iotago.EpochIndex) (ds.Set[i return t.getValidatorCandidates(epoch) } -// ValidatorCandidates returns the registered validator candidates for the given epoch. +// ValidatorCandidates returns the eligible validator candidates registered in the given epoch for the next epoch. func (t *Tracker) ValidatorCandidates(epoch iotago.EpochIndex) (ds.Set[iotago.AccountID], error) { t.mutex.RLock() defer t.mutex.RUnlock() @@ -143,14 +153,7 @@ func (t *Tracker) ValidatorCandidates(epoch iotago.EpochIndex) (ds.Set[iotago.Ac func (t *Tracker) getValidatorCandidates(epoch iotago.EpochIndex) (ds.Set[iotago.AccountID], error) { candidates := ds.NewSet[iotago.AccountID]() - // Epoch 0 has no candidates as it's the genesis committee. 
- if epoch == 0 { - return candidates, nil - } - - // we store candidates in the store for the epoch of their activity, but the passed argument points to the target epoch, - // so it's necessary to subtract one epoch from the passed value - candidateStore, err := t.committeeCandidatesInEpochFunc(epoch - 1) + candidateStore, err := t.committeeCandidatesInEpochFunc(epoch) if err != nil { return nil, ierrors.Wrapf(err, "error while retrieving candidates for epoch %d", epoch) } diff --git a/pkg/protocol/sybilprotection/sybilprotectionv1/performance/tracker_test.go b/pkg/protocol/sybilprotection/sybilprotectionv1/performance/tracker_test.go index 1eddb125d..731e89acc 100644 --- a/pkg/protocol/sybilprotection/sybilprotectionv1/performance/tracker_test.go +++ b/pkg/protocol/sybilprotection/sybilprotectionv1/performance/tracker_test.go @@ -136,28 +136,8 @@ func TestManager_Candidates(t *testing.T) { ts.Instance.TrackCandidateBlock(blocks.NewBlock(lo.PanicOnErr(model.BlockFromBlock(block6)))) } - require.True(t, lo.PanicOnErr(ts.Instance.EligibleValidatorCandidates(1)).HasAll(ds.NewReadableSet(issuer1, issuer2, issuer3))) - require.True(t, lo.PanicOnErr(ts.Instance.ValidatorCandidates(1)).HasAll(ds.NewReadableSet(issuer1, issuer2, issuer3))) - require.True(t, lo.PanicOnErr(ts.Instance.EligibleValidatorCandidates(2)).IsEmpty()) - require.True(t, lo.PanicOnErr(ts.Instance.ValidatorCandidates(2)).IsEmpty()) - - // retrieve epoch candidates for epoch 0, because we candidates prefixed with epoch in which they candidated - candidatesStore, err := ts.Instance.committeeCandidatesInEpochFunc(0) - require.NoError(t, err) - - candidacySlotIssuer1, err := candidatesStore.Get(issuer1[:]) - require.NoError(t, err) - require.Equal(t, iotago.SlotIndex(1).MustBytes(), candidacySlotIssuer1) - - candidacySlotIssuer2, err := candidatesStore.Get(issuer2[:]) - require.NoError(t, err) - require.Equal(t, iotago.SlotIndex(2).MustBytes(), candidacySlotIssuer2) - - candidacySlotIssuer3, err := candidatesStore.Get(issuer3[:]) - require.NoError(t, err) - require.Equal(t, iotago.SlotIndex(3).MustBytes(), candidacySlotIssuer3) - - ts.Instance.ClearCandidates() - - require.True(t, ts.Instance.nextEpochCommitteeCandidates.IsEmpty()) + require.True(t, lo.PanicOnErr(ts.Instance.EligibleValidatorCandidates(0)).HasAll(ds.NewReadableSet(issuer1, issuer2, issuer3))) + require.True(t, lo.PanicOnErr(ts.Instance.ValidatorCandidates(0)).HasAll(ds.NewReadableSet(issuer1, issuer2, issuer3))) + require.True(t, lo.PanicOnErr(ts.Instance.EligibleValidatorCandidates(1)).IsEmpty()) + require.True(t, lo.PanicOnErr(ts.Instance.ValidatorCandidates(1)).IsEmpty()) } diff --git a/pkg/protocol/sybilprotection/sybilprotectionv1/sybilprotection.go b/pkg/protocol/sybilprotection/sybilprotectionv1/sybilprotection.go index 7500e656b..f99050243 100644 --- a/pkg/protocol/sybilprotection/sybilprotectionv1/sybilprotection.go +++ b/pkg/protocol/sybilprotection/sybilprotectionv1/sybilprotection.go @@ -18,7 +18,7 @@ import ( "github.com/iotaledger/iota-core/pkg/protocol/engine/ledger" "github.com/iotaledger/iota-core/pkg/protocol/sybilprotection" "github.com/iotaledger/iota-core/pkg/protocol/sybilprotection/seatmanager" - "github.com/iotaledger/iota-core/pkg/protocol/sybilprotection/seatmanager/poa" + "github.com/iotaledger/iota-core/pkg/protocol/sybilprotection/seatmanager/topstakers" "github.com/iotaledger/iota-core/pkg/protocol/sybilprotection/sybilprotectionv1/performance" iotago "github.com/iotaledger/iota.go/v4" 
"github.com/iotaledger/iota.go/v4/nodeclient/apimodels" @@ -51,7 +51,7 @@ func NewProvider(opts ...options.Option[SybilProtection]) module.Provider[*engin events: sybilprotection.NewEvents(), apiProvider: e, - optsSeatManagerProvider: poa.NewProvider(), + optsSeatManagerProvider: topstakers.NewProvider(), }, opts, func(o *SybilProtection) { o.seatManager = o.optsSeatManagerProvider(e) @@ -106,6 +106,10 @@ func (o *SybilProtection) TrackBlock(block *blocks.Block) { return } + if block.Payload().PayloadType() != iotago.PayloadCandidacyAnnouncement { + return + } + accountData, exists, err := o.ledger.Account(block.ProtocolBlock().Header.IssuerID, block.SlotCommitmentID().Slot()) if err != nil { o.errHandler(ierrors.Wrapf(err, "error while retrieving account from account %s in slot %d from accounts ledger", block.ProtocolBlock().Header.IssuerID, block.SlotCommitmentID().Slot())) @@ -133,9 +137,7 @@ func (o *SybilProtection) TrackBlock(block *blocks.Block) { return } - if block.Payload().PayloadType() == iotago.PayloadCandidacyAnnouncement { - o.performanceTracker.TrackCandidateBlock(block) - } + o.performanceTracker.TrackCandidateBlock(block) } func (o *SybilProtection) CommitSlot(slot iotago.SlotIndex) (committeeRoot iotago.Identifier, rewardsRoot iotago.Identifier, err error) { @@ -155,23 +157,11 @@ func (o *SybilProtection) CommitSlot(slot iotago.SlotIndex) (committeeRoot iotag if _, committeeExists := o.seatManager.CommitteeInEpoch(nextEpoch); !committeeExists { // If the committee for the epoch wasn't set before due to finalization of a slot, // we promote the current committee to also serve in the next epoch. - committee, exists := o.seatManager.CommitteeInEpoch(currentEpoch) - if !exists { - // that should never happen as it is already the fallback strategy - panic(fmt.Sprintf("committee for current epoch %d not found", currentEpoch)) - } - - committeeAccounts, err := committee.Accounts() + committeeAccounts, err := o.reuseCommittee(currentEpoch, nextEpoch) if err != nil { - return iotago.Identifier{}, iotago.Identifier{}, ierrors.Wrapf(err, "failed to get accounts from committee for epoch %d", currentEpoch) + return iotago.Identifier{}, iotago.Identifier{}, ierrors.Wrapf(err, "failed to reuse committee for epoch %d", nextEpoch) } - committeeAccounts.SetReused() - if err = o.seatManager.SetCommittee(nextEpoch, committeeAccounts); err != nil { - return iotago.Identifier{}, iotago.Identifier{}, ierrors.Wrapf(err, "failed to set committee for epoch %d", nextEpoch) - } - o.performanceTracker.ClearCandidates() - o.events.CommitteeSelected.Trigger(committeeAccounts, nextEpoch) } } @@ -363,7 +353,7 @@ func (o *SybilProtection) OrderedRegisteredCandidateValidatorsList(epoch iotago. } active := activeCandidates.Has(candidate) validatorResp = append(validatorResp, &apimodels.ValidatorResponse{ - AccountID: accountData.ID, + AddressBech32: accountData.ID.ToAddress().Bech32(o.apiProvider.CommittedAPI().ProtocolParameters().Bech32HRP()), StakingEpochEnd: accountData.StakeEndEpoch, PoolStake: accountData.ValidatorStake + accountData.DelegationStake, ValidatorStake: accountData.ValidatorStake, @@ -385,15 +375,49 @@ func (o *SybilProtection) OrderedRegisteredCandidateValidatorsList(epoch iotago. 
return validatorResp, nil } +func (o *SybilProtection) reuseCommittee(currentEpoch iotago.EpochIndex, targetEpoch iotago.EpochIndex) (*account.Accounts, error) { + committee, exists := o.seatManager.CommitteeInEpoch(currentEpoch) + if !exists { + // that should never happen as it is already the fallback strategy + panic(fmt.Sprintf("committee for current epoch %d not found", currentEpoch)) + } + + committeeAccounts, err := committee.Accounts() + if err != nil { + return nil, ierrors.Wrapf(err, "failed to get accounts from committee for epoch %d", currentEpoch) + } + + committeeAccounts.SetReused() + if err = o.seatManager.SetCommittee(targetEpoch, committeeAccounts); err != nil { + return nil, ierrors.Wrapf(err, "failed to set committee for epoch %d", targetEpoch) + } + + o.performanceTracker.ClearCandidates() + + return committeeAccounts, nil +} + func (o *SybilProtection) selectNewCommittee(slot iotago.SlotIndex) (*account.Accounts, error) { timeProvider := o.apiProvider.APIForSlot(slot).TimeProvider() currentEpoch := timeProvider.EpochFromSlot(slot) nextEpoch := currentEpoch + 1 - candidates, err := o.performanceTracker.EligibleValidatorCandidates(nextEpoch) + + // We get the list of candidates for the next epoch. They are registered in the current epoch. + candidates, err := o.performanceTracker.EligibleValidatorCandidates(currentEpoch) if err != nil { return nil, ierrors.Wrapf(err, "failed to retrieve candidates for epoch %d", nextEpoch) } + // If there's no candidate, reuse the current committee. + if candidates.Size() == 0 { + committeeAccounts, err := o.reuseCommittee(currentEpoch, nextEpoch) + if err != nil { + return nil, ierrors.Wrapf(err, "failed to reuse committee (due to no candidates) for epoch %d", nextEpoch) + } + + return committeeAccounts, nil + } + candidateAccounts := make(accounts.AccountsData, 0) if err := candidates.ForEach(func(candidate iotago.AccountID) error { accountData, exists, err := o.ledger.Account(candidate, slot) diff --git a/pkg/storage/database/db_instance.go b/pkg/storage/database/db_instance.go index 14cdabeaf..63ef9a41e 100644 --- a/pkg/storage/database/db_instance.go +++ b/pkg/storage/database/db_instance.go @@ -1,6 +1,8 @@ package database import ( + "sync/atomic" + "github.com/iotaledger/hive.go/ierrors" "github.com/iotaledger/hive.go/kvstore" "github.com/iotaledger/hive.go/lo" @@ -10,6 +12,8 @@ type DBInstance struct { store *lockedKVStore // KVStore that is used to access the DB instance healthTracker *kvstore.StoreHealthTracker dbConfig Config + isClosed atomic.Bool + isShutdown atomic.Bool } func NewDBInstance(dbConfig Config) *DBInstance { @@ -18,7 +22,13 @@ func NewDBInstance(dbConfig Config) *DBInstance { panic(err) } - lockableKVStore := newLockedKVStore(db) + dbInstance := &DBInstance{ + dbConfig: dbConfig, + } + + lockableKVStore := newLockedKVStore(db, dbInstance) + + dbInstance.store = lockableKVStore // HealthTracker state is only modified while holding the lock on the lockableKVStore; // that's why it needs to use openableKVStore (which does not lock) instead of lockableKVStore to avoid a deadlock. 
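A rough usage sketch of the lifecycle that the new isClosed/isShutdown flags are meant to enforce; the Shutdown, Flush and Open semantics are the ones added in the hunks below, and the Config literal is only a placeholder (illustrative, not part of this diff):

    package main

    import "github.com/iotaledger/iota-core/pkg/storage/database"

    func main() {
        db := database.NewDBInstance(database.Config{ /* Directory, Engine, ... */ })

        db.Close() // flushes, marks the store healthy and sets isClosed
        db.Open()  // re-opening a closed (but not shut down) instance is allowed

        db.Shutdown() // closes and additionally sets isShutdown
        // db.Open()  // would panic: a shut-down instance must never be reopened
    }

Calling Open on an instance that is not closed also panics, which is what lets openableKVStore.instance() lazily reopen the underlying store only when it was actually closed.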
@@ -30,10 +40,23 @@ func NewDBInstance(dbConfig Config) *DBInstance { panic(err) } - return &DBInstance{ - store: lockableKVStore, - healthTracker: storeHealthTracker, - dbConfig: dbConfig, + dbInstance.healthTracker = storeHealthTracker + + return dbInstance +} + +func (d *DBInstance) Shutdown() { + d.isShutdown.Store(true) + + d.Close() +} + +func (d *DBInstance) Flush() { + d.store.Lock() + defer d.store.Unlock() + + if !d.isClosed.Load() { + _ = d.store.instance().Flush() } } @@ -45,20 +68,34 @@ func (d *DBInstance) Close() { } func (d *DBInstance) CloseWithoutLocking() { - if err := d.healthTracker.MarkHealthy(); err != nil { - panic(err) - } + if !d.isClosed.Load() { + if err := d.healthTracker.MarkHealthy(); err != nil { + panic(err) + } - if err := FlushAndClose(d.store); err != nil { - panic(err) + if err := FlushAndClose(d.store); err != nil { + panic(err) + } + + d.isClosed.Store(true) } } // Open re-opens a closed DBInstance. It must only be called while holding a lock on DBInstance, // otherwise it might cause a race condition and corruption of node's state. func (d *DBInstance) Open() { + if !d.isClosed.Load() { + panic("cannot open DBInstance that is not closed") + } + + if d.isShutdown.Load() { + panic("cannot open DBInstance that is shutdown") + } + d.store.Replace(lo.PanicOnErr(StoreWithDefaultSettings(d.dbConfig.Directory, false, d.dbConfig.Engine))) + d.isClosed.Store(false) + if err := d.healthTracker.MarkCorrupted(); err != nil { panic(err) } diff --git a/pkg/storage/database/lockedkvstore.go b/pkg/storage/database/lockedkvstore.go index cb365f46e..c76601853 100644 --- a/pkg/storage/database/lockedkvstore.go +++ b/pkg/storage/database/lockedkvstore.go @@ -14,9 +14,9 @@ type lockedKVStore struct { instanceMutex *syncutils.RWMutex } -func newLockedKVStore(storeInstance kvstore.KVStore) *lockedKVStore { +func newLockedKVStore(storeInstance kvstore.KVStore, dbInstance *DBInstance) *lockedKVStore { return &lockedKVStore{ - openableKVStore: newOpenableKVStore(storeInstance), + openableKVStore: newOpenableKVStore(storeInstance, dbInstance), instanceMutex: new(syncutils.RWMutex), } } diff --git a/pkg/storage/database/openablekvstore.go b/pkg/storage/database/openablekvstore.go index 9ff04df3a..35e4cd818 100644 --- a/pkg/storage/database/openablekvstore.go +++ b/pkg/storage/database/openablekvstore.go @@ -10,25 +10,38 @@ import ( ) type openableKVStore struct { + dbInstance *DBInstance storeInstance kvstore.KVStore // KVStore that is used to access the DB instance parentStore *openableKVStore dbPrefix kvstore.KeyPrefix } -func newOpenableKVStore(storeInstance kvstore.KVStore) *openableKVStore { +func newOpenableKVStore(storeInstance kvstore.KVStore, dbInstance *DBInstance) *openableKVStore { return &openableKVStore{ + dbInstance: dbInstance, storeInstance: storeInstance, parentStore: nil, dbPrefix: kvstore.EmptyPrefix, } } +func (s *openableKVStore) topParent() *openableKVStore { + current := s + for current.parentStore != nil { + current = current.parentStore + } + + return current +} + func (s *openableKVStore) instance() kvstore.KVStore { - if s.storeInstance != nil { - return s.storeInstance + parent := s.topParent() + + if parent.dbInstance.isClosed.Load() { + parent.dbInstance.Open() } - return s.parentStore.instance() + return parent.storeInstance } func (s *openableKVStore) Replace(newKVStore kvstore.KVStore) { @@ -44,13 +57,16 @@ func (s *openableKVStore) Replace(newKVStore kvstore.KVStore) { func (s *openableKVStore) WithRealm(realm kvstore.Realm) (kvstore.KVStore, 
error) { return s.withRealm(realm) } + func (s *openableKVStore) withRealm(realm kvstore.Realm) (kvstore.KVStore, error) { return &openableKVStore{ + dbInstance: nil, storeInstance: nil, parentStore: s, dbPrefix: realm, }, nil } + func (s *openableKVStore) WithExtendedRealm(realm kvstore.Realm) (kvstore.KVStore, error) { return s.withRealm(s.buildKeyPrefix(realm)) } @@ -98,8 +114,10 @@ func (s *openableKVStore) DeletePrefix(prefix kvstore.KeyPrefix) error { func (s *openableKVStore) Flush() error { return s.instance().Flush() } + func (s *openableKVStore) Close() error { - return s.instance().Close() + s.topParent().dbInstance.CloseWithoutLocking() + return nil } func (s *openableKVStore) Batched() (kvstore.BatchedMutations, error) { diff --git a/pkg/storage/database/utils.go b/pkg/storage/database/utils.go index 0b47cf41b..eaded98c8 100644 --- a/pkg/storage/database/utils.go +++ b/pkg/storage/database/utils.go @@ -1,9 +1,9 @@ package database func FlushAndClose(store *lockedKVStore) error { - if err := store.FlushWithoutLocking(); err != nil { + if err := store.instance().Flush(); err != nil { return err } - return store.CloseWithoutLocking() + return store.instance().Close() } diff --git a/pkg/storage/prunable/bucket_manager.go b/pkg/storage/prunable/bucket_manager.go index 907fd054a..a76a96259 100644 --- a/pkg/storage/prunable/bucket_manager.go +++ b/pkg/storage/prunable/bucket_manager.go @@ -17,8 +17,10 @@ import ( ) type BucketManager struct { - openDBs *cache.Cache[iotago.EpochIndex, *database.DBInstance] - openDBsMutex syncutils.RWMutex + openDBsCache *cache.Cache[iotago.EpochIndex, *database.DBInstance] + openDBsCacheMutex syncutils.RWMutex + + openDBs *shrinkingmap.ShrinkingMap[iotago.EpochIndex, *database.DBInstance] lastPrunedEpoch *model.EvictionIndex[iotago.EpochIndex] lastPrunedMutex syncutils.RWMutex @@ -38,19 +40,14 @@ func NewBucketManager(dbConfig database.Config, errorHandler func(error), opts . optsMaxOpenDBs: 5, dbConfig: dbConfig, errorHandler: errorHandler, + openDBs: shrinkingmap.New[iotago.EpochIndex, *database.DBInstance](), dbSizes: shrinkingmap.New[iotago.EpochIndex, int64](), lastPrunedEpoch: model.NewEvictionIndex[iotago.EpochIndex](), }, opts, func(m *BucketManager) { - m.openDBs = cache.New[iotago.EpochIndex, *database.DBInstance](m.optsMaxOpenDBs) - m.openDBs.SetEvictCallback(func(baseIndex iotago.EpochIndex, db *database.DBInstance) { + // We use an LRU cache to try closing unnecessary databases. 
+ m.openDBsCache = cache.New[iotago.EpochIndex, *database.DBInstance](m.optsMaxOpenDBs) + m.openDBsCache.SetEvictCallback(func(baseIndex iotago.EpochIndex, db *database.DBInstance) { db.Close() - - size, err := dbPrunableDirectorySize(dbConfig.Directory, baseIndex) - if err != nil { - errorHandler(ierrors.Wrapf(err, "failed to get size of prunable directory for base index %d", baseIndex)) - } - - m.dbSizes.Set(baseIndex, size) }) }) } @@ -74,12 +71,15 @@ func (b *BucketManager) Get(epoch iotago.EpochIndex, realm kvstore.Realm) (kvsto } func (b *BucketManager) Shutdown() { - b.openDBsMutex.Lock() - defer b.openDBsMutex.Unlock() + b.openDBsCacheMutex.Lock() + defer b.openDBsCacheMutex.Unlock() - b.openDBs.Each(func(epoch iotago.EpochIndex, db *database.DBInstance) { - db.Close() - b.openDBs.Remove(epoch) + b.openDBs.ForEach(func(epoch iotago.EpochIndex, db *database.DBInstance) bool { + db.Shutdown() + b.openDBsCache.Remove(epoch) + b.openDBs.Delete(epoch) + + return true }) } @@ -92,27 +92,21 @@ func (b *BucketManager) TotalSize() int64 { return true }) - b.openDBsMutex.Lock() - defer b.openDBsMutex.Unlock() - // Add up all the open databases - b.openDBs.Each(func(key iotago.EpochIndex, val *database.DBInstance) { + b.openDBs.ForEach(func(key iotago.EpochIndex, val *database.DBInstance) bool { size, err := dbPrunableDirectorySize(b.dbConfig.Directory, key) if err != nil { b.errorHandler(ierrors.Wrapf(err, "dbPrunableDirectorySize failed for key %s: %s", b.dbConfig.Directory, key)) - - return } sum += size + + return true }) return sum } func (b *BucketManager) BucketSize(epoch iotago.EpochIndex) (int64, error) { - b.openDBsMutex.RLock() - defer b.openDBsMutex.RUnlock() - size, exists := b.dbSizes.Get(epoch) if exists { return size, nil @@ -172,23 +166,26 @@ func (b *BucketManager) RestoreFromDisk() (lastPrunedEpoch iotago.EpochIndex) { // epochIndex 0 -> db 0 // epochIndex 1 -> db 1 // epochIndex 2 -> db 2 -func (b *BucketManager) getDBInstance(epoch iotago.EpochIndex) (db *database.DBInstance) { +func (b *BucketManager) getDBInstance(epoch iotago.EpochIndex) *database.DBInstance { // Lock global mutex to prevent closing and copying storage data on disk during engine switching. b.mutex.RLock() defer b.mutex.RUnlock() - b.openDBsMutex.Lock() - defer b.openDBsMutex.Unlock() + b.openDBsCacheMutex.Lock() + defer b.openDBsCacheMutex.Unlock() // check if exists again, as other goroutine might have created it in parallel - db, exists := b.openDBs.Get(epoch) - if !exists { - db = database.NewDBInstance(b.dbConfig.WithDirectory(dbPathFromIndex(b.dbConfig.Directory, epoch))) + db := lo.Return1(b.openDBs.GetOrCreate(epoch, func() *database.DBInstance { + db := database.NewDBInstance(b.dbConfig.WithDirectory(dbPathFromIndex(b.dbConfig.Directory, epoch))) // Remove the cached db size since we will open the db b.dbSizes.Delete(epoch) - b.openDBs.Put(epoch, db) - } + + return db + })) + + // Mark the db as used in the cache + b.openDBsCache.Put(epoch, db) return db } @@ -211,8 +208,8 @@ func (b *BucketManager) Prune(epoch iotago.EpochIndex) error { // DeleteBucket deletes directory that stores the data for the given bucket and returns boolean // flag indicating whether a directory for that bucket existed. 
func (b *BucketManager) DeleteBucket(epoch iotago.EpochIndex) (deleted bool) { - b.openDBsMutex.Lock() - defer b.openDBsMutex.Unlock() + b.openDBsCacheMutex.Lock() + defer b.openDBsCacheMutex.Unlock() if exists, err := PathExists(dbPathFromIndex(b.dbConfig.Directory, epoch)); err != nil { panic(err) @@ -222,8 +219,9 @@ func (b *BucketManager) DeleteBucket(epoch iotago.EpochIndex) (deleted bool) { db, exists := b.openDBs.Get(epoch) if exists { - db.Close() - b.openDBs.Remove(epoch) + db.Shutdown() + b.openDBsCache.Remove(epoch) + b.openDBs.Delete(epoch) } if err := os.RemoveAll(dbPathFromIndex(b.dbConfig.Directory, epoch)); err != nil { @@ -246,24 +244,22 @@ func (b *BucketManager) PruneSlots(epoch iotago.EpochIndex, pruningRange [2]iota } } - // shutting down the storage does not prevent this storage from being used again and only forces a flush. - b.Shutdown() + _ = b.Flush() return nil } func (b *BucketManager) Flush() error { - b.openDBsMutex.RLock() - defer b.openDBsMutex.RUnlock() - - var err error - b.openDBs.Each(func(epoch iotago.EpochIndex, db *database.DBInstance) { - if err = db.KVStore().Flush(); err != nil { - return + var innerErr error + b.openDBs.ForEach(func(epoch iotago.EpochIndex, db *database.DBInstance) bool { + if err := db.KVStore().Flush(); err != nil { + innerErr = err } + + return true }) - return err + return innerErr } func PathExists(path string) (bool, error) { diff --git a/pkg/tests/accounts_test.go b/pkg/tests/accounts_test.go index b23ab1c02..755bf0765 100644 --- a/pkg/tests/accounts_test.go +++ b/pkg/tests/accounts_test.go @@ -44,7 +44,7 @@ func Test_TransitionAndDestroyAccount(t *testing.T) { testsuite.DefaultLivenessThresholdUpperBoundInSeconds, testsuite.DefaultMinCommittableAge, 100, - testsuite.DefaultEpochNearingThreshold, + 120, ), ), ) @@ -172,7 +172,7 @@ func Test_StakeDelegateAndDelayedClaim(t *testing.T) { testsuite.DefaultLivenessThresholdUpperBoundInSeconds, testsuite.DefaultMinCommittableAge, 100, - testsuite.DefaultEpochNearingThreshold, + 120, ), ), ) @@ -209,7 +209,7 @@ func Test_StakeDelegateAndDelayedClaim(t *testing.T) { BlockIssuerKeys: wallet.BlockIssuer.BlockIssuerKeys(), }, ts.Nodes()...) 
- //CREATE NEW ACCOUNT WITH BLOCK ISSUER AND STAKING FEATURES FROM BASIC UTXO + // CREATE NEW ACCOUNT WITH BLOCK ISSUER AND STAKING FEATURES FROM BASIC UTXO newAccountBlockIssuerKey := utils.RandBlockIssuerKey() // set the expiry slot of the transitioned genesis account to the latest committed + MaxCommittableAge newAccountExpirySlot := node1.Protocol.MainEngineInstance().Storage.Settings().LatestCommitment().Slot() + ts.API.ProtocolParameters().MaxCommittableAge() @@ -348,7 +348,7 @@ func Test_ImplicitAccounts(t *testing.T) { testsuite.DefaultLivenessThresholdUpperBoundInSeconds, testsuite.DefaultMinCommittableAge, 100, - testsuite.DefaultEpochNearingThreshold, + 120, ), ), ) diff --git a/pkg/tests/committee_rotation_test.go b/pkg/tests/committee_rotation_test.go index 835df5931..a7415bc24 100644 --- a/pkg/tests/committee_rotation_test.go +++ b/pkg/tests/committee_rotation_test.go @@ -2,13 +2,14 @@ package tests import ( "testing" + "time" "github.com/iotaledger/hive.go/runtime/options" "github.com/iotaledger/iota-core/pkg/protocol" + "github.com/iotaledger/iota-core/pkg/protocol/engine/notarization/slotnotarization" "github.com/iotaledger/iota-core/pkg/protocol/sybilprotection/seatmanager/topstakers" "github.com/iotaledger/iota-core/pkg/protocol/sybilprotection/sybilprotectionv1" "github.com/iotaledger/iota-core/pkg/testsuite" - "github.com/iotaledger/iota-core/pkg/testsuite/snapshotcreator" iotago "github.com/iotaledger/iota.go/v4" ) @@ -24,17 +25,11 @@ func Test_TopStakersRotation(t *testing.T) { iotago.WithLivenessOptions( 10, 10, - 3, + 2, 4, 5, ), - ), - testsuite.WithSnapshotOptions( - snapshotcreator.WithSeatManagerProvider( - topstakers.NewProvider( - topstakers.WithSeatCount(3), - ), - ), + iotago.WithTargetCommitteeSize(3), ), ) defer ts.Shutdown() @@ -47,71 +42,135 @@ func Test_TopStakersRotation(t *testing.T) { ts.AddValidatorNode("node6", 1_000_001) ts.AddGenesisWallet("default", node1) - nodeOptions := make(map[string][]options.Option[protocol.Protocol]) + ts.AddNode("node7") - for _, node := range ts.Nodes() { - nodeOptions[node.Name] = []options.Option[protocol.Protocol]{protocol.WithSybilProtectionProvider( + nodeOpts := []options.Option[protocol.Protocol]{ + protocol.WithNotarizationProvider( + slotnotarization.NewProvider(), + ), + protocol.WithSybilProtectionProvider( sybilprotectionv1.NewProvider( sybilprotectionv1.WithSeatManagerProvider( topstakers.NewProvider( - topstakers.WithSeatCount(3), + // We need to make sure that inactive nodes are evicted from the committee to continue acceptance. + topstakers.WithActivityWindow(15 * time.Second), ), ), ), - )} + ), } - ts.Run(true, nodeOptions) - for _, node := range ts.Nodes() { - nodeOptions[node.Name] = []options.Option[protocol.Protocol]{protocol.WithSybilProtectionProvider( - sybilprotectionv1.NewProvider( - sybilprotectionv1.WithSeatManagerProvider( - topstakers.NewProvider(topstakers.WithSeatCount(3)), - ), - ), - )} - } + ts.Run(true, map[string][]options.Option[protocol.Protocol]{ + "node1": nodeOpts, + "node2": nodeOpts, + "node3": nodeOpts, + "node4": nodeOpts, + "node5": nodeOpts, + "node6": nodeOpts, + "node7": nodeOpts, + }) + ts.AssertSybilProtectionCommittee(0, []iotago.AccountID{ ts.Node("node1").Validator.AccountID, ts.Node("node2").Validator.AccountID, ts.Node("node3").Validator.AccountID, }, ts.Nodes()...) - ts.IssueBlocksAtSlots("wave-1:", []iotago.SlotIndex{1, 2, 3, 4}, 4, "Genesis", ts.Nodes(), true, nil) + // Select committee for epoch 1 and test candidacy announcements at different times. 
+ { + ts.IssueBlocksAtSlots("wave-1:", []iotago.SlotIndex{1, 2, 3, 4}, 4, "Genesis", ts.Nodes(), true, nil) - ts.IssueCandidacyAnnouncementInSlot("node1-candidacy:1", 4, "wave-1:4.3", ts.Wallet("node1")) - ts.IssueCandidacyAnnouncementInSlot("node4-candidacy:1", 5, "node1-candidacy:1", ts.Wallet("node4")) + ts.IssueCandidacyAnnouncementInSlot("node1-candidacy:1", 4, "wave-1:4.3", ts.Wallet("node1")) + ts.IssueCandidacyAnnouncementInSlot("node4-candidacy:1", 5, "node1-candidacy:1", ts.Wallet("node4")) - ts.IssueBlocksAtSlots("wave-2:", []iotago.SlotIndex{5, 6, 7, 8, 9}, 4, "node4-candidacy:1", ts.Nodes(), true, nil) + ts.IssueBlocksAtSlots("wave-2:", []iotago.SlotIndex{5, 6, 7, 8, 9}, 4, "node4-candidacy:1", ts.Nodes(), true, nil) - ts.IssueCandidacyAnnouncementInSlot("node4-candidacy:2", 9, "wave-2:9.3", ts.Wallet("node4")) - ts.IssueCandidacyAnnouncementInSlot("node5-candidacy:1", 9, "node4-candidacy:2", ts.Wallet("node5")) + ts.IssueCandidacyAnnouncementInSlot("node4-candidacy:2", 9, "wave-2:9.3", ts.Wallet("node4")) + ts.IssueCandidacyAnnouncementInSlot("node5-candidacy:1", 9, "node4-candidacy:2", ts.Wallet("node5")) - // This candidacy should be considered as it's announced at the last possible slot. - ts.IssueCandidacyAnnouncementInSlot("node6-candidacy:1", 10, "node5-candidacy:1", ts.Wallet("node6")) + // This candidacy should be considered as it's announced at the last possible slot. + ts.IssueCandidacyAnnouncementInSlot("node6-candidacy:1", 10, "node5-candidacy:1", ts.Wallet("node6")) - ts.IssueBlocksAtSlots("wave-3:", []iotago.SlotIndex{10}, 4, "node6-candidacy:1", ts.Nodes(), true, nil) + ts.IssueBlocksAtSlots("wave-3:", []iotago.SlotIndex{10}, 4, "node6-candidacy:1", ts.Nodes(), true, nil) - // Those candidacies should not be considered as they're issued after EpochNearingThreshold (slot 10). - ts.IssueCandidacyAnnouncementInSlot("node2-candidacy:1", 11, "wave-3:10.3", ts.Wallet("node2")) - ts.IssueCandidacyAnnouncementInSlot("node3-candidacy:1", 11, "node2-candidacy:1", ts.Wallet("node3")) - ts.IssueCandidacyAnnouncementInSlot("node4-candidacy:3", 11, "node3-candidacy:1", ts.Wallet("node3")) - ts.IssueCandidacyAnnouncementInSlot("node5-candidacy:2", 11, "node4-candidacy:3", ts.Wallet("node3")) + // Those candidacies should not be considered as they're issued after EpochNearingThreshold (slot 10). + ts.IssueCandidacyAnnouncementInSlot("node2-candidacy:1", 11, "wave-3:10.3", ts.Wallet("node2")) + ts.IssueCandidacyAnnouncementInSlot("node3-candidacy:1", 11, "node2-candidacy:1", ts.Wallet("node3")) + ts.IssueCandidacyAnnouncementInSlot("node4-candidacy:3", 11, "node3-candidacy:1", ts.Wallet("node3")) + ts.IssueCandidacyAnnouncementInSlot("node5-candidacy:2", 11, "node4-candidacy:3", ts.Wallet("node3")) - // Assert that only candidates that issued before slot 11 are considered. - ts.AssertSybilProtectionCandidates(1, []iotago.AccountID{ - ts.Node("node1").Validator.AccountID, - ts.Node("node4").Validator.AccountID, - ts.Node("node5").Validator.AccountID, - ts.Node("node6").Validator.AccountID, - }, ts.Nodes()...) + // Assert that only candidates that issued before slot 11 are considered. + ts.AssertSybilProtectionCandidates(0, []iotago.AccountID{ + ts.Node("node1").Validator.AccountID, + ts.Node("node4").Validator.AccountID, + ts.Node("node5").Validator.AccountID, + ts.Node("node6").Validator.AccountID, + }, ts.Nodes()...) 
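The epoch index in the assertion above changed from 1 to 0: candidacies have always been stored under the epoch in which they are announced, and after the Tracker change earlier in this diff EligibleValidatorCandidates (and this assertion) is queried with that announcement epoch directly instead of the target committee epoch. A tiny illustrative helper capturing that convention, assuming it holds everywhere (not part of the diff):

    package example

    import iotago "github.com/iotaledger/iota.go/v4"

    // committeeEpochForCandidacy maps the epoch in which a candidacy is announced
    // (and now also queried) to the epoch of the committee it competes for.
    func committeeEpochForCandidacy(announcementEpoch iotago.EpochIndex) iotago.EpochIndex {
        return announcementEpoch + 1
    }

So a candidacy announced in epoch 0, as in this test, is asserted via AssertSybilProtectionCandidates(0, ...) and competes for the committee checked with AssertSybilProtectionCommittee(1, ...).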
- ts.IssueBlocksAtSlots("wave-4:", []iotago.SlotIndex{11, 12, 13, 14, 15, 16, 17}, 4, "node5-candidacy:2", ts.Nodes(), true, nil) + ts.IssueBlocksAtSlots("wave-4:", []iotago.SlotIndex{11, 12, 13, 14, 15, 16, 17}, 4, "node5-candidacy:2", ts.Nodes(), true, nil) - ts.AssertLatestFinalizedSlot(13, ts.Nodes()...) - ts.AssertSybilProtectionCommittee(1, []iotago.AccountID{ - ts.Node("node1").Validator.AccountID, - ts.Node("node4").Validator.AccountID, - ts.Node("node5").Validator.AccountID, - }, ts.Nodes()...) + ts.AssertLatestFinalizedSlot(14, ts.Nodes()...) + ts.AssertSybilProtectionCommittee(1, []iotago.AccountID{ + ts.Node("node1").Validator.AccountID, + ts.Node("node4").Validator.AccountID, + ts.Node("node5").Validator.AccountID, + }, ts.Nodes()...) + } + + // Do not announce new candidacies for epoch 2 but finalize slots. The committee should be reused. + { + ts.IssueBlocksAtSlots("wave-5:", []iotago.SlotIndex{18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30}, 4, "wave-4:17.3", ts.Nodes(), true, nil) + + ts.AssertSybilProtectionCandidates(1, []iotago.AccountID{}, ts.Nodes()...) + ts.AssertLatestCommitmentSlotIndex(28, ts.Nodes()...) + ts.AssertLatestFinalizedSlot(27, ts.Nodes()...) + ts.AssertSybilProtectionCommittee(2, []iotago.AccountID{ + ts.Node("node1").Validator.AccountID, + ts.Node("node4").Validator.AccountID, + ts.Node("node5").Validator.AccountID, + }, ts.Nodes()...) + } + + // Do not finalize slots in time for epoch 3. The committee should be reused even though there are candidates. + { + // Issue blocks to remove the inactive committee members. + ts.IssueBlocksAtSlots("wave-6:", []iotago.SlotIndex{31, 32}, 4, "wave-5:30.3", ts.Nodes("node5", "node7"), false, nil) + ts.AssertLatestCommitmentSlotIndex(30, ts.Nodes()...) + + ts.IssueCandidacyAnnouncementInSlot("node6-candidacy:2", 33, "wave-6:32.3", ts.Wallet("node6")) + + // Issue the rest of the epoch just before we reach epoch end - maxCommittableAge. + ts.IssueBlocksAtSlots("wave-7:", []iotago.SlotIndex{33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45}, 4, "node6-candidacy:2", ts.Nodes("node5"), true, nil) + + ts.AssertLatestCommitmentSlotIndex(43, ts.Nodes()...) + // Even though we have a candidate, the committee should be reused as we did not finalize at epochNearingThreshold before epoch end - maxCommittableAge was committed. + ts.AssertSybilProtectionCandidates(2, []iotago.AccountID{ + ts.Node("node6").Validator.AccountID, + }, ts.Nodes()...) + // Check that the committee is reused. + ts.AssertSybilProtectionCommittee(3, []iotago.AccountID{ + ts.Node("node1").Validator.AccountID, + ts.Node("node4").Validator.AccountID, + ts.Node("node5").Validator.AccountID, + }, ts.Nodes()...) + } + + // Rotate to a smaller committee because too few candidates are available. + { + ts.IssueBlocksAtSlots("wave-8:", []iotago.SlotIndex{46, 47, 48, 49, 50, 51, 52, 53, 54, 55}, 4, "wave-7:45.3", ts.Nodes(), true, nil) + + ts.IssueCandidacyAnnouncementInSlot("node3-candidacy:2", 56, "wave-8:55.3", ts.Wallet("node3")) + + ts.IssueBlocksAtSlots("wave-8:", []iotago.SlotIndex{56, 57, 58, 59, 60, 61}, 4, "node3-candidacy:2", ts.Nodes(), true, nil) + + ts.AssertLatestCommitmentSlotIndex(59, ts.Nodes()...) + ts.AssertLatestFinalizedSlot(58, ts.Nodes()...) + // We finalized at epochEnd-epochNearingThreshold, so the committee should be rotated even if there is just one candidate. + ts.AssertSybilProtectionCandidates(3, []iotago.AccountID{ + ts.Node("node3").Validator.AccountID, + }, ts.Nodes()...)
+ ts.AssertSybilProtectionCommittee(4, []iotago.AccountID{ + ts.Node("node3").Validator.AccountID, + }, ts.Nodes()...) + } } diff --git a/pkg/tests/confirmation_state_test.go b/pkg/tests/confirmation_state_test.go index 63219ee69..1c764d3bd 100644 --- a/pkg/tests/confirmation_state_test.go +++ b/pkg/tests/confirmation_state_test.go @@ -9,7 +9,7 @@ import ( "github.com/iotaledger/hive.go/runtime/options" "github.com/iotaledger/iota-core/pkg/protocol" "github.com/iotaledger/iota-core/pkg/protocol/engine/notarization/slotnotarization" - "github.com/iotaledger/iota-core/pkg/protocol/sybilprotection/seatmanager/poa" + "github.com/iotaledger/iota-core/pkg/protocol/sybilprotection/seatmanager/topstakers" "github.com/iotaledger/iota-core/pkg/protocol/sybilprotection/sybilprotectionv1" "github.com/iotaledger/iota-core/pkg/testsuite" iotago "github.com/iotaledger/iota.go/v4" @@ -28,10 +28,11 @@ func TestConfirmationFlags(t *testing.T) { iotago.WithLivenessOptions( 10, 10, - 10, - 20, + testsuite.DefaultMinCommittableAge, + testsuite.DefaultMaxCommittableAge, testsuite.DefaultEpochNearingThreshold, ), + iotago.WithTargetCommitteeSize(4), ), ) defer ts.Shutdown() @@ -47,55 +48,27 @@ func TestConfirmationFlags(t *testing.T) { nodeC.Validator.AccountID, nodeD.Validator.AccountID, } - ts.Run(true, map[string][]options.Option[protocol.Protocol]{ - "nodeA": { - protocol.WithNotarizationProvider( - slotnotarization.NewProvider(), - ), - protocol.WithSybilProtectionProvider( - sybilprotectionv1.NewProvider( - sybilprotectionv1.WithSeatManagerProvider( - poa.NewProvider(poa.WithOnlineCommitteeStartup(nodeA.Validator.AccountID), poa.WithActivityWindow(2*time.Minute)), - ), - ), - ), - }, - "nodeB": { - protocol.WithNotarizationProvider( - slotnotarization.NewProvider(), - ), - protocol.WithSybilProtectionProvider( - sybilprotectionv1.NewProvider( - sybilprotectionv1.WithSeatManagerProvider( - poa.NewProvider(poa.WithOnlineCommitteeStartup(nodeA.Validator.AccountID), poa.WithActivityWindow(2*time.Minute)), - ), - ), - ), - }, - "nodeC": { - protocol.WithNotarizationProvider( - slotnotarization.NewProvider(), - ), - protocol.WithSybilProtectionProvider( - sybilprotectionv1.NewProvider( - sybilprotectionv1.WithSeatManagerProvider( - poa.NewProvider(poa.WithOnlineCommitteeStartup(nodeA.Validator.AccountID), poa.WithActivityWindow(2*time.Minute)), - ), - ), - ), - }, - "nodeD": { - protocol.WithNotarizationProvider( - slotnotarization.NewProvider(), - ), - protocol.WithSybilProtectionProvider( - sybilprotectionv1.NewProvider( - sybilprotectionv1.WithSeatManagerProvider( - poa.NewProvider(poa.WithOnlineCommitteeStartup(nodeA.Validator.AccountID), poa.WithActivityWindow(2*time.Minute)), + + nodeOpts := []options.Option[protocol.Protocol]{ + protocol.WithNotarizationProvider( + slotnotarization.NewProvider(), + ), + protocol.WithSybilProtectionProvider( + sybilprotectionv1.NewProvider( + sybilprotectionv1.WithSeatManagerProvider( + topstakers.NewProvider( + topstakers.WithOnlineCommitteeStartup(nodeA.Validator.AccountID), + topstakers.WithActivityWindow(2*time.Minute), ), ), ), - }, + ), + } + ts.Run(true, map[string][]options.Option[protocol.Protocol]{ + "nodeA": nodeOpts, + "nodeB": nodeOpts, + "nodeC": nodeOpts, + "nodeD": nodeOpts, }) // Verify that nodes have the expected states. 
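With WithSeatCount removed from the topstakers options earlier in this diff, the committee size is configured exclusively through the protocol parameters. A minimal configuration sketch in the spirit of the updated tests (the values and helper names are illustrative, not part of the diff):

    package example

    import (
        "time"

        "github.com/iotaledger/hive.go/runtime/options"
        "github.com/iotaledger/iota-core/pkg/protocol"
        "github.com/iotaledger/iota-core/pkg/protocol/sybilprotection/seatmanager/topstakers"
        "github.com/iotaledger/iota-core/pkg/protocol/sybilprotection/sybilprotectionv1"
        iotago "github.com/iotaledger/iota.go/v4"
    )

    // exampleAPI carries the committee size as a protocol parameter.
    func exampleAPI() iotago.API {
        return iotago.V3API(
            iotago.NewV3ProtocolParameters(
                iotago.WithTargetCommitteeSize(4),
            ),
        )
    }

    // exampleNodeOptions wires the topstakers seat manager without any seat count option;
    // only behavioural knobs such as the activity window remain.
    func exampleNodeOptions() []options.Option[protocol.Protocol] {
        return []options.Option[protocol.Protocol]{
            protocol.WithSybilProtectionProvider(
                sybilprotectionv1.NewProvider(
                    sybilprotectionv1.WithSeatManagerProvider(
                        topstakers.NewProvider(
                            topstakers.WithActivityWindow(30 * time.Second),
                        ),
                    ),
                ),
            ),
        }
    }

When no committee is stored yet for an epoch, SeatCountInEpoch falls back to exactly this TargetCommitteeSize parameter, so the two places stay consistent.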
diff --git a/pkg/tests/loss_of_acceptance_test.go b/pkg/tests/loss_of_acceptance_test.go index 961adfbcd..64c4a58c6 100644 --- a/pkg/tests/loss_of_acceptance_test.go +++ b/pkg/tests/loss_of_acceptance_test.go @@ -15,6 +15,7 @@ import ( func TestLossOfAcceptanceFromGenesis(t *testing.T) { ts := testsuite.NewTestSuite(t, + testsuite.WithWaitFor(15*time.Second), testsuite.WithProtocolParametersOptions( iotago.WithTimeProviderOptions( 0, @@ -27,7 +28,7 @@ func TestLossOfAcceptanceFromGenesis(t *testing.T) { 10, 2, 4, - 2, + 5, ), ), ) @@ -113,7 +114,7 @@ func TestLossOfAcceptanceFromSnapshot(t *testing.T) { 10, 2, 4, - 2, + 5, ), ), ) @@ -208,7 +209,7 @@ func TestLossOfAcceptanceWithRestartFromDisk(t *testing.T) { 10, 2, 4, - 2, + 5, ), ), ) diff --git a/pkg/tests/protocol_engine_switching_test.go b/pkg/tests/protocol_engine_switching_test.go index b56f185ed..03b2cfc81 100644 --- a/pkg/tests/protocol_engine_switching_test.go +++ b/pkg/tests/protocol_engine_switching_test.go @@ -40,7 +40,7 @@ func TestProtocol_EngineSwitching(t *testing.T) { 10, 2, 4, - 2, + 5, ), ), diff --git a/pkg/tests/protocol_startup_test.go b/pkg/tests/protocol_startup_test.go index 8d1e502ed..e4124af5b 100644 --- a/pkg/tests/protocol_startup_test.go +++ b/pkg/tests/protocol_startup_test.go @@ -34,7 +34,7 @@ func Test_BookInCommittedSlot(t *testing.T) { 10, 2, 4, - 2, + 5, ), ), ) @@ -134,7 +134,7 @@ func Test_StartNodeFromSnapshotAndDisk(t *testing.T) { 10, 2, 4, - 2, + 5, ), ), ) diff --git a/pkg/tests/upgrade_signaling_test.go b/pkg/tests/upgrade_signaling_test.go index 3d69028c4..76b63980f 100644 --- a/pkg/tests/upgrade_signaling_test.go +++ b/pkg/tests/upgrade_signaling_test.go @@ -19,6 +19,8 @@ import ( "github.com/iotaledger/iota-core/pkg/protocol/engine/accounts" "github.com/iotaledger/iota-core/pkg/protocol/engine/blocks" "github.com/iotaledger/iota-core/pkg/protocol/engine/upgrade/signalingupgradeorchestrator" + "github.com/iotaledger/iota-core/pkg/protocol/sybilprotection/seatmanager/topstakers" + "github.com/iotaledger/iota-core/pkg/protocol/sybilprotection/sybilprotectionv1" "github.com/iotaledger/iota-core/pkg/storage" "github.com/iotaledger/iota-core/pkg/storage/permanent" "github.com/iotaledger/iota-core/pkg/testsuite" @@ -40,8 +42,8 @@ func Test_Upgrade_Signaling(t *testing.T) { 10, 10, 2, - 6, - 2, + 4, + 5, ), iotago.WithVersionSignalingOptions(7, 5, 2), ), @@ -84,6 +86,16 @@ func Test_Upgrade_Signaling(t *testing.T) { ), ), ), + protocol.WithSybilProtectionProvider( + sybilprotectionv1.NewProvider( + sybilprotectionv1.WithSeatManagerProvider( + topstakers.NewProvider( + // We need to make sure that inactive nodes are evicted from the committee to continue acceptance. + topstakers.WithActivityWindow(15 * time.Second), + ), + ), + ), + ), } nodeOptionsWithV5 := append(nodeOptionsWithoutV5, @@ -213,10 +225,10 @@ func Test_Upgrade_Signaling(t *testing.T) { }, ts.Nodes()...) 
// check that rollback is correct - account, exists, err := ts.Node("nodeA").Protocol.MainEngineInstance().Ledger.Account(ts.Node("nodeA").Validator.AccountID, 7) + pastAccounts, err := ts.Node("nodeA").Protocol.MainEngineInstance().Ledger.PastAccounts(iotago.AccountIDs{ts.Node("nodeA").Validator.AccountID}, 7) require.NoError(t, err) - require.True(t, exists) - require.Equal(t, model.VersionAndHash{Version: 4, Hash: hash2}, account.LatestSupportedProtocolVersionAndHash) + require.Contains(t, pastAccounts, ts.Node("nodeA").Validator.AccountID) + require.Equal(t, model.VersionAndHash{Version: 4, Hash: hash2}, pastAccounts[ts.Node("nodeA").Validator.AccountID].LatestSupportedProtocolVersionAndHash) ts.IssueBlocksAtEpoch("", 2, 4, "15.3", ts.Nodes(), true, nil) ts.IssueBlocksAtEpoch("", 3, 4, "23.3", ts.Nodes(), true, nil) @@ -399,7 +411,8 @@ func Test_Upgrade_Signaling(t *testing.T) { // Check that issuing still produces the same commitments on the nodes that upgraded. The nodes that did not upgrade // should not be able to issue and process blocks with the new version. - ts.IssueBlocksAtEpoch("", 8, 4, "63.3", ts.Nodes("nodeB", "nodeC"), false, nil) + ts.IssueBlocksAtSlots("", []iotago.SlotIndex{64, 65}, 4, "63.3", ts.Nodes("nodeB", "nodeC"), false, nil) + ts.IssueBlocksAtSlots("", []iotago.SlotIndex{66, 67, 68, 69, 70, 71}, 4, "65.3", ts.Nodes("nodeB", "nodeC"), true, nil) // Nodes that did not set up the new protocol parameters are not able to process blocks with the new version. ts.AssertNodeState(ts.Nodes("nodeA", "nodeD", "nodeF", "nodeG"), diff --git a/pkg/testsuite/mock/node.go b/pkg/testsuite/mock/node.go index d927f4f6f..8bcb5d13a 100644 --- a/pkg/testsuite/mock/node.go +++ b/pkg/testsuite/mock/node.go @@ -150,6 +150,13 @@ func (n *Node) hookEvents() { events.CandidateEngineActivated.Hook(func(e *engine.Engine) { n.candidateEngineActivatedCount.Add(1) }) events.MainEngineSwitched.Hook(func(e *engine.Engine) { n.mainEngineSwitchedCount.Add(1) }) + + n.Protocol.Events.Engine.CommitmentFilter.BlockFiltered.Hook(func(event *commitmentfilter.BlockFilteredEvent) { + n.mutex.Lock() + defer n.mutex.Unlock() + + n.filteredBlockEvents = append(n.filteredBlockEvents, event) + }) } func (n *Node) hookLogging(failOnBlockFiltered bool) { diff --git a/pkg/testsuite/snapshotcreator/options.go b/pkg/testsuite/snapshotcreator/options.go index 20addc3f2..05ec414cf 100644 --- a/pkg/testsuite/snapshotcreator/options.go +++ b/pkg/testsuite/snapshotcreator/options.go @@ -6,8 +6,6 @@ import ( "github.com/iotaledger/iota-core/pkg/protocol/engine" "github.com/iotaledger/iota-core/pkg/protocol/engine/ledger" ledger1 "github.com/iotaledger/iota-core/pkg/protocol/engine/ledger/ledger" - "github.com/iotaledger/iota-core/pkg/protocol/sybilprotection/seatmanager" - "github.com/iotaledger/iota-core/pkg/protocol/sybilprotection/seatmanager/poa" "github.com/iotaledger/iota-core/pkg/testsuite/mock" iotago "github.com/iotaledger/iota.go/v4" ) @@ -35,26 +33,18 @@ type Options struct { // BasicOutput defines the basic outputs that are created in the ledger as part of the Genesis. 
BasicOutputs []BasicOutputDetails - DataBaseVersion byte - LedgerProvider module.Provider[*engine.Engine, ledger.Ledger] - SeatManagerProvider module.Provider[*engine.Engine, seatmanager.SeatManager] + DataBaseVersion byte + LedgerProvider module.Provider[*engine.Engine, ledger.Ledger] } func NewOptions(opts ...options.Option[Options]) *Options { return options.Apply(&Options{ - FilePath: "snapshot.bin", - DataBaseVersion: 1, - LedgerProvider: ledger1.NewProvider(), - SeatManagerProvider: poa.NewProvider(), + FilePath: "snapshot.bin", + DataBaseVersion: 1, + LedgerProvider: ledger1.NewProvider(), }, opts) } -func WithSeatManagerProvider(seatManagerProvider module.Provider[*engine.Engine, seatmanager.SeatManager]) options.Option[Options] { - return func(m *Options) { - m.SeatManagerProvider = seatManagerProvider - } -} - func WithLedgerProvider(ledgerProvider module.Provider[*engine.Engine, ledger.Ledger]) options.Option[Options] { return func(m *Options) { m.LedgerProvider = ledgerProvider diff --git a/pkg/testsuite/snapshotcreator/snapshotcreator.go b/pkg/testsuite/snapshotcreator/snapshotcreator.go index afbc29494..e3ec5d5f4 100644 --- a/pkg/testsuite/snapshotcreator/snapshotcreator.go +++ b/pkg/testsuite/snapshotcreator/snapshotcreator.go @@ -104,8 +104,7 @@ func CreateSnapshot(opts ...options.Option[Options]) error { blocktime.NewProvider(), thresholdblockgadget.NewProvider(), totalweightslotgadget.NewProvider(), - sybilprotectionv1.NewProvider(sybilprotectionv1.WithInitialCommittee(committeeAccountsData), - sybilprotectionv1.WithSeatManagerProvider(opt.SeatManagerProvider)), + sybilprotectionv1.NewProvider(sybilprotectionv1.WithInitialCommittee(committeeAccountsData)), slotnotarization.NewProvider(), slotattestation.NewProvider(), opt.LedgerProvider, diff --git a/pkg/testsuite/sybilprotection.go b/pkg/testsuite/sybilprotection.go index 39673cb29..292b04ef9 100644 --- a/pkg/testsuite/sybilprotection.go +++ b/pkg/testsuite/sybilprotection.go @@ -17,11 +17,17 @@ func (t *TestSuite) AssertSybilProtectionCommittee(epoch iotago.EpochIndex, expe for _, node := range nodes { t.Eventually(func() error { - accounts, err := lo.Return1(node.Protocol.MainEngineInstance().SybilProtection.SeatManager().CommitteeInEpoch(epoch)).Accounts() + committeeInEpoch, exists := node.Protocol.MainEngineInstance().SybilProtection.SeatManager().CommitteeInEpoch(epoch) + if !exists { + return ierrors.Errorf("AssertSybilProtectionCommittee: %s: failed to get committee in epoch %d", node.Name, epoch) + } + + committeeInEpochAccounts, err := committeeInEpoch.Accounts() if err != nil { - t.Testing.Fatal(err) + return ierrors.Errorf("AssertSybilProtectionCommittee: %s: failed to get accounts in committee in epoch %d: %w", node.Name, epoch, err) } - accountIDs := accounts.IDs() + + accountIDs := committeeInEpochAccounts.IDs() if !assert.ElementsMatch(t.fakeTesting, expectedAccounts, accountIDs) { return ierrors.Errorf("AssertSybilProtectionCommittee: %s: expected %s, got %s", node.Name, expectedAccounts, accountIDs) } diff --git a/pkg/testsuite/testsuite_options.go b/pkg/testsuite/testsuite_options.go index 6f467834e..0f3991013 100644 --- a/pkg/testsuite/testsuite_options.go +++ b/pkg/testsuite/testsuite_options.go @@ -65,7 +65,7 @@ const ( DefaultLivenessThresholdUpperBoundInSeconds uint16 = 30 DefaultMinCommittableAge iotago.SlotIndex = 10 DefaultMaxCommittableAge iotago.SlotIndex = 20 - DefaultEpochNearingThreshold iotago.SlotIndex = 16 + DefaultEpochNearingThreshold iotago.SlotIndex = 24 DefaultMinReferenceManaCost 
iotago.Mana = 500 DefaultRMCIncrease iotago.Mana = 500 diff --git a/tools/gendoc/go.mod b/tools/gendoc/go.mod index 8e346d660..9f913336e 100644 --- a/tools/gendoc/go.mod +++ b/tools/gendoc/go.mod @@ -5,7 +5,7 @@ go 1.21 replace github.com/iotaledger/iota-core => ../../ require ( - github.com/iotaledger/hive.go/app v0.0.0-20231110191152-7135670285dc + github.com/iotaledger/hive.go/app v0.0.0-20231113110812-4ca2b6cc9a42 github.com/iotaledger/hive.go/apputils v0.0.0-20230829152614-7afc7a4d89b3 github.com/iotaledger/iota-core v0.0.0-00010101000000-000000000000 ) @@ -25,7 +25,7 @@ require ( github.com/dustin/go-humanize v1.0.1 // indirect github.com/eclipse/paho.mqtt.golang v1.4.3 // indirect github.com/elastic/gosigar v0.14.2 // indirect - github.com/ethereum/go-ethereum v1.13.4 // indirect + github.com/ethereum/go-ethereum v1.13.5 // indirect github.com/fatih/structs v1.1.0 // indirect github.com/fbiville/markdown-table-formatter v0.3.0 // indirect github.com/felixge/fgprof v0.9.3 // indirect @@ -58,21 +58,21 @@ require ( github.com/iancoleman/orderedmap v0.3.0 // indirect github.com/iotaledger/grocksdb v1.7.5-0.20230220105546-5162e18885c7 // indirect github.com/iotaledger/hive.go/ads v0.0.0-20231110191152-7135670285dc // indirect - github.com/iotaledger/hive.go/constraints v0.0.0-20231110191152-7135670285dc // indirect - github.com/iotaledger/hive.go/core v1.0.0-rc.3.0.20231110191152-7135670285dc // indirect - github.com/iotaledger/hive.go/crypto v0.0.0-20231110191152-7135670285dc // indirect - github.com/iotaledger/hive.go/ds v0.0.0-20231110191152-7135670285dc // indirect - github.com/iotaledger/hive.go/ierrors v0.0.0-20231110191152-7135670285dc // indirect + github.com/iotaledger/hive.go/constraints v0.0.0-20231113110812-4ca2b6cc9a42 // indirect + github.com/iotaledger/hive.go/core v1.0.0-rc.3.0.20231113110812-4ca2b6cc9a42 // indirect + github.com/iotaledger/hive.go/crypto v0.0.0-20231113110812-4ca2b6cc9a42 // indirect + github.com/iotaledger/hive.go/ds v0.0.0-20231113110812-4ca2b6cc9a42 // indirect + github.com/iotaledger/hive.go/ierrors v0.0.0-20231113110812-4ca2b6cc9a42 // indirect github.com/iotaledger/hive.go/kvstore v0.0.0-20231110191152-7135670285dc // indirect - github.com/iotaledger/hive.go/lo v0.0.0-20231110191152-7135670285dc // indirect + github.com/iotaledger/hive.go/lo v0.0.0-20231113110812-4ca2b6cc9a42 // indirect github.com/iotaledger/hive.go/log v0.0.0-20231110191152-7135670285dc // indirect - github.com/iotaledger/hive.go/logger v0.0.0-20231110191152-7135670285dc // indirect - github.com/iotaledger/hive.go/runtime v0.0.0-20231110191152-7135670285dc // indirect - github.com/iotaledger/hive.go/serializer/v2 v2.0.0-rc.1.0.20231110191152-7135670285dc // indirect - github.com/iotaledger/hive.go/stringify v0.0.0-20231110191152-7135670285dc // indirect - github.com/iotaledger/inx-app v1.0.0-rc.3.0.20231110132801-e38d9fbdd467 // indirect - github.com/iotaledger/inx/go v1.0.0-rc.2.0.20231110132251-8abdb05cce43 // indirect - github.com/iotaledger/iota.go/v4 v4.0.0-20231110131407-263d0662856b // indirect + github.com/iotaledger/hive.go/logger v0.0.0-20231113110812-4ca2b6cc9a42 // indirect + github.com/iotaledger/hive.go/runtime v0.0.0-20231113110812-4ca2b6cc9a42 // indirect + github.com/iotaledger/hive.go/serializer/v2 v2.0.0-rc.1.0.20231113110812-4ca2b6cc9a42 // indirect + github.com/iotaledger/hive.go/stringify v0.0.0-20231113110812-4ca2b6cc9a42 // indirect + github.com/iotaledger/inx-app v1.0.0-rc.3.0.20231120094046-1308e2a5e072 // indirect + github.com/iotaledger/inx/go 
v1.0.0-rc.2.0.20231120082637-ccd5b8465251 // indirect + github.com/iotaledger/iota.go/v4 v4.0.0-20231120063545-80c263f28140 // indirect github.com/ipfs/boxo v0.13.1 // indirect github.com/ipfs/go-cid v0.4.1 // indirect github.com/ipfs/go-datastore v0.6.0 // indirect diff --git a/tools/gendoc/go.sum b/tools/gendoc/go.sum index 691e76fe2..4c0a34b99 100644 --- a/tools/gendoc/go.sum +++ b/tools/gendoc/go.sum @@ -96,8 +96,8 @@ github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.m github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= -github.com/ethereum/go-ethereum v1.13.4 h1:25HJnaWVg3q1O7Z62LaaI6S9wVq8QCw3K88g8wEzrcM= -github.com/ethereum/go-ethereum v1.13.4/go.mod h1:I0U5VewuuTzvBtVzKo7b3hJzDhXOUtn9mJW7SsIPB0Q= +github.com/ethereum/go-ethereum v1.13.5 h1:U6TCRciCqZRe4FPXmy1sMGxTfuk8P7u2UoinF3VbaFk= +github.com/ethereum/go-ethereum v1.13.5/go.mod h1:yMTu38GSuyxaYzQMViqNmQ1s3cE84abZexQmTgenWk0= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU= github.com/fatih/structs v1.1.0 h1:Q7juDM0QtcnhCpeyLGQKyg4TOIghuNXrkL32pHAUMxo= @@ -279,40 +279,40 @@ github.com/iotaledger/grocksdb v1.7.5-0.20230220105546-5162e18885c7 h1:dTrD7X2PT github.com/iotaledger/grocksdb v1.7.5-0.20230220105546-5162e18885c7/go.mod h1:ZRdPu684P0fQ1z8sXz4dj9H5LWHhz4a9oCtvjunkSrw= github.com/iotaledger/hive.go/ads v0.0.0-20231110191152-7135670285dc h1:PsArE43UkLymmDy9r7n42Yd1pv1iq4FwSx3iv2Mo+vc= github.com/iotaledger/hive.go/ads v0.0.0-20231110191152-7135670285dc/go.mod h1:gbUvr01B5ha530GnNm8K2OsHXOd2BtzBYOMxyTX3iDg= -github.com/iotaledger/hive.go/app v0.0.0-20231110191152-7135670285dc h1:jMbElktKULtS8pA8MK5i5BTbOy+dtwAOGmVSZ5x6J2s= -github.com/iotaledger/hive.go/app v0.0.0-20231110191152-7135670285dc/go.mod h1:+riYmeLApkLlj4+EpuJpEJAsj/KGfD7cqLGy7oTsPOM= +github.com/iotaledger/hive.go/app v0.0.0-20231113110812-4ca2b6cc9a42 h1:K6VF23FOqHTRdk5OzsuBkYlGV008SZgKYqNwb0bp3rk= +github.com/iotaledger/hive.go/app v0.0.0-20231113110812-4ca2b6cc9a42/go.mod h1:+riYmeLApkLlj4+EpuJpEJAsj/KGfD7cqLGy7oTsPOM= github.com/iotaledger/hive.go/apputils v0.0.0-20230829152614-7afc7a4d89b3 h1:4aVJTc0KS77uEw0Tny4r0n1ORwcbAQDECaCclgf/6lE= github.com/iotaledger/hive.go/apputils v0.0.0-20230829152614-7afc7a4d89b3/go.mod h1:TZeAqieDu+xDOZp2e9+S+8pZp1PrfgcwLUnxmd8IgLU= -github.com/iotaledger/hive.go/constraints v0.0.0-20231110191152-7135670285dc h1:qeE5T8LXGjKaFduWCt06CXsUTkhfHNx6hOD5xYP31QU= -github.com/iotaledger/hive.go/constraints v0.0.0-20231110191152-7135670285dc/go.mod h1:dOBOM2s4se3HcWefPe8sQLUalGXJ8yVXw58oK8jke3s= -github.com/iotaledger/hive.go/core v1.0.0-rc.3.0.20231110191152-7135670285dc h1:dyguf5k/eVGyv94ISm/FDtInOktce6koo+QtJvAPUT8= -github.com/iotaledger/hive.go/core v1.0.0-rc.3.0.20231110191152-7135670285dc/go.mod h1:CdixkrB7VdQzEDlVuwsxPtsiJL/WXrQgz3PELIqlLko= -github.com/iotaledger/hive.go/crypto v0.0.0-20231110191152-7135670285dc h1:3wT7e5fRdDnnomkM6xPD110BCFz66MaXKxYUvLFuYkc= -github.com/iotaledger/hive.go/crypto v0.0.0-20231110191152-7135670285dc/go.mod h1:OQ9EVTTQT1mkO/16BgwSIyQlAhEg+Cptud/yutevWsI= -github.com/iotaledger/hive.go/ds v0.0.0-20231110191152-7135670285dc h1:YQUKGFcOBGKSrok++Er5SZTtQx0UHTRgH4cvlHVOiwc= 
-github.com/iotaledger/hive.go/ds v0.0.0-20231110191152-7135670285dc/go.mod h1:JE8cbZSvzbB5TrwXibg6M0B7ck35YxF30ItHBzQRlgc= -github.com/iotaledger/hive.go/ierrors v0.0.0-20231110191152-7135670285dc h1:sNFIiT+gEE6UlftfiBdrsUBIJtnhV6EpwVRw2YpbhUc= -github.com/iotaledger/hive.go/ierrors v0.0.0-20231110191152-7135670285dc/go.mod h1:HcE8B5lP96enc/OALTb2/rIIi+yOLouRoHOKRclKmC8= +github.com/iotaledger/hive.go/constraints v0.0.0-20231113110812-4ca2b6cc9a42 h1:+PyLPZhRHy+Negjpuj0CSLaObpErEH7yI6HB2z5N6b0= +github.com/iotaledger/hive.go/constraints v0.0.0-20231113110812-4ca2b6cc9a42/go.mod h1:dOBOM2s4se3HcWefPe8sQLUalGXJ8yVXw58oK8jke3s= +github.com/iotaledger/hive.go/core v1.0.0-rc.3.0.20231113110812-4ca2b6cc9a42 h1:3dW4gz0Vr9BogN826HRTp0OFlbngjhWcVPUfDhJ57Yw= +github.com/iotaledger/hive.go/core v1.0.0-rc.3.0.20231113110812-4ca2b6cc9a42/go.mod h1:CdixkrB7VdQzEDlVuwsxPtsiJL/WXrQgz3PELIqlLko= +github.com/iotaledger/hive.go/crypto v0.0.0-20231113110812-4ca2b6cc9a42 h1:t6EKe+O7XAmbe07cVHuM/3aBLEbVIY4D6yefANB4PUA= +github.com/iotaledger/hive.go/crypto v0.0.0-20231113110812-4ca2b6cc9a42/go.mod h1:OQ9EVTTQT1mkO/16BgwSIyQlAhEg+Cptud/yutevWsI= +github.com/iotaledger/hive.go/ds v0.0.0-20231113110812-4ca2b6cc9a42 h1:QZiMlDxmikF64zimWQunTrsEGOK9ydRahUAz2I46JAk= +github.com/iotaledger/hive.go/ds v0.0.0-20231113110812-4ca2b6cc9a42/go.mod h1:JE8cbZSvzbB5TrwXibg6M0B7ck35YxF30ItHBzQRlgc= +github.com/iotaledger/hive.go/ierrors v0.0.0-20231113110812-4ca2b6cc9a42 h1:gxlZ4zL6EfLyqT0+hIFV3WVE0FrPVgV5cQdyn36vPXQ= +github.com/iotaledger/hive.go/ierrors v0.0.0-20231113110812-4ca2b6cc9a42/go.mod h1:HcE8B5lP96enc/OALTb2/rIIi+yOLouRoHOKRclKmC8= github.com/iotaledger/hive.go/kvstore v0.0.0-20231110191152-7135670285dc h1:3fsqfM2NqfhrewVdlKT3MHcXxVNvUCSP7P32il1ypa0= github.com/iotaledger/hive.go/kvstore v0.0.0-20231110191152-7135670285dc/go.mod h1:ytfKoHr/nF8u0y0G4mamfG0yjFtJiJVk0kgjnPOtsSY= -github.com/iotaledger/hive.go/lo v0.0.0-20231110191152-7135670285dc h1:OrQBscQTsAzAJGwVs7qlPgczbvufsbENkOYRmyM+CF4= -github.com/iotaledger/hive.go/lo v0.0.0-20231110191152-7135670285dc/go.mod h1:6Ee7i6b4tuTHuRYnPP8VUb0wr9XFI5qlqtnttBd9jRg= +github.com/iotaledger/hive.go/lo v0.0.0-20231113110812-4ca2b6cc9a42 h1:kcHkWyURZDVqO80OmJo5Z+wTJB6H+s52WAnU575vX0o= +github.com/iotaledger/hive.go/lo v0.0.0-20231113110812-4ca2b6cc9a42/go.mod h1:6Ee7i6b4tuTHuRYnPP8VUb0wr9XFI5qlqtnttBd9jRg= github.com/iotaledger/hive.go/log v0.0.0-20231110191152-7135670285dc h1:joYrsSZuVG3DfAQR9iS3qjnMExJ0qNp2+369sxb1Y4g= github.com/iotaledger/hive.go/log v0.0.0-20231110191152-7135670285dc/go.mod h1:vzO4/wRkEJDEZb/9fD10oKU9k1bj4qLir2Uhl5U1FkM= -github.com/iotaledger/hive.go/logger v0.0.0-20231110191152-7135670285dc h1:p4K5bCNRVmbzVXZUa53Hg8s6gCW+tYjhG1f3C+1F044= -github.com/iotaledger/hive.go/logger v0.0.0-20231110191152-7135670285dc/go.mod h1:w1psHM2MuKsen1WdsPKrpqElYH7ZOQ+YdQIgJZg4HTo= -github.com/iotaledger/hive.go/runtime v0.0.0-20231110191152-7135670285dc h1:dN9VYzV53oz2TlHHGtRtqaGvMDvFRW0Uh433z13k6+E= -github.com/iotaledger/hive.go/runtime v0.0.0-20231110191152-7135670285dc/go.mod h1:DrZPvUvLarK8C2qb+3H2vdypp/MuhpQmB3iMJbDCr/Q= -github.com/iotaledger/hive.go/serializer/v2 v2.0.0-rc.1.0.20231110191152-7135670285dc h1:/DIsAs3PWCNkHoLXR2+uW34VAvZvfiCCJYA/rczfnmw= -github.com/iotaledger/hive.go/serializer/v2 v2.0.0-rc.1.0.20231110191152-7135670285dc/go.mod h1:FoH3T6yKlZJp8xm8K+zsQiibSynp32v21CpWx8xkek8= -github.com/iotaledger/hive.go/stringify v0.0.0-20231110191152-7135670285dc h1:Dp9sOvU2B7xoyX28bYZgUUDAIqMCBhsmK2vWhIgDyWE= -github.com/iotaledger/hive.go/stringify 
v0.0.0-20231110191152-7135670285dc/go.mod h1:FTo/UWzNYgnQ082GI9QVM9HFDERqf9rw9RivNpqrnTs= -github.com/iotaledger/inx-app v1.0.0-rc.3.0.20231110132801-e38d9fbdd467 h1:2FNiPAUbHOJ+mLI1aU81QaoitbkebxJWUEylPdnC2Lc= -github.com/iotaledger/inx-app v1.0.0-rc.3.0.20231110132801-e38d9fbdd467/go.mod h1:bXOm6f+0zP19Ku/ozcSWZQiJb9ge9X7gg1TEcpRexUQ= -github.com/iotaledger/inx/go v1.0.0-rc.2.0.20231110132251-8abdb05cce43 h1:Rs1vQypwaWvs+BqQWoGu6ToVl2F8eSErJabd5lmO4Pw= -github.com/iotaledger/inx/go v1.0.0-rc.2.0.20231110132251-8abdb05cce43/go.mod h1:MvgF3pUPvdH/xIfrgdURFlpTyvnRWgcBMaTQb0GEKf0= -github.com/iotaledger/iota.go/v4 v4.0.0-20231110131407-263d0662856b h1:eU9vrxmXr1rMs67BsIWrfmEK+IjIsOnbl2XTlTtNIls= -github.com/iotaledger/iota.go/v4 v4.0.0-20231110131407-263d0662856b/go.mod h1:1CUJKGvkOUGXakxFZGAagEQDX9qYyhzIElmUHCHo9RM= +github.com/iotaledger/hive.go/logger v0.0.0-20231113110812-4ca2b6cc9a42 h1:uD99UbTtBM5SIP9N3c/3BBLtb0frGYFsZ2lS8Zxtqr4= +github.com/iotaledger/hive.go/logger v0.0.0-20231113110812-4ca2b6cc9a42/go.mod h1:w1psHM2MuKsen1WdsPKrpqElYH7ZOQ+YdQIgJZg4HTo= +github.com/iotaledger/hive.go/runtime v0.0.0-20231113110812-4ca2b6cc9a42 h1:hpR++ME3Y3CcxA431Zg0PgcCJUNkbBqjNXxR/bs+NdI= +github.com/iotaledger/hive.go/runtime v0.0.0-20231113110812-4ca2b6cc9a42/go.mod h1:DrZPvUvLarK8C2qb+3H2vdypp/MuhpQmB3iMJbDCr/Q= +github.com/iotaledger/hive.go/serializer/v2 v2.0.0-rc.1.0.20231113110812-4ca2b6cc9a42 h1:hepsnGvaS39azq80GV8DT9HlexoO/RqJbyiW5FXZ0HQ= +github.com/iotaledger/hive.go/serializer/v2 v2.0.0-rc.1.0.20231113110812-4ca2b6cc9a42/go.mod h1:FoH3T6yKlZJp8xm8K+zsQiibSynp32v21CpWx8xkek8= +github.com/iotaledger/hive.go/stringify v0.0.0-20231113110812-4ca2b6cc9a42 h1:9c7NiX2cnNPHR9UNWINDqNkolupXiDF3543pR6KLwIg= +github.com/iotaledger/hive.go/stringify v0.0.0-20231113110812-4ca2b6cc9a42/go.mod h1:FTo/UWzNYgnQ082GI9QVM9HFDERqf9rw9RivNpqrnTs= +github.com/iotaledger/inx-app v1.0.0-rc.3.0.20231120094046-1308e2a5e072 h1:xbaW2dnDZy0ThcEcdK7ir3b+ynBXsn0R14lgxiFVuB0= +github.com/iotaledger/inx-app v1.0.0-rc.3.0.20231120094046-1308e2a5e072/go.mod h1:iFiY6UukYeL8D3N1mtg4jh/9lxTBhzG0QgtD+w0gpps= +github.com/iotaledger/inx/go v1.0.0-rc.2.0.20231120082637-ccd5b8465251 h1:bYGO8jXNXJNMGPG9etGW7WXfLbRU9ofx1xdd29/sS9M= +github.com/iotaledger/inx/go v1.0.0-rc.2.0.20231120082637-ccd5b8465251/go.mod h1:chzj8FDIeXHIh3D52QTZ7imADlzdkhg7o7E2Qr85MJ8= +github.com/iotaledger/iota.go/v4 v4.0.0-20231120063545-80c263f28140 h1:8zHRYT1KADR9bOLUg7Ia4XA3StBHzV4Tb2Qtp42KLN8= +github.com/iotaledger/iota.go/v4 v4.0.0-20231120063545-80c263f28140/go.mod h1:1CUJKGvkOUGXakxFZGAagEQDX9qYyhzIElmUHCHo9RM= github.com/ipfs/boxo v0.13.1 h1:nQ5oQzcMZR3oL41REJDcTbrvDvuZh3J9ckc9+ILeRQI= github.com/ipfs/boxo v0.13.1/go.mod h1:btrtHy0lmO1ODMECbbEY1pxNtrLilvKSYLoGQt1yYCk= github.com/ipfs/go-cid v0.4.1 h1:A/T3qGvxi4kpKWWcPC/PgbvDA2bjVLO7n4UeVwnbs/s= diff --git a/tools/genesis-snapshot/go.mod b/tools/genesis-snapshot/go.mod index 4c1d8b220..0d28ae51d 100644 --- a/tools/genesis-snapshot/go.mod +++ b/tools/genesis-snapshot/go.mod @@ -5,12 +5,12 @@ go 1.21 replace github.com/iotaledger/iota-core => ../../ require ( - github.com/iotaledger/hive.go/crypto v0.0.0-20231110191152-7135670285dc - github.com/iotaledger/hive.go/ierrors v0.0.0-20231110191152-7135670285dc - github.com/iotaledger/hive.go/lo v0.0.0-20231110191152-7135670285dc - github.com/iotaledger/hive.go/runtime v0.0.0-20231110191152-7135670285dc + github.com/iotaledger/hive.go/crypto v0.0.0-20231113110812-4ca2b6cc9a42 + github.com/iotaledger/hive.go/ierrors v0.0.0-20231113110812-4ca2b6cc9a42 + 
github.com/iotaledger/hive.go/lo v0.0.0-20231113110812-4ca2b6cc9a42 + github.com/iotaledger/hive.go/runtime v0.0.0-20231113110812-4ca2b6cc9a42 github.com/iotaledger/iota-core v0.0.0-00010101000000-000000000000 - github.com/iotaledger/iota.go/v4 v4.0.0-20231110131407-263d0662856b + github.com/iotaledger/iota.go/v4 v4.0.0-20231120063545-80c263f28140 github.com/mr-tron/base58 v1.2.0 github.com/spf13/pflag v1.0.5 golang.org/x/crypto v0.15.0 @@ -21,19 +21,19 @@ require ( github.com/btcsuite/btcd/btcec/v2 v2.3.2 // indirect github.com/davecgh/go-spew v1.1.1 // indirect github.com/decred/dcrd/dcrec/secp256k1/v4 v4.2.0 // indirect - github.com/ethereum/go-ethereum v1.13.4 // indirect + github.com/ethereum/go-ethereum v1.13.5 // indirect github.com/google/uuid v1.4.0 // indirect github.com/holiman/uint256 v1.2.3 // indirect github.com/iancoleman/orderedmap v0.3.0 // indirect github.com/iotaledger/grocksdb v1.7.5-0.20230220105546-5162e18885c7 // indirect github.com/iotaledger/hive.go/ads v0.0.0-20231110191152-7135670285dc // indirect - github.com/iotaledger/hive.go/constraints v0.0.0-20231110191152-7135670285dc // indirect - github.com/iotaledger/hive.go/core v1.0.0-rc.3.0.20231110191152-7135670285dc // indirect - github.com/iotaledger/hive.go/ds v0.0.0-20231110191152-7135670285dc // indirect + github.com/iotaledger/hive.go/constraints v0.0.0-20231113110812-4ca2b6cc9a42 // indirect + github.com/iotaledger/hive.go/core v1.0.0-rc.3.0.20231113110812-4ca2b6cc9a42 // indirect + github.com/iotaledger/hive.go/ds v0.0.0-20231113110812-4ca2b6cc9a42 // indirect github.com/iotaledger/hive.go/kvstore v0.0.0-20231110191152-7135670285dc // indirect github.com/iotaledger/hive.go/log v0.0.0-20231110191152-7135670285dc // indirect - github.com/iotaledger/hive.go/serializer/v2 v2.0.0-rc.1.0.20231110191152-7135670285dc // indirect - github.com/iotaledger/hive.go/stringify v0.0.0-20231110191152-7135670285dc // indirect + github.com/iotaledger/hive.go/serializer/v2 v2.0.0-rc.1.0.20231113110812-4ca2b6cc9a42 // indirect + github.com/iotaledger/hive.go/stringify v0.0.0-20231113110812-4ca2b6cc9a42 // indirect github.com/ipfs/go-cid v0.4.1 // indirect github.com/klauspost/cpuid/v2 v2.2.5 // indirect github.com/kr/text v0.2.0 // indirect diff --git a/tools/genesis-snapshot/go.sum b/tools/genesis-snapshot/go.sum index 38eaf1b62..b8d3f0878 100644 --- a/tools/genesis-snapshot/go.sum +++ b/tools/genesis-snapshot/go.sum @@ -12,8 +12,8 @@ github.com/decred/dcrd/crypto/blake256 v1.0.1 h1:7PltbUIQB7u/FfZ39+DGa/ShuMyJ5il github.com/decred/dcrd/crypto/blake256 v1.0.1/go.mod h1:2OfgNZ5wDpcsFmHmCK5gZTPcCXqlm2ArzUIkw9czNJo= github.com/decred/dcrd/dcrec/secp256k1/v4 v4.2.0 h1:8UrgZ3GkP4i/CLijOJx79Yu+etlyjdBU4sfcs2WYQMs= github.com/decred/dcrd/dcrec/secp256k1/v4 v4.2.0/go.mod h1:v57UDF4pDQJcEfFUCRop3lJL149eHGSe9Jvczhzjo/0= -github.com/ethereum/go-ethereum v1.13.4 h1:25HJnaWVg3q1O7Z62LaaI6S9wVq8QCw3K88g8wEzrcM= -github.com/ethereum/go-ethereum v1.13.4/go.mod h1:I0U5VewuuTzvBtVzKo7b3hJzDhXOUtn9mJW7SsIPB0Q= +github.com/ethereum/go-ethereum v1.13.5 h1:U6TCRciCqZRe4FPXmy1sMGxTfuk8P7u2UoinF3VbaFk= +github.com/ethereum/go-ethereum v1.13.5/go.mod h1:yMTu38GSuyxaYzQMViqNmQ1s3cE84abZexQmTgenWk0= github.com/fjl/memsize v0.0.2 h1:27txuSD9or+NZlnOWdKUxeBzTAUkWCVh+4Gf2dWFOzA= github.com/fjl/memsize v0.0.2/go.mod h1:VvhXpOYNQvB+uIk2RvXzuaQtkQJzzIx6lSBe1xv7hi0= github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= @@ -30,30 +30,30 @@ github.com/iotaledger/grocksdb v1.7.5-0.20230220105546-5162e18885c7 h1:dTrD7X2PT 
github.com/iotaledger/grocksdb v1.7.5-0.20230220105546-5162e18885c7/go.mod h1:ZRdPu684P0fQ1z8sXz4dj9H5LWHhz4a9oCtvjunkSrw= github.com/iotaledger/hive.go/ads v0.0.0-20231110191152-7135670285dc h1:PsArE43UkLymmDy9r7n42Yd1pv1iq4FwSx3iv2Mo+vc= github.com/iotaledger/hive.go/ads v0.0.0-20231110191152-7135670285dc/go.mod h1:gbUvr01B5ha530GnNm8K2OsHXOd2BtzBYOMxyTX3iDg= -github.com/iotaledger/hive.go/constraints v0.0.0-20231110191152-7135670285dc h1:qeE5T8LXGjKaFduWCt06CXsUTkhfHNx6hOD5xYP31QU= -github.com/iotaledger/hive.go/constraints v0.0.0-20231110191152-7135670285dc/go.mod h1:dOBOM2s4se3HcWefPe8sQLUalGXJ8yVXw58oK8jke3s= -github.com/iotaledger/hive.go/core v1.0.0-rc.3.0.20231110191152-7135670285dc h1:dyguf5k/eVGyv94ISm/FDtInOktce6koo+QtJvAPUT8= -github.com/iotaledger/hive.go/core v1.0.0-rc.3.0.20231110191152-7135670285dc/go.mod h1:CdixkrB7VdQzEDlVuwsxPtsiJL/WXrQgz3PELIqlLko= -github.com/iotaledger/hive.go/crypto v0.0.0-20231110191152-7135670285dc h1:3wT7e5fRdDnnomkM6xPD110BCFz66MaXKxYUvLFuYkc= -github.com/iotaledger/hive.go/crypto v0.0.0-20231110191152-7135670285dc/go.mod h1:OQ9EVTTQT1mkO/16BgwSIyQlAhEg+Cptud/yutevWsI= -github.com/iotaledger/hive.go/ds v0.0.0-20231110191152-7135670285dc h1:YQUKGFcOBGKSrok++Er5SZTtQx0UHTRgH4cvlHVOiwc= -github.com/iotaledger/hive.go/ds v0.0.0-20231110191152-7135670285dc/go.mod h1:JE8cbZSvzbB5TrwXibg6M0B7ck35YxF30ItHBzQRlgc= -github.com/iotaledger/hive.go/ierrors v0.0.0-20231110191152-7135670285dc h1:sNFIiT+gEE6UlftfiBdrsUBIJtnhV6EpwVRw2YpbhUc= -github.com/iotaledger/hive.go/ierrors v0.0.0-20231110191152-7135670285dc/go.mod h1:HcE8B5lP96enc/OALTb2/rIIi+yOLouRoHOKRclKmC8= +github.com/iotaledger/hive.go/constraints v0.0.0-20231113110812-4ca2b6cc9a42 h1:+PyLPZhRHy+Negjpuj0CSLaObpErEH7yI6HB2z5N6b0= +github.com/iotaledger/hive.go/constraints v0.0.0-20231113110812-4ca2b6cc9a42/go.mod h1:dOBOM2s4se3HcWefPe8sQLUalGXJ8yVXw58oK8jke3s= +github.com/iotaledger/hive.go/core v1.0.0-rc.3.0.20231113110812-4ca2b6cc9a42 h1:3dW4gz0Vr9BogN826HRTp0OFlbngjhWcVPUfDhJ57Yw= +github.com/iotaledger/hive.go/core v1.0.0-rc.3.0.20231113110812-4ca2b6cc9a42/go.mod h1:CdixkrB7VdQzEDlVuwsxPtsiJL/WXrQgz3PELIqlLko= +github.com/iotaledger/hive.go/crypto v0.0.0-20231113110812-4ca2b6cc9a42 h1:t6EKe+O7XAmbe07cVHuM/3aBLEbVIY4D6yefANB4PUA= +github.com/iotaledger/hive.go/crypto v0.0.0-20231113110812-4ca2b6cc9a42/go.mod h1:OQ9EVTTQT1mkO/16BgwSIyQlAhEg+Cptud/yutevWsI= +github.com/iotaledger/hive.go/ds v0.0.0-20231113110812-4ca2b6cc9a42 h1:QZiMlDxmikF64zimWQunTrsEGOK9ydRahUAz2I46JAk= +github.com/iotaledger/hive.go/ds v0.0.0-20231113110812-4ca2b6cc9a42/go.mod h1:JE8cbZSvzbB5TrwXibg6M0B7ck35YxF30ItHBzQRlgc= +github.com/iotaledger/hive.go/ierrors v0.0.0-20231113110812-4ca2b6cc9a42 h1:gxlZ4zL6EfLyqT0+hIFV3WVE0FrPVgV5cQdyn36vPXQ= +github.com/iotaledger/hive.go/ierrors v0.0.0-20231113110812-4ca2b6cc9a42/go.mod h1:HcE8B5lP96enc/OALTb2/rIIi+yOLouRoHOKRclKmC8= github.com/iotaledger/hive.go/kvstore v0.0.0-20231110191152-7135670285dc h1:3fsqfM2NqfhrewVdlKT3MHcXxVNvUCSP7P32il1ypa0= github.com/iotaledger/hive.go/kvstore v0.0.0-20231110191152-7135670285dc/go.mod h1:ytfKoHr/nF8u0y0G4mamfG0yjFtJiJVk0kgjnPOtsSY= -github.com/iotaledger/hive.go/lo v0.0.0-20231110191152-7135670285dc h1:OrQBscQTsAzAJGwVs7qlPgczbvufsbENkOYRmyM+CF4= -github.com/iotaledger/hive.go/lo v0.0.0-20231110191152-7135670285dc/go.mod h1:6Ee7i6b4tuTHuRYnPP8VUb0wr9XFI5qlqtnttBd9jRg= +github.com/iotaledger/hive.go/lo v0.0.0-20231113110812-4ca2b6cc9a42 h1:kcHkWyURZDVqO80OmJo5Z+wTJB6H+s52WAnU575vX0o= +github.com/iotaledger/hive.go/lo 
v0.0.0-20231113110812-4ca2b6cc9a42/go.mod h1:6Ee7i6b4tuTHuRYnPP8VUb0wr9XFI5qlqtnttBd9jRg= github.com/iotaledger/hive.go/log v0.0.0-20231110191152-7135670285dc h1:joYrsSZuVG3DfAQR9iS3qjnMExJ0qNp2+369sxb1Y4g= github.com/iotaledger/hive.go/log v0.0.0-20231110191152-7135670285dc/go.mod h1:vzO4/wRkEJDEZb/9fD10oKU9k1bj4qLir2Uhl5U1FkM= -github.com/iotaledger/hive.go/runtime v0.0.0-20231110191152-7135670285dc h1:dN9VYzV53oz2TlHHGtRtqaGvMDvFRW0Uh433z13k6+E= -github.com/iotaledger/hive.go/runtime v0.0.0-20231110191152-7135670285dc/go.mod h1:DrZPvUvLarK8C2qb+3H2vdypp/MuhpQmB3iMJbDCr/Q= -github.com/iotaledger/hive.go/serializer/v2 v2.0.0-rc.1.0.20231110191152-7135670285dc h1:/DIsAs3PWCNkHoLXR2+uW34VAvZvfiCCJYA/rczfnmw= -github.com/iotaledger/hive.go/serializer/v2 v2.0.0-rc.1.0.20231110191152-7135670285dc/go.mod h1:FoH3T6yKlZJp8xm8K+zsQiibSynp32v21CpWx8xkek8= -github.com/iotaledger/hive.go/stringify v0.0.0-20231110191152-7135670285dc h1:Dp9sOvU2B7xoyX28bYZgUUDAIqMCBhsmK2vWhIgDyWE= -github.com/iotaledger/hive.go/stringify v0.0.0-20231110191152-7135670285dc/go.mod h1:FTo/UWzNYgnQ082GI9QVM9HFDERqf9rw9RivNpqrnTs= -github.com/iotaledger/iota.go/v4 v4.0.0-20231110131407-263d0662856b h1:eU9vrxmXr1rMs67BsIWrfmEK+IjIsOnbl2XTlTtNIls= -github.com/iotaledger/iota.go/v4 v4.0.0-20231110131407-263d0662856b/go.mod h1:1CUJKGvkOUGXakxFZGAagEQDX9qYyhzIElmUHCHo9RM= +github.com/iotaledger/hive.go/runtime v0.0.0-20231113110812-4ca2b6cc9a42 h1:hpR++ME3Y3CcxA431Zg0PgcCJUNkbBqjNXxR/bs+NdI= +github.com/iotaledger/hive.go/runtime v0.0.0-20231113110812-4ca2b6cc9a42/go.mod h1:DrZPvUvLarK8C2qb+3H2vdypp/MuhpQmB3iMJbDCr/Q= +github.com/iotaledger/hive.go/serializer/v2 v2.0.0-rc.1.0.20231113110812-4ca2b6cc9a42 h1:hepsnGvaS39azq80GV8DT9HlexoO/RqJbyiW5FXZ0HQ= +github.com/iotaledger/hive.go/serializer/v2 v2.0.0-rc.1.0.20231113110812-4ca2b6cc9a42/go.mod h1:FoH3T6yKlZJp8xm8K+zsQiibSynp32v21CpWx8xkek8= +github.com/iotaledger/hive.go/stringify v0.0.0-20231113110812-4ca2b6cc9a42 h1:9c7NiX2cnNPHR9UNWINDqNkolupXiDF3543pR6KLwIg= +github.com/iotaledger/hive.go/stringify v0.0.0-20231113110812-4ca2b6cc9a42/go.mod h1:FTo/UWzNYgnQ082GI9QVM9HFDERqf9rw9RivNpqrnTs= +github.com/iotaledger/iota.go/v4 v4.0.0-20231120063545-80c263f28140 h1:8zHRYT1KADR9bOLUg7Ia4XA3StBHzV4Tb2Qtp42KLN8= +github.com/iotaledger/iota.go/v4 v4.0.0-20231120063545-80c263f28140/go.mod h1:1CUJKGvkOUGXakxFZGAagEQDX9qYyhzIElmUHCHo9RM= github.com/ipfs/go-cid v0.4.1 h1:A/T3qGvxi4kpKWWcPC/PgbvDA2bjVLO7n4UeVwnbs/s= github.com/ipfs/go-cid v0.4.1/go.mod h1:uQHwDeX4c6CtyrFwdqyhpNcxVewur1M7l7fNU7LKwZk= github.com/klauspost/cpuid/v2 v2.2.5 h1:0E5MSMDEoAulmXNFquVs//DdoomxaoTY1kUhbc/qbZg= diff --git a/tools/genesis-snapshot/presets/presets.go b/tools/genesis-snapshot/presets/presets.go index 5c81cdaa7..d12b9c289 100644 --- a/tools/genesis-snapshot/presets/presets.go +++ b/tools/genesis-snapshot/presets/presets.go @@ -133,7 +133,7 @@ var Docker = []options.Option[snapshotcreator.Options]{ iotago.NewV3ProtocolParameters( iotago.WithNetworkOptions("docker", "rms"), iotago.WithSupplyOptions(4_600_000_000_000_000, 1, 1, 10, 100, 100, 100), - iotago.WithTimeProviderOptions(0, time.Now().Unix(), 10, 13), + iotago.WithTimeProviderOptions(5, time.Now().Unix(), 10, 13), iotago.WithLivenessOptions(30, 30, 7, 14, 30), // increase/decrease threshold = fraction * slotDurationInSeconds * schedulerRate iotago.WithCongestionControlOptions(500, 500, 500, 800000, 500000, 100000, 1000, 100), @@ -242,7 +242,7 @@ var Feature = []options.Option[snapshotcreator.Options]{ iotago.NewV3ProtocolParameters( 
iotago.WithNetworkOptions("feature", "rms"), iotago.WithSupplyOptions(4_600_000_000_000_000, 100, 1, 10, 100, 100, 100), - iotago.WithTimeProviderOptions(666666, time.Now().Unix(), 10, 13), + iotago.WithTimeProviderOptions(666666, time.Now().Unix()-100_000, 10, 13), // Let's fix genesis at 10_000 slots back. iotago.WithLivenessOptions(30, 30, 10, 20, 30), // increase/decrease threshold = fraction * slotDurationInSeconds * schedulerRate iotago.WithCongestionControlOptions(500, 500, 500, 800000, 500000, 100000, 1000, 100),