diff --git a/components/app/app.go b/components/app/app.go
index b8c82c8bd..4cb029500 100644
--- a/components/app/app.go
+++ b/components/app/app.go
@@ -4,7 +4,6 @@ import (
"github.com/iotaledger/hive.go/app"
"github.com/iotaledger/hive.go/app/components/profiling"
"github.com/iotaledger/hive.go/app/components/shutdown"
- "github.com/iotaledger/iota-core/components/blockissuer"
"github.com/iotaledger/iota-core/components/dashboard"
dashboardmetrics "github.com/iotaledger/iota-core/components/dashboard_metrics"
"github.com/iotaledger/iota-core/components/debugapi"
@@ -39,7 +38,6 @@ func App() *app.App {
debugapi.Component,
metricstracker.Component,
protocol.Component,
- blockissuer.Component,
validator.Component,
dashboardmetrics.Component,
dashboard.Component,
diff --git a/components/blockissuer/component.go b/components/blockissuer/component.go
deleted file mode 100644
index eb6af3dec..000000000
--- a/components/blockissuer/component.go
+++ /dev/null
@@ -1,63 +0,0 @@
-package blockissuer
-
-import (
- "context"
-
- "go.uber.org/dig"
-
- "github.com/iotaledger/hive.go/app"
- "github.com/iotaledger/iota-core/components/restapi"
- "github.com/iotaledger/iota-core/pkg/blockfactory"
- "github.com/iotaledger/iota-core/pkg/daemon"
- "github.com/iotaledger/iota-core/pkg/protocol"
-)
-
-func init() {
- Component = &app.Component{
- Name: "BlockIssuer",
- DepsFunc: func(cDeps dependencies) { deps = cDeps },
- Params: params,
- Provide: provide,
- Run: run,
- IsEnabled: func(_ *dig.Container) bool {
- return ParamsBlockIssuer.Enabled
- },
- }
-}
-
-var (
- Component *app.Component
- deps dependencies
-)
-
-type dependencies struct {
- dig.In
-
- BlockIssuer *blockfactory.BlockIssuer
-}
-
-func provide(c *dig.Container) error {
- type innerDependencies struct {
- dig.In
-
- Protocol *protocol.Protocol
- }
-
- return c.Provide(func(deps innerDependencies) *blockfactory.BlockIssuer {
- return blockfactory.New(deps.Protocol,
- blockfactory.WithTipSelectionTimeout(ParamsBlockIssuer.TipSelectionTimeout),
- blockfactory.WithTipSelectionRetryInterval(ParamsBlockIssuer.TipSelectionRetryInterval),
- blockfactory.WithIncompleteBlockAccepted(restapi.ParamsRestAPI.AllowIncompleteBlock),
- blockfactory.WithRateSetterEnabled(ParamsBlockIssuer.RateSetterEnabled),
- )
- })
-}
-
-func run() error {
- return Component.Daemon().BackgroundWorker(Component.Name, func(ctx context.Context) {
- Component.LogInfof("Starting BlockIssuer")
- <-ctx.Done()
- deps.BlockIssuer.Shutdown()
- Component.LogInfo("Stopping BlockIssuer... done")
- }, daemon.PriorityBlockIssuer)
-}
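Note: with the blockissuer component deleted, the *blockhandler.BlockHandler that the inx, restapi and validator components depend on below must be provided elsewhere; that wiring is not part of this diff. A minimal dig provider in the style of the removed provide() could look like the following sketch (the function name and placement are assumptions):

```go
package example

import (
	"go.uber.org/dig"

	"github.com/iotaledger/iota-core/pkg/blockhandler"
	"github.com/iotaledger/iota-core/pkg/protocol"
)

// provideBlockHandler mirrors the shape of the deleted provide() above, using
// the new constructor from pkg/blockhandler introduced later in this diff.
func provideBlockHandler(c *dig.Container) error {
	return c.Provide(func(p *protocol.Protocol) *blockhandler.BlockHandler {
		return blockhandler.New(p)
	})
}
```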
diff --git a/components/blockissuer/params.go b/components/blockissuer/params.go
deleted file mode 100644
index 988f1a371..000000000
--- a/components/blockissuer/params.go
+++ /dev/null
@@ -1,32 +0,0 @@
-package blockissuer
-
-import (
- "time"
-
- "github.com/iotaledger/hive.go/app"
-)
-
-// ParametersBlockIssuer contains the definition of the configuration parameters used by the BlockIssuer component.
-type ParametersBlockIssuer struct {
- // Enabled whether the BlockIssuer component is enabled.
- Enabled bool `default:"true" usage:"whether the BlockIssuer component is enabled"`
-
- // TipSelectionTimeout the timeout for tip selection.
- TipSelectionTimeout time.Duration `default:"10s" usage:"the timeout for tip selection"`
-
- // TipSelectionRetryInterval the interval for retrying tip selection.
- TipSelectionRetryInterval time.Duration `default:"200ms" usage:"the interval for retrying tip selection"`
-
- // RateSetterEnabled whether the RateSetter should be taken into account when issuing blocks.
- RateSetterEnabled bool `default:"false" usage:"whether the RateSetter should be taken into account when issuing blocks"`
-}
-
-// ParamsBlockIssuer is the default configuration parameters for the BlockIssuer component.
-var ParamsBlockIssuer = &ParametersBlockIssuer{}
-
-var params = &app.ComponentParams{
- Params: map[string]any{
- "blockIssuer": ParamsBlockIssuer,
- },
- Masked: []string{"blockIssuer.privateKey"},
-}
diff --git a/components/inx/component.go b/components/inx/component.go
index 290dae5b5..2314e7958 100644
--- a/components/inx/component.go
+++ b/components/inx/component.go
@@ -8,7 +8,7 @@ import (
"github.com/iotaledger/hive.go/app"
"github.com/iotaledger/iota-core/components/protocol"
- "github.com/iotaledger/iota-core/pkg/blockfactory"
+ "github.com/iotaledger/iota-core/pkg/blockhandler"
"github.com/iotaledger/iota-core/pkg/daemon"
protocolpkg "github.com/iotaledger/iota-core/pkg/protocol"
restapipkg "github.com/iotaledger/iota-core/pkg/restapi"
@@ -22,22 +22,20 @@ func init() {
IsEnabled: func(c *dig.Container) bool {
return ParamsINX.Enabled
},
- Provide: provide,
- Configure: configure,
- Run: run,
+ Provide: provide,
+ Run: run,
}
}
var (
- Component *app.Component
- deps dependencies
- blockIssuerAccount blockfactory.Account
+ Component *app.Component
+ deps dependencies
)
type dependencies struct {
dig.In
Protocol *protocolpkg.Protocol
- BlockIssuer *blockfactory.BlockIssuer
+ BlockHandler *blockhandler.BlockHandler
Echo *echo.Echo `optional:"true"`
RestRouteManager *restapipkg.RestRouteManager
INXServer *Server
@@ -54,12 +52,6 @@ func provide(c *dig.Container) error {
return nil
}
-func configure() error {
- blockIssuerAccount = blockfactory.AccountFromParams(ParamsINX.BlockIssuerAccount, ParamsINX.BlockIssuerPrivateKey)
-
- return nil
-}
-
func run() error {
if err := Component.Daemon().BackgroundWorker("INX", func(ctx context.Context) {
Component.LogInfo("Starting INX ... done")
diff --git a/components/inx/params.go b/components/inx/params.go
index 857d0eeb5..6f9eca990 100644
--- a/components/inx/params.go
+++ b/components/inx/params.go
@@ -10,10 +10,6 @@ type ParametersINX struct {
Enabled bool `default:"false" usage:"whether the INX plugin is enabled"`
// the bind address on which the INX can be accessed from
BindAddress string `default:"localhost:9029" usage:"the bind address on which the INX can be accessed from"`
- // BlockIssuerAccount the accountID of the account that will issue the blocks.
- BlockIssuerAccount string `default:"" usage:"the accountID of the account that will issue the blocks"`
- // BlockIssuerPrivateKey the private key of the account that will issue the blocks.
- BlockIssuerPrivateKey string `default:"" usage:"the private key of the account that will issue the blocks"`
}
var ParamsINX = &ParametersINX{}
diff --git a/components/inx/server_blocks.go b/components/inx/server_blocks.go
index e41439fbb..e45569dab 100644
--- a/components/inx/server_blocks.go
+++ b/components/inx/server_blocks.go
@@ -11,7 +11,7 @@ import (
"github.com/iotaledger/hive.go/runtime/event"
"github.com/iotaledger/hive.go/runtime/workerpool"
inx "github.com/iotaledger/inx/go"
- "github.com/iotaledger/iota-core/pkg/blockfactory"
+ "github.com/iotaledger/iota-core/pkg/blockhandler"
"github.com/iotaledger/iota-core/pkg/protocol/engine/blocks"
iotago "github.com/iotaledger/iota.go/v4"
)
@@ -130,13 +130,13 @@ func (s *Server) attachBlock(ctx context.Context, block *iotago.ProtocolBlock) (
mergedCtx, mergedCtxCancel := contextutils.MergeContexts(ctx, Component.Daemon().ContextStopped())
defer mergedCtxCancel()
- blockID, err := deps.BlockIssuer.AttachBlock(mergedCtx, block, blockIssuerAccount)
+ blockID, err := deps.BlockHandler.AttachBlock(mergedCtx, block)
if err != nil {
switch {
- case ierrors.Is(err, blockfactory.ErrBlockAttacherInvalidBlock):
+ case ierrors.Is(err, blockhandler.ErrBlockAttacherInvalidBlock):
return nil, status.Errorf(codes.InvalidArgument, "failed to attach block: %s", err.Error())
- case ierrors.Is(err, blockfactory.ErrBlockAttacherAttachingNotPossible):
+ case ierrors.Is(err, blockhandler.ErrBlockAttacherAttachingNotPossible):
return nil, status.Errorf(codes.Internal, "failed to attach block: %s", err.Error())
default:
diff --git a/components/restapi/core/blocks.go b/components/restapi/core/blocks.go
index dcff49cb9..67b041b16 100644
--- a/components/restapi/core/blocks.go
+++ b/components/restapi/core/blocks.go
@@ -6,8 +6,9 @@ import (
"github.com/labstack/echo/v4"
"github.com/iotaledger/hive.go/ierrors"
+ "github.com/iotaledger/hive.go/serializer/v2/serix"
"github.com/iotaledger/inx-app/pkg/httpserver"
- "github.com/iotaledger/iota-core/pkg/blockfactory"
+ "github.com/iotaledger/iota-core/pkg/blockhandler"
"github.com/iotaledger/iota-core/pkg/model"
"github.com/iotaledger/iota-core/pkg/restapi"
iotago "github.com/iotaledger/iota.go/v4"
@@ -71,7 +72,7 @@ func sendBlock(c echo.Context) (*apimodels.BlockCreatedResponse, error) {
return nil, ierrors.Wrapf(httpserver.ErrInvalidParameter, "invalid block, error: %w", err)
}
- var iotaBlock = &iotago.ProtocolBlock{}
+ var iotaBlock *iotago.ProtocolBlock
if c.Request().Body == nil {
// bad request
@@ -85,38 +86,27 @@ func sendBlock(c echo.Context) (*apimodels.BlockCreatedResponse, error) {
switch mimeType {
case echo.MIMEApplicationJSON:
- // Do not validate here, the parents might need to be set
- if err := deps.Protocol.CurrentAPI().JSONDecode(bytes, iotaBlock); err != nil {
+ iotaBlock = &iotago.ProtocolBlock{}
+ if err := deps.Protocol.CurrentAPI().JSONDecode(bytes, iotaBlock, serix.WithValidation()); err != nil {
return nil, ierrors.Wrapf(httpserver.ErrInvalidParameter, "invalid block, error: %w", err)
}
case httpserver.MIMEApplicationVendorIOTASerializerV2:
- version, _, err := iotago.VersionFromBytes(bytes)
+ iotaBlock, _, err = iotago.ProtocolBlockFromBytes(deps.Protocol)(bytes)
if err != nil {
return nil, ierrors.Wrapf(httpserver.ErrInvalidParameter, "invalid block, error: %w", err)
}
- apiForVersion, err := deps.Protocol.APIForVersion(version)
- if err != nil {
- return nil, ierrors.Wrapf(httpserver.ErrInvalidParameter, "invalid block, error: %w", err)
- }
-
- // Do not validate here, the parents might need to be set
- if _, err := apiForVersion.Decode(bytes, iotaBlock); err != nil {
- return nil, ierrors.Wrapf(httpserver.ErrInvalidParameter, "invalid block, error: %w", err)
- }
-
default:
return nil, echo.ErrUnsupportedMediaType
}
- blockID, err := deps.BlockIssuer.AttachBlock(c.Request().Context(), iotaBlock, blockIssuerAccount)
+ blockID, err := deps.BlockHandler.AttachBlock(c.Request().Context(), iotaBlock)
if err != nil {
switch {
- case ierrors.Is(err, blockfactory.ErrBlockAttacherInvalidBlock):
+ case ierrors.Is(err, blockhandler.ErrBlockAttacherInvalidBlock):
return nil, ierrors.Wrapf(httpserver.ErrInvalidParameter, "failed to attach block: %w", err)
- case ierrors.Is(err, blockfactory.ErrBlockAttacherAttachingNotPossible):
+ case ierrors.Is(err, blockhandler.ErrBlockAttacherAttachingNotPossible):
return nil, ierrors.Wrapf(echo.ErrInternalServerError, "failed to attach block: %w", err)
default:
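Note: with allowIncompleteBlock removed, the /blocks route only accepts fully-formed, signed blocks: the JSON path now decodes with serix.WithValidation() and the binary path parses the versioned bytes directly. A client-side sketch, assuming the usual /api prefix in front of the core/v3 route group (the 201 Created status and Location header come from component.go below):

```go
package example

import (
	"bytes"
	"fmt"
	"net/http"
)

// postBlock submits a complete, already JSON-encoded block to the core API.
// On success the node answers 201 Created and puts the new block ID into the
// Location header (see components/restapi/core/component.go).
func postBlock(nodeURL string, blockJSON []byte) (string, error) {
	resp, err := http.Post(nodeURL+"/api/core/v3/blocks", "application/json", bytes.NewReader(blockJSON))
	if err != nil {
		return "", err
	}
	defer resp.Body.Close()

	if resp.StatusCode != http.StatusCreated {
		return "", fmt.Errorf("unexpected status: %s", resp.Status)
	}

	return resp.Header.Get("Location"), nil
}
```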
diff --git a/components/restapi/core/component.go b/components/restapi/core/component.go
index fd9a49214..fb4085c48 100644
--- a/components/restapi/core/component.go
+++ b/components/restapi/core/component.go
@@ -13,7 +13,7 @@ import (
"github.com/iotaledger/iota-core/components/metricstracker"
"github.com/iotaledger/iota-core/components/protocol"
"github.com/iotaledger/iota-core/components/restapi"
- "github.com/iotaledger/iota-core/pkg/blockfactory"
+ "github.com/iotaledger/iota-core/pkg/blockhandler"
protocolpkg "github.com/iotaledger/iota-core/pkg/protocol"
restapipkg "github.com/iotaledger/iota-core/pkg/restapi"
)
@@ -39,10 +39,9 @@ const (
// GET returns block metadata.
RouteBlockMetadata = "/blocks/:" + restapipkg.ParameterBlockID + "/metadata"
- // RouteBlocks is the route for creating new blocks.
+ // RouteBlocks is the route for sending new blocks.
// POST creates a single new block and returns the new block ID.
// The block is parsed based on the given type in the request "Content-Type" header.
- // By providing only the protocolVersion and payload transaction user can POST a transaction.
// MIMEApplicationJSON => json.
// MIMEVendorIOTASerializer => bytes.
RouteBlocks = "/blocks"
@@ -134,8 +133,7 @@ var (
Component *app.Component
deps dependencies
- blockIssuerAccount blockfactory.Account
- features = []string{}
+ features = []string{}
)
type dependencies struct {
@@ -144,7 +142,7 @@ type dependencies struct {
AppInfo *app.Info
RestRouteManager *restapipkg.RestRouteManager
Protocol *protocolpkg.Protocol
- BlockIssuer *blockfactory.BlockIssuer `optional:"true"`
+ BlockHandler *blockhandler.BlockHandler
MetricsTracker *metricstracker.MetricsTracker
BaseToken *protocol.BaseToken
}
@@ -157,12 +155,6 @@ func configure() error {
routeGroup := deps.RestRouteManager.AddRoute("core/v3")
- if restapi.ParamsRestAPI.AllowIncompleteBlock {
- AddFeature("allowIncompleteBlock")
- }
-
- blockIssuerAccount = blockfactory.AccountFromParams(restapi.ParamsRestAPI.BlockIssuerAccount, restapi.ParamsRestAPI.BlockIssuerPrivateKey)
-
routeGroup.GET(RouteInfo, func(c echo.Context) error {
resp := info()
@@ -195,7 +187,7 @@ func configure() error {
c.Response().Header().Set(echo.HeaderLocation, resp.BlockID.ToHex())
return httpserver.JSONResponse(c, http.StatusCreated, resp)
- }, checkNodeSynced(), checkUpcomingUnsupportedProtocolVersion())
+ }, checkNodeSynced())
routeGroup.GET(RouteBlockIssuance, func(c echo.Context) error {
resp, err := blockIssuance(c)
@@ -360,19 +352,6 @@ func checkNodeSynced() echo.MiddlewareFunc {
}
}
-func checkUpcomingUnsupportedProtocolVersion() echo.MiddlewareFunc {
- return func(next echo.HandlerFunc) echo.HandlerFunc {
- return func(c echo.Context) error {
- // todo update with protocol upgrades support
- // if !deps.ProtocolManager.NextPendingSupported() {
- // return ierrors.Wrap(echo.ErrServiceUnavailable, "node does not support the upcoming protocol upgrade")
- // }
-
- return next(c)
- }
- }
-}
-
func responseByHeader(c echo.Context, obj any) error {
mimeType, err := httpserver.GetAcceptHeaderContentType(c, httpserver.MIMEApplicationVendorIOTASerializerV2, echo.MIMEApplicationJSON)
if err != nil && err != httpserver.ErrNotAcceptable {
diff --git a/components/restapi/params.go b/components/restapi/params.go
index a1fd8791e..440fdc102 100644
--- a/components/restapi/params.go
+++ b/components/restapi/params.go
@@ -16,8 +16,6 @@ type ParametersRestAPI struct {
ProtectedRoutes []string `usage:"the HTTP REST routes which need to be called with authorization. Wildcards using * are allowed"`
// whether the debug logging for requests should be enabled
DebugRequestLoggerEnabled bool `default:"false" usage:"whether the debug logging for requests should be enabled"`
- // AllowIncompleteBlock defines whether the node allows to fill in incomplete block and issue it for user.
- AllowIncompleteBlock bool `default:"false" usage:"whether the node allows to fill in incomplete block and issue it for user"`
// MaxPageSize defines the maximum number of results per page.
MaxPageSize uint32 `default:"100" usage:"the maximum number of results per page"`
// RequestsMemoryCacheGranularity defines per how many slots a cache is created for big API requests.
@@ -36,12 +34,6 @@ type ParametersRestAPI struct {
// the maximum number of results that may be returned by an endpoint
MaxResults int `default:"1000" usage:"the maximum number of results that may be returned by an endpoint"`
}
-
- // BlockIssuerAccount the accountID of the account that will issue the blocks.
- BlockIssuerAccount string `default:"" usage:"the accountID of the account that will issue the blocks"`
-
- // BlockIssuerPrivateKey the private key of the account that will issue the blocks.
- BlockIssuerPrivateKey string `default:"" usage:"the private key of the account that will issue the blocks"`
}
var ParamsRestAPI = &ParametersRestAPI{
diff --git a/components/validator/component.go b/components/validator/component.go
index 1199ac9a4..c7a79bb96 100644
--- a/components/validator/component.go
+++ b/components/validator/component.go
@@ -10,8 +10,7 @@ import (
"github.com/iotaledger/hive.go/app"
"github.com/iotaledger/hive.go/runtime/event"
"github.com/iotaledger/hive.go/runtime/timed"
- "github.com/iotaledger/iota-core/components/blockissuer"
- "github.com/iotaledger/iota-core/pkg/blockfactory"
+ "github.com/iotaledger/iota-core/pkg/blockhandler"
"github.com/iotaledger/iota-core/pkg/daemon"
"github.com/iotaledger/iota-core/pkg/protocol"
"github.com/iotaledger/iota-core/pkg/protocol/engine/notarization"
@@ -25,7 +24,7 @@ func init() {
Params: params,
Run: run,
IsEnabled: func(_ *dig.Container) bool {
- return ParamsValidator.Enabled && blockissuer.ParamsBlockIssuer.Enabled
+ return ParamsValidator.Enabled
},
}
}
@@ -36,18 +35,18 @@ var (
isValidator atomic.Bool
executor *timed.TaskExecutor[iotago.AccountID]
- validatorAccount blockfactory.Account
+ validatorAccount blockhandler.Account
)
type dependencies struct {
dig.In
- Protocol *protocol.Protocol
- BlockIssuer *blockfactory.BlockIssuer
+ Protocol *protocol.Protocol
+ BlockHandler *blockhandler.BlockHandler
}
func run() error {
- validatorAccount = blockfactory.AccountFromParams(ParamsValidator.Account, ParamsValidator.PrivateKey)
+ validatorAccount = blockhandler.AccountFromParams(ParamsValidator.Account, ParamsValidator.PrivateKey)
executor = timed.NewTaskExecutor[iotago.AccountID](1)
diff --git a/components/validator/issuer.go b/components/validator/issuer.go
index 6ea188566..97c0497a6 100644
--- a/components/validator/issuer.go
+++ b/components/validator/issuer.go
@@ -4,7 +4,9 @@ import (
"context"
"time"
- "github.com/iotaledger/iota-core/pkg/blockfactory"
+ "github.com/iotaledger/iota-core/pkg/model"
+ iotago "github.com/iotaledger/iota.go/v4"
+ "github.com/iotaledger/iota.go/v4/builder"
)
func issueValidatorBlock(ctx context.Context) {
@@ -35,17 +37,29 @@ func issueValidatorBlock(ctx context.Context) {
return
}
- modelBlock, err := deps.BlockIssuer.CreateValidationBlock(ctx,
- validatorAccount,
- blockfactory.WithValidationBlockHeaderOptions(
- blockfactory.WithIssuingTime(blockIssuingTime),
- blockfactory.WithSlotCommitment(latestCommitment.Commitment()),
- ),
- blockfactory.WithProtocolParametersHash(protocolParametersHash),
- blockfactory.WithHighestSupportedVersion(deps.Protocol.LatestAPI().Version()),
- )
+ parents := engineInstance.TipSelection.SelectTips(iotago.BlockTypeValidationMaxParents)
+
+ // create the validation block here using the validation block builder from iota.go
+ validationBlock, err := builder.NewValidationBlockBuilder(deps.Protocol.CurrentAPI()).
+ IssuingTime(blockIssuingTime).
+ SlotCommitmentID(latestCommitment.ID()).
+ ProtocolParametersHash(protocolParametersHash).
+ HighestSupportedVersion(deps.Protocol.LatestAPI().Version()).
+ LatestFinalizedSlot(engineInstance.SyncManager.LatestFinalizedSlot()).
+ StrongParents(parents[iotago.StrongParentType]).
+ WeakParents(parents[iotago.WeakParentType]).
+ ShallowLikeParents(parents[iotago.ShallowLikeParentType]).
+ Sign(validatorAccount.ID(), validatorAccount.PrivateKey()).
+ Build()
if err != nil {
- Component.LogWarnf("error creating validator block: %s", err.Error())
+ Component.LogWarnf("error creating validation block: %s", err.Error())
+
+ return
+ }
+
+ modelBlock, err := model.BlockFromBlock(validationBlock)
+ if err != nil {
+ Component.LogWarnf("error creating model block from validation block: %s", err.Error())
return
}
@@ -56,7 +70,7 @@ func issueValidatorBlock(ctx context.Context) {
nextBroadcast = blockIssuingTime.Add(ParamsValidator.CandidateBroadcastInterval)
}
- if err = deps.BlockIssuer.IssueBlock(modelBlock); err != nil {
+ if err = deps.BlockHandler.SubmitBlock(modelBlock); err != nil {
Component.LogWarnf("error issuing validator block: %s", err.Error())
return
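Note: the validator now assembles validation blocks inline with iota.go's builder package instead of going through blockfactory. Pulled out of the component, the builder chain reads as the following sketch (all inputs are placeholders supplied by the caller, the signatures follow the usage in this diff; weak and shallow-like parents are omitted for brevity):

```go
package example

import (
	"crypto/ed25519"
	"time"

	iotago "github.com/iotaledger/iota.go/v4"
	"github.com/iotaledger/iota.go/v4/builder"
)

// buildValidationBlock reproduces the builder chain from issuer.go above as a
// standalone helper.
func buildValidationBlock(
	api iotago.API,
	accountID iotago.AccountID,
	privateKey ed25519.PrivateKey,
	commitmentID iotago.CommitmentID,
	latestFinalizedSlot iotago.SlotIndex,
	protocolParametersHash iotago.Identifier,
	strongParents iotago.BlockIDs,
) (*iotago.ProtocolBlock, error) {
	return builder.NewValidationBlockBuilder(api).
		IssuingTime(time.Now()).
		SlotCommitmentID(commitmentID).
		ProtocolParametersHash(protocolParametersHash).
		HighestSupportedVersion(api.Version()).
		LatestFinalizedSlot(latestFinalizedSlot).
		StrongParents(strongParents).
		Sign(accountID, privateKey).
		Build()
}
```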
diff --git a/config_defaults.json b/config_defaults.json
index ee999ac24..e7577c2d2 100644
--- a/config_defaults.json
+++ b/config_defaults.json
@@ -65,7 +65,6 @@
"/api/*"
],
"debugRequestLoggerEnabled": false,
- "allowIncompleteBlock": false,
"maxPageSize": 100,
"requestsMemoryCacheGranularity": 10,
"maxRequestedSlotAge": 10,
@@ -75,9 +74,7 @@
"limits": {
"maxBodyLength": "1M",
"maxResults": 1000
- },
- "blockIssuerAccount": "",
- "blockIssuerPrivateKey": ""
+ }
},
"debugAPI": {
"enabled": true,
@@ -155,8 +152,6 @@
},
"inx": {
"enabled": false,
- "bindAddress": "localhost:9029",
- "blockIssuerAccount": "",
- "blockIssuerPrivateKey": ""
+ "bindAddress": "localhost:9029"
}
}
diff --git a/deploy/ansible/roles/iota-core-node/templates/docker-compose-iota-core.yml.j2 b/deploy/ansible/roles/iota-core-node/templates/docker-compose-iota-core.yml.j2
index 979850857..e86b3be51 100644
--- a/deploy/ansible/roles/iota-core-node/templates/docker-compose-iota-core.yml.j2
+++ b/deploy/ansible/roles/iota-core-node/templates/docker-compose-iota-core.yml.j2
@@ -40,7 +40,6 @@ services:
--profiling.bindAddress=0.0.0.0:6061
--profiling.enabled=true
--protocol.snapshot.path=/app/data/snapshot.bin
- --restAPI.allowIncompleteBlock=true
{% if 'node-01' in inventory_hostname or 'node-02' in inventory_hostname or 'node-03' in inventory_hostname %}
--validator.enabled=true
--validator.account={{validatorAccount}}
@@ -49,11 +48,6 @@ services:
{% if 'node-01' in inventory_hostname %}
--validator.ignoreBootstrapped=true
{% endif %}
- --restAPI.allowIncompleteBlock=true
- --restAPI.blockIssuerAccount={{validatorAccount}}
- --restAPI.blockIssuerPrivateKey={{validatorPrivKey}}
- --inx.blockIssuerAccount={{validatorAccount}}
- --inx.blockIssuerPrivateKey={{validatorPrivKey}}
--p2p.peers=/dns/node-01.feature/tcp/14666/p2p/12D3KooWCrjmh4dUCWfGVQT6ivzArieJB9Z3eKdy2mdEEN95NDPS
--p2p.externalMultiAddresses={{ ips | join(',') }}
--p2p.identityPrivateKey={{p2pIdentityPrivateKey}}
diff --git a/documentation/docs/references/configuration.md b/documentation/docs/references/configuration.md
index 484cf55c4..bfe970cc5 100644
--- a/documentation/docs/references/configuration.md
+++ b/documentation/docs/references/configuration.md
@@ -179,14 +179,11 @@ Example:
| publicRoutes | The HTTP REST routes which can be called without authorization. Wildcards using \* are allowed | array | /health<br/>/api/routes<br/>/api/core/v3/info<br/>/api/core/v3/blocks\*<br/>/api/core/v3/transactions\*<br/>/api/core/v3/commitments\*<br/>/api/core/v3/outputs\*<br/>/api/core/v3/accounts\*<br/>/api/core/v3/validators\*<br/>/api/core/v3/rewards\*<br/>/api/core/v3/committee<br/>/api/debug/v2/\*<br/>/api/indexer/v2/\*<br/>/api/mqtt/v2 |
| protectedRoutes | The HTTP REST routes which need to be called with authorization. Wildcards using \* are allowed | array | /api/\* |
| debugRequestLoggerEnabled | Whether the debug logging for requests should be enabled | boolean | false |
-| allowIncompleteBlock | Whether the node allows to fill in incomplete block and issue it for user | boolean | false |
| maxPageSize | The maximum number of results per page | uint | 100 |
| requestsMemoryCacheGranularity | Defines per how many slots a cache is created for big API requests | uint | 10 |
| maxRequestedSlotAge | The maximum age of a request that will be processed | uint | 10 |
| [jwtAuth](#restapi_jwtauth) | Configuration for jwtAuth | object | |
| [limits](#restapi_limits) | Configuration for limits | object | |
-| blockIssuerAccount | The accountID of the account that will issue the blocks | string | "" |
-| blockIssuerPrivateKey | The private key of the account that will issue the blocks | string | "" |
### JwtAuth
@@ -228,7 +225,6 @@ Example:
"/api/*"
],
"debugRequestLoggerEnabled": false,
- "allowIncompleteBlock": false,
"maxPageSize": 100,
"requestsMemoryCacheGranularity": 10,
"maxRequestedSlotAge": 10,
@@ -238,9 +234,7 @@ Example:
"limits": {
"maxBodyLength": "1M",
"maxResults": 1000
- },
- "blockIssuerAccount": "",
- "blockIssuerPrivateKey": ""
+ }
}
}
```
@@ -499,12 +493,10 @@ Example:
## 14. Inx
-| Name | Description | Type | Default value |
-| --------------------- | --------------------------------------------------------- | ------- | ---------------- |
-| enabled | Whether the INX plugin is enabled | boolean | false |
-| bindAddress | The bind address on which the INX can be accessed from | string | "localhost:9029" |
-| blockIssuerAccount | The accountID of the account that will issue the blocks | string | "" |
-| blockIssuerPrivateKey | The private key of the account that will issue the blocks | string | "" |
+| Name | Description | Type | Default value |
+| ----------- | ------------------------------------------------------ | ------- | ---------------- |
+| enabled | Whether the INX plugin is enabled | boolean | false |
+| bindAddress | The bind address on which the INX can be accessed from | string | "localhost:9029" |
Example:
@@ -512,9 +504,7 @@ Example:
{
"inx": {
"enabled": false,
- "bindAddress": "localhost:9029",
- "blockIssuerAccount": "",
- "blockIssuerPrivateKey": ""
+ "bindAddress": "localhost:9029"
}
}
```
diff --git a/pkg/blockfactory/account.go b/pkg/blockhandler/account.go
similarity index 98%
rename from pkg/blockfactory/account.go
rename to pkg/blockhandler/account.go
index d945d692c..20d33c0ee 100644
--- a/pkg/blockfactory/account.go
+++ b/pkg/blockhandler/account.go
@@ -1,4 +1,4 @@
-package blockfactory
+package blockhandler
import (
"crypto/ed25519"
diff --git a/pkg/blockfactory/block_params.go b/pkg/blockhandler/block_params.go
similarity index 99%
rename from pkg/blockfactory/block_params.go
rename to pkg/blockhandler/block_params.go
index e8600c0bf..951f285c8 100644
--- a/pkg/blockfactory/block_params.go
+++ b/pkg/blockhandler/block_params.go
@@ -1,4 +1,4 @@
-package blockfactory
+package blockhandler
import (
"time"
diff --git a/pkg/blockhandler/blockissuer.go b/pkg/blockhandler/blockissuer.go
new file mode 100644
index 000000000..45452d9ab
--- /dev/null
+++ b/pkg/blockhandler/blockissuer.go
@@ -0,0 +1,118 @@
+package blockhandler
+
+import (
+ "context"
+
+ "github.com/iotaledger/hive.go/ierrors"
+ "github.com/iotaledger/hive.go/runtime/event"
+ "github.com/iotaledger/hive.go/runtime/workerpool"
+ "github.com/iotaledger/iota-core/pkg/model"
+ "github.com/iotaledger/iota-core/pkg/protocol"
+ "github.com/iotaledger/iota-core/pkg/protocol/engine/blocks"
+ "github.com/iotaledger/iota-core/pkg/protocol/engine/filter"
+ iotago "github.com/iotaledger/iota.go/v4"
+)
+
+var (
+ ErrBlockAttacherInvalidBlock = ierrors.New("invalid block")
+ ErrBlockAttacherAttachingNotPossible = ierrors.New("attaching not possible")
+ ErrBlockAttacherIncompleteBlockNotAllowed = ierrors.New("incomplete block is not allowed on this node")
+)
+
+// TODO: make sure an honest validator does not issue blocks within the same slot ratification period in two conflicting chains.
+// - this can be achieved by remembering the last issued block together with the engine name/chain.
+// - if the engine name/chain is the same we can always issue a block.
+// - if the engine name/chain is different we need to make sure to wait "slot ratification" slots.
+
+// BlockHandler contains the logic to attach blocks to the protocol and await their processing.
+type BlockHandler struct {
+ events *Events
+
+ workerPool *workerpool.WorkerPool
+
+ protocol *protocol.Protocol
+}
+
+func New(p *protocol.Protocol) *BlockHandler {
+ return &BlockHandler{
+ events: NewEvents(),
+ workerPool: p.Workers.CreatePool("BlockIssuer"),
+ protocol: p,
+ }
+}
+
+// Shutdown shuts down the block handler and waits for its worker pool to finish.
+func (i *BlockHandler) Shutdown() {
+ i.workerPool.Shutdown()
+ i.workerPool.ShutdownComplete.Wait()
+}
+
+// SubmitBlock submits a block to be processed.
+func (i *BlockHandler) SubmitBlock(block *model.Block) error {
+ return i.submitBlock(block)
+}
+
+// SubmitBlockAndAwaitEvent submits a block to be processed and waits for the event to be triggered.
+func (i *BlockHandler) SubmitBlockAndAwaitEvent(ctx context.Context, block *model.Block, evt *event.Event1[*blocks.Block]) error {
+ triggered := make(chan error, 1)
+ exit := make(chan struct{})
+ defer close(exit)
+
+ defer evt.Hook(func(eventBlock *blocks.Block) {
+ if block.ID() != eventBlock.ID() {
+ return
+ }
+ select {
+ case triggered <- nil:
+ case <-exit:
+ }
+ }, event.WithWorkerPool(i.workerPool)).Unhook()
+
+ defer i.protocol.Events.Engine.Filter.BlockPreFiltered.Hook(func(event *filter.BlockPreFilteredEvent) {
+ if block.ID() != event.Block.ID() {
+ return
+ }
+ select {
+ case triggered <- event.Reason:
+ case <-exit:
+ }
+ }, event.WithWorkerPool(i.workerPool)).Unhook()
+
+ if err := i.submitBlock(block); err != nil {
+ return ierrors.Wrapf(err, "failed to issue block %s", block.ID())
+ }
+
+ select {
+ case <-ctx.Done():
+ return ierrors.Errorf("context canceled whilst waiting for event on block %s", block.ID())
+ case err := <-triggered:
+ if err != nil {
+ return ierrors.Wrapf(err, "block filtered out %s", block.ID())
+ }
+
+ return nil
+ }
+}
+
+func (i *BlockHandler) AttachBlock(ctx context.Context, iotaBlock *iotago.ProtocolBlock) (iotago.BlockID, error) {
+ modelBlock, err := model.BlockFromBlock(iotaBlock)
+ if err != nil {
+ return iotago.EmptyBlockID(), ierrors.Wrap(err, "error serializing block to model block")
+ }
+
+ if err = i.SubmitBlockAndAwaitEvent(ctx, modelBlock, i.protocol.Events.Engine.BlockDAG.BlockAttached); err != nil {
+ return iotago.EmptyBlockID(), ierrors.Wrap(err, "error issuing model block")
+ }
+
+ return modelBlock.ID(), nil
+}
+
+func (i *BlockHandler) submitBlock(block *model.Block) error {
+ if err := i.protocol.IssueBlock(block); err != nil {
+ return err
+ }
+
+ i.events.BlockSubmitted.Trigger(block)
+
+ return nil
+}
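Note: taken together, the handler reduces to "convert, submit, await BlockAttached". A usage sketch with placeholders for the protocol instance and a fully-signed block:

```go
package example

import (
	"context"
	"fmt"

	"github.com/iotaledger/iota-core/pkg/blockhandler"
	"github.com/iotaledger/iota-core/pkg/protocol"
	iotago "github.com/iotaledger/iota.go/v4"
)

// attachOne wires a BlockHandler up directly and attaches a single block,
// much like the inx and restapi components do via dig.
func attachOne(p *protocol.Protocol, block *iotago.ProtocolBlock) error {
	handler := blockhandler.New(p)
	defer handler.Shutdown()

	blockID, err := handler.AttachBlock(context.Background(), block)
	if err != nil {
		// Callers check the sentinel errors, e.g.
		// blockhandler.ErrBlockAttacherInvalidBlock (see server_blocks.go above).
		return err
	}
	fmt.Println("attached block:", blockID)

	return nil
}
```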
diff --git a/pkg/blockhandler/events.go b/pkg/blockhandler/events.go
new file mode 100644
index 000000000..0b3c09e55
--- /dev/null
+++ b/pkg/blockhandler/events.go
@@ -0,0 +1,25 @@
+package blockhandler
+
+import (
+ "github.com/iotaledger/hive.go/runtime/event"
+ "github.com/iotaledger/iota-core/pkg/model"
+)
+
+// Events represents events happening on a block handler.
+type Events struct {
+ // Triggered when a block is submitted, i.e. sent to the protocol to be processed.
+ BlockSubmitted *event.Event1[*model.Block]
+
+ // Triggered when an error occurs.
+ Error *event.Event1[error]
+
+ event.Group[Events, *Events]
+}
+
+// NewEvents contains the constructor of the Events object (it is generated by a generic factory).
+var NewEvents = event.CreateGroupConstructor(func() (newEvents *Events) {
+ return &Events{
+ BlockSubmitted: event.New1[*model.Block](),
+ Error: event.New1[error](),
+ }
+})
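Note: the Events group follows the usual hive.go pattern. Since the handler keeps its events field unexported, the sketch below constructs a group directly just to show the hook/unhook flow:

```go
package example

import (
	"fmt"

	"github.com/iotaledger/iota-core/pkg/blockhandler"
	"github.com/iotaledger/iota-core/pkg/model"
)

func watchSubmissions() {
	evts := blockhandler.NewEvents()

	// Hook returns a handle whose Unhook detaches the callback again.
	hook := evts.BlockSubmitted.Hook(func(block *model.Block) {
		fmt.Println("submitted:", block.ID())
	})
	defer hook.Unhook()

	// Trigger side (done by BlockHandler.submitBlock in this diff):
	// evts.BlockSubmitted.Trigger(someModelBlock)
}
```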
diff --git a/pkg/tests/accounts_test.go b/pkg/tests/accounts_test.go
index 3ed88b77d..c0bfc7b00 100644
--- a/pkg/tests/accounts_test.go
+++ b/pkg/tests/accounts_test.go
@@ -5,7 +5,6 @@ import (
"github.com/iotaledger/hive.go/lo"
"github.com/iotaledger/hive.go/runtime/options"
- "github.com/iotaledger/iota-core/pkg/blockfactory"
"github.com/iotaledger/iota-core/pkg/model"
"github.com/iotaledger/iota-core/pkg/protocol"
"github.com/iotaledger/iota-core/pkg/protocol/engine/accounts"
@@ -43,6 +42,7 @@ func Test_TransitionAccount(t *testing.T) {
node1 := ts.AddValidatorNode("node1")
_ = ts.AddNode("node2")
+ blockIssuer := ts.AddBasicBlockIssuer("default", iotago.MaxBlockIssuanceCredits/2)
ts.Run(true, map[string][]options.Option[protocol.Protocol]{})
@@ -89,13 +89,12 @@ func Test_TransitionAccount(t *testing.T) {
))
var block1Slot iotago.SlotIndex = 1
- activeNodes := []*mock.Node{node1}
genesisCommitment := iotago.NewEmptyCommitment(ts.API.ProtocolParameters().Version())
genesisCommitment.ReferenceManaCost = ts.API.ProtocolParameters().CongestionControlParameters().MinReferenceManaCost
- block1 := ts.IssueBlockAtSlotWithOptions("block1", block1Slot, genesisCommitment, node1, tx1)
+ block1 := ts.IssueBasicBlockAtSlotWithOptions("block1", block1Slot, genesisCommitment, blockIssuer, node1, tx1)
- latestParent := ts.CommitUntilSlot(ts.BlockID("block1").Slot(), activeNodes, block1)
+ latestParent := ts.CommitUntilSlot(ts.BlockID("block1").Slot(), block1)
ts.AssertAccountDiff(genesisAccountOutput.AccountID, block1Slot, &model.AccountDiff{
BICChange: 0,
@@ -119,7 +118,7 @@ func Test_TransitionAccount(t *testing.T) {
// DESTROY GENESIS ACCOUNT, CREATE NEW ACCOUNT WITH BLOCK ISSUER AND STAKING FEATURES FROM BASIC UTXO
// commit until the expiry slot of the transitioned genesis account plus one
- latestParent = ts.CommitUntilSlot(accountOutputs[0].FeatureSet().BlockIssuer().ExpirySlot+1, activeNodes, latestParent)
+ latestParent = ts.CommitUntilSlot(accountOutputs[0].FeatureSet().BlockIssuer().ExpirySlot+1, latestParent)
// set the expiry slot of the transitioned genesis account to the latest committed + MaxCommittableAge
newAccountExpirySlot := node1.Protocol.MainEngineInstance().Storage.Settings().LatestCommitment().Slot() + ts.API.ProtocolParameters().MaxCommittableAge()
inputForNewAccount, newAccountOutputs, newAccountWallets := ts.TransactionFramework.CreateAccountFromInput("TX1:1",
@@ -150,9 +149,9 @@ func Test_TransitionAccount(t *testing.T) {
testsuite.WithSlotCreated(block2Slot),
))
- block2 := ts.IssueBlockAtSlotWithOptions("block2", block2Slot, node1.Protocol.MainEngineInstance().Storage.Settings().LatestCommitment().Commitment(), node1, tx2, blockfactory.WithStrongParents(latestParent.ID()))
+ block2 := ts.IssueBasicBlockAtSlotWithOptions("block2", block2Slot, node1.Protocol.MainEngineInstance().Storage.Settings().LatestCommitment().Commitment(), blockIssuer, node1, tx2, mock.WithStrongParents(latestParent.ID()))
- latestParent = ts.CommitUntilSlot(block2Slot, activeNodes, block2)
+ latestParent = ts.CommitUntilSlot(block2Slot, block2)
// assert diff of a destroyed account, to make sure we can correctly restore it
ts.AssertAccountDiff(genesisAccountOutput.AccountID, block2Slot, &model.AccountDiff{
@@ -220,9 +219,9 @@ func Test_TransitionAccount(t *testing.T) {
testsuite.WithSlotCreated(block3Slot),
))
- block3 := ts.IssueBlockAtSlotWithOptions("block3", block3Slot, node1.Protocol.MainEngineInstance().Storage.Settings().LatestCommitment().Commitment(), node1, tx3, blockfactory.WithStrongParents(latestParent.ID()))
+ block3 := ts.IssueBasicBlockAtSlotWithOptions("block3", block3Slot, node1.Protocol.MainEngineInstance().Storage.Settings().LatestCommitment().Commitment(), blockIssuer, node1, tx3, mock.WithStrongParents(latestParent.ID()))
- latestParent = ts.CommitUntilSlot(block3Slot, activeNodes, block3)
+ latestParent = ts.CommitUntilSlot(block3Slot, block3)
delegatedAmount := inputForNewDelegation[0].BaseTokenAmount()
ts.AssertAccountDiff(newAccountOutput.AccountID, block3Slot, &model.AccountDiff{
@@ -266,9 +265,9 @@ func Test_TransitionAccount(t *testing.T) {
block4Slot := latestParent.ID().Slot()
- block4 := ts.IssueBlockAtSlotWithOptions("block4", block4Slot, node1.Protocol.MainEngineInstance().Storage.Settings().LatestCommitment().Commitment(), node1, tx4, blockfactory.WithStrongParents(latestParent.ID()))
+ block4 := ts.IssueBasicBlockAtSlotWithOptions("block4", block4Slot, node1.Protocol.MainEngineInstance().Storage.Settings().LatestCommitment().Commitment(), blockIssuer, node1, tx4, mock.WithStrongParents(latestParent.ID()))
- latestParent = ts.CommitUntilSlot(block4Slot, activeNodes, block4)
+ latestParent = ts.CommitUntilSlot(block4Slot, block4)
// Transitioning to delayed claiming effectively removes the delegation, so we expect a negative delegation stake change.
ts.AssertAccountDiff(newAccountOutput.AccountID, block4Slot, &model.AccountDiff{
@@ -310,9 +309,9 @@ func Test_TransitionAccount(t *testing.T) {
slotIndexBlock5 := latestParent.ID().Index()
- block5 := ts.IssueBlockAtSlotWithOptions("block5", slotIndexBlock5, node1.Protocol.MainEngineInstance().Storage.Settings().LatestCommitment().Commitment(), node1, tx5, blockfactory.WithStrongParents(latestParent.ID()))
+ block5 := ts.IssueBasicBlockAtSlotWithOptions("block5", slotIndexBlock5, node1.Protocol.MainEngineInstance().Storage.Settings().LatestCommitment().Commitment(), blockIssuer, node1, tx5, mock.WithStrongParents(latestParent.ID()))
- latestParent = ts.CommitUntilSlot(slotIndexBlock5, activeNodes, block5)
+ latestParent = ts.CommitUntilSlot(slotIndexBlock5, block5)
var implicitBlockIssuerKey iotago.BlockIssuerKey = iotago.Ed25519PublicKeyHashBlockIssuerKeyFromImplicitAccountCreationAddress(implicitAccountAddress)
@@ -352,9 +351,9 @@ func Test_TransitionAccount(t *testing.T) {
slotIndexBlock6 := latestParent.ID().Index()
- block6 := ts.IssueBlockAtSlotWithOptions("block6", slotIndexBlock6, node1.Protocol.MainEngineInstance().Storage.Settings().LatestCommitment().Commitment(), node1, tx6, blockfactory.WithStrongParents(latestParent.ID()))
+ block6 := ts.IssueBasicBlockAtSlotWithOptions("block6", slotIndexBlock6, node1.Protocol.MainEngineInstance().Storage.Settings().LatestCommitment().Commitment(), blockIssuer, node1, tx6, mock.WithStrongParents(latestParent.ID()))
- latestParent = ts.CommitUntilSlot(slotIndexBlock6, activeNodes, block6)
+ latestParent = ts.CommitUntilSlot(slotIndexBlock6, block6)
fullAccountOutputID := ts.TransactionFramework.Output("TX6:0").OutputID()
diff --git a/pkg/tests/booker_test.go b/pkg/tests/booker_test.go
index 6815c8f2f..5fe14486a 100644
--- a/pkg/tests/booker_test.go
+++ b/pkg/tests/booker_test.go
@@ -5,7 +5,6 @@ import (
"github.com/iotaledger/hive.go/lo"
"github.com/iotaledger/hive.go/runtime/options"
- "github.com/iotaledger/iota-core/pkg/blockfactory"
"github.com/iotaledger/iota-core/pkg/core/acceptance"
"github.com/iotaledger/iota-core/pkg/protocol"
"github.com/iotaledger/iota-core/pkg/protocol/engine/blocks"
@@ -19,13 +18,14 @@ func Test_IssuingTransactionsOutOfOrder(t *testing.T) {
defer ts.Shutdown()
node1 := ts.AddValidatorNode("node1")
+ blockIssuer := ts.AddBasicBlockIssuer("default")
ts.Run(true, map[string][]options.Option[protocol.Protocol]{})
tx1 := lo.PanicOnErr(ts.TransactionFramework.CreateSimpleTransaction("tx1", 1, "Genesis:0"))
tx2 := lo.PanicOnErr(ts.TransactionFramework.CreateSimpleTransaction("tx2", 1, "tx1:0"))
- ts.IssuePayloadWithOptions("block1", node1, tx2)
+ ts.IssuePayloadWithOptions("block1", blockIssuer, node1, tx2)
ts.AssertTransactionsExist(ts.TransactionFramework.Transactions("tx2"), true, node1)
ts.AssertTransactionsExist(ts.TransactionFramework.Transactions("tx1"), false, node1)
@@ -33,7 +33,7 @@ func Test_IssuingTransactionsOutOfOrder(t *testing.T) {
ts.AssertTransactionsInCacheBooked(ts.TransactionFramework.Transactions("tx2"), false, node1)
// make sure that the block is not booked
- ts.IssuePayloadWithOptions("block2", node1, tx1)
+ ts.IssuePayloadWithOptions("block2", blockIssuer, node1, tx1)
ts.AssertTransactionsExist(ts.TransactionFramework.Transactions("tx1", "tx2"), true, node1)
ts.AssertTransactionsInCacheBooked(ts.TransactionFramework.Transactions("tx1", "tx2"), true, node1)
@@ -54,12 +54,13 @@ func Test_DoubleSpend(t *testing.T) {
node1 := ts.AddValidatorNode("node1")
node2 := ts.AddValidatorNode("node2")
+ blockIssuer := ts.AddBasicBlockIssuer("default")
ts.Run(true, map[string][]options.Option[protocol.Protocol]{})
ts.AssertSybilProtectionCommittee(0, []iotago.AccountID{
- node1.AccountID,
- node2.AccountID,
+ node1.Validator.AccountID,
+ node2.Validator.AccountID,
}, ts.Nodes()...)
// Create and issue double spends
@@ -67,8 +68,8 @@ func Test_DoubleSpend(t *testing.T) {
tx1 := lo.PanicOnErr(ts.TransactionFramework.CreateSimpleTransaction("tx1", 1, "Genesis:0"))
tx2 := lo.PanicOnErr(ts.TransactionFramework.CreateSimpleTransaction("tx2", 1, "Genesis:0"))
- ts.IssuePayloadWithOptions("block1", node1, tx1, blockfactory.WithStrongParents(ts.BlockID("Genesis")))
- ts.IssuePayloadWithOptions("block2", node1, tx2, blockfactory.WithStrongParents(ts.BlockID("Genesis")))
+ ts.IssuePayloadWithOptions("block1", blockIssuer, node1, tx1, mock.WithStrongParents(ts.BlockID("Genesis")))
+ ts.IssuePayloadWithOptions("block2", blockIssuer, node1, tx2, mock.WithStrongParents(ts.BlockID("Genesis")))
ts.AssertTransactionsExist(ts.TransactionFramework.Transactions("tx1", "tx2"), true, node1, node2)
ts.AssertTransactionsInCacheBooked(ts.TransactionFramework.Transactions("tx1", "tx2"), true, node1, node2)
@@ -86,8 +87,8 @@ func Test_DoubleSpend(t *testing.T) {
// Issue some more blocks and assert that conflicts are propagated to blocks.
{
- ts.IssueValidationBlock("block3", node1, blockfactory.WithStrongParents(ts.BlockID("block1")))
- ts.IssueValidationBlock("block4", node1, blockfactory.WithStrongParents(ts.BlockID("block2")))
+ ts.IssueValidationBlock("block3", node1, mock.WithStrongParents(ts.BlockID("block1")))
+ ts.IssueValidationBlock("block4", node1, mock.WithStrongParents(ts.BlockID("block2")))
ts.AssertBlocksInCacheConflicts(map[*blocks.Block][]string{
ts.Block("block3"): {"tx1"},
@@ -98,15 +99,15 @@ func Test_DoubleSpend(t *testing.T) {
// Issue an invalid block and assert that its vote is not cast.
{
- ts.IssueValidationBlock("block5", node2, blockfactory.WithStrongParents(ts.BlockIDs("block3", "block4")...))
+ ts.IssueValidationBlock("block5", node2, mock.WithStrongParents(ts.BlockIDs("block3", "block4")...))
ts.AssertTransactionsInCachePending(ts.TransactionFramework.Transactions("tx1", "tx2"), true, node1, node2)
}
// Issue valid blocks that resolve the conflict.
{
- ts.IssueValidationBlock("block6", node2, blockfactory.WithStrongParents(ts.BlockIDs("block3", "block4")...), blockfactory.WithShallowLikeParents(ts.BlockID("block2")))
- ts.IssueValidationBlock("block7", node1, blockfactory.WithStrongParents(ts.BlockIDs("block6")...))
+ ts.IssueValidationBlock("block6", node2, mock.WithStrongParents(ts.BlockIDs("block3", "block4")...), mock.WithShallowLikeParents(ts.BlockID("block2")))
+ ts.IssueValidationBlock("block7", node1, mock.WithStrongParents(ts.BlockIDs("block6")...))
ts.AssertBlocksInCacheConflicts(map[*blocks.Block][]string{
ts.Block("block6"): {"tx2"},
@@ -123,6 +124,8 @@ func Test_MultipleAttachments(t *testing.T) {
nodeA := ts.AddValidatorNode("nodeA")
nodeB := ts.AddValidatorNode("nodeB")
+ blockIssuerA := ts.AddBasicBlockIssuer("blockIssuerA")
+ blockIssuerB := ts.AddBasicBlockIssuer("blockIssuerB")
ts.Run(true, map[string][]options.Option[protocol.Protocol]{})
@@ -132,13 +135,13 @@ func Test_MultipleAttachments(t *testing.T) {
{
tx1 := lo.PanicOnErr(ts.TransactionFramework.CreateSimpleTransaction("tx1", 2, "Genesis:0"))
- ts.IssuePayloadWithOptions("A.1", nodeA, tx1, blockfactory.WithStrongParents(ts.BlockID("Genesis")))
- ts.IssueValidationBlock("A.1.1", nodeA, blockfactory.WithStrongParents(ts.BlockID("A.1")))
- ts.IssuePayloadWithOptions("B.1", nodeB, tx1, blockfactory.WithStrongParents(ts.BlockID("Genesis")))
- ts.IssueValidationBlock("B.1.1", nodeB, blockfactory.WithStrongParents(ts.BlockID("B.1")))
+ ts.IssuePayloadWithOptions("A.1", blockIssuerA, nodeA, tx1, mock.WithStrongParents(ts.BlockID("Genesis")))
+ ts.IssueValidationBlock("A.1.1", nodeA, mock.WithStrongParents(ts.BlockID("A.1")))
+ ts.IssuePayloadWithOptions("B.1", blockIssuerB, nodeB, tx1, mock.WithStrongParents(ts.BlockID("Genesis")))
+ ts.IssueValidationBlock("B.1.1", nodeB, mock.WithStrongParents(ts.BlockID("B.1")))
- ts.IssueValidationBlock("A.2", nodeA, blockfactory.WithStrongParents(ts.BlockID("B.1.1")))
- ts.IssueValidationBlock("B.2", nodeB, blockfactory.WithStrongParents(ts.BlockID("A.1.1")))
+ ts.IssueValidationBlock("A.2", nodeA, mock.WithStrongParents(ts.BlockID("B.1.1")))
+ ts.IssueValidationBlock("B.2", nodeB, mock.WithStrongParents(ts.BlockID("A.1.1")))
ts.AssertBlocksInCachePreAccepted(ts.Blocks("A.1", "B.1"), true, ts.Nodes()...)
ts.AssertBlocksInCacheAccepted(ts.Blocks("A.1", "B.1"), false, ts.Nodes()...)
@@ -159,14 +162,14 @@ func Test_MultipleAttachments(t *testing.T) {
{
tx2 := lo.PanicOnErr(ts.TransactionFramework.CreateSimpleTransaction("tx2", 1, "tx1:1"))
- ts.IssuePayloadWithOptions("A.3", nodeA, tx2, blockfactory.WithStrongParents(ts.BlockID("Genesis")))
- ts.IssueValidationBlock("B.3", nodeB, blockfactory.WithStrongParents(ts.BlockID("A.3")))
- ts.IssueValidationBlock("A.4", nodeA, blockfactory.WithStrongParents(ts.BlockID("B.3")))
+ ts.IssuePayloadWithOptions("A.3", nodeA.Validator, nodeA, tx2, mock.WithStrongParents(ts.BlockID("Genesis")))
+ ts.IssueValidationBlock("B.3", nodeB, mock.WithStrongParents(ts.BlockID("A.3")))
+ ts.IssueValidationBlock("A.4", nodeA, mock.WithStrongParents(ts.BlockID("B.3")))
ts.AssertBlocksInCachePreAccepted(ts.Blocks("A.3"), true, ts.Nodes()...)
- ts.IssueValidationBlock("B.4", nodeB, blockfactory.WithStrongParents(ts.BlockIDs("B.3", "A.4")...))
- ts.IssueValidationBlock("A.5", nodeA, blockfactory.WithStrongParents(ts.BlockIDs("B.3", "A.4")...))
+ ts.IssueValidationBlock("B.4", nodeB, mock.WithStrongParents(ts.BlockIDs("B.3", "A.4")...))
+ ts.IssueValidationBlock("A.5", nodeA, mock.WithStrongParents(ts.BlockIDs("B.3", "A.4")...))
ts.AssertBlocksInCachePreAccepted(ts.Blocks("B.3", "A.4"), true, ts.Nodes()...)
ts.AssertBlocksInCachePreAccepted(ts.Blocks("B.4", "A.5"), false, ts.Nodes()...)
@@ -191,11 +194,11 @@ func Test_MultipleAttachments(t *testing.T) {
// Issue a block that includes tx1, and make sure that tx2 is accepted as well as a consequence.
{
- ts.IssueValidationBlock("A.6", nodeA, blockfactory.WithStrongParents(ts.BlockIDs("A.2", "B.2")...))
- ts.IssueValidationBlock("B.5", nodeB, blockfactory.WithStrongParents(ts.BlockIDs("A.2", "B.2")...))
+ ts.IssueValidationBlock("A.6", nodeA, mock.WithStrongParents(ts.BlockIDs("A.2", "B.2")...))
+ ts.IssueValidationBlock("B.5", nodeB, mock.WithStrongParents(ts.BlockIDs("A.2", "B.2")...))
- ts.IssueValidationBlock("A.7", nodeA, blockfactory.WithStrongParents(ts.BlockIDs("A.6", "B.5")...))
- ts.IssueValidationBlock("B.6", nodeB, blockfactory.WithStrongParents(ts.BlockIDs("A.6", "B.5")...))
+ ts.IssueValidationBlock("A.7", nodeA, mock.WithStrongParents(ts.BlockIDs("A.6", "B.5")...))
+ ts.IssueValidationBlock("B.6", nodeB, mock.WithStrongParents(ts.BlockIDs("A.6", "B.5")...))
ts.AssertBlocksInCachePreAccepted(ts.Blocks("A.2", "B.2", "A.6", "B.5"), true, ts.Nodes()...)
ts.AssertBlocksInCacheAccepted(ts.Blocks("A.1", "B.1"), true, ts.Nodes()...)
@@ -230,12 +233,13 @@ func Test_SpendRejectedCommittedRace(t *testing.T) {
node1 := ts.AddValidatorNode("node1")
node2 := ts.AddValidatorNode("node2")
+ ts.AddBasicBlockIssuer("default")
ts.Run(true, map[string][]options.Option[protocol.Protocol]{})
ts.AssertSybilProtectionCommittee(0, []iotago.AccountID{
- node1.AccountID,
- node2.AccountID,
+ node1.Validator.AccountID,
+ node2.Validator.AccountID,
}, ts.Nodes()...)
genesisCommitment := lo.PanicOnErr(node1.Protocol.MainEngineInstance().Storage.Commitments().Load(0)).Commitment()
@@ -245,9 +249,9 @@ func Test_SpendRejectedCommittedRace(t *testing.T) {
tx1 := lo.PanicOnErr(ts.TransactionFramework.CreateSimpleTransaction("tx1", 1, "Genesis:0"))
tx2 := lo.PanicOnErr(ts.TransactionFramework.CreateSimpleTransaction("tx2", 1, "Genesis:0"))
- ts.IssueBlockAtSlotWithOptions("block1.1", 1, genesisCommitment, node1, tx1)
- ts.IssueBlockAtSlotWithOptions("block1.2", 1, genesisCommitment, node1, tx2)
- ts.IssueBlockAtSlot("block2.tx1", 2, genesisCommitment, node1, ts.BlockIDs("block1.1")...)
+ ts.IssueBasicBlockAtSlotWithOptions("block1.1", 1, genesisCommitment, ts.DefaultBasicBlockIssuer(), node1, tx1)
+ ts.IssueBasicBlockAtSlotWithOptions("block1.2", 1, genesisCommitment, ts.DefaultBasicBlockIssuer(), node1, tx2)
+ ts.IssueValidationBlockAtSlot("block2.tx1", 2, genesisCommitment, node1, ts.BlockIDs("block1.1")...)
ts.AssertTransactionsExist(ts.TransactionFramework.Transactions("tx1", "tx2"), true, node1, node2)
ts.AssertTransactionsInCacheBooked(ts.TransactionFramework.Transactions("tx1", "tx2"), true, node1, node2)
@@ -266,8 +270,8 @@ func Test_SpendRejectedCommittedRace(t *testing.T) {
// Issue some more blocks and assert that conflicts are propagated to blocks.
{
- ts.IssueBlockAtSlot("block2.1", 2, genesisCommitment, node1, ts.BlockID("block1.1"))
- ts.IssueBlockAtSlot("block2.2", 2, genesisCommitment, node1, ts.BlockID("block1.2"))
+ ts.IssueValidationBlockAtSlot("block2.1", 2, genesisCommitment, node1, ts.BlockID("block1.1"))
+ ts.IssueValidationBlockAtSlot("block2.2", 2, genesisCommitment, node1, ts.BlockID("block1.2"))
ts.AssertBlocksInCacheConflicts(map[*blocks.Block][]string{
ts.Block("block2.1"): {"tx1"},
@@ -279,8 +283,8 @@ func Test_SpendRejectedCommittedRace(t *testing.T) {
// Issue valid blocks that resolve the conflict.
{
- ts.IssueBlockAtSlot("block2.3", 2, genesisCommitment, node2, ts.BlockIDs("block2.2")...)
- ts.IssueBlockAtSlot("block2.4", 2, genesisCommitment, node1, ts.BlockIDs("block2.3")...)
+ ts.IssueValidationBlockAtSlot("block2.3", 2, genesisCommitment, node2, ts.BlockIDs("block2.2")...)
+ ts.IssueValidationBlockAtSlot("block2.4", 2, genesisCommitment, node1, ts.BlockIDs("block2.3")...)
ts.AssertBlocksInCacheConflicts(map[*blocks.Block][]string{
ts.Block("block2.3"): {"tx2"},
@@ -301,8 +305,8 @@ func Test_SpendRejectedCommittedRace(t *testing.T) {
testsuite.WithEvictedSlot(0),
)
- ts.IssueBlockAtSlot("block5.1", 5, genesisCommitment, node1, ts.BlockIDsWithPrefix("block1.1")...)
- ts.IssueBlockAtSlot("block5.2", 5, genesisCommitment, node1, ts.BlockIDsWithPrefix("block1.2")...)
+ ts.IssueValidationBlockAtSlot("block5.1", 5, genesisCommitment, node1, ts.BlockIDsWithPrefix("block1.1")...)
+ ts.IssueValidationBlockAtSlot("block5.2", 5, genesisCommitment, node1, ts.BlockIDsWithPrefix("block1.2")...)
ts.AssertBlocksInCacheConflicts(map[*blocks.Block][]string{
ts.Block("block5.1"): {"tx1"}, // on rejected conflict
@@ -350,7 +354,7 @@ func Test_SpendRejectedCommittedRace(t *testing.T) {
// Issue TX3 on top of rejected TX1 and 1 commitment on node2 (committed to slot 1)
{
- ts.IssueBlockAtSlotWithOptions("n2-commit1", 5, commitment1, node2, tx4)
+ ts.IssueBasicBlockAtSlotWithOptions("n2-commit1", 5, commitment1, ts.DefaultBasicBlockIssuer(), node2, tx4)
ts.AssertBlocksInCacheConflicts(map[*blocks.Block][]string{
ts.Block("n2-commit1"): {}, // no conflits inherited as the block is invalid and doesn't get booked.
@@ -368,7 +372,7 @@ func Test_SpendRejectedCommittedRace(t *testing.T) {
// Issue a block on node1 that inherits a pending conflict that has been orphaned on node2
{
- ts.IssueBlockAtSlot("n1-rejected-genesis", 5, genesisCommitment, node1, ts.BlockIDs("block2.tx1")...)
+ ts.IssueValidationBlockAtSlot("n1-rejected-genesis", 5, genesisCommitment, node1, ts.BlockIDs("block2.tx1")...)
ts.AssertBlocksInCacheBooked(ts.Blocks("n1-rejected-genesis"), true, node1)
ts.AssertBlocksInCacheInvalid(ts.Blocks("n1-rejected-genesis"), false, node1)
@@ -383,7 +387,7 @@ func Test_SpendRejectedCommittedRace(t *testing.T) {
// Issue TX4 on top of rejected TX1 but Genesis commitment on node2 (committed to slot 1)
{
- ts.IssueBlockAtSlotWithOptions("n2-genesis", 5, genesisCommitment, node2, tx4, blockfactory.WithStrongParents(ts.BlockID("Genesis")))
+ ts.IssueBasicBlockAtSlotWithOptions("n2-genesis", 5, genesisCommitment, ts.DefaultBasicBlockIssuer(), node2, tx4, mock.WithStrongParents(ts.BlockID("Genesis")))
ts.AssertBlocksInCacheConflicts(map[*blocks.Block][]string{
ts.Block("n2-genesis"): {"tx4"}, // on rejected conflict
@@ -395,7 +399,7 @@ func Test_SpendRejectedCommittedRace(t *testing.T) {
// Issue TX4 on top of rejected TX1 but Genesis commitment on node1 (committed to slot 0)
{
- ts.IssueBlockAtSlotWithOptions("n1-genesis", 5, genesisCommitment, node1, tx4, blockfactory.WithStrongParents(ts.BlockID("Genesis")))
+ ts.IssueBasicBlockAtSlotWithOptions("n1-genesis", 5, genesisCommitment, ts.DefaultBasicBlockIssuer(), node1, tx4, mock.WithStrongParents(ts.BlockID("Genesis")))
ts.AssertTransactionsExist(ts.TransactionFramework.Transactions("tx1"), true, node2)
ts.AssertTransactionsInCacheRejected(ts.TransactionFramework.Transactions("tx4"), true, node2)
@@ -424,14 +428,14 @@ func Test_SpendRejectedCommittedRace(t *testing.T) {
)
// Exchange each-other blocks, ignoring invalidity
- ts.IssueExistingBlock("n2-genesis", node1)
- ts.IssueExistingBlock("n2-commit1", node1)
- ts.IssueExistingBlock("n1-genesis", node2)
- ts.IssueExistingBlock("n1-rejected-genesis", node2)
+ ts.IssueExistingBlock("n2-genesis", ts.DefaultBasicBlockIssuer(), node1)
+ ts.IssueExistingBlock("n2-commit1", ts.DefaultBasicBlockIssuer(), node1)
+ ts.IssueExistingBlock("n1-genesis", ts.DefaultBasicBlockIssuer(), node2)
+ ts.IssueExistingBlock("n1-rejected-genesis", ts.DefaultBasicBlockIssuer(), node2)
- ts.IssueBlockAtSlot("n1-rejected-commit1", 5, commitment1, node1, ts.BlockIDs("n1-rejected-genesis")...)
+ ts.IssueValidationBlockAtSlot("n1-rejected-commit1", 5, commitment1, node1, ts.BlockIDs("n1-rejected-genesis")...)
// Needs reissuing on node2 because it is invalid
- ts.IssueExistingBlock("n1-rejected-commit1", node2)
+ ts.IssueExistingBlock("n1-rejected-commit1", ts.DefaultBasicBlockIssuer(), node2)
// The nodes agree on the results of the invalid blocks
ts.AssertBlocksInCacheBooked(ts.Blocks("n2-genesis", "n1-genesis", "n1-rejected-genesis"), true, node1, node2)
@@ -481,12 +485,13 @@ func Test_SpendPendingCommittedRace(t *testing.T) {
node1 := ts.AddValidatorNode("node1")
node2 := ts.AddValidatorNode("node2")
+ ts.AddBasicBlockIssuer("default")
ts.Run(true, map[string][]options.Option[protocol.Protocol]{})
ts.AssertSybilProtectionCommittee(0, []iotago.AccountID{
- node1.AccountID,
- node2.AccountID,
+ node1.Validator.AccountID,
+ node2.Validator.AccountID,
}, ts.Nodes()...)
genesisCommitment := lo.PanicOnErr(node1.Protocol.MainEngineInstance().Storage.Commitments().Load(0)).Commitment()
@@ -496,8 +501,8 @@ func Test_SpendPendingCommittedRace(t *testing.T) {
tx1 := lo.PanicOnErr(ts.TransactionFramework.CreateSimpleTransaction("tx1", 1, "Genesis:0"))
tx2 := lo.PanicOnErr(ts.TransactionFramework.CreateSimpleTransaction("tx2", 1, "Genesis:0"))
- ts.IssueBlockAtSlotWithOptions("block1.1", 1, genesisCommitment, node2, tx1)
- ts.IssueBlockAtSlotWithOptions("block1.2", 1, genesisCommitment, node2, tx2)
+ ts.IssueBasicBlockAtSlotWithOptions("block1.1", 1, genesisCommitment, ts.DefaultBasicBlockIssuer(), node2, tx1)
+ ts.IssueBasicBlockAtSlotWithOptions("block1.2", 1, genesisCommitment, ts.DefaultBasicBlockIssuer(), node2, tx2)
ts.AssertTransactionsExist(ts.TransactionFramework.Transactions("tx1", "tx2"), true, node1, node2)
ts.AssertTransactionsInCacheBooked(ts.TransactionFramework.Transactions("tx1", "tx2"), true, node1, node2)
@@ -515,8 +520,8 @@ func Test_SpendPendingCommittedRace(t *testing.T) {
// Issue some more blocks and assert that conflicts are propagated to blocks.
{
- ts.IssueBlockAtSlot("block2.1", 2, genesisCommitment, node2, ts.BlockID("block1.1"))
- ts.IssueBlockAtSlot("block2.2", 2, genesisCommitment, node2, ts.BlockID("block1.2"))
+ ts.IssueValidationBlockAtSlot("block2.1", 2, genesisCommitment, node2, ts.BlockID("block1.1"))
+ ts.IssueValidationBlockAtSlot("block2.2", 2, genesisCommitment, node2, ts.BlockID("block1.2"))
ts.AssertBlocksInCacheConflicts(map[*blocks.Block][]string{
ts.Block("block2.1"): {"tx1"},
@@ -536,7 +541,7 @@ func Test_SpendPendingCommittedRace(t *testing.T) {
testsuite.WithEvictedSlot(0),
)
- ts.IssueBlockAtSlot("", 5, genesisCommitment, node1, ts.BlockIDsWithPrefix("4.0")...)
+ ts.IssueValidationBlockAtSlot("", 5, genesisCommitment, node1, ts.BlockIDsWithPrefix("4.0")...)
ts.IssueBlocksAtSlots("", []iotago.SlotIndex{5}, 1, "4.0", ts.Nodes("node1"), false, nil)
@@ -574,8 +579,8 @@ func Test_SpendPendingCommittedRace(t *testing.T) {
// Issue a block booked on a pending conflict on node2
{
- ts.IssueBlockAtSlot("n2-pending-genesis", 5, genesisCommitment, node2, ts.BlockIDs("block2.1")...)
- ts.IssueBlockAtSlot("n2-pending-commit1", 5, commitment1, node2, ts.BlockIDs("block2.1")...)
+ ts.IssueValidationBlockAtSlot("n2-pending-genesis", 5, genesisCommitment, node2, ts.BlockIDs("block2.1")...)
+ ts.IssueValidationBlockAtSlot("n2-pending-commit1", 5, commitment1, node2, ts.BlockIDs("block2.1")...)
ts.AssertTransactionsExist(ts.TransactionFramework.Transactions("tx1"), true, node2)
ts.AssertTransactionsInCachePending(ts.TransactionFramework.Transactions("tx1"), true, node2)
@@ -605,8 +610,8 @@ func Test_SpendPendingCommittedRace(t *testing.T) {
)
// Exchange each-other blocks, ignoring invalidity
- ts.IssueExistingBlock("n2-pending-genesis", node1)
- ts.IssueExistingBlock("n2-pending-commit1", node1)
+ ts.IssueExistingBlock("n2-pending-genesis", ts.DefaultBasicBlockIssuer(), node1)
+ ts.IssueExistingBlock("n2-pending-commit1", ts.DefaultBasicBlockIssuer(), node1)
// The nodes agree on the results of the invalid blocks
ts.AssertBlocksInCacheBooked(ts.Blocks("n2-pending-genesis", "n2-pending-commit1"), true, node1, node2)
diff --git a/pkg/tests/confirmation_state_test.go b/pkg/tests/confirmation_state_test.go
index 55a7031bb..23e10e812 100644
--- a/pkg/tests/confirmation_state_test.go
+++ b/pkg/tests/confirmation_state_test.go
@@ -31,10 +31,10 @@ func TestConfirmationFlags(t *testing.T) {
nodeD := ts.AddValidatorNode("nodeD")
expectedCommittee := []iotago.AccountID{
- nodeA.AccountID,
- nodeB.AccountID,
- nodeC.AccountID,
- nodeD.AccountID,
+ nodeA.Validator.AccountID,
+ nodeB.Validator.AccountID,
+ nodeC.Validator.AccountID,
+ nodeD.Validator.AccountID,
}
ts.Run(true, map[string][]options.Option[protocol.Protocol]{
"nodeA": {
@@ -44,7 +44,7 @@ func TestConfirmationFlags(t *testing.T) {
protocol.WithSybilProtectionProvider(
sybilprotectionv1.NewProvider(
sybilprotectionv1.WithSeatManagerProvider(
- poa.NewProvider(poa.WithOnlineCommitteeStartup(nodeA.AccountID), poa.WithActivityWindow(2*time.Minute)),
+ poa.NewProvider(poa.WithOnlineCommitteeStartup(nodeA.Validator.AccountID), poa.WithActivityWindow(2*time.Minute)),
),
),
),
@@ -56,7 +56,7 @@ func TestConfirmationFlags(t *testing.T) {
protocol.WithSybilProtectionProvider(
sybilprotectionv1.NewProvider(
sybilprotectionv1.WithSeatManagerProvider(
- poa.NewProvider(poa.WithOnlineCommitteeStartup(nodeA.AccountID), poa.WithActivityWindow(2*time.Minute)),
+ poa.NewProvider(poa.WithOnlineCommitteeStartup(nodeA.Validator.AccountID), poa.WithActivityWindow(2*time.Minute)),
),
),
),
@@ -68,7 +68,7 @@ func TestConfirmationFlags(t *testing.T) {
protocol.WithSybilProtectionProvider(
sybilprotectionv1.NewProvider(
sybilprotectionv1.WithSeatManagerProvider(
- poa.NewProvider(poa.WithOnlineCommitteeStartup(nodeA.AccountID), poa.WithActivityWindow(2*time.Minute)),
+ poa.NewProvider(poa.WithOnlineCommitteeStartup(nodeA.Validator.AccountID), poa.WithActivityWindow(2*time.Minute)),
),
),
),
@@ -80,7 +80,7 @@ func TestConfirmationFlags(t *testing.T) {
protocol.WithSybilProtectionProvider(
sybilprotectionv1.NewProvider(
sybilprotectionv1.WithSeatManagerProvider(
- poa.NewProvider(poa.WithOnlineCommitteeStartup(nodeA.AccountID), poa.WithActivityWindow(2*time.Minute)),
+ poa.NewProvider(poa.WithOnlineCommitteeStartup(nodeA.Validator.AccountID), poa.WithActivityWindow(2*time.Minute)),
),
),
),
@@ -98,7 +98,7 @@ func TestConfirmationFlags(t *testing.T) {
testsuite.WithChainID(genesisCommitment.MustID()),
testsuite.WithStorageCommitments([]*iotago.Commitment{genesisCommitment}),
testsuite.WithSybilProtectionCommittee(0, expectedCommittee),
- testsuite.WithSybilProtectionOnlineCommittee(lo.Return1(nodeA.Protocol.MainEngineInstance().SybilProtection.SeatManager().Committee(1).GetSeat(nodeA.AccountID))),
+ testsuite.WithSybilProtectionOnlineCommittee(lo.Return1(nodeA.Protocol.MainEngineInstance().SybilProtection.SeatManager().Committee(1).GetSeat(nodeA.Validator.AccountID))),
testsuite.WithEvictedSlot(0),
testsuite.WithActiveRootBlocks(ts.Blocks("Genesis")),
testsuite.WithStorageRootBlocks(ts.Blocks("Genesis")),
@@ -106,11 +106,11 @@ func TestConfirmationFlags(t *testing.T) {
// Slots 1-3: only node A is online and issues blocks, making slot 1 committed.
{
- ts.IssueBlockAtSlot("A.1.0", 1, genesisCommitment, nodeA, ts.BlockID("Genesis"))
- ts.IssueBlockAtSlot("A.1.1", 1, genesisCommitment, nodeA, ts.BlockID("A.1.0"))
- ts.IssueBlockAtSlot("A.2.0", 2, genesisCommitment, nodeA, ts.BlockID("A.1.1"))
- ts.IssueBlockAtSlot("A.2.1", 2, genesisCommitment, nodeA, ts.BlockID("A.2.0"))
- ts.IssueBlockAtSlot("A.3.0", 3, genesisCommitment, nodeA, ts.BlockID("A.2.1"))
+ ts.IssueValidationBlockAtSlot("A.1.0", 1, genesisCommitment, nodeA, ts.BlockID("Genesis"))
+ ts.IssueValidationBlockAtSlot("A.1.1", 1, genesisCommitment, nodeA, ts.BlockID("A.1.0"))
+ ts.IssueValidationBlockAtSlot("A.2.0", 2, genesisCommitment, nodeA, ts.BlockID("A.1.1"))
+ ts.IssueValidationBlockAtSlot("A.2.1", 2, genesisCommitment, nodeA, ts.BlockID("A.2.0"))
+ ts.IssueValidationBlockAtSlot("A.3.0", 3, genesisCommitment, nodeA, ts.BlockID("A.2.1"))
ts.AssertBlocksInCachePreAccepted(ts.Blocks("A.1.0", "A.1.1", "A.2.0", "A.2.1", "A.3.0"), true, ts.Nodes()...)
ts.AssertBlocksInCacheAccepted(ts.Blocks("A.1.0", "A.1.1", "A.2.0", "A.2.1"), true, ts.Nodes()...)
@@ -122,8 +122,8 @@ func TestConfirmationFlags(t *testing.T) {
slot1CommittableSlot := 1 + ts.API.ProtocolParameters().MinCommittableAge()
alias1A0 := fmt.Sprintf("A.%d.0", slot1CommittableSlot)
alias1A1 := fmt.Sprintf("A.%d.1", slot1CommittableSlot)
- ts.IssueBlockAtSlot(alias1A0, slot1CommittableSlot, genesisCommitment, nodeA, ts.BlockID("A.3.0"))
- ts.IssueBlockAtSlot(alias1A1, slot1CommittableSlot, genesisCommitment, nodeA, ts.BlockID(alias1A0))
+ ts.IssueValidationBlockAtSlot(alias1A0, slot1CommittableSlot, genesisCommitment, nodeA, ts.BlockID("A.3.0"))
+ ts.IssueValidationBlockAtSlot(alias1A1, slot1CommittableSlot, genesisCommitment, nodeA, ts.BlockID(alias1A0))
ts.AssertBlocksInCachePreAccepted(ts.Blocks(alias1A0), true, ts.Nodes()...)
ts.AssertBlocksInCacheAccepted(ts.Blocks("A.3.0"), true, ts.Nodes()...)
@@ -141,10 +141,10 @@ func TestConfirmationFlags(t *testing.T) {
alias2A1 := fmt.Sprintf("A.%d.1", slot2CommittableSlot)
alias2A2 := fmt.Sprintf("A.%d.2", slot2CommittableSlot)
alias2B0 := fmt.Sprintf("B.%d.0", slot2CommittableSlot)
- ts.IssueBlockAtSlot(alias2A0, slot2CommittableSlot, genesisCommitment, nodeA, ts.BlockID(alias1A1))
- ts.IssueBlockAtSlot(alias2A1, slot2CommittableSlot, slot1Commitment, nodeA, ts.BlockID(alias2A0))
- ts.IssueBlockAtSlot(alias2B0, slot2CommittableSlot, slot1Commitment, nodeB, ts.BlockID(alias2A1))
- ts.IssueBlockAtSlot(alias2A2, slot2CommittableSlot, slot1Commitment, nodeA, ts.BlockID(alias2B0))
+ ts.IssueValidationBlockAtSlot(alias2A0, slot2CommittableSlot, genesisCommitment, nodeA, ts.BlockID(alias1A1))
+ ts.IssueValidationBlockAtSlot(alias2A1, slot2CommittableSlot, slot1Commitment, nodeA, ts.BlockID(alias2A0))
+ ts.IssueValidationBlockAtSlot(alias2B0, slot2CommittableSlot, slot1Commitment, nodeB, ts.BlockID(alias2A1))
+ ts.IssueValidationBlockAtSlot(alias2A2, slot2CommittableSlot, slot1Commitment, nodeA, ts.BlockID(alias2B0))
ts.AssertBlocksInCachePreAccepted(ts.Blocks(alias2A1, alias2B0), true, ts.Nodes()...)
ts.AssertBlocksInCacheAccepted(ts.Blocks(alias1A1, alias2A0), true, ts.Nodes()...)
@@ -155,8 +155,8 @@ func TestConfirmationFlags(t *testing.T) {
testsuite.WithEqualStoredCommitmentAtIndex(2),
testsuite.WithSybilProtectionCommittee(slot2CommittableSlot, expectedCommittee),
testsuite.WithSybilProtectionOnlineCommittee(
- lo.Return1(nodeA.Protocol.MainEngineInstance().SybilProtection.SeatManager().Committee(1).GetSeat(nodeA.AccountID)),
- lo.Return1(nodeA.Protocol.MainEngineInstance().SybilProtection.SeatManager().Committee(1).GetSeat(nodeB.AccountID)),
+ lo.Return1(nodeA.Protocol.MainEngineInstance().SybilProtection.SeatManager().Committee(1).GetSeat(nodeA.Validator.AccountID)),
+ lo.Return1(nodeA.Protocol.MainEngineInstance().SybilProtection.SeatManager().Committee(1).GetSeat(nodeB.Validator.AccountID)),
),
testsuite.WithEvictedSlot(2),
)
@@ -170,10 +170,10 @@ func TestConfirmationFlags(t *testing.T) {
alias3A0 := fmt.Sprintf("A.%d.0", slot3CommittableSlot)
alias3B0 := fmt.Sprintf("B.%d.0", slot3CommittableSlot)
alias3C1 := fmt.Sprintf("C.%d.1", slot3CommittableSlot)
- ts.IssueBlockAtSlot(alias3C0, slot3CommittableSlot, slot1Commitment, nodeC, ts.BlockID(alias2A2))
- ts.IssueBlockAtSlot(alias3A0, slot3CommittableSlot, slot2Commitment, nodeA, ts.BlockID(alias3C0))
- ts.IssueBlockAtSlot(alias3B0, slot3CommittableSlot, slot2Commitment, nodeB, ts.BlockID(alias3C0))
- ts.IssueBlockAtSlot(alias3C1, slot3CommittableSlot, slot1Commitment, nodeC, ts.BlockID(alias3C0))
+ ts.IssueValidationBlockAtSlot(alias3C0, slot3CommittableSlot, slot1Commitment, nodeC, ts.BlockID(alias2A2))
+ ts.IssueValidationBlockAtSlot(alias3A0, slot3CommittableSlot, slot2Commitment, nodeA, ts.BlockID(alias3C0))
+ ts.IssueValidationBlockAtSlot(alias3B0, slot3CommittableSlot, slot2Commitment, nodeB, ts.BlockID(alias3C0))
+ ts.IssueValidationBlockAtSlot(alias3C1, slot3CommittableSlot, slot1Commitment, nodeC, ts.BlockID(alias3C0))
ts.AssertBlocksInCachePreAccepted(ts.Blocks("A.3.0", alias1A1, alias2A0, alias2A1, alias2A2, alias2B0, alias3C0), true, ts.Nodes()...)
ts.AssertBlocksInCachePreConfirmed(ts.Blocks("A.3.0", alias1A1, alias2A0, alias2A1, alias2A2, alias2B0, alias3C0), true, ts.Nodes()...)
@@ -195,9 +195,9 @@ func TestConfirmationFlags(t *testing.T) {
testsuite.WithEqualStoredCommitmentAtIndex(2),
testsuite.WithSybilProtectionCommittee(slot3CommittableSlot, expectedCommittee),
testsuite.WithSybilProtectionOnlineCommittee(
- lo.Return1(nodeA.Protocol.MainEngineInstance().SybilProtection.SeatManager().Committee(1).GetSeat(nodeA.AccountID)),
- lo.Return1(nodeA.Protocol.MainEngineInstance().SybilProtection.SeatManager().Committee(1).GetSeat(nodeB.AccountID)),
- lo.Return1(nodeA.Protocol.MainEngineInstance().SybilProtection.SeatManager().Committee(1).GetSeat(nodeC.AccountID)),
+ lo.Return1(nodeA.Protocol.MainEngineInstance().SybilProtection.SeatManager().Committee(1).GetSeat(nodeA.Validator.AccountID)),
+ lo.Return1(nodeA.Protocol.MainEngineInstance().SybilProtection.SeatManager().Committee(1).GetSeat(nodeB.Validator.AccountID)),
+ lo.Return1(nodeA.Protocol.MainEngineInstance().SybilProtection.SeatManager().Committee(1).GetSeat(nodeC.Validator.AccountID)),
),
testsuite.WithEvictedSlot(2),
)
@@ -207,9 +207,9 @@ func TestConfirmationFlags(t *testing.T) {
alias4A0 := fmt.Sprintf("A.%d.0", slot4CommittableSlot)
alias4B0 := fmt.Sprintf("B.%d.0", slot4CommittableSlot)
alias4C0 := fmt.Sprintf("C.%d.0", slot4CommittableSlot)
- ts.IssueBlockAtSlot(alias4A0, slot4CommittableSlot, slot2Commitment, nodeA, ts.BlockIDs(alias3A0, alias3B0, alias3C1)...)
- ts.IssueBlockAtSlot(alias4B0, slot4CommittableSlot, slot2Commitment, nodeB, ts.BlockIDs(alias3A0, alias3B0, alias3C1)...)
- ts.IssueBlockAtSlot(alias4C0, slot4CommittableSlot, slot2Commitment, nodeC, ts.BlockIDs(alias3A0, alias3B0, alias3C1)...)
+ ts.IssueValidationBlockAtSlot(alias4A0, slot4CommittableSlot, slot2Commitment, nodeA, ts.BlockIDs(alias3A0, alias3B0, alias3C1)...)
+ ts.IssueValidationBlockAtSlot(alias4B0, slot4CommittableSlot, slot2Commitment, nodeB, ts.BlockIDs(alias3A0, alias3B0, alias3C1)...)
+ ts.IssueValidationBlockAtSlot(alias4C0, slot4CommittableSlot, slot2Commitment, nodeC, ts.BlockIDs(alias3A0, alias3B0, alias3C1)...)
ts.AssertBlocksInCachePreAccepted(ts.Blocks(alias3A0, alias3B0, alias3C1), true, ts.Nodes()...)
ts.AssertBlocksInCachePreConfirmed(ts.Blocks(alias3A0, alias3B0, alias3C1), true, ts.Nodes()...)
@@ -231,9 +231,9 @@ func TestConfirmationFlags(t *testing.T) {
alias4A1 := fmt.Sprintf("A.%d.1", slot4CommittableSlot)
alias4B1 := fmt.Sprintf("B.%d.1", slot4CommittableSlot)
alias4C1 := fmt.Sprintf("C.%d.1", slot4CommittableSlot)
- ts.IssueBlockAtSlot(alias4A1, slot4CommittableSlot, slot2Commitment, nodeA, ts.BlockIDs(alias4A0, alias4B0, alias4C0)...)
- ts.IssueBlockAtSlot(alias4B1, slot4CommittableSlot, slot2Commitment, nodeB, ts.BlockIDs(alias4A0, alias4B0, alias4C0)...)
- ts.IssueBlockAtSlot(alias4C1, slot4CommittableSlot, slot2Commitment, nodeC, ts.BlockIDs(alias4A0, alias4B0, alias4C0)...)
+ ts.IssueValidationBlockAtSlot(alias4A1, slot4CommittableSlot, slot2Commitment, nodeA, ts.BlockIDs(alias4A0, alias4B0, alias4C0)...)
+ ts.IssueValidationBlockAtSlot(alias4B1, slot4CommittableSlot, slot2Commitment, nodeB, ts.BlockIDs(alias4A0, alias4B0, alias4C0)...)
+ ts.IssueValidationBlockAtSlot(alias4C1, slot4CommittableSlot, slot2Commitment, nodeC, ts.BlockIDs(alias4A0, alias4B0, alias4C0)...)
ts.AssertBlocksInCachePreAccepted(ts.Blocks(alias4A0, alias4B0, alias4C0), true, ts.Nodes()...)
ts.AssertBlocksInCachePreConfirmed(ts.Blocks(alias4A0, alias4B0, alias4C0), true, ts.Nodes()...)
@@ -250,9 +250,9 @@ func TestConfirmationFlags(t *testing.T) {
testsuite.WithEqualStoredCommitmentAtIndex(3),
testsuite.WithSybilProtectionCommittee(slot4CommittableSlot, expectedCommittee),
testsuite.WithSybilProtectionOnlineCommittee(
- lo.Return1(nodeA.Protocol.MainEngineInstance().SybilProtection.SeatManager().Committee(1).GetSeat(nodeA.AccountID)),
- lo.Return1(nodeA.Protocol.MainEngineInstance().SybilProtection.SeatManager().Committee(1).GetSeat(nodeB.AccountID)),
- lo.Return1(nodeA.Protocol.MainEngineInstance().SybilProtection.SeatManager().Committee(1).GetSeat(nodeC.AccountID)),
+ lo.Return1(nodeA.Protocol.MainEngineInstance().SybilProtection.SeatManager().Committee(1).GetSeat(nodeA.Validator.AccountID)),
+ lo.Return1(nodeA.Protocol.MainEngineInstance().SybilProtection.SeatManager().Committee(1).GetSeat(nodeB.Validator.AccountID)),
+ lo.Return1(nodeA.Protocol.MainEngineInstance().SybilProtection.SeatManager().Committee(1).GetSeat(nodeC.Validator.AccountID)),
),
testsuite.WithEvictedSlot(3),
)
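The seat lookups repeated throughout this file follow a single pattern; as a sketch (assuming nodeA is wired up as in this test), the validator identity now hangs off node.Validator instead of being a field on the node itself:

// Resolve a validator's committee seat from its validator account ID; the
// resulting seat index feeds testsuite.WithSybilProtectionOnlineCommittee.
seat := lo.Return1(
	nodeA.Protocol.MainEngineInstance().SybilProtection.SeatManager().
		Committee(1).GetSeat(nodeA.Validator.AccountID),
)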
diff --git a/pkg/tests/protocol_engine_rollback_test.go b/pkg/tests/protocol_engine_rollback_test.go
index 79381bf1f..e0a8396f4 100644
--- a/pkg/tests/protocol_engine_rollback_test.go
+++ b/pkg/tests/protocol_engine_rollback_test.go
@@ -17,11 +17,11 @@ import (
"github.com/iotaledger/iota-core/pkg/protocol/engine"
"github.com/iotaledger/iota-core/pkg/protocol/engine/blocks"
"github.com/iotaledger/iota-core/pkg/protocol/sybilprotection/seatmanager"
- "github.com/iotaledger/iota-core/pkg/protocol/sybilprotection/seatmanager/mock"
+ mock2 "github.com/iotaledger/iota-core/pkg/protocol/sybilprotection/seatmanager/mock"
"github.com/iotaledger/iota-core/pkg/protocol/sybilprotection/sybilprotectionv1"
"github.com/iotaledger/iota-core/pkg/storage"
"github.com/iotaledger/iota-core/pkg/testsuite"
- mock2 "github.com/iotaledger/iota-core/pkg/testsuite/mock"
+ "github.com/iotaledger/iota-core/pkg/testsuite/mock"
iotago "github.com/iotaledger/iota.go/v4"
)
@@ -46,11 +46,11 @@ func TestProtocol_EngineRollbackFinalization(t *testing.T) {
poaProvider := func() module.Provider[*engine.Engine, seatmanager.SeatManager] {
return module.Provide(func(e *engine.Engine) seatmanager.SeatManager {
- poa := mock.NewManualPOAProvider()(e).(*mock.ManualPOA)
+ poa := mock2.NewManualPOAProvider()(e).(*mock2.ManualPOA)
- for _, node := range []*mock2.Node{node0, node1, node2, node3} {
- if node.Validator {
- poa.AddAccount(node.AccountID, node.Name)
+ for _, node := range []*mock.Node{node0, node1, node2, node3} {
+ if node.IsValidator() {
+ poa.AddAccount(node.Validator.AccountID, node.Name)
}
}
poa.SetOnline("node0", "node1", "node2", "node3")
@@ -92,20 +92,20 @@ func TestProtocol_EngineRollbackFinalization(t *testing.T) {
// Verify that nodes have the expected states.
expectedCommittee := []iotago.AccountID{
- node0.AccountID,
- node1.AccountID,
- node2.AccountID,
- node3.AccountID,
+ node0.Validator.AccountID,
+ node1.Validator.AccountID,
+ node2.Validator.AccountID,
+ node3.Validator.AccountID,
}
expectedOnlineCommitteeFull := []account.SeatIndex{
- lo.Return1(node0.Protocol.MainEngineInstance().SybilProtection.SeatManager().Committee(1).GetSeat(node0.AccountID)),
- lo.Return1(node0.Protocol.MainEngineInstance().SybilProtection.SeatManager().Committee(1).GetSeat(node1.AccountID)),
- lo.Return1(node0.Protocol.MainEngineInstance().SybilProtection.SeatManager().Committee(1).GetSeat(node2.AccountID)),
- lo.Return1(node0.Protocol.MainEngineInstance().SybilProtection.SeatManager().Committee(1).GetSeat(node3.AccountID)),
+ lo.Return1(node0.Protocol.MainEngineInstance().SybilProtection.SeatManager().Committee(1).GetSeat(node0.Validator.AccountID)),
+ lo.Return1(node0.Protocol.MainEngineInstance().SybilProtection.SeatManager().Committee(1).GetSeat(node1.Validator.AccountID)),
+ lo.Return1(node0.Protocol.MainEngineInstance().SybilProtection.SeatManager().Committee(1).GetSeat(node2.Validator.AccountID)),
+ lo.Return1(node0.Protocol.MainEngineInstance().SybilProtection.SeatManager().Committee(1).GetSeat(node3.Validator.AccountID)),
}
for _, node := range ts.Nodes() {
- node.Protocol.MainEngineInstance().SybilProtection.SeatManager().(*mock.ManualPOA).SetOnline("node0", "node1", "node2", "node3")
+ node.Protocol.MainEngineInstance().SybilProtection.SeatManager().(*mock2.ManualPOA).SetOnline("node0", "node1", "node2", "node3")
}
{
@@ -145,7 +145,7 @@ func TestProtocol_EngineRollbackFinalization(t *testing.T) {
for _, slot := range []iotago.SlotIndex{4, 5, 6, 7, 8, 9} {
var attestationBlocks []*blocks.Block
for _, node := range ts.Nodes() {
- if node.Validator {
+ if node.IsValidator() {
attestationBlocks = append(attestationBlocks, ts.Block(fmt.Sprintf("P0:%d.3-%s", slot, node.Name)))
}
}
@@ -226,11 +226,11 @@ func TestProtocol_EngineRollbackNoFinalization(t *testing.T) {
poaProvider := func() module.Provider[*engine.Engine, seatmanager.SeatManager] {
return module.Provide(func(e *engine.Engine) seatmanager.SeatManager {
- poa := mock.NewManualPOAProvider()(e).(*mock.ManualPOA)
+ poa := mock2.NewManualPOAProvider()(e).(*mock2.ManualPOA)
- for _, node := range []*mock2.Node{node0, node1, node2, node3} {
- if node.Validator {
- poa.AddAccount(node.AccountID, node.Name)
+ for _, node := range []*mock.Node{node0, node1, node2, node3} {
+ if node.IsValidator() {
+ poa.AddAccount(node.Validator.AccountID, node.Name)
}
}
poa.SetOnline("node0", "node1", "node2", "node3")
@@ -272,25 +272,25 @@ func TestProtocol_EngineRollbackNoFinalization(t *testing.T) {
// Verify that nodes have the expected states.
expectedCommittee := []iotago.AccountID{
- node0.AccountID,
- node1.AccountID,
- node2.AccountID,
- node3.AccountID,
+ node0.Validator.AccountID,
+ node1.Validator.AccountID,
+ node2.Validator.AccountID,
+ node3.Validator.AccountID,
}
expectedOnlineCommitteeFull := []account.SeatIndex{
- lo.Return1(node0.Protocol.MainEngineInstance().SybilProtection.SeatManager().Committee(1).GetSeat(node0.AccountID)),
- lo.Return1(node0.Protocol.MainEngineInstance().SybilProtection.SeatManager().Committee(1).GetSeat(node1.AccountID)),
- lo.Return1(node0.Protocol.MainEngineInstance().SybilProtection.SeatManager().Committee(1).GetSeat(node2.AccountID)),
- lo.Return1(node0.Protocol.MainEngineInstance().SybilProtection.SeatManager().Committee(1).GetSeat(node3.AccountID)),
+ lo.Return1(node0.Protocol.MainEngineInstance().SybilProtection.SeatManager().Committee(1).GetSeat(node0.Validator.AccountID)),
+ lo.Return1(node0.Protocol.MainEngineInstance().SybilProtection.SeatManager().Committee(1).GetSeat(node1.Validator.AccountID)),
+ lo.Return1(node0.Protocol.MainEngineInstance().SybilProtection.SeatManager().Committee(1).GetSeat(node2.Validator.AccountID)),
+ lo.Return1(node0.Protocol.MainEngineInstance().SybilProtection.SeatManager().Committee(1).GetSeat(node3.Validator.AccountID)),
}
expectedOnlineCommitteeHalf := []account.SeatIndex{
- lo.Return1(node0.Protocol.MainEngineInstance().SybilProtection.SeatManager().Committee(1).GetSeat(node0.AccountID)),
- lo.Return1(node0.Protocol.MainEngineInstance().SybilProtection.SeatManager().Committee(1).GetSeat(node1.AccountID)),
+ lo.Return1(node0.Protocol.MainEngineInstance().SybilProtection.SeatManager().Committee(1).GetSeat(node0.Validator.AccountID)),
+ lo.Return1(node0.Protocol.MainEngineInstance().SybilProtection.SeatManager().Committee(1).GetSeat(node1.Validator.AccountID)),
}
for _, node := range ts.Nodes() {
- node.Protocol.MainEngineInstance().SybilProtection.SeatManager().(*mock.ManualPOA).SetOnline("node0", "node1", "node2", "node3")
+ node.Protocol.MainEngineInstance().SybilProtection.SeatManager().(*mock2.ManualPOA).SetOnline("node0", "node1", "node2", "node3")
}
{
@@ -330,7 +330,7 @@ func TestProtocol_EngineRollbackNoFinalization(t *testing.T) {
for _, slot := range []iotago.SlotIndex{4, 5, 6, 7, 8, 9} {
var attestationBlocks []*blocks.Block
for _, node := range ts.Nodes() {
- if node.Validator {
+ if node.IsValidator() {
attestationBlocks = append(attestationBlocks, ts.Block(fmt.Sprintf("P0:%d.3-%s", slot, node.Name)))
}
}
@@ -342,13 +342,13 @@ func TestProtocol_EngineRollbackNoFinalization(t *testing.T) {
// Update online committee.
for _, node := range ts.Nodes() {
- manualPOA := node.Protocol.MainEngineInstance().SybilProtection.SeatManager().(*mock.ManualPOA)
+ manualPOA := node.Protocol.MainEngineInstance().SybilProtection.SeatManager().(*mock2.ManualPOA)
manualPOA.SetOnline("node0", "node1")
manualPOA.SetOffline("node2", "node3")
}
{
- ts.IssueBlocksAtSlots("P0:", []iotago.SlotIndex{12, 13, 14, 15, 16}, 4, "P0:11.3", []*mock2.Node{node0, node1}, true, nil)
+ ts.IssueBlocksAtSlots("P0:", []iotago.SlotIndex{12, 13, 14, 15, 16}, 4, "P0:11.3", []*mock.Node{node0, node1}, true, nil)
ts.AssertNodeState(ts.Nodes(),
testsuite.WithLatestFinalizedSlot(8),
@@ -418,11 +418,11 @@ func TestProtocol_EngineRollbackNoFinalizationLastSlot(t *testing.T) {
poaProvider := func() module.Provider[*engine.Engine, seatmanager.SeatManager] {
return module.Provide(func(e *engine.Engine) seatmanager.SeatManager {
- poa := mock.NewManualPOAProvider()(e).(*mock.ManualPOA)
+ poa := mock2.NewManualPOAProvider()(e).(*mock2.ManualPOA)
- for _, node := range []*mock2.Node{node0, node1, node2, node3} {
- if node.Validator {
- poa.AddAccount(node.AccountID, node.Name)
+ for _, node := range []*mock.Node{node0, node1, node2, node3} {
+ if node.IsValidator() {
+ poa.AddAccount(node.Validator.AccountID, node.Name)
}
}
poa.SetOnline("node0", "node1", "node2", "node3")
@@ -464,25 +464,25 @@ func TestProtocol_EngineRollbackNoFinalizationLastSlot(t *testing.T) {
// Verify that nodes have the expected states.
expectedCommittee := []iotago.AccountID{
- node0.AccountID,
- node1.AccountID,
- node2.AccountID,
- node3.AccountID,
+ node0.Validator.AccountID,
+ node1.Validator.AccountID,
+ node2.Validator.AccountID,
+ node3.Validator.AccountID,
}
expectedOnlineCommitteeFull := []account.SeatIndex{
- lo.Return1(node0.Protocol.MainEngineInstance().SybilProtection.SeatManager().Committee(1).GetSeat(node0.AccountID)),
- lo.Return1(node0.Protocol.MainEngineInstance().SybilProtection.SeatManager().Committee(1).GetSeat(node1.AccountID)),
- lo.Return1(node0.Protocol.MainEngineInstance().SybilProtection.SeatManager().Committee(1).GetSeat(node2.AccountID)),
- lo.Return1(node0.Protocol.MainEngineInstance().SybilProtection.SeatManager().Committee(1).GetSeat(node3.AccountID)),
+ lo.Return1(node0.Protocol.MainEngineInstance().SybilProtection.SeatManager().Committee(1).GetSeat(node0.Validator.AccountID)),
+ lo.Return1(node0.Protocol.MainEngineInstance().SybilProtection.SeatManager().Committee(1).GetSeat(node1.Validator.AccountID)),
+ lo.Return1(node0.Protocol.MainEngineInstance().SybilProtection.SeatManager().Committee(1).GetSeat(node2.Validator.AccountID)),
+ lo.Return1(node0.Protocol.MainEngineInstance().SybilProtection.SeatManager().Committee(1).GetSeat(node3.Validator.AccountID)),
}
expectedOnlineCommitteeHalf := []account.SeatIndex{
- lo.Return1(node0.Protocol.MainEngineInstance().SybilProtection.SeatManager().Committee(1).GetSeat(node0.AccountID)),
- lo.Return1(node0.Protocol.MainEngineInstance().SybilProtection.SeatManager().Committee(1).GetSeat(node1.AccountID)),
+ lo.Return1(node0.Protocol.MainEngineInstance().SybilProtection.SeatManager().Committee(1).GetSeat(node0.Validator.AccountID)),
+ lo.Return1(node0.Protocol.MainEngineInstance().SybilProtection.SeatManager().Committee(1).GetSeat(node1.Validator.AccountID)),
}
for _, node := range ts.Nodes() {
- node.Protocol.MainEngineInstance().SybilProtection.SeatManager().(*mock.ManualPOA).SetOnline("node0", "node1", "node2", "node3")
+ node.Protocol.MainEngineInstance().SybilProtection.SeatManager().(*mock2.ManualPOA).SetOnline("node0", "node1", "node2", "node3")
}
{
@@ -522,7 +522,7 @@ func TestProtocol_EngineRollbackNoFinalizationLastSlot(t *testing.T) {
for _, slot := range []iotago.SlotIndex{4, 5, 6, 7, 8, 9} {
var attestationBlocks []*blocks.Block
for _, node := range ts.Nodes() {
- if node.Validator {
+ if node.IsValidator() {
attestationBlocks = append(attestationBlocks, ts.Block(fmt.Sprintf("P0:%d.3-%s", slot, node.Name)))
}
}
@@ -534,13 +534,13 @@ func TestProtocol_EngineRollbackNoFinalizationLastSlot(t *testing.T) {
// Update online committee.
for _, node := range ts.Nodes() {
- manualPOA := node.Protocol.MainEngineInstance().SybilProtection.SeatManager().(*mock.ManualPOA)
+ manualPOA := node.Protocol.MainEngineInstance().SybilProtection.SeatManager().(*mock2.ManualPOA)
manualPOA.SetOnline("node0", "node1")
manualPOA.SetOffline("node2", "node3")
}
{
- ts.IssueBlocksAtSlots("P0:", []iotago.SlotIndex{12, 13, 14, 15, 16, 17, 18, 19}, 4, "P0:11.3", []*mock2.Node{node0, node1}, true, nil)
+ ts.IssueBlocksAtSlots("P0:", []iotago.SlotIndex{12, 13, 14, 15, 16, 17, 18, 19}, 4, "P0:11.3", []*mock.Node{node0, node1}, true, nil)
ts.AssertNodeState(ts.Nodes(),
testsuite.WithLatestFinalizedSlot(8),
@@ -610,11 +610,11 @@ func TestProtocol_EngineRollbackNoFinalizationBeforePointOfNoReturn(t *testing.T
poaProvider := func() module.Provider[*engine.Engine, seatmanager.SeatManager] {
return module.Provide(func(e *engine.Engine) seatmanager.SeatManager {
- poa := mock.NewManualPOAProvider()(e).(*mock.ManualPOA)
+ poa := mock2.NewManualPOAProvider()(e).(*mock2.ManualPOA)
- for _, node := range []*mock2.Node{node0, node1, node2, node3} {
- if node.Validator {
- poa.AddAccount(node.AccountID, node.Name)
+ for _, node := range []*mock.Node{node0, node1, node2, node3} {
+ if node.IsValidator() {
+ poa.AddAccount(node.Validator.AccountID, node.Name)
}
}
poa.SetOnline("node0", "node1", "node2", "node3")
@@ -656,25 +656,25 @@ func TestProtocol_EngineRollbackNoFinalizationBeforePointOfNoReturn(t *testing.T
// Verify that nodes have the expected states.
expectedCommittee := []iotago.AccountID{
- node0.AccountID,
- node1.AccountID,
- node2.AccountID,
- node3.AccountID,
+ node0.Validator.AccountID,
+ node1.Validator.AccountID,
+ node2.Validator.AccountID,
+ node3.Validator.AccountID,
}
expectedOnlineCommitteeFull := []account.SeatIndex{
- lo.Return1(node0.Protocol.MainEngineInstance().SybilProtection.SeatManager().Committee(1).GetSeat(node0.AccountID)),
- lo.Return1(node0.Protocol.MainEngineInstance().SybilProtection.SeatManager().Committee(1).GetSeat(node1.AccountID)),
- lo.Return1(node0.Protocol.MainEngineInstance().SybilProtection.SeatManager().Committee(1).GetSeat(node2.AccountID)),
- lo.Return1(node0.Protocol.MainEngineInstance().SybilProtection.SeatManager().Committee(1).GetSeat(node3.AccountID)),
+ lo.Return1(node0.Protocol.MainEngineInstance().SybilProtection.SeatManager().Committee(1).GetSeat(node0.Validator.AccountID)),
+ lo.Return1(node0.Protocol.MainEngineInstance().SybilProtection.SeatManager().Committee(1).GetSeat(node1.Validator.AccountID)),
+ lo.Return1(node0.Protocol.MainEngineInstance().SybilProtection.SeatManager().Committee(1).GetSeat(node2.Validator.AccountID)),
+ lo.Return1(node0.Protocol.MainEngineInstance().SybilProtection.SeatManager().Committee(1).GetSeat(node3.Validator.AccountID)),
}
expectedOnlineCommitteeHalf := []account.SeatIndex{
- lo.Return1(node0.Protocol.MainEngineInstance().SybilProtection.SeatManager().Committee(1).GetSeat(node0.AccountID)),
- lo.Return1(node0.Protocol.MainEngineInstance().SybilProtection.SeatManager().Committee(1).GetSeat(node1.AccountID)),
+ lo.Return1(node0.Protocol.MainEngineInstance().SybilProtection.SeatManager().Committee(1).GetSeat(node0.Validator.AccountID)),
+ lo.Return1(node0.Protocol.MainEngineInstance().SybilProtection.SeatManager().Committee(1).GetSeat(node1.Validator.AccountID)),
}
for _, node := range ts.Nodes() {
- node.Protocol.MainEngineInstance().SybilProtection.SeatManager().(*mock.ManualPOA).SetOnline("node0", "node1", "node2", "node3")
+ node.Protocol.MainEngineInstance().SybilProtection.SeatManager().(*mock2.ManualPOA).SetOnline("node0", "node1", "node2", "node3")
}
{
@@ -714,7 +714,7 @@ func TestProtocol_EngineRollbackNoFinalizationBeforePointOfNoReturn(t *testing.T
for _, slot := range []iotago.SlotIndex{4, 5, 6, 7, 8, 9} {
var attestationBlocks []*blocks.Block
for _, node := range ts.Nodes() {
- if node.Validator {
+ if node.IsValidator() {
attestationBlocks = append(attestationBlocks, ts.Block(fmt.Sprintf("P0:%d.3-%s", slot, node.Name)))
}
}
@@ -726,13 +726,13 @@ func TestProtocol_EngineRollbackNoFinalizationBeforePointOfNoReturn(t *testing.T
// Update online committee.
for _, node := range ts.Nodes() {
- manualPOA := node.Protocol.MainEngineInstance().SybilProtection.SeatManager().(*mock.ManualPOA)
+ manualPOA := node.Protocol.MainEngineInstance().SybilProtection.SeatManager().(*mock2.ManualPOA)
manualPOA.SetOnline("node0", "node1")
manualPOA.SetOffline("node2", "node3")
}
{
- ts.IssueBlocksAtSlots("P0:", []iotago.SlotIndex{12, 13, 14, 15}, 4, "P0:11.3", []*mock2.Node{node0, node1}, true, nil)
+ ts.IssueBlocksAtSlots("P0:", []iotago.SlotIndex{12, 13, 14, 15}, 4, "P0:11.3", []*mock.Node{node0, node1}, true, nil)
ts.AssertNodeState(ts.Nodes(),
testsuite.WithLatestFinalizedSlot(8),
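The attestation loops above share the same guard; a minimal sketch (assuming ts, slot, and the block naming scheme of this test), with the old node.Validator boolean replaced by the IsValidator() accessor:

// Collect attestation blocks from validator nodes only.
var attestationBlocks []*blocks.Block
for _, node := range ts.Nodes() {
	if node.IsValidator() {
		attestationBlocks = append(attestationBlocks, ts.Block(fmt.Sprintf("P0:%d.3-%s", slot, node.Name)))
	}
}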
diff --git a/pkg/tests/protocol_engine_switching_test.go b/pkg/tests/protocol_engine_switching_test.go
index 359cd1bb4..2ff81b2d4 100644
--- a/pkg/tests/protocol_engine_switching_test.go
+++ b/pkg/tests/protocol_engine_switching_test.go
@@ -49,6 +49,7 @@ func TestProtocol_EngineSwitching(t *testing.T) {
node6 := ts.AddValidatorNode("node6")
node7 := ts.AddValidatorNode("node7")
node8 := ts.AddNode("node8")
+ ts.AddBasicBlockIssuer("default", iotago.MaxBlockIssuanceCredits/2)
const expectedCommittedSlotAfterPartitionMerge = 19
nodesP1 := []*mock.Node{node0, node1, node2, node3, node4, node5}
@@ -59,8 +60,8 @@ func TestProtocol_EngineSwitching(t *testing.T) {
poa := mock2.NewManualPOAProvider()(e).(*mock2.ManualPOA)
for _, node := range append(nodesP1, nodesP2...) {
- if node.Validator {
- poa.AddAccount(node.AccountID, node.Name)
+ if node.IsValidator() {
+ poa.AddAccount(node.Validator.AccountID, node.Name)
}
}
poa.SetOnline("node0", "node1", "node2", "node3", "node4")
@@ -107,24 +108,24 @@ func TestProtocol_EngineSwitching(t *testing.T) {
ts.Run(false, nodeOptions)
expectedCommittee := []iotago.AccountID{
- node0.AccountID,
- node1.AccountID,
- node2.AccountID,
- node3.AccountID,
- node4.AccountID,
- node6.AccountID,
- node7.AccountID,
+ node0.Validator.AccountID,
+ node1.Validator.AccountID,
+ node2.Validator.AccountID,
+ node3.Validator.AccountID,
+ node4.Validator.AccountID,
+ node6.Validator.AccountID,
+ node7.Validator.AccountID,
}
expectedP1OnlineCommittee := []account.SeatIndex{
- lo.Return1(node0.Protocol.MainEngineInstance().SybilProtection.SeatManager().Committee(1).GetSeat(node0.AccountID)),
- lo.Return1(node0.Protocol.MainEngineInstance().SybilProtection.SeatManager().Committee(1).GetSeat(node1.AccountID)),
- lo.Return1(node0.Protocol.MainEngineInstance().SybilProtection.SeatManager().Committee(1).GetSeat(node2.AccountID)),
- lo.Return1(node0.Protocol.MainEngineInstance().SybilProtection.SeatManager().Committee(1).GetSeat(node3.AccountID)),
- lo.Return1(node0.Protocol.MainEngineInstance().SybilProtection.SeatManager().Committee(1).GetSeat(node4.AccountID)),
+ lo.Return1(node0.Protocol.MainEngineInstance().SybilProtection.SeatManager().Committee(1).GetSeat(node0.Validator.AccountID)),
+ lo.Return1(node0.Protocol.MainEngineInstance().SybilProtection.SeatManager().Committee(1).GetSeat(node1.Validator.AccountID)),
+ lo.Return1(node0.Protocol.MainEngineInstance().SybilProtection.SeatManager().Committee(1).GetSeat(node2.Validator.AccountID)),
+ lo.Return1(node0.Protocol.MainEngineInstance().SybilProtection.SeatManager().Committee(1).GetSeat(node3.Validator.AccountID)),
+ lo.Return1(node0.Protocol.MainEngineInstance().SybilProtection.SeatManager().Committee(1).GetSeat(node4.Validator.AccountID)),
}
expectedP2OnlineCommittee := []account.SeatIndex{
- lo.Return1(node0.Protocol.MainEngineInstance().SybilProtection.SeatManager().Committee(1).GetSeat(node6.AccountID)),
- lo.Return1(node0.Protocol.MainEngineInstance().SybilProtection.SeatManager().Committee(1).GetSeat(node7.AccountID)),
+ lo.Return1(node0.Protocol.MainEngineInstance().SybilProtection.SeatManager().Committee(1).GetSeat(node6.Validator.AccountID)),
+ lo.Return1(node0.Protocol.MainEngineInstance().SybilProtection.SeatManager().Committee(1).GetSeat(node7.Validator.AccountID)),
}
expectedOnlineCommittee := append(expectedP1OnlineCommittee, expectedP2OnlineCommittee...)
@@ -170,7 +171,7 @@ func TestProtocol_EngineSwitching(t *testing.T) {
for _, slot := range []iotago.SlotIndex{4, 5, 6, 7, 8, 9, 10, 11} {
var attestationBlocks []*blocks.Block
for _, node := range ts.Nodes() {
- if node.Validator {
+ if node.IsValidator() {
attestationBlocks = append(attestationBlocks, ts.Block(fmt.Sprintf("P0:%d.3-%s", slot, node.Name)))
}
}
@@ -225,7 +226,7 @@ func TestProtocol_EngineSwitching(t *testing.T) {
for _, slot := range []iotago.SlotIndex{12, 13, 14, 15} {
var attestationBlocks []*blocks.Block
for _, node := range nodesP1 {
- if node.Validator {
+ if node.IsValidator() {
if slot <= 13 {
attestationBlocks = append(attestationBlocks, ts.Block(fmt.Sprintf("P0:%d.3-%s", slot, node.Name)))
} else {
@@ -236,7 +237,7 @@ func TestProtocol_EngineSwitching(t *testing.T) {
// We carry these attestations forward with the window even though these nodes didn't issue in P1.
for _, node := range nodesP2 {
- if node.Validator {
+ if node.IsValidator() {
attestationBlocks = append(attestationBlocks, ts.Block(fmt.Sprintf("P0:%d.3-%s", lo.Min(slot, 13), node.Name)))
}
}
@@ -247,7 +248,7 @@ func TestProtocol_EngineSwitching(t *testing.T) {
for _, slot := range []iotago.SlotIndex{16, 17, 18} {
var attestationBlocks []*blocks.Block
for _, node := range nodesP1 {
- if node.Validator {
+ if node.IsValidator() {
attestationBlocks = append(attestationBlocks, ts.Block(fmt.Sprintf("P1:%d.3-%s", slot, node.Name)))
}
}
@@ -279,7 +280,7 @@ func TestProtocol_EngineSwitching(t *testing.T) {
for _, slot := range []iotago.SlotIndex{12, 13, 14, 15} {
var attestationBlocks []*blocks.Block
for _, node := range nodesP2 {
- if node.Validator {
+ if node.IsValidator() {
if slot <= 13 {
attestationBlocks = append(attestationBlocks, ts.Block(fmt.Sprintf("P0:%d.3-%s", slot, node.Name)))
} else {
@@ -290,7 +291,7 @@ func TestProtocol_EngineSwitching(t *testing.T) {
// We carry these attestations forward with the window even though these nodes didn't issue in P2.
for _, node := range nodesP1 {
- if node.Validator {
+ if node.IsValidator() {
attestationBlocks = append(attestationBlocks, ts.Block(fmt.Sprintf("P0:%d.3-%s", lo.Min(slot, 13), node.Name)))
}
}
@@ -301,7 +302,7 @@ func TestProtocol_EngineSwitching(t *testing.T) {
for _, slot := range []iotago.SlotIndex{16, 17, 18} {
var attestationBlocks []*blocks.Block
for _, node := range nodesP2 {
- if node.Validator {
+ if node.IsValidator() {
attestationBlocks = append(attestationBlocks, ts.Block(fmt.Sprintf("P2:%d.3-%s", slot, node.Name)))
}
}
@@ -334,16 +335,16 @@ func TestProtocol_EngineSwitching(t *testing.T) {
wg := &sync.WaitGroup{}
// Issue blocks on both partitions after merging the networks.
- node0.IssueActivity(ctxP1, wg, 21)
- node1.IssueActivity(ctxP1, wg, 21)
- node2.IssueActivity(ctxP1, wg, 21)
- node3.IssueActivity(ctxP1, wg, 21)
- node4.IssueActivity(ctxP1, wg, 21)
- node5.IssueActivity(ctxP1, wg, 21)
-
- node6.IssueActivity(ctxP2, wg, 21)
- node7.IssueActivity(ctxP2, wg, 21)
- node8.IssueActivity(ctxP2, wg, 21)
+ node0.Validator.IssueActivity(ctxP1, wg, 21, node0)
+ node1.Validator.IssueActivity(ctxP1, wg, 21, node1)
+ node2.Validator.IssueActivity(ctxP1, wg, 21, node2)
+ node3.Validator.IssueActivity(ctxP1, wg, 21, node3)
+ node4.Validator.IssueActivity(ctxP1, wg, 21, node4)
+ // node5 is not a validator in this test, so it has no Validator issuer to drive activity.
+ //node5.Validator.IssueActivity(ctxP1, wg, 21, node5)
+
+ node6.Validator.IssueActivity(ctxP2, wg, 21, node6)
+ node7.Validator.IssueActivity(ctxP2, wg, 21, node7)
+ // node8 is a plain node (added via AddNode) and likewise has no Validator issuer.
+ //node8.Validator.IssueActivity(ctxP2, wg, 21, node8)
// P1 finalized until slot 18. We do not expect any forks here because our CW is higher than the other partition's.
ts.AssertForkDetectedCount(0, nodesP1...)
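The activity calls above can equivalently be written as a loop; a sketch under the assumption that only validator nodes carry a Validator issuer (which is why the node5 and node8 calls are commented out in the patch):

// IssueActivity moved from the node to its validator issuer and now takes the node as an argument.
for _, node := range nodesP1 {
	if node.IsValidator() {
		node.Validator.IssueActivity(ctxP1, wg, 21, node)
	}
}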
diff --git a/pkg/tests/protocol_startup_test.go b/pkg/tests/protocol_startup_test.go
index 2418cfc22..20ffe89a3 100644
--- a/pkg/tests/protocol_startup_test.go
+++ b/pkg/tests/protocol_startup_test.go
@@ -47,11 +47,11 @@ func Test_BookInCommittedSlot(t *testing.T) {
ts.Wait()
expectedCommittee := []iotago.AccountID{
- nodeA.AccountID,
+ nodeA.Validator.AccountID,
}
expectedOnlineCommittee := []account.SeatIndex{
- lo.Return1(nodeA.Protocol.MainEngineInstance().SybilProtection.SeatManager().Committee(1).GetSeat(nodeA.AccountID)),
+ lo.Return1(nodeA.Protocol.MainEngineInstance().SybilProtection.SeatManager().Committee(1).GetSeat(nodeA.Validator.AccountID)),
}
// Verify that nodes have the expected states.
@@ -106,7 +106,7 @@ func Test_BookInCommittedSlot(t *testing.T) {
})
ts.AssertAttestationsForSlot(slot, ts.Blocks(aliases...), ts.Nodes()...)
}
- ts.IssueBlockAtSlot("5*", 5, lo.PanicOnErr(nodeA.Protocol.MainEngineInstance().Storage.Commitments().Load(3)).Commitment(), ts.Node("nodeA"), ts.BlockIDsWithPrefix("4.3-")...)
+ ts.IssueValidationBlockAtSlot("5*", 5, lo.PanicOnErr(nodeA.Protocol.MainEngineInstance().Storage.Commitments().Load(3)).Commitment(), ts.Node("nodeA"), ts.BlockIDsWithPrefix("4.3-")...)
ts.AssertBlocksExist(ts.Blocks("5*"), false, ts.Nodes("nodeA")...)
}
@@ -127,6 +127,7 @@ func Test_StartNodeFromSnapshotAndDisk(t *testing.T) {
nodeA := ts.AddValidatorNode("nodeA")
nodeB := ts.AddValidatorNode("nodeB")
ts.AddNode("nodeC")
+ ts.AddBasicBlockIssuer("default", iotago.MaxBlockIssuanceCredits/2)
nodeOptions := []options.Option[protocol.Protocol]{
protocol.WithStorageOptions(
@@ -148,13 +149,13 @@ func Test_StartNodeFromSnapshotAndDisk(t *testing.T) {
ts.Wait()
expectedCommittee := []iotago.AccountID{
- nodeA.AccountID,
- nodeB.AccountID,
+ nodeA.Validator.AccountID,
+ nodeB.Validator.AccountID,
}
expectedOnlineCommittee := []account.SeatIndex{
- lo.Return1(nodeA.Protocol.MainEngineInstance().SybilProtection.SeatManager().Committee(1).GetSeat(nodeA.AccountID)),
- lo.Return1(nodeA.Protocol.MainEngineInstance().SybilProtection.SeatManager().Committee(1).GetSeat(nodeB.AccountID)),
+ lo.Return1(nodeA.Protocol.MainEngineInstance().SybilProtection.SeatManager().Committee(1).GetSeat(nodeA.Validator.AccountID)),
+ lo.Return1(nodeA.Protocol.MainEngineInstance().SybilProtection.SeatManager().Committee(1).GetSeat(nodeB.Validator.AccountID)),
}
// Verify that nodes have the expected states.
@@ -278,7 +279,6 @@ func Test_StartNodeFromSnapshotAndDisk(t *testing.T) {
ts.RemoveNode("nodeC")
nodeC1 := ts.AddNode("nodeC-restarted")
- nodeC1.CopyIdentityFromNode(nodeC)
nodeC1.Initialize(true,
protocol.WithBaseDirectory(ts.Directory.Path(nodeC.Name)),
protocol.WithStorageOptions(
@@ -313,7 +313,6 @@ func Test_StartNodeFromSnapshotAndDisk(t *testing.T) {
require.NoError(t, ts.Node("nodeA").Protocol.MainEngineInstance().WriteSnapshot(snapshotPath))
nodeD := ts.AddNode("nodeD")
- nodeD.CopyIdentityFromNode(ts.Node("nodeC-restarted")) // we just want to be able to issue some stuff and don't care about the account for now.
nodeD.Initialize(true, append(nodeOptions,
protocol.WithSnapshotPath(snapshotPath),
protocol.WithBaseDirectory(ts.Directory.PathWithCreate(nodeD.Name)),
@@ -464,7 +463,6 @@ func Test_StartNodeFromSnapshotAndDisk(t *testing.T) {
require.NoError(t, ts.Node("nodeA").Protocol.MainEngineInstance().WriteSnapshot(snapshotPath))
nodeD := ts.AddNode("nodeE")
- nodeD.CopyIdentityFromNode(ts.Node("nodeC-restarted")) // we just want to be able to issue some stuff and don't care about the account for now.
nodeD.Initialize(true, append(nodeOptions,
protocol.WithSnapshotPath(snapshotPath),
protocol.WithBaseDirectory(ts.Directory.PathWithCreate(nodeD.Name)),
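For reference, a sketch of the default basic-block issuer wiring this test now relies on (names as in the patch): the issuer is registered on the suite before ts.Run and retrieved wherever a funded, non-validator signing account is needed:

// Register a default basic-block issuer with half of the maximum block issuance credits...
ts.AddBasicBlockIssuer("default", iotago.MaxBlockIssuanceCredits/2)

// ...and retrieve it later; it exposes AccountID and PublicKey for assertions.
issuer := ts.DefaultBasicBlockIssuer()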
diff --git a/pkg/tests/upgrade_signaling_test.go b/pkg/tests/upgrade_signaling_test.go
index d30e6ee85..874e3d74a 100644
--- a/pkg/tests/upgrade_signaling_test.go
+++ b/pkg/tests/upgrade_signaling_test.go
@@ -70,6 +70,7 @@ func Test_Upgrade_Signaling(t *testing.T) {
ts.AddValidatorNode("nodeD")
ts.AddNode("nodeE")
ts.AddNode("nodeF")
+ ts.AddBasicBlockIssuer("default", iotago.MaxBlockIssuanceCredits/2)
ts.Run(true, map[string][]options.Option[protocol.Protocol]{
"nodeA": nodeOptions,
@@ -95,11 +96,11 @@ func Test_Upgrade_Signaling(t *testing.T) {
hash2 := iotago.Identifier{2}
ts.AssertAccountData(&accounts.AccountData{
- ID: ts.Node("nodeA").AccountID,
+ ID: ts.Node("nodeA").Validator.AccountID,
Credits: &accounts.BlockIssuanceCredits{Value: iotago.MaxBlockIssuanceCredits / 2, UpdateTime: 0},
ExpirySlot: iotago.MaxSlotIndex,
OutputID: iotago.OutputIDFromTransactionIDAndIndex(snapshotcreator.GenesisTransactionID, 1),
- BlockIssuerKeys: iotago.NewBlockIssuerKeys(iotago.Ed25519PublicKeyBlockIssuerKeyFromPublicKey(ed25519.PublicKey(ts.Node("nodeA").PubKey))),
+ BlockIssuerKeys: iotago.NewBlockIssuerKeys(iotago.Ed25519PublicKeyBlockIssuerKeyFromPublicKey(ed25519.PublicKey(ts.Node("nodeA").Validator.PublicKey))),
ValidatorStake: testsuite.MinValidatorAccountAmount,
DelegationStake: 0,
FixedCost: 0,
@@ -108,11 +109,11 @@ func Test_Upgrade_Signaling(t *testing.T) {
}, ts.Nodes()...)
ts.AssertAccountData(&accounts.AccountData{
- ID: ts.Node("nodeF").AccountID,
+ ID: ts.DefaultBasicBlockIssuer().AccountID,
Credits: &accounts.BlockIssuanceCredits{Value: iotago.MaxBlockIssuanceCredits / 2, UpdateTime: 0},
ExpirySlot: iotago.MaxSlotIndex,
- OutputID: iotago.OutputIDFromTransactionIDAndIndex(snapshotcreator.GenesisTransactionID, 6),
- BlockIssuerKeys: iotago.NewBlockIssuerKeys(iotago.Ed25519PublicKeyBlockIssuerKeyFromPublicKey(ed25519.PublicKey(ts.Node("nodeF").PubKey))),
+ OutputID: iotago.OutputIDFromTransactionIDAndIndex(snapshotcreator.GenesisTransactionID, 5),
+ BlockIssuerKeys: iotago.NewBlockIssuerKeys(iotago.Ed25519PublicKeyBlockIssuerKeyFromPublicKey(ed25519.PublicKey(ts.DefaultBasicBlockIssuer().PublicKey))),
ValidatorStake: 0,
DelegationStake: 0,
FixedCost: 0,
@@ -132,11 +133,11 @@ func Test_Upgrade_Signaling(t *testing.T) {
// check account data before all nodes set the current version
ts.AssertAccountData(&accounts.AccountData{
- ID: ts.Node("nodeA").AccountID,
+ ID: ts.Node("nodeA").Validator.AccountID,
Credits: &accounts.BlockIssuanceCredits{Value: iotago.MaxBlockIssuanceCredits / 2, UpdateTime: 0},
ExpirySlot: iotago.MaxSlotIndex,
OutputID: iotago.OutputIDFromTransactionIDAndIndex(snapshotcreator.GenesisTransactionID, 1),
- BlockIssuerKeys: iotago.NewBlockIssuerKeys(iotago.Ed25519PublicKeyBlockIssuerKeyFromPublicKey(ed25519.PublicKey(ts.Node("nodeA").PubKey))),
+ BlockIssuerKeys: iotago.NewBlockIssuerKeys(iotago.Ed25519PublicKeyBlockIssuerKeyFromPublicKey(ed25519.PublicKey(ts.Node("nodeA").Validator.PublicKey))),
ValidatorStake: testsuite.MinValidatorAccountAmount,
DelegationStake: 0,
FixedCost: 0,
@@ -145,11 +146,11 @@ func Test_Upgrade_Signaling(t *testing.T) {
}, ts.Nodes()...)
ts.AssertAccountData(&accounts.AccountData{
- ID: ts.Node("nodeD").AccountID,
+ ID: ts.Node("nodeD").Validator.AccountID,
Credits: &accounts.BlockIssuanceCredits{Value: iotago.MaxBlockIssuanceCredits / 2, UpdateTime: 0},
ExpirySlot: iotago.MaxSlotIndex,
OutputID: iotago.OutputIDFromTransactionIDAndIndex(snapshotcreator.GenesisTransactionID, 4),
- BlockIssuerKeys: iotago.NewBlockIssuerKeys(iotago.Ed25519PublicKeyBlockIssuerKeyFromPublicKey(ed25519.PublicKey(ts.Node("nodeD").PubKey))),
+ BlockIssuerKeys: iotago.NewBlockIssuerKeys(iotago.Ed25519PublicKeyBlockIssuerKeyFromPublicKey(ed25519.PublicKey(ts.Node("nodeD").Validator.PublicKey))),
ValidatorStake: testsuite.MinValidatorAccountAmount,
DelegationStake: 0,
FixedCost: 0,
@@ -166,11 +167,11 @@ func Test_Upgrade_Signaling(t *testing.T) {
ts.IssueBlocksAtEpoch("", 1, 4, "7.3", ts.Nodes(), true, nil)
ts.AssertAccountData(&accounts.AccountData{
- ID: ts.Node("nodeA").AccountID,
+ ID: ts.Node("nodeA").Validator.AccountID,
Credits: &accounts.BlockIssuanceCredits{Value: iotago.MaxBlockIssuanceCredits / 2, UpdateTime: 0},
ExpirySlot: iotago.MaxSlotIndex,
OutputID: iotago.OutputIDFromTransactionIDAndIndex(snapshotcreator.GenesisTransactionID, 1),
- BlockIssuerKeys: iotago.NewBlockIssuerKeys(iotago.Ed25519PublicKeyBlockIssuerKeyFromPublicKey(ed25519.PublicKey(ts.Node("nodeA").PubKey))),
+ BlockIssuerKeys: iotago.NewBlockIssuerKeys(iotago.Ed25519PublicKeyBlockIssuerKeyFromPublicKey(ed25519.PublicKey(ts.Node("nodeA").Validator.PublicKey))),
ValidatorStake: testsuite.MinValidatorAccountAmount,
DelegationStake: 0,
FixedCost: 0,
@@ -179,7 +180,7 @@ func Test_Upgrade_Signaling(t *testing.T) {
}, ts.Nodes()...)
// check that rollback is correct
- account, exists, err := ts.Node("nodeA").Protocol.MainEngineInstance().Ledger.Account(ts.Node("nodeA").AccountID, 7)
+ account, exists, err := ts.Node("nodeA").Protocol.MainEngineInstance().Ledger.Account(ts.Node("nodeA").Validator.AccountID, 7)
require.NoError(t, err)
require.True(t, exists)
require.Equal(t, model.VersionAndHash{Version: 4, Hash: hash2}, account.LatestSupportedProtocolVersionAndHash)
@@ -217,7 +218,6 @@ func Test_Upgrade_Signaling(t *testing.T) {
ts.RemoveNode("nodeE")
nodeE1 := ts.AddNode("nodeE1")
- nodeE1.CopyIdentityFromNode(nodeE)
nodeE1.Initialize(true,
append(nodeOptions,
protocol.WithBaseDirectory(ts.Directory.Path(nodeE.Name)),
@@ -276,7 +276,6 @@ func Test_Upgrade_Signaling(t *testing.T) {
ts.RemoveNode("nodeE1")
nodeE2 := ts.AddNode("nodeE2")
- nodeE2.CopyIdentityFromNode(nodeE1)
nodeE2.Initialize(true,
append(nodeOptions,
protocol.WithBaseDirectory(ts.Directory.Path("nodeE")),
@@ -339,11 +338,11 @@ func Test_Upgrade_Signaling(t *testing.T) {
// check account data at the end of the test
ts.AssertAccountData(&accounts.AccountData{
- ID: ts.Node("nodeA").AccountID,
+ ID: ts.Node("nodeA").Validator.AccountID,
Credits: &accounts.BlockIssuanceCredits{Value: iotago.MaxBlockIssuanceCredits / 2, UpdateTime: 0},
ExpirySlot: iotago.MaxSlotIndex,
OutputID: iotago.OutputIDFromTransactionIDAndIndex(snapshotcreator.GenesisTransactionID, 1),
- BlockIssuerKeys: iotago.NewBlockIssuerKeys(iotago.Ed25519PublicKeyBlockIssuerKeyFromPublicKey(ed25519.PublicKey(ts.Node("nodeA").PubKey))),
+ BlockIssuerKeys: iotago.NewBlockIssuerKeys(iotago.Ed25519PublicKeyBlockIssuerKeyFromPublicKey(ed25519.PublicKey(ts.Node("nodeA").Validator.PublicKey))),
ValidatorStake: testsuite.MinValidatorAccountAmount,
DelegationStake: 0,
FixedCost: 0,
@@ -352,11 +351,11 @@ func Test_Upgrade_Signaling(t *testing.T) {
}, ts.Nodes()...)
ts.AssertAccountData(&accounts.AccountData{
- ID: ts.Node("nodeD").AccountID,
+ ID: ts.Node("nodeD").Validator.AccountID,
Credits: &accounts.BlockIssuanceCredits{Value: iotago.MaxBlockIssuanceCredits / 2, UpdateTime: 0},
ExpirySlot: iotago.MaxSlotIndex,
OutputID: iotago.OutputIDFromTransactionIDAndIndex(snapshotcreator.GenesisTransactionID, 4),
- BlockIssuerKeys: iotago.NewBlockIssuerKeys(iotago.Ed25519PublicKeyBlockIssuerKeyFromPublicKey(ed25519.PublicKey(ts.Node("nodeD").PubKey))),
+ BlockIssuerKeys: iotago.NewBlockIssuerKeys(iotago.Ed25519PublicKeyBlockIssuerKeyFromPublicKey(ed25519.PublicKey(ts.Node("nodeD").Validator.PublicKey))),
ValidatorStake: testsuite.MinValidatorAccountAmount,
DelegationStake: 0,
FixedCost: 0,
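The rollback check above generalizes to any account and slot; a minimal sketch (assuming a *testing.T named t and the suite from this test):

// Read the account state as of slot 7 to verify the rolled-back version/hash.
account, exists, err := ts.Node("nodeA").Protocol.MainEngineInstance().Ledger.Account(ts.Node("nodeA").Validator.AccountID, 7)
require.NoError(t, err)
require.True(t, exists)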
diff --git a/pkg/testsuite/mock/account.go b/pkg/testsuite/mock/account.go
new file mode 100644
index 000000000..aac2de2a1
--- /dev/null
+++ b/pkg/testsuite/mock/account.go
@@ -0,0 +1,70 @@
+package mock
+
+import (
+ "crypto/ed25519"
+ "fmt"
+
+ "github.com/iotaledger/hive.go/crypto"
+ iotago "github.com/iotaledger/iota.go/v4"
+)
+
+// Account represents a signing identity: it exposes the account ID, the
+// account address, and the private key used for signing blocks.
+type Account interface {
+ // ID returns the accountID.
+ ID() iotago.AccountID
+
+ // Address returns the account address.
+ Address() iotago.Address
+
+ // PrivateKey returns the account private key for signing.
+ PrivateKey() ed25519.PrivateKey
+}
+
+var _ Account = &Ed25519Account{}
+
+// Ed25519Account is an account that uses an Ed25519 key pair.
+type Ed25519Account struct {
+ accountID iotago.AccountID
+ privateKey ed25519.PrivateKey
+}
+
+// NewEd25519Account creates a new Ed25519Account.
+func NewEd25519Account(accountID iotago.AccountID, privateKey ed25519.PrivateKey) *Ed25519Account {
+ return &Ed25519Account{
+ accountID: accountID,
+ privateKey: privateKey,
+ }
+}
+
+// ID returns the accountID.
+func (e *Ed25519Account) ID() iotago.AccountID {
+ return e.accountID
+}
+
+// Address returns the account address.
+func (e *Ed25519Account) Address() iotago.Address {
+ ed25519PubKey, ok := e.privateKey.Public().(ed25519.PublicKey)
+ if !ok {
+ panic("invalid public key type")
+ }
+
+ return iotago.Ed25519AddressFromPubKey(ed25519PubKey)
+}
+
+// PrivateKey returns the account private key for signing.
+func (e *Ed25519Account) PrivateKey() ed25519.PrivateKey {
+ return e.privateKey
+}
+
+// AccountFromParams parses a hex-encoded account ID and an Ed25519 private
+// key string into an Account, panicking on malformed input.
+func AccountFromParams(accountHex, privateKey string) Account {
+ accountID, err := iotago.IdentifierFromHexString(accountHex)
+ if err != nil {
+ panic(fmt.Sprintf("invalid accountID hex string: %v", err))
+ }
+
+ privKey, err := crypto.ParseEd25519PrivateKeyFromString(privateKey)
+ if err != nil {
+ panic(fmt.Sprintf("invalid ed25519 private key string: %v", err))
+ }
+
+ return NewEd25519Account(accountID, privKey)
+}
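A sketch of how this mock account type gets constructed in practice (the blake2b derivation mirrors NewBlockIssuer further down in this patch; everything else is illustrative):

// Generate a fresh Ed25519 key pair and derive the account ID from the public key.
pub, priv, err := ed25519.GenerateKey(nil)
if err != nil {
	panic(err)
}
accountID := iotago.AccountID(blake2b.Sum256(pub))

account := NewEd25519Account(accountID, priv)
_ = account.Address() // Ed25519 address derived from the key pair's public half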
diff --git a/pkg/testsuite/mock/block_params.go b/pkg/testsuite/mock/block_params.go
new file mode 100644
index 000000000..7013329f3
--- /dev/null
+++ b/pkg/testsuite/mock/block_params.go
@@ -0,0 +1,122 @@
+package mock
+
+import (
+ "time"
+
+ "github.com/iotaledger/hive.go/runtime/options"
+ "github.com/iotaledger/iota-core/pkg/model"
+ iotago "github.com/iotaledger/iota.go/v4"
+)
+
+// BlockHeaderParams bundles the header fields shared by all block types.
+type BlockHeaderParams struct {
+ ParentsCount int
+ References model.ParentReferences
+ SlotCommitment *iotago.Commitment
+ LatestFinalizedSlot *iotago.SlotIndex
+ IssuingTime *time.Time
+ ProtocolVersion *iotago.Version
+ Issuer Account
+}
+
+// BasicBlockParams bundles the parameters for building a basic block.
+type BasicBlockParams struct {
+ BlockHeader *BlockHeaderParams
+ Payload iotago.Payload
+}
+
+// ValidatorBlockParams bundles the parameters for building a validation block.
+type ValidatorBlockParams struct {
+ BlockHeader *BlockHeaderParams
+ HighestSupportedVersion *iotago.Version
+ ProtocolParametersHash *iotago.Identifier
+}
+
+func WithParentsCount(parentsCount int) func(builder *BlockHeaderParams) {
+ return func(builder *BlockHeaderParams) {
+ builder.ParentsCount = parentsCount
+ }
+}
+
+func WithStrongParents(blockIDs ...iotago.BlockID) func(builder *BlockHeaderParams) {
+ return func(builder *BlockHeaderParams) {
+ if builder.References == nil {
+ builder.References = make(model.ParentReferences)
+ }
+
+ builder.References[iotago.StrongParentType] = blockIDs
+ }
+}
+
+func WithWeakParents(blockIDs ...iotago.BlockID) func(builder *BlockHeaderParams) {
+ return func(builder *BlockHeaderParams) {
+ if builder.References == nil {
+ builder.References = make(model.ParentReferences)
+ }
+
+ builder.References[iotago.WeakParentType] = blockIDs
+ }
+}
+
+func WithShallowLikeParents(blockIDs ...iotago.BlockID) func(builder *BlockHeaderParams) {
+ return func(builder *BlockHeaderParams) {
+ if builder.References == nil {
+ builder.References = make(model.ParentReferences)
+ }
+
+ builder.References[iotago.ShallowLikeParentType] = blockIDs
+ }
+}
+
+func WithSlotCommitment(commitment *iotago.Commitment) func(builder *BlockHeaderParams) {
+ return func(builder *BlockHeaderParams) {
+ builder.SlotCommitment = commitment
+ }
+}
+
+func WithLatestFinalizedSlot(commitmentIndex iotago.SlotIndex) func(builder *BlockHeaderParams) {
+ return func(builder *BlockHeaderParams) {
+ builder.LatestFinalizedSlot = &commitmentIndex
+ }
+}
+
+func WithIssuingTime(issuingTime time.Time) func(builder *BlockHeaderParams) {
+ return func(builder *BlockHeaderParams) {
+ builder.IssuingTime = &issuingTime
+ }
+}
+
+func WithProtocolVersion(version iotago.Version) func(builder *BlockHeaderParams) {
+ return func(builder *BlockHeaderParams) {
+ builder.ProtocolVersion = &version
+ }
+}
+
+func WithIssuer(issuer Account) func(builder *BlockHeaderParams) {
+ return func(builder *BlockHeaderParams) {
+ builder.Issuer = issuer
+ }
+}
+
+func WithValidationBlockHeaderOptions(opts ...options.Option[BlockHeaderParams]) func(builder *ValidatorBlockParams) {
+ return func(builder *ValidatorBlockParams) {
+ builder.BlockHeader = options.Apply(&BlockHeaderParams{}, opts)
+ }
+}
+
+func WithBasicBlockHeader(opts ...options.Option[BlockHeaderParams]) func(builder *BasicBlockParams) {
+ return func(builder *BasicBlockParams) {
+ builder.BlockHeader = options.Apply(&BlockHeaderParams{}, opts)
+ }
+}
+
+func WithPayload(payload iotago.Payload) func(builder *BasicBlockParams) {
+ return func(builder *BasicBlockParams) {
+ builder.Payload = payload
+ }
+}
+
+func WithHighestSupportedVersion(highestSupportedVersion iotago.Version) func(builder *ValidatorBlockParams) {
+ return func(builder *ValidatorBlockParams) {
+ builder.HighestSupportedVersion = &highestSupportedVersion
+ }
+}
+
+func WithProtocolParametersHash(protocolParametersHash iotago.Identifier) func(builder *ValidatorBlockParams) {
+ return func(builder *ValidatorBlockParams) {
+ builder.ProtocolParametersHash = &protocolParametersHash
+ }
+}
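Putting the new options together, a sketch of composing block parameters (parentID, payload, and version are hypothetical placeholders):

// Header options nest inside the per-block-type wrappers.
basicOpts := []options.Option[BasicBlockParams]{
	WithBasicBlockHeader(WithStrongParents(parentID), WithIssuingTime(time.Now())),
	WithPayload(payload),
}

validationOpts := []options.Option[ValidatorBlockParams]{
	WithValidationBlockHeaderOptions(WithParentsCount(2)),
	WithHighestSupportedVersion(version),
}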
diff --git a/pkg/blockfactory/blockissuer.go b/pkg/testsuite/mock/blockissuer.go
similarity index 65%
rename from pkg/blockfactory/blockissuer.go
rename to pkg/testsuite/mock/blockissuer.go
index eb1692261..86a48a75c 100644
--- a/pkg/blockfactory/blockissuer.go
+++ b/pkg/testsuite/mock/blockissuer.go
@@ -1,9 +1,16 @@
-package blockfactory
+package mock
import (
"context"
+ "crypto/ed25519"
+ "fmt"
+ "sync"
+ "testing"
"time"
+ "github.com/stretchr/testify/require"
+ "golang.org/x/crypto/blake2b"
+
"github.com/iotaledger/hive.go/core/safemath"
"github.com/iotaledger/hive.go/ierrors"
"github.com/iotaledger/hive.go/lo"
@@ -13,7 +20,6 @@ import (
"github.com/iotaledger/hive.go/runtime/workerpool"
"github.com/iotaledger/hive.go/serializer/v2/serix"
"github.com/iotaledger/iota-core/pkg/model"
- "github.com/iotaledger/iota-core/pkg/protocol"
"github.com/iotaledger/iota-core/pkg/protocol/engine/blocks"
"github.com/iotaledger/iota-core/pkg/protocol/engine/filter"
iotago "github.com/iotaledger/iota.go/v4"
@@ -33,11 +39,18 @@ var (
// BlockIssuer contains logic to create and issue blocks signed by the given account.
type BlockIssuer struct {
+ Testing *testing.T
+
+ Name string
+ Validator bool
+
events *Events
workerPool *workerpool.WorkerPool
- protocol *protocol.Protocol
+ privateKey ed25519.PrivateKey
+ PublicKey ed25519.PublicKey
+ AccountID iotago.AccountID
optsTipSelectionTimeout time.Duration
optsTipSelectionRetryInterval time.Duration
@@ -46,11 +59,24 @@ type BlockIssuer struct {
optsRateSetterEnabled bool
}
-func New(p *protocol.Protocol, opts ...options.Option[BlockIssuer]) *BlockIssuer {
+func NewBlockIssuer(t *testing.T, name string, validator bool, opts ...options.Option[BlockIssuer]) *BlockIssuer {
+ pub, priv, err := ed25519.GenerateKey(nil)
+ if err != nil {
+ panic(err)
+ }
+
+ accountID := iotago.AccountID(blake2b.Sum256(pub))
+ accountID.RegisterAlias(name)
+
return options.Apply(&BlockIssuer{
+ Testing: t,
+ Name: name,
+ Validator: validator,
events: NewEvents(),
- workerPool: p.Workers.CreatePool("BlockIssuer"),
- protocol: p,
+ workerPool: workerpool.New("BlockIssuer"),
+ privateKey: priv,
+ PublicKey: pub,
+ AccountID: accountID,
optsIncompleteBlockAccepted: false,
optsRateSetterEnabled: false,
optsTipSelectionTimeout: 5 * time.Second,
@@ -64,40 +90,35 @@ func (i *BlockIssuer) Shutdown() {
i.workerPool.ShutdownComplete.Wait()
}
-func (i *BlockIssuer) CreateValidationBlock(ctx context.Context, issuerAccount Account, opts ...options.Option[ValidatorBlockParams]) (*model.Block, error) {
+func (i *BlockIssuer) CreateValidationBlock(ctx context.Context, alias string, issuerAccount Account, node *Node, opts ...options.Option[ValidatorBlockParams]) *blocks.Block {
blockParams := options.Apply(&ValidatorBlockParams{}, opts)
if blockParams.BlockHeader.References == nil {
// TODO: change this to get references for validator block
- references, err := i.getReferences(ctx, nil, blockParams.BlockHeader.ParentsCount)
- if err != nil {
- return nil, ierrors.Wrap(err, "error building block")
- }
+ references, err := i.getReferences(ctx, nil, node, blockParams.BlockHeader.ParentsCount)
+ require.NoError(i.Testing, err)
+
blockParams.BlockHeader.References = references
}
- if err := i.setDefaultBlockParams(blockParams.BlockHeader, issuerAccount); err != nil {
- return nil, err
- }
+ err := i.setDefaultBlockParams(blockParams.BlockHeader, node)
+ require.NoError(i.Testing, err)
if blockParams.HighestSupportedVersion == nil {
// We use the latest supported version and not the current one.
- version := i.protocol.LatestAPI().Version()
+ version := node.Protocol.LatestAPI().Version()
blockParams.HighestSupportedVersion = &version
}
if blockParams.ProtocolParametersHash == nil {
- protocolParametersHash, err := i.protocol.CurrentAPI().ProtocolParameters().Hash()
- if err != nil {
- return nil, ierrors.Wrap(err, "error getting protocol parameters hash")
- }
+ protocolParametersHash, err := node.Protocol.CurrentAPI().ProtocolParameters().Hash()
+ require.NoError(i.Testing, err)
+
blockParams.ProtocolParametersHash = &protocolParametersHash
}
- api, err := i.retrieveAPI(blockParams.BlockHeader)
- if err != nil {
- return nil, ierrors.Wrapf(err, "error getting api for version %d", *blockParams.BlockHeader.ProtocolVersion)
- }
+ api, err := i.retrieveAPI(blockParams.BlockHeader, node)
+ require.NoError(i.Testing, err)
blockBuilder := builder.NewValidationBlockBuilder(api)
@@ -105,11 +126,9 @@ func (i *BlockIssuer) CreateValidationBlock(ctx context.Context, issuerAccount A
blockBuilder.LatestFinalizedSlot(*blockParams.BlockHeader.LatestFinalizedSlot)
blockBuilder.IssuingTime(*blockParams.BlockHeader.IssuingTime)
- if strongParents, exists := blockParams.BlockHeader.References[iotago.StrongParentType]; exists && len(strongParents) > 0 {
- blockBuilder.StrongParents(strongParents)
- } else {
- return nil, ierrors.New("cannot create a block without strong parents")
- }
+ strongParents, exists := blockParams.BlockHeader.References[iotago.StrongParentType]
+ require.True(i.Testing, exists && len(strongParents) > 0)
+ blockBuilder.StrongParents(strongParents)
if weakParents, exists := blockParams.BlockHeader.References[iotago.WeakParentType]; exists {
blockBuilder.WeakParents(weakParents)
@@ -125,60 +144,61 @@ func (i *BlockIssuer) CreateValidationBlock(ctx context.Context, issuerAccount A
blockBuilder.Sign(issuerAccount.ID(), issuerAccount.PrivateKey())
block, err := blockBuilder.Build()
- if err != nil {
- return nil, ierrors.Wrap(err, "error building block")
- }
+ require.NoError(i.Testing, err)
// Make sure we only create syntactically valid blocks.
modelBlock, err := model.BlockFromBlock(block, serix.WithValidation())
- if err != nil {
- return nil, ierrors.Wrap(err, "error serializing block to model block")
- }
+ require.NoError(i.Testing, err)
i.events.BlockConstructed.Trigger(modelBlock)
- return modelBlock, nil
+ modelBlock.ID().RegisterAlias(alias)
+
+ return blocks.NewBlock(modelBlock)
+}
+
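+// IssueValidationBlock creates a validation block signed with the issuer's own account and submits it to the given node, failing the test on error.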
+func (i *BlockIssuer) IssueValidationBlock(ctx context.Context, alias string, node *Node, opts ...options.Option[ValidatorBlockParams]) *blocks.Block {
+ block := i.CreateValidationBlock(ctx, alias, NewEd25519Account(i.AccountID, i.privateKey), node, opts...)
+
+ require.NoErrorf(i.Testing, i.IssueBlock(block.ModelBlock(), node), "%s > failed to issue validation block with alias %s", i.Name, alias)
+
+ fmt.Printf("%s > Issued block: %s - slot %d - commitment %s %d - latest finalized slot %d\n", i.Name, block.ID(), block.ID().Slot(), block.SlotCommitmentID(), block.SlotCommitmentID().Slot(), block.ProtocolBlock().LatestFinalizedSlot)
+
+ return block
}
-func (i *BlockIssuer) retrieveAPI(blockParams *BlockHeaderParams) (iotago.API, error) {
+func (i *BlockIssuer) retrieveAPI(blockParams *BlockHeaderParams, node *Node) (iotago.API, error) {
if blockParams.ProtocolVersion != nil {
- return i.protocol.APIForVersion(*blockParams.ProtocolVersion)
+ return node.Protocol.APIForVersion(*blockParams.ProtocolVersion)
}
- return i.protocol.CurrentAPI(), nil
+ return node.Protocol.CurrentAPI(), nil
}
-// CreateBlock creates a new block with the options.
+// CreateBasicBlock creates a new basic block with the given options.
-func (i *BlockIssuer) CreateBlock(ctx context.Context, issuerAccount Account, opts ...options.Option[BasicBlockParams]) (*model.Block, error) {
+func (i *BlockIssuer) CreateBasicBlock(ctx context.Context, alias string, node *Node, opts ...options.Option[BasicBlockParams]) *blocks.Block {
blockParams := options.Apply(&BasicBlockParams{}, opts)
if blockParams.BlockHeader.References == nil {
- references, err := i.getReferences(ctx, blockParams.Payload, blockParams.BlockHeader.ParentsCount)
- if err != nil {
- return nil, ierrors.Wrap(err, "error building block")
- }
+ references, err := i.getReferences(ctx, blockParams.Payload, node, blockParams.BlockHeader.ParentsCount)
+ require.NoError(i.Testing, err)
blockParams.BlockHeader.References = references
}
- if err := i.setDefaultBlockParams(blockParams.BlockHeader, issuerAccount); err != nil {
- return nil, err
- }
+ err := i.setDefaultBlockParams(blockParams.BlockHeader, node)
+ require.NoError(i.Testing, err)
- api, err := i.retrieveAPI(blockParams.BlockHeader)
- if err != nil {
- return nil, ierrors.Wrapf(err, "error getting api for version %d", *blockParams.BlockHeader.ProtocolVersion)
- }
+ api, err := i.retrieveAPI(blockParams.BlockHeader, node)
+ require.NoError(i.Testing, err)
blockBuilder := builder.NewBasicBlockBuilder(api)
blockBuilder.SlotCommitmentID(blockParams.BlockHeader.SlotCommitment.MustID())
blockBuilder.LatestFinalizedSlot(*blockParams.BlockHeader.LatestFinalizedSlot)
blockBuilder.IssuingTime(*blockParams.BlockHeader.IssuingTime)
- if strongParents, exists := blockParams.BlockHeader.References[iotago.StrongParentType]; exists && len(strongParents) > 0 {
- blockBuilder.StrongParents(strongParents)
- } else {
- return nil, ierrors.New("cannot create a block without strong parents")
- }
+ strongParents, exists := blockParams.BlockHeader.References[iotago.StrongParentType]
+ require.True(i.Testing, exists && len(strongParents) > 0)
+ blockBuilder.StrongParents(strongParents)
if weakParents, exists := blockParams.BlockHeader.References[iotago.WeakParentType]; exists {
blockBuilder.WeakParents(weakParents)
@@ -194,39 +214,71 @@ func (i *BlockIssuer) CreateBlock(ctx context.Context, issuerAccount Account, op
if err != nil {
rmcSlot = 0
}
- rmc, err := i.protocol.MainEngineInstance().Ledger.RMCManager().RMC(rmcSlot)
- if err != nil {
- return nil, ierrors.Wrapf(err, "error loading commitment of slot %d from storage to get RMC", rmcSlot)
- }
+ rmc, err := node.Protocol.MainEngineInstance().Ledger.RMCManager().RMC(rmcSlot)
+ require.NoError(i.Testing, err)
// only set the burned Mana as the last step before signing, so workscore calculation is correct.
blockBuilder.MaxBurnedMana(rmc)
- blockBuilder.Sign(issuerAccount.ID(), issuerAccount.PrivateKey())
+ blockBuilder.Sign(i.AccountID, i.privateKey)
block, err := blockBuilder.Build()
- if err != nil {
- return nil, ierrors.Wrap(err, "error building block")
- }
+ require.NoError(i.Testing, err)
// Make sure we only create syntactically valid blocks.
modelBlock, err := model.BlockFromBlock(block, serix.WithValidation())
- if err != nil {
- return nil, ierrors.Wrap(err, "error serializing block to model block")
- }
+ require.NoError(i.Testing, err)
i.events.BlockConstructed.Trigger(modelBlock)
- return modelBlock, nil
+ modelBlock.ID().RegisterAlias(alias)
+
+ return blocks.NewBlock(modelBlock)
}
-// IssueBlock submits a block to be processed.
-func (i *BlockIssuer) IssueBlock(block *model.Block) error {
- return i.issueBlock(block)
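+// IssueBasicBlock creates a basic block with the given alias, submits it to the given node and fails the test on error.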
+func (i *BlockIssuer) IssueBasicBlock(ctx context.Context, alias string, node *Node, opts ...options.Option[BasicBlockParams]) *blocks.Block {
+ block := i.CreateBasicBlock(ctx, alias, node, opts...)
+
+ require.NoErrorf(i.Testing, i.IssueBlock(block.ModelBlock(), node), "%s > failed to issue block with alias %s", i.Name, alias)
+
+ fmt.Printf("%s > Issued block: %s - slot %d - commitment %s %d - latest finalized slot %d\n", i.Name, block.ID(), block.ID().Slot(), block.SlotCommitmentID(), block.SlotCommitmentID().Slot(), block.ProtocolBlock().LatestFinalizedSlot)
+
+ return block
+}
+
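+// IssueActivity spawns a goroutine that issues one validation block per second until the context is canceled.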
+func (i *BlockIssuer) IssueActivity(ctx context.Context, wg *sync.WaitGroup, startSlot iotago.SlotIndex, node *Node) {
+ issuingTime := node.Protocol.APIForSlot(startSlot).TimeProvider().SlotStartTime(startSlot)
+ start := time.Now()
+
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+
+ fmt.Println(i.Name, "> Starting activity")
+ var counter int
+ for {
+ if ctx.Err() != nil {
+ fmt.Println(i.Name, "> Stopped activity due to canceled context:", ctx.Err())
+ return
+ }
+
+ blockAlias := fmt.Sprintf("%s-activity.%d", i.Name, counter)
+ timeOffset := time.Since(start)
+ i.IssueValidationBlock(ctx, blockAlias,
+ node,
+ WithValidationBlockHeaderOptions(
+ WithIssuingTime(issuingTime.Add(timeOffset)),
+ ),
+ )
+
+ counter++
+ time.Sleep(1 * time.Second)
+ }
+ }()
}
// IssueBlockAndAwaitEvent submits a block to be processed and waits for the event to be triggered.
-func (i *BlockIssuer) IssueBlockAndAwaitEvent(ctx context.Context, block *model.Block, evt *event.Event1[*blocks.Block]) error {
+func (i *BlockIssuer) IssueBlockAndAwaitEvent(ctx context.Context, block *model.Block, node *Node, evt *event.Event1[*blocks.Block]) error {
triggered := make(chan error, 1)
exit := make(chan struct{})
defer close(exit)
@@ -241,7 +293,7 @@ func (i *BlockIssuer) IssueBlockAndAwaitEvent(ctx context.Context, block *model.
}
}, event.WithWorkerPool(i.workerPool)).Unhook()
- defer i.protocol.Events.Engine.Filter.BlockPreFiltered.Hook(func(event *filter.BlockPreFilteredEvent) {
+ defer node.Protocol.Events.Engine.Filter.BlockPreFiltered.Hook(func(event *filter.BlockPreFilteredEvent) {
if block.ID() != event.Block.ID() {
return
}
@@ -251,7 +303,7 @@ func (i *BlockIssuer) IssueBlockAndAwaitEvent(ctx context.Context, block *model.
}
}, event.WithWorkerPool(i.workerPool)).Unhook()
- if err := i.issueBlock(block); err != nil {
+ if err := i.IssueBlock(block, node); err != nil {
return ierrors.Wrapf(err, "failed to issue block %s", block.ID())
}
@@ -267,11 +319,11 @@ func (i *BlockIssuer) IssueBlockAndAwaitEvent(ctx context.Context, block *model.
}
}
-func (i *BlockIssuer) AttachBlock(ctx context.Context, iotaBlock *iotago.ProtocolBlock, optIssuerAccount ...Account) (iotago.BlockID, error) {
+func (i *BlockIssuer) AttachBlock(ctx context.Context, iotaBlock *iotago.ProtocolBlock, node *Node, optIssuerAccount ...Account) (iotago.BlockID, error) {
// if anything changes, need to make a new signature
var resign bool
- apiForVesion, err := i.protocol.APIForVersion(iotaBlock.ProtocolVersion)
+ apiForVesion, err := node.Protocol.APIForVersion(iotaBlock.ProtocolVersion)
if err != nil {
return iotago.EmptyBlockID(), ierrors.Wrapf(ErrBlockAttacherInvalidBlock, "protocolVersion invalid: %d", iotaBlock.ProtocolVersion)
}
@@ -284,8 +336,8 @@ func (i *BlockIssuer) AttachBlock(ctx context.Context, iotaBlock *iotago.Protoco
}
if iotaBlock.SlotCommitmentID == iotago.EmptyCommitmentID {
- iotaBlock.SlotCommitmentID = i.protocol.MainEngineInstance().Storage.Settings().LatestCommitment().Commitment().MustID()
- iotaBlock.LatestFinalizedSlot = i.protocol.MainEngineInstance().Storage.Settings().LatestFinalizedSlot()
+ iotaBlock.SlotCommitmentID = node.Protocol.MainEngineInstance().Storage.Settings().LatestCommitment().Commitment().MustID()
+ iotaBlock.LatestFinalizedSlot = node.Protocol.MainEngineInstance().Storage.Settings().LatestFinalizedSlot()
resign = true
}
@@ -299,7 +351,7 @@ func (i *BlockIssuer) AttachBlock(ctx context.Context, iotaBlock *iotago.Protoco
}
if len(iotaBlock.Parents()) == 0 {
- references, referencesErr := i.getReferences(ctx, innerBlock.Payload)
+ references, referencesErr := i.getReferences(ctx, innerBlock.Payload, node)
if referencesErr != nil {
return iotago.EmptyBlockID(), ierrors.Wrapf(ErrBlockAttacherAttachingNotPossible, "tipselection failed, error: %w", referencesErr)
}
@@ -326,7 +378,7 @@ func (i *BlockIssuer) AttachBlock(ctx context.Context, iotaBlock *iotago.Protoco
resign = true
}
- if err = i.validateReferences(iotaBlock.IssuingTime, iotaBlock.SlotCommitmentID.Slot(), references); err != nil {
+ if err = i.validateReferences(iotaBlock.IssuingTime, iotaBlock.SlotCommitmentID.Slot(), references, node); err != nil {
return iotago.EmptyBlockID(), ierrors.Wrapf(ErrBlockAttacherAttachingNotPossible, "invalid block references, error: %w", err)
}
@@ -335,7 +387,7 @@ func (i *BlockIssuer) AttachBlock(ctx context.Context, iotaBlock *iotago.Protoco
if err != nil {
rmcSlot = 0
}
- rmc, err := i.protocol.MainEngineInstance().Ledger.RMCManager().RMC(rmcSlot)
+ rmc, err := node.Protocol.MainEngineInstance().Ledger.RMCManager().RMC(rmcSlot)
if err != nil {
return iotago.EmptyBlockID(), ierrors.Wrapf(err, "error loading commitment of slot %d from storage to get RMC", rmcSlot)
}
@@ -374,10 +426,10 @@ func (i *BlockIssuer) AttachBlock(ctx context.Context, iotaBlock *iotago.Protoco
return iotago.EmptyBlockID(), ierrors.Wrap(err, "error serializing block to model block")
}
- if !i.optsRateSetterEnabled || i.protocol.MainEngineInstance().Scheduler.IsBlockIssuerReady(modelBlock.ProtocolBlock().IssuerID) {
+ if !i.optsRateSetterEnabled || node.Protocol.MainEngineInstance().Scheduler.IsBlockIssuerReady(modelBlock.ProtocolBlock().IssuerID) {
i.events.BlockConstructed.Trigger(modelBlock)
- if err = i.IssueBlockAndAwaitEvent(ctx, modelBlock, i.protocol.Events.Engine.BlockDAG.BlockAttached); err != nil {
+ if err = i.IssueBlockAndAwaitEvent(ctx, modelBlock, node, node.Protocol.Events.Engine.BlockDAG.BlockAttached); err != nil {
return iotago.EmptyBlockID(), ierrors.Wrap(err, "error issuing model block")
}
}
@@ -385,7 +437,7 @@ func (i *BlockIssuer) AttachBlock(ctx context.Context, iotaBlock *iotago.Protoco
return modelBlock.ID(), nil
}
-func (i *BlockIssuer) setDefaultBlockParams(blockParams *BlockHeaderParams, issuerAccount Account) error {
+func (i *BlockIssuer) setDefaultBlockParams(blockParams *BlockHeaderParams, node *Node) error {
if blockParams.IssuingTime == nil {
issuingTime := time.Now().UTC()
blockParams.IssuingTime = &issuingTime
@@ -393,33 +445,33 @@ func (i *BlockIssuer) setDefaultBlockParams(blockParams *BlockHeaderParams, issu
if blockParams.SlotCommitment == nil {
var err error
- blockParams.SlotCommitment, err = i.getCommitment(i.protocol.CurrentAPI().TimeProvider().SlotFromTime(*blockParams.IssuingTime))
+ blockParams.SlotCommitment, err = i.getCommitment(node.Protocol.CurrentAPI().TimeProvider().SlotFromTime(*blockParams.IssuingTime), node)
if err != nil {
return ierrors.Wrap(err, "error getting commitment")
}
}
if blockParams.LatestFinalizedSlot == nil {
- latestFinalizedSlot := i.protocol.MainEngineInstance().Storage.Settings().LatestFinalizedSlot()
+ latestFinalizedSlot := node.Protocol.MainEngineInstance().Storage.Settings().LatestFinalizedSlot()
blockParams.LatestFinalizedSlot = &latestFinalizedSlot
}
if blockParams.Issuer == nil {
- blockParams.Issuer = NewEd25519Account(issuerAccount.ID(), issuerAccount.PrivateKey())
- } else if blockParams.Issuer.ID() != issuerAccount.ID() {
- return ierrors.Errorf("provided issuer account %s, but issuer provided in the block params is different %s", issuerAccount.ID(), blockParams.Issuer.ID())
+ blockParams.Issuer = NewEd25519Account(i.AccountID, i.privateKey)
+ } else if blockParams.Issuer.ID() != i.AccountID {
+ return ierrors.Errorf("provided issuer account %s, but issuer provided in the block params is different %s", i.AccountID, blockParams.Issuer.ID())
}
- if err := i.validateReferences(*blockParams.IssuingTime, blockParams.SlotCommitment.Slot, blockParams.References); err != nil {
+ if err := i.validateReferences(*blockParams.IssuingTime, blockParams.SlotCommitment.Slot, blockParams.References, node); err != nil {
return ierrors.Wrap(err, "block references invalid")
}
return nil
}
-func (i *BlockIssuer) getCommitment(blockSlot iotago.SlotIndex) (*iotago.Commitment, error) {
- protoParams := i.protocol.CurrentAPI().ProtocolParameters()
- commitment := i.protocol.MainEngineInstance().Storage.Settings().LatestCommitment().Commitment()
+func (i *BlockIssuer) getCommitment(blockSlot iotago.SlotIndex, node *Node) (*iotago.Commitment, error) {
+ protoParams := node.Protocol.CurrentAPI().ProtocolParameters()
+ commitment := node.Protocol.MainEngineInstance().Storage.Settings().LatestCommitment().Commitment()
if blockSlot > commitment.Slot+protoParams.MaxCommittableAge() {
return nil, ierrors.Errorf("can't issue block: block slot %d is too far in the future, latest commitment is %d", blockSlot, commitment.Slot)
@@ -431,7 +483,7 @@ func (i *BlockIssuer) getCommitment(blockSlot iotago.SlotIndex) (*iotago.Commitm
}
commitmentSlot := commitment.Slot - protoParams.MinCommittableAge()
- loadedCommitment, err := i.protocol.MainEngineInstance().Storage.Commitments().Load(commitmentSlot)
+ loadedCommitment, err := node.Protocol.MainEngineInstance().Storage.Commitments().Load(commitmentSlot)
if err != nil {
return nil, ierrors.Wrapf(err, "error loading valid commitment of slot %d according to minCommittableAge from storage", commitmentSlot)
}
@@ -442,18 +494,18 @@ func (i *BlockIssuer) getCommitment(blockSlot iotago.SlotIndex) (*iotago.Commitm
return commitment, nil
}
-func (i *BlockIssuer) getReferences(ctx context.Context, p iotago.Payload, strongParentsCountOpt ...int) (model.ParentReferences, error) {
+func (i *BlockIssuer) getReferences(ctx context.Context, p iotago.Payload, node *Node, strongParentsCountOpt ...int) (model.ParentReferences, error) {
strongParentsCount := iotago.BlockMaxParents
if len(strongParentsCountOpt) > 0 && strongParentsCountOpt[0] > 0 {
strongParentsCount = strongParentsCountOpt[0]
}
- return i.getReferencesWithRetry(ctx, p, strongParentsCount)
+ return i.getReferencesWithRetry(ctx, p, strongParentsCount, node)
}
-func (i *BlockIssuer) validateReferences(issuingTime time.Time, slotCommitmentIndex iotago.SlotIndex, references model.ParentReferences) error {
+func (i *BlockIssuer) validateReferences(issuingTime time.Time, slotCommitmentIndex iotago.SlotIndex, references model.ParentReferences, node *Node) error {
for _, parent := range lo.Flatten(lo.Map(lo.Values(references), func(ds iotago.BlockIDs) []iotago.BlockID { return ds })) {
- b, exists := i.protocol.MainEngineInstance().BlockFromCache(parent)
+ b, exists := node.Protocol.MainEngineInstance().BlockFromCache(parent)
if !exists {
return ierrors.Errorf("cannot issue block if the parents are not known: %s", parent)
}
@@ -469,8 +521,8 @@ func (i *BlockIssuer) validateReferences(issuingTime time.Time, slotCommitmentIn
return nil
}
-func (i *BlockIssuer) issueBlock(block *model.Block) error {
- if err := i.protocol.IssueBlock(block); err != nil {
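+// IssueBlock submits the given block to the node's protocol for processing.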
+func (i *BlockIssuer) IssueBlock(block *model.Block, node *Node) error {
+ if err := node.Protocol.IssueBlock(block); err != nil {
return err
}
@@ -479,16 +531,23 @@ func (i *BlockIssuer) issueBlock(block *model.Block) error {
return nil
}
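+
+// CopyIdentityFromBlockIssuer copies the key pair, account ID and validator flag from the given BlockIssuer.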
+func (i *BlockIssuer) CopyIdentityFromBlockIssuer(otherBlockIssuer *BlockIssuer) {
+ i.privateKey = otherBlockIssuer.privateKey
+ i.PublicKey = otherBlockIssuer.PublicKey
+ i.AccountID = otherBlockIssuer.AccountID
+ i.Validator = otherBlockIssuer.Validator
+}
+
// getReferencesWithRetry tries to get references for the given payload. If it fails, it will retry at regular intervals until
// the timeout is reached.
-func (i *BlockIssuer) getReferencesWithRetry(ctx context.Context, _ iotago.Payload, parentsCount int) (references model.ParentReferences, err error) {
+func (i *BlockIssuer) getReferencesWithRetry(ctx context.Context, _ iotago.Payload, parentsCount int, node *Node) (references model.ParentReferences, err error) {
timeout := time.NewTimer(i.optsTipSelectionTimeout)
interval := time.NewTicker(i.optsTipSelectionRetryInterval)
defer timeutil.CleanupTimer(timeout)
defer timeutil.CleanupTicker(interval)
for {
- references = i.protocol.MainEngineInstance().TipSelection.SelectTips(parentsCount)
+ references = node.Protocol.MainEngineInstance().TipSelection.SelectTips(parentsCount)
if len(references[iotago.StrongParentType]) > 0 {
return references, nil
}
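
Reviewer note (not part of this diff): a minimal sketch of driving the relocated issuer from a test, assuming an already-initialized and running mock.Node; the names issuer1/block1 are illustrative.

package mock_test

import (
	"context"
	"testing"

	"github.com/iotaledger/iota-core/pkg/testsuite/mock"
)

// issueSketch is a sketch only: it assumes a mock.Node wired into a running
// protocol instance.
func issueSketch(t *testing.T, node *mock.Node) {
	// NewBlockIssuer generates a fresh ed25519 identity for the issuer.
	issuer := mock.NewBlockIssuer(t, "issuer1", false)

	// IssueBasicBlock creates, signs and submits the block; failures abort
	// the test through require instead of returning an error.
	_ = issuer.IssueBasicBlock(context.Background(), "block1", node)
}
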
diff --git a/pkg/blockfactory/events.go b/pkg/testsuite/mock/events.go
similarity index 97%
rename from pkg/blockfactory/events.go
rename to pkg/testsuite/mock/events.go
index b2a129519..510173b4e 100644
--- a/pkg/blockfactory/events.go
+++ b/pkg/testsuite/mock/events.go
@@ -1,4 +1,4 @@
-package blockfactory
+package mock
import (
"github.com/iotaledger/hive.go/runtime/event"
diff --git a/pkg/testsuite/mock/node.go b/pkg/testsuite/mock/node.go
index 3d62cb05c..53d6e0c61 100644
--- a/pkg/testsuite/mock/node.go
+++ b/pkg/testsuite/mock/node.go
@@ -4,7 +4,6 @@ import (
"context"
"crypto/ed25519"
"fmt"
- "sync"
"sync/atomic"
"testing"
"time"
@@ -18,7 +17,6 @@ import (
"github.com/iotaledger/hive.go/runtime/options"
"github.com/iotaledger/hive.go/runtime/syncutils"
"github.com/iotaledger/hive.go/runtime/workerpool"
- "github.com/iotaledger/iota-core/pkg/blockfactory"
"github.com/iotaledger/iota-core/pkg/core/account"
"github.com/iotaledger/iota-core/pkg/model"
"github.com/iotaledger/iota-core/pkg/protocol"
@@ -51,16 +49,11 @@ type Node struct {
Testing *testing.T
Name string
- Validator bool
+ Validator *BlockIssuer
ctx context.Context
ctxCancel context.CancelFunc
- blockIssuer *blockfactory.BlockIssuer
-
- privateKey ed25519.PrivateKey
- PubKey ed25519.PublicKey
- AccountID iotago.AccountID
PeerID peer.ID
protocolParametersHash iotago.Identifier
highestSupportedVersion iotago.Version
@@ -91,15 +84,21 @@ func NewNode(t *testing.T, net *Network, partition string, name string, validato
peerID := lo.PanicOnErr(peer.IDFromPrivateKey(lo.PanicOnErr(p2pcrypto.UnmarshalEd25519PrivateKey(priv))))
RegisterIDAlias(peerID, name)
+ var validatorBlockIssuer *BlockIssuer
+ if validator {
+ validatorBlockIssuer = NewBlockIssuer(t, name, validator)
+ }
+
return &Node{
Testing: t,
- Name: name,
- Validator: validator,
- PubKey: pub,
- privateKey: priv,
- AccountID: accountID,
- PeerID: peerID,
+ Name: name,
+
+ Validator: validatorBlockIssuer,
+
+ PeerID: peerID,
Partition: partition,
Endpoint: net.JoinWithEndpointID(peerID, partition),
@@ -109,6 +108,10 @@ func NewNode(t *testing.T, net *Network, partition string, name string, validato
}
}
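+
+// IsValidator returns whether the node was created with a validator block issuer.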
+func (n *Node) IsValidator() bool {
+ return n.Validator != nil
+}
+
func (n *Node) Initialize(failOnBlockFiltered bool, opts ...options.Option[protocol.Protocol]) {
n.Protocol = protocol.New(n.Workers.CreateGroup("Protocol"),
n.Endpoint,
@@ -118,8 +121,6 @@ func (n *Node) Initialize(failOnBlockFiltered bool, opts ...options.Option[proto
n.hookEvents()
n.hookLogging(failOnBlockFiltered)
- n.blockIssuer = blockfactory.New(n.Protocol, blockfactory.WithTipSelectionTimeout(3*time.Second), blockfactory.WithTipSelectionRetryInterval(time.Millisecond*100))
-
n.ctx, n.ctxCancel = context.WithCancel(context.Background())
started := make(chan struct{}, 1)
@@ -449,13 +450,6 @@ func (n *Node) Shutdown() {
<-stopped
}
-func (n *Node) CopyIdentityFromNode(otherNode *Node) {
- n.AccountID = otherNode.AccountID
- n.PubKey = otherNode.PubKey
- n.privateKey = otherNode.privateKey
- n.Validator = otherNode.Validator
-}
-
func (n *Node) ProtocolParametersHash() iotago.Identifier {
if n.protocolParametersHash == iotago.EmptyIdentifier {
return lo.PanicOnErr(n.Protocol.CurrentAPI().ProtocolParameters().Hash())
@@ -480,80 +474,6 @@ func (n *Node) SetHighestSupportedVersion(version iotago.Version) {
n.highestSupportedVersion = version
}
-func (n *Node) CreateValidationBlock(ctx context.Context, alias string, opts ...options.Option[blockfactory.ValidatorBlockParams]) *blocks.Block {
- modelBlock, err := n.blockIssuer.CreateValidationBlock(ctx, blockfactory.NewEd25519Account(n.AccountID, n.privateKey), opts...)
- require.NoError(n.Testing, err)
-
- modelBlock.ID().RegisterAlias(alias)
-
- return blocks.NewBlock(modelBlock)
-}
-
-func (n *Node) CreateBlock(ctx context.Context, alias string, opts ...options.Option[blockfactory.BasicBlockParams]) *blocks.Block {
- modelBlock, err := n.blockIssuer.CreateBlock(ctx, blockfactory.NewEd25519Account(n.AccountID, n.privateKey), opts...)
- require.NoError(n.Testing, err)
-
- modelBlock.ID().RegisterAlias(alias)
-
- return blocks.NewBlock(modelBlock)
-}
-
-func (n *Node) IssueBlock(ctx context.Context, alias string, opts ...options.Option[blockfactory.BasicBlockParams]) *blocks.Block {
- block := n.CreateBlock(ctx, alias, opts...)
-
- require.NoErrorf(n.Testing, n.blockIssuer.IssueBlock(block.ModelBlock()), "%s > failed to issue block with alias %s", n.Name, alias)
-
- fmt.Printf("%s > Issued block: %s - slot %d - commitment %s %d - latest finalized slot %d\n", n.Name, block.ID(), block.ID().Slot(), block.SlotCommitmentID(), block.SlotCommitmentID().Slot(), block.ProtocolBlock().LatestFinalizedSlot)
-
- return block
-}
-
-func (n *Node) IssueExistingBlock(block *blocks.Block) {
- require.NoErrorf(n.Testing, n.blockIssuer.IssueBlock(block.ModelBlock()), "%s > failed to issue block with alias %s", n.Name, block.ID().Alias())
-
- fmt.Printf("%s > Issued block: %s - slot %d - commitment %s %d - latest finalized slot %d\n", n.Name, block.ID(), block.ID().Slot(), block.SlotCommitmentID(), block.SlotCommitmentID().Slot(), block.ProtocolBlock().LatestFinalizedSlot)
-}
-
-func (n *Node) IssueValidationBlock(ctx context.Context, alias string, opts ...options.Option[blockfactory.ValidatorBlockParams]) *blocks.Block {
- block := n.CreateValidationBlock(ctx, alias, opts...)
-
- require.NoError(n.Testing, n.blockIssuer.IssueBlock(block.ModelBlock()))
-
- fmt.Printf("Issued block: %s - slot %d - commitment %s %d - latest finalized slot %d\n", block.ID(), block.ID().Slot(), block.SlotCommitmentID(), block.SlotCommitmentID().Slot(), block.ProtocolBlock().LatestFinalizedSlot)
-
- return block
-}
-
-func (n *Node) IssueActivity(ctx context.Context, wg *sync.WaitGroup, startSlot iotago.SlotIndex) {
- issuingTime := n.Protocol.APIForSlot(startSlot).TimeProvider().SlotStartTime(startSlot)
- start := time.Now()
-
- wg.Add(1)
- go func() {
- defer wg.Done()
-
- fmt.Println(n.Name, "> Starting activity")
- var counter int
- for {
- if ctx.Err() != nil {
- fmt.Println(n.Name, "> Stopped activity due to canceled context:", ctx.Err())
- return
- }
-
- blockAlias := fmt.Sprintf("%s-activity.%d", n.Name, counter)
- timeOffset := time.Since(start)
- n.IssueValidationBlock(ctx, blockAlias,
- blockfactory.WithValidationBlockHeaderOptions(
- blockfactory.WithIssuingTime(issuingTime.Add(timeOffset)),
- ),
- )
-
- counter++
- time.Sleep(1 * time.Second)
- }
- }()
-}
-
func (n *Node) ForkDetectedCount() int {
return int(n.forkDetectedCount.Load())
}
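
Reviewer note (not part of this diff): a sketch of the new validator gate; node.Validator replaces the old per-node identity fields and is nil on non-validator nodes.

package mock_test

import (
	"context"

	"github.com/iotaledger/iota-core/pkg/testsuite/mock"
)

// issueIfValidator is a sketch only: callers gate on IsValidator before
// touching node.Validator, which is nil for non-validator nodes.
func issueIfValidator(node *mock.Node) {
	if node.IsValidator() {
		node.Validator.IssueValidationBlock(context.Background(), "v-block", node)
	}
}
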
diff --git a/pkg/testsuite/testsuite.go b/pkg/testsuite/testsuite.go
index ab5e67b5a..5b61e8afb 100644
--- a/pkg/testsuite/testsuite.go
+++ b/pkg/testsuite/testsuite.go
@@ -35,9 +35,10 @@ type TestSuite struct {
fakeTesting *testing.T
network *mock.Network
- Directory *utils.Directory
- nodes *orderedmap.OrderedMap[string, *mock.Node]
- running bool
+ Directory *utils.Directory
+ nodes *orderedmap.OrderedMap[string, *mock.Node]
+ blockIssuers *orderedmap.OrderedMap[string, *mock.BlockIssuer]
+ running bool
snapshotPath string
blocks *shrinkingmap.ShrinkingMap[string, *blocks.Block]
@@ -79,6 +80,7 @@ func NewTestSuite(testingT *testing.T, opts ...options.Option[TestSuite]) *TestS
network: mock.NewNetwork(),
Directory: utils.NewDirectory(testingT.TempDir()),
nodes: orderedmap.New[string, *mock.Node](),
+ blockIssuers: orderedmap.New[string, *mock.BlockIssuer](),
blocks: shrinkingmap.New[string, *blocks.Block](),
automaticTransactionIssuingCounters: *shrinkingmap.New[string, int](),
@@ -347,12 +349,12 @@ func (t *TestSuite) addNodeToPartition(name string, partition string, validator
if len(optAmount) > 0 {
amount = optAmount[0]
}
- if amount > 0 {
+ if amount > 0 && validator {
accountDetails := snapshotcreator.AccountDetails{
- Address: iotago.Ed25519AddressFromPubKey(node.PubKey),
+ Address: iotago.Ed25519AddressFromPubKey(node.Validator.PublicKey),
Amount: amount,
Mana: iotago.Mana(amount),
- IssuerKey: iotago.Ed25519PublicKeyBlockIssuerKeyFromPublicKey(ed25519.PublicKey(node.PubKey)),
+ IssuerKey: iotago.Ed25519PublicKeyBlockIssuerKeyFromPublicKey(ed25519.PublicKey(node.Validator.PublicKey)),
ExpirySlot: iotago.MaxSlotIndex,
BlockIssuanceCredits: iotago.MaxBlockIssuanceCredits / 2,
}
@@ -388,6 +390,37 @@ func (t *TestSuite) RemoveNode(name string) {
t.nodes.Delete(name)
}
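+
+// AddBasicBlockIssuer creates a non-validator block issuer with the given name and registers its account details, with the given or default block issuance credits, in the test suite's snapshot options.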
+func (t *TestSuite) AddBasicBlockIssuer(name string, blockIssuanceCredits ...iotago.BlockIssuanceCredits) *mock.BlockIssuer {
+ newBlockIssuer := mock.NewBlockIssuer(t.Testing, name, false)
+ t.blockIssuers.Set(name, newBlockIssuer)
+ var bic iotago.BlockIssuanceCredits
+ if len(blockIssuanceCredits) == 0 {
+ bic = iotago.MaxBlockIssuanceCredits / 2
+ } else {
+ bic = blockIssuanceCredits[0]
+ }
+
+ accountDetails := snapshotcreator.AccountDetails{
+ Address: iotago.Ed25519AddressFromPubKey(newBlockIssuer.PublicKey),
+ Amount: MinIssuerAccountAmount,
+ Mana: iotago.Mana(MinIssuerAccountAmount),
+ IssuerKey: iotago.Ed25519PublicKeyBlockIssuerKeyFromPublicKey(ed25519.PublicKey(newBlockIssuer.PublicKey)),
+ ExpirySlot: iotago.MaxSlotIndex,
+ BlockIssuanceCredits: bic,
+ }
+
+ t.optsAccounts = append(t.optsAccounts, accountDetails)
+
+ return newBlockIssuer
+}
+
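+// DefaultBasicBlockIssuer returns the block issuer registered under the name "default", failing the test if it does not exist.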
+func (t *TestSuite) DefaultBasicBlockIssuer() *mock.BlockIssuer {
+ defaultBasicBlockIssuer, exists := t.blockIssuers.Get("default")
+ require.True(t.Testing, exists, "default block issuer not found")
+
+ return defaultBasicBlockIssuer
+}
+
func (t *TestSuite) Run(failOnBlockFiltered bool, nodesOptions ...map[string][]options.Option[protocol.Protocol]) {
t.mutex.Lock()
defer t.mutex.Unlock()
@@ -449,7 +482,7 @@ func (t *TestSuite) Run(failOnBlockFiltered bool, nodesOptions ...map[string][]o
func (t *TestSuite) Validators() []*mock.Node {
validators := make([]*mock.Node, 0)
t.nodes.ForEach(func(_ string, node *mock.Node) bool {
- if node.Validator {
+ if node.IsValidator() {
validators = append(validators, node)
}
@@ -459,6 +492,20 @@ func (t *TestSuite) Validators() []*mock.Node {
return validators
}
+// BlockIssuersForNodes returns a block issuer for each of the given nodes. If a node is a validator, its own validator block issuer is used; otherwise the test suite's default basic block issuer is used.
+func (t *TestSuite) BlockIssuersForNodes(nodes []*mock.Node) []*mock.BlockIssuer {
+ blockIssuers := make([]*mock.BlockIssuer, 0)
+ for _, node := range nodes {
+ if node.IsValidator() {
+ blockIssuers = append(blockIssuers, node.Validator)
+ } else {
+ blockIssuers = append(blockIssuers, t.DefaultBasicBlockIssuer())
+ }
+ }
+
+ return blockIssuers
+}
+
// Eventually asserts that given condition will be met in opts.waitFor time,
// periodically checking target function each opts.tick.
//
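
Reviewer note (not part of this diff): a sketch of registering the shared basic issuer; the call order relative to Run matters because the account is injected via the snapshot options.

package testsuite_test

import (
	"testing"

	"github.com/iotaledger/iota-core/pkg/testsuite"
)

// TestDefaultIssuerSketch is a sketch only: the issuer must be registered
// before Run so the queued account details make it into the snapshot;
// "default" is the name that DefaultBasicBlockIssuer looks up.
func TestDefaultIssuerSketch(t *testing.T) {
	ts := testsuite.NewTestSuite(t)

	ts.AddBasicBlockIssuer("default")
	ts.Run(true)

	_ = ts.DefaultBasicBlockIssuer()
}
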
diff --git a/pkg/testsuite/testsuite_issue_blocks.go b/pkg/testsuite/testsuite_issue_blocks.go
index 9a1941226..08c727d5a 100644
--- a/pkg/testsuite/testsuite_issue_blocks.go
+++ b/pkg/testsuite/testsuite_issue_blocks.go
@@ -9,14 +9,13 @@ import (
"github.com/iotaledger/hive.go/lo"
"github.com/iotaledger/hive.go/runtime/options"
- "github.com/iotaledger/iota-core/pkg/blockfactory"
"github.com/iotaledger/iota-core/pkg/protocol/engine/blocks"
"github.com/iotaledger/iota-core/pkg/testsuite/mock"
iotago "github.com/iotaledger/iota.go/v4"
)
-func (t *TestSuite) assertParentsCommitmentExistFromBlockOptions(blockOpts []options.Option[blockfactory.BlockHeaderParams], node *mock.Node) {
- params := options.Apply(&blockfactory.BlockHeaderParams{}, blockOpts)
+func (t *TestSuite) assertParentsCommitmentExistFromBlockOptions(blockOpts []options.Option[mock.BlockHeaderParams], node *mock.Node) {
+ params := options.Apply(&mock.BlockHeaderParams{}, blockOpts)
parents := params.References[iotago.StrongParentType]
parents = append(parents, params.References[iotago.WeakParentType]...)
parents = append(parents, params.References[iotago.ShallowLikeParentType]...)
@@ -26,8 +25,8 @@ func (t *TestSuite) assertParentsCommitmentExistFromBlockOptions(blockOpts []opt
}
}
-func (t *TestSuite) assertParentsExistFromBlockOptions(blockOpts []options.Option[blockfactory.BlockHeaderParams], node *mock.Node) {
- params := options.Apply(&blockfactory.BlockHeaderParams{}, blockOpts)
+func (t *TestSuite) assertParentsExistFromBlockOptions(blockOpts []options.Option[mock.BlockHeaderParams], node *mock.Node) {
+ params := options.Apply(&mock.BlockHeaderParams{}, blockOpts)
parents := params.References[iotago.StrongParentType]
parents = append(parents, params.References[iotago.WeakParentType]...)
parents = append(parents, params.References[iotago.ShallowLikeParentType]...)
@@ -35,16 +34,16 @@ func (t *TestSuite) assertParentsExistFromBlockOptions(blockOpts []options.Optio
t.AssertBlocksExist(t.Blocks(lo.Map(parents, func(id iotago.BlockID) string { return id.Alias() })...), true, node)
}
-func (t *TestSuite) limitParentsCountInBlockOptions(blockOpts []options.Option[blockfactory.BlockHeaderParams], maxCount int) []options.Option[blockfactory.BlockHeaderParams] {
- params := options.Apply(&blockfactory.BlockHeaderParams{}, blockOpts)
+func (t *TestSuite) limitParentsCountInBlockOptions(blockOpts []options.Option[mock.BlockHeaderParams], maxCount int) []options.Option[mock.BlockHeaderParams] {
+ params := options.Apply(&mock.BlockHeaderParams{}, blockOpts)
if len(params.References[iotago.StrongParentType]) > maxCount {
- blockOpts = append(blockOpts, blockfactory.WithStrongParents(params.References[iotago.StrongParentType][:maxCount]...))
+ blockOpts = append(blockOpts, mock.WithStrongParents(params.References[iotago.StrongParentType][:maxCount]...))
}
if len(params.References[iotago.WeakParentType]) > maxCount {
- blockOpts = append(blockOpts, blockfactory.WithWeakParents(params.References[iotago.WeakParentType][:maxCount]...))
+ blockOpts = append(blockOpts, mock.WithWeakParents(params.References[iotago.WeakParentType][:maxCount]...))
}
if len(params.References[iotago.ShallowLikeParentType]) > maxCount {
- blockOpts = append(blockOpts, blockfactory.WithShallowLikeParents(params.References[iotago.ShallowLikeParentType][:maxCount]...))
+ blockOpts = append(blockOpts, mock.WithShallowLikeParents(params.References[iotago.ShallowLikeParentType][:maxCount]...))
}
return blockOpts
@@ -62,16 +61,16 @@ func (t *TestSuite) registerBlock(alias string, block *blocks.Block) {
block.ID().RegisterAlias(alias)
}
-func (t *TestSuite) CreateBlock(alias string, node *mock.Node, blockOpts ...options.Option[blockfactory.BasicBlockParams]) {
+func (t *TestSuite) CreateBasicBlock(alias string, blockIssuer *mock.BlockIssuer, node *mock.Node, blockOpts ...options.Option[mock.BasicBlockParams]) {
t.mutex.Lock()
defer t.mutex.Unlock()
- block := node.CreateBlock(context.Background(), alias, blockOpts...)
+ block := blockIssuer.CreateBasicBlock(context.Background(), alias, node, blockOpts...)
t.registerBlock(alias, block)
}
-func (t *TestSuite) IssueBlockAtSlot(alias string, slot iotago.SlotIndex, slotCommitment *iotago.Commitment, node *mock.Node, parents ...iotago.BlockID) *blocks.Block {
+func (t *TestSuite) IssueValidationBlockAtSlot(alias string, slot iotago.SlotIndex, slotCommitment *iotago.Commitment, node *mock.Node, parents ...iotago.BlockID) *blocks.Block {
t.AssertBlocksExist(t.Blocks(lo.Map(parents, func(id iotago.BlockID) string { return id.Alias() })...), true, node)
t.mutex.Lock()
@@ -81,20 +80,16 @@ func (t *TestSuite) IssueBlockAtSlot(alias string, slot iotago.SlotIndex, slotCo
issuingTime := timeProvider.SlotStartTime(slot).Add(time.Duration(t.uniqueBlockTimeCounter.Add(1)))
require.Truef(t.Testing, issuingTime.Before(time.Now()), "node: %s: issued block (%s, slot: %d) is in the current (%s, slot: %d) or future slot", node.Name, issuingTime, slot, time.Now(), timeProvider.SlotFromTime(time.Now()))
+ require.Truef(t.Testing, node.IsValidator(), "node: %s: is not a validator node", node.Name)
- var block *blocks.Block
- if node.Validator {
- block = node.IssueValidationBlock(context.Background(), alias, blockfactory.WithValidationBlockHeaderOptions(blockfactory.WithIssuingTime(issuingTime), blockfactory.WithSlotCommitment(slotCommitment), blockfactory.WithStrongParents(parents...)))
- } else {
- block = node.IssueBlock(context.Background(), alias, blockfactory.WithBasicBlockHeader(blockfactory.WithIssuingTime(issuingTime), blockfactory.WithSlotCommitment(slotCommitment), blockfactory.WithStrongParents(parents...)))
- }
+ block := node.Validator.IssueValidationBlock(context.Background(), alias, node, mock.WithValidationBlockHeaderOptions(mock.WithIssuingTime(issuingTime), mock.WithSlotCommitment(slotCommitment), mock.WithStrongParents(parents...)))
t.registerBlock(alias, block)
return block
}
-func (t *TestSuite) IssueExistingBlock(alias string, node *mock.Node) {
+func (t *TestSuite) IssueExistingBlock(alias string, blockIssuer *mock.BlockIssuer, node *mock.Node) {
t.mutex.Lock()
defer t.mutex.Unlock()
@@ -102,32 +97,32 @@ func (t *TestSuite) IssueExistingBlock(alias string, node *mock.Node) {
require.True(t.Testing, exists)
require.NotNil(t.Testing, block)
- node.IssueExistingBlock(block)
+ require.NoError(t.Testing, blockIssuer.IssueBlock(block.ModelBlock(), node))
}
-func (t *TestSuite) IssueValidationBlockWithOptions(alias string, node *mock.Node, blockOpts ...options.Option[blockfactory.ValidatorBlockParams]) *blocks.Block {
+func (t *TestSuite) IssueValidationBlockWithOptions(alias string, blockIssuer *mock.BlockIssuer, node *mock.Node, blockOpts ...options.Option[mock.ValidatorBlockParams]) *blocks.Block {
t.mutex.Lock()
defer t.mutex.Unlock()
- block := node.IssueValidationBlock(context.Background(), alias, blockOpts...)
+ block := blockIssuer.IssueValidationBlock(context.Background(), alias, node, blockOpts...)
t.registerBlock(alias, block)
return block
}
-func (t *TestSuite) IssueBasicBlockWithOptions(alias string, node *mock.Node, blockOpts ...options.Option[blockfactory.BasicBlockParams]) *blocks.Block {
+func (t *TestSuite) IssueBasicBlockWithOptions(alias string, blockIssuer *mock.BlockIssuer, node *mock.Node, blockOpts ...options.Option[mock.BasicBlockParams]) *blocks.Block {
t.mutex.Lock()
defer t.mutex.Unlock()
- block := node.IssueBlock(context.Background(), alias, blockOpts...)
+ block := blockIssuer.IssueBasicBlock(context.Background(), alias, node, blockOpts...)
t.registerBlock(alias, block)
return block
}
-func (t *TestSuite) IssueBlockAtSlotWithOptions(alias string, slot iotago.SlotIndex, slotCommitment *iotago.Commitment, node *mock.Node, payload iotago.Payload, blockOpts ...options.Option[blockfactory.BlockHeaderParams]) *blocks.Block {
+func (t *TestSuite) IssueBasicBlockAtSlotWithOptions(alias string, slot iotago.SlotIndex, slotCommitment *iotago.Commitment, blockIssuer *mock.BlockIssuer, node *mock.Node, payload iotago.Payload, blockOpts ...options.Option[mock.BlockHeaderParams]) *blocks.Block {
t.assertParentsExistFromBlockOptions(blockOpts, node)
t.mutex.Lock()
@@ -138,62 +133,63 @@ func (t *TestSuite) IssueBlockAtSlotWithOptions(alias string, slot iotago.SlotIn
require.Truef(t.Testing, issuingTime.Before(time.Now()), "node: %s: issued block (%s, slot: %d) is in the current (%s, slot: %d) or future slot", node.Name, issuingTime, slot, time.Now(), timeProvider.SlotFromTime(time.Now()))
- block := node.IssueBlock(context.Background(), alias, blockfactory.WithBasicBlockHeader(append(blockOpts, blockfactory.WithIssuingTime(issuingTime), blockfactory.WithSlotCommitment(slotCommitment))...), blockfactory.WithPayload(payload))
+ block := blockIssuer.IssueBasicBlock(context.Background(), alias, node, mock.WithBasicBlockHeader(append(blockOpts, mock.WithIssuingTime(issuingTime), mock.WithSlotCommitment(slotCommitment))...), mock.WithPayload(payload))
t.registerBlock(alias, block)
return block
}
-func (t *TestSuite) IssuePayloadWithOptions(alias string, node *mock.Node, payload iotago.Payload, blockHeaderOpts ...options.Option[blockfactory.BlockHeaderParams]) *blocks.Block {
+func (t *TestSuite) IssuePayloadWithOptions(alias string, blockIssuer *mock.BlockIssuer, node *mock.Node, payload iotago.Payload, blockHeaderOpts ...options.Option[mock.BlockHeaderParams]) *blocks.Block {
t.assertParentsExistFromBlockOptions(blockHeaderOpts, node)
t.mutex.Lock()
defer t.mutex.Unlock()
- block := node.IssueBlock(context.Background(), alias, blockfactory.WithPayload(payload), blockfactory.WithBasicBlockHeader(blockHeaderOpts...))
+ block := blockIssuer.IssueBasicBlock(context.Background(), alias, node, mock.WithPayload(payload), mock.WithBasicBlockHeader(blockHeaderOpts...))
t.registerBlock(alias, block)
return block
}
-func (t *TestSuite) IssueValidationBlock(alias string, node *mock.Node, blockHeaderOpts ...options.Option[blockfactory.BlockHeaderParams]) *blocks.Block {
+func (t *TestSuite) IssueValidationBlock(alias string, node *mock.Node, blockHeaderOpts ...options.Option[mock.BlockHeaderParams]) *blocks.Block {
t.assertParentsExistFromBlockOptions(blockHeaderOpts, node)
- require.Truef(t.Testing, node.Validator, "node: %s: is not a validator node", node.Name)
+ require.Truef(t.Testing, node.IsValidator(), "node: %s: is not a validator node", node.Name)
t.mutex.Lock()
defer t.mutex.Unlock()
- block := node.IssueValidationBlock(context.Background(), alias, blockfactory.WithValidationBlockHeaderOptions(blockHeaderOpts...))
+ block := node.Validator.IssueValidationBlock(context.Background(), alias, node, mock.WithValidationBlockHeaderOptions(blockHeaderOpts...))
t.registerBlock(alias, block)
return block
}
-func (t *TestSuite) IssueBlockRowInSlot(prefix string, slot iotago.SlotIndex, row int, parentsPrefixAlias string, nodes []*mock.Node, issuingOptions map[string][]options.Option[blockfactory.BlockHeaderParams]) []*blocks.Block {
+func (t *TestSuite) IssueBlockRowInSlot(prefix string, slot iotago.SlotIndex, row int, parentsPrefixAlias string, nodes []*mock.Node, issuingOptions map[string][]options.Option[mock.BlockHeaderParams]) []*blocks.Block {
+ blockIssuers := t.BlockIssuersForNodes(nodes)
blocksIssued := make([]*blocks.Block, 0, len(nodes))
strongParents := t.BlockIDsWithPrefix(parentsPrefixAlias)
- issuingOptionsCopy := lo.MergeMaps(make(map[string][]options.Option[blockfactory.BlockHeaderParams]), issuingOptions)
+ issuingOptionsCopy := lo.MergeMaps(make(map[string][]options.Option[mock.BlockHeaderParams]), issuingOptions)
- for _, node := range nodes {
+ for index, node := range nodes {
blockAlias := fmt.Sprintf("%s%d.%d-%s", prefix, slot, row, node.Name)
- issuingOptionsCopy[node.Name] = append(issuingOptionsCopy[node.Name], blockfactory.WithStrongParents(strongParents...))
+ issuingOptionsCopy[node.Name] = append(issuingOptionsCopy[node.Name], mock.WithStrongParents(strongParents...))
timeProvider := t.API.TimeProvider()
issuingTime := timeProvider.SlotStartTime(slot).Add(time.Duration(t.uniqueBlockTimeCounter.Add(1)))
require.Truef(t.Testing, issuingTime.Before(time.Now()), "node: %s: issued block (%s, slot: %d) is in the current (%s, slot: %d) or future slot", node.Name, issuingTime, slot, time.Now(), timeProvider.SlotFromTime(time.Now()))
var b *blocks.Block
- if node.Validator {
- blockHeaderOptions := append(issuingOptionsCopy[node.Name], blockfactory.WithIssuingTime(issuingTime))
+ if blockIssuers[index].Validator {
+ blockHeaderOptions := append(issuingOptionsCopy[node.Name], mock.WithIssuingTime(issuingTime))
t.assertParentsCommitmentExistFromBlockOptions(blockHeaderOptions, node)
t.assertParentsExistFromBlockOptions(blockHeaderOptions, node)
- b = t.IssueValidationBlockWithOptions(blockAlias, node, blockfactory.WithValidationBlockHeaderOptions(blockHeaderOptions...), blockfactory.WithHighestSupportedVersion(node.HighestSupportedVersion()), blockfactory.WithProtocolParametersHash(node.ProtocolParametersHash()))
+ b = t.IssueValidationBlockWithOptions(blockAlias, blockIssuers[index], node, mock.WithValidationBlockHeaderOptions(blockHeaderOptions...), mock.WithHighestSupportedVersion(node.HighestSupportedVersion()), mock.WithProtocolParametersHash(node.ProtocolParametersHash()))
} else {
txCount := t.automaticTransactionIssuingCounters.Compute(node.Partition, func(currentValue int, exists bool) int {
return currentValue + 1
@@ -208,11 +204,11 @@ func (t *TestSuite) IssueBlockRowInSlot(prefix string, slot iotago.SlotIndex, ro
issuingOptionsCopy[node.Name] = t.limitParentsCountInBlockOptions(issuingOptionsCopy[node.Name], iotago.BlockMaxParents)
- blockHeaderOptions := append(issuingOptionsCopy[node.Name], blockfactory.WithIssuingTime(issuingTime))
+ blockHeaderOptions := append(issuingOptionsCopy[node.Name], mock.WithIssuingTime(issuingTime))
t.assertParentsCommitmentExistFromBlockOptions(blockHeaderOptions, node)
t.assertParentsExistFromBlockOptions(blockHeaderOptions, node)
- b = t.IssueBasicBlockWithOptions(blockAlias, node, blockfactory.WithPayload(tx), blockfactory.WithBasicBlockHeader(blockHeaderOptions...))
+ b = t.IssueBasicBlockWithOptions(blockAlias, blockIssuers[index], node, mock.WithPayload(tx), mock.WithBasicBlockHeader(blockHeaderOptions...))
}
blocksIssued = append(blocksIssued, b)
}
@@ -220,7 +216,7 @@ func (t *TestSuite) IssueBlockRowInSlot(prefix string, slot iotago.SlotIndex, ro
return blocksIssued
}
-func (t *TestSuite) IssueBlockRowsInSlot(prefix string, slot iotago.SlotIndex, rows int, initialParentsPrefixAlias string, nodes []*mock.Node, issuingOptions map[string][]options.Option[blockfactory.BlockHeaderParams]) (allBlocksIssued []*blocks.Block, lastBlockRow []*blocks.Block) {
+func (t *TestSuite) IssueBlockRowsInSlot(prefix string, slot iotago.SlotIndex, rows int, initialParentsPrefixAlias string, nodes []*mock.Node, issuingOptions map[string][]options.Option[mock.BlockHeaderParams]) (allBlocksIssued []*blocks.Block, lastBlockRow []*blocks.Block) {
var blocksIssued, lastBlockRowIssued []*blocks.Block
parentsPrefixAlias := initialParentsPrefixAlias
@@ -236,7 +232,7 @@ func (t *TestSuite) IssueBlockRowsInSlot(prefix string, slot iotago.SlotIndex, r
return blocksIssued, lastBlockRowIssued
}
-func (t *TestSuite) IssueBlocksAtSlots(prefix string, slots []iotago.SlotIndex, rowsPerSlot int, initialParentsPrefixAlias string, nodes []*mock.Node, waitForSlotsCommitted bool, issuingOptions map[string][]options.Option[blockfactory.BlockHeaderParams]) (allBlocksIssued []*blocks.Block, lastBlockRow []*blocks.Block) {
+func (t *TestSuite) IssueBlocksAtSlots(prefix string, slots []iotago.SlotIndex, rowsPerSlot int, initialParentsPrefixAlias string, nodes []*mock.Node, waitForSlotsCommitted bool, issuingOptions map[string][]options.Option[mock.BlockHeaderParams]) (allBlocksIssued []*blocks.Block, lastBlockRow []*blocks.Block) {
var blocksIssued, lastBlockRowIssued []*blocks.Block
parentsPrefixAlias := initialParentsPrefixAlias
@@ -261,7 +257,7 @@ func (t *TestSuite) IssueBlocksAtSlots(prefix string, slots []iotago.SlotIndex,
return blocksIssued, lastBlockRowIssued
}
-func (t *TestSuite) IssueBlocksAtEpoch(prefix string, epoch iotago.EpochIndex, rowsPerSlot int, initialParentsPrefixAlias string, nodes []*mock.Node, waitForSlotsCommitted bool, issuingOptions map[string][]options.Option[blockfactory.BlockHeaderParams]) (allBlocksIssued []*blocks.Block, lastBlockRow []*blocks.Block) {
+func (t *TestSuite) IssueBlocksAtEpoch(prefix string, epoch iotago.EpochIndex, rowsPerSlot int, initialParentsPrefixAlias string, nodes []*mock.Node, waitForSlotsCommitted bool, issuingOptions map[string][]options.Option[mock.BlockHeaderParams]) (allBlocksIssued []*blocks.Block, lastBlockRow []*blocks.Block) {
return t.IssueBlocksAtSlots(prefix, t.SlotsForEpoch(epoch), rowsPerSlot, initialParentsPrefixAlias, nodes, waitForSlotsCommitted, issuingOptions)
}
@@ -280,12 +276,14 @@ func (t *TestSuite) SlotsForEpoch(epoch iotago.EpochIndex) []iotago.SlotIndex {
return slots
}
-func (t *TestSuite) CommitUntilSlot(slot iotago.SlotIndex, activeNodes []*mock.Node, parent *blocks.Block) *blocks.Block {
+func (t *TestSuite) CommitUntilSlot(slot iotago.SlotIndex, parent *blocks.Block) *blocks.Block {
// we need to get accepted tangle time up to slot + minCA + 1
// first issue a chain of blocks with step size minCA up until slot + minCA + 1
// then issue one more block to accept the last in the chain which will trigger commitment of the second last in the chain
+ activeValidators := t.Validators()
- latestCommittedSlot := activeNodes[0].Protocol.MainEngineInstance().Storage.Settings().LatestCommitment().Slot()
+ latestCommittedSlot := activeValidators[0].Protocol.MainEngineInstance().Storage.Settings().LatestCommitment().Slot()
if latestCommittedSlot >= slot {
return parent
}
@@ -294,14 +292,15 @@ func (t *TestSuite) CommitUntilSlot(slot iotago.SlotIndex, activeNodes []*mock.N
chainIndex := 0
for {
// preacceptance of nextBlockSlot
- for _, node := range activeNodes {
+ for _, node := range activeValidators {
+ require.Truef(t.Testing, node.IsValidator(), "node: %s: is not a validator node", node.Name)
blockAlias := fmt.Sprintf("chain-%s-%d-%s", parent.ID().Alias(), chainIndex, node.Name)
- tip = t.IssueBlockAtSlot(blockAlias, nextBlockSlot, node.Protocol.MainEngineInstance().Storage.Settings().LatestCommitment().Commitment(), node, tip.ID())
+ tip = t.IssueValidationBlockAtSlot(blockAlias, nextBlockSlot, node.Protocol.MainEngineInstance().Storage.Settings().LatestCommitment().Commitment(), node, tip.ID())
}
// acceptance of nextBlockSlot
- for _, node := range activeNodes {
+ for _, node := range activeValidators {
blockAlias := fmt.Sprintf("chain-%s-%d-%s", parent.ID().Alias(), chainIndex+1, node.Name)
- tip = t.IssueBlockAtSlot(blockAlias, nextBlockSlot, node.Protocol.MainEngineInstance().Storage.Settings().LatestCommitment().Commitment(), node, tip.ID())
+ tip = t.IssueValidationBlockAtSlot(blockAlias, nextBlockSlot, node.Protocol.MainEngineInstance().Storage.Settings().LatestCommitment().Commitment(), node, tip.ID())
}
if nextBlockSlot == slot+t.optsMinCommittableAge {
break
@@ -310,7 +309,7 @@ func (t *TestSuite) CommitUntilSlot(slot iotago.SlotIndex, activeNodes []*mock.N
chainIndex += 2
}
- for _, node := range activeNodes {
+ for _, node := range activeValidators {
t.AssertLatestCommitmentSlotIndex(slot, node)
}
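
Reviewer note (not part of this diff): a sketch of the slimmed-down CommitUntilSlot call; the explicit activeNodes argument is gone, since the suite now derives the validator set itself via Validators().

package testsuite_test

import (
	"github.com/iotaledger/iota-core/pkg/protocol/engine/blocks"
	"github.com/iotaledger/iota-core/pkg/testsuite"
)

// commitSketch is a sketch only: tip is any previously issued block; the slot
// value 5 is illustrative.
func commitSketch(ts *testsuite.TestSuite, tip *blocks.Block) *blocks.Block {
	return ts.CommitUntilSlot(5, tip)
}
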
diff --git a/tools/docker-network/docker-compose.yml b/tools/docker-network/docker-compose.yml
index b673e932c..ebc264856 100644
--- a/tools/docker-network/docker-compose.yml
+++ b/tools/docker-network/docker-compose.yml
@@ -16,13 +16,8 @@ services:
--validator.ignoreBootstrapped=true
--validator.account=0x907c02e9302e0f0571f10f885594e56d8c54ff0708ab7a39bc1b74d396b93b12
--validator.privateKey=443a988ea61797651217de1f4662d4d6da11fd78e67f94511453bf6576045a05293dc170d9a59474e6d81cfba7f7d924c09b25d7166bcfba606e53114d0a758b
- --restAPI.allowIncompleteBlock=true
- --restAPI.blockIssuerAccount=0x907c02e9302e0f0571f10f885594e56d8c54ff0708ab7a39bc1b74d396b93b12
- --restAPI.blockIssuerPrivateKey=443a988ea61797651217de1f4662d4d6da11fd78e67f94511453bf6576045a05293dc170d9a59474e6d81cfba7f7d924c09b25d7166bcfba606e53114d0a758b
--inx.enabled=true
--inx.bindAddress=0.0.0.0:9029
- --inx.blockIssuerAccount=0x907c02e9302e0f0571f10f885594e56d8c54ff0708ab7a39bc1b74d396b93b12
- --inx.blockIssuerPrivateKey=443a988ea61797651217de1f4662d4d6da11fd78e67f94511453bf6576045a05293dc170d9a59474e6d81cfba7f7d924c09b25d7166bcfba606e53114d0a758b
volumes:
- ./docker-network.snapshot:/app/data/snapshot.bin
- ./config.json:/app/config.json:ro
@@ -54,13 +49,8 @@ services:
--validator.enabled=true
--validator.account=0x375358f92cc94750669598b0aaa55a6ff73310b90710e1fad524c0f911be0fea
--validator.privateKey=3a5d39f8b60367a17fd54dac2a32c172c8e1fd6cf74ce65f1e13edba565f281705c1de274451db8de8182d64c6ee0dca3ae0c9077e0b4330c976976171d79064
- --restAPI.allowIncompleteBlock=true
- --restAPI.blockIssuerAccount=0x375358f92cc94750669598b0aaa55a6ff73310b90710e1fad524c0f911be0fea
- --restAPI.blockIssuerPrivateKey=3a5d39f8b60367a17fd54dac2a32c172c8e1fd6cf74ce65f1e13edba565f281705c1de274451db8de8182d64c6ee0dca3ae0c9077e0b4330c976976171d79064
--inx.enabled=true
--inx.bindAddress=0.0.0.0:9029
- --inx.blockIssuerAccount=0x375358f92cc94750669598b0aaa55a6ff73310b90710e1fad524c0f911be0fea
- --inx.blockIssuerPrivateKey=3a5d39f8b60367a17fd54dac2a32c172c8e1fd6cf74ce65f1e13edba565f281705c1de274451db8de8182d64c6ee0dca3ae0c9077e0b4330c976976171d79064
volumes:
- ./docker-network.snapshot:/app/data/snapshot.bin
- ./config.json:/app/config.json:ro
@@ -84,13 +74,8 @@ services:
--validator.enabled=true
--validator.account=0x6aee704f25558e8aa7630fed0121da53074188abc423b3c5810f80be4936eb6e
--validator.privateKey=db39d2fde6301d313b108dc9db1ee724d0f405f6fde966bd776365bc5f4a5fb31e4b21eb51dcddf65c20db1065e1f1514658b23a3ddbf48d30c0efc926a9a648
- --restAPI.allowIncompleteBlock=true
- --restAPI.blockIssuerAccount=0x6aee704f25558e8aa7630fed0121da53074188abc423b3c5810f80be4936eb6e
- --restAPI.blockIssuerPrivateKey=db39d2fde6301d313b108dc9db1ee724d0f405f6fde966bd776365bc5f4a5fb31e4b21eb51dcddf65c20db1065e1f1514658b23a3ddbf48d30c0efc926a9a648
--inx.enabled=true
--inx.bindAddress=0.0.0.0:9029
- --inx.blockIssuerAccount=0x6aee704f25558e8aa7630fed0121da53074188abc423b3c5810f80be4936eb6e
- --inx.blockIssuerPrivateKey=db39d2fde6301d313b108dc9db1ee724d0f405f6fde966bd776365bc5f4a5fb31e4b21eb51dcddf65c20db1065e1f1514658b23a3ddbf48d30c0efc926a9a648
volumes:
- ./docker-network.snapshot:/app/data/snapshot.bin
- ./config.json:/app/config.json:ro
@@ -109,13 +94,8 @@ services:
${COMMON_CONFIG}
${MANUALPEERING_CONFIG}
--p2p.identityPrivateKey=03feb3bcd25e57f75697bb329e6e0100680431e4c45c85bc013da2aea9e9d0345e08a0c37407dc62369deebc64cb0fb3ea26127d19d141ee7fb8eaa6b92019d7
- --restAPI.allowIncompleteBlock=true
- --restAPI.blockIssuerAccount=0xd057230f48fdf59ae093e2179857c590e960b05389399399ce69ad169a9c7f37
- --restAPI.blockIssuerPrivateKey=dcf7adb000f03826f1964a3e5378874b1972c38229fb740a8e47f2c421cddcf9a54fafa44a88e4a6a37796526ea884f613a24d84337871226eb6360f022d8b39
--inx.enabled=true
--inx.bindAddress=0.0.0.0:9029
- --inx.blockIssuerAccount=0xd057230f48fdf59ae093e2179857c590e960b05389399399ce69ad169a9c7f37
- --inx.blockIssuerPrivateKey=dcf7adb000f03826f1964a3e5378874b1972c38229fb740a8e47f2c421cddcf9a54fafa44a88e4a6a37796526ea884f613a24d84337871226eb6360f022d8b39
volumes:
- ./docker-network.snapshot:/app/data/snapshot.bin
- ./config.json:/app/config.json:ro
@@ -134,13 +114,8 @@ services:
${COMMON_CONFIG}
${MANUALPEERING_CONFIG}
--p2p.identityPrivateKey=7d1491df3ef334dee988d6cdfc4b430b996d520bd63375a01d6754f8cee979b855b200fbea8c936ea1937a27e6ad72a7c9a21c1b17c2bd3c11f1f6994d813446
- --restAPI.allowIncompleteBlock=true
- --restAPI.blockIssuerAccount=0x7a9ec5d9c0c145bae03b20cbe481cfea306e688c00525b410eb8cb18a169ed7f
- --restAPI.blockIssuerPrivateKey=0d8ecad4cefe927d2b6c64ee56576c52450f9a7a0113f96683cf8e8cc5c64264cb5ea14175ce649149ee41217c44aa70c3205b9939968449eae408727a71f91b
--inx.enabled=true
--inx.bindAddress=0.0.0.0:9029
- --inx.blockIssuerAccount=0x7a9ec5d9c0c145bae03b20cbe481cfea306e688c00525b410eb8cb18a169ed7f
- --inx.blockIssuerPrivateKey=0d8ecad4cefe927d2b6c64ee56576c52450f9a7a0113f96683cf8e8cc5c64264cb5ea14175ce649149ee41217c44aa70c3205b9939968449eae408727a71f91b
volumes:
- ./docker-network.snapshot:/app/data/snapshot.bin
- ./config.json:/app/config.json:ro