Merge branch 'develop' into chore/scheduler-todos
cyberphysic4l committed Oct 30, 2023
2 parents 6f201e0 + 71b37b9 commit 37a8edf
Showing 94 changed files with 2,428 additions and 1,543 deletions.
2 changes: 2 additions & 0 deletions Dockerfile
@@ -35,6 +35,8 @@ RUN cp ./peering.json /app/peering.json
# using distroless cc "nonroot" image, which includes everything in the base image (glibc, libssl and openssl)
FROM gcr.io/distroless/cc-debian12:nonroot

+HEALTHCHECK --interval=10s --timeout=5s --retries=30 CMD ["/app/iota-core", "tools", "node-info"]
+
# Copy the app dir into distroless image
COPY --chown=nonroot:nonroot --from=build /app /app

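The HEALTHCHECK added above makes Docker run the node's own "tools node-info" subcommand every 10 seconds with a 5-second timeout; the container is only marked unhealthy after 30 consecutive failures, which leaves headroom for slow startup. Assuming standard Docker CLI behavior (this command is not part of the commit), the probe status can be inspected with:

    docker inspect --format '{{.State.Health.Status}}' <container>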
2 changes: 2 additions & 0 deletions Dockerfile.dev
@@ -61,6 +61,8 @@ RUN mkdir -p /app/data/peerdb
# using distroless cc "nonroot" image, which includes everything in the base image (glibc, libssl and openssl)
FROM gcr.io/distroless/cc-debian12:nonroot

+HEALTHCHECK --interval=10s --timeout=5s --retries=30 CMD ["/app/iota-core", "tools", "node-info"]
+
# Copy the app dir into distroless image
COPY --chown=nonroot:nonroot --from=build /app /app

20 changes: 20 additions & 0 deletions components/app/app.go
@@ -1,6 +1,9 @@
package app

import (
"fmt"
"os"

"github.com/iotaledger/hive.go/app"
"github.com/iotaledger/hive.go/app/components/profiling"
"github.com/iotaledger/hive.go/app/components/shutdown"
@@ -15,6 +18,7 @@ import (
"github.com/iotaledger/iota-core/components/restapi"
coreapi "github.com/iotaledger/iota-core/components/restapi/core"
"github.com/iotaledger/iota-core/components/validator"
"github.com/iotaledger/iota-core/pkg/toolset"
)

var (
@@ -28,6 +32,12 @@ var (
func App() *app.App {
return app.New(Name, Version,
// app.WithVersionCheck("iotaledger", "iota-core"),
+app.WithUsageText(fmt.Sprintf(`Usage of %s (%s %s):
+
+Run '%s tools' to list all available tools.
+
+Command line flags:
+`, os.Args[0], Name, Version, os.Args[0])),
app.WithInitComponent(InitComponent),
app.WithComponents(
shutdown.Component,
@@ -63,5 +73,15 @@ func init() {
AdditionalConfigs: []*app.ConfigurationSet{
app.NewConfigurationSet("peering", "peering", "peeringConfigFilePath", "peeringConfig", false, true, false, "peering.json", "n"),
},
+Init: initialize,
}
}

+func initialize(_ *app.App) error {
+	if toolset.ShouldHandleTools() {
+		toolset.HandleTools()
+		// HandleTools will call os.Exit
+	}
+
+	return nil
+}
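The new Init hook routes tool invocations away from normal node startup. A minimal sketch of what the two toolset calls are assumed to do; the real implementation lives in pkg/toolset, and the bodies below are illustrative only:

    // Hypothetical sketch, not the actual pkg/toolset code.
    func ShouldHandleTools() bool {
    	// Tool mode is requested as a subcommand, e.g. "iota-core tools node-info".
    	return len(os.Args) > 1 && os.Args[1] == "tools"
    }

    func HandleTools() {
    	// ... resolve and run the requested tool, then ...
    	os.Exit(0) // never returns, so initialize needs no further handling
    }

This is the same "tools" entry point that the new usage text advertises and that the Dockerfile HEALTHCHECK relies on.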
20 changes: 10 additions & 10 deletions components/dashboard/explorer_routes.go
@@ -105,24 +105,24 @@ func createExplorerBlock(block *model.Block, cachedBlock *blocks.Block, metadata
var payloadJSON []byte
basicBlock, isBasic := block.BasicBlock()
if isBasic {
-payloadJSON, err = lo.PanicOnErr(deps.Protocol.APIForVersion(iotaBlk.ProtocolVersion)).JSONEncode(basicBlock.Payload)
+payloadJSON, err = lo.PanicOnErr(deps.Protocol.APIForVersion(iotaBlk.Header.ProtocolVersion)).JSONEncode(basicBlock.Payload)
if err != nil {
return nil
}
}

t := &ExplorerBlock{
ID: block.ID().ToHex(),
-NetworkID: iotaBlk.NetworkID,
-ProtocolVersion: iotaBlk.ProtocolVersion,
+NetworkID: iotaBlk.Header.NetworkID,
+ProtocolVersion: iotaBlk.Header.ProtocolVersion,
SolidificationTimestamp: 0,
-IssuanceTimestamp: iotaBlk.IssuingTime.Unix(),
+IssuanceTimestamp: iotaBlk.Header.IssuingTime.Unix(),
SequenceNumber: 0,
-IssuerID: iotaBlk.IssuerID.ToHex(),
+IssuerID: iotaBlk.Header.IssuerID.ToHex(),
Signature: hexutil.EncodeHex(sigBytes),
-StrongParents: iotaBlk.Block.StrongParentIDs().ToHex(),
-WeakParents: iotaBlk.Block.WeakParentIDs().ToHex(),
-ShallowLikedParents: iotaBlk.Block.ShallowLikeParentIDs().ToHex(),
+StrongParents: iotaBlk.Body.StrongParentIDs().ToHex(),
+WeakParents: iotaBlk.Body.WeakParentIDs().ToHex(),
+ShallowLikedParents: iotaBlk.Body.ShallowLikeParentIDs().ToHex(),

PayloadType: func() iotago.PayloadType {
if isBasic && basicBlock.Payload != nil {
@@ -152,8 +152,8 @@ func createExplorerBlock(block *model.Block, cachedBlock *blocks.Block, metadata

return ""
}(),
-CommitmentID: iotaBlk.SlotCommitmentID.ToHex(),
-LatestConfirmedSlot: uint64(iotaBlk.LatestFinalizedSlot),
+CommitmentID: iotaBlk.Header.SlotCommitmentID.ToHex(),
+LatestConfirmedSlot: uint64(iotaBlk.Header.LatestFinalizedSlot),
}

if cachedBlock != nil {
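The changes in this file, and in visualizer.go, debug_models.go, server_blocks.go, and the metrics components below, all track one upstream refactor of the block type: top-level metadata fields moved into a Header sub-structure, the inner Block field carrying the parent references became Body, and the type itself was renamed from iotago.ProtocolBlock to iotago.Block. A rough reconstruction of the new layout from the accessors used in this diff (field types, and anything not visible in the hunks, are assumptions):

    // Illustrative reconstruction only, not the actual iota.go definition.
    type Block struct {
    	Header    BlockHeader // metadata formerly on ProtocolBlock itself
    	Body      BlockBody   // formerly the "Block" field; exposes StrongParentIDs(), WeakParentIDs(), ShallowLikeParentIDs()
    	Signature Signature
    }

    type BlockHeader struct {
    	ProtocolVersion     Version
    	NetworkID           NetworkID
    	IssuingTime         time.Time
    	SlotCommitmentID    CommitmentID
    	LatestFinalizedSlot SlotIndex
    	IssuerID            AccountID
    }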
6 changes: 3 additions & 3 deletions components/dashboard/visualizer.go
@@ -48,9 +48,9 @@ func sendVertex(blk *blocks.Block, confirmed bool) {

broadcastWsBlock(&wsblk{MsgTypeVertex, &vertex{
ID: blk.ID().ToHex(),
-StrongParents: blk.ProtocolBlock().Block.StrongParentIDs().ToHex(),
-WeakParents: blk.ProtocolBlock().Block.WeakParentIDs().ToHex(),
-ShallowLikedParents: blk.ProtocolBlock().Block.ShallowLikeParentIDs().ToHex(),
+StrongParents: blk.ProtocolBlock().Body.StrongParentIDs().ToHex(),
+WeakParents: blk.ProtocolBlock().Body.WeakParentIDs().ToHex(),
+ShallowLikedParents: blk.ProtocolBlock().Body.ShallowLikeParentIDs().ToHex(),
IsConfirmed: confirmed,
IsTx: isTx,
IsTxAccepted: func() bool {
4 changes: 2 additions & 2 deletions components/debugapi/debug_models.go
@@ -72,8 +72,8 @@ func BlockMetadataResponseFromBlock(block *blocks.Block) *BlockMetadataResponse
return &BlockMetadataResponse{
BlockID: block.ID().String(),
StrongParents: lo.Map(block.StrongParents(), func(blockID iotago.BlockID) string { return blockID.String() }),
-WeakParents: lo.Map(block.ProtocolBlock().Block.WeakParentIDs(), func(blockID iotago.BlockID) string { return blockID.String() }),
-ShallowLikeParents: lo.Map(block.ProtocolBlock().Block.ShallowLikeParentIDs(), func(blockID iotago.BlockID) string { return blockID.String() }),
+WeakParents: lo.Map(block.ProtocolBlock().Body.WeakParentIDs(), func(blockID iotago.BlockID) string { return blockID.String() }),
+ShallowLikeParents: lo.Map(block.ProtocolBlock().Body.ShallowLikeParentIDs(), func(blockID iotago.BlockID) string { return blockID.String() }),
Solid: block.IsSolid(),
Invalid: block.IsInvalid(),
Booked: block.IsBooked(),
2 changes: 1 addition & 1 deletion components/inx/server_blocks.go
@@ -126,7 +126,7 @@ func (s *Server) SubmitBlock(ctx context.Context, rawBlock *inx.RawBlock) (*inx.
return s.attachBlock(ctx, block)
}

-func (s *Server) attachBlock(ctx context.Context, block *iotago.ProtocolBlock) (*inx.BlockId, error) {
+func (s *Server) attachBlock(ctx context.Context, block *iotago.Block) (*inx.BlockId, error) {
mergedCtx, mergedCtxCancel := contextutils.MergeContexts(ctx, Component.Daemon().ContextStopped())
defer mergedCtxCancel()

2 changes: 1 addition & 1 deletion components/metrics/metrics_accounts.go
@@ -23,7 +23,7 @@ var AccountMetrics = collector.NewCollection(accountNamespace,
collector.WithPruningDelay(10*time.Minute),
collector.WithInitFunc(func() {
deps.Protocol.Events.Engine.BlockGadget.BlockAccepted.Hook(func(block *blocks.Block) {
-accountData, exists, _ := deps.Protocol.MainEngineInstance().Ledger.Account(block.ProtocolBlock().IssuerID, deps.Protocol.MainEngineInstance().SyncManager.LatestCommitment().Slot())
+accountData, exists, _ := deps.Protocol.MainEngineInstance().Ledger.Account(block.ProtocolBlock().Header.IssuerID, deps.Protocol.MainEngineInstance().SyncManager.LatestCommitment().Slot())
if exists {
deps.Collector.Update(accountNamespace, credits, float64(accountData.Credits.Value), accountData.ID.String())
}
30 changes: 15 additions & 15 deletions components/metrics/metrics_scheduler.go
@@ -38,22 +38,22 @@ var SchedulerMetrics = collector.NewCollection(schedulerNamespace,
collector.WithHelp("Current size of each node's queue (in work units)."),
collector.WithInitFunc(func() {
deps.Protocol.Events.Engine.Scheduler.BlockEnqueued.Hook(func(block *blocks.Block) {
-deps.Collector.Update(schedulerNamespace, queueSizePerNodeWork, float64(deps.Protocol.MainEngineInstance().Scheduler.IssuerQueueWork(block.ProtocolBlock().IssuerID)), block.ProtocolBlock().IssuerID.String())
+deps.Collector.Update(schedulerNamespace, queueSizePerNodeWork, float64(deps.Protocol.MainEngineInstance().Scheduler.IssuerQueueWork(block.ProtocolBlock().Header.IssuerID)), block.ProtocolBlock().Header.IssuerID.String())

}, event.WithWorkerPool(Component.WorkerPool))

deps.Protocol.Events.Engine.Scheduler.BlockSkipped.Hook(func(block *blocks.Block) {
-deps.Collector.Update(schedulerNamespace, queueSizePerNodeWork, float64(deps.Protocol.MainEngineInstance().Scheduler.IssuerQueueWork(block.ProtocolBlock().IssuerID)), block.ProtocolBlock().IssuerID.String())
+deps.Collector.Update(schedulerNamespace, queueSizePerNodeWork, float64(deps.Protocol.MainEngineInstance().Scheduler.IssuerQueueWork(block.ProtocolBlock().Header.IssuerID)), block.ProtocolBlock().Header.IssuerID.String())

}, event.WithWorkerPool(Component.WorkerPool))

deps.Protocol.Events.Engine.Scheduler.BlockDropped.Hook(func(block *blocks.Block, _ error) {
-deps.Collector.Update(schedulerNamespace, queueSizePerNodeWork, float64(deps.Protocol.MainEngineInstance().Scheduler.IssuerQueueWork(block.ProtocolBlock().IssuerID)), block.ProtocolBlock().IssuerID.String())
+deps.Collector.Update(schedulerNamespace, queueSizePerNodeWork, float64(deps.Protocol.MainEngineInstance().Scheduler.IssuerQueueWork(block.ProtocolBlock().Header.IssuerID)), block.ProtocolBlock().Header.IssuerID.String())

}, event.WithWorkerPool(Component.WorkerPool))

deps.Protocol.Events.Engine.Scheduler.BlockScheduled.Hook(func(block *blocks.Block) {
-deps.Collector.Update(schedulerNamespace, queueSizePerNodeWork, float64(deps.Protocol.MainEngineInstance().Scheduler.IssuerQueueWork(block.ProtocolBlock().IssuerID)), block.ProtocolBlock().IssuerID.String())
+deps.Collector.Update(schedulerNamespace, queueSizePerNodeWork, float64(deps.Protocol.MainEngineInstance().Scheduler.IssuerQueueWork(block.ProtocolBlock().Header.IssuerID)), block.ProtocolBlock().Header.IssuerID.String())

}, event.WithWorkerPool(Component.WorkerPool))
}),
@@ -66,25 +66,25 @@ var SchedulerMetrics = collector.NewCollection(schedulerNamespace,
collector.WithInitFunc(func() {
deps.Protocol.Events.Engine.Scheduler.BlockEnqueued.Hook(func(block *blocks.Block) {
if _, isBasic := block.BasicBlock(); isBasic {
-deps.Collector.Update(schedulerNamespace, queueSizePerNodeCount, float64(deps.Protocol.MainEngineInstance().Scheduler.IssuerQueueBlockCount(block.ProtocolBlock().IssuerID)), block.ProtocolBlock().IssuerID.String())
+deps.Collector.Update(schedulerNamespace, queueSizePerNodeCount, float64(deps.Protocol.MainEngineInstance().Scheduler.IssuerQueueBlockCount(block.ProtocolBlock().Header.IssuerID)), block.ProtocolBlock().Header.IssuerID.String())
}
}, event.WithWorkerPool(Component.WorkerPool))

deps.Protocol.Events.Engine.Scheduler.BlockSkipped.Hook(func(block *blocks.Block) {
if _, isBasic := block.BasicBlock(); isBasic {
-deps.Collector.Update(schedulerNamespace, queueSizePerNodeCount, float64(deps.Protocol.MainEngineInstance().Scheduler.IssuerQueueBlockCount(block.ProtocolBlock().IssuerID)), block.ProtocolBlock().IssuerID.String())
+deps.Collector.Update(schedulerNamespace, queueSizePerNodeCount, float64(deps.Protocol.MainEngineInstance().Scheduler.IssuerQueueBlockCount(block.ProtocolBlock().Header.IssuerID)), block.ProtocolBlock().Header.IssuerID.String())
}
}, event.WithWorkerPool(Component.WorkerPool))

deps.Protocol.Events.Engine.Scheduler.BlockDropped.Hook(func(block *blocks.Block, _ error) {
if _, isBasic := block.BasicBlock(); isBasic {
-deps.Collector.Update(schedulerNamespace, queueSizePerNodeCount, float64(deps.Protocol.MainEngineInstance().Scheduler.IssuerQueueBlockCount(block.ProtocolBlock().IssuerID)), block.ProtocolBlock().IssuerID.String())
+deps.Collector.Update(schedulerNamespace, queueSizePerNodeCount, float64(deps.Protocol.MainEngineInstance().Scheduler.IssuerQueueBlockCount(block.ProtocolBlock().Header.IssuerID)), block.ProtocolBlock().Header.IssuerID.String())
}
}, event.WithWorkerPool(Component.WorkerPool))

deps.Protocol.Events.Engine.Scheduler.BlockScheduled.Hook(func(block *blocks.Block) {
if _, isBasic := block.BasicBlock(); isBasic {
-deps.Collector.Update(schedulerNamespace, queueSizePerNodeCount, float64(deps.Protocol.MainEngineInstance().Scheduler.IssuerQueueBlockCount(block.ProtocolBlock().IssuerID)), block.ProtocolBlock().IssuerID.String())
+deps.Collector.Update(schedulerNamespace, queueSizePerNodeCount, float64(deps.Protocol.MainEngineInstance().Scheduler.IssuerQueueBlockCount(block.ProtocolBlock().Header.IssuerID)), block.ProtocolBlock().Header.IssuerID.String())
}
}, event.WithWorkerPool(Component.WorkerPool))
}),
@@ -97,25 +97,25 @@ var SchedulerMetrics = collector.NewCollection(schedulerNamespace,
collector.WithInitFunc(func() {
deps.Protocol.Events.Engine.Scheduler.BlockEnqueued.Hook(func(block *blocks.Block) {
if _, isValidation := block.ValidationBlock(); isValidation {
-deps.Collector.Update(schedulerNamespace, validatorQueueSizePerNodeCount, float64(deps.Protocol.MainEngineInstance().Scheduler.ValidatorQueueBlockCount(block.ProtocolBlock().IssuerID)), block.ProtocolBlock().IssuerID.String())
+deps.Collector.Update(schedulerNamespace, validatorQueueSizePerNodeCount, float64(deps.Protocol.MainEngineInstance().Scheduler.ValidatorQueueBlockCount(block.ProtocolBlock().Header.IssuerID)), block.ProtocolBlock().Header.IssuerID.String())
}
}, event.WithWorkerPool(Component.WorkerPool))

deps.Protocol.Events.Engine.Scheduler.BlockSkipped.Hook(func(block *blocks.Block) {
if _, isValidation := block.ValidationBlock(); isValidation {
-deps.Collector.Update(schedulerNamespace, validatorQueueSizePerNodeCount, float64(deps.Protocol.MainEngineInstance().Scheduler.ValidatorQueueBlockCount(block.ProtocolBlock().IssuerID)), block.ProtocolBlock().IssuerID.String())
+deps.Collector.Update(schedulerNamespace, validatorQueueSizePerNodeCount, float64(deps.Protocol.MainEngineInstance().Scheduler.ValidatorQueueBlockCount(block.ProtocolBlock().Header.IssuerID)), block.ProtocolBlock().Header.IssuerID.String())
}
}, event.WithWorkerPool(Component.WorkerPool))

deps.Protocol.Events.Engine.Scheduler.BlockDropped.Hook(func(block *blocks.Block, _ error) {
if _, isValidation := block.ValidationBlock(); isValidation {
-deps.Collector.Update(schedulerNamespace, validatorQueueSizePerNodeCount, float64(deps.Protocol.MainEngineInstance().Scheduler.ValidatorQueueBlockCount(block.ProtocolBlock().IssuerID)), block.ProtocolBlock().IssuerID.String())
+deps.Collector.Update(schedulerNamespace, validatorQueueSizePerNodeCount, float64(deps.Protocol.MainEngineInstance().Scheduler.ValidatorQueueBlockCount(block.ProtocolBlock().Header.IssuerID)), block.ProtocolBlock().Header.IssuerID.String())
}
}, event.WithWorkerPool(Component.WorkerPool))

deps.Protocol.Events.Engine.Scheduler.BlockScheduled.Hook(func(block *blocks.Block) {
if _, isValidation := block.ValidationBlock(); isValidation {
-deps.Collector.Update(schedulerNamespace, validatorQueueSizePerNodeCount, float64(deps.Protocol.MainEngineInstance().Scheduler.ValidatorQueueBlockCount(block.ProtocolBlock().IssuerID)), block.ProtocolBlock().IssuerID.String())
+deps.Collector.Update(schedulerNamespace, validatorQueueSizePerNodeCount, float64(deps.Protocol.MainEngineInstance().Scheduler.ValidatorQueueBlockCount(block.ProtocolBlock().Header.IssuerID)), block.ProtocolBlock().Header.IssuerID.String())
}
}, event.WithWorkerPool(Component.WorkerPool))
}),
@@ -127,14 +127,14 @@ var SchedulerMetrics = collector.NewCollection(schedulerNamespace,
collector.WithHelp("Current amount of mana of each issuer in the queue."),
collector.WithInitFunc(func() {
deps.Protocol.Events.Engine.Scheduler.BlockEnqueued.Hook(func(block *blocks.Block) {
-mana, err := deps.Protocol.MainEngineInstance().Ledger.ManaManager().GetManaOnAccount(block.ProtocolBlock().IssuerID, block.SlotCommitmentID().Slot())
+mana, err := deps.Protocol.MainEngineInstance().Ledger.ManaManager().GetManaOnAccount(block.ProtocolBlock().Header.IssuerID, block.SlotCommitmentID().Slot())
if err != nil {
-deps.Protocol.MainEngineInstance().ErrorHandler("metrics")(ierrors.Wrapf(err, "failed to retrieve mana on account %s for slot %d", block.ProtocolBlock().IssuerID, block.SlotCommitmentID().Slot()))
+deps.Protocol.MainEngineInstance().ErrorHandler("metrics")(ierrors.Wrapf(err, "failed to retrieve mana on account %s for slot %d", block.ProtocolBlock().Header.IssuerID, block.SlotCommitmentID().Slot()))

return
}

-deps.Collector.Update(schedulerNamespace, manaAmountPerNode, float64(mana), block.ProtocolBlock().IssuerID.String())
+deps.Collector.Update(schedulerNamespace, manaAmountPerNode, float64(mana), block.ProtocolBlock().Header.IssuerID.String())
}, event.WithWorkerPool(Component.WorkerPool))
}),
)),
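Every hunk in this file is the same mechanical substitution: the scheduler hooks now read the issuer from block.ProtocolBlock().Header.IssuerID instead of block.ProtocolBlock().IssuerID. All of these hooks share one shape (resolve the issuer, compute a per-issuer value, update a gauge labeled with that issuer), which a hypothetical helper, not part of this commit, would make explicit:

    // Sketch of the pattern repeated above; the helper is illustrative only.
    func updateIssuerGauge(metric string, block *blocks.Block, value func(iotago.AccountID) float64) {
    	issuerID := block.ProtocolBlock().Header.IssuerID
    	deps.Collector.Update(schedulerNamespace, metric, value(issuerID), issuerID.String())
    }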
2 changes: 1 addition & 1 deletion components/p2p/component.go
@@ -230,7 +230,7 @@ func provide(c *dig.Container) error {
if err := c.Provide(func(deps p2pDeps) p2pResult {
res := p2pResult{}

privKeyFilePath := filepath.Join(deps.P2PDatabasePath, "identity.key")
privKeyFilePath := filepath.Join(deps.P2PDatabasePath, IdentityPrivateKeyFileName)

// make sure nobody copies around the peer store since it contains the private key of the node
Component.LogInfof(`WARNING: never share your "%s" folder as it contains your node's private key!`, deps.P2PDatabasePath)
3 changes: 2 additions & 1 deletion components/p2p/params.go
@@ -6,7 +6,8 @@ import (

const (
// CfgPeers defines the static peers this node should retain a connection to (CLI).
CfgPeers = "peers"
CfgPeers = "peers"
IdentityPrivateKeyFileName = "identity.key"
)

// ParametersP2P contains the definition of configuration parameters used by the p2p plugin.
(Diff truncated: the remaining changed files in this commit were not loaded.)