From 7df3ffc1d8861b19389aa9425d2f4037d404b957 Mon Sep 17 00:00:00 2001 From: Piotr Macek <4007944+piotrm50@users.noreply.github.com> Date: Wed, 6 Sep 2023 11:44:35 +0200 Subject: [PATCH 01/17] Implement logic to clone and rollback storage for engine switching. --- go.mod | 1 + go.sum | 2 + .../engine/accounts/accountsledger/manager.go | 44 ++++++++ .../accounts/accountsledger/snapshot.go | 2 + pkg/protocol/engine/eviction/state.go | 30 +++++ pkg/protocol/engine/utxoledger/snapshot.go | 28 +++++ pkg/protocol/enginemanager/enginemanager.go | 104 +++++++++++++++--- pkg/protocol/protocol_fork.go | 2 - pkg/storage/permanent/commitments.go | 10 ++ pkg/storage/permanent/permanent.go | 19 ++++ pkg/storage/permanent/settings.go | 11 ++ pkg/storage/prunable/bucket_manager.go | 6 +- pkg/storage/prunable/prunable.go | 98 +++++++++++++++++ pkg/storage/storage.go | 40 +++++++ 14 files changed, 379 insertions(+), 18 deletions(-) diff --git a/go.mod b/go.mod index 8ee963cad..2533b8df2 100644 --- a/go.mod +++ b/go.mod @@ -121,6 +121,7 @@ require ( github.com/multiformats/go-multistream v0.4.1 // indirect github.com/onsi/ginkgo/v2 v2.12.0 // indirect github.com/opencontainers/runtime-spec v1.1.0 // indirect + github.com/otiai10/copy v1.12.0 // indirect github.com/pasztorpisti/qs v0.0.0-20171216220353-8d6c33ee906c // indirect github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 // indirect github.com/pelletier/go-toml/v2 v2.1.0 // indirect diff --git a/go.sum b/go.sum index 0b10673ac..136f65b6d 100644 --- a/go.sum +++ b/go.sum @@ -468,6 +468,8 @@ github.com/opencontainers/runtime-spec v1.1.0/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/ github.com/openzipkin/zipkin-go v0.1.1/go.mod h1:NtoC/o8u3JlF1lSlyPNswIbeQH9bJTmOf0Erfk+hxe8= github.com/orcaman/writerseeker v0.0.0-20200621085525-1d3f536ff85e h1:s2RNOM/IGdY0Y6qfTeUKhDawdHDpK9RGBdx80qN4Ttw= github.com/orcaman/writerseeker v0.0.0-20200621085525-1d3f536ff85e/go.mod h1:nBdnFKj15wFbf94Rwfq4m30eAcyY9V/IyKAGQFtqkW0= +github.com/otiai10/copy v1.12.0 h1:cLMgSQnXBs1eehF0Wy/FAGsgDTDmAqFR7rQylBb1nDY= +github.com/otiai10/copy v1.12.0/go.mod h1:rSaLseMUsZFFbsFGc7wCJnnkTAvdc5L6VWxPE4308Ww= github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= github.com/pasztorpisti/qs v0.0.0-20171216220353-8d6c33ee906c h1:Gcce/r5tSQeprxswXXOwQ/RBU1bjQWVd9dB7QKoPXBE= diff --git a/pkg/protocol/engine/accounts/accountsledger/manager.go b/pkg/protocol/engine/accounts/accountsledger/manager.go index 35114d498..dfcc2c951 100644 --- a/pkg/protocol/engine/accounts/accountsledger/manager.go +++ b/pkg/protocol/engine/accounts/accountsledger/manager.go @@ -8,6 +8,7 @@ import ( "github.com/iotaledger/hive.go/ds/shrinkingmap" "github.com/iotaledger/hive.go/ierrors" "github.com/iotaledger/hive.go/kvstore" + "github.com/iotaledger/hive.go/lo" "github.com/iotaledger/hive.go/runtime/module" "github.com/iotaledger/hive.go/runtime/options" "github.com/iotaledger/hive.go/runtime/syncutils" @@ -266,6 +267,49 @@ func (m *Manager) PastAccounts(accountIDs iotago.AccountIDs, targetIndex iotago. 
return result, nil } +func (m *Manager) Rollback(targetIndex iotago.SlotIndex) error { + for index := m.latestCommittedSlot; index > targetIndex; index-- { + slotDiff := lo.PanicOnErr(m.slotDiff(index)) + var internalErr error + + if err := slotDiff.Stream(func(accountID iotago.AccountID, accountDiff *model.AccountDiff, destroyed bool) bool { + accountData, exists, err := m.accountsTree.Get(accountID) + if err != nil { + internalErr = ierrors.Wrapf(err, "unable to retrieve account %s to rollback in slot %d", accountID, index) + + return false + } + + if !exists { + accountData = accounts.NewAccountData(accountID) + } + + if _, err := m.rollbackAccountTo(accountData, targetIndex); err != nil { + internalErr = ierrors.Wrapf(err, "unable to rollback account %s to target slot index %d", accountID, targetIndex) + + return false + } + + // TODO: Saving accountData after each slot - would it be better to buffer them in memory and save them at the end? + if err := m.accountsTree.Set(accountID, accountData); err != nil { + internalErr = ierrors.Wrapf(err, "failed to save rolled back account %s to target slot index %d", accountID, targetIndex) + + return false + } + + return true + }); err != nil { + return ierrors.Wrapf(err, "error in streaming account diffs for slot %s", index) + } + + if internalErr != nil { + return ierrors.Wrapf(internalErr, "error in rolling back account for slot %s", index) + } + } + + return nil +} + // AddAccount adds a new account to the Account tree, allotting to it the balance on the given output. // The Account will be created associating the given output as the latest state of the account. func (m *Manager) AddAccount(output *utxoledger.Output, blockIssuanceCredits iotago.BlockIssuanceCredits) error { diff --git a/pkg/protocol/engine/accounts/accountsledger/snapshot.go b/pkg/protocol/engine/accounts/accountsledger/snapshot.go index f54122791..d5d842740 100644 --- a/pkg/protocol/engine/accounts/accountsledger/snapshot.go +++ b/pkg/protocol/engine/accounts/accountsledger/snapshot.go @@ -215,6 +215,8 @@ func (m *Manager) readSlotDiffs(reader io.ReadSeeker, slotDiffCount uint64) erro func (m *Manager) writeSlotDiffs(pWriter *utils.PositionedWriter, targetIndex iotago.SlotIndex) (slotDiffsCount uint64, err error) { // write slot diffs until being able to reach targetIndex, where the exported tree is at slotIndex := iotago.SlotIndex(1) + + // TODO: shouldn't that be from last finalized slot? 
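	// Illustrative aside (not part of this change): the lines below pick the first
	// exported slot so that the window (targetIndex - maxCommittableAge, targetIndex]
	// is covered. For example, assuming maxCommittableAge = 10:
	//
	//   targetIndex = 25  =>  slotIndex = 25 - 10 = 15, so diffs for slots 15..25 are written
	//   targetIndex = 7   =>  7 is not greater than 10, so slotIndex stays at 1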
maxCommittableAge := m.apiProvider.APIForSlot(targetIndex).ProtocolParameters().MaxCommittableAge() if targetIndex > maxCommittableAge { slotIndex = targetIndex - maxCommittableAge diff --git a/pkg/protocol/engine/eviction/state.go b/pkg/protocol/engine/eviction/state.go index 0e55d69a2..aa31a2f69 100644 --- a/pkg/protocol/engine/eviction/state.go +++ b/pkg/protocol/engine/eviction/state.go @@ -278,6 +278,36 @@ func (s *State) Import(reader io.ReadSeeker) error { return nil } +func (s *State) Rollback(lowerTarget, targetIndex iotago.SlotIndex) error { + s.evictionMutex.RLock() + defer s.evictionMutex.RUnlock() + + start, _ := s.delayedBlockEvictionThreshold(lowerTarget) + + latestNonEmptySlot := iotago.SlotIndex(0) + + for currentSlot := start; currentSlot <= targetIndex; currentSlot++ { + _, err := s.rootBlockStorageFunc(currentSlot) + if err != nil { + continue + } + + latestNonEmptySlot = currentSlot + } + + if latestNonEmptySlot > s.optsRootBlocksEvictionDelay { + latestNonEmptySlot -= s.optsRootBlocksEvictionDelay + } else { + latestNonEmptySlot = 0 + } + + if err := s.latestNonEmptyStore.Set([]byte{latestNonEmptySlotKey}, latestNonEmptySlot.MustBytes()); err != nil { + return ierrors.Wrap(err, "failed to store latest non empty slot") + } + + return nil +} + // PopulateFromStorage populates the root blocks from the storage. func (s *State) PopulateFromStorage(latestCommitmentIndex iotago.SlotIndex) { for index := lo.Return1(s.delayedBlockEvictionThreshold(latestCommitmentIndex)); index <= latestCommitmentIndex; index++ { diff --git a/pkg/protocol/engine/utxoledger/snapshot.go b/pkg/protocol/engine/utxoledger/snapshot.go index dbd68d7a1..e60ec1ba0 100644 --- a/pkg/protocol/engine/utxoledger/snapshot.go +++ b/pkg/protocol/engine/utxoledger/snapshot.go @@ -312,3 +312,31 @@ func (m *Manager) Export(writer io.WriteSeeker, targetIndex iotago.SlotIndex) er return nil } + +// Rollback rolls back ledger state to the given target slot. 
+func (m *Manager) Rollback(targetSlot iotago.SlotIndex) error { + m.WriteLockLedger() + defer m.WriteUnlockLedger() + + ledgerIndex, err := m.ReadLedgerIndexWithoutLocking() + if err != nil { + return err + } + + for diffIndex := ledgerIndex; diffIndex > targetSlot; diffIndex-- { + slotDiff, err := m.SlotDiffWithoutLocking(diffIndex) + if err != nil { + return err + } + + if err := m.RollbackDiffWithoutLocking(slotDiff.Index, slotDiff.Outputs, slotDiff.Spents); err != nil { + return err + } + } + + if err := m.stateTree.Commit(); err != nil { + return ierrors.Wrap(err, "unable to commit state tree") + } + + return nil +} diff --git a/pkg/protocol/enginemanager/enginemanager.go b/pkg/protocol/enginemanager/enginemanager.go index 97deba261..c5c8c1633 100644 --- a/pkg/protocol/enginemanager/enginemanager.go +++ b/pkg/protocol/enginemanager/enginemanager.go @@ -15,14 +15,17 @@ import ( "github.com/iotaledger/hive.go/runtime/options" "github.com/iotaledger/hive.go/runtime/workerpool" "github.com/iotaledger/iota-core/pkg/protocol/engine" + "github.com/iotaledger/iota-core/pkg/protocol/engine/accounts/accountsledger" "github.com/iotaledger/iota-core/pkg/protocol/engine/attestation" "github.com/iotaledger/iota-core/pkg/protocol/engine/blockdag" + "github.com/iotaledger/iota-core/pkg/protocol/engine/blocks" "github.com/iotaledger/iota-core/pkg/protocol/engine/booker" "github.com/iotaledger/iota-core/pkg/protocol/engine/clock" "github.com/iotaledger/iota-core/pkg/protocol/engine/commitmentfilter" "github.com/iotaledger/iota-core/pkg/protocol/engine/congestioncontrol/scheduler" "github.com/iotaledger/iota-core/pkg/protocol/engine/consensus/blockgadget" "github.com/iotaledger/iota-core/pkg/protocol/engine/consensus/slotgadget" + "github.com/iotaledger/iota-core/pkg/protocol/engine/eviction" "github.com/iotaledger/iota-core/pkg/protocol/engine/filter" "github.com/iotaledger/iota-core/pkg/protocol/engine/ledger" "github.com/iotaledger/iota-core/pkg/protocol/engine/notarization" @@ -138,13 +141,14 @@ func (e *EngineManager) LoadActiveEngine(snapshotPath string) (*engine.Engine, e if len(info.Name) > 0 { if exists, isDirectory, err := ioutils.PathExists(e.directory.Path(info.Name)); err == nil && exists && isDirectory { // Load previous engine as active - e.activeInstance = e.loadEngineInstance(info.Name, snapshotPath) + e.activeInstance = e.loadEngineInstanceFromSnapshot(info.Name, snapshotPath) } } if e.activeInstance == nil { // Start with a new instance and set to active - instance := e.newEngineInstance(snapshotPath) + instance := e.loadEngineInstanceFromSnapshot(lo.PanicOnErr(uuid.NewUUID()).String(), snapshotPath) + if err := e.SetActiveInstance(instance); err != nil { return nil, err } @@ -191,14 +195,14 @@ func (e *EngineManager) SetActiveInstance(instance *engine.Engine) error { return ioutils.WriteJSONToFile(e.infoFilePath(), info, 0o644) } -func (e *EngineManager) loadEngineInstance(dirName string, snapshotPath string) *engine.Engine { +func (e *EngineManager) loadEngineInstanceFromSnapshot(engineAlias string, snapshotPath string) *engine.Engine { errorHandler := func(err error) { - e.errorHandler(ierrors.Wrapf(err, "engine (%s)", dirName[0:8])) + e.errorHandler(ierrors.Wrapf(err, "engine (%s)", engineAlias[0:8])) } - newEngine := engine.New(e.workers.CreateGroup(dirName), + newEngine := engine.New(e.workers.CreateGroup(engineAlias), errorHandler, - storage.New(e.directory.Path(dirName), e.dbVersion, errorHandler, e.storageOptions...), + storage.New(e.directory.Path(engineAlias), e.dbVersion, 
errorHandler, e.storageOptions...), e.filterProvider, e.commitmentFilterProvider, e.blockDAGProvider, @@ -224,19 +228,91 @@ func (e *EngineManager) loadEngineInstance(dirName string, snapshotPath string) return newEngine } -func (e *EngineManager) newEngineInstance(snapshotPath string) *engine.Engine { - dirName := lo.PanicOnErr(uuid.NewUUID()).String() - return e.loadEngineInstance(dirName, snapshotPath) +func (e *EngineManager) loadEngineInstanceWithStorage(engineAlias string, storage *storage.Storage) *engine.Engine { + errorHandler := func(err error) { + e.errorHandler(ierrors.Wrapf(err, "engine (%s)", engineAlias[0:8])) + } + + newEngine := engine.New(e.workers.CreateGroup(engineAlias), + errorHandler, + storage, + e.filterProvider, + e.commitmentFilterProvider, + e.blockDAGProvider, + e.bookerProvider, + e.clockProvider, + e.blockGadgetProvider, + e.slotGadgetProvider, + e.sybilProtectionProvider, + e.notarizationProvider, + e.attestationProvider, + e.ledgerProvider, + e.schedulerProvider, + e.tipManagerProvider, + e.tipSelectionProvider, + e.retainerProvider, + e.upgradeOrchestratorProvider, + e.syncManagerProvider, + e.engineOptions..., + ) + + e.engineCreated.Trigger(newEngine) + + return newEngine } func (e *EngineManager) ForkEngineAtSlot(index iotago.SlotIndex) (*engine.Engine, error) { - // Dump a snapshot at the target index - snapshotPath := filepath.Join(os.TempDir(), fmt.Sprintf("snapshot_%d_%s.bin", index, lo.PanicOnErr(uuid.NewUUID()))) - if err := e.activeInstance.WriteSnapshot(snapshotPath, index); err != nil { - return nil, ierrors.Wrapf(err, "error exporting snapshot for index %s", index) + fmt.Println("fork at slot ", index) + engineAlias := lo.PanicOnErr(uuid.NewUUID()).String() + + errorHandler := func(err error) { + e.errorHandler(ierrors.Wrapf(err, "engine (%s)", engineAlias[0:8])) + } + + // TODO: lock active instance so it doesn't use storage when we clone it + // Copy raw data on disk. + newStorage, err := storage.CloneStorage(e.activeInstance.Storage, e.directory.Path(engineAlias), e.dbVersion, errorHandler, e.storageOptions...) + if err != nil { + return nil, ierrors.Wrapf(err, "failed to copy storage from active engine instance (%s) to new engine instance (%s)", e.activeInstance.Storage.Directory(), e.directory.Path(engineAlias)) + } + + // Remove commitments that after forking point. + if err := newStorage.Commitments().Rollback(index, newStorage.Settings().LatestCommitment().Index()); err != nil { + return nil, ierrors.Wrap(err, "failed to rollback commitments") + } + + // Create temporary components and rollback their state, which will be reflected on disk. 
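	// Editorial overview (derived from this hunk, not additional behavior): the fork
	// sequence is: clone the active engine's storage on disk, drop commitments newer
	// than the forking slot, build throwaway eviction/block-cache/accounts components
	// on top of the clone to roll the account tree back to the target slot, then roll
	// back the eviction state, UTXO ledger, settings (latest commitment) and prunable
	// buckets before opening a fresh engine instance on the rolled-back storage.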
+ evictionState := eviction.NewState(newStorage.LatestNonEmptySlot(), newStorage.RootBlocks) + blockCache := blocks.New(evictionState, newStorage.Settings().APIProvider()) + accountsManager := accountsledger.New(newStorage.Settings().APIProvider(), blockCache.Block, newStorage.AccountDiffs, newStorage.Accounts()) + accountsManager.SetLatestCommittedSlot(newStorage.Settings().LatestCommitment().Index()) + + if err := accountsManager.Rollback(index); err != nil { + return nil, ierrors.Wrap(err, "failed to rollback accounts manager") + } + + if err := evictionState.Rollback(newStorage.Settings().LatestFinalizedSlot(), index); err != nil { + return nil, ierrors.Wrap(err, "failed to rollback eviction state") + } + + if err := newStorage.Ledger().Rollback(index); err != nil { + return nil, err + } + + targetCommitment, err := newStorage.Commitments().Load(index) + if err != nil { + return nil, ierrors.Wrapf(err, "error while retrieving commitment for target index %d", index) + } + + if err := newStorage.Settings().Rollback(targetCommitment); err != nil { + return nil, err + } + + if err := newStorage.RollbackPrunable(index); err != nil { + return nil, err } - return e.newEngineInstance(snapshotPath), nil + return e.loadEngineInstanceWithStorage(engineAlias, newStorage), nil } func (e *EngineManager) OnEngineCreated(handler func(*engine.Engine)) (unsubscribe func()) { diff --git a/pkg/protocol/protocol_fork.go b/pkg/protocol/protocol_fork.go index 6434ab43e..13b0f161d 100644 --- a/pkg/protocol/protocol_fork.go +++ b/pkg/protocol/protocol_fork.go @@ -331,8 +331,6 @@ func (p *Protocol) switchEngines() { if success { p.Events.MainEngineSwitched.Trigger(p.MainEngineInstance()) - // TODO: copy over old slots from the old engine to the new one - // Cleanup filesystem if err := oldEngine.RemoveFromFilesystem(); err != nil { p.HandleError(ierrors.Wrap(err, "error removing storage directory after switching engines")) diff --git a/pkg/storage/permanent/commitments.go b/pkg/storage/permanent/commitments.go index 1b5a99177..49608142e 100644 --- a/pkg/storage/permanent/commitments.go +++ b/pkg/storage/permanent/commitments.go @@ -88,3 +88,13 @@ func (c *Commitments) Import(reader io.ReadSeeker) (err error) { return nil } + +func (c *Commitments) Rollback(targetIndex iotago.SlotIndex, lastCommittedIndex iotago.SlotIndex) error { + for slotIndex := targetIndex + 1; slotIndex <= lastCommittedIndex; slotIndex++ { + if err := c.store.KVStore().Delete(lo.PanicOnErr(slotIndex.Bytes())); err != nil { + return ierrors.Wrapf(err, "failed to remove forked commitment for slot %d", slotIndex) + } + } + + return nil +} diff --git a/pkg/storage/permanent/permanent.go b/pkg/storage/permanent/permanent.go index 72ce83a9f..c0dbaee5e 100644 --- a/pkg/storage/permanent/permanent.go +++ b/pkg/storage/permanent/permanent.go @@ -1,6 +1,8 @@ package permanent import ( + copydir "github.com/otiai10/copy" + "github.com/iotaledger/hive.go/ierrors" "github.com/iotaledger/hive.go/kvstore" "github.com/iotaledger/hive.go/lo" @@ -47,6 +49,23 @@ func New(dbConfig database.Config, errorHandler func(error), opts ...options.Opt }) } +func Clone(source *Permanent, dbConfig database.Config, errorHandler func(error), opts ...options.Option[Permanent]) (*Permanent, error) { + source.store.Close() + + if err := copydir.Copy(source.dbConfig.Directory, dbConfig.Directory); err != nil { + return nil, ierrors.Wrap(err, "failed to copy permanent storage directory to new storage path") + } + + source.store = database.NewDBInstance(source.dbConfig) + 
source.settings = NewSettings(lo.PanicOnErr(source.store.KVStore().WithExtendedRealm(kvstore.Realm{settingsPrefix}))) + source.commitments = NewCommitments(lo.PanicOnErr(source.store.KVStore().WithExtendedRealm(kvstore.Realm{commitmentsPrefix})), source.settings.APIProvider()) + source.utxoLedger = utxoledger.New(lo.PanicOnErr(source.store.KVStore().WithExtendedRealm(kvstore.Realm{ledgerPrefix})), source.settings.APIProvider()) + source.accounts = lo.PanicOnErr(source.store.KVStore().WithExtendedRealm(kvstore.Realm{accountsPrefix})) + source.latestNonEmptySlot = lo.PanicOnErr(source.store.KVStore().WithExtendedRealm(kvstore.Realm{latestNonEmptySlotPrefix})) + + return New(dbConfig, errorHandler, opts...), nil +} + func (p *Permanent) Settings() *Settings { return p.settings } diff --git a/pkg/storage/permanent/settings.go b/pkg/storage/permanent/settings.go index def431988..dcbeca02e 100644 --- a/pkg/storage/permanent/settings.go +++ b/pkg/storage/permanent/settings.go @@ -330,6 +330,7 @@ func (s *Settings) Export(writer io.WriteSeeker, targetCommitment *iotago.Commit return ierrors.Wrap(err, "failed to stream write protocol version epoch mapping") } + // TODO: rollback future protocol parameters if it was added after targetCommitment.Index() // Export future protocol parameters if err := stream.WriteCollection(writer, func() (uint64, error) { s.mutex.RLock() @@ -512,6 +513,16 @@ func (s *Settings) Import(reader io.ReadSeeker) (err error) { return nil } +func (s *Settings) Rollback(targetCommitment *model.Commitment) error { + // TODO: rollback future protocol parameters if it was added after targetCommitment.Index() + + if err := s.SetLatestCommitment(targetCommitment); err != nil { + return ierrors.Wrap(err, "failed to set latest commitment") + } + + return nil +} + func (s *Settings) String() string { s.mutex.RLock() defer s.mutex.RUnlock() diff --git a/pkg/storage/prunable/bucket_manager.go b/pkg/storage/prunable/bucket_manager.go index c4ec6ce8c..7bb1396b4 100644 --- a/pkg/storage/prunable/bucket_manager.go +++ b/pkg/storage/prunable/bucket_manager.go @@ -77,6 +77,7 @@ func (b *BucketManager) Shutdown() { b.openDBs.Each(func(index iotago.EpochIndex, db *database.DBInstance) { db.Close() + b.openDBs.Remove(index) }) } @@ -96,7 +97,8 @@ func (b *BucketManager) TotalSize() int64 { b.openDBs.Each(func(key iotago.EpochIndex, val *database.DBInstance) { size, err := dbPrunableDirectorySize(b.dbConfig.Directory, key) if err != nil { - b.errorHandler(ierrors.Wrapf(err, "dbPrunableDirectorySize failed for %s: %s", b.dbConfig.Directory, key)) + b.errorHandler(ierrors.Wrapf(err, "dbPrunableDirectorySize failed for key %s: %s", b.dbConfig.Directory, key)) + return } sum += size @@ -123,7 +125,7 @@ func (b *BucketManager) BucketSize(epoch iotago.EpochIndex) (int64, error) { size, err := dbPrunableDirectorySize(b.dbConfig.Directory, epoch) if err != nil { - return 0, ierrors.Wrapf(err, "dbPrunableDirectorySize failed for %s: %s", b.dbConfig.Directory, epoch) + return 0, ierrors.Wrapf(err, "dbPrunableDirectorySize failed for epoch %s: %s", b.dbConfig.Directory, epoch) } return size, nil diff --git a/pkg/storage/prunable/prunable.go b/pkg/storage/prunable/prunable.go index 64c55189e..e786a0d44 100644 --- a/pkg/storage/prunable/prunable.go +++ b/pkg/storage/prunable/prunable.go @@ -1,10 +1,15 @@ package prunable import ( + "os" + + copydir "github.com/otiai10/copy" + "github.com/iotaledger/hive.go/ierrors" "github.com/iotaledger/hive.go/kvstore" "github.com/iotaledger/hive.go/runtime/ioutils" 
"github.com/iotaledger/hive.go/runtime/options" + "github.com/iotaledger/hive.go/serializer/v2/byteutils" "github.com/iotaledger/iota-core/pkg/core/account" "github.com/iotaledger/iota-core/pkg/model" "github.com/iotaledger/iota-core/pkg/storage/database" @@ -47,6 +52,30 @@ func New(dbConfig database.Config, apiProvider api.Provider, errorHandler func(e } } +func Clone(source *Prunable, dbConfig database.Config, apiProvider api.Provider, errorHandler func(error), opts ...options.Option[BucketManager]) (*Prunable, error) { + dir := utils.NewDirectory(dbConfig.Directory, true) + semiPermanentDBConfig := dbConfig.WithDirectory(dir.PathWithCreate("semipermanent")) + + // Close forked prunable storage before copying its contents. + source.semiPermanentDB.Close() + source.prunableSlotStore.Shutdown() + + // Copy the storage on disk to new location. + if err := copydir.Copy(source.prunableSlotStore.dbConfig.Directory, semiPermanentDBConfig.Directory); err != nil { + return nil, ierrors.Wrap(err, "failed to copy prunable storage directory to new storage path") + } + + // Create a newly opened instance of prunable database. + // `prunableSlotStore` will be opened automatically as the engine requests it, so no need to open it here. + source.semiPermanentDB = database.NewDBInstance(source.semiPermanentDBConfig) + source.decidedUpgradeSignals = epochstore.NewStore(kvstore.Realm{epochPrefixDecidedUpgradeSignals}, kvstore.Realm{lastPrunedEpochKey}, source.semiPermanentDB.KVStore(), pruningDelayDecidedUpgradeSignals, model.VersionAndHash.Bytes, model.VersionAndHashFromBytes) + source.poolRewards = epochstore.NewEpochKVStore(kvstore.Realm{epochPrefixPoolRewards}, kvstore.Realm{lastPrunedEpochKey}, source.semiPermanentDB.KVStore(), pruningDelayPoolRewards) + source.poolStats = epochstore.NewStore(kvstore.Realm{epochPrefixPoolStats}, kvstore.Realm{lastPrunedEpochKey}, source.semiPermanentDB.KVStore(), pruningDelayPoolStats, (*model.PoolsStats).Bytes, model.PoolsStatsFromBytes) + source.committee = epochstore.NewStore(kvstore.Realm{epochPrefixCommittee}, kvstore.Realm{lastPrunedEpochKey}, source.semiPermanentDB.KVStore(), pruningDelayCommittee, (*account.Accounts).Bytes, account.AccountsFromBytes) + + return New(dbConfig, apiProvider, errorHandler, opts...), nil +} + func (p *Prunable) RestoreFromDisk() (lastPrunedEpoch iotago.EpochIndex) { lastPrunedEpoch = p.prunableSlotStore.RestoreFromDisk() @@ -118,3 +147,72 @@ func (p *Prunable) Flush() { p.errorHandler(err) } } + +func (p *Prunable) Rollback(forkingSlot iotago.SlotIndex) error { + forkingSlotTimeProvider := p.apiProvider.APIForSlot(forkingSlot).TimeProvider() + // remove all buckets that are newer than epoch of the forkingSlot + forkingEpoch := forkingSlotTimeProvider.EpochFromSlot(forkingSlot) + firstForkedSlotEpoch := forkingSlotTimeProvider.EpochFromSlot(forkingSlot + 1) + + for epochIdx := forkingEpoch + 1; ; epochIdx++ { + if exists, err := PathExists(dbPathFromIndex(p.prunableSlotStore.dbConfig.Directory, epochIdx)); err != nil { + return ierrors.Wrapf(err, "failed to check if bucket directory exists in forkedPrunable storage for epoch %d", epochIdx) + } else if !exists { + break + } + + if err := os.RemoveAll(dbPathFromIndex(p.prunableSlotStore.dbConfig.Directory, epochIdx)); err != nil { + return ierrors.Wrapf(err, "failed to remove bucket directory in forkedPrunable storage for epoch %d", epochIdx) + } + + // Remove entries for epochs bigger or equal epochFromSlot(forkingPoint+1) in semiPermanent storage. 
+ // Those entries are part of the fork and values from the old storage should not be used + // values from the candidate storage should be used in its place; that's why we copy those entries + // from the candidate engine to old storage. + if epochIdx >= firstForkedSlotEpoch { + if err := p.semiPermanentDB.KVStore().DeletePrefix(byteutils.ConcatBytes(kvstore.Realm{epochPrefixPoolRewards}, epochIdx.MustBytes())); err != nil { + return err + } + if err := p.semiPermanentDB.KVStore().DeletePrefix(byteutils.ConcatBytes(kvstore.Realm{epochPrefixPoolStats}, epochIdx.MustBytes())); err != nil { + return err + } + if err := p.semiPermanentDB.KVStore().DeletePrefix(byteutils.ConcatBytes(kvstore.Realm{epochPrefixDecidedUpgradeSignals}, epochIdx.MustBytes())); err != nil { + return err + } + // TODO: remove committee for the next epoch in some conditions + if err := p.semiPermanentDB.KVStore().DeletePrefix(byteutils.ConcatBytes(kvstore.Realm{epochPrefixCommittee}, (epochIdx + 1).MustBytes())); err != nil { + return err + } + } + } + + // If the forking slot is the last slot of an epoch, then don't need to clean anything as the bucket with the + // first forked slot contains only forked blocks and was removed in the previous step. + // The only situation in which blocks need to be copied is if the first forked slot is in the middle of the bucket. + // Then, we need to remove data in source bucket in slots [forkingSlot+1; bucketEnd], + // and then copy data for those slots from the candidate storage to avoid syncing the data again. + if forkingSlotTimeProvider.EpochStart(firstForkedSlotEpoch) < forkingSlot+1 { + // getDBInstance opens the DB if needed, so no need to manually create KV instance as with semiPermanentDB + oldBucketKvStore := p.prunableSlotStore.getDBInstance(firstForkedSlotEpoch).KVStore() + for clearSlot := forkingSlot + 1; clearSlot <= forkingSlotTimeProvider.EpochEnd(firstForkedSlotEpoch); clearSlot++ { + // delete slot prefix from forkedPrunable storage that will be eventually copied into the new engine + if err := oldBucketKvStore.DeletePrefix(clearSlot.MustBytes()); err != nil { + return ierrors.Wrapf(err, "error while clearing slot %d in bucket for epoch %d", clearSlot, firstForkedSlotEpoch) + } + } + } + + return nil +} + +func PathExists(path string) (bool, error) { + if _, err := os.Stat(path); err != nil { + if os.IsNotExist(err) { + return false, nil + } + + return false, err + } + + return true, nil +} diff --git a/pkg/storage/storage.go b/pkg/storage/storage.go index ee220e88c..14cee3d60 100644 --- a/pkg/storage/storage.go +++ b/pkg/storage/storage.go @@ -4,6 +4,7 @@ import ( "sync" "time" + "github.com/iotaledger/hive.go/ierrors" hivedb "github.com/iotaledger/hive.go/kvstore/database" "github.com/iotaledger/hive.go/runtime/options" "github.com/iotaledger/iota-core/pkg/model" @@ -76,6 +77,41 @@ func New(directory string, dbVersion byte, errorHandler func(error), opts ...opt }) } +func CloneStorage(source *Storage, directory string, dbVersion byte, errorHandler func(error), opts ...options.Option[Storage]) (*Storage, error) { + s := options.Apply(&Storage{ + dir: utils.NewDirectory(directory, true), + errorHandler: errorHandler, + lastPrunedEpoch: model.NewEvictionIndex[iotago.EpochIndex](), + optsDBEngine: hivedb.EngineRocksDB, + optsPruningDelay: 30, + optPruningSizeEnabled: false, + optsPruningSizeMaxTargetSizeBytes: 30 * 1024 * 1024 * 1024, // 30GB + optsPruningSizeReductionPercentage: 0.1, + optsPruningSizeCooldownTime: 5 * time.Minute, + }, opts) + + dbConfig := 
database.Config{ + Engine: s.optsDBEngine, + Directory: s.dir.PathWithCreate(permanentDirName), + Version: dbVersion, + PrefixHealth: []byte{storePrefixHealth}, + } + + permanentClone, err := permanent.Clone(source.permanent, dbConfig, errorHandler) + if err != nil { + return nil, ierrors.Wrap(err, "error while cloning permanent storage") + } + prunableClone, err := prunable.Clone(source.prunable, dbConfig.WithDirectory(s.dir.PathWithCreate(prunableDirName)), permanentClone.Settings().APIProvider(), s.errorHandler, s.optsBucketManagerOptions...) + if err != nil { + return nil, ierrors.Wrap(err, "error while cloning prunable storage") + } + + s.permanent = permanentClone + s.prunable = prunableClone + + return s, nil +} + func (s *Storage) Directory() string { return s.dir.Path() } @@ -106,3 +142,7 @@ func (s *Storage) Flush() { s.permanent.Flush() s.prunable.Flush() } + +func (s *Storage) RollbackPrunable(forkingSlot iotago.SlotIndex) error { + return s.prunable.Rollback(forkingSlot) +} From d33e95952519662367100cf5db6f291f6e810a25 Mon Sep 17 00:00:00 2001 From: Piotr Macek <4007944+piotrm50@users.noreply.github.com> Date: Wed, 6 Sep 2023 12:56:11 +0200 Subject: [PATCH 02/17] Implement unit test for cloning storage. --- pkg/storage/prunable/prunable.go | 5 +- pkg/storage/storage_test.go | 117 +++++++++++++++++++++++++++++- pkg/storage/testframework_test.go | 16 ++-- 3 files changed, 125 insertions(+), 13 deletions(-) diff --git a/pkg/storage/prunable/prunable.go b/pkg/storage/prunable/prunable.go index e786a0d44..a90026d6a 100644 --- a/pkg/storage/prunable/prunable.go +++ b/pkg/storage/prunable/prunable.go @@ -53,15 +53,12 @@ func New(dbConfig database.Config, apiProvider api.Provider, errorHandler func(e } func Clone(source *Prunable, dbConfig database.Config, apiProvider api.Provider, errorHandler func(error), opts ...options.Option[BucketManager]) (*Prunable, error) { - dir := utils.NewDirectory(dbConfig.Directory, true) - semiPermanentDBConfig := dbConfig.WithDirectory(dir.PathWithCreate("semipermanent")) - // Close forked prunable storage before copying its contents. source.semiPermanentDB.Close() source.prunableSlotStore.Shutdown() // Copy the storage on disk to new location. 
- if err := copydir.Copy(source.prunableSlotStore.dbConfig.Directory, semiPermanentDBConfig.Directory); err != nil { + if err := copydir.Copy(source.prunableSlotStore.dbConfig.Directory, dbConfig.Directory); err != nil { return nil, ierrors.Wrap(err, "failed to copy prunable storage directory to new storage path") } diff --git a/pkg/storage/storage_test.go b/pkg/storage/storage_test.go index 5aad3b9bf..bf0b377f0 100644 --- a/pkg/storage/storage_test.go +++ b/pkg/storage/storage_test.go @@ -6,13 +6,14 @@ import ( "github.com/stretchr/testify/require" "github.com/iotaledger/hive.go/ds/types" + "github.com/iotaledger/hive.go/kvstore" "github.com/iotaledger/iota-core/pkg/storage" "github.com/iotaledger/iota-core/pkg/storage/database" iotago "github.com/iotaledger/iota.go/v4" ) func TestStorage_PruneByEpochIndex(t *testing.T) { - tf := NewTestFramework(t) + tf := NewTestFramework(t, t.TempDir()) defer tf.Shutdown() totalEpochs := 10 @@ -52,7 +53,7 @@ func TestStorage_PruneByEpochIndex(t *testing.T) { } func TestStorage_PruneByDepth(t *testing.T) { - tf := NewTestFramework(t) + tf := NewTestFramework(t, t.TempDir()) defer tf.Shutdown() totalEpochs := 20 @@ -125,7 +126,7 @@ func TestStorage_PruneByDepth(t *testing.T) { } func TestStorage_PruneBySize(t *testing.T) { - tf := NewTestFramework(t, + tf := NewTestFramework(t, t.TempDir(), storage.WithPruningDelay(2), storage.WithPruningSizeEnable(true), storage.WithPruningSizeMaxTargetSizeBytes(15*MB), @@ -166,7 +167,7 @@ func TestStorage_PruneBySize(t *testing.T) { } func TestStorage_RestoreFromDisk(t *testing.T) { - tf := NewTestFramework(t, storage.WithPruningDelay(1)) + tf := NewTestFramework(t, t.TempDir(), storage.WithPruningDelay(1)) totalEpochs := 9 tf.GeneratePermanentData(5 * MB) @@ -209,3 +210,111 @@ func TestStorage_RestoreFromDisk(t *testing.T) { types.NewTuple(0, false), ) } + +func TestStorage_CopyFromForkedStorageEmpty(t *testing.T) { + tf1 := NewTestFramework(t, t.TempDir()) + + totalEpochs := 14 + // Generate data in the old storage (source). It contains data since the genesis and one epoch after the fork. + for i := 0; i <= totalEpochs; i++ { + tf1.GeneratePrunableData(iotago.EpochIndex(i), 500*KB) + tf1.GenerateSemiPermanentData(iotago.EpochIndex(i)) + } + + clonedStorage, err := storage.CloneStorage(tf1.Instance, t.TempDir(), 0, func(err error) { + t.Log(err) + }) + require.NoError(t, err) + + // Assert that permanent storage contains exactly the same data. + permanentKVStoreSource, err := tf1.Instance.Accounts().WithRealm(kvstore.EmptyPrefix) + require.NoError(t, err) + permanentKVStoreTarget, err := clonedStorage.Accounts().WithRealm(kvstore.EmptyPrefix) + require.NoError(t, err) + + require.NoError(t, permanentKVStoreSource.Iterate(kvstore.EmptyPrefix, func(key kvstore.Key, sourceValue kvstore.Value) bool { + targetValue, getErr := permanentKVStoreTarget.Get(key) + require.NoError(t, getErr) + + require.NotNil(t, targetValue) + require.EqualValues(t, sourceValue, targetValue) + + return true + })) + + require.NoError(t, permanentKVStoreTarget.Iterate(kvstore.EmptyPrefix, func(key kvstore.Key, sourceValue kvstore.Value) bool { + targetValue, getErr := permanentKVStoreSource.Get(key) + require.NoError(t, getErr) + + require.NotNil(t, targetValue) + require.EqualValues(t, sourceValue, targetValue) + + return true + })) + + // Assert that semiPermanentStorage contains exactly the same data. 
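	// Editorial sketch (not part of this change): the bidirectional compare repeated
	// below for each store pair could be expressed as a helper; requireEqualKVStores
	// is a hypothetical name used only for illustration.
	//
	//   func requireEqualKVStores(t *testing.T, a, b kvstore.KVStore) {
	//       require.NoError(t, a.Iterate(kvstore.EmptyPrefix, func(key kvstore.Key, aValue kvstore.Value) bool {
	//           bValue, err := b.Get(key)
	//           require.NoError(t, err)
	//           require.EqualValues(t, aValue, bValue)
	//
	//           return true
	//       }))
	//
	//       require.NoError(t, b.Iterate(kvstore.EmptyPrefix, func(key kvstore.Key, bValue kvstore.Value) bool {
	//           aValue, err := a.Get(key)
	//           require.NoError(t, err)
	//           require.EqualValues(t, aValue, bValue)
	//
	//           return true
	//       }))
	//   }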
+ rewardsKVStoreSource, err := tf1.Instance.RewardsForEpoch(0) + require.NoError(t, err) + semiPermanentKVStoreSource, err := rewardsKVStoreSource.WithRealm(kvstore.EmptyPrefix) + require.NoError(t, err) + rewardsKVStoreTarget, err := clonedStorage.RewardsForEpoch(0) + require.NoError(t, err) + semiPermanentKVStoreTarget, err := rewardsKVStoreTarget.WithRealm(kvstore.EmptyPrefix) + require.NoError(t, err) + + require.NoError(t, semiPermanentKVStoreSource.Iterate(kvstore.EmptyPrefix, func(key kvstore.Key, sourceValue kvstore.Value) bool { + targetValue, getErr := semiPermanentKVStoreTarget.Get(key) + require.NoError(t, getErr) + + require.NotNil(t, targetValue) + require.EqualValues(t, sourceValue, targetValue) + + return true + })) + + require.NoError(t, semiPermanentKVStoreTarget.Iterate(kvstore.EmptyPrefix, func(key kvstore.Key, sourceValue kvstore.Value) bool { + targetValue, getErr := semiPermanentKVStoreSource.Get(key) + require.NoError(t, getErr) + + require.NotNil(t, targetValue) + require.EqualValues(t, sourceValue, targetValue) + + return true + })) + + // Assert that prunableSlotStorage contains exactly the same data. + for epochIdx := 0; epochIdx <= totalEpochs; epochIdx++ { + // little hack to retrieve underlying prunableSlotStore KVStore without any realm + epochStartSlot := tf1.apiProvider.CurrentAPI().TimeProvider().EpochStart(iotago.EpochIndex(epochIdx)) + + attestationKVStoreSource, err := tf1.Instance.Attestations(epochStartSlot) + require.NoError(t, err) + prunableSlotKVStoreSource, err := attestationKVStoreSource.WithRealm(kvstore.EmptyPrefix) + require.NoError(t, err) + + attestationKVStoreTarget, err := clonedStorage.Attestations(epochStartSlot) + require.NoError(t, err) + prunableSlotKVStoreTarget, err := attestationKVStoreTarget.WithRealm([]byte{}) + require.NoError(t, err) + + require.NoError(t, prunableSlotKVStoreSource.Iterate(kvstore.EmptyPrefix, func(key kvstore.Key, sourceValue kvstore.Value) bool { + targetValue, getErr := prunableSlotKVStoreTarget.Get(key) + require.NoError(t, getErr) + + require.NotNil(t, targetValue) + require.EqualValues(t, sourceValue, targetValue) + + return true + })) + + require.NoError(t, prunableSlotKVStoreTarget.Iterate(kvstore.EmptyPrefix, func(key kvstore.Key, sourceValue kvstore.Value) bool { + targetValue, getErr := prunableSlotKVStoreSource.Get(key) + require.NoError(t, getErr) + + require.NotNil(t, targetValue) + require.EqualValues(t, sourceValue, targetValue) + + return true + })) + } +} diff --git a/pkg/storage/testframework_test.go b/pkg/storage/testframework_test.go index b684efb96..571097270 100644 --- a/pkg/storage/testframework_test.go +++ b/pkg/storage/testframework_test.go @@ -6,6 +6,7 @@ import ( "math/rand" "path/filepath" "testing" + "time" "github.com/stretchr/testify/require" @@ -41,12 +42,11 @@ type TestFramework struct { storageFactoryFunc func() *storage.Storage } -func NewTestFramework(t *testing.T, storageOpts ...options.Option[storage.Storage]) *TestFramework { +func NewTestFramework(t *testing.T, baseDir string, storageOpts ...options.Option[storage.Storage]) *TestFramework { errorHandler := func(err error) { t.Log(err) } - baseDir := t.TempDir() storageFactoryFunc := func() *storage.Storage { instance := storage.New(baseDir, 0, errorHandler, storageOpts...) 
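		// Editorial note: baseDir is now supplied by the caller (typically t.TempDir()),
		// e.g. NewTestFramework(t, t.TempDir()), so a test can create a second, independent
		// directory for the cloned storage instead of always reusing the framework's own one.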
require.NoError(t, instance.Settings().StoreProtocolParametersForStartEpoch(iotago.NewV3ProtocolParameters(), 0)) @@ -87,8 +87,8 @@ func (t *TestFramework) GeneratePrunableData(epoch iotago.EpochIndex, size int64 initialStorageSize := t.Instance.PrunableDatabaseSize() apiForEpoch := t.apiProvider.APIForEpoch(epoch) + startSlot := apiForEpoch.TimeProvider().EpochStart(epoch) endSlot := apiForEpoch.TimeProvider().EpochEnd(epoch) - var createdBytes int64 for createdBytes < size { block := tpkg.RandProtocolBlock(&iotago.BasicBlock{ @@ -100,7 +100,9 @@ func (t *TestFramework) GeneratePrunableData(epoch iotago.EpochIndex, size int64 modelBlock, err := model.BlockFromBlock(block, apiForEpoch) require.NoError(t.t, err) - blockStorageForSlot, err := t.Instance.Blocks(endSlot) + // block slot is randomly selected within the epoch + blockSlot := startSlot + iotago.SlotIndex(rand.Intn(int(endSlot-startSlot+1))) + blockStorageForSlot, err := t.Instance.Blocks(blockSlot) require.NoError(t.t, err) err = blockStorageForSlot.Store(modelBlock) require.NoError(t.t, err) @@ -110,6 +112,10 @@ func (t *TestFramework) GeneratePrunableData(epoch iotago.EpochIndex, size int64 } t.Instance.Flush() + + // Sleep to let RocksDB perform compaction. + time.Sleep(100 * time.Millisecond) + t.assertPrunableSizeGreater(initialStorageSize + size) // fmt.Printf("> created %d MB of bucket prunable data\n\tPermanent: %dMB\n\tPrunable: %dMB\n", createdBytes/MB, t.Instance.PermanentDatabaseSize()/MB, t.Instance.PrunableDatabaseSize()/MB) @@ -140,7 +146,7 @@ func (t *TestFramework) GenerateSemiPermanentData(epoch iotago.EpochIndex) { versionAndHash := model.VersionAndHash{ Version: 1, - Hash: iotago.Identifier{2}, + Hash: tpkg.Rand32ByteArray(), } err = decidedUpgradeSignalsStore.Store(epoch, versionAndHash) require.NoError(t.t, err) From 7d626c39b877df7427bbd5b1ddd39f5dda9885a8 Mon Sep 17 00:00:00 2001 From: Piotr Macek <4007944+piotrm50@users.noreply.github.com> Date: Fri, 8 Sep 2023 07:21:11 +0200 Subject: [PATCH 03/17] Debugging attestation divergence after fork WIP --- .../engine/attestation/attestations.go | 1 + .../attestation/slotattestation/manager.go | 39 ++ pkg/protocol/engine/eviction/state.go | 2 - pkg/protocol/enginemanager/enginemanager.go | 22 +- .../sybilprotectionv1/performance/rollback.go | 161 +++++++ pkg/storage/database/utils.go | 4 +- pkg/storage/prunable/prunable.go | 81 ++-- pkg/storage/storage.go | 4 +- pkg/tests/protocol_engine_switching_test.go | 1 - pkg/testsuite/mock/node.go | 393 +++++++++--------- 10 files changed, 460 insertions(+), 248 deletions(-) create mode 100644 pkg/protocol/sybilprotection/sybilprotectionv1/performance/rollback.go diff --git a/pkg/protocol/engine/attestation/attestations.go b/pkg/protocol/engine/attestation/attestations.go index 74bd0dc94..f860b95d9 100644 --- a/pkg/protocol/engine/attestation/attestations.go +++ b/pkg/protocol/engine/attestation/attestations.go @@ -22,6 +22,7 @@ type Attestations interface { Import(reader io.ReadSeeker) (err error) Export(writer io.WriteSeeker, targetSlot iotago.SlotIndex) (err error) + Rollback(index iotago.SlotIndex) (err error) RestoreFromDisk() (err error) diff --git a/pkg/protocol/engine/attestation/slotattestation/manager.go b/pkg/protocol/engine/attestation/slotattestation/manager.go index 3779e7753..77b40b488 100644 --- a/pkg/protocol/engine/attestation/slotattestation/manager.go +++ b/pkg/protocol/engine/attestation/slotattestation/manager.go @@ -1,10 +1,13 @@ package slotattestation import ( + "fmt" + 
"github.com/iotaledger/hive.go/ads" "github.com/iotaledger/hive.go/core/memstorage" "github.com/iotaledger/hive.go/ierrors" "github.com/iotaledger/hive.go/kvstore" + "github.com/iotaledger/hive.go/lo" "github.com/iotaledger/hive.go/runtime/module" "github.com/iotaledger/hive.go/runtime/syncutils" "github.com/iotaledger/iota-core/pkg/core/account" @@ -147,6 +150,7 @@ func (m *Manager) GetMap(index iotago.SlotIndex) (ads.Map[iotago.AccountID, *iot // AddAttestationFromValidationBlock adds an attestation from a block to the future attestations (beyond the attestation window). func (m *Manager) AddAttestationFromValidationBlock(block *blocks.Block) { + fmt.Println("AddAttestationFromValidationBlock", block.ID()) // Only track validator blocks. if _, isValidationBlock := block.ValidationBlock(); !isValidationBlock { return @@ -256,6 +260,7 @@ func (m *Manager) Commit(index iotago.SlotIndex) (newCW uint64, attestationsRoot // Add all attestations to the tree and calculate the new cumulative weight. for _, a := range attestations { + fmt.Println("pending attestation while committing", index, lo.PanicOnErr(a.BlockID(m.apiProvider.APIForSlot(index)))) // TODO: which weight are we using here? The current one? Or the one of the slot of the attestation/commitmentID? if _, exists := m.committeeFunc(index).GetSeat(a.IssuerID); exists { if err := tree.Set(a.IssuerID, a); err != nil { @@ -275,6 +280,40 @@ func (m *Manager) Commit(index iotago.SlotIndex) (newCW uint64, attestationsRoot return m.lastCumulativeWeight, iotago.Identifier(tree.Root()), nil } +// Rollback rolls back the component state as if the last committed slot was targetSlot. +// It populates pendingAttestation store with previously committed attestations in order to create correct commitment in the future. +// As it modifies in-memory storage, it should only be called on the target engine as calling it on a temporary component will have no effect. +func (m *Manager) Rollback(targetSlot iotago.SlotIndex) error { + m.commitmentMutex.RLock() + defer m.commitmentMutex.RUnlock() + + if targetSlot > m.lastCommittedSlot { + return ierrors.Errorf("slot %d is newer than last committed slot %d", targetSlot, m.lastCommittedSlot) + } + attestationSlotIndex, isValid := m.computeAttestationCommitmentOffset(targetSlot) + if !isValid { + return nil + } + + // We only need to export the committed attestations at targetSlot as these contain all the attestations for the + // slots of targetSlot - attestationCommitmentOffset to targetSlot. This is sufficient to reconstruct the pending attestations + // for targetSlot+1. 
+ attestationsStorage, err := m.attestationsForSlot(targetSlot) + if err != nil { + return ierrors.Wrapf(err, "failed to get attestations of slot %d", targetSlot) + } + + if err = attestationsStorage.Stream(func(key iotago.AccountID, value *iotago.Attestation) error { + m.applyToPendingAttestations(value, attestationSlotIndex) + + return nil + }); err != nil { + return ierrors.Wrapf(err, "failed to stream attestations of slot %d", targetSlot) + } + + return nil +} + func (m *Manager) computeAttestationCommitmentOffset(slot iotago.SlotIndex) (cutoffIndex iotago.SlotIndex, isValid bool) { if slot < m.apiProvider.APIForSlot(slot).ProtocolParameters().MaxCommittableAge() { return 0, false diff --git a/pkg/protocol/engine/eviction/state.go b/pkg/protocol/engine/eviction/state.go index aa31a2f69..bbaecdb20 100644 --- a/pkg/protocol/engine/eviction/state.go +++ b/pkg/protocol/engine/eviction/state.go @@ -283,7 +283,6 @@ func (s *State) Rollback(lowerTarget, targetIndex iotago.SlotIndex) error { defer s.evictionMutex.RUnlock() start, _ := s.delayedBlockEvictionThreshold(lowerTarget) - latestNonEmptySlot := iotago.SlotIndex(0) for currentSlot := start; currentSlot <= targetIndex; currentSlot++ { @@ -304,7 +303,6 @@ func (s *State) Rollback(lowerTarget, targetIndex iotago.SlotIndex) error { if err := s.latestNonEmptyStore.Set([]byte{latestNonEmptySlotKey}, latestNonEmptySlot.MustBytes()); err != nil { return ierrors.Wrap(err, "failed to store latest non empty slot") } - return nil } diff --git a/pkg/protocol/enginemanager/enginemanager.go b/pkg/protocol/enginemanager/enginemanager.go index c5c8c1633..a301b646e 100644 --- a/pkg/protocol/enginemanager/enginemanager.go +++ b/pkg/protocol/enginemanager/enginemanager.go @@ -262,9 +262,8 @@ func (e *EngineManager) loadEngineInstanceWithStorage(engineAlias string, storag } func (e *EngineManager) ForkEngineAtSlot(index iotago.SlotIndex) (*engine.Engine, error) { - fmt.Println("fork at slot ", index) engineAlias := lo.PanicOnErr(uuid.NewUUID()).String() - + fmt.Println("fork", e.activeInstance.Name(), "into", engineAlias) errorHandler := func(err error) { e.errorHandler(ierrors.Wrapf(err, "engine (%s)", engineAlias[0:8])) } @@ -277,16 +276,18 @@ func (e *EngineManager) ForkEngineAtSlot(index iotago.SlotIndex) (*engine.Engine } // Remove commitments that after forking point. - if err := newStorage.Commitments().Rollback(index, newStorage.Settings().LatestCommitment().Index()); err != nil { + latestCommitment := newStorage.Settings().LatestCommitment() + if err := newStorage.Commitments().Rollback(index, latestCommitment.Index()); err != nil { return nil, ierrors.Wrap(err, "failed to rollback commitments") } - // Create temporary components and rollback their state, which will be reflected on disk. 
evictionState := eviction.NewState(newStorage.LatestNonEmptySlot(), newStorage.RootBlocks) + evictionState.Initialize(latestCommitment.Index()) + blockCache := blocks.New(evictionState, newStorage.Settings().APIProvider()) accountsManager := accountsledger.New(newStorage.Settings().APIProvider(), blockCache.Block, newStorage.AccountDiffs, newStorage.Accounts()) - accountsManager.SetLatestCommittedSlot(newStorage.Settings().LatestCommitment().Index()) + accountsManager.SetLatestCommittedSlot(latestCommitment.Index()) if err := accountsManager.Rollback(index); err != nil { return nil, ierrors.Wrap(err, "failed to rollback accounts manager") } @@ -294,7 +295,6 @@ func (e *EngineManager) ForkEngineAtSlot(index iotago.SlotIndex) (*engine.Engine if err := evictionState.Rollback(newStorage.Settings().LatestFinalizedSlot(), index); err != nil { return nil, ierrors.Wrap(err, "failed to rollback eviction state") } - if err := newStorage.Ledger().Rollback(index); err != nil { return nil, err } @@ -308,11 +308,17 @@ func (e *EngineManager) ForkEngineAtSlot(index iotago.SlotIndex) (*engine.Engine return nil, err } - if err := newStorage.RollbackPrunable(index); err != nil { + if err := newStorage.RollbackPrunable(index, latestCommitment.Index()); err != nil { return nil, err } - return e.loadEngineInstanceWithStorage(engineAlias, newStorage), nil + candidateEngine := e.loadEngineInstanceWithStorage(engineAlias, newStorage) + + if err := candidateEngine.Attestations.Rollback(index); err != nil { + return nil, ierrors.Wrap(err, "error while rolling back attestations storage on candidate engine") + } + + return candidateEngine, nil } func (e *EngineManager) OnEngineCreated(handler func(*engine.Engine)) (unsubscribe func()) { diff --git a/pkg/protocol/sybilprotection/sybilprotectionv1/performance/rollback.go b/pkg/protocol/sybilprotection/sybilprotectionv1/performance/rollback.go new file mode 100644 index 000000000..8cb368a45 --- /dev/null +++ b/pkg/protocol/sybilprotection/sybilprotectionv1/performance/rollback.go @@ -0,0 +1,161 @@ +package performance + +// +//func (t *Tracker) Rollback(targetSlotIndex iotago.SlotIndex) error { +// t.mutex.Lock() +// defer t.mutex.Unlock() +// +// timeProvider := t.apiProvider.APIForSlot(targetSlotIndex).TimeProvider() +// targetEpoch := timeProvider.EpochFromSlot(targetSlotIndex) +// +// // if the target index is the last slot of the epoch, the epoch was committed +// if timeProvider.EpochEnd(targetEpoch) != targetSlotIndex { +// targetEpoch-- +// } +// +// err := t.rollbackPerformanceFactor(timeProvider.EpochStart(targetEpoch+1), targetSlotIndex) +// if err != nil { +// return ierrors.Wrap(err, "unable to export performance factor") +// } +// +// err = t.rollbackPoolRewards(targetEpoch) +// if err != nil { +// return ierrors.Wrap(err, "unable to export pool rewards") +// } +// +// err = t.rollbackPoolsStats(targetEpoch) +// if err != nil { +// return ierrors.Wrap(err, "unable to export pool stats") +// } +// +// err = t.rollbackCommittees(targetSlotIndex) +// if err != nil { +// return ierrors.Wrap(err, "unable to export committees") +// } +// +// return nil +//} +// +//func (t *Tracker) rollbackPerformanceFactor(targetSlot, lastCommittedSlot iotago.SlotIndex) error { +// t.performanceFactorsMutex.RLock() +// defer t.performanceFactorsMutex.RUnlock() +// +// for currentSlot := targetSlot; currentSlot <= targetSlot; currentSlot++ { +// // TODO: decrease this in import/export to uint16 in pf Load/Store/... 
if we are sure on the performance factor calculation and its expected upper bond +// // TODO: clean the current epoch only as future epochs will be removed on disk +// performanceFactors, err := t.performanceFactorsFunc(currentSlot) +// if err != nil { +// return ierrors.Wrapf(err, "unable to get performance factors for slot index %d", currentSlot) +// } +// +// } +// +// return nil +//} +// +//func (t *Tracker) rollbackPoolRewards(targetEpoch, lastCommittedEpoch iotago.EpochIndex) error { +// +// for epoch := targetEpoch + 1; epoch <= lastCommittedEpoch; epoch++ { +// rewardsMap, err := t.rewardsStorePerEpochFunc(epoch) +// if err != nil { +// return ierrors.Wrapf(err, "unable to get rewards store for epoch index %d", epoch) +// } +// +// if err := rewardsMap.Clear(); err != nil { +// return ierrors.Wrapf(err, "error while clearing rewards store for epoch %d", epoch) +// } +// } +// +// return nil +//} +// +//func (t *Tracker) rollbackPoolsStats(targetEpoch iotago.EpochIndex) error { +// var epochCount uint64 +// if err := pWriter.WriteValue("pools stats epoch count", epochCount, true); err != nil { +// return ierrors.Wrap(err, "unable to write epoch count") +// } +// // export all stored pools +// var innerErr error +// if err := t.poolStatsStore.StreamBytes(func(key []byte, value []byte) error { +// epochIndex := iotago.EpochIndex(binary.LittleEndian.Uint64(key)) +// if epochIndex > targetEpoch { +// // continue +// return nil +// } +// if err := pWriter.WriteBytes(key); err != nil { +// innerErr = ierrors.Wrapf(err, "unable to write epoch index %d", epochIndex) +// return innerErr +// } +// if err := pWriter.WriteBytes(value); err != nil { +// innerErr = ierrors.Wrapf(err, "unable to write pools stats for epoch %d", epochIndex) +// return innerErr +// } +// epochCount++ +// +// return nil +// }); err != nil { +// return ierrors.Wrap(err, "unable to iterate over pools stats") +// } else if innerErr != nil { +// return ierrors.Wrap(innerErr, "error while iterating over pools stats") +// } +// if err := pWriter.WriteValueAtBookmark("pools stats epoch count", epochCount); err != nil { +// return ierrors.Wrap(err, "unable to write stats epoch count at bookmarked position") +// } +// +// return nil +//} +// +//func (t *Tracker) rollbackCommittees(targetSlot iotago.SlotIndex) error { +// var epochCount uint64 +// if err := pWriter.WriteValue("committees epoch count", epochCount, true); err != nil { +// return ierrors.Wrap(err, "unable to write committees epoch count") +// } +// apiForSlot := t.apiProvider.APIForSlot(targetSlot) +// epochFromTargetSlot := apiForSlot.TimeProvider().EpochFromSlot(targetSlot) +// +// pointOfNoReturn := apiForSlot.TimeProvider().EpochEnd(epochFromTargetSlot) - apiForSlot.ProtocolParameters().MaxCommittableAge() +// +// var innerErr error +// err := t.committeeStore.StreamBytes(func(epochBytes []byte, committeeBytes []byte) error { +// epoch := iotago.EpochIndex(binary.LittleEndian.Uint64(epochBytes)) +// +// // We have a committee for an epoch higher than the targetSlot +// // 1. we trust the point of no return, we export the committee for the next epoch +// // 2. 
if we don't trust the point-of-no-return +// // - we were able to rotate a committee, then we export it +// // - we were not able to rotate a committee (reused), then we don't export it +// if epoch > epochFromTargetSlot && targetSlot < pointOfNoReturn { +// committee, _, err := account.AccountsFromBytes(committeeBytes) +// if err != nil { +// innerErr = ierrors.Wrapf(err, "failed to parse committee bytes for epoch %d", epoch) +// return innerErr +// } +// if committee.IsReused() { +// return nil +// } +// } +// +// if err := pWriter.WriteBytes(epochBytes); err != nil { +// innerErr = ierrors.Wrap(err, "unable to write epoch index") +// return innerErr +// } +// if err := pWriter.WriteBytes(committeeBytes); err != nil { +// innerErr = ierrors.Wrap(err, "unable to write epoch committee") +// return innerErr +// } +// epochCount++ +// +// return nil +// }) +// if err != nil { +// return ierrors.Wrapf(err, "unable to iterate over committee base store: %w", innerErr) +// } +// if innerErr != nil { +// return ierrors.Wrap(err, "error while iterating over committee base store") +// } +// if err = pWriter.WriteValueAtBookmark("committees epoch count", epochCount); err != nil { +// return ierrors.Wrap(err, "unable to write committee epoch count at bookmarked position") +// } +// +// return nil +//} diff --git a/pkg/storage/database/utils.go b/pkg/storage/database/utils.go index 6f009247f..3333684df 100644 --- a/pkg/storage/database/utils.go +++ b/pkg/storage/database/utils.go @@ -1,6 +1,8 @@ package database -import "github.com/iotaledger/hive.go/kvstore" +import ( + "github.com/iotaledger/hive.go/kvstore" +) func FlushAndClose(store kvstore.KVStore) error { if err := store.Flush(); err != nil { diff --git a/pkg/storage/prunable/prunable.go b/pkg/storage/prunable/prunable.go index a90026d6a..1e691e3aa 100644 --- a/pkg/storage/prunable/prunable.go +++ b/pkg/storage/prunable/prunable.go @@ -145,56 +145,69 @@ func (p *Prunable) Flush() { } } -func (p *Prunable) Rollback(forkingSlot iotago.SlotIndex) error { - forkingSlotTimeProvider := p.apiProvider.APIForSlot(forkingSlot).TimeProvider() - // remove all buckets that are newer than epoch of the forkingSlot - forkingEpoch := forkingSlotTimeProvider.EpochFromSlot(forkingSlot) - firstForkedSlotEpoch := forkingSlotTimeProvider.EpochFromSlot(forkingSlot + 1) - - for epochIdx := forkingEpoch + 1; ; epochIdx++ { - if exists, err := PathExists(dbPathFromIndex(p.prunableSlotStore.dbConfig.Directory, epochIdx)); err != nil { - return ierrors.Wrapf(err, "failed to check if bucket directory exists in forkedPrunable storage for epoch %d", epochIdx) - } else if !exists { - break - } +func (p *Prunable) Rollback(targetSlotIndex iotago.SlotIndex, lastCommittedIndex iotago.SlotIndex) error { + timeProvider := p.apiProvider.APIForSlot(targetSlotIndex).TimeProvider() + targetSlotEpoch := timeProvider.EpochFromSlot(targetSlotIndex) + lastCommittedEpoch := targetSlotEpoch + // if the target index is the last slot of the epoch, the epoch was committed + if timeProvider.EpochEnd(targetSlotEpoch) != targetSlotIndex { + lastCommittedEpoch-- + } + pointOfNoReturn := timeProvider.EpochEnd(targetSlotEpoch) - p.apiProvider.APIForSlot(targetSlotIndex).ProtocolParameters().MaxCommittableAge() + + for epochIdx := lastCommittedEpoch + 1; ; epochIdx++ { + // only remove if epochIdx bigger than epoch of target slot index + if epochIdx > targetSlotEpoch { + if exists, err := PathExists(dbPathFromIndex(p.prunableSlotStore.dbConfig.Directory, epochIdx)); err != nil { + return 
ierrors.Wrapf(err, "failed to check if bucket directory exists in forkedPrunable storage for epoch %d", epochIdx) + } else if !exists { + break + } - if err := os.RemoveAll(dbPathFromIndex(p.prunableSlotStore.dbConfig.Directory, epochIdx)); err != nil { - return ierrors.Wrapf(err, "failed to remove bucket directory in forkedPrunable storage for epoch %d", epochIdx) - } + if err := os.RemoveAll(dbPathFromIndex(p.prunableSlotStore.dbConfig.Directory, epochIdx)); err != nil { + return ierrors.Wrapf(err, "failed to remove bucket directory in forkedPrunable storage for epoch %d", epochIdx) + } + } // Remove entries for epochs bigger or equal epochFromSlot(forkingPoint+1) in semiPermanent storage. // Those entries are part of the fork and values from the old storage should not be used // values from the candidate storage should be used in its place; that's why we copy those entries // from the candidate engine to old storage. - if epochIdx >= firstForkedSlotEpoch { - if err := p.semiPermanentDB.KVStore().DeletePrefix(byteutils.ConcatBytes(kvstore.Realm{epochPrefixPoolRewards}, epochIdx.MustBytes())); err != nil { - return err - } - if err := p.semiPermanentDB.KVStore().DeletePrefix(byteutils.ConcatBytes(kvstore.Realm{epochPrefixPoolStats}, epochIdx.MustBytes())); err != nil { - return err - } - if err := p.semiPermanentDB.KVStore().DeletePrefix(byteutils.ConcatBytes(kvstore.Realm{epochPrefixDecidedUpgradeSignals}, epochIdx.MustBytes())); err != nil { - return err - } - // TODO: remove committee for the next epoch in some conditions + if err := p.semiPermanentDB.KVStore().DeletePrefix(byteutils.ConcatBytes(kvstore.Realm{epochPrefixPoolRewards}, epochIdx.MustBytes())); err != nil { + return err + } + if err := p.semiPermanentDB.KVStore().DeletePrefix(byteutils.ConcatBytes(kvstore.Realm{epochPrefixPoolStats}, epochIdx.MustBytes())); err != nil { + return err + } + if epochIdx > targetSlotEpoch && targetSlotIndex < pointOfNoReturn { + // TODO: rollback committee using + //committee, _, err := account.AccountsFromBytes(committeeBytes) + //if err != nil { + // innerErr = ierrors.Wrapf(err, "failed to parse committee bytes for epoch %d", epoch) + // return innerErr + //} + //if committee.IsReused() { + // return nil + //} if err := p.semiPermanentDB.KVStore().DeletePrefix(byteutils.ConcatBytes(kvstore.Realm{epochPrefixCommittee}, (epochIdx + 1).MustBytes())); err != nil { return err } } + + if err := p.semiPermanentDB.KVStore().DeletePrefix(byteutils.ConcatBytes(kvstore.Realm{epochPrefixDecidedUpgradeSignals}, epochIdx.MustBytes())); err != nil { + return err + } } // If the forking slot is the last slot of an epoch, then don't need to clean anything as the bucket with the // first forked slot contains only forked blocks and was removed in the previous step. - // The only situation in which blocks need to be copied is if the first forked slot is in the middle of the bucket. - // Then, we need to remove data in source bucket in slots [forkingSlot+1; bucketEnd], - // and then copy data for those slots from the candidate storage to avoid syncing the data again. 
- if forkingSlotTimeProvider.EpochStart(firstForkedSlotEpoch) < forkingSlot+1 { - // getDBInstance opens the DB if needed, so no need to manually create KV instance as with semiPermanentDB - oldBucketKvStore := p.prunableSlotStore.getDBInstance(firstForkedSlotEpoch).KVStore() - for clearSlot := forkingSlot + 1; clearSlot <= forkingSlotTimeProvider.EpochEnd(firstForkedSlotEpoch); clearSlot++ { + // We need to remove data in the bucket in slots [forkingSlot+1; bucketEnd]. + if lastCommittedEpoch != targetSlotEpoch { + oldBucketKvStore := p.prunableSlotStore.getDBInstance(targetSlotEpoch).KVStore() + for clearSlot := targetSlotIndex + 1; clearSlot <= timeProvider.EpochEnd(targetSlotEpoch); clearSlot++ { // delete slot prefix from forkedPrunable storage that will be eventually copied into the new engine if err := oldBucketKvStore.DeletePrefix(clearSlot.MustBytes()); err != nil { - return ierrors.Wrapf(err, "error while clearing slot %d in bucket for epoch %d", clearSlot, firstForkedSlotEpoch) + return ierrors.Wrapf(err, "error while clearing slot %d in bucket for epoch %d", clearSlot, targetSlotEpoch) } } } diff --git a/pkg/storage/storage.go b/pkg/storage/storage.go index 14cee3d60..35cb04cb1 100644 --- a/pkg/storage/storage.go +++ b/pkg/storage/storage.go @@ -143,6 +143,6 @@ func (s *Storage) Flush() { s.prunable.Flush() } -func (s *Storage) RollbackPrunable(forkingSlot iotago.SlotIndex) error { - return s.prunable.Rollback(forkingSlot) +func (s *Storage) RollbackPrunable(targetIndex iotago.SlotIndex, lastCommittedIndex iotago.SlotIndex) error { + return s.prunable.Rollback(targetIndex, lastCommittedIndex) } diff --git a/pkg/tests/protocol_engine_switching_test.go b/pkg/tests/protocol_engine_switching_test.go index 0d29be462..a5f5a750a 100644 --- a/pkg/tests/protocol_engine_switching_test.go +++ b/pkg/tests/protocol_engine_switching_test.go @@ -319,7 +319,6 @@ func TestProtocol_EngineSwitching(t *testing.T) { manualPOA := node.Protocol.MainEngineInstance().SybilProtection.SeatManager().(*mock2.ManualPOA) manualPOA.SetOnline("node0", "node1", "node2", "node3", "node4", "node6", "node7") } - // Merge the partitions { ts.MergePartitionsToMain() diff --git a/pkg/testsuite/mock/node.go b/pkg/testsuite/mock/node.go index 301587852..4576240bd 100644 --- a/pkg/testsuite/mock/node.go +++ b/pkg/testsuite/mock/node.go @@ -3,7 +3,6 @@ package mock import ( "context" "crypto/ed25519" - "encoding/json" "fmt" "sync" "sync/atomic" @@ -19,20 +18,14 @@ import ( "github.com/iotaledger/hive.go/runtime/syncutils" "github.com/iotaledger/hive.go/runtime/workerpool" "github.com/iotaledger/iota-core/pkg/blockfactory" - "github.com/iotaledger/iota-core/pkg/core/account" "github.com/iotaledger/iota-core/pkg/model" "github.com/iotaledger/iota-core/pkg/network" "github.com/iotaledger/iota-core/pkg/protocol" "github.com/iotaledger/iota-core/pkg/protocol/chainmanager" "github.com/iotaledger/iota-core/pkg/protocol/engine" "github.com/iotaledger/iota-core/pkg/protocol/engine/blocks" - "github.com/iotaledger/iota-core/pkg/protocol/engine/commitmentfilter" - "github.com/iotaledger/iota-core/pkg/protocol/engine/filter" - "github.com/iotaledger/iota-core/pkg/protocol/engine/mempool" "github.com/iotaledger/iota-core/pkg/protocol/engine/notarization" - "github.com/iotaledger/iota-core/pkg/protocol/engine/tipmanager" iotago "github.com/iotaledger/iota.go/v4" - "github.com/iotaledger/iota.go/v4/merklehasher" ) type Node struct { @@ -146,43 +139,43 @@ func (n *Node) hookLogging(failOnBlockFiltered bool) { fmt.Printf("%s > 
Network.BlockReceived: from %s %s - %d\n", n.Name, source, block.ID(), block.ID().Index()) }) - events.Network.BlockRequestReceived.Hook(func(blockID iotago.BlockID, source identity.ID) { - fmt.Printf("%s > Network.BlockRequestReceived: from %s %s\n", n.Name, source, blockID) - }) - - events.Network.SlotCommitmentReceived.Hook(func(commitment *model.Commitment, source identity.ID) { - fmt.Printf("%s > Network.SlotCommitmentReceived: from %s %s\n", n.Name, source, commitment.ID()) - }) - - events.Network.SlotCommitmentRequestReceived.Hook(func(commitmentID iotago.CommitmentID, source identity.ID) { - fmt.Printf("%s > Network.SlotCommitmentRequestReceived: from %s %s\n", n.Name, source, commitmentID) - }) - - events.Network.AttestationsReceived.Hook(func(commitment *model.Commitment, attestations []*iotago.Attestation, merkleProof *merklehasher.Proof[iotago.Identifier], source network.PeerID) { - fmt.Printf("%s > Network.AttestationsReceived: from %s %s number of attestations: %d with merkleProof: %s - %s\n", n.Name, source, commitment.ID(), len(attestations), lo.PanicOnErr(json.Marshal(merkleProof)), lo.Map(attestations, func(a *iotago.Attestation) iotago.BlockID { - return lo.PanicOnErr(a.BlockID(lo.PanicOnErr(n.Protocol.APIForVersion(a.ProtocolVersion)))) - })) - }) - - events.Network.AttestationsRequestReceived.Hook(func(id iotago.CommitmentID, source network.PeerID) { - fmt.Printf("%s > Network.AttestationsRequestReceived: from %s %s\n", n.Name, source, id) - }) - - events.ChainManager.RequestCommitment.Hook(func(commitmentID iotago.CommitmentID) { - fmt.Printf("%s > ChainManager.RequestCommitment: %s\n", n.Name, commitmentID) - }) - - events.ChainManager.CommitmentBelowRoot.Hook(func(commitmentID iotago.CommitmentID) { - fmt.Printf("%s > ChainManager.CommitmentBelowRoot: %s\n", n.Name, commitmentID) - }) + //events.Network.BlockRequestReceived.Hook(func(blockID iotago.BlockID, source identity.ID) { + // fmt.Printf("%s > Network.BlockRequestReceived: from %s %s\n", n.Name, source, blockID) + //}) + + //events.Network.SlotCommitmentReceived.Hook(func(commitment *model.Commitment, source identity.ID) { + // fmt.Printf("%s > Network.SlotCommitmentReceived: from %s %s\n", n.Name, source, commitment.ID()) + //}) + // + //events.Network.SlotCommitmentRequestReceived.Hook(func(commitmentID iotago.CommitmentID, source identity.ID) { + // fmt.Printf("%s > Network.SlotCommitmentRequestReceived: from %s %s\n", n.Name, source, commitmentID) + //}) + + //events.Network.AttestationsReceived.Hook(func(commitment *model.Commitment, attestations []*iotago.Attestation, merkleProof *merklehasher.Proof[iotago.Identifier], source network.PeerID) { + // fmt.Printf("%s > Network.AttestationsReceived: from %s %s number of attestations: %d with merkleProof: %s - %s\n", n.Name, source, commitment.ID(), len(attestations), lo.PanicOnErr(json.Marshal(merkleProof)), lo.Map(attestations, func(a *iotago.Attestation) iotago.BlockID { + // return lo.PanicOnErr(a.BlockID(lo.PanicOnErr(n.Protocol.APIForVersion(a.ProtocolVersion)))) + // })) + //}) + // + //events.Network.AttestationsRequestReceived.Hook(func(id iotago.CommitmentID, source network.PeerID) { + // fmt.Printf("%s > Network.AttestationsRequestReceived: from %s %s\n", n.Name, source, id) + //}) + + //events.ChainManager.RequestCommitment.Hook(func(commitmentID iotago.CommitmentID) { + // fmt.Printf("%s > ChainManager.RequestCommitment: %s\n", n.Name, commitmentID) + //}) + + //events.ChainManager.CommitmentBelowRoot.Hook(func(commitmentID iotago.CommitmentID) { 
+ // fmt.Printf("%s > ChainManager.CommitmentBelowRoot: %s\n", n.Name, commitmentID) + //}) events.ChainManager.ForkDetected.Hook(func(fork *chainmanager.Fork) { fmt.Printf("%s > ChainManager.ForkDetected: %s\n", n.Name, fork) }) - events.Engine.TipManager.BlockAdded.Hook(func(tipMetadata tipmanager.TipMetadata) { - fmt.Printf("%s > TipManager.BlockAdded: %s in pool %d\n", n.Name, tipMetadata.ID(), tipMetadata.TipPool().Get()) - }) + //events.Engine.TipManager.BlockAdded.Hook(func(tipMetadata tipmanager.TipMetadata) { + // fmt.Printf("%s > TipManager.BlockAdded: %s in pool %d\n", n.Name, tipMetadata.ID(), tipMetadata.TipPool().Get()) + //}) events.CandidateEngineActivated.Hook(func(e *engine.Engine) { fmt.Printf("%s > CandidateEngineActivated: %s, ChainID:%s Index:%s\n", n.Name, e.Name(), e.ChainID(), e.ChainID().Index()) @@ -214,84 +207,84 @@ func (n *Node) attachEngineLogs(failOnBlockFiltered bool, instance *engine.Engin defer n.mutex.Unlock() n.attachedBlocks = append(n.attachedBlocks, block) }) - - events.BlockDAG.BlockSolid.Hook(func(block *blocks.Block) { - fmt.Printf("%s > [%s] BlockDAG.BlockSolid: %s\n", n.Name, engineName, block.ID()) - }) - + // + //events.BlockDAG.BlockSolid.Hook(func(block *blocks.Block) { + // fmt.Printf("%s > [%s] BlockDAG.BlockSolid: %s\n", n.Name, engineName, block.ID()) + //}) + // events.BlockDAG.BlockInvalid.Hook(func(block *blocks.Block, err error) { fmt.Printf("%s > [%s] BlockDAG.BlockInvalid: %s - %s\n", n.Name, engineName, block.ID(), err) }) - - events.BlockDAG.BlockMissing.Hook(func(block *blocks.Block) { - fmt.Printf("%s > [%s] BlockDAG.BlockMissing: %s\n", n.Name, engineName, block.ID()) - }) - - events.BlockDAG.MissingBlockAttached.Hook(func(block *blocks.Block) { - fmt.Printf("%s > [%s] BlockDAG.MissingBlockAttached: %s\n", n.Name, engineName, block.ID()) - }) - - events.SeatManager.BlockProcessed.Hook(func(block *blocks.Block) { - fmt.Printf("%s > [%s] SybilProtection.BlockProcessed: %s\n", n.Name, engineName, block.ID()) - }) - - events.Booker.BlockBooked.Hook(func(block *blocks.Block) { - fmt.Printf("%s > [%s] Booker.BlockBooked: %s\n", n.Name, engineName, block.ID()) - }) - - events.Scheduler.BlockScheduled.Hook(func(block *blocks.Block) { - fmt.Printf("%s > [%s] Scheduler.BlockScheduled: %s\n", n.Name, engineName, block.ID()) - }) - - events.Scheduler.BlockEnqueued.Hook(func(block *blocks.Block) { - fmt.Printf("%s > [%s] Scheduler.BlockEnqueued: %s\n", n.Name, engineName, block.ID()) - }) - - events.Scheduler.BlockSkipped.Hook(func(block *blocks.Block) { - fmt.Printf("%s > [%s] Scheduler.BlockSkipped: %s\n", n.Name, engineName, block.ID()) - }) - - events.Scheduler.BlockDropped.Hook(func(block *blocks.Block, err error) { - fmt.Printf("%s > [%s] Scheduler.BlockDropped: %s - %s\n", n.Name, engineName, block.ID(), err.Error()) - }) - - events.Clock.AcceptedTimeUpdated.Hook(func(newTime time.Time) { - fmt.Printf("%s > [%s] Clock.AcceptedTimeUpdated: %s [Slot %d]\n", n.Name, engineName, newTime, instance.CurrentAPI().TimeProvider().SlotFromTime(newTime)) - }) - - events.Clock.ConfirmedTimeUpdated.Hook(func(newTime time.Time) { - fmt.Printf("%s > [%s] Clock.ConfirmedTimeUpdated: %s [Slot %d]\n", n.Name, engineName, newTime, instance.CurrentAPI().TimeProvider().SlotFromTime(newTime)) - }) - - events.Filter.BlockPreAllowed.Hook(func(block *model.Block) { - fmt.Printf("%s > [%s] Filter.BlockPreAllowed: %s\n", n.Name, engineName, block.ID()) - }) - - events.Filter.BlockPreFiltered.Hook(func(event *filter.BlockPreFilteredEvent) { - fmt.Printf("%s > 
[%s] Filter.BlockPreFiltered: %s - %s\n", n.Name, engineName, event.Block.ID(), event.Reason.Error()) - if failOnBlockFiltered { - n.Testing.Fatal("no blocks should be prefiltered") - } - }) - - events.CommitmentFilter.BlockAllowed.Hook(func(block *blocks.Block) { - fmt.Printf("%s > [%s] CommitmentFilter.BlockAllowed: %s\n", n.Name, engineName, block.ID()) - }) - - events.CommitmentFilter.BlockFiltered.Hook(func(event *commitmentfilter.BlockFilteredEvent) { - fmt.Printf("%s > [%s] CommitmentFilter.BlockFiltered: %s - %s\n", n.Name, engineName, event.Block.ID(), event.Reason.Error()) - if failOnBlockFiltered { - n.Testing.Fatal("no blocks should be filtered") - } - }) - - events.BlockRequester.Tick.Hook(func(blockID iotago.BlockID) { - fmt.Printf("%s > [%s] BlockRequester.Tick: %s\n", n.Name, engineName, blockID) - }) - - events.BlockProcessed.Hook(func(blockID iotago.BlockID) { - fmt.Printf("%s > [%s] Engine.BlockProcessed: %s\n", n.Name, engineName, blockID) - }) + // + //events.BlockDAG.BlockMissing.Hook(func(block *blocks.Block) { + // fmt.Printf("%s > [%s] BlockDAG.BlockMissing: %s\n", n.Name, engineName, block.ID()) + //}) + // + //events.BlockDAG.MissingBlockAttached.Hook(func(block *blocks.Block) { + // fmt.Printf("%s > [%s] BlockDAG.MissingBlockAttached: %s\n", n.Name, engineName, block.ID()) + //}) + // + //events.SeatManager.BlockProcessed.Hook(func(block *blocks.Block) { + // fmt.Printf("%s > [%s] SybilProtection.BlockProcessed: %s\n", n.Name, engineName, block.ID()) + //}) + // + //events.Booker.BlockBooked.Hook(func(block *blocks.Block) { + // fmt.Printf("%s > [%s] Booker.BlockBooked: %s\n", n.Name, engineName, block.ID()) + //}) + // + //events.Scheduler.BlockScheduled.Hook(func(block *blocks.Block) { + // fmt.Printf("%s > [%s] Scheduler.BlockScheduled: %s\n", n.Name, engineName, block.ID()) + //}) + // + //events.Scheduler.BlockEnqueued.Hook(func(block *blocks.Block) { + // fmt.Printf("%s > [%s] Scheduler.BlockEnqueued: %s\n", n.Name, engineName, block.ID()) + //}) + // + //events.Scheduler.BlockSkipped.Hook(func(block *blocks.Block) { + // fmt.Printf("%s > [%s] Scheduler.BlockSkipped: %s\n", n.Name, engineName, block.ID()) + //}) + // + //events.Scheduler.BlockDropped.Hook(func(block *blocks.Block, err error) { + // fmt.Printf("%s > [%s] Scheduler.BlockDropped: %s - %s\n", n.Name, engineName, block.ID(), err.Error()) + //}) + // + //events.Clock.AcceptedTimeUpdated.Hook(func(newTime time.Time) { + // fmt.Printf("%s > [%s] Clock.AcceptedTimeUpdated: %s [Slot %d]\n", n.Name, engineName, newTime, instance.CurrentAPI().TimeProvider().SlotFromTime(newTime)) + //}) + // + //events.Clock.ConfirmedTimeUpdated.Hook(func(newTime time.Time) { + // fmt.Printf("%s > [%s] Clock.ConfirmedTimeUpdated: %s [Slot %d]\n", n.Name, engineName, newTime, instance.CurrentAPI().TimeProvider().SlotFromTime(newTime)) + //}) + // + //events.Filter.BlockPreAllowed.Hook(func(block *model.Block) { + // fmt.Printf("%s > [%s] Filter.BlockPreAllowed: %s\n", n.Name, engineName, block.ID()) + //}) + // + //events.Filter.BlockPreFiltered.Hook(func(event *filter.BlockPreFilteredEvent) { + // fmt.Printf("%s > [%s] Filter.BlockPreFiltered: %s - %s\n", n.Name, engineName, event.Block.ID(), event.Reason.Error()) + // if failOnBlockFiltered { + // n.Testing.Fatal("no blocks should be prefiltered") + // } + //}) + // + //events.CommitmentFilter.BlockAllowed.Hook(func(block *blocks.Block) { + // fmt.Printf("%s > [%s] CommitmentFilter.BlockAllowed: %s\n", n.Name, engineName, block.ID()) + //}) + // + 
//events.CommitmentFilter.BlockFiltered.Hook(func(event *commitmentfilter.BlockFilteredEvent) { + // fmt.Printf("%s > [%s] CommitmentFilter.BlockFiltered: %s - %s\n", n.Name, engineName, event.Block.ID(), event.Reason.Error()) + // if failOnBlockFiltered { + // n.Testing.Fatal("no blocks should be filtered") + // } + //}) + // + //events.BlockRequester.Tick.Hook(func(blockID iotago.BlockID) { + // fmt.Printf("%s > [%s] BlockRequester.Tick: %s\n", n.Name, engineName, blockID) + //}) + // + //events.BlockProcessed.Hook(func(blockID iotago.BlockID) { + // fmt.Printf("%s > [%s] Engine.BlockProcessed: %s\n", n.Name, engineName, blockID) + //}) events.Notarization.SlotCommitted.Hook(func(details *notarization.SlotCommittedDetails) { var acceptedBlocks iotago.BlockIDs @@ -324,92 +317,92 @@ func (n *Node) attachEngineLogs(failOnBlockFiltered bool, instance *engine.Engin fmt.Printf("%s > [%s] NotarizationManager.LatestCommitmentUpdated: %s\n", n.Name, engineName, commitment.ID()) }) - events.BlockGadget.BlockPreAccepted.Hook(func(block *blocks.Block) { - fmt.Printf("%s > [%s] Consensus.BlockGadget.BlockPreAccepted: %s %s\n", n.Name, engineName, block.ID(), block.ProtocolBlock().SlotCommitmentID) - }) - - events.BlockGadget.BlockAccepted.Hook(func(block *blocks.Block) { - fmt.Printf("%s > [%s] Consensus.BlockGadget.BlockAccepted: %s @ slot %s committing to %s\n", n.Name, engineName, block.ID(), block.ID().Index(), block.ProtocolBlock().SlotCommitmentID) - }) - - events.BlockGadget.BlockPreConfirmed.Hook(func(block *blocks.Block) { - fmt.Printf("%s > [%s] Consensus.BlockGadget.BlockPreConfirmed: %s %s\n", n.Name, engineName, block.ID(), block.ProtocolBlock().SlotCommitmentID) - }) - - events.BlockGadget.BlockConfirmed.Hook(func(block *blocks.Block) { - fmt.Printf("%s > [%s] Consensus.BlockGadget.BlockConfirmed: %s %s\n", n.Name, engineName, block.ID(), block.ProtocolBlock().SlotCommitmentID) - }) - - events.SlotGadget.SlotFinalized.Hook(func(slotIndex iotago.SlotIndex) { - fmt.Printf("%s > [%s] Consensus.SlotGadget.SlotFinalized: %s\n", n.Name, engineName, slotIndex) - }) - - events.SeatManager.OnlineCommitteeSeatAdded.Hook(func(seat account.SeatIndex, accountID iotago.AccountID) { - fmt.Printf("%s > [%s] SybilProtection.OnlineCommitteeSeatAdded: %d - %s\n", n.Name, engineName, seat, accountID) - }) - - events.SeatManager.OnlineCommitteeSeatRemoved.Hook(func(seat account.SeatIndex) { - fmt.Printf("%s > [%s] SybilProtection.OnlineCommitteeSeatRemoved: %d\n", n.Name, engineName, seat) - }) - - events.ConflictDAG.ConflictCreated.Hook(func(conflictID iotago.TransactionID) { - fmt.Printf("%s > [%s] ConflictDAG.ConflictCreated: %s\n", n.Name, engineName, conflictID) - }) - - events.ConflictDAG.ConflictEvicted.Hook(func(conflictID iotago.TransactionID) { - fmt.Printf("%s > [%s] ConflictDAG.ConflictEvicted: %s\n", n.Name, engineName, conflictID) - }) - events.ConflictDAG.ConflictRejected.Hook(func(conflictID iotago.TransactionID) { - fmt.Printf("%s > [%s] ConflictDAG.ConflictRejected: %s\n", n.Name, engineName, conflictID) - }) - - events.ConflictDAG.ConflictAccepted.Hook(func(conflictID iotago.TransactionID) { - fmt.Printf("%s > [%s] ConflictDAG.ConflictAccepted: %s\n", n.Name, engineName, conflictID) - }) - - instance.Ledger.OnTransactionAttached(func(transactionMetadata mempool.TransactionMetadata) { - fmt.Printf("%s > [%s] Ledger.TransactionAttached: %s\n", n.Name, engineName, transactionMetadata.ID()) - - transactionMetadata.OnSolid(func() { - fmt.Printf("%s > [%s] MemPool.TransactionSolid: %s\n", 
n.Name, engineName, transactionMetadata.ID()) - }) - - transactionMetadata.OnExecuted(func() { - fmt.Printf("%s > [%s] MemPool.TransactionExecuted: %s\n", n.Name, engineName, transactionMetadata.ID()) - }) - - transactionMetadata.OnBooked(func() { - fmt.Printf("%s > [%s] MemPool.TransactionBooked: %s\n", n.Name, engineName, transactionMetadata.ID()) - }) - - transactionMetadata.OnConflicting(func() { - fmt.Printf("%s > [%s] MemPool.TransactionConflicting: %s\n", n.Name, engineName, transactionMetadata.ID()) - }) - - transactionMetadata.OnAccepted(func() { - fmt.Printf("%s > [%s] MemPool.TransactionAccepted: %s\n", n.Name, engineName, transactionMetadata.ID()) - }) - - transactionMetadata.OnRejected(func() { - fmt.Printf("%s > [%s] MemPool.TransactionRejected: %s\n", n.Name, engineName, transactionMetadata.ID()) - }) - - transactionMetadata.OnInvalid(func(err error) { - fmt.Printf("%s > [%s] MemPool.TransactionInvalid(%s): %s\n", n.Name, engineName, err, transactionMetadata.ID()) - }) - - transactionMetadata.OnOrphaned(func() { - fmt.Printf("%s > [%s] MemPool.TransactionOrphaned: %s\n", n.Name, engineName, transactionMetadata.ID()) - }) - - transactionMetadata.OnCommitted(func() { - fmt.Printf("%s > [%s] MemPool.TransactionCommitted: %s\n", n.Name, engineName, transactionMetadata.ID()) - }) - - transactionMetadata.OnPending(func() { - fmt.Printf("%s > [%s] MemPool.TransactionPending: %s\n", n.Name, engineName, transactionMetadata.ID()) - }) - }) + //events.BlockGadget.BlockPreAccepted.Hook(func(block *blocks.Block) { + // fmt.Printf("%s > [%s] Consensus.BlockGadget.BlockPreAccepted: %s %s\n", n.Name, engineName, block.ID(), block.ProtocolBlock().SlotCommitmentID) + //}) + // + //events.BlockGadget.BlockAccepted.Hook(func(block *blocks.Block) { + // fmt.Printf("%s > [%s] Consensus.BlockGadget.BlockAccepted: %s @ slot %s committing to %s\n", n.Name, engineName, block.ID(), block.ID().Index(), block.ProtocolBlock().SlotCommitmentID) + //}) + // + //events.BlockGadget.BlockPreConfirmed.Hook(func(block *blocks.Block) { + // fmt.Printf("%s > [%s] Consensus.BlockGadget.BlockPreConfirmed: %s %s\n", n.Name, engineName, block.ID(), block.ProtocolBlock().SlotCommitmentID) + //}) + // + //events.BlockGadget.BlockConfirmed.Hook(func(block *blocks.Block) { + // fmt.Printf("%s > [%s] Consensus.BlockGadget.BlockConfirmed: %s %s\n", n.Name, engineName, block.ID(), block.ProtocolBlock().SlotCommitmentID) + //}) + // + //events.SlotGadget.SlotFinalized.Hook(func(slotIndex iotago.SlotIndex) { + // fmt.Printf("%s > [%s] Consensus.SlotGadget.SlotFinalized: %s\n", n.Name, engineName, slotIndex) + //}) + // + //events.SeatManager.OnlineCommitteeSeatAdded.Hook(func(seat account.SeatIndex, accountID iotago.AccountID) { + // fmt.Printf("%s > [%s] SybilProtection.OnlineCommitteeSeatAdded: %d - %s\n", n.Name, engineName, seat, accountID) + //}) + // + //events.SeatManager.OnlineCommitteeSeatRemoved.Hook(func(seat account.SeatIndex) { + // fmt.Printf("%s > [%s] SybilProtection.OnlineCommitteeSeatRemoved: %d\n", n.Name, engineName, seat) + //}) + // + //events.ConflictDAG.ConflictCreated.Hook(func(conflictID iotago.TransactionID) { + // fmt.Printf("%s > [%s] ConflictDAG.ConflictCreated: %s\n", n.Name, engineName, conflictID) + //}) + // + //events.ConflictDAG.ConflictEvicted.Hook(func(conflictID iotago.TransactionID) { + // fmt.Printf("%s > [%s] ConflictDAG.ConflictEvicted: %s\n", n.Name, engineName, conflictID) + //}) + //events.ConflictDAG.ConflictRejected.Hook(func(conflictID iotago.TransactionID) { + // 
fmt.Printf("%s > [%s] ConflictDAG.ConflictRejected: %s\n", n.Name, engineName, conflictID) + //}) + // + //events.ConflictDAG.ConflictAccepted.Hook(func(conflictID iotago.TransactionID) { + // fmt.Printf("%s > [%s] ConflictDAG.ConflictAccepted: %s\n", n.Name, engineName, conflictID) + //}) + // + //instance.Ledger.OnTransactionAttached(func(transactionMetadata mempool.TransactionMetadata) { + // fmt.Printf("%s > [%s] Ledger.TransactionAttached: %s\n", n.Name, engineName, transactionMetadata.ID()) + // + // transactionMetadata.OnSolid(func() { + // fmt.Printf("%s > [%s] MemPool.TransactionSolid: %s\n", n.Name, engineName, transactionMetadata.ID()) + // }) + // + // transactionMetadata.OnExecuted(func() { + // fmt.Printf("%s > [%s] MemPool.TransactionExecuted: %s\n", n.Name, engineName, transactionMetadata.ID()) + // }) + // + // transactionMetadata.OnBooked(func() { + // fmt.Printf("%s > [%s] MemPool.TransactionBooked: %s\n", n.Name, engineName, transactionMetadata.ID()) + // }) + // + // transactionMetadata.OnConflicting(func() { + // fmt.Printf("%s > [%s] MemPool.TransactionConflicting: %s\n", n.Name, engineName, transactionMetadata.ID()) + // }) + // + // transactionMetadata.OnAccepted(func() { + // fmt.Printf("%s > [%s] MemPool.TransactionAccepted: %s\n", n.Name, engineName, transactionMetadata.ID()) + // }) + // + // transactionMetadata.OnRejected(func() { + // fmt.Printf("%s > [%s] MemPool.TransactionRejected: %s\n", n.Name, engineName, transactionMetadata.ID()) + // }) + // + // transactionMetadata.OnInvalid(func(err error) { + // fmt.Printf("%s > [%s] MemPool.TransactionInvalid(%s): %s\n", n.Name, engineName, err, transactionMetadata.ID()) + // }) + // + // transactionMetadata.OnOrphaned(func() { + // fmt.Printf("%s > [%s] MemPool.TransactionOrphaned: %s\n", n.Name, engineName, transactionMetadata.ID()) + // }) + // + // transactionMetadata.OnCommitted(func() { + // fmt.Printf("%s > [%s] MemPool.TransactionCommitted: %s\n", n.Name, engineName, transactionMetadata.ID()) + // }) + // + // transactionMetadata.OnPending(func() { + // fmt.Printf("%s > [%s] MemPool.TransactionPending: %s\n", n.Name, engineName, transactionMetadata.ID()) + // }) + //}) } func (n *Node) Wait() { From 23a831e6a3c025ed7501199b2d76773bb60af8fa Mon Sep 17 00:00:00 2001 From: Piotr Macek <4007944+piotrm50@users.noreply.github.com> Date: Fri, 8 Sep 2023 10:22:37 +0200 Subject: [PATCH 04/17] Fix the engine switching test. --- pkg/protocol/engine/attestation/slotattestation/manager.go | 5 ----- pkg/storage/prunable/prunable.go | 4 +++- 2 files changed, 3 insertions(+), 6 deletions(-) diff --git a/pkg/protocol/engine/attestation/slotattestation/manager.go b/pkg/protocol/engine/attestation/slotattestation/manager.go index 77b40b488..f72592eed 100644 --- a/pkg/protocol/engine/attestation/slotattestation/manager.go +++ b/pkg/protocol/engine/attestation/slotattestation/manager.go @@ -1,13 +1,10 @@ package slotattestation import ( - "fmt" - "github.com/iotaledger/hive.go/ads" "github.com/iotaledger/hive.go/core/memstorage" "github.com/iotaledger/hive.go/ierrors" "github.com/iotaledger/hive.go/kvstore" - "github.com/iotaledger/hive.go/lo" "github.com/iotaledger/hive.go/runtime/module" "github.com/iotaledger/hive.go/runtime/syncutils" "github.com/iotaledger/iota-core/pkg/core/account" @@ -150,7 +147,6 @@ func (m *Manager) GetMap(index iotago.SlotIndex) (ads.Map[iotago.AccountID, *iot // AddAttestationFromValidationBlock adds an attestation from a block to the future attestations (beyond the attestation window). 
func (m *Manager) AddAttestationFromValidationBlock(block *blocks.Block) { - fmt.Println("AddAttestationFromValidationBlock", block.ID()) // Only track validator blocks. if _, isValidationBlock := block.ValidationBlock(); !isValidationBlock { return @@ -260,7 +256,6 @@ func (m *Manager) Commit(index iotago.SlotIndex) (newCW uint64, attestationsRoot // Add all attestations to the tree and calculate the new cumulative weight. for _, a := range attestations { - fmt.Println("pending attestation while committing", index, lo.PanicOnErr(a.BlockID(m.apiProvider.APIForSlot(index)))) // TODO: which weight are we using here? The current one? Or the one of the slot of the attestation/commitmentID? if _, exists := m.committeeFunc(index).GetSeat(a.IssuerID); exists { if err := tree.Set(a.IssuerID, a); err != nil { diff --git a/pkg/storage/prunable/prunable.go b/pkg/storage/prunable/prunable.go index 1e691e3aa..23051dbae 100644 --- a/pkg/storage/prunable/prunable.go +++ b/pkg/storage/prunable/prunable.go @@ -155,6 +155,9 @@ func (p *Prunable) Rollback(targetSlotIndex iotago.SlotIndex, lastCommittedIndex } pointOfNoReturn := timeProvider.EpochEnd(targetSlotEpoch) - p.apiProvider.APIForSlot(targetSlotIndex).ProtocolParameters().MaxCommittableAge() + // Shutdown prunable slot store in order to flush and get consistent state on disk after reopening. + p.prunableSlotStore.Shutdown() + for epochIdx := lastCommittedEpoch + 1; ; epochIdx++ { // only remove if epochIdx bigger than epoch of target slot index if epochIdx > targetSlotEpoch { @@ -167,7 +170,6 @@ func (p *Prunable) Rollback(targetSlotIndex iotago.SlotIndex, lastCommittedIndex if err := os.RemoveAll(dbPathFromIndex(p.prunableSlotStore.dbConfig.Directory, epochIdx)); err != nil { return ierrors.Wrapf(err, "failed to remove bucket directory in forkedPrunable storage for epoch %d", epochIdx) } - } // Remove entries for epochs bigger or equal epochFromSlot(forkingPoint+1) in semiPermanent storage. 
// Those entries are part of the fork and values from the old storage should not be used From 735a768f98d78ed6b4e83b2230ff734e650ec7eb Mon Sep 17 00:00:00 2001 From: Piotr Macek <4007944+piotrm50@users.noreply.github.com> Date: Fri, 8 Sep 2023 15:39:47 +0200 Subject: [PATCH 05/17] Cleanup code --- pkg/protocol/engine/eviction/state.go | 1 + pkg/protocol/enginemanager/enginemanager.go | 7 +- .../sybilprotectionv1/performance/rollback.go | 161 ------------------ pkg/storage/database/db_instance.go | 4 + pkg/storage/prunable/bucket_manager.go | 44 ++++- pkg/storage/prunable/epochstore/epoch_kv.go | 4 + pkg/storage/prunable/epochstore/store.go | 4 + pkg/storage/prunable/prunable.go | 88 ++++------ pkg/storage/storage.go | 4 +- 9 files changed, 94 insertions(+), 223 deletions(-) delete mode 100644 pkg/protocol/sybilprotection/sybilprotectionv1/performance/rollback.go diff --git a/pkg/protocol/engine/eviction/state.go b/pkg/protocol/engine/eviction/state.go index bbaecdb20..c5ba5ef25 100644 --- a/pkg/protocol/engine/eviction/state.go +++ b/pkg/protocol/engine/eviction/state.go @@ -303,6 +303,7 @@ func (s *State) Rollback(lowerTarget, targetIndex iotago.SlotIndex) error { if err := s.latestNonEmptyStore.Set([]byte{latestNonEmptySlotKey}, latestNonEmptySlot.MustBytes()); err != nil { return ierrors.Wrap(err, "failed to store latest non empty slot") } + return nil } diff --git a/pkg/protocol/enginemanager/enginemanager.go b/pkg/protocol/enginemanager/enginemanager.go index a301b646e..1d070b910 100644 --- a/pkg/protocol/enginemanager/enginemanager.go +++ b/pkg/protocol/enginemanager/enginemanager.go @@ -1,7 +1,6 @@ package enginemanager import ( - "fmt" "os" "path/filepath" @@ -263,7 +262,6 @@ func (e *EngineManager) loadEngineInstanceWithStorage(engineAlias string, storag func (e *EngineManager) ForkEngineAtSlot(index iotago.SlotIndex) (*engine.Engine, error) { engineAlias := lo.PanicOnErr(uuid.NewUUID()).String() - fmt.Println("fork", e.activeInstance.Name(), "into", engineAlias) errorHandler := func(err error) { e.errorHandler(ierrors.Wrapf(err, "engine (%s)", engineAlias[0:8])) } @@ -280,7 +278,7 @@ func (e *EngineManager) ForkEngineAtSlot(index iotago.SlotIndex) (*engine.Engine if err := newStorage.Commitments().Rollback(index, latestCommitment.Index()); err != nil { return nil, ierrors.Wrap(err, "failed to rollback commitments") } - // Create temporary components and rollback their state, which will be reflected on disk. + // Create temporary components and rollback their permanent state, which will be reflected on disk. evictionState := eviction.NewState(newStorage.LatestNonEmptySlot(), newStorage.RootBlocks) evictionState.Initialize(latestCommitment.Index()) @@ -308,12 +306,13 @@ func (e *EngineManager) ForkEngineAtSlot(index iotago.SlotIndex) (*engine.Engine return nil, err } - if err := newStorage.RollbackPrunable(index, latestCommitment.Index()); err != nil { + if err := newStorage.RollbackPrunable(index); err != nil { return nil, err } candidateEngine := e.loadEngineInstanceWithStorage(engineAlias, newStorage) + // Rollback attestations already on created engine instance, because this action modifies the in-memory storage. 
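+	// Note: the storage-level rollbacks above operate on the cloned storage directly, while the attestation
+	// rollback below can only be performed once the candidate engine has been instantiated from that storage.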
if err := candidateEngine.Attestations.Rollback(index); err != nil { return nil, ierrors.Wrap(err, "error while rolling back attestations storage on candidate engine") } diff --git a/pkg/protocol/sybilprotection/sybilprotectionv1/performance/rollback.go b/pkg/protocol/sybilprotection/sybilprotectionv1/performance/rollback.go deleted file mode 100644 index 8cb368a45..000000000 --- a/pkg/protocol/sybilprotection/sybilprotectionv1/performance/rollback.go +++ /dev/null @@ -1,161 +0,0 @@ -package performance - -// -//func (t *Tracker) Rollback(targetSlotIndex iotago.SlotIndex) error { -// t.mutex.Lock() -// defer t.mutex.Unlock() -// -// timeProvider := t.apiProvider.APIForSlot(targetSlotIndex).TimeProvider() -// targetEpoch := timeProvider.EpochFromSlot(targetSlotIndex) -// -// // if the target index is the last slot of the epoch, the epoch was committed -// if timeProvider.EpochEnd(targetEpoch) != targetSlotIndex { -// targetEpoch-- -// } -// -// err := t.rollbackPerformanceFactor(timeProvider.EpochStart(targetEpoch+1), targetSlotIndex) -// if err != nil { -// return ierrors.Wrap(err, "unable to export performance factor") -// } -// -// err = t.rollbackPoolRewards(targetEpoch) -// if err != nil { -// return ierrors.Wrap(err, "unable to export pool rewards") -// } -// -// err = t.rollbackPoolsStats(targetEpoch) -// if err != nil { -// return ierrors.Wrap(err, "unable to export pool stats") -// } -// -// err = t.rollbackCommittees(targetSlotIndex) -// if err != nil { -// return ierrors.Wrap(err, "unable to export committees") -// } -// -// return nil -//} -// -//func (t *Tracker) rollbackPerformanceFactor(targetSlot, lastCommittedSlot iotago.SlotIndex) error { -// t.performanceFactorsMutex.RLock() -// defer t.performanceFactorsMutex.RUnlock() -// -// for currentSlot := targetSlot; currentSlot <= targetSlot; currentSlot++ { -// // TODO: decrease this in import/export to uint16 in pf Load/Store/... 
if we are sure on the performance factor calculation and its expected upper bond -// // TODO: clean the current epoch only as future epochs will be removed on disk -// performanceFactors, err := t.performanceFactorsFunc(currentSlot) -// if err != nil { -// return ierrors.Wrapf(err, "unable to get performance factors for slot index %d", currentSlot) -// } -// -// } -// -// return nil -//} -// -//func (t *Tracker) rollbackPoolRewards(targetEpoch, lastCommittedEpoch iotago.EpochIndex) error { -// -// for epoch := targetEpoch + 1; epoch <= lastCommittedEpoch; epoch++ { -// rewardsMap, err := t.rewardsStorePerEpochFunc(epoch) -// if err != nil { -// return ierrors.Wrapf(err, "unable to get rewards store for epoch index %d", epoch) -// } -// -// if err := rewardsMap.Clear(); err != nil { -// return ierrors.Wrapf(err, "error while clearing rewards store for epoch %d", epoch) -// } -// } -// -// return nil -//} -// -//func (t *Tracker) rollbackPoolsStats(targetEpoch iotago.EpochIndex) error { -// var epochCount uint64 -// if err := pWriter.WriteValue("pools stats epoch count", epochCount, true); err != nil { -// return ierrors.Wrap(err, "unable to write epoch count") -// } -// // export all stored pools -// var innerErr error -// if err := t.poolStatsStore.StreamBytes(func(key []byte, value []byte) error { -// epochIndex := iotago.EpochIndex(binary.LittleEndian.Uint64(key)) -// if epochIndex > targetEpoch { -// // continue -// return nil -// } -// if err := pWriter.WriteBytes(key); err != nil { -// innerErr = ierrors.Wrapf(err, "unable to write epoch index %d", epochIndex) -// return innerErr -// } -// if err := pWriter.WriteBytes(value); err != nil { -// innerErr = ierrors.Wrapf(err, "unable to write pools stats for epoch %d", epochIndex) -// return innerErr -// } -// epochCount++ -// -// return nil -// }); err != nil { -// return ierrors.Wrap(err, "unable to iterate over pools stats") -// } else if innerErr != nil { -// return ierrors.Wrap(innerErr, "error while iterating over pools stats") -// } -// if err := pWriter.WriteValueAtBookmark("pools stats epoch count", epochCount); err != nil { -// return ierrors.Wrap(err, "unable to write stats epoch count at bookmarked position") -// } -// -// return nil -//} -// -//func (t *Tracker) rollbackCommittees(targetSlot iotago.SlotIndex) error { -// var epochCount uint64 -// if err := pWriter.WriteValue("committees epoch count", epochCount, true); err != nil { -// return ierrors.Wrap(err, "unable to write committees epoch count") -// } -// apiForSlot := t.apiProvider.APIForSlot(targetSlot) -// epochFromTargetSlot := apiForSlot.TimeProvider().EpochFromSlot(targetSlot) -// -// pointOfNoReturn := apiForSlot.TimeProvider().EpochEnd(epochFromTargetSlot) - apiForSlot.ProtocolParameters().MaxCommittableAge() -// -// var innerErr error -// err := t.committeeStore.StreamBytes(func(epochBytes []byte, committeeBytes []byte) error { -// epoch := iotago.EpochIndex(binary.LittleEndian.Uint64(epochBytes)) -// -// // We have a committee for an epoch higher than the targetSlot -// // 1. we trust the point of no return, we export the committee for the next epoch -// // 2. 
if we don't trust the point-of-no-return -// // - we were able to rotate a committee, then we export it -// // - we were not able to rotate a committee (reused), then we don't export it -// if epoch > epochFromTargetSlot && targetSlot < pointOfNoReturn { -// committee, _, err := account.AccountsFromBytes(committeeBytes) -// if err != nil { -// innerErr = ierrors.Wrapf(err, "failed to parse committee bytes for epoch %d", epoch) -// return innerErr -// } -// if committee.IsReused() { -// return nil -// } -// } -// -// if err := pWriter.WriteBytes(epochBytes); err != nil { -// innerErr = ierrors.Wrap(err, "unable to write epoch index") -// return innerErr -// } -// if err := pWriter.WriteBytes(committeeBytes); err != nil { -// innerErr = ierrors.Wrap(err, "unable to write epoch committee") -// return innerErr -// } -// epochCount++ -// -// return nil -// }) -// if err != nil { -// return ierrors.Wrapf(err, "unable to iterate over committee base store: %w", innerErr) -// } -// if innerErr != nil { -// return ierrors.Wrap(err, "error while iterating over committee base store") -// } -// if err = pWriter.WriteValueAtBookmark("committees epoch count", epochCount); err != nil { -// return ierrors.Wrap(err, "unable to write committee epoch count at bookmarked position") -// } -// -// return nil -//} diff --git a/pkg/storage/database/db_instance.go b/pkg/storage/database/db_instance.go index 671b073e1..0a77d5c71 100644 --- a/pkg/storage/database/db_instance.go +++ b/pkg/storage/database/db_instance.go @@ -38,6 +38,10 @@ func (d *DBInstance) Close() { } } +//func (d *DBInstance) Open() { +// d.store.Replace(StoreWithDefaultSettings(dbConfig.Directory, true, dbConfig.Engine)) +//} + func (d *DBInstance) KVStore() kvstore.KVStore { return d.store } diff --git a/pkg/storage/prunable/bucket_manager.go b/pkg/storage/prunable/bucket_manager.go index 7bb1396b4..6141f3ea1 100644 --- a/pkg/storage/prunable/bucket_manager.go +++ b/pkg/storage/prunable/bucket_manager.go @@ -195,13 +195,28 @@ func (b *BucketManager) Prune(epoch iotago.EpochIndex) error { return ierrors.Wrapf(database.ErrNoPruningNeeded, "epoch %d is already pruned", epoch) } + b.DeleteBucket(epoch) + + b.lastPrunedEpoch.MarkEvicted(epoch) + + return nil +} + +// DeleteBucket deletes directory that stores the data for the given bucket and returns boolean +// flag indicating whether a directory for that bucket existed. +func (b *BucketManager) DeleteBucket(epoch iotago.EpochIndex) (deleted bool) { b.openDBsMutex.Lock() defer b.openDBsMutex.Unlock() + if exists, err := PathExists(dbPathFromIndex(b.dbConfig.Directory, epoch)); err != nil { + panic(err) + } else if !exists { + return false + } + db, exists := b.openDBs.Get(epoch) if exists { db.Close() - b.openDBs.Remove(epoch) } @@ -211,11 +226,22 @@ func (b *BucketManager) Prune(epoch iotago.EpochIndex) error { // Delete the db size since we pruned the whole directory b.dbSizes.Delete(epoch) - b.lastPrunedEpoch.MarkEvicted(epoch) - return nil + return true } +// RollbackBucket removes data in the bucket in slots [targetSlotIndex+1; epochEndSlot]. 
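+// As an illustrative example (hypothetical values): for a bucket of an epoch spanning slots 8..15 and a target
+// slot index of 11, RollbackBucket(epoch, 11, 15) deletes the prefixes of slots 12, 13, 14 and 15, keeping
+// everything up to and including slot 11.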
+func (b *BucketManager) RollbackBucket(epochIndex iotago.EpochIndex, targetSlotIndex, epochEndSlot iotago.SlotIndex) error { + oldBucketKvStore := b.getDBInstance(epochIndex).KVStore() + for clearSlot := targetSlotIndex + 1; clearSlot <= epochEndSlot; clearSlot++ { + // delete slot prefix from forkedPrunable storage that will be eventually copied into the new engine + if err := oldBucketKvStore.DeletePrefix(clearSlot.MustBytes()); err != nil { + return ierrors.Wrapf(err, "error while clearing slot %d in bucket for epoch %d", clearSlot, epochIndex) + } + } + + return nil +} func (b *BucketManager) Flush() error { b.openDBsMutex.RLock() defer b.openDBsMutex.RUnlock() @@ -229,3 +255,15 @@ func (b *BucketManager) Flush() error { return err } + +func PathExists(path string) (bool, error) { + if _, err := os.Stat(path); err != nil { + if os.IsNotExist(err) { + return false, nil + } + + return false, err + } + + return true, nil +} diff --git a/pkg/storage/prunable/epochstore/epoch_kv.go b/pkg/storage/prunable/epochstore/epoch_kv.go index 0bec3e6b4..3fd580eb7 100644 --- a/pkg/storage/prunable/epochstore/epoch_kv.go +++ b/pkg/storage/prunable/epochstore/epoch_kv.go @@ -49,6 +49,10 @@ func (e *EpochKVStore) GetEpoch(epoch iotago.EpochIndex) (kvstore.KVStore, error return lo.PanicOnErr(e.kv.WithExtendedRealm(epoch.MustBytes())), nil } +func (e *EpochKVStore) DeleteEpoch(epoch iotago.EpochIndex) error { + return e.kv.DeletePrefix(epoch.MustBytes()) +} + func (e *EpochKVStore) Prune(epoch iotago.EpochIndex, defaultPruningDelay iotago.EpochIndex) error { // The epoch we're trying to prune already takes into account the defaultPruningDelay. // Therefore, we don't need to do anything if it is greater equal e.pruningDelay and take the difference otherwise. diff --git a/pkg/storage/prunable/epochstore/store.go b/pkg/storage/prunable/epochstore/store.go index 38eba9491..f93d71832 100644 --- a/pkg/storage/prunable/epochstore/store.go +++ b/pkg/storage/prunable/epochstore/store.go @@ -101,6 +101,10 @@ func (s *Store[V]) StreamBytes(consumer func([]byte, []byte) error) error { return innerErr } +func (s *Store[V]) DeleteEpoch(epoch iotago.EpochIndex) error { + return s.kv.DeletePrefix(epoch.MustBytes()) +} + func (s *Store[V]) Prune(epoch iotago.EpochIndex, defaultPruningDelay iotago.EpochIndex) error { // The epoch we're trying to prune already takes into account the defaultPruningDelay. // Therefore, we don't need to do anything if it is greater equal s.pruningDelay and take the difference otherwise. diff --git a/pkg/storage/prunable/prunable.go b/pkg/storage/prunable/prunable.go index 23051dbae..4d45a9205 100644 --- a/pkg/storage/prunable/prunable.go +++ b/pkg/storage/prunable/prunable.go @@ -1,15 +1,12 @@ package prunable import ( - "os" - copydir "github.com/otiai10/copy" "github.com/iotaledger/hive.go/ierrors" "github.com/iotaledger/hive.go/kvstore" "github.com/iotaledger/hive.go/runtime/ioutils" "github.com/iotaledger/hive.go/runtime/options" - "github.com/iotaledger/hive.go/serializer/v2/byteutils" "github.com/iotaledger/iota-core/pkg/core/account" "github.com/iotaledger/iota-core/pkg/model" "github.com/iotaledger/iota-core/pkg/storage/database" @@ -64,6 +61,7 @@ func Clone(source *Prunable, dbConfig database.Config, apiProvider api.Provider, // Create a newly opened instance of prunable database. // `prunableSlotStore` will be opened automatically as the engine requests it, so no need to open it here. 
+ // TODO: create a re-openable and lockable KVStore source.semiPermanentDB = database.NewDBInstance(source.semiPermanentDBConfig) source.decidedUpgradeSignals = epochstore.NewStore(kvstore.Realm{epochPrefixDecidedUpgradeSignals}, kvstore.Realm{lastPrunedEpochKey}, source.semiPermanentDB.KVStore(), pruningDelayDecidedUpgradeSignals, model.VersionAndHash.Bytes, model.VersionAndHashFromBytes) source.poolRewards = epochstore.NewEpochKVStore(kvstore.Realm{epochPrefixPoolRewards}, kvstore.Realm{lastPrunedEpochKey}, source.semiPermanentDB.KVStore(), pruningDelayPoolRewards) @@ -145,7 +143,7 @@ func (p *Prunable) Flush() { } } -func (p *Prunable) Rollback(targetSlotIndex iotago.SlotIndex, lastCommittedIndex iotago.SlotIndex) error { +func (p *Prunable) Rollback(targetSlotIndex iotago.SlotIndex) error { timeProvider := p.apiProvider.APIForSlot(targetSlotIndex).TimeProvider() targetSlotEpoch := timeProvider.EpochFromSlot(targetSlotIndex) lastCommittedEpoch := targetSlotEpoch @@ -153,77 +151,61 @@ func (p *Prunable) Rollback(targetSlotIndex iotago.SlotIndex, lastCommittedIndex if timeProvider.EpochEnd(targetSlotEpoch) != targetSlotIndex { lastCommittedEpoch-- } - pointOfNoReturn := timeProvider.EpochEnd(targetSlotEpoch) - p.apiProvider.APIForSlot(targetSlotIndex).ProtocolParameters().MaxCommittableAge() - // Shutdown prunable slot store in order to flush and get consistent state on disk after reopening. + if err := p.prunableSlotStore.RollbackBucket(targetSlotEpoch, targetSlotIndex, timeProvider.EpochEnd(targetSlotEpoch)); err != nil { + return ierrors.Wrapf(err, "error while rolling back slots in a bucket for epoch %d", targetSlotEpoch) + } + + // Shut down the prunableSlotStore in order to flush and get consistent state on disk after reopening. p.prunableSlotStore.Shutdown() + // Removed entries that belong to the old fork and cannot be re-used. for epochIdx := lastCommittedEpoch + 1; ; epochIdx++ { - // only remove if epochIdx bigger than epoch of target slot index if epochIdx > targetSlotEpoch { - if exists, err := PathExists(dbPathFromIndex(p.prunableSlotStore.dbConfig.Directory, epochIdx)); err != nil { - return ierrors.Wrapf(err, "failed to check if bucket directory exists in forkedPrunable storage for epoch %d", epochIdx) - } else if !exists { + if deleted := p.prunableSlotStore.DeleteBucket(epochIdx); !deleted { break } - if err := os.RemoveAll(dbPathFromIndex(p.prunableSlotStore.dbConfig.Directory, epochIdx)); err != nil { - return ierrors.Wrapf(err, "failed to remove bucket directory in forkedPrunable storage for epoch %d", epochIdx) + shouldRollback, err := p.shouldRollbackCommittee(epochIdx+1, targetSlotIndex) + if err != nil { + return ierrors.Wrapf(err, "error while checking if committee for epoch %d should be rolled back", epochIdx) } - } - // Remove entries for epochs bigger or equal epochFromSlot(forkingPoint+1) in semiPermanent storage. - // Those entries are part of the fork and values from the old storage should not be used - // values from the candidate storage should be used in its place; that's why we copy those entries - // from the candidate engine to old storage. 
- if err := p.semiPermanentDB.KVStore().DeletePrefix(byteutils.ConcatBytes(kvstore.Realm{epochPrefixPoolRewards}, epochIdx.MustBytes())); err != nil { - return err - } - if err := p.semiPermanentDB.KVStore().DeletePrefix(byteutils.ConcatBytes(kvstore.Realm{epochPrefixPoolStats}, epochIdx.MustBytes())); err != nil { - return err - } - if epochIdx > targetSlotEpoch && targetSlotIndex < pointOfNoReturn { - // TODO: rollback committee using - //committee, _, err := account.AccountsFromBytes(committeeBytes) - //if err != nil { - // innerErr = ierrors.Wrapf(err, "failed to parse committee bytes for epoch %d", epoch) - // return innerErr - //} - //if committee.IsReused() { - // return nil - //} - if err := p.semiPermanentDB.KVStore().DeletePrefix(byteutils.ConcatBytes(kvstore.Realm{epochPrefixCommittee}, (epochIdx + 1).MustBytes())); err != nil { - return err + if shouldRollback { + if err := p.committee.DeleteEpoch(epochIdx + 1); err != nil { + return ierrors.Wrapf(err, "error while deleting committee for epoch %d", epochIdx) + } } } - if err := p.semiPermanentDB.KVStore().DeletePrefix(byteutils.ConcatBytes(kvstore.Realm{epochPrefixDecidedUpgradeSignals}, epochIdx.MustBytes())); err != nil { - return err + if err := p.poolRewards.DeleteEpoch(epochIdx); err != nil { + return ierrors.Wrapf(err, "error while deleting pool rewards for epoch %d", epochIdx) + } + if err := p.poolStats.DeleteEpoch(epochIdx); err != nil { + return ierrors.Wrapf(err, "error while deleting pool stats for epoch %d", epochIdx) } - } - // If the forking slot is the last slot of an epoch, then don't need to clean anything as the bucket with the - // first forked slot contains only forked blocks and was removed in the previous step. - // We need to remove data in the bucket in slots [forkingSlot+1; bucketEnd]. - if lastCommittedEpoch != targetSlotEpoch { - oldBucketKvStore := p.prunableSlotStore.getDBInstance(targetSlotEpoch).KVStore() - for clearSlot := targetSlotIndex + 1; clearSlot <= timeProvider.EpochEnd(targetSlotEpoch); clearSlot++ { - // delete slot prefix from forkedPrunable storage that will be eventually copied into the new engine - if err := oldBucketKvStore.DeletePrefix(clearSlot.MustBytes()); err != nil { - return ierrors.Wrapf(err, "error while clearing slot %d in bucket for epoch %d", clearSlot, targetSlotEpoch) - } + if err := p.decidedUpgradeSignals.DeleteEpoch(epochIdx); err != nil { + return ierrors.Wrapf(err, "error while deleting decided upgrade signals for epoch %d", epochIdx) } } return nil } -func PathExists(path string) (bool, error) { - if _, err := os.Stat(path); err != nil { - if os.IsNotExist(err) { - return false, nil +// Remove committee for the next epoch only if forking point is before point of no return and committee is reused. +// Always remove committees for epochs that are newer than targetSlotEpoch+1. 
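+// (Illustration with hypothetical parameters: if the target slot's epoch ends at slot 15 and MaxCommittableAge
+// is 4, the point of no return computed below is slot 11, so only target slots before slot 11 count as being
+// before the point of no return.)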
+func (p *Prunable) shouldRollbackCommittee(epochIndex iotago.EpochIndex, targetSlotIndex iotago.SlotIndex) (bool, error) { + timeProvider := p.apiProvider.APIForSlot(targetSlotIndex).TimeProvider() + targetSlotEpoch := timeProvider.EpochFromSlot(targetSlotIndex) + pointOfNoReturn := timeProvider.EpochEnd(targetSlotEpoch) - p.apiProvider.APIForSlot(targetSlotIndex).ProtocolParameters().MaxCommittableAge() + + if epochIndex == targetSlotEpoch+1 && targetSlotIndex < pointOfNoReturn { + committee, err := p.committee.Load(targetSlotEpoch + 1) + if err != nil { + return false, err } - return false, err + return committee.IsReused(), nil } return true, nil diff --git a/pkg/storage/storage.go b/pkg/storage/storage.go index 35cb04cb1..5577b5246 100644 --- a/pkg/storage/storage.go +++ b/pkg/storage/storage.go @@ -143,6 +143,6 @@ func (s *Storage) Flush() { s.prunable.Flush() } -func (s *Storage) RollbackPrunable(targetIndex iotago.SlotIndex, lastCommittedIndex iotago.SlotIndex) error { - return s.prunable.Rollback(targetIndex, lastCommittedIndex) +func (s *Storage) RollbackPrunable(targetIndex iotago.SlotIndex) error { + return s.prunable.Rollback(targetIndex) } From 8766ee8abfbd68e2915b3ae5e712b7f416ed9b62 Mon Sep 17 00:00:00 2001 From: Piotr Macek <4007944+piotrm50@users.noreply.github.com> Date: Mon, 11 Sep 2023 14:29:08 +0200 Subject: [PATCH 06/17] Start working on locking and openable KVStore --- pkg/storage/database/db_instance.go | 30 +++++-- pkg/storage/database/syncedkvstore.go | 105 +++++++++++++++++++++++++ pkg/storage/permanent/permanent.go | 10 +-- pkg/storage/prunable/bucket_manager.go | 6 ++ pkg/storage/prunable/prunable.go | 15 ++-- 5 files changed, 149 insertions(+), 17 deletions(-) create mode 100644 pkg/storage/database/syncedkvstore.go diff --git a/pkg/storage/database/db_instance.go b/pkg/storage/database/db_instance.go index 0a77d5c71..bbdbbb053 100644 --- a/pkg/storage/database/db_instance.go +++ b/pkg/storage/database/db_instance.go @@ -1,13 +1,17 @@ package database import ( + "fmt" + "github.com/iotaledger/hive.go/ierrors" "github.com/iotaledger/hive.go/kvstore" + "github.com/iotaledger/hive.go/lo" ) type DBInstance struct { - store kvstore.KVStore // KVStore that is used to access the DB instance + store *synchedKVStore // KVStore that is used to access the DB instance healthTracker *kvstore.StoreHealthTracker + dbConfig Config } func NewDBInstance(dbConfig Config) *DBInstance { @@ -24,12 +28,15 @@ func NewDBInstance(dbConfig Config) *DBInstance { } return &DBInstance{ - store: db, + store: &synchedKVStore{store: db}, healthTracker: storeHealthTracker, + dbConfig: dbConfig, } } func (d *DBInstance) Close() { + fmt.Println("close kvstore", d.dbConfig.Directory) + if err := d.healthTracker.MarkHealthy(); err != nil { panic(err) } @@ -38,9 +45,22 @@ func (d *DBInstance) Close() { } } -//func (d *DBInstance) Open() { -// d.store.Replace(StoreWithDefaultSettings(dbConfig.Directory, true, dbConfig.Engine)) -//} +func (d *DBInstance) Open() { + fmt.Println("open kvstore", d.dbConfig.Directory) + d.store.Replace(lo.PanicOnErr(StoreWithDefaultSettings(d.dbConfig.Directory, false, d.dbConfig.Engine))) + _, err := d.store.store.WithRealm(kvstore.EmptyPrefix) + if err != nil { + panic(err) + } +} + +func (d *DBInstance) Lock() { + d.store.Lock() +} + +func (d *DBInstance) Unlock() { + d.store.Unlock() +} func (d *DBInstance) KVStore() kvstore.KVStore { return d.store diff --git a/pkg/storage/database/syncedkvstore.go b/pkg/storage/database/syncedkvstore.go new file mode 100644 index 
000000000..b6b89ed5e --- /dev/null +++ b/pkg/storage/database/syncedkvstore.go @@ -0,0 +1,105 @@ +package database + +import ( + "github.com/iotaledger/hive.go/kvstore" + "github.com/iotaledger/hive.go/runtime/syncutils" +) + +type synchedKVStore struct { + store kvstore.KVStore // KVStore that is used to access the DB instance + + syncutils.RWMutex +} + +func (s *synchedKVStore) Replace(newKVStore kvstore.KVStore) { + s.store = newKVStore +} + +func (s *synchedKVStore) WithRealm(realm kvstore.Realm) (kvstore.KVStore, error) { + s.RLock() + defer s.RUnlock() + + return s.store.WithRealm(realm) +} + +func (s *synchedKVStore) WithExtendedRealm(realm kvstore.Realm) (kvstore.KVStore, error) { + s.RLock() + defer s.RUnlock() + + return s.store.WithExtendedRealm(realm) +} + +func (s *synchedKVStore) Realm() kvstore.Realm { + return s.store.Realm() +} + +func (s *synchedKVStore) Iterate(prefix kvstore.KeyPrefix, kvConsumerFunc kvstore.IteratorKeyValueConsumerFunc, direction ...kvstore.IterDirection) error { + s.RLock() + defer s.RUnlock() + + return s.store.Iterate(prefix, kvConsumerFunc, direction...) +} + +func (s *synchedKVStore) IterateKeys(prefix kvstore.KeyPrefix, consumerFunc kvstore.IteratorKeyConsumerFunc, direction ...kvstore.IterDirection) error { + s.RLock() + defer s.RUnlock() + + return s.store.IterateKeys(prefix, consumerFunc, direction...) +} + +func (s *synchedKVStore) Clear() error { + s.RLock() + defer s.RUnlock() + + return s.store.Clear() +} + +func (s *synchedKVStore) Get(key kvstore.Key) (value kvstore.Value, err error) { + s.RLock() + defer s.RUnlock() + + return s.store.Get(key) +} + +func (s *synchedKVStore) Set(key kvstore.Key, value kvstore.Value) error { + s.RLock() + defer s.RUnlock() + + return s.store.Set(key, value) +} + +func (s *synchedKVStore) Has(key kvstore.Key) (bool, error) { + s.RLock() + defer s.RUnlock() + + return s.store.Has(key) +} + +func (s *synchedKVStore) Delete(key kvstore.Key) error { + s.RLock() + defer s.RUnlock() + + return s.store.Delete(key) +} + +func (s *synchedKVStore) DeletePrefix(prefix kvstore.KeyPrefix) error { + s.RLock() + defer s.RUnlock() + + return s.store.DeletePrefix(prefix) +} + +func (s *synchedKVStore) Flush() error { + return s.store.Flush() +} + +func (s *synchedKVStore) Close() error { + return s.store.Close() +} + +func (s *synchedKVStore) Batched() (kvstore.BatchedMutations, error) { + s.RLock() + defer s.RUnlock() + + return s.store.Batched() +} diff --git a/pkg/storage/permanent/permanent.go b/pkg/storage/permanent/permanent.go index c0dbaee5e..882e232cf 100644 --- a/pkg/storage/permanent/permanent.go +++ b/pkg/storage/permanent/permanent.go @@ -50,18 +50,16 @@ func New(dbConfig database.Config, errorHandler func(error), opts ...options.Opt } func Clone(source *Permanent, dbConfig database.Config, errorHandler func(error), opts ...options.Option[Permanent]) (*Permanent, error) { + source.store.Lock() + defer source.store.Unlock() + source.store.Close() if err := copydir.Copy(source.dbConfig.Directory, dbConfig.Directory); err != nil { return nil, ierrors.Wrap(err, "failed to copy permanent storage directory to new storage path") } - source.store = database.NewDBInstance(source.dbConfig) - source.settings = NewSettings(lo.PanicOnErr(source.store.KVStore().WithExtendedRealm(kvstore.Realm{settingsPrefix}))) - source.commitments = NewCommitments(lo.PanicOnErr(source.store.KVStore().WithExtendedRealm(kvstore.Realm{commitmentsPrefix})), source.settings.APIProvider()) - source.utxoLedger = 
utxoledger.New(lo.PanicOnErr(source.store.KVStore().WithExtendedRealm(kvstore.Realm{ledgerPrefix})), source.settings.APIProvider())
-	source.accounts = lo.PanicOnErr(source.store.KVStore().WithExtendedRealm(kvstore.Realm{accountsPrefix}))
-	source.latestNonEmptySlot = lo.PanicOnErr(source.store.KVStore().WithExtendedRealm(kvstore.Realm{latestNonEmptySlotPrefix}))
+	source.store.Open()
 	return New(dbConfig, errorHandler, opts...), nil
 }
diff --git a/pkg/storage/prunable/bucket_manager.go index 6141f3ea1..ee8c4d269 100644
--- a/pkg/storage/prunable/bucket_manager.go
+++ b/pkg/storage/prunable/bucket_manager.go
@@ -29,6 +29,8 @@ type BucketManager struct {
 	dbSizes *shrinkingmap.ShrinkingMap[iotago.EpochIndex, int64]
 	optsMaxOpenDBs int
+
+	mutex syncutils.RWMutex
 }
 func NewBucketManager(dbConfig database.Config, errorHandler func(error), opts ...options.Option[BucketManager]) *BucketManager {
@@ -171,6 +173,10 @@ func (b *BucketManager) RestoreFromDisk() (lastPrunedEpoch iotago.EpochIndex) {
 // epochIndex 1 -> db 1
 // epochIndex 2 -> db 2
 func (b *BucketManager) getDBInstance(index iotago.EpochIndex) (db *database.DBInstance) {
+	// Lock global mutex
+	b.mutex.RLock()
+	defer b.mutex.RUnlock()
+
 	b.openDBsMutex.Lock()
 	defer b.openDBsMutex.Unlock()
diff --git a/pkg/storage/prunable/prunable.go index 4d45a9205..9eb3abfc1 100644
--- a/pkg/storage/prunable/prunable.go
+++ b/pkg/storage/prunable/prunable.go
@@ -50,6 +50,13 @@ func New(dbConfig database.Config, apiProvider api.Provider, errorHandler func(e
 }
 func Clone(source *Prunable, dbConfig database.Config, apiProvider api.Provider, errorHandler func(error), opts ...options.Option[BucketManager]) (*Prunable, error) {
+	// Lock the semi-permanent DB and the prunable slot store so that nobody can use or open them while cloning.
+	source.semiPermanentDB.Lock()
+	defer source.semiPermanentDB.Unlock()
+
+	source.prunableSlotStore.mutex.Lock()
+	defer source.prunableSlotStore.mutex.Unlock()
+
 	// Close forked prunable storage before copying its contents.
 	source.semiPermanentDB.Close()
 	source.prunableSlotStore.Shutdown()
@@ -61,12 +68,8 @@ func Clone(source *Prunable, dbConfig database.Config, apiProvider api.Provider,
 	// Create a newly opened instance of prunable database.
 	// `prunableSlotStore` will be opened automatically as the engine requests it, so no need to open it here.
- // TODO: create a re-openable and lockable KVStore - source.semiPermanentDB = database.NewDBInstance(source.semiPermanentDBConfig) - source.decidedUpgradeSignals = epochstore.NewStore(kvstore.Realm{epochPrefixDecidedUpgradeSignals}, kvstore.Realm{lastPrunedEpochKey}, source.semiPermanentDB.KVStore(), pruningDelayDecidedUpgradeSignals, model.VersionAndHash.Bytes, model.VersionAndHashFromBytes) - source.poolRewards = epochstore.NewEpochKVStore(kvstore.Realm{epochPrefixPoolRewards}, kvstore.Realm{lastPrunedEpochKey}, source.semiPermanentDB.KVStore(), pruningDelayPoolRewards) - source.poolStats = epochstore.NewStore(kvstore.Realm{epochPrefixPoolStats}, kvstore.Realm{lastPrunedEpochKey}, source.semiPermanentDB.KVStore(), pruningDelayPoolStats, (*model.PoolsStats).Bytes, model.PoolsStatsFromBytes) - source.committee = epochstore.NewStore(kvstore.Realm{epochPrefixCommittee}, kvstore.Realm{lastPrunedEpochKey}, source.semiPermanentDB.KVStore(), pruningDelayCommittee, (*account.Accounts).Bytes, account.AccountsFromBytes) + + source.semiPermanentDB.Open() return New(dbConfig, apiProvider, errorHandler, opts...), nil } From 71306dbad458046224a1022e3573aec3f3cac2f3 Mon Sep 17 00:00:00 2001 From: Piotr Macek <4007944+piotrm50@users.noreply.github.com> Date: Wed, 13 Sep 2023 14:11:22 +0200 Subject: [PATCH 07/17] Improve error message --- pkg/protocol/engine/utxoledger/snapshot.go | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/pkg/protocol/engine/utxoledger/snapshot.go b/pkg/protocol/engine/utxoledger/snapshot.go index e60ec1ba0..02e0ab24f 100644 --- a/pkg/protocol/engine/utxoledger/snapshot.go +++ b/pkg/protocol/engine/utxoledger/snapshot.go @@ -255,13 +255,13 @@ func (m *Manager) Export(writer io.WriteSeeker, targetIndex iotago.SlotIndex) er // Get all UTXOs and sort them by outputID outputIDs, err := m.UnspentOutputsIDs(ReadLockLedger(false)) if err != nil { - return err + return ierrors.Wrap(err, "error while retrieving unspent outputIDs") } for _, outputID := range outputIDs.RemoveDupsAndSort() { output, err := m.ReadOutputByOutputIDWithoutLocking(outputID) if err != nil { - return err + return ierrors.Wrapf(err, "error while retrieving output %s", outputID) } if err := utils.WriteBytesFunc(writer, output.SnapshotBytes(), &relativeCountersPosition); err != nil { @@ -274,12 +274,12 @@ func (m *Manager) Export(writer io.WriteSeeker, targetIndex iotago.SlotIndex) er for diffIndex := ledgerIndex; diffIndex > targetIndex; diffIndex-- { slotDiff, err := m.SlotDiffWithoutLocking(diffIndex) if err != nil { - return err + return ierrors.Wrapf(err, "error while retrieving slot diffs for slot %s", diffIndex) } written, err := WriteSlotDiffToSnapshotWriter(writer, slotDiff) if err != nil { - return err + return ierrors.Wrapf(err, "error while writing slot diffs for slot %s", diffIndex) } relativeCountersPosition += written From 35bab8ff72681f8018c45ab053c4d60793fb7490 Mon Sep 17 00:00:00 2001 From: Piotr Macek <4007944+piotrm50@users.noreply.github.com> Date: Wed, 13 Sep 2023 14:12:26 +0200 Subject: [PATCH 08/17] Implement syncedKVstore that allows re-opening a kvstore without affecting dependent components --- pkg/storage/database/db_instance.go | 42 +++-- pkg/storage/database/syncedkvstore.go | 241 +++++++++++++++++++------ pkg/storage/database/utils.go | 10 +- pkg/storage/permanent/permanent.go | 9 +- pkg/storage/prunable/bucket_manager.go | 1 + pkg/storage/prunable/prunable.go | 15 +- pkg/testsuite/mock/node.go | 5 + 7 files changed, 242 insertions(+), 81 deletions(-) diff --git 
a/pkg/storage/database/db_instance.go b/pkg/storage/database/db_instance.go index bbdbbb053..d0287b293 100644 --- a/pkg/storage/database/db_instance.go +++ b/pkg/storage/database/db_instance.go @@ -1,15 +1,14 @@ package database import ( - "fmt" - "github.com/iotaledger/hive.go/ierrors" "github.com/iotaledger/hive.go/kvstore" "github.com/iotaledger/hive.go/lo" + "github.com/iotaledger/hive.go/runtime/syncutils" ) type DBInstance struct { - store *synchedKVStore // KVStore that is used to access the DB instance + store *syncedKVStore // KVStore that is used to access the DB instance healthTracker *kvstore.StoreHealthTracker dbConfig Config } @@ -19,7 +18,15 @@ func NewDBInstance(dbConfig Config) *DBInstance { if err != nil { panic(err) } - storeHealthTracker, err := kvstore.NewStoreHealthTracker(db, dbConfig.PrefixHealth, dbConfig.Version, nil) + + syncedStore := &syncedKVStore{ + storeInstance: db, + parentStore: nil, + dbPrefix: kvstore.EmptyPrefix, + instanceMutex: new(syncutils.RWMutex), + } + + storeHealthTracker, err := kvstore.NewStoreHealthTracker(syncedStore, dbConfig.PrefixHealth, dbConfig.Version, nil) if err != nil { panic(ierrors.Wrapf(err, "database in %s is corrupted, delete database and resync node", dbConfig.Directory)) } @@ -28,30 +35,43 @@ func NewDBInstance(dbConfig Config) *DBInstance { } return &DBInstance{ - store: &synchedKVStore{store: db}, + store: syncedStore, healthTracker: storeHealthTracker, dbConfig: dbConfig, } } func (d *DBInstance) Close() { - fmt.Println("close kvstore", d.dbConfig.Directory) + d.MarkHealthy() + + d.store.Lock() + defer d.store.Unlock() + if err := FlushAndClose(d.store); err != nil { + panic(err) + } +} + +func (d *DBInstance) MarkHealthy() { if err := d.healthTracker.MarkHealthy(); err != nil { panic(err) } +} + +// TODO: make markCorrupted and markHealthy not lock the kvstore so that we can first lock the kvstore and then mark as healthy and corrupted without causing a deadlock +func (d *DBInstance) MarkCorrupted() { + if err := d.healthTracker.MarkCorrupted(); err != nil { + panic(err) + } +} +func (d *DBInstance) CloseWithoutLocking() { if err := FlushAndClose(d.store); err != nil { panic(err) } } func (d *DBInstance) Open() { - fmt.Println("open kvstore", d.dbConfig.Directory) d.store.Replace(lo.PanicOnErr(StoreWithDefaultSettings(d.dbConfig.Directory, false, d.dbConfig.Engine))) - _, err := d.store.store.WithRealm(kvstore.EmptyPrefix) - if err != nil { - panic(err) - } } func (d *DBInstance) Lock() { diff --git a/pkg/storage/database/syncedkvstore.go b/pkg/storage/database/syncedkvstore.go index b6b89ed5e..eb191915c 100644 --- a/pkg/storage/database/syncedkvstore.go +++ b/pkg/storage/database/syncedkvstore.go @@ -1,105 +1,236 @@ package database import ( + "fmt" + "sync" + + "github.com/iotaledger/hive.go/ds/types" "github.com/iotaledger/hive.go/kvstore" + "github.com/iotaledger/hive.go/kvstore/utils" "github.com/iotaledger/hive.go/runtime/syncutils" + "github.com/iotaledger/hive.go/serializer/v2/byteutils" ) -type synchedKVStore struct { - store kvstore.KVStore // KVStore that is used to access the DB instance +type syncedKVStore struct { + storeInstance kvstore.KVStore // KVStore that is used to access the DB instance + parentStore *syncedKVStore + dbPrefix kvstore.KeyPrefix + + instanceMutex *syncutils.RWMutex +} + +func (s *syncedKVStore) instance() kvstore.KVStore { + if s.storeInstance != nil { + return s.storeInstance + } + + return s.parentStore.instance() +} + +func (s *syncedKVStore) Lock() { + s.instanceMutex.Lock() +} + 
+func (s *syncedKVStore) Unlock() { + s.instanceMutex.Unlock() +} + +func (s *syncedKVStore) Replace(newKVStore kvstore.KVStore) { + if s.storeInstance == nil { + s.parentStore.Replace(newKVStore) + + return + } + + s.storeInstance = newKVStore +} + +func (s *syncedKVStore) WithRealm(realm kvstore.Realm) (kvstore.KVStore, error) { + s.instanceMutex.RLock() + defer s.instanceMutex.RUnlock() + + return s.withRealm(realm) +} +func (s *syncedKVStore) withRealm(realm kvstore.Realm) (kvstore.KVStore, error) { + return &syncedKVStore{ + storeInstance: nil, + parentStore: s, + instanceMutex: s.instanceMutex, + dbPrefix: realm, + }, nil +} +func (s *syncedKVStore) WithExtendedRealm(realm kvstore.Realm) (kvstore.KVStore, error) { + s.instanceMutex.RLock() + defer s.instanceMutex.RUnlock() + + return s.withRealm(s.buildKeyPrefix(realm)) +} - syncutils.RWMutex +func (s *syncedKVStore) Realm() kvstore.Realm { + return s.dbPrefix } -func (s *synchedKVStore) Replace(newKVStore kvstore.KVStore) { - s.store = newKVStore +func (s *syncedKVStore) Iterate(prefix kvstore.KeyPrefix, kvConsumerFunc kvstore.IteratorKeyValueConsumerFunc, direction ...kvstore.IterDirection) error { + s.instanceMutex.RLock() + defer s.instanceMutex.RUnlock() + return s.instance().Iterate(s.buildKeyPrefix(prefix), func(key kvstore.Key, value kvstore.Value) bool { + fmt.Println("realm", s.dbPrefix, "prefix", prefix, "key", key, "value", value) + + return kvConsumerFunc(utils.CopyBytes(key)[len(s.dbPrefix):], value) + }, direction...) } -func (s *synchedKVStore) WithRealm(realm kvstore.Realm) (kvstore.KVStore, error) { - s.RLock() - defer s.RUnlock() +func (s *syncedKVStore) IterateKeys(prefix kvstore.KeyPrefix, consumerFunc kvstore.IteratorKeyConsumerFunc, direction ...kvstore.IterDirection) error { + s.instanceMutex.RLock() + defer s.instanceMutex.RUnlock() - return s.store.WithRealm(realm) + return s.instance().IterateKeys(s.buildKeyPrefix(prefix), func(key kvstore.Key) bool { + return consumerFunc(utils.CopyBytes(key)[len(s.dbPrefix):]) + }, direction...) } -func (s *synchedKVStore) WithExtendedRealm(realm kvstore.Realm) (kvstore.KVStore, error) { - s.RLock() - defer s.RUnlock() +func (s *syncedKVStore) Clear() error { + s.instanceMutex.RLock() + defer s.instanceMutex.RUnlock() - return s.store.WithExtendedRealm(realm) + return s.instance().DeletePrefix(s.dbPrefix) } -func (s *synchedKVStore) Realm() kvstore.Realm { - return s.store.Realm() +func (s *syncedKVStore) Get(key kvstore.Key) (value kvstore.Value, err error) { + s.instanceMutex.RLock() + defer s.instanceMutex.RUnlock() + + return s.instance().Get(byteutils.ConcatBytes(s.dbPrefix, key)) } -func (s *synchedKVStore) Iterate(prefix kvstore.KeyPrefix, kvConsumerFunc kvstore.IteratorKeyValueConsumerFunc, direction ...kvstore.IterDirection) error { - s.RLock() - defer s.RUnlock() +func (s *syncedKVStore) Set(key kvstore.Key, value kvstore.Value) error { + s.instanceMutex.RLock() + defer s.instanceMutex.RUnlock() - return s.store.Iterate(prefix, kvConsumerFunc, direction...) + return s.instance().Set(byteutils.ConcatBytes(s.dbPrefix, key), value) } -func (s *synchedKVStore) IterateKeys(prefix kvstore.KeyPrefix, consumerFunc kvstore.IteratorKeyConsumerFunc, direction ...kvstore.IterDirection) error { - s.RLock() - defer s.RUnlock() +func (s *syncedKVStore) Has(key kvstore.Key) (bool, error) { + s.instanceMutex.RLock() + defer s.instanceMutex.RUnlock() - return s.store.IterateKeys(prefix, consumerFunc, direction...) 
+ return s.instance().Has(byteutils.ConcatBytes(s.dbPrefix, key)) } -func (s *synchedKVStore) Clear() error { - s.RLock() - defer s.RUnlock() +func (s *syncedKVStore) Delete(key kvstore.Key) error { + s.instanceMutex.RLock() + defer s.instanceMutex.RUnlock() - return s.store.Clear() + return s.instance().Delete(byteutils.ConcatBytes(s.dbPrefix, key)) } -func (s *synchedKVStore) Get(key kvstore.Key) (value kvstore.Value, err error) { - s.RLock() - defer s.RUnlock() +func (s *syncedKVStore) DeletePrefix(prefix kvstore.KeyPrefix) error { + s.instanceMutex.RLock() + defer s.instanceMutex.RUnlock() - return s.store.Get(key) + return s.instance().DeletePrefix(s.buildKeyPrefix(prefix)) } -func (s *synchedKVStore) Set(key kvstore.Key, value kvstore.Value) error { - s.RLock() - defer s.RUnlock() +func (s *syncedKVStore) Flush() error { + s.instanceMutex.RLock() + defer s.instanceMutex.RUnlock() - return s.store.Set(key, value) + return s.FlushWithoutLocking() } -func (s *synchedKVStore) Has(key kvstore.Key) (bool, error) { - s.RLock() - defer s.RUnlock() +func (s *syncedKVStore) FlushWithoutLocking() error { + return s.instance().Flush() +} +func (s *syncedKVStore) Close() error { + s.instanceMutex.RLock() + defer s.instanceMutex.RUnlock() - return s.store.Has(key) + return s.CloseWithoutLocking() +} +func (s *syncedKVStore) CloseWithoutLocking() error { + return s.instance().Close() } -func (s *synchedKVStore) Delete(key kvstore.Key) error { - s.RLock() - defer s.RUnlock() +func (s *syncedKVStore) Batched() (kvstore.BatchedMutations, error) { + s.instanceMutex.RLock() + defer s.instanceMutex.RUnlock() - return s.store.Delete(key) + return &syncedBachedMutations{ + parentStore: s, + dbPrefix: s.dbPrefix, + setOperations: make(map[string]kvstore.Value), + deleteOperations: make(map[string]types.Empty), + }, nil } -func (s *synchedKVStore) DeletePrefix(prefix kvstore.KeyPrefix) error { - s.RLock() - defer s.RUnlock() +// builds a key usable using the realm and the given prefix. 
+func (s *syncedKVStore) buildKeyPrefix(prefix kvstore.KeyPrefix) kvstore.KeyPrefix { + return byteutils.ConcatBytes(s.dbPrefix, prefix) +} - return s.store.DeletePrefix(prefix) +type syncedBachedMutations struct { + parentStore *syncedKVStore + dbPrefix kvstore.KeyPrefix + setOperations map[string]kvstore.Value + deleteOperations map[string]types.Empty + operationsMutex sync.Mutex } -func (s *synchedKVStore) Flush() error { - return s.store.Flush() +func (s *syncedBachedMutations) Set(key kvstore.Key, value kvstore.Value) error { + stringKey := byteutils.ConcatBytesToString(s.dbPrefix, key) + + s.operationsMutex.Lock() + defer s.operationsMutex.Unlock() + + delete(s.deleteOperations, stringKey) + s.setOperations[stringKey] = value + + return nil } -func (s *synchedKVStore) Close() error { - return s.store.Close() +func (s *syncedBachedMutations) Delete(key kvstore.Key) error { + stringKey := byteutils.ConcatBytesToString(s.dbPrefix, key) + + s.operationsMutex.Lock() + defer s.operationsMutex.Unlock() + + delete(s.setOperations, stringKey) + s.deleteOperations[stringKey] = types.Void + + return nil +} + +func (s *syncedBachedMutations) Cancel() { + s.operationsMutex.Lock() + defer s.operationsMutex.Unlock() + + s.setOperations = make(map[string]kvstore.Value) + s.deleteOperations = make(map[string]types.Empty) } -func (s *synchedKVStore) Batched() (kvstore.BatchedMutations, error) { - s.RLock() - defer s.RUnlock() +func (s *syncedBachedMutations) Commit() error { + s.parentStore.instanceMutex.RLock() + defer s.parentStore.instanceMutex.RUnlock() + + batched, err := s.parentStore.instance().Batched() + if err != nil { + return err + } + + s.operationsMutex.Lock() + defer s.operationsMutex.Unlock() + + for key, value := range s.setOperations { + if err = batched.Set([]byte(key), value); err != nil { + return err + } + } + + for key := range s.deleteOperations { + if err = batched.Delete([]byte(key)); err != nil { + return err + } + } - return s.store.Batched() + return batched.Commit() } diff --git a/pkg/storage/database/utils.go b/pkg/storage/database/utils.go index 3333684df..7f9d63de5 100644 --- a/pkg/storage/database/utils.go +++ b/pkg/storage/database/utils.go @@ -1,13 +1,9 @@ package database -import ( - "github.com/iotaledger/hive.go/kvstore" -) - -func FlushAndClose(store kvstore.KVStore) error { - if err := store.Flush(); err != nil { +func FlushAndClose(store *syncedKVStore) error { + if err := store.FlushWithoutLocking(); err != nil { return err } - return store.Close() + return store.CloseWithoutLocking() } diff --git a/pkg/storage/permanent/permanent.go b/pkg/storage/permanent/permanent.go index 5f14ca542..256663cdc 100644 --- a/pkg/storage/permanent/permanent.go +++ b/pkg/storage/permanent/permanent.go @@ -53,10 +53,11 @@ func New(dbConfig database.Config, errorHandler func(error), opts ...options.Opt } func Clone(source *Permanent, dbConfig database.Config, errorHandler func(error), opts ...options.Option[Permanent]) (*Permanent, error) { + source.store.MarkHealthy() + // TODO: mark healthy within the lock source.store.Lock() - defer source.store.Unlock() - source.store.Close() + source.store.CloseWithoutLocking() if err := copydir.Copy(source.dbConfig.Directory, dbConfig.Directory); err != nil { return nil, ierrors.Wrap(err, "failed to copy permanent storage directory to new storage path") @@ -64,6 +65,10 @@ func Clone(source *Permanent, dbConfig database.Config, errorHandler func(error) source.store.Open() + source.store.Unlock() + // TODO: mark corrupted within the lock + 
source.store.MarkCorrupted() + return New(dbConfig, errorHandler, opts...), nil } diff --git a/pkg/storage/prunable/bucket_manager.go b/pkg/storage/prunable/bucket_manager.go index ee8c4d269..270e8b8b1 100644 --- a/pkg/storage/prunable/bucket_manager.go +++ b/pkg/storage/prunable/bucket_manager.go @@ -78,6 +78,7 @@ func (b *BucketManager) Shutdown() { defer b.openDBsMutex.Unlock() b.openDBs.Each(func(index iotago.EpochIndex, db *database.DBInstance) { + // TODO: lock the database before closing so that no one can use it before we close it db.Close() b.openDBs.Remove(index) }) diff --git a/pkg/storage/prunable/prunable.go b/pkg/storage/prunable/prunable.go index 9eb3abfc1..25106bf52 100644 --- a/pkg/storage/prunable/prunable.go +++ b/pkg/storage/prunable/prunable.go @@ -50,26 +50,29 @@ func New(dbConfig database.Config, apiProvider api.Provider, errorHandler func(e } func Clone(source *Prunable, dbConfig database.Config, apiProvider api.Provider, errorHandler func(error), opts ...options.Option[BucketManager]) (*Prunable, error) { - // Lock semi pemanent DB and prunable slot store so that nobody can try to use or open them while cloning. + source.semiPermanentDB.MarkHealthy() + // TODO: mark healthy within the lock + // Lock semi-permanent DB and prunable slot store so that nobody can try to use or open them while cloning. source.semiPermanentDB.Lock() - defer source.semiPermanentDB.Unlock() source.prunableSlotStore.mutex.Lock() defer source.prunableSlotStore.mutex.Unlock() // Close forked prunable storage before copying its contents. - source.semiPermanentDB.Close() + source.semiPermanentDB.CloseWithoutLocking() source.prunableSlotStore.Shutdown() // Copy the storage on disk to new location. if err := copydir.Copy(source.prunableSlotStore.dbConfig.Directory, dbConfig.Directory); err != nil { return nil, ierrors.Wrap(err, "failed to copy prunable storage directory to new storage path") } - - // Create a newly opened instance of prunable database. - // `prunableSlotStore` will be opened automatically as the engine requests it, so no need to open it here. + // TODO: it's possible to copy prunable slot store separately bucket-after-bucket + // to minimize time of locking of the most recent bucket that could be used and semi permanent storage. source.semiPermanentDB.Open() + source.semiPermanentDB.Unlock() + source.semiPermanentDB.MarkCorrupted() + // TODO: mark healthy within the lock return New(dbConfig, apiProvider, errorHandler, opts...), nil } diff --git a/pkg/testsuite/mock/node.go b/pkg/testsuite/mock/node.go index 42cecac93..629f85161 100644 --- a/pkg/testsuite/mock/node.go +++ b/pkg/testsuite/mock/node.go @@ -3,6 +3,7 @@ package mock import ( "context" "crypto/ed25519" + "encoding/json" "fmt" "sync" "sync/atomic" @@ -24,8 +25,12 @@ import ( "github.com/iotaledger/iota-core/pkg/protocol/chainmanager" "github.com/iotaledger/iota-core/pkg/protocol/engine" "github.com/iotaledger/iota-core/pkg/protocol/engine/blocks" + "github.com/iotaledger/iota-core/pkg/protocol/engine/commitmentfilter" + "github.com/iotaledger/iota-core/pkg/protocol/engine/filter" + "github.com/iotaledger/iota-core/pkg/protocol/engine/mempool" "github.com/iotaledger/iota-core/pkg/protocol/engine/notarization" iotago "github.com/iotaledger/iota.go/v4" + "github.com/iotaledger/iota.go/v4/merklehasher" ) // idAliases contains a list of aliases registered for a set of IDs. 
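The cloning flow above depends on a strict ordering: lock the store, mark it healthy and close it, copy the directory on disk, then reopen the store and mark it corrupted again, all without the health tracker re-acquiring the lock that Clone already holds (the TODOs above flag exactly this). The following is a minimal, self-contained sketch of that layering, with a non-locking inner store shared by a locking wrapper; flatStore, lockedStore and the os.CopyFS-based copy are illustrative placeholders (os.CopyFS assumes Go 1.23+), not identifiers or calls from this repository.

    package dbsketch

    import (
    	"os"
    	"sync"
    )

    // flatStore is the non-locking layer that talks to the underlying database
    // directly. Helpers such as a health tracker go through this layer so they
    // can run while the outer lock is already held.
    type flatStore struct {
    	healthy bool // stands in for the on-disk health ("clean shutdown") flag
    	open    bool
    }

    func (f *flatStore) markHealthy()   { f.healthy = true }
    func (f *flatStore) markCorrupted() { f.healthy = false }
    func (f *flatStore) close()         { f.open = false }
    func (f *flatStore) reopen()        { f.open = true }

    // lockedStore is the layer handed to regular components; every operation
    // takes the shared mutex.
    type lockedStore struct {
    	inner *flatStore
    	mu    sync.RWMutex
    }

    // Clone copies the database directory to dst while holding the write lock.
    // All work goes through the inner layer, so nothing re-enters the mutex.
    func (s *lockedStore) Clone(src, dst string) error {
    	s.mu.Lock()
    	defer s.mu.Unlock()

    	s.inner.markHealthy() // persist the clean-shutdown marker before copying
    	s.inner.close()

    	if err := os.CopyFS(dst, os.DirFS(src)); err != nil { // requires Go 1.23+
    		return err
    	}

    	s.inner.reopen()
    	s.inner.markCorrupted() // an open store stays "dirty" until it is closed cleanly
    	return nil
    }

In this pattern the health flag acts as a dirty bit: it is set while the store is open and cleared only by a clean close, which is how a corrupted database is detected on the next startup.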
From a47e75442014dca8accbd08fa45ac65ae7fa80b2 Mon Sep 17 00:00:00 2001 From: Piotr Macek <4007944+piotrm50@users.noreply.github.com> Date: Wed, 13 Sep 2023 15:40:44 +0200 Subject: [PATCH 09/17] Implement openableKVStore to enable using healthtracker while holding a lock on a store. --- pkg/storage/database/db_instance.go | 36 +++-- pkg/storage/database/realmkvstore.go | 173 ++++++++++++++++++++++++ pkg/storage/database/syncedkvstore.go | 186 ++++++++------------------ pkg/storage/database/utils.go | 2 +- pkg/storage/permanent/permanent.go | 7 +- pkg/storage/prunable/prunable.go | 6 +- 6 files changed, 245 insertions(+), 165 deletions(-) create mode 100644 pkg/storage/database/realmkvstore.go diff --git a/pkg/storage/database/db_instance.go b/pkg/storage/database/db_instance.go index d0287b293..ee8c2f6ad 100644 --- a/pkg/storage/database/db_instance.go +++ b/pkg/storage/database/db_instance.go @@ -8,7 +8,7 @@ import ( ) type DBInstance struct { - store *syncedKVStore // KVStore that is used to access the DB instance + store *lockedKVStore // KVStore that is used to access the DB instance healthTracker *kvstore.StoreHealthTracker dbConfig Config } @@ -19,14 +19,16 @@ func NewDBInstance(dbConfig Config) *DBInstance { panic(err) } - syncedStore := &syncedKVStore{ - storeInstance: db, - parentStore: nil, - dbPrefix: kvstore.EmptyPrefix, + lockableKVStore := &lockedKVStore{ + openableKVStore: &openableKVStore{ + storeInstance: db, + parentStore: nil, + dbPrefix: kvstore.EmptyPrefix, + }, instanceMutex: new(syncutils.RWMutex), } - storeHealthTracker, err := kvstore.NewStoreHealthTracker(syncedStore, dbConfig.PrefixHealth, dbConfig.Version, nil) + storeHealthTracker, err := kvstore.NewStoreHealthTracker(lockableKVStore.openableKVStore, dbConfig.PrefixHealth, dbConfig.Version, nil) if err != nil { panic(ierrors.Wrapf(err, "database in %s is corrupted, delete database and resync node", dbConfig.Directory)) } @@ -35,36 +37,24 @@ func NewDBInstance(dbConfig Config) *DBInstance { } return &DBInstance{ - store: syncedStore, + store: lockableKVStore, healthTracker: storeHealthTracker, dbConfig: dbConfig, } } func (d *DBInstance) Close() { - d.MarkHealthy() - d.store.Lock() defer d.store.Unlock() - if err := FlushAndClose(d.store); err != nil { - panic(err) - } + d.CloseWithoutLocking() } -func (d *DBInstance) MarkHealthy() { +func (d *DBInstance) CloseWithoutLocking() { if err := d.healthTracker.MarkHealthy(); err != nil { panic(err) } -} -// TODO: make markCorrupted and markHealthy not lock the kvstore so that we can first lock the kvstore and then mark as healthy and corrupted without causing a deadlock -func (d *DBInstance) MarkCorrupted() { - if err := d.healthTracker.MarkCorrupted(); err != nil { - panic(err) - } -} -func (d *DBInstance) CloseWithoutLocking() { if err := FlushAndClose(d.store); err != nil { panic(err) } @@ -72,6 +62,10 @@ func (d *DBInstance) CloseWithoutLocking() { func (d *DBInstance) Open() { d.store.Replace(lo.PanicOnErr(StoreWithDefaultSettings(d.dbConfig.Directory, false, d.dbConfig.Engine))) + + if err := d.healthTracker.MarkCorrupted(); err != nil { + panic(err) + } } func (d *DBInstance) Lock() { diff --git a/pkg/storage/database/realmkvstore.go b/pkg/storage/database/realmkvstore.go new file mode 100644 index 000000000..953ac0188 --- /dev/null +++ b/pkg/storage/database/realmkvstore.go @@ -0,0 +1,173 @@ +package database + +import ( + "sync" + + "github.com/iotaledger/hive.go/ds/types" + "github.com/iotaledger/hive.go/kvstore" + 
"github.com/iotaledger/hive.go/kvstore/utils" + "github.com/iotaledger/hive.go/serializer/v2/byteutils" +) + +type openableKVStore struct { + storeInstance kvstore.KVStore // KVStore that is used to access the DB instance + parentStore *openableKVStore + dbPrefix kvstore.KeyPrefix +} + +func (s *openableKVStore) instance() kvstore.KVStore { + if s.storeInstance != nil { + return s.storeInstance + } + + return s.parentStore.instance() +} + +func (s *openableKVStore) Replace(newKVStore kvstore.KVStore) { + if s.storeInstance == nil { + s.parentStore.Replace(newKVStore) + + return + } + + s.storeInstance = newKVStore +} + +func (s *openableKVStore) WithRealm(realm kvstore.Realm) (kvstore.KVStore, error) { + return s.withRealm(realm) +} +func (s *openableKVStore) withRealm(realm kvstore.Realm) (kvstore.KVStore, error) { + return &openableKVStore{ + storeInstance: nil, + parentStore: s, + dbPrefix: realm, + }, nil +} +func (s *openableKVStore) WithExtendedRealm(realm kvstore.Realm) (kvstore.KVStore, error) { + return s.withRealm(s.buildKeyPrefix(realm)) +} + +func (s *openableKVStore) Realm() kvstore.Realm { + return s.dbPrefix +} + +func (s *openableKVStore) Iterate(prefix kvstore.KeyPrefix, kvConsumerFunc kvstore.IteratorKeyValueConsumerFunc, direction ...kvstore.IterDirection) error { + return s.instance().Iterate(s.buildKeyPrefix(prefix), func(key kvstore.Key, value kvstore.Value) bool { + return kvConsumerFunc(utils.CopyBytes(key)[len(s.dbPrefix):], value) + }, direction...) +} + +func (s *openableKVStore) IterateKeys(prefix kvstore.KeyPrefix, consumerFunc kvstore.IteratorKeyConsumerFunc, direction ...kvstore.IterDirection) error { + return s.instance().IterateKeys(s.buildKeyPrefix(prefix), func(key kvstore.Key) bool { + return consumerFunc(utils.CopyBytes(key)[len(s.dbPrefix):]) + }, direction...) +} + +func (s *openableKVStore) Clear() error { + return s.instance().DeletePrefix(s.dbPrefix) +} + +func (s *openableKVStore) Get(key kvstore.Key) (value kvstore.Value, err error) { + return s.instance().Get(byteutils.ConcatBytes(s.dbPrefix, key)) +} + +func (s *openableKVStore) Set(key kvstore.Key, value kvstore.Value) error { + return s.instance().Set(byteutils.ConcatBytes(s.dbPrefix, key), value) +} + +func (s *openableKVStore) Has(key kvstore.Key) (bool, error) { + return s.instance().Has(byteutils.ConcatBytes(s.dbPrefix, key)) +} + +func (s *openableKVStore) Delete(key kvstore.Key) error { + return s.instance().Delete(byteutils.ConcatBytes(s.dbPrefix, key)) +} + +func (s *openableKVStore) DeletePrefix(prefix kvstore.KeyPrefix) error { + return s.instance().DeletePrefix(s.buildKeyPrefix(prefix)) +} + +func (s *openableKVStore) Flush() error { + return s.instance().Flush() +} +func (s *openableKVStore) Close() error { + return s.instance().Close() +} + +func (s *openableKVStore) Batched() (kvstore.BatchedMutations, error) { + return &openableKVStoreBatchedMutations{ + parentStore: s, + dbPrefix: s.dbPrefix, + setOperations: make(map[string]kvstore.Value), + deleteOperations: make(map[string]types.Empty), + }, nil +} + +// builds a key usable using the realm and the given prefix. 
+func (s *openableKVStore) buildKeyPrefix(prefix kvstore.KeyPrefix) kvstore.KeyPrefix { + return byteutils.ConcatBytes(s.dbPrefix, prefix) +} + +type openableKVStoreBatchedMutations struct { + parentStore *openableKVStore + dbPrefix kvstore.KeyPrefix + setOperations map[string]kvstore.Value + deleteOperations map[string]types.Empty + operationsMutex sync.Mutex +} + +func (s *openableKVStoreBatchedMutations) Set(key kvstore.Key, value kvstore.Value) error { + stringKey := byteutils.ConcatBytesToString(s.dbPrefix, key) + + s.operationsMutex.Lock() + defer s.operationsMutex.Unlock() + + delete(s.deleteOperations, stringKey) + s.setOperations[stringKey] = value + + return nil +} + +func (s *openableKVStoreBatchedMutations) Delete(key kvstore.Key) error { + stringKey := byteutils.ConcatBytesToString(s.dbPrefix, key) + + s.operationsMutex.Lock() + defer s.operationsMutex.Unlock() + + delete(s.setOperations, stringKey) + s.deleteOperations[stringKey] = types.Void + + return nil +} + +func (s *openableKVStoreBatchedMutations) Cancel() { + s.operationsMutex.Lock() + defer s.operationsMutex.Unlock() + + s.setOperations = make(map[string]kvstore.Value) + s.deleteOperations = make(map[string]types.Empty) +} + +func (s *openableKVStoreBatchedMutations) Commit() error { + batched, err := s.parentStore.instance().Batched() + if err != nil { + return err + } + + s.operationsMutex.Lock() + defer s.operationsMutex.Unlock() + + for key, value := range s.setOperations { + if err = batched.Set([]byte(key), value); err != nil { + return err + } + } + + for key := range s.deleteOperations { + if err = batched.Delete([]byte(key)); err != nil { + return err + } + } + + return batched.Commit() +} diff --git a/pkg/storage/database/syncedkvstore.go b/pkg/storage/database/syncedkvstore.go index eb191915c..07a61730d 100644 --- a/pkg/storage/database/syncedkvstore.go +++ b/pkg/storage/database/syncedkvstore.go @@ -1,236 +1,158 @@ package database import ( - "fmt" - "sync" - "github.com/iotaledger/hive.go/ds/types" "github.com/iotaledger/hive.go/kvstore" - "github.com/iotaledger/hive.go/kvstore/utils" "github.com/iotaledger/hive.go/runtime/syncutils" "github.com/iotaledger/hive.go/serializer/v2/byteutils" ) -type syncedKVStore struct { - storeInstance kvstore.KVStore // KVStore that is used to access the DB instance - parentStore *syncedKVStore - dbPrefix kvstore.KeyPrefix +type lockedKVStore struct { + *openableKVStore instanceMutex *syncutils.RWMutex } -func (s *syncedKVStore) instance() kvstore.KVStore { - if s.storeInstance != nil { - return s.storeInstance - } - - return s.parentStore.instance() -} - -func (s *syncedKVStore) Lock() { +func (s *lockedKVStore) Lock() { s.instanceMutex.Lock() } -func (s *syncedKVStore) Unlock() { +func (s *lockedKVStore) Unlock() { s.instanceMutex.Unlock() } -func (s *syncedKVStore) Replace(newKVStore kvstore.KVStore) { - if s.storeInstance == nil { - s.parentStore.Replace(newKVStore) - - return - } - - s.storeInstance = newKVStore -} - -func (s *syncedKVStore) WithRealm(realm kvstore.Realm) (kvstore.KVStore, error) { +func (s *lockedKVStore) WithRealm(realm kvstore.Realm) (kvstore.KVStore, error) { s.instanceMutex.RLock() defer s.instanceMutex.RUnlock() return s.withRealm(realm) } -func (s *syncedKVStore) withRealm(realm kvstore.Realm) (kvstore.KVStore, error) { - return &syncedKVStore{ - storeInstance: nil, - parentStore: s, +func (s *lockedKVStore) withRealm(realm kvstore.Realm) (kvstore.KVStore, error) { + return &lockedKVStore{ + openableKVStore: &openableKVStore{ + storeInstance: 
nil, + parentStore: s.openableKVStore, + dbPrefix: realm, + }, + instanceMutex: s.instanceMutex, - dbPrefix: realm, }, nil } -func (s *syncedKVStore) WithExtendedRealm(realm kvstore.Realm) (kvstore.KVStore, error) { + +func (s *lockedKVStore) WithExtendedRealm(realm kvstore.Realm) (kvstore.KVStore, error) { s.instanceMutex.RLock() defer s.instanceMutex.RUnlock() return s.withRealm(s.buildKeyPrefix(realm)) } -func (s *syncedKVStore) Realm() kvstore.Realm { - return s.dbPrefix -} - -func (s *syncedKVStore) Iterate(prefix kvstore.KeyPrefix, kvConsumerFunc kvstore.IteratorKeyValueConsumerFunc, direction ...kvstore.IterDirection) error { +func (s *lockedKVStore) Iterate(prefix kvstore.KeyPrefix, kvConsumerFunc kvstore.IteratorKeyValueConsumerFunc, direction ...kvstore.IterDirection) error { s.instanceMutex.RLock() defer s.instanceMutex.RUnlock() - return s.instance().Iterate(s.buildKeyPrefix(prefix), func(key kvstore.Key, value kvstore.Value) bool { - fmt.Println("realm", s.dbPrefix, "prefix", prefix, "key", key, "value", value) - return kvConsumerFunc(utils.CopyBytes(key)[len(s.dbPrefix):], value) - }, direction...) + return s.openableKVStore.Iterate(prefix, kvConsumerFunc, direction...) } -func (s *syncedKVStore) IterateKeys(prefix kvstore.KeyPrefix, consumerFunc kvstore.IteratorKeyConsumerFunc, direction ...kvstore.IterDirection) error { +func (s *lockedKVStore) IterateKeys(prefix kvstore.KeyPrefix, consumerFunc kvstore.IteratorKeyConsumerFunc, direction ...kvstore.IterDirection) error { s.instanceMutex.RLock() defer s.instanceMutex.RUnlock() - return s.instance().IterateKeys(s.buildKeyPrefix(prefix), func(key kvstore.Key) bool { - return consumerFunc(utils.CopyBytes(key)[len(s.dbPrefix):]) - }, direction...) + return s.openableKVStore.IterateKeys(prefix, consumerFunc, direction...) 
} -func (s *syncedKVStore) Clear() error { +func (s *lockedKVStore) Clear() error { s.instanceMutex.RLock() defer s.instanceMutex.RUnlock() - return s.instance().DeletePrefix(s.dbPrefix) + return s.openableKVStore.Clear() } -func (s *syncedKVStore) Get(key kvstore.Key) (value kvstore.Value, err error) { +func (s *lockedKVStore) Get(key kvstore.Key) (value kvstore.Value, err error) { s.instanceMutex.RLock() defer s.instanceMutex.RUnlock() - return s.instance().Get(byteutils.ConcatBytes(s.dbPrefix, key)) + return s.openableKVStore.Get(key) } -func (s *syncedKVStore) Set(key kvstore.Key, value kvstore.Value) error { +func (s *lockedKVStore) Set(key kvstore.Key, value kvstore.Value) error { s.instanceMutex.RLock() defer s.instanceMutex.RUnlock() - return s.instance().Set(byteutils.ConcatBytes(s.dbPrefix, key), value) + return s.openableKVStore.Set(key, value) } -func (s *syncedKVStore) Has(key kvstore.Key) (bool, error) { +func (s *lockedKVStore) Has(key kvstore.Key) (bool, error) { s.instanceMutex.RLock() defer s.instanceMutex.RUnlock() - return s.instance().Has(byteutils.ConcatBytes(s.dbPrefix, key)) + return s.openableKVStore.Has(key) } -func (s *syncedKVStore) Delete(key kvstore.Key) error { +func (s *lockedKVStore) Delete(key kvstore.Key) error { s.instanceMutex.RLock() defer s.instanceMutex.RUnlock() - return s.instance().Delete(byteutils.ConcatBytes(s.dbPrefix, key)) + return s.openableKVStore.Delete(key) } -func (s *syncedKVStore) DeletePrefix(prefix kvstore.KeyPrefix) error { +func (s *lockedKVStore) DeletePrefix(prefix kvstore.KeyPrefix) error { s.instanceMutex.RLock() defer s.instanceMutex.RUnlock() - return s.instance().DeletePrefix(s.buildKeyPrefix(prefix)) + return s.openableKVStore.DeletePrefix(prefix) } -func (s *syncedKVStore) Flush() error { +func (s *lockedKVStore) Flush() error { s.instanceMutex.RLock() defer s.instanceMutex.RUnlock() return s.FlushWithoutLocking() } -func (s *syncedKVStore) FlushWithoutLocking() error { - return s.instance().Flush() +func (s *lockedKVStore) FlushWithoutLocking() error { + return s.openableKVStore.Flush() } -func (s *syncedKVStore) Close() error { + +func (s *lockedKVStore) Close() error { s.instanceMutex.RLock() defer s.instanceMutex.RUnlock() return s.CloseWithoutLocking() } -func (s *syncedKVStore) CloseWithoutLocking() error { - return s.instance().Close() +func (s *lockedKVStore) CloseWithoutLocking() error { + return s.openableKVStore.Close() } -func (s *syncedKVStore) Batched() (kvstore.BatchedMutations, error) { +func (s *lockedKVStore) Batched() (kvstore.BatchedMutations, error) { s.instanceMutex.RLock() defer s.instanceMutex.RUnlock() - return &syncedBachedMutations{ - parentStore: s, - dbPrefix: s.dbPrefix, - setOperations: make(map[string]kvstore.Value), - deleteOperations: make(map[string]types.Empty), + return &syncedBatchedMutations{ + openableKVStoreBatchedMutations: &openableKVStoreBatchedMutations{ + parentStore: s.openableKVStore, + dbPrefix: s.dbPrefix, + setOperations: make(map[string]kvstore.Value), + deleteOperations: make(map[string]types.Empty), + }, + + parentStore: s, }, nil } // builds a key usable using the realm and the given prefix. 
-func (s *syncedKVStore) buildKeyPrefix(prefix kvstore.KeyPrefix) kvstore.KeyPrefix { +func (s *lockedKVStore) buildKeyPrefix(prefix kvstore.KeyPrefix) kvstore.KeyPrefix { return byteutils.ConcatBytes(s.dbPrefix, prefix) } -type syncedBachedMutations struct { - parentStore *syncedKVStore - dbPrefix kvstore.KeyPrefix - setOperations map[string]kvstore.Value - deleteOperations map[string]types.Empty - operationsMutex sync.Mutex -} - -func (s *syncedBachedMutations) Set(key kvstore.Key, value kvstore.Value) error { - stringKey := byteutils.ConcatBytesToString(s.dbPrefix, key) - - s.operationsMutex.Lock() - defer s.operationsMutex.Unlock() - - delete(s.deleteOperations, stringKey) - s.setOperations[stringKey] = value - - return nil -} - -func (s *syncedBachedMutations) Delete(key kvstore.Key) error { - stringKey := byteutils.ConcatBytesToString(s.dbPrefix, key) - - s.operationsMutex.Lock() - defer s.operationsMutex.Unlock() - - delete(s.setOperations, stringKey) - s.deleteOperations[stringKey] = types.Void +type syncedBatchedMutations struct { + *openableKVStoreBatchedMutations - return nil + parentStore *lockedKVStore } -func (s *syncedBachedMutations) Cancel() { - s.operationsMutex.Lock() - defer s.operationsMutex.Unlock() - - s.setOperations = make(map[string]kvstore.Value) - s.deleteOperations = make(map[string]types.Empty) -} - -func (s *syncedBachedMutations) Commit() error { +func (s *syncedBatchedMutations) Commit() error { s.parentStore.instanceMutex.RLock() defer s.parentStore.instanceMutex.RUnlock() - batched, err := s.parentStore.instance().Batched() - if err != nil { - return err - } - - s.operationsMutex.Lock() - defer s.operationsMutex.Unlock() - - for key, value := range s.setOperations { - if err = batched.Set([]byte(key), value); err != nil { - return err - } - } - - for key := range s.deleteOperations { - if err = batched.Delete([]byte(key)); err != nil { - return err - } - } - - return batched.Commit() + return s.openableKVStoreBatchedMutations.Commit() } diff --git a/pkg/storage/database/utils.go b/pkg/storage/database/utils.go index 7f9d63de5..0b47cf41b 100644 --- a/pkg/storage/database/utils.go +++ b/pkg/storage/database/utils.go @@ -1,6 +1,6 @@ package database -func FlushAndClose(store *syncedKVStore) error { +func FlushAndClose(store *lockedKVStore) error { if err := store.FlushWithoutLocking(); err != nil { return err } diff --git a/pkg/storage/permanent/permanent.go b/pkg/storage/permanent/permanent.go index 256663cdc..fd576d9c6 100644 --- a/pkg/storage/permanent/permanent.go +++ b/pkg/storage/permanent/permanent.go @@ -53,9 +53,8 @@ func New(dbConfig database.Config, errorHandler func(error), opts ...options.Opt } func Clone(source *Permanent, dbConfig database.Config, errorHandler func(error), opts ...options.Option[Permanent]) (*Permanent, error) { - source.store.MarkHealthy() - // TODO: mark healthy within the lock source.store.Lock() + defer source.store.Unlock() source.store.CloseWithoutLocking() @@ -65,10 +64,6 @@ func Clone(source *Permanent, dbConfig database.Config, errorHandler func(error) source.store.Open() - source.store.Unlock() - // TODO: mark corrupted within the lock - source.store.MarkCorrupted() - return New(dbConfig, errorHandler, opts...), nil } diff --git a/pkg/storage/prunable/prunable.go b/pkg/storage/prunable/prunable.go index 25106bf52..2555d32ff 100644 --- a/pkg/storage/prunable/prunable.go +++ b/pkg/storage/prunable/prunable.go @@ -50,10 +50,9 @@ func New(dbConfig database.Config, apiProvider api.Provider, errorHandler func(e } func 
Clone(source *Prunable, dbConfig database.Config, apiProvider api.Provider, errorHandler func(error), opts ...options.Option[BucketManager]) (*Prunable, error) { - source.semiPermanentDB.MarkHealthy() - // TODO: mark healthy within the lock // Lock semi-permanent DB and prunable slot store so that nobody can try to use or open them while cloning. source.semiPermanentDB.Lock() + defer source.semiPermanentDB.Unlock() source.prunableSlotStore.mutex.Lock() defer source.prunableSlotStore.mutex.Unlock() @@ -70,9 +69,6 @@ func Clone(source *Prunable, dbConfig database.Config, apiProvider api.Provider, // to minimize time of locking of the most recent bucket that could be used and semi permanent storage. source.semiPermanentDB.Open() - source.semiPermanentDB.Unlock() - source.semiPermanentDB.MarkCorrupted() - // TODO: mark healthy within the lock return New(dbConfig, apiProvider, errorHandler, opts...), nil } From 964caac713e15dcbe491d82cd210e670f056372b Mon Sep 17 00:00:00 2001 From: Piotr Macek <4007944+piotrm50@users.noreply.github.com> Date: Thu, 14 Sep 2023 10:11:21 +0200 Subject: [PATCH 10/17] Improve storage copy tests. --- pkg/storage/storage_test.go | 1 + pkg/tests/protocol_engine_switching_test.go | 7 +++++++ 2 files changed, 8 insertions(+) diff --git a/pkg/storage/storage_test.go b/pkg/storage/storage_test.go index bf0b377f0..b050cd7f0 100644 --- a/pkg/storage/storage_test.go +++ b/pkg/storage/storage_test.go @@ -220,6 +220,7 @@ func TestStorage_CopyFromForkedStorageEmpty(t *testing.T) { tf1.GeneratePrunableData(iotago.EpochIndex(i), 500*KB) tf1.GenerateSemiPermanentData(iotago.EpochIndex(i)) } + tf1.GeneratePermanentData(1 * MB) clonedStorage, err := storage.CloneStorage(tf1.Instance, t.TempDir(), 0, func(err error) { t.Log(err) diff --git a/pkg/tests/protocol_engine_switching_test.go b/pkg/tests/protocol_engine_switching_test.go index a5f5a750a..093e5e66c 100644 --- a/pkg/tests/protocol_engine_switching_test.go +++ b/pkg/tests/protocol_engine_switching_test.go @@ -361,5 +361,12 @@ func TestProtocol_EngineSwitching(t *testing.T) { wg.Wait() } + // Make sure that nodes that switched their engine still have blocks with prefix P0 from before the fork. + // Those nodes should also have all the blocks from the target fork P1 and should not have blocks from P2. + // This is to make sure that the storage was copied correctly during engine switching. + ts.AssertBlocksExist(ts.BlocksWithPrefix("P0"), true, ts.Nodes()...) + ts.AssertBlocksExist(ts.BlocksWithPrefix("P1"), true, ts.Nodes()...) + ts.AssertBlocksExist(ts.BlocksWithPrefix("P2"), false, ts.Nodes()...) + ts.AssertEqualStoredCommitmentAtIndex(expectedCommittedSlotAfterPartitionMerge, ts.Nodes()...) } From 258b2171e448e40abc5b077e5940a13d11692c17 Mon Sep 17 00:00:00 2001 From: Piotr Macek <4007944+piotrm50@users.noreply.github.com> Date: Thu, 14 Sep 2023 10:12:37 +0200 Subject: [PATCH 11/17] Improve comments. 
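For context on the deadlock that the comments added below guard against: if the health tracker wrote its flag through the locking layer, then marking the store healthy from inside Clone, which already holds the write lock, would block forever. A deliberately broken toy example of that self-deadlock (not code from this repository):

    package dbsketch

    import "sync"

    // store mimics a KV store whose every operation takes the shared lock.
    type store struct{ mu sync.RWMutex }

    // set stands in for any regular, locking KVStore operation.
    func (s *store) set() {
    	s.mu.RLock()
    	defer s.mu.RUnlock()
    }

    // markHealthyWrong writes the health flag through the locking layer.
    func (s *store) markHealthyWrong() { s.set() }

    // cloneWrong never returns: it already holds the write lock, so the health
    // tracker's read lock can never be granted.
    func (s *store) cloneWrong() {
    	s.mu.Lock()
    	defer s.mu.Unlock()

    	s.markHealthyWrong() // blocks forever
    }
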
--- pkg/storage/database/db_instance.go | 4 ++++ pkg/storage/database/{realmkvstore.go => openablekvstore.go} | 0 pkg/storage/prunable/bucket_manager.go | 3 +-- pkg/storage/prunable/prunable.go | 2 -- 4 files changed, 5 insertions(+), 4 deletions(-) rename pkg/storage/database/{realmkvstore.go => openablekvstore.go} (100%) diff --git a/pkg/storage/database/db_instance.go b/pkg/storage/database/db_instance.go index ee8c2f6ad..d072cdf0a 100644 --- a/pkg/storage/database/db_instance.go +++ b/pkg/storage/database/db_instance.go @@ -28,6 +28,8 @@ func NewDBInstance(dbConfig Config) *DBInstance { instanceMutex: new(syncutils.RWMutex), } + // HealthTracker state is only modified while holding the lock on the lockableKVStore; + // that's why it needs to use openableKVStore (which does not lock) instead of lockableKVStore to avoid a deadlock. storeHealthTracker, err := kvstore.NewStoreHealthTracker(lockableKVStore.openableKVStore, dbConfig.PrefixHealth, dbConfig.Version, nil) if err != nil { panic(ierrors.Wrapf(err, "database in %s is corrupted, delete database and resync node", dbConfig.Directory)) @@ -60,6 +62,8 @@ func (d *DBInstance) CloseWithoutLocking() { } } +// Open re-opens a closed DBInstance. It must only be called while holding a lock on DBInstance, +// otherwise it might cause a race condition and corruption of node's state. func (d *DBInstance) Open() { d.store.Replace(lo.PanicOnErr(StoreWithDefaultSettings(d.dbConfig.Directory, false, d.dbConfig.Engine))) diff --git a/pkg/storage/database/realmkvstore.go b/pkg/storage/database/openablekvstore.go similarity index 100% rename from pkg/storage/database/realmkvstore.go rename to pkg/storage/database/openablekvstore.go diff --git a/pkg/storage/prunable/bucket_manager.go b/pkg/storage/prunable/bucket_manager.go index 270e8b8b1..33195433e 100644 --- a/pkg/storage/prunable/bucket_manager.go +++ b/pkg/storage/prunable/bucket_manager.go @@ -78,7 +78,6 @@ func (b *BucketManager) Shutdown() { defer b.openDBsMutex.Unlock() b.openDBs.Each(func(index iotago.EpochIndex, db *database.DBInstance) { - // TODO: lock the database before closing so that no one can use it before we close it db.Close() b.openDBs.Remove(index) }) @@ -174,7 +173,7 @@ func (b *BucketManager) RestoreFromDisk() (lastPrunedEpoch iotago.EpochIndex) { // epochIndex 1 -> db 1 // epochIndex 2 -> db 2 func (b *BucketManager) getDBInstance(index iotago.EpochIndex) (db *database.DBInstance) { - // Lock global mutex + // Lock global mutex to prevent closing and copying storage data on disk during engine switching. b.mutex.RLock() defer b.mutex.RUnlock() diff --git a/pkg/storage/prunable/prunable.go b/pkg/storage/prunable/prunable.go index 2555d32ff..4ce47d5d1 100644 --- a/pkg/storage/prunable/prunable.go +++ b/pkg/storage/prunable/prunable.go @@ -65,8 +65,6 @@ func Clone(source *Prunable, dbConfig database.Config, apiProvider api.Provider, if err := copydir.Copy(source.prunableSlotStore.dbConfig.Directory, dbConfig.Directory); err != nil { return nil, ierrors.Wrap(err, "failed to copy prunable storage directory to new storage path") } - // TODO: it's possible to copy prunable slot store separately bucket-after-bucket - // to minimize time of locking of the most recent bucket that could be used and semi permanent storage. 
source.semiPermanentDB.Open() From 9d3b7c8dc290a7ab5f91136781b50bb0c255f038 Mon Sep 17 00:00:00 2001 From: Piotr Macek <4007944+piotrm50@users.noreply.github.com> Date: Thu, 14 Sep 2023 16:13:25 +0200 Subject: [PATCH 12/17] Implement unit tests for creating a rolledback forked engine --- pkg/protocol/block_dispatcher.go | 2 +- .../engine/accounts/accountsledger/manager.go | 1 - pkg/protocol/protocol.go | 6 +- pkg/protocol/protocol_fork.go | 4 +- .../performance/performance.go | 6 +- .../sybilprotectionv1/sybilprotection.go | 5 +- pkg/storage/prunable/prunable.go | 31 +- pkg/tests/protocol_engine_rollback_test.go | 781 ++++++++++++++++++ 8 files changed, 815 insertions(+), 21 deletions(-) create mode 100644 pkg/tests/protocol_engine_rollback_test.go diff --git a/pkg/protocol/block_dispatcher.go b/pkg/protocol/block_dispatcher.go index 74362e6f7..92c18337d 100644 --- a/pkg/protocol/block_dispatcher.go +++ b/pkg/protocol/block_dispatcher.go @@ -101,7 +101,7 @@ func (b *BlockDispatcher) Dispatch(block *model.Block, src peer.ID) error { func (b *BlockDispatcher) initEngineMonitoring() { b.monitorLatestEngineCommitment(b.protocol.MainEngineInstance()) - b.protocol.engineManager.OnEngineCreated(b.monitorLatestEngineCommitment) + b.protocol.EngineManager.OnEngineCreated(b.monitorLatestEngineCommitment) b.protocol.Events.ChainManager.CommitmentPublished.Hook(func(chainCommitment *chainmanager.ChainCommitment) { // as soon as a commitment is solid, it's chain is known and it can be dispatched diff --git a/pkg/protocol/engine/accounts/accountsledger/manager.go b/pkg/protocol/engine/accounts/accountsledger/manager.go index 8e923765a..45590c3ff 100644 --- a/pkg/protocol/engine/accounts/accountsledger/manager.go +++ b/pkg/protocol/engine/accounts/accountsledger/manager.go @@ -295,7 +295,6 @@ func (m *Manager) Rollback(targetIndex iotago.SlotIndex) error { return false } - // TODO: Saving accountData after each slot - would it be better to buffer them in memory and save them at the end? if err := m.accountsTree.Set(accountID, accountData); err != nil { internalErr = ierrors.Wrapf(err, "failed to save rolled back account %s to target slot index %d", accountID, targetIndex) diff --git a/pkg/protocol/protocol.go b/pkg/protocol/protocol.go index 08bfa692f..f458cff09 100644 --- a/pkg/protocol/protocol.go +++ b/pkg/protocol/protocol.go @@ -59,7 +59,7 @@ type Protocol struct { context context.Context Events *Events BlockDispatcher *BlockDispatcher - engineManager *enginemanager.EngineManager + EngineManager *enginemanager.EngineManager ChainManager *chainmanager.Manager Workers *workerpool.Group @@ -196,7 +196,7 @@ func (p *Protocol) shutdown() { } func (p *Protocol) initEngineManager() { - p.engineManager = enginemanager.New( + p.EngineManager = enginemanager.New( p.Workers.CreateGroup("EngineManager"), p.HandleError, p.optsBaseDirectory, @@ -222,7 +222,7 @@ func (p *Protocol) initEngineManager() { p.optsSyncManagerProvider, ) - mainEngine, err := p.engineManager.LoadActiveEngine(p.optsSnapshotPath) + mainEngine, err := p.EngineManager.LoadActiveEngine(p.optsSnapshotPath) if err != nil { panic(fmt.Sprintf("could not load active engine: %s", err)) } diff --git a/pkg/protocol/protocol_fork.go b/pkg/protocol/protocol_fork.go index 0cdfdd542..a76508ccb 100644 --- a/pkg/protocol/protocol_fork.go +++ b/pkg/protocol/protocol_fork.go @@ -85,7 +85,7 @@ func (p *Protocol) onForkDetected(fork *chainmanager.Fork) { // 2. 
The candidate engine never becomes synced or its chain is not heavier than the main chain -> discard it after a timeout. // 3. The candidate engine is not creating the same commitments as the chain we decided to switch to -> discard it immediately. snapshotTargetIndex := fork.ForkingPoint.Index() - 1 - candidateEngineInstance, err := p.engineManager.ForkEngineAtSlot(snapshotTargetIndex) + candidateEngineInstance, err := p.EngineManager.ForkEngineAtSlot(snapshotTargetIndex) if err != nil { p.HandleError(ierrors.Wrap(err, "error creating new candidate engine")) return @@ -310,7 +310,7 @@ func (p *Protocol) switchEngines() { return false } - if err := p.engineManager.SetActiveInstance(candidateEngineInstance); err != nil { + if err := p.EngineManager.SetActiveInstance(candidateEngineInstance); err != nil { p.HandleError(ierrors.Wrap(err, "error switching engines")) return false diff --git a/pkg/protocol/sybilprotection/sybilprotectionv1/performance/performance.go b/pkg/protocol/sybilprotection/sybilprotectionv1/performance/performance.go index d5e5048da..8cbb401af 100644 --- a/pkg/protocol/sybilprotection/sybilprotectionv1/performance/performance.go +++ b/pkg/protocol/sybilprotection/sybilprotectionv1/performance/performance.go @@ -1,6 +1,8 @@ package performance import ( + "fmt" + "github.com/iotaledger/hive.go/ds" "github.com/iotaledger/hive.go/ierrors" "github.com/iotaledger/hive.go/kvstore" @@ -92,7 +94,7 @@ func (t *Tracker) ApplyEpoch(epoch iotago.EpochIndex, committee *account.Account timeProvider := t.apiProvider.APIForEpoch(epoch).TimeProvider() epochStartSlot := timeProvider.EpochStart(epoch) epochEndSlot := timeProvider.EpochEnd(epoch) - + fmt.Println("apply epoch", epoch, committee.TotalStake(), committee.TotalValidatorStake()) profitMargin := calculateProfitMargin(committee.TotalValidatorStake(), committee.TotalStake()) poolsStats := &model.PoolsStats{ TotalStake: committee.TotalStake(), @@ -103,7 +105,7 @@ func (t *Tracker) ApplyEpoch(epoch iotago.EpochIndex, committee *account.Account if err := t.poolStatsStore.Store(epoch, poolsStats); err != nil { panic(ierrors.Wrapf(err, "failed to store pool stats for epoch %d", epoch)) } - + fmt.Println("commit epoch", epoch) rewardsMap, err := t.rewardsMap(epoch) if err != nil { panic(ierrors.Wrapf(err, "failed to create rewards tree for epoch %d", epoch)) diff --git a/pkg/protocol/sybilprotection/sybilprotectionv1/sybilprotection.go b/pkg/protocol/sybilprotection/sybilprotectionv1/sybilprotection.go index 59ca80c96..fe3de89f2 100644 --- a/pkg/protocol/sybilprotection/sybilprotectionv1/sybilprotection.go +++ b/pkg/protocol/sybilprotection/sybilprotectionv1/sybilprotection.go @@ -87,8 +87,10 @@ func NewProvider(opts ...options.Option[SybilProtection]) module.Provider[*engin panic("failed to load committee for last finalized slot to initialize sybil protection") } o.seatManager.ImportCommittee(currentEpoch, committee) + fmt.Println("committee import", committee.TotalStake(), currentEpoch) if nextCommittee, nextCommitteeExists := o.performanceTracker.LoadCommitteeForEpoch(currentEpoch + 1); nextCommitteeExists { o.seatManager.ImportCommittee(currentEpoch+1, nextCommittee) + fmt.Println("next committee", nextCommittee.TotalStake(), currentEpoch+1) } o.TriggerInitialized() @@ -134,7 +136,7 @@ func (o *SybilProtection) CommitSlot(slot iotago.SlotIndex) (committeeRoot, rewa } committee.SetReused() - + fmt.Println("reuse committee", currentEpoch, "stake", committee.TotalValidatorStake()) o.seatManager.SetCommittee(nextEpoch, committee) 
o.events.CommitteeSelected.Trigger(committee, nextEpoch) @@ -247,6 +249,7 @@ func (o *SybilProtection) slotFinalized(slot iotago.SlotIndex) { if slot+apiForSlot.ProtocolParameters().EpochNearingThreshold() == epochEndSlot && epochEndSlot > o.lastCommittedSlot+apiForSlot.ProtocolParameters().MaxCommittableAge() { newCommittee := o.selectNewCommittee(slot) + fmt.Println("new committee selection finalization", epoch, newCommittee.TotalStake(), newCommittee.TotalValidatorStake()) o.events.CommitteeSelected.Trigger(newCommittee, epoch+1) } } diff --git a/pkg/storage/prunable/prunable.go b/pkg/storage/prunable/prunable.go index 4ce47d5d1..e69c3c0a5 100644 --- a/pkg/storage/prunable/prunable.go +++ b/pkg/storage/prunable/prunable.go @@ -1,6 +1,8 @@ package prunable import ( + "fmt" + copydir "github.com/otiai10/copy" "github.com/iotaledger/hive.go/ierrors" @@ -161,20 +163,23 @@ func (p *Prunable) Rollback(targetSlotIndex iotago.SlotIndex) error { // Removed entries that belong to the old fork and cannot be re-used. for epochIdx := lastCommittedEpoch + 1; ; epochIdx++ { + fmt.Println("rollback before if", epochIdx, targetSlotEpoch) if epochIdx > targetSlotEpoch { - if deleted := p.prunableSlotStore.DeleteBucket(epochIdx); !deleted { - break - } - - shouldRollback, err := p.shouldRollbackCommittee(epochIdx+1, targetSlotIndex) + shouldRollback, err := p.shouldRollbackCommittee(epochIdx, targetSlotIndex) if err != nil { return ierrors.Wrapf(err, "error while checking if committee for epoch %d should be rolled back", epochIdx) } + + fmt.Println("rollback committee", shouldRollback, "epochIdx", epochIdx, "lastCommittedEpoch", lastCommittedEpoch, "targetSlotEpoch", targetSlotEpoch) if shouldRollback { - if err := p.committee.DeleteEpoch(epochIdx + 1); err != nil { + if err := p.committee.DeleteEpoch(epochIdx); err != nil { return ierrors.Wrapf(err, "error while deleting committee for epoch %d", epochIdx) } } + + if deleted := p.prunableSlotStore.DeleteBucket(epochIdx); !deleted { + break + } } if err := p.poolRewards.DeleteEpoch(epochIdx); err != nil { @@ -199,13 +204,17 @@ func (p *Prunable) shouldRollbackCommittee(epochIndex iotago.EpochIndex, targetS targetSlotEpoch := timeProvider.EpochFromSlot(targetSlotIndex) pointOfNoReturn := timeProvider.EpochEnd(targetSlotEpoch) - p.apiProvider.APIForSlot(targetSlotIndex).ProtocolParameters().MaxCommittableAge() - if epochIndex == targetSlotEpoch+1 && targetSlotIndex < pointOfNoReturn { - committee, err := p.committee.Load(targetSlotEpoch + 1) - if err != nil { - return false, err + if epochIndex >= targetSlotEpoch+1 { + if targetSlotIndex < pointOfNoReturn { + committee, err := p.committee.Load(targetSlotEpoch + 1) + if err != nil { + return false, err + } + + return committee.IsReused(), nil } - return committee.IsReused(), nil + return false, nil } return true, nil diff --git a/pkg/tests/protocol_engine_rollback_test.go b/pkg/tests/protocol_engine_rollback_test.go new file mode 100644 index 000000000..940de5d6c --- /dev/null +++ b/pkg/tests/protocol_engine_rollback_test.go @@ -0,0 +1,781 @@ +package tests + +import ( + "fmt" + "testing" + "time" + + "github.com/stretchr/testify/require" + + "github.com/iotaledger/hive.go/core/eventticker" + "github.com/iotaledger/hive.go/lo" + "github.com/iotaledger/hive.go/runtime/module" + "github.com/iotaledger/hive.go/runtime/options" + "github.com/iotaledger/iota-core/pkg/core/account" + "github.com/iotaledger/iota-core/pkg/protocol" + "github.com/iotaledger/iota-core/pkg/protocol/chainmanager" + 
"github.com/iotaledger/iota-core/pkg/protocol/engine" + "github.com/iotaledger/iota-core/pkg/protocol/engine/blocks" + "github.com/iotaledger/iota-core/pkg/protocol/sybilprotection/seatmanager" + "github.com/iotaledger/iota-core/pkg/protocol/sybilprotection/seatmanager/mock" + "github.com/iotaledger/iota-core/pkg/protocol/sybilprotection/sybilprotectionv1" + "github.com/iotaledger/iota-core/pkg/storage" + "github.com/iotaledger/iota-core/pkg/testsuite" + mock2 "github.com/iotaledger/iota-core/pkg/testsuite/mock" + iotago "github.com/iotaledger/iota.go/v4" +) + +func TestProtocol_EngineRollbackFinalization(t *testing.T) { + ts := testsuite.NewTestSuite(t, + testsuite.WithLivenessThreshold(1), + testsuite.WithMinCommittableAge(2), + testsuite.WithMaxCommittableAge(3), + testsuite.WithEpochNearingThreshold(5), + testsuite.WithSlotsPerEpochExponent(3), + testsuite.WithGenesisTimestampOffset(1000*10), + + testsuite.WithWaitFor(15*time.Second), + ) + defer ts.Shutdown() + + node0 := ts.AddValidatorNode("node0") + node1 := ts.AddValidatorNode("node1") + node2 := ts.AddValidatorNode("node2") + node3 := ts.AddValidatorNode("node3") + + poaProvider := func() module.Provider[*engine.Engine, seatmanager.SeatManager] { + return module.Provide(func(e *engine.Engine) seatmanager.SeatManager { + poa := mock.NewManualPOAProvider()(e).(*mock.ManualPOA) + + for _, node := range []*mock2.Node{node0, node1, node2, node3} { + if node.Validator { + poa.AddAccount(node.AccountID, node.Name) + } + } + poa.SetOnline("node0", "node1", "node2", "node3") + + return poa + }) + } + + nodeOptions := make(map[string][]options.Option[protocol.Protocol]) + for _, node := range ts.Nodes() { + nodeOptions[node.Name] = []options.Option[protocol.Protocol]{ + protocol.WithChainManagerOptions( + chainmanager.WithCommitmentRequesterOptions( + eventticker.RetryInterval[iotago.SlotIndex, iotago.CommitmentID](1*time.Second), + eventticker.RetryJitter[iotago.SlotIndex, iotago.CommitmentID](500*time.Millisecond), + ), + ), + protocol.WithSybilProtectionProvider( + sybilprotectionv1.NewProvider( + sybilprotectionv1.WithSeatManagerProvider( + poaProvider(), + ), + ), + ), + protocol.WithEngineOptions( + engine.WithBlockRequesterOptions( + eventticker.RetryInterval[iotago.SlotIndex, iotago.BlockID](1*time.Second), + eventticker.RetryJitter[iotago.SlotIndex, iotago.BlockID](500*time.Millisecond), + ), + ), + protocol.WithStorageOptions( + storage.WithPruningDelay(20), + ), + } + } + + ts.Run(false, nodeOptions) + + // Verify that nodes have the expected states. 
+ + expectedCommittee := []iotago.AccountID{ + node0.AccountID, + node1.AccountID, + node2.AccountID, + node3.AccountID, + } + expectedOnlineCommitteeFull := []account.SeatIndex{ + lo.Return1(node0.Protocol.MainEngineInstance().SybilProtection.SeatManager().Committee(1).GetSeat(node0.AccountID)), + lo.Return1(node0.Protocol.MainEngineInstance().SybilProtection.SeatManager().Committee(1).GetSeat(node1.AccountID)), + lo.Return1(node0.Protocol.MainEngineInstance().SybilProtection.SeatManager().Committee(1).GetSeat(node2.AccountID)), + lo.Return1(node0.Protocol.MainEngineInstance().SybilProtection.SeatManager().Committee(1).GetSeat(node3.AccountID)), + } + + for _, node := range ts.Nodes() { + node.Protocol.MainEngineInstance().SybilProtection.SeatManager().(*mock.ManualPOA).SetOnline("node0", "node1", "node2", "node3") + } + + { + genesisCommitment := iotago.NewEmptyCommitment(ts.API.ProtocolParameters().Version()) + genesisCommitment.RMC = ts.API.ProtocolParameters().CongestionControlParameters().RMCMin + ts.AssertNodeState(ts.Nodes(), + testsuite.WithSnapshotImported(true), + testsuite.WithProtocolParameters(ts.API.ProtocolParameters()), + testsuite.WithLatestCommitment(genesisCommitment), + testsuite.WithLatestFinalizedSlot(0), + testsuite.WithChainID(genesisCommitment.MustID()), + testsuite.WithStorageCommitments([]*iotago.Commitment{genesisCommitment}), + + testsuite.WithSybilProtectionCommittee(0, expectedCommittee), + testsuite.WithSybilProtectionOnlineCommittee(expectedOnlineCommitteeFull...), + testsuite.WithEvictedSlot(0), + testsuite.WithActiveRootBlocks(ts.Blocks("Genesis")), + testsuite.WithStorageRootBlocks(ts.Blocks("Genesis")), + ) + } + + // Issue up to slot 11 - just before committee selection for the next epoch. + // Committee will be reused at slot 10 is finalized or slot 12 is committed, whichever happens first. + { + ts.IssueBlocksAtSlots("P0:", []iotago.SlotIndex{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11}, 4, "Genesis", ts.Nodes(), true, nil) + + ts.AssertNodeState(ts.Nodes(), + testsuite.WithLatestFinalizedSlot(8), + testsuite.WithLatestCommitmentSlotIndex(9), + testsuite.WithEqualStoredCommitmentAtIndex(9), + testsuite.WithLatestCommitmentCumulativeWeight(28), // 7 for each slot starting from 4 + testsuite.WithSybilProtectionCommittee(9, expectedCommittee), + testsuite.WithSybilProtectionOnlineCommittee(expectedOnlineCommitteeFull...), + testsuite.WithEvictedSlot(9), + ) + + for _, slot := range []iotago.SlotIndex{4, 5, 6, 7, 8, 9} { + var attestationBlocks []*blocks.Block + for _, node := range ts.Nodes() { + if node.Validator { + attestationBlocks = append(attestationBlocks, ts.Block(fmt.Sprintf("P0:%d.3-%s", slot, node.Name))) + } + } + ts.AssertAttestationsForSlot(slot, attestationBlocks, ts.Nodes()...) + } + + ts.AssertBlocksExist(ts.BlocksWithPrefix("P0"), true, ts.Nodes()...) + } + + { + ts.IssueBlocksAtSlots("P0:", []iotago.SlotIndex{12, 13, 14, 15, 16}, 4, "P0:11.3", ts.Nodes(), true, nil) + + ts.AssertNodeState(ts.Nodes(), + testsuite.WithLatestFinalizedSlot(13), + testsuite.WithLatestCommitmentSlotIndex(14), + testsuite.WithEqualStoredCommitmentAtIndex(14), + testsuite.WithLatestCommitmentCumulativeWeight(48), // 7 for each slot starting from 4 + testsuite.WithSybilProtectionCommittee(14, expectedCommittee), + testsuite.WithSybilProtectionOnlineCommittee(expectedOnlineCommitteeFull...), + testsuite.WithEvictedSlot(14), + ) + + ts.AssertBlocksExist(ts.BlocksWithPrefix("P0"), true, ts.Nodes()...) 
+ } + + newEngine, err := node3.Protocol.EngineManager.ForkEngineAtSlot(13) + require.NoError(t, err) + + // Assert state of the forked engine after rollback. + { + require.EqualValues(t, 13, newEngine.Storage.Settings().LatestCommitment().Index()) + require.EqualValues(t, 13, newEngine.Storage.Settings().LatestFinalizedSlot()) + require.EqualValues(t, 13, newEngine.EvictionState.LastEvictedSlot()) + + for epochIndex := 0; epochIndex <= 2; epochIndex++ { + committeeEpoch, err := newEngine.Storage.Committee().Load(iotago.EpochIndex(epochIndex)) + require.NoError(t, err) + require.Len(t, committeeEpoch.IDs(), 4) + } + + // Commmittee for the future epoch does not exist. + committeeEpoch3, err := newEngine.Storage.Committee().Load(3) + require.NoError(t, err) + require.Nil(t, committeeEpoch3) + + for slotIndex := 1; slotIndex <= 13; slotIndex++ { + copiedCommitment, err := newEngine.Storage.Commitments().Load(iotago.SlotIndex(slotIndex)) + require.NoError(t, err) + sourceCommitment, err := node1.Protocol.MainEngineInstance().Storage.Commitments().Load(iotago.SlotIndex(slotIndex)) + require.NoError(t, err) + require.Equal(t, sourceCommitment.ID(), copiedCommitment.ID()) + } + + // Commitment for the first slot after the fork does not exist. + _, err = newEngine.Storage.Commitments().Load(iotago.SlotIndex(14)) + require.Error(t, err) + } +} + +func TestProtocol_EngineRollbackNoFinalization(t *testing.T) { + ts := testsuite.NewTestSuite(t, + testsuite.WithLivenessThreshold(1), + testsuite.WithMinCommittableAge(2), + testsuite.WithMaxCommittableAge(3), + testsuite.WithEpochNearingThreshold(5), + testsuite.WithSlotsPerEpochExponent(3), + testsuite.WithGenesisTimestampOffset(1000*10), + + testsuite.WithWaitFor(15*time.Second), + ) + defer ts.Shutdown() + + node0 := ts.AddValidatorNode("node0") + node1 := ts.AddValidatorNode("node1") + node2 := ts.AddValidatorNode("node2") + node3 := ts.AddValidatorNode("node3") + + poaProvider := func() module.Provider[*engine.Engine, seatmanager.SeatManager] { + return module.Provide(func(e *engine.Engine) seatmanager.SeatManager { + poa := mock.NewManualPOAProvider()(e).(*mock.ManualPOA) + + for _, node := range []*mock2.Node{node0, node1, node2, node3} { + if node.Validator { + poa.AddAccount(node.AccountID, node.Name) + } + } + poa.SetOnline("node0", "node1", "node2", "node3") + + return poa + }) + } + + nodeOptions := make(map[string][]options.Option[protocol.Protocol]) + for _, node := range ts.Nodes() { + nodeOptions[node.Name] = []options.Option[protocol.Protocol]{ + protocol.WithChainManagerOptions( + chainmanager.WithCommitmentRequesterOptions( + eventticker.RetryInterval[iotago.SlotIndex, iotago.CommitmentID](1*time.Second), + eventticker.RetryJitter[iotago.SlotIndex, iotago.CommitmentID](500*time.Millisecond), + ), + ), + protocol.WithSybilProtectionProvider( + sybilprotectionv1.NewProvider( + sybilprotectionv1.WithSeatManagerProvider( + poaProvider(), + ), + ), + ), + protocol.WithEngineOptions( + engine.WithBlockRequesterOptions( + eventticker.RetryInterval[iotago.SlotIndex, iotago.BlockID](1*time.Second), + eventticker.RetryJitter[iotago.SlotIndex, iotago.BlockID](500*time.Millisecond), + ), + ), + protocol.WithStorageOptions( + storage.WithPruningDelay(20), + ), + } + } + + ts.Run(false, nodeOptions) + + // Verify that nodes have the expected states. 
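The committee assertions above follow from shouldRollbackCommittee in pkg/storage/prunable/prunable.go: the committee stored for the epoch after the target slot's epoch is rolled back only when the target slot is still before the point of no return and that committee was merely reused. Forking at slot 13 is already past slot 12, so the epoch-2 committee survives, which is why epochs 0-2 load successfully while epoch 3 is nil. Below is a condensed, inverted restatement of that predicate using plain ints instead of the real prunable package types; it is a sketch, not the actual implementation.

// keepNextEpochCommittee reports whether the committee selected for the epoch
// after the target slot's epoch survives a rollback to targetSlot. It is a
// simplified restatement of shouldRollbackCommittee with the result inverted.
func keepNextEpochCommittee(targetSlot, epochEndSlot, maxCommittableAge int, reused bool) bool {
	pointOfNoReturn := epochEndSlot - maxCommittableAge
	if targetSlot < pointOfNoReturn {
		// Before the point of no return, only a properly selected
		// (not merely reused) committee is kept.
		return !reused
	}
	// At or past the point of no return the committee can no longer change,
	// so it is always kept.
	return true
}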
+ + expectedCommittee := []iotago.AccountID{ + node0.AccountID, + node1.AccountID, + node2.AccountID, + node3.AccountID, + } + expectedOnlineCommitteeFull := []account.SeatIndex{ + lo.Return1(node0.Protocol.MainEngineInstance().SybilProtection.SeatManager().Committee(1).GetSeat(node0.AccountID)), + lo.Return1(node0.Protocol.MainEngineInstance().SybilProtection.SeatManager().Committee(1).GetSeat(node1.AccountID)), + lo.Return1(node0.Protocol.MainEngineInstance().SybilProtection.SeatManager().Committee(1).GetSeat(node2.AccountID)), + lo.Return1(node0.Protocol.MainEngineInstance().SybilProtection.SeatManager().Committee(1).GetSeat(node3.AccountID)), + } + + expectedOnlineCommitteeHalf := []account.SeatIndex{ + lo.Return1(node0.Protocol.MainEngineInstance().SybilProtection.SeatManager().Committee(1).GetSeat(node0.AccountID)), + lo.Return1(node0.Protocol.MainEngineInstance().SybilProtection.SeatManager().Committee(1).GetSeat(node1.AccountID)), + } + + for _, node := range ts.Nodes() { + node.Protocol.MainEngineInstance().SybilProtection.SeatManager().(*mock.ManualPOA).SetOnline("node0", "node1", "node2", "node3") + } + + { + genesisCommitment := iotago.NewEmptyCommitment(ts.API.ProtocolParameters().Version()) + genesisCommitment.RMC = ts.API.ProtocolParameters().CongestionControlParameters().RMCMin + ts.AssertNodeState(ts.Nodes(), + testsuite.WithSnapshotImported(true), + testsuite.WithProtocolParameters(ts.API.ProtocolParameters()), + testsuite.WithLatestCommitment(genesisCommitment), + testsuite.WithLatestFinalizedSlot(0), + testsuite.WithChainID(genesisCommitment.MustID()), + testsuite.WithStorageCommitments([]*iotago.Commitment{genesisCommitment}), + + testsuite.WithSybilProtectionCommittee(0, expectedCommittee), + testsuite.WithSybilProtectionOnlineCommittee(expectedOnlineCommitteeFull...), + testsuite.WithEvictedSlot(0), + testsuite.WithActiveRootBlocks(ts.Blocks("Genesis")), + testsuite.WithStorageRootBlocks(ts.Blocks("Genesis")), + ) + } + + // Issue up to slot 11 - just before committee selection for the next epoch. + // Committee will be reused at slot 10 is finalized or slot 12 is committed, whichever happens first. + { + ts.IssueBlocksAtSlots("P0:", []iotago.SlotIndex{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11}, 4, "Genesis", ts.Nodes(), true, nil) + + ts.AssertNodeState(ts.Nodes(), + testsuite.WithLatestFinalizedSlot(8), + testsuite.WithLatestCommitmentSlotIndex(9), + testsuite.WithEqualStoredCommitmentAtIndex(9), + testsuite.WithLatestCommitmentCumulativeWeight(28), // 7 for each slot starting from 4 + testsuite.WithSybilProtectionCommittee(9, expectedCommittee), + testsuite.WithSybilProtectionOnlineCommittee(expectedOnlineCommitteeFull...), + testsuite.WithEvictedSlot(9), + ) + + for _, slot := range []iotago.SlotIndex{4, 5, 6, 7, 8, 9} { + var attestationBlocks []*blocks.Block + for _, node := range ts.Nodes() { + if node.Validator { + attestationBlocks = append(attestationBlocks, ts.Block(fmt.Sprintf("P0:%d.3-%s", slot, node.Name))) + } + } + ts.AssertAttestationsForSlot(slot, attestationBlocks, ts.Nodes()...) + } + + ts.AssertBlocksExist(ts.BlocksWithPrefix("P0"), true, ts.Nodes()...) + } + + // Update online committee. 
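Taking node2 and node3 offline at this point is what keeps LatestFinalizedSlot pinned at 8 in the assertions that follow: blocks from two of the four validators are still enough for acceptance and commitment, but presumably not for the supermajority needed to confirm and finalize slots. The arithmetic, as a rough standalone check (the 2/3 threshold is an assumption about the finality gadget, not something taken from this diff):

// supermajority reports whether `approving` seats exceed 2/3 of the committee.
// The 2/3 value is an assumption used for illustration only.
func supermajority(approving, committeeSize int) bool {
	return 3*approving > 2*committeeSize
}

// supermajority(2, 4) == false -> finalization stalls with half the committee issuing
// supermajority(3, 4) == true

Consistent with this, the forked engine below reports LatestFinalizedSlot 8, i.e. the smaller of the source engine's finalized slot and the fork target.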
+ for _, node := range ts.Nodes() { + manualPOA := node.Protocol.MainEngineInstance().SybilProtection.SeatManager().(*mock.ManualPOA) + manualPOA.SetOnline("node0", "node1") + manualPOA.SetOffline("node2", "node3") + } + + { + ts.IssueBlocksAtSlots("P0:", []iotago.SlotIndex{12, 13, 14, 15, 16}, 4, "P0:11.3", []*mock2.Node{node0, node1}, true, nil) + + ts.AssertNodeState(ts.Nodes(), + testsuite.WithLatestFinalizedSlot(8), + testsuite.WithLatestCommitmentSlotIndex(14), + testsuite.WithEqualStoredCommitmentAtIndex(14), + testsuite.WithLatestCommitmentCumulativeWeight(44), // 7 for each slot starting from 4 + testsuite.WithSybilProtectionCommittee(14, expectedCommittee), + testsuite.WithSybilProtectionOnlineCommittee(expectedOnlineCommitteeHalf...), + testsuite.WithEvictedSlot(14), + ) + + ts.AssertBlocksExist(ts.BlocksWithPrefix("P0"), true, ts.Nodes()...) + } + + newEngine, err := node3.Protocol.EngineManager.ForkEngineAtSlot(13) + require.NoError(t, err) + + // Assert state of the forked engine after rollback. + { + require.EqualValues(t, 13, newEngine.Storage.Settings().LatestCommitment().Index()) + require.EqualValues(t, 8, newEngine.Storage.Settings().LatestFinalizedSlot()) + require.EqualValues(t, 13, newEngine.EvictionState.LastEvictedSlot()) + + for epochIndex := 0; epochIndex <= 2; epochIndex++ { + committeeEpoch, err := newEngine.Storage.Committee().Load(iotago.EpochIndex(epochIndex)) + require.NoError(t, err) + require.Len(t, committeeEpoch.IDs(), 4) + } + + // Commmittee for the future epoch does not exist. + committeeEpoch3, err := newEngine.Storage.Committee().Load(3) + require.NoError(t, err) + require.Nil(t, committeeEpoch3) + + for slotIndex := 1; slotIndex <= 13; slotIndex++ { + copiedCommitment, err := newEngine.Storage.Commitments().Load(iotago.SlotIndex(slotIndex)) + require.NoError(t, err) + sourceCommitment, err := node1.Protocol.MainEngineInstance().Storage.Commitments().Load(iotago.SlotIndex(slotIndex)) + require.NoError(t, err) + require.Equal(t, sourceCommitment.ID(), copiedCommitment.ID()) + } + + // Commitment for the first slot after the fork does not exist. 
+ _, err = newEngine.Storage.Commitments().Load(iotago.SlotIndex(14)) + require.Error(t, err) + } +} + +func TestProtocol_EngineRollbackNoFinalizationLastSlot(t *testing.T) { + ts := testsuite.NewTestSuite(t, + testsuite.WithLivenessThreshold(1), + testsuite.WithMinCommittableAge(2), + testsuite.WithMaxCommittableAge(3), + testsuite.WithEpochNearingThreshold(5), + testsuite.WithSlotsPerEpochExponent(3), + testsuite.WithGenesisTimestampOffset(1000*10), + + testsuite.WithWaitFor(15*time.Second), + ) + defer ts.Shutdown() + + node0 := ts.AddValidatorNode("node0") + node1 := ts.AddValidatorNode("node1") + node2 := ts.AddValidatorNode("node2") + node3 := ts.AddValidatorNode("node3") + + poaProvider := func() module.Provider[*engine.Engine, seatmanager.SeatManager] { + return module.Provide(func(e *engine.Engine) seatmanager.SeatManager { + poa := mock.NewManualPOAProvider()(e).(*mock.ManualPOA) + + for _, node := range []*mock2.Node{node0, node1, node2, node3} { + if node.Validator { + poa.AddAccount(node.AccountID, node.Name) + } + } + poa.SetOnline("node0", "node1", "node2", "node3") + + return poa + }) + } + + nodeOptions := make(map[string][]options.Option[protocol.Protocol]) + for _, node := range ts.Nodes() { + nodeOptions[node.Name] = []options.Option[protocol.Protocol]{ + protocol.WithChainManagerOptions( + chainmanager.WithCommitmentRequesterOptions( + eventticker.RetryInterval[iotago.SlotIndex, iotago.CommitmentID](1*time.Second), + eventticker.RetryJitter[iotago.SlotIndex, iotago.CommitmentID](500*time.Millisecond), + ), + ), + protocol.WithSybilProtectionProvider( + sybilprotectionv1.NewProvider( + sybilprotectionv1.WithSeatManagerProvider( + poaProvider(), + ), + ), + ), + protocol.WithEngineOptions( + engine.WithBlockRequesterOptions( + eventticker.RetryInterval[iotago.SlotIndex, iotago.BlockID](1*time.Second), + eventticker.RetryJitter[iotago.SlotIndex, iotago.BlockID](500*time.Millisecond), + ), + ), + protocol.WithStorageOptions( + storage.WithPruningDelay(20), + ), + } + } + + ts.Run(false, nodeOptions) + + // Verify that nodes have the expected states. 
+ + expectedCommittee := []iotago.AccountID{ + node0.AccountID, + node1.AccountID, + node2.AccountID, + node3.AccountID, + } + expectedOnlineCommitteeFull := []account.SeatIndex{ + lo.Return1(node0.Protocol.MainEngineInstance().SybilProtection.SeatManager().Committee(1).GetSeat(node0.AccountID)), + lo.Return1(node0.Protocol.MainEngineInstance().SybilProtection.SeatManager().Committee(1).GetSeat(node1.AccountID)), + lo.Return1(node0.Protocol.MainEngineInstance().SybilProtection.SeatManager().Committee(1).GetSeat(node2.AccountID)), + lo.Return1(node0.Protocol.MainEngineInstance().SybilProtection.SeatManager().Committee(1).GetSeat(node3.AccountID)), + } + + expectedOnlineCommitteeHalf := []account.SeatIndex{ + lo.Return1(node0.Protocol.MainEngineInstance().SybilProtection.SeatManager().Committee(1).GetSeat(node0.AccountID)), + lo.Return1(node0.Protocol.MainEngineInstance().SybilProtection.SeatManager().Committee(1).GetSeat(node1.AccountID)), + } + + for _, node := range ts.Nodes() { + node.Protocol.MainEngineInstance().SybilProtection.SeatManager().(*mock.ManualPOA).SetOnline("node0", "node1", "node2", "node3") + } + + { + genesisCommitment := iotago.NewEmptyCommitment(ts.API.ProtocolParameters().Version()) + genesisCommitment.RMC = ts.API.ProtocolParameters().CongestionControlParameters().RMCMin + ts.AssertNodeState(ts.Nodes(), + testsuite.WithSnapshotImported(true), + testsuite.WithProtocolParameters(ts.API.ProtocolParameters()), + testsuite.WithLatestCommitment(genesisCommitment), + testsuite.WithLatestFinalizedSlot(0), + testsuite.WithChainID(genesisCommitment.MustID()), + testsuite.WithStorageCommitments([]*iotago.Commitment{genesisCommitment}), + + testsuite.WithSybilProtectionCommittee(0, expectedCommittee), + testsuite.WithSybilProtectionOnlineCommittee(expectedOnlineCommitteeFull...), + testsuite.WithEvictedSlot(0), + testsuite.WithActiveRootBlocks(ts.Blocks("Genesis")), + testsuite.WithStorageRootBlocks(ts.Blocks("Genesis")), + ) + } + + // Issue up to slot 11 - just before committee selection for the next epoch. + // Committee will be reused at slot 10 is finalized or slot 12 is committed, whichever happens first. + { + ts.IssueBlocksAtSlots("P0:", []iotago.SlotIndex{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11}, 4, "Genesis", ts.Nodes(), true, nil) + + ts.AssertNodeState(ts.Nodes(), + testsuite.WithLatestFinalizedSlot(8), + testsuite.WithLatestCommitmentSlotIndex(9), + testsuite.WithEqualStoredCommitmentAtIndex(9), + testsuite.WithLatestCommitmentCumulativeWeight(28), // 7 for each slot starting from 4 + testsuite.WithSybilProtectionCommittee(9, expectedCommittee), + testsuite.WithSybilProtectionOnlineCommittee(expectedOnlineCommitteeFull...), + testsuite.WithEvictedSlot(9), + ) + + for _, slot := range []iotago.SlotIndex{4, 5, 6, 7, 8, 9} { + var attestationBlocks []*blocks.Block + for _, node := range ts.Nodes() { + if node.Validator { + attestationBlocks = append(attestationBlocks, ts.Block(fmt.Sprintf("P0:%d.3-%s", slot, node.Name))) + } + } + ts.AssertAttestationsForSlot(slot, attestationBlocks, ts.Nodes()...) + } + + ts.AssertBlocksExist(ts.BlocksWithPrefix("P0"), true, ts.Nodes()...) + } + + // Update online committee. 
+ for _, node := range ts.Nodes() { + manualPOA := node.Protocol.MainEngineInstance().SybilProtection.SeatManager().(*mock.ManualPOA) + manualPOA.SetOnline("node0", "node1") + manualPOA.SetOffline("node2", "node3") + } + + { + ts.IssueBlocksAtSlots("P0:", []iotago.SlotIndex{12, 13, 14, 15, 16, 17, 18, 19}, 4, "P0:11.3", []*mock2.Node{node0, node1}, true, nil) + + ts.AssertNodeState(ts.Nodes(), + testsuite.WithLatestFinalizedSlot(8), + testsuite.WithLatestCommitmentSlotIndex(17), + testsuite.WithEqualStoredCommitmentAtIndex(17), + testsuite.WithLatestCommitmentCumulativeWeight(50), // 7 for each slot starting from 4 + testsuite.WithSybilProtectionCommittee(17, expectedCommittee), + testsuite.WithSybilProtectionOnlineCommittee(expectedOnlineCommitteeHalf...), + testsuite.WithEvictedSlot(17), + ) + + ts.AssertBlocksExist(ts.BlocksWithPrefix("P0"), true, ts.Nodes()...) + } + + newEngine, err := node3.Protocol.EngineManager.ForkEngineAtSlot(15) + require.NoError(t, err) + + // Assert state of the forked engine after rollback. + { + require.EqualValues(t, 15, newEngine.Storage.Settings().LatestCommitment().Index()) + require.EqualValues(t, 8, newEngine.Storage.Settings().LatestFinalizedSlot()) + require.EqualValues(t, 15, newEngine.EvictionState.LastEvictedSlot()) + + for epochIndex := 0; epochIndex <= 2; epochIndex++ { + committeeEpoch, err := newEngine.Storage.Committee().Load(iotago.EpochIndex(epochIndex)) + require.NoError(t, err) + require.Len(t, committeeEpoch.IDs(), 4) + } + + // Commmittee for the future epoch does not exist. + committeeEpoch3, err := newEngine.Storage.Committee().Load(3) + require.NoError(t, err) + require.Nil(t, committeeEpoch3) + + for slotIndex := 1; slotIndex <= 15; slotIndex++ { + copiedCommitment, err := newEngine.Storage.Commitments().Load(iotago.SlotIndex(slotIndex)) + require.NoError(t, err) + sourceCommitment, err := node1.Protocol.MainEngineInstance().Storage.Commitments().Load(iotago.SlotIndex(slotIndex)) + require.NoError(t, err) + require.Equal(t, sourceCommitment.ID(), copiedCommitment.ID()) + } + + // Commitment for the first slot after the fork does not exist. 
+ _, err = newEngine.Storage.Commitments().Load(iotago.SlotIndex(16)) + require.Error(t, err) + } +} + +func TestProtocol_EngineRollbackNoFinalizationBeforePointOfNoReturn(t *testing.T) { + ts := testsuite.NewTestSuite(t, + testsuite.WithLivenessThreshold(1), + testsuite.WithMinCommittableAge(2), + testsuite.WithMaxCommittableAge(3), + testsuite.WithEpochNearingThreshold(5), + testsuite.WithSlotsPerEpochExponent(3), + testsuite.WithGenesisTimestampOffset(1000*10), + + testsuite.WithWaitFor(15*time.Second), + ) + defer ts.Shutdown() + + node0 := ts.AddValidatorNode("node0") + node1 := ts.AddValidatorNode("node1") + node2 := ts.AddValidatorNode("node2") + node3 := ts.AddValidatorNode("node3") + + poaProvider := func() module.Provider[*engine.Engine, seatmanager.SeatManager] { + return module.Provide(func(e *engine.Engine) seatmanager.SeatManager { + poa := mock.NewManualPOAProvider()(e).(*mock.ManualPOA) + + for _, node := range []*mock2.Node{node0, node1, node2, node3} { + if node.Validator { + poa.AddAccount(node.AccountID, node.Name) + } + } + poa.SetOnline("node0", "node1", "node2", "node3") + + return poa + }) + } + + nodeOptions := make(map[string][]options.Option[protocol.Protocol]) + for _, node := range ts.Nodes() { + nodeOptions[node.Name] = []options.Option[protocol.Protocol]{ + protocol.WithChainManagerOptions( + chainmanager.WithCommitmentRequesterOptions( + eventticker.RetryInterval[iotago.SlotIndex, iotago.CommitmentID](1*time.Second), + eventticker.RetryJitter[iotago.SlotIndex, iotago.CommitmentID](500*time.Millisecond), + ), + ), + protocol.WithSybilProtectionProvider( + sybilprotectionv1.NewProvider( + sybilprotectionv1.WithSeatManagerProvider( + poaProvider(), + ), + ), + ), + protocol.WithEngineOptions( + engine.WithBlockRequesterOptions( + eventticker.RetryInterval[iotago.SlotIndex, iotago.BlockID](1*time.Second), + eventticker.RetryJitter[iotago.SlotIndex, iotago.BlockID](500*time.Millisecond), + ), + ), + protocol.WithStorageOptions( + storage.WithPruningDelay(20), + ), + } + } + + ts.Run(false, nodeOptions) + + // Verify that nodes have the expected states. 
+ + expectedCommittee := []iotago.AccountID{ + node0.AccountID, + node1.AccountID, + node2.AccountID, + node3.AccountID, + } + expectedOnlineCommitteeFull := []account.SeatIndex{ + lo.Return1(node0.Protocol.MainEngineInstance().SybilProtection.SeatManager().Committee(1).GetSeat(node0.AccountID)), + lo.Return1(node0.Protocol.MainEngineInstance().SybilProtection.SeatManager().Committee(1).GetSeat(node1.AccountID)), + lo.Return1(node0.Protocol.MainEngineInstance().SybilProtection.SeatManager().Committee(1).GetSeat(node2.AccountID)), + lo.Return1(node0.Protocol.MainEngineInstance().SybilProtection.SeatManager().Committee(1).GetSeat(node3.AccountID)), + } + + expectedOnlineCommitteeHalf := []account.SeatIndex{ + lo.Return1(node0.Protocol.MainEngineInstance().SybilProtection.SeatManager().Committee(1).GetSeat(node0.AccountID)), + lo.Return1(node0.Protocol.MainEngineInstance().SybilProtection.SeatManager().Committee(1).GetSeat(node1.AccountID)), + } + + for _, node := range ts.Nodes() { + node.Protocol.MainEngineInstance().SybilProtection.SeatManager().(*mock.ManualPOA).SetOnline("node0", "node1", "node2", "node3") + } + + { + genesisCommitment := iotago.NewEmptyCommitment(ts.API.ProtocolParameters().Version()) + genesisCommitment.RMC = ts.API.ProtocolParameters().CongestionControlParameters().RMCMin + ts.AssertNodeState(ts.Nodes(), + testsuite.WithSnapshotImported(true), + testsuite.WithProtocolParameters(ts.API.ProtocolParameters()), + testsuite.WithLatestCommitment(genesisCommitment), + testsuite.WithLatestFinalizedSlot(0), + testsuite.WithChainID(genesisCommitment.MustID()), + testsuite.WithStorageCommitments([]*iotago.Commitment{genesisCommitment}), + + testsuite.WithSybilProtectionCommittee(0, expectedCommittee), + testsuite.WithSybilProtectionOnlineCommittee(expectedOnlineCommitteeFull...), + testsuite.WithEvictedSlot(0), + testsuite.WithActiveRootBlocks(ts.Blocks("Genesis")), + testsuite.WithStorageRootBlocks(ts.Blocks("Genesis")), + ) + } + + // Issue up to slot 11 - just before committee selection for the next epoch. + // Committee will be reused at slot 10 is finalized or slot 12 is committed, whichever happens first. + { + ts.IssueBlocksAtSlots("P0:", []iotago.SlotIndex{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11}, 4, "Genesis", ts.Nodes(), true, nil) + + ts.AssertNodeState(ts.Nodes(), + testsuite.WithLatestFinalizedSlot(8), + testsuite.WithLatestCommitmentSlotIndex(9), + testsuite.WithEqualStoredCommitmentAtIndex(9), + testsuite.WithLatestCommitmentCumulativeWeight(28), // 7 for each slot starting from 4 + testsuite.WithSybilProtectionCommittee(9, expectedCommittee), + testsuite.WithSybilProtectionOnlineCommittee(expectedOnlineCommitteeFull...), + testsuite.WithEvictedSlot(9), + ) + + for _, slot := range []iotago.SlotIndex{4, 5, 6, 7, 8, 9} { + var attestationBlocks []*blocks.Block + for _, node := range ts.Nodes() { + if node.Validator { + attestationBlocks = append(attestationBlocks, ts.Block(fmt.Sprintf("P0:%d.3-%s", slot, node.Name))) + } + } + ts.AssertAttestationsForSlot(slot, attestationBlocks, ts.Nodes()...) + } + + ts.AssertBlocksExist(ts.BlocksWithPrefix("P0"), true, ts.Nodes()...) + } + + // Update online committee. 
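This last variant is the interesting one for committee rollback: with finalization stuck at slot 8, the epoch-2 committee only ever gets reused at the slot-12 commitment, and the fork target (slot 9) lies before that point of no return, so the reused committee is dropped and only epochs 0 and 1 keep a committee. For reference, the expected outcomes of the four scenarios in this file can be summarized as below; the field names are ad hoc, the values are taken directly from the assertions.

// Expected outcome per test in this file, derived from its assertions.
var rollbackScenarios = []struct {
	forkSlot              int
	finalizedAfter        int // LatestFinalizedSlot on the forked engine
	lastEpochWithCommittee int // highest epoch whose committee still loads
}{
	{forkSlot: 13, finalizedAfter: 13, lastEpochWithCommittee: 2}, // finalization kept up with issuance
	{forkSlot: 13, finalizedAfter: 8, lastEpochWithCommittee: 2},  // no finalization, but past the point of no return
	{forkSlot: 15, finalizedAfter: 8, lastEpochWithCommittee: 2},  // fork on the last slot of epoch 1
	{forkSlot: 9, finalizedAfter: 8, lastEpochWithCommittee: 1},   // before the point of no return: reused committee dropped
}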
+ for _, node := range ts.Nodes() { + manualPOA := node.Protocol.MainEngineInstance().SybilProtection.SeatManager().(*mock.ManualPOA) + manualPOA.SetOnline("node0", "node1") + manualPOA.SetOffline("node2", "node3") + } + + { + ts.IssueBlocksAtSlots("P0:", []iotago.SlotIndex{12, 13, 14, 15}, 4, "P0:11.3", []*mock2.Node{node0, node1}, true, nil) + + ts.AssertNodeState(ts.Nodes(), + testsuite.WithLatestFinalizedSlot(8), + testsuite.WithLatestCommitmentSlotIndex(13), + testsuite.WithEqualStoredCommitmentAtIndex(13), + testsuite.WithLatestCommitmentCumulativeWeight(42), // 7 for each slot starting from 4 + testsuite.WithSybilProtectionCommittee(13, expectedCommittee), + testsuite.WithSybilProtectionOnlineCommittee(expectedOnlineCommitteeHalf...), + testsuite.WithEvictedSlot(13), + ) + + ts.AssertBlocksExist(ts.BlocksWithPrefix("P0"), true, ts.Nodes()...) + } + + newEngine, err := node3.Protocol.EngineManager.ForkEngineAtSlot(9) + require.NoError(t, err) + + // Assert state of the forked engine after rollback. + { + require.EqualValues(t, 9, newEngine.Storage.Settings().LatestCommitment().Index()) + require.EqualValues(t, 8, newEngine.Storage.Settings().LatestFinalizedSlot()) + require.EqualValues(t, 9, newEngine.EvictionState.LastEvictedSlot()) + + for epochIndex := 0; epochIndex <= 1; epochIndex++ { + committeeEpoch, err := newEngine.Storage.Committee().Load(iotago.EpochIndex(epochIndex)) + require.NoError(t, err) + require.Len(t, committeeEpoch.IDs(), 4) + } + + // Commmittee for the future epoch does not exist. + committeeEpoch2, err := newEngine.Storage.Committee().Load(2) + require.NoError(t, err) + require.Nil(t, committeeEpoch2) + + for slotIndex := 1; slotIndex <= 9; slotIndex++ { + copiedCommitment, err := newEngine.Storage.Commitments().Load(iotago.SlotIndex(slotIndex)) + require.NoError(t, err) + sourceCommitment, err := node1.Protocol.MainEngineInstance().Storage.Commitments().Load(iotago.SlotIndex(slotIndex)) + require.NoError(t, err) + require.Equal(t, sourceCommitment.ID(), copiedCommitment.ID()) + } + + // Commitment for the first slot after the fork does not exist. + _, err = newEngine.Storage.Commitments().Load(iotago.SlotIndex(10)) + require.Error(t, err) + } +} + +// TODO: test fork before point of no return (slot 12) +// TODO: test fork on last slot of an epoch (slot 15) From ca2f8f4bf5affa80aa4bd80be421cb25c2feaeb1 Mon Sep 17 00:00:00 2001 From: Andrea V <1577639+karimodm@users.noreply.github.com> Date: Tue, 19 Sep 2023 11:35:13 +0200 Subject: [PATCH 13/17] Even mocked PoA needs to be imported to obtain pools' data --- .../sybilprotection/seatmanager/mock/mockseatmanager.go | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/pkg/protocol/sybilprotection/seatmanager/mock/mockseatmanager.go b/pkg/protocol/sybilprotection/seatmanager/mock/mockseatmanager.go index ba10e8baa..223f212df 100644 --- a/pkg/protocol/sybilprotection/seatmanager/mock/mockseatmanager.go +++ b/pkg/protocol/sybilprotection/seatmanager/mock/mockseatmanager.go @@ -119,7 +119,9 @@ func (m *ManualPOA) RotateCommittee(_ iotago.EpochIndex, _ *account.Accounts) *a func (m *ManualPOA) SetCommittee(_ iotago.EpochIndex, _ *account.Accounts) { } -func (m *ManualPOA) ImportCommittee(_ iotago.EpochIndex, _ *account.Accounts) { +func (m *ManualPOA) ImportCommittee(_ iotago.EpochIndex, validators *account.Accounts) { + m.accounts = validators + m.committee = m.accounts.SelectCommittee(validators.IDs()...) 
} func (m *ManualPOA) Shutdown() {} From 3a3b48e0e53a0d002f660d1214e8b2ec51a8982e Mon Sep 17 00:00:00 2001 From: Andrea V <1577639+karimodm@users.noreply.github.com> Date: Tue, 19 Sep 2023 13:00:19 +0200 Subject: [PATCH 14/17] Address some review comments --- pkg/protocol/enginemanager/enginemanager.go | 9 +- .../performance/performance.go | 5 +- .../{syncedkvstore.go => lockedkvstore.go} | 5 + pkg/storage/prunable/prunable.go | 1 - pkg/storage/storage.go | 6 +- pkg/storage/storage_prunable.go | 4 + pkg/testsuite/mock/node.go | 183 +++++++++--------- 7 files changed, 107 insertions(+), 106 deletions(-) rename pkg/storage/database/{syncedkvstore.go => lockedkvstore.go} (96%) diff --git a/pkg/protocol/enginemanager/enginemanager.go b/pkg/protocol/enginemanager/enginemanager.go index 1d070b910..fdb4ea57b 100644 --- a/pkg/protocol/enginemanager/enginemanager.go +++ b/pkg/protocol/enginemanager/enginemanager.go @@ -45,8 +45,6 @@ type engineInfo struct { Name string `json:"name"` } -// region EngineManager //////////////////////////////////////////////////////////////////////////////////////////////// - type EngineManager struct { directory *utils.Directory dbVersion byte @@ -261,12 +259,11 @@ func (e *EngineManager) loadEngineInstanceWithStorage(engineAlias string, storag } func (e *EngineManager) ForkEngineAtSlot(index iotago.SlotIndex) (*engine.Engine, error) { - engineAlias := lo.PanicOnErr(uuid.NewUUID()).String() + engineAlias := newEngineAlias() errorHandler := func(err error) { e.errorHandler(ierrors.Wrapf(err, "engine (%s)", engineAlias[0:8])) } - // TODO: lock active instance so it doesn't use storage when we clone it // Copy raw data on disk. newStorage, err := storage.CloneStorage(e.activeInstance.Storage, e.directory.Path(engineAlias), e.dbVersion, errorHandler, e.storageOptions...) 
if err != nil { @@ -324,4 +321,6 @@ func (e *EngineManager) OnEngineCreated(handler func(*engine.Engine)) (unsubscri return e.engineCreated.Hook(handler).Unhook } -// endregion /////////////////////////////////////////////////////////////////////////////////////////////////////////// +func newEngineAlias() string { + return lo.PanicOnErr(uuid.NewUUID()).String() +} diff --git a/pkg/protocol/sybilprotection/sybilprotectionv1/performance/performance.go b/pkg/protocol/sybilprotection/sybilprotectionv1/performance/performance.go index 8cbb401af..3a0613676 100644 --- a/pkg/protocol/sybilprotection/sybilprotectionv1/performance/performance.go +++ b/pkg/protocol/sybilprotection/sybilprotectionv1/performance/performance.go @@ -1,8 +1,6 @@ package performance import ( - "fmt" - "github.com/iotaledger/hive.go/ds" "github.com/iotaledger/hive.go/ierrors" "github.com/iotaledger/hive.go/kvstore" @@ -94,7 +92,6 @@ func (t *Tracker) ApplyEpoch(epoch iotago.EpochIndex, committee *account.Account timeProvider := t.apiProvider.APIForEpoch(epoch).TimeProvider() epochStartSlot := timeProvider.EpochStart(epoch) epochEndSlot := timeProvider.EpochEnd(epoch) - fmt.Println("apply epoch", epoch, committee.TotalStake(), committee.TotalValidatorStake()) profitMargin := calculateProfitMargin(committee.TotalValidatorStake(), committee.TotalStake()) poolsStats := &model.PoolsStats{ TotalStake: committee.TotalStake(), @@ -105,7 +102,7 @@ func (t *Tracker) ApplyEpoch(epoch iotago.EpochIndex, committee *account.Account if err := t.poolStatsStore.Store(epoch, poolsStats); err != nil { panic(ierrors.Wrapf(err, "failed to store pool stats for epoch %d", epoch)) } - fmt.Println("commit epoch", epoch) + rewardsMap, err := t.rewardsMap(epoch) if err != nil { panic(ierrors.Wrapf(err, "failed to create rewards tree for epoch %d", epoch)) diff --git a/pkg/storage/database/syncedkvstore.go b/pkg/storage/database/lockedkvstore.go similarity index 96% rename from pkg/storage/database/syncedkvstore.go rename to pkg/storage/database/lockedkvstore.go index 07a61730d..3fa4bf1aa 100644 --- a/pkg/storage/database/syncedkvstore.go +++ b/pkg/storage/database/lockedkvstore.go @@ -2,6 +2,7 @@ package database import ( "github.com/iotaledger/hive.go/ds/types" + "github.com/iotaledger/hive.go/ierrors" "github.com/iotaledger/hive.go/kvstore" "github.com/iotaledger/hive.go/runtime/syncutils" "github.com/iotaledger/hive.go/serializer/v2/byteutils" @@ -117,6 +118,10 @@ func (s *lockedKVStore) Close() error { s.instanceMutex.RLock() defer s.instanceMutex.RUnlock() + if err := s.FlushWithoutLocking(); err != nil { + return ierrors.Wrap(err, "failed to flush database") + } + return s.CloseWithoutLocking() } func (s *lockedKVStore) CloseWithoutLocking() error { diff --git a/pkg/storage/prunable/prunable.go b/pkg/storage/prunable/prunable.go index e69c3c0a5..ba234ee16 100644 --- a/pkg/storage/prunable/prunable.go +++ b/pkg/storage/prunable/prunable.go @@ -163,7 +163,6 @@ func (p *Prunable) Rollback(targetSlotIndex iotago.SlotIndex) error { // Removed entries that belong to the old fork and cannot be re-used. 
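A behavioural note on the lockedKVStore.Close change above: flushing before closing matters for the fork flow, since the new storage is produced by copying the database directory on disk, and writes still sitting in memory buffers could otherwise be missing from the copy. A minimal sketch of the close-implies-flush pattern, using a hypothetical Store interface rather than the real hive.go kvstore API:

// Store is a stand-in interface, not the hive.go kvstore.KVStore API.
type Store interface {
	Flush() error
	Close() error
}

// flushingCloser illustrates the pattern: persist buffered writes first so an
// on-disk copy of the database directory taken afterwards is complete.
type flushingCloser struct{ inner Store }

func (f *flushingCloser) Close() error {
	if err := f.inner.Flush(); err != nil {
		return err
	}
	return f.inner.Close()
}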
for epochIdx := lastCommittedEpoch + 1; ; epochIdx++ { - fmt.Println("rollback before if", epochIdx, targetSlotEpoch) if epochIdx > targetSlotEpoch { shouldRollback, err := p.shouldRollbackCommittee(epochIdx, targetSlotIndex) if err != nil { diff --git a/pkg/storage/storage.go b/pkg/storage/storage.go index 86c02c7b2..c20cc7a10 100644 --- a/pkg/storage/storage.go +++ b/pkg/storage/storage.go @@ -142,8 +142,4 @@ func (s *Storage) Shutdown() { func (s *Storage) Flush() { s.permanent.Flush() s.prunable.Flush() -} - -func (s *Storage) RollbackPrunable(targetIndex iotago.SlotIndex) error { - return s.prunable.Rollback(targetIndex) -} +} \ No newline at end of file diff --git a/pkg/storage/storage_prunable.go b/pkg/storage/storage_prunable.go index 05095b9dc..c61711d05 100644 --- a/pkg/storage/storage_prunable.go +++ b/pkg/storage/storage_prunable.go @@ -73,3 +73,7 @@ func (s *Storage) RestoreFromDisk() { s.lastPrunedEpoch.MarkEvicted(lastPrunedEpoch) } + +func (s *Storage) RollbackPrunable(targetIndex iotago.SlotIndex) error { + return s.prunable.Rollback(targetIndex) +} diff --git a/pkg/testsuite/mock/node.go b/pkg/testsuite/mock/node.go index 629f85161..8986bc78e 100644 --- a/pkg/testsuite/mock/node.go +++ b/pkg/testsuite/mock/node.go @@ -20,6 +20,7 @@ import ( "github.com/iotaledger/hive.go/runtime/syncutils" "github.com/iotaledger/hive.go/runtime/workerpool" "github.com/iotaledger/iota-core/pkg/blockfactory" + "github.com/iotaledger/iota-core/pkg/core/account" "github.com/iotaledger/iota-core/pkg/model" "github.com/iotaledger/iota-core/pkg/protocol" "github.com/iotaledger/iota-core/pkg/protocol/chainmanager" @@ -222,11 +223,11 @@ func (n *Node) attachEngineLogs(failOnBlockFiltered bool, instance *engine.Engin defer n.mutex.Unlock() n.attachedBlocks = append(n.attachedBlocks, block) }) - // - //events.BlockDAG.BlockSolid.Hook(func(block *blocks.Block) { - // fmt.Printf("%s > [%s] BlockDAG.BlockSolid: %s\n", n.Name, engineName, block.ID()) - //}) - // + + events.BlockDAG.BlockSolid.Hook(func(block *blocks.Block) { + fmt.Printf("%s > [%s] BlockDAG.BlockSolid: %s\n", n.Name, engineName, block.ID()) + }) + events.BlockDAG.BlockInvalid.Hook(func(block *blocks.Block, err error) { fmt.Printf("%s > [%s] BlockDAG.BlockInvalid: %s - %s\n", n.Name, engineName, block.ID(), err) }) @@ -340,92 +341,92 @@ func (n *Node) attachEngineLogs(failOnBlockFiltered bool, instance *engine.Engin fmt.Printf("%s > [%s] NotarizationManager.LatestCommitmentUpdated: %s\n", n.Name, engineName, commitment.ID()) }) - //events.BlockGadget.BlockPreAccepted.Hook(func(block *blocks.Block) { - // fmt.Printf("%s > [%s] Consensus.BlockGadget.BlockPreAccepted: %s %s\n", n.Name, engineName, block.ID(), block.ProtocolBlock().SlotCommitmentID) - //}) - // - //events.BlockGadget.BlockAccepted.Hook(func(block *blocks.Block) { - // fmt.Printf("%s > [%s] Consensus.BlockGadget.BlockAccepted: %s @ slot %s committing to %s\n", n.Name, engineName, block.ID(), block.ID().Index(), block.ProtocolBlock().SlotCommitmentID) - //}) - // - //events.BlockGadget.BlockPreConfirmed.Hook(func(block *blocks.Block) { - // fmt.Printf("%s > [%s] Consensus.BlockGadget.BlockPreConfirmed: %s %s\n", n.Name, engineName, block.ID(), block.ProtocolBlock().SlotCommitmentID) - //}) - // - //events.BlockGadget.BlockConfirmed.Hook(func(block *blocks.Block) { - // fmt.Printf("%s > [%s] Consensus.BlockGadget.BlockConfirmed: %s %s\n", n.Name, engineName, block.ID(), block.ProtocolBlock().SlotCommitmentID) - //}) - // - //events.SlotGadget.SlotFinalized.Hook(func(slotIndex 
iotago.SlotIndex) { - // fmt.Printf("%s > [%s] Consensus.SlotGadget.SlotFinalized: %s\n", n.Name, engineName, slotIndex) - //}) - // - //events.SeatManager.OnlineCommitteeSeatAdded.Hook(func(seat account.SeatIndex, accountID iotago.AccountID) { - // fmt.Printf("%s > [%s] SybilProtection.OnlineCommitteeSeatAdded: %d - %s\n", n.Name, engineName, seat, accountID) - //}) - // - //events.SeatManager.OnlineCommitteeSeatRemoved.Hook(func(seat account.SeatIndex) { - // fmt.Printf("%s > [%s] SybilProtection.OnlineCommitteeSeatRemoved: %d\n", n.Name, engineName, seat) - //}) - // - //events.ConflictDAG.ConflictCreated.Hook(func(conflictID iotago.TransactionID) { - // fmt.Printf("%s > [%s] ConflictDAG.ConflictCreated: %s\n", n.Name, engineName, conflictID) - //}) - // - //events.ConflictDAG.ConflictEvicted.Hook(func(conflictID iotago.TransactionID) { - // fmt.Printf("%s > [%s] ConflictDAG.ConflictEvicted: %s\n", n.Name, engineName, conflictID) - //}) - //events.ConflictDAG.ConflictRejected.Hook(func(conflictID iotago.TransactionID) { - // fmt.Printf("%s > [%s] ConflictDAG.ConflictRejected: %s\n", n.Name, engineName, conflictID) - //}) - // - //events.ConflictDAG.ConflictAccepted.Hook(func(conflictID iotago.TransactionID) { - // fmt.Printf("%s > [%s] ConflictDAG.ConflictAccepted: %s\n", n.Name, engineName, conflictID) - //}) - // - //instance.Ledger.OnTransactionAttached(func(transactionMetadata mempool.TransactionMetadata) { - // fmt.Printf("%s > [%s] Ledger.TransactionAttached: %s\n", n.Name, engineName, transactionMetadata.ID()) - // - // transactionMetadata.OnSolid(func() { - // fmt.Printf("%s > [%s] MemPool.TransactionSolid: %s\n", n.Name, engineName, transactionMetadata.ID()) - // }) - // - // transactionMetadata.OnExecuted(func() { - // fmt.Printf("%s > [%s] MemPool.TransactionExecuted: %s\n", n.Name, engineName, transactionMetadata.ID()) - // }) - // - // transactionMetadata.OnBooked(func() { - // fmt.Printf("%s > [%s] MemPool.TransactionBooked: %s\n", n.Name, engineName, transactionMetadata.ID()) - // }) - // - // transactionMetadata.OnConflicting(func() { - // fmt.Printf("%s > [%s] MemPool.TransactionConflicting: %s\n", n.Name, engineName, transactionMetadata.ID()) - // }) - // - // transactionMetadata.OnAccepted(func() { - // fmt.Printf("%s > [%s] MemPool.TransactionAccepted: %s\n", n.Name, engineName, transactionMetadata.ID()) - // }) - // - // transactionMetadata.OnRejected(func() { - // fmt.Printf("%s > [%s] MemPool.TransactionRejected: %s\n", n.Name, engineName, transactionMetadata.ID()) - // }) - // - // transactionMetadata.OnInvalid(func(err error) { - // fmt.Printf("%s > [%s] MemPool.TransactionInvalid(%s): %s\n", n.Name, engineName, err, transactionMetadata.ID()) - // }) - // - // transactionMetadata.OnOrphaned(func() { - // fmt.Printf("%s > [%s] MemPool.TransactionOrphaned: %s\n", n.Name, engineName, transactionMetadata.ID()) - // }) - // - // transactionMetadata.OnCommitted(func() { - // fmt.Printf("%s > [%s] MemPool.TransactionCommitted: %s\n", n.Name, engineName, transactionMetadata.ID()) - // }) - // - // transactionMetadata.OnPending(func() { - // fmt.Printf("%s > [%s] MemPool.TransactionPending: %s\n", n.Name, engineName, transactionMetadata.ID()) - // }) - //}) + events.BlockGadget.BlockPreAccepted.Hook(func(block *blocks.Block) { + fmt.Printf("%s > [%s] Consensus.BlockGadget.BlockPreAccepted: %s %s\n", n.Name, engineName, block.ID(), block.ProtocolBlock().SlotCommitmentID) + }) + + events.BlockGadget.BlockAccepted.Hook(func(block *blocks.Block) { + fmt.Printf("%s > [%s] 
Consensus.BlockGadget.BlockAccepted: %s @ slot %s committing to %s\n", n.Name, engineName, block.ID(), block.ID().Index(), block.ProtocolBlock().SlotCommitmentID) + }) + + events.BlockGadget.BlockPreConfirmed.Hook(func(block *blocks.Block) { + fmt.Printf("%s > [%s] Consensus.BlockGadget.BlockPreConfirmed: %s %s\n", n.Name, engineName, block.ID(), block.ProtocolBlock().SlotCommitmentID) + }) + + events.BlockGadget.BlockConfirmed.Hook(func(block *blocks.Block) { + fmt.Printf("%s > [%s] Consensus.BlockGadget.BlockConfirmed: %s %s\n", n.Name, engineName, block.ID(), block.ProtocolBlock().SlotCommitmentID) + }) + + events.SlotGadget.SlotFinalized.Hook(func(slotIndex iotago.SlotIndex) { + fmt.Printf("%s > [%s] Consensus.SlotGadget.SlotFinalized: %s\n", n.Name, engineName, slotIndex) + }) + + events.SeatManager.OnlineCommitteeSeatAdded.Hook(func(seat account.SeatIndex, accountID iotago.AccountID) { + fmt.Printf("%s > [%s] SybilProtection.OnlineCommitteeSeatAdded: %d - %s\n", n.Name, engineName, seat, accountID) + }) + + events.SeatManager.OnlineCommitteeSeatRemoved.Hook(func(seat account.SeatIndex) { + fmt.Printf("%s > [%s] SybilProtection.OnlineCommitteeSeatRemoved: %d\n", n.Name, engineName, seat) + }) + + events.ConflictDAG.ConflictCreated.Hook(func(conflictID iotago.TransactionID) { + fmt.Printf("%s > [%s] ConflictDAG.ConflictCreated: %s\n", n.Name, engineName, conflictID) + }) + + events.ConflictDAG.ConflictEvicted.Hook(func(conflictID iotago.TransactionID) { + fmt.Printf("%s > [%s] ConflictDAG.ConflictEvicted: %s\n", n.Name, engineName, conflictID) + }) + events.ConflictDAG.ConflictRejected.Hook(func(conflictID iotago.TransactionID) { + fmt.Printf("%s > [%s] ConflictDAG.ConflictRejected: %s\n", n.Name, engineName, conflictID) + }) + + events.ConflictDAG.ConflictAccepted.Hook(func(conflictID iotago.TransactionID) { + fmt.Printf("%s > [%s] ConflictDAG.ConflictAccepted: %s\n", n.Name, engineName, conflictID) + }) + + instance.Ledger.OnTransactionAttached(func(transactionMetadata mempool.TransactionMetadata) { + fmt.Printf("%s > [%s] Ledger.TransactionAttached: %s\n", n.Name, engineName, transactionMetadata.ID()) + + transactionMetadata.OnSolid(func() { + fmt.Printf("%s > [%s] MemPool.TransactionSolid: %s\n", n.Name, engineName, transactionMetadata.ID()) + }) + + transactionMetadata.OnExecuted(func() { + fmt.Printf("%s > [%s] MemPool.TransactionExecuted: %s\n", n.Name, engineName, transactionMetadata.ID()) + }) + + transactionMetadata.OnBooked(func() { + fmt.Printf("%s > [%s] MemPool.TransactionBooked: %s\n", n.Name, engineName, transactionMetadata.ID()) + }) + + transactionMetadata.OnConflicting(func() { + fmt.Printf("%s > [%s] MemPool.TransactionConflicting: %s\n", n.Name, engineName, transactionMetadata.ID()) + }) + + transactionMetadata.OnAccepted(func() { + fmt.Printf("%s > [%s] MemPool.TransactionAccepted: %s\n", n.Name, engineName, transactionMetadata.ID()) + }) + + transactionMetadata.OnRejected(func() { + fmt.Printf("%s > [%s] MemPool.TransactionRejected: %s\n", n.Name, engineName, transactionMetadata.ID()) + }) + + transactionMetadata.OnInvalid(func(err error) { + fmt.Printf("%s > [%s] MemPool.TransactionInvalid(%s): %s\n", n.Name, engineName, err, transactionMetadata.ID()) + }) + + transactionMetadata.OnOrphaned(func() { + fmt.Printf("%s > [%s] MemPool.TransactionOrphaned: %s\n", n.Name, engineName, transactionMetadata.ID()) + }) + + transactionMetadata.OnCommitted(func() { + fmt.Printf("%s > [%s] MemPool.TransactionCommitted: %s\n", n.Name, engineName, transactionMetadata.ID()) + }) 
+ + transactionMetadata.OnPending(func() { + fmt.Printf("%s > [%s] MemPool.TransactionPending: %s\n", n.Name, engineName, transactionMetadata.ID()) + }) + }) } func (n *Node) Wait() { From a4ca65d16644fbdaecb7b47ac3ae4f11c7227e87 Mon Sep 17 00:00:00 2001 From: Andrea V <1577639+karimodm@users.noreply.github.com> Date: Tue, 19 Sep 2023 15:15:22 +0200 Subject: [PATCH 15/17] Address all the remaining review comments --- .../accounts/accountsledger/snapshot.go | 3 +- pkg/protocol/enginemanager/enginemanager.go | 29 ++--------- .../snapshotcreator/snapshotcreator.go | 2 +- pkg/storage/database/db_instance.go | 10 +--- pkg/storage/database/lockedkvstore.go | 7 +++ pkg/storage/database/openablekvstore.go | 8 ++++ pkg/storage/storage.go | 48 +++++++++---------- pkg/storage/storage_test.go | 2 +- pkg/storage/testframework_test.go | 6 +-- 9 files changed, 48 insertions(+), 67 deletions(-) diff --git a/pkg/protocol/engine/accounts/accountsledger/snapshot.go b/pkg/protocol/engine/accounts/accountsledger/snapshot.go index d5d842740..fe3063e49 100644 --- a/pkg/protocol/engine/accounts/accountsledger/snapshot.go +++ b/pkg/protocol/engine/accounts/accountsledger/snapshot.go @@ -215,9 +215,8 @@ func (m *Manager) readSlotDiffs(reader io.ReadSeeker, slotDiffCount uint64) erro func (m *Manager) writeSlotDiffs(pWriter *utils.PositionedWriter, targetIndex iotago.SlotIndex) (slotDiffsCount uint64, err error) { // write slot diffs until being able to reach targetIndex, where the exported tree is at slotIndex := iotago.SlotIndex(1) - - // TODO: shouldn't that be from last finalized slot? maxCommittableAge := m.apiProvider.APIForSlot(targetIndex).ProtocolParameters().MaxCommittableAge() + if targetIndex > maxCommittableAge { slotIndex = targetIndex - maxCommittableAge } diff --git a/pkg/protocol/enginemanager/enginemanager.go b/pkg/protocol/enginemanager/enginemanager.go index fdb4ea57b..96da59c00 100644 --- a/pkg/protocol/enginemanager/enginemanager.go +++ b/pkg/protocol/enginemanager/enginemanager.go @@ -197,32 +197,9 @@ func (e *EngineManager) loadEngineInstanceFromSnapshot(engineAlias string, snaps e.errorHandler(ierrors.Wrapf(err, "engine (%s)", engineAlias[0:8])) } - newEngine := engine.New(e.workers.CreateGroup(engineAlias), - errorHandler, - storage.New(e.directory.Path(engineAlias), e.dbVersion, errorHandler, e.storageOptions...), - e.filterProvider, - e.commitmentFilterProvider, - e.blockDAGProvider, - e.bookerProvider, - e.clockProvider, - e.blockGadgetProvider, - e.slotGadgetProvider, - e.sybilProtectionProvider, - e.notarizationProvider, - e.attestationProvider, - e.ledgerProvider, - e.schedulerProvider, - e.tipManagerProvider, - e.tipSelectionProvider, - e.retainerProvider, - e.upgradeOrchestratorProvider, - e.syncManagerProvider, - append(e.engineOptions, engine.WithSnapshotPath(snapshotPath))..., - ) + e.engineOptions = append(e.engineOptions, engine.WithSnapshotPath(snapshotPath)) - e.engineCreated.Trigger(newEngine) - - return newEngine + return e.loadEngineInstanceWithStorage(engineAlias, storage.Create(e.directory.Path(engineAlias), e.dbVersion, errorHandler, e.storageOptions...)) } func (e *EngineManager) loadEngineInstanceWithStorage(engineAlias string, storage *storage.Storage) *engine.Engine { @@ -265,7 +242,7 @@ func (e *EngineManager) ForkEngineAtSlot(index iotago.SlotIndex) (*engine.Engine } // Copy raw data on disk. - newStorage, err := storage.CloneStorage(e.activeInstance.Storage, e.directory.Path(engineAlias), e.dbVersion, errorHandler, e.storageOptions...) 
+ newStorage, err := storage.Clone(e.activeInstance.Storage, e.directory.Path(engineAlias), e.dbVersion, errorHandler, e.storageOptions...) if err != nil { return nil, ierrors.Wrapf(err, "failed to copy storage from active engine instance (%s) to new engine instance (%s)", e.activeInstance.Storage.Directory(), e.directory.Path(engineAlias)) } diff --git a/pkg/protocol/snapshotcreator/snapshotcreator.go b/pkg/protocol/snapshotcreator/snapshotcreator.go index e12ce74cd..a14add174 100644 --- a/pkg/protocol/snapshotcreator/snapshotcreator.go +++ b/pkg/protocol/snapshotcreator/snapshotcreator.go @@ -54,7 +54,7 @@ func CreateSnapshot(opts ...options.Option[Options]) error { workers := workerpool.NewGroup("CreateSnapshot") defer workers.Shutdown() - s := storage.New(lo.PanicOnErr(os.MkdirTemp(os.TempDir(), "*")), opt.DataBaseVersion, errorHandler) + s := storage.Create(lo.PanicOnErr(os.MkdirTemp(os.TempDir(), "*")), opt.DataBaseVersion, errorHandler) defer s.Shutdown() if err := s.Settings().StoreProtocolParametersForStartEpoch(opt.ProtocolParameters, 0); err != nil { diff --git a/pkg/storage/database/db_instance.go b/pkg/storage/database/db_instance.go index d072cdf0a..14cdabeaf 100644 --- a/pkg/storage/database/db_instance.go +++ b/pkg/storage/database/db_instance.go @@ -4,7 +4,6 @@ import ( "github.com/iotaledger/hive.go/ierrors" "github.com/iotaledger/hive.go/kvstore" "github.com/iotaledger/hive.go/lo" - "github.com/iotaledger/hive.go/runtime/syncutils" ) type DBInstance struct { @@ -19,14 +18,7 @@ func NewDBInstance(dbConfig Config) *DBInstance { panic(err) } - lockableKVStore := &lockedKVStore{ - openableKVStore: &openableKVStore{ - storeInstance: db, - parentStore: nil, - dbPrefix: kvstore.EmptyPrefix, - }, - instanceMutex: new(syncutils.RWMutex), - } + lockableKVStore := newLockedKVStore(db) // HealthTracker state is only modified while holding the lock on the lockableKVStore; // that's why it needs to use openableKVStore (which does not lock) instead of lockableKVStore to avoid a deadlock. 
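The storage.go changes further down split construction into three entry points: New only applies options, Create additionally initializes fresh permanent and prunable stores (used by the snapshot creator above), and Clone copies them from a source storage before opening (used by ForkEngineAtSlot above). A rough sketch of that constructor split with placeholder types, not the real storage package:

// DB is a placeholder standing in for storage.Storage.
type DB struct {
	dir       string
	permanent string // stands in for the permanent store
	prunable  string // stands in for the prunable store
}

// New only wires up the struct and its options.
func New(dir string) *DB { return &DB{dir: dir} }

// Create builds a fresh instance and initializes empty stores.
func Create(dir string) *DB {
	db := New(dir)
	db.permanent, db.prunable = "new-permanent", "new-prunable"
	return db
}

// Clone builds an instance whose stores are copied from a source
// (in the real code this is a directory copy on disk).
func Clone(source *DB, dir string) (*DB, error) {
	db := New(dir)
	db.permanent, db.prunable = source.permanent, source.prunable
	return db, nil
}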
diff --git a/pkg/storage/database/lockedkvstore.go b/pkg/storage/database/lockedkvstore.go index 3fa4bf1aa..f72558bc5 100644 --- a/pkg/storage/database/lockedkvstore.go +++ b/pkg/storage/database/lockedkvstore.go @@ -14,6 +14,13 @@ type lockedKVStore struct { instanceMutex *syncutils.RWMutex } +func newLockedKVStore(storeInstance kvstore.KVStore) *lockedKVStore { + return &lockedKVStore{ + openableKVStore: newOpenableKVStore(storeInstance), + instanceMutex: new(syncutils.RWMutex), + } +} + func (s *lockedKVStore) Lock() { s.instanceMutex.Lock() } diff --git a/pkg/storage/database/openablekvstore.go b/pkg/storage/database/openablekvstore.go index 953ac0188..9ff04df3a 100644 --- a/pkg/storage/database/openablekvstore.go +++ b/pkg/storage/database/openablekvstore.go @@ -15,6 +15,14 @@ type openableKVStore struct { dbPrefix kvstore.KeyPrefix } +func newOpenableKVStore(storeInstance kvstore.KVStore) *openableKVStore { + return &openableKVStore{ + storeInstance: storeInstance, + parentStore: nil, + dbPrefix: kvstore.EmptyPrefix, + } +} + func (s *openableKVStore) instance() kvstore.KVStore { if s.storeInstance != nil { return s.storeInstance diff --git a/pkg/storage/storage.go b/pkg/storage/storage.go index c20cc7a10..f8b3dec60 100644 --- a/pkg/storage/storage.go +++ b/pkg/storage/storage.go @@ -64,32 +64,30 @@ func New(directory string, dbVersion byte, errorHandler func(error), opts ...opt optsPruningSizeMaxTargetSizeBytes: 30 * 1024 * 1024 * 1024, // 30GB optsPruningSizeReductionPercentage: 0.1, optsPruningSizeCooldownTime: 5 * time.Minute, - }, opts, - func(s *Storage) { - dbConfig := database.Config{ - Engine: s.optsDBEngine, - Directory: s.dir.PathWithCreate(permanentDirName), - Version: dbVersion, - PrefixHealth: []byte{storePrefixHealth}, - } - - s.permanent = permanent.New(dbConfig, errorHandler, s.optsPermanent...) - s.prunable = prunable.New(dbConfig.WithDirectory(s.dir.PathWithCreate(prunableDirName)), s.Settings().APIProvider(), s.errorHandler, s.optsBucketManagerOptions...) - }) + }, opts) } -func CloneStorage(source *Storage, directory string, dbVersion byte, errorHandler func(error), opts ...options.Option[Storage]) (*Storage, error) { - s := options.Apply(&Storage{ - dir: utils.NewDirectory(directory, true), - errorHandler: errorHandler, - lastPrunedEpoch: model.NewEvictionIndex[iotago.EpochIndex](), - optsDBEngine: hivedb.EngineRocksDB, - optsPruningDelay: 30, - optPruningSizeEnabled: false, - optsPruningSizeMaxTargetSizeBytes: 30 * 1024 * 1024 * 1024, // 30GB - optsPruningSizeReductionPercentage: 0.1, - optsPruningSizeCooldownTime: 5 * time.Minute, - }, opts) +// Create creates a new storage instance with the named database version in the given directory and initializes its permanent +// and prunable counterparts. +func Create(directory string, dbVersion byte, errorHandler func(error), opts ...options.Option[Storage]) *Storage { + s := New(directory, dbVersion, errorHandler, opts...) + dbConfig := database.Config{ + Engine: s.optsDBEngine, + Directory: s.dir.PathWithCreate(permanentDirName), + Version: dbVersion, + PrefixHealth: []byte{storePrefixHealth}, + } + + s.permanent = permanent.New(dbConfig, errorHandler, s.optsPermanent...) + s.prunable = prunable.New(dbConfig.WithDirectory(s.dir.PathWithCreate(prunableDirName)), s.Settings().APIProvider(), s.errorHandler, s.optsBucketManagerOptions...) 
+ + return s +} + +// Clone creates a new storage instance with the named database version in the given directory and cloning the permannent +// and prunable counterparts from the given source storage. +func Clone(source *Storage, directory string, dbVersion byte, errorHandler func(error), opts ...options.Option[Storage]) (*Storage, error) { + s := New(directory, dbVersion, errorHandler, opts...) dbConfig := database.Config{ Engine: s.optsDBEngine, @@ -142,4 +140,4 @@ func (s *Storage) Shutdown() { func (s *Storage) Flush() { s.permanent.Flush() s.prunable.Flush() -} \ No newline at end of file +} diff --git a/pkg/storage/storage_test.go b/pkg/storage/storage_test.go index b050cd7f0..a6e4623dd 100644 --- a/pkg/storage/storage_test.go +++ b/pkg/storage/storage_test.go @@ -222,7 +222,7 @@ func TestStorage_CopyFromForkedStorageEmpty(t *testing.T) { } tf1.GeneratePermanentData(1 * MB) - clonedStorage, err := storage.CloneStorage(tf1.Instance, t.TempDir(), 0, func(err error) { + clonedStorage, err := storage.Clone(tf1.Instance, t.TempDir(), 0, func(err error) { t.Log(err) }) require.NoError(t, err) diff --git a/pkg/storage/testframework_test.go b/pkg/storage/testframework_test.go index bfba64a4c..7a75c2407 100644 --- a/pkg/storage/testframework_test.go +++ b/pkg/storage/testframework_test.go @@ -48,7 +48,7 @@ func NewTestFramework(t *testing.T, baseDir string, storageOpts ...options.Optio } storageFactoryFunc := func() *storage.Storage { - instance := storage.New(baseDir, 0, errorHandler, storageOpts...) + instance := storage.Create(baseDir, 0, errorHandler, storageOpts...) require.NoError(t, instance.Settings().StoreProtocolParametersForStartEpoch(iotago.NewV3ProtocolParameters(), 0)) return instance @@ -210,8 +210,8 @@ func (t *TestFramework) AssertPrunedUntil( expectedDecidedUpgrades *types.Tuple[int, bool], expectedPoolStats *types.Tuple[int, bool], expectedCommittee *types.Tuple[int, bool], - expectedRewards *types.Tuple[int, bool]) { - + expectedRewards *types.Tuple[int, bool], +) { t.assertPrunedState(expectedPrunable, t.Instance.LastPrunedEpoch, "prunable") t.assertPrunedState(expectedPoolStats, t.Instance.PoolStats().LastPrunedEpoch, "pool stats") t.assertPrunedState(expectedDecidedUpgrades, t.Instance.DecidedUpgradeSignals().LastPrunedEpoch, "decided upgrades") From fd800e396b223cb4fe4f11a75adc1dde5f91024d Mon Sep 17 00:00:00 2001 From: Andrea V <1577639+karimodm@users.noreply.github.com> Date: Tue, 19 Sep 2023 15:18:54 +0200 Subject: [PATCH 16/17] Unused dbVersion in base Storage constructor --- pkg/storage/database/lockedkvstore.go | 4 +++- pkg/storage/storage.go | 6 +++--- 2 files changed, 6 insertions(+), 4 deletions(-) diff --git a/pkg/storage/database/lockedkvstore.go b/pkg/storage/database/lockedkvstore.go index f72558bc5..cb365f46e 100644 --- a/pkg/storage/database/lockedkvstore.go +++ b/pkg/storage/database/lockedkvstore.go @@ -17,7 +17,7 @@ type lockedKVStore struct { func newLockedKVStore(storeInstance kvstore.KVStore) *lockedKVStore { return &lockedKVStore{ openableKVStore: newOpenableKVStore(storeInstance), - instanceMutex: new(syncutils.RWMutex), + instanceMutex: new(syncutils.RWMutex), } } @@ -35,6 +35,7 @@ func (s *lockedKVStore) WithRealm(realm kvstore.Realm) (kvstore.KVStore, error) return s.withRealm(realm) } + func (s *lockedKVStore) withRealm(realm kvstore.Realm) (kvstore.KVStore, error) { return &lockedKVStore{ openableKVStore: &openableKVStore{ @@ -131,6 +132,7 @@ func (s *lockedKVStore) Close() error { return s.CloseWithoutLocking() } + func (s *lockedKVStore) 
CloseWithoutLocking() error { return s.openableKVStore.Close() } diff --git a/pkg/storage/storage.go b/pkg/storage/storage.go index f8b3dec60..85fc0eb3e 100644 --- a/pkg/storage/storage.go +++ b/pkg/storage/storage.go @@ -53,7 +53,7 @@ type Storage struct { } // New creates a new storage instance with the named database version in the given directory. -func New(directory string, dbVersion byte, errorHandler func(error), opts ...options.Option[Storage]) *Storage { +func New(directory string, errorHandler func(error), opts ...options.Option[Storage]) *Storage { return options.Apply(&Storage{ dir: utils.NewDirectory(directory, true), errorHandler: errorHandler, @@ -70,7 +70,7 @@ func New(directory string, dbVersion byte, errorHandler func(error), opts ...opt // Create creates a new storage instance with the named database version in the given directory and initializes its permanent // and prunable counterparts. func Create(directory string, dbVersion byte, errorHandler func(error), opts ...options.Option[Storage]) *Storage { - s := New(directory, dbVersion, errorHandler, opts...) + s := New(directory, errorHandler, opts...) dbConfig := database.Config{ Engine: s.optsDBEngine, Directory: s.dir.PathWithCreate(permanentDirName), @@ -87,7 +87,7 @@ func Create(directory string, dbVersion byte, errorHandler func(error), opts ... // Clone creates a new storage instance with the named database version in the given directory and cloning the permannent // and prunable counterparts from the given source storage. func Clone(source *Storage, directory string, dbVersion byte, errorHandler func(error), opts ...options.Option[Storage]) (*Storage, error) { - s := New(directory, dbVersion, errorHandler, opts...) + s := New(directory, errorHandler, opts...) dbConfig := database.Config{ Engine: s.optsDBEngine, From c237d8c458ef047e0dc4b2e20318250019d2a528 Mon Sep 17 00:00:00 2001 From: Andrea V <1577639+karimodm@users.noreply.github.com> Date: Tue, 19 Sep 2023 15:20:34 +0200 Subject: [PATCH 17/17] fumpt --- pkg/testsuite/mock/node.go | 40 +++++++++++++++++++------------------- 1 file changed, 20 insertions(+), 20 deletions(-) diff --git a/pkg/testsuite/mock/node.go b/pkg/testsuite/mock/node.go index 8986bc78e..d5905990d 100644 --- a/pkg/testsuite/mock/node.go +++ b/pkg/testsuite/mock/node.go @@ -344,85 +344,85 @@ func (n *Node) attachEngineLogs(failOnBlockFiltered bool, instance *engine.Engin events.BlockGadget.BlockPreAccepted.Hook(func(block *blocks.Block) { fmt.Printf("%s > [%s] Consensus.BlockGadget.BlockPreAccepted: %s %s\n", n.Name, engineName, block.ID(), block.ProtocolBlock().SlotCommitmentID) }) - + events.BlockGadget.BlockAccepted.Hook(func(block *blocks.Block) { fmt.Printf("%s > [%s] Consensus.BlockGadget.BlockAccepted: %s @ slot %s committing to %s\n", n.Name, engineName, block.ID(), block.ID().Index(), block.ProtocolBlock().SlotCommitmentID) }) - + events.BlockGadget.BlockPreConfirmed.Hook(func(block *blocks.Block) { fmt.Printf("%s > [%s] Consensus.BlockGadget.BlockPreConfirmed: %s %s\n", n.Name, engineName, block.ID(), block.ProtocolBlock().SlotCommitmentID) }) - + events.BlockGadget.BlockConfirmed.Hook(func(block *blocks.Block) { fmt.Printf("%s > [%s] Consensus.BlockGadget.BlockConfirmed: %s %s\n", n.Name, engineName, block.ID(), block.ProtocolBlock().SlotCommitmentID) }) - + events.SlotGadget.SlotFinalized.Hook(func(slotIndex iotago.SlotIndex) { fmt.Printf("%s > [%s] Consensus.SlotGadget.SlotFinalized: %s\n", n.Name, engineName, slotIndex) }) - + 
events.SeatManager.OnlineCommitteeSeatAdded.Hook(func(seat account.SeatIndex, accountID iotago.AccountID) { fmt.Printf("%s > [%s] SybilProtection.OnlineCommitteeSeatAdded: %d - %s\n", n.Name, engineName, seat, accountID) }) - + events.SeatManager.OnlineCommitteeSeatRemoved.Hook(func(seat account.SeatIndex) { fmt.Printf("%s > [%s] SybilProtection.OnlineCommitteeSeatRemoved: %d\n", n.Name, engineName, seat) }) - + events.ConflictDAG.ConflictCreated.Hook(func(conflictID iotago.TransactionID) { fmt.Printf("%s > [%s] ConflictDAG.ConflictCreated: %s\n", n.Name, engineName, conflictID) }) - + events.ConflictDAG.ConflictEvicted.Hook(func(conflictID iotago.TransactionID) { fmt.Printf("%s > [%s] ConflictDAG.ConflictEvicted: %s\n", n.Name, engineName, conflictID) }) events.ConflictDAG.ConflictRejected.Hook(func(conflictID iotago.TransactionID) { fmt.Printf("%s > [%s] ConflictDAG.ConflictRejected: %s\n", n.Name, engineName, conflictID) }) - + events.ConflictDAG.ConflictAccepted.Hook(func(conflictID iotago.TransactionID) { fmt.Printf("%s > [%s] ConflictDAG.ConflictAccepted: %s\n", n.Name, engineName, conflictID) }) - + instance.Ledger.OnTransactionAttached(func(transactionMetadata mempool.TransactionMetadata) { fmt.Printf("%s > [%s] Ledger.TransactionAttached: %s\n", n.Name, engineName, transactionMetadata.ID()) - + transactionMetadata.OnSolid(func() { fmt.Printf("%s > [%s] MemPool.TransactionSolid: %s\n", n.Name, engineName, transactionMetadata.ID()) }) - + transactionMetadata.OnExecuted(func() { fmt.Printf("%s > [%s] MemPool.TransactionExecuted: %s\n", n.Name, engineName, transactionMetadata.ID()) }) - + transactionMetadata.OnBooked(func() { fmt.Printf("%s > [%s] MemPool.TransactionBooked: %s\n", n.Name, engineName, transactionMetadata.ID()) }) - + transactionMetadata.OnConflicting(func() { fmt.Printf("%s > [%s] MemPool.TransactionConflicting: %s\n", n.Name, engineName, transactionMetadata.ID()) }) - + transactionMetadata.OnAccepted(func() { fmt.Printf("%s > [%s] MemPool.TransactionAccepted: %s\n", n.Name, engineName, transactionMetadata.ID()) }) - + transactionMetadata.OnRejected(func() { fmt.Printf("%s > [%s] MemPool.TransactionRejected: %s\n", n.Name, engineName, transactionMetadata.ID()) }) - + transactionMetadata.OnInvalid(func(err error) { fmt.Printf("%s > [%s] MemPool.TransactionInvalid(%s): %s\n", n.Name, engineName, err, transactionMetadata.ID()) }) - + transactionMetadata.OnOrphaned(func() { fmt.Printf("%s > [%s] MemPool.TransactionOrphaned: %s\n", n.Name, engineName, transactionMetadata.ID()) }) - + transactionMetadata.OnCommitted(func() { fmt.Printf("%s > [%s] MemPool.TransactionCommitted: %s\n", n.Name, engineName, transactionMetadata.ID()) }) - + transactionMetadata.OnPending(func() { fmt.Printf("%s > [%s] MemPool.TransactionPending: %s\n", n.Name, engineName, transactionMetadata.ID()) })