Merge pull request #6667 from multiversx/test-pruning-storer
Test pruning storer
BeniaminDrasovean authored Dec 18, 2024
2 parents c5f7e5a + 90ef10c commit 6a74c03
Showing 3 changed files with 40 additions and 2 deletions.
1 change: 1 addition & 0 deletions storage/pruning/fullHistoryPruningStorer.go
@@ -184,6 +184,7 @@ func (fhps *FullHistoryPruningStorer) getOrOpenPersister(epoch uint32) (storage.
 	}
 
 	fhps.oldEpochsActivePersistersCache.Put([]byte(epochString), newPdata, 0)
+	log.Trace("full history pruning storer - init new storer", "epoch", epoch)
 	fhps.persistersMapByEpoch[epoch] = newPdata
 
 	return newPdata.getPersister(), nil
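The only functional change in this file is the extra trace entry written when an old-epoch persister is initialized. A minimal sketch of how that line can be surfaced while reproducing the scenario, assuming mx-chain-logger-go's SetLogLevel helper and its usual "*:TRACE" pattern (the exact pattern syntax is an assumption):

package main

import (
	logger "github.com/multiversx/mx-chain-logger-go"
)

func main() {
	// Assumption: SetLogLevel accepts "name:LEVEL" patterns with "*" as a
	// wildcard; raising everything to TRACE makes the new
	// "full history pruning storer - init new storer" entry visible.
	_ = logger.SetLogLevel("*:TRACE")

	// ... run the node or the test scenario that re-opens old epochs here ...
}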
31 changes: 31 additions & 0 deletions storage/pruning/fullHistoryPruningStorer_test.go
@@ -18,6 +18,7 @@ import (
 	"github.com/multiversx/mx-chain-go/storage/factory"
 	"github.com/multiversx/mx-chain-go/storage/pathmanager"
 	"github.com/multiversx/mx-chain-go/storage/pruning"
+	"github.com/multiversx/mx-chain-go/testscommon"
 	logger "github.com/multiversx/mx-chain-logger-go"
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
@@ -399,3 +400,33 @@ func TestFullHistoryPruningStorer_IsInterfaceNil(t *testing.T) {
 	fhps, _ = pruning.NewFullHistoryPruningStorer(fhArgs)
 	require.False(t, fhps.IsInterfaceNil())
 }
+
+func TestFullHistoryPruningStorer_changeEpochClosesOldDbs(t *testing.T) {
+	t.Parallel()
+
+	shouldCleanCalled := false
+	args := getDefaultArgs()
+	fhArgs := pruning.FullHistoryStorerArgs{
+		StorerArgs:               args,
+		NumOfOldActivePersisters: 2,
+	}
+	fhArgs.OldDataCleanerProvider = &testscommon.OldDataCleanerProviderStub{
+		ShouldCleanCalled: func() bool {
+			shouldCleanCalled = true
+			return true
+		},
+	}
+	fhps, err := pruning.NewFullHistoryPruningStorer(fhArgs)
+	require.Nil(t, err)
+
+	numEpochsChanged := 10
+	startEpoch := uint32(0)
+	for i := 0; i < numEpochsChanged; i++ {
+		startEpoch++
+		key := []byte(fmt.Sprintf("key-%d", i))
+		_, _ = fhps.GetFromEpoch(key, startEpoch)
+		err = fhps.ChangeEpochSimple(startEpoch)
+		require.Nil(t, err)
+	}
+	require.True(t, shouldCleanCalled)
+}
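The test injects an OldDataCleanerProviderStub from the testscommon package so it can observe that the cleanup path is reached on every epoch change. The real stub lives in that package; inferred purely from its usage above, it presumably looks roughly like this (a sketch, not the repository code):

// OldDataCleanerProviderStub - sketch inferred from the test above; only the
// ShouldCleanCalled field is confirmed by the diff, the rest is assumed.
type OldDataCleanerProviderStub struct {
	ShouldCleanCalled func() bool
}

// ShouldClean delegates to the injected handler when one is provided
func (stub *OldDataCleanerProviderStub) ShouldClean() bool {
	if stub.ShouldCleanCalled != nil {
		return stub.ShouldCleanCalled()
	}
	return false
}

// IsInterfaceNil returns true if there is no value under the interface
func (stub *OldDataCleanerProviderStub) IsInterfaceNil() bool {
	return stub == nil
}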
10 changes: 8 additions & 2 deletions storage/pruning/pruningStorer.go
@@ -779,7 +779,7 @@ func (ps *PruningStorer) changeEpoch(header data.HeaderHandler) error {
 		}
 		log.Debug("change epoch pruning storer success", "persister", ps.identifier, "epoch", epoch)
 
-		return nil
+		return ps.removeOldPersistersIfNeeded(header)
 	}
 
 	shardID := core.GetShardIDString(ps.shardCoordinator.SelfId())
@@ -802,6 +802,11 @@ func (ps *PruningStorer) changeEpoch(header data.HeaderHandler) error {
 	ps.activePersisters = append(singleItemPersisters, ps.activePersisters...)
 	ps.persistersMapByEpoch[epoch] = newPersister
 
+	return ps.removeOldPersistersIfNeeded(header)
+}
+
+func (ps *PruningStorer) removeOldPersistersIfNeeded(header data.HeaderHandler) error {
+	epoch := header.GetEpoch()
 	wasExtended := ps.extendSavedEpochsIfNeeded(header)
 	if wasExtended {
 		if len(ps.activePersisters) > int(ps.numOfActivePersisters) {
@@ -814,11 +819,12 @@ func (ps *PruningStorer) changeEpoch(header data.HeaderHandler) error {
 		return nil
 	}
 
-	err = ps.closeAndDestroyPersisters(epoch)
+	err := ps.closeAndDestroyPersisters(epoch)
 	if err != nil {
 		log.Warn("closing persisters", "error", err.Error())
 		return err
 	}
+
 	return nil
 }

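Read together, the three hunks extract the tail of changeEpoch into a removeOldPersistersIfNeeded helper that both the existing-persister path and the new-persister path now call. Assembled from the hunks above, the resulting helper reads roughly as follows (the region hidden between the second and third hunk is left elided):

func (ps *PruningStorer) removeOldPersistersIfNeeded(header data.HeaderHandler) error {
	epoch := header.GetEpoch()
	wasExtended := ps.extendSavedEpochsIfNeeded(header)
	if wasExtended {
		if len(ps.activePersisters) > int(ps.numOfActivePersisters) {
			// ... body not shown in the diff above ...
		}
		return nil
	}

	err := ps.closeAndDestroyPersisters(epoch)
	if err != nil {
		log.Warn("closing persisters", "error", err.Error())
		return err
	}

	return nil
}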
