Merge rc spica patch rv3 into feat rv3 2024.12.18 #6682

Merged
1 change: 1 addition & 0 deletions storage/pruning/fullHistoryPruningStorer.go

@@ -184,6 +184,7 @@ func (fhps *FullHistoryPruningStorer) getOrOpenPersister(epoch uint32) (storage.
     }

     fhps.oldEpochsActivePersistersCache.Put([]byte(epochString), newPdata, 0)
+    log.Trace("full history pruning storer - init new storer", "epoch", epoch)
     fhps.persistersMapByEpoch[epoch] = newPdata

     return newPdata.getPersister(), nil
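Reviewer note on the added line: mx-chain-logger-go loggers take a message followed by alternating key/value pairs, and a TRACE entry is only emitted when the logger's level allows it. A minimal, self-contained sketch of that pattern; the logger id "storage/pruning" and the epoch value are illustrative assumptions, not taken from this diff:

package main

import (
    logger "github.com/multiversx/mx-chain-logger-go"
)

// Package-level logger, the usual pattern in mx-chain-go files; the id
// "storage/pruning" is assumed here for illustration only.
var log = logger.GetOrCreate("storage/pruning")

func main() {
    // Raise the level to TRACE so the new line is actually printed; at the
    // default level such entries are filtered out.
    _ = logger.SetLogLevel("*:TRACE")

    // Message plus alternating key/value pairs, matching the added line.
    log.Trace("full history pruning storer - init new storer", "epoch", uint32(42))
}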
31 changes: 31 additions & 0 deletions storage/pruning/fullHistoryPruningStorer_test.go

@@ -18,6 +18,7 @@ import (
     "github.com/multiversx/mx-chain-go/storage/factory"
     "github.com/multiversx/mx-chain-go/storage/pathmanager"
     "github.com/multiversx/mx-chain-go/storage/pruning"
+    "github.com/multiversx/mx-chain-go/testscommon"
     logger "github.com/multiversx/mx-chain-logger-go"
     "github.com/stretchr/testify/assert"
     "github.com/stretchr/testify/require"
@@ -399,3 +400,33 @@ func TestFullHistoryPruningStorer_IsInterfaceNil(t *testing.T) {
     fhps, _ = pruning.NewFullHistoryPruningStorer(fhArgs)
     require.False(t, fhps.IsInterfaceNil())
 }
+
+func TestFullHistoryPruningStorer_changeEpochClosesOldDbs(t *testing.T) {
+    t.Parallel()
+
+    shouldCleanCalled := false
+    args := getDefaultArgs()
+    fhArgs := pruning.FullHistoryStorerArgs{
+        StorerArgs:               args,
+        NumOfOldActivePersisters: 2,
+    }
+    fhArgs.OldDataCleanerProvider = &testscommon.OldDataCleanerProviderStub{
+        ShouldCleanCalled: func() bool {
+            shouldCleanCalled = true
+            return true
+        },
+    }
+    fhps, err := pruning.NewFullHistoryPruningStorer(fhArgs)
+    require.Nil(t, err)
+
+    numEpochsChanged := 10
+    startEpoch := uint32(0)
+    for i := 0; i < numEpochsChanged; i++ {
+        startEpoch++
+        key := []byte(fmt.Sprintf("key-%d", i))
+        _, _ = fhps.GetFromEpoch(key, startEpoch)
+        err = fhps.ChangeEpochSimple(startEpoch)
+        require.Nil(t, err)
+    }
+    require.True(t, shouldCleanCalled)
+}
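For reference, the test controls the cleanup decision through testscommon.OldDataCleanerProviderStub, whose definition is not part of this diff. A minimal sketch of the shape the test relies on, assuming the usual testscommon convention of methods delegating to optional function fields:

// OldDataCleanerProviderStub - sketch only, assuming the common
// testscommon stub pattern; not copied from the repository.
type OldDataCleanerProviderStub struct {
    ShouldCleanCalled func() bool
}

// ShouldClean returns the stubbed answer, or false when no behavior is set
func (stub *OldDataCleanerProviderStub) ShouldClean() bool {
    if stub.ShouldCleanCalled != nil {
        return stub.ShouldCleanCalled()
    }
    return false
}

// IsInterfaceNil returns true if there is no value under the interface
func (stub *OldDataCleanerProviderStub) IsInterfaceNil() bool {
    return stub == nil
}

With ShouldCleanCalled always returning true, every simulated epoch change is allowed to destroy persisters that fall out of the active window, and the shouldCleanCalled flag proves the storer actually consulted the provider during ChangeEpochSimple.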
10 changes: 8 additions & 2 deletions storage/pruning/pruningStorer.go

@@ -779,7 +779,7 @@ func (ps *PruningStorer) changeEpoch(header data.HeaderHandler) error {
         }
         log.Debug("change epoch pruning storer success", "persister", ps.identifier, "epoch", epoch)

-        return nil
+        return ps.removeOldPersistersIfNeeded(header)
     }

     shardID := core.GetShardIDString(ps.shardCoordinator.SelfId())
@@ -802,6 +802,11 @@ func (ps *PruningStorer) changeEpoch(header data.HeaderHandler) error {
     ps.activePersisters = append(singleItemPersisters, ps.activePersisters...)
     ps.persistersMapByEpoch[epoch] = newPersister

+    return ps.removeOldPersistersIfNeeded(header)
+}
+
+func (ps *PruningStorer) removeOldPersistersIfNeeded(header data.HeaderHandler) error {
+    epoch := header.GetEpoch()
     wasExtended := ps.extendSavedEpochsIfNeeded(header)
     if wasExtended {
         if len(ps.activePersisters) > int(ps.numOfActivePersisters) {
@@ -814,11 +819,12 @@ func (ps *PruningStorer) changeEpoch(header data.HeaderHandler) error {
             return nil
         }

-    err = ps.closeAndDestroyPersisters(epoch)
+    err := ps.closeAndDestroyPersisters(epoch)
     if err != nil {
         log.Warn("closing persisters", "error", err.Error())
        return err
     }
+
     return nil
 }
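Two things worth noting in this file. First, the change from err = to err := falls out of the extraction: inside the new removeOldPersistersIfNeeded there is no earlier declaration of err, so the plain assignment had to become a short variable declaration. Second, both exits of changeEpoch (the early return when a persister for the epoch already exists, and the path that creates a new one) now funnel through the same cleanup helper, which is what lets the new test observe old databases being closed. A self-contained toy sketch of that control-flow shape; illustrative only, not the repository's code:

package main

import "fmt"

// storer keeps a bounded window of "active" databases, newest first.
type storer struct {
    active []string
    max    int
}

// changeEpoch mirrors the patched control flow: the early-return path and
// the creation path both end in the shared cleanup helper.
func (s *storer) changeEpoch(epoch int, alreadyExists bool) error {
    if alreadyExists {
        // before the patch this path returned nil and skipped cleanup
        return s.removeOldIfNeeded(epoch)
    }
    s.active = append([]string{fmt.Sprintf("db-%d", epoch)}, s.active...)
    return s.removeOldIfNeeded(epoch)
}

// removeOldIfNeeded closes databases that fell out of the active window.
func (s *storer) removeOldIfNeeded(epoch int) error {
    for len(s.active) > s.max {
        oldest := s.active[len(s.active)-1]
        s.active = s.active[:len(s.active)-1]
        fmt.Println("epoch", epoch, "- closed old persister:", oldest)
    }
    return nil
}

func main() {
    s := &storer{max: 2}
    for e := 1; e <= 5; e++ {
        _ = s.changeEpoch(e, e%2 == 0)
    }
    fmt.Println("active:", s.active)
}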