diff --git a/.github/workflows/build_and_test.yml b/.github/workflows/build_and_test.yml index 10feacf5ef4..19fdaec07e0 100644 --- a/.github/workflows/build_and_test.yml +++ b/.github/workflows/build_and_test.yml @@ -1,17 +1,15 @@ -name: Build +name: Build and smoke test on: pull_request: - branches: [ master, rc/* ] - types: [opened, ready_for_review] - push: + branches: [master, rc/*] workflow_dispatch: jobs: build: strategy: matrix: - runs-on: [ubuntu-latest] + runs-on: [ubuntu-latest, macos-latest, macos-13-xlarge] runs-on: ${{ matrix.runs-on }} name: Build steps: @@ -28,12 +26,23 @@ jobs: run: | go get -v -t -d ./... if [ -f Gopkg.toml ]; then - curl https://raw.githubusercontent.com/golang/dep/master/install.sh | sh - dep ensure + curl https://raw.githubusercontent.com/golang/dep/master/install.sh | sh + dep ensure fi + - name: Build run: | cd ${GITHUB_WORKSPACE}/cmd/node && go build . + cd ${GITHUB_WORKSPACE}/cmd/seednode && go build . cd ${GITHUB_WORKSPACE}/cmd/keygenerator && go build . cd ${GITHUB_WORKSPACE}/cmd/logviewer && go build . cd ${GITHUB_WORKSPACE}/cmd/termui && go build . + + # On GitHub, we only run the short tests, and we only run them for some OS/ARCH combinations. + - name: Run tests + run: | + GOOS=$(go env GOOS) + + if [[ "$GOOS" == darwin ]]; then + go test -short -v ./... + fi diff --git a/.github/workflows/create_release.yml b/.github/workflows/create_release.yml index 9916e67d744..ca13a9f0313 100644 --- a/.github/workflows/create_release.yml +++ b/.github/workflows/create_release.yml @@ -15,7 +15,7 @@ jobs: build: strategy: matrix: - runs-on: [ubuntu-latest] # TODO add macos-latest when builds are possible on macs + runs-on: [ubuntu-latest, macos-latest, macos-13-xlarge] runs-on: ${{ matrix.runs-on }} name: Build steps: @@ -45,21 +45,23 @@ jobs: GOOS=$(go env GOOS) GOARCH=$(go env GOARCH) GOPATH=$(go env GOPATH) - ARCHIVE="multiversx_""$APP_VER_SHORT""_""$GOOS""_""$GOARCH"".tgz" + ARCHIVE="multiversx_""$APP_VER_SHORT""_""$GOOS""_""$GOARCH"".zip" BUILD_DIR=${GITHUB_WORKSPACE}/build - WASM_VERSION=$(cat go.mod | grep mx-chain-vm-v | sort -n | tail -n -1| awk -F '/' '{print$3}'| sed 's/ /@/g') - WASMER_DIR=${GOPATH}/pkg/mod/github.com/multiversx/${WASM_VERSION}/wasmer + VM_GO_VERSION=$(cat go.mod | grep mx-chain-vm-go | sort -n | tail -n -1| awk -F '/' '{print$3}'| sed 's/ /@/g') + VM_GO_DIR=${GOPATH}/pkg/mod/github.com/multiversx/${VM_GO_VERSION} echo "GOOS=${GOOS}" >> $GITHUB_ENV echo "GOARCH=${GOARCH}" >> $GITHUB_ENV echo "ARCHIVE=${ARCHIVE}" >> $GITHUB_ENV echo "BUILD_DIR=${BUILD_DIR}" >> $GITHUB_ENV - echo "WASMER_DIR=${WASMER_DIR}" >> $GITHUB_ENV + echo "VM_GO_VERSION=${VM_GO_VERSION}" >> $GITHUB_ENV + echo "VM_GO_DIR=${VM_GO_DIR}" >> $GITHUB_ENV - name: Build run: | mkdir -p ${BUILD_DIR} cd ${GITHUB_WORKSPACE}/cmd/node && go build -o "${BUILD_DIR}/node" -a -ldflags="-X main.appVersion=${APP_VER}" + cd ${GITHUB_WORKSPACE}/cmd/seednode && go build -o "${BUILD_DIR}/seednode" -a -ldflags="-X main.appVersion=${APP_VER}" cd ${GITHUB_WORKSPACE}/cmd/keygenerator && go build -o "${BUILD_DIR}/keygenerator" -a -ldflags="-X main.appVersion=${APP_VER}" cd ${GITHUB_WORKSPACE}/cmd/logviewer && go build -o "${BUILD_DIR}/logviewer" -a -ldflags="-X main.appVersion=${APP_VER}" cd ${GITHUB_WORKSPACE}/cmd/termui && go build -o "${BUILD_DIR}/termui" -a -ldflags="-X main.appVersion=${APP_VER}" @@ -69,24 +71,68 @@ jobs: cd ${GITHUB_WORKSPACE} if [[ "$GOOS" == linux && "$GOARCH" == amd64 ]]; then - cp -f ${WASMER_DIR}/libwasmer_linux_amd64.so ${BUILD_DIR}; + cp --verbose 
--no-preserve=mode,ownership ${VM_GO_DIR}/wasmer2/libvmexeccapi.so ${BUILD_DIR}/libvmexeccapi.so + cp --verbose --no-preserve=mode,ownership ${VM_GO_DIR}/wasmer/libwasmer_linux_amd64.so ${BUILD_DIR}/libwasmer_linux_amd64.so fi + + # Actually, there's no runner for this combination (as of March 2024). if [[ "$GOOS" == linux && "$GOARCH" == arm64 ]]; then - cp -f ${WASMER_DIR}/libwasmer_linux_arm64.so ${BUILD_DIR}; + cp --verbose --no-preserve=mode,ownership ${VM_GO_DIR}/wasmer2/libvmexeccapi_arm.so ${BUILD_DIR}/libvmexeccapi_arm.so + cp --verbose --no-preserve=mode,ownership ${VM_GO_DIR}/wasmer/libwasmer_linux_arm64_shim.so ${BUILD_DIR}/libwasmer_linux_arm64_shim.so fi + if [[ "$GOOS" == darwin && "$GOARCH" == amd64 ]]; then - cp -f ${WASMER_DIR}/libwasmer_darwin_amd64.dylib ${BUILD_DIR}; + cp -v ${VM_GO_DIR}/wasmer2/libvmexeccapi.dylib ${BUILD_DIR}/libvmexeccapi.dylib + cp -v ${VM_GO_DIR}/wasmer/libwasmer_darwin_amd64.dylib ${BUILD_DIR}/libwasmer_darwin_amd64.dylib + fi + + if [[ "$GOOS" == darwin && "$GOARCH" == arm64 ]]; then + cp -v ${VM_GO_DIR}/wasmer2/libvmexeccapi_arm.dylib ${BUILD_DIR}/libvmexeccapi_arm.dylib + cp -v ${VM_GO_DIR}/wasmer/libwasmer_darwin_arm64_shim.dylib ${BUILD_DIR}/libwasmer_darwin_arm64_shim.dylib fi - cd ${BUILD_DIR} - tar czvf "${GITHUB_WORKSPACE}/${ARCHIVE}" * - stat ${GITHUB_WORKSPACE}/${ARCHIVE} + if [[ "$GOOS" == linux ]]; then + patchelf --set-rpath "\$ORIGIN" ${BUILD_DIR}/node + patchelf --set-rpath "\$ORIGIN" ${BUILD_DIR}/seednode + + ldd ${BUILD_DIR}/node + ldd ${BUILD_DIR}/seednode + fi + + if [[ "$GOOS" == darwin ]]; then + install_name_tool -add_rpath "@loader_path" ${BUILD_DIR}/node + install_name_tool -add_rpath "@loader_path" ${BUILD_DIR}/seednode + + otool -L ${BUILD_DIR}/node + otool -L ${BUILD_DIR}/seednode + fi + + - name: Smoke test + run: | + # Remove all downloaded Go packages, so that we can test the binary's independence from them (think of Wasmer libraries). + sudo rm -rf ${GOPATH}/pkg/mod + + # Test binaries in different current directories. 
+ cd ${BUILD_DIR} && ./node --version + cd ${GITHUB_WORKSPACE} && ${BUILD_DIR}/node --version + cd / && ${BUILD_DIR}/node --version + + cd ${BUILD_DIR} && ./seednode --version + cd ${GITHUB_WORKSPACE} && ${BUILD_DIR}/seednode --version + cd / && ${BUILD_DIR}/seednode --version + + - name: Package build output + run: | + sudo chown -R $USER: ${BUILD_DIR} + chmod -R 755 ${BUILD_DIR} + ls -al ${BUILD_DIR} + zip -r -j ${ARCHIVE} ${BUILD_DIR} - name: Save artifacts uses: actions/upload-artifact@v3 with: name: ${{ env.ARCHIVE }} - path: ${{ github.workspace }}/${{ env.ARCHIVE }} + path: ${{ env.ARCHIVE }} if-no-files-found: error release: @@ -113,6 +159,6 @@ jobs: run: | gh release create --draft --notes="Release draft from Github Actions" vNext sleep 10 - for i in $(find ./assets -name '*.tgz' -type f); do + for i in $(find ./assets -name '*.zip' -type f); do gh release upload vNext ${i} done diff --git a/api/groups/nodeGroup.go b/api/groups/nodeGroup.go index fd61f481c39..e7025c033d9 100644 --- a/api/groups/nodeGroup.go +++ b/api/groups/nodeGroup.go @@ -28,6 +28,7 @@ const ( bootstrapStatusPath = "/bootstrapstatus" connectedPeersRatingsPath = "/connected-peers-ratings" managedKeys = "/managed-keys" + loadedKeys = "/loaded-keys" managedKeysCount = "/managed-keys/count" eligibleManagedKeys = "/managed-keys/eligible" waitingManagedKeys = "/managed-keys/waiting" @@ -44,6 +45,7 @@ type nodeFacadeHandler interface { GetConnectedPeersRatingsOnMainNetwork() (string, error) GetManagedKeysCount() int GetManagedKeys() []string + GetLoadedKeys() []string GetEligibleManagedKeys() ([]string, error) GetWaitingManagedKeys() ([]string, error) GetWaitingEpochsLeftForPublicKey(publicKey string) (uint32, error) @@ -129,6 +131,11 @@ func NewNodeGroup(facade nodeFacadeHandler) (*nodeGroup, error) { Method: http.MethodGet, Handler: ng.managedKeys, }, + { + Path: loadedKeys, + Method: http.MethodGet, + Handler: ng.loadedKeys, + }, { Path: eligibleManagedKeys, Method: http.MethodGet, @@ -411,6 +418,19 @@ func (ng *nodeGroup) managedKeys(c *gin.Context) { ) } +// loadedKeys returns all keys loaded by the current node +func (ng *nodeGroup) loadedKeys(c *gin.Context) { + keys := ng.getFacade().GetLoadedKeys() + c.JSON( + http.StatusOK, + shared.GenericAPIResponse{ + Data: gin.H{"loadedKeys": keys}, + Error: "", + Code: shared.ReturnCodeSuccess, + }, + ) +} + // managedKeysEligible returns the node's eligible managed keys func (ng *nodeGroup) managedKeysEligible(c *gin.Context) { keys, err := ng.getFacade().GetEligibleManagedKeys() diff --git a/api/groups/nodeGroup_test.go b/api/groups/nodeGroup_test.go index 6aa00d91693..4bc6e6c738e 100644 --- a/api/groups/nodeGroup_test.go +++ b/api/groups/nodeGroup_test.go @@ -81,6 +81,13 @@ type managedKeysResponse struct { generalResponse } +type loadedKeysResponse struct { + Data struct { + LoadedKeys []string `json:"loadedKeys"` + } `json:"data"` + generalResponse +} + type managedEligibleKeysResponse struct { Data struct { Keys []string `json:"eligibleKeys"` @@ -764,6 +771,36 @@ func TestNodeGroup_ManagedKeys(t *testing.T) { assert.Equal(t, providedKeys, response.Data.ManagedKeys) } +func TestNodeGroup_LoadedKeys(t *testing.T) { + t.Parallel() + + providedKeys := []string{ + "pk1", + "pk2", + } + facade := mock.FacadeStub{ + GetLoadedKeysCalled: func() []string { + return providedKeys + }, + } + + nodeGroup, err := groups.NewNodeGroup(&facade) + require.NoError(t, err) + + ws := startWebServer(nodeGroup, "node", getNodeRoutesConfig()) + + req, _ := http.NewRequest("GET", 
"/node/loaded-keys", nil) + resp := httptest.NewRecorder() + ws.ServeHTTP(resp, req) + + response := &loadedKeysResponse{} + loadResponse(resp.Body, response) + + assert.Equal(t, http.StatusOK, resp.Code) + assert.Equal(t, "", response.Error) + assert.Equal(t, providedKeys, response.Data.LoadedKeys) +} + func TestNodeGroup_ManagedKeysEligible(t *testing.T) { t.Parallel() @@ -1046,6 +1083,7 @@ func getNodeRoutesConfig() config.ApiRoutesConfig { {Name: "/connected-peers-ratings", Open: true}, {Name: "/managed-keys/count", Open: true}, {Name: "/managed-keys", Open: true}, + {Name: "/loaded-keys", Open: true}, {Name: "/managed-keys/eligible", Open: true}, {Name: "/managed-keys/waiting", Open: true}, {Name: "/waiting-epochs-left/:key", Open: true}, diff --git a/api/groups/validatorGroup.go b/api/groups/validatorGroup.go index f2c206b34f3..1120ae4186d 100644 --- a/api/groups/validatorGroup.go +++ b/api/groups/validatorGroup.go @@ -10,13 +10,18 @@ import ( "github.com/multiversx/mx-chain-core-go/data/validator" "github.com/multiversx/mx-chain-go/api/errors" "github.com/multiversx/mx-chain-go/api/shared" + "github.com/multiversx/mx-chain-go/common" ) -const statisticsPath = "/statistics" +const ( + statisticsPath = "/statistics" + auctionPath = "/auction" +) // validatorFacadeHandler defines the methods to be implemented by a facade for validator requests type validatorFacadeHandler interface { ValidatorStatisticsApi() (map[string]*validator.ValidatorStatistics, error) + AuctionListApi() ([]*common.AuctionListValidatorAPIResponse, error) IsInterfaceNil() bool } @@ -43,6 +48,11 @@ func NewValidatorGroup(facade validatorFacadeHandler) (*validatorGroup, error) { Method: http.MethodGet, Handler: ng.statistics, }, + { + Path: auctionPath, + Method: http.MethodGet, + Handler: ng.auction, + }, } ng.endpoints = endpoints @@ -74,6 +84,31 @@ func (vg *validatorGroup) statistics(c *gin.Context) { ) } +// auction will return the list of the validators in the auction list +func (vg *validatorGroup) auction(c *gin.Context) { + valStats, err := vg.getFacade().AuctionListApi() + if err != nil { + c.JSON( + http.StatusBadRequest, + shared.GenericAPIResponse{ + Data: nil, + Error: err.Error(), + Code: shared.ReturnCodeRequestError, + }, + ) + return + } + + c.JSON( + http.StatusOK, + shared.GenericAPIResponse{ + Data: gin.H{"auctionList": valStats}, + Error: "", + Code: shared.ReturnCodeSuccess, + }, + ) +} + func (vg *validatorGroup) getFacade() validatorFacadeHandler { vg.mutFacade.RLock() defer vg.mutFacade.RUnlock() diff --git a/api/groups/validatorGroup_test.go b/api/groups/validatorGroup_test.go index 0bb20a869cd..0bbd1ebf742 100644 --- a/api/groups/validatorGroup_test.go +++ b/api/groups/validatorGroup_test.go @@ -12,6 +12,7 @@ import ( "github.com/multiversx/mx-chain-go/api/groups" "github.com/multiversx/mx-chain-go/api/mock" "github.com/multiversx/mx-chain-go/api/shared" + "github.com/multiversx/mx-chain-go/common" "github.com/multiversx/mx-chain-go/config" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -34,11 +35,18 @@ func TestNewValidatorGroup(t *testing.T) { } // ValidatorStatisticsResponse is the response for the validator statistics endpoint. 
-type ValidatorStatisticsResponse struct { +type validatorStatisticsResponse struct { Result map[string]*validator.ValidatorStatistics `json:"statistics"` Error string `json:"error"` } +type auctionListResponse struct { + Data struct { + Result []*common.AuctionListValidatorAPIResponse `json:"auctionList"` + } `json:"data"` + Error string +} + func TestValidatorStatistics_ErrorWhenFacadeFails(t *testing.T) { t.Parallel() @@ -60,7 +68,7 @@ func TestValidatorStatistics_ErrorWhenFacadeFails(t *testing.T) { resp := httptest.NewRecorder() ws.ServeHTTP(resp, req) - response := ValidatorStatisticsResponse{} + response := validatorStatisticsResponse{} loadResponse(resp.Body, &response) assert.Equal(t, http.StatusBadRequest, resp.Code) @@ -97,7 +105,7 @@ func TestValidatorStatistics_ReturnsSuccessfully(t *testing.T) { response := shared.GenericAPIResponse{} loadResponse(resp.Body, &response) - validatorStatistics := ValidatorStatisticsResponse{} + validatorStatistics := validatorStatisticsResponse{} mapResponseData := response.Data.(map[string]interface{}) mapResponseDataBytes, _ := json.Marshal(mapResponseData) _ = json.Unmarshal(mapResponseDataBytes, &validatorStatistics) @@ -147,14 +155,13 @@ func TestValidatorGroup_UpdateFacade(t *testing.T) { require.NoError(t, err) ws := startWebServer(validatorGroup, "validator", getValidatorRoutesConfig()) - req, _ := http.NewRequest("GET", "/validator/statistics", nil) resp := httptest.NewRecorder() ws.ServeHTTP(resp, req) response := shared.GenericAPIResponse{} loadResponse(resp.Body, &response) - validatorStatistics := ValidatorStatisticsResponse{} + validatorStatistics := validatorStatisticsResponse{} mapResponseData := response.Data.(map[string]interface{}) mapResponseDataBytes, _ := json.Marshal(mapResponseData) _ = json.Unmarshal(mapResponseDataBytes, &validatorStatistics) @@ -191,12 +198,71 @@ func TestValidatorGroup_IsInterfaceNil(t *testing.T) { require.False(t, validatorGroup.IsInterfaceNil()) } +func TestAuctionList_ErrorWhenFacadeFails(t *testing.T) { + t.Parallel() + + errStr := "error in facade" + facade := mock.FacadeStub{ + AuctionListHandler: func() ([]*common.AuctionListValidatorAPIResponse, error) { + return nil, errors.New(errStr) + }, + } + + validatorGroup, err := groups.NewValidatorGroup(&facade) + require.NoError(t, err) + + ws := startWebServer(validatorGroup, "validator", getValidatorRoutesConfig()) + req, _ := http.NewRequest("GET", "/validator/auction", nil) + resp := httptest.NewRecorder() + ws.ServeHTTP(resp, req) + + response := auctionListResponse{} + loadResponse(resp.Body, &response) + + assert.Equal(t, http.StatusBadRequest, resp.Code) + assert.Contains(t, response.Error, errStr) +} + +func TestAuctionList_ReturnsSuccessfully(t *testing.T) { + t.Parallel() + + auctionListToReturn := []*common.AuctionListValidatorAPIResponse{ + { + Owner: "owner", + NumStakedNodes: 4, + TotalTopUp: "1234", + TopUpPerNode: "4321", + QualifiedTopUp: "4444", + }, + } + facade := mock.FacadeStub{ + AuctionListHandler: func() ([]*common.AuctionListValidatorAPIResponse, error) { + return auctionListToReturn, nil + }, + } + + validatorGroup, err := groups.NewValidatorGroup(&facade) + require.NoError(t, err) + + ws := startWebServer(validatorGroup, "validator", getValidatorRoutesConfig()) + req, _ := http.NewRequest("GET", "/validator/auction", nil) + resp := httptest.NewRecorder() + ws.ServeHTTP(resp, req) + + response := auctionListResponse{} + loadResponse(resp.Body, &response) + + assert.Equal(t, http.StatusOK, resp.Code) + assert.Equal(t, 
response.Data.Result, auctionListToReturn) +} + func getValidatorRoutesConfig() config.ApiRoutesConfig { return config.ApiRoutesConfig{ APIPackages: map[string]config.APIPackageConfig{ "validator": { Routes: []config.RouteConfig{ {Name: "/statistics", Open: true}, + {Name: "/auction", Open: true}, }, }, }, diff --git a/api/mock/facadeStub.go b/api/mock/facadeStub.go index 50572622897..e40645c1ac3 100644 --- a/api/mock/facadeStub.go +++ b/api/mock/facadeStub.go @@ -91,10 +91,12 @@ type FacadeStub struct { IsDataTrieMigratedCalled func(address string, options api.AccountQueryOptions) (bool, error) GetManagedKeysCountCalled func() int GetManagedKeysCalled func() []string + GetLoadedKeysCalled func() []string GetEligibleManagedKeysCalled func() ([]string, error) GetWaitingManagedKeysCalled func() ([]string, error) GetWaitingEpochsLeftForPublicKeyCalled func(publicKey string) (uint32, error) P2PPrometheusMetricsEnabledCalled func() bool + AuctionListHandler func() ([]*common.AuctionListValidatorAPIResponse, error) } // GetTokenSupply - @@ -195,12 +197,20 @@ func (f *FacadeStub) PprofEnabled() bool { // GetHeartbeats returns the slice of heartbeat info func (f *FacadeStub) GetHeartbeats() ([]data.PubKeyHeartbeat, error) { - return f.GetHeartbeatsHandler() + if f.GetHeartbeatsHandler != nil { + return f.GetHeartbeatsHandler() + } + + return nil, nil } // GetBalance is the mock implementation of a handler's GetBalance method func (f *FacadeStub) GetBalance(address string, options api.AccountQueryOptions) (*big.Int, api.BlockInfo, error) { - return f.GetBalanceCalled(address, options) + if f.GetBalanceCalled != nil { + return f.GetBalanceCalled(address, options) + } + + return nil, api.BlockInfo{}, nil } // GetValueForKey is the mock implementation of a handler's GetValueForKey method @@ -285,7 +295,11 @@ func (f *FacadeStub) GetAllIssuedESDTs(tokenType string) ([]string, error) { // GetAccount - func (f *FacadeStub) GetAccount(address string, options api.AccountQueryOptions) (api.AccountResponse, api.BlockInfo, error) { - return f.GetAccountCalled(address, options) + if f.GetAccountCalled != nil { + return f.GetAccountCalled(address, options) + } + + return api.AccountResponse{}, api.BlockInfo{}, nil } // GetAccounts - @@ -299,72 +313,137 @@ func (f *FacadeStub) GetAccounts(addresses []string, options api.AccountQueryOpt // CreateTransaction is mock implementation of a handler's CreateTransaction method func (f *FacadeStub) CreateTransaction(txArgs *external.ArgsCreateTransaction) (*transaction.Transaction, []byte, error) { - return f.CreateTransactionHandler(txArgs) + if f.CreateTransactionHandler != nil { + return f.CreateTransactionHandler(txArgs) + } + + return nil, nil, nil } // GetTransaction is the mock implementation of a handler's GetTransaction method func (f *FacadeStub) GetTransaction(hash string, withResults bool) (*transaction.ApiTransactionResult, error) { - return f.GetTransactionHandler(hash, withResults) + if f.GetTransactionHandler != nil { + return f.GetTransactionHandler(hash, withResults) + } + + return nil, nil } // SimulateTransactionExecution is the mock implementation of a handler's SimulateTransactionExecution method func (f *FacadeStub) SimulateTransactionExecution(tx *transaction.Transaction) (*txSimData.SimulationResultsWithVMOutput, error) { - return f.SimulateTransactionExecutionHandler(tx) + if f.SimulateTransactionExecutionHandler != nil { + return f.SimulateTransactionExecutionHandler(tx) + } + + return nil, nil } // SendBulkTransactions is the mock 
implementation of a handler's SendBulkTransactions method func (f *FacadeStub) SendBulkTransactions(txs []*transaction.Transaction) (uint64, error) { - return f.SendBulkTransactionsHandler(txs) + if f.SendBulkTransactionsHandler != nil { + return f.SendBulkTransactionsHandler(txs) + } + + return 0, nil } // ValidateTransaction - func (f *FacadeStub) ValidateTransaction(tx *transaction.Transaction) error { - return f.ValidateTransactionHandler(tx) + if f.ValidateTransactionHandler != nil { + return f.ValidateTransactionHandler(tx) + } + + return nil } // ValidateTransactionForSimulation - func (f *FacadeStub) ValidateTransactionForSimulation(tx *transaction.Transaction, bypassSignature bool) error { - return f.ValidateTransactionForSimulationHandler(tx, bypassSignature) + if f.ValidateTransactionForSimulationHandler != nil { + return f.ValidateTransactionForSimulationHandler(tx, bypassSignature) + } + + return nil } // ValidatorStatisticsApi is the mock implementation of a handler's ValidatorStatisticsApi method func (f *FacadeStub) ValidatorStatisticsApi() (map[string]*validator.ValidatorStatistics, error) { - return f.ValidatorStatisticsHandler() + if f.ValidatorStatisticsHandler != nil { + return f.ValidatorStatisticsHandler() + } + + return nil, nil +} + +// AuctionListApi is the mock implementation of a handler's AuctionListApi method +func (f *FacadeStub) AuctionListApi() ([]*common.AuctionListValidatorAPIResponse, error) { + if f.AuctionListHandler != nil { + return f.AuctionListHandler() + } + + return nil, nil } // ExecuteSCQuery is a mock implementation. func (f *FacadeStub) ExecuteSCQuery(query *process.SCQuery) (*vm.VMOutputApi, api.BlockInfo, error) { - return f.ExecuteSCQueryHandler(query) + if f.ExecuteSCQueryHandler != nil { + return f.ExecuteSCQueryHandler(query) + } + + return nil, api.BlockInfo{}, nil } // StatusMetrics is the mock implementation for the StatusMetrics func (f *FacadeStub) StatusMetrics() external.StatusMetricsHandler { - return f.StatusMetricsHandler() + if f.StatusMetricsHandler != nil { + return f.StatusMetricsHandler() + } + + return nil } // GetTotalStakedValue - func (f *FacadeStub) GetTotalStakedValue() (*api.StakeValues, error) { - return f.GetTotalStakedValueHandler() + if f.GetTotalStakedValueHandler != nil { + return f.GetTotalStakedValueHandler() + } + + return nil, nil } // GetDirectStakedList - func (f *FacadeStub) GetDirectStakedList() ([]*api.DirectStakedValue, error) { - return f.GetDirectStakedListHandler() + if f.GetDirectStakedListHandler != nil { + return f.GetDirectStakedListHandler() + } + + return nil, nil } // GetDelegatorsList - func (f *FacadeStub) GetDelegatorsList() ([]*api.Delegator, error) { - return f.GetDelegatorsListHandler() + if f.GetDelegatorsListHandler != nil { + return f.GetDelegatorsListHandler() + } + + return nil, nil } // ComputeTransactionGasLimit - func (f *FacadeStub) ComputeTransactionGasLimit(tx *transaction.Transaction) (*transaction.CostResponse, error) { - return f.ComputeTransactionGasLimitHandler(tx) + if f.ComputeTransactionGasLimitHandler != nil { + return f.ComputeTransactionGasLimitHandler(tx) + } + + return nil, nil } // NodeConfig - func (f *FacadeStub) NodeConfig() map[string]interface{} { - return f.NodeConfigCalled() + if f.NodeConfigCalled != nil { + return f.NodeConfigCalled() + } + + return nil } // EncodeAddressPubkey - @@ -382,17 +461,29 @@ func (f *FacadeStub) DecodeAddressPubkey(pk string) ([]byte, error) { // GetQueryHandler - func (f *FacadeStub) GetQueryHandler(name string) 
(debug.QueryHandler, error) { - return f.GetQueryHandlerCalled(name) + if f.GetQueryHandlerCalled != nil { + return f.GetQueryHandlerCalled(name) + } + + return nil, nil } // GetPeerInfo - func (f *FacadeStub) GetPeerInfo(pid string) ([]core.QueryP2PPeerInfo, error) { - return f.GetPeerInfoCalled(pid) + if f.GetPeerInfoCalled != nil { + return f.GetPeerInfoCalled(pid) + } + + return nil, nil } // GetConnectedPeersRatingsOnMainNetwork - func (f *FacadeStub) GetConnectedPeersRatingsOnMainNetwork() (string, error) { - return f.GetConnectedPeersRatingsOnMainNetworkCalled() + if f.GetConnectedPeersRatingsOnMainNetworkCalled != nil { + return f.GetConnectedPeersRatingsOnMainNetworkCalled() + } + + return "", nil } // GetEpochStartDataAPI - @@ -402,12 +493,20 @@ func (f *FacadeStub) GetEpochStartDataAPI(epoch uint32) (*common.EpochStartDataA // GetBlockByNonce - func (f *FacadeStub) GetBlockByNonce(nonce uint64, options api.BlockQueryOptions) (*api.Block, error) { - return f.GetBlockByNonceCalled(nonce, options) + if f.GetBlockByNonceCalled != nil { + return f.GetBlockByNonceCalled(nonce, options) + } + + return nil, nil } // GetBlockByHash - func (f *FacadeStub) GetBlockByHash(hash string, options api.BlockQueryOptions) (*api.Block, error) { - return f.GetBlockByHashCalled(hash, options) + if f.GetBlockByHashCalled != nil { + return f.GetBlockByHashCalled(hash, options) + } + + return nil, nil } // GetBlockByRound - @@ -596,6 +695,14 @@ func (f *FacadeStub) GetManagedKeys() []string { return make([]string, 0) } +// GetLoadedKeys - +func (f *FacadeStub) GetLoadedKeys() []string { + if f.GetLoadedKeysCalled != nil { + return f.GetLoadedKeysCalled() + } + return make([]string, 0) +} + // GetEligibleManagedKeys - func (f *FacadeStub) GetEligibleManagedKeys() ([]string, error) { if f.GetEligibleManagedKeysCalled != nil { diff --git a/api/shared/interface.go b/api/shared/interface.go index 9be6e66c7b8..4b775ebdd39 100644 --- a/api/shared/interface.go +++ b/api/shared/interface.go @@ -115,6 +115,7 @@ type FacadeHandler interface { ComputeTransactionGasLimit(tx *transaction.Transaction) (*transaction.CostResponse, error) EncodeAddressPubkey(pk []byte) (string, error) ValidatorStatisticsApi() (map[string]*validator.ValidatorStatistics, error) + AuctionListApi() ([]*common.AuctionListValidatorAPIResponse, error) ExecuteSCQuery(*process.SCQuery) (*vm.VMOutputApi, api.BlockInfo, error) DecodeAddressPubkey(pk string) ([]byte, error) RestApiInterface() string @@ -130,6 +131,7 @@ type FacadeHandler interface { IsDataTrieMigrated(address string, options api.AccountQueryOptions) (bool, error) GetManagedKeysCount() int GetManagedKeys() []string + GetLoadedKeys() []string GetEligibleManagedKeys() ([]string, error) GetWaitingManagedKeys() ([]string, error) GetWaitingEpochsLeftForPublicKey(publicKey string) (uint32, error) diff --git a/cmd/assessment/main.go b/cmd/assessment/main.go index 8e61205de2b..47642c03faa 100644 --- a/cmd/assessment/main.go +++ b/cmd/assessment/main.go @@ -12,7 +12,7 @@ import ( "github.com/multiversx/mx-chain-core-go/core" "github.com/multiversx/mx-chain-go/cmd/assessment/benchmarks" "github.com/multiversx/mx-chain-go/cmd/assessment/benchmarks/factory" - "github.com/multiversx/mx-chain-go/cmd/assessment/hostParameters" + "github.com/multiversx/mx-chain-go/common/hostParameters" logger "github.com/multiversx/mx-chain-logger-go" "github.com/urfave/cli" ) diff --git a/cmd/assessment/testdata/cpucalculate.wasm b/cmd/assessment/testdata/cpucalculate.wasm old mode 100644 new mode 100755 index 
1dc0dc30389..8f04b918eaa Binary files a/cmd/assessment/testdata/cpucalculate.wasm and b/cmd/assessment/testdata/cpucalculate.wasm differ diff --git a/cmd/assessment/testdata/storage100.wasm b/cmd/assessment/testdata/storage100.wasm old mode 100644 new mode 100755 index afc590aa0e6..b1b9701c7af Binary files a/cmd/assessment/testdata/storage100.wasm and b/cmd/assessment/testdata/storage100.wasm differ diff --git a/cmd/node/config/api.toml b/cmd/node/config/api.toml index 2c7fb1d7889..a10ec049554 100644 --- a/cmd/node/config/api.toml +++ b/cmd/node/config/api.toml @@ -43,6 +43,9 @@ # /node/managed-keys will return the keys managed by the node { Name = "/managed-keys", Open = true }, + # /node/loaded-keys will return the keys loaded by the node + { Name = "/loaded-keys", Open = true }, + # /node/managed-keys/count will return the number of keys managed by the node { Name = "/managed-keys/count", Open = true }, @@ -170,7 +173,10 @@ [APIPackages.validator] Routes = [ # /validator/statistics will return a list of validators statistics for all validators - { Name = "/statistics", Open = true } + { Name = "/statistics", Open = true }, + + # /validator/auction will return a list of nodes that are in the auction list + { Name = "/auction", Open = true }, ] [APIPackages.vm-values] diff --git a/cmd/node/config/config.toml b/cmd/node/config/config.toml index a5c0ba65510..f88480788cb 100644 --- a/cmd/node/config/config.toml +++ b/cmd/node/config/config.toml @@ -35,10 +35,13 @@ # SyncProcessTimeInMillis is the value in milliseconds used when processing blocks while synchronizing blocks SyncProcessTimeInMillis = 12000 - # SetGuardianEpochsDelay represents the delay in epochs between the execution time of the SetGuardian transaction and - # the activation of the configured guardian. - # Make sure that this is greater than the unbonding period! - SetGuardianEpochsDelay = 2 # TODO: for mainnet should be 20, 2 is just for testing + # SetGuardianEpochsDelay represents the delay in epochs between the execution time of the SetGuardian transaction and + # the activation of the configured guardian. + # Make sure that this is greater than the unbonding period! + SetGuardianEpochsDelay = 2 # TODO: for mainnet should be 20, 2 is just for testing + +[HardwareRequirements] + CPUFlags = ["SSE4", "SSE42"] [Versions] DefaultVersion = "default" @@ -489,6 +492,7 @@ [Antiflood] Enabled = true NumConcurrentResolverJobs = 50 + NumConcurrentResolvingTrieNodesJobs = 3 [Antiflood.FastReacting] IntervalInSeconds = 1 ReservedPercent = 20.0 @@ -617,6 +621,7 @@ Type = "json" [EpochStartConfig] + GenesisEpoch = 0 MinRoundsBetweenEpochs = 20 RoundsPerEpoch = 200 # Min and Max ShuffledOutRestartThreshold represents the minimum and maximum duration of an epoch (in percentage) after a node which @@ -626,6 +631,7 @@ MinNumConnectedPeersToStart = 2 MinNumOfPeersToConsiderBlockValid = 2 + ExtraDelayForRequestBlockInfoInMilliseconds = 3000 # ResourceStats, if enabled, will output in a folder called "stats" # resource statistics. For example: number of active go routines, memory allocation, number of GC sweeps, etc. 
@@ -655,6 +661,7 @@ PeerStatePruningEnabled = true MaxStateTrieLevelInMemory = 5 MaxPeerTrieLevelInMemory = 5 + StateStatisticsEnabled = false CollectStateChangesEnabled = false [BlockSizeThrottleConfig] @@ -666,9 +673,8 @@ TimeOutForSCExecutionInMilliseconds = 10000 # 10 seconds = 10000 milliseconds WasmerSIGSEGVPassthrough = false # must be false for release WasmVMVersions = [ - { StartEpoch = 0, Version = "v1.3" }, - { StartEpoch = 1, Version = "v1.4" }, - { StartEpoch = 3, Version = "v1.5" }, # TODO: set also the RoundActivations.DisableAsyncCallV1 accordingly + { StartEpoch = 0, Version = "v1.4" }, + { StartEpoch = 1, Version = "v1.5" }, # TODO: set also the RoundActivations.DisableAsyncCallV1 accordingly ] [VirtualMachine.Querying] @@ -676,9 +682,8 @@ TimeOutForSCExecutionInMilliseconds = 10000 # 10 seconds = 10000 milliseconds WasmerSIGSEGVPassthrough = false # must be false for release WasmVMVersions = [ - { StartEpoch = 0, Version = "v1.3" }, - { StartEpoch = 1, Version = "v1.4" }, - { StartEpoch = 3, Version = "v1.5" }, # TODO: set also the RoundActivations.DisableAsyncCallV1 accordingly + { StartEpoch = 0, Version = "v1.4" }, + { StartEpoch = 1, Version = "v1.5" }, # TODO: set also the RoundActivations.DisableAsyncCallV1 accordingly ] [VirtualMachine.GasConfig] diff --git a/cmd/node/config/enableEpochs.toml b/cmd/node/config/enableEpochs.toml index dd6e20d3589..10e51b24a86 100644 --- a/cmd/node/config/enableEpochs.toml +++ b/cmd/node/config/enableEpochs.toml @@ -62,7 +62,7 @@ ESDTEnableEpoch = 1 # GovernanceEnableEpoch represents the epoch when governance is enabled - GovernanceEnableEpoch = 5 + GovernanceEnableEpoch = 1 # DelegationManagerEnableEpoch represents the epoch when the delegation manager is enabled # epoch should not be 0 @@ -90,9 +90,6 @@ # ValidatorToDelegationEnableEpoch represents the epoch when the validator-to-delegation feature will be enabled ValidatorToDelegationEnableEpoch = 1 - # WaitingListFixEnableEpoch represents the epoch when the 6 epoch waiting list fix is enabled - WaitingListFixEnableEpoch = 1000000 - # IncrementSCRNonceInMultiTransferEnableEpoch represents the epoch when the fix for preventing the generation of the same SCRs # is enabled. The fix is done by adding an extra increment. 
IncrementSCRNonceInMultiTransferEnableEpoch = 1 @@ -106,9 +103,6 @@ # ESDTTransferRoleEnableEpoch represents the epoch when esdt transfer role set is enabled ESDTTransferRoleEnableEpoch = 1 - # BuiltInFunctionOnMetaEnableEpoch represents the epoch when built in function processing on metachain is enabled - BuiltInFunctionOnMetaEnableEpoch = 1000000 - # ComputeRewardCheckpointEnableEpoch represents the epoch when compute rewards checkpoint epoch is enabled ComputeRewardCheckpointEnableEpoch = 1 @@ -252,44 +246,64 @@ DeterministicSortOnValidatorsInfoEnableEpoch = 1 # SCProcessorV2EnableEpoch represents the epoch when SC processor V2 will be used - SCProcessorV2EnableEpoch = 3 + SCProcessorV2EnableEpoch = 1 # AutoBalanceDataTriesEnableEpoch represents the epoch when the data tries are automatically balanced by inserting at the hashed key instead of the normal key - AutoBalanceDataTriesEnableEpoch = 3 + AutoBalanceDataTriesEnableEpoch = 1 + + # MigrateDataTrieEnableEpoch represents the epoch when the data tries migration is enabled + MigrateDataTrieEnableEpoch = 1 # KeepExecOrderOnCreatedSCRsEnableEpoch represents the epoch when the execution order of created SCRs is ensured - KeepExecOrderOnCreatedSCRsEnableEpoch = 3 + KeepExecOrderOnCreatedSCRsEnableEpoch = 1 # MultiClaimOnDelegationEnableEpoch represents the epoch when the multi claim on delegation is enabled - MultiClaimOnDelegationEnableEpoch = 3 + MultiClaimOnDelegationEnableEpoch = 1 # ChangeUsernameEnableEpoch represents the epoch when changing username is enabled - ChangeUsernameEnableEpoch = 3 + ChangeUsernameEnableEpoch = 4 # ConsistentTokensValuesLengthCheckEnableEpoch represents the epoch when the consistent tokens values length check is enabled - ConsistentTokensValuesLengthCheckEnableEpoch = 3 + ConsistentTokensValuesLengthCheckEnableEpoch = 1 # FixDelegationChangeOwnerOnAccountEnableEpoch represents the epoch when the fix for the delegation system smart contract is enabled - FixDelegationChangeOwnerOnAccountEnableEpoch = 3 + FixDelegationChangeOwnerOnAccountEnableEpoch = 1 # DynamicGasCostForDataTrieStorageLoadEnableEpoch represents the epoch when dynamic gas cost for data trie storage load will be enabled - DynamicGasCostForDataTrieStorageLoadEnableEpoch = 3 + DynamicGasCostForDataTrieStorageLoadEnableEpoch = 1 # ScToScLogEventEnableEpoch represents the epoch when the sc to sc log event feature is enabled - ScToScLogEventEnableEpoch = 3 + ScToScLogEventEnableEpoch = 1 # NFTStopCreateEnableEpoch represents the epoch when NFT stop create feature is enabled - NFTStopCreateEnableEpoch = 3 + NFTStopCreateEnableEpoch = 1 # ChangeOwnerAddressCrossShardThroughSCEnableEpoch represents the epoch when the change owner address built in function will work also through a smart contract call cross shard - ChangeOwnerAddressCrossShardThroughSCEnableEpoch = 3 + ChangeOwnerAddressCrossShardThroughSCEnableEpoch = 1 # FixGasRemainingForSaveKeyValueBuiltinFunctionEnableEpoch represents the epoch when the fix for the remaining gas in the SaveKeyValue builtin function is enabled - FixGasRemainingForSaveKeyValueBuiltinFunctionEnableEpoch = 3 + FixGasRemainingForSaveKeyValueBuiltinFunctionEnableEpoch = 1 # CurrentRandomnessOnSortingEnableEpoch represents the epoch when the current randomness on sorting is enabled CurrentRandomnessOnSortingEnableEpoch = 4 + # StakeLimitsEnableEpoch represents the epoch when stake limits on validators are enabled + StakeLimitsEnableEpoch = 5 + + # StakingV4Step1EnableEpoch represents the epoch when staking v4 is 
initialized. This is the epoch in which + # all nodes from staking queue are moved in the auction list + StakingV4Step1EnableEpoch = 4 + + # StakingV4Step2EnableEpoch represents the epoch when staking v4 is enabled. Should have a greater value than StakingV4Step1EnableEpoch. + # From this epoch, all shuffled out nodes are moved to auction nodes. No auction nodes selection is done yet. + StakingV4Step2EnableEpoch = 5 + + # StakingV4Step3EnableEpoch represents the epoch in which selected nodes from auction will be distributed to waiting list + StakingV4Step3EnableEpoch = 6 + + # AlwaysMergeContextsInEEIEnableEpoch represents the epoch in which the EEI will always merge the contexts + AlwaysMergeContextsInEEIEnableEpoch = 4 + # BLSMultiSignerEnableEpoch represents the activation epoch for different types of BLS multi-signers BLSMultiSignerEnableEpoch = [ { EnableEpoch = 0, Type = "no-KOSK" }, @@ -298,13 +312,17 @@ # MaxNodesChangeEnableEpoch holds configuration for changing the maximum number of nodes and the enabling epoch MaxNodesChangeEnableEpoch = [ - { EpochEnable = 0, MaxNumNodes = 36, NodesToShufflePerShard = 4 }, - { EpochEnable = 1, MaxNumNodes = 56, NodesToShufflePerShard = 2 } + { EpochEnable = 0, MaxNumNodes = 48, NodesToShufflePerShard = 4 }, # 4 shuffled out keys / shard will not be reached normally + { EpochEnable = 1, MaxNumNodes = 64, NodesToShufflePerShard = 2 }, + # Staking v4 configuration, where: + # - Enable epoch = StakingV4Step3EnableEpoch + # - NodesToShufflePerShard = same as previous entry in MaxNodesChangeEnableEpoch + # - MaxNumNodes = (MaxNumNodesFromPreviousEpochEnable - (numOfShards+1)*NodesToShufflePerShard) + { EpochEnable = 6, MaxNumNodes = 56, NodesToShufflePerShard = 2 }, ] [GasSchedule] # GasScheduleByEpochs holds the configuration for the gas schedule that will be applied from specific epochs GasScheduleByEpochs = [ - { StartEpoch = 0, FileName = "gasScheduleV1.toml" }, - { StartEpoch = 1, FileName = "gasScheduleV7.toml" }, + { StartEpoch = 0, FileName = "gasScheduleV7.toml" }, ] diff --git a/cmd/node/config/enableRounds.toml b/cmd/node/config/enableRounds.toml index e9940cf1b7c..d7be75bb524 100644 --- a/cmd/node/config/enableRounds.toml +++ b/cmd/node/config/enableRounds.toml @@ -10,4 +10,4 @@ [RoundActivations] [RoundActivations.DisableAsyncCallV1] Options = [] - Round = "500" + Round = "100" diff --git a/cmd/node/config/fullArchiveP2P.toml b/cmd/node/config/fullArchiveP2P.toml index 0dd790a83f6..6e9010a4c34 100644 --- a/cmd/node/config/fullArchiveP2P.toml +++ b/cmd/node/config/fullArchiveP2P.toml @@ -23,10 +23,11 @@ [Node.Transports.TCP] ListenAddress = "/ip4/0.0.0.0/tcp/%d" # TCP listen address PreventPortReuse = false - [Node.ResourceLimiter] - Type = "default autoscale" #available options "default autoscale", "infinite", "default with manual scale". - ManualSystemMemoryInMB = 0 # not taken into account if the type is not "default with manual scale" - ManualMaximumFD = 0 # not taken into account if the type is not "default with manual scale" + + [Node.ResourceLimiter] + Type = "default autoscale" #available options "default autoscale", "infinite", "default with manual scale". 
+ ManualSystemMemoryInMB = 0 # not taken into account if the type is not "default with manual scale" + ManualMaximumFD = 0 # not taken into account if the type is not "default with manual scale" # P2P peer discovery section @@ -71,10 +72,10 @@ [Sharding] # The targeted number of peer connections - TargetPeerCount = 36 + TargetPeerCount = 41 MaxIntraShardValidators = 7 MaxCrossShardValidators = 15 - MaxIntraShardObservers = 2 + MaxIntraShardObservers = 7 MaxCrossShardObservers = 3 MaxSeeders = 2 diff --git a/cmd/node/config/genesis.json b/cmd/node/config/genesis.json index 10cc1e97d95..27f74229b85 100644 --- a/cmd/node/config/genesis.json +++ b/cmd/node/config/genesis.json @@ -1,92 +1,497 @@ [ { - "address": "erd1ulhw20j7jvgfgak5p05kv667k5k9f320sgef5ayxkt9784ql0zssrzyhjp", - "supply": "2222222222222222222222224", - "balance": "2219722222222222222222224", + "info": "delegator1 for legacy delegation", + "address": "erd1z48u9l275l2uy4augfytpp2355wvdnc4gwc9ms9gdhdqru3fz9eq5wqe3e", + "supply": "1001000000000000000000", + "balance": "1000000000000000000", "stakingvalue": "0", "delegation": { "address": "erd1qqqqqqqqqqqqqpgqrchxzx5uu8sv3ceg8nx8cxc0gesezure5awqn46gtd", - "value": "2500000000000000000000" + "value": "1000000000000000000000" } }, { - "address": "erd17c4fs6mz2aa2hcvva2jfxdsrdknu4220496jmswer9njznt22eds0rxlr4", - "supply": "2222222222222222222222222", - "balance": "2219722222222222222222222", + "info": "delegator2 for legacy delegation", + "address": "erd1qm5erxm0va3hcw9jfxtp2gl3u9q9p4k242pk9xx3vezefkgj2vhs0wk8cx", + "supply": "1001000000000000000000", + "balance": "1000000000000000000", "stakingvalue": "0", "delegation": { "address": "erd1qqqqqqqqqqqqqpgqrchxzx5uu8sv3ceg8nx8cxc0gesezure5awqn46gtd", - "value": "2500000000000000000000" + "value": "1000000000000000000000" } }, { - "address": "erd10d2gufxesrp8g409tzxljlaefhs0rsgjle3l7nq38de59txxt8csj54cd3", - "supply": "2222222222222222222222222", - "balance": "2219722222222222222222222", + "info": "delegator3 for legacy delegation", + "address": "erd1rqwt9vpfn072tahtcvup2dxz4nvqfs3n5p9eh0jnypkppxmdheaqpcqfzz", + "supply": "1001000000000000000000", + "balance": "1000000000000000000", "stakingvalue": "0", "delegation": { "address": "erd1qqqqqqqqqqqqqpgqrchxzx5uu8sv3ceg8nx8cxc0gesezure5awqn46gtd", - "value": "2500000000000000000000" + "value": "1000000000000000000000" } }, { - "address": "erd1e0vueugj66l5cgrz83se0a74c3hst7u4w55t3usfa3at8yhfq94qtajf2c", - "supply": "2222222222222222222222222", - "balance": "2219722222222222222222222", + "info": "delegator4 for legacy delegation", + "address": "erd17uygg7qq05mjhectgymj6fwq59ysr4p92cy0yz3jrxxj6253p40sj77wr6", + "supply": "1001000000000000000000", + "balance": "1000000000000000000", "stakingvalue": "0", "delegation": { "address": "erd1qqqqqqqqqqqqqpgqrchxzx5uu8sv3ceg8nx8cxc0gesezure5awqn46gtd", - "value": "2500000000000000000000" + "value": "1000000000000000000000" } }, { - "address": "erd1fn9faxsh6felld6c2vd82par6nzshkj609550qu3dngh8faxjz5syukjcq", - "supply": "2222222222222222222222222", - "balance": "2219722222222222222222222", + "info": "delegator5 for legacy delegation", + "address": "erd1qslr87nj5gkv2396js3fa2za5kqgwugqnz4j4qqh22mxpnse2lws8srsq6", + "supply": "1001000000000000000000", + "balance": "1000000000000000000", "stakingvalue": "0", "delegation": { "address": "erd1qqqqqqqqqqqqqpgqrchxzx5uu8sv3ceg8nx8cxc0gesezure5awqn46gtd", - "value": "2500000000000000000000" + "value": "1000000000000000000000" } }, { - "address": "erd12ymx62jlp0dez40slu22dxmese5fl0rwrtqzlnff844rtltnlpdse9ecsm", - 
"supply": "2222222222222222222222222", - "balance": "2219722222222222222222222", + "info": "delegator6 for legacy delegation", + "address": "erd17pjlqg55c6v3fjvpqec8peefk74g8neygr84ymw7cqzudmzaw7lqnln7sz", + "supply": "1001000000000000000000", + "balance": "1000000000000000000", "stakingvalue": "0", "delegation": { "address": "erd1qqqqqqqqqqqqqpgqrchxzx5uu8sv3ceg8nx8cxc0gesezure5awqn46gtd", - "value": "2500000000000000000000" + "value": "1000000000000000000000" } }, { - "address": "erd1qsrfugd567kv68sysp455cshqr30257c8jnuq2q7zct943w82feszr8n32", - "supply": "2222222222222222222222222", - "balance": "2219722222222222222222222", + "info": "delegator7 for legacy delegation", + "address": "erd19ztfuew6ejrwq5mpax4xztjwh5j63u9vge4dum9vlyy7hg3pc86qgmt6nm", + "supply": "1001000000000000000000", + "balance": "1000000000000000000", "stakingvalue": "0", "delegation": { "address": "erd1qqqqqqqqqqqqqpgqrchxzx5uu8sv3ceg8nx8cxc0gesezure5awqn46gtd", - "value": "2500000000000000000000" + "value": "1000000000000000000000" } }, { - "address": "erd153a3wkfng4cupvkd86k07nl0acq548s72xr3yvpjut6u6fnpzads9zyq37", - "supply": "2222222222222222222222222", - "balance": "2219722222222222222222222", + "info": "delegator8 for legacy delegation", + "address": "erd1age5t5qfrke4vm47vaq9a4yewllh6227qm4fcy3rc7g5ktmzyatsgf4wcw", + "supply": "1001000000000000000000", + "balance": "1000000000000000000", "stakingvalue": "0", "delegation": { "address": "erd1qqqqqqqqqqqqqpgqrchxzx5uu8sv3ceg8nx8cxc0gesezure5awqn46gtd", - "value": "2500000000000000000000" + "value": "1000000000000000000000" } }, { - "address": "erd1yajssshtsc75x87cxvylnwu4r9dv3c2tegufrd07fjmw72krlq9spmw32d", - "supply": "2222222222222222222222222", - "balance": "2219722222222222222222222", + "info": "delegator9 for legacy delegation", + "address": "erd1jt0vv29trqs3nddzkxsf950xx0t5uvyncmuamwryneh9wsee5wpsgue96d", + "supply": "1001000000000000000000", + "balance": "1000000000000000000", "stakingvalue": "0", "delegation": { "address": "erd1qqqqqqqqqqqqqpgqrchxzx5uu8sv3ceg8nx8cxc0gesezure5awqn46gtd", - "value": "2500000000000000000000" + "value": "1000000000000000000000" + } + }, + { + "info": "delegator10 for legacy delegation", + "address": "erd1c83rmk3n8ys4g9dkg3q70thx3v787qtpfmk23epu4xsadpyd3dnsejf2r7", + "supply": "1001000000000000000000", + "balance": "1000000000000000000", + "stakingvalue": "0", + "delegation": { + "address": "erd1qqqqqqqqqqqqqpgqrchxzx5uu8sv3ceg8nx8cxc0gesezure5awqn46gtd", + "value": "1000000000000000000000" + } + }, + { + "info": "wallet1 2500*8 staked + 10000 initial balance", + "address": "erd19ptzdk7zvs3csjjf8u04j5kxzn5jck409sefdyx6x9afkjg4wunsfw7rj7", + "supply": "30000000000000000000000", + "balance": "10000000000000000000000", + "stakingvalue": "20000000000000000000000", + "delegation": { + "address": "", + "value": "0" + } + }, + { + "info": "wallet2 2500*6 staked + 10000 initial balance", + "address": "erd1qz9gp38g4238r3077wq4tpc0jxaq0f87c0t2n2hr3x6fef85t3lshq2ejk", + "supply": "25000000000000000000000", + "balance": "10000000000000000000000", + "stakingvalue": "15000000000000000000000", + "delegation": { + "address": "", + "value": "0" + } + }, + { + "info": "wallet3 2500*4 staked + 10000 initial balance", + "address": "erd1tp2af4jvdh7p79myu5h6srtchh42p5e3pchqre3ejyyn9mqhwa3shpgj35", + "supply": "20000000000000000000000", + "balance": "10000000000000000000000", + "stakingvalue": "10000000000000000000000", + "delegation": { + "address": "", + "value": "0" + } + }, + { + "info": "wallet4 2500*4 staked + 10000 initial balance", + 
"address": "erd1e2ftj4hj43lkduwps9xdmtgjnmugkh9mndph4n2cxfmf6ufvn4ks0zut84", + "supply": "20000000000000000000000", + "balance": "10000000000000000000000", + "stakingvalue": "10000000000000000000000", + "delegation": { + "address": "", + "value": "0" + } + }, + { + "info": "wallet5 2500*3 staked + 10000 initial balance", + "address": "erd1dzjes5c6a8ru45clgla3q0k3ezm06svefjz7vzs8pjfnrqa8tcasl4j8hs", + "supply": "17500000000000000000000", + "balance": "10000000000000000000000", + "stakingvalue": "7500000000000000000000", + "delegation": { + "address": "", + "value": "0" + } + }, + { + "info": "wallet6 2500*3 staked + 10000 initial balance", + "address": "erd14gg3v6j4505ucx7t2wtl98tgupmyp748aq92jefmp5ha6e3pccgq9clwe9", + "supply": "17500000000000000000000", + "balance": "10000000000000000000000", + "stakingvalue": "7500000000000000000000", + "delegation": { + "address": "", + "value": "0" + } + }, + { + "info": "wallet7 2500*2 staked + 10000 initial balance", + "address": "erd1xdfc44mk4ut5cv6l3mq0py6h88cty9ykacskm8xv3tvrp893kmxqppcefg", + "supply": "15000000000000000000000", + "balance": "10000000000000000000000", + "stakingvalue": "5000000000000000000000", + "delegation": { + "address": "", + "value": "0" + } + }, + { + "info": "wallet8 2500*2 staked + 10000 initial balance", + "address": "erd1997jfwzrum4rrk59ar5supcyge9rpa73xgv2p45h3unt880v399svt8c9g", + "supply": "15000000000000000000000", + "balance": "10000000000000000000000", + "stakingvalue": "5000000000000000000000", + "delegation": { + "address": "", + "value": "0" + } + }, + { + "info": "wallet9 2500 staked + 10000 initial balance", + "address": "erd1e9cg9ys8fh77n9eaxpg47sxaes4fe9g2nvy6a65qpxykcx8grg9sv45lss", + "supply": "12500000000000000000000", + "balance": "10000000000000000000000", + "stakingvalue": "2500000000000000000000", + "delegation": { + "address": "", + "value": "0" + } + }, + { + "info": "wallet10 2500 staked + 10000 initial balance", + "address": "erd1xdrltsygywhmtxzsmrgjlsxsxrf4y2ayv0z50y666dgsp66trxwqzajk96", + "supply": "12500000000000000000000", + "balance": "10000000000000000000000", + "stakingvalue": "2500000000000000000000", + "delegation": { + "address": "", + "value": "0" + } + }, + { + "info": "wallet11 2500 staked + 10000 initial balance", + "address": "erd1lytewufjflpwl6gtf0faazjr59nd2fhfwlk7ew72hkpgdkmunl8qfrpywg", + "supply": "12500000000000000000000", + "balance": "10000000000000000000000", + "stakingvalue": "2500000000000000000000", + "delegation": { + "address": "", + "value": "0" + } + }, + { + "info": "wallet12 2500 staked + 10000 initial balance", + "address": "erd1s8tqztm4u4gw23489lps97qxe8vck8eln3a424y9c6yujsc96nas0l968d", + "supply": "12500000000000000000000", + "balance": "10000000000000000000000", + "stakingvalue": "2500000000000000000000", + "delegation": { + "address": "", + "value": "0" + } + }, + { + "info": "wallet13 2500 staked + 10000 initial balance", + "address": "erd1p7p0f3n8dxtj08hsp9hccqg932pd4f94rq3adg6g55etx8g4z8tsmg5e0g", + "supply": "12500000000000000000000", + "balance": "10000000000000000000000", + "stakingvalue": "2500000000000000000000", + "delegation": { + "address": "", + "value": "0" + } + }, + { + "info": "wallet14 2500 staked + 10000 initial balance", + "address": "erd1uyeel03ea837dphrx2ak77hdvlhjdcqdwgyg6k99gqn602ymsn7qptmedj", + "supply": "12500000000000000000000", + "balance": "10000000000000000000000", + "stakingvalue": "2500000000000000000000", + "delegation": { + "address": "", + "value": "0" + } + }, + { + "info": "wallet15 2500 staked + 10000 initial balance", 
+ "address": "erd1ftyzkdhl7rl782mrzrdc2jck3egydp0ydzhcjm9gc8s2jym5egrqadl4h6", + "supply": "12500000000000000000000", + "balance": "10000000000000000000000", + "stakingvalue": "2500000000000000000000", + "delegation": { + "address": "", + "value": "0" + } + }, + { + "info": "wallet16 2500*3 staked + 10000 initial balance", + "address": "erd1rsl2sj5g87ltfq0hvrmgm35mlg4lzfs29p4gzxh0lh4vj2e8ykuqh69lha", + "supply": "17500000000000000000000", + "balance": "10000000000000000000000", + "stakingvalue": "7500000000000000000000", + "delegation": { + "address": "", + "value": "0" + } + }, + { + "info": "wallet17 2500*2 staked + 10000 initial balance", + "address": "erd19yrjty2l4ytl6d3jynp5mqfekq4uf2x93akz60w7l3cp6qzny3psnfyerw", + "supply": "15000000000000000000000", + "balance": "10000000000000000000000", + "stakingvalue": "5000000000000000000000", + "delegation": { + "address": "", + "value": "0" + } + }, + { + "info": "wallet18 2500 staked + 10000 initial balance", + "address": "erd148lq42zdzz34y0yr8avldsy7gw0rmuvj4lmstzug77v08z3q0ncszfk8w9", + "supply": "12500000000000000000000", + "balance": "10000000000000000000000", + "stakingvalue": "2500000000000000000000", + "delegation": { + "address": "", + "value": "0" + } + }, + { + "info": "wallet19 2500 staked + 10000 initial balance", + "address": "erd1k2v4h3805gnxf78c22g7lfe4pgq2lmr4ezmkk2rqkej6yjd7g5ssu88fme", + "supply": "12500000000000000000000", + "balance": "10000000000000000000000", + "stakingvalue": "2500000000000000000000", + "delegation": { + "address": "", + "value": "0" + } + }, + { + "info": "wallet20 2500 staked + 10000 initial balance", + "address": "erd1nzjyj2ykpway04pczl42fgrlza2f0eaf97fxgnuuw39vyee36xlqccc3qz", + "supply": "12500000000000000000000", + "balance": "10000000000000000000000", + "stakingvalue": "2500000000000000000000", + "delegation": { + "address": "", + "value": "0" + } + }, + { + "info": "wallet21 2500 staked + 10000 initial balance", + "address": "erd1yp0nvml5c45us3qzreqxkjxaakxn744t3gdva9s8xndcakzawutstepmm5", + "supply": "12500000000000000000000", + "balance": "10000000000000000000000", + "stakingvalue": "2500000000000000000000", + "delegation": { + "address": "", + "value": "0" + } + }, + { + "info": "wallet22 2500 staked + 10000 initial balance", + "address": "erd1qyg80tr4rd65ur3hedm9h4yv3fcwmm6vnyrypnm972nd80889hxqdfgwrc", + "supply": "12500000000000000000000", + "balance": "10000000000000000000000", + "stakingvalue": "2500000000000000000000", + "delegation": { + "address": "", + "value": "0" + } + }, + { + "info": "wallet23 2500 staked + 10000 initial balance", + "address": "erd14x6d48q59zjh5p909fyw7e46czftgdawyf734cnmgk5e63ghrvvsqp254t", + "supply": "12500000000000000000000", + "balance": "10000000000000000000000", + "stakingvalue": "2500000000000000000000", + "delegation": { + "address": "", + "value": "0" + } + }, + { + "info": "wallet24 2500 staked + 10000 initial balance", + "address": "erd1wyxylus33e476h5kta7e0caeqvgvcgrxh0az33e7szya6g7mh2ws0n27sa", + "supply": "12500000000000000000000", + "balance": "10000000000000000000000", + "stakingvalue": "2500000000000000000000", + "delegation": { + "address": "", + "value": "0" + } + }, + { + "info": "wallet25 2500 staked + 10000 initial balance", + "address": "erd1v3ylw7t6vzjzs06xjf6ccmf576ud38g2ws45tjkjg48s38jefpzqlwms9w", + "supply": "12500000000000000000000", + "balance": "10000000000000000000000", + "stakingvalue": "2500000000000000000000", + "delegation": { + "address": "", + "value": "0" + } + }, + { + "info": "wallet26 2500 staked + 10000 initial 
balance", + "address": "erd1twel4azu6uptw878y063p93mjr84y5m4kpsww2aeqj4pg5jeplgst04rhg", + "supply": "12500000000000000000000", + "balance": "10000000000000000000000", + "stakingvalue": "2500000000000000000000", + "delegation": { + "address": "", + "value": "0" + } + }, + { + "info": "wallet27 2500 staked + 10000 initial balance", + "address": "erd1q2se75ucl9as9j7e48v00jrnj6hvtk5vqxa4a3ag5729vctsdkasm20cyc", + "supply": "12500000000000000000000", + "balance": "10000000000000000000000", + "stakingvalue": "2500000000000000000000", + "delegation": { + "address": "", + "value": "0" + } + }, + { + "info": "wallet28 2500 staked + 10000 initial balance", + "address": "erd18cc6cm35xhv7kzwsm79l4ma6jpz3ee5l0yjxuc66kh6rcgtawtuq6lzp9f", + "supply": "12500000000000000000000", + "balance": "10000000000000000000000", + "stakingvalue": "2500000000000000000000", + "delegation": { + "address": "", + "value": "0" + } + }, + { + "info": "wallet29 2500 staked + 10000 initial balance", + "address": "erd1psux99h4jljyt3nkw8pruv3spw5r0unqe4wk8837mm9my88gl28qj6mml5", + "supply": "12500000000000000000000", + "balance": "10000000000000000000000", + "stakingvalue": "2500000000000000000000", + "delegation": { + "address": "", + "value": "0" + } + }, + { + "info": "wallet30 2500 staked + 10000 initial balance", + "address": "erd1vgm89ngmv2ghzsyq8xjtt45crekkxnhsq30yxzlq86uc3ra3r57qa3mw2p", + "supply": "12500000000000000000000", + "balance": "10000000000000000000000", + "stakingvalue": "2500000000000000000000", + "delegation": { + "address": "", + "value": "0" + } + }, + { + "info": "wallet31 2500 staked + 10000 initial balance", + "address": "erd1k767vmmn8vg8xvuny32ppwr4dxrlgmpykn0u7nm92evlag3wkukqdgsf5u", + "supply": "12500000000000000000000", + "balance": "10000000000000000000000", + "stakingvalue": "2500000000000000000000", + "delegation": { + "address": "", + "value": "0" + } + }, + { + "info": "wallet32 2500 staked + 10000 initial balance", + "address": "erd1hwe8lskmzsdpuy3f6hldamvn0zrhzldec8m4tt8hupq58d7gyrequy8wsp", + "supply": "12500000000000000000000", + "balance": "10000000000000000000000", + "stakingvalue": "2500000000000000000000", + "delegation": { + "address": "", + "value": "0" + } + }, + { + "info": "wallet33 2500 staked + 10000 initial balance", + "address": "erd125eyrjk99zadr04gm9z2p4nckmnegexs5nyk7ek85rut2665t75sql3w88", + "supply": "12500000000000000000000", + "balance": "10000000000000000000000", + "stakingvalue": "2500000000000000000000", + "delegation": { + "address": "", + "value": "0" + } + }, + { + "info": "wallet34 no staking, initial funds - 10 million EGLD", + "address": "erd17ndrqg38lqf2zjgeqvle90rsn9ejrd9upx8evkyvh8e0m5xlph5scv9l6n", + "supply": "10000000000000000000000000", + "balance": "10000000000000000000000000", + "stakingvalue": "0", + "delegation": { + "address": "", + "value": "0" + } + }, + { + "info": "wallet35 no staking, initial funds - 9509990 EGLD", + "address": "erd1zed89c8226rs7f59zh2xea39qk9ym9tsmt4s0sg2uw7u9nvtzt3q8fdj2e", + "supply": "9509990000000000000000000", + "balance": "9509990000000000000000000", + "stakingvalue": "0", + "delegation": { + "address": "", + "value": "0" } } -] \ No newline at end of file +] diff --git a/cmd/node/config/genesisContracts/dns.wasm b/cmd/node/config/genesisContracts/dns.wasm index ea613050171..ce692a1260b 100644 Binary files a/cmd/node/config/genesisContracts/dns.wasm and b/cmd/node/config/genesisContracts/dns.wasm differ diff --git a/cmd/node/config/genesisSmartContracts.json b/cmd/node/config/genesisSmartContracts.json index 
f102c18d489..198798c36fe 100644 --- a/cmd/node/config/genesisSmartContracts.json +++ b/cmd/node/config/genesisSmartContracts.json @@ -11,7 +11,7 @@ "owner": "erd188anxz35atlef7cucszypmvx88lhz4m7a7t7lhcwt6sfphpsqlkswfhcx2", "filename": "./config/genesisContracts/dns.wasm", "vm-type": "0500", - "init-parameters": "056bc75e2d63100000", + "init-parameters": "00", "type": "dns", "version": "0.2.*" } diff --git a/cmd/node/config/nodesSetup.json b/cmd/node/config/nodesSetup.json index 239fd9a52f6..741d9009ad8 100644 --- a/cmd/node/config/nodesSetup.json +++ b/cmd/node/config/nodesSetup.json @@ -1,48 +1,395 @@ { "startTime": 0, - "roundDuration": 4000, - "consensusGroupSize": 3, - "minNodesPerShard": 3, - "metaChainConsensusGroupSize": 3, - "metaChainMinNodes": 3, - "hysteresis": 0, + "roundDuration": 6000, + "consensusGroupSize": 7, + "minNodesPerShard": 10, + "metaChainConsensusGroupSize": 10, + "metaChainMinNodes": 10, + "hysteresis": 0.2, "adaptivity": false, "initialNodes": [ { - "pubkey": "cbc8c9a6a8d9c874e89eb9366139368ae728bd3eda43f173756537877ba6bca87e01a97b815c9f691df73faa16f66b15603056540aa7252d73fecf05d24cd36b44332a88386788fbdb59d04502e8ecb0132d8ebd3d875be4c83e8b87c55eb901", - "address": "erd1qqqqqqqqqqqqqpgqrchxzx5uu8sv3ceg8nx8cxc0gesezure5awqn46gtd" + "info": "multikey - group1 - legacy delegation", + "pubkey": "309198fd7b6bccb1b6279ea2e5c5f2a33b9f6fe5f23180778d8721710344989b07d10dd1bd19307b3cd06eab9d1358062511610ccdad681ae1165016256cc1fdc0fed5041a5c29d7773b2994022bf2dc9efb937b3bb7cc9d72670448fad7d091", + "address": "erd1qqqqqqqqqqqqqpgqrchxzx5uu8sv3ceg8nx8cxc0gesezure5awqn46gtd", + "initialRating": 5000001 }, { - "pubkey": "ef9522d654bc08ebf2725468f41a693aa7f3cf1cb93922cff1c8c81fba78274016010916f4a7e5b0855c430a724a2d0b3acd1fe8e61e37273a17d58faa8c0d3ef6b883a33ec648950469a1e9757b978d9ae662a019068a401cff56eea059fd08", - "address": "erd1qqqqqqqqqqqqqpgqrchxzx5uu8sv3ceg8nx8cxc0gesezure5awqn46gtd" + "info": "multikey - group1 - legacy delegation", + "pubkey": "cd8cc9d70a8f78df0922470f1ebee727f57a70fb0571c0b512475c8c7d3ce1d9b70dd28e6582c038b18d05f8fbd6ac0a167a38614c40353b32ef47896d13c45bde57e86d87dd6e6f73420db93aeb79a1fd8f4e297f70478685a38ed73e49598f", + "address": "erd1qqqqqqqqqqqqqpgqrchxzx5uu8sv3ceg8nx8cxc0gesezure5awqn46gtd", + "initialRating": 5000001 }, { - "pubkey": "e91ab494cedd4da346f47aaa1a3e792bea24fb9f6cc40d3546bc4ca36749b8bfb0164e40dbad2195a76ee0fd7fb7da075ecbf1b35a2ac20638d53ea5520644f8c16952225c48304bb202867e2d71d396bff5a5971f345bcfe32c7b6b0ca34c84", - "address": "erd1qqqqqqqqqqqqqpgqrchxzx5uu8sv3ceg8nx8cxc0gesezure5awqn46gtd" + "info": "multikey - group1 - legacy delegation", + "pubkey": "e441b8a7d35545f7a02f0d57d144c89810d67ecb9abb5804d1bcfd8b270dc30978c56fbf9618e9a6621ca4e6c545c90807f7fe5edfd34ccab83db7bc0bd68536bb65403503d213c9763ec2137d80081579bb1b327f22c677bdb19891f4aae980", + "address": "erd1qqqqqqqqqqqqqpgqrchxzx5uu8sv3ceg8nx8cxc0gesezure5awqn46gtd", + "initialRating": 5000001 }, { - "pubkey": "8f8bf2e6ad1566cd06ba968b319d264b8ce4f8700032a88556c2ecc3992017654d69d9661ad67b12c8e49289a2925a0c3ab3c161a22c16e772a4fe8a84b273b7ac7c00d9da8fa90a9bb710961faa6e0e2e092f383f2fc365f1cda35d803f0901", - "address": "erd1qqqqqqqqqqqqqpgqrchxzx5uu8sv3ceg8nx8cxc0gesezure5awqn46gtd" + "info": "multikey - group1 - legacy delegation", + "pubkey": "72d8341713a271d510c7dfd02455ef86e9af4c66e06ac28fbb4c4e8df1e944e685bae2bee2af4101c19922b64e44e40b5d4905755594a719c1d50dc210515495d0de9b3a1d4ed42fd45a973353fe2c2548c45bb2157d8bf68e0937cc20fe1011", + "address": 
"erd1qqqqqqqqqqqqqpgqrchxzx5uu8sv3ceg8nx8cxc0gesezure5awqn46gtd", + "initialRating": 5000001 }, { - "pubkey": "aa930dc117738baead60088e9fd53ebc3157ad219f6a11ad4ee662eedb406baad013160ec1083fa68bf25b4ce7503e00e0e6dfbb4e405107a350d88feda2d01ae5b7b27a068d6accc980e498b36fc9ab1df4f3bcffec9f1611e20dea05b55a92", - "address": "erd1qqqqqqqqqqqqqpgqrchxzx5uu8sv3ceg8nx8cxc0gesezure5awqn46gtd" + "info": "multikey - group1 - wallet1 with 8 BLS keys", + "pubkey": "796d2f56ee9fa8f4ff9fc7735bbb4d644be8dd044f7f965362aed3d562e00b11f730b2fe21eb9c9fd100b68a5d3dbf07bae63d25739f1304ab638330f0e8c207a76de648e2523ad1693f4ffe9def8a1d5aca2c6c1c14e1fcc20089db069d1a0e", + "address": "erd19ptzdk7zvs3csjjf8u04j5kxzn5jck409sefdyx6x9afkjg4wunsfw7rj7", + "initialRating": 5000001 }, { - "pubkey": "70cf21360c0d276bb49af3a76e1bc193f05f688c0f8029a895742dbc4713fe2c36b8a90dd9455b308c3fbf5e3a3ea115ec1a6c353af028d104402a0f1813d6178740b62911470d75eab62ae630d7f1181c68fc1e966967749dc98eab35c03f0c", - "address": "erd1qqqqqqqqqqqqqpgqrchxzx5uu8sv3ceg8nx8cxc0gesezure5awqn46gtd" + "info": "multikey - group1 - wallet1 with 8 BLS keys", + "pubkey": "4cc0bfcdf4515927e3ae7890197bfd7c2e3c6f80ff7570fc6313d095e0d4e518ecc81db7087fefb8af166723a32ded1324f4ee00a6a97d206a024fd95ab60f7fe18c896c829ac43782c56d922415fb4ddbc0384936e3f860feb0666da43bcd19", + "address": "erd19ptzdk7zvs3csjjf8u04j5kxzn5jck409sefdyx6x9afkjg4wunsfw7rj7", + "initialRating": 5000001 }, { - "pubkey": "ea4a05326f44746beff6302f4a0452ad789113186ede483a577294d3bdf638a0742a57d453edbc61db32e04e101b7c021a1480a8d4989856a83b375d66fe61df64effc0cb68a18bebbc99b7e12ebc3084c17599b83bba33c435b8953974d2484", - "address": "erd1qqqqqqqqqqqqqpgqrchxzx5uu8sv3ceg8nx8cxc0gesezure5awqn46gtd" + "info": "multikey - group1 - wallet1 with 8 BLS keys", + "pubkey": "3449c9d672472ea52e83be4a8d6ce55785044233d848ac0a9a4b7fc22bf5b1bf5776f95f4b459f4e4cd3ba1d20b46a197c5a55ec00fa8d795d35f2232a6fae360129d186e3a0c219d42d8357de2823c8566da7841c2739b30d1581c4a38ec80e", + "address": "erd19ptzdk7zvs3csjjf8u04j5kxzn5jck409sefdyx6x9afkjg4wunsfw7rj7", + "initialRating": 5000001 }, { - "pubkey": "86b5dcfb9372b0865f0531782827bed66cb7313ab0924c052d3701c59d3c686748e757bb9e20ad1924d3531dc1eb1206f89d00791e79ea994e0a8b5d4ef92335f0d83f09cc358b718b103dd44d772e2286123ceffb6bd8236b8be7e4eb3e1308", - "address": "erd1qqqqqqqqqqqqqpgqrchxzx5uu8sv3ceg8nx8cxc0gesezure5awqn46gtd" + "info": "multikey - group1 - wallet1 with 8 BLS keys", + "pubkey": "1bdef34453a83d19748b02d964c43409a66535678b6234c42d289ed2b7571bf60d35ba7a43cd951d4fc9fc2e8ff618038a2edc9cb0181dcac0b62a0859aafd8d9993aa3069c14fec11cb66a653311a37861d225847bf535bcb920b4f0ea98b8b", + "address": "erd19ptzdk7zvs3csjjf8u04j5kxzn5jck409sefdyx6x9afkjg4wunsfw7rj7", + "initialRating": 5000001 }, { - "pubkey": "227a5a5ec0c58171b7f4ee9ecc304ea7b176fb626741a25c967add76d6cd361d6995929f9b60a96237381091cefb1b061225e5bb930b40494a5ac9d7524fd67dfe478e5ccd80f17b093cff5722025761fb0217c39dbd5ae45e01eb5a3113be93", - "address": "erd1qqqqqqqqqqqqqpgqrchxzx5uu8sv3ceg8nx8cxc0gesezure5awqn46gtd" + "info": "multikey - group1 - wallet1 with 8 BLS keys", + "pubkey": "3bd60bd8c5ace7d1999cd4bfd93dcb7bdc397d8d84efa5fd89439b7b727b0331bd00e3feae85df79e7d2b5eba1ea07003972fde3a7eb8d4ba25583a848be812e89b75fe8f3531d810ba2aaef629748ace6ac5ae73d8a2e6a65bb379f5be3b906", + "address": "erd19ptzdk7zvs3csjjf8u04j5kxzn5jck409sefdyx6x9afkjg4wunsfw7rj7", + "initialRating": 5000001 + }, + { + "info": "multikey - group1 - wallet1 with 8 BLS keys", + "pubkey": 
"2b9a1e5e291471f9eb0cf1a52db991f2dbb85446d132f47381b7912b041be00cccab748c25bdd6165cd6a517b6e99b0133bda4dc091dcdf6d17bc460ac0e8c6fe4b2460a980dd8dea8c857647bc5826a2b77fc8ba92c02deb2ba3daafb4d5407", + "address": "erd19ptzdk7zvs3csjjf8u04j5kxzn5jck409sefdyx6x9afkjg4wunsfw7rj7", + "initialRating": 5000001 + }, + { + "info": "multikey - group1 - wallet1 with 8 BLS keys", + "pubkey": "e464c43c4f3442ec520d4a6464d7fa96397ab711adf38b5a96f208303337f6a97ffdbd22e34a10deef6aa21ff078360d2bf7fae627a1ec55a9f120b35224b8e461b0f4de7d3ce800e6b910f37c4d03cce7039ce3a4a3a79ac0511c36435ccf85", + "address": "erd19ptzdk7zvs3csjjf8u04j5kxzn5jck409sefdyx6x9afkjg4wunsfw7rj7", + "initialRating": 5000001 + }, + { + "info": "multikey - group1 - wallet1 with 8 BLS keys", + "pubkey": "dab5c34e66e096d1f328fd9b045136724c8ccbf7b5a4bcf1e8c9edc9510712c0a7feff7818563aa799b37f1cdcfb330cc49d8482c7154988d33f63fe2526b27945326112c832fdf72a1b35f10da34c6e08b4079d9c56195c1ab64c84eab93b95", + "address": "erd19ptzdk7zvs3csjjf8u04j5kxzn5jck409sefdyx6x9afkjg4wunsfw7rj7", + "initialRating": 5000001 + }, + { + "info": "multikey - group1 - wallet2 with 6 BLS keys", + "pubkey": "a5624bec34e06d5026a334654be9e0118c8a02720a6bd868a51d5eb687819442cded09d1fef2e9d9db8bb2d5be01f1148b4819aee9e6a48b9c530285dbc4d800f4dd10d7f9a75d4b36de8fb52aec672cec91e0256f7e9848b10219748d9e708b", + "address": "erd1qz9gp38g4238r3077wq4tpc0jxaq0f87c0t2n2hr3x6fef85t3lshq2ejk", + "initialRating": 5000001 + }, + { + "info": "multikey - group2 - wallet2 with 6 BLS keys", + "pubkey": "eedbc5a3141b92148205d178d150905e68ca745ba54617025f84b33c91233afda2c2109084821e14f9a50d3f220fbc000ce6ad432f2a1865da9c6547016ecc7e07242ef490c0bdda29ec677f3e833f54eb2cf27e95b10b8edbdfa7de4e1bc000", + "address": "erd1qz9gp38g4238r3077wq4tpc0jxaq0f87c0t2n2hr3x6fef85t3lshq2ejk", + "initialRating": 5000001 + }, + { + "info": "multikey - group2 - wallet2 with 6 BLS keys", + "pubkey": "48c77bbf5d619fb13d76883736c664493b323c00f89131c9d70b3e4ac875103bd8248e790e47a82c9fdcd46fe33b52093a4b3b248ce20e6f958acd22dfb17335fcaf752bab5e29934f0a7e0af54fb2f51a9e6b1be30abdd701f7c9fbd0ad5d8e", + "address": "erd1qz9gp38g4238r3077wq4tpc0jxaq0f87c0t2n2hr3x6fef85t3lshq2ejk", + "initialRating": 5000001 + }, + { + "info": "multikey - group2 - wallet2 with 6 BLS keys", + "pubkey": "dd59e326a160d3b49ca25f2ab93b4f7ba766b124de66b68507b9e7a9cf69df7c4fca695592eb31e7e63061daef52d30cc1d362fc612d22631398cad4af46969e35407b293808133fc130b8f930ba41c6b88bc9ed9b884892113593d3ffc55297", + "address": "erd1qz9gp38g4238r3077wq4tpc0jxaq0f87c0t2n2hr3x6fef85t3lshq2ejk", + "initialRating": 5000001 + }, + { + "info": "multikey - group2 - wallet2 with 6 BLS keys", + "pubkey": "6b1a8bf3e5e7bacaaf99e3c89891a8a4ec7e9022986043f8206db9171ede3bd8cdcbbd7e8e1234180de5d651110ef706a8d964cb35048bc961611b55c8d9bd1b942b93c7e1b88157e7f79f2c08dbabe1af4612afe6044ab1be316976111b7019", + "address": "erd1qz9gp38g4238r3077wq4tpc0jxaq0f87c0t2n2hr3x6fef85t3lshq2ejk", + "initialRating": 5000001 + }, + { + "info": "multikey - group2 - wallet2 with 6 BLS keys", + "pubkey": "d8f8ef204ac892dd1a04648fc449ffdd11418dbd5c8fe623e5efda0bcae47cb41eb99c981585d80be1d668d8b7466619b6ead4d83976cc4f6879627a455603a74ab2adbfb5fed0f1a2b954363d97cbd3ac7feb284c83ac64422fad518e589c8e", + "address": "erd1qz9gp38g4238r3077wq4tpc0jxaq0f87c0t2n2hr3x6fef85t3lshq2ejk", + "initialRating": 5000001 + }, + { + "info": "multikey - group2 - wallet3 with 4 BLS keys", + "pubkey": 
"f547e115b1ada7cf9b8aeef45ee0d9ec4b206315ef44be706d994a0571688cd96291d1ab6c3761df29d00a2ba290a3185e4796bc49891906f86e16da01af3fd52320944b96b60e679ac8e686d4819e97e15e5fe46503c556b4acdd8079624005", + "address": "erd1tp2af4jvdh7p79myu5h6srtchh42p5e3pchqre3ejyyn9mqhwa3shpgj35", + "initialRating": 5000001 + }, + { + "info": "multikey - group2 - wallet3 with 4 BLS keys", + "pubkey": "0e5cc5f218a8fa9bae1f6b452430c0de205e6b251d0f1d606d1ce28203fb556768e6c4545ce8e90d640ef2cc1062f40ccf2ede124b926cbf3b2b0050b0e19f67e7e36ac1a7049178a77cbd65ee30cd0a40d9f98846ce439cc120717501f03180", + "address": "erd1tp2af4jvdh7p79myu5h6srtchh42p5e3pchqre3ejyyn9mqhwa3shpgj35", + "initialRating": 5000001 + }, + { + "info": "multikey - group2 - wallet3 with 4 BLS keys", + "pubkey": "760dc22525dce5be65a3a55ee07f7f012e0a89f435daec56eb475b0b5ca2d84b157894b8df64dfb570ecc633d5e1611639d43976e29f11c232236a9548b0145ee4e43fe495252c8c1f006b8df51d3835dee64a826f43167096b347b6919aa292", + "address": "erd1tp2af4jvdh7p79myu5h6srtchh42p5e3pchqre3ejyyn9mqhwa3shpgj35", + "initialRating": 5000001 + }, + { + "info": "multikey - group2 - wallet3 with 4 BLS keys", + "pubkey": "39316473217f7c435a543efa078254198dd079e3c6505e7cc1564b033de8a161dc2e9c392b1e584440510113b5942816102d7be5f4af9461af21a454fc1938a962b256c1c1d1f939198029ed0bf22c62893038d5687787cb46436c0ef4f12417", + "address": "erd1tp2af4jvdh7p79myu5h6srtchh42p5e3pchqre3ejyyn9mqhwa3shpgj35", + "initialRating": 5000001 + }, + { + "info": "multikey - group2 - wallet4 with 4 BLS keys", + "pubkey": "6a3b9f6b5fd38e79433daa9bf03543314f8a2a3d9f1fec8ebe2bc1ee97f135d83845dcecd201207c1b31d7624ddb330ae67fbfab4137cd734d96bc0975ae8bcfeecc4441b384d39d6900cdb7436450c23b4cc7674ec50055ea4a90861c503a91", + "address": "erd1e2ftj4hj43lkduwps9xdmtgjnmugkh9mndph4n2cxfmf6ufvn4ks0zut84", + "initialRating": 5000001 + }, + { + "info": "multikey - group2 - wallet4 with 4 BLS keys", + "pubkey": "ee8c987b9af9bba2912763fb7fcd6d6575db60806c5041fa91816ecc339ccfd60bf3cf49fb7017158f0b8e6050276907620bc040816207f14a952bb86752816231ae7f31ff701862cfe0abca367fc4cd63bafd4ad6e4df67612e4ec71462650c", + "address": "erd1e2ftj4hj43lkduwps9xdmtgjnmugkh9mndph4n2cxfmf6ufvn4ks0zut84", + "initialRating": 5000001 + }, + { + "info": "multikey - group2 - wallet4 with 4 BLS keys", + "pubkey": "cfe96f6b010d08f211c83f4ae3eb451d1d5205a50bdcd451706044dc21f523d25f214ab89dd5aab7ae03111197d6e6156e70ab348c9b0fab0a7839ea57fef6cd2324882b4387014dba201e6f87d5ca395e14d900e4563494f4f11a69ef6cdf14", + "address": "erd1e2ftj4hj43lkduwps9xdmtgjnmugkh9mndph4n2cxfmf6ufvn4ks0zut84", + "initialRating": 5000001 + }, + { + "info": "multikey - group2 - wallet4 with 4 BLS keys", + "pubkey": "05e9e43732ecff55e553b35b5ee1416065818db162a6fbf096186a1230d88bd057cebb72c5afaec16a803c4c4f69770752fe29be73a4069d0d01666ede963271192d4f324f2b3dcaec8b2c871c23cf185579a039dd5ab093c7cd9bca53e09c85", + "address": "erd1e2ftj4hj43lkduwps9xdmtgjnmugkh9mndph4n2cxfmf6ufvn4ks0zut84", + "initialRating": 5000001 + }, + { + "info": "multikey - group3 - wallet5 with 3 BLS keys", + "pubkey": "82cfc47999d1499cb880d46e8280b8c4fe576dff20a8ca6f6ac551887c637f935153f9ce2f21921a532477535d42ac05f730760c78415756add2eab6d57d94916f3ad51590b23404739d152f89b6d052df48cace1793897cd4eba722247a6195", + "address": "erd1dzjes5c6a8ru45clgla3q0k3ezm06svefjz7vzs8pjfnrqa8tcasl4j8hs", + "initialRating": 5000001 + }, + { + "info": "multikey - group3 - wallet5 with 3 BLS keys", + "pubkey": 
"7675098e73574db9c59bdce61e4b80251c6536201715dca40b2b69c09ce097690f3a9095d22b006531e3b13b30894803bd7ede3e6d80c9064c431f8671db085ab1052354cb26a7a2436340b273b6c95c84ab94bb9531b99c5f883602b5284017", + "address": "erd1dzjes5c6a8ru45clgla3q0k3ezm06svefjz7vzs8pjfnrqa8tcasl4j8hs", + "initialRating": 5000001 + }, + { + "info": "multikey - group3 - wallet5 with 3 BLS keys", + "pubkey": "c50f398a853c670ed625a12eddae175df5a90e034a54484a832566fc91f9b83d5daf1bc821cc347ba7e45f3acd4e1d00d0d7f52235824fd1326a7f370b58fc7dd98edfff4a41739a2015c6ed3a3c0bf3c986efeee187be70f1133fc4379dad95", + "address": "erd1dzjes5c6a8ru45clgla3q0k3ezm06svefjz7vzs8pjfnrqa8tcasl4j8hs", + "initialRating": 5000001 + }, + { + "info": "multikey - group3 - wallet6 with 3 BLS keys", + "pubkey": "4bd3f30f608b22b32100c6360def228ec95aa24e3048010bb64606392f602e180a0b2a12f7f92ef1d7f73ce1271ae30693bec692b15802c7ba079939640570fdc7f4d411c084ed0fe612ee223227ca3d02dc9732cf686ba8885007de53f8ec89", + "address": "erd14gg3v6j4505ucx7t2wtl98tgupmyp748aq92jefmp5ha6e3pccgq9clwe9", + "initialRating": 5000001 + }, + { + "info": "multikey - group3 - wallet6 with 3 BLS keys", + "pubkey": "71e8458f92997a00c1cd0e638b9ec42ab136828fc13f0ec643b60af451270cc81d50f4c4578a7c93a700ee21e065281593e7995d2454356cbfdeadb9ffe7bf33ba8f7a31a1d2e76bba5a5f88a613ef37e35595838d0b7f4bd12da7d6fe743499", + "address": "erd14gg3v6j4505ucx7t2wtl98tgupmyp748aq92jefmp5ha6e3pccgq9clwe9", + "initialRating": 5000001 + }, + { + "info": "multikey - group3 - wallet6 with 3 BLS keys", + "pubkey": "d9c30948bffad18776b786f6367142b76605ac6e33a8d38c68c31c7afb099f1a83efb752a87afaf9d04a4a8fb656e40bfe2a4aa6e0c16b82d22bd6c232c2ce5e6672ac6232d2da6945bc033b04cbaaeb4b9af4b29585094e034ab8dcfb8b9c19", + "address": "erd14gg3v6j4505ucx7t2wtl98tgupmyp748aq92jefmp5ha6e3pccgq9clwe9", + "initialRating": 5000001 + }, + { + "info": "multikey - group3 - wallet7 with 2 BLS keys", + "pubkey": "55fc7ab2e8c0a07bef2e1a9b35764cee1d604cb5634b7226a7310ce56a1f02e99d248fc5b416c4253ac7b88353b1a60f31e1104534e36cb00f46bdcb20a0d24f453e2c8d3cc48dc3c6086edbe16149aae14eb3a4d24ee2b217a4759bc0c0ea88", + "address": "erd1xdfc44mk4ut5cv6l3mq0py6h88cty9ykacskm8xv3tvrp893kmxqppcefg", + "initialRating": 5000001 + }, + { + "info": "multikey - group3 - wallet7 with 2 BLS keys", + "pubkey": "a8e662e63ad0e87f2dc66cbed41d398b73a2da2aaced6cc466ed378b62daee28b3db8e8327a06278a094b05840965c17448ffc8a1c96e532a7960d1a15d2cabd16edadc476bfb4af3a825aff801f615d127b70b4745b88e01627a99ba52d5317", + "address": "erd1xdfc44mk4ut5cv6l3mq0py6h88cty9ykacskm8xv3tvrp893kmxqppcefg", + "initialRating": 5000001 + }, + { + "info": "multikey - group3 - wallet8 with 2 BLS keys", + "pubkey": "f5e5eb9dd18aeb5d4829ab08795a9e4c8632a4fd248feed68382add1f2474d3cec042d51b897871bfee1f1c1fbeabf13d1c39d4f9b412948d27737f2b82e85474b7049a700ee8735373564791f0d20692dd1f8b494de7bab0a8415f01532ed90", + "address": "erd1997jfwzrum4rrk59ar5supcyge9rpa73xgv2p45h3unt880v399svt8c9g", + "initialRating": 5000001 + }, + { + "info": "multikey - group3 - wallet8 with 2 BLS keys", + "pubkey": "e66d7ac5e51a382164aeaae9924dda3272296a145d3c6178b962e3b7eb83e75515e665c327e86f3ef597ca840f8c5c0ace19ac9a8fbcdc573f9237d112fb1c467d646737863ccd1fe61f4c4341f9805f8e1fe98348b50c3c3f93f62de3975980", + "address": "erd1997jfwzrum4rrk59ar5supcyge9rpa73xgv2p45h3unt880v399svt8c9g", + "initialRating": 5000001 + }, + { + "info": "multikey - group3 - wallet9", + "pubkey": 
"2a1c49643e564cdf28bba96dfd6cd8ad38a5958b2b3c9a8293ffb54e9df0a0188a67de2fb947b8ae3dd06b7411aaae0e8bedef795ad3b35ac9f1402dcd0631d9d530b01b3880362fbd3ed9a8488ecabfb1b46cac225c5d48c39be3e28503f90f", + "address": "erd1e9cg9ys8fh77n9eaxpg47sxaes4fe9g2nvy6a65qpxykcx8grg9sv45lss", + "initialRating": 5000001 + }, + { + "info": "multikey - group3 - wallet10", + "pubkey": "5c9784523f360a802d4687c9c76bcef41a738d034aa8503a055c33898504b09670c1f637ca632e5290b3acf79a2191072f68c4192a9cbeb34f50c4a941e34247a64f642a6a074bec683bdfb83587cfdc0390ebd74505cb836cf04f3268e32f99", + "address": "erd1xdrltsygywhmtxzsmrgjlsxsxrf4y2ayv0z50y666dgsp66trxwqzajk96", + "initialRating": 5000001 + }, + { + "info": "multikey - group3 - wallet11", + "pubkey": "db7f7726c3e68abb28d070f529f2c222755d863aa9d7c0200fde10c93ccb8edcee8d45c9eb925bd5a0fa33c54d19270b7058f6e72256dad84214375f189310a73153cd84feef4b493ab61437b0cbcc2c592e6c093653a533631c8e0ab036c207", + "address": "erd1lytewufjflpwl6gtf0faazjr59nd2fhfwlk7ew72hkpgdkmunl8qfrpywg", + "initialRating": 5000001 + }, + { + "info": "multikey - group3 - wallet12", + "pubkey": "a27b6f47c53263e5c8d69779a169d50605cdd7ddb4b5384f2d46e08ace6f787a60f6cf26256b62fafba9c91a87ff070bc99254fcb5a73239fc14f2108de62189005b51b21e2922b37c6cc657017832e3a59dfcc7a54ac5dcb997136da4e2748b", + "address": "erd1s8tqztm4u4gw23489lps97qxe8vck8eln3a424y9c6yujsc96nas0l968d", + "initialRating": 5000001 + }, + { + "info": "single key 1 - wallet13", + "pubkey": "d3e0427c22ff9cc80ef4156f976644cfa25c54e5a69ed199132053f8cbbfddd4eb15a2f732a3c9b392169c8b1d060e0b5ab0d88b4dd7b4010fa051a17ef81bdbace5e68025965b00bf48e14a9ec8d8e2a8bcc9e62f97ddac3268f6b805f7b80e", + "address": "erd1p7p0f3n8dxtj08hsp9hccqg932pd4f94rq3adg6g55etx8g4z8tsmg5e0g", + "initialRating": 5000001 + }, + { + "info": "single key 2 - wallet14", + "pubkey": "b0b6349b3f693e08c433970d10efb2fe943eac4057a945146bee5fd163687f4e1800d541aa0f11bf9e4cb6552f512e126068e68eb471d18fcc477ddfe0b9b3334f34e30d8b7b2c08f914f4ae54454f75fb28922ba9fd28785bcadc627031fa8a", + "address": "erd1uyeel03ea837dphrx2ak77hdvlhjdcqdwgyg6k99gqn602ymsn7qptmedj", + "initialRating": 5000001 + }, + { + "info": "single key 3 - wallet15", + "pubkey": "67c301358a41bef74df2ae6aa9914e3a5e7a4b528bbd19596cca4b2fd97a62ab2c0a88b02adf1c5973a82c7544cdc40539ae62a9ac05351cfc59c300bbf4492f4266c550987355c39cff8e84ff74e012c7fd372c240eeb916ef87eead82ffd98", + "address": "erd1ftyzkdhl7rl782mrzrdc2jck3egydp0ydzhcjm9gc8s2jym5egrqadl4h6", + "initialRating": 5000001 + }, + { + "info": "single key 4 - wallet16 with 3 BLS keys", + "pubkey": "ab0a22ba2be6560af8520208393381760f9d4f69fca4f152b0a3fe7b124dd7f932fd8c1fbb372792c235baafac36030ceaf6ebf215de4e8d8d239f347f2fed10a75a07cbf9dc56efbbfca2e319152a363df122c300cdeb2faa02a61ebefd8a0e", + "address": "erd1rsl2sj5g87ltfq0hvrmgm35mlg4lzfs29p4gzxh0lh4vj2e8ykuqh69lha", + "initialRating": 5000001 + }, + { + "info": "single key 5 - wallet16 with 3 BLS keys", + "pubkey": "caa87d67e195b52355d2c8f7f74c829395b134bd4a911f158e04b2d7e66a5ba195265743f10cf190105512fb3df9d708a8056c07a6165874d8749742502c0eada7d15b6c55f22c2cce2cf5001288f6b2d89319e6ff888344c01adcd362be8998", + "address": "erd1rsl2sj5g87ltfq0hvrmgm35mlg4lzfs29p4gzxh0lh4vj2e8ykuqh69lha", + "initialRating": 5000001 + }, + { + "info": "single key 6 - wallet16 with 3 BLS keys", + "pubkey": "598be7548d6bb605bd19d83037bf58a7797a4e48b33011a60a5633cf6fe8d59906130777c46f50a50d3d0f958effb5147befd5d67cbec7c5daddeaade4dca5d8a54fe0394fde7b6455e4fc4db91f33f907d450b45fc2d4a9990f96d893093d91", + "address": 
"erd1rsl2sj5g87ltfq0hvrmgm35mlg4lzfs29p4gzxh0lh4vj2e8ykuqh69lha", + "initialRating": 5000001 + }, + { + "info": "single key 7 - wallet17 with 2 BLS keys", + "pubkey": "69b277b127d025638dbb54d36baa8321540f6210fc5edaac77f94798c039a383aead3ae7c93cdfb8b4caab93a952d101ee2322c129b6ce2726359a65aa326bd35e54c974118503944fcaf80be80b5c3fc9cf86d574d0096140f16fbc55fc4984", + "address": "erd19yrjty2l4ytl6d3jynp5mqfekq4uf2x93akz60w7l3cp6qzny3psnfyerw", + "initialRating": 5000001 + }, + { + "info": "single key 8 - wallet17 with 2 BLS keys", + "pubkey": "a006ad94b28c414c6ec0a5effb84594f39ede4f82b60aa077e2065b89407c78dd6479ebceed7bd42ed2779c34b718f11651427e550948cb8be2e6cea03a128ac3c52e599ada6f34912b119f94de472af0397a68769f1b3f647e87090918e030b", + "address": "erd19yrjty2l4ytl6d3jynp5mqfekq4uf2x93akz60w7l3cp6qzny3psnfyerw", + "initialRating": 5000001 + }, + { + "info": "single key 9 - wallet18", + "pubkey": "91874fdfa8dfb85faf4f404b21c95fbb5d154db5a6abe46bd7860de9e5ddb78b61b5c6ddcf86e5ec8a237e130ed0fc0e418fb97d6fce5f6642ba33f99eff694ec7fb2921b423899a9a5888914bd625636a9b1ea186566561cd35b79aaca20e88", + "address": "erd148lq42zdzz34y0yr8avldsy7gw0rmuvj4lmstzug77v08z3q0ncszfk8w9", + "initialRating": 5000001 + }, + { + "info": "single key 10 - wallet19", + "pubkey": "cc3e0c1021f8c4c092499547b064cffef19d07f0bf250e5265cea1e49b282a7f6efb4b415ad37db2ef6efa253475f511e74efc2f76c087c9798f72187986bb752f61d0ac220045f8e2d945343f3bbb8ef34a6025fb855dd7d953a81477ad2309", + "address": "erd1k2v4h3805gnxf78c22g7lfe4pgq2lmr4ezmkk2rqkej6yjd7g5ssu88fme", + "initialRating": 5000001 + }, + { + "info": "single key 11 - wallet20", + "pubkey": "c2885340a6ba4341d68f80ce419deadf374bc52e2749c278b5bce5f795e9a90a04ef4f07a0b47777feb1982749b57a174b4927338df9da99a417a2df3152a9ebaf3465bfc092058324edf6892313f24be4612eb5663bb59d67a831dda135aa8b", + "address": "erd1nzjyj2ykpway04pczl42fgrlza2f0eaf97fxgnuuw39vyee36xlqccc3qz", + "initialRating": 5000001 + }, + { + "info": "single key 12 - wallet21", + "pubkey": "cf8a2f97b7822acb16016a6debaaedea39959c9ac60b80e50f24734a0e0f6128ed1d216f5aed71866ca34bb30b6e8300e7995237744e766f6016ca28d4ebb2274326cb7af1a3c12d795cc127a4bf9aa9497d89ef0450c40f675afd1afa761012", + "address": "erd1yp0nvml5c45us3qzreqxkjxaakxn744t3gdva9s8xndcakzawutstepmm5", + "initialRating": 5000001 + }, + { + "info": "single key 13 - wallet22", + "pubkey": "95a81b70474d59c1292bc5742db1a7b9bf03cb516ede6fb5cb3489ee812de8cccfc648f3ff3cda26106396a38c1c1f183b722392397a752d949c5123888b7a8ec012fe518f6efc25015a620b1559e4609286b52921e06b79fd563a9b3b4c4e16", + "address": "erd1qyg80tr4rd65ur3hedm9h4yv3fcwmm6vnyrypnm972nd80889hxqdfgwrc", + "initialRating": 5000001 + }, + { + "info": "single key 14 - wallet23", + "pubkey": "5909def579148f456e8490659b859f80f8ccd62b5adda411e1acdc615c2ec795a88632cf2ec210a56ba91973fd3f07160f559f82f7afaafee008679fefb1b0cd2f26f4324197e6239c000accd1c427138568a8a9e276690c154d3df71a1f970c", + "address": "erd14x6d48q59zjh5p909fyw7e46czftgdawyf734cnmgk5e63ghrvvsqp254t", + "initialRating": 5000001 + }, + { + "info": "single key 15 - wallet24", + "pubkey": "58d6cfe7e8c3ec675da17e492c4ba97759fa15fc0f41bbe29d1d49d5f5ca7db142450ada15e1e4bf4657614e26cceb04ed5c0ca17207b0e24c4baf5f91afc092d43a02aaeae76218420817c85292f8de7d3a2b4f3c8615c2bb6a6d1c74267788", + "address": "erd1wyxylus33e476h5kta7e0caeqvgvcgrxh0az33e7szya6g7mh2ws0n27sa", + "initialRating": 5000001 + }, + { + "info": "single key 16 - wallet25", + "pubkey": 
"eb79770be0ae70e1d6932832eab94117b0c1a2442b3fdb380b1ad5a809b6221a4905e02a628886c925d152c4e5006413fe69d1f11cf543f4802d4ce4e5eac2b18b78a79215c737e2e098b40802044bc6e946b712299286c34f6d33d8b681790d", + "address": "erd1v3ylw7t6vzjzs06xjf6ccmf576ud38g2ws45tjkjg48s38jefpzqlwms9w", + "initialRating": 5000001 + }, + { + "info": "single key 17 - wallet26", + "pubkey": "bc03265a52610464f2f0431a69647be3106924f5bf67cf87cd889bf86d81739b3f0f37bad11ab93c5209dc4496f4130d69a9649596b97884b7e91e0b4d7c59169dd0729ac3e3bcd308efac56bc29d3cc249d8759580ab117943aa40df3baac05", + "address": "erd1twel4azu6uptw878y063p93mjr84y5m4kpsww2aeqj4pg5jeplgst04rhg", + "initialRating": 5000001 + }, + { + "info": "single key 18 - wallet27", + "pubkey": "aa4be8f36c2880ee4d2ca79dbd7a53537e3965f255dfb5c75324fe29fcb6ce56148fbaea334268e413f0df95f580c40fb3484165b2852236e3a1aa68151ac3327d981cfae52d99f9a564bd3139cdd768661854dae78880d9320191cdb2989815", + "address": "erd1q2se75ucl9as9j7e48v00jrnj6hvtk5vqxa4a3ag5729vctsdkasm20cyc", + "initialRating": 5000001 + }, + { + "info": "single key 19 - wallet28", + "pubkey": "3e86fea8365791b3becfc9aa2bc239f6be58725e61e46e7935c56479ad285e0781da1f277980d2e1d0ecff3982f2d90f321aa03f3d934adf260628d0ed0dc81a98dfaf1e6278e042d6c78dc65f2fa79d3b457754a321b8a0d7bf9998feeea817", + "address": "erd18cc6cm35xhv7kzwsm79l4ma6jpz3ee5l0yjxuc66kh6rcgtawtuq6lzp9f", + "initialRating": 5000001 + }, + { + "info": "single key 20 - wallet29", + "pubkey": "aa92cf6e0ac62df09e7adca139c41a162ad668e7a797770b6d195cd9b175d0fca9eac3f4bf859967139f2ba109741a144e3dc5e6ccaeb6cd21f1d202b10f08832274cd9cdf6b10dbc2c60acdd1c70ae9beae2139e2b69eccbcde32a7f3991393", + "address": "erd1psux99h4jljyt3nkw8pruv3spw5r0unqe4wk8837mm9my88gl28qj6mml5", + "initialRating": 5000001 + }, + { + "info": "single key 21 - wallet30", + "pubkey": "f2b7819d1c2e2e1d007edcf896034085645f3c81e7c7fe21aa7ad4f35f8b863ee1db13448d15a3d0d15018f741a991010a9374710b628e41ef078be8a10249f2a3000598432c28186af1c04a219ac914434dca9c27e61485d701505112093f8a", + "address": "erd1vgm89ngmv2ghzsyq8xjtt45crekkxnhsq30yxzlq86uc3ra3r57qa3mw2p", + "initialRating": 5000001 + }, + { + "info": "single key 22 - wallet31", + "pubkey": "292742eee9d12dade21b4cd8bcd44c210c26d927ef6dbd9cad59008643a971a86ea6dfce247515d4266789b3fe8e35167278e781e52b4cd7b9781554ba67ecc08680eb19628e7741c94d8456090a08aceab1c8d2ed39bf59e8e282381aa32a0a", + "address": "erd1k767vmmn8vg8xvuny32ppwr4dxrlgmpykn0u7nm92evlag3wkukqdgsf5u", + "initialRating": 5000001 + }, + { "info": "single key 23 - wallet32", + "pubkey": "11f784d2970d65769ce267710b3d08b28b78c3f79283758918c8ef15717ccbe90c23348cafe0e98a5d101b8dafbe7d081c6821dee8bf40ba150664ccc2dbbdd6358c92404e677d82910ce61f1d7584fbbbc9ebf71b7f35a118556e2a5c220501", + "address": "erd1hwe8lskmzsdpuy3f6hldamvn0zrhzldec8m4tt8hupq58d7gyrequy8wsp", + "initialRating": 5000001 + }, + { + "info": "single key 24", + "pubkey": "0382c11222db8a15e42e3ff64893df46c7720b439fb2a546462815ac0a8fa3bed99fceae5da9b68524e36f61cc074d09ceafec274c54f182c56a77583f9421f19c777265c43da1d5747304b36f0367cf3e8e5f63f41dad1a4362d9e1997a9e16", + "address": "erd125eyrjk99zadr04gm9z2p4nckmnegexs5nyk7ek85rut2665t75sql3w88", + "initialRating": 5000001 } ] } diff --git a/cmd/node/config/p2p.toml b/cmd/node/config/p2p.toml index 62d30fd19f7..519b7684b2c 100644 --- a/cmd/node/config/p2p.toml +++ b/cmd/node/config/p2p.toml @@ -23,10 +23,11 @@ [Node.Transports.TCP] ListenAddress = "/ip4/0.0.0.0/tcp/%d" # TCP listen address PreventPortReuse = false - [Node.ResourceLimiter] - Type = "default autoscale" #available options 
"default autoscale", "infinite", "default with manual scale". - ManualSystemMemoryInMB = 0 # not taken into account if the type is not "default with manual scale" - ManualMaximumFD = 0 # not taken into account if the type is not "default with manual scale" + + [Node.ResourceLimiter] + Type = "default autoscale" #available options "default autoscale", "infinite", "default with manual scale". + ManualSystemMemoryInMB = 0 # not taken into account if the type is not "default with manual scale" + ManualMaximumFD = 0 # not taken into account if the type is not "default with manual scale" # P2P peer discovery section @@ -71,10 +72,10 @@ [Sharding] # The targeted number of peer connections - TargetPeerCount = 36 + TargetPeerCount = 41 MaxIntraShardValidators = 7 MaxCrossShardValidators = 15 - MaxIntraShardObservers = 2 + MaxIntraShardObservers = 7 MaxCrossShardObservers = 3 MaxSeeders = 2 diff --git a/cmd/node/config/prefs.toml b/cmd/node/config/prefs.toml index 98d5c02557f..42e16624ab8 100644 --- a/cmd/node/config/prefs.toml +++ b/cmd/node/config/prefs.toml @@ -8,7 +8,7 @@ # In multikey mode, all bls keys not mentioned in NamedIdentity section will use this one as default NodeDisplayName = "" - # Identity represents the keybase/GitHub identity when the node does not run in multikey mode + # Identity represents the GitHub identity when the node does not run in multikey mode # In multikey mode, all bls keys not mentioned in NamedIdentity section will use this one as default Identity = "" @@ -28,7 +28,7 @@ # ] PreferredConnections = [] - # ConnectionWatcherType represents the type of a connection watcher needed. + # ConnectionWatcherType represents the type of the connection watcher needed. # possible options: # - "disabled" - no connection watching should be made # - "print" - new connection found will be printed in the log file @@ -71,7 +71,7 @@ # NamedIdentity represents an identity that runs nodes on the multikey # There can be multiple identities set on the same node, each one of them having different bls keys, just by duplicating the NamedIdentity [[NamedIdentity]] - # Identity represents the keybase/GitHub identity for the current NamedIdentity + # Identity represents the GitHub identity for the current NamedIdentity Identity = "" # NodeName represents the name that will be given to the names of the current identity NodeName = "" diff --git a/cmd/node/config/systemSmartContractsConfig.toml b/cmd/node/config/systemSmartContractsConfig.toml index 1f4c9456292..372cd0eba03 100644 --- a/cmd/node/config/systemSmartContractsConfig.toml +++ b/cmd/node/config/systemSmartContractsConfig.toml @@ -8,9 +8,11 @@ NumRoundsWithoutBleed = 100 MaximumPercentageToBleed = 0.5 BleedPercentagePerRound = 0.00001 - MaxNumberOfNodesForStake = 36 + MaxNumberOfNodesForStake = 64 UnJailValue = "2500000000000000000" #0.1% of genesis node price ActivateBLSPubKeyMessageVerification = false + StakeLimitPercentage = 1.0 #fraction of value 1 - 100%, for the time being no stake limit + NodeLimitPercentage = 0.1 #fraction of value 0.1 - 10% [ESDTSystemSCConfig] BaseIssuingCost = "5000000000000000000" #5 eGLD @@ -39,3 +41,10 @@ [DelegationSystemSCConfig] MinServiceFee = 0 MaxServiceFee = 10000 + +# Changing this config is not backwards compatible +[SoftAuctionConfig] + TopUpStep = "10000000000000000000" # 10 EGLD + MinTopUp = "1000000000000000000" # 1 EGLD should be minimum + MaxTopUp = "32000000000000000000000000" # 32 mil EGLD + MaxNumberOfIterations = 100000 # 100k max number of iterations for soft auction config diff --git 
a/cmd/node/config/testKeys/delegators.pem b/cmd/node/config/testKeys/delegators.pem new file mode 100644 index 00000000000..78f89d05110 --- /dev/null +++ b/cmd/node/config/testKeys/delegators.pem @@ -0,0 +1,50 @@ +-----BEGIN PRIVATE KEY for erd1z48u9l275l2uy4augfytpp2355wvdnc4gwc9ms9gdhdqru3fz9eq5wqe3e----- +MzJlYzk2ZTgxMDMyYzBiZjhmN2UxNjhhODliNTc2MGMxMzM5NmMyNmEyNDhiYzU0 +NjhlMTVmZTlmZDc3NDM4YTE1NGZjMmZkNWVhN2Q1YzI1N2JjNDI0OGIwODU1MWE1 +MWNjNmNmMTU0M2IwNWRjMGE4NmRkYTAxZjIyOTExNzI= +-----END PRIVATE KEY for erd1z48u9l275l2uy4augfytpp2355wvdnc4gwc9ms9gdhdqru3fz9eq5wqe3e----- +-----BEGIN PRIVATE KEY for erd1qm5erxm0va3hcw9jfxtp2gl3u9q9p4k242pk9xx3vezefkgj2vhs0wk8cx----- +NWI5ODczMjc4YjExNmFkMmE2NjY2NTI2MmVmNDhlN2FlYWM4OWRlMTAyMDhkZGEw +ODdmMWVjMThkMDBkMzc5NTA2ZTk5MTliNmY2NzYzN2MzOGIyNDk5NjE1MjNmMWUx +NDA1MGQ2Y2FhYTgzNjI5OGQxNjY0NTk0ZDkxMjUzMmY= +-----END PRIVATE KEY for erd1qm5erxm0va3hcw9jfxtp2gl3u9q9p4k242pk9xx3vezefkgj2vhs0wk8cx----- +-----BEGIN PRIVATE KEY for erd1rqwt9vpfn072tahtcvup2dxz4nvqfs3n5p9eh0jnypkppxmdheaqpcqfzz----- +MmFjNGZlYTlkNmI3OWQ5ZGU5MjkyZmZlZGE4ZjkwYWNmODQzNzVmZDIwOGEyMjkz +YjcxN2JhNWI1ZWI1MjQ3ZjE4MWNiMmIwMjk5YmZjYTVmNmViYzMzODE1MzRjMmFj +ZDgwNGMyMzNhMDRiOWJiZTUzMjA2YzEwOWI2ZGJlN2E= +-----END PRIVATE KEY for erd1rqwt9vpfn072tahtcvup2dxz4nvqfs3n5p9eh0jnypkppxmdheaqpcqfzz----- +-----BEGIN PRIVATE KEY for erd17uygg7qq05mjhectgymj6fwq59ysr4p92cy0yz3jrxxj6253p40sj77wr6----- +MzU0N2M5MWRhNzRhMTY5MDZjMzhkMTY5ODQ4MWRiOGI1Zjk0YWJjM2VlNDgyMjY5 +ZDhjMDEzMTdlOWVlYWUxYWY3MDg4NDc4MDA3ZDM3MmJlNzBiNDEzNzJkMjVjMGEx +NDkwMWQ0MjU1NjA4ZjIwYTMyMTk4ZDJkMmE5MTBkNWY= +-----END PRIVATE KEY for erd17uygg7qq05mjhectgymj6fwq59ysr4p92cy0yz3jrxxj6253p40sj77wr6----- +-----BEGIN PRIVATE KEY for erd1qslr87nj5gkv2396js3fa2za5kqgwugqnz4j4qqh22mxpnse2lws8srsq6----- +MDc5N2IzZDFmMmY1YzYzOTYxYjdhMThmNmI2OWZlNDk0NmJkNjUyOGFhNjU3ZTQw +Zjg3NjY2MmM3MmNhMWQ3ODA0M2UzM2ZhNzJhMjJjYzU0NGJhOTQyMjllYTg1ZGE1 +ODA4NzcxMDA5OGFiMmE4MDE3NTJiNjYwY2UxOTU3ZGQ= +-----END PRIVATE KEY for erd1qslr87nj5gkv2396js3fa2za5kqgwugqnz4j4qqh22mxpnse2lws8srsq6----- +-----BEGIN PRIVATE KEY for erd17pjlqg55c6v3fjvpqec8peefk74g8neygr84ymw7cqzudmzaw7lqnln7sz----- +NTBkMzFiMzdmOWMyM2NmMWMyYjgzZThkNGRmYzgzNjU5OTkyOGIxMzVhZDI3OGQ0 +Yzk5Y2UyZjFhMDUzMjI4YWYwNjVmMDIyOTRjNjk5MTRjOTgxMDY3MDcwZTcyOWI3 +YWE4M2NmMjQ0MGNmNTI2ZGRlYzAwNWM2ZWM1ZDc3YmU= +-----END PRIVATE KEY for erd17pjlqg55c6v3fjvpqec8peefk74g8neygr84ymw7cqzudmzaw7lqnln7sz----- +-----BEGIN PRIVATE KEY for erd19ztfuew6ejrwq5mpax4xztjwh5j63u9vge4dum9vlyy7hg3pc86qgmt6nm----- +Mjg2ZjFlOGJmNDY5OGU3ODMwNzc2YTRjZjdiMDcwZDNhZGUzYzQyMzgwN2U1ODdk +MTYxN2Y3NDBlNWZiYzU3MjI4OTY5ZTY1ZGFjYzg2ZTA1MzYxZTlhYTYxMmU0ZWJk +MjVhOGYwYWM0NjZhZGU2Y2FjZjkwOWViYTIyMWMxZjQ= +-----END PRIVATE KEY for erd19ztfuew6ejrwq5mpax4xztjwh5j63u9vge4dum9vlyy7hg3pc86qgmt6nm----- +-----BEGIN PRIVATE KEY for erd1age5t5qfrke4vm47vaq9a4yewllh6227qm4fcy3rc7g5ktmzyatsgf4wcw----- +MzkzMDA1MjU2OWY2MDhkYjYxOGI1NDYzMmI1ZWFkZGNhYmJhODQ2NGJmMjY4NWU4 +YmU0YWY5MDNkNzAwYTQ0NGVhMzM0NWQwMDkxZGIzNTY2ZWJlNjc0MDVlZDQ5OTc3 +ZmY3ZDI5NWUwNmVhOWMxMjIzYzc5MTRiMmY2MjI3NTc= +-----END PRIVATE KEY for erd1age5t5qfrke4vm47vaq9a4yewllh6227qm4fcy3rc7g5ktmzyatsgf4wcw----- +-----BEGIN PRIVATE KEY for erd1jt0vv29trqs3nddzkxsf950xx0t5uvyncmuamwryneh9wsee5wpsgue96d----- +NTBlZDI0NzM3ZWNlOGEzYWZlYjJlZTY3N2NiNzUxYWI0ZTA4OWNhMGY3ODhlNjNj +MmVhNWQzMGE2MmMzNmE4ZTkyZGVjNjI4YWIxODIxMTliNWEyYjFhMDkyZDFlNjMz +ZDc0ZTMwOTNjNmY5ZGRiODY0OWU2ZTU3NDMzOWEzODM= +-----END PRIVATE KEY for erd1jt0vv29trqs3nddzkxsf950xx0t5uvyncmuamwryneh9wsee5wpsgue96d----- +-----BEGIN PRIVATE KEY for 
erd1c83rmk3n8ys4g9dkg3q70thx3v787qtpfmk23epu4xsadpyd3dnsejf2r7----- +OGQ1MDBkNjg3NTA4MWU0Y2JjODk5ZTMxNmYwOGVmMDVkZDMyODRkMWFhZDUzYmJk +NGRmYmY4MTAyMzEyYmY4YmMxZTIzZGRhMzMzOTIxNTQxNWI2NDQ0MWU3YWVlNjhi +M2M3ZjAxNjE0ZWVjYThlNDNjYTlhMWQ2ODQ4ZDhiNjc= +-----END PRIVATE KEY for erd1c83rmk3n8ys4g9dkg3q70thx3v787qtpfmk23epu4xsadpyd3dnsejf2r7----- diff --git a/cmd/node/config/testKeys/group1/allValidatorsKeys.pem b/cmd/node/config/testKeys/group1/allValidatorsKeys.pem new file mode 100644 index 00000000000..0a34418f748 --- /dev/null +++ b/cmd/node/config/testKeys/group1/allValidatorsKeys.pem @@ -0,0 +1,60 @@ +-----BEGIN PRIVATE KEY for 309198fd7b6bccb1b6279ea2e5c5f2a33b9f6fe5f23180778d8721710344989b07d10dd1bd19307b3cd06eab9d1358062511610ccdad681ae1165016256cc1fdc0fed5041a5c29d7773b2994022bf2dc9efb937b3bb7cc9d72670448fad7d091----- +NmRjMzcwNGQ0YzhkOTcyM2I2MjBmZmUwOTkyNDk5ODhiNzc3NmRiMDliYTI3NjAx +MWY1MTc1ZWM1ZTZlNWIzNg== +-----END PRIVATE KEY for 309198fd7b6bccb1b6279ea2e5c5f2a33b9f6fe5f23180778d8721710344989b07d10dd1bd19307b3cd06eab9d1358062511610ccdad681ae1165016256cc1fdc0fed5041a5c29d7773b2994022bf2dc9efb937b3bb7cc9d72670448fad7d091----- +-----BEGIN PRIVATE KEY for cd8cc9d70a8f78df0922470f1ebee727f57a70fb0571c0b512475c8c7d3ce1d9b70dd28e6582c038b18d05f8fbd6ac0a167a38614c40353b32ef47896d13c45bde57e86d87dd6e6f73420db93aeb79a1fd8f4e297f70478685a38ed73e49598f----- +ZGMyYmYxYzVjNzY1OTI2MjVmZGVmNzFkNGJiNjlkZTFiYmNkMGIyZmUwYWU4NzY2 +YzQyMmFmMjM1NmQ2MWY2OA== +-----END PRIVATE KEY for cd8cc9d70a8f78df0922470f1ebee727f57a70fb0571c0b512475c8c7d3ce1d9b70dd28e6582c038b18d05f8fbd6ac0a167a38614c40353b32ef47896d13c45bde57e86d87dd6e6f73420db93aeb79a1fd8f4e297f70478685a38ed73e49598f----- +-----BEGIN PRIVATE KEY for e441b8a7d35545f7a02f0d57d144c89810d67ecb9abb5804d1bcfd8b270dc30978c56fbf9618e9a6621ca4e6c545c90807f7fe5edfd34ccab83db7bc0bd68536bb65403503d213c9763ec2137d80081579bb1b327f22c677bdb19891f4aae980----- +MTA0NWEwNjFlYzVmY2E5NWZiZmQwYmY2YWJjYjRiNDM4ODI0M2U0MzdjZTAwZTZl +ZTMzYTcxY2MyZThlNTQxMw== +-----END PRIVATE KEY for e441b8a7d35545f7a02f0d57d144c89810d67ecb9abb5804d1bcfd8b270dc30978c56fbf9618e9a6621ca4e6c545c90807f7fe5edfd34ccab83db7bc0bd68536bb65403503d213c9763ec2137d80081579bb1b327f22c677bdb19891f4aae980----- +-----BEGIN PRIVATE KEY for 72d8341713a271d510c7dfd02455ef86e9af4c66e06ac28fbb4c4e8df1e944e685bae2bee2af4101c19922b64e44e40b5d4905755594a719c1d50dc210515495d0de9b3a1d4ed42fd45a973353fe2c2548c45bb2157d8bf68e0937cc20fe1011----- +YzFkZWY5YTY3YTBhYmI1MzVjNjYyYjE1MTIwMjA2NjgwZTc0MjBhODYyNTkyZjRi +NTQ2NjE5NDM0YTBlOTI2Nw== +-----END PRIVATE KEY for 72d8341713a271d510c7dfd02455ef86e9af4c66e06ac28fbb4c4e8df1e944e685bae2bee2af4101c19922b64e44e40b5d4905755594a719c1d50dc210515495d0de9b3a1d4ed42fd45a973353fe2c2548c45bb2157d8bf68e0937cc20fe1011----- +-----BEGIN PRIVATE KEY for 796d2f56ee9fa8f4ff9fc7735bbb4d644be8dd044f7f965362aed3d562e00b11f730b2fe21eb9c9fd100b68a5d3dbf07bae63d25739f1304ab638330f0e8c207a76de648e2523ad1693f4ffe9def8a1d5aca2c6c1c14e1fcc20089db069d1a0e----- +YzUyY2M3YzVkY2Y5MWZkMDgyZDcwZDZlZDg0NWY1YWZkZDNiODRiZWFjOWE4MTU3 +YWFiYTAxNTQ1ODIxMmUxOQ== +-----END PRIVATE KEY for 796d2f56ee9fa8f4ff9fc7735bbb4d644be8dd044f7f965362aed3d562e00b11f730b2fe21eb9c9fd100b68a5d3dbf07bae63d25739f1304ab638330f0e8c207a76de648e2523ad1693f4ffe9def8a1d5aca2c6c1c14e1fcc20089db069d1a0e----- +-----BEGIN PRIVATE KEY for 4cc0bfcdf4515927e3ae7890197bfd7c2e3c6f80ff7570fc6313d095e0d4e518ecc81db7087fefb8af166723a32ded1324f4ee00a6a97d206a024fd95ab60f7fe18c896c829ac43782c56d922415fb4ddbc0384936e3f860feb0666da43bcd19----- 
+NWM4OTQzMjExNWU1ZjVkMGI2YzEzOGI4MjI2MjVlZmM2MDk2NzIyNWRmMThlNzVj +MTFhMTYzMGM5MmRlOTI1YQ== +-----END PRIVATE KEY for 4cc0bfcdf4515927e3ae7890197bfd7c2e3c6f80ff7570fc6313d095e0d4e518ecc81db7087fefb8af166723a32ded1324f4ee00a6a97d206a024fd95ab60f7fe18c896c829ac43782c56d922415fb4ddbc0384936e3f860feb0666da43bcd19----- +-----BEGIN PRIVATE KEY for 3449c9d672472ea52e83be4a8d6ce55785044233d848ac0a9a4b7fc22bf5b1bf5776f95f4b459f4e4cd3ba1d20b46a197c5a55ec00fa8d795d35f2232a6fae360129d186e3a0c219d42d8357de2823c8566da7841c2739b30d1581c4a38ec80e----- +NjIwZjkzZGZhMmQ1ZWY2NzliY2EzYTQ1MzE2NTg1ODU2OTVjNDM5NzM2NTgzNTJk +ZGM2OWU0MjQ4ZGQxNjQ0NQ== +-----END PRIVATE KEY for 3449c9d672472ea52e83be4a8d6ce55785044233d848ac0a9a4b7fc22bf5b1bf5776f95f4b459f4e4cd3ba1d20b46a197c5a55ec00fa8d795d35f2232a6fae360129d186e3a0c219d42d8357de2823c8566da7841c2739b30d1581c4a38ec80e----- +-----BEGIN PRIVATE KEY for 1bdef34453a83d19748b02d964c43409a66535678b6234c42d289ed2b7571bf60d35ba7a43cd951d4fc9fc2e8ff618038a2edc9cb0181dcac0b62a0859aafd8d9993aa3069c14fec11cb66a653311a37861d225847bf535bcb920b4f0ea98b8b----- +YWNkNzhjNzk2OTc5YjIxMTk5ZDc0YzgwNmExNzE1Y2EyNjNiMGMyNDI2MzFhZmNi +YzdlODNmYTRmMzFkNjMzMw== +-----END PRIVATE KEY for 1bdef34453a83d19748b02d964c43409a66535678b6234c42d289ed2b7571bf60d35ba7a43cd951d4fc9fc2e8ff618038a2edc9cb0181dcac0b62a0859aafd8d9993aa3069c14fec11cb66a653311a37861d225847bf535bcb920b4f0ea98b8b----- +-----BEGIN PRIVATE KEY for 3bd60bd8c5ace7d1999cd4bfd93dcb7bdc397d8d84efa5fd89439b7b727b0331bd00e3feae85df79e7d2b5eba1ea07003972fde3a7eb8d4ba25583a848be812e89b75fe8f3531d810ba2aaef629748ace6ac5ae73d8a2e6a65bb379f5be3b906----- +YWQ0ODk0ZmIzYjhkOTBiN2QzNTNhN2NhZjc4NTE1MjlhOTRkNjkyMjIyMGU4OTI5 +YzdjODMzOGJiNDRlZWExMw== +-----END PRIVATE KEY for 3bd60bd8c5ace7d1999cd4bfd93dcb7bdc397d8d84efa5fd89439b7b727b0331bd00e3feae85df79e7d2b5eba1ea07003972fde3a7eb8d4ba25583a848be812e89b75fe8f3531d810ba2aaef629748ace6ac5ae73d8a2e6a65bb379f5be3b906----- +-----BEGIN PRIVATE KEY for 2b9a1e5e291471f9eb0cf1a52db991f2dbb85446d132f47381b7912b041be00cccab748c25bdd6165cd6a517b6e99b0133bda4dc091dcdf6d17bc460ac0e8c6fe4b2460a980dd8dea8c857647bc5826a2b77fc8ba92c02deb2ba3daafb4d5407----- +NTk1NTg4YWMyMWI4ZGU4MThjYzdkMDI4NThmZDU4ZDk5NTg3Mjk0NDRiMzk0OWM5 +MzBjYjIwZGEyYWNlZTMzYg== +-----END PRIVATE KEY for 2b9a1e5e291471f9eb0cf1a52db991f2dbb85446d132f47381b7912b041be00cccab748c25bdd6165cd6a517b6e99b0133bda4dc091dcdf6d17bc460ac0e8c6fe4b2460a980dd8dea8c857647bc5826a2b77fc8ba92c02deb2ba3daafb4d5407----- +-----BEGIN PRIVATE KEY for e464c43c4f3442ec520d4a6464d7fa96397ab711adf38b5a96f208303337f6a97ffdbd22e34a10deef6aa21ff078360d2bf7fae627a1ec55a9f120b35224b8e461b0f4de7d3ce800e6b910f37c4d03cce7039ce3a4a3a79ac0511c36435ccf85----- +NDk1MzMwNThiY2VmZjNmOTFmMTRlMTI4MWE0OWRiZDkyYzAwOTVjOTcxMTViMmY3 +Yzk3OWFkNjdjOWVlNjM0YQ== +-----END PRIVATE KEY for e464c43c4f3442ec520d4a6464d7fa96397ab711adf38b5a96f208303337f6a97ffdbd22e34a10deef6aa21ff078360d2bf7fae627a1ec55a9f120b35224b8e461b0f4de7d3ce800e6b910f37c4d03cce7039ce3a4a3a79ac0511c36435ccf85----- +-----BEGIN PRIVATE KEY for dab5c34e66e096d1f328fd9b045136724c8ccbf7b5a4bcf1e8c9edc9510712c0a7feff7818563aa799b37f1cdcfb330cc49d8482c7154988d33f63fe2526b27945326112c832fdf72a1b35f10da34c6e08b4079d9c56195c1ab64c84eab93b95----- +NDZlZDkwNzcwNTQwNjcyZTlmYTQzODUyNzc3YjM0OGM1MmIzNmM3YjAzZGYwMmJk +ZjE0NmM0MTkxMjQwNjE0NQ== +-----END PRIVATE KEY for 
dab5c34e66e096d1f328fd9b045136724c8ccbf7b5a4bcf1e8c9edc9510712c0a7feff7818563aa799b37f1cdcfb330cc49d8482c7154988d33f63fe2526b27945326112c832fdf72a1b35f10da34c6e08b4079d9c56195c1ab64c84eab93b95----- +-----BEGIN PRIVATE KEY for a5624bec34e06d5026a334654be9e0118c8a02720a6bd868a51d5eb687819442cded09d1fef2e9d9db8bb2d5be01f1148b4819aee9e6a48b9c530285dbc4d800f4dd10d7f9a75d4b36de8fb52aec672cec91e0256f7e9848b10219748d9e708b----- +YzY2MjU0NGU0OWM1YTRkMTdmZjQ4YjZkZjU0YzdkZmUzZWRlY2M1Yjk2ZWM1MjMx +OGRjZjAyZjkwMjdjNTg1ZQ== +-----END PRIVATE KEY for a5624bec34e06d5026a334654be9e0118c8a02720a6bd868a51d5eb687819442cded09d1fef2e9d9db8bb2d5be01f1148b4819aee9e6a48b9c530285dbc4d800f4dd10d7f9a75d4b36de8fb52aec672cec91e0256f7e9848b10219748d9e708b----- +-----BEGIN PRIVATE KEY for 283ccdc58e0df19717ecd0c4c3a553059bf6c8d91b9c7b624afa8cb0564c7fd86e5a199973d17b7b939e63186b25f20a7234ad7f162c8f2547ba2e326c30a6c0571ea04cba83b35fd9a2a60f13e95ee1767b5fe87cbf378458ac7e27b4833f96----- +NGJjZmIzODdkYmJkN2Q3NzIxOWVmOWFkZGI3OTMyZmRlYzcwNjZiOTk3MmVkNjg3 +ZjkyYmIyMzg5MGFhOTMzMQ== +-----END PRIVATE KEY for 283ccdc58e0df19717ecd0c4c3a553059bf6c8d91b9c7b624afa8cb0564c7fd86e5a199973d17b7b939e63186b25f20a7234ad7f162c8f2547ba2e326c30a6c0571ea04cba83b35fd9a2a60f13e95ee1767b5fe87cbf378458ac7e27b4833f96----- +-----BEGIN PRIVATE KEY for 7d1a1b4f36fcd8cea426005212022511bc25a414019e2b5c65947f00c28c8f1220ff1473c36efaa22a94d3e2b5258705ff6efb91902afb2c951c90502edf60072c3330ad9fd1b5c4b85226f10a474a0ddda9b61730946629b110b6eac70de70a----- +ZDE1ZDk1YzdhMGU1ZGY5MDRmNzQxODI2NDFiN2FlOGEwYmJkYzE5Y2RkOGNhMGZh +MzEyNDI3OTY2YjNkODE1YQ== +-----END PRIVATE KEY for 7d1a1b4f36fcd8cea426005212022511bc25a414019e2b5c65947f00c28c8f1220ff1473c36efaa22a94d3e2b5258705ff6efb91902afb2c951c90502edf60072c3330ad9fd1b5c4b85226f10a474a0ddda9b61730946629b110b6eac70de70a----- diff --git a/cmd/node/config/testKeys/group2/allValidatorsKeys.pem b/cmd/node/config/testKeys/group2/allValidatorsKeys.pem new file mode 100644 index 00000000000..cbd478d5b5b --- /dev/null +++ b/cmd/node/config/testKeys/group2/allValidatorsKeys.pem @@ -0,0 +1,60 @@ +-----BEGIN PRIVATE KEY for eedbc5a3141b92148205d178d150905e68ca745ba54617025f84b33c91233afda2c2109084821e14f9a50d3f220fbc000ce6ad432f2a1865da9c6547016ecc7e07242ef490c0bdda29ec677f3e833f54eb2cf27e95b10b8edbdfa7de4e1bc000----- +YzU4OWY2MTQ1MjUyZjg4MmExYmIwY2QyNzVjOTQ5MzZlMjMxYTk0ZTZhYmNjM2Q1 +ZGY3OTA2Mzc0M2NhZmMwYw== +-----END PRIVATE KEY for eedbc5a3141b92148205d178d150905e68ca745ba54617025f84b33c91233afda2c2109084821e14f9a50d3f220fbc000ce6ad432f2a1865da9c6547016ecc7e07242ef490c0bdda29ec677f3e833f54eb2cf27e95b10b8edbdfa7de4e1bc000----- +-----BEGIN PRIVATE KEY for 48c77bbf5d619fb13d76883736c664493b323c00f89131c9d70b3e4ac875103bd8248e790e47a82c9fdcd46fe33b52093a4b3b248ce20e6f958acd22dfb17335fcaf752bab5e29934f0a7e0af54fb2f51a9e6b1be30abdd701f7c9fbd0ad5d8e----- +ZjJmZjg3MTNmMzdjZmYxYTljZTM5MTA4ZjA3OGFkOTc2OGViYzg2MDY0NTEyYjg2 +OTFhYTk0MmE3ODQzODQ1Mw== +-----END PRIVATE KEY for 48c77bbf5d619fb13d76883736c664493b323c00f89131c9d70b3e4ac875103bd8248e790e47a82c9fdcd46fe33b52093a4b3b248ce20e6f958acd22dfb17335fcaf752bab5e29934f0a7e0af54fb2f51a9e6b1be30abdd701f7c9fbd0ad5d8e----- +-----BEGIN PRIVATE KEY for dd59e326a160d3b49ca25f2ab93b4f7ba766b124de66b68507b9e7a9cf69df7c4fca695592eb31e7e63061daef52d30cc1d362fc612d22631398cad4af46969e35407b293808133fc130b8f930ba41c6b88bc9ed9b884892113593d3ffc55297----- +NThiOGMyNWVmMThmNTJhM2NhYTRiMjEwMWRhMTdhN2YwMTg1MWU2Y2RjZTRiZjM5 +ZTNmOGRjNzY0OThmMmU1OQ== +-----END PRIVATE KEY for 
dd59e326a160d3b49ca25f2ab93b4f7ba766b124de66b68507b9e7a9cf69df7c4fca695592eb31e7e63061daef52d30cc1d362fc612d22631398cad4af46969e35407b293808133fc130b8f930ba41c6b88bc9ed9b884892113593d3ffc55297----- +-----BEGIN PRIVATE KEY for 6b1a8bf3e5e7bacaaf99e3c89891a8a4ec7e9022986043f8206db9171ede3bd8cdcbbd7e8e1234180de5d651110ef706a8d964cb35048bc961611b55c8d9bd1b942b93c7e1b88157e7f79f2c08dbabe1af4612afe6044ab1be316976111b7019----- +NzJhNGVhN2E4ZmExZjQ3ZGUxY2ZjNzQxZGFjOGU5Zjc4ZDdiMWQyNWNlMDBkNTY1 +YWMyOGZkYzkxNDQ1NTYzNA== +-----END PRIVATE KEY for 6b1a8bf3e5e7bacaaf99e3c89891a8a4ec7e9022986043f8206db9171ede3bd8cdcbbd7e8e1234180de5d651110ef706a8d964cb35048bc961611b55c8d9bd1b942b93c7e1b88157e7f79f2c08dbabe1af4612afe6044ab1be316976111b7019----- +-----BEGIN PRIVATE KEY for d8f8ef204ac892dd1a04648fc449ffdd11418dbd5c8fe623e5efda0bcae47cb41eb99c981585d80be1d668d8b7466619b6ead4d83976cc4f6879627a455603a74ab2adbfb5fed0f1a2b954363d97cbd3ac7feb284c83ac64422fad518e589c8e----- +NDFiNDAxYjBkZDdmMDFhNDEwNmZjYmNjMDAwZDkwMWY5NWYwZTg4YjQ4ZjFmNzlh +MmY1ZmE5NWZjOTNjNWQxZA== +-----END PRIVATE KEY for d8f8ef204ac892dd1a04648fc449ffdd11418dbd5c8fe623e5efda0bcae47cb41eb99c981585d80be1d668d8b7466619b6ead4d83976cc4f6879627a455603a74ab2adbfb5fed0f1a2b954363d97cbd3ac7feb284c83ac64422fad518e589c8e----- +-----BEGIN PRIVATE KEY for f547e115b1ada7cf9b8aeef45ee0d9ec4b206315ef44be706d994a0571688cd96291d1ab6c3761df29d00a2ba290a3185e4796bc49891906f86e16da01af3fd52320944b96b60e679ac8e686d4819e97e15e5fe46503c556b4acdd8079624005----- +ZmFmMDA2YjRhYjNiZDhiZTg4ZTYwMWZjNDIyNjVlZjliMTQwZTRiNDNjYTNhYjVh +YzVlNGQ4NmUxOTkzNzY2Mw== +-----END PRIVATE KEY for f547e115b1ada7cf9b8aeef45ee0d9ec4b206315ef44be706d994a0571688cd96291d1ab6c3761df29d00a2ba290a3185e4796bc49891906f86e16da01af3fd52320944b96b60e679ac8e686d4819e97e15e5fe46503c556b4acdd8079624005----- +-----BEGIN PRIVATE KEY for 0e5cc5f218a8fa9bae1f6b452430c0de205e6b251d0f1d606d1ce28203fb556768e6c4545ce8e90d640ef2cc1062f40ccf2ede124b926cbf3b2b0050b0e19f67e7e36ac1a7049178a77cbd65ee30cd0a40d9f98846ce439cc120717501f03180----- +YzFiYzc1YjNjM2U0NWM4MjM5OTRjNWM0MTQzZDNhNWMzOWQ3YWY2ZmM2OTE0ODZi +NzdmZGU3ZTY1YjljZGIzNw== +-----END PRIVATE KEY for 0e5cc5f218a8fa9bae1f6b452430c0de205e6b251d0f1d606d1ce28203fb556768e6c4545ce8e90d640ef2cc1062f40ccf2ede124b926cbf3b2b0050b0e19f67e7e36ac1a7049178a77cbd65ee30cd0a40d9f98846ce439cc120717501f03180----- +-----BEGIN PRIVATE KEY for 760dc22525dce5be65a3a55ee07f7f012e0a89f435daec56eb475b0b5ca2d84b157894b8df64dfb570ecc633d5e1611639d43976e29f11c232236a9548b0145ee4e43fe495252c8c1f006b8df51d3835dee64a826f43167096b347b6919aa292----- +OGM2NjdjNTM2NWViNDZhMGExMDZmZDA1ZmZhYmUxNWU5NjA4NzU3ZWE0MDA4MzE5 +YmM4NmQ5MjY3YzNiMDIxMQ== +-----END PRIVATE KEY for 760dc22525dce5be65a3a55ee07f7f012e0a89f435daec56eb475b0b5ca2d84b157894b8df64dfb570ecc633d5e1611639d43976e29f11c232236a9548b0145ee4e43fe495252c8c1f006b8df51d3835dee64a826f43167096b347b6919aa292----- +-----BEGIN PRIVATE KEY for 39316473217f7c435a543efa078254198dd079e3c6505e7cc1564b033de8a161dc2e9c392b1e584440510113b5942816102d7be5f4af9461af21a454fc1938a962b256c1c1d1f939198029ed0bf22c62893038d5687787cb46436c0ef4f12417----- +NzdkYjE3MzMyOWY0MjIyYTMxOTFlZDUwMzM2MWZjZDQ2NTkwZjRhZmIxZjYwNWQx +MTMxYjNjOTg5MzRhNDc2MQ== +-----END PRIVATE KEY for 39316473217f7c435a543efa078254198dd079e3c6505e7cc1564b033de8a161dc2e9c392b1e584440510113b5942816102d7be5f4af9461af21a454fc1938a962b256c1c1d1f939198029ed0bf22c62893038d5687787cb46436c0ef4f12417----- +-----BEGIN PRIVATE KEY for 
6a3b9f6b5fd38e79433daa9bf03543314f8a2a3d9f1fec8ebe2bc1ee97f135d83845dcecd201207c1b31d7624ddb330ae67fbfab4137cd734d96bc0975ae8bcfeecc4441b384d39d6900cdb7436450c23b4cc7674ec50055ea4a90861c503a91----- +MjkwNThjZmJmYzAxM2I2YjJlYzgzMTA5MWY0MWIzNzVkNDUzMTRiZTNmOTRiNjA3 +MDY1MzJmZWEwNzUyMDUzZA== +-----END PRIVATE KEY for 6a3b9f6b5fd38e79433daa9bf03543314f8a2a3d9f1fec8ebe2bc1ee97f135d83845dcecd201207c1b31d7624ddb330ae67fbfab4137cd734d96bc0975ae8bcfeecc4441b384d39d6900cdb7436450c23b4cc7674ec50055ea4a90861c503a91----- +-----BEGIN PRIVATE KEY for ee8c987b9af9bba2912763fb7fcd6d6575db60806c5041fa91816ecc339ccfd60bf3cf49fb7017158f0b8e6050276907620bc040816207f14a952bb86752816231ae7f31ff701862cfe0abca367fc4cd63bafd4ad6e4df67612e4ec71462650c----- +NTk1NjY3ZjUzMjg2MjUxYjc2MWNlNDIyOWNjMmNlYTBlOWVmNDg4MjJmNTk3MmU3 +NDZiZDM2ZGY2ZTY0OTM0Ng== +-----END PRIVATE KEY for ee8c987b9af9bba2912763fb7fcd6d6575db60806c5041fa91816ecc339ccfd60bf3cf49fb7017158f0b8e6050276907620bc040816207f14a952bb86752816231ae7f31ff701862cfe0abca367fc4cd63bafd4ad6e4df67612e4ec71462650c----- +-----BEGIN PRIVATE KEY for cfe96f6b010d08f211c83f4ae3eb451d1d5205a50bdcd451706044dc21f523d25f214ab89dd5aab7ae03111197d6e6156e70ab348c9b0fab0a7839ea57fef6cd2324882b4387014dba201e6f87d5ca395e14d900e4563494f4f11a69ef6cdf14----- +MTJjMzU0MzQ1ZDMzNTc2YTk4ZDQ0NjljZmY4Y2FlYWQ1ZDRmODgxODIwOGI0M2Vi +MmM2YzZiY2E4NjU3MWUxMQ== +-----END PRIVATE KEY for cfe96f6b010d08f211c83f4ae3eb451d1d5205a50bdcd451706044dc21f523d25f214ab89dd5aab7ae03111197d6e6156e70ab348c9b0fab0a7839ea57fef6cd2324882b4387014dba201e6f87d5ca395e14d900e4563494f4f11a69ef6cdf14----- +-----BEGIN PRIVATE KEY for 05e9e43732ecff55e553b35b5ee1416065818db162a6fbf096186a1230d88bd057cebb72c5afaec16a803c4c4f69770752fe29be73a4069d0d01666ede963271192d4f324f2b3dcaec8b2c871c23cf185579a039dd5ab093c7cd9bca53e09c85----- +MGMwM2JmYjcyMDI1OGU1NWVkNTU1NDk5ZjNiYWNlMDIxMjU4OTc3NDAwYzA5NGQ2 +YTg4NzViZWQ4NDA4MzIzYg== +-----END PRIVATE KEY for 05e9e43732ecff55e553b35b5ee1416065818db162a6fbf096186a1230d88bd057cebb72c5afaec16a803c4c4f69770752fe29be73a4069d0d01666ede963271192d4f324f2b3dcaec8b2c871c23cf185579a039dd5ab093c7cd9bca53e09c85----- +-----BEGIN PRIVATE KEY for 03e79df244aef557cd6e3b9c7e8063575b6cce83bbff005a0abf96000d0a93652ef0071decdd3ce052aab0912e4d510566af6273d91f41b0d8505a8ca69cff449dc979ff0c3c9c319feab9f2d3a965a49c1bf0d899e85fb3851951a798b0ab03----- +NjIyMWJmYzY2ZmYwYzg1OWY5MTYxZDNkZjY3NGJmMWQ4ZjkwZjExZThmN2MyMWU3 +NzM5NDVmYTIzYTQ2YjUzYw== +-----END PRIVATE KEY for 03e79df244aef557cd6e3b9c7e8063575b6cce83bbff005a0abf96000d0a93652ef0071decdd3ce052aab0912e4d510566af6273d91f41b0d8505a8ca69cff449dc979ff0c3c9c319feab9f2d3a965a49c1bf0d899e85fb3851951a798b0ab03----- +-----BEGIN PRIVATE KEY for 06d0a320440091cc0f44979753c036744cd762d2bf7aa3b61b425397b74c27b581ec39d250818068eef095cb0a49be0c586526bcc9bd98ec3120b9633efd390d4d9a7b6ffcafae8bdbbcf2c98f0cb2cd75897c4d6701b9d77861bf7ab8be3f88----- +OTdkYTJmM2FkOWI1NzRmZTg2N2U1Y2YzMmEwMWYwNjEzOGE2OGM0NjUwNWQzNTI4 +NmJlM2Y4OTQzMDQ3YmIwMg== +-----END PRIVATE KEY for 06d0a320440091cc0f44979753c036744cd762d2bf7aa3b61b425397b74c27b581ec39d250818068eef095cb0a49be0c586526bcc9bd98ec3120b9633efd390d4d9a7b6ffcafae8bdbbcf2c98f0cb2cd75897c4d6701b9d77861bf7ab8be3f88----- diff --git a/cmd/node/config/testKeys/group3/allValidatorsKeys.pem b/cmd/node/config/testKeys/group3/allValidatorsKeys.pem new file mode 100644 index 00000000000..3503b12fbf2 --- /dev/null +++ b/cmd/node/config/testKeys/group3/allValidatorsKeys.pem @@ -0,0 +1,64 @@ +-----BEGIN PRIVATE KEY for 
82cfc47999d1499cb880d46e8280b8c4fe576dff20a8ca6f6ac551887c637f935153f9ce2f21921a532477535d42ac05f730760c78415756add2eab6d57d94916f3ad51590b23404739d152f89b6d052df48cace1793897cd4eba722247a6195----- +OWQyYTcwMWQxOGNlNzE4NjQzNDNhNDI5YWY4OGM1YTc3YTEzMjg3MjY1ZDFhMDEz +ZjZhYWFhZGI1NDU4YTM0NA== +-----END PRIVATE KEY for 82cfc47999d1499cb880d46e8280b8c4fe576dff20a8ca6f6ac551887c637f935153f9ce2f21921a532477535d42ac05f730760c78415756add2eab6d57d94916f3ad51590b23404739d152f89b6d052df48cace1793897cd4eba722247a6195----- +-----BEGIN PRIVATE KEY for 7675098e73574db9c59bdce61e4b80251c6536201715dca40b2b69c09ce097690f3a9095d22b006531e3b13b30894803bd7ede3e6d80c9064c431f8671db085ab1052354cb26a7a2436340b273b6c95c84ab94bb9531b99c5f883602b5284017----- +MWFjOWZkZDFlNWZhMmI5NzAxZTVjZWY4ZGFjMTUzMDgyMjE5MjE2YWFhMTU1NzM0 +NzdhMmNjZjhhN2Q4OTkzNg== +-----END PRIVATE KEY for 7675098e73574db9c59bdce61e4b80251c6536201715dca40b2b69c09ce097690f3a9095d22b006531e3b13b30894803bd7ede3e6d80c9064c431f8671db085ab1052354cb26a7a2436340b273b6c95c84ab94bb9531b99c5f883602b5284017----- +-----BEGIN PRIVATE KEY for c50f398a853c670ed625a12eddae175df5a90e034a54484a832566fc91f9b83d5daf1bc821cc347ba7e45f3acd4e1d00d0d7f52235824fd1326a7f370b58fc7dd98edfff4a41739a2015c6ed3a3c0bf3c986efeee187be70f1133fc4379dad95----- +MTE5NWQzZjk0OTk1MDNhMDBjMzhmOWY2NzQwNDZmMzQ4MGZiODk4YzZiZWNmOGVi +ODU5ZDU2MWUxOWY5MGY0YQ== +-----END PRIVATE KEY for c50f398a853c670ed625a12eddae175df5a90e034a54484a832566fc91f9b83d5daf1bc821cc347ba7e45f3acd4e1d00d0d7f52235824fd1326a7f370b58fc7dd98edfff4a41739a2015c6ed3a3c0bf3c986efeee187be70f1133fc4379dad95----- +-----BEGIN PRIVATE KEY for 4bd3f30f608b22b32100c6360def228ec95aa24e3048010bb64606392f602e180a0b2a12f7f92ef1d7f73ce1271ae30693bec692b15802c7ba079939640570fdc7f4d411c084ed0fe612ee223227ca3d02dc9732cf686ba8885007de53f8ec89----- +ZGU3YmUzZGU1NzdiNjk3OTY4ODJkYzljYjY2MzE5NTc2YzJlM2M4Y2Q4MDRlMjJm +YzMyMmZmYmVlM2Y3MGY1Mg== +-----END PRIVATE KEY for 4bd3f30f608b22b32100c6360def228ec95aa24e3048010bb64606392f602e180a0b2a12f7f92ef1d7f73ce1271ae30693bec692b15802c7ba079939640570fdc7f4d411c084ed0fe612ee223227ca3d02dc9732cf686ba8885007de53f8ec89----- +-----BEGIN PRIVATE KEY for 71e8458f92997a00c1cd0e638b9ec42ab136828fc13f0ec643b60af451270cc81d50f4c4578a7c93a700ee21e065281593e7995d2454356cbfdeadb9ffe7bf33ba8f7a31a1d2e76bba5a5f88a613ef37e35595838d0b7f4bd12da7d6fe743499----- +ZjJkOTY0ODVlZDk3YmQ1YWQ3M2M0OTk0NDg1ODIyMGNiMTY0ZDg1YTAwZWEzZTlm +YzYwMjY1ZGM3YjliMTMzNQ== +-----END PRIVATE KEY for 71e8458f92997a00c1cd0e638b9ec42ab136828fc13f0ec643b60af451270cc81d50f4c4578a7c93a700ee21e065281593e7995d2454356cbfdeadb9ffe7bf33ba8f7a31a1d2e76bba5a5f88a613ef37e35595838d0b7f4bd12da7d6fe743499----- +-----BEGIN PRIVATE KEY for d9c30948bffad18776b786f6367142b76605ac6e33a8d38c68c31c7afb099f1a83efb752a87afaf9d04a4a8fb656e40bfe2a4aa6e0c16b82d22bd6c232c2ce5e6672ac6232d2da6945bc033b04cbaaeb4b9af4b29585094e034ab8dcfb8b9c19----- +MmJjYzZkZmYzMDc5MjlmNjg1M2M5OTViZjA5ZWRiYjMxYWFhNjYwZDVjMTc1NTM3 +NzFjMmYwNGEwOWFkOWMxZg== +-----END PRIVATE KEY for d9c30948bffad18776b786f6367142b76605ac6e33a8d38c68c31c7afb099f1a83efb752a87afaf9d04a4a8fb656e40bfe2a4aa6e0c16b82d22bd6c232c2ce5e6672ac6232d2da6945bc033b04cbaaeb4b9af4b29585094e034ab8dcfb8b9c19----- +-----BEGIN PRIVATE KEY for 55fc7ab2e8c0a07bef2e1a9b35764cee1d604cb5634b7226a7310ce56a1f02e99d248fc5b416c4253ac7b88353b1a60f31e1104534e36cb00f46bdcb20a0d24f453e2c8d3cc48dc3c6086edbe16149aae14eb3a4d24ee2b217a4759bc0c0ea88----- +YmY3YjhmZjgxZmMzMzhjZWYwNzQ3ZWM1NzdlMzI3NTVkYTdjYThjMWVlN2QxYWNi +YzNkZDJhZDNhM2RkYzgzYg== +-----END 
PRIVATE KEY for 55fc7ab2e8c0a07bef2e1a9b35764cee1d604cb5634b7226a7310ce56a1f02e99d248fc5b416c4253ac7b88353b1a60f31e1104534e36cb00f46bdcb20a0d24f453e2c8d3cc48dc3c6086edbe16149aae14eb3a4d24ee2b217a4759bc0c0ea88----- +-----BEGIN PRIVATE KEY for a8e662e63ad0e87f2dc66cbed41d398b73a2da2aaced6cc466ed378b62daee28b3db8e8327a06278a094b05840965c17448ffc8a1c96e532a7960d1a15d2cabd16edadc476bfb4af3a825aff801f615d127b70b4745b88e01627a99ba52d5317----- +NWUyYWQyMGU5MzliMDUzMDU3Y2FkYjNkYTU0NmRkOWIyYjI3ODE1MWJkZDc1ODBl +MGFmYWEyZDM3YTZmNGY2Nw== +-----END PRIVATE KEY for a8e662e63ad0e87f2dc66cbed41d398b73a2da2aaced6cc466ed378b62daee28b3db8e8327a06278a094b05840965c17448ffc8a1c96e532a7960d1a15d2cabd16edadc476bfb4af3a825aff801f615d127b70b4745b88e01627a99ba52d5317----- +-----BEGIN PRIVATE KEY for f5e5eb9dd18aeb5d4829ab08795a9e4c8632a4fd248feed68382add1f2474d3cec042d51b897871bfee1f1c1fbeabf13d1c39d4f9b412948d27737f2b82e85474b7049a700ee8735373564791f0d20692dd1f8b494de7bab0a8415f01532ed90----- +NGNmNTQxMDMyYmNkNjQ3MWU0ZGNkN2NjYzZkNGY5ZDg4MTgwMThiMGIyOWE5NGZi +YTBlMTA2YmJlMTExMzMzMQ== +-----END PRIVATE KEY for f5e5eb9dd18aeb5d4829ab08795a9e4c8632a4fd248feed68382add1f2474d3cec042d51b897871bfee1f1c1fbeabf13d1c39d4f9b412948d27737f2b82e85474b7049a700ee8735373564791f0d20692dd1f8b494de7bab0a8415f01532ed90----- +-----BEGIN PRIVATE KEY for e66d7ac5e51a382164aeaae9924dda3272296a145d3c6178b962e3b7eb83e75515e665c327e86f3ef597ca840f8c5c0ace19ac9a8fbcdc573f9237d112fb1c467d646737863ccd1fe61f4c4341f9805f8e1fe98348b50c3c3f93f62de3975980----- +Mjc5N2ZjYjViYWMyOTJmOTZhMGI3NmYwNzhjZjVjMWJkMTkzYThjNmY1YWQ4NTdl +ZGU5MmU1MjVhMDE3NGIwNA== +-----END PRIVATE KEY for e66d7ac5e51a382164aeaae9924dda3272296a145d3c6178b962e3b7eb83e75515e665c327e86f3ef597ca840f8c5c0ace19ac9a8fbcdc573f9237d112fb1c467d646737863ccd1fe61f4c4341f9805f8e1fe98348b50c3c3f93f62de3975980----- +-----BEGIN PRIVATE KEY for 2a1c49643e564cdf28bba96dfd6cd8ad38a5958b2b3c9a8293ffb54e9df0a0188a67de2fb947b8ae3dd06b7411aaae0e8bedef795ad3b35ac9f1402dcd0631d9d530b01b3880362fbd3ed9a8488ecabfb1b46cac225c5d48c39be3e28503f90f----- +OTFjZTI1YzZiMjU2ZDZjNzE1MzIwMDUwYjIzZGU2YmI1NmNlYjc5Mzc0M2YyYTcz +MDRiOWUyN2ZjMjhkNmUxYQ== +-----END PRIVATE KEY for 2a1c49643e564cdf28bba96dfd6cd8ad38a5958b2b3c9a8293ffb54e9df0a0188a67de2fb947b8ae3dd06b7411aaae0e8bedef795ad3b35ac9f1402dcd0631d9d530b01b3880362fbd3ed9a8488ecabfb1b46cac225c5d48c39be3e28503f90f----- +-----BEGIN PRIVATE KEY for 5c9784523f360a802d4687c9c76bcef41a738d034aa8503a055c33898504b09670c1f637ca632e5290b3acf79a2191072f68c4192a9cbeb34f50c4a941e34247a64f642a6a074bec683bdfb83587cfdc0390ebd74505cb836cf04f3268e32f99----- +ZWMzOTQ2YTBlYmY2MjY5YTQwNWRkOTI2ODcxNjEzODVkMTUxYmEzZjRiOThlYTBj +YzUyMzc1OThiYmVkOGIzZA== +-----END PRIVATE KEY for 5c9784523f360a802d4687c9c76bcef41a738d034aa8503a055c33898504b09670c1f637ca632e5290b3acf79a2191072f68c4192a9cbeb34f50c4a941e34247a64f642a6a074bec683bdfb83587cfdc0390ebd74505cb836cf04f3268e32f99----- +-----BEGIN PRIVATE KEY for db7f7726c3e68abb28d070f529f2c222755d863aa9d7c0200fde10c93ccb8edcee8d45c9eb925bd5a0fa33c54d19270b7058f6e72256dad84214375f189310a73153cd84feef4b493ab61437b0cbcc2c592e6c093653a533631c8e0ab036c207----- +ZjFiODNjZTc2Y2Q1NGQzOWViNWFhNDNlMzdiNTBjMWJiNjY3YzVlNWQwNzg5YTg5 +ZWJlMWQ2NWE1ZmExZmQ1Nw== +-----END PRIVATE KEY for db7f7726c3e68abb28d070f529f2c222755d863aa9d7c0200fde10c93ccb8edcee8d45c9eb925bd5a0fa33c54d19270b7058f6e72256dad84214375f189310a73153cd84feef4b493ab61437b0cbcc2c592e6c093653a533631c8e0ab036c207----- +-----BEGIN PRIVATE KEY for 
a27b6f47c53263e5c8d69779a169d50605cdd7ddb4b5384f2d46e08ace6f787a60f6cf26256b62fafba9c91a87ff070bc99254fcb5a73239fc14f2108de62189005b51b21e2922b37c6cc657017832e3a59dfcc7a54ac5dcb997136da4e2748b----- +Mjk1YWExMDkzOWMyZWI2OGUyM2EzZWFmYzE1YjE2NmRjZDllMDIyZTUwYjU4MWE2 +ODcxN2NmN2E1ZDEyMmIxOA== +-----END PRIVATE KEY for a27b6f47c53263e5c8d69779a169d50605cdd7ddb4b5384f2d46e08ace6f787a60f6cf26256b62fafba9c91a87ff070bc99254fcb5a73239fc14f2108de62189005b51b21e2922b37c6cc657017832e3a59dfcc7a54ac5dcb997136da4e2748b----- +-----BEGIN PRIVATE KEY for 125af943ccf7405f204a34fe82d8b35f487d3c69c536311f999328ccaa7d1570626ea17c3fc4a75bba336746942e52025ebad7caf46e56ebd916178d89828ef3eb427c8e6c0cafe4adf91620e3ba23bb25cf751fc18f34775295765371c22b11----- +ZjU0OGUwZTZjODc0NzVjMTk2MjY5M2QzNzg2ZWIyZDMyYmViZDkxZmYwOWYxZThj +NGNhZWM3M2E5N2IwODk0OQ== +-----END PRIVATE KEY for 125af943ccf7405f204a34fe82d8b35f487d3c69c536311f999328ccaa7d1570626ea17c3fc4a75bba336746942e52025ebad7caf46e56ebd916178d89828ef3eb427c8e6c0cafe4adf91620e3ba23bb25cf751fc18f34775295765371c22b11----- +-----BEGIN PRIVATE KEY for a07ff46694c1faa139166fccf535ca18f1dada26d7ee4879334e70d0f4cd948deba46617ebeabc2f5c5e021a3b16e5099ee3eed5e03a259b3609fcac5256bb064ac0718c277018c6b2ab73f079ac81baca252afd031954af2883c8b2a4063909----- +OWM0NGIwY2U0OTliMDgwZjE1ZTBkYzdhMjg2MTY1ZThlMDU5MWU0Yjc3OTM0YzFl +NmQwNWJhOGQyMjk2NjA1MA== +-----END PRIVATE KEY for a07ff46694c1faa139166fccf535ca18f1dada26d7ee4879334e70d0f4cd948deba46617ebeabc2f5c5e021a3b16e5099ee3eed5e03a259b3609fcac5256bb064ac0718c277018c6b2ab73f079ac81baca252afd031954af2883c8b2a4063909----- diff --git a/cmd/node/config/testKeys/unStakedKeys.pem b/cmd/node/config/testKeys/unStakedKeys.pem new file mode 100644 index 00000000000..96a3bf2d715 --- /dev/null +++ b/cmd/node/config/testKeys/unStakedKeys.pem @@ -0,0 +1,24 @@ +-----BEGIN PRIVATE KEY for 283ccdc58e0df19717ecd0c4c3a553059bf6c8d91b9c7b624afa8cb0564c7fd86e5a199973d17b7b939e63186b25f20a7234ad7f162c8f2547ba2e326c30a6c0571ea04cba83b35fd9a2a60f13e95ee1767b5fe87cbf378458ac7e27b4833f96----- +NGJjZmIzODdkYmJkN2Q3NzIxOWVmOWFkZGI3OTMyZmRlYzcwNjZiOTk3MmVkNjg3 +ZjkyYmIyMzg5MGFhOTMzMQ== +-----END PRIVATE KEY for 283ccdc58e0df19717ecd0c4c3a553059bf6c8d91b9c7b624afa8cb0564c7fd86e5a199973d17b7b939e63186b25f20a7234ad7f162c8f2547ba2e326c30a6c0571ea04cba83b35fd9a2a60f13e95ee1767b5fe87cbf378458ac7e27b4833f96----- +-----BEGIN PRIVATE KEY for 7d1a1b4f36fcd8cea426005212022511bc25a414019e2b5c65947f00c28c8f1220ff1473c36efaa22a94d3e2b5258705ff6efb91902afb2c951c90502edf60072c3330ad9fd1b5c4b85226f10a474a0ddda9b61730946629b110b6eac70de70a----- +ZDE1ZDk1YzdhMGU1ZGY5MDRmNzQxODI2NDFiN2FlOGEwYmJkYzE5Y2RkOGNhMGZh +MzEyNDI3OTY2YjNkODE1YQ== +-----END PRIVATE KEY for 7d1a1b4f36fcd8cea426005212022511bc25a414019e2b5c65947f00c28c8f1220ff1473c36efaa22a94d3e2b5258705ff6efb91902afb2c951c90502edf60072c3330ad9fd1b5c4b85226f10a474a0ddda9b61730946629b110b6eac70de70a----- +-----BEGIN PRIVATE KEY for 03e79df244aef557cd6e3b9c7e8063575b6cce83bbff005a0abf96000d0a93652ef0071decdd3ce052aab0912e4d510566af6273d91f41b0d8505a8ca69cff449dc979ff0c3c9c319feab9f2d3a965a49c1bf0d899e85fb3851951a798b0ab03----- +NjIyMWJmYzY2ZmYwYzg1OWY5MTYxZDNkZjY3NGJmMWQ4ZjkwZjExZThmN2MyMWU3 +NzM5NDVmYTIzYTQ2YjUzYw== +-----END PRIVATE KEY for 03e79df244aef557cd6e3b9c7e8063575b6cce83bbff005a0abf96000d0a93652ef0071decdd3ce052aab0912e4d510566af6273d91f41b0d8505a8ca69cff449dc979ff0c3c9c319feab9f2d3a965a49c1bf0d899e85fb3851951a798b0ab03----- +-----BEGIN PRIVATE KEY for 
06d0a320440091cc0f44979753c036744cd762d2bf7aa3b61b425397b74c27b581ec39d250818068eef095cb0a49be0c586526bcc9bd98ec3120b9633efd390d4d9a7b6ffcafae8bdbbcf2c98f0cb2cd75897c4d6701b9d77861bf7ab8be3f88----- +OTdkYTJmM2FkOWI1NzRmZTg2N2U1Y2YzMmEwMWYwNjEzOGE2OGM0NjUwNWQzNTI4 +NmJlM2Y4OTQzMDQ3YmIwMg== +-----END PRIVATE KEY for 06d0a320440091cc0f44979753c036744cd762d2bf7aa3b61b425397b74c27b581ec39d250818068eef095cb0a49be0c586526bcc9bd98ec3120b9633efd390d4d9a7b6ffcafae8bdbbcf2c98f0cb2cd75897c4d6701b9d77861bf7ab8be3f88----- +-----BEGIN PRIVATE KEY for 125af943ccf7405f204a34fe82d8b35f487d3c69c536311f999328ccaa7d1570626ea17c3fc4a75bba336746942e52025ebad7caf46e56ebd916178d89828ef3eb427c8e6c0cafe4adf91620e3ba23bb25cf751fc18f34775295765371c22b11----- +ZjU0OGUwZTZjODc0NzVjMTk2MjY5M2QzNzg2ZWIyZDMyYmViZDkxZmYwOWYxZThj +NGNhZWM3M2E5N2IwODk0OQ== +-----END PRIVATE KEY for 125af943ccf7405f204a34fe82d8b35f487d3c69c536311f999328ccaa7d1570626ea17c3fc4a75bba336746942e52025ebad7caf46e56ebd916178d89828ef3eb427c8e6c0cafe4adf91620e3ba23bb25cf751fc18f34775295765371c22b11----- +-----BEGIN PRIVATE KEY for a07ff46694c1faa139166fccf535ca18f1dada26d7ee4879334e70d0f4cd948deba46617ebeabc2f5c5e021a3b16e5099ee3eed5e03a259b3609fcac5256bb064ac0718c277018c6b2ab73f079ac81baca252afd031954af2883c8b2a4063909----- +OWM0NGIwY2U0OTliMDgwZjE1ZTBkYzdhMjg2MTY1ZThlMDU5MWU0Yjc3OTM0YzFl +NmQwNWJhOGQyMjk2NjA1MA== +-----END PRIVATE KEY for a07ff46694c1faa139166fccf535ca18f1dada26d7ee4879334e70d0f4cd948deba46617ebeabc2f5c5e021a3b16e5099ee3eed5e03a259b3609fcac5256bb064ac0718c277018c6b2ab73f079ac81baca252afd031954af2883c8b2a4063909----- diff --git a/cmd/node/config/testKeys/validatorKey.pem b/cmd/node/config/testKeys/validatorKey.pem new file mode 100644 index 00000000000..397c6629e6d --- /dev/null +++ b/cmd/node/config/testKeys/validatorKey.pem @@ -0,0 +1,96 @@ +-----BEGIN PRIVATE KEY for d3e0427c22ff9cc80ef4156f976644cfa25c54e5a69ed199132053f8cbbfddd4eb15a2f732a3c9b392169c8b1d060e0b5ab0d88b4dd7b4010fa051a17ef81bdbace5e68025965b00bf48e14a9ec8d8e2a8bcc9e62f97ddac3268f6b805f7b80e----- +MTMyZTliNDcyOTFmY2M2MmM2NGIzMzRmZDQzNGFiMmRiNzRiZjY0YjQyZDRjYzFi +NGNlZGQxMGRmNzdjMTkzNg== +-----END PRIVATE KEY for d3e0427c22ff9cc80ef4156f976644cfa25c54e5a69ed199132053f8cbbfddd4eb15a2f732a3c9b392169c8b1d060e0b5ab0d88b4dd7b4010fa051a17ef81bdbace5e68025965b00bf48e14a9ec8d8e2a8bcc9e62f97ddac3268f6b805f7b80e----- +-----BEGIN PRIVATE KEY for b0b6349b3f693e08c433970d10efb2fe943eac4057a945146bee5fd163687f4e1800d541aa0f11bf9e4cb6552f512e126068e68eb471d18fcc477ddfe0b9b3334f34e30d8b7b2c08f914f4ae54454f75fb28922ba9fd28785bcadc627031fa8a----- +NDkwYTU1YWI0MGNiZWE3Nzk4ZjdhNzQzYmNkM2RhNDQyNzZiZWM2YWQwODM3NTlh +NDUxNjY0NjE4NjI1NzQ2Ng== +-----END PRIVATE KEY for b0b6349b3f693e08c433970d10efb2fe943eac4057a945146bee5fd163687f4e1800d541aa0f11bf9e4cb6552f512e126068e68eb471d18fcc477ddfe0b9b3334f34e30d8b7b2c08f914f4ae54454f75fb28922ba9fd28785bcadc627031fa8a----- +-----BEGIN PRIVATE KEY for 67c301358a41bef74df2ae6aa9914e3a5e7a4b528bbd19596cca4b2fd97a62ab2c0a88b02adf1c5973a82c7544cdc40539ae62a9ac05351cfc59c300bbf4492f4266c550987355c39cff8e84ff74e012c7fd372c240eeb916ef87eead82ffd98----- +NTkwNzQzOTJmNGY5NzBjM2I1ZDRiYTE3ODM5NTVmY2Y5ZmNjNDRkOWE1YWZmMmI1 +Y2RkYjAwMjBjYTE1NWI1Yw== +-----END PRIVATE KEY for 67c301358a41bef74df2ae6aa9914e3a5e7a4b528bbd19596cca4b2fd97a62ab2c0a88b02adf1c5973a82c7544cdc40539ae62a9ac05351cfc59c300bbf4492f4266c550987355c39cff8e84ff74e012c7fd372c240eeb916ef87eead82ffd98----- +-----BEGIN PRIVATE KEY for 
ab0a22ba2be6560af8520208393381760f9d4f69fca4f152b0a3fe7b124dd7f932fd8c1fbb372792c235baafac36030ceaf6ebf215de4e8d8d239f347f2fed10a75a07cbf9dc56efbbfca2e319152a363df122c300cdeb2faa02a61ebefd8a0e----- +YTYwOTFmYjUxNzY0NTE5NjM5NmQwNGFhYjM2NzllNGYwNTlkYjlkODVjOTgxNjI1 +YzE5OTlkYWRhOTg1Y2Q1ZQ== +-----END PRIVATE KEY for ab0a22ba2be6560af8520208393381760f9d4f69fca4f152b0a3fe7b124dd7f932fd8c1fbb372792c235baafac36030ceaf6ebf215de4e8d8d239f347f2fed10a75a07cbf9dc56efbbfca2e319152a363df122c300cdeb2faa02a61ebefd8a0e----- +-----BEGIN PRIVATE KEY for caa87d67e195b52355d2c8f7f74c829395b134bd4a911f158e04b2d7e66a5ba195265743f10cf190105512fb3df9d708a8056c07a6165874d8749742502c0eada7d15b6c55f22c2cce2cf5001288f6b2d89319e6ff888344c01adcd362be8998----- +NDM2NDEwYTEwMmVmZDFjOWJjNjA2ZmRmM2FlNWI3ZDlkZTM3NjVkZDkxYTg0YjA1 +OTY4NjJjNTg3OTcwZjU3MQ== +-----END PRIVATE KEY for caa87d67e195b52355d2c8f7f74c829395b134bd4a911f158e04b2d7e66a5ba195265743f10cf190105512fb3df9d708a8056c07a6165874d8749742502c0eada7d15b6c55f22c2cce2cf5001288f6b2d89319e6ff888344c01adcd362be8998----- +-----BEGIN PRIVATE KEY for 598be7548d6bb605bd19d83037bf58a7797a4e48b33011a60a5633cf6fe8d59906130777c46f50a50d3d0f958effb5147befd5d67cbec7c5daddeaade4dca5d8a54fe0394fde7b6455e4fc4db91f33f907d450b45fc2d4a9990f96d893093d91----- +MTRiMjkxYzY1MzA0NzE1NzY1ZTYzYjUzMTUzYzNmZmIyNzNlZTRlMWNjYzY1ZTc4 +MjdhMDNmYmViMWRjZmE2NQ== +-----END PRIVATE KEY for 598be7548d6bb605bd19d83037bf58a7797a4e48b33011a60a5633cf6fe8d59906130777c46f50a50d3d0f958effb5147befd5d67cbec7c5daddeaade4dca5d8a54fe0394fde7b6455e4fc4db91f33f907d450b45fc2d4a9990f96d893093d91----- +-----BEGIN PRIVATE KEY for 69b277b127d025638dbb54d36baa8321540f6210fc5edaac77f94798c039a383aead3ae7c93cdfb8b4caab93a952d101ee2322c129b6ce2726359a65aa326bd35e54c974118503944fcaf80be80b5c3fc9cf86d574d0096140f16fbc55fc4984----- +Njc2ZDA3ZjBjNzQ5MWM4ZTYxOTg5NDdmN2Y1YThjMDcyMzAwZmM3NTlkYTkyOTQy +ODg5NjcyMDJhOTRiZWExNA== +-----END PRIVATE KEY for 69b277b127d025638dbb54d36baa8321540f6210fc5edaac77f94798c039a383aead3ae7c93cdfb8b4caab93a952d101ee2322c129b6ce2726359a65aa326bd35e54c974118503944fcaf80be80b5c3fc9cf86d574d0096140f16fbc55fc4984----- +-----BEGIN PRIVATE KEY for a006ad94b28c414c6ec0a5effb84594f39ede4f82b60aa077e2065b89407c78dd6479ebceed7bd42ed2779c34b718f11651427e550948cb8be2e6cea03a128ac3c52e599ada6f34912b119f94de472af0397a68769f1b3f647e87090918e030b----- +YzBkNjM4NjczODAxYWY4MWY5NWNkZjgxYzVkMWNiMTQwYWZjMmYwMjJkOTU3YTk0 +OGQ3ZTI4YTVjZjViMzE0Nw== +-----END PRIVATE KEY for a006ad94b28c414c6ec0a5effb84594f39ede4f82b60aa077e2065b89407c78dd6479ebceed7bd42ed2779c34b718f11651427e550948cb8be2e6cea03a128ac3c52e599ada6f34912b119f94de472af0397a68769f1b3f647e87090918e030b----- +-----BEGIN PRIVATE KEY for 91874fdfa8dfb85faf4f404b21c95fbb5d154db5a6abe46bd7860de9e5ddb78b61b5c6ddcf86e5ec8a237e130ed0fc0e418fb97d6fce5f6642ba33f99eff694ec7fb2921b423899a9a5888914bd625636a9b1ea186566561cd35b79aaca20e88----- +OTBhN2Y0YjlkNTVmMzliZmMzYmQ3Y2RiZWE2NWYyNmEzYThiNTk1ZjEyNzg5Yjlm +OGJmYzg5MDlhZTZjZmEzYQ== +-----END PRIVATE KEY for 91874fdfa8dfb85faf4f404b21c95fbb5d154db5a6abe46bd7860de9e5ddb78b61b5c6ddcf86e5ec8a237e130ed0fc0e418fb97d6fce5f6642ba33f99eff694ec7fb2921b423899a9a5888914bd625636a9b1ea186566561cd35b79aaca20e88----- +-----BEGIN PRIVATE KEY for cc3e0c1021f8c4c092499547b064cffef19d07f0bf250e5265cea1e49b282a7f6efb4b415ad37db2ef6efa253475f511e74efc2f76c087c9798f72187986bb752f61d0ac220045f8e2d945343f3bbb8ef34a6025fb855dd7d953a81477ad2309----- +OTc2NDdhMzYwODMyMTliZDhhYjI4NTYxYWQxZTRjOTZmNDdmNmUxOTM1NTVjNGY4 +MTc2ZDEwM2I4Y2Q0YjkzZA== +-----END 
PRIVATE KEY for cc3e0c1021f8c4c092499547b064cffef19d07f0bf250e5265cea1e49b282a7f6efb4b415ad37db2ef6efa253475f511e74efc2f76c087c9798f72187986bb752f61d0ac220045f8e2d945343f3bbb8ef34a6025fb855dd7d953a81477ad2309----- +-----BEGIN PRIVATE KEY for c2885340a6ba4341d68f80ce419deadf374bc52e2749c278b5bce5f795e9a90a04ef4f07a0b47777feb1982749b57a174b4927338df9da99a417a2df3152a9ebaf3465bfc092058324edf6892313f24be4612eb5663bb59d67a831dda135aa8b----- +MWQxOGIyMGFiZWUyNDFjOWU0ODEwZDQxMjI2ZGU4NDk3Y2FhYzk3OTczYmVhYzBk +YzUyYjI2ODg3M2FlMjM2NA== +-----END PRIVATE KEY for c2885340a6ba4341d68f80ce419deadf374bc52e2749c278b5bce5f795e9a90a04ef4f07a0b47777feb1982749b57a174b4927338df9da99a417a2df3152a9ebaf3465bfc092058324edf6892313f24be4612eb5663bb59d67a831dda135aa8b----- +-----BEGIN PRIVATE KEY for cf8a2f97b7822acb16016a6debaaedea39959c9ac60b80e50f24734a0e0f6128ed1d216f5aed71866ca34bb30b6e8300e7995237744e766f6016ca28d4ebb2274326cb7af1a3c12d795cc127a4bf9aa9497d89ef0450c40f675afd1afa761012----- +ZWRkY2RmNzg3NGQ3Y2M2N2Q2Yjc1OTRlOTlkY2JjMWY0OTNiNGEzNjA4ZWM0NTdk +MjY0NDU1OTJiMmYwM2YwNA== +-----END PRIVATE KEY for cf8a2f97b7822acb16016a6debaaedea39959c9ac60b80e50f24734a0e0f6128ed1d216f5aed71866ca34bb30b6e8300e7995237744e766f6016ca28d4ebb2274326cb7af1a3c12d795cc127a4bf9aa9497d89ef0450c40f675afd1afa761012----- +-----BEGIN PRIVATE KEY for 95a81b70474d59c1292bc5742db1a7b9bf03cb516ede6fb5cb3489ee812de8cccfc648f3ff3cda26106396a38c1c1f183b722392397a752d949c5123888b7a8ec012fe518f6efc25015a620b1559e4609286b52921e06b79fd563a9b3b4c4e16----- +MDUwNzJiZGQ3NGIyNzdkZTMzOTZhOGNlODk1ZGNmNzhhZWMzNGViYjJmNGI0ZmFi +MjI4MzVlNjhjNjUwNzMzZQ== +-----END PRIVATE KEY for 95a81b70474d59c1292bc5742db1a7b9bf03cb516ede6fb5cb3489ee812de8cccfc648f3ff3cda26106396a38c1c1f183b722392397a752d949c5123888b7a8ec012fe518f6efc25015a620b1559e4609286b52921e06b79fd563a9b3b4c4e16----- +-----BEGIN PRIVATE KEY for 5909def579148f456e8490659b859f80f8ccd62b5adda411e1acdc615c2ec795a88632cf2ec210a56ba91973fd3f07160f559f82f7afaafee008679fefb1b0cd2f26f4324197e6239c000accd1c427138568a8a9e276690c154d3df71a1f970c----- +OWMzYWU5MGNmOWJkOWIzZDUyOWE2YjBkZjMxOGU4MWU3MzRkNzA4MjdhMjZlYzc4 +YTcyZTBjYzhmYWQ4YzQ0Yg== +-----END PRIVATE KEY for 5909def579148f456e8490659b859f80f8ccd62b5adda411e1acdc615c2ec795a88632cf2ec210a56ba91973fd3f07160f559f82f7afaafee008679fefb1b0cd2f26f4324197e6239c000accd1c427138568a8a9e276690c154d3df71a1f970c----- +-----BEGIN PRIVATE KEY for 58d6cfe7e8c3ec675da17e492c4ba97759fa15fc0f41bbe29d1d49d5f5ca7db142450ada15e1e4bf4657614e26cceb04ed5c0ca17207b0e24c4baf5f91afc092d43a02aaeae76218420817c85292f8de7d3a2b4f3c8615c2bb6a6d1c74267788----- +N2YxOWM0MTU0NGIyMzAxYjA1NzBiM2E5MjhlODIyOTQyNTBlN2JmZjg4NTE3OTll +MTRhNTk3NDZkNmFhYzQ0ZA== +-----END PRIVATE KEY for 58d6cfe7e8c3ec675da17e492c4ba97759fa15fc0f41bbe29d1d49d5f5ca7db142450ada15e1e4bf4657614e26cceb04ed5c0ca17207b0e24c4baf5f91afc092d43a02aaeae76218420817c85292f8de7d3a2b4f3c8615c2bb6a6d1c74267788----- +-----BEGIN PRIVATE KEY for eb79770be0ae70e1d6932832eab94117b0c1a2442b3fdb380b1ad5a809b6221a4905e02a628886c925d152c4e5006413fe69d1f11cf543f4802d4ce4e5eac2b18b78a79215c737e2e098b40802044bc6e946b712299286c34f6d33d8b681790d----- +OWM1Njc4NjEyMWFiMmQ2MTdhYTIwM2QxMzU1N2QwNThmM2FhNDhhOTMyNWVhNzhh +N2NlODVhOTFjZGY4ODAwNA== +-----END PRIVATE KEY for eb79770be0ae70e1d6932832eab94117b0c1a2442b3fdb380b1ad5a809b6221a4905e02a628886c925d152c4e5006413fe69d1f11cf543f4802d4ce4e5eac2b18b78a79215c737e2e098b40802044bc6e946b712299286c34f6d33d8b681790d----- +-----BEGIN PRIVATE KEY for 
bc03265a52610464f2f0431a69647be3106924f5bf67cf87cd889bf86d81739b3f0f37bad11ab93c5209dc4496f4130d69a9649596b97884b7e91e0b4d7c59169dd0729ac3e3bcd308efac56bc29d3cc249d8759580ab117943aa40df3baac05----- +ZmEyMmRkODcyMzExMzgzZmRlNmE3ZWFmYTk1ZGZhNWRhMWNmNTJjYTE3NTc1NTdi +Yzk5MjAyNDE2YzFkY2IwNw== +-----END PRIVATE KEY for bc03265a52610464f2f0431a69647be3106924f5bf67cf87cd889bf86d81739b3f0f37bad11ab93c5209dc4496f4130d69a9649596b97884b7e91e0b4d7c59169dd0729ac3e3bcd308efac56bc29d3cc249d8759580ab117943aa40df3baac05----- +-----BEGIN PRIVATE KEY for aa4be8f36c2880ee4d2ca79dbd7a53537e3965f255dfb5c75324fe29fcb6ce56148fbaea334268e413f0df95f580c40fb3484165b2852236e3a1aa68151ac3327d981cfae52d99f9a564bd3139cdd768661854dae78880d9320191cdb2989815----- +MmRmYmFkMzMyNGMyZWEwNzZlZDQyYWY1NjFkZDRiZDdmMTU4ZGRiODQxZTUzMzYy +ODI5YmZlOWI5YzljYmUzMg== +-----END PRIVATE KEY for aa4be8f36c2880ee4d2ca79dbd7a53537e3965f255dfb5c75324fe29fcb6ce56148fbaea334268e413f0df95f580c40fb3484165b2852236e3a1aa68151ac3327d981cfae52d99f9a564bd3139cdd768661854dae78880d9320191cdb2989815----- +-----BEGIN PRIVATE KEY for 3e86fea8365791b3becfc9aa2bc239f6be58725e61e46e7935c56479ad285e0781da1f277980d2e1d0ecff3982f2d90f321aa03f3d934adf260628d0ed0dc81a98dfaf1e6278e042d6c78dc65f2fa79d3b457754a321b8a0d7bf9998feeea817----- +NTM4ZmFkYjlkZjRkMzJjZDcxMzU5MmZhN2Q1MWI2NmNjODg1MGQ0NmZjZDQ2YTIz +N2RmN2ExN2ZhODE5MjAxNQ== +-----END PRIVATE KEY for 3e86fea8365791b3becfc9aa2bc239f6be58725e61e46e7935c56479ad285e0781da1f277980d2e1d0ecff3982f2d90f321aa03f3d934adf260628d0ed0dc81a98dfaf1e6278e042d6c78dc65f2fa79d3b457754a321b8a0d7bf9998feeea817----- +-----BEGIN PRIVATE KEY for aa92cf6e0ac62df09e7adca139c41a162ad668e7a797770b6d195cd9b175d0fca9eac3f4bf859967139f2ba109741a144e3dc5e6ccaeb6cd21f1d202b10f08832274cd9cdf6b10dbc2c60acdd1c70ae9beae2139e2b69eccbcde32a7f3991393----- +ZjQ0ZDNmZDcyZTVmYjJmYmFiMTVkYjdlMmNjYTYzYzBjM2VjYWE0NjkwMjg0MTcz +OTQxZDIzM2FjMWEzZDQxMA== +-----END PRIVATE KEY for aa92cf6e0ac62df09e7adca139c41a162ad668e7a797770b6d195cd9b175d0fca9eac3f4bf859967139f2ba109741a144e3dc5e6ccaeb6cd21f1d202b10f08832274cd9cdf6b10dbc2c60acdd1c70ae9beae2139e2b69eccbcde32a7f3991393----- +-----BEGIN PRIVATE KEY for f2b7819d1c2e2e1d007edcf896034085645f3c81e7c7fe21aa7ad4f35f8b863ee1db13448d15a3d0d15018f741a991010a9374710b628e41ef078be8a10249f2a3000598432c28186af1c04a219ac914434dca9c27e61485d701505112093f8a----- +NTNiOGVmY2EwYmY0NmIzNjI1MzUzOGM1YjU2YjIzYTg4MDgxYWUwOThmZjk0Y2Yx +YjI2OGIwYmYzOTQ4ZmIwZA== +-----END PRIVATE KEY for f2b7819d1c2e2e1d007edcf896034085645f3c81e7c7fe21aa7ad4f35f8b863ee1db13448d15a3d0d15018f741a991010a9374710b628e41ef078be8a10249f2a3000598432c28186af1c04a219ac914434dca9c27e61485d701505112093f8a----- +-----BEGIN PRIVATE KEY for 292742eee9d12dade21b4cd8bcd44c210c26d927ef6dbd9cad59008643a971a86ea6dfce247515d4266789b3fe8e35167278e781e52b4cd7b9781554ba67ecc08680eb19628e7741c94d8456090a08aceab1c8d2ed39bf59e8e282381aa32a0a----- +NjFjZmE3YmYyNTZhNTIzY2FjM2ZiY2I4NzQ5ZDVmZWNhNzc1OWU1YmZlMGM2OWY5 +YmRkNTU0MGU4MmMwYTQwOA== +-----END PRIVATE KEY for 292742eee9d12dade21b4cd8bcd44c210c26d927ef6dbd9cad59008643a971a86ea6dfce247515d4266789b3fe8e35167278e781e52b4cd7b9781554ba67ecc08680eb19628e7741c94d8456090a08aceab1c8d2ed39bf59e8e282381aa32a0a----- +-----BEGIN PRIVATE KEY for 11f784d2970d65769ce267710b3d08b28b78c3f79283758918c8ef15717ccbe90c23348cafe0e98a5d101b8dafbe7d081c6821dee8bf40ba150664ccc2dbbdd6358c92404e677d82910ce61f1d7584fbbbc9ebf71b7f35a118556e2a5c220501----- +MjU2ZGI2MmU3ZTBmMzkzMjlhYmM1YzE1NWM2NmE0YTdhNmRhOTY2MTVmMDgxOTMz +NTYwMzU0YjllNWQ3YjYyYw== +-----END 
PRIVATE KEY for 11f784d2970d65769ce267710b3d08b28b78c3f79283758918c8ef15717ccbe90c23348cafe0e98a5d101b8dafbe7d081c6821dee8bf40ba150664ccc2dbbdd6358c92404e677d82910ce61f1d7584fbbbc9ebf71b7f35a118556e2a5c220501----- +-----BEGIN PRIVATE KEY for 0382c11222db8a15e42e3ff64893df46c7720b439fb2a546462815ac0a8fa3bed99fceae5da9b68524e36f61cc074d09ceafec274c54f182c56a77583f9421f19c777265c43da1d5747304b36f0367cf3e8e5f63f41dad1a4362d9e1997a9e16----- +ZTUxOWQwNzcwZWRlZDhhNTFiMzIwN2M4MWRmMDhjMWZlMWZhMTQ1ZjFmYWQwNDU3 +YzI4NzRiNWQzYmY3Y2MwMw== +-----END PRIVATE KEY for 0382c11222db8a15e42e3ff64893df46c7720b439fb2a546462815ac0a8fa3bed99fceae5da9b68524e36f61cc074d09ceafec274c54f182c56a77583f9421f19c777265c43da1d5747304b36f0367cf3e8e5f63f41dad1a4362d9e1997a9e16----- diff --git a/cmd/node/config/testKeys/walletKeys.pem b/cmd/node/config/testKeys/walletKeys.pem new file mode 100644 index 00000000000..a0fe3cb02f0 --- /dev/null +++ b/cmd/node/config/testKeys/walletKeys.pem @@ -0,0 +1,175 @@ +-----BEGIN PRIVATE KEY for erd19ptzdk7zvs3csjjf8u04j5kxzn5jck409sefdyx6x9afkjg4wunsfw7rj7----- +ODgxZTRhNGQ1ZDZmMjg5MmNlZGYxN2QwZDExMjlhMWNlZDk3NDFjYzhiZTc3Njc1 +M2EyNTdlYmM2YWMyYmYzMzI4NTYyNmRiYzI2NDIzODg0YTQ5M2YxZjU5NTJjNjE0 +ZTkyYzVhYWYyYzMyOTY5MGRhMzE3YTliNDkxNTc3Mjc= +-----END PRIVATE KEY for erd19ptzdk7zvs3csjjf8u04j5kxzn5jck409sefdyx6x9afkjg4wunsfw7rj7----- +-----BEGIN PRIVATE KEY for erd1qz9gp38g4238r3077wq4tpc0jxaq0f87c0t2n2hr3x6fef85t3lshq2ejk----- +MmIzNTVjMGRiYmY4MmVkNTljNDVmNzkzMDcwMTRhNmNiN2MzYmU5YzQzMDI1OWZl +ZjkwMzc4ODZmNTQ4ZjVlYzAwOGE4MGM0ZThhYWEyNzFjNWZlZjM4MTU1ODcwZjkx +YmEwN2E0ZmVjM2Q2YTlhYWUzODliNDljYTRmNDVjN2Y= +-----END PRIVATE KEY for erd1qz9gp38g4238r3077wq4tpc0jxaq0f87c0t2n2hr3x6fef85t3lshq2ejk----- +-----BEGIN PRIVATE KEY for erd1tp2af4jvdh7p79myu5h6srtchh42p5e3pchqre3ejyyn9mqhwa3shpgj35----- +NDU4MmViYThmNTI5MDc2MDhmZThhNThhY2NhM2Y4NzgwM2Q2MjZlMGVjNjczZDRm +M2FkM2ZmNjQzZWIyZGJmODU4NTVkNGQ2NGM2ZGZjMWYxNzY0ZTUyZmE4MGQ3OGJk +ZWFhMGQzMzEwZTJlMDFlNjM5OTEwOTMyZWMxNzc3NjM= +-----END PRIVATE KEY for erd1tp2af4jvdh7p79myu5h6srtchh42p5e3pchqre3ejyyn9mqhwa3shpgj35----- +-----BEGIN PRIVATE KEY for erd1e2ftj4hj43lkduwps9xdmtgjnmugkh9mndph4n2cxfmf6ufvn4ks0zut84----- +NTlhYTVlOThlNTQ1MTVjYTEwMDFkNDU2ODAyNGRjNWNmMjI4MGE4ODFhMzNkOTQ3 +ZjFmMTQ1ZWZjZDY2YjEwNWNhOTJiOTU2ZjJhYzdmNjZmMWMxODE0Y2RkYWQxMjll +Zjg4YjVjYmI5YjQzN2FjZDU4MzI3NjlkNzEyYzlkNmQ= +-----END PRIVATE KEY for erd1e2ftj4hj43lkduwps9xdmtgjnmugkh9mndph4n2cxfmf6ufvn4ks0zut84----- +-----BEGIN PRIVATE KEY for erd1dzjes5c6a8ru45clgla3q0k3ezm06svefjz7vzs8pjfnrqa8tcasl4j8hs----- +OGZlOTExYjJmNjRhODRkYzI0MmMyZjNhZmIwNGJmY2QyZDRkOWM1ZDdiYzhmMGI0 +Mjc3NzVmZjU0NjkxYTFjOTY4YTU5ODUzMWFlOWM3Y2FkMzFmNDdmYjEwM2VkMWM4 +YjZmZDQxOTk0Yzg1ZTYwYTA3MGM5MzMxODNhNzVlM2I= +-----END PRIVATE KEY for erd1dzjes5c6a8ru45clgla3q0k3ezm06svefjz7vzs8pjfnrqa8tcasl4j8hs----- +-----BEGIN PRIVATE KEY for erd14gg3v6j4505ucx7t2wtl98tgupmyp748aq92jefmp5ha6e3pccgq9clwe9----- +ZDUwMzA4N2U4NWEyN2UyMDk0NDllMGIyZWFlN2M0Y2ViZmIwZTY0M2Q0MDg1NDZm +YzlkNTJmODJhOTBlMjg2MmFhMTExNjZhNTVhM2U5Y2MxYmNiNTM5N2YyOWQ2OGUw +NzY0MGZhYTdlODBhYTk2NTNiMGQyZmRkNjYyMWM2MTA= +-----END PRIVATE KEY for erd14gg3v6j4505ucx7t2wtl98tgupmyp748aq92jefmp5ha6e3pccgq9clwe9----- +-----BEGIN PRIVATE KEY for erd1xdfc44mk4ut5cv6l3mq0py6h88cty9ykacskm8xv3tvrp893kmxqppcefg----- +M2I0Y2M3NTQwNzA4ZGEwMWViOGMxNmY0MDFjMzAxZGFjNDI4Mzc5NjllNzU1MTJh +MjExZTBjMDBmMDI5YTRiODMzNTM4YWQ3NzZhZjE3NGMzMzVmOGVjMGYwOTM1NzM5 +ZjBiMjE0OTZlZTIxNmQ5Y2NjOGFkODMwOWNiMWI2Y2M= +-----END PRIVATE KEY for 
erd1xdfc44mk4ut5cv6l3mq0py6h88cty9ykacskm8xv3tvrp893kmxqppcefg----- +-----BEGIN PRIVATE KEY for erd1997jfwzrum4rrk59ar5supcyge9rpa73xgv2p45h3unt880v399svt8c9g----- +OTk0Yzg3YWFmOGMyYTI2ZmM5Yzc5YWJiODgwNDVmZGZhMWY5OTM0MjA5MTM3NDE0 +MWQwMWM1N2JiOGY5ODE0NjI5N2QyNGI4NDNlNmVhMzFkYTg1ZThlOTBlMDcwNDQ2 +NGEzMGY3ZDEzMjE4YTBkNjk3OGYyNmIzOWRlYzg5NGI= +-----END PRIVATE KEY for erd1997jfwzrum4rrk59ar5supcyge9rpa73xgv2p45h3unt880v399svt8c9g----- +-----BEGIN PRIVATE KEY for erd1e9cg9ys8fh77n9eaxpg47sxaes4fe9g2nvy6a65qpxykcx8grg9sv45lss----- +MjdlOTZjZDBjNGI0NTQxYjRkYzFjNjY4YjhmZDM0MWZhYWQ2MGM3M2NjNTM4YzM4 +M2QxZTBmYmRkN2I1NTk5N2M5NzA4MjkyMDc0ZGZkZTk5NzNkMzA1MTVmNDBkZGNj +MmE5Yzk1MGE5YjA5YWVlYTgwMDk4OTZjMThlODFhMGI= +-----END PRIVATE KEY for erd1e9cg9ys8fh77n9eaxpg47sxaes4fe9g2nvy6a65qpxykcx8grg9sv45lss----- +-----BEGIN PRIVATE KEY for erd1xdrltsygywhmtxzsmrgjlsxsxrf4y2ayv0z50y666dgsp66trxwqzajk96----- +Y2E2MzIxOGYzZGRjZjI1ZTIwZDM2MmQ3OWNjYWRiZDdhOTQ5ZWJjMjliYmE4YjZi +M2YyNDQyMWYwODgxNDJmMTMzNDdmNWMwODgyM2FmYjU5ODUwZDhkMTJmYzBkMDMw +ZDM1MjJiYTQ2M2M1NDc5MzVhZDM1MTAwZWI0YjE5OWM= +-----END PRIVATE KEY for erd1xdrltsygywhmtxzsmrgjlsxsxrf4y2ayv0z50y666dgsp66trxwqzajk96--- +-----BEGIN PRIVATE KEY for erd1lytewufjflpwl6gtf0faazjr59nd2fhfwlk7ew72hkpgdkmunl8qfrpywg----- +ZDFlNWMwZTA2NThlZmVmMjY3NWQ3YTBhYzUzZTY4MTJkYTdlMmNhNjhmNTRiMDdm +ZTRiMjYxYWFmZjM4Yzc2YmY5MTc5NzcxMzI0ZmMyZWZlOTBiNGJkM2RlOGE0M2Ex +NjZkNTI2ZTk3N2VkZWNiYmNhYmQ4Mjg2ZGI3YzlmY2U= +-----END PRIVATE KEY for erd1lytewufjflpwl6gtf0faazjr59nd2fhfwlk7ew72hkpgdkmunl8qfrpywg----- +-----BEGIN PRIVATE KEY for erd1s8tqztm4u4gw23489lps97qxe8vck8eln3a424y9c6yujsc96nas0l968d----- +OWZhYzA1YjhmOGEzNDEyYjkxMGQ0NjIyNzgwZjc4OGE1YmJiNThhNTlkODA3NmQz +YjFjMTNmZjM2MzdlZGYyYjgxZDYwMTJmNzVlNTUwZTU0NmE3MmZjMzAyZjgwNmM5 +ZDk4YjFmM2Y5YzdiNTU1NDg1YzY4OWM5NDMwNWQ0ZmI= +-----END PRIVATE KEY for erd1s8tqztm4u4gw23489lps97qxe8vck8eln3a424y9c6yujsc96nas0l968d----- +-----BEGIN PRIVATE KEY for erd1p7p0f3n8dxtj08hsp9hccqg932pd4f94rq3adg6g55etx8g4z8tsmg5e0g----- +NTI2NDc5M2JiMTgxZWY0YTAyNTIyYTUzNzUzYmYzODQ2M2FkODcwMmNlOWQwZWNl +MTQ1N2ExMDU0NmYyNzRmMTBmODJmNGM2Njc2OTk3Mjc5ZWYwMDk2ZjhjMDEwNThh +ODJkYWE0YjUxODIzZDZhMzQ4YTUzMmIzMWQxNTExZDc= +-----END PRIVATE KEY for erd1p7p0f3n8dxtj08hsp9hccqg932pd4f94rq3adg6g55etx8g4z8tsmg5e0g----- +-----BEGIN PRIVATE KEY for erd1uyeel03ea837dphrx2ak77hdvlhjdcqdwgyg6k99gqn602ymsn7qptmedj----- +ZTljNjFlM2QwMzQ3Y2QyMTc5MDI1YTM5NmVjNDYxZWU1NGU4ZGE0NzNjYzQyMTg1 +ZWUxNTFkOGM4ZjNkZDUzOGUxMzM5ZmJlMzllOWUzZTY4NmUzMzJiYjZmN2FlZDY3 +ZWYyNmUwMGQ3MjA4OGQ1OGE1NDAyN2E3YTg5Yjg0ZmM= +-----END PRIVATE KEY for erd1uyeel03ea837dphrx2ak77hdvlhjdcqdwgyg6k99gqn602ymsn7qptmedj----- +-----BEGIN PRIVATE KEY for erd1ftyzkdhl7rl782mrzrdc2jck3egydp0ydzhcjm9gc8s2jym5egrqadl4h6----- +YzI3YzY5MTgzMGUwYzJhNzlhZmVjYjI3N2UxMGRhOWZlNzZmYjUwZTJkMWQyNDc2 +YzZjNTgzNzVlMTgwZDc5NzRhYzgyYjM2ZmZmMGZmZTNhYjYzMTBkYjg1NGIxNjhl +NTA0Njg1ZTQ2OGFmODk2Y2E4YzFlMGE5MTM3NGNhMDY= +-----END PRIVATE KEY for erd1ftyzkdhl7rl782mrzrdc2jck3egydp0ydzhcjm9gc8s2jym5egrqadl4h6----- +-----BEGIN PRIVATE KEY for erd1rsl2sj5g87ltfq0hvrmgm35mlg4lzfs29p4gzxh0lh4vj2e8ykuqh69lha----- +NDg0MDgzZTIxMTk1ZGM2YjNjNmQwNTgwNWVmMGE2ZDhiYjdiMDYwMGZmMjFmMzIw +MGYwMzVhMTQwYjg2YTg2ODFjM2VhODRhODgzZmJlYjQ4MWY3NjBmNjhkYzY5YmZh +MmJmMTI2MGEyODZhODExYWVmZmRlYWM5MmIyNzI1Yjg= +-----END PRIVATE KEY for erd1rsl2sj5g87ltfq0hvrmgm35mlg4lzfs29p4gzxh0lh4vj2e8ykuqh69lha----- +-----BEGIN PRIVATE KEY for erd19yrjty2l4ytl6d3jynp5mqfekq4uf2x93akz60w7l3cp6qzny3psnfyerw----- +OGI3MDg3ZTk3NjQ3MmU0YzFiMDhmY2ZlNzQ5OGIwNDg5NTljYjZmYTlkMGExNjNl 
+YzFiMzk0M2NjMTk2N2Q4ZTI5MDcyNTkxNWZhOTE3ZmQzNjMyMjRjMzRkODEzOWIw +MmJjNGE4YzU4ZjZjMmQzZGRlZmM3MDFkMDA1MzI0NDM= +-----END PRIVATE KEY for erd19yrjty2l4ytl6d3jynp5mqfekq4uf2x93akz60w7l3cp6qzny3psnfyerw----- +-----BEGIN PRIVATE KEY for erd148lq42zdzz34y0yr8avldsy7gw0rmuvj4lmstzug77v08z3q0ncszfk8w9----- +ZjVlNTgzODEyZDIzNjgyNDlmMjczOTc1NGIwYWQ0NGY0ZWI0OTMyZDViZWJmMTM0 +ZjMyYzYzNDM0NDkyOTBhOGE5ZmUwYWE4NGQxMGEzNTIzYzgzM2Y1OWY2YzA5ZTQz +OWUzZGYxOTJhZmY3MDU4Yjg4Zjc5OGYzOGEyMDdjZjE= +-----END PRIVATE KEY for erd148lq42zdzz34y0yr8avldsy7gw0rmuvj4lmstzug77v08z3q0ncszfk8w9----- +-----BEGIN PRIVATE KEY for erd1k2v4h3805gnxf78c22g7lfe4pgq2lmr4ezmkk2rqkej6yjd7g5ssu88fme----- +YTEwMTM5NjQ0NjRlMzZhMDgyNTVkZTQyMTYyYmRhMjZiODVmNzEwOTgwZTAzM2M3 +ZGE0NjNjOTdlN2YyMzJkOGIyOTk1YmM0ZWZhMjI2NjRmOGY4NTI5MWVmYTczNTBh +MDBhZmVjNzVjOGI3NmIyODYwYjY2NWEyNDliZTQ1MjE= +-----END PRIVATE KEY for erd1k2v4h3805gnxf78c22g7lfe4pgq2lmr4ezmkk2rqkej6yjd7g5ssu88fme----- +-----BEGIN PRIVATE KEY for erd1nzjyj2ykpway04pczl42fgrlza2f0eaf97fxgnuuw39vyee36xlqccc3qz----- +Y2VlOGU0M2I4N2Q3YTBhM2E3ZmE3Y2ZiY2RhMTA0YjRhNGQ5YWUyMGNlZWZiODY5 +ODkyZmNiNWYxZTdjOGQzNjk4YTQ0OTI4OTYwYmJhNDdkNDM4MTdlYWE0YTA3ZjE3 +NTQ5N2U3YTkyZjkyNjQ0ZjljNzQ0YWMyNjczMWQxYmU= +-----END PRIVATE KEY for erd1nzjyj2ykpway04pczl42fgrlza2f0eaf97fxgnuuw39vyee36xlqccc3qz----- +-----BEGIN PRIVATE KEY for erd1yp0nvml5c45us3qzreqxkjxaakxn744t3gdva9s8xndcakzawutstepmm5----- +ZDA2NTdmMmU2ZTZmNjlkNTlkZjM0Mjc5NDhiODk5ODY3NDQ3ZmI4MDlhOTE3Yjcx +NjExZDg2ZGQ5ZjA4ZmMwMjIwNWYzNjZmZjRjNTY5Yzg0NDAyMWU0MDZiNDhkZGVk +OGQzZjU2YWI4YTFhY2U5NjA3MzRkYjhlZDg1ZDc3MTc= +-----END PRIVATE KEY for erd1yp0nvml5c45us3qzreqxkjxaakxn744t3gdva9s8xndcakzawutstepmm5----- +-----BEGIN PRIVATE KEY for erd1qyg80tr4rd65ur3hedm9h4yv3fcwmm6vnyrypnm972nd80889hxqdfgwrc----- +MTg4ZDlhNzE3NzAzNjYyMzY2YjE2NTIzYzI0MTliN2ExZjQ2OTk5Yzk5MmI5Mzcw +MDkxYTcxOGUwOTcxYjFkYjAxMTA3N2FjNzUxYjc1NGUwZTM3Y2I3NjViZDQ4Yzhh +NzBlZGVmNGM5OTA2NDBjZjY1ZjJhNmQzYmNlNzJkY2M= +-----END PRIVATE KEY for erd1qyg80tr4rd65ur3hedm9h4yv3fcwmm6vnyrypnm972nd80889hxqdfgwrc----- +-----BEGIN PRIVATE KEY for erd14x6d48q59zjh5p909fyw7e46czftgdawyf734cnmgk5e63ghrvvsqp254t----- +MzdmMDI3OGU4NGU3NjJlNzAzMzA3ZmY2MWQ4OGJlNjg5NDQ4MWVlNGNmZDI5NmQ1 +NjJmMjFkMWQ5MWE4OTFlOWE5YjRkYTljMTQyOGE1N2EwNGFmMmE0OGVmNjZiYWMw +OTJiNDM3YWUyMjdkMWFlMjdiNDVhOTlkNDUxNzFiMTk= +-----END PRIVATE KEY for erd14x6d48q59zjh5p909fyw7e46czftgdawyf734cnmgk5e63ghrvvsqp254t----- +-----BEGIN PRIVATE KEY for erd1wyxylus33e476h5kta7e0caeqvgvcgrxh0az33e7szya6g7mh2ws0n27sa----- +NTMwNjAxNzU5OThiYTIxNmRmN2EyN2E1Mjg3ZWMxODA4NjNiMTRkNjE5ZmFiY2U4 +ODhlMGU0MzIwNjFjMWM2MjcxMGM0ZmYyMTE4ZTZiZWQ1ZTk2NWY3ZDk3ZTNiOTAz +MTBjYzIwNjZiYmZhMjhjNzNlODA4OWRkMjNkYmJhOWQ= +-----END PRIVATE KEY for erd1wyxylus33e476h5kta7e0caeqvgvcgrxh0az33e7szya6g7mh2ws0n27sa----- +-----BEGIN PRIVATE KEY for erd1v3ylw7t6vzjzs06xjf6ccmf576ud38g2ws45tjkjg48s38jefpzqlwms9w----- +OWZhNzRmNTE2MTFiNDA5ZGU2YTIyZTI3NDQ5OTI0YmM2NDM4Y2E4ZWFjYzI0MTJj +Yzc0MjcwYjMzOGNlYTY5ZTY0NDlmNzc5N2E2MGE0MjgzZjQ2OTI3NThjNmQzNGY2 +YjhkODlkMGE3NDJiNDVjYWQyNDU0ZjA4OWU1OTQ4NDQ= +-----END PRIVATE KEY for erd1v3ylw7t6vzjzs06xjf6ccmf576ud38g2ws45tjkjg48s38jefpzqlwms9w----- +-----BEGIN PRIVATE KEY for erd1twel4azu6uptw878y063p93mjr84y5m4kpsww2aeqj4pg5jeplgst04rhg----- +MDNhOGM3OWQwM2M2MzljODUyZmFhNDlmZGFhODMyNjFhNGJjYjI4MDdmYWU1MGI1 +OTUyMzJjOGQwNTdiZWJkNDViYjNmYWY0NWNkNzAyYjcxZmM3MjNmNTEwOTYzYjkw +Y2Y1MjUzNzViMDYwZTcyYmI5MDRhYTE0NTI1OTBmZDE= +-----END PRIVATE KEY for erd1twel4azu6uptw878y063p93mjr84y5m4kpsww2aeqj4pg5jeplgst04rhg----- +-----BEGIN PRIVATE KEY for 
erd1q2se75ucl9as9j7e48v00jrnj6hvtk5vqxa4a3ag5729vctsdkasm20cyc----- +YjhiYjRhOTFmOTEyNjAwZWViYmI5N2MzYzBlOGQ1NTc3YzQ4OGE2M2IwZDhhZmY5 +ZjI2NjNhNzcyOWI5ZjMyZjAyYTE5ZjUzOThmOTdiMDJjYmQ5YTlkOGY3Yzg3Mzk2 +YWVjNWRhOGMwMWJiNWVjN2E4YTc5NDU2NjE3MDZkYmI= +-----END PRIVATE KEY for erd1q2se75ucl9as9j7e48v00jrnj6hvtk5vqxa4a3ag5729vctsdkasm20cyc----- +-----BEGIN PRIVATE KEY for erd18cc6cm35xhv7kzwsm79l4ma6jpz3ee5l0yjxuc66kh6rcgtawtuq6lzp9f----- +YWZjMGYzNmIwNWY3NGIwOGYyOWViMzMwZjkwZmU1ZTFmNmI4OWFlZDBkYzBjNjlk +OGY1NjJmMTk2MzA2ZWJiZDNlMzFhYzZlMzQzNWQ5ZWIwOWQwZGY4YmZhZWZiYTkw +NDUxY2U2OWY3OTI0NmU2MzVhYjVmNDNjMjE3ZDcyZjg= +-----END PRIVATE KEY for erd18cc6cm35xhv7kzwsm79l4ma6jpz3ee5l0yjxuc66kh6rcgtawtuq6lzp9f----- +-----BEGIN PRIVATE KEY for erd1psux99h4jljyt3nkw8pruv3spw5r0unqe4wk8837mm9my88gl28qj6mml5----- +YWMwMTM4NjU1MDVhMzM5MTEwZDJhOGI4N2E5ZDc3YWVlYjJiYmVjNjkwZjEzOWI3 +YjUwMDNkZTQzYzBjZDM2YzBjMzg2Mjk2ZjU5N2U0NDVjNjc2NzFjMjNlMzIzMDBi +YTgzN2YyNjBjZDVkNjM5ZTNlZGVjYmIyMWNlOGZhOGU= +-----END PRIVATE KEY for erd1psux99h4jljyt3nkw8pruv3spw5r0unqe4wk8837mm9my88gl28qj6mml5----- +-----BEGIN PRIVATE KEY for erd1vgm89ngmv2ghzsyq8xjtt45crekkxnhsq30yxzlq86uc3ra3r57qa3mw2p----- +N2E4YTViOGMzYjI3OWRmODMwYTkwNDI2YjI4MzU0NjE2MWJjOWIzN2NlYmE3Zjcy +NzFkYjk3YmIxZDM3YjUzZDYyMzY3MmNkMWI2MjkxNzE0MDgwMzlhNGI1ZDY5ODFl +NmQ2MzRlZjAwNDVlNDMwYmUwM2ViOTg4OGZiMTFkM2M= +-----END PRIVATE KEY for erd1vgm89ngmv2ghzsyq8xjtt45crekkxnhsq30yxzlq86uc3ra3r57qa3mw2p----- +-----BEGIN PRIVATE KEY for erd1k767vmmn8vg8xvuny32ppwr4dxrlgmpykn0u7nm92evlag3wkukqdgsf5u----- +ODkxZjVhZTdhOGE0ZTdiMDAxNzBmZWM1NGFhN2FjZDgzNDVlZGJlYjc4M2UwZDUw +ZTEwNGUyZmZlY2U2MTMwYWI3YjVlNjZmNzMzYjEwNzMzMzkzMjQ1NDEwYjg3NTY5 +ODdmNDZjMjRiNGRmY2Y0ZjY1NTY1OWZlYTIyZWI3MmM= +-----END PRIVATE KEY for erd1k767vmmn8vg8xvuny32ppwr4dxrlgmpykn0u7nm92evlag3wkukqdgsf5u----- +-----BEGIN PRIVATE KEY for erd1hwe8lskmzsdpuy3f6hldamvn0zrhzldec8m4tt8hupq58d7gyrequy8wsp----- +NjE0ZDc2YWVjOGE1MmI3NWU2MDI5ZWM4YjcyZWU1MTY1Mzg1OGQ2MzM4MmM1MmZl +MDc2MzI3ZWYxYTg1ZDk3ZGJiYjI3ZmMyZGIxNDFhMWUxMjI5ZDVmZWRlZWQ5Mzc4 +ODc3MTdkYjljMWY3NTVhY2Y3ZTA0MTQzYjdjODIwZjI= +-----END PRIVATE KEY for erd1hwe8lskmzsdpuy3f6hldamvn0zrhzldec8m4tt8hupq58d7gyrequy8wsp----- +-----BEGIN PRIVATE KEY for erd125eyrjk99zadr04gm9z2p4nckmnegexs5nyk7ek85rut2665t75sql3w88----- +ZWQ2YTFjNzAyMGMzMDc3ZGU5MGIzMTEyY2Y3NTAyYTgwNWM1MmQ0MDdhNWMyMDRj +NmYyNmNhNDNiNWEzYWU4OTU1MzI0MWNhYzUyOGJhZDFiZWE4ZDk0NGEwZDY3OGI2 +ZTc5NDY0ZDBhNGM5NmY2NmM3YTBmOGI1NmI1NDVmYTk= +-----END PRIVATE KEY for erd125eyrjk99zadr04gm9z2p4nckmnegexs5nyk7ek85rut2665t75sql3w88----- +-----BEGIN PRIVATE KEY for erd17ndrqg38lqf2zjgeqvle90rsn9ejrd9upx8evkyvh8e0m5xlph5scv9l6n----- +YzU3YjdlZGZkZWE3Nzk2MWI0N2Y1YmFkYmYzMTc0M2MwMmRmNjMzOGIyMWExYjFk +M2E5NWQyYWE2NmZkMjgzNWY0ZGEzMDIyMjdmODEyYTE0OTE5MDMzZjkyYmM3MDk5 +NzMyMWI0YmMwOThmOTY1ODhjYjlmMmZkZDBkZjBkZTk= +-----END PRIVATE KEY for erd17ndrqg38lqf2zjgeqvle90rsn9ejrd9upx8evkyvh8e0m5xlph5scv9l6n----- +-----BEGIN PRIVATE KEY for erd1zed89c8226rs7f59zh2xea39qk9ym9tsmt4s0sg2uw7u9nvtzt3q8fdj2e----- +ZTE5MGYwNDU0NjA0ZTI4ZjI5NzVlN2U5YTY1M2VhYjM2ZTdlOWRiZGEzYzQ2NjVk +MTk2MmMxMGMwZTU3Mjg3NzE2NWE3MmUwZWE1Njg3MGYyNjg1MTVkNDZjZjYyNTA1 +OGE0ZDk1NzBkYWViMDdjMTBhZTNiZGMyY2Q4YjEyZTI= +-----END PRIVATE KEY for erd1zed89c8226rs7f59zh2xea39qk9ym9tsmt4s0sg2uw7u9nvtzt3q8fdj2e----- diff --git a/cmd/node/flags.go b/cmd/node/flags.go index 7f610b8d130..72c86c04f96 100644 --- a/cmd/node/flags.go +++ b/cmd/node/flags.go @@ -633,7 +633,8 @@ func applyCompatibleConfigs(log logger.Logger, configs *config.Configs) error { isInHistoricalBalancesMode := 
operationmodes.SliceContainsElement(operationModes, operationmodes.OperationModeHistoricalBalances) if isInHistoricalBalancesMode { - processHistoricalBalancesMode(log, configs) + // TODO move all operation modes settings in the common/operationmodes package and add tests + operationmodes.ProcessHistoricalBalancesMode(log, configs) } isInDbLookupExtensionMode := operationmodes.SliceContainsElement(operationModes, operationmodes.OperationModeDbLookupExtension) @@ -649,28 +650,6 @@ func applyCompatibleConfigs(log logger.Logger, configs *config.Configs) error { return nil } -func processHistoricalBalancesMode(log logger.Logger, configs *config.Configs) { - configs.GeneralConfig.StoragePruning.Enabled = true - configs.GeneralConfig.StoragePruning.ValidatorCleanOldEpochsData = false - configs.GeneralConfig.StoragePruning.ObserverCleanOldEpochsData = false - configs.GeneralConfig.GeneralSettings.StartInEpochEnabled = false - configs.GeneralConfig.StoragePruning.AccountsTrieCleanOldEpochsData = false - configs.GeneralConfig.StateTriesConfig.AccountsStatePruningEnabled = false - configs.GeneralConfig.DbLookupExtensions.Enabled = true - configs.PreferencesConfig.Preferences.FullArchive = true - - log.Warn("the node is in historical balances mode! Will auto-set some config values", - "StoragePruning.Enabled", configs.GeneralConfig.StoragePruning.Enabled, - "StoragePruning.ValidatorCleanOldEpochsData", configs.GeneralConfig.StoragePruning.ValidatorCleanOldEpochsData, - "StoragePruning.ObserverCleanOldEpochsData", configs.GeneralConfig.StoragePruning.ObserverCleanOldEpochsData, - "StoragePruning.AccountsTrieCleanOldEpochsData", configs.GeneralConfig.StoragePruning.AccountsTrieCleanOldEpochsData, - "GeneralSettings.StartInEpochEnabled", configs.GeneralConfig.GeneralSettings.StartInEpochEnabled, - "StateTriesConfig.AccountsStatePruningEnabled", configs.GeneralConfig.StateTriesConfig.AccountsStatePruningEnabled, - "DbLookupExtensions.Enabled", configs.GeneralConfig.DbLookupExtensions.Enabled, - "Preferences.FullArchive", configs.PreferencesConfig.Preferences.FullArchive, - ) -} - func processDbLookupExtensionMode(log logger.Logger, configs *config.Configs) { configs.GeneralConfig.DbLookupExtensions.Enabled = true configs.GeneralConfig.StoragePruning.Enabled = true diff --git a/cmd/node/main.go b/cmd/node/main.go index 65fe1165a43..c7cc3c1085c 100644 --- a/cmd/node/main.go +++ b/cmd/node/main.go @@ -6,6 +6,7 @@ import ( "runtime" "time" + "github.com/klauspost/cpuid/v2" "github.com/multiversx/mx-chain-core-go/core" "github.com/multiversx/mx-chain-core-go/core/check" "github.com/multiversx/mx-chain-go/cmd/node/factory" @@ -46,10 +47,13 @@ VERSION: // appVersion should be populated at build time using ldflags // Usage examples: // linux/mac: -// go build -v -ldflags="-X main.appVersion=$(git describe --tags --long --dirty)" +// +// go build -v -ldflags="-X main.appVersion=$(git describe --tags --long --dirty)" +// // windows: -// for /f %i in ('git describe --tags --long --dirty') do set VERS=%i -// go build -v -ldflags="-X main.appVersion=%VERS%" +// +// for /f %i in ('git describe --tags --long --dirty') do set VERS=%i +// go build -v -ldflags="-X main.appVersion=%VERS%" var appVersion = common.UnVersionedAppString func main() { @@ -129,6 +133,11 @@ func startNodeRunner(c *cli.Context, log logger.Logger, baseVersion string, vers cfgs.FlagsConfig.BaseVersion = baseVersion cfgs.FlagsConfig.Version = version + err = checkHardwareRequirements(cfgs.GeneralConfig.HardwareRequirements) + if err != nil { + return 
fmt.Errorf("Hardware Requirements checks failed: %s", err.Error())
+	}
+
 	nodeRunner, errRunner := node.NewNodeRunner(cfgs)
 	if errRunner != nil {
 		return errRunner
@@ -301,3 +310,30 @@ func attachFileLogger(log logger.Logger, flagsConfig *config.ContextFlagsConfig)
 
 	return fileLogging, nil
 }
+
+func checkHardwareRequirements(cfg config.HardwareRequirementsConfig) error {
+	cpuFlags, err := parseFeatures(cfg.CPUFlags)
+	if err != nil {
+		return err
+	}
+
+	if !cpuid.CPU.Supports(cpuFlags...) {
+		return fmt.Errorf("CPU Flags: Streaming SIMD Extensions 4 required")
+	}
+
+	return nil
+}
+
+func parseFeatures(features []string) ([]cpuid.FeatureID, error) {
+	flags := make([]cpuid.FeatureID, 0)
+
+	for _, cpuFlag := range features {
+		featureID := cpuid.ParseFeature(cpuFlag)
+		if featureID == cpuid.UNKNOWN {
+			return nil, fmt.Errorf("CPU Flags: cpu flag %s not found", cpuFlag)
+		}
+		flags = append(flags, featureID)
+	}
+
+	return flags, nil
+}
diff --git a/cmd/seednode/config/p2p.toml b/cmd/seednode/config/p2p.toml
index 2c1a92717c9..8ddd4a72e4a 100644
--- a/cmd/seednode/config/p2p.toml
+++ b/cmd/seednode/config/p2p.toml
@@ -22,10 +22,11 @@
     [Node.Transports.TCP]
         ListenAddress = "/ip4/0.0.0.0/tcp/%d" # TCP listen address
        PreventPortReuse = true # seeder nodes will need to enable this option
-    [Node.ResourceLimiter]
-        Type = "default with manual scale"
-        ManualSystemMemoryInMB = 65536 # pretend that the host running the seeder has more RAM so it can handle more connections
-        ManualMaximumFD = 1048576
+
+    [Node.ResourceLimiter]
+        Type = "default with manual scale"
+        ManualSystemMemoryInMB = 65536 # pretend that the host running the seeder has more RAM so it can handle more connections
+        ManualMaximumFD = 1048576
 
 # P2P peer discovery section
diff --git a/cmd/seednode/main.go b/cmd/seednode/main.go
index c881fb2a752..ee083fde21d 100644
--- a/cmd/seednode/main.go
+++ b/cmd/seednode/main.go
@@ -309,12 +309,21 @@ func displayMessengerInfo(messenger p2p.Messenger) {
 		return strings.Compare(mesConnectedAddrs[i], mesConnectedAddrs[j]) < 0
 	})
 
-	log.Info("known peers", "num peers", len(messenger.Peers()))
-	headerConnectedAddresses := []string{fmt.Sprintf("Seednode is connected to %d peers:", len(mesConnectedAddrs))}
+	protocolIDString := "Valid protocol ID?"
+	log.Info("peers info", "num known peers", len(messenger.Peers()), "num connected peers", len(mesConnectedAddrs))
+	headerConnectedAddresses := []string{"Connected peers", protocolIDString}
 	connAddresses := make([]*display.LineData, len(mesConnectedAddrs))
 
+	yesMarker := "yes"
+	yesMarker = strings.Repeat(" ", (len(protocolIDString)-len(yesMarker))/2) + yesMarker // add padding
+	noMarker := "!!! no !!!"
+ noMarker = strings.Repeat(" ", (len(protocolIDString)-len(noMarker))/2) + noMarker // add padding for idx, address := range mesConnectedAddrs { - connAddresses[idx] = display.NewLineData(false, []string{address}) + marker := noMarker + if messenger.HasCompatibleProtocolID(address) { + marker = yesMarker + } + connAddresses[idx] = display.NewLineData(false, []string{address, marker}) } tbl2, _ := display.CreateTableString(headerConnectedAddresses, connAddresses) diff --git a/cmd/termui/presenter/presenterStatusHandler.go b/cmd/termui/presenter/presenterStatusHandler.go index 6ad88f98e4d..1722eedbcb4 100644 --- a/cmd/termui/presenter/presenterStatusHandler.go +++ b/cmd/termui/presenter/presenterStatusHandler.go @@ -6,7 +6,7 @@ import ( "sync" ) -//maxLogLines is used to specify how many lines of logs need to store in slice +// maxLogLines is used to specify how many lines of logs need to store in slice var maxLogLines = 100 // PresenterStatusHandler is the AppStatusHandler impl that is able to process and store received data diff --git a/cmd/termui/view/termuic/interface.go b/cmd/termui/view/termuic/interface.go index ecc3e618da6..63384792e6b 100644 --- a/cmd/termui/view/termuic/interface.go +++ b/cmd/termui/view/termuic/interface.go @@ -1,6 +1,6 @@ package termuic -//TermuiRender defines the actions which should be handled by a render +// TermuiRender defines the actions which should be handled by a render type TermuiRender interface { // RefreshData method is used to refresh data that are displayed on a grid RefreshData(numMillisecondsRefreshTime int) diff --git a/cmd/termui/view/termuic/termuiRenders/drawableContainer.go b/cmd/termui/view/termuic/termuiRenders/drawableContainer.go index 4964c9d6a85..f21472b2185 100644 --- a/cmd/termui/view/termuic/termuiRenders/drawableContainer.go +++ b/cmd/termui/view/termuic/termuiRenders/drawableContainer.go @@ -17,7 +17,7 @@ type DrawableContainer struct { maxHeight int } -//NewDrawableContainer method is used to return a new NewDrawableContainer structure +// NewDrawableContainer method is used to return a new NewDrawableContainer structure func NewDrawableContainer() *DrawableContainer { dc := DrawableContainer{} return &dc diff --git a/common/constants.go b/common/constants.go index c2d76f0072a..5d4e15e9fc5 100644 --- a/common/constants.go +++ b/common/constants.go @@ -43,6 +43,14 @@ const NewList PeerType = "new" // MetachainTopicIdentifier is the identifier used in topics to define the metachain shard ID const MetachainTopicIdentifier = "META" // TODO - move this to mx-chain-core-go and change wherever we use the string value +// AuctionList represents the list of peers which don't participate in consensus yet, but will be selected +// based on their top up stake +const AuctionList PeerType = "auction" + +// SelectedFromAuctionList represents the list of peers which have been selected from AuctionList based on +// their top up to be distributed on the WaitingList in the next epoch +const SelectedFromAuctionList PeerType = "selectedFromAuction" + // CombinedPeerType - represents the combination of two peerTypes const CombinedPeerType = "%s (%s)" @@ -309,6 +317,9 @@ const MetricRedundancyLevel = "erd_redundancy_level" // MetricRedundancyIsMainActive is the metric that specifies data about the redundancy main machine const MetricRedundancyIsMainActive = "erd_redundancy_is_main_active" +// MetricRedundancyStepInReason is the metric that specifies why the back-up machine stepped in +const MetricRedundancyStepInReason = 
"erd_redundancy_step_in_reason" + // MetricValueNA represents the value to be used when a metric is not available/applicable const MetricValueNA = "N/A" @@ -505,12 +516,6 @@ const ( // MetricESDTTransferRoleEnableEpoch represents the epoch when the ESDT transfer role feature is enabled MetricESDTTransferRoleEnableEpoch = "erd_esdt_transfer_role_enable_epoch" - // MetricBuiltInFunctionOnMetaEnableEpoch represents the epoch when the builtin functions on metachain are enabled - MetricBuiltInFunctionOnMetaEnableEpoch = "erd_builtin_function_on_meta_enable_epoch" - - // MetricWaitingListFixEnableEpoch represents the epoch when the waiting list fix is enabled - MetricWaitingListFixEnableEpoch = "erd_waiting_list_fix_enable_epoch" - // MetricMaxNodesChangeEnableEpoch holds configuration for changing the maximum number of nodes and the enabling epoch MetricMaxNodesChangeEnableEpoch = "erd_max_nodes_change_enable_epoch" @@ -890,6 +895,7 @@ const MetricTrieSyncNumProcessedNodes = "erd_trie_sync_num_nodes_processed" // FullArchiveMetricSuffix is the suffix added to metrics specific for full archive network const FullArchiveMetricSuffix = "_full_archive" +// Enable epoch flags definitions const ( SCDeployFlag core.EnableEpochFlag = "SCDeployFlag" BuiltInFunctionsFlag core.EnableEpochFlag = "BuiltInFunctionsFlag" @@ -928,7 +934,6 @@ const ( ESDTMultiTransferFlag core.EnableEpochFlag = "ESDTMultiTransferFlag" GlobalMintBurnFlag core.EnableEpochFlag = "GlobalMintBurnFlag" ESDTTransferRoleFlag core.EnableEpochFlag = "ESDTTransferRoleFlag" - BuiltInFunctionOnMetaFlag core.EnableEpochFlag = "BuiltInFunctionOnMetaFlag" ComputeRewardCheckpointFlag core.EnableEpochFlag = "ComputeRewardCheckpointFlag" SCRSizeInvariantCheckFlag core.EnableEpochFlag = "SCRSizeInvariantCheckFlag" BackwardCompSaveKeyValueFlag core.EnableEpochFlag = "BackwardCompSaveKeyValueFlag" @@ -969,7 +974,6 @@ const ( SendAlwaysFlag core.EnableEpochFlag = "SendAlwaysFlag" ValueLengthCheckFlag core.EnableEpochFlag = "ValueLengthCheckFlag" CheckTransferFlag core.EnableEpochFlag = "CheckTransferFlag" - TransferToMetaFlag core.EnableEpochFlag = "TransferToMetaFlag" ESDTNFTImprovementV1Flag core.EnableEpochFlag = "ESDTNFTImprovementV1Flag" ChangeDelegationOwnerFlag core.EnableEpochFlag = "ChangeDelegationOwnerFlag" RefactorPeersMiniBlocksFlag core.EnableEpochFlag = "RefactorPeersMiniBlocksFlag" @@ -988,6 +992,7 @@ const ( MultiClaimOnDelegationFlag core.EnableEpochFlag = "MultiClaimOnDelegationFlag" ChangeUsernameFlag core.EnableEpochFlag = "ChangeUsernameFlag" AutoBalanceDataTriesFlag core.EnableEpochFlag = "AutoBalanceDataTriesFlag" + MigrateDataTrieFlag core.EnableEpochFlag = "MigrateDataTrieFlag" FixDelegationChangeOwnerOnAccountFlag core.EnableEpochFlag = "FixDelegationChangeOwnerOnAccountFlag" FixOOGReturnCodeFlag core.EnableEpochFlag = "FixOOGReturnCodeFlag" DeterministicSortOnValidatorsInfoFixFlag core.EnableEpochFlag = "DeterministicSortOnValidatorsInfoFixFlag" @@ -995,10 +1000,16 @@ const ( ScToScLogEventFlag core.EnableEpochFlag = "ScToScLogEventFlag" BlockGasAndFeesReCheckFlag core.EnableEpochFlag = "BlockGasAndFeesReCheckFlag" BalanceWaitingListsFlag core.EnableEpochFlag = "BalanceWaitingListsFlag" - WaitingListFixFlag core.EnableEpochFlag = "WaitingListFixFlag" NFTStopCreateFlag core.EnableEpochFlag = "NFTStopCreateFlag" FixGasRemainingForSaveKeyValueFlag core.EnableEpochFlag = "FixGasRemainingForSaveKeyValueFlag" IsChangeOwnerAddressCrossShardThroughSCFlag core.EnableEpochFlag = "IsChangeOwnerAddressCrossShardThroughSCFlag" 
CurrentRandomnessOnSortingFlag core.EnableEpochFlag = "CurrentRandomnessOnSortingFlag" + StakeLimitsFlag core.EnableEpochFlag = "StakeLimitsFlag" + StakingV4Step1Flag core.EnableEpochFlag = "StakingV4Step1Flag" + StakingV4Step2Flag core.EnableEpochFlag = "StakingV4Step2Flag" + StakingV4Step3Flag core.EnableEpochFlag = "StakingV4Step3Flag" + StakingQueueFlag core.EnableEpochFlag = "StakingQueueFlag" + StakingV4StartedFlag core.EnableEpochFlag = "StakingV4StartedFlag" + AlwaysMergeContextsInEEIFlag core.EnableEpochFlag = "AlwaysMergeContextsInEEIFlag" // all new flags must be added to createAllFlagsMap method, as part of enableEpochsHandler allFlagsDefined ) diff --git a/common/dtos.go b/common/dtos.go index e7876a9131b..50cf1109017 100644 --- a/common/dtos.go +++ b/common/dtos.go @@ -75,3 +75,19 @@ type EpochStartDataAPI struct { type AlteredAccountsForBlockAPIResponse struct { Accounts []*alteredAccount.AlteredAccount `json:"accounts"` } + +// AuctionNode holds data needed for a node in auction to respond to API calls +type AuctionNode struct { + BlsKey string `json:"blsKey"` + Qualified bool `json:"qualified"` +} + +// AuctionListValidatorAPIResponse holds the data needed for an auction node validator for responding to API calls +type AuctionListValidatorAPIResponse struct { + Owner string `json:"owner"` + NumStakedNodes int64 `json:"numStakedNodes"` + TotalTopUp string `json:"totalTopUp"` + TopUpPerNode string `json:"topUpPerNode"` + QualifiedTopUp string `json:"qualifiedTopUp"` + Nodes []*AuctionNode `json:"nodes"` +} diff --git a/common/enablers/enableEpochsHandler.go b/common/enablers/enableEpochsHandler.go index 197cab8fff8..d560a432462 100644 --- a/common/enablers/enableEpochsHandler.go +++ b/common/enablers/enableEpochsHandler.go @@ -275,18 +275,6 @@ func (handler *enableEpochsHandler) createAllFlagsMap() { }, activationEpoch: handler.enableEpochsConfig.ESDTTransferRoleEnableEpoch, }, - common.BuiltInFunctionOnMetaFlag: { - isActiveInEpoch: func(epoch uint32) bool { - return epoch >= handler.enableEpochsConfig.BuiltInFunctionOnMetaEnableEpoch - }, - activationEpoch: handler.enableEpochsConfig.BuiltInFunctionOnMetaEnableEpoch, - }, - common.TransferToMetaFlag: { - isActiveInEpoch: func(epoch uint32) bool { - return epoch >= handler.enableEpochsConfig.BuiltInFunctionOnMetaEnableEpoch - }, - activationEpoch: handler.enableEpochsConfig.BuiltInFunctionOnMetaEnableEpoch, - }, common.ComputeRewardCheckpointFlag: { isActiveInEpoch: func(epoch uint32) bool { return epoch >= handler.enableEpochsConfig.ComputeRewardCheckpointEnableEpoch @@ -629,6 +617,12 @@ func (handler *enableEpochsHandler) createAllFlagsMap() { }, activationEpoch: handler.enableEpochsConfig.AutoBalanceDataTriesEnableEpoch, }, + common.MigrateDataTrieFlag: { + isActiveInEpoch: func(epoch uint32) bool { + return epoch >= handler.enableEpochsConfig.MigrateDataTrieEnableEpoch + }, + activationEpoch: handler.enableEpochsConfig.MigrateDataTrieEnableEpoch, + }, common.FixDelegationChangeOwnerOnAccountFlag: { isActiveInEpoch: func(epoch uint32) bool { return epoch >= handler.enableEpochsConfig.FixDelegationChangeOwnerOnAccountEnableEpoch @@ -671,12 +665,6 @@ func (handler *enableEpochsHandler) createAllFlagsMap() { }, activationEpoch: handler.enableEpochsConfig.BalanceWaitingListsEnableEpoch, }, - common.WaitingListFixFlag: { - isActiveInEpoch: func(epoch uint32) bool { - return epoch >= handler.enableEpochsConfig.WaitingListFixEnableEpoch - }, - activationEpoch: handler.enableEpochsConfig.WaitingListFixEnableEpoch, - }, 
common.NFTStopCreateFlag: { isActiveInEpoch: func(epoch uint32) bool { return epoch >= handler.enableEpochsConfig.NFTStopCreateEnableEpoch @@ -701,6 +689,48 @@ func (handler *enableEpochsHandler) createAllFlagsMap() { }, activationEpoch: handler.enableEpochsConfig.CurrentRandomnessOnSortingEnableEpoch, }, + common.StakeLimitsFlag: { + isActiveInEpoch: func(epoch uint32) bool { + return epoch >= handler.enableEpochsConfig.StakeLimitsEnableEpoch + }, + activationEpoch: handler.enableEpochsConfig.StakeLimitsEnableEpoch, + }, + common.StakingV4Step1Flag: { + isActiveInEpoch: func(epoch uint32) bool { + return epoch == handler.enableEpochsConfig.StakingV4Step1EnableEpoch + }, + activationEpoch: handler.enableEpochsConfig.StakingV4Step1EnableEpoch, + }, + common.StakingV4Step2Flag: { + isActiveInEpoch: func(epoch uint32) bool { + return epoch >= handler.enableEpochsConfig.StakingV4Step2EnableEpoch + }, + activationEpoch: handler.enableEpochsConfig.StakingV4Step2EnableEpoch, + }, + common.StakingV4Step3Flag: { + isActiveInEpoch: func(epoch uint32) bool { + return epoch >= handler.enableEpochsConfig.StakingV4Step3EnableEpoch + }, + activationEpoch: handler.enableEpochsConfig.StakingV4Step3EnableEpoch, + }, + common.StakingQueueFlag: { + isActiveInEpoch: func(epoch uint32) bool { + return epoch < handler.enableEpochsConfig.StakingV4Step1EnableEpoch + }, + activationEpoch: handler.enableEpochsConfig.StakingV4Step1EnableEpoch, + }, + common.StakingV4StartedFlag: { + isActiveInEpoch: func(epoch uint32) bool { + return epoch >= handler.enableEpochsConfig.StakingV4Step1EnableEpoch + }, + activationEpoch: handler.enableEpochsConfig.StakingV4Step1EnableEpoch, + }, + common.AlwaysMergeContextsInEEIFlag: { + isActiveInEpoch: func(epoch uint32) bool { + return epoch >= handler.enableEpochsConfig.AlwaysMergeContextsInEEIEnableEpoch + }, + activationEpoch: handler.enableEpochsConfig.AlwaysMergeContextsInEEIEnableEpoch, + }, } } @@ -769,6 +799,16 @@ func (handler *enableEpochsHandler) GetCurrentEpoch() uint32 { return currentEpoch } +// StakingV4Step2EnableEpoch returns the epoch when stakingV4 becomes active +func (handler *enableEpochsHandler) StakingV4Step2EnableEpoch() uint32 { + return handler.enableEpochsConfig.StakingV4Step2EnableEpoch +} + +// StakingV4Step1EnableEpoch returns the epoch when stakingV4 phase1 becomes active +func (handler *enableEpochsHandler) StakingV4Step1EnableEpoch() uint32 { + return handler.enableEpochsConfig.StakingV4Step1EnableEpoch +} + // IsInterfaceNil returns true if there is no value under the interface func (handler *enableEpochsHandler) IsInterfaceNil() bool { return handler == nil diff --git a/common/enablers/enableEpochsHandler_test.go b/common/enablers/enableEpochsHandler_test.go index 30949150e49..c91f65b805a 100644 --- a/common/enablers/enableEpochsHandler_test.go +++ b/common/enablers/enableEpochsHandler_test.go @@ -45,12 +45,10 @@ func createEnableEpochsConfig() config.EnableEpochs { SaveJailedAlwaysEnableEpoch: 27, ReDelegateBelowMinCheckEnableEpoch: 28, ValidatorToDelegationEnableEpoch: 29, - WaitingListFixEnableEpoch: 30, IncrementSCRNonceInMultiTransferEnableEpoch: 31, ESDTMultiTransferEnableEpoch: 32, GlobalMintBurnDisableEpoch: 33, ESDTTransferRoleEnableEpoch: 34, - BuiltInFunctionOnMetaEnableEpoch: 35, ComputeRewardCheckpointEnableEpoch: 36, SCRSizeInvariantCheckEnableEpoch: 37, BackwardCompSaveKeyValueEnableEpoch: 38, @@ -111,6 +109,11 @@ func createEnableEpochsConfig() config.EnableEpochs { FixGasRemainingForSaveKeyValueBuiltinFunctionEnableEpoch: 93, 
ChangeOwnerAddressCrossShardThroughSCEnableEpoch: 94, CurrentRandomnessOnSortingEnableEpoch: 95, + StakeLimitsEnableEpoch: 95, + StakingV4Step1EnableEpoch: 96, + StakingV4Step2EnableEpoch: 97, + StakingV4Step3EnableEpoch: 98, + AlwaysMergeContextsInEEIEnableEpoch: 99, } } @@ -189,6 +192,20 @@ func TestEnableEpochsHandler_IsFlagEnabled(t *testing.T) { handler.EpochConfirmed(cfg.SetGuardianEnableEpoch+1, 0) require.True(t, handler.IsFlagEnabled(common.SetGuardianFlag)) + handler.EpochConfirmed(cfg.StakingV4Step1EnableEpoch-1, 0) + require.True(t, handler.IsFlagEnabled(common.StakingQueueFlag)) + handler.EpochConfirmed(cfg.StakingV4Step1EnableEpoch, 0) + require.False(t, handler.IsFlagEnabled(common.StakingQueueFlag)) + handler.EpochConfirmed(cfg.StakingV4Step1EnableEpoch+1, 0) + require.False(t, handler.IsFlagEnabled(common.StakingQueueFlag)) + + handler.EpochConfirmed(cfg.StakingV4Step1EnableEpoch-1, 0) + require.False(t, handler.IsFlagEnabled(common.StakingV4StartedFlag)) + handler.EpochConfirmed(cfg.StakingV4Step1EnableEpoch, 0) + require.True(t, handler.IsFlagEnabled(common.StakingV4StartedFlag)) + handler.EpochConfirmed(cfg.StakingV4Step1EnableEpoch+1, 0) + require.True(t, handler.IsFlagEnabled(common.StakingV4StartedFlag)) + handler.EpochConfirmed(math.MaxUint32, 0) require.True(t, handler.IsFlagEnabled(common.SCDeployFlag)) require.True(t, handler.IsFlagEnabled(common.BuiltInFunctionsFlag)) @@ -227,7 +244,6 @@ func TestEnableEpochsHandler_IsFlagEnabled(t *testing.T) { require.True(t, handler.IsFlagEnabled(common.ESDTMultiTransferFlag)) require.False(t, handler.IsFlagEnabled(common.GlobalMintBurnFlag)) // < require.True(t, handler.IsFlagEnabled(common.ESDTTransferRoleFlag)) - require.True(t, handler.IsFlagEnabled(common.BuiltInFunctionOnMetaFlag)) require.True(t, handler.IsFlagEnabled(common.ComputeRewardCheckpointFlag)) require.True(t, handler.IsFlagEnabled(common.SCRSizeInvariantCheckFlag)) require.False(t, handler.IsFlagEnabled(common.BackwardCompSaveKeyValueFlag)) // < @@ -268,7 +284,6 @@ func TestEnableEpochsHandler_IsFlagEnabled(t *testing.T) { require.True(t, handler.IsFlagEnabled(common.SendAlwaysFlag)) require.True(t, handler.IsFlagEnabled(common.ValueLengthCheckFlag)) require.True(t, handler.IsFlagEnabled(common.CheckTransferFlag)) - require.True(t, handler.IsFlagEnabled(common.TransferToMetaFlag)) require.True(t, handler.IsFlagEnabled(common.ESDTNFTImprovementV1Flag)) require.True(t, handler.IsFlagEnabled(common.ChangeDelegationOwnerFlag)) require.True(t, handler.IsFlagEnabled(common.RefactorPeersMiniBlocksFlag)) @@ -287,6 +302,7 @@ func TestEnableEpochsHandler_IsFlagEnabled(t *testing.T) { require.True(t, handler.IsFlagEnabled(common.MultiClaimOnDelegationFlag)) require.True(t, handler.IsFlagEnabled(common.ChangeUsernameFlag)) require.True(t, handler.IsFlagEnabled(common.AutoBalanceDataTriesFlag)) + require.True(t, handler.IsFlagEnabled(common.MigrateDataTrieFlag)) require.True(t, handler.IsFlagEnabled(common.FixDelegationChangeOwnerOnAccountFlag)) require.True(t, handler.IsFlagEnabled(common.FixOOGReturnCodeFlag)) require.True(t, handler.IsFlagEnabled(common.DeterministicSortOnValidatorsInfoFixFlag)) @@ -294,11 +310,17 @@ func TestEnableEpochsHandler_IsFlagEnabled(t *testing.T) { require.True(t, handler.IsFlagEnabled(common.ScToScLogEventFlag)) require.True(t, handler.IsFlagEnabled(common.BlockGasAndFeesReCheckFlag)) require.True(t, handler.IsFlagEnabled(common.BalanceWaitingListsFlag)) - require.True(t, handler.IsFlagEnabled(common.WaitingListFixFlag)) require.True(t, 
handler.IsFlagEnabled(common.NFTStopCreateFlag)) require.True(t, handler.IsFlagEnabled(common.FixGasRemainingForSaveKeyValueFlag)) require.True(t, handler.IsFlagEnabled(common.IsChangeOwnerAddressCrossShardThroughSCFlag)) require.True(t, handler.IsFlagEnabled(common.CurrentRandomnessOnSortingFlag)) + require.True(t, handler.IsFlagEnabled(common.StakeLimitsFlag)) + require.False(t, handler.IsFlagEnabled(common.StakingV4Step1Flag)) + require.True(t, handler.IsFlagEnabled(common.StakingV4Step2Flag)) + require.True(t, handler.IsFlagEnabled(common.StakingV4Step3Flag)) + require.False(t, handler.IsFlagEnabled(common.StakingQueueFlag)) + require.True(t, handler.IsFlagEnabled(common.StakingV4StartedFlag)) + require.True(t, handler.IsFlagEnabled(common.AlwaysMergeContextsInEEIFlag)) } func TestEnableEpochsHandler_GetActivationEpoch(t *testing.T) { @@ -338,7 +360,6 @@ func TestEnableEpochsHandler_GetActivationEpoch(t *testing.T) { require.Equal(t, cfg.ESDTMultiTransferEnableEpoch, handler.GetActivationEpoch(common.ESDTMultiTransferFlag)) require.Equal(t, cfg.GlobalMintBurnDisableEpoch, handler.GetActivationEpoch(common.GlobalMintBurnFlag)) require.Equal(t, cfg.ESDTTransferRoleEnableEpoch, handler.GetActivationEpoch(common.ESDTTransferRoleFlag)) - require.Equal(t, cfg.BuiltInFunctionOnMetaEnableEpoch, handler.GetActivationEpoch(common.BuiltInFunctionOnMetaFlag)) require.Equal(t, cfg.ComputeRewardCheckpointEnableEpoch, handler.GetActivationEpoch(common.ComputeRewardCheckpointFlag)) require.Equal(t, cfg.SCRSizeInvariantCheckEnableEpoch, handler.GetActivationEpoch(common.SCRSizeInvariantCheckFlag)) require.Equal(t, cfg.BackwardCompSaveKeyValueEnableEpoch, handler.GetActivationEpoch(common.BackwardCompSaveKeyValueFlag)) @@ -379,7 +400,6 @@ func TestEnableEpochsHandler_GetActivationEpoch(t *testing.T) { require.Equal(t, cfg.ESDTMetadataContinuousCleanupEnableEpoch, handler.GetActivationEpoch(common.SendAlwaysFlag)) require.Equal(t, cfg.OptimizeNFTStoreEnableEpoch, handler.GetActivationEpoch(common.ValueLengthCheckFlag)) require.Equal(t, cfg.OptimizeNFTStoreEnableEpoch, handler.GetActivationEpoch(common.CheckTransferFlag)) - require.Equal(t, cfg.BuiltInFunctionOnMetaEnableEpoch, handler.GetActivationEpoch(common.TransferToMetaFlag)) require.Equal(t, cfg.ESDTMultiTransferEnableEpoch, handler.GetActivationEpoch(common.ESDTNFTImprovementV1Flag)) require.Equal(t, cfg.ESDTMetadataContinuousCleanupEnableEpoch, handler.GetActivationEpoch(common.ChangeDelegationOwnerFlag)) require.Equal(t, cfg.RefactorPeersMiniBlocksEnableEpoch, handler.GetActivationEpoch(common.RefactorPeersMiniBlocksFlag)) @@ -398,6 +418,7 @@ func TestEnableEpochsHandler_GetActivationEpoch(t *testing.T) { require.Equal(t, cfg.MultiClaimOnDelegationEnableEpoch, handler.GetActivationEpoch(common.MultiClaimOnDelegationFlag)) require.Equal(t, cfg.ChangeUsernameEnableEpoch, handler.GetActivationEpoch(common.ChangeUsernameFlag)) require.Equal(t, cfg.AutoBalanceDataTriesEnableEpoch, handler.GetActivationEpoch(common.AutoBalanceDataTriesFlag)) + require.Equal(t, cfg.MigrateDataTrieEnableEpoch, handler.GetActivationEpoch(common.MigrateDataTrieFlag)) require.Equal(t, cfg.FixDelegationChangeOwnerOnAccountEnableEpoch, handler.GetActivationEpoch(common.FixDelegationChangeOwnerOnAccountFlag)) require.Equal(t, cfg.FixOOGReturnCodeEnableEpoch, handler.GetActivationEpoch(common.FixOOGReturnCodeFlag)) require.Equal(t, cfg.DeterministicSortOnValidatorsInfoEnableEpoch, handler.GetActivationEpoch(common.DeterministicSortOnValidatorsInfoFixFlag)) @@ -405,11 +426,17 
@@ func TestEnableEpochsHandler_GetActivationEpoch(t *testing.T) { require.Equal(t, cfg.ScToScLogEventEnableEpoch, handler.GetActivationEpoch(common.ScToScLogEventFlag)) require.Equal(t, cfg.BlockGasAndFeesReCheckEnableEpoch, handler.GetActivationEpoch(common.BlockGasAndFeesReCheckFlag)) require.Equal(t, cfg.BalanceWaitingListsEnableEpoch, handler.GetActivationEpoch(common.BalanceWaitingListsFlag)) - require.Equal(t, cfg.WaitingListFixEnableEpoch, handler.GetActivationEpoch(common.WaitingListFixFlag)) require.Equal(t, cfg.NFTStopCreateEnableEpoch, handler.GetActivationEpoch(common.NFTStopCreateFlag)) require.Equal(t, cfg.ChangeOwnerAddressCrossShardThroughSCEnableEpoch, handler.GetActivationEpoch(common.IsChangeOwnerAddressCrossShardThroughSCFlag)) require.Equal(t, cfg.FixGasRemainingForSaveKeyValueBuiltinFunctionEnableEpoch, handler.GetActivationEpoch(common.FixGasRemainingForSaveKeyValueFlag)) require.Equal(t, cfg.CurrentRandomnessOnSortingEnableEpoch, handler.GetActivationEpoch(common.CurrentRandomnessOnSortingFlag)) + require.Equal(t, cfg.StakeLimitsEnableEpoch, handler.GetActivationEpoch(common.StakeLimitsFlag)) + require.Equal(t, cfg.StakingV4Step1EnableEpoch, handler.GetActivationEpoch(common.StakingV4Step1Flag)) + require.Equal(t, cfg.StakingV4Step2EnableEpoch, handler.GetActivationEpoch(common.StakingV4Step2Flag)) + require.Equal(t, cfg.StakingV4Step3EnableEpoch, handler.GetActivationEpoch(common.StakingV4Step3Flag)) + require.Equal(t, cfg.StakingV4Step1EnableEpoch, handler.GetActivationEpoch(common.StakingQueueFlag)) + require.Equal(t, cfg.StakingV4Step1EnableEpoch, handler.GetActivationEpoch(common.StakingV4StartedFlag)) + require.Equal(t, cfg.AlwaysMergeContextsInEEIEnableEpoch, handler.GetActivationEpoch(common.AlwaysMergeContextsInEEIFlag)) } func TestEnableEpochsHandler_IsInterfaceNil(t *testing.T) { diff --git a/cmd/assessment/hostParameters/hostInfo.go b/common/hostParameters/hostInfo.go similarity index 100% rename from cmd/assessment/hostParameters/hostInfo.go rename to common/hostParameters/hostInfo.go diff --git a/cmd/assessment/hostParameters/hostInfo_test.go b/common/hostParameters/hostInfo_test.go similarity index 100% rename from cmd/assessment/hostParameters/hostInfo_test.go rename to common/hostParameters/hostInfo_test.go diff --git a/cmd/assessment/hostParameters/hostParametersGetter.go b/common/hostParameters/hostParametersGetter.go similarity index 100% rename from cmd/assessment/hostParameters/hostParametersGetter.go rename to common/hostParameters/hostParametersGetter.go diff --git a/cmd/assessment/hostParameters/hostParametersGetter_test.go b/common/hostParameters/hostParametersGetter_test.go similarity index 100% rename from cmd/assessment/hostParameters/hostParametersGetter_test.go rename to common/hostParameters/hostParametersGetter_test.go diff --git a/common/interface.go b/common/interface.go index d55a92853ff..73238c66e8c 100644 --- a/common/interface.go +++ b/common/interface.go @@ -223,17 +223,17 @@ type StateStatisticsHandler interface { Reset() ResetSnapshot() - IncrCache() + IncrementCache() Cache() uint64 - IncrSnapshotCache() + IncrementSnapshotCache() SnapshotCache() uint64 - IncrPersister(epoch uint32) + IncrementPersister(epoch uint32) Persister(epoch uint32) uint64 - IncrSnapshotPersister(epoch uint32) + IncrementSnapshotPersister(epoch uint32) SnapshotPersister(epoch uint32) uint64 - IncrTrie() + IncrementTrie() Trie() uint64 ProcessingStats() []string @@ -314,6 +314,7 @@ type ManagedPeersHolder interface { 
IncrementRoundsWithoutReceivedMessages(pkBytes []byte) ResetRoundsWithoutReceivedMessages(pkBytes []byte, pid core.PeerID) GetManagedKeysByCurrentNode() map[string]crypto.PrivateKey + GetLoadedKeysByCurrentNode() [][]byte IsKeyManagedByCurrentNode(pkBytes []byte) bool IsKeyRegistered(pkBytes []byte) bool IsPidManagedByCurrentNode(pid core.PeerID) bool @@ -322,6 +323,7 @@ type ManagedPeersHolder interface { GetNextPeerAuthenticationTime(pkBytes []byte) (time.Time, error) SetNextPeerAuthenticationTime(pkBytes []byte, nextTime time.Time) IsMultiKeyMode() bool + GetRedundancyStepInReason() string IsInterfaceNil() bool } @@ -342,6 +344,7 @@ type StateSyncNotifierSubscriber interface { type ManagedPeersMonitor interface { GetManagedKeysCount() int GetManagedKeys() [][]byte + GetLoadedKeys() [][]byte GetEligibleManagedKeys() ([][]byte, error) GetWaitingManagedKeys() ([][]byte, error) IsInterfaceNil() bool diff --git a/common/operationmodes/historicalBalances.go b/common/operationmodes/historicalBalances.go new file mode 100644 index 00000000000..da3cfe98dde --- /dev/null +++ b/common/operationmodes/historicalBalances.go @@ -0,0 +1,41 @@ +package operationmodes + +import ( + "github.com/multiversx/mx-chain-go/config" + logger "github.com/multiversx/mx-chain-logger-go" +) + +// ProcessHistoricalBalancesMode will process the provided flags for the historical balances +func ProcessHistoricalBalancesMode(log logger.Logger, configs *config.Configs) { + configs.GeneralConfig.StoragePruning.Enabled = true + configs.GeneralConfig.StoragePruning.ValidatorCleanOldEpochsData = false + configs.GeneralConfig.StoragePruning.ObserverCleanOldEpochsData = false + configs.GeneralConfig.GeneralSettings.StartInEpochEnabled = false + configs.GeneralConfig.StoragePruning.AccountsTrieCleanOldEpochsData = false + configs.GeneralConfig.StateTriesConfig.AccountsStatePruningEnabled = false + configs.GeneralConfig.DbLookupExtensions.Enabled = true + configs.PreferencesConfig.Preferences.FullArchive = true + + log.Warn("the node is in historical balances mode! 
Will auto-set some config values", + "StoragePruning.Enabled", configs.GeneralConfig.StoragePruning.Enabled, + "StoragePruning.ValidatorCleanOldEpochsData", configs.GeneralConfig.StoragePruning.ValidatorCleanOldEpochsData, + "StoragePruning.ObserverCleanOldEpochsData", configs.GeneralConfig.StoragePruning.ObserverCleanOldEpochsData, + "StoragePruning.AccountsTrieCleanOldEpochsData", configs.GeneralConfig.StoragePruning.AccountsTrieCleanOldEpochsData, + "GeneralSettings.StartInEpochEnabled", configs.GeneralConfig.GeneralSettings.StartInEpochEnabled, + "StateTriesConfig.AccountsStatePruningEnabled", configs.GeneralConfig.StateTriesConfig.AccountsStatePruningEnabled, + "DbLookupExtensions.Enabled", configs.GeneralConfig.DbLookupExtensions.Enabled, + "Preferences.FullArchive", configs.PreferencesConfig.Preferences.FullArchive, + ) +} + +// IsInHistoricalBalancesMode returns true if the configuration provided denotes a historical balances mode +func IsInHistoricalBalancesMode(configs *config.Configs) bool { + return configs.GeneralConfig.StoragePruning.Enabled && + !configs.GeneralConfig.StoragePruning.ValidatorCleanOldEpochsData && + !configs.GeneralConfig.StoragePruning.ObserverCleanOldEpochsData && + !configs.GeneralConfig.GeneralSettings.StartInEpochEnabled && + !configs.GeneralConfig.StoragePruning.AccountsTrieCleanOldEpochsData && + !configs.GeneralConfig.StateTriesConfig.AccountsStatePruningEnabled && + configs.GeneralConfig.DbLookupExtensions.Enabled && + configs.PreferencesConfig.Preferences.FullArchive +} diff --git a/common/operationmodes/historicalBalances_test.go b/common/operationmodes/historicalBalances_test.go new file mode 100644 index 00000000000..d06061c3027 --- /dev/null +++ b/common/operationmodes/historicalBalances_test.go @@ -0,0 +1,141 @@ +package operationmodes + +import ( + "testing" + + "github.com/multiversx/mx-chain-go/config" + "github.com/multiversx/mx-chain-go/testscommon" + "github.com/stretchr/testify/assert" +) + +func TestProcessHistoricalBalancesMode(t *testing.T) { + t.Parallel() + + cfg := &config.Configs{ + GeneralConfig: &config.Config{}, + PreferencesConfig: &config.Preferences{}, + } + ProcessHistoricalBalancesMode(&testscommon.LoggerStub{}, cfg) + + assert.True(t, cfg.GeneralConfig.StoragePruning.Enabled) + assert.False(t, cfg.GeneralConfig.StoragePruning.ValidatorCleanOldEpochsData) + assert.False(t, cfg.GeneralConfig.StoragePruning.ObserverCleanOldEpochsData) + assert.False(t, cfg.GeneralConfig.GeneralSettings.StartInEpochEnabled) + assert.False(t, cfg.GeneralConfig.StoragePruning.AccountsTrieCleanOldEpochsData) + assert.False(t, cfg.GeneralConfig.StateTriesConfig.AccountsStatePruningEnabled) + assert.True(t, cfg.GeneralConfig.DbLookupExtensions.Enabled) + assert.True(t, cfg.PreferencesConfig.Preferences.FullArchive) +} + +func TestIsInHistoricalBalancesMode(t *testing.T) { + t.Parallel() + + t.Run("empty configs should return false", func(t *testing.T) { + t.Parallel() + + cfg := &config.Configs{ + GeneralConfig: &config.Config{}, + PreferencesConfig: &config.Preferences{}, + } + assert.False(t, IsInHistoricalBalancesMode(cfg)) + }) + t.Run("storage pruning disabled should return false", func(t *testing.T) { + t.Parallel() + + cfg := &config.Configs{ + GeneralConfig: &config.Config{}, + PreferencesConfig: &config.Preferences{}, + } + ProcessHistoricalBalancesMode(&testscommon.LoggerStub{}, cfg) + cfg.GeneralConfig.StoragePruning.Enabled = false + assert.False(t, IsInHistoricalBalancesMode(cfg)) + }) + t.Run("validator clean old epoch data enabled 
should return false", func(t *testing.T) { + t.Parallel() + + cfg := &config.Configs{ + GeneralConfig: &config.Config{}, + PreferencesConfig: &config.Preferences{}, + } + ProcessHistoricalBalancesMode(&testscommon.LoggerStub{}, cfg) + cfg.GeneralConfig.StoragePruning.ValidatorCleanOldEpochsData = true + assert.False(t, IsInHistoricalBalancesMode(cfg)) + }) + t.Run("observer clean old epoch data enabled should return false", func(t *testing.T) { + t.Parallel() + + cfg := &config.Configs{ + GeneralConfig: &config.Config{}, + PreferencesConfig: &config.Preferences{}, + } + ProcessHistoricalBalancesMode(&testscommon.LoggerStub{}, cfg) + cfg.GeneralConfig.StoragePruning.ObserverCleanOldEpochsData = true + assert.False(t, IsInHistoricalBalancesMode(cfg)) + }) + t.Run("start in epoch enabled should return false", func(t *testing.T) { + t.Parallel() + + cfg := &config.Configs{ + GeneralConfig: &config.Config{}, + PreferencesConfig: &config.Preferences{}, + } + ProcessHistoricalBalancesMode(&testscommon.LoggerStub{}, cfg) + cfg.GeneralConfig.GeneralSettings.StartInEpochEnabled = true + assert.False(t, IsInHistoricalBalancesMode(cfg)) + }) + t.Run("accounts trie clean old epoch data enabled should return false", func(t *testing.T) { + t.Parallel() + + cfg := &config.Configs{ + GeneralConfig: &config.Config{}, + PreferencesConfig: &config.Preferences{}, + } + ProcessHistoricalBalancesMode(&testscommon.LoggerStub{}, cfg) + cfg.GeneralConfig.StoragePruning.AccountsTrieCleanOldEpochsData = true + assert.False(t, IsInHistoricalBalancesMode(cfg)) + }) + t.Run("accounts state pruning enabled should return false", func(t *testing.T) { + t.Parallel() + + cfg := &config.Configs{ + GeneralConfig: &config.Config{}, + PreferencesConfig: &config.Preferences{}, + } + ProcessHistoricalBalancesMode(&testscommon.LoggerStub{}, cfg) + cfg.GeneralConfig.StateTriesConfig.AccountsStatePruningEnabled = true + assert.False(t, IsInHistoricalBalancesMode(cfg)) + }) + t.Run("db lookup extension disabled should return false", func(t *testing.T) { + t.Parallel() + + cfg := &config.Configs{ + GeneralConfig: &config.Config{}, + PreferencesConfig: &config.Preferences{}, + } + ProcessHistoricalBalancesMode(&testscommon.LoggerStub{}, cfg) + cfg.GeneralConfig.DbLookupExtensions.Enabled = false + assert.False(t, IsInHistoricalBalancesMode(cfg)) + }) + t.Run("not a full archive node should return false", func(t *testing.T) { + t.Parallel() + + cfg := &config.Configs{ + GeneralConfig: &config.Config{}, + PreferencesConfig: &config.Preferences{}, + } + ProcessHistoricalBalancesMode(&testscommon.LoggerStub{}, cfg) + cfg.PreferencesConfig.Preferences.FullArchive = false + assert.False(t, IsInHistoricalBalancesMode(cfg)) + }) + t.Run("with historical balances config should return true", func(t *testing.T) { + t.Parallel() + + cfg := &config.Configs{ + GeneralConfig: &config.Config{}, + PreferencesConfig: &config.Preferences{}, + } + ProcessHistoricalBalancesMode(&testscommon.LoggerStub{}, cfg) + assert.True(t, IsInHistoricalBalancesMode(cfg)) + }) + +} diff --git a/common/operationmodes/operationmodes.go b/common/operationmodes/operationmodes.go index 70aed256f4b..1ae6a6fad70 100644 --- a/common/operationmodes/operationmodes.go +++ b/common/operationmodes/operationmodes.go @@ -5,6 +5,7 @@ import ( "strings" ) +// constants that define the operation mode of the node const ( OperationModeFullArchive = "full-archive" OperationModeDbLookupExtension = "db-lookup-extension" diff --git a/common/statistics/disabled/stateStatistics.go 
b/common/statistics/disabled/stateStatistics.go index d10d310129a..c3bdf12420d 100644 --- a/common/statistics/disabled/stateStatistics.go +++ b/common/statistics/disabled/stateStatistics.go @@ -19,8 +19,8 @@ func (s *stateStatistics) Reset() { func (s *stateStatistics) ResetSnapshot() { } -// IncrCache does nothing -func (s *stateStatistics) IncrCache() { +// IncrementCache does nothing +func (s *stateStatistics) IncrementCache() { } // Cache returns zero @@ -28,8 +28,8 @@ func (s *stateStatistics) Cache() uint64 { return 0 } -// IncrSnapshotCache does nothing -func (ss *stateStatistics) IncrSnapshotCache() { +// IncrementSnapshotCache does nothing +func (ss *stateStatistics) IncrementSnapshotCache() { } // SnapshotCache returns the number of cached operations @@ -37,8 +37,8 @@ func (ss *stateStatistics) SnapshotCache() uint64 { return 0 } -// IncrPersister does nothing -func (s *stateStatistics) IncrPersister(epoch uint32) { +// IncrementPersister does nothing +func (s *stateStatistics) IncrementPersister(epoch uint32) { } // Persister returns zero @@ -46,8 +46,8 @@ func (s *stateStatistics) Persister(epoch uint32) uint64 { return 0 } -// IncrSnapshotPersister does nothing -func (ss *stateStatistics) IncrSnapshotPersister(epoch uint32) { +// IncrementSnapshotPersister does nothing +func (ss *stateStatistics) IncrementSnapshotPersister(epoch uint32) { } // SnapshotPersister returns the number of persister operations @@ -55,8 +55,8 @@ func (ss *stateStatistics) SnapshotPersister(epoch uint32) uint64 { return 0 } -// IncrTrie does nothing -func (s *stateStatistics) IncrTrie() { +// IncrementTrie does nothing +func (s *stateStatistics) IncrementTrie() { } // Trie returns zero diff --git a/common/statistics/disabled/stateStatistics_test.go b/common/statistics/disabled/stateStatistics_test.go index 7d17aa689d1..725ec3ee6a1 100644 --- a/common/statistics/disabled/stateStatistics_test.go +++ b/common/statistics/disabled/stateStatistics_test.go @@ -31,12 +31,12 @@ func TestStateStatistics_MethodsShouldNotPanic(t *testing.T) { stats.ResetSnapshot() stats.ResetAll() - stats.IncrCache() - stats.IncrSnapshotCache() - stats.IncrSnapshotCache() - stats.IncrPersister(1) - stats.IncrSnapshotPersister(1) - stats.IncrTrie() + stats.IncrementCache() + stats.IncrementSnapshotCache() + stats.IncrementSnapshotCache() + stats.IncrementPersister(1) + stats.IncrementSnapshotPersister(1) + stats.IncrementTrie() require.Equal(t, uint64(0), stats.Cache()) require.Equal(t, uint64(0), stats.SnapshotCache()) diff --git a/common/statistics/osLevel/memStats_test.go b/common/statistics/osLevel/memStats_test.go index 99724172e67..ff42ad516c2 100644 --- a/common/statistics/osLevel/memStats_test.go +++ b/common/statistics/osLevel/memStats_test.go @@ -3,12 +3,17 @@ package osLevel import ( + "runtime" "testing" "github.com/stretchr/testify/assert" ) func TestReadCurrentMemStats(t *testing.T) { + if runtime.GOOS == "darwin" { + t.Skip("skipping test on darwin") + } + t.Parallel() memStats, err := ReadCurrentMemStats() diff --git a/common/statistics/stateStatistics.go b/common/statistics/stateStatistics.go index c41040ab933..474dc6d47d1 100644 --- a/common/statistics/stateStatistics.go +++ b/common/statistics/stateStatistics.go @@ -51,8 +51,8 @@ func (ss *stateStatistics) ResetSnapshot() { ss.mutPersisters.Unlock() } -// IncrCache will increment cache counter -func (ss *stateStatistics) IncrCache() { +// IncrementCache will increment cache counter +func (ss *stateStatistics) IncrementCache() { atomic.AddUint64(&ss.numCache, 1) } 
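Editor's note on the Increment* renames above: the concrete stateStatistics implementation mixes two counter styles, sync/atomic for the flat counters (cache, trie) and a mutex-guarded map for the per-epoch persister counters. Below is a minimal, standalone sketch of that pattern only; it is not the actual type from common/statistics, and the struct and field names are illustrative.

package main

import (
	"fmt"
	"sync"
	"sync/atomic"
)

// counters reduces the stateStatistics pattern to its essentials: lock-free
// atomic counters for hot, global metrics and a mutex-protected map for
// per-epoch metrics.
type counters struct {
	numCache uint64 // incremented atomically

	mutPersisters sync.RWMutex
	numPersister  map[uint32]uint64 // per-epoch counter, guarded by the mutex
}

func newCounters() *counters {
	return &counters{numPersister: make(map[uint32]uint64)}
}

// IncrementCache mirrors the renamed IncrementCache: a single atomic add.
func (c *counters) IncrementCache() {
	atomic.AddUint64(&c.numCache, 1)
}

// Cache reads the flat counter atomically.
func (c *counters) Cache() uint64 {
	return atomic.LoadUint64(&c.numCache)
}

// IncrementPersister mirrors IncrementPersister: per-epoch counters need the mutex.
func (c *counters) IncrementPersister(epoch uint32) {
	c.mutPersisters.Lock()
	defer c.mutPersisters.Unlock()
	c.numPersister[epoch]++
}

// Persister returns the counter accumulated for the given epoch.
func (c *counters) Persister(epoch uint32) uint64 {
	c.mutPersisters.RLock()
	defer c.mutPersisters.RUnlock()
	return c.numPersister[epoch]
}

func main() {
	c := newCounters()
	c.IncrementCache()
	c.IncrementCache()
	c.IncrementPersister(7)
	fmt.Println(c.Cache(), c.Persister(7)) // 2 1
}

The split matters because the per-epoch map cannot be updated atomically as a whole, while the hot-path counters avoid lock contention entirely.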
@@ -61,8 +61,8 @@ func (ss *stateStatistics) Cache() uint64 { return atomic.LoadUint64(&ss.numCache) } -// IncrSnapshotCache will increment snapshot cache counter -func (ss *stateStatistics) IncrSnapshotCache() { +// IncrementSnapshotCache will increment snapshot cache counter +func (ss *stateStatistics) IncrementSnapshotCache() { atomic.AddUint64(&ss.numSnapshotCache, 1) } @@ -71,8 +71,8 @@ func (ss *stateStatistics) SnapshotCache() uint64 { return atomic.LoadUint64(&ss.numSnapshotCache) } -// IncrPersister will increment persister counter -func (ss *stateStatistics) IncrPersister(epoch uint32) { +// IncrementPersister will increment persister counter +func (ss *stateStatistics) IncrementPersister(epoch uint32) { ss.mutPersisters.Lock() defer ss.mutPersisters.Unlock() @@ -87,8 +87,8 @@ func (ss *stateStatistics) Persister(epoch uint32) uint64 { return ss.numPersister[epoch] } -// IncrSnapshotPersister will increment snapshot persister counter -func (ss *stateStatistics) IncrSnapshotPersister(epoch uint32) { +// IncrementSnapshotPersister will increment snapshot persister counter +func (ss *stateStatistics) IncrementSnapshotPersister(epoch uint32) { ss.mutPersisters.Lock() defer ss.mutPersisters.Unlock() @@ -103,8 +103,8 @@ func (ss *stateStatistics) SnapshotPersister(epoch uint32) uint64 { return ss.numSnapshotPersister[epoch] } -// IncrTrie will increment trie counter -func (ss *stateStatistics) IncrTrie() { +// IncrementTrie will increment trie counter +func (ss *stateStatistics) IncrementTrie() { atomic.AddUint64(&ss.numTrie, 1) } diff --git a/common/statistics/stateStatistics_test.go b/common/statistics/stateStatistics_test.go index e1beaf9d35b..674b3d8ea6b 100644 --- a/common/statistics/stateStatistics_test.go +++ b/common/statistics/stateStatistics_test.go @@ -27,11 +27,11 @@ func TestStateStatistics_Processing(t *testing.T) { assert.Equal(t, uint64(0), ss.Trie()) - ss.IncrTrie() - ss.IncrTrie() + ss.IncrementTrie() + ss.IncrementTrie() assert.Equal(t, uint64(2), ss.Trie()) - ss.IncrTrie() + ss.IncrementTrie() assert.Equal(t, uint64(3), ss.Trie()) ss.Reset() @@ -47,11 +47,11 @@ func TestStateStatistics_Processing(t *testing.T) { assert.Equal(t, uint64(0), ss.Persister(epoch)) - ss.IncrPersister(epoch) - ss.IncrPersister(epoch) + ss.IncrementPersister(epoch) + ss.IncrementPersister(epoch) assert.Equal(t, uint64(2), ss.Persister(epoch)) - ss.IncrPersister(epoch) + ss.IncrementPersister(epoch) assert.Equal(t, uint64(3), ss.Persister(epoch)) ss.Reset() @@ -65,11 +65,11 @@ func TestStateStatistics_Processing(t *testing.T) { assert.Equal(t, uint64(0), ss.Cache()) - ss.IncrCache() - ss.IncrCache() + ss.IncrementCache() + ss.IncrementCache() assert.Equal(t, uint64(2), ss.Cache()) - ss.IncrCache() + ss.IncrementCache() assert.Equal(t, uint64(3), ss.Cache()) ss.Reset() @@ -89,11 +89,11 @@ func TestStateStatistics_Snapshot(t *testing.T) { assert.Equal(t, uint64(0), ss.SnapshotPersister(epoch)) - ss.IncrSnapshotPersister(epoch) - ss.IncrSnapshotPersister(epoch) + ss.IncrementSnapshotPersister(epoch) + ss.IncrementSnapshotPersister(epoch) assert.Equal(t, uint64(2), ss.SnapshotPersister(epoch)) - ss.IncrSnapshotPersister(epoch) + ss.IncrementSnapshotPersister(epoch) assert.Equal(t, uint64(3), ss.SnapshotPersister(epoch)) ss.ResetSnapshot() @@ -107,11 +107,11 @@ func TestStateStatistics_Snapshot(t *testing.T) { assert.Equal(t, uint64(0), ss.Cache()) - ss.IncrSnapshotCache() - ss.IncrSnapshotCache() + ss.IncrementSnapshotCache() + ss.IncrementSnapshotCache() assert.Equal(t, uint64(2), 
ss.SnapshotCache()) - ss.IncrSnapshotCache() + ss.IncrementSnapshotCache() assert.Equal(t, uint64(3), ss.SnapshotCache()) ss.ResetSnapshot() @@ -144,11 +144,11 @@ func TestStateStatistics_ConcurrenyOperations(t *testing.T) { case 0: ss.Reset() case 1: - ss.IncrCache() + ss.IncrementCache() case 2: - ss.IncrPersister(epoch) + ss.IncrementPersister(epoch) case 3: - ss.IncrTrie() + ss.IncrementTrie() case 7: _ = ss.Cache() case 8: diff --git a/common/validatorInfo/validatorInfoUtils.go b/common/validatorInfo/validatorInfoUtils.go index e6cf36ba52a..20f4e97897a 100644 --- a/common/validatorInfo/validatorInfoUtils.go +++ b/common/validatorInfo/validatorInfoUtils.go @@ -6,41 +6,41 @@ import ( ) // WasActiveInCurrentEpoch returns true if the node was active in current epoch -func WasActiveInCurrentEpoch(valInfo *state.ValidatorInfo) bool { +func WasActiveInCurrentEpoch(valInfo state.ValidatorInfoHandler) bool { if valInfo == nil { return false } - active := valInfo.LeaderFailure > 0 || valInfo.LeaderSuccess > 0 || valInfo.ValidatorSuccess > 0 || valInfo.ValidatorFailure > 0 + active := valInfo.GetLeaderFailure() > 0 || valInfo.GetLeaderSuccess() > 0 || valInfo.GetValidatorSuccess() > 0 || valInfo.GetValidatorFailure() > 0 return active } // WasLeavingEligibleInCurrentEpoch returns true if the validator was eligible in the epoch but has done an unstake. -func WasLeavingEligibleInCurrentEpoch(valInfo *state.ValidatorInfo) bool { +func WasLeavingEligibleInCurrentEpoch(valInfo state.ValidatorInfoHandler) bool { if valInfo == nil { return false } - return valInfo.List == string(common.LeavingList) && WasActiveInCurrentEpoch(valInfo) + return valInfo.GetList() == string(common.LeavingList) && WasActiveInCurrentEpoch(valInfo) } // WasJailedEligibleInCurrentEpoch returns true if the validator was jailed in the epoch but also active/eligible due to not enough -//nodes in shard. -func WasJailedEligibleInCurrentEpoch(valInfo *state.ValidatorInfo) bool { +// nodes in shard. 
+func WasJailedEligibleInCurrentEpoch(valInfo state.ValidatorInfoHandler) bool { if valInfo == nil { return false } - return valInfo.List == string(common.JailedList) && WasActiveInCurrentEpoch(valInfo) + return valInfo.GetList() == string(common.JailedList) && WasActiveInCurrentEpoch(valInfo) } // WasEligibleInCurrentEpoch returns true if the validator was eligible for consensus in current epoch -func WasEligibleInCurrentEpoch(valInfo *state.ValidatorInfo) bool { +func WasEligibleInCurrentEpoch(valInfo state.ValidatorInfoHandler) bool { if valInfo == nil { return false } - wasEligibleInShard := valInfo.List == string(common.EligibleList) || + wasEligibleInShard := valInfo.GetList() == string(common.EligibleList) || WasLeavingEligibleInCurrentEpoch(valInfo) || WasJailedEligibleInCurrentEpoch(valInfo) diff --git a/config/config.go b/config/config.go index 18bd9f8b26f..3b36baba8b8 100644 --- a/config/config.go +++ b/config/config.go @@ -88,12 +88,14 @@ type EvictionWaitingListConfig struct { // EpochStartConfig will hold the configuration of EpochStart settings type EpochStartConfig struct { - MinRoundsBetweenEpochs int64 - RoundsPerEpoch int64 - MinShuffledOutRestartThreshold float64 - MaxShuffledOutRestartThreshold float64 - MinNumConnectedPeersToStart int - MinNumOfPeersToConsiderBlockValid int + MinRoundsBetweenEpochs int64 + RoundsPerEpoch int64 + MinShuffledOutRestartThreshold float64 + MaxShuffledOutRestartThreshold float64 + MinNumConnectedPeersToStart int + MinNumOfPeersToConsiderBlockValid int + ExtraDelayForRequestBlockInfoInMilliseconds int + GenesisEpoch uint32 } // BlockSizeThrottleConfig will hold the configuration for adaptive block size throttle @@ -193,15 +195,16 @@ type Config struct { PublicKeyPIDSignature CacheConfig PeerHonesty CacheConfig - Antiflood AntifloodConfig - WebServerAntiflood WebServerAntifloodConfig - ResourceStats ResourceStatsConfig - HeartbeatV2 HeartbeatV2Config - ValidatorStatistics ValidatorStatisticsConfig - GeneralSettings GeneralSettingsConfig - Consensus ConsensusConfig - StoragePruning StoragePruningConfig - LogsAndEvents LogsAndEventsConfig + Antiflood AntifloodConfig + WebServerAntiflood WebServerAntifloodConfig + ResourceStats ResourceStatsConfig + HeartbeatV2 HeartbeatV2Config + ValidatorStatistics ValidatorStatisticsConfig + GeneralSettings GeneralSettingsConfig + Consensus ConsensusConfig + StoragePruning StoragePruningConfig + LogsAndEvents LogsAndEventsConfig + HardwareRequirements HardwareRequirementsConfig NTPConfig NTPConfig HeadersPoolConfig HeadersPoolConfig @@ -289,6 +292,11 @@ type GeneralSettingsConfig struct { SetGuardianEpochsDelay uint32 } +// HardwareRequirementsConfig will hold the hardware requirements config +type HardwareRequirementsConfig struct { + CPUFlags []string +} + // FacadeConfig will hold different configuration option that will be passed to the node facade type FacadeConfig struct { RestApiInterface string @@ -360,15 +368,16 @@ type TxAccumulatorConfig struct { // AntifloodConfig will hold all p2p antiflood parameters type AntifloodConfig struct { - Enabled bool - NumConcurrentResolverJobs int32 - OutOfSpecs FloodPreventerConfig - FastReacting FloodPreventerConfig - SlowReacting FloodPreventerConfig - PeerMaxOutput AntifloodLimitsConfig - Cache CacheConfig - Topic TopicAntifloodConfig - TxAccumulator TxAccumulatorConfig + Enabled bool + NumConcurrentResolverJobs int32 + NumConcurrentResolvingTrieNodesJobs int32 + OutOfSpecs FloodPreventerConfig + FastReacting FloodPreventerConfig + SlowReacting 
FloodPreventerConfig + PeerMaxOutput AntifloodLimitsConfig + Cache CacheConfig + Topic TopicAntifloodConfig + TxAccumulator TxAccumulatorConfig } // FloodPreventerConfig will hold all flood preventer parameters diff --git a/config/configChecker.go b/config/configChecker.go new file mode 100644 index 00000000000..11ddc7eff9a --- /dev/null +++ b/config/configChecker.go @@ -0,0 +1,103 @@ +package config + +import ( + "fmt" + + logger "github.com/multiversx/mx-chain-logger-go" +) + +var log = logger.GetOrCreate("config-checker") + +// SanityCheckNodesConfig checks if the nodes limit setup is set correctly +func SanityCheckNodesConfig( + nodesSetup NodesSetupHandler, + cfg EnableEpochs, +) error { + maxNodesChange := cfg.MaxNodesChangeEnableEpoch + for _, maxNodesConfig := range maxNodesChange { + err := checkMaxNodesConfig(nodesSetup, maxNodesConfig) + if err != nil { + return fmt.Errorf("%w in MaxNodesChangeConfig at EpochEnable = %d", err, maxNodesConfig.EpochEnable) + } + } + + return sanityCheckEnableEpochsStakingV4(cfg, nodesSetup.NumberOfShards()) +} + +func checkMaxNodesConfig( + nodesSetup NodesSetupHandler, + maxNodesConfig MaxNodesChangeConfig, +) error { + maxNumNodes := maxNodesConfig.MaxNumNodes + minNumNodesWithHysteresis := nodesSetup.MinNumberOfNodesWithHysteresis() + if maxNumNodes < minNumNodesWithHysteresis { + return fmt.Errorf("%w, maxNumNodes: %d, minNumNodesWithHysteresis: %d", + errInvalidMaxMinNodes, maxNumNodes, minNumNodesWithHysteresis) + } + + return nil +} + +// sanityCheckEnableEpochsStakingV4 checks if the enable epoch configs for stakingV4 are set correctly +func sanityCheckEnableEpochsStakingV4(enableEpochsCfg EnableEpochs, numOfShards uint32) error { + if !areStakingV4StepsInOrder(enableEpochsCfg) { + return errStakingV4StepsNotInOrder + } + + return checkStakingV4MaxNodesChangeCfg(enableEpochsCfg, numOfShards) +} + +func areStakingV4StepsInOrder(enableEpochsCfg EnableEpochs) bool { + return (enableEpochsCfg.StakingV4Step1EnableEpoch == enableEpochsCfg.StakingV4Step2EnableEpoch-1) && + (enableEpochsCfg.StakingV4Step2EnableEpoch == enableEpochsCfg.StakingV4Step3EnableEpoch-1) +} + +func checkStakingV4MaxNodesChangeCfg(enableEpochsCfg EnableEpochs, numOfShards uint32) error { + maxNodesChangeCfg := enableEpochsCfg.MaxNodesChangeEnableEpoch + if len(maxNodesChangeCfg) <= 1 { + return nil + } + + maxNodesConfigAdaptedForStakingV4 := false + + for idx, currMaxNodesChangeCfg := range maxNodesChangeCfg { + if currMaxNodesChangeCfg.EpochEnable == enableEpochsCfg.StakingV4Step3EnableEpoch { + maxNodesConfigAdaptedForStakingV4 = true + + if idx == 0 { + log.Warn(fmt.Errorf("found config change in MaxNodesChangeEnableEpoch for StakingV4Step3EnableEpoch = %d, but %w ", + enableEpochsCfg.StakingV4Step3EnableEpoch, errNoMaxNodesConfigBeforeStakingV4).Error()) + break + } + + prevMaxNodesChange := maxNodesChangeCfg[idx-1] + err := checkMaxNodesChangedCorrectly(prevMaxNodesChange, currMaxNodesChangeCfg, numOfShards) + if err != nil { + return err + } + + break + } + } + + if !maxNodesConfigAdaptedForStakingV4 { + return fmt.Errorf("%w = %d", errNoMaxNodesConfigChangeForStakingV4, enableEpochsCfg.StakingV4Step3EnableEpoch) + } + + return nil +} + +func checkMaxNodesChangedCorrectly(prevMaxNodesChange MaxNodesChangeConfig, currMaxNodesChange MaxNodesChangeConfig, numOfShards uint32) error { + if prevMaxNodesChange.NodesToShufflePerShard != currMaxNodesChange.NodesToShufflePerShard { + return errMismatchNodesToShuffle + } + + totalShuffled := (numOfShards + 1) * 
prevMaxNodesChange.NodesToShufflePerShard + expectedMaxNumNodes := prevMaxNodesChange.MaxNumNodes - totalShuffled + if expectedMaxNumNodes != currMaxNodesChange.MaxNumNodes { + return fmt.Errorf("expected MaxNodesChangeEnableEpoch.MaxNumNodes for StakingV4Step3EnableEpoch = %d, but got %d", + expectedMaxNumNodes, currMaxNodesChange.MaxNumNodes) + } + + return nil +} diff --git a/config/configChecker_test.go b/config/configChecker_test.go new file mode 100644 index 00000000000..ec993631fbb --- /dev/null +++ b/config/configChecker_test.go @@ -0,0 +1,382 @@ +package config + +import ( + "strings" + "testing" + + "github.com/multiversx/mx-chain-go/testscommon/nodesSetupMock" + "github.com/stretchr/testify/require" +) + +const numOfShards = 3 + +func generateCorrectConfig() EnableEpochs { + return EnableEpochs{ + StakingV4Step1EnableEpoch: 4, + StakingV4Step2EnableEpoch: 5, + StakingV4Step3EnableEpoch: 6, + MaxNodesChangeEnableEpoch: []MaxNodesChangeConfig{ + { + EpochEnable: 0, + MaxNumNodes: 36, + NodesToShufflePerShard: 4, + }, + { + EpochEnable: 1, + MaxNumNodes: 56, + NodesToShufflePerShard: 2, + }, + { + EpochEnable: 6, + MaxNumNodes: 48, + NodesToShufflePerShard: 2, + }, + }, + } +} + +func TestSanityCheckEnableEpochsStakingV4(t *testing.T) { + t.Parallel() + + t.Run("correct config, should work", func(t *testing.T) { + t.Parallel() + + cfg := generateCorrectConfig() + err := sanityCheckEnableEpochsStakingV4(cfg, numOfShards) + require.Nil(t, err) + }) + + t.Run("staking v4 steps not in ascending order, should return error", func(t *testing.T) { + t.Parallel() + + cfg := generateCorrectConfig() + cfg.StakingV4Step1EnableEpoch = 5 + cfg.StakingV4Step2EnableEpoch = 5 + err := sanityCheckEnableEpochsStakingV4(cfg, numOfShards) + require.Equal(t, errStakingV4StepsNotInOrder, err) + + cfg = generateCorrectConfig() + cfg.StakingV4Step2EnableEpoch = 5 + cfg.StakingV4Step3EnableEpoch = 4 + err = sanityCheckEnableEpochsStakingV4(cfg, numOfShards) + require.Equal(t, errStakingV4StepsNotInOrder, err) + }) + + t.Run("staking v4 steps not in cardinal order, should return error", func(t *testing.T) { + t.Parallel() + + cfg := generateCorrectConfig() + + cfg.StakingV4Step1EnableEpoch = 1 + cfg.StakingV4Step2EnableEpoch = 3 + cfg.StakingV4Step3EnableEpoch = 6 + err := sanityCheckEnableEpochsStakingV4(cfg, numOfShards) + require.Equal(t, errStakingV4StepsNotInOrder, err) + + cfg.StakingV4Step1EnableEpoch = 1 + cfg.StakingV4Step2EnableEpoch = 2 + cfg.StakingV4Step3EnableEpoch = 6 + err = sanityCheckEnableEpochsStakingV4(cfg, numOfShards) + require.Equal(t, errStakingV4StepsNotInOrder, err) + + cfg.StakingV4Step1EnableEpoch = 1 + cfg.StakingV4Step2EnableEpoch = 5 + cfg.StakingV4Step3EnableEpoch = 6 + err = sanityCheckEnableEpochsStakingV4(cfg, numOfShards) + require.Equal(t, errStakingV4StepsNotInOrder, err) + }) + + t.Run("no previous config for max nodes change with one entry, should not return error", func(t *testing.T) { + t.Parallel() + + cfg := generateCorrectConfig() + cfg.MaxNodesChangeEnableEpoch = []MaxNodesChangeConfig{ + { + EpochEnable: 6, + MaxNumNodes: 48, + NodesToShufflePerShard: 2, + }, + } + + err := sanityCheckEnableEpochsStakingV4(cfg, numOfShards) + require.Nil(t, err) + }) + + t.Run("no max nodes config change for StakingV4Step3EnableEpoch, should return error", func(t *testing.T) { + t.Parallel() + + cfg := generateCorrectConfig() + cfg.MaxNodesChangeEnableEpoch = []MaxNodesChangeConfig{ + { + EpochEnable: 1, + MaxNumNodes: 56, + NodesToShufflePerShard: 2, + }, + { + EpochEnable: 
444, + MaxNumNodes: 48, + NodesToShufflePerShard: 2, + }, + } + + err := sanityCheckEnableEpochsStakingV4(cfg, numOfShards) + require.NotNil(t, err) + require.True(t, strings.Contains(err.Error(), errNoMaxNodesConfigChangeForStakingV4.Error())) + require.True(t, strings.Contains(err.Error(), "6")) + }) + + t.Run("max nodes config change for StakingV4Step3EnableEpoch has no previous config change, should not error", func(t *testing.T) { + t.Parallel() + + cfg := generateCorrectConfig() + cfg.MaxNodesChangeEnableEpoch = []MaxNodesChangeConfig{ + { + EpochEnable: cfg.StakingV4Step3EnableEpoch, + MaxNumNodes: 48, + NodesToShufflePerShard: 2, + }, + { + EpochEnable: 444, + MaxNumNodes: 56, + NodesToShufflePerShard: 2, + }, + } + + err := sanityCheckEnableEpochsStakingV4(cfg, numOfShards) + require.Nil(t, err) + }) + + t.Run("stakingV4 config for max nodes changed with different nodes to shuffle, should return error", func(t *testing.T) { + t.Parallel() + + cfg := generateCorrectConfig() + cfg.MaxNodesChangeEnableEpoch[1].NodesToShufflePerShard = 2 + cfg.MaxNodesChangeEnableEpoch[2].NodesToShufflePerShard = 4 + + err := sanityCheckEnableEpochsStakingV4(cfg, numOfShards) + require.ErrorIs(t, err, errMismatchNodesToShuffle) + }) + + t.Run("stakingV4 config for max nodes changed with wrong max num nodes, should return error", func(t *testing.T) { + t.Parallel() + + cfg := generateCorrectConfig() + cfg.MaxNodesChangeEnableEpoch[2].MaxNumNodes = 56 + + err := sanityCheckEnableEpochsStakingV4(cfg, numOfShards) + require.NotNil(t, err) + require.True(t, strings.Contains(err.Error(), "expected")) + require.True(t, strings.Contains(err.Error(), "48")) + require.True(t, strings.Contains(err.Error(), "got")) + require.True(t, strings.Contains(err.Error(), "56")) + }) +} + +func TestSanityCheckNodesConfig(t *testing.T) { + t.Parallel() + + numShards := uint32(3) + t.Run("should work", func(t *testing.T) { + t.Parallel() + + cfg := generateCorrectConfig() + nodesSetup := &nodesSetupMock.NodesSetupMock{ + NumberOfShardsField: numShards, + HysteresisField: 0, + MinNumberOfMetaNodesField: 5, + MinNumberOfShardNodesField: 5, + } + err := SanityCheckNodesConfig(nodesSetup, cfg) + require.Nil(t, err) + + cfg.MaxNodesChangeEnableEpoch = []MaxNodesChangeConfig{ + { + EpochEnable: 1, + MaxNumNodes: 3200, + NodesToShufflePerShard: 80, + }, + { + EpochEnable: 2, + MaxNumNodes: 2880, + NodesToShufflePerShard: 80, + }, + { + EpochEnable: 3, + MaxNumNodes: 2240, + NodesToShufflePerShard: 80, + }, + { + EpochEnable: 4, + MaxNumNodes: 2240, + NodesToShufflePerShard: 40, + }, + { + EpochEnable: 6, + MaxNumNodes: 2080, + NodesToShufflePerShard: 40, + }, + } + nodesSetup = &nodesSetupMock.NodesSetupMock{ + NumberOfShardsField: numShards, + HysteresisField: 0.2, + MinNumberOfMetaNodesField: 400, + MinNumberOfShardNodesField: 400, + } + err = SanityCheckNodesConfig(nodesSetup, cfg) + require.Nil(t, err) + + cfg.MaxNodesChangeEnableEpoch = []MaxNodesChangeConfig{ + { + EpochEnable: 0, + MaxNumNodes: 36, + NodesToShufflePerShard: 4, + }, + { + EpochEnable: 1, + MaxNumNodes: 56, + NodesToShufflePerShard: 2, + }, + { + EpochEnable: 6, + MaxNumNodes: 48, + NodesToShufflePerShard: 2, + }, + } + nodesSetup = &nodesSetupMock.NodesSetupMock{ + NumberOfShardsField: numShards, + HysteresisField: 0, + MinNumberOfMetaNodesField: 3, + MinNumberOfShardNodesField: 3, + } + err = SanityCheckNodesConfig(nodesSetup, cfg) + require.Nil(t, err) + + cfg.MaxNodesChangeEnableEpoch = []MaxNodesChangeConfig{ + { + EpochEnable: 0, + MaxNumNodes: 36, + 
NodesToShufflePerShard: 4, + }, + { + EpochEnable: 1, + MaxNumNodes: 56, + NodesToShufflePerShard: 2, + }, + { + EpochEnable: 6, + MaxNumNodes: 48, + NodesToShufflePerShard: 2, + }, + } + nodesSetup = &nodesSetupMock.NodesSetupMock{ + NumberOfShardsField: numShards, + HysteresisField: 0.2, + MinNumberOfMetaNodesField: 7, + MinNumberOfShardNodesField: 7, + } + err = SanityCheckNodesConfig(nodesSetup, cfg) + require.Nil(t, err) + + cfg.MaxNodesChangeEnableEpoch = []MaxNodesChangeConfig{ + { + EpochEnable: 0, + MaxNumNodes: 48, + NodesToShufflePerShard: 4, + }, + { + EpochEnable: 1, + MaxNumNodes: 56, + NodesToShufflePerShard: 2, + }, + { + EpochEnable: 6, + MaxNumNodes: 48, + NodesToShufflePerShard: 2, + }, + } + nodesSetup = &nodesSetupMock.NodesSetupMock{ + NumberOfShardsField: numShards, + HysteresisField: 0.2, + MinNumberOfMetaNodesField: 10, + MinNumberOfShardNodesField: 10, + } + err = SanityCheckNodesConfig(nodesSetup, cfg) + require.Nil(t, err) + + cfg.MaxNodesChangeEnableEpoch = []MaxNodesChangeConfig{ + { + EpochEnable: 0, + MaxNumNodes: 2169, + NodesToShufflePerShard: 143, + }, + { + EpochEnable: 1, + MaxNumNodes: 3200, + NodesToShufflePerShard: 80, + }, + { + EpochEnable: 6, + MaxNumNodes: 2880, + NodesToShufflePerShard: 80, + }, + } + nodesSetup = &nodesSetupMock.NodesSetupMock{ + NumberOfShardsField: numShards, + HysteresisField: 0.2, + MinNumberOfMetaNodesField: 400, + MinNumberOfShardNodesField: 400, + } + err = SanityCheckNodesConfig(nodesSetup, cfg) + require.Nil(t, err) + }) + + t.Run("zero nodes to shuffle per shard, should not return error", func(t *testing.T) { + t.Parallel() + + cfg := generateCorrectConfig() + cfg.MaxNodesChangeEnableEpoch = []MaxNodesChangeConfig{ + { + EpochEnable: 4, + MaxNumNodes: 3200, + NodesToShufflePerShard: 0, + }, + { + EpochEnable: 6, + MaxNumNodes: 3200, + NodesToShufflePerShard: 0, + }, + } + nodesSetup := &nodesSetupMock.NodesSetupMock{ + NumberOfShardsField: numShards, + HysteresisField: 0.2, + MinNumberOfMetaNodesField: 400, + MinNumberOfShardNodesField: 400, + } + err := SanityCheckNodesConfig(nodesSetup, cfg) + require.Nil(t, err) + }) + + t.Run("maxNumNodes < minNumNodesWithHysteresis, should return error ", func(t *testing.T) { + t.Parallel() + + cfg := generateCorrectConfig() + cfg.MaxNodesChangeEnableEpoch = []MaxNodesChangeConfig{ + { + EpochEnable: 4, + MaxNumNodes: 1900, + NodesToShufflePerShard: 80, + }, + } + nodesSetup := &nodesSetupMock.NodesSetupMock{ + NumberOfShardsField: numShards, + HysteresisField: 0.2, + MinNumberOfMetaNodesField: 400, + MinNumberOfShardNodesField: 400, + } + err := SanityCheckNodesConfig(nodesSetup, cfg) + require.NotNil(t, err) + require.True(t, strings.Contains(err.Error(), errInvalidMaxMinNodes.Error())) + require.True(t, strings.Contains(err.Error(), "maxNumNodes: 1900")) + require.True(t, strings.Contains(err.Error(), "minNumNodesWithHysteresis: 1920")) + }) +} diff --git a/config/epochConfig.go b/config/epochConfig.go index 854f80063be..7789ecc72b3 100644 --- a/config/epochConfig.go +++ b/config/epochConfig.go @@ -43,13 +43,11 @@ type EnableEpochs struct { SaveJailedAlwaysEnableEpoch uint32 ValidatorToDelegationEnableEpoch uint32 ReDelegateBelowMinCheckEnableEpoch uint32 - WaitingListFixEnableEpoch uint32 IncrementSCRNonceInMultiTransferEnableEpoch uint32 ScheduledMiniBlocksEnableEpoch uint32 ESDTMultiTransferEnableEpoch uint32 GlobalMintBurnDisableEpoch uint32 ESDTTransferRoleEnableEpoch uint32 - BuiltInFunctionOnMetaEnableEpoch uint32 ComputeRewardCheckpointEnableEpoch uint32 
SCRSizeInvariantCheckEnableEpoch uint32 BackwardCompSaveKeyValueEnableEpoch uint32 @@ -102,6 +100,7 @@ type EnableEpochs struct { MultiClaimOnDelegationEnableEpoch uint32 ChangeUsernameEnableEpoch uint32 AutoBalanceDataTriesEnableEpoch uint32 + MigrateDataTrieEnableEpoch uint32 ConsistentTokensValuesLengthCheckEnableEpoch uint32 FixDelegationChangeOwnerOnAccountEnableEpoch uint32 DynamicGasCostForDataTrieStorageLoadEnableEpoch uint32 @@ -109,6 +108,11 @@ type EnableEpochs struct { ChangeOwnerAddressCrossShardThroughSCEnableEpoch uint32 FixGasRemainingForSaveKeyValueBuiltinFunctionEnableEpoch uint32 CurrentRandomnessOnSortingEnableEpoch uint32 + StakeLimitsEnableEpoch uint32 + StakingV4Step1EnableEpoch uint32 + StakingV4Step2EnableEpoch uint32 + StakingV4Step3EnableEpoch uint32 + AlwaysMergeContextsInEEIEnableEpoch uint32 BLSMultiSignerEnableEpoch []MultiSignerConfig } diff --git a/config/errors.go b/config/errors.go new file mode 100644 index 00000000000..6161ef4c168 --- /dev/null +++ b/config/errors.go @@ -0,0 +1,13 @@ +package config + +import "errors" + +var errStakingV4StepsNotInOrder = errors.New("staking v4 enable epoch steps should be in cardinal order(e.g.: StakingV4Step1EnableEpoch = 2, StakingV4Step2EnableEpoch = 3, StakingV4Step3EnableEpoch = 4)") + +var errNoMaxNodesConfigBeforeStakingV4 = errors.New("no previous config change entry in MaxNodesChangeEnableEpoch before entry with EpochEnable = StakingV4Step3EnableEpoch") + +var errMismatchNodesToShuffle = errors.New("previous MaxNodesChangeEnableEpoch.NodesToShufflePerShard != MaxNodesChangeEnableEpoch.NodesToShufflePerShard with EnableEpoch = StakingV4Step3EnableEpoch") + +var errNoMaxNodesConfigChangeForStakingV4 = errors.New("no MaxNodesChangeEnableEpoch config found for EpochEnable = StakingV4Step3EnableEpoch") + +var errInvalidMaxMinNodes = errors.New("number of min nodes with hysteresis > number of max nodes") diff --git a/config/interface.go b/config/interface.go new file mode 100644 index 00000000000..859e845c434 --- /dev/null +++ b/config/interface.go @@ -0,0 +1,7 @@ +package config + +// NodesSetupHandler provides nodes setup information +type NodesSetupHandler interface { + MinNumberOfNodesWithHysteresis() uint32 + NumberOfShards() uint32 +} diff --git a/config/ratingsConfig.go b/config/ratingsConfig.go index 3558a32f446..a4c243cd51b 100644 --- a/config/ratingsConfig.go +++ b/config/ratingsConfig.go @@ -27,7 +27,7 @@ type MetaChain struct { RatingSteps } -//RatingValue will hold different rating options with increase and decrease steps +// RatingValue will hold different rating options with increase and decrease steps type RatingValue struct { Name string Value int32 diff --git a/config/systemSmartContractsConfig.go b/config/systemSmartContractsConfig.go index d48027574eb..0ed6cce28b1 100644 --- a/config/systemSmartContractsConfig.go +++ b/config/systemSmartContractsConfig.go @@ -7,6 +7,7 @@ type SystemSmartContractsConfig struct { StakingSystemSCConfig StakingSystemSCConfig DelegationManagerSystemSCConfig DelegationManagerSystemSCConfig DelegationSystemSCConfig DelegationSystemSCConfig + SoftAuctionConfig SoftAuctionConfig } // StakingSystemSCConfig will hold the staking system smart contract settings @@ -23,6 +24,8 @@ type StakingSystemSCConfig struct { BleedPercentagePerRound float64 MaxNumberOfNodesForStake uint64 ActivateBLSPubKeyMessageVerification bool + StakeLimitPercentage float64 + NodeLimitPercentage float64 } // ESDTSystemSCConfig defines a set of constant to initialize the esdt system smart contract @@ 
-32,7 +35,7 @@ type ESDTSystemSCConfig struct { } // GovernanceSystemSCConfigV1 holds the initial set of values that were used to initialise the -// governance system smart contract at genesis time +// governance system smart contract at genesis time type GovernanceSystemSCConfigV1 struct { NumNodes int64 ProposalCost string @@ -42,7 +45,7 @@ type GovernanceSystemSCConfigV1 struct { } // GovernanceSystemSCConfigActive defines the set of configuration values used by the governance -// system smart contract once it activates +// system smart contract once it activates type GovernanceSystemSCConfigActive struct { ProposalCost string LostProposalFee string @@ -71,3 +74,11 @@ type DelegationSystemSCConfig struct { MaxServiceFee uint64 AddTokensWhitelistedAddress string } + +// SoftAuctionConfig represents the config options for soft auction selecting used in staking v4 +type SoftAuctionConfig struct { + TopUpStep string + MinTopUp string + MaxTopUp string + MaxNumberOfIterations uint64 +} diff --git a/config/tomlConfig_test.go b/config/tomlConfig_test.go index a6e9f5c2086..45dd2c7ef00 100644 --- a/config/tomlConfig_test.go +++ b/config/tomlConfig_test.go @@ -489,10 +489,11 @@ func TestP2pConfig(t *testing.T) { [Node.Transports.TCP] ListenAddress = "/ip4/0.0.0.0/tcp/%d" PreventPortReuse = true - [Node.ResourceLimiter] - Type = "default autoscale" #available options "default autoscale", "infinite", "default with manual scale". - ManualSystemMemoryInMB = 1 # not taken into account if the type is not "default with manual scale" - ManualMaximumFD = 2 # not taken into account if the type is not "default with manual scale" + + [Node.ResourceLimiter] + Type = "default autoscale" #available options "default autoscale", "infinite", "default with manual scale". + ManualSystemMemoryInMB = 1 # not taken into account if the type is not "default with manual scale" + ManualMaximumFD = 2 # not taken into account if the type is not "default with manual scale" [KadDhtPeerDiscovery] Enabled = false @@ -645,9 +646,6 @@ func TestEnableEpochConfig(t *testing.T) { # ValidatorToDelegationEnableEpoch represents the epoch when the validator-to-delegation feature will be enabled ValidatorToDelegationEnableEpoch = 29 - # WaitingListFixEnableEpoch represents the epoch when the 6 epoch waiting list fix is enabled - WaitingListFixEnableEpoch = 30 - # IncrementSCRNonceInMultiTransferEnableEpoch represents the epoch when the fix for preventing the generation of the same SCRs # is enabled. The fix is done by adding an extra increment. 
IncrementSCRNonceInMultiTransferEnableEpoch = 31 @@ -661,9 +659,6 @@ func TestEnableEpochConfig(t *testing.T) { # ESDTTransferRoleEnableEpoch represents the epoch when esdt transfer role set is enabled ESDTTransferRoleEnableEpoch = 34 - # BuiltInFunctionOnMetaEnableEpoch represents the epoch when built in function processing on metachain is enabled - BuiltInFunctionOnMetaEnableEpoch = 35 - # ComputeRewardCheckpointEnableEpoch represents the epoch when compute rewards checkpoint epoch is enabled ComputeRewardCheckpointEnableEpoch = 36 @@ -838,9 +833,15 @@ func TestEnableEpochConfig(t *testing.T) { # FixGasRemainingForSaveKeyValueBuiltinFunctionEnableEpoch represents the epoch when the fix for the remaining gas in the SaveKeyValue builtin function is enabled FixGasRemainingForSaveKeyValueBuiltinFunctionEnableEpoch = 91 + + # MigrateDataTrieEnableEpoch represents the epoch when the data tries migration is enabled + MigrateDataTrieEnableEpoch = 92 # CurrentRandomnessOnSortingEnableEpoch represents the epoch when the current randomness on sorting is enabled - CurrentRandomnessOnSortingEnableEpoch = 92 + CurrentRandomnessOnSortingEnableEpoch = 93 + + # AlwaysMergeContextsInEEIEnableEpoch represents the epoch in which the EEI will always merge the contexts + AlwaysMergeContextsInEEIEnableEpoch = 94 # MaxNodesChangeEnableEpoch holds configuration for changing the maximum number of nodes and the enabling epoch MaxNodesChangeEnableEpoch = [ @@ -891,12 +892,10 @@ func TestEnableEpochConfig(t *testing.T) { SaveJailedAlwaysEnableEpoch: 27, ReDelegateBelowMinCheckEnableEpoch: 28, ValidatorToDelegationEnableEpoch: 29, - WaitingListFixEnableEpoch: 30, IncrementSCRNonceInMultiTransferEnableEpoch: 31, ESDTMultiTransferEnableEpoch: 32, GlobalMintBurnDisableEpoch: 33, ESDTTransferRoleEnableEpoch: 34, - BuiltInFunctionOnMetaEnableEpoch: 35, ComputeRewardCheckpointEnableEpoch: 36, SCRSizeInvariantCheckEnableEpoch: 37, BackwardCompSaveKeyValueEnableEpoch: 38, @@ -953,7 +952,9 @@ func TestEnableEpochConfig(t *testing.T) { NFTStopCreateEnableEpoch: 89, ChangeOwnerAddressCrossShardThroughSCEnableEpoch: 90, FixGasRemainingForSaveKeyValueBuiltinFunctionEnableEpoch: 91, - CurrentRandomnessOnSortingEnableEpoch: 92, + MigrateDataTrieEnableEpoch: 92, + CurrentRandomnessOnSortingEnableEpoch: 93, + AlwaysMergeContextsInEEIEnableEpoch: 94, MaxNodesChangeEnableEpoch: []MaxNodesChangeConfig{ { EpochEnable: 44, diff --git a/consensus/interface.go b/consensus/interface.go index 97292269a99..aa8d9057bc4 100644 --- a/consensus/interface.go +++ b/consensus/interface.go @@ -190,5 +190,6 @@ type KeysHandler interface { GetAssociatedPid(pkBytes []byte) core.PeerID IsOriginalPublicKeyOfTheNode(pkBytes []byte) bool ResetRoundsWithoutReceivedMessages(pkBytes []byte, pid core.PeerID) + GetRedundancyStepInReason() string IsInterfaceNil() bool } diff --git a/consensus/mock/peerProcessorStub.go b/consensus/mock/peerProcessorStub.go deleted file mode 100644 index 69e8b8d7d31..00000000000 --- a/consensus/mock/peerProcessorStub.go +++ /dev/null @@ -1,37 +0,0 @@ -package mock - -import ( - "github.com/multiversx/mx-chain-core-go/data" - "github.com/multiversx/mx-chain-go/sharding" -) - -// ValidatorStatisticsProcessorStub - -type ValidatorStatisticsProcessorStub struct { - LoadInitialStateCalled func(in []*sharding.InitialNode) error - UpdatePeerStateCalled func(header, previousHeader data.HeaderHandler) error - IsInterfaceNilCalled func() bool -} - -// LoadInitialState - -func (pm *ValidatorStatisticsProcessorStub) LoadInitialState(in 
[]*sharding.InitialNode) error { - if pm.LoadInitialStateCalled != nil { - return pm.LoadInitialStateCalled(in) - } - return nil -} - -// UpdatePeerState - -func (pm *ValidatorStatisticsProcessorStub) UpdatePeerState(header, previousHeader data.HeaderHandler) error { - if pm.UpdatePeerStateCalled != nil { - return pm.UpdatePeerStateCalled(header, previousHeader) - } - return nil -} - -// IsInterfaceNil - -func (pm *ValidatorStatisticsProcessorStub) IsInterfaceNil() bool { - if pm.IsInterfaceNilCalled != nil { - return pm.IsInterfaceNilCalled() - } - return false -} diff --git a/consensus/spos/bls/blsSubroundsFactory.go b/consensus/spos/bls/blsSubroundsFactory.go index 81a09e71009..aeb64a5775a 100644 --- a/consensus/spos/bls/blsSubroundsFactory.go +++ b/consensus/spos/bls/blsSubroundsFactory.go @@ -80,7 +80,7 @@ func checkNewFactoryParams( return spos.ErrNilAppStatusHandler } if check.IfNil(sentSignaturesTracker) { - return spos.ErrNilSentSignatureTracker + return ErrNilSentSignatureTracker } if len(chainID) == 0 { return spos.ErrInvalidChainID diff --git a/consensus/spos/bls/blsSubroundsFactory_test.go b/consensus/spos/bls/blsSubroundsFactory_test.go index a0cf949d366..af3267a78cc 100644 --- a/consensus/spos/bls/blsSubroundsFactory_test.go +++ b/consensus/spos/bls/blsSubroundsFactory_test.go @@ -13,6 +13,7 @@ import ( "github.com/multiversx/mx-chain-go/consensus/spos" "github.com/multiversx/mx-chain-go/consensus/spos/bls" "github.com/multiversx/mx-chain-go/outport" + "github.com/multiversx/mx-chain-go/testscommon" testscommonOutport "github.com/multiversx/mx-chain-go/testscommon/outport" "github.com/multiversx/mx-chain-go/testscommon/statusHandler" "github.com/stretchr/testify/assert" @@ -76,7 +77,7 @@ func initFactoryWithContainer(container *mock.ConsensusCoreMock) bls.Factory { chainID, currentPid, &statusHandler.AppStatusHandlerStub{}, - &mock.SentSignatureTrackerStub{}, + &testscommon.SentSignatureTrackerStub{}, ) return fct @@ -125,7 +126,7 @@ func TestFactory_NewFactoryNilContainerShouldFail(t *testing.T) { chainID, currentPid, &statusHandler.AppStatusHandlerStub{}, - &mock.SentSignatureTrackerStub{}, + &testscommon.SentSignatureTrackerStub{}, ) assert.Nil(t, fct) @@ -145,7 +146,7 @@ func TestFactory_NewFactoryNilConsensusStateShouldFail(t *testing.T) { chainID, currentPid, &statusHandler.AppStatusHandlerStub{}, - &mock.SentSignatureTrackerStub{}, + &testscommon.SentSignatureTrackerStub{}, ) assert.Nil(t, fct) @@ -167,7 +168,7 @@ func TestFactory_NewFactoryNilBlockchainShouldFail(t *testing.T) { chainID, currentPid, &statusHandler.AppStatusHandlerStub{}, - &mock.SentSignatureTrackerStub{}, + &testscommon.SentSignatureTrackerStub{}, ) assert.Nil(t, fct) @@ -189,7 +190,7 @@ func TestFactory_NewFactoryNilBlockProcessorShouldFail(t *testing.T) { chainID, currentPid, &statusHandler.AppStatusHandlerStub{}, - &mock.SentSignatureTrackerStub{}, + &testscommon.SentSignatureTrackerStub{}, ) assert.Nil(t, fct) @@ -211,7 +212,7 @@ func TestFactory_NewFactoryNilBootstrapperShouldFail(t *testing.T) { chainID, currentPid, &statusHandler.AppStatusHandlerStub{}, - &mock.SentSignatureTrackerStub{}, + &testscommon.SentSignatureTrackerStub{}, ) assert.Nil(t, fct) @@ -233,7 +234,7 @@ func TestFactory_NewFactoryNilChronologyHandlerShouldFail(t *testing.T) { chainID, currentPid, &statusHandler.AppStatusHandlerStub{}, - &mock.SentSignatureTrackerStub{}, + &testscommon.SentSignatureTrackerStub{}, ) assert.Nil(t, fct) @@ -255,7 +256,7 @@ func TestFactory_NewFactoryNilHasherShouldFail(t *testing.T) { chainID, 
currentPid, &statusHandler.AppStatusHandlerStub{}, - &mock.SentSignatureTrackerStub{}, + &testscommon.SentSignatureTrackerStub{}, ) assert.Nil(t, fct) @@ -277,7 +278,7 @@ func TestFactory_NewFactoryNilMarshalizerShouldFail(t *testing.T) { chainID, currentPid, &statusHandler.AppStatusHandlerStub{}, - &mock.SentSignatureTrackerStub{}, + &testscommon.SentSignatureTrackerStub{}, ) assert.Nil(t, fct) @@ -299,7 +300,7 @@ func TestFactory_NewFactoryNilMultiSignerContainerShouldFail(t *testing.T) { chainID, currentPid, &statusHandler.AppStatusHandlerStub{}, - &mock.SentSignatureTrackerStub{}, + &testscommon.SentSignatureTrackerStub{}, ) assert.Nil(t, fct) @@ -321,7 +322,7 @@ func TestFactory_NewFactoryNilRoundHandlerShouldFail(t *testing.T) { chainID, currentPid, &statusHandler.AppStatusHandlerStub{}, - &mock.SentSignatureTrackerStub{}, + &testscommon.SentSignatureTrackerStub{}, ) assert.Nil(t, fct) @@ -343,7 +344,7 @@ func TestFactory_NewFactoryNilShardCoordinatorShouldFail(t *testing.T) { chainID, currentPid, &statusHandler.AppStatusHandlerStub{}, - &mock.SentSignatureTrackerStub{}, + &testscommon.SentSignatureTrackerStub{}, ) assert.Nil(t, fct) @@ -365,7 +366,7 @@ func TestFactory_NewFactoryNilSyncTimerShouldFail(t *testing.T) { chainID, currentPid, &statusHandler.AppStatusHandlerStub{}, - &mock.SentSignatureTrackerStub{}, + &testscommon.SentSignatureTrackerStub{}, ) assert.Nil(t, fct) @@ -387,7 +388,7 @@ func TestFactory_NewFactoryNilValidatorGroupSelectorShouldFail(t *testing.T) { chainID, currentPid, &statusHandler.AppStatusHandlerStub{}, - &mock.SentSignatureTrackerStub{}, + &testscommon.SentSignatureTrackerStub{}, ) assert.Nil(t, fct) @@ -407,7 +408,7 @@ func TestFactory_NewFactoryNilWorkerShouldFail(t *testing.T) { chainID, currentPid, &statusHandler.AppStatusHandlerStub{}, - &mock.SentSignatureTrackerStub{}, + &testscommon.SentSignatureTrackerStub{}, ) assert.Nil(t, fct) @@ -428,7 +429,7 @@ func TestFactory_NewFactoryNilAppStatusHandlerShouldFail(t *testing.T) { chainID, currentPid, nil, - &mock.SentSignatureTrackerStub{}, + &testscommon.SentSignatureTrackerStub{}, ) assert.Nil(t, fct) @@ -453,7 +454,7 @@ func TestFactory_NewFactoryNilSignaturesTrackerShouldFail(t *testing.T) { ) assert.Nil(t, fct) - assert.Equal(t, spos.ErrNilSentSignatureTracker, err) + assert.Equal(t, bls.ErrNilSentSignatureTracker, err) } func TestFactory_NewFactoryShouldWork(t *testing.T) { @@ -478,7 +479,7 @@ func TestFactory_NewFactoryEmptyChainIDShouldFail(t *testing.T) { nil, currentPid, &statusHandler.AppStatusHandlerStub{}, - &mock.SentSignatureTrackerStub{}, + &testscommon.SentSignatureTrackerStub{}, ) assert.Nil(t, fct) diff --git a/consensus/spos/bls/blsWorker.go b/consensus/spos/bls/blsWorker.go index 8a5eabe6b5a..456d4e8b1d8 100644 --- a/consensus/spos/bls/blsWorker.go +++ b/consensus/spos/bls/blsWorker.go @@ -7,12 +7,13 @@ import ( // peerMaxMessagesPerSec defines how many messages can be propagated by a pid in a round. The value was chosen by // following the next premises: -// 1. a leader can propagate as maximum as 3 messages per round: proposed header block + proposed body + final info; -// 2. due to the fact that a delayed signature of the proposer (from previous round) can be received in the current round -// adds an extra 1 to the total value, reaching value 4; -// 3. Because the leader might be selected in the next round and might have an empty data pool, it can send the newly -// empty proposed block at the very beginning of the next round. One extra message here, yielding to a total of 5. 
-// 4. If we consider the forks that can appear on the system wee need to add one more to the value. +// 1. a leader can propagate as maximum as 3 messages per round: proposed header block + proposed body + final info; +// 2. due to the fact that a delayed signature of the proposer (from previous round) can be received in the current round +// adds an extra 1 to the total value, reaching value 4; +// 3. Because the leader might be selected in the next round and might have an empty data pool, it can send the newly +// empty proposed block at the very beginning of the next round. One extra message here, yielding to a total of 5. +// 4. If we consider the forks that can appear on the system wee need to add one more to the value. +// // Validators only send one signature message in a round, treating the edge case of a delayed message, will need at most // 2 messages per round (which is ok as it is below the set value of 5) const peerMaxMessagesPerSec = uint32(6) @@ -36,7 +37,7 @@ func NewConsensusService() (*worker, error) { return &wrk, nil } -//InitReceivedMessages initializes the MessagesType map for all messages for the current ConsensusService +// InitReceivedMessages initializes the MessagesType map for all messages for the current ConsensusService func (wrk *worker) InitReceivedMessages() map[consensus.MessageType][]*consensus.Message { receivedMessages := make(map[consensus.MessageType][]*consensus.Message) receivedMessages[MtBlockBodyAndHeader] = make([]*consensus.Message, 0) @@ -54,47 +55,47 @@ func (wrk *worker) GetMaxMessagesInARoundPerPeer() uint32 { return peerMaxMessagesPerSec } -//GetStringValue gets the name of the messageType +// GetStringValue gets the name of the messageType func (wrk *worker) GetStringValue(messageType consensus.MessageType) string { return getStringValue(messageType) } -//GetSubroundName gets the subround name for the subround id provided +// GetSubroundName gets the subround name for the subround id provided func (wrk *worker) GetSubroundName(subroundId int) string { return getSubroundName(subroundId) } -//IsMessageWithBlockBodyAndHeader returns if the current messageType is about block body and header +// IsMessageWithBlockBodyAndHeader returns if the current messageType is about block body and header func (wrk *worker) IsMessageWithBlockBodyAndHeader(msgType consensus.MessageType) bool { return msgType == MtBlockBodyAndHeader } -//IsMessageWithBlockBody returns if the current messageType is about block body +// IsMessageWithBlockBody returns if the current messageType is about block body func (wrk *worker) IsMessageWithBlockBody(msgType consensus.MessageType) bool { return msgType == MtBlockBody } -//IsMessageWithBlockHeader returns if the current messageType is about block header +// IsMessageWithBlockHeader returns if the current messageType is about block header func (wrk *worker) IsMessageWithBlockHeader(msgType consensus.MessageType) bool { return msgType == MtBlockHeader } -//IsMessageWithSignature returns if the current messageType is about signature +// IsMessageWithSignature returns if the current messageType is about signature func (wrk *worker) IsMessageWithSignature(msgType consensus.MessageType) bool { return msgType == MtSignature } -//IsMessageWithFinalInfo returns if the current messageType is about header final info +// IsMessageWithFinalInfo returns if the current messageType is about header final info func (wrk *worker) IsMessageWithFinalInfo(msgType consensus.MessageType) bool { return msgType == MtBlockHeaderFinalInfo } 
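The reflowed comment above derives the peerMaxMessagesPerSec budget step by step (three leader messages, plus a delayed proposer signature, plus an early empty proposal, plus headroom for forks). The arithmetic can be made explicit with a throwaway sketch; the variable names here are invented for illustration and do not exist in blsWorker.go.

package main

import "fmt"

func main() {
	// Illustrative breakdown of the per-peer message budget described in blsWorker.go.
	leaderMessagesPerRound := 3 // proposed header + proposed body + final info
	delayedSignature := 1       // a proposer signature from the previous round may arrive late
	earlyEmptyProposal := 1     // the next-round leader may send an empty proposed block very early
	forkMargin := 1             // extra headroom for forks

	budget := leaderMessagesPerRound + delayedSignature + earlyEmptyProposal + forkMargin
	fmt.Println(budget) // 6, matching peerMaxMessagesPerSec
}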
-//IsMessageWithInvalidSigners returns if the current messageType is about invalid signers +// IsMessageWithInvalidSigners returns if the current messageType is about invalid signers func (wrk *worker) IsMessageWithInvalidSigners(msgType consensus.MessageType) bool { return msgType == MtInvalidSigners } -//IsMessageTypeValid returns if the current messageType is valid +// IsMessageTypeValid returns if the current messageType is valid func (wrk *worker) IsMessageTypeValid(msgType consensus.MessageType) bool { isMessageTypeValid := msgType == MtBlockBodyAndHeader || msgType == MtBlockBody || @@ -106,17 +107,17 @@ func (wrk *worker) IsMessageTypeValid(msgType consensus.MessageType) bool { return isMessageTypeValid } -//IsSubroundSignature returns if the current subround is about signature +// IsSubroundSignature returns if the current subround is about signature func (wrk *worker) IsSubroundSignature(subroundId int) bool { return subroundId == SrSignature } -//IsSubroundStartRound returns if the current subround is about start round +// IsSubroundStartRound returns if the current subround is about start round func (wrk *worker) IsSubroundStartRound(subroundId int) bool { return subroundId == SrStartRound } -//GetMessageRange provides the MessageType range used in checks by the consensus +// GetMessageRange provides the MessageType range used in checks by the consensus func (wrk *worker) GetMessageRange() []consensus.MessageType { var v []consensus.MessageType @@ -127,7 +128,7 @@ func (wrk *worker) GetMessageRange() []consensus.MessageType { return v } -//CanProceed returns if the current messageType can proceed further if previous subrounds finished +// CanProceed returns if the current messageType can proceed further if previous subrounds finished func (wrk *worker) CanProceed(consensusState *spos.ConsensusState, msgType consensus.MessageType) bool { switch msgType { case MtBlockBodyAndHeader: diff --git a/consensus/spos/bls/errors.go b/consensus/spos/bls/errors.go new file mode 100644 index 00000000000..b840f9e2c85 --- /dev/null +++ b/consensus/spos/bls/errors.go @@ -0,0 +1,6 @@ +package bls + +import "errors" + +// ErrNilSentSignatureTracker defines the error for setting a nil SentSignatureTracker +var ErrNilSentSignatureTracker = errors.New("nil sent signature tracker") diff --git a/consensus/spos/bls/subroundBlock.go b/consensus/spos/bls/subroundBlock.go index d032a04eb63..a83969721b8 100644 --- a/consensus/spos/bls/subroundBlock.go +++ b/consensus/spos/bls/subroundBlock.go @@ -63,7 +63,8 @@ func checkNewSubroundBlockParams( // doBlockJob method does the job of the subround Block func (sr *subroundBlock) doBlockJob(ctx context.Context) bool { - if !sr.IsSelfLeaderInCurrentRound() && !sr.IsMultiKeyLeaderInCurrentRound() { // is NOT self leader in this round? + isSelfLeader := sr.IsSelfLeaderInCurrentRound() && sr.ShouldConsiderSelfKeyInConsensus() + if !isSelfLeader && !sr.IsMultiKeyLeaderInCurrentRound() { // is NOT self leader in this round? 
return false } diff --git a/consensus/spos/bls/subroundEndRound.go b/consensus/spos/bls/subroundEndRound.go index 723fc0bcbf3..21675715f39 100644 --- a/consensus/spos/bls/subroundEndRound.go +++ b/consensus/spos/bls/subroundEndRound.go @@ -15,6 +15,7 @@ import ( "github.com/multiversx/mx-chain-go/consensus" "github.com/multiversx/mx-chain-go/consensus/spos" "github.com/multiversx/mx-chain-go/p2p" + "github.com/multiversx/mx-chain-go/process/headerCheck" ) type subroundEndRound struct { @@ -48,7 +49,7 @@ func NewSubroundEndRound( return nil, spos.ErrNilAppStatusHandler } if check.IfNil(sentSignatureTracker) { - return nil, spos.ErrNilSentSignatureTracker + return nil, ErrNilSentSignatureTracker } srEndRound := subroundEndRound{ @@ -120,9 +121,6 @@ func (sr *subroundEndRound) receivedBlockHeaderFinalInfo(_ context.Context, cnsD "AggregateSignature", cnsDta.AggregateSignature, "LeaderSignature", cnsDta.LeaderSignature) - signers := computeSignersPublicKeys(sr.ConsensusGroup(), cnsDta.PubKeysBitmap) - sr.sentSignatureTracker.ReceivedActualSigners(signers) - sr.PeerHonestyHandler().ChangeScore( node, spos.GetConsensusTopicID(sr.ShardCoordinator()), @@ -189,7 +187,7 @@ func (sr *subroundEndRound) receivedInvalidSignersInfo(_ context.Context, cnsDta return false } - if sr.IsSelfLeaderInCurrentRound() { + if sr.IsSelfLeaderInCurrentRound() || sr.IsMultiKeyLeaderInCurrentRound() { return false } @@ -589,12 +587,23 @@ func (sr *subroundEndRound) createAndBroadcastHeaderFinalInfo() { } func (sr *subroundEndRound) createAndBroadcastInvalidSigners(invalidSigners []byte) { + isSelfLeader := sr.IsSelfLeaderInCurrentRound() && sr.ShouldConsiderSelfKeyInConsensus() + if !(isSelfLeader || sr.IsMultiKeyLeaderInCurrentRound()) { + return + } + + leader, errGetLeader := sr.GetLeader() + if errGetLeader != nil { + log.Debug("createAndBroadcastInvalidSigners.GetLeader", "error", errGetLeader) + return + } + cnsMsg := consensus.NewConsensusMessage( sr.GetData(), nil, nil, nil, - []byte(sr.SelfPubKey()), + []byte(leader), nil, int(MtInvalidSigners), sr.RoundHandler().Index(), @@ -602,7 +611,7 @@ func (sr *subroundEndRound) createAndBroadcastInvalidSigners(invalidSigners []by nil, nil, nil, - sr.CurrentPid(), + sr.GetAssociatedPid([]byte(leader)), invalidSigners, ) @@ -853,33 +862,9 @@ func (sr *subroundEndRound) doEndRoundConsensusCheck() bool { return false } -// computeSignersPublicKeys will extract from the provided consensus group slice only the strings that matched with the bitmap -func computeSignersPublicKeys(consensusGroup []string, bitmap []byte) []string { - nbBitsBitmap := len(bitmap) * 8 - consensusGroupSize := len(consensusGroup) - size := consensusGroupSize - if consensusGroupSize > nbBitsBitmap { - size = nbBitsBitmap - } - - result := make([]string, 0, len(consensusGroup)) - - for i := 0; i < size; i++ { - indexRequired := (bitmap[i/8] & (1 << uint16(i%8))) > 0 - if !indexRequired { - continue - } - - pubKey := consensusGroup[i] - result = append(result, pubKey) - } - - return result -} - func (sr *subroundEndRound) checkSignaturesValidity(bitmap []byte) error { consensusGroup := sr.ConsensusGroup() - signers := computeSignersPublicKeys(consensusGroup, bitmap) + signers := headerCheck.ComputeSignersPublicKeys(consensusGroup, bitmap) for _, pubKey := range signers { isSigJobDone, err := sr.JobDone(pubKey, SrSignature) if err != nil { diff --git a/consensus/spos/bls/subroundEndRound_test.go b/consensus/spos/bls/subroundEndRound_test.go index 456277e23fc..725513b8cb2 100644 --- 
a/consensus/spos/bls/subroundEndRound_test.go +++ b/consensus/spos/bls/subroundEndRound_test.go @@ -55,7 +55,7 @@ func initSubroundEndRoundWithContainer( bls.ProcessingThresholdPercent, displayStatistics, appStatusHandler, - &mock.SentSignatureTrackerStub{}, + &testscommon.SentSignatureTrackerStub{}, ) return srEndRound @@ -97,7 +97,7 @@ func TestNewSubroundEndRound(t *testing.T) { bls.ProcessingThresholdPercent, displayStatistics, &statusHandler.AppStatusHandlerStub{}, - &mock.SentSignatureTrackerStub{}, + &testscommon.SentSignatureTrackerStub{}, ) assert.Nil(t, srEndRound) @@ -112,7 +112,7 @@ func TestNewSubroundEndRound(t *testing.T) { bls.ProcessingThresholdPercent, displayStatistics, &statusHandler.AppStatusHandlerStub{}, - &mock.SentSignatureTrackerStub{}, + &testscommon.SentSignatureTrackerStub{}, ) assert.Nil(t, srEndRound) @@ -127,7 +127,7 @@ func TestNewSubroundEndRound(t *testing.T) { bls.ProcessingThresholdPercent, displayStatistics, nil, - &mock.SentSignatureTrackerStub{}, + &testscommon.SentSignatureTrackerStub{}, ) assert.Nil(t, srEndRound) @@ -146,7 +146,7 @@ func TestNewSubroundEndRound(t *testing.T) { ) assert.Nil(t, srEndRound) - assert.Equal(t, spos.ErrNilSentSignatureTracker, err) + assert.Equal(t, bls.ErrNilSentSignatureTracker, err) }) } @@ -179,7 +179,7 @@ func TestSubroundEndRound_NewSubroundEndRoundNilBlockChainShouldFail(t *testing. bls.ProcessingThresholdPercent, displayStatistics, &statusHandler.AppStatusHandlerStub{}, - &mock.SentSignatureTrackerStub{}, + &testscommon.SentSignatureTrackerStub{}, ) assert.True(t, check.IfNil(srEndRound)) @@ -215,7 +215,7 @@ func TestSubroundEndRound_NewSubroundEndRoundNilBlockProcessorShouldFail(t *test bls.ProcessingThresholdPercent, displayStatistics, &statusHandler.AppStatusHandlerStub{}, - &mock.SentSignatureTrackerStub{}, + &testscommon.SentSignatureTrackerStub{}, ) assert.True(t, check.IfNil(srEndRound)) @@ -252,7 +252,7 @@ func TestSubroundEndRound_NewSubroundEndRoundNilConsensusStateShouldFail(t *test bls.ProcessingThresholdPercent, displayStatistics, &statusHandler.AppStatusHandlerStub{}, - &mock.SentSignatureTrackerStub{}, + &testscommon.SentSignatureTrackerStub{}, ) assert.True(t, check.IfNil(srEndRound)) @@ -288,7 +288,7 @@ func TestSubroundEndRound_NewSubroundEndRoundNilMultiSignerContainerShouldFail(t bls.ProcessingThresholdPercent, displayStatistics, &statusHandler.AppStatusHandlerStub{}, - &mock.SentSignatureTrackerStub{}, + &testscommon.SentSignatureTrackerStub{}, ) assert.True(t, check.IfNil(srEndRound)) @@ -324,7 +324,7 @@ func TestSubroundEndRound_NewSubroundEndRoundNilRoundHandlerShouldFail(t *testin bls.ProcessingThresholdPercent, displayStatistics, &statusHandler.AppStatusHandlerStub{}, - &mock.SentSignatureTrackerStub{}, + &testscommon.SentSignatureTrackerStub{}, ) assert.True(t, check.IfNil(srEndRound)) @@ -360,7 +360,7 @@ func TestSubroundEndRound_NewSubroundEndRoundNilSyncTimerShouldFail(t *testing.T bls.ProcessingThresholdPercent, displayStatistics, &statusHandler.AppStatusHandlerStub{}, - &mock.SentSignatureTrackerStub{}, + &testscommon.SentSignatureTrackerStub{}, ) assert.True(t, check.IfNil(srEndRound)) @@ -396,7 +396,7 @@ func TestSubroundEndRound_NewSubroundEndRoundShouldWork(t *testing.T) { bls.ProcessingThresholdPercent, displayStatistics, &statusHandler.AppStatusHandlerStub{}, - &mock.SentSignatureTrackerStub{}, + &testscommon.SentSignatureTrackerStub{}, ) assert.False(t, check.IfNil(srEndRound)) @@ -902,16 +902,8 @@ func TestSubroundEndRound_ReceivedBlockHeaderFinalInfoShouldWork(t 
*testing.T) { PubKey: []byte("A"), } - sentTrackerInterface := sr.GetSentSignatureTracker() - sentTracker := sentTrackerInterface.(*mock.SentSignatureTrackerStub) - receivedActualSignersCalled := false - sentTracker.ReceivedActualSignersCalled = func(signersPks []string) { - receivedActualSignersCalled = true - } - res := sr.ReceivedBlockHeaderFinalInfo(&cnsData) assert.True(t, res) - assert.True(t, receivedActualSignersCalled) } func TestSubroundEndRound_ReceivedBlockHeaderFinalInfoShouldReturnFalseWhenFinalInfoIsNotValid(t *testing.T) { @@ -1322,7 +1314,7 @@ func TestSubroundEndRound_ReceivedInvalidSignersInfo(t *testing.T) { assert.False(t, res) }) - t.Run("received message for self leader", func(t *testing.T) { + t.Run("received message from self leader should return false", func(t *testing.T) { t.Parallel() container := mock.InitConsensusCore() @@ -1339,6 +1331,53 @@ func TestSubroundEndRound_ReceivedInvalidSignersInfo(t *testing.T) { assert.False(t, res) }) + t.Run("received message from self multikey leader should return false", func(t *testing.T) { + t.Parallel() + + container := mock.InitConsensusCore() + keysHandler := &testscommon.KeysHandlerStub{ + IsKeyManagedByCurrentNodeCalled: func(pkBytes []byte) bool { + return string(pkBytes) == "A" + }, + } + ch := make(chan bool, 1) + consensusState := initConsensusStateWithKeysHandler(keysHandler) + sr, _ := spos.NewSubround( + bls.SrSignature, + bls.SrEndRound, + -1, + int64(85*roundTimeDuration/100), + int64(95*roundTimeDuration/100), + "(END_ROUND)", + consensusState, + ch, + executeStoredMessages, + container, + chainID, + currentPid, + &statusHandler.AppStatusHandlerStub{}, + ) + + srEndRound, _ := bls.NewSubroundEndRound( + sr, + extend, + bls.ProcessingThresholdPercent, + displayStatistics, + &statusHandler.AppStatusHandlerStub{}, + &testscommon.SentSignatureTrackerStub{}, + ) + + srEndRound.SetSelfPubKey("A") + + cnsData := consensus.Message{ + BlockHeaderHash: []byte("X"), + PubKey: []byte("A"), + } + + res := srEndRound.ReceivedInvalidSignersInfo(&cnsData) + assert.False(t, res) + }) + t.Run("received hash does not match the hash from current consensus state", func(t *testing.T) { t.Parallel() @@ -1556,29 +1595,60 @@ func TestVerifyInvalidSigners(t *testing.T) { func TestSubroundEndRound_CreateAndBroadcastInvalidSigners(t *testing.T) { t.Parallel() - wg := &sync.WaitGroup{} - wg.Add(1) + t.Run("redundancy node should not send while main is active", func(t *testing.T) { + t.Parallel() - expectedInvalidSigners := []byte("invalid signers") + expectedInvalidSigners := []byte("invalid signers") - wasCalled := false - container := mock.InitConsensusCore() - messenger := &mock.BroadcastMessengerMock{ - BroadcastConsensusMessageCalled: func(message *consensus.Message) error { - wg.Done() - assert.Equal(t, expectedInvalidSigners, message.InvalidSigners) - wasCalled = true - return nil - }, - } - container.SetBroadcastMessenger(messenger) - sr := *initSubroundEndRoundWithContainer(container, &statusHandler.AppStatusHandlerStub{}) + container := mock.InitConsensusCore() + nodeRedundancy := &mock.NodeRedundancyHandlerStub{ + IsRedundancyNodeCalled: func() bool { + return true + }, + IsMainMachineActiveCalled: func() bool { + return true + }, + } + container.SetNodeRedundancyHandler(nodeRedundancy) + messenger := &mock.BroadcastMessengerMock{ + BroadcastConsensusMessageCalled: func(message *consensus.Message) error { + assert.Fail(t, "should have not been called") + return nil + }, + } + container.SetBroadcastMessenger(messenger) + sr := 
*initSubroundEndRoundWithContainer(container, &statusHandler.AppStatusHandlerStub{}) + + sr.CreateAndBroadcastInvalidSigners(expectedInvalidSigners) + }) + t.Run("should work", func(t *testing.T) { + t.Parallel() - sr.CreateAndBroadcastInvalidSigners(expectedInvalidSigners) + wg := &sync.WaitGroup{} + wg.Add(1) - wg.Wait() + expectedInvalidSigners := []byte("invalid signers") - require.True(t, wasCalled) + wasCalled := false + container := mock.InitConsensusCore() + messenger := &mock.BroadcastMessengerMock{ + BroadcastConsensusMessageCalled: func(message *consensus.Message) error { + assert.Equal(t, expectedInvalidSigners, message.InvalidSigners) + wasCalled = true + wg.Done() + return nil + }, + } + container.SetBroadcastMessenger(messenger) + sr := *initSubroundEndRoundWithContainer(container, &statusHandler.AppStatusHandlerStub{}) + sr.SetSelfPubKey("A") + + sr.CreateAndBroadcastInvalidSigners(expectedInvalidSigners) + + wg.Wait() + + require.True(t, wasCalled) + }) } func TestGetFullMessagesForInvalidSigners(t *testing.T) { @@ -1665,7 +1735,7 @@ func TestSubroundEndRound_getMinConsensusGroupIndexOfManagedKeys(t *testing.T) { bls.ProcessingThresholdPercent, displayStatistics, &statusHandler.AppStatusHandlerStub{}, - &mock.SentSignatureTrackerStub{}, + &testscommon.SentSignatureTrackerStub{}, ) t.Run("no managed keys from consensus group", func(t *testing.T) { diff --git a/consensus/spos/bls/subroundSignature.go b/consensus/spos/bls/subroundSignature.go index 84892d660fe..ac06cc72fdd 100644 --- a/consensus/spos/bls/subroundSignature.go +++ b/consensus/spos/bls/subroundSignature.go @@ -39,7 +39,7 @@ func NewSubroundSignature( return nil, spos.ErrNilAppStatusHandler } if check.IfNil(sentSignatureTracker) { - return nil, spos.ErrNilSentSignatureTracker + return nil, ErrNilSentSignatureTracker } srSignature := subroundSignature{ diff --git a/consensus/spos/bls/subroundSignature_test.go b/consensus/spos/bls/subroundSignature_test.go index d12e00b52c0..9ee8a03ba19 100644 --- a/consensus/spos/bls/subroundSignature_test.go +++ b/consensus/spos/bls/subroundSignature_test.go @@ -41,7 +41,7 @@ func initSubroundSignatureWithContainer(container *mock.ConsensusCoreMock) bls.S sr, extend, &statusHandler.AppStatusHandlerStub{}, - &mock.SentSignatureTrackerStub{}, + &testscommon.SentSignatureTrackerStub{}, ) return srSignature @@ -82,7 +82,7 @@ func TestNewSubroundSignature(t *testing.T) { nil, extend, &statusHandler.AppStatusHandlerStub{}, - &mock.SentSignatureTrackerStub{}, + &testscommon.SentSignatureTrackerStub{}, ) assert.Nil(t, srSignature) @@ -95,7 +95,7 @@ func TestNewSubroundSignature(t *testing.T) { sr, nil, &statusHandler.AppStatusHandlerStub{}, - &mock.SentSignatureTrackerStub{}, + &testscommon.SentSignatureTrackerStub{}, ) assert.Nil(t, srSignature) @@ -108,7 +108,7 @@ func TestNewSubroundSignature(t *testing.T) { sr, extend, nil, - &mock.SentSignatureTrackerStub{}, + &testscommon.SentSignatureTrackerStub{}, ) assert.Nil(t, srSignature) @@ -125,7 +125,7 @@ func TestNewSubroundSignature(t *testing.T) { ) assert.Nil(t, srSignature) - assert.Equal(t, spos.ErrNilSentSignatureTracker, err) + assert.Equal(t, bls.ErrNilSentSignatureTracker, err) }) } @@ -157,7 +157,7 @@ func TestSubroundSignature_NewSubroundSignatureNilConsensusStateShouldFail(t *te sr, extend, &statusHandler.AppStatusHandlerStub{}, - &mock.SentSignatureTrackerStub{}, + &testscommon.SentSignatureTrackerStub{}, ) assert.True(t, check.IfNil(srSignature)) @@ -191,7 +191,7 @@ func 
TestSubroundSignature_NewSubroundSignatureNilHasherShouldFail(t *testing.T) sr, extend, &statusHandler.AppStatusHandlerStub{}, - &mock.SentSignatureTrackerStub{}, + &testscommon.SentSignatureTrackerStub{}, ) assert.True(t, check.IfNil(srSignature)) @@ -225,7 +225,7 @@ func TestSubroundSignature_NewSubroundSignatureNilMultiSignerContainerShouldFail sr, extend, &statusHandler.AppStatusHandlerStub{}, - &mock.SentSignatureTrackerStub{}, + &testscommon.SentSignatureTrackerStub{}, ) assert.True(t, check.IfNil(srSignature)) @@ -260,7 +260,7 @@ func TestSubroundSignature_NewSubroundSignatureNilRoundHandlerShouldFail(t *test sr, extend, &statusHandler.AppStatusHandlerStub{}, - &mock.SentSignatureTrackerStub{}, + &testscommon.SentSignatureTrackerStub{}, ) assert.True(t, check.IfNil(srSignature)) @@ -294,7 +294,7 @@ func TestSubroundSignature_NewSubroundSignatureNilSyncTimerShouldFail(t *testing sr, extend, &statusHandler.AppStatusHandlerStub{}, - &mock.SentSignatureTrackerStub{}, + &testscommon.SentSignatureTrackerStub{}, ) assert.True(t, check.IfNil(srSignature)) @@ -328,7 +328,7 @@ func TestSubroundSignature_NewSubroundSignatureShouldWork(t *testing.T) { sr, extend, &statusHandler.AppStatusHandlerStub{}, - &mock.SentSignatureTrackerStub{}, + &testscommon.SentSignatureTrackerStub{}, ) assert.False(t, check.IfNil(srSignature)) @@ -411,7 +411,7 @@ func TestSubroundSignature_DoSignatureJobWithMultikey(t *testing.T) { sr, extend, &statusHandler.AppStatusHandlerStub{}, - &mock.SentSignatureTrackerStub{ + &testscommon.SentSignatureTrackerStub{ SignatureSentCalled: func(pkBytes []byte) { signatureSentForPks[string(pkBytes)] = struct{}{} }, diff --git a/consensus/spos/bls/subroundStartRound.go b/consensus/spos/bls/subroundStartRound.go index c622779fdac..571270dd774 100644 --- a/consensus/spos/bls/subroundStartRound.go +++ b/consensus/spos/bls/subroundStartRound.go @@ -54,7 +54,7 @@ func NewSubroundStartRound( return nil, fmt.Errorf("%w for resetConsensusMessages function", spos.ErrNilFunctionHandler) } if check.IfNil(sentSignatureTracker) { - return nil, spos.ErrNilSentSignatureTracker + return nil, ErrNilSentSignatureTracker } srStartRound := subroundStartRound{ @@ -155,6 +155,8 @@ func (sr *subroundStartRound) initCurrentRound() bool { sr.ConsensusGroup(), sr.RoundHandler().Index(), ) + // we should not return here, the multikey redundancy system relies on it + // the NodeRedundancyHandler "thinks" it is in redundancy mode even if we use the multikey redundancy system } leader, err := sr.GetLeader() diff --git a/consensus/spos/bls/subroundStartRound_test.go b/consensus/spos/bls/subroundStartRound_test.go index 51c96117dbc..2f5c21d2659 100644 --- a/consensus/spos/bls/subroundStartRound_test.go +++ b/consensus/spos/bls/subroundStartRound_test.go @@ -23,7 +23,7 @@ func defaultSubroundStartRoundFromSubround(sr *spos.Subround) (bls.SubroundStart bls.ProcessingThresholdPercent, executeStoredMessages, resetConsensusMessages, - &mock.SentSignatureTrackerStub{}, + &testscommon.SentSignatureTrackerStub{}, ) return startRound, err @@ -36,7 +36,7 @@ func defaultWithoutErrorSubroundStartRoundFromSubround(sr *spos.Subround) bls.Su bls.ProcessingThresholdPercent, executeStoredMessages, resetConsensusMessages, - &mock.SentSignatureTrackerStub{}, + &testscommon.SentSignatureTrackerStub{}, ) return startRound @@ -75,7 +75,7 @@ func initSubroundStartRoundWithContainer(container spos.ConsensusCoreHandler) bl bls.ProcessingThresholdPercent, executeStoredMessages, resetConsensusMessages, - &mock.SentSignatureTrackerStub{}, + 
&testscommon.SentSignatureTrackerStub{}, ) return srStartRound @@ -117,7 +117,7 @@ func TestNewSubroundStartRound(t *testing.T) { bls.ProcessingThresholdPercent, executeStoredMessages, resetConsensusMessages, - &mock.SentSignatureTrackerStub{}, + &testscommon.SentSignatureTrackerStub{}, ) assert.Nil(t, srStartRound) @@ -132,7 +132,7 @@ func TestNewSubroundStartRound(t *testing.T) { bls.ProcessingThresholdPercent, executeStoredMessages, resetConsensusMessages, - &mock.SentSignatureTrackerStub{}, + &testscommon.SentSignatureTrackerStub{}, ) assert.Nil(t, srStartRound) @@ -148,7 +148,7 @@ func TestNewSubroundStartRound(t *testing.T) { bls.ProcessingThresholdPercent, nil, resetConsensusMessages, - &mock.SentSignatureTrackerStub{}, + &testscommon.SentSignatureTrackerStub{}, ) assert.Nil(t, srStartRound) @@ -164,7 +164,7 @@ func TestNewSubroundStartRound(t *testing.T) { bls.ProcessingThresholdPercent, executeStoredMessages, nil, - &mock.SentSignatureTrackerStub{}, + &testscommon.SentSignatureTrackerStub{}, ) assert.Nil(t, srStartRound) @@ -184,7 +184,7 @@ func TestNewSubroundStartRound(t *testing.T) { ) assert.Nil(t, srStartRound) - assert.Equal(t, spos.ErrNilSentSignatureTracker, err) + assert.Equal(t, bls.ErrNilSentSignatureTracker, err) }) } @@ -366,7 +366,7 @@ func TestSubroundStartRound_DoStartRoundConsensusCheckShouldReturnTrueWhenInitCu sr := *initSubroundStartRoundWithContainer(container) sentTrackerInterface := sr.GetSentSignatureTracker() - sentTracker := sentTrackerInterface.(*mock.SentSignatureTrackerStub) + sentTracker := sentTrackerInterface.(*testscommon.SentSignatureTrackerStub) startRoundCalled := false sentTracker.StartRoundCalled = func() { startRoundCalled = true @@ -561,7 +561,7 @@ func TestSubroundStartRound_InitCurrentRoundShouldMetrics(t *testing.T) { bls.ProcessingThresholdPercent, displayStatistics, executeStoredMessages, - &mock.SentSignatureTrackerStub{}, + &testscommon.SentSignatureTrackerStub{}, ) srStartRound.Check() assert.True(t, wasCalled) @@ -615,7 +615,7 @@ func TestSubroundStartRound_InitCurrentRoundShouldMetrics(t *testing.T) { bls.ProcessingThresholdPercent, displayStatistics, executeStoredMessages, - &mock.SentSignatureTrackerStub{}, + &testscommon.SentSignatureTrackerStub{}, ) srStartRound.Check() assert.True(t, wasCalled) @@ -668,7 +668,7 @@ func TestSubroundStartRound_InitCurrentRoundShouldMetrics(t *testing.T) { bls.ProcessingThresholdPercent, displayStatistics, executeStoredMessages, - &mock.SentSignatureTrackerStub{}, + &testscommon.SentSignatureTrackerStub{}, ) srStartRound.Check() assert.True(t, wasCalled) @@ -732,7 +732,7 @@ func TestSubroundStartRound_InitCurrentRoundShouldMetrics(t *testing.T) { bls.ProcessingThresholdPercent, displayStatistics, executeStoredMessages, - &mock.SentSignatureTrackerStub{}, + &testscommon.SentSignatureTrackerStub{}, ) srStartRound.Check() assert.True(t, wasMetricConsensusStateCalled) @@ -800,7 +800,7 @@ func TestSubroundStartRound_InitCurrentRoundShouldMetrics(t *testing.T) { bls.ProcessingThresholdPercent, displayStatistics, executeStoredMessages, - &mock.SentSignatureTrackerStub{}, + &testscommon.SentSignatureTrackerStub{}, ) srStartRound.Check() assert.True(t, wasMetricConsensusStateCalled) diff --git a/consensus/spos/consensusCore.go b/consensus/spos/consensusCore.go index 1edfb09b5fc..2cf7ca369d6 100644 --- a/consensus/spos/consensusCore.go +++ b/consensus/spos/consensusCore.go @@ -14,7 +14,7 @@ import ( ) // ConsensusCore implements ConsensusCoreHandler and provides access to common functionality -// for the 
rest of the consensus structures +// for the rest of the consensus structures type ConsensusCore struct { blockChain data.ChainHandler blockProcessor process.BlockProcessor @@ -148,7 +148,7 @@ func (cc *ConsensusCore) MultiSignerContainer() cryptoCommon.MultiSignerContaine return cc.multiSignerContainer } -//RoundHandler gets the RoundHandler stored in the ConsensusCore +// RoundHandler gets the RoundHandler stored in the ConsensusCore func (cc *ConsensusCore) RoundHandler() consensus.RoundHandler { return cc.roundHandler } @@ -158,7 +158,7 @@ func (cc *ConsensusCore) ShardCoordinator() sharding.Coordinator { return cc.shardCoordinator } -//SyncTimer gets the SyncTimer stored in the ConsensusCore +// SyncTimer gets the SyncTimer stored in the ConsensusCore func (cc *ConsensusCore) SyncTimer() ntp.SyncTimer { return cc.syncTimer } diff --git a/consensus/spos/consensusState.go b/consensus/spos/consensusState.go index c3f48919d83..564b3def852 100644 --- a/consensus/spos/consensusState.go +++ b/consensus/spos/consensusState.go @@ -380,6 +380,11 @@ func (cns *ConsensusState) IsMultiKeyJobDone(currentSubroundId int) bool { return true } +// GetMultikeyRedundancyStepInReason returns the reason if the current node stepped in as a multikey redundancy node +func (cns *ConsensusState) GetMultikeyRedundancyStepInReason() string { + return cns.keysHandler.GetRedundancyStepInReason() +} + // ResetRoundsWithoutReceivedMessages will reset the rounds received without a message for a specified public key by // providing also the peer ID from the received message func (cns *ConsensusState) ResetRoundsWithoutReceivedMessages(pkBytes []byte, pid core.PeerID) { diff --git a/consensus/spos/consensusState_test.go b/consensus/spos/consensusState_test.go index 74c8426f197..554c9c0c755 100644 --- a/consensus/spos/consensusState_test.go +++ b/consensus/spos/consensusState_test.go @@ -5,6 +5,7 @@ import ( "errors" "testing" + "github.com/multiversx/mx-chain-core-go/core" "github.com/multiversx/mx-chain-core-go/data/block" "github.com/multiversx/mx-chain-go/consensus" "github.com/multiversx/mx-chain-go/consensus/spos" @@ -582,3 +583,37 @@ func TestConsensusState_IsMultiKeyJobDone(t *testing.T) { assert.True(t, cns.IsMultiKeyJobDone(0)) }) } + +func TestConsensusState_GetMultikeyRedundancyStepInReason(t *testing.T) { + t.Parallel() + + expectedString := "expected string" + keysHandler := &testscommon.KeysHandlerStub{ + GetRedundancyStepInReasonCalled: func() string { + return expectedString + }, + } + cns := internalInitConsensusStateWithKeysHandler(keysHandler) + + assert.Equal(t, expectedString, cns.GetMultikeyRedundancyStepInReason()) +} + +func TestConsensusState_ResetRoundsWithoutReceivedMessages(t *testing.T) { + t.Parallel() + + resetRoundsWithoutReceivedMessagesCalled := false + testPkBytes := []byte("pk bytes") + testPid := core.PeerID("pid") + + keysHandler := &testscommon.KeysHandlerStub{ + ResetRoundsWithoutReceivedMessagesCalled: func(pkBytes []byte, pid core.PeerID) { + resetRoundsWithoutReceivedMessagesCalled = true + assert.Equal(t, testPkBytes, pkBytes) + assert.Equal(t, testPid, pid) + }, + } + cns := internalInitConsensusStateWithKeysHandler(keysHandler) + + cns.ResetRoundsWithoutReceivedMessages(testPkBytes, testPid) + assert.True(t, resetRoundsWithoutReceivedMessagesCalled) +} diff --git a/consensus/spos/errors.go b/consensus/spos/errors.go index c8b5cede565..3aeac029da3 100644 --- a/consensus/spos/errors.go +++ b/consensus/spos/errors.go @@ -238,8 +238,8 @@ var ErrNilSigningHandler = 
errors.New("nil signing handler") // ErrNilKeysHandler signals that a nil keys handler was provided var ErrNilKeysHandler = errors.New("nil keys handler") -// ErrNilSentSignatureTracker defines the error for setting a nil SentSignatureTracker -var ErrNilSentSignatureTracker = errors.New("nil sent signature tracker") - // ErrNilFunctionHandler signals that a nil function handler was provided var ErrNilFunctionHandler = errors.New("nil function handler") + +// ErrWrongHashForHeader signals that the hash of the header is not the expected one +var ErrWrongHashForHeader = errors.New("wrong hash for header") diff --git a/consensus/spos/export_test.go b/consensus/spos/export_test.go index 3a02e7b27fb..39d19de6e30 100644 --- a/consensus/spos/export_test.go +++ b/consensus/spos/export_test.go @@ -10,6 +10,9 @@ import ( "github.com/multiversx/mx-chain-go/process" ) +// RedundancySingleKeySteppedIn exposes the redundancySingleKeySteppedIn constant +const RedundancySingleKeySteppedIn = redundancySingleKeySteppedIn + type RoundConsensus struct { *roundConsensus } @@ -173,6 +176,16 @@ func (wrk *Worker) CheckSelfState(cnsDta *consensus.Message) error { return wrk.checkSelfState(cnsDta) } +// SetRedundancyHandler - +func (wrk *Worker) SetRedundancyHandler(redundancyHandler consensus.NodeRedundancyHandler) { + wrk.nodeRedundancyHandler = redundancyHandler +} + +// SetKeysHandler - +func (wrk *Worker) SetKeysHandler(keysHandler consensus.KeysHandler) { + wrk.consensusState.keysHandler = keysHandler +} + // EligibleList - func (rcns *RoundConsensus) EligibleList() map[string]struct{} { return rcns.eligibleNodes diff --git a/consensus/spos/interface.go b/consensus/spos/interface.go index 235c139d2fb..0ca771d30e5 100644 --- a/consensus/spos/interface.go +++ b/consensus/spos/interface.go @@ -175,6 +175,5 @@ type PeerBlackListCacher interface { type SentSignaturesTracker interface { StartRound() SignatureSent(pkBytes []byte) - ReceivedActualSigners(signersPks []string) IsInterfaceNil() bool } diff --git a/consensus/spos/sposFactory/sposFactory_test.go b/consensus/spos/sposFactory/sposFactory_test.go index 090f5b19f0a..4a672a3343f 100644 --- a/consensus/spos/sposFactory/sposFactory_test.go +++ b/consensus/spos/sposFactory/sposFactory_test.go @@ -52,7 +52,7 @@ func TestGetSubroundsFactory_BlsNilConsensusCoreShouldErr(t *testing.T) { consensusType, statusHandler, indexer, - &mock.SentSignatureTrackerStub{}, + &testscommon.SentSignatureTrackerStub{}, chainID, currentPid, ) @@ -76,7 +76,7 @@ func TestGetSubroundsFactory_BlsNilStatusHandlerShouldErr(t *testing.T) { consensusType, nil, indexer, - &mock.SentSignatureTrackerStub{}, + &testscommon.SentSignatureTrackerStub{}, chainID, currentPid, ) @@ -101,7 +101,7 @@ func TestGetSubroundsFactory_BlsShouldWork(t *testing.T) { consensusType, statusHandler, indexer, - &mock.SentSignatureTrackerStub{}, + &testscommon.SentSignatureTrackerStub{}, chainID, currentPid, ) diff --git a/consensus/spos/worker.go b/consensus/spos/worker.go index 8fdcca4686f..f11e40d3089 100644 --- a/consensus/spos/worker.go +++ b/consensus/spos/worker.go @@ -1,6 +1,7 @@ package spos import ( + "bytes" "context" "encoding/hex" "errors" @@ -17,6 +18,7 @@ import ( "github.com/multiversx/mx-chain-core-go/hashing" "github.com/multiversx/mx-chain-core-go/marshal" crypto "github.com/multiversx/mx-chain-crypto-go" + "github.com/multiversx/mx-chain-go/common" "github.com/multiversx/mx-chain-go/consensus" errorsErd "github.com/multiversx/mx-chain-go/errors" @@ -31,6 +33,7 @@ var _ closing.Closer = 
(*Worker)(nil) // sleepTime defines the time in milliseconds between each iteration made in checkChannels method const sleepTime = 5 * time.Millisecond +const redundancySingleKeySteppedIn = "single-key node stepped in" // Worker defines the data needed by spos to communicate between nodes which are in the validators group type Worker struct { @@ -484,6 +487,11 @@ func (wrk *Worker) doJobOnMessageWithHeader(cnsMsg *consensus.Message) error { "nbTxs", header.GetTxCount(), "val stats root hash", valStatsRootHash) + if !wrk.verifyHeaderHash(headerHash, cnsMsg.Header) { + return fmt.Errorf("%w : received header from consensus with wrong hash", + ErrWrongHashForHeader) + } + err = wrk.headerIntegrityVerifier.Verify(header) if err != nil { return fmt.Errorf("%w : verify header integrity from consensus topic failed", err) @@ -508,6 +516,11 @@ func (wrk *Worker) doJobOnMessageWithHeader(cnsMsg *consensus.Message) error { return nil } +func (wrk *Worker) verifyHeaderHash(hash []byte, marshalledHeader []byte) bool { + computedHash := wrk.hasher.Compute(string(marshalledHeader)) + return bytes.Equal(hash, computedHash) +} + func (wrk *Worker) doJobOnMessageWithSignature(cnsMsg *consensus.Message, p2pMsg p2p.MessageP2P) { wrk.mutDisplayHashConsensusMessage.Lock() defer wrk.mutDisplayHashConsensusMessage.Unlock() @@ -545,7 +558,20 @@ func (wrk *Worker) processReceivedHeaderMetric(cnsDta *consensus.Message) { } percent := sinceRoundStart * 100 / wrk.roundHandler.TimeDuration() wrk.appStatusHandler.SetUInt64Value(common.MetricReceivedProposedBlock, uint64(percent)) - wrk.appStatusHandler.SetStringValue(common.MetricRedundancyIsMainActive, strconv.FormatBool(wrk.nodeRedundancyHandler.IsMainMachineActive())) + + isMainMachineActive, redundancyReason := wrk.computeRedundancyMetrics() + wrk.appStatusHandler.SetStringValue(common.MetricRedundancyIsMainActive, strconv.FormatBool(isMainMachineActive)) + wrk.appStatusHandler.SetStringValue(common.MetricRedundancyStepInReason, redundancyReason) +} + +func (wrk *Worker) computeRedundancyMetrics() (bool, string) { + if !wrk.nodeRedundancyHandler.IsMainMachineActive() { + return false, redundancySingleKeySteppedIn + } + + reason := wrk.consensusState.GetMultikeyRedundancyStepInReason() + + return len(reason) == 0, reason } func (wrk *Worker) checkSelfState(cnsDta *consensus.Message) error { diff --git a/consensus/spos/worker_test.go b/consensus/spos/worker_test.go index 37cc36f33c1..b179fdf0db8 100644 --- a/consensus/spos/worker_test.go +++ b/consensus/spos/worker_test.go @@ -6,6 +6,7 @@ import ( "errors" "fmt" "math/big" + "strconv" "sync/atomic" "testing" "time" @@ -15,6 +16,9 @@ import ( "github.com/multiversx/mx-chain-core-go/data" "github.com/multiversx/mx-chain-core-go/data/block" crypto "github.com/multiversx/mx-chain-crypto-go" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/multiversx/mx-chain-go/common" "github.com/multiversx/mx-chain-go/consensus" "github.com/multiversx/mx-chain-go/consensus/mock" @@ -26,8 +30,6 @@ import ( "github.com/multiversx/mx-chain-go/testscommon/hashingMocks" "github.com/multiversx/mx-chain-go/testscommon/p2pmocks" statusHandlerMock "github.com/multiversx/mx-chain-go/testscommon/statusHandler" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" ) const roundTimeDuration = 100 * time.Millisecond @@ -628,13 +630,21 @@ func TestWorker_ProcessReceivedMessageComputeReceivedProposedBlockMetric(t *test delay := time.Millisecond * 430 roundStartTimeStamp := time.Now() 
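Before the refactored assertions below, here is a small, self-contained sketch of the decision rule applied by the new computeRedundancyMetrics helper in worker.go; only the rule itself is taken from the diff, while the standalone wrapper and sample inputs are illustrative. The three sub-tests that follow exercise exactly these cases.

package main

import "fmt"

const redundancySingleKeySteppedIn = "single-key node stepped in"

// computeRedundancyMetrics mirrors the rule from worker.go:
//   - main machine inactive          -> main not active, single-key step-in reason
//   - multikey step-in reason is set -> main not active, that reason
//   - otherwise                      -> main active, empty reason
func computeRedundancyMetrics(isMainMachineActive bool, multikeyStepInReason string) (bool, string) {
	if !isMainMachineActive {
		return false, redundancySingleKeySteppedIn
	}

	return len(multikeyStepInReason) == 0, multikeyStepInReason
}

func main() {
	fmt.Println(computeRedundancyMetrics(true, ""))                        // true, ""
	fmt.Println(computeRedundancyMetrics(false, ""))                       // false, "single-key node stepped in"
	fmt.Println(computeRedundancyMetrics(true, "multikey step in reason")) // false, "multikey step in reason"
}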
- receivedValue := testWorkerProcessReceivedMessageComputeReceivedProposedBlockMetric(roundStartTimeStamp, delay, roundDuration) + receivedValue, redundancyReason, redundancyStatus := testWorkerProcessReceivedMessageComputeReceivedProposedBlockMetric( + t, + roundStartTimeStamp, + delay, + roundDuration, + &mock.NodeRedundancyHandlerStub{}, + &testscommon.KeysHandlerStub{}) minimumExpectedValue := uint64(delay * 100 / roundDuration) assert.True(t, receivedValue >= minimumExpectedValue, fmt.Sprintf("minimum expected was %d, got %d", minimumExpectedValue, receivedValue), ) + assert.Empty(t, redundancyReason) + assert.True(t, redundancyStatus) }) t.Run("time.Since returns negative value", func(t *testing.T) { // test the edgecase when the returned NTP time stored in the round handler is @@ -645,23 +655,101 @@ func TestWorker_ProcessReceivedMessageComputeReceivedProposedBlockMetric(t *test delay := time.Millisecond * 430 roundStartTimeStamp := time.Now().Add(time.Minute) - receivedValue := testWorkerProcessReceivedMessageComputeReceivedProposedBlockMetric(roundStartTimeStamp, delay, roundDuration) + receivedValue, redundancyReason, redundancyStatus := testWorkerProcessReceivedMessageComputeReceivedProposedBlockMetric( + t, + roundStartTimeStamp, + delay, + roundDuration, + &mock.NodeRedundancyHandlerStub{}, + &testscommon.KeysHandlerStub{}) assert.Zero(t, receivedValue) + assert.Empty(t, redundancyReason) + assert.True(t, redundancyStatus) + }) + t.Run("normal operation as a single-key redundancy node", func(t *testing.T) { + t.Parallel() + + roundDuration := time.Millisecond * 1000 + delay := time.Millisecond * 430 + roundStartTimeStamp := time.Now() + + receivedValue, redundancyReason, redundancyStatus := testWorkerProcessReceivedMessageComputeReceivedProposedBlockMetric( + t, + roundStartTimeStamp, + delay, + roundDuration, + &mock.NodeRedundancyHandlerStub{ + IsMainMachineActiveCalled: func() bool { + return false + }, + }, + &testscommon.KeysHandlerStub{}) + + minimumExpectedValue := uint64(delay * 100 / roundDuration) + assert.True(t, + receivedValue >= minimumExpectedValue, + fmt.Sprintf("minimum expected was %d, got %d", minimumExpectedValue, receivedValue), + ) + assert.Equal(t, spos.RedundancySingleKeySteppedIn, redundancyReason) + assert.False(t, redundancyStatus) + }) + t.Run("normal operation as a multikey-key redundancy node", func(t *testing.T) { + t.Parallel() + + roundDuration := time.Millisecond * 1000 + delay := time.Millisecond * 430 + roundStartTimeStamp := time.Now() + + multikeyReason := "multikey step in reason" + receivedValue, redundancyReason, redundancyStatus := testWorkerProcessReceivedMessageComputeReceivedProposedBlockMetric( + t, + roundStartTimeStamp, + delay, + roundDuration, + &mock.NodeRedundancyHandlerStub{}, + &testscommon.KeysHandlerStub{ + GetRedundancyStepInReasonCalled: func() string { + return multikeyReason + }, + }) + + minimumExpectedValue := uint64(delay * 100 / roundDuration) + assert.True(t, + receivedValue >= minimumExpectedValue, + fmt.Sprintf("minimum expected was %d, got %d", minimumExpectedValue, receivedValue), + ) + assert.Equal(t, multikeyReason, redundancyReason) + assert.False(t, redundancyStatus) }) } func testWorkerProcessReceivedMessageComputeReceivedProposedBlockMetric( + t *testing.T, roundStartTimeStamp time.Time, delay time.Duration, roundDuration time.Duration, -) uint64 { + redundancyHandler consensus.NodeRedundancyHandler, + keysHandler consensus.KeysHandler, +) (uint64, string, bool) { marshaller := mock.MarshalizerMock{} 
receivedValue := uint64(0) + redundancyReason := "" + redundancyStatus := false wrk := *initWorker(&statusHandlerMock.AppStatusHandlerStub{ SetUInt64ValueHandler: func(key string, value uint64) { receivedValue = value }, + SetStringValueHandler: func(key string, value string) { + if key == common.MetricRedundancyIsMainActive { + var err error + redundancyStatus, err = strconv.ParseBool(value) + assert.Nil(t, err) + } + if key == common.MetricRedundancyStepInReason { + redundancyReason = value + } + }, }) wrk.SetBlockProcessor(&testscommon.BlockProcessorStub{ DecodeBlockHeaderCalled: func(dta []byte) data.HeaderHandler { @@ -686,6 +774,8 @@ func testWorkerProcessReceivedMessageComputeReceivedProposedBlockMetric( return roundStartTimeStamp }, }) + wrk.SetRedundancyHandler(redundancyHandler) + wrk.SetKeysHandler(keysHandler) hdr := &block.Header{ ChainID: chainID, PrevHash: []byte("prev hash"), @@ -725,7 +815,7 @@ func testWorkerProcessReceivedMessageComputeReceivedProposedBlockMetric( } _ = wrk.ProcessReceivedMessage(msg, "", &p2pmocks.MessengerStub{}) - return receivedValue + return receivedValue, redundancyReason, redundancyStatus } func TestWorker_ProcessReceivedMessageInconsistentChainIDInConsensusMessageShouldErr(t *testing.T) { @@ -1163,6 +1253,64 @@ func TestWorker_ProcessReceivedMessageWithABadOriginatorShouldErr(t *testing.T) assert.True(t, errors.Is(err, spos.ErrOriginatorMismatch)) } +func TestWorker_ProcessReceivedMessageWithHeaderAndWrongHash(t *testing.T) { + t.Parallel() + + workerArgs := createDefaultWorkerArgs(&statusHandlerMock.AppStatusHandlerStub{}) + wrk, _ := spos.NewWorker(workerArgs) + + wrk.SetBlockProcessor( + &testscommon.BlockProcessorStub{ + DecodeBlockHeaderCalled: func(dta []byte) data.HeaderHandler { + return &testscommon.HeaderHandlerStub{ + CheckChainIDCalled: func(reference []byte) error { + return nil + }, + GetPrevHashCalled: func() []byte { + return make([]byte, 0) + }, + } + }, + RevertCurrentBlockCalled: func() { + }, + DecodeBlockBodyCalled: func(dta []byte) data.BodyHandler { + return nil + }, + }, + ) + + hdr := &block.Header{ChainID: chainID} + hdrHash := make([]byte, 32) // wrong hash + hdrStr, _ := mock.MarshalizerMock{}.Marshal(hdr) + cnsMsg := consensus.NewConsensusMessage( + hdrHash, + nil, + nil, + hdrStr, + []byte(wrk.ConsensusState().ConsensusGroup()[0]), + signature, + int(bls.MtBlockHeader), + 0, + chainID, + nil, + nil, + nil, + currentPid, + nil, + ) + buff, _ := wrk.Marshalizer().Marshal(cnsMsg) + msg := &p2pmocks.P2PMessageMock{ + DataField: buff, + PeerField: currentPid, + SignatureField: []byte("signature"), + } + err := wrk.ProcessReceivedMessage(msg, fromConnectedPeerId, &p2pmocks.MessengerStub{}) + time.Sleep(time.Second) + + assert.Equal(t, 0, len(wrk.ReceivedMessages()[bls.MtBlockHeader])) + assert.ErrorIs(t, err, spos.ErrWrongHashForHeader) +} + func TestWorker_ProcessReceivedMessageOkValsShouldWork(t *testing.T) { t.Parallel() diff --git a/dataRetriever/chainStorer.go b/dataRetriever/chainStorer.go index 88541d10077..933d4b97a51 100644 --- a/dataRetriever/chainStorer.go +++ b/dataRetriever/chainStorer.go @@ -10,7 +10,7 @@ import ( var _ StorageService = (*ChainStorer)(nil) // ChainStorer is a StorageService implementation that can hold multiple storages -// grouped by storage unit type +// grouped by storage unit type type ChainStorer struct { lock sync.RWMutex chain map[UnitType]storage.Storer diff --git a/dataRetriever/factory/resolverscontainer/args.go b/dataRetriever/factory/resolverscontainer/args.go index 
1446af01b97..d0001014a4d 100644 --- a/dataRetriever/factory/resolverscontainer/args.go +++ b/dataRetriever/factory/resolverscontainer/args.go @@ -11,21 +11,22 @@ import ( // FactoryArgs will hold the arguments for ResolversContainerFactory for both shard and meta type FactoryArgs struct { - NumConcurrentResolvingJobs int32 - ShardCoordinator sharding.Coordinator - MainMessenger p2p.Messenger - FullArchiveMessenger p2p.Messenger - Store dataRetriever.StorageService - Marshalizer marshal.Marshalizer - DataPools dataRetriever.PoolsHolder - Uint64ByteSliceConverter typeConverters.Uint64ByteSliceConverter - DataPacker dataRetriever.DataPacker - TriesContainer common.TriesHolder - InputAntifloodHandler dataRetriever.P2PAntifloodHandler - OutputAntifloodHandler dataRetriever.P2PAntifloodHandler - MainPreferredPeersHolder p2p.PreferredPeersHolderHandler - FullArchivePreferredPeersHolder p2p.PreferredPeersHolderHandler - SizeCheckDelta uint32 - IsFullHistoryNode bool - PayloadValidator dataRetriever.PeerAuthenticationPayloadValidator + NumConcurrentResolvingJobs int32 + NumConcurrentResolvingTrieNodesJobs int32 + ShardCoordinator sharding.Coordinator + MainMessenger p2p.Messenger + FullArchiveMessenger p2p.Messenger + Store dataRetriever.StorageService + Marshalizer marshal.Marshalizer + DataPools dataRetriever.PoolsHolder + Uint64ByteSliceConverter typeConverters.Uint64ByteSliceConverter + DataPacker dataRetriever.DataPacker + TriesContainer common.TriesHolder + InputAntifloodHandler dataRetriever.P2PAntifloodHandler + OutputAntifloodHandler dataRetriever.P2PAntifloodHandler + MainPreferredPeersHolder p2p.PreferredPeersHolderHandler + FullArchivePreferredPeersHolder p2p.PreferredPeersHolderHandler + SizeCheckDelta uint32 + IsFullHistoryNode bool + PayloadValidator dataRetriever.PeerAuthenticationPayloadValidator } diff --git a/dataRetriever/factory/resolverscontainer/baseResolversContainerFactory.go b/dataRetriever/factory/resolverscontainer/baseResolversContainerFactory.go index c1fc1e3a16b..3d0eff8eaa9 100644 --- a/dataRetriever/factory/resolverscontainer/baseResolversContainerFactory.go +++ b/dataRetriever/factory/resolverscontainer/baseResolversContainerFactory.go @@ -36,6 +36,7 @@ type baseResolversContainerFactory struct { inputAntifloodHandler dataRetriever.P2PAntifloodHandler outputAntifloodHandler dataRetriever.P2PAntifloodHandler throttler dataRetriever.ResolverThrottler + trieNodesThrottler dataRetriever.ResolverThrottler intraShardTopic string isFullHistoryNode bool mainPreferredPeersHolder dataRetriever.PreferredPeersHolderHandler @@ -78,7 +79,10 @@ func (brcf *baseResolversContainerFactory) checkParams() error { return fmt.Errorf("%w for output", dataRetriever.ErrNilAntifloodHandler) } if check.IfNil(brcf.throttler) { - return dataRetriever.ErrNilThrottler + return fmt.Errorf("%w for the main throttler", dataRetriever.ErrNilThrottler) + } + if check.IfNil(brcf.trieNodesThrottler) { + return fmt.Errorf("%w for the trie nodes throttler", dataRetriever.ErrNilThrottler) } if check.IfNil(brcf.mainPreferredPeersHolder) { return fmt.Errorf("%w for main network", dataRetriever.ErrNilPreferredPeersHolder) @@ -351,7 +355,7 @@ func (brcf *baseResolversContainerFactory) createTrieNodesResolver( SenderResolver: resolverSender, Marshaller: brcf.marshalizer, AntifloodHandler: brcf.inputAntifloodHandler, - Throttler: brcf.throttler, + Throttler: brcf.trieNodesThrottler, }, TrieDataGetter: trie, } diff --git a/dataRetriever/factory/resolverscontainer/metaResolversContainerFactory.go 
b/dataRetriever/factory/resolverscontainer/metaResolversContainerFactory.go index 426a978ae20..b72f8c3154a 100644 --- a/dataRetriever/factory/resolverscontainer/metaResolversContainerFactory.go +++ b/dataRetriever/factory/resolverscontainer/metaResolversContainerFactory.go @@ -27,7 +27,12 @@ func NewMetaResolversContainerFactory( args.Marshalizer = marshal.NewSizeCheckUnmarshalizer(args.Marshalizer, args.SizeCheckDelta) } - thr, err := throttler.NewNumGoRoutinesThrottler(args.NumConcurrentResolvingJobs) + mainThrottler, err := throttler.NewNumGoRoutinesThrottler(args.NumConcurrentResolvingJobs) + if err != nil { + return nil, err + } + + trieNodesThrottler, err := throttler.NewNumGoRoutinesThrottler(args.NumConcurrentResolvingTrieNodesJobs) if err != nil { return nil, err } @@ -46,7 +51,8 @@ func NewMetaResolversContainerFactory( triesContainer: args.TriesContainer, inputAntifloodHandler: args.InputAntifloodHandler, outputAntifloodHandler: args.OutputAntifloodHandler, - throttler: thr, + throttler: mainThrottler, + trieNodesThrottler: trieNodesThrottler, isFullHistoryNode: args.IsFullHistoryNode, mainPreferredPeersHolder: args.MainPreferredPeersHolder, fullArchivePreferredPeersHolder: args.FullArchivePreferredPeersHolder, diff --git a/dataRetriever/factory/resolverscontainer/metaResolversContainerFactory_test.go b/dataRetriever/factory/resolverscontainer/metaResolversContainerFactory_test.go index c6659693d79..755672384cd 100644 --- a/dataRetriever/factory/resolverscontainer/metaResolversContainerFactory_test.go +++ b/dataRetriever/factory/resolverscontainer/metaResolversContainerFactory_test.go @@ -94,8 +94,15 @@ func TestNewMetaResolversContainerFactory_NewNumGoRoutinesThrottlerFailsShouldEr args := getArgumentsMeta() args.NumConcurrentResolvingJobs = 0 + rcf, err := resolverscontainer.NewMetaResolversContainerFactory(args) + assert.Nil(t, rcf) + assert.Equal(t, core.ErrNotPositiveValue, err) + + args.NumConcurrentResolvingJobs = 10 + args.NumConcurrentResolvingTrieNodesJobs = 0 + rcf, err = resolverscontainer.NewMetaResolversContainerFactory(args) assert.Nil(t, rcf) assert.Equal(t, core.ErrNotPositiveValue, err) } @@ -357,21 +364,22 @@ func TestMetaResolversContainerFactory_IsInterfaceNil(t *testing.T) { func getArgumentsMeta() resolverscontainer.FactoryArgs { return resolverscontainer.FactoryArgs{ - ShardCoordinator: mock.NewOneShardCoordinatorMock(), - MainMessenger: createStubMessengerForMeta("", ""), - FullArchiveMessenger: createStubMessengerForMeta("", ""), - Store: createStoreForMeta(), - Marshalizer: &mock.MarshalizerMock{}, - DataPools: createDataPoolsForMeta(), - Uint64ByteSliceConverter: &mock.Uint64ByteSliceConverterMock{}, - DataPacker: &mock.DataPackerStub{}, - TriesContainer: createTriesHolderForMeta(), - SizeCheckDelta: 0, - InputAntifloodHandler: &mock.P2PAntifloodHandlerStub{}, - OutputAntifloodHandler: &mock.P2PAntifloodHandlerStub{}, - NumConcurrentResolvingJobs: 10, - MainPreferredPeersHolder: &p2pmocks.PeersHolderStub{}, - FullArchivePreferredPeersHolder: &p2pmocks.PeersHolderStub{}, - PayloadValidator: &testscommon.PeerAuthenticationPayloadValidatorStub{}, + ShardCoordinator: mock.NewOneShardCoordinatorMock(), + MainMessenger: createStubMessengerForMeta("", ""), + FullArchiveMessenger: createStubMessengerForMeta("", ""), + Store: createStoreForMeta(), + Marshalizer: &mock.MarshalizerMock{}, + DataPools: createDataPoolsForMeta(), + Uint64ByteSliceConverter: &mock.Uint64ByteSliceConverterMock{}, + DataPacker: &mock.DataPackerStub{}, + TriesContainer: 
createTriesHolderForMeta(), + SizeCheckDelta: 0, + InputAntifloodHandler: &mock.P2PAntifloodHandlerStub{}, + OutputAntifloodHandler: &mock.P2PAntifloodHandlerStub{}, + NumConcurrentResolvingJobs: 10, + NumConcurrentResolvingTrieNodesJobs: 3, + MainPreferredPeersHolder: &p2pmocks.PeersHolderStub{}, + FullArchivePreferredPeersHolder: &p2pmocks.PeersHolderStub{}, + PayloadValidator: &testscommon.PeerAuthenticationPayloadValidatorStub{}, } } diff --git a/dataRetriever/factory/resolverscontainer/shardResolversContainerFactory.go b/dataRetriever/factory/resolverscontainer/shardResolversContainerFactory.go index 28582f03bc5..f24beaa4331 100644 --- a/dataRetriever/factory/resolverscontainer/shardResolversContainerFactory.go +++ b/dataRetriever/factory/resolverscontainer/shardResolversContainerFactory.go @@ -25,7 +25,12 @@ func NewShardResolversContainerFactory( args.Marshalizer = marshal.NewSizeCheckUnmarshalizer(args.Marshalizer, args.SizeCheckDelta) } - thr, err := throttler.NewNumGoRoutinesThrottler(args.NumConcurrentResolvingJobs) + mainThrottler, err := throttler.NewNumGoRoutinesThrottler(args.NumConcurrentResolvingJobs) + if err != nil { + return nil, err + } + + trieNodesThrottler, err := throttler.NewNumGoRoutinesThrottler(args.NumConcurrentResolvingTrieNodesJobs) if err != nil { return nil, err } @@ -44,7 +49,8 @@ func NewShardResolversContainerFactory( triesContainer: args.TriesContainer, inputAntifloodHandler: args.InputAntifloodHandler, outputAntifloodHandler: args.OutputAntifloodHandler, - throttler: thr, + throttler: mainThrottler, + trieNodesThrottler: trieNodesThrottler, isFullHistoryNode: args.IsFullHistoryNode, mainPreferredPeersHolder: args.MainPreferredPeersHolder, fullArchivePreferredPeersHolder: args.FullArchivePreferredPeersHolder, diff --git a/dataRetriever/factory/resolverscontainer/shardResolversContainerFactory_test.go b/dataRetriever/factory/resolverscontainer/shardResolversContainerFactory_test.go index 4d6ca351195..ca97015f3ae 100644 --- a/dataRetriever/factory/resolverscontainer/shardResolversContainerFactory_test.go +++ b/dataRetriever/factory/resolverscontainer/shardResolversContainerFactory_test.go @@ -100,8 +100,15 @@ func TestNewShardResolversContainerFactory_NewNumGoRoutinesThrottlerFailsShouldE args := getArgumentsShard() args.NumConcurrentResolvingJobs = 0 + rcf, err := resolverscontainer.NewShardResolversContainerFactory(args) + assert.Nil(t, rcf) + assert.Equal(t, core.ErrNotPositiveValue, err) + + args.NumConcurrentResolvingJobs = 10 + args.NumConcurrentResolvingTrieNodesJobs = 0 + rcf, err = resolverscontainer.NewShardResolversContainerFactory(args) assert.Nil(t, rcf) assert.Equal(t, core.ErrNotPositiveValue, err) } @@ -465,21 +472,22 @@ func TestShardResolversContainerFactory_IsInterfaceNil(t *testing.T) { func getArgumentsShard() resolverscontainer.FactoryArgs { return resolverscontainer.FactoryArgs{ - ShardCoordinator: mock.NewOneShardCoordinatorMock(), - MainMessenger: createMessengerStubForShard("", ""), - FullArchiveMessenger: createMessengerStubForShard("", ""), - Store: createStoreForShard(), - Marshalizer: &mock.MarshalizerMock{}, - DataPools: createDataPoolsForShard(), - Uint64ByteSliceConverter: &mock.Uint64ByteSliceConverterMock{}, - DataPacker: &mock.DataPackerStub{}, - TriesContainer: createTriesHolderForShard(), - SizeCheckDelta: 0, - InputAntifloodHandler: &mock.P2PAntifloodHandlerStub{}, - OutputAntifloodHandler: &mock.P2PAntifloodHandlerStub{}, - NumConcurrentResolvingJobs: 10, - MainPreferredPeersHolder: &p2pmocks.PeersHolderStub{}, - 
FullArchivePreferredPeersHolder: &p2pmocks.PeersHolderStub{}, - PayloadValidator: &testscommon.PeerAuthenticationPayloadValidatorStub{}, + ShardCoordinator: mock.NewOneShardCoordinatorMock(), + MainMessenger: createMessengerStubForShard("", ""), + FullArchiveMessenger: createMessengerStubForShard("", ""), + Store: createStoreForShard(), + Marshalizer: &mock.MarshalizerMock{}, + DataPools: createDataPoolsForShard(), + Uint64ByteSliceConverter: &mock.Uint64ByteSliceConverterMock{}, + DataPacker: &mock.DataPackerStub{}, + TriesContainer: createTriesHolderForShard(), + SizeCheckDelta: 0, + InputAntifloodHandler: &mock.P2PAntifloodHandlerStub{}, + OutputAntifloodHandler: &mock.P2PAntifloodHandlerStub{}, + NumConcurrentResolvingJobs: 10, + NumConcurrentResolvingTrieNodesJobs: 3, + MainPreferredPeersHolder: &p2pmocks.PeersHolderStub{}, + FullArchivePreferredPeersHolder: &p2pmocks.PeersHolderStub{}, + PayloadValidator: &testscommon.PeerAuthenticationPayloadValidatorStub{}, } } diff --git a/dataRetriever/factory/storageRequestersContainer/baseRequestersContainerFactory.go b/dataRetriever/factory/storageRequestersContainer/baseRequestersContainerFactory.go index e68b10d5e46..2682231a768 100644 --- a/dataRetriever/factory/storageRequestersContainer/baseRequestersContainerFactory.go +++ b/dataRetriever/factory/storageRequestersContainer/baseRequestersContainerFactory.go @@ -10,7 +10,6 @@ import ( "github.com/multiversx/mx-chain-core-go/hashing" "github.com/multiversx/mx-chain-core-go/marshal" "github.com/multiversx/mx-chain-go/common" - "github.com/multiversx/mx-chain-go/common/disabled" "github.com/multiversx/mx-chain-go/common/statistics" "github.com/multiversx/mx-chain-go/config" "github.com/multiversx/mx-chain-go/dataRetriever" @@ -20,9 +19,6 @@ import ( "github.com/multiversx/mx-chain-go/p2p" "github.com/multiversx/mx-chain-go/process/factory" "github.com/multiversx/mx-chain-go/sharding" - "github.com/multiversx/mx-chain-go/storage" - storageFactory "github.com/multiversx/mx-chain-go/storage/factory" - trieFactory "github.com/multiversx/mx-chain-go/trie/factory" ) const defaultBeforeGracefulClose = time.Minute @@ -239,46 +235,6 @@ func (brcf *baseRequestersContainerFactory) createMiniBlocksRequester(responseTo return mbRequester, nil } -func (brcf *baseRequestersContainerFactory) newImportDBTrieStorage( - mainStorer storage.Storer, - storageIdentifier dataRetriever.UnitType, - handler common.EnableEpochsHandler, - stateStatsHandler common.StateStatisticsHandler, -) (common.StorageManager, dataRetriever.TrieDataGetter, error) { - pathManager, err := storageFactory.CreatePathManager( - storageFactory.ArgCreatePathManager{ - WorkingDir: brcf.workingDir, - ChainID: brcf.chainID, - }, - ) - if err != nil { - return nil, nil, err - } - - trieFactoryArgs := trieFactory.TrieFactoryArgs{ - Marshalizer: brcf.marshalizer, - Hasher: brcf.hasher, - PathManager: pathManager, - TrieStorageManagerConfig: brcf.generalConfig.TrieStorageManagerConfig, - } - trieFactoryInstance, err := trieFactory.NewTrieFactory(trieFactoryArgs) - if err != nil { - return nil, nil, err - } - - args := trieFactory.TrieCreateArgs{ - MainStorer: mainStorer, - PruningEnabled: brcf.generalConfig.StateTriesConfig.AccountsStatePruningEnabled, - MaxTrieLevelInMem: brcf.generalConfig.StateTriesConfig.MaxStateTrieLevelInMemory, - SnapshotsEnabled: brcf.snapshotsEnabled, - IdleProvider: disabled.NewProcessStatusHandler(), - Identifier: storageIdentifier.String(), - EnableEpochsHandler: handler, - StatsCollector: stateStatsHandler, - } - return 
trieFactoryInstance.Create(args) -} - func (brcf *baseRequestersContainerFactory) generatePeerAuthenticationRequester() error { identifierPeerAuth := common.PeerAuthenticationTopic peerAuthRequester := disabledRequesters.NewDisabledRequester() diff --git a/dataRetriever/resolvers/epochproviders/arithmeticEpochProvider.go b/dataRetriever/resolvers/epochproviders/arithmeticEpochProvider.go index a0d6963ad14..675ebd6f276 100644 --- a/dataRetriever/resolvers/epochproviders/arithmeticEpochProvider.go +++ b/dataRetriever/resolvers/epochproviders/arithmeticEpochProvider.go @@ -9,7 +9,7 @@ import ( ) // deltaEpochActive represents how many epochs behind the current computed epoch are to be considered "active" and -//cause the requests to be sent to all peers regardless of being full observers or not. Usually, a node will have +// cause the requests to be sent to all peers regardless of being full observers or not. Usually, a node will have // [config.toml].[StoragePruning].NumActivePersisters opened persisters but to the fact that a shorter epoch can happen, // that value is lowered at a maximum 1. const deltaEpochActive = uint32(1) diff --git a/dataRetriever/resolvers/trieNodeResolver.go b/dataRetriever/resolvers/trieNodeResolver.go index 871ed85fee5..275327d44c6 100644 --- a/dataRetriever/resolvers/trieNodeResolver.go +++ b/dataRetriever/resolvers/trieNodeResolver.go @@ -1,6 +1,8 @@ package resolvers import ( + "sync" + "github.com/multiversx/mx-chain-core-go/core" "github.com/multiversx/mx-chain-core-go/core/check" "github.com/multiversx/mx-chain-core-go/data/batch" @@ -20,6 +22,7 @@ type ArgTrieNodeResolver struct { // TrieNodeResolver is a wrapper over Resolver that is specialized in resolving trie node requests type TrieNodeResolver struct { + mutCriticalSection sync.Mutex *baseResolver messageProcessor trieDataGetter dataRetriever.TrieDataGetter @@ -104,6 +107,9 @@ func (tnRes *TrieNodeResolver) resolveMultipleHashes(hashesBuff []byte, message } func (tnRes *TrieNodeResolver) resolveOnlyRequestedHashes(hashes [][]byte, nodes map[string]struct{}) (int, bool) { + tnRes.mutCriticalSection.Lock() + defer tnRes.mutCriticalSection.Unlock() + spaceUsed := 0 usedAllSpace := false remainingSpace := core.MaxBufferSizeToSendTrieNodes @@ -129,6 +135,9 @@ func (tnRes *TrieNodeResolver) resolveOnlyRequestedHashes(hashes [][]byte, nodes } func (tnRes *TrieNodeResolver) resolveSubTries(hashes [][]byte, nodes map[string]struct{}, spaceUsedAlready int) { + tnRes.mutCriticalSection.Lock() + defer tnRes.mutCriticalSection.Unlock() + var serializedNodes [][]byte var err error var serializedNode []byte @@ -168,7 +177,10 @@ func convertMapToSlice(m map[string]struct{}) [][]byte { } func (tnRes *TrieNodeResolver) resolveOneHash(hash []byte, chunkIndex uint32, message p2p.MessageP2P, source p2p.MessageHandler) error { + tnRes.mutCriticalSection.Lock() serializedNode, err := tnRes.trieDataGetter.GetSerializedNode(hash) + tnRes.mutCriticalSection.Unlock() + if err != nil { return err } diff --git a/debug/handler/interceptorDebugHandler.go b/debug/handler/interceptorDebugHandler.go index 9c5b2cb361a..a00f7b878b9 100644 --- a/debug/handler/interceptorDebugHandler.go +++ b/debug/handler/interceptorDebugHandler.go @@ -202,7 +202,7 @@ func (idh *interceptorDebugHandler) incrementNumOfPrints() { } } -//TODO replace this with a call to Query(search) when a suitable conditional parser will be used. 
Also replace config parameters +// TODO replace this with a call to Query(search) when a suitable conditional parser will be used. Also replace config parameters // with a query string so it will be more extensible func (idh *interceptorDebugHandler) getStringEvents(maxNumPrints int) []string { acceptEvent := func(ev *event) bool { diff --git a/epochStart/bootstrap/baseStorageHandler.go b/epochStart/bootstrap/baseStorageHandler.go index dcf9193808d..1442af7e3b0 100644 --- a/epochStart/bootstrap/baseStorageHandler.go +++ b/epochStart/bootstrap/baseStorageHandler.go @@ -2,22 +2,65 @@ package bootstrap import ( "encoding/hex" - "encoding/json" "strings" "github.com/multiversx/mx-chain-core-go/core" + "github.com/multiversx/mx-chain-core-go/core/check" "github.com/multiversx/mx-chain-core-go/data" "github.com/multiversx/mx-chain-core-go/data/block" "github.com/multiversx/mx-chain-core-go/data/typeConverters" "github.com/multiversx/mx-chain-core-go/hashing" "github.com/multiversx/mx-chain-core-go/marshal" "github.com/multiversx/mx-chain-go/common" + "github.com/multiversx/mx-chain-go/config" "github.com/multiversx/mx-chain-go/dataRetriever" "github.com/multiversx/mx-chain-go/process/block/bootstrapStorage" "github.com/multiversx/mx-chain-go/sharding" "github.com/multiversx/mx-chain-go/sharding/nodesCoordinator" + "github.com/multiversx/mx-chain-go/storage" ) +// StorageHandlerArgs is a struct placeholder for all arguments required to create either a shard or a meta storage handler +type StorageHandlerArgs struct { + GeneralConfig config.Config + PreferencesConfig config.PreferencesConfig + ShardCoordinator sharding.Coordinator + PathManagerHandler storage.PathManagerHandler + Marshaller marshal.Marshalizer + Hasher hashing.Hasher + CurrentEpoch uint32 + Uint64Converter typeConverters.Uint64ByteSliceConverter + NodeTypeProvider NodeTypeProviderHandler + NodesCoordinatorRegistryFactory nodesCoordinator.NodesCoordinatorRegistryFactory + SnapshotsEnabled bool + ManagedPeersHolder common.ManagedPeersHolder + NodeProcessingMode common.NodeProcessingMode + RepopulateTokensSupplies bool + StateStatsHandler common.StateStatisticsHandler +} + +func checkNilArgs(args StorageHandlerArgs) error { + if check.IfNil(args.ShardCoordinator) { + return core.ErrNilShardCoordinator + } + if check.IfNil(args.PathManagerHandler) { + return dataRetriever.ErrNilPathManager + } + if check.IfNil(args.Marshaller) { + return core.ErrNilMarshalizer + } + if check.IfNil(args.Hasher) { + return core.ErrNilHasher + } + if check.IfNil(args.Uint64Converter) { + return dataRetriever.ErrNilUint64ByteSliceConverter + } + if check.IfNil(args.NodesCoordinatorRegistryFactory) { + return nodesCoordinator.ErrNilNodesCoordinatorRegistryFactory + } + return nil +} + type miniBlocksInfo struct { miniBlockHashes [][]byte fullyProcessed []bool @@ -33,12 +76,13 @@ type processedIndexes struct { // baseStorageHandler handles the storage functions for saving bootstrap data type baseStorageHandler struct { - storageService dataRetriever.StorageService - shardCoordinator sharding.Coordinator - marshalizer marshal.Marshalizer - hasher hashing.Hasher - currentEpoch uint32 - uint64Converter typeConverters.Uint64ByteSliceConverter + storageService dataRetriever.StorageService + shardCoordinator sharding.Coordinator + marshalizer marshal.Marshalizer + hasher hashing.Hasher + currentEpoch uint32 + uint64Converter typeConverters.Uint64ByteSliceConverter + nodesCoordinatorRegistryFactory nodesCoordinator.NodesCoordinatorRegistryFactory } func (bsh 
*baseStorageHandler) groupMiniBlocksByShard(miniBlocks map[string]*block.MiniBlock) ([]bootstrapStorage.PendingMiniBlocksInfo, error) { @@ -61,12 +105,11 @@ func (bsh *baseStorageHandler) groupMiniBlocksByShard(miniBlocks map[string]*blo func (bsh *baseStorageHandler) saveNodesCoordinatorRegistry( metaBlock data.HeaderHandler, - nodesConfig *nodesCoordinator.NodesCoordinatorRegistry, + nodesConfig nodesCoordinator.NodesCoordinatorRegistryHandler, ) ([]byte, error) { key := append([]byte(common.NodesCoordinatorRegistryKeyPrefix), metaBlock.GetPrevRandSeed()...) - // TODO: replace hardcoded json - although it is hardcoded in nodesCoordinator as well. - registryBytes, err := json.Marshal(nodesConfig) + registryBytes, err := bsh.nodesCoordinatorRegistryFactory.GetRegistryData(nodesConfig, metaBlock.GetEpoch()) if err != nil { return nil, err } @@ -81,7 +124,7 @@ func (bsh *baseStorageHandler) saveNodesCoordinatorRegistry( return nil, err } - log.Debug("saving nodes coordinator config", "key", key) + log.Debug("saving nodes coordinator config", "key", key, "epoch", metaBlock.GetEpoch()) return metaBlock.GetPrevRandSeed(), nil } diff --git a/epochStart/bootstrap/common.go b/epochStart/bootstrap/common.go index 19bfa2acc54..da6e99fda1b 100644 --- a/epochStart/bootstrap/common.go +++ b/epochStart/bootstrap/common.go @@ -6,6 +6,7 @@ import ( "github.com/multiversx/mx-chain-core-go/core/check" "github.com/multiversx/mx-chain-go/common/statistics" "github.com/multiversx/mx-chain-go/epochStart" + "github.com/multiversx/mx-chain-go/sharding/nodesCoordinator" ) const baseErrorMessage = "error with epoch start bootstrapper arguments" @@ -119,6 +120,9 @@ func checkArguments(args ArgsEpochStartBootstrap) error { if check.IfNil(args.StateStatsHandler) { return fmt.Errorf("%s: %w", baseErrorMessage, statistics.ErrNilStateStatsHandler) } + if check.IfNil(args.NodesCoordinatorRegistryFactory) { + return fmt.Errorf("%s: %w", baseErrorMessage, nodesCoordinator.ErrNilNodesCoordinatorRegistryFactory) + } return nil } diff --git a/epochStart/bootstrap/disabled/disabledNodesCoordinator.go b/epochStart/bootstrap/disabled/disabledNodesCoordinator.go index 742fa1e0523..efee420feec 100644 --- a/epochStart/bootstrap/disabled/disabledNodesCoordinator.go +++ b/epochStart/bootstrap/disabled/disabledNodesCoordinator.go @@ -49,6 +49,11 @@ func (n *nodesCoordinator) GetAllWaitingValidatorsPublicKeys(_ uint32) (map[uint return nil, nil } +// GetAllShuffledOutValidatorsPublicKeys - +func (n *nodesCoordinator) GetAllShuffledOutValidatorsPublicKeys(_ uint32) (map[uint32][][]byte, error) { + return nil, nil +} + // GetConsensusValidatorsPublicKeys - func (n *nodesCoordinator) GetConsensusValidatorsPublicKeys(_ []byte, _ uint64, _ uint32, _ uint32) ([]string, error) { return nil, nil diff --git a/epochStart/bootstrap/fromLocalStorage.go b/epochStart/bootstrap/fromLocalStorage.go index b6dea44ee81..868d0359ef5 100644 --- a/epochStart/bootstrap/fromLocalStorage.go +++ b/epochStart/bootstrap/fromLocalStorage.go @@ -2,7 +2,6 @@ package bootstrap import ( "bytes" - "encoding/json" "fmt" "strconv" @@ -196,22 +195,22 @@ func (e *epochStartBootstrap) prepareEpochFromStorage() (Parameters, error) { func (e *epochStartBootstrap) checkIfShuffledOut( pubKey []byte, - nodesConfig *nodesCoordinator.NodesCoordinatorRegistry, + nodesConfig nodesCoordinator.NodesCoordinatorRegistryHandler, ) (uint32, bool) { epochIDasString := fmt.Sprint(e.baseData.lastEpoch) - epochConfig := nodesConfig.EpochsConfig[epochIDasString] + epochConfig := 
nodesConfig.GetEpochsConfig()[epochIDasString] if epochConfig == nil { return e.baseData.shardId, false } - newShardId, isWaitingForShard := checkIfPubkeyIsInMap(pubKey, epochConfig.WaitingValidators) + newShardId, isWaitingForShard := checkIfPubkeyIsInMap(pubKey, epochConfig.GetWaitingValidators()) if isWaitingForShard { isShuffledOut := newShardId != e.baseData.shardId e.nodeType = core.NodeTypeValidator return newShardId, isShuffledOut } - newShardId, isEligibleForShard := checkIfPubkeyIsInMap(pubKey, epochConfig.EligibleValidators) + newShardId, isEligibleForShard := checkIfPubkeyIsInMap(pubKey, epochConfig.GetEligibleValidators()) if isEligibleForShard { isShuffledOut := newShardId != e.baseData.shardId e.nodeType = core.NodeTypeValidator @@ -252,7 +251,7 @@ func checkIfValidatorIsInList( return false } -func (e *epochStartBootstrap) getLastBootstrapData(storer storage.Storer) (*bootstrapStorage.BootstrapData, *nodesCoordinator.NodesCoordinatorRegistry, error) { +func (e *epochStartBootstrap) getLastBootstrapData(storer storage.Storer) (*bootstrapStorage.BootstrapData, nodesCoordinator.NodesCoordinatorRegistryHandler, error) { bootStorer, err := bootstrapStorage.NewBootstrapStorer(e.coreComponentsHolder.InternalMarshalizer(), storer) if err != nil { return nil, nil, err @@ -271,8 +270,7 @@ func (e *epochStartBootstrap) getLastBootstrapData(storer storage.Storer) (*boot return nil, nil, err } - config := &nodesCoordinator.NodesCoordinatorRegistry{} - err = json.Unmarshal(d, config) + config, err := e.nodesCoordinatorRegistryFactory.CreateNodesCoordinatorRegistry(d) if err != nil { return nil, nil, err } diff --git a/epochStart/bootstrap/interface.go b/epochStart/bootstrap/interface.go index e934e450f7c..bfc293032ee 100644 --- a/epochStart/bootstrap/interface.go +++ b/epochStart/bootstrap/interface.go @@ -13,7 +13,7 @@ import ( // StartOfEpochNodesConfigHandler defines the methods to process nodesConfig from epoch start metablocks type StartOfEpochNodesConfigHandler interface { - NodesConfigFromMetaBlock(currMetaBlock data.HeaderHandler, prevMetaBlock data.HeaderHandler) (*nodesCoordinator.NodesCoordinatorRegistry, uint32, []*block.MiniBlock, error) + NodesConfigFromMetaBlock(currMetaBlock data.HeaderHandler, prevMetaBlock data.HeaderHandler) (nodesCoordinator.NodesCoordinatorRegistryHandler, uint32, []*block.MiniBlock, error) IsInterfaceNil() bool } @@ -26,7 +26,7 @@ type EpochStartMetaBlockInterceptorProcessor interface { // StartInEpochNodesCoordinator defines the methods to process and save nodesCoordinator information to storage type StartInEpochNodesCoordinator interface { EpochStartPrepare(metaHdr data.HeaderHandler, body data.BodyHandler) - NodesCoordinatorToRegistry() *nodesCoordinator.NodesCoordinatorRegistry + NodesCoordinatorToRegistry(epoch uint32) nodesCoordinator.NodesCoordinatorRegistryHandler ShardIdForEpoch(epoch uint32) (uint32, error) IsInterfaceNil() bool } diff --git a/epochStart/bootstrap/metaStorageHandler.go b/epochStart/bootstrap/metaStorageHandler.go index 65e7e9c9237..01f65ccabe6 100644 --- a/epochStart/bootstrap/metaStorageHandler.go +++ b/epochStart/bootstrap/metaStorageHandler.go @@ -7,17 +7,11 @@ import ( "github.com/multiversx/mx-chain-core-go/core" "github.com/multiversx/mx-chain-core-go/data" "github.com/multiversx/mx-chain-core-go/data/block" - "github.com/multiversx/mx-chain-core-go/data/typeConverters" - "github.com/multiversx/mx-chain-core-go/hashing" - "github.com/multiversx/mx-chain-core-go/marshal" "github.com/multiversx/mx-chain-go/common" - 
"github.com/multiversx/mx-chain-go/config" "github.com/multiversx/mx-chain-go/dataRetriever" "github.com/multiversx/mx-chain-go/epochStart" "github.com/multiversx/mx-chain-go/epochStart/bootstrap/disabled" "github.com/multiversx/mx-chain-go/process/block/bootstrapStorage" - "github.com/multiversx/mx-chain-go/sharding" - "github.com/multiversx/mx-chain-go/storage" "github.com/multiversx/mx-chain-go/storage/factory" ) @@ -26,36 +20,28 @@ type metaStorageHandler struct { } // NewMetaStorageHandler will return a new instance of metaStorageHandler -func NewMetaStorageHandler( - generalConfig config.Config, - prefsConfig config.PreferencesConfig, - shardCoordinator sharding.Coordinator, - pathManagerHandler storage.PathManagerHandler, - marshalizer marshal.Marshalizer, - hasher hashing.Hasher, - currentEpoch uint32, - uint64Converter typeConverters.Uint64ByteSliceConverter, - nodeTypeProvider NodeTypeProviderHandler, - nodeProcessingMode common.NodeProcessingMode, - managedPeersHolder common.ManagedPeersHolder, - stateStatsHandler common.StateStatisticsHandler, -) (*metaStorageHandler, error) { +func NewMetaStorageHandler(args StorageHandlerArgs) (*metaStorageHandler, error) { + err := checkNilArgs(args) + if err != nil { + return nil, err + } + epochStartNotifier := &disabled.EpochStartNotifier{} storageFactory, err := factory.NewStorageServiceFactory( factory.StorageServiceFactoryArgs{ - Config: generalConfig, - PrefsConfig: prefsConfig, - ShardCoordinator: shardCoordinator, - PathManager: pathManagerHandler, + Config: args.GeneralConfig, + PrefsConfig: args.PreferencesConfig, + ShardCoordinator: args.ShardCoordinator, + PathManager: args.PathManagerHandler, EpochStartNotifier: epochStartNotifier, - NodeTypeProvider: nodeTypeProvider, - CurrentEpoch: currentEpoch, + NodeTypeProvider: args.NodeTypeProvider, StorageType: factory.BootstrapStorageService, + ManagedPeersHolder: args.ManagedPeersHolder, + CurrentEpoch: args.CurrentEpoch, CreateTrieEpochRootHashStorer: false, - NodeProcessingMode: nodeProcessingMode, - RepopulateTokensSupplies: false, // tokens supplies cannot be repopulated at this time - ManagedPeersHolder: managedPeersHolder, - StateStatsHandler: stateStatsHandler, + NodeProcessingMode: args.NodeProcessingMode, + RepopulateTokensSupplies: false, + StateStatsHandler: args.StateStatsHandler, }, ) if err != nil { @@ -68,12 +54,13 @@ func NewMetaStorageHandler( } base := &baseStorageHandler{ - storageService: storageService, - shardCoordinator: shardCoordinator, - marshalizer: marshalizer, - hasher: hasher, - currentEpoch: currentEpoch, - uint64Converter: uint64Converter, + storageService: storageService, + shardCoordinator: args.ShardCoordinator, + marshalizer: args.Marshaller, + hasher: args.Hasher, + currentEpoch: args.CurrentEpoch, + uint64Converter: args.Uint64Converter, + nodesCoordinatorRegistryFactory: args.NodesCoordinatorRegistryFactory, } return &metaStorageHandler{baseStorageHandler: base}, nil diff --git a/epochStart/bootstrap/metaStorageHandler_test.go b/epochStart/bootstrap/metaStorageHandler_test.go index 4fee7dee5b5..92603df176a 100644 --- a/epochStart/bootstrap/metaStorageHandler_test.go +++ b/epochStart/bootstrap/metaStorageHandler_test.go @@ -20,36 +20,37 @@ import ( "github.com/multiversx/mx-chain-go/testscommon" "github.com/multiversx/mx-chain-go/testscommon/hashingMocks" "github.com/multiversx/mx-chain-go/testscommon/nodeTypeProviderMock" + "github.com/multiversx/mx-chain-go/testscommon/shardingMocks" storageStubs 
"github.com/multiversx/mx-chain-go/testscommon/storage" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) +func createStorageHandlerArgs() StorageHandlerArgs { + return StorageHandlerArgs{ + GeneralConfig: testscommon.GetGeneralConfig(), + PreferencesConfig: config.PreferencesConfig{}, + ShardCoordinator: &mock.ShardCoordinatorStub{}, + PathManagerHandler: &testscommon.PathManagerStub{}, + Marshaller: &mock.MarshalizerMock{}, + Hasher: &hashingMocks.HasherMock{}, + CurrentEpoch: 0, + Uint64Converter: &mock.Uint64ByteSliceConverterMock{}, + NodeTypeProvider: &nodeTypeProviderMock.NodeTypeProviderStub{}, + NodesCoordinatorRegistryFactory: &shardingMocks.NodesCoordinatorRegistryFactoryMock{}, + ManagedPeersHolder: &testscommon.ManagedPeersHolderStub{}, + SnapshotsEnabled: false, + NodeProcessingMode: common.Normal, + StateStatsHandler: disabled.NewStateStatistics(), + RepopulateTokensSupplies: false, + } +} + func TestNewMetaStorageHandler_InvalidConfigErr(t *testing.T) { - gCfg := config.Config{} - prefsConfig := config.PreferencesConfig{} - coordinator := &mock.ShardCoordinatorStub{} - pathManager := &testscommon.PathManagerStub{} - marshalizer := &mock.MarshalizerMock{} - hasher := &hashingMocks.HasherMock{} - uit64Cvt := &mock.Uint64ByteSliceConverterMock{} - nodeTypeProvider := &nodeTypeProviderMock.NodeTypeProviderStub{} - managedPeersHolder := &testscommon.ManagedPeersHolderStub{} - - mtStrHandler, err := NewMetaStorageHandler( - gCfg, - prefsConfig, - coordinator, - pathManager, - marshalizer, - hasher, - 1, - uit64Cvt, - nodeTypeProvider, - common.Normal, - managedPeersHolder, - disabled.NewStateStatistics(), - ) + args := createStorageHandlerArgs() + args.GeneralConfig = config.Config{} + + mtStrHandler, err := NewMetaStorageHandler(args) assert.True(t, check.IfNil(mtStrHandler)) assert.NotNil(t, err) } @@ -59,29 +60,8 @@ func TestNewMetaStorageHandler_CreateForMetaErr(t *testing.T) { _ = os.RemoveAll("./Epoch_0") }() - gCfg := testscommon.GetGeneralConfig() - prefsConfig := config.PreferencesConfig{} - coordinator := &mock.ShardCoordinatorStub{} - pathManager := &testscommon.PathManagerStub{} - marshalizer := &mock.MarshalizerMock{} - hasher := &hashingMocks.HasherMock{} - uit64Cvt := &mock.Uint64ByteSliceConverterMock{} - nodeTypeProvider := &nodeTypeProviderMock.NodeTypeProviderStub{} - managedPeersHolder := &testscommon.ManagedPeersHolderStub{} - mtStrHandler, err := NewMetaStorageHandler( - gCfg, - prefsConfig, - coordinator, - pathManager, - marshalizer, - hasher, - 1, - uit64Cvt, - nodeTypeProvider, - common.Normal, - managedPeersHolder, - disabled.NewStateStatistics(), - ) + args := createStorageHandlerArgs() + mtStrHandler, err := NewMetaStorageHandler(args) assert.False(t, check.IfNil(mtStrHandler)) assert.Nil(t, err) } @@ -91,34 +71,11 @@ func TestMetaStorageHandler_saveLastHeader(t *testing.T) { _ = os.RemoveAll("./Epoch_0") }() - gCfg := testscommon.GetGeneralConfig() - prefsConfig := config.PreferencesConfig{} - coordinator := &mock.ShardCoordinatorStub{} - pathManager := &testscommon.PathManagerStub{} - marshalizer := &mock.MarshalizerMock{} - hasher := &hashingMocks.HasherMock{} - uit64Cvt := &mock.Uint64ByteSliceConverterMock{} - nodeTypeProvider := &nodeTypeProviderMock.NodeTypeProviderStub{} - managedPeersHolder := &testscommon.ManagedPeersHolderStub{} - - mtStrHandler, _ := NewMetaStorageHandler( - gCfg, - prefsConfig, - coordinator, - pathManager, - marshalizer, - hasher, - 1, - uit64Cvt, - nodeTypeProvider, - common.Normal, - 
managedPeersHolder, - disabled.NewStateStatistics(), - ) - + args := createStorageHandlerArgs() + mtStrHandler, _ := NewMetaStorageHandler(args) header := &block.MetaBlock{Nonce: 0} - headerHash, _ := core.CalculateHash(marshalizer, hasher, header) + headerHash, _ := core.CalculateHash(args.Marshaller, args.Hasher, header) expectedBootInfo := bootstrapStorage.BootstrapHeaderInfo{ ShardId: core.MetachainShardId, Hash: headerHash, } @@ -133,35 +90,13 @@ func TestMetaStorageHandler_saveLastCrossNotarizedHeaders(t *testing.T) { _ = os.RemoveAll("./Epoch_0") }() - gCfg := testscommon.GetGeneralConfig() - prefsConfig := config.PreferencesConfig{} - coordinator := &mock.ShardCoordinatorStub{} - pathManager := &testscommon.PathManagerStub{} - marshalizer := &mock.MarshalizerMock{} - hasher := &hashingMocks.HasherMock{} - uit64Cvt := &mock.Uint64ByteSliceConverterMock{} - nodeTypeProvider := &nodeTypeProviderMock.NodeTypeProviderStub{} - managedPeersHolder := &testscommon.ManagedPeersHolderStub{} - - mtStrHandler, _ := NewMetaStorageHandler( - gCfg, - prefsConfig, - coordinator, - pathManager, - marshalizer, - hasher, - 1, - uit64Cvt, - nodeTypeProvider, - common.Normal, - managedPeersHolder, - disabled.NewStateStatistics(), - ) + args := createStorageHandlerArgs() + mtStrHandler, _ := NewMetaStorageHandler(args) hdr1 := &block.Header{Nonce: 1} hdr2 := &block.Header{Nonce: 2} - hdrHash1, _ := core.CalculateHash(marshalizer, hasher, hdr1) - hdrHash2, _ := core.CalculateHash(marshalizer, hasher, hdr2) + hdrHash1, _ := core.CalculateHash(args.Marshaller, args.Hasher, hdr1) + hdrHash2, _ := core.CalculateHash(args.Marshaller, args.Hasher, hdr2) hdr3 := &block.MetaBlock{ Nonce: 3, @@ -181,30 +116,8 @@ func TestMetaStorageHandler_saveTriggerRegistry(t *testing.T) { _ = os.RemoveAll("./Epoch_0") }() - gCfg := testscommon.GetGeneralConfig() - prefsConfig := config.PreferencesConfig{} - coordinator := &mock.ShardCoordinatorStub{} - pathManager := &testscommon.PathManagerStub{} - marshalizer := &mock.MarshalizerMock{} - hasher := &hashingMocks.HasherMock{} - uit64Cvt := &mock.Uint64ByteSliceConverterMock{} - nodeTypeProvider := &nodeTypeProviderMock.NodeTypeProviderStub{} - managedPeersHolder := &testscommon.ManagedPeersHolderStub{} - - mtStrHandler, _ := NewMetaStorageHandler( - gCfg, - prefsConfig, - coordinator, - pathManager, - marshalizer, - hasher, - 1, - uit64Cvt, - nodeTypeProvider, - common.Normal, - managedPeersHolder, - disabled.NewStateStatistics(), - ) + args := createStorageHandlerArgs() + mtStrHandler, _ := NewMetaStorageHandler(args) components := &ComponentsNeededForBootstrap{ EpochStartMetaBlock: &block.MetaBlock{Nonce: 3}, @@ -220,30 +133,8 @@ func TestMetaStorageHandler_saveDataToStorage(t *testing.T) { _ = os.RemoveAll("./Epoch_0") }() - gCfg := testscommon.GetGeneralConfig() - prefsConfig := config.PreferencesConfig{} - coordinator := &mock.ShardCoordinatorStub{} - pathManager := &testscommon.PathManagerStub{} - marshalizer := &mock.MarshalizerMock{} - hasher := &hashingMocks.HasherMock{} - uit64Cvt := &mock.Uint64ByteSliceConverterMock{} - nodeTypeProvider := &nodeTypeProviderMock.NodeTypeProviderStub{} - managedPeersHolder := &testscommon.ManagedPeersHolderStub{} - - mtStrHandler, _ := NewMetaStorageHandler( - gCfg, - prefsConfig, - coordinator, - pathManager, - marshalizer, - hasher, - 1, - uit64Cvt, - nodeTypeProvider, - common.Normal, - managedPeersHolder, - disabled.NewStateStatistics(), - ) + args := createStorageHandlerArgs() + mtStrHandler, _ := NewMetaStorageHandler(args) 
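// Note: the tests above now build a single StorageHandlerArgs value via
// createStorageHandlerArgs() instead of passing a long positional parameter list
// to NewMetaStorageHandler. A minimal, self-contained sketch of that
// constructor-args pattern follows; exampleArgs, checkExampleArgs, exampleHandler
// and newExampleHandler are hypothetical names used only for illustration and are
// not part of this patch.
package argsexample

import "errors"

// exampleArgs plays the role of StorageHandlerArgs: every dependency is a named
// field, so call sites stay readable and adding a dependency later does not
// change the constructor signature.
type exampleArgs struct {
	Marshaller interface{ Marshal(obj interface{}) ([]byte, error) }
	Hasher     interface{ Compute(s string) []byte }
}

// checkExampleArgs plays the role of checkNilArgs: required dependencies are
// validated once, up front, before any component is built.
func checkExampleArgs(args exampleArgs) error {
	if args.Marshaller == nil {
		return errors.New("nil marshaller")
	}
	if args.Hasher == nil {
		return errors.New("nil hasher")
	}
	return nil
}

// exampleHandler stands in for the storage handler being constructed.
type exampleHandler struct {
	args exampleArgs
}

// newExampleHandler plays the role of NewMetaStorageHandler(args): fail fast on
// missing dependencies, then construct the component from the args struct.
func newExampleHandler(args exampleArgs) (*exampleHandler, error) {
	if err := checkExampleArgs(args); err != nil {
		return nil, err
	}
	return &exampleHandler{args: args}, nil
}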
components := &ComponentsNeededForBootstrap{ EpochStartMetaBlock: &block.MetaBlock{Nonce: 3}, @@ -276,30 +167,8 @@ func testMetaWithMissingStorer(missingUnit dataRetriever.UnitType, atCallNumber _ = os.RemoveAll("./Epoch_0") }() - gCfg := testscommon.GetGeneralConfig() - prefsConfig := config.PreferencesConfig{} - coordinator := &mock.ShardCoordinatorStub{} - pathManager := &testscommon.PathManagerStub{} - marshalizer := &mock.MarshalizerMock{} - hasher := &hashingMocks.HasherMock{} - uit64Cvt := &mock.Uint64ByteSliceConverterMock{} - nodeTypeProvider := &nodeTypeProviderMock.NodeTypeProviderStub{} - managedPeersHolder := &testscommon.ManagedPeersHolderStub{} - - mtStrHandler, _ := NewMetaStorageHandler( - gCfg, - prefsConfig, - coordinator, - pathManager, - marshalizer, - hasher, - 1, - uit64Cvt, - nodeTypeProvider, - common.Normal, - managedPeersHolder, - disabled.NewStateStatistics(), - ) + args := createStorageHandlerArgs() + mtStrHandler, _ := NewMetaStorageHandler(args) counter := 0 mtStrHandler.storageService = &storageStubs.ChainStorerStub{ GetStorerCalled: func(unitType dataRetriever.UnitType) (storage.Storer, error) { diff --git a/epochStart/bootstrap/process.go b/epochStart/bootstrap/process.go index 55a642a6793..dce9135e0a3 100644 --- a/epochStart/bootstrap/process.go +++ b/epochStart/bootstrap/process.go @@ -73,7 +73,7 @@ type Parameters struct { Epoch uint32 SelfShardId uint32 NumOfShards uint32 - NodesConfig *nodesCoordinator.NodesCoordinatorRegistry + NodesConfig nodesCoordinator.NodesCoordinatorRegistryHandler } // ComponentsNeededForBootstrap holds the components which need to be initialized from network @@ -81,7 +81,7 @@ type ComponentsNeededForBootstrap struct { EpochStartMetaBlock data.MetaHeaderHandler PreviousEpochStart data.MetaHeaderHandler ShardHeader data.HeaderHandler - NodesConfig *nodesCoordinator.NodesCoordinatorRegistry + NodesConfig nodesCoordinator.NodesCoordinatorRegistryHandler Headers map[string]data.HeaderHandler ShardCoordinator sharding.Coordinator PendingMiniBlocks map[string]*block.MiniBlock @@ -136,15 +136,17 @@ type epochStartBootstrap struct { storageOpenerHandler storage.UnitOpenerHandler latestStorageDataProvider storage.LatestStorageDataProviderHandler argumentsParser process.ArgumentsParser + dataSyncerFactory types.ScheduledDataSyncerCreator dataSyncerWithScheduled types.ScheduledDataSyncer storageService dataRetriever.StorageService + nodesCoordinatorRegistryFactory nodesCoordinator.NodesCoordinatorRegistryFactory // gathered data epochStartMeta data.MetaHeaderHandler prevEpochStartMeta data.MetaHeaderHandler syncedHeaders map[string]data.HeaderHandler - nodesConfig *nodesCoordinator.NodesCoordinatorRegistry + nodesConfig nodesCoordinator.NodesCoordinatorRegistryHandler baseData baseDataInStorage startRound int64 nodeType core.NodeType @@ -163,30 +165,31 @@ type baseDataInStorage struct { // ArgsEpochStartBootstrap holds the arguments needed for creating an epoch start data provider component type ArgsEpochStartBootstrap struct { - CoreComponentsHolder process.CoreComponentsHolder - CryptoComponentsHolder process.CryptoComponentsHolder - DestinationShardAsObserver uint32 - MainMessenger p2p.Messenger - FullArchiveMessenger p2p.Messenger - GeneralConfig config.Config - PrefsConfig config.PreferencesConfig - FlagsConfig config.ContextFlagsConfig - EconomicsData process.EconomicsDataHandler - GenesisNodesConfig sharding.GenesisNodesSetupHandler - GenesisShardCoordinator sharding.Coordinator - StorageUnitOpener storage.UnitOpenerHandler - 
LatestStorageDataProvider storage.LatestStorageDataProviderHandler - Rater nodesCoordinator.ChanceComputer - NodeShuffler nodesCoordinator.NodesShuffler - RoundHandler epochStart.RoundHandler - ArgumentsParser process.ArgumentsParser - StatusHandler core.AppStatusHandler - HeaderIntegrityVerifier process.HeaderIntegrityVerifier - DataSyncerCreator types.ScheduledDataSyncerCreator - ScheduledSCRsStorer storage.Storer - TrieSyncStatisticsProvider common.SizeSyncStatisticsHandler - NodeProcessingMode common.NodeProcessingMode - StateStatsHandler common.StateStatisticsHandler + CoreComponentsHolder process.CoreComponentsHolder + CryptoComponentsHolder process.CryptoComponentsHolder + DestinationShardAsObserver uint32 + MainMessenger p2p.Messenger + FullArchiveMessenger p2p.Messenger + GeneralConfig config.Config + PrefsConfig config.PreferencesConfig + FlagsConfig config.ContextFlagsConfig + EconomicsData process.EconomicsDataHandler + GenesisNodesConfig sharding.GenesisNodesSetupHandler + GenesisShardCoordinator sharding.Coordinator + StorageUnitOpener storage.UnitOpenerHandler + LatestStorageDataProvider storage.LatestStorageDataProviderHandler + Rater nodesCoordinator.ChanceComputer + NodeShuffler nodesCoordinator.NodesShuffler + RoundHandler epochStart.RoundHandler + ArgumentsParser process.ArgumentsParser + StatusHandler core.AppStatusHandler + HeaderIntegrityVerifier process.HeaderIntegrityVerifier + DataSyncerCreator types.ScheduledDataSyncerCreator + ScheduledSCRsStorer storage.Storer + TrieSyncStatisticsProvider common.SizeSyncStatisticsHandler + NodeProcessingMode common.NodeProcessingMode + StateStatsHandler common.StateStatisticsHandler + NodesCoordinatorRegistryFactory nodesCoordinator.NodesCoordinatorRegistryFactory } type dataToSync struct { @@ -205,38 +208,40 @@ func NewEpochStartBootstrap(args ArgsEpochStartBootstrap) (*epochStartBootstrap, } epochStartProvider := &epochStartBootstrap{ - coreComponentsHolder: args.CoreComponentsHolder, - cryptoComponentsHolder: args.CryptoComponentsHolder, - mainMessenger: args.MainMessenger, - fullArchiveMessenger: args.FullArchiveMessenger, - generalConfig: args.GeneralConfig, - prefsConfig: args.PrefsConfig, - flagsConfig: args.FlagsConfig, - economicsData: args.EconomicsData, - genesisNodesConfig: args.GenesisNodesConfig, - genesisShardCoordinator: args.GenesisShardCoordinator, - rater: args.Rater, - destinationShardAsObserver: args.DestinationShardAsObserver, - nodeShuffler: args.NodeShuffler, - roundHandler: args.RoundHandler, - storageOpenerHandler: args.StorageUnitOpener, - latestStorageDataProvider: args.LatestStorageDataProvider, - shuffledOut: false, - statusHandler: args.StatusHandler, - nodeType: core.NodeTypeObserver, - argumentsParser: args.ArgumentsParser, - headerIntegrityVerifier: args.HeaderIntegrityVerifier, - numConcurrentTrieSyncers: args.GeneralConfig.TrieSync.NumConcurrentTrieSyncers, - maxHardCapForMissingNodes: args.GeneralConfig.TrieSync.MaxHardCapForMissingNodes, - trieSyncerVersion: args.GeneralConfig.TrieSync.TrieSyncerVersion, - checkNodesOnDisk: args.GeneralConfig.TrieSync.CheckNodesOnDisk, - dataSyncerFactory: args.DataSyncerCreator, - storerScheduledSCRs: args.ScheduledSCRsStorer, - shardCoordinator: args.GenesisShardCoordinator, - trieSyncStatisticsProvider: args.TrieSyncStatisticsProvider, - nodeProcessingMode: args.NodeProcessingMode, - nodeOperationMode: common.NormalOperation, - stateStatsHandler: args.StateStatsHandler, + coreComponentsHolder: args.CoreComponentsHolder, + cryptoComponentsHolder: 
args.CryptoComponentsHolder, + mainMessenger: args.MainMessenger, + fullArchiveMessenger: args.FullArchiveMessenger, + generalConfig: args.GeneralConfig, + prefsConfig: args.PrefsConfig, + flagsConfig: args.FlagsConfig, + economicsData: args.EconomicsData, + genesisNodesConfig: args.GenesisNodesConfig, + genesisShardCoordinator: args.GenesisShardCoordinator, + rater: args.Rater, + destinationShardAsObserver: args.DestinationShardAsObserver, + nodeShuffler: args.NodeShuffler, + roundHandler: args.RoundHandler, + storageOpenerHandler: args.StorageUnitOpener, + latestStorageDataProvider: args.LatestStorageDataProvider, + shuffledOut: false, + statusHandler: args.StatusHandler, + nodeType: core.NodeTypeObserver, + argumentsParser: args.ArgumentsParser, + headerIntegrityVerifier: args.HeaderIntegrityVerifier, + numConcurrentTrieSyncers: args.GeneralConfig.TrieSync.NumConcurrentTrieSyncers, + maxHardCapForMissingNodes: args.GeneralConfig.TrieSync.MaxHardCapForMissingNodes, + trieSyncerVersion: args.GeneralConfig.TrieSync.TrieSyncerVersion, + checkNodesOnDisk: args.GeneralConfig.TrieSync.CheckNodesOnDisk, + dataSyncerFactory: args.DataSyncerCreator, + storerScheduledSCRs: args.ScheduledSCRsStorer, + shardCoordinator: args.GenesisShardCoordinator, + trieSyncStatisticsProvider: args.TrieSyncStatisticsProvider, + nodeProcessingMode: args.NodeProcessingMode, + nodeOperationMode: common.NormalOperation, + stateStatsHandler: args.StateStatsHandler, + startEpoch: args.GeneralConfig.EpochStartConfig.GenesisEpoch, + nodesCoordinatorRegistryFactory: args.NodesCoordinatorRegistryFactory, } if epochStartProvider.prefsConfig.FullArchive { @@ -754,19 +759,20 @@ func (e *epochStartBootstrap) processNodesConfig(pubKey []byte) ([]*block.MiniBl shardId = e.genesisShardCoordinator.SelfId() } argsNewValidatorStatusSyncers := ArgsNewSyncValidatorStatus{ - DataPool: e.dataPool, - Marshalizer: e.coreComponentsHolder.InternalMarshalizer(), - RequestHandler: e.requestHandler, - ChanceComputer: e.rater, - GenesisNodesConfig: e.genesisNodesConfig, - NodeShuffler: e.nodeShuffler, - Hasher: e.coreComponentsHolder.Hasher(), - PubKey: pubKey, - ShardIdAsObserver: shardId, - ChanNodeStop: e.coreComponentsHolder.ChanStopNodeProcess(), - NodeTypeProvider: e.coreComponentsHolder.NodeTypeProvider(), - IsFullArchive: e.prefsConfig.FullArchive, - EnableEpochsHandler: e.coreComponentsHolder.EnableEpochsHandler(), + DataPool: e.dataPool, + Marshalizer: e.coreComponentsHolder.InternalMarshalizer(), + RequestHandler: e.requestHandler, + ChanceComputer: e.rater, + GenesisNodesConfig: e.genesisNodesConfig, + NodeShuffler: e.nodeShuffler, + Hasher: e.coreComponentsHolder.Hasher(), + PubKey: pubKey, + ShardIdAsObserver: shardId, + ChanNodeStop: e.coreComponentsHolder.ChanStopNodeProcess(), + NodeTypeProvider: e.coreComponentsHolder.NodeTypeProvider(), + IsFullArchive: e.prefsConfig.FullArchive, + EnableEpochsHandler: e.coreComponentsHolder.EnableEpochsHandler(), + NodesCoordinatorRegistryFactory: e.nodesCoordinatorRegistryFactory, } e.nodesConfigHandler, err = NewSyncValidatorStatus(argsNewValidatorStatusSyncers) @@ -784,20 +790,22 @@ func (e *epochStartBootstrap) processNodesConfig(pubKey []byte) ([]*block.MiniBl func (e *epochStartBootstrap) requestAndProcessForMeta(peerMiniBlocks []*block.MiniBlock) error { var err error - storageHandlerComponent, err := NewMetaStorageHandler( - e.generalConfig, - e.prefsConfig, - e.shardCoordinator, - e.coreComponentsHolder.PathHandler(), - e.coreComponentsHolder.InternalMarshalizer(), - 
e.coreComponentsHolder.Hasher(), - e.epochStartMeta.GetEpoch(), - e.coreComponentsHolder.Uint64ByteSliceConverter(), - e.coreComponentsHolder.NodeTypeProvider(), - e.nodeProcessingMode, - e.cryptoComponentsHolder.ManagedPeersHolder(), - e.stateStatsHandler, - ) + argsStorageHandler := StorageHandlerArgs{ + GeneralConfig: e.generalConfig, + PreferencesConfig: e.prefsConfig, + ShardCoordinator: e.shardCoordinator, + PathManagerHandler: e.coreComponentsHolder.PathHandler(), + Marshaller: e.coreComponentsHolder.InternalMarshalizer(), + Hasher: e.coreComponentsHolder.Hasher(), + CurrentEpoch: e.epochStartMeta.GetEpoch(), + Uint64Converter: e.coreComponentsHolder.Uint64ByteSliceConverter(), + NodeTypeProvider: e.coreComponentsHolder.NodeTypeProvider(), + NodesCoordinatorRegistryFactory: e.nodesCoordinatorRegistryFactory, + ManagedPeersHolder: e.cryptoComponentsHolder.ManagedPeersHolder(), + NodeProcessingMode: e.nodeProcessingMode, + StateStatsHandler: e.stateStatsHandler, + } + storageHandlerComponent, err := NewMetaStorageHandler(argsStorageHandler) if err != nil { return err } @@ -954,20 +962,22 @@ func (e *epochStartBootstrap) requestAndProcessForShard(peerMiniBlocks []*block. e.syncedHeaders[hash] = hdr } - storageHandlerComponent, err := NewShardStorageHandler( - e.generalConfig, - e.prefsConfig, - e.shardCoordinator, - e.coreComponentsHolder.PathHandler(), - e.coreComponentsHolder.InternalMarshalizer(), - e.coreComponentsHolder.Hasher(), - e.baseData.lastEpoch, - e.coreComponentsHolder.Uint64ByteSliceConverter(), - e.coreComponentsHolder.NodeTypeProvider(), - e.nodeProcessingMode, - e.cryptoComponentsHolder.ManagedPeersHolder(), - e.stateStatsHandler, - ) + argsStorageHandler := StorageHandlerArgs{ + GeneralConfig: e.generalConfig, + PreferencesConfig: e.prefsConfig, + ShardCoordinator: e.shardCoordinator, + PathManagerHandler: e.coreComponentsHolder.PathHandler(), + Marshaller: e.coreComponentsHolder.InternalMarshalizer(), + Hasher: e.coreComponentsHolder.Hasher(), + CurrentEpoch: e.baseData.lastEpoch, + Uint64Converter: e.coreComponentsHolder.Uint64ByteSliceConverter(), + NodeTypeProvider: e.coreComponentsHolder.NodeTypeProvider(), + NodesCoordinatorRegistryFactory: e.nodesCoordinatorRegistryFactory, + ManagedPeersHolder: e.cryptoComponentsHolder.ManagedPeersHolder(), + NodeProcessingMode: e.nodeProcessingMode, + StateStatsHandler: e.stateStatsHandler, + } + storageHandlerComponent, err := NewShardStorageHandler(argsStorageHandler) if err != nil { return err } @@ -1220,22 +1230,23 @@ func (e *epochStartBootstrap) createResolversContainer() error { // this one should only be used before determining the correct shard where the node should reside log.Debug("epochStartBootstrap.createRequestHandler", "shard", e.shardCoordinator.SelfId()) resolversContainerArgs := resolverscontainer.FactoryArgs{ - ShardCoordinator: e.shardCoordinator, - MainMessenger: e.mainMessenger, - FullArchiveMessenger: e.fullArchiveMessenger, - Store: storageService, - Marshalizer: e.coreComponentsHolder.InternalMarshalizer(), - DataPools: e.dataPool, - Uint64ByteSliceConverter: uint64ByteSlice.NewBigEndianConverter(), - NumConcurrentResolvingJobs: 10, - DataPacker: dataPacker, - TriesContainer: e.trieContainer, - SizeCheckDelta: 0, - InputAntifloodHandler: disabled.NewAntiFloodHandler(), - OutputAntifloodHandler: disabled.NewAntiFloodHandler(), - MainPreferredPeersHolder: disabled.NewPreferredPeersHolder(), - FullArchivePreferredPeersHolder: disabled.NewPreferredPeersHolder(), - PayloadValidator: payloadValidator, + 
ShardCoordinator: e.shardCoordinator, + MainMessenger: e.mainMessenger, + FullArchiveMessenger: e.fullArchiveMessenger, + Store: storageService, + Marshalizer: e.coreComponentsHolder.InternalMarshalizer(), + DataPools: e.dataPool, + Uint64ByteSliceConverter: uint64ByteSlice.NewBigEndianConverter(), + NumConcurrentResolvingJobs: 10, + NumConcurrentResolvingTrieNodesJobs: 3, + DataPacker: dataPacker, + TriesContainer: e.trieContainer, + SizeCheckDelta: 0, + InputAntifloodHandler: disabled.NewAntiFloodHandler(), + OutputAntifloodHandler: disabled.NewAntiFloodHandler(), + MainPreferredPeersHolder: disabled.NewPreferredPeersHolder(), + FullArchivePreferredPeersHolder: disabled.NewPreferredPeersHolder(), + PayloadValidator: payloadValidator, } resolverFactory, err := resolverscontainer.NewMetaResolversContainerFactory(resolversContainerArgs) if err != nil { diff --git a/epochStart/bootstrap/process_test.go b/epochStart/bootstrap/process_test.go index d95d97282d5..11a42a22301 100644 --- a/epochStart/bootstrap/process_test.go +++ b/epochStart/bootstrap/process_test.go @@ -41,6 +41,7 @@ import ( "github.com/multiversx/mx-chain-go/testscommon/enableEpochsHandlerMock" "github.com/multiversx/mx-chain-go/testscommon/epochNotifier" "github.com/multiversx/mx-chain-go/testscommon/genericMocks" + "github.com/multiversx/mx-chain-go/testscommon/genesisMocks" "github.com/multiversx/mx-chain-go/testscommon/hashingMocks" "github.com/multiversx/mx-chain-go/testscommon/marshallerMock" "github.com/multiversx/mx-chain-go/testscommon/nodeTypeProviderMock" @@ -85,7 +86,14 @@ func createComponentsForEpochStart() (*mock.CoreComponentsMock, *mock.CryptoComp NodeTypeProviderField: &nodeTypeProviderMock.NodeTypeProviderStub{}, ProcessStatusHandlerInstance: &testscommon.ProcessStatusHandlerStub{}, HardforkTriggerPubKeyField: []byte("provided hardfork pub key"), - EnableEpochsHandlerField: &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, + EnableEpochsHandlerField: &enableEpochsHandlerMock.EnableEpochsHandlerStub{ + GetActivationEpochCalled: func(flag core.EnableEpochFlag) uint32 { + if flag == common.StakingV4Step2Flag { + return 99999 + } + return 0 + }, + }, }, &mock.CryptoComponentsMock{ PubKey: &cryptoMocks.PublicKeyStub{}, @@ -111,9 +119,9 @@ func createMockEpochStartBootstrapArgs( MainMessenger: &p2pmocks.MessengerStub{ ConnectedPeersCalled: func() []core.PeerID { return []core.PeerID{"peer0", "peer1", "peer2", "peer3", "peer4", "peer5"} - }, - }, - FullArchiveMessenger: &p2pmocks.MessengerStub{}, + }}, + NodesCoordinatorRegistryFactory: &shardingMocks.NodesCoordinatorRegistryFactoryMock{}, + FullArchiveMessenger: &p2pmocks.MessengerStub{}, GeneralConfig: config.Config{ MiniBlocksStorage: generalCfg.MiniBlocksStorage, PeerBlockBodyStorage: generalCfg.PeerBlockBodyStorage, @@ -205,7 +213,7 @@ func createMockEpochStartBootstrapArgs( return 1 }, }, - GenesisNodesConfig: &mock.NodesSetupStub{}, + GenesisNodesConfig: &genesisMocks.NodesSetupStub{}, GenesisShardCoordinator: mock.NewMultipleShardsCoordinatorMock(), Rater: &mock.RaterStub{}, DestinationShardAsObserver: 0, @@ -794,7 +802,7 @@ func TestIsStartInEpochZero(t *testing.T) { coreComp, cryptoComp := createComponentsForEpochStart() args := createMockEpochStartBootstrapArgs(coreComp, cryptoComp) - args.GenesisNodesConfig = &mock.NodesSetupStub{ + args.GenesisNodesConfig = &genesisMocks.NodesSetupStub{ GetStartTimeCalled: func() int64 { return 1000 }, @@ -828,7 +836,7 @@ func TestEpochStartBootstrap_BootstrapShouldStartBootstrapProcess(t *testing.T) roundDuration := 
uint64(60000) coreComp, cryptoComp := createComponentsForEpochStart() args := createMockEpochStartBootstrapArgs(coreComp, cryptoComp) - args.GenesisNodesConfig = &mock.NodesSetupStub{ + args.GenesisNodesConfig = &genesisMocks.NodesSetupStub{ GetRoundDurationCalled: func() uint64 { return roundDuration }, @@ -887,7 +895,7 @@ func TestPrepareForEpochZero_NodeInGenesisShouldNotAlterShardID(t *testing.T) { } args.DestinationShardAsObserver = uint32(7) - args.GenesisNodesConfig = &mock.NodesSetupStub{ + args.GenesisNodesConfig = &genesisMocks.NodesSetupStub{ InitialNodesInfoCalled: func() (map[uint32][]nodesCoordinator.GenesisNodeInfoHandler, map[uint32][]nodesCoordinator.GenesisNodeInfoHandler) { eligibleMap := map[uint32][]nodesCoordinator.GenesisNodeInfoHandler{ 1: {mock.NewNodeInfo([]byte("addr"), []byte("pubKey11"), 1, initRating)}, @@ -922,7 +930,7 @@ func TestPrepareForEpochZero_NodeNotInGenesisShouldAlterShardID(t *testing.T) { }, } args.DestinationShardAsObserver = desiredShardAsObserver - args.GenesisNodesConfig = &mock.NodesSetupStub{ + args.GenesisNodesConfig = &genesisMocks.NodesSetupStub{ InitialNodesInfoCalled: func() (map[uint32][]nodesCoordinator.GenesisNodeInfoHandler, map[uint32][]nodesCoordinator.GenesisNodeInfoHandler) { eligibleMap := map[uint32][]nodesCoordinator.GenesisNodeInfoHandler{ 1: {mock.NewNodeInfo([]byte("addr"), []byte("pubKey11"), 1, initRating)}, @@ -1487,7 +1495,7 @@ func getNodesConfigMock(numOfShards uint32) sharding.GenesisNodesSetupHandler { roundDurationMillis := 4000 epochDurationMillis := 50 * int64(roundDurationMillis) - nodesConfig := &mock.NodesSetupStub{ + nodesConfig := &genesisMocks.NodesSetupStub{ InitialNodesInfoCalled: func() (m map[uint32][]nodesCoordinator.GenesisNodeInfoHandler, m2 map[uint32][]nodesCoordinator.GenesisNodeInfoHandler) { oneMap := make(map[uint32][]nodesCoordinator.GenesisNodeInfoHandler) for i := uint32(0); i < numOfShards; i++ { diff --git a/epochStart/bootstrap/shardStorageHandler.go b/epochStart/bootstrap/shardStorageHandler.go index 881aedf74c2..49535a7228c 100644 --- a/epochStart/bootstrap/shardStorageHandler.go +++ b/epochStart/bootstrap/shardStorageHandler.go @@ -10,17 +10,11 @@ import ( "github.com/multiversx/mx-chain-core-go/core/check" "github.com/multiversx/mx-chain-core-go/data" "github.com/multiversx/mx-chain-core-go/data/block" - "github.com/multiversx/mx-chain-core-go/data/typeConverters" - "github.com/multiversx/mx-chain-core-go/hashing" - "github.com/multiversx/mx-chain-core-go/marshal" "github.com/multiversx/mx-chain-go/common" - "github.com/multiversx/mx-chain-go/config" "github.com/multiversx/mx-chain-go/dataRetriever" "github.com/multiversx/mx-chain-go/epochStart" "github.com/multiversx/mx-chain-go/epochStart/bootstrap/disabled" "github.com/multiversx/mx-chain-go/process/block/bootstrapStorage" - "github.com/multiversx/mx-chain-go/sharding" - "github.com/multiversx/mx-chain-go/storage" "github.com/multiversx/mx-chain-go/storage/factory" logger "github.com/multiversx/mx-chain-logger-go" ) @@ -30,36 +24,28 @@ type shardStorageHandler struct { } // NewShardStorageHandler will return a new instance of shardStorageHandler -func NewShardStorageHandler( - generalConfig config.Config, - prefsConfig config.PreferencesConfig, - shardCoordinator sharding.Coordinator, - pathManagerHandler storage.PathManagerHandler, - marshalizer marshal.Marshalizer, - hasher hashing.Hasher, - currentEpoch uint32, - uint64Converter typeConverters.Uint64ByteSliceConverter, - nodeTypeProvider core.NodeTypeProviderHandler, - 
nodeProcessingMode common.NodeProcessingMode, - managedPeersHolder common.ManagedPeersHolder, - stateStatsHandler common.StateStatisticsHandler, -) (*shardStorageHandler, error) { +func NewShardStorageHandler(args StorageHandlerArgs) (*shardStorageHandler, error) { + err := checkNilArgs(args) + if err != nil { + return nil, err + } + epochStartNotifier := &disabled.EpochStartNotifier{} storageFactory, err := factory.NewStorageServiceFactory( factory.StorageServiceFactoryArgs{ - Config: generalConfig, - PrefsConfig: prefsConfig, - ShardCoordinator: shardCoordinator, - PathManager: pathManagerHandler, + Config: args.GeneralConfig, + PrefsConfig: args.PreferencesConfig, + ShardCoordinator: args.ShardCoordinator, + PathManager: args.PathManagerHandler, EpochStartNotifier: epochStartNotifier, - NodeTypeProvider: nodeTypeProvider, - CurrentEpoch: currentEpoch, + NodeTypeProvider: args.NodeTypeProvider, StorageType: factory.BootstrapStorageService, + ManagedPeersHolder: args.ManagedPeersHolder, + CurrentEpoch: args.CurrentEpoch, CreateTrieEpochRootHashStorer: false, - NodeProcessingMode: nodeProcessingMode, + NodeProcessingMode: args.NodeProcessingMode, RepopulateTokensSupplies: false, // tokens supplies cannot be repopulated at this time - ManagedPeersHolder: managedPeersHolder, - StateStatsHandler: stateStatsHandler, + StateStatsHandler: args.StateStatsHandler, }, ) if err != nil { @@ -72,12 +58,13 @@ func NewShardStorageHandler( } base := &baseStorageHandler{ - storageService: storageService, - shardCoordinator: shardCoordinator, - marshalizer: marshalizer, - hasher: hasher, - currentEpoch: currentEpoch, - uint64Converter: uint64Converter, + storageService: storageService, + shardCoordinator: args.ShardCoordinator, + marshalizer: args.Marshaller, + hasher: args.Hasher, + currentEpoch: args.CurrentEpoch, + uint64Converter: args.Uint64Converter, + nodesCoordinatorRegistryFactory: args.NodesCoordinatorRegistryFactory, } return &shardStorageHandler{baseStorageHandler: base}, nil @@ -123,7 +110,7 @@ func (ssh *shardStorageHandler) SaveDataToStorage(components *ComponentsNeededFo return err } - components.NodesConfig.CurrentEpoch = components.ShardHeader.GetEpoch() + components.NodesConfig.SetCurrentEpoch(components.ShardHeader.GetEpoch()) nodesCoordinatorConfigKey, err := ssh.saveNodesCoordinatorRegistry(components.EpochStartMetaBlock, components.NodesConfig) if err != nil { return err diff --git a/epochStart/bootstrap/shardStorageHandler_test.go b/epochStart/bootstrap/shardStorageHandler_test.go index b27f13df28b..018bc4b99b8 100644 --- a/epochStart/bootstrap/shardStorageHandler_test.go +++ b/epochStart/bootstrap/shardStorageHandler_test.go @@ -13,24 +13,13 @@ import ( "github.com/multiversx/mx-chain-core-go/core/check" "github.com/multiversx/mx-chain-core-go/data" "github.com/multiversx/mx-chain-core-go/data/block" - "github.com/multiversx/mx-chain-core-go/data/typeConverters" - "github.com/multiversx/mx-chain-core-go/hashing" - "github.com/multiversx/mx-chain-core-go/marshal" - "github.com/multiversx/mx-chain-go/common" - "github.com/multiversx/mx-chain-go/common/statistics/disabled" - "github.com/multiversx/mx-chain-go/config" "github.com/multiversx/mx-chain-go/dataRetriever" "github.com/multiversx/mx-chain-go/epochStart" - "github.com/multiversx/mx-chain-go/epochStart/mock" "github.com/multiversx/mx-chain-go/process/block/bootstrapStorage" - "github.com/multiversx/mx-chain-go/sharding" "github.com/multiversx/mx-chain-go/sharding/nodesCoordinator" "github.com/multiversx/mx-chain-go/storage" - 
"github.com/multiversx/mx-chain-go/testscommon" epochStartMocks "github.com/multiversx/mx-chain-go/testscommon/bootstrapMocks/epochStart" - "github.com/multiversx/mx-chain-go/testscommon/hashingMocks" "github.com/multiversx/mx-chain-go/testscommon/marshallerMock" - "github.com/multiversx/mx-chain-go/testscommon/nodeTypeProviderMock" storageStubs "github.com/multiversx/mx-chain-go/testscommon/storage" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -41,21 +30,8 @@ func TestNewShardStorageHandler_ShouldWork(t *testing.T) { _ = os.RemoveAll("./Epoch_0") }() - args := createDefaultShardStorageArgs() - shardStorage, err := NewShardStorageHandler( - args.generalConfig, - args.prefsConfig, - args.shardCoordinator, - args.pathManagerHandler, - args.marshalizer, - args.hasher, - 1, - args.uint64Converter, - args.nodeTypeProvider, - args.nodeProcessingMode, - args.managedPeersHolder, - disabled.NewStateStatistics(), - ) + args := createStorageHandlerArgs() + shardStorage, err := NewShardStorageHandler(args) assert.False(t, check.IfNil(shardStorage)) assert.Nil(t, err) @@ -66,21 +42,8 @@ func TestShardStorageHandler_SaveDataToStorageShardDataNotFound(t *testing.T) { _ = os.RemoveAll("./Epoch_0") }() - args := createDefaultShardStorageArgs() - shardStorage, _ := NewShardStorageHandler( - args.generalConfig, - args.prefsConfig, - args.shardCoordinator, - args.pathManagerHandler, - args.marshalizer, - args.hasher, - 1, - args.uint64Converter, - args.nodeTypeProvider, - args.nodeProcessingMode, - args.managedPeersHolder, - disabled.NewStateStatistics(), - ) + args := createStorageHandlerArgs() + shardStorage, _ := NewShardStorageHandler(args) components := &ComponentsNeededForBootstrap{ EpochStartMetaBlock: &block.MetaBlock{Epoch: 1}, @@ -97,21 +60,8 @@ func TestShardStorageHandler_SaveDataToStorageMissingHeader(t *testing.T) { _ = os.RemoveAll("./Epoch_0") }() - args := createDefaultShardStorageArgs() - shardStorage, _ := NewShardStorageHandler( - args.generalConfig, - args.prefsConfig, - args.shardCoordinator, - args.pathManagerHandler, - args.marshalizer, - args.hasher, - 1, - args.uint64Converter, - args.nodeTypeProvider, - args.nodeProcessingMode, - args.managedPeersHolder, - disabled.NewStateStatistics(), - ) + args := createStorageHandlerArgs() + shardStorage, _ := NewShardStorageHandler(args) components := &ComponentsNeededForBootstrap{ EpochStartMetaBlock: &block.MetaBlock{ @@ -151,21 +101,8 @@ func testShardWithMissingStorer(missingUnit dataRetriever.UnitType, atCallNumber }() counter := 0 - args := createDefaultShardStorageArgs() - shardStorage, _ := NewShardStorageHandler( - args.generalConfig, - args.prefsConfig, - args.shardCoordinator, - args.pathManagerHandler, - args.marshalizer, - args.hasher, - 1, - args.uint64Converter, - args.nodeTypeProvider, - args.nodeProcessingMode, - args.managedPeersHolder, - disabled.NewStateStatistics(), - ) + args := createStorageHandlerArgs() + shardStorage, _ := NewShardStorageHandler(args) shardStorage.storageService = &storageStubs.ChainStorerStub{ GetStorerCalled: func(unitType dataRetriever.UnitType) (storage.Storer, error) { counter++ @@ -206,21 +143,8 @@ func TestShardStorageHandler_SaveDataToStorage(t *testing.T) { _ = os.RemoveAll("./Epoch_0") }() - args := createDefaultShardStorageArgs() - shardStorage, _ := NewShardStorageHandler( - args.generalConfig, - args.prefsConfig, - args.shardCoordinator, - args.pathManagerHandler, - args.marshalizer, - args.hasher, - 1, - args.uint64Converter, - args.nodeTypeProvider, - 
args.nodeProcessingMode, - args.managedPeersHolder, - disabled.NewStateStatistics(), - ) + args := createStorageHandlerArgs() + shardStorage, _ := NewShardStorageHandler(args) hash1 := []byte("hash1") hdr1 := block.MetaBlock{ @@ -318,21 +242,8 @@ func TestShardStorageHandler_getCrossProcessedMiniBlockHeadersDestMe(t *testing. mbs := append(intraMbs, crossMbs...) - args := createDefaultShardStorageArgs() - shardStorage, _ := NewShardStorageHandler( - args.generalConfig, - args.prefsConfig, - args.shardCoordinator, - args.pathManagerHandler, - args.marshalizer, - args.hasher, - 1, - args.uint64Converter, - args.nodeTypeProvider, - args.nodeProcessingMode, - args.managedPeersHolder, - disabled.NewStateStatistics(), - ) + args := createStorageHandlerArgs() + shardStorage, _ := NewShardStorageHandler(args) shardHeader := &block.Header{ Nonce: 100, MiniBlockHeaders: mbs, @@ -351,21 +262,8 @@ func TestShardStorageHandler_getCrossProcessedMiniBlockHeadersDestMe(t *testing. func TestShardStorageHandler_getProcessedAndPendingMiniBlocksWithScheduledErrorGettingProcessedAndPendingMbs(t *testing.T) { t.Parallel() - args := createDefaultShardStorageArgs() - shardStorage, _ := NewShardStorageHandler( - args.generalConfig, - args.prefsConfig, - args.shardCoordinator, - args.pathManagerHandler, - args.marshalizer, - args.hasher, - 1, - args.uint64Converter, - args.nodeTypeProvider, - args.nodeProcessingMode, - args.managedPeersHolder, - disabled.NewStateStatistics(), - ) + args := createStorageHandlerArgs() + shardStorage, _ := NewShardStorageHandler(args) meta := &block.MetaBlock{ Nonce: 100, EpochStart: block.EpochStart{}, @@ -382,21 +280,8 @@ func TestShardStorageHandler_getProcessedAndPendingMiniBlocksWithScheduledErrorG func TestShardStorageHandler_getProcessedAndPendingMiniBlocksWithScheduledNoScheduled(t *testing.T) { t.Parallel() - args := createDefaultShardStorageArgs() - shardStorage, _ := NewShardStorageHandler( - args.generalConfig, - args.prefsConfig, - args.shardCoordinator, - args.pathManagerHandler, - args.marshalizer, - args.hasher, - 1, - args.uint64Converter, - args.nodeTypeProvider, - args.nodeProcessingMode, - args.managedPeersHolder, - disabled.NewStateStatistics(), - ) + args := createStorageHandlerArgs() + shardStorage, _ := NewShardStorageHandler(args) scenario := createPendingAndProcessedMiniBlocksScenario() processedMiniBlocks, pendingMiniBlocks, err := shardStorage.getProcessedAndPendingMiniBlocksWithScheduled(scenario.metaBlock, scenario.headers, scenario.shardHeader, false) @@ -410,21 +295,8 @@ func TestShardStorageHandler_getProcessedAndPendingMiniBlocksWithScheduledNoSche func TestShardStorageHandler_getProcessedAndPendingMiniBlocksWithScheduledWrongHeaderType(t *testing.T) { t.Parallel() - args := createDefaultShardStorageArgs() - shardStorage, _ := NewShardStorageHandler( - args.generalConfig, - args.prefsConfig, - args.shardCoordinator, - args.pathManagerHandler, - args.marshalizer, - args.hasher, - 1, - args.uint64Converter, - args.nodeTypeProvider, - args.nodeProcessingMode, - args.managedPeersHolder, - disabled.NewStateStatistics(), - ) + args := createStorageHandlerArgs() + shardStorage, _ := NewShardStorageHandler(args) scenario := createPendingAndProcessedMiniBlocksScenario() wrongShardHeader := &block.MetaBlock{} @@ -445,21 +317,8 @@ func TestShardStorageHandler_getProcessedAndPendingMiniBlocksWithScheduledWrongH func TestShardStorageHandler_getProcessedAndPendingMiniBlocksWithScheduled(t *testing.T) { t.Parallel() - args := createDefaultShardStorageArgs() - 
shardStorage, _ := NewShardStorageHandler( - args.generalConfig, - args.prefsConfig, - args.shardCoordinator, - args.pathManagerHandler, - args.marshalizer, - args.hasher, - 1, - args.uint64Converter, - args.nodeTypeProvider, - args.nodeProcessingMode, - args.managedPeersHolder, - disabled.NewStateStatistics(), - ) + args := createStorageHandlerArgs() + shardStorage, _ := NewShardStorageHandler(args) scenario := createPendingAndProcessedMiniBlocksScenario() processedMiniBlocks, pendingMiniBlocks, err := shardStorage.getProcessedAndPendingMiniBlocksWithScheduled(scenario.metaBlock, scenario.headers, scenario.shardHeader, true) @@ -626,21 +485,8 @@ func TestShardStorageHandler_getProcessedAndPendingMiniBlocksErrorGettingEpochSt _ = os.RemoveAll("./Epoch_0") }() - args := createDefaultShardStorageArgs() - shardStorage, _ := NewShardStorageHandler( - args.generalConfig, - args.prefsConfig, - args.shardCoordinator, - args.pathManagerHandler, - args.marshalizer, - args.hasher, - 1, - args.uint64Converter, - args.nodeTypeProvider, - args.nodeProcessingMode, - args.managedPeersHolder, - disabled.NewStateStatistics(), - ) + args := createStorageHandlerArgs() + shardStorage, _ := NewShardStorageHandler(args) meta := &block.MetaBlock{ Nonce: 100, EpochStart: block.EpochStart{}, @@ -662,21 +508,8 @@ func TestShardStorageHandler_getProcessedAndPendingMiniBlocksMissingHeader(t *te }() lastFinishedMetaBlock := "last finished meta block" - args := createDefaultShardStorageArgs() - shardStorage, _ := NewShardStorageHandler( - args.generalConfig, - args.prefsConfig, - args.shardCoordinator, - args.pathManagerHandler, - args.marshalizer, - args.hasher, - 1, - args.uint64Converter, - args.nodeTypeProvider, - args.nodeProcessingMode, - args.managedPeersHolder, - disabled.NewStateStatistics(), - ) + args := createStorageHandlerArgs() + shardStorage, _ := NewShardStorageHandler(args) meta := &block.MetaBlock{ Nonce: 100, EpochStart: block.EpochStart{ @@ -701,21 +534,8 @@ func TestShardStorageHandler_getProcessedAndPendingMiniBlocksWrongHeader(t *test lastFinishedMetaBlockHash := "last finished meta block" firstPendingMeta := "first pending meta" - args := createDefaultShardStorageArgs() - shardStorage, _ := NewShardStorageHandler( - args.generalConfig, - args.prefsConfig, - args.shardCoordinator, - args.pathManagerHandler, - args.marshalizer, - args.hasher, - 1, - args.uint64Converter, - args.nodeTypeProvider, - args.nodeProcessingMode, - args.managedPeersHolder, - disabled.NewStateStatistics(), - ) + args := createStorageHandlerArgs() + shardStorage, _ := NewShardStorageHandler(args) lastFinishedHeaders := createDefaultEpochStartShardData([]byte(lastFinishedMetaBlockHash), []byte("headerHash")) lastFinishedHeaders[0].FirstPendingMetaBlock = []byte(firstPendingMeta) meta := &block.MetaBlock{ @@ -745,21 +565,8 @@ func TestShardStorageHandler_getProcessedAndPendingMiniBlocksNilMetaBlock(t *tes lastFinishedMetaBlockHash := "last finished meta block" firstPendingMeta := "first pending meta" - args := createDefaultShardStorageArgs() - shardStorage, _ := NewShardStorageHandler( - args.generalConfig, - args.prefsConfig, - args.shardCoordinator, - args.pathManagerHandler, - args.marshalizer, - args.hasher, - 1, - args.uint64Converter, - args.nodeTypeProvider, - args.nodeProcessingMode, - args.managedPeersHolder, - disabled.NewStateStatistics(), - ) + args := createStorageHandlerArgs() + shardStorage, _ := NewShardStorageHandler(args) lastFinishedHeaders := 
createDefaultEpochStartShardData([]byte(lastFinishedMetaBlockHash), []byte("headerHash")) lastFinishedHeaders[0].FirstPendingMetaBlock = []byte(firstPendingMeta) meta := &block.MetaBlock{ @@ -791,21 +598,8 @@ func TestShardStorageHandler_getProcessedAndPendingMiniBlocksNoProcessedNoPendin lastFinishedMetaBlockHash := "last finished meta block" firstPendingMeta := "first pending meta" - args := createDefaultShardStorageArgs() - shardStorage, _ := NewShardStorageHandler( - args.generalConfig, - args.prefsConfig, - args.shardCoordinator, - args.pathManagerHandler, - args.marshalizer, - args.hasher, - 1, - args.uint64Converter, - args.nodeTypeProvider, - args.nodeProcessingMode, - args.managedPeersHolder, - disabled.NewStateStatistics(), - ) + args := createStorageHandlerArgs() + shardStorage, _ := NewShardStorageHandler(args) lastFinishedHeaders := createDefaultEpochStartShardData([]byte(lastFinishedMetaBlockHash), []byte("headerHash")) lastFinishedHeaders[0].FirstPendingMetaBlock = []byte(firstPendingMeta) lastFinishedHeaders[0].PendingMiniBlockHeaders = nil @@ -833,21 +627,8 @@ func TestShardStorageHandler_getProcessedAndPendingMiniBlocksNoProcessedNoPendin func TestShardStorageHandler_getProcessedAndPendingMiniBlocksWithProcessedAndPendingMbs(t *testing.T) { t.Parallel() - args := createDefaultShardStorageArgs() - shardStorage, _ := NewShardStorageHandler( - args.generalConfig, - args.prefsConfig, - args.shardCoordinator, - args.pathManagerHandler, - args.marshalizer, - args.hasher, - 1, - args.uint64Converter, - args.nodeTypeProvider, - args.nodeProcessingMode, - args.managedPeersHolder, - disabled.NewStateStatistics(), - ) + args := createStorageHandlerArgs() + shardStorage, _ := NewShardStorageHandler(args) scenario := createPendingAndProcessedMiniBlocksScenario() processedMiniBlocks, pendingMiniBlocks, firstPendingMetaBlockHash, err := shardStorage.getProcessedAndPendingMiniBlocks(scenario.metaBlock, scenario.headers) @@ -864,21 +645,8 @@ func TestShardStorageHandler_saveLastCrossNotarizedHeadersWithoutScheduledGetSha _ = os.RemoveAll("./Epoch_0") }() - args := createDefaultShardStorageArgs() - shardStorage, _ := NewShardStorageHandler( - args.generalConfig, - args.prefsConfig, - args.shardCoordinator, - args.pathManagerHandler, - args.marshalizer, - args.hasher, - 1, - args.uint64Converter, - args.nodeTypeProvider, - args.nodeProcessingMode, - args.managedPeersHolder, - disabled.NewStateStatistics(), - ) + args := createStorageHandlerArgs() + shardStorage, _ := NewShardStorageHandler(args) headers := map[string]data.HeaderHandler{} meta := &block.MetaBlock{ @@ -898,21 +666,8 @@ func TestShardStorageHandler_saveLastCrossNotarizedHeadersWithoutScheduledMissin _ = os.RemoveAll("./Epoch_0") }() - args := createDefaultShardStorageArgs() - shardStorage, _ := NewShardStorageHandler( - args.generalConfig, - args.prefsConfig, - args.shardCoordinator, - args.pathManagerHandler, - args.marshalizer, - args.hasher, - 1, - args.uint64Converter, - args.nodeTypeProvider, - args.nodeProcessingMode, - args.managedPeersHolder, - disabled.NewStateStatistics(), - ) + args := createStorageHandlerArgs() + shardStorage, _ := NewShardStorageHandler(args) shard0HeaderHash := "shard0 header hash" lastFinishedMetaBlock := "last finished meta block" @@ -940,21 +695,8 @@ func TestShardStorageHandler_saveLastCrossNotarizedHeadersWithoutScheduledWrongT _ = os.RemoveAll("./Epoch_0") }() - args := createDefaultShardStorageArgs() - shardStorage, _ := NewShardStorageHandler( - args.generalConfig, - args.prefsConfig, - 
args.shardCoordinator, - args.pathManagerHandler, - args.marshalizer, - args.hasher, - 1, - args.uint64Converter, - args.nodeTypeProvider, - args.nodeProcessingMode, - args.managedPeersHolder, - disabled.NewStateStatistics(), - ) + args := createStorageHandlerArgs() + shardStorage, _ := NewShardStorageHandler(args) shard0HeaderHash := "shard0 header hash" lastFinishedMetaBlock := "last finished meta block" @@ -984,26 +726,12 @@ func TestShardStorageHandler_saveLastCrossNotarizedHeadersWithoutScheduledErrorW _ = os.RemoveAll("./Epoch_0") }() - args := createDefaultShardStorageArgs() + args := createStorageHandlerArgs() expectedErr := fmt.Errorf("expected error") - // Simulate an error when writing to storage with a mock marshaller - args.marshalizer = &marshallerMock.MarshalizerStub{MarshalCalled: func(obj interface{}) ([]byte, error) { + args.Marshaller = &marshallerMock.MarshalizerStub{MarshalCalled: func(obj interface{}) ([]byte, error) { return nil, expectedErr }} - shardStorage, _ := NewShardStorageHandler( - args.generalConfig, - args.prefsConfig, - args.shardCoordinator, - args.pathManagerHandler, - args.marshalizer, - args.hasher, - 1, - args.uint64Converter, - args.nodeTypeProvider, - args.nodeProcessingMode, - args.managedPeersHolder, - disabled.NewStateStatistics(), - ) + shardStorage, _ := NewShardStorageHandler(args) shard0HeaderHash := "shard0 header hash" lastFinishedMetaBlock := "last finished meta block" @@ -1033,21 +761,8 @@ func TestShardStorageHandler_saveLastCrossNotarizedHeadersWithoutScheduled(t *te _ = os.RemoveAll("./Epoch_0") }() - args := createDefaultShardStorageArgs() - shardStorage, _ := NewShardStorageHandler( - args.generalConfig, - args.prefsConfig, - args.shardCoordinator, - args.pathManagerHandler, - args.marshalizer, - args.hasher, - 1, - args.uint64Converter, - args.nodeTypeProvider, - args.nodeProcessingMode, - args.managedPeersHolder, - disabled.NewStateStatistics(), - ) + args := createStorageHandlerArgs() + shardStorage, _ := NewShardStorageHandler(args) shard0HeaderHash := "shard0 header hash" lastFinishedMetaBlock := "last finished meta block" @@ -1082,21 +797,8 @@ func TestShardStorageHandler_saveLastCrossNotarizedHeadersWithScheduledErrorUpda _ = os.RemoveAll("./Epoch_0") }() - args := createDefaultShardStorageArgs() - shardStorage, _ := NewShardStorageHandler( - args.generalConfig, - args.prefsConfig, - args.shardCoordinator, - args.pathManagerHandler, - args.marshalizer, - args.hasher, - 1, - args.uint64Converter, - args.nodeTypeProvider, - args.nodeProcessingMode, - args.managedPeersHolder, - disabled.NewStateStatistics(), - ) + args := createStorageHandlerArgs() + shardStorage, _ := NewShardStorageHandler(args) shard0HeaderHash := "shard0 header hash" lastFinishedMetaBlock := "last finished meta block" @@ -1125,21 +827,8 @@ func TestShardStorageHandler_saveLastCrossNotarizedHeadersWithScheduled(t *testi _ = os.RemoveAll("./Epoch_0") }() - args := createDefaultShardStorageArgs() - shardStorage, _ := NewShardStorageHandler( - args.generalConfig, - args.prefsConfig, - args.shardCoordinator, - args.pathManagerHandler, - args.marshalizer, - args.hasher, - 1, - args.uint64Converter, - args.nodeTypeProvider, - args.nodeProcessingMode, - args.managedPeersHolder, - disabled.NewStateStatistics(), - ) + args := createStorageHandlerArgs() + shardStorage, _ := NewShardStorageHandler(args) shard0HeaderHash := "shard0 header hash" lastFinishedMetaBlock := "last finished meta block" prevMetaHash := "prev metaHlock hash" @@ -1351,36 +1040,6 @@ func 
Test_getShardHeaderAndMetaHashes(t *testing.T) { require.Equal(t, metaHashes, headers[shardHdrKey].(data.ShardHeaderHandler).GetMetaBlockHashes()) } -type shardStorageArgs struct { - generalConfig config.Config - prefsConfig config.PreferencesConfig - shardCoordinator sharding.Coordinator - pathManagerHandler storage.PathManagerHandler - marshalizer marshal.Marshalizer - hasher hashing.Hasher - currentEpoch uint32 - uint64Converter typeConverters.Uint64ByteSliceConverter - nodeTypeProvider core.NodeTypeProviderHandler - nodeProcessingMode common.NodeProcessingMode - managedPeersHolder common.ManagedPeersHolder -} - -func createDefaultShardStorageArgs() shardStorageArgs { - return shardStorageArgs{ - generalConfig: testscommon.GetGeneralConfig(), - prefsConfig: config.PreferencesConfig{}, - shardCoordinator: &mock.ShardCoordinatorStub{}, - pathManagerHandler: &testscommon.PathManagerStub{}, - marshalizer: &mock.MarshalizerMock{}, - hasher: &hashingMocks.HasherMock{}, - currentEpoch: 0, - uint64Converter: &mock.Uint64ByteSliceConverterMock{}, - nodeTypeProvider: &nodeTypeProviderMock.NodeTypeProviderStub{}, - nodeProcessingMode: common.Normal, - managedPeersHolder: &testscommon.ManagedPeersHolderStub{}, - } -} - func createDefaultEpochStartShardData(lastFinishedMetaBlockHash []byte, shardHeaderHash []byte) []block.EpochStartShardData { return []block.EpochStartShardData{ { @@ -1451,7 +1110,6 @@ func createPendingAndProcessedMiniBlocksScenario() scenarioData { expectedPendingMbsWithScheduled := []bootstrapStorage.PendingMiniBlocksInfo{ {ShardID: 0, MiniBlocksHashes: [][]byte{crossMbHeaders[1].Hash, crossMbHeaders[2].Hash, crossMbHeaders[3].Hash, crossMbHeaders[4].Hash, crossMbHeaders[0].Hash}}, } - expectedProcessedMbsWithScheduled := make([]bootstrapStorage.MiniBlocksInMeta, 0) headers := map[string]data.HeaderHandler{ lastFinishedMetaBlockHash: &block.MetaBlock{ @@ -1492,7 +1150,7 @@ func createPendingAndProcessedMiniBlocksScenario() scenarioData { expectedPendingMbs: expectedPendingMiniBlocks, expectedProcessedMbs: expectedProcessedMiniBlocks, expectedPendingMbsWithScheduled: expectedPendingMbsWithScheduled, - expectedProcessedMbsWithScheduled: expectedProcessedMbsWithScheduled, + expectedProcessedMbsWithScheduled: []bootstrapStorage.MiniBlocksInMeta{}, } } diff --git a/epochStart/bootstrap/storageProcess.go b/epochStart/bootstrap/storageProcess.go index 0f87b3626e7..809b0dfbb8b 100644 --- a/epochStart/bootstrap/storageProcess.go +++ b/epochStart/bootstrap/storageProcess.go @@ -404,19 +404,20 @@ func (sesb *storageEpochStartBootstrap) processNodesConfig(pubKey []byte) error shardId = sesb.genesisShardCoordinator.SelfId() } argsNewValidatorStatusSyncers := ArgsNewSyncValidatorStatus{ - DataPool: sesb.dataPool, - Marshalizer: sesb.coreComponentsHolder.InternalMarshalizer(), - RequestHandler: sesb.requestHandler, - ChanceComputer: sesb.rater, - GenesisNodesConfig: sesb.genesisNodesConfig, - NodeShuffler: sesb.nodeShuffler, - Hasher: sesb.coreComponentsHolder.Hasher(), - PubKey: pubKey, - ShardIdAsObserver: shardId, - ChanNodeStop: sesb.coreComponentsHolder.ChanStopNodeProcess(), - NodeTypeProvider: sesb.coreComponentsHolder.NodeTypeProvider(), - IsFullArchive: sesb.prefsConfig.FullArchive, - EnableEpochsHandler: sesb.coreComponentsHolder.EnableEpochsHandler(), + DataPool: sesb.dataPool, + Marshalizer: sesb.coreComponentsHolder.InternalMarshalizer(), + RequestHandler: sesb.requestHandler, + ChanceComputer: sesb.rater, + GenesisNodesConfig: sesb.genesisNodesConfig, + NodeShuffler: 
sesb.nodeShuffler, + Hasher: sesb.coreComponentsHolder.Hasher(), + PubKey: pubKey, + ShardIdAsObserver: shardId, + ChanNodeStop: sesb.coreComponentsHolder.ChanStopNodeProcess(), + NodeTypeProvider: sesb.coreComponentsHolder.NodeTypeProvider(), + IsFullArchive: sesb.prefsConfig.FullArchive, + EnableEpochsHandler: sesb.coreComponentsHolder.EnableEpochsHandler(), + NodesCoordinatorRegistryFactory: sesb.nodesCoordinatorRegistryFactory, } sesb.nodesConfigHandler, err = NewSyncValidatorStatus(argsNewValidatorStatusSyncers) if err != nil { diff --git a/epochStart/bootstrap/storageProcess_test.go b/epochStart/bootstrap/storageProcess_test.go index 78288156144..a59b0d125f2 100644 --- a/epochStart/bootstrap/storageProcess_test.go +++ b/epochStart/bootstrap/storageProcess_test.go @@ -22,6 +22,7 @@ import ( epochStartMocks "github.com/multiversx/mx-chain-go/testscommon/bootstrapMocks/epochStart" dataRetrieverMock "github.com/multiversx/mx-chain-go/testscommon/dataRetriever" "github.com/multiversx/mx-chain-go/testscommon/economicsmocks" + "github.com/multiversx/mx-chain-go/testscommon/genesisMocks" "github.com/stretchr/testify/assert" ) @@ -92,7 +93,7 @@ func TestStorageEpochStartBootstrap_BootstrapFromGenesis(t *testing.T) { return 1 }, } - args.GenesisNodesConfig = &mock.NodesSetupStub{ + args.GenesisNodesConfig = &genesisMocks.NodesSetupStub{ GetRoundDurationCalled: func() uint64 { return roundDuration }, @@ -116,7 +117,7 @@ func TestStorageEpochStartBootstrap_BootstrapMetablockNotFound(t *testing.T) { return 1 }, } - args.GenesisNodesConfig = &mock.NodesSetupStub{ + args.GenesisNodesConfig = &genesisMocks.NodesSetupStub{ GetRoundDurationCalled: func() uint64 { return roundDuration }, diff --git a/epochStart/bootstrap/syncValidatorStatus.go b/epochStart/bootstrap/syncValidatorStatus.go index 0a74d4151fb..0bcb9308311 100644 --- a/epochStart/bootstrap/syncValidatorStatus.go +++ b/epochStart/bootstrap/syncValidatorStatus.go @@ -38,19 +38,20 @@ type syncValidatorStatus struct { // ArgsNewSyncValidatorStatus holds the arguments needed for creating a new validator status process component type ArgsNewSyncValidatorStatus struct { - DataPool dataRetriever.PoolsHolder - Marshalizer marshal.Marshalizer - Hasher hashing.Hasher - RequestHandler process.RequestHandler - ChanceComputer nodesCoordinator.ChanceComputer - GenesisNodesConfig sharding.GenesisNodesSetupHandler - NodeShuffler nodesCoordinator.NodesShuffler - PubKey []byte - ShardIdAsObserver uint32 - ChanNodeStop chan endProcess.ArgEndProcess - NodeTypeProvider NodeTypeProviderHandler - IsFullArchive bool - EnableEpochsHandler common.EnableEpochsHandler + DataPool dataRetriever.PoolsHolder + Marshalizer marshal.Marshalizer + Hasher hashing.Hasher + RequestHandler process.RequestHandler + ChanceComputer nodesCoordinator.ChanceComputer + GenesisNodesConfig sharding.GenesisNodesSetupHandler + NodeShuffler nodesCoordinator.NodesShuffler + PubKey []byte + ShardIdAsObserver uint32 + ChanNodeStop chan endProcess.ArgEndProcess + NodeTypeProvider NodeTypeProviderHandler + IsFullArchive bool + EnableEpochsHandler common.EnableEpochsHandler + NodesCoordinatorRegistryFactory nodesCoordinator.NodesCoordinatorRegistryFactory } // NewSyncValidatorStatus creates a new validator status process component @@ -111,26 +112,27 @@ func NewSyncValidatorStatus(args ArgsNewSyncValidatorStatus) (*syncValidatorStat s.memDB = disabled.CreateMemUnit() argsNodesCoordinator := nodesCoordinator.ArgNodesCoordinator{ - ShardConsensusGroupSize: 
int(args.GenesisNodesConfig.GetShardConsensusGroupSize()), - MetaConsensusGroupSize: int(args.GenesisNodesConfig.GetMetaConsensusGroupSize()), - Marshalizer: args.Marshalizer, - Hasher: args.Hasher, - Shuffler: args.NodeShuffler, - EpochStartNotifier: &disabled.EpochStartNotifier{}, - BootStorer: s.memDB, - ShardIDAsObserver: args.ShardIdAsObserver, - NbShards: args.GenesisNodesConfig.NumberOfShards(), - EligibleNodes: eligibleValidators, - WaitingNodes: waitingValidators, - SelfPublicKey: args.PubKey, - ConsensusGroupCache: consensusGroupCache, - ShuffledOutHandler: disabled.NewShuffledOutHandler(), - ChanStopNode: args.ChanNodeStop, - NodeTypeProvider: args.NodeTypeProvider, - IsFullArchive: args.IsFullArchive, - EnableEpochsHandler: args.EnableEpochsHandler, - ValidatorInfoCacher: s.dataPool.CurrentEpochValidatorInfo(), - GenesisNodesSetupHandler: s.genesisNodesConfig, + ShardConsensusGroupSize: int(args.GenesisNodesConfig.GetShardConsensusGroupSize()), + MetaConsensusGroupSize: int(args.GenesisNodesConfig.GetMetaConsensusGroupSize()), + Marshalizer: args.Marshalizer, + Hasher: args.Hasher, + Shuffler: args.NodeShuffler, + EpochStartNotifier: &disabled.EpochStartNotifier{}, + BootStorer: s.memDB, + ShardIDAsObserver: args.ShardIdAsObserver, + NbShards: args.GenesisNodesConfig.NumberOfShards(), + EligibleNodes: eligibleValidators, + WaitingNodes: waitingValidators, + SelfPublicKey: args.PubKey, + ConsensusGroupCache: consensusGroupCache, + ShuffledOutHandler: disabled.NewShuffledOutHandler(), + ChanStopNode: args.ChanNodeStop, + NodeTypeProvider: args.NodeTypeProvider, + IsFullArchive: args.IsFullArchive, + EnableEpochsHandler: args.EnableEpochsHandler, + ValidatorInfoCacher: s.dataPool.CurrentEpochValidatorInfo(), + GenesisNodesSetupHandler: s.genesisNodesConfig, + NodesCoordinatorRegistryFactory: args.NodesCoordinatorRegistryFactory, } baseNodesCoordinator, err := nodesCoordinator.NewIndexHashedNodesCoordinator(argsNodesCoordinator) if err != nil { @@ -151,7 +153,7 @@ func NewSyncValidatorStatus(args ArgsNewSyncValidatorStatus) (*syncValidatorStat func (s *syncValidatorStatus) NodesConfigFromMetaBlock( currMetaBlock data.HeaderHandler, prevMetaBlock data.HeaderHandler, -) (*nodesCoordinator.NodesCoordinatorRegistry, uint32, []*block.MiniBlock, error) { +) (nodesCoordinator.NodesCoordinatorRegistryHandler, uint32, []*block.MiniBlock, error) { if currMetaBlock.GetNonce() > 1 && !currMetaBlock.IsStartOfEpochBlock() { return nil, 0, nil, epochStart.ErrNotEpochStartBlock } @@ -177,8 +179,8 @@ func (s *syncValidatorStatus) NodesConfigFromMetaBlock( return nil, 0, nil, err } - nodesConfig := s.nodeCoordinator.NodesCoordinatorToRegistry() - nodesConfig.CurrentEpoch = currMetaBlock.GetEpoch() + nodesConfig := s.nodeCoordinator.NodesCoordinatorToRegistry(currMetaBlock.GetEpoch()) + nodesConfig.SetCurrentEpoch(currMetaBlock.GetEpoch()) return nodesConfig, selfShardId, allMiniblocks, nil } diff --git a/epochStart/bootstrap/syncValidatorStatus_test.go b/epochStart/bootstrap/syncValidatorStatus_test.go index f7e409af875..7cfe6061c77 100644 --- a/epochStart/bootstrap/syncValidatorStatus_test.go +++ b/epochStart/bootstrap/syncValidatorStatus_test.go @@ -17,6 +17,7 @@ import ( epochStartMocks "github.com/multiversx/mx-chain-go/testscommon/bootstrapMocks/epochStart" dataRetrieverMock "github.com/multiversx/mx-chain-go/testscommon/dataRetriever" "github.com/multiversx/mx-chain-go/testscommon/enableEpochsHandlerMock" + "github.com/multiversx/mx-chain-go/testscommon/genesisMocks" 
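Both ArgsNewSyncValidatorStatus and ArgNodesCoordinator now carry a NodesCoordinatorRegistryFactory, so every call site that builds one of these argument structs has to supply it. A minimal wiring sketch, assuming a marshaller and the staking v4 step 2 enable epoch are already at hand (the constructor and its two-argument signature are the ones used in the tests below; the surrounding variable names are illustrative only):

registryFactory, err := nodesCoordinator.NewNodesCoordinatorRegistryFactory(
	coreComponents.InternalMarshalizer(), // any marshal.Marshalizer
	stakingV4Step2EnableEpoch,            // 444 in the unit tests of this diff
)
if err != nil {
	return err
}
args.NodesCoordinatorRegistryFactory = registryFactory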
"github.com/multiversx/mx-chain-go/testscommon/hashingMocks" "github.com/multiversx/mx-chain-go/testscommon/nodeTypeProviderMock" "github.com/multiversx/mx-chain-go/testscommon/shardingMocks" @@ -246,6 +247,11 @@ func TestSyncValidatorStatus_getPeerBlockBodyForMeta(t *testing.T) { } func getSyncValidatorStatusArgs() ArgsNewSyncValidatorStatus { + nodesCoordinatorRegistryFactory, _ := nodesCoordinator.NewNodesCoordinatorRegistryFactory( + &mock.MarshalizerMock{}, + 444, + ) + return ArgsNewSyncValidatorStatus{ DataPool: &dataRetrieverMock.PoolsHolderStub{ MiniBlocksCalled: func() storage.Cacher { @@ -259,7 +265,7 @@ func getSyncValidatorStatusArgs() ArgsNewSyncValidatorStatus { Hasher: &hashingMocks.HasherMock{}, RequestHandler: &testscommon.RequestHandlerStub{}, ChanceComputer: &shardingMocks.NodesCoordinatorStub{}, - GenesisNodesConfig: &mock.NodesSetupStub{ + GenesisNodesConfig: &genesisMocks.NodesSetupStub{ NumberOfShardsCalled: func() uint32 { return 1 }, @@ -301,12 +307,13 @@ func getSyncValidatorStatusArgs() ArgsNewSyncValidatorStatus { return 2 }, }, - NodeShuffler: &shardingMocks.NodeShufflerMock{}, - PubKey: []byte("public key"), - ShardIdAsObserver: 0, - ChanNodeStop: endProcess.GetDummyEndProcessChannel(), - NodeTypeProvider: &nodeTypeProviderMock.NodeTypeProviderStub{}, - IsFullArchive: false, - EnableEpochsHandler: &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, + NodeShuffler: &shardingMocks.NodeShufflerMock{}, + PubKey: []byte("public key"), + ShardIdAsObserver: 0, + ChanNodeStop: endProcess.GetDummyEndProcessChannel(), + NodeTypeProvider: &nodeTypeProviderMock.NodeTypeProviderStub{}, + IsFullArchive: false, + EnableEpochsHandler: &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, + NodesCoordinatorRegistryFactory: nodesCoordinatorRegistryFactory, } } diff --git a/epochStart/dtos.go b/epochStart/dtos.go new file mode 100644 index 00000000000..ea5aa95f626 --- /dev/null +++ b/epochStart/dtos.go @@ -0,0 +1,17 @@ +package epochStart + +import ( + "math/big" + + "github.com/multiversx/mx-chain-go/state" +) + +// OwnerData is a struct containing relevant information about owner's nodes data +type OwnerData struct { + NumStakedNodes int64 + NumActiveNodes int64 + TotalTopUp *big.Int + TopUpPerNode *big.Int + AuctionList []state.ValidatorInfoHandler + Qualified bool +} diff --git a/epochStart/errors.go b/epochStart/errors.go index 3f705f585fd..ca115e939f4 100644 --- a/epochStart/errors.go +++ b/epochStart/errors.go @@ -281,6 +281,9 @@ var ErrSystemValidatorSCCall = errors.New("system validator sc call failed") // ErrOwnerDoesntHaveEligibleNodesInEpoch signals that the owner doesn't have any eligible nodes in epoch var ErrOwnerDoesntHaveEligibleNodesInEpoch = errors.New("owner has no eligible nodes in epoch") +// ErrOwnerDoesntHaveNodesInEpoch signals that the owner has no nodes in epoch +var ErrOwnerDoesntHaveNodesInEpoch = errors.New("owner has no nodes in epoch") + // ErrInvalidMaxHardCapForMissingNodes signals that the maximum hardcap value for missing nodes is invalid var ErrInvalidMaxHardCapForMissingNodes = errors.New("invalid max hardcap for missing nodes") @@ -331,3 +334,21 @@ var ErrNilManagedPeersHolder = errors.New("nil managed peers holder") // ErrNilExecutionOrderHandler signals that a nil execution order handler has been provided var ErrNilExecutionOrderHandler = errors.New("nil execution order handler") + +// ErrReceivedNewListNodeInStakingV4 signals that a new node has been assigned in common.NewList instead of common.AuctionList after staking v4 +var 
ErrReceivedNewListNodeInStakingV4 = errors.New("new node has been assigned in common.NewList instead of common.AuctionList after staking v4") + +// ErrNilMaxNodesChangeConfigProvider signals that a nil nodes config provider has been provided +var ErrNilMaxNodesChangeConfigProvider = errors.New("nil nodes config provider has been provided") + +// ErrNilAuctionListSelector signals that a nil auction list selector has been provided +var ErrNilAuctionListSelector = errors.New("nil auction list selector has been provided") + +// ErrOwnerHasNoStakedNode signals that the owner has no staked node +var ErrOwnerHasNoStakedNode = errors.New("owner has no staked node") + +// ErrUint32SubtractionOverflow signals uint32 subtraction overflowed +var ErrUint32SubtractionOverflow = errors.New("uint32 subtraction overflowed") + +// ErrReceivedAuctionValidatorsBeforeStakingV4 signals that an auction node has been provided before enabling staking v4 +var ErrReceivedAuctionValidatorsBeforeStakingV4 = errors.New("auction node has been provided before enabling staking v4") diff --git a/epochStart/interface.go b/epochStart/interface.go index fc4364afc43..06f04c11117 100644 --- a/epochStart/interface.go +++ b/epochStart/interface.go @@ -7,6 +7,7 @@ import ( "github.com/multiversx/mx-chain-core-go/data" "github.com/multiversx/mx-chain-core-go/data/block" + "github.com/multiversx/mx-chain-go/config" "github.com/multiversx/mx-chain-go/state" vmcommon "github.com/multiversx/mx-chain-vm-common-go" ) @@ -85,14 +86,6 @@ type Notifier interface { IsInterfaceNil() bool } -// ValidatorStatisticsProcessorHandler defines the actions for processing validator statistics -// needed in the epoch events -type ValidatorStatisticsProcessorHandler interface { - Process(info data.ShardValidatorInfoHandler) error - Commit() ([]byte, error) - IsInterfaceNil() bool -} - // ValidatorInfoCreator defines the methods to create a validator info type ValidatorInfoCreator interface { PeerAccountToValidatorInfo(peerAccount state.PeerAccountHandler) *state.ValidatorInfo @@ -161,9 +154,12 @@ type StakingDataProvider interface { GetTotalStakeEligibleNodes() *big.Int GetTotalTopUpStakeEligibleNodes() *big.Int GetNodeStakedTopUp(blsKey []byte) (*big.Int, error) - PrepareStakingDataForRewards(keys map[uint32][][]byte) error - FillValidatorInfo(blsKey []byte) error - ComputeUnQualifiedNodes(validatorInfos map[uint32][]*state.ValidatorInfo) ([][]byte, map[string][][]byte, error) + PrepareStakingData(validatorsMap state.ShardValidatorsInfoMapHandler) error + FillValidatorInfo(validator state.ValidatorInfoHandler) error + ComputeUnQualifiedNodes(validatorInfos state.ShardValidatorsInfoMapHandler) ([][]byte, map[string][][]byte, error) + GetBlsKeyOwner(blsKey []byte) (string, error) + GetNumOfValidatorsInCurrentEpoch() uint32 + GetOwnersData() map[string]*OwnerData Clean() IsInterfaceNil() bool } @@ -186,10 +182,10 @@ type EpochEconomicsDataProvider interface { // RewardsCreator defines the functionality for the metachain to create rewards at end of epoch type RewardsCreator interface { CreateRewardsMiniBlocks( - metaBlock data.MetaHeaderHandler, validatorsInfo map[uint32][]*state.ValidatorInfo, computedEconomics *block.Economics, + metaBlock data.MetaHeaderHandler, validatorsInfo state.ShardValidatorsInfoMapHandler, computedEconomics *block.Economics, ) (block.MiniBlockSlice, error) VerifyRewardsMiniBlocks( - metaBlock data.MetaHeaderHandler, validatorsInfo map[uint32][]*state.ValidatorInfo, computedEconomics *block.Economics, + metaBlock 
data.MetaHeaderHandler, validatorsInfo state.ShardValidatorsInfoMapHandler, computedEconomics *block.Economics, ) error GetProtocolSustainabilityRewards() *big.Int GetLocalTxCache() TransactionCacher @@ -214,3 +210,21 @@ type EpochStartNotifier interface { RegisterHandler(handler ActionHandler) IsInterfaceNil() bool } + +// MaxNodesChangeConfigProvider provides all config.MaxNodesChangeConfig, as well as +// the current config.MaxNodesChangeConfig based on the current epoch +type MaxNodesChangeConfigProvider interface { + GetAllNodesConfig() []config.MaxNodesChangeConfig + GetCurrentNodesConfig() config.MaxNodesChangeConfig + EpochConfirmed(epoch uint32, round uint64) + IsInterfaceNil() bool +} + +// AuctionListSelector handles selection of nodes from auction list to be sent to waiting list, based on their top up +type AuctionListSelector interface { + SelectNodesFromAuctionList( + validatorsInfoMap state.ShardValidatorsInfoMapHandler, + randomness []byte, + ) error + IsInterfaceNil() bool +} diff --git a/epochStart/metachain/auctionListDisplayer.go b/epochStart/metachain/auctionListDisplayer.go new file mode 100644 index 00000000000..d64a156a51c --- /dev/null +++ b/epochStart/metachain/auctionListDisplayer.go @@ -0,0 +1,232 @@ +package metachain + +import ( + "math/big" + "strconv" + "strings" + + "github.com/multiversx/mx-chain-core-go/core" + "github.com/multiversx/mx-chain-core-go/core/check" + "github.com/multiversx/mx-chain-core-go/display" + "github.com/multiversx/mx-chain-go/config" + errorsCommon "github.com/multiversx/mx-chain-go/errors" + "github.com/multiversx/mx-chain-go/state" + logger "github.com/multiversx/mx-chain-logger-go" +) + +const maxPubKeyDisplayableLen = 20 +const maxNumOfDecimalsToDisplay = 5 + +type auctionListDisplayer struct { + softAuctionConfig *auctionConfig + tableDisplayer TableDisplayHandler + validatorPubKeyConverter core.PubkeyConverter + addressPubKeyConverter core.PubkeyConverter +} + +// ArgsAuctionListDisplayer is a struct placeholder for arguments needed to create an auction list displayer +type ArgsAuctionListDisplayer struct { + TableDisplayHandler TableDisplayHandler + ValidatorPubKeyConverter core.PubkeyConverter + AddressPubKeyConverter core.PubkeyConverter + AuctionConfig config.SoftAuctionConfig + Denomination int +} + +// NewAuctionListDisplayer creates an auction list data displayer, useful for debugging purposes during selection process +func NewAuctionListDisplayer(args ArgsAuctionListDisplayer) (*auctionListDisplayer, error) { + softAuctionConfig, err := getAuctionConfig(args.AuctionConfig, args.Denomination) + if err != nil { + return nil, err + } + + err = checkDisplayerNilArgs(args) + if err != nil { + return nil, err + } + + return &auctionListDisplayer{ + softAuctionConfig: softAuctionConfig, + tableDisplayer: args.TableDisplayHandler, + validatorPubKeyConverter: args.ValidatorPubKeyConverter, + addressPubKeyConverter: args.AddressPubKeyConverter, + }, nil +} + +func checkDisplayerNilArgs(args ArgsAuctionListDisplayer) error { + if check.IfNil(args.TableDisplayHandler) { + return errNilTableDisplayHandler + } + if check.IfNil(args.ValidatorPubKeyConverter) { + return errorsCommon.ErrNilValidatorPublicKeyConverter + } + if check.IfNil(args.AddressPubKeyConverter) { + return errorsCommon.ErrNilAddressPublicKeyConverter + } + + return nil +} + +// DisplayOwnersData will display initial owners data for auction selection +func (ald *auctionListDisplayer) DisplayOwnersData(ownersData map[string]*OwnerAuctionData) { + if log.GetLevel() > 
logger.LogDebug { + return + } + + tableHeader := []string{ + "Owner", + "Num staked nodes", + "Num active nodes", + "Num auction nodes", + "Total top up", + "Top up per node", + "Auction list nodes", + } + + lines := make([]*display.LineData, 0, len(ownersData)) + for ownerPubKey, owner := range ownersData { + line := []string{ + ald.addressPubKeyConverter.SilentEncode([]byte(ownerPubKey), log), + strconv.Itoa(int(owner.numStakedNodes)), + strconv.Itoa(int(owner.numActiveNodes)), + strconv.Itoa(int(owner.numAuctionNodes)), + getPrettyValue(owner.totalTopUp, ald.softAuctionConfig.denominator), + getPrettyValue(owner.topUpPerNode, ald.softAuctionConfig.denominator), + ald.getShortDisplayableBlsKeys(owner.auctionList), + } + lines = append(lines, display.NewLineData(false, line)) + } + + ald.tableDisplayer.DisplayTable(tableHeader, lines, "Initial nodes config in auction list") +} + +func getPrettyValue(val *big.Int, denominator *big.Int) string { + first := big.NewInt(0).Div(val, denominator).String() + decimals := big.NewInt(0).Mod(val, denominator).String() + + zeroesCt := (len(denominator.String()) - len(decimals)) - 1 + zeroesCt = core.MaxInt(zeroesCt, 0) + zeroes := strings.Repeat("0", zeroesCt) + + second := zeroes + decimals + if len(second) > maxNumOfDecimalsToDisplay { + second = second[:maxNumOfDecimalsToDisplay] + } + + return first + "." + second +} + +func (ald *auctionListDisplayer) getShortDisplayableBlsKeys(list []state.ValidatorInfoHandler) string { + pubKeys := "" + + for idx, validator := range list { + pubKeys += ald.getShortKey(validator.GetPublicKey()) + addDelimiter := idx != len(list)-1 + if addDelimiter { + pubKeys += ", " + } + } + + return pubKeys +} + +func (ald *auctionListDisplayer) getShortKey(pubKey []byte) string { + pubKeyHex := ald.validatorPubKeyConverter.SilentEncode(pubKey, log) + displayablePubKey := pubKeyHex + + pubKeyLen := len(displayablePubKey) + if pubKeyLen > maxPubKeyDisplayableLen { + displayablePubKey = pubKeyHex[:maxPubKeyDisplayableLen/2] + "..." 
+ pubKeyHex[pubKeyLen-maxPubKeyDisplayableLen/2:] + } + + return displayablePubKey +} + +// DisplayOwnersSelectedNodes will display owners' selected nodes +func (ald *auctionListDisplayer) DisplayOwnersSelectedNodes(ownersData map[string]*OwnerAuctionData) { + if log.GetLevel() > logger.LogDebug { + return + } + + tableHeader := []string{ + "Owner", + "Num staked nodes", + "TopUp per node", + "Total top up", + "Num auction nodes", + "Num qualified auction nodes", + "Num active nodes", + "Qualified top up per node", + "Selected auction list nodes", + } + + lines := make([]*display.LineData, 0, len(ownersData)) + for ownerPubKey, owner := range ownersData { + line := []string{ + ald.addressPubKeyConverter.SilentEncode([]byte(ownerPubKey), log), + strconv.Itoa(int(owner.numStakedNodes)), + getPrettyValue(owner.topUpPerNode, ald.softAuctionConfig.denominator), + getPrettyValue(owner.totalTopUp, ald.softAuctionConfig.denominator), + strconv.Itoa(int(owner.numAuctionNodes)), + strconv.Itoa(int(owner.numQualifiedAuctionNodes)), + strconv.Itoa(int(owner.numActiveNodes)), + getPrettyValue(owner.qualifiedTopUpPerNode, ald.softAuctionConfig.denominator), + ald.getShortDisplayableBlsKeys(owner.auctionList[:owner.numQualifiedAuctionNodes]), + } + lines = append(lines, display.NewLineData(false, line)) + } + + ald.tableDisplayer.DisplayTable(tableHeader, lines, "Selected nodes config from auction list") +} + +// DisplayAuctionList will display the final selected auction nodes +func (ald *auctionListDisplayer) DisplayAuctionList( + auctionList []state.ValidatorInfoHandler, + ownersData map[string]*OwnerAuctionData, + numOfSelectedNodes uint32, +) { + if log.GetLevel() > logger.LogDebug { + return + } + + tableHeader := []string{"Owner", "Registered key", "Qualified TopUp per node"} + lines := make([]*display.LineData, 0, len(auctionList)) + blsKeysOwnerMap := getBlsKeyOwnerMap(ownersData) + for idx, validator := range auctionList { + pubKey := validator.GetPublicKey() + pubKeyEncoded := ald.validatorPubKeyConverter.SilentEncode(pubKey, log) + owner, found := blsKeysOwnerMap[string(pubKey)] + if !found { + log.Error("auctionListSelector.displayAuctionList could not find owner for", + "bls key", pubKeyEncoded) + continue + } + + qualifiedTopUp := ownersData[owner].qualifiedTopUpPerNode + horizontalLine := uint32(idx) == numOfSelectedNodes-1 + line := display.NewLineData(horizontalLine, []string{ + ald.addressPubKeyConverter.SilentEncode([]byte(owner), log), + pubKeyEncoded, + getPrettyValue(qualifiedTopUp, ald.softAuctionConfig.denominator), + }) + lines = append(lines, line) + } + + ald.tableDisplayer.DisplayTable(tableHeader, lines, "Final selected nodes from auction list") +} + +func getBlsKeyOwnerMap(ownersData map[string]*OwnerAuctionData) map[string]string { + ret := make(map[string]string) + for ownerPubKey, owner := range ownersData { + for _, blsKey := range owner.auctionList { + ret[string(blsKey.GetPublicKey())] = ownerPubKey + } + } + + return ret +} + +// IsInterfaceNil checks if the underlying pointer is nil +func (ald *auctionListDisplayer) IsInterfaceNil() bool { + return ald == nil +} diff --git a/epochStart/metachain/auctionListDisplayer_test.go b/epochStart/metachain/auctionListDisplayer_test.go new file mode 100644 index 00000000000..68d74e08e41 --- /dev/null +++ b/epochStart/metachain/auctionListDisplayer_test.go @@ -0,0 +1,288 @@ +package metachain + +import ( + "math" + "math/big" + "testing" + + "github.com/multiversx/mx-chain-core-go/core" + 
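To make the denomination arithmetic in getPrettyValue above concrete, here is a short worked example; the values are the same ones exercised by the unit tests that follow, and the snippet is an illustration rather than part of the change:

// val = 1234, denominator = 1000:
//   first    = 1234 / 1000                  -> "1"
//   decimals = 1234 % 1000                  -> "234"
//   zeroesCt = len("1000") - len("234") - 1 -> 0, so no left padding
//   second   = "234" (anything beyond maxNumOfDecimalsToDisplay = 5 digits is truncated, not rounded)
_ = getPrettyValue(big.NewInt(1234), big.NewInt(1000)) // "1.234"
_ = getPrettyValue(big.NewInt(1), big.NewInt(1000))    // "0.001" (left-padded with 4 - 1 - 1 = 2 zeroes)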
"github.com/multiversx/mx-chain-core-go/display" + "github.com/multiversx/mx-chain-go/state" + "github.com/multiversx/mx-chain-go/testscommon" + logger "github.com/multiversx/mx-chain-logger-go" + "github.com/stretchr/testify/require" +) + +func createDisplayerArgs() ArgsAuctionListDisplayer { + return ArgsAuctionListDisplayer{ + TableDisplayHandler: NewTableDisplayer(), + ValidatorPubKeyConverter: &testscommon.PubkeyConverterMock{}, + AddressPubKeyConverter: &testscommon.PubkeyConverterMock{}, + AuctionConfig: createSoftAuctionConfig(), + Denomination: 0, + } +} + +func TestNewAuctionListDisplayer(t *testing.T) { + t.Parallel() + + t.Run("invalid auction config", func(t *testing.T) { + args := createDisplayerArgs() + args.AuctionConfig.MaxNumberOfIterations = 0 + ald, err := NewAuctionListDisplayer(args) + require.Nil(t, ald) + requireInvalidValueError(t, err, "for max number of iterations") + }) + + t.Run("should work", func(t *testing.T) { + args := createDisplayerArgs() + ald, err := NewAuctionListDisplayer(args) + require.Nil(t, err) + require.False(t, ald.IsInterfaceNil()) + }) +} + +func TestAuctionListDisplayer_DisplayOwnersData(t *testing.T) { + _ = logger.SetLogLevel("*:DEBUG") + defer func() { + _ = logger.SetLogLevel("*:INFO") + }() + + owner := []byte("owner") + validator := &state.ValidatorInfo{PublicKey: []byte("pubKey")} + wasDisplayCalled := false + + args := createDisplayerArgs() + args.AddressPubKeyConverter = &testscommon.PubkeyConverterStub{ + SilentEncodeCalled: func(pkBytes []byte, log core.Logger) string { + require.Equal(t, owner, pkBytes) + return "ownerEncoded" + }, + } + args.ValidatorPubKeyConverter = &testscommon.PubkeyConverterStub{ + SilentEncodeCalled: func(pkBytes []byte, log core.Logger) string { + require.Equal(t, validator.PublicKey, pkBytes) + return "pubKeyEncoded" + }, + } + args.TableDisplayHandler = &testscommon.TableDisplayerMock{ + DisplayTableCalled: func(tableHeader []string, lines []*display.LineData, message string) { + require.Equal(t, []string{ + "Owner", + "Num staked nodes", + "Num active nodes", + "Num auction nodes", + "Total top up", + "Top up per node", + "Auction list nodes", + }, tableHeader) + require.Equal(t, "Initial nodes config in auction list", message) + require.Equal(t, []*display.LineData{ + { + Values: []string{"ownerEncoded", "4", "4", "1", "100.0", "25.0", "pubKeyEncoded"}, + HorizontalRuleAfter: false, + }, + }, lines) + + wasDisplayCalled = true + }, + } + ald, _ := NewAuctionListDisplayer(args) + + ownersData := map[string]*OwnerAuctionData{ + "owner": { + numStakedNodes: 4, + numActiveNodes: 4, + numAuctionNodes: 1, + numQualifiedAuctionNodes: 4, + totalTopUp: big.NewInt(100), + topUpPerNode: big.NewInt(25), + qualifiedTopUpPerNode: big.NewInt(15), + auctionList: []state.ValidatorInfoHandler{&state.ValidatorInfo{PublicKey: []byte("pubKey")}}, + }, + } + + ald.DisplayOwnersData(ownersData) + require.True(t, wasDisplayCalled) +} + +func TestAuctionListDisplayer_DisplayOwnersSelectedNodes(t *testing.T) { + _ = logger.SetLogLevel("*:DEBUG") + defer func() { + _ = logger.SetLogLevel("*:INFO") + }() + + owner := []byte("owner") + validator := &state.ValidatorInfo{PublicKey: []byte("pubKey")} + wasDisplayCalled := false + + args := createDisplayerArgs() + args.AddressPubKeyConverter = &testscommon.PubkeyConverterStub{ + SilentEncodeCalled: func(pkBytes []byte, log core.Logger) string { + require.Equal(t, owner, pkBytes) + return "ownerEncoded" + }, + } + args.ValidatorPubKeyConverter = &testscommon.PubkeyConverterStub{ + 
SilentEncodeCalled: func(pkBytes []byte, log core.Logger) string { + require.Equal(t, validator.PublicKey, pkBytes) + return "pubKeyEncoded" + }, + } + args.TableDisplayHandler = &testscommon.TableDisplayerMock{ + DisplayTableCalled: func(tableHeader []string, lines []*display.LineData, message string) { + require.Equal(t, []string{ + "Owner", + "Num staked nodes", + "TopUp per node", + "Total top up", + "Num auction nodes", + "Num qualified auction nodes", + "Num active nodes", + "Qualified top up per node", + "Selected auction list nodes", + }, tableHeader) + require.Equal(t, "Selected nodes config from auction list", message) + require.Equal(t, []*display.LineData{ + { + Values: []string{"ownerEncoded", "4", "25.0", "100.0", "1", "1", "4", "15.0", "pubKeyEncoded"}, + HorizontalRuleAfter: false, + }, + }, lines) + + wasDisplayCalled = true + }, + } + ald, _ := NewAuctionListDisplayer(args) + + ownersData := map[string]*OwnerAuctionData{ + "owner": { + numStakedNodes: 4, + numActiveNodes: 4, + numAuctionNodes: 1, + numQualifiedAuctionNodes: 1, + totalTopUp: big.NewInt(100), + topUpPerNode: big.NewInt(25), + qualifiedTopUpPerNode: big.NewInt(15), + auctionList: []state.ValidatorInfoHandler{&state.ValidatorInfo{PublicKey: []byte("pubKey")}}, + }, + } + + ald.DisplayOwnersSelectedNodes(ownersData) + require.True(t, wasDisplayCalled) +} + +func TestAuctionListDisplayer_DisplayAuctionList(t *testing.T) { + _ = logger.SetLogLevel("*:DEBUG") + defer func() { + _ = logger.SetLogLevel("*:INFO") + }() + + owner := []byte("owner") + validator := &state.ValidatorInfo{PublicKey: []byte("pubKey")} + wasDisplayCalled := false + + args := createDisplayerArgs() + args.AddressPubKeyConverter = &testscommon.PubkeyConverterStub{ + SilentEncodeCalled: func(pkBytes []byte, log core.Logger) string { + require.Equal(t, owner, pkBytes) + return "ownerEncoded" + }, + } + args.ValidatorPubKeyConverter = &testscommon.PubkeyConverterStub{ + SilentEncodeCalled: func(pkBytes []byte, log core.Logger) string { + require.Equal(t, validator.PublicKey, pkBytes) + return "pubKeyEncoded" + }, + } + args.TableDisplayHandler = &testscommon.TableDisplayerMock{ + DisplayTableCalled: func(tableHeader []string, lines []*display.LineData, message string) { + require.Equal(t, []string{ + "Owner", + "Registered key", + "Qualified TopUp per node", + }, tableHeader) + require.Equal(t, "Final selected nodes from auction list", message) + require.Equal(t, []*display.LineData{ + { + Values: []string{"ownerEncoded", "pubKeyEncoded", "15.0"}, + HorizontalRuleAfter: true, + }, + }, lines) + + wasDisplayCalled = true + }, + } + ald, _ := NewAuctionListDisplayer(args) + + auctionList := []state.ValidatorInfoHandler{&state.ValidatorInfo{PublicKey: []byte("pubKey")}} + ownersData := map[string]*OwnerAuctionData{ + "owner": { + numStakedNodes: 4, + numActiveNodes: 4, + numAuctionNodes: 1, + numQualifiedAuctionNodes: 1, + totalTopUp: big.NewInt(100), + topUpPerNode: big.NewInt(25), + qualifiedTopUpPerNode: big.NewInt(15), + auctionList: auctionList, + }, + } + + ald.DisplayAuctionList(auctionList, ownersData, 1) + require.True(t, wasDisplayCalled) +} + +func TestGetPrettyValue(t *testing.T) { + t.Parallel() + + require.Equal(t, "1234.0", getPrettyValue(big.NewInt(1234), big.NewInt(1))) + require.Equal(t, "123.4", getPrettyValue(big.NewInt(1234), big.NewInt(10))) + require.Equal(t, "12.34", getPrettyValue(big.NewInt(1234), big.NewInt(100))) + require.Equal(t, "1.234", getPrettyValue(big.NewInt(1234), big.NewInt(1000))) + require.Equal(t, "0.1234", 
getPrettyValue(big.NewInt(1234), big.NewInt(10000))) + require.Equal(t, "0.01234", getPrettyValue(big.NewInt(1234), big.NewInt(100000))) + require.Equal(t, "0.00123", getPrettyValue(big.NewInt(1234), big.NewInt(1000000))) + require.Equal(t, "0.00012", getPrettyValue(big.NewInt(1234), big.NewInt(10000000))) + require.Equal(t, "0.00001", getPrettyValue(big.NewInt(1234), big.NewInt(100000000))) + require.Equal(t, "0.00000", getPrettyValue(big.NewInt(1234), big.NewInt(1000000000))) + require.Equal(t, "0.00000", getPrettyValue(big.NewInt(1234), big.NewInt(10000000000))) + + require.Equal(t, "1.0", getPrettyValue(big.NewInt(1), big.NewInt(1))) + require.Equal(t, "0.1", getPrettyValue(big.NewInt(1), big.NewInt(10))) + require.Equal(t, "0.01", getPrettyValue(big.NewInt(1), big.NewInt(100))) + require.Equal(t, "0.001", getPrettyValue(big.NewInt(1), big.NewInt(1000))) + require.Equal(t, "0.0001", getPrettyValue(big.NewInt(1), big.NewInt(10000))) + require.Equal(t, "0.00001", getPrettyValue(big.NewInt(1), big.NewInt(100000))) + require.Equal(t, "0.00000", getPrettyValue(big.NewInt(1), big.NewInt(1000000))) + require.Equal(t, "0.00000", getPrettyValue(big.NewInt(1), big.NewInt(10000000))) + + oneEGLD := big.NewInt(1000000000000000000) + denominationEGLD := big.NewInt(int64(math.Pow10(18))) + + require.Equal(t, "0.00000", getPrettyValue(big.NewInt(0), denominationEGLD)) + require.Equal(t, "1.00000", getPrettyValue(oneEGLD, denominationEGLD)) + require.Equal(t, "1.10000", getPrettyValue(big.NewInt(1100000000000000000), denominationEGLD)) + require.Equal(t, "1.10000", getPrettyValue(big.NewInt(1100000000000000001), denominationEGLD)) + require.Equal(t, "1.11000", getPrettyValue(big.NewInt(1110000000000000001), denominationEGLD)) + require.Equal(t, "0.11100", getPrettyValue(big.NewInt(111000000000000001), denominationEGLD)) + require.Equal(t, "0.01110", getPrettyValue(big.NewInt(11100000000000001), denominationEGLD)) + require.Equal(t, "0.00111", getPrettyValue(big.NewInt(1110000000000001), denominationEGLD)) + require.Equal(t, "0.00011", getPrettyValue(big.NewInt(111000000000001), denominationEGLD)) + require.Equal(t, "0.00001", getPrettyValue(big.NewInt(11100000000001), denominationEGLD)) + require.Equal(t, "0.00000", getPrettyValue(big.NewInt(1110000000001), denominationEGLD)) + require.Equal(t, "0.00000", getPrettyValue(big.NewInt(111000000001), denominationEGLD)) + + require.Equal(t, "2.00000", getPrettyValue(big.NewInt(0).Mul(oneEGLD, big.NewInt(2)), denominationEGLD)) + require.Equal(t, "20.00000", getPrettyValue(big.NewInt(0).Mul(oneEGLD, big.NewInt(20)), denominationEGLD)) + require.Equal(t, "2000000.00000", getPrettyValue(big.NewInt(0).Mul(oneEGLD, big.NewInt(2000000)), denominationEGLD)) + + require.Equal(t, "3.22220", getPrettyValue(big.NewInt(0).Add(oneEGLD, big.NewInt(2222200000000000000)), denominationEGLD)) + require.Equal(t, "1.22222", getPrettyValue(big.NewInt(0).Add(oneEGLD, big.NewInt(222220000000000000)), denominationEGLD)) + require.Equal(t, "1.02222", getPrettyValue(big.NewInt(0).Add(oneEGLD, big.NewInt(22222000000000000)), denominationEGLD)) + require.Equal(t, "1.00222", getPrettyValue(big.NewInt(0).Add(oneEGLD, big.NewInt(2222200000000000)), denominationEGLD)) + require.Equal(t, "1.00022", getPrettyValue(big.NewInt(0).Add(oneEGLD, big.NewInt(222220000000000)), denominationEGLD)) + require.Equal(t, "1.00002", getPrettyValue(big.NewInt(0).Add(oneEGLD, big.NewInt(22222000000000)), denominationEGLD)) + require.Equal(t, "1.00000", getPrettyValue(big.NewInt(0).Add(oneEGLD, 
big.NewInt(2222200000000)), denominationEGLD)) + require.Equal(t, "1.00000", getPrettyValue(big.NewInt(0).Add(oneEGLD, big.NewInt(222220000000)), denominationEGLD)) +} diff --git a/epochStart/metachain/auctionListSelector.go b/epochStart/metachain/auctionListSelector.go new file mode 100644 index 00000000000..4b7c353a180 --- /dev/null +++ b/epochStart/metachain/auctionListSelector.go @@ -0,0 +1,422 @@ +package metachain + +import ( + "fmt" + "math/big" + "strings" + + "github.com/multiversx/mx-chain-core-go/core" + "github.com/multiversx/mx-chain-core-go/core/check" + "github.com/multiversx/mx-chain-go/common" + "github.com/multiversx/mx-chain-go/config" + "github.com/multiversx/mx-chain-go/epochStart" + "github.com/multiversx/mx-chain-go/process" + "github.com/multiversx/mx-chain-go/sharding" + "github.com/multiversx/mx-chain-go/state" +) + +// OwnerAuctionData holds necessary auction data for an owner +type OwnerAuctionData struct { + numStakedNodes int64 + numActiveNodes int64 + numAuctionNodes int64 + numQualifiedAuctionNodes int64 + totalTopUp *big.Int + topUpPerNode *big.Int + qualifiedTopUpPerNode *big.Int + auctionList []state.ValidatorInfoHandler +} + +type auctionConfig struct { + step *big.Int + minTopUp *big.Int + maxTopUp *big.Int + denominator *big.Int + maxNumberOfIterations uint64 +} + +type auctionListSelector struct { + shardCoordinator sharding.Coordinator + stakingDataProvider epochStart.StakingDataProvider + nodesConfigProvider epochStart.MaxNodesChangeConfigProvider + auctionListDisplayer AuctionListDisplayHandler + softAuctionConfig *auctionConfig +} + +// AuctionListSelectorArgs is a struct placeholder for all arguments required to create an auctionListSelector +type AuctionListSelectorArgs struct { + ShardCoordinator sharding.Coordinator + StakingDataProvider epochStart.StakingDataProvider + MaxNodesChangeConfigProvider epochStart.MaxNodesChangeConfigProvider + AuctionListDisplayHandler AuctionListDisplayHandler + SoftAuctionConfig config.SoftAuctionConfig + Denomination int +} + +// NewAuctionListSelector will create a new auctionListSelector, which handles selection of nodes from auction list based +// on their top up +func NewAuctionListSelector(args AuctionListSelectorArgs) (*auctionListSelector, error) { + softAuctionConfig, err := getAuctionConfig(args.SoftAuctionConfig, args.Denomination) + if err != nil { + return nil, err + } + err = checkNilArgs(args) + if err != nil { + return nil, err + } + + log.Debug("NewAuctionListSelector with config", + "top up step", softAuctionConfig.step.String(), + "min top up", softAuctionConfig.minTopUp.String(), + "max top up", softAuctionConfig.maxTopUp.String(), + "denomination", args.Denomination, + "denominator for pretty values", softAuctionConfig.denominator.String(), + ) + + return &auctionListSelector{ + shardCoordinator: args.ShardCoordinator, + stakingDataProvider: args.StakingDataProvider, + nodesConfigProvider: args.MaxNodesChangeConfigProvider, + auctionListDisplayer: args.AuctionListDisplayHandler, + softAuctionConfig: softAuctionConfig, + }, nil +} + +func getAuctionConfig(softAuctionConfig config.SoftAuctionConfig, denomination int) (*auctionConfig, error) { + step, ok := big.NewInt(0).SetString(softAuctionConfig.TopUpStep, 10) + if !ok || step.Cmp(zero) <= 0 { + return nil, fmt.Errorf("%w for step in soft auction config;expected number > 0, got %s", + process.ErrInvalidValue, + softAuctionConfig.TopUpStep, + ) + } + + minTopUp, ok := big.NewInt(0).SetString(softAuctionConfig.MinTopUp, 10) + if !ok || 
minTopUp.Cmp(zero) <= 0 { + return nil, fmt.Errorf("%w for min top up in soft auction config;expected number > 0, got %s", + process.ErrInvalidValue, + softAuctionConfig.MinTopUp, + ) + } + + maxTopUp, ok := big.NewInt(0).SetString(softAuctionConfig.MaxTopUp, 10) + if !ok || maxTopUp.Cmp(zero) <= 0 { + return nil, fmt.Errorf("%w for max top up in soft auction config;expected number > 0, got %s", + process.ErrInvalidValue, + softAuctionConfig.MaxTopUp, + ) + } + + if minTopUp.Cmp(maxTopUp) > 0 { + return nil, fmt.Errorf("%w for min/max top up in soft auction config; min value: %s > max value: %s", + process.ErrInvalidValue, + softAuctionConfig.MinTopUp, + softAuctionConfig.MaxTopUp, + ) + } + + if denomination < 0 { + return nil, fmt.Errorf("%w for denomination in soft auction config;expected number >= 0, got %d", + process.ErrInvalidValue, + denomination, + ) + } + + if softAuctionConfig.MaxNumberOfIterations == 0 { + return nil, fmt.Errorf("%w for max number of iterations in soft auction config;expected value > 0", + process.ErrInvalidValue, + ) + } + + denominationStr := "1" + strings.Repeat("0", denomination) + denominator, ok := big.NewInt(0).SetString(denominationStr, 10) + if !ok { + return nil, fmt.Errorf("%w for denomination: %d", + errCannotComputeDenominator, + denomination, + ) + } + + if minTopUp.Cmp(denominator) < 0 { + return nil, fmt.Errorf("%w for min top up in auction config; expected value to be >= %s, got %s", + process.ErrInvalidValue, + denominator.String(), + minTopUp.String(), + ) + } + + if step.Cmp(denominator) < 0 { + return nil, fmt.Errorf("%w for step in auction config; expected value to be >= %s, got %s", + process.ErrInvalidValue, + denominator.String(), + step.String(), + ) + } + + return &auctionConfig{ + step: step, + minTopUp: minTopUp, + maxTopUp: maxTopUp, + denominator: denominator, + maxNumberOfIterations: softAuctionConfig.MaxNumberOfIterations, + }, nil +} + +func checkNilArgs(args AuctionListSelectorArgs) error { + if check.IfNil(args.ShardCoordinator) { + return epochStart.ErrNilShardCoordinator + } + if check.IfNil(args.StakingDataProvider) { + return epochStart.ErrNilStakingDataProvider + } + if check.IfNil(args.MaxNodesChangeConfigProvider) { + return epochStart.ErrNilMaxNodesChangeConfigProvider + } + if check.IfNil(args.AuctionListDisplayHandler) { + return errNilAuctionListDisplayHandler + } + + return nil +} + +// SelectNodesFromAuctionList will select nodes from validatorsInfoMap based on their top up. If two or more validators +// have the same top-up, then sorting will be done based on blsKey XOR randomness. 
Selected nodes will have their list set +// to common.SelectNodesFromAuctionList +func (als *auctionListSelector) SelectNodesFromAuctionList( + validatorsInfoMap state.ShardValidatorsInfoMapHandler, + randomness []byte, +) error { + if len(randomness) == 0 { + return process.ErrNilRandSeed + } + + ownersData, auctionListSize := als.getAuctionData() + if auctionListSize == 0 { + log.Info("auctionListSelector.SelectNodesFromAuctionList: empty auction list; skip selection") + return nil + } + + currNodesConfig := als.nodesConfigProvider.GetCurrentNodesConfig() + currNumOfValidators := als.stakingDataProvider.GetNumOfValidatorsInCurrentEpoch() + numOfShuffledNodes := currNodesConfig.NodesToShufflePerShard * (als.shardCoordinator.NumberOfShards() + 1) + numOfValidatorsAfterShuffling, err := safeSub(currNumOfValidators, numOfShuffledNodes) + if err != nil { + log.Warn(fmt.Sprintf("auctionListSelector.SelectNodesFromAuctionList: %v when trying to compute numOfValidatorsAfterShuffling = %v - %v (currNumOfValidators - numOfShuffledNodes)", + err, + currNumOfValidators, + numOfShuffledNodes, + )) + numOfValidatorsAfterShuffling = 0 + } + + maxNumNodes := currNodesConfig.MaxNumNodes + availableSlots, err := safeSub(maxNumNodes, numOfValidatorsAfterShuffling) + if availableSlots == 0 || err != nil { + log.Info(fmt.Sprintf("auctionListSelector.SelectNodesFromAuctionList: %v or zero value when trying to compute availableSlots = %v - %v (maxNodes - numOfValidatorsAfterShuffling); skip selecting nodes from auction list", + err, + maxNumNodes, + numOfValidatorsAfterShuffling, + )) + return nil + } + + log.Info("auctionListSelector.SelectNodesFromAuctionList", + "max nodes", maxNumNodes, + "current number of validators", currNumOfValidators, + "num of nodes which will be shuffled out", numOfShuffledNodes, + "num of validators after shuffling", numOfValidatorsAfterShuffling, + "auction list size", auctionListSize, + fmt.Sprintf("available slots (%v - %v)", maxNumNodes, numOfValidatorsAfterShuffling), availableSlots, + ) + + als.auctionListDisplayer.DisplayOwnersData(ownersData) + numOfAvailableNodeSlots := core.MinUint32(auctionListSize, availableSlots) + + sw := core.NewStopWatch() + sw.Start("auctionListSelector.sortAuctionList") + defer func() { + sw.Stop("auctionListSelector.sortAuctionList") + log.Debug("time measurements", sw.GetMeasurements()...) 
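// For intuition, a purely hypothetical walk-through of the slot computation above
// (the figures are made up for illustration and are not taken from any real config):
//   MaxNumNodes            = 64
//   currNumOfValidators    = 60
//   NodesToShufflePerShard = 2, with 3 shards + metachain => numOfShuffledNodes = 2 * 4 = 8
//   numOfValidatorsAfterShuffling = 60 - 8  = 52
//   availableSlots                = 64 - 52 = 12
//   numOfAvailableNodeSlots       = min(auctionListSize, 12)
// safeSub guards both subtractions: an underflow in the first one only logs a warning and
// falls back to 0, while availableSlots == 0 (or an underflow there) skips selection entirely.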
+ }() + + return als.sortAuctionList(ownersData, numOfAvailableNodeSlots, validatorsInfoMap, randomness) +} + +func (als *auctionListSelector) getAuctionData() (map[string]*OwnerAuctionData, uint32) { + ownersData := make(map[string]*OwnerAuctionData) + numOfNodesInAuction := uint32(0) + + for owner, ownerData := range als.stakingDataProvider.GetOwnersData() { + if ownerData.Qualified && len(ownerData.AuctionList) > 0 { + numAuctionNodes := len(ownerData.AuctionList) + + ownersData[owner] = &OwnerAuctionData{ + numActiveNodes: ownerData.NumActiveNodes, + numAuctionNodes: int64(numAuctionNodes), + numQualifiedAuctionNodes: int64(numAuctionNodes), + numStakedNodes: ownerData.NumStakedNodes, + totalTopUp: ownerData.TotalTopUp, + topUpPerNode: ownerData.TopUpPerNode, + qualifiedTopUpPerNode: ownerData.TopUpPerNode, + auctionList: make([]state.ValidatorInfoHandler, numAuctionNodes), + } + copy(ownersData[owner].auctionList, ownerData.AuctionList) + numOfNodesInAuction += uint32(numAuctionNodes) + } + } + + return ownersData, numOfNodesInAuction +} + +func isInAuction(validator state.ValidatorInfoHandler) bool { + return validator.GetList() == string(common.AuctionList) +} + +// TODO: Move this in elrond-go-core +func safeSub(a, b uint32) (uint32, error) { + if a < b { + return 0, epochStart.ErrUint32SubtractionOverflow + } + return a - b, nil +} + +func (als *auctionListSelector) sortAuctionList( + ownersData map[string]*OwnerAuctionData, + numOfAvailableNodeSlots uint32, + validatorsInfoMap state.ShardValidatorsInfoMapHandler, + randomness []byte, +) error { + softAuctionNodesConfig := als.calcSoftAuctionNodesConfig(ownersData, numOfAvailableNodeSlots) + selectedNodes := als.selectNodes(softAuctionNodesConfig, numOfAvailableNodeSlots, randomness) + return markAuctionNodesAsSelected(selectedNodes, validatorsInfoMap) +} + +func (als *auctionListSelector) calcSoftAuctionNodesConfig( + data map[string]*OwnerAuctionData, + numAvailableSlots uint32, +) map[string]*OwnerAuctionData { + ownersData := copyOwnersData(data) + minTopUp, maxTopUp := als.getMinMaxPossibleTopUp(ownersData) + log.Debug("auctionListSelector: calc min and max possible top up", + "min top up per node", getPrettyValue(minTopUp, als.softAuctionConfig.denominator), + "max top up per node", getPrettyValue(maxTopUp, als.softAuctionConfig.denominator), + ) + + topUp := big.NewInt(0).SetBytes(minTopUp.Bytes()) + previousConfig := copyOwnersData(ownersData) + iterationNumber := uint64(0) + maxNumberOfIterationsReached := false + + for ; topUp.Cmp(maxTopUp) < 0 && !maxNumberOfIterationsReached; topUp.Add(topUp, als.softAuctionConfig.step) { + previousConfig = copyOwnersData(ownersData) + numNodesQualifyingForTopUp := calcNodesConfig(ownersData, topUp) + + if numNodesQualifyingForTopUp < int64(numAvailableSlots) { + break + } + + iterationNumber++ + maxNumberOfIterationsReached = iterationNumber >= als.softAuctionConfig.maxNumberOfIterations + } + + log.Debug("auctionListSelector: found min required", + "topUp", getPrettyValue(topUp, als.softAuctionConfig.denominator), + "after num of iterations", iterationNumber, + ) + return previousConfig +} + +func (als *auctionListSelector) getMinMaxPossibleTopUp(ownersData map[string]*OwnerAuctionData) (*big.Int, *big.Int) { + min := big.NewInt(0).SetBytes(als.softAuctionConfig.maxTopUp.Bytes()) + max := big.NewInt(0).SetBytes(als.softAuctionConfig.minTopUp.Bytes()) + + for _, owner := range ownersData { + if owner.topUpPerNode.Cmp(min) < 0 { + min = 
big.NewInt(0).SetBytes(owner.topUpPerNode.Bytes()) + } + + ownerNumNodesWithOnlyOneAuctionNode := big.NewInt(owner.numActiveNodes + 1) + maxPossibleTopUpForOwner := big.NewInt(0).Div(owner.totalTopUp, ownerNumNodesWithOnlyOneAuctionNode) + if maxPossibleTopUpForOwner.Cmp(max) > 0 { + max = big.NewInt(0).SetBytes(maxPossibleTopUpForOwner.Bytes()) + } + } + + if min.Cmp(als.softAuctionConfig.minTopUp) < 0 { + min = als.softAuctionConfig.minTopUp + } + + return min, max +} + +func copyOwnersData(ownersData map[string]*OwnerAuctionData) map[string]*OwnerAuctionData { + ret := make(map[string]*OwnerAuctionData) + for owner, data := range ownersData { + ret[owner] = &OwnerAuctionData{ + numActiveNodes: data.numActiveNodes, + numAuctionNodes: data.numAuctionNodes, + numQualifiedAuctionNodes: data.numQualifiedAuctionNodes, + numStakedNodes: data.numStakedNodes, + totalTopUp: data.totalTopUp, + topUpPerNode: data.topUpPerNode, + qualifiedTopUpPerNode: data.qualifiedTopUpPerNode, + auctionList: make([]state.ValidatorInfoHandler, len(data.auctionList)), + } + copy(ret[owner].auctionList, data.auctionList) + } + + return ret +} + +func calcNodesConfig(ownersData map[string]*OwnerAuctionData, topUp *big.Int) int64 { + numNodesQualifyingForTopUp := int64(0) + + for ownerPubKey, owner := range ownersData { + activeNodes := big.NewInt(owner.numActiveNodes) + topUpActiveNodes := big.NewInt(0).Mul(topUp, activeNodes) + validatorTopUpForAuction := big.NewInt(0).Sub(owner.totalTopUp, topUpActiveNodes) + if validatorTopUpForAuction.Cmp(topUp) < 0 { + delete(ownersData, ownerPubKey) + continue + } + + qualifiedNodesBigInt := big.NewInt(0).Div(validatorTopUpForAuction, topUp) + qualifiedNodes := qualifiedNodesBigInt.Int64() + isNumQualifiedNodesOverflow := !qualifiedNodesBigInt.IsUint64() + + if qualifiedNodes > owner.numAuctionNodes || isNumQualifiedNodesOverflow { + numNodesQualifyingForTopUp += owner.numAuctionNodes + } else { + numNodesQualifyingForTopUp += qualifiedNodes + owner.numQualifiedAuctionNodes = qualifiedNodes + + ownerRemainingNodes := big.NewInt(owner.numActiveNodes + owner.numQualifiedAuctionNodes) + owner.qualifiedTopUpPerNode = big.NewInt(0).Div(owner.totalTopUp, ownerRemainingNodes) + } + } + + return numNodesQualifyingForTopUp +} + +func markAuctionNodesAsSelected( + selectedNodes []state.ValidatorInfoHandler, + validatorsInfoMap state.ShardValidatorsInfoMapHandler, +) error { + for _, node := range selectedNodes { + newNode := node.ShallowClone() + newNode.SetPreviousList(node.GetList()) + newNode.SetList(string(common.SelectedFromAuctionList)) + + err := validatorsInfoMap.Replace(node, newNode) + if err != nil { + return err + } + } + + return nil +} + +// IsInterfaceNil checks if the underlying pointer is nil +func (als *auctionListSelector) IsInterfaceNil() bool { + return als == nil +} diff --git a/epochStart/metachain/auctionListSelector_test.go b/epochStart/metachain/auctionListSelector_test.go new file mode 100644 index 00000000000..25cced015fc --- /dev/null +++ b/epochStart/metachain/auctionListSelector_test.go @@ -0,0 +1,895 @@ +package metachain + +import ( + "math/big" + "strings" + "testing" + + "github.com/multiversx/mx-chain-core-go/core" + "github.com/multiversx/mx-chain-go/common" + "github.com/multiversx/mx-chain-go/common/forking" + "github.com/multiversx/mx-chain-go/config" + "github.com/multiversx/mx-chain-go/epochStart" + "github.com/multiversx/mx-chain-go/epochStart/notifier" + "github.com/multiversx/mx-chain-go/process" + "github.com/multiversx/mx-chain-go/sharding" 
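The soft auction loop above (calcSoftAuctionNodesConfig together with calcNodesConfig) is easiest to follow on a single made-up owner; the numbers below are illustrative only:

// Owner: totalTopUp = 1500, numActiveNodes = 1, numAuctionNodes = 2 (denomination 0, step 10).
// For a candidate threshold topUp = T, the top-up left for auction nodes is
//   validatorTopUpForAuction = totalTopUp - T*numActiveNodes = 1500 - T
// and the owner qualifies floor((1500 - T) / T) nodes, capped at numAuctionNodes:
//   T = 400 -> floor(1100/400) = 2 qualified nodes, qualifiedTopUpPerNode = 1500/(1+2) = 500
//   T = 600 -> floor(900/600)  = 1 qualified node,  qualifiedTopUpPerNode = 1500/(1+1) = 750
//   T = 760 -> 1500 - 760 = 740 < 760, so the owner is dropped for this threshold
// The outer loop raises T by `step` until fewer nodes qualify than there are available slots
// (or maxNumberOfIterations is reached) and then returns the previous, still-feasible config.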
+ "github.com/multiversx/mx-chain-go/state" + "github.com/multiversx/mx-chain-go/testscommon" + "github.com/multiversx/mx-chain-go/testscommon/stakingcommon" + "github.com/stretchr/testify/require" +) + +func createSoftAuctionConfig() config.SoftAuctionConfig { + return config.SoftAuctionConfig{ + TopUpStep: "10", + MinTopUp: "1", + MaxTopUp: "32000000", + MaxNumberOfIterations: 100000, + } +} + +func createAuctionListSelectorArgs(maxNodesChangeConfig []config.MaxNodesChangeConfig) AuctionListSelectorArgs { + epochNotifier := forking.NewGenericEpochNotifier() + nodesConfigProvider, _ := notifier.NewNodesConfigProvider(epochNotifier, maxNodesChangeConfig) + + argsStakingDataProvider := createStakingDataProviderArgs() + stakingSCProvider, _ := NewStakingDataProvider(argsStakingDataProvider) + shardCoordinator, _ := sharding.NewMultiShardCoordinator(3, core.MetachainShardId) + + softAuctionCfg := createSoftAuctionConfig() + auctionDisplayer, _ := NewAuctionListDisplayer(ArgsAuctionListDisplayer{ + TableDisplayHandler: NewTableDisplayer(), + ValidatorPubKeyConverter: &testscommon.PubkeyConverterMock{}, + AddressPubKeyConverter: &testscommon.PubkeyConverterMock{}, + AuctionConfig: softAuctionCfg, + }) + return AuctionListSelectorArgs{ + ShardCoordinator: shardCoordinator, + StakingDataProvider: stakingSCProvider, + MaxNodesChangeConfigProvider: nodesConfigProvider, + AuctionListDisplayHandler: auctionDisplayer, + SoftAuctionConfig: softAuctionCfg, + } +} + +func createFullAuctionListSelectorArgs(maxNodesChangeConfig []config.MaxNodesChangeConfig) (AuctionListSelectorArgs, ArgsNewEpochStartSystemSCProcessing) { + epochNotifier := forking.NewGenericEpochNotifier() + nodesConfigProvider, _ := notifier.NewNodesConfigProvider(epochNotifier, maxNodesChangeConfig) + + argsSystemSC, _ := createFullArgumentsForSystemSCProcessing(config.EnableEpochs{}, testscommon.CreateMemUnit()) + argsSystemSC.EpochNotifier.CheckEpoch(&testscommon.HeaderHandlerStub{ + EpochField: stakingV4Step2EnableEpoch, + }) + argsSystemSC.MaxNodesChangeConfigProvider = nodesConfigProvider + + softAuctionCfg := createSoftAuctionConfig() + auctionDisplayer, _ := NewAuctionListDisplayer(ArgsAuctionListDisplayer{ + TableDisplayHandler: NewTableDisplayer(), + ValidatorPubKeyConverter: &testscommon.PubkeyConverterMock{}, + AddressPubKeyConverter: &testscommon.PubkeyConverterMock{}, + AuctionConfig: softAuctionCfg, + }) + return AuctionListSelectorArgs{ + ShardCoordinator: argsSystemSC.ShardCoordinator, + StakingDataProvider: argsSystemSC.StakingDataProvider, + MaxNodesChangeConfigProvider: nodesConfigProvider, + AuctionListDisplayHandler: auctionDisplayer, + SoftAuctionConfig: softAuctionCfg, + }, argsSystemSC +} + +func fillValidatorsInfo(t *testing.T, validatorsMap state.ShardValidatorsInfoMapHandler, sdp epochStart.StakingDataProvider) { + for _, validator := range validatorsMap.GetAllValidatorsInfo() { + err := sdp.FillValidatorInfo(validator) + require.Nil(t, err) + } +} + +func TestNewAuctionListSelector(t *testing.T) { + t.Parallel() + + t.Run("nil shard coordinator", func(t *testing.T) { + t.Parallel() + args := createAuctionListSelectorArgs(nil) + args.ShardCoordinator = nil + als, err := NewAuctionListSelector(args) + require.Nil(t, als) + require.Equal(t, epochStart.ErrNilShardCoordinator, err) + }) + + t.Run("nil staking data provider", func(t *testing.T) { + t.Parallel() + args := createAuctionListSelectorArgs(nil) + args.StakingDataProvider = nil + als, err := NewAuctionListSelector(args) + require.Nil(t, als) + 
require.Equal(t, epochStart.ErrNilStakingDataProvider, err) + }) + + t.Run("nil max nodes change config provider", func(t *testing.T) { + t.Parallel() + args := createAuctionListSelectorArgs(nil) + args.MaxNodesChangeConfigProvider = nil + als, err := NewAuctionListSelector(args) + require.Nil(t, als) + require.Equal(t, epochStart.ErrNilMaxNodesChangeConfigProvider, err) + }) + + t.Run("nil auction list displayer", func(t *testing.T) { + t.Parallel() + args := createAuctionListSelectorArgs(nil) + args.AuctionListDisplayHandler = nil + als, err := NewAuctionListSelector(args) + require.Nil(t, als) + require.Equal(t, errNilAuctionListDisplayHandler, err) + }) + + t.Run("invalid soft auction config", func(t *testing.T) { + t.Parallel() + args := createAuctionListSelectorArgs(nil) + args.SoftAuctionConfig.TopUpStep = "0" + als, err := NewAuctionListSelector(args) + require.Nil(t, als) + requireInvalidValueError(t, err, "step") + }) + + t.Run("should work", func(t *testing.T) { + t.Parallel() + args := createAuctionListSelectorArgs(nil) + als, err := NewAuctionListSelector(args) + require.NotNil(t, als) + require.Nil(t, err) + require.False(t, als.IsInterfaceNil()) + }) +} + +func requireInvalidValueError(t *testing.T, err error, msgToContain string) { + require.Error(t, err) + require.True(t, strings.Contains(err.Error(), process.ErrInvalidValue.Error())) + require.True(t, strings.Contains(err.Error(), msgToContain)) +} + +func TestGetAuctionConfig(t *testing.T) { + t.Parallel() + + t.Run("invalid step", func(t *testing.T) { + t.Parallel() + + cfg := createSoftAuctionConfig() + + cfg.TopUpStep = "dsa" + res, err := getAuctionConfig(cfg, 1) + require.Nil(t, res) + requireInvalidValueError(t, err, "step") + + cfg.TopUpStep = "-1" + res, err = getAuctionConfig(cfg, 1) + require.Nil(t, res) + requireInvalidValueError(t, err, "step") + + cfg.TopUpStep = "0" + res, err = getAuctionConfig(cfg, 1) + require.Nil(t, res) + requireInvalidValueError(t, err, "step") + }) + + t.Run("invalid min top up", func(t *testing.T) { + t.Parallel() + + cfg := createSoftAuctionConfig() + + cfg.MinTopUp = "dsa" + res, err := getAuctionConfig(cfg, 1) + require.Nil(t, res) + requireInvalidValueError(t, err, "min top up") + + cfg.MinTopUp = "-1" + res, err = getAuctionConfig(cfg, 1) + require.Nil(t, res) + requireInvalidValueError(t, err, "min top up") + + cfg.MinTopUp = "0" + res, err = getAuctionConfig(cfg, 1) + require.Nil(t, res) + requireInvalidValueError(t, err, "min top up") + }) + + t.Run("invalid max top up", func(t *testing.T) { + t.Parallel() + + cfg := createSoftAuctionConfig() + + cfg.MaxTopUp = "dsa" + res, err := getAuctionConfig(cfg, 1) + require.Nil(t, res) + requireInvalidValueError(t, err, "max top up") + + cfg.MaxTopUp = "-1" + res, err = getAuctionConfig(cfg, 1) + require.Nil(t, res) + requireInvalidValueError(t, err, "max top up") + + cfg.MaxTopUp = "0" + res, err = getAuctionConfig(cfg, 1) + require.Nil(t, res) + requireInvalidValueError(t, err, "max top up") + }) + + t.Run("invalid denomination", func(t *testing.T) { + t.Parallel() + + cfg := createSoftAuctionConfig() + + res, err := getAuctionConfig(cfg, -1) + require.Nil(t, res) + requireInvalidValueError(t, err, "denomination") + }) + + t.Run("zero max number of iterations", func(t *testing.T) { + t.Parallel() + + cfg := createSoftAuctionConfig() + cfg.MaxNumberOfIterations = 0 + + res, err := getAuctionConfig(cfg, 10) + require.Nil(t, res) + requireInvalidValueError(t, err, "for max number of iterations in soft auction config") + }) + + 
t.Run("min top up > max top up", func(t *testing.T) { + t.Parallel() + + cfg := config.SoftAuctionConfig{ + TopUpStep: "10", + MinTopUp: "32", + MaxTopUp: "16", + MaxNumberOfIterations: 1, + } + + res, err := getAuctionConfig(cfg, 1) + require.Nil(t, res) + requireInvalidValueError(t, err, "min value: 32 > max value: 16") + }) + + t.Run("min top up < denominator", func(t *testing.T) { + t.Parallel() + + cfg := config.SoftAuctionConfig{ + TopUpStep: "100", + MinTopUp: "10", + MaxTopUp: "5000", + MaxNumberOfIterations: 1, + } + + res, err := getAuctionConfig(cfg, 2) + require.Nil(t, res) + requireInvalidValueError(t, err, "for min top up in auction config; expected value to be >= 100, got 10") + }) + + t.Run("step < denominator", func(t *testing.T) { + t.Parallel() + + cfg := config.SoftAuctionConfig{ + TopUpStep: "10", + MinTopUp: "100", + MaxTopUp: "5000", + MaxNumberOfIterations: 1, + } + + res, err := getAuctionConfig(cfg, 2) + require.Nil(t, res) + requireInvalidValueError(t, err, "for step in auction config; expected value to be >= 100, got 10") + }) + + t.Run("should work", func(t *testing.T) { + t.Parallel() + + cfg := config.SoftAuctionConfig{ + TopUpStep: "10", + MinTopUp: "1", + MaxTopUp: "444", + MaxNumberOfIterations: 100000, + } + + res, err := getAuctionConfig(cfg, 0) + require.Nil(t, err) + require.Equal(t, &auctionConfig{ + step: big.NewInt(10), + minTopUp: big.NewInt(1), + maxTopUp: big.NewInt(444), + denominator: big.NewInt(1), + maxNumberOfIterations: 100000, + }, res) + + minTopUp, _ := big.NewInt(0).SetString("1000000000000000000", 10) + maxTopUp, _ := big.NewInt(0).SetString("32000000000000000000000000", 10) + step, _ := big.NewInt(0).SetString("10000000000000000000", 10) + cfg = config.SoftAuctionConfig{ + TopUpStep: step.String(), + MinTopUp: minTopUp.String(), + MaxTopUp: maxTopUp.String(), + MaxNumberOfIterations: 100000, + } + + res, err = getAuctionConfig(cfg, 18) + require.Nil(t, err) + require.Equal(t, &auctionConfig{ + step: step, + minTopUp: minTopUp, + maxTopUp: maxTopUp, + denominator: minTopUp, + maxNumberOfIterations: 100000, + }, res) + }) +} + +func TestAuctionListSelector_SelectNodesFromAuction(t *testing.T) { + t.Parallel() + + t.Run("nil randomness, expect error", func(t *testing.T) { + t.Parallel() + + args := createAuctionListSelectorArgs(nil) + als, _ := NewAuctionListSelector(args) + err := als.SelectNodesFromAuctionList(state.NewShardValidatorsInfoMap(), nil) + require.Equal(t, process.ErrNilRandSeed, err) + }) + + t.Run("empty auction list", func(t *testing.T) { + t.Parallel() + + owner1 := []byte("owner1") + owner1StakedKeys := [][]byte{[]byte("pubKey0")} + + validatorsInfo := state.NewShardValidatorsInfoMap() + _ = validatorsInfo.Add(createValidatorInfo(owner1StakedKeys[0], common.EligibleList, "", 0, owner1)) + + args, argsSystemSC := createFullAuctionListSelectorArgs([]config.MaxNodesChangeConfig{{MaxNumNodes: 2}}) + stakingcommon.RegisterValidatorKeys(argsSystemSC.UserAccountsDB, owner1, owner1, owner1StakedKeys, big.NewInt(1000), argsSystemSC.Marshalizer) + fillValidatorsInfo(t, validatorsInfo, argsSystemSC.StakingDataProvider) + + als, _ := NewAuctionListSelector(args) + err := als.SelectNodesFromAuctionList(state.NewShardValidatorsInfoMap(), []byte("rnd")) + require.Nil(t, err) + expectedValidatorsInfo := map[uint32][]state.ValidatorInfoHandler{ + 0: { + createValidatorInfo(owner1StakedKeys[0], common.EligibleList, "", 0, owner1), + }, + } + require.Equal(t, expectedValidatorsInfo, validatorsInfo.GetShardValidatorsInfoMap()) + }) + + 
t.Run("not enough available slots to select auction nodes", func(t *testing.T) { + t.Parallel() + + owner1 := []byte("owner1") + owner2 := []byte("owner2") + owner1StakedKeys := [][]byte{[]byte("pubKey0")} + owner2StakedKeys := [][]byte{[]byte("pubKey1")} + + validatorsInfo := state.NewShardValidatorsInfoMap() + _ = validatorsInfo.Add(createValidatorInfo(owner1StakedKeys[0], common.EligibleList, "", 0, owner1)) + _ = validatorsInfo.Add(createValidatorInfo(owner2StakedKeys[0], common.AuctionList, "", 0, owner2)) + + args, argsSystemSC := createFullAuctionListSelectorArgs([]config.MaxNodesChangeConfig{{MaxNumNodes: 1}}) + stakingcommon.RegisterValidatorKeys(argsSystemSC.UserAccountsDB, owner1, owner1, owner1StakedKeys, big.NewInt(1000), argsSystemSC.Marshalizer) + stakingcommon.RegisterValidatorKeys(argsSystemSC.UserAccountsDB, owner2, owner2, owner2StakedKeys, big.NewInt(1000), argsSystemSC.Marshalizer) + fillValidatorsInfo(t, validatorsInfo, argsSystemSC.StakingDataProvider) + + als, _ := NewAuctionListSelector(args) + err := als.SelectNodesFromAuctionList(validatorsInfo, []byte("rnd")) + require.Nil(t, err) + expectedValidatorsInfo := map[uint32][]state.ValidatorInfoHandler{ + 0: { + createValidatorInfo(owner1StakedKeys[0], common.EligibleList, "", 0, owner1), + createValidatorInfo(owner2StakedKeys[0], common.AuctionList, "", 0, owner2), + }, + } + require.Equal(t, expectedValidatorsInfo, validatorsInfo.GetShardValidatorsInfoMap()) + }) + + t.Run("one eligible + one auction, max num nodes = 1, number of nodes after shuffling = 0, expect node in auction is selected", func(t *testing.T) { + t.Parallel() + + owner1 := []byte("owner1") + owner2 := []byte("owner2") + owner1StakedKeys := [][]byte{[]byte("pubKey0")} + owner2StakedKeys := [][]byte{[]byte("pubKey1")} + + validatorsInfo := state.NewShardValidatorsInfoMap() + _ = validatorsInfo.Add(createValidatorInfo(owner1StakedKeys[0], common.EligibleList, "", 0, owner1)) + _ = validatorsInfo.Add(createValidatorInfo(owner2StakedKeys[0], common.AuctionList, "", 0, owner2)) + + args, argsSystemSC := createFullAuctionListSelectorArgs([]config.MaxNodesChangeConfig{{MaxNumNodes: 1, NodesToShufflePerShard: 1}}) + stakingcommon.RegisterValidatorKeys(argsSystemSC.UserAccountsDB, owner1, owner1, owner1StakedKeys, big.NewInt(1000), argsSystemSC.Marshalizer) + stakingcommon.RegisterValidatorKeys(argsSystemSC.UserAccountsDB, owner2, owner2, owner2StakedKeys, big.NewInt(1000), argsSystemSC.Marshalizer) + fillValidatorsInfo(t, validatorsInfo, argsSystemSC.StakingDataProvider) + + als, _ := NewAuctionListSelector(args) + err := als.SelectNodesFromAuctionList(validatorsInfo, []byte("rnd")) + require.Nil(t, err) + expectedValidatorsInfo := map[uint32][]state.ValidatorInfoHandler{ + 0: { + createValidatorInfo(owner1StakedKeys[0], common.EligibleList, "", 0, owner1), + createValidatorInfo(owner2StakedKeys[0], common.SelectedFromAuctionList, common.AuctionList, 0, owner2), + }, + } + require.Equal(t, expectedValidatorsInfo, validatorsInfo.GetShardValidatorsInfoMap()) + }) + + t.Run("two available slots for auction nodes, but only one node in auction", func(t *testing.T) { + t.Parallel() + + owner1 := []byte("owner1") + owner1StakedKeys := [][]byte{[]byte("pubKey0")} + validatorsInfo := state.NewShardValidatorsInfoMap() + _ = validatorsInfo.Add(createValidatorInfo(owner1StakedKeys[0], common.AuctionList, "", 0, owner1)) + + args, argsSystemSC := createFullAuctionListSelectorArgs([]config.MaxNodesChangeConfig{{MaxNumNodes: 2}}) + 
stakingcommon.RegisterValidatorKeys(argsSystemSC.UserAccountsDB, owner1, owner1, owner1StakedKeys, big.NewInt(1000), argsSystemSC.Marshalizer) + fillValidatorsInfo(t, validatorsInfo, argsSystemSC.StakingDataProvider) + + als, _ := NewAuctionListSelector(args) + err := als.SelectNodesFromAuctionList(validatorsInfo, []byte("rnd")) + require.Nil(t, err) + expectedValidatorsInfo := map[uint32][]state.ValidatorInfoHandler{ + 0: { + createValidatorInfo(owner1StakedKeys[0], common.SelectedFromAuctionList, common.AuctionList, 0, owner1), + }, + } + require.Equal(t, expectedValidatorsInfo, validatorsInfo.GetShardValidatorsInfoMap()) + }) +} + +func TestAuctionListSelector_calcSoftAuctionNodesConfigEdgeCases(t *testing.T) { + t.Parallel() + + randomness := []byte("pk0") + args := createAuctionListSelectorArgs(nil) + als, _ := NewAuctionListSelector(args) + + t.Run("two validators, both have zero top up", func(t *testing.T) { + t.Parallel() + + v1 := &state.ValidatorInfo{PublicKey: []byte("pk1")} + v2 := &state.ValidatorInfo{PublicKey: []byte("pk2")} + + owner1 := "owner1" + owner2 := "owner2" + ownersData := map[string]*OwnerAuctionData{ + owner1: { + numActiveNodes: 0, + numAuctionNodes: 1, + numQualifiedAuctionNodes: 1, + numStakedNodes: 1, + totalTopUp: big.NewInt(0), + topUpPerNode: big.NewInt(0), + qualifiedTopUpPerNode: big.NewInt(0), + auctionList: []state.ValidatorInfoHandler{v1}, + }, + owner2: { + numActiveNodes: 0, + numAuctionNodes: 1, + numQualifiedAuctionNodes: 1, + numStakedNodes: 1, + totalTopUp: big.NewInt(0), + topUpPerNode: big.NewInt(0), + qualifiedTopUpPerNode: big.NewInt(0), + auctionList: []state.ValidatorInfoHandler{v2}, + }, + } + + minTopUp, maxTopUp := als.getMinMaxPossibleTopUp(ownersData) + require.Equal(t, als.softAuctionConfig.minTopUp, minTopUp) + require.Equal(t, als.softAuctionConfig.minTopUp, maxTopUp) + + softAuctionConfig := als.calcSoftAuctionNodesConfig(ownersData, 2) + require.Equal(t, ownersData, softAuctionConfig) + selectedNodes := als.selectNodes(softAuctionConfig, 2, randomness) + require.Equal(t, []state.ValidatorInfoHandler{v2, v1}, selectedNodes) + + softAuctionConfig = als.calcSoftAuctionNodesConfig(ownersData, 1) + require.Equal(t, ownersData, softAuctionConfig) + selectedNodes = als.selectNodes(softAuctionConfig, 1, randomness) + require.Equal(t, []state.ValidatorInfoHandler{v2}, selectedNodes) + }) + + t.Run("one validator with zero top up, one with min top up, one with top up", func(t *testing.T) { + t.Parallel() + + v1 := &state.ValidatorInfo{PublicKey: []byte("pk1")} + v2 := &state.ValidatorInfo{PublicKey: []byte("pk2")} + v3 := &state.ValidatorInfo{PublicKey: []byte("pk3")} + + owner1 := "owner1" + owner2 := "owner2" + owner3 := "owner3" + ownersData := map[string]*OwnerAuctionData{ + owner1: { + numActiveNodes: 0, + numAuctionNodes: 1, + numQualifiedAuctionNodes: 1, + numStakedNodes: 1, + totalTopUp: big.NewInt(0), + topUpPerNode: big.NewInt(0), + qualifiedTopUpPerNode: big.NewInt(0), + auctionList: []state.ValidatorInfoHandler{v1}, + }, + owner2: { + numActiveNodes: 0, + numAuctionNodes: 1, + numQualifiedAuctionNodes: 1, + numStakedNodes: 1, + totalTopUp: big.NewInt(1), + topUpPerNode: big.NewInt(1), + qualifiedTopUpPerNode: big.NewInt(1), + auctionList: []state.ValidatorInfoHandler{v2}, + }, + owner3: { + numActiveNodes: 0, + numAuctionNodes: 1, + numQualifiedAuctionNodes: 1, + numStakedNodes: 1, + totalTopUp: big.NewInt(1000), + topUpPerNode: big.NewInt(1000), + qualifiedTopUpPerNode: big.NewInt(1000), + auctionList: 
[]state.ValidatorInfoHandler{v3}, + }, + } + + minTopUp, maxTopUp := als.getMinMaxPossibleTopUp(ownersData) + require.Equal(t, big.NewInt(1), minTopUp) + require.Equal(t, big.NewInt(1000), maxTopUp) + + softAuctionConfig := als.calcSoftAuctionNodesConfig(ownersData, 3) + require.Equal(t, ownersData, softAuctionConfig) + selectedNodes := als.selectNodes(softAuctionConfig, 3, randomness) + require.Equal(t, []state.ValidatorInfoHandler{v3, v2, v1}, selectedNodes) + + softAuctionConfig = als.calcSoftAuctionNodesConfig(ownersData, 2) + expectedSoftAuctionConfig := copyOwnersData(softAuctionConfig) + delete(expectedSoftAuctionConfig, owner1) + require.Equal(t, expectedSoftAuctionConfig, softAuctionConfig) + selectedNodes = als.selectNodes(softAuctionConfig, 2, randomness) + require.Equal(t, []state.ValidatorInfoHandler{v3, v2}, selectedNodes) + + softAuctionConfig = als.calcSoftAuctionNodesConfig(ownersData, 1) + delete(expectedSoftAuctionConfig, owner2) + require.Equal(t, expectedSoftAuctionConfig, softAuctionConfig) + selectedNodes = als.selectNodes(softAuctionConfig, 1, randomness) + require.Equal(t, []state.ValidatorInfoHandler{v3}, selectedNodes) + }) + + t.Run("two validators, both have same top up", func(t *testing.T) { + v1 := &state.ValidatorInfo{PublicKey: []byte("pk1")} + v2 := &state.ValidatorInfo{PublicKey: []byte("pk2")} + + owner1 := "owner1" + owner2 := "owner2" + ownersData := map[string]*OwnerAuctionData{ + owner1: { + numActiveNodes: 0, + numAuctionNodes: 1, + numQualifiedAuctionNodes: 1, + numStakedNodes: 1, + totalTopUp: big.NewInt(1000), + topUpPerNode: big.NewInt(1000), + qualifiedTopUpPerNode: big.NewInt(1000), + auctionList: []state.ValidatorInfoHandler{v1}, + }, + owner2: { + numActiveNodes: 0, + numAuctionNodes: 1, + numQualifiedAuctionNodes: 1, + numStakedNodes: 1, + totalTopUp: big.NewInt(1000), + topUpPerNode: big.NewInt(1000), + qualifiedTopUpPerNode: big.NewInt(1000), + auctionList: []state.ValidatorInfoHandler{v2}, + }, + } + + minTopUp, maxTopUp := als.getMinMaxPossibleTopUp(ownersData) + require.Equal(t, big.NewInt(1000), minTopUp) + require.Equal(t, big.NewInt(1000), maxTopUp) + + softAuctionConfig := als.calcSoftAuctionNodesConfig(ownersData, 2) + require.Equal(t, ownersData, softAuctionConfig) + selectedNodes := als.selectNodes(softAuctionConfig, 2, randomness) + require.Equal(t, []state.ValidatorInfoHandler{v2, v1}, selectedNodes) + + softAuctionConfig = als.calcSoftAuctionNodesConfig(ownersData, 1) + require.Equal(t, ownersData, softAuctionConfig) + selectedNodes = als.selectNodes(softAuctionConfig, 1, randomness) + require.Equal(t, []state.ValidatorInfoHandler{v2}, selectedNodes) + }) + + t.Run("two validators, top up difference less than step", func(t *testing.T) { + v1 := &state.ValidatorInfo{PublicKey: []byte("pk1")} + v2 := &state.ValidatorInfo{PublicKey: []byte("pk2")} + + owner1 := "owner1" + owner2 := "owner2" + ownersData := map[string]*OwnerAuctionData{ + owner1: { + numActiveNodes: 0, + numAuctionNodes: 1, + numQualifiedAuctionNodes: 1, + numStakedNodes: 1, + totalTopUp: big.NewInt(1000), + topUpPerNode: big.NewInt(1000), + qualifiedTopUpPerNode: big.NewInt(1000), + auctionList: []state.ValidatorInfoHandler{v1}, + }, + owner2: { + numActiveNodes: 0, + numAuctionNodes: 1, + numQualifiedAuctionNodes: 1, + numStakedNodes: 1, + totalTopUp: big.NewInt(995), + topUpPerNode: big.NewInt(995), + qualifiedTopUpPerNode: big.NewInt(995), + auctionList: []state.ValidatorInfoHandler{v2}, + }, + } + + minTopUp, maxTopUp := 
als.getMinMaxPossibleTopUp(ownersData) + require.Equal(t, big.NewInt(995), minTopUp) + require.Equal(t, big.NewInt(1000), maxTopUp) + + softAuctionConfig := als.calcSoftAuctionNodesConfig(ownersData, 2) + require.Equal(t, ownersData, softAuctionConfig) + selectedNodes := als.selectNodes(softAuctionConfig, 2, randomness) + require.Equal(t, []state.ValidatorInfoHandler{v1, v2}, selectedNodes) + + softAuctionConfig = als.calcSoftAuctionNodesConfig(ownersData, 1) + require.Equal(t, ownersData, softAuctionConfig) + selectedNodes = als.selectNodes(softAuctionConfig, 1, randomness) + require.Equal(t, []state.ValidatorInfoHandler{v1}, selectedNodes) + }) + + t.Run("three validators, top up difference equal to step", func(t *testing.T) { + v1 := &state.ValidatorInfo{PublicKey: []byte("pk1")} + v2 := &state.ValidatorInfo{PublicKey: []byte("pk2")} + v0 := &state.ValidatorInfo{PublicKey: []byte("pk0")} + + owner1 := "owner1" + owner2 := "owner2" + ownersData := map[string]*OwnerAuctionData{ + owner1: { + numActiveNodes: 0, + numAuctionNodes: 1, + numQualifiedAuctionNodes: 1, + numStakedNodes: 1, + totalTopUp: big.NewInt(1000), + topUpPerNode: big.NewInt(1000), + qualifiedTopUpPerNode: big.NewInt(1000), + auctionList: []state.ValidatorInfoHandler{v1}, + }, + owner2: { + numActiveNodes: 0, + numAuctionNodes: 2, + numQualifiedAuctionNodes: 2, + numStakedNodes: 2, + totalTopUp: big.NewInt(1980), + topUpPerNode: big.NewInt(990), + qualifiedTopUpPerNode: big.NewInt(990), + auctionList: []state.ValidatorInfoHandler{v2, v0}, + }, + } + + minTopUp, maxTopUp := als.getMinMaxPossibleTopUp(ownersData) + require.Equal(t, big.NewInt(990), minTopUp) + require.Equal(t, big.NewInt(1980), maxTopUp) + + softAuctionConfig := als.calcSoftAuctionNodesConfig(ownersData, 3) + require.Equal(t, ownersData, softAuctionConfig) + selectedNodes := als.selectNodes(softAuctionConfig, 3, randomness) + require.Equal(t, []state.ValidatorInfoHandler{v1, v2, v0}, selectedNodes) + + softAuctionConfig = als.calcSoftAuctionNodesConfig(ownersData, 2) + expectedSoftAuction := copyOwnersData(ownersData) + expectedSoftAuction[owner2].numQualifiedAuctionNodes = 1 + expectedSoftAuction[owner2].qualifiedTopUpPerNode = big.NewInt(1980) + require.Equal(t, expectedSoftAuction, softAuctionConfig) + selectedNodes = als.selectNodes(softAuctionConfig, 2, randomness) + require.Equal(t, []state.ValidatorInfoHandler{v2, v1}, selectedNodes) + + softAuctionConfig = als.calcSoftAuctionNodesConfig(ownersData, 1) + delete(expectedSoftAuction, owner1) + require.Equal(t, expectedSoftAuction, softAuctionConfig) + selectedNodes = als.selectNodes(softAuctionConfig, 1, randomness) + require.Equal(t, []state.ValidatorInfoHandler{v2}, selectedNodes) + }) + + t.Run("large top up difference, would qualify more nodes than an owner has, expect correct computation", func(t *testing.T) { + argsLargeTopUp := createAuctionListSelectorArgs(nil) + argsLargeTopUp.SoftAuctionConfig = config.SoftAuctionConfig{ + TopUpStep: "10000000000000000000", // 10 eGLD + MinTopUp: "1000000000000000000", // 1 eGLD + MaxTopUp: "32000000000000000000000000", // 32 mil eGLD + MaxNumberOfIterations: 10, + } + argsLargeTopUp.Denomination = 18 + selector, _ := NewAuctionListSelector(argsLargeTopUp) + + v0 := &state.ValidatorInfo{PublicKey: []byte("pk0")} + v1 := &state.ValidatorInfo{PublicKey: []byte("pk1")} + v2 := &state.ValidatorInfo{PublicKey: []byte("pk2")} + + oneEGLD, _ := big.NewInt(0).SetString("1000000000000000000", 10) + owner1TopUp, _ := big.NewInt(0).SetString("32000000000000000000000000", 
10) // 31 mil eGLD + owner1 := "owner1" + owner2 := "owner2" + ownersData := map[string]*OwnerAuctionData{ + owner1: { + numActiveNodes: 0, + numAuctionNodes: 1, + numQualifiedAuctionNodes: 1, + numStakedNodes: 1, + totalTopUp: owner1TopUp, + topUpPerNode: owner1TopUp, + qualifiedTopUpPerNode: owner1TopUp, + auctionList: []state.ValidatorInfoHandler{v0}, + }, + owner2: { + numActiveNodes: 0, + numAuctionNodes: 2, + numQualifiedAuctionNodes: 2, + numStakedNodes: 2, + totalTopUp: big.NewInt(0), + topUpPerNode: big.NewInt(0), + qualifiedTopUpPerNode: big.NewInt(0), + auctionList: []state.ValidatorInfoHandler{v1, v2}, + }, + } + + minTopUp, maxTopUp := selector.getMinMaxPossibleTopUp(ownersData) + require.Equal(t, oneEGLD, minTopUp) + require.Equal(t, owner1TopUp, maxTopUp) + + softAuctionConfig := selector.calcSoftAuctionNodesConfig(ownersData, 3) + require.Equal(t, ownersData, softAuctionConfig) + selectedNodes := selector.selectNodes(softAuctionConfig, 3, randomness) + require.Equal(t, []state.ValidatorInfoHandler{v0, v2, v1}, selectedNodes) + + softAuctionConfig = selector.calcSoftAuctionNodesConfig(ownersData, 2) + expectedSoftAuction := copyOwnersData(ownersData) + expectedSoftAuction[owner1].numQualifiedAuctionNodes = 1 + expectedSoftAuction[owner1].qualifiedTopUpPerNode = owner1TopUp + require.Equal(t, expectedSoftAuction, softAuctionConfig) + selectedNodes = selector.selectNodes(softAuctionConfig, 2, randomness) + require.Equal(t, []state.ValidatorInfoHandler{v0, v2}, selectedNodes) + + softAuctionConfig = selector.calcSoftAuctionNodesConfig(ownersData, 1) + delete(expectedSoftAuction, owner2) + require.Equal(t, expectedSoftAuction, softAuctionConfig) + selectedNodes = selector.selectNodes(softAuctionConfig, 1, randomness) + require.Equal(t, []state.ValidatorInfoHandler{v0}, selectedNodes) + }) +} + +func TestAuctionListSelector_calcSoftAuctionNodesConfig(t *testing.T) { + t.Parallel() + + randomness := []byte("pk0") + v1 := &state.ValidatorInfo{PublicKey: []byte("pk1")} + v2 := &state.ValidatorInfo{PublicKey: []byte("pk2")} + v3 := &state.ValidatorInfo{PublicKey: []byte("pk3")} + v4 := &state.ValidatorInfo{PublicKey: []byte("pk4")} + v5 := &state.ValidatorInfo{PublicKey: []byte("pk5")} + v6 := &state.ValidatorInfo{PublicKey: []byte("pk6")} + v7 := &state.ValidatorInfo{PublicKey: []byte("pk7")} + v8 := &state.ValidatorInfo{PublicKey: []byte("pk8")} + + owner1 := "owner1" + owner2 := "owner2" + owner3 := "owner3" + owner4 := "owner4" + ownersData := map[string]*OwnerAuctionData{ + owner1: { + numActiveNodes: 2, + numAuctionNodes: 2, + numQualifiedAuctionNodes: 2, + numStakedNodes: 4, + totalTopUp: big.NewInt(1500), + topUpPerNode: big.NewInt(375), + qualifiedTopUpPerNode: big.NewInt(375), + auctionList: []state.ValidatorInfoHandler{v1, v2}, + }, + owner2: { + numActiveNodes: 0, + numAuctionNodes: 3, + numQualifiedAuctionNodes: 3, + numStakedNodes: 3, + totalTopUp: big.NewInt(3000), + topUpPerNode: big.NewInt(1000), + qualifiedTopUpPerNode: big.NewInt(1000), + auctionList: []state.ValidatorInfoHandler{v3, v4, v5}, + }, + owner3: { + numActiveNodes: 1, + numAuctionNodes: 2, + numQualifiedAuctionNodes: 2, + numStakedNodes: 3, + totalTopUp: big.NewInt(1000), + topUpPerNode: big.NewInt(333), + qualifiedTopUpPerNode: big.NewInt(333), + auctionList: []state.ValidatorInfoHandler{v6, v7}, + }, + owner4: { + numActiveNodes: 1, + numAuctionNodes: 1, + numQualifiedAuctionNodes: 1, + numStakedNodes: 2, + totalTopUp: big.NewInt(0), + topUpPerNode: big.NewInt(0), + qualifiedTopUpPerNode: 
big.NewInt(0), + auctionList: []state.ValidatorInfoHandler{v8}, + }, + } + + args := createAuctionListSelectorArgs(nil) + als, _ := NewAuctionListSelector(args) + + minTopUp, maxTopUp := als.getMinMaxPossibleTopUp(ownersData) + require.Equal(t, big.NewInt(1), minTopUp) // owner4 having all nodes in auction + require.Equal(t, big.NewInt(3000), maxTopUp) // owner2 having only only one node in auction + + softAuctionConfig := als.calcSoftAuctionNodesConfig(ownersData, 9) + require.Equal(t, ownersData, softAuctionConfig) + selectedNodes := als.selectNodes(softAuctionConfig, 8, randomness) + require.Equal(t, []state.ValidatorInfoHandler{v5, v4, v3, v2, v1, v7, v6, v8}, selectedNodes) + + softAuctionConfig = als.calcSoftAuctionNodesConfig(ownersData, 8) + require.Equal(t, ownersData, softAuctionConfig) + selectedNodes = als.selectNodes(softAuctionConfig, 8, randomness) + require.Equal(t, []state.ValidatorInfoHandler{v5, v4, v3, v2, v1, v7, v6, v8}, selectedNodes) + + softAuctionConfig = als.calcSoftAuctionNodesConfig(ownersData, 7) + expectedConfig := copyOwnersData(ownersData) + delete(expectedConfig, owner4) + require.Equal(t, expectedConfig, softAuctionConfig) + selectedNodes = als.selectNodes(softAuctionConfig, 7, randomness) + require.Equal(t, []state.ValidatorInfoHandler{v5, v4, v3, v2, v1, v7, v6}, selectedNodes) + + softAuctionConfig = als.calcSoftAuctionNodesConfig(ownersData, 6) + expectedConfig[owner3].numQualifiedAuctionNodes = 1 + expectedConfig[owner3].qualifiedTopUpPerNode = big.NewInt(500) + require.Equal(t, expectedConfig, softAuctionConfig) + selectedNodes = als.selectNodes(softAuctionConfig, 6, randomness) + require.Equal(t, []state.ValidatorInfoHandler{v5, v4, v3, v7, v2, v1}, selectedNodes) + + softAuctionConfig = als.calcSoftAuctionNodesConfig(ownersData, 5) + expectedConfig[owner1].numQualifiedAuctionNodes = 1 + expectedConfig[owner1].qualifiedTopUpPerNode = big.NewInt(500) + require.Equal(t, expectedConfig, softAuctionConfig) + selectedNodes = als.selectNodes(softAuctionConfig, 5, randomness) + require.Equal(t, []state.ValidatorInfoHandler{v5, v4, v3, v7, v2}, selectedNodes) + + softAuctionConfig = als.calcSoftAuctionNodesConfig(ownersData, 4) + require.Equal(t, expectedConfig, softAuctionConfig) + selectedNodes = als.selectNodes(softAuctionConfig, 4, randomness) + require.Equal(t, []state.ValidatorInfoHandler{v5, v4, v3, v7}, selectedNodes) + + softAuctionConfig = als.calcSoftAuctionNodesConfig(ownersData, 3) + delete(expectedConfig, owner3) + delete(expectedConfig, owner1) + require.Equal(t, expectedConfig, softAuctionConfig) + selectedNodes = als.selectNodes(softAuctionConfig, 3, randomness) + require.Equal(t, []state.ValidatorInfoHandler{v5, v4, v3}, selectedNodes) + + softAuctionConfig = als.calcSoftAuctionNodesConfig(ownersData, 2) + expectedConfig[owner2].numQualifiedAuctionNodes = 2 + expectedConfig[owner2].qualifiedTopUpPerNode = big.NewInt(1500) + require.Equal(t, expectedConfig, softAuctionConfig) + selectedNodes = als.selectNodes(softAuctionConfig, 2, randomness) + require.Equal(t, []state.ValidatorInfoHandler{v5, v4}, selectedNodes) + + softAuctionConfig = als.calcSoftAuctionNodesConfig(ownersData, 1) + expectedConfig[owner2].numQualifiedAuctionNodes = 1 + expectedConfig[owner2].qualifiedTopUpPerNode = big.NewInt(3000) + require.Equal(t, expectedConfig, softAuctionConfig) + selectedNodes = als.selectNodes(softAuctionConfig, 1, randomness) + require.Equal(t, []state.ValidatorInfoHandler{v5}, selectedNodes) +} diff --git 
a/epochStart/metachain/auctionListSorting.go b/epochStart/metachain/auctionListSorting.go new file mode 100644 index 00000000000..4759ec65bcb --- /dev/null +++ b/epochStart/metachain/auctionListSorting.go @@ -0,0 +1,104 @@ +package metachain + +import ( + "bytes" + "math/big" + "sort" + + "github.com/multiversx/mx-chain-go/state" +) + +func (als *auctionListSelector) selectNodes( + ownersData map[string]*OwnerAuctionData, + numAvailableSlots uint32, + randomness []byte, +) []state.ValidatorInfoHandler { + selectedFromAuction := make([]state.ValidatorInfoHandler, 0) + validatorTopUpMap := make(map[string]*big.Int) + + pubKeyLen := getPubKeyLen(ownersData) + normRand := calcNormalizedRandomness(randomness, pubKeyLen) + + for _, owner := range ownersData { + sortListByPubKey(owner.auctionList) + addQualifiedValidatorsTopUpInMap(owner, validatorTopUpMap) + selectedFromAuction = append(selectedFromAuction, owner.auctionList[:owner.numQualifiedAuctionNodes]...) + } + + als.auctionListDisplayer.DisplayOwnersSelectedNodes(ownersData) + sortValidators(selectedFromAuction, validatorTopUpMap, normRand) + als.auctionListDisplayer.DisplayAuctionList(selectedFromAuction, ownersData, numAvailableSlots) + + return selectedFromAuction[:numAvailableSlots] +} + +func getPubKeyLen(ownersData map[string]*OwnerAuctionData) int { + for _, owner := range ownersData { + return len(owner.auctionList[0].GetPublicKey()) + } + + return 0 +} + +func calcNormalizedRandomness(randomness []byte, expectedLen int) []byte { + rand := randomness + randLen := len(rand) + + if expectedLen > randLen { + repeatedCt := expectedLen/randLen + 1 + rand = bytes.Repeat(randomness, repeatedCt) + } + + rand = rand[:expectedLen] + return rand +} + +func sortListByPubKey(list []state.ValidatorInfoHandler) { + sort.SliceStable(list, func(i, j int) bool { + pubKey1 := list[i].GetPublicKey() + pubKey2 := list[j].GetPublicKey() + + return bytes.Compare(pubKey1, pubKey2) > 0 + }) +} + +func addQualifiedValidatorsTopUpInMap(owner *OwnerAuctionData, validatorTopUpMap map[string]*big.Int) { + for i := int64(0); i < owner.numQualifiedAuctionNodes; i++ { + validatorPubKey := string(owner.auctionList[i].GetPublicKey()) + validatorTopUpMap[validatorPubKey] = big.NewInt(0).SetBytes(owner.qualifiedTopUpPerNode.Bytes()) + } +} + +func sortValidators( + list []state.ValidatorInfoHandler, + validatorTopUpMap map[string]*big.Int, + randomness []byte, +) { + sort.SliceStable(list, func(i, j int) bool { + pubKey1 := list[i].GetPublicKey() + pubKey2 := list[j].GetPublicKey() + + nodeTopUpPubKey1 := validatorTopUpMap[string(pubKey1)] + nodeTopUpPubKey2 := validatorTopUpMap[string(pubKey2)] + + if nodeTopUpPubKey1.Cmp(nodeTopUpPubKey2) == 0 { + return compareByXORWithRandomness(pubKey1, pubKey2, randomness) + } + + return nodeTopUpPubKey1.Cmp(nodeTopUpPubKey2) > 0 + }) +} + +func compareByXORWithRandomness(pubKey1, pubKey2, randomness []byte) bool { + xorLen := len(randomness) + + key1Xor := make([]byte, xorLen) + key2Xor := make([]byte, xorLen) + + for idx := 0; idx < xorLen; idx++ { + key1Xor[idx] = pubKey1[idx] ^ randomness[idx] + key2Xor[idx] = pubKey2[idx] ^ randomness[idx] + } + + return bytes.Compare(key1Xor, key2Xor) == 1 +} diff --git a/epochStart/metachain/auctionListSorting_test.go b/epochStart/metachain/auctionListSorting_test.go new file mode 100644 index 00000000000..637869ea1d6 --- /dev/null +++ b/epochStart/metachain/auctionListSorting_test.go @@ -0,0 +1,39 @@ +package metachain + +import ( + "testing" + + "github.com/stretchr/testify/require" +) 
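+// Note (editorial sketch, not part of the original change): calcNormalizedRandomness, defined in
+// auctionListSorting.go above, repeats the randomness until it covers the expected length and then
+// truncates it, as the cases below verify. For illustration:
+//
+//	calcNormalizedRandomness([]byte("rand"), 6) // -> []byte("randra")
+//	calcNormalizedRandomness([]byte("rand"), 2) // -> []byte("ra")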
+ +func TestCalcNormalizedRandomness(t *testing.T) { + t.Parallel() + + t.Run("randomness longer than expected len", func(t *testing.T) { + t.Parallel() + + result := calcNormalizedRandomness([]byte("rand"), 2) + require.Equal(t, []byte("ra"), result) + }) + + t.Run("randomness length equal to expected len", func(t *testing.T) { + t.Parallel() + + result := calcNormalizedRandomness([]byte("rand"), 4) + require.Equal(t, []byte("rand"), result) + }) + + t.Run("randomness length less than expected len", func(t *testing.T) { + t.Parallel() + + result := calcNormalizedRandomness([]byte("rand"), 6) + require.Equal(t, []byte("randra"), result) + }) + + t.Run("expected len is zero", func(t *testing.T) { + t.Parallel() + + result := calcNormalizedRandomness([]byte("rand"), 0) + require.Empty(t, result) + }) +} diff --git a/epochStart/metachain/common.go b/epochStart/metachain/common.go new file mode 100644 index 00000000000..9eb614772ab --- /dev/null +++ b/epochStart/metachain/common.go @@ -0,0 +1,16 @@ +package metachain + +import "github.com/multiversx/mx-chain-go/state" + +// GetAllNodeKeys returns all from the provided map +func GetAllNodeKeys(validatorsInfo state.ShardValidatorsInfoMapHandler) map[uint32][][]byte { + nodeKeys := make(map[uint32][][]byte) + for shardID, validatorsInfoSlice := range validatorsInfo.GetShardValidatorsInfoMap() { + nodeKeys[shardID] = make([][]byte, 0) + for _, validatorInfo := range validatorsInfoSlice { + nodeKeys[shardID] = append(nodeKeys[shardID], validatorInfo.GetPublicKey()) + } + } + + return nodeKeys +} diff --git a/epochStart/metachain/economicsDataProvider.go b/epochStart/metachain/economicsDataProvider.go index c39eb917521..ec165ffe80a 100644 --- a/epochStart/metachain/economicsDataProvider.go +++ b/epochStart/metachain/economicsDataProvider.go @@ -53,7 +53,7 @@ func (es *epochEconomicsStatistics) SetLeadersFees(fees *big.Int) { } // SetRewardsToBeDistributed sets the rewards to be distributed at the end of the epoch (includes the rewards per block, -//the block producers fees, protocol sustainability rewards and developer fees) +// the block producers fees, protocol sustainability rewards and developer fees) func (es *epochEconomicsStatistics) SetRewardsToBeDistributed(rewards *big.Int) { es.mutEconomicsStatistics.Lock() defer es.mutEconomicsStatistics.Unlock() @@ -99,7 +99,7 @@ func (es *epochEconomicsStatistics) LeaderFees() *big.Int { } // RewardsToBeDistributed returns the rewards to be distributed at the end of epoch (includes rewards for produced -//blocks, protocol sustainability rewards, block producer fees and developer fees) +// blocks, protocol sustainability rewards, block producer fees and developer fees) func (es *epochEconomicsStatistics) RewardsToBeDistributed() *big.Int { es.mutEconomicsStatistics.RLock() defer es.mutEconomicsStatistics.RUnlock() diff --git a/epochStart/metachain/epochStartData.go b/epochStart/metachain/epochStartData.go index 1c6bd30516e..1a67b3a3692 100644 --- a/epochStart/metachain/epochStartData.go +++ b/epochStart/metachain/epochStartData.go @@ -289,7 +289,7 @@ func (e *epochStartData) getShardDataFromEpochStartData( } epochStartIdentifier := core.EpochStartIdentifier(prevEpoch) - if prevEpoch == 0 { + if prevEpoch == e.genesisEpoch { return lastMetaHash, []byte(epochStartIdentifier), nil } diff --git a/epochStart/metachain/errors.go b/epochStart/metachain/errors.go new file mode 100644 index 00000000000..319bf83dafd --- /dev/null +++ b/epochStart/metachain/errors.go @@ -0,0 +1,11 @@ +package metachain + +import 
"errors" + +var errNilValidatorsInfoMap = errors.New("received nil shard validators info map") + +var errCannotComputeDenominator = errors.New("cannot compute denominator value") + +var errNilAuctionListDisplayHandler = errors.New("nil auction list display handler provided") + +var errNilTableDisplayHandler = errors.New("nil table display handler provided") diff --git a/epochStart/metachain/interface.go b/epochStart/metachain/interface.go new file mode 100644 index 00000000000..1e141fc079f --- /dev/null +++ b/epochStart/metachain/interface.go @@ -0,0 +1,24 @@ +package metachain + +import ( + "github.com/multiversx/mx-chain-core-go/display" + "github.com/multiversx/mx-chain-go/state" +) + +// AuctionListDisplayHandler should be able to display auction list data during selection process +type AuctionListDisplayHandler interface { + DisplayOwnersData(ownersData map[string]*OwnerAuctionData) + DisplayOwnersSelectedNodes(ownersData map[string]*OwnerAuctionData) + DisplayAuctionList( + auctionList []state.ValidatorInfoHandler, + ownersData map[string]*OwnerAuctionData, + numOfSelectedNodes uint32, + ) + IsInterfaceNil() bool +} + +// TableDisplayHandler should be able to display tables in log +type TableDisplayHandler interface { + DisplayTable(tableHeader []string, lines []*display.LineData, message string) + IsInterfaceNil() bool +} diff --git a/epochStart/metachain/legacySystemSCs.go b/epochStart/metachain/legacySystemSCs.go new file mode 100644 index 00000000000..327a5ab88e5 --- /dev/null +++ b/epochStart/metachain/legacySystemSCs.go @@ -0,0 +1,1340 @@ +package metachain + +import ( + "bytes" + "context" + "fmt" + "math" + "math/big" + "sort" + + "github.com/multiversx/mx-chain-core-go/core" + "github.com/multiversx/mx-chain-core-go/core/atomic" + "github.com/multiversx/mx-chain-core-go/core/check" + "github.com/multiversx/mx-chain-core-go/data" + "github.com/multiversx/mx-chain-core-go/data/block" + "github.com/multiversx/mx-chain-core-go/marshal" + "github.com/multiversx/mx-chain-go/common" + "github.com/multiversx/mx-chain-go/common/errChan" + vInfo "github.com/multiversx/mx-chain-go/common/validatorInfo" + "github.com/multiversx/mx-chain-go/epochStart" + "github.com/multiversx/mx-chain-go/process" + "github.com/multiversx/mx-chain-go/sharding" + "github.com/multiversx/mx-chain-go/sharding/nodesCoordinator" + "github.com/multiversx/mx-chain-go/state" + "github.com/multiversx/mx-chain-go/vm" + "github.com/multiversx/mx-chain-go/vm/systemSmartContracts" + vmcommon "github.com/multiversx/mx-chain-vm-common-go" +) + +type legacySystemSCProcessor struct { + systemVM vmcommon.VMExecutionHandler + userAccountsDB state.AccountsAdapter + marshalizer marshal.Marshalizer + peerAccountsDB state.AccountsAdapter + chanceComputer nodesCoordinator.ChanceComputer + shardCoordinator sharding.Coordinator + startRating uint32 + validatorInfoCreator epochStart.ValidatorInfoCreator + genesisNodesConfig sharding.GenesisNodesSetupHandler + nodesConfigProvider epochStart.NodesConfigProvider + stakingDataProvider epochStart.StakingDataProvider + maxNodesChangeConfigProvider epochStart.MaxNodesChangeConfigProvider + endOfEpochCallerAddress []byte + stakingSCAddress []byte + esdtOwnerAddressBytes []byte + mapNumSwitchedPerShard map[uint32]uint32 + mapNumSwitchablePerShard map[uint32]uint32 + maxNodes uint32 + + flagChangeMaxNodesEnabled atomic.Flag + enableEpochsHandler common.EnableEpochsHandler +} + +func newLegacySystemSCProcessor(args ArgsNewEpochStartSystemSCProcessing) (*legacySystemSCProcessor, error) { + err 
:= checkLegacyArgs(args) + if err != nil { + return nil, err + } + + legacy := &legacySystemSCProcessor{ + systemVM: args.SystemVM, + userAccountsDB: args.UserAccountsDB, + peerAccountsDB: args.PeerAccountsDB, + marshalizer: args.Marshalizer, + startRating: args.StartRating, + validatorInfoCreator: args.ValidatorInfoCreator, + genesisNodesConfig: args.GenesisNodesConfig, + endOfEpochCallerAddress: args.EndOfEpochCallerAddress, + stakingSCAddress: args.StakingSCAddress, + chanceComputer: args.ChanceComputer, + mapNumSwitchedPerShard: make(map[uint32]uint32), + mapNumSwitchablePerShard: make(map[uint32]uint32), + stakingDataProvider: args.StakingDataProvider, + nodesConfigProvider: args.NodesConfigProvider, + shardCoordinator: args.ShardCoordinator, + esdtOwnerAddressBytes: args.ESDTOwnerAddressBytes, + maxNodesChangeConfigProvider: args.MaxNodesChangeConfigProvider, + enableEpochsHandler: args.EnableEpochsHandler, + } + + return legacy, nil +} + +func checkLegacyArgs(args ArgsNewEpochStartSystemSCProcessing) error { + if check.IfNilReflect(args.SystemVM) { + return epochStart.ErrNilSystemVM + } + if check.IfNil(args.UserAccountsDB) { + return epochStart.ErrNilAccountsDB + } + if check.IfNil(args.PeerAccountsDB) { + return epochStart.ErrNilAccountsDB + } + if check.IfNil(args.Marshalizer) { + return epochStart.ErrNilMarshalizer + } + if check.IfNil(args.ValidatorInfoCreator) { + return epochStart.ErrNilValidatorInfoProcessor + } + if len(args.EndOfEpochCallerAddress) == 0 { + return epochStart.ErrNilEndOfEpochCallerAddress + } + if len(args.StakingSCAddress) == 0 { + return epochStart.ErrNilStakingSCAddress + } + if check.IfNil(args.ChanceComputer) { + return epochStart.ErrNilChanceComputer + } + if check.IfNil(args.GenesisNodesConfig) { + return epochStart.ErrNilGenesisNodesConfig + } + if check.IfNil(args.NodesConfigProvider) { + return epochStart.ErrNilNodesConfigProvider + } + if check.IfNil(args.StakingDataProvider) { + return epochStart.ErrNilStakingDataProvider + } + if check.IfNil(args.ShardCoordinator) { + return epochStart.ErrNilShardCoordinator + } + if check.IfNil(args.MaxNodesChangeConfigProvider) { + return epochStart.ErrNilMaxNodesChangeConfigProvider + } + if check.IfNil(args.EnableEpochsHandler) { + return process.ErrNilEnableEpochsHandler + } + if len(args.ESDTOwnerAddressBytes) == 0 { + return epochStart.ErrEmptyESDTOwnerAddress + } + + return nil +} + +func (s *legacySystemSCProcessor) processLegacy( + validatorsInfoMap state.ShardValidatorsInfoMapHandler, + nonce uint64, + epoch uint32, +) error { + if s.enableEpochsHandler.IsFlagEnabled(common.SwitchHysteresisForMinNodesFlagInSpecificEpochOnly) { + err := s.updateSystemSCConfigMinNodes() + if err != nil { + return err + } + } + + if s.enableEpochsHandler.IsFlagEnabled(common.StakingV2OwnerFlagInSpecificEpochOnly) { + err := s.updateOwnersForBlsKeys() + if err != nil { + return err + } + } + + if s.flagChangeMaxNodesEnabled.IsSet() { + err := s.updateMaxNodes(validatorsInfoMap, nonce) + if err != nil { + return err + } + } + + if s.enableEpochsHandler.IsFlagEnabled(common.CorrectLastUnJailedFlagInSpecificEpochOnly) { + err := s.resetLastUnJailed() + if err != nil { + return err + } + } + + if s.enableEpochsHandler.IsFlagEnabled(common.DelegationSmartContractFlagInSpecificEpochOnly) { + err := s.initDelegationSystemSC() + if err != nil { + return err + } + } + + if s.enableEpochsHandler.IsFlagEnabled(common.CorrectLastUnJailedFlag) && !s.enableEpochsHandler.IsFlagEnabled(common.StakingV4Step2Flag) { + err := 
s.cleanAdditionalQueue() + if err != nil { + return err + } + } + + if s.enableEpochsHandler.IsFlagEnabled(common.SwitchJailWaitingFlag) && !s.enableEpochsHandler.IsFlagEnabled(common.StakingV4Step2Flag) { + err := s.computeNumWaitingPerShard(validatorsInfoMap) + if err != nil { + return err + } + + err = s.swapJailedWithWaiting(validatorsInfoMap) + if err != nil { + return err + } + } + + if s.enableEpochsHandler.IsFlagEnabled(common.StakingV2Flag) && !s.enableEpochsHandler.IsFlagEnabled(common.StakingV4Step2Flag) { + err := s.prepareStakingDataForEligibleNodes(validatorsInfoMap) + if err != nil { + return err + } + + err = s.fillStakingDataForNonEligible(validatorsInfoMap) + if err != nil { + return err + } + + numUnStaked, err := s.unStakeNodesWithNotEnoughFunds(validatorsInfoMap, epoch) + if err != nil { + return err + } + + if s.enableEpochsHandler.IsFlagEnabled(common.StakingQueueFlag) { + err = s.stakeNodesFromQueue(validatorsInfoMap, numUnStaked, nonce, common.NewList) + if err != nil { + return err + } + } + } + + if s.enableEpochsHandler.IsFlagEnabled(common.ESDTFlagInSpecificEpochOnly) { + err := s.initESDT() + if err != nil { + // not a critical error + log.Error("error while initializing ESDT", "err", err) + } + } + + return nil +} + +// ToggleUnStakeUnBond will pause/unPause the unStake/unBond functions on the validator system sc +func (s *legacySystemSCProcessor) ToggleUnStakeUnBond(value bool) error { + if !s.enableEpochsHandler.IsFlagEnabled(common.StakingV2Flag) { + return nil + } + + vmInput := &vmcommon.ContractCallInput{ + VMInput: vmcommon.VMInput{ + CallerAddr: s.endOfEpochCallerAddress, + Arguments: nil, + CallValue: big.NewInt(0), + }, + RecipientAddr: vm.ValidatorSCAddress, + Function: "unPauseUnStakeUnBond", + } + + if value { + vmInput.Function = "pauseUnStakeUnBond" + } + + vmOutput, err := s.systemVM.RunSmartContractCall(vmInput) + if err != nil { + return err + } + + if vmOutput.ReturnCode != vmcommon.Ok { + return epochStart.ErrSystemValidatorSCCall + } + + err = s.processSCOutputAccounts(vmOutput) + if err != nil { + return err + } + + return nil +} + +func (s *legacySystemSCProcessor) unStakeNodesWithNotEnoughFunds( + validatorsInfoMap state.ShardValidatorsInfoMapHandler, + epoch uint32, +) (uint32, error) { + nodesToUnStake, mapOwnersKeys, err := s.stakingDataProvider.ComputeUnQualifiedNodes(validatorsInfoMap) + if err != nil { + return 0, err + } + + nodesUnStakedFromAdditionalQueue := uint32(0) + + log.Debug("unStake nodes with not enough funds", "num", len(nodesToUnStake)) + for _, blsKey := range nodesToUnStake { + log.Debug("unStake at end of epoch for node", "blsKey", blsKey) + err = s.unStakeOneNode(blsKey, epoch) + if err != nil { + return 0, err + } + + validatorInfo := validatorsInfoMap.GetValidator(blsKey) + if validatorInfo == nil { + nodesUnStakedFromAdditionalQueue++ + log.Debug("unStaked node which was in additional queue", "blsKey", blsKey) + continue + } + + validatorLeaving := validatorInfo.ShallowClone() + validatorLeaving.SetListAndIndex(string(common.LeavingList), validatorLeaving.GetIndex(), s.enableEpochsHandler.IsFlagEnabled(common.StakingV4StartedFlag)) + err = validatorsInfoMap.Replace(validatorInfo, validatorLeaving) + if err != nil { + return 0, err + } + } + + err = s.updateDelegationContracts(mapOwnersKeys) + if err != nil { + return 0, err + } + + nodesToStakeFromQueue := uint32(len(nodesToUnStake)) + nodesToStakeFromQueue -= nodesUnStakedFromAdditionalQueue + + log.Debug("stake nodes from waiting list", "num", 
nodesToStakeFromQueue) + return nodesToStakeFromQueue, nil +} + +func (s *legacySystemSCProcessor) unStakeOneNode(blsKey []byte, epoch uint32) error { + vmInput := &vmcommon.ContractCallInput{ + VMInput: vmcommon.VMInput{ + CallerAddr: s.endOfEpochCallerAddress, + Arguments: [][]byte{blsKey}, + CallValue: big.NewInt(0), + }, + RecipientAddr: s.stakingSCAddress, + Function: "unStakeAtEndOfEpoch", + } + + vmOutput, err := s.systemVM.RunSmartContractCall(vmInput) + if err != nil { + return err + } + if vmOutput.ReturnCode != vmcommon.Ok { + log.Debug("unStakeOneNode", "returnMessage", vmOutput.ReturnMessage, "returnCode", vmOutput.ReturnCode.String()) + return epochStart.ErrUnStakeExecuteError + } + + err = s.processSCOutputAccounts(vmOutput) + if err != nil { + return err + } + + account, errExists := s.peerAccountsDB.GetExistingAccount(blsKey) + if errExists != nil { + return nil + } + + peerAccount, ok := account.(state.PeerAccountHandler) + if !ok { + return epochStart.ErrWrongTypeAssertion + } + + peerAccount.SetListAndIndex(peerAccount.GetShardId(), string(common.LeavingList), peerAccount.GetIndexInList(), s.enableEpochsHandler.IsFlagEnabled(common.StakingV4StartedFlag)) + peerAccount.SetUnStakedEpoch(epoch) + err = s.peerAccountsDB.SaveAccount(peerAccount) + if err != nil { + return err + } + + return nil +} + +func (s *legacySystemSCProcessor) updateDelegationContracts(mapOwnerKeys map[string][][]byte) error { + sortedDelegationsSCs := make([]string, 0, len(mapOwnerKeys)) + for address := range mapOwnerKeys { + shardId := s.shardCoordinator.ComputeId([]byte(address)) + if shardId != core.MetachainShardId { + continue + } + sortedDelegationsSCs = append(sortedDelegationsSCs, address) + } + + sort.Slice(sortedDelegationsSCs, func(i, j int) bool { + return sortedDelegationsSCs[i] < sortedDelegationsSCs[j] + }) + + for _, address := range sortedDelegationsSCs { + vmInput := &vmcommon.ContractCallInput{ + VMInput: vmcommon.VMInput{ + CallerAddr: s.endOfEpochCallerAddress, + Arguments: mapOwnerKeys[address], + CallValue: big.NewInt(0), + }, + RecipientAddr: []byte(address), + Function: "unStakeAtEndOfEpoch", + } + + vmOutput, err := s.systemVM.RunSmartContractCall(vmInput) + if err != nil { + return err + } + if vmOutput.ReturnCode != vmcommon.Ok { + log.Debug("unStakeAtEndOfEpoch", "returnMessage", vmOutput.ReturnMessage, "returnCode", vmOutput.ReturnCode.String()) + return epochStart.ErrUnStakeExecuteError + } + + err = s.processSCOutputAccounts(vmOutput) + if err != nil { + return err + } + } + + return nil +} + +func (s *legacySystemSCProcessor) fillStakingDataForNonEligible(validatorsInfoMap state.ShardValidatorsInfoMapHandler) error { + for shId, validatorsInfoSlice := range validatorsInfoMap.GetShardValidatorsInfoMap() { + newList := make([]state.ValidatorInfoHandler, 0, len(validatorsInfoSlice)) + deleteCalled := false + + for _, validatorInfo := range validatorsInfoSlice { + if vInfo.WasEligibleInCurrentEpoch(validatorInfo) { + newList = append(newList, validatorInfo) + continue + } + + err := s.stakingDataProvider.FillValidatorInfo(validatorInfo) + if err != nil { + deleteCalled = true + + log.Error("fillStakingDataForNonEligible", "error", err) + if len(validatorInfo.GetList()) > 0 { + return err + } + + err = s.peerAccountsDB.RemoveAccount(validatorInfo.GetPublicKey()) + if err != nil { + log.Error("fillStakingDataForNonEligible removeAccount", "error", err) + } + + continue + } + + newList = append(newList, validatorInfo) + } + + if deleteCalled { + err := 
validatorsInfoMap.SetValidatorsInShard(shId, newList) + if err != nil { + return err + } + } + } + + return nil +} + +func (s *legacySystemSCProcessor) prepareStakingDataForEligibleNodes(validatorsInfoMap state.ShardValidatorsInfoMapHandler) error { + eligibleNodes, err := getEligibleNodeKeys(validatorsInfoMap) + if err != nil { + return err + } + + return s.prepareStakingData(eligibleNodes) +} + +func (s *legacySystemSCProcessor) prepareStakingData(validatorsInfoMap state.ShardValidatorsInfoMapHandler) error { + sw := core.NewStopWatch() + sw.Start("prepareStakingDataForRewards") + defer func() { + sw.Stop("prepareStakingDataForRewards") + log.Debug("systemSCProcessor.prepareStakingDataForRewards time measurements", sw.GetMeasurements()...) + }() + + return s.stakingDataProvider.PrepareStakingData(validatorsInfoMap) +} + +func getEligibleNodeKeys( + validatorsInfoMap state.ShardValidatorsInfoMapHandler, +) (state.ShardValidatorsInfoMapHandler, error) { + eligibleNodesKeys := state.NewShardValidatorsInfoMap() + for _, validatorInfo := range validatorsInfoMap.GetAllValidatorsInfo() { + if vInfo.WasEligibleInCurrentEpoch(validatorInfo) { + err := eligibleNodesKeys.Add(validatorInfo.ShallowClone()) + if err != nil { + log.Error("getEligibleNodeKeys: could not add validator info in map", "error", err) + return nil, err + } + } + } + + return eligibleNodesKeys, nil +} + +// ProcessDelegationRewards will process the rewards which are directed towards the delegation system smart contracts +func (s *legacySystemSCProcessor) ProcessDelegationRewards( + miniBlocks block.MiniBlockSlice, + txCache epochStart.TransactionCacher, +) error { + if txCache == nil { + return epochStart.ErrNilLocalTxCache + } + + rwdMb := getRewardsMiniBlockForMeta(miniBlocks) + if rwdMb == nil { + return nil + } + + for _, txHash := range rwdMb.TxHashes { + rwdTx, err := txCache.GetTx(txHash) + if err != nil { + return err + } + + err = s.executeRewardTx(rwdTx) + if err != nil { + return err + } + } + + return nil +} + +func (s *legacySystemSCProcessor) executeRewardTx(rwdTx data.TransactionHandler) error { + vmInput := &vmcommon.ContractCallInput{ + VMInput: vmcommon.VMInput{ + CallerAddr: s.endOfEpochCallerAddress, + Arguments: nil, + CallValue: rwdTx.GetValue(), + }, + RecipientAddr: rwdTx.GetRcvAddr(), + Function: "updateRewards", + } + + vmOutput, err := s.systemVM.RunSmartContractCall(vmInput) + if err != nil { + return err + } + + if vmOutput.ReturnCode != vmcommon.Ok { + return epochStart.ErrSystemDelegationCall + } + + err = s.processSCOutputAccounts(vmOutput) + if err != nil { + return err + } + + return nil +} + +// updates the configuration of the system SC if the flags permit +func (s *legacySystemSCProcessor) updateSystemSCConfigMinNodes() error { + minNumberOfNodesWithHysteresis := s.genesisNodesConfig.MinNumberOfNodesWithHysteresis() + err := s.setMinNumberOfNodes(minNumberOfNodesWithHysteresis) + + return err +} + +func (s *legacySystemSCProcessor) resetLastUnJailed() error { + vmInput := &vmcommon.ContractCallInput{ + VMInput: vmcommon.VMInput{ + CallerAddr: s.endOfEpochCallerAddress, + Arguments: [][]byte{}, + CallValue: big.NewInt(0), + }, + RecipientAddr: s.stakingSCAddress, + Function: "resetLastUnJailedFromQueue", + } + + vmOutput, err := s.systemVM.RunSmartContractCall(vmInput) + if err != nil { + return err + } + + if vmOutput.ReturnCode != vmcommon.Ok { + return epochStart.ErrResetLastUnJailedFromQueue + } + + err = s.processSCOutputAccounts(vmOutput) + if err != nil { + return err + } + + return 
nil +} + +// updates the configuration of the system SC if the flags permit +func (s *legacySystemSCProcessor) updateMaxNodes(validatorsInfoMap state.ShardValidatorsInfoMapHandler, nonce uint64) error { + sw := core.NewStopWatch() + sw.Start("total") + defer func() { + sw.Stop("total") + log.Debug("systemSCProcessor.updateMaxNodes", sw.GetMeasurements()...) + }() + + maxNumberOfNodes := s.maxNodes + sw.Start("setMaxNumberOfNodes") + prevMaxNumberOfNodes, err := s.setMaxNumberOfNodes(maxNumberOfNodes) + sw.Stop("setMaxNumberOfNodes") + if err != nil { + return err + } + + if s.enableEpochsHandler.IsFlagEnabled(common.StakingQueueFlag) { + sw.Start("stakeNodesFromQueue") + err = s.stakeNodesFromQueue(validatorsInfoMap, maxNumberOfNodes-prevMaxNumberOfNodes, nonce, common.NewList) + sw.Stop("stakeNodesFromQueue") + if err != nil { + return err + } + } + return nil +} + +func (s *legacySystemSCProcessor) computeNumWaitingPerShard(validatorsInfoMap state.ShardValidatorsInfoMapHandler) error { + for shardID, validatorInfoList := range validatorsInfoMap.GetShardValidatorsInfoMap() { + totalInWaiting := uint32(0) + for _, validatorInfo := range validatorInfoList { + switch validatorInfo.GetList() { + case string(common.WaitingList): + totalInWaiting++ + } + } + s.mapNumSwitchablePerShard[shardID] = totalInWaiting + s.mapNumSwitchedPerShard[shardID] = 0 + } + return nil +} + +func (s *legacySystemSCProcessor) swapJailedWithWaiting(validatorsInfoMap state.ShardValidatorsInfoMapHandler) error { + jailedValidators := s.getSortedJailedNodes(validatorsInfoMap) + + log.Debug("number of jailed validators", "num", len(jailedValidators)) + + newValidators := make(map[string]struct{}) + for _, jailedValidator := range jailedValidators { + if _, ok := newValidators[string(jailedValidator.GetPublicKey())]; ok { + continue + } + if isValidator(jailedValidator) && s.mapNumSwitchablePerShard[jailedValidator.GetShardId()] <= s.mapNumSwitchedPerShard[jailedValidator.GetShardId()] { + log.Debug("cannot switch in this epoch anymore for this shard as switched num waiting", + "shardID", jailedValidator.GetShardId(), + "numSwitched", s.mapNumSwitchedPerShard[jailedValidator.GetShardId()]) + continue + } + + vmInput := &vmcommon.ContractCallInput{ + VMInput: vmcommon.VMInput{ + CallerAddr: s.endOfEpochCallerAddress, + Arguments: [][]byte{jailedValidator.GetPublicKey()}, + CallValue: big.NewInt(0), + }, + RecipientAddr: s.stakingSCAddress, + Function: "switchJailedWithWaiting", + } + + vmOutput, err := s.systemVM.RunSmartContractCall(vmInput) + if err != nil { + return err + } + + log.Debug("switchJailedWithWaiting called for", + "key", jailedValidator.GetPublicKey(), + "returnMessage", vmOutput.ReturnMessage) + if vmOutput.ReturnCode != vmcommon.Ok { + continue + } + + newValidator, err := s.stakingToValidatorStatistics(validatorsInfoMap, jailedValidator, vmOutput) + if err != nil { + return err + } + + if len(newValidator) != 0 { + newValidators[string(newValidator)] = struct{}{} + } + } + + return nil +} + +func (s *legacySystemSCProcessor) stakingToValidatorStatistics( + validatorsInfoMap state.ShardValidatorsInfoMapHandler, + jailedValidator state.ValidatorInfoHandler, + vmOutput *vmcommon.VMOutput, +) ([]byte, error) { + stakingSCOutput, ok := vmOutput.OutputAccounts[string(s.stakingSCAddress)] + if !ok { + return nil, epochStart.ErrStakingSCOutputAccountNotFound + } + + var activeStorageUpdate *vmcommon.StorageUpdate + for _, storageUpdate := range stakingSCOutput.StorageUpdates { + isNewValidatorKey := 
len(storageUpdate.Offset) == len(jailedValidator.GetPublicKey()) && + !bytes.Equal(storageUpdate.Offset, jailedValidator.GetPublicKey()) + if isNewValidatorKey { + activeStorageUpdate = storageUpdate + break + } + } + if activeStorageUpdate == nil { + log.Debug("no one in waiting suitable for switch") + if s.enableEpochsHandler.IsFlagEnabled(common.SaveJailedAlwaysFlag) { + err := s.processSCOutputAccounts(vmOutput) + if err != nil { + return nil, err + } + } + + return nil, nil + } + + err := s.processSCOutputAccounts(vmOutput) + if err != nil { + return nil, err + } + + var stakingData systemSmartContracts.StakedDataV2_0 + err = s.marshalizer.Unmarshal(&stakingData, activeStorageUpdate.Data) + if err != nil { + return nil, err + } + + blsPubKey := activeStorageUpdate.Offset + log.Debug("staking validator key who switches with the jailed one", "blsKey", blsPubKey) + account, isNew, err := state.GetPeerAccountAndReturnIfNew(s.peerAccountsDB, blsPubKey) + if err != nil { + return nil, err + } + + if !bytes.Equal(account.GetRewardAddress(), stakingData.RewardAddress) { + err = account.SetRewardAddress(stakingData.RewardAddress) + if err != nil { + return nil, err + } + } + + if !isNew { + err = validatorsInfoMap.Delete(jailedValidator) + if err != nil { + return nil, err + } + } + + account.SetListAndIndex(jailedValidator.GetShardId(), string(common.NewList), uint32(stakingData.StakedNonce), s.enableEpochsHandler.IsFlagEnabled(common.StakingV4StartedFlag)) + account.SetTempRating(s.startRating) + account.SetUnStakedEpoch(common.DefaultUnstakedEpoch) + + err = s.peerAccountsDB.SaveAccount(account) + if err != nil { + return nil, err + } + + jailedAccount, err := s.getPeerAccount(jailedValidator.GetPublicKey()) + if err != nil { + return nil, err + } + + jailedAccount.SetListAndIndex(jailedValidator.GetShardId(), string(common.JailedList), jailedValidator.GetIndex(), s.enableEpochsHandler.IsFlagEnabled(common.StakingV4StartedFlag)) + jailedAccount.ResetAtNewEpoch() + err = s.peerAccountsDB.SaveAccount(jailedAccount) + if err != nil { + return nil, err + } + + if isValidator(jailedValidator) { + s.mapNumSwitchedPerShard[jailedValidator.GetShardId()]++ + } + + newValidatorInfo := s.validatorInfoCreator.PeerAccountToValidatorInfo(account) + err = validatorsInfoMap.Replace(jailedValidator, newValidatorInfo) + if err != nil { + return nil, err + } + + return blsPubKey, nil +} + +func isValidator(validator state.ValidatorInfoHandler) bool { + return validator.GetList() == string(common.WaitingList) || validator.GetList() == string(common.EligibleList) +} + +func (s *legacySystemSCProcessor) getUserAccount(address []byte) (state.UserAccountHandler, error) { + acnt, err := s.userAccountsDB.LoadAccount(address) + if err != nil { + return nil, err + } + + stAcc, ok := acnt.(state.UserAccountHandler) + if !ok { + return nil, process.ErrWrongTypeAssertion + } + + return stAcc, nil +} + +// save account changes in state from vmOutput - protected by VM - every output can be treated as is. 
+func (s *legacySystemSCProcessor) processSCOutputAccounts( + vmOutput *vmcommon.VMOutput, +) error { + + outputAccounts := process.SortVMOutputInsideData(vmOutput) + for _, outAcc := range outputAccounts { + acc, err := s.getUserAccount(outAcc.Address) + if err != nil { + return err + } + + storageUpdates := process.GetSortedStorageUpdates(outAcc) + for _, storeUpdate := range storageUpdates { + err = acc.SaveKeyValue(storeUpdate.Offset, storeUpdate.Data) + if err != nil { + return err + } + } + + if outAcc.BalanceDelta != nil && outAcc.BalanceDelta.Cmp(zero) != 0 { + err = acc.AddToBalance(outAcc.BalanceDelta) + if err != nil { + return err + } + } + + err = s.userAccountsDB.SaveAccount(acc) + if err != nil { + return err + } + } + + return nil +} + +func (s *legacySystemSCProcessor) getSortedJailedNodes(validatorsInfoMap state.ShardValidatorsInfoMapHandler) []state.ValidatorInfoHandler { + newJailedValidators := make([]state.ValidatorInfoHandler, 0) + oldJailedValidators := make([]state.ValidatorInfoHandler, 0) + + minChance := s.chanceComputer.GetChance(0) + for _, validatorInfo := range validatorsInfoMap.GetAllValidatorsInfo() { + if validatorInfo.GetList() == string(common.JailedList) { + oldJailedValidators = append(oldJailedValidators, validatorInfo) + } else if s.chanceComputer.GetChance(validatorInfo.GetTempRating()) < minChance { + newJailedValidators = append(newJailedValidators, validatorInfo) + } + + } + + sort.Sort(validatorList(oldJailedValidators)) + sort.Sort(validatorList(newJailedValidators)) + + return append(oldJailedValidators, newJailedValidators...) +} + +func (s *legacySystemSCProcessor) getPeerAccount(key []byte) (state.PeerAccountHandler, error) { + account, err := s.peerAccountsDB.LoadAccount(key) + if err != nil { + return nil, err + } + + peerAcc, ok := account.(state.PeerAccountHandler) + if !ok { + return nil, epochStart.ErrWrongTypeAssertion + } + + return peerAcc, nil +} + +func (s *legacySystemSCProcessor) setMinNumberOfNodes(minNumNodes uint32) error { + vmInput := &vmcommon.ContractCallInput{ + VMInput: vmcommon.VMInput{ + CallerAddr: s.endOfEpochCallerAddress, + Arguments: [][]byte{big.NewInt(int64(minNumNodes)).Bytes()}, + CallValue: big.NewInt(0), + }, + RecipientAddr: s.stakingSCAddress, + Function: "updateConfigMinNodes", + } + + vmOutput, err := s.systemVM.RunSmartContractCall(vmInput) + if err != nil { + return err + } + + log.Debug("setMinNumberOfNodes called with", + "minNumNodes", minNumNodes, + "returnMessage", vmOutput.ReturnMessage) + + if vmOutput.ReturnCode != vmcommon.Ok { + return epochStart.ErrInvalidMinNumberOfNodes + } + + err = s.processSCOutputAccounts(vmOutput) + if err != nil { + return err + } + + return nil +} + +func (s *legacySystemSCProcessor) setMaxNumberOfNodes(maxNumNodes uint32) (uint32, error) { + vmInput := &vmcommon.ContractCallInput{ + VMInput: vmcommon.VMInput{ + CallerAddr: s.endOfEpochCallerAddress, + Arguments: [][]byte{big.NewInt(int64(maxNumNodes)).Bytes()}, + CallValue: big.NewInt(0), + }, + RecipientAddr: s.stakingSCAddress, + Function: "updateConfigMaxNodes", + } + + vmOutput, err := s.systemVM.RunSmartContractCall(vmInput) + if err != nil { + return 0, err + } + + log.Debug("setMaxNumberOfNodes called with", + "maxNumNodes", maxNumNodes, + "current maxNumNodes in legacySystemSCProcessor", s.maxNodes, + "returnMessage", vmOutput.ReturnMessage) + + if vmOutput.ReturnCode != vmcommon.Ok { + return 0, epochStart.ErrInvalidMaxNumberOfNodes + } + if len(vmOutput.ReturnData) != 1 { + return 0, 
epochStart.ErrInvalidSystemSCReturn + } + + err = s.processSCOutputAccounts(vmOutput) + if err != nil { + return 0, err + } + + prevMaxNumNodes := big.NewInt(0).SetBytes(vmOutput.ReturnData[0]).Uint64() + return uint32(prevMaxNumNodes), nil +} + +func (s *legacySystemSCProcessor) updateOwnersForBlsKeys() error { + sw := core.NewStopWatch() + sw.Start("systemSCProcessor") + defer func() { + sw.Stop("systemSCProcessor") + log.Debug("systemSCProcessor.updateOwnersForBlsKeys time measurements", sw.GetMeasurements()...) + }() + + sw.Start("getValidatorSystemAccount") + userValidatorAccount, err := s.getValidatorSystemAccount() + sw.Stop("getValidatorSystemAccount") + if err != nil { + return err + } + + sw.Start("getArgumentsForSetOwnerFunctionality") + arguments, err := s.getArgumentsForSetOwnerFunctionality(userValidatorAccount) + sw.Stop("getArgumentsForSetOwnerFunctionality") + if err != nil { + return err + } + + sw.Start("callSetOwnersOnAddresses") + err = s.callSetOwnersOnAddresses(arguments) + sw.Stop("callSetOwnersOnAddresses") + if err != nil { + return err + } + + return nil +} + +func (s *legacySystemSCProcessor) getValidatorSystemAccount() (state.UserAccountHandler, error) { + validatorAccount, err := s.userAccountsDB.LoadAccount(vm.ValidatorSCAddress) + if err != nil { + return nil, fmt.Errorf("%w when loading validator account", err) + } + + userValidatorAccount, ok := validatorAccount.(state.UserAccountHandler) + if !ok { + return nil, fmt.Errorf("%w when loading validator account", epochStart.ErrWrongTypeAssertion) + } + + if check.IfNil(userValidatorAccount.DataTrie()) { + return nil, epochStart.ErrNilDataTrie + } + + return userValidatorAccount, nil +} + +func (s *legacySystemSCProcessor) getArgumentsForSetOwnerFunctionality(userValidatorAccount state.UserAccountHandler) ([][]byte, error) { + arguments := make([][]byte, 0) + + leavesChannels := &common.TrieIteratorChannels{ + LeavesChan: make(chan core.KeyValueHolder, common.TrieLeavesChannelDefaultCapacity), + ErrChan: errChan.NewErrChanWrapper(), + } + err := userValidatorAccount.GetAllLeaves(leavesChannels, context.Background()) + if err != nil { + return nil, err + } + for leaf := range leavesChannels.LeavesChan { + validatorData := &systemSmartContracts.ValidatorDataV2{} + + err = s.marshalizer.Unmarshal(validatorData, leaf.Value()) + if err != nil { + continue + } + for _, blsKey := range validatorData.BlsPubKeys { + arguments = append(arguments, blsKey) + arguments = append(arguments, leaf.Key()) + } + } + + err = leavesChannels.ErrChan.ReadFromChanNonBlocking() + if err != nil { + return nil, err + } + + return arguments, nil +} + +func (s *legacySystemSCProcessor) callSetOwnersOnAddresses(arguments [][]byte) error { + vmInput := &vmcommon.ContractCallInput{ + VMInput: vmcommon.VMInput{ + CallerAddr: vm.EndOfEpochAddress, + CallValue: big.NewInt(0), + Arguments: arguments, + }, + RecipientAddr: vm.StakingSCAddress, + Function: "setOwnersOnAddresses", + } + + vmOutput, errRun := s.systemVM.RunSmartContractCall(vmInput) + if errRun != nil { + return fmt.Errorf("%w when calling setOwnersOnAddresses function", errRun) + } + if vmOutput.ReturnCode != vmcommon.Ok { + return fmt.Errorf("got return code %s when calling setOwnersOnAddresses", vmOutput.ReturnCode) + } + + return s.processSCOutputAccounts(vmOutput) +} + +func (s *legacySystemSCProcessor) initDelegationSystemSC() error { + codeMetaData := &vmcommon.CodeMetadata{ + Upgradeable: false, + Payable: false, + Readable: true, + } + + vmInput := 
&vmcommon.ContractCreateInput{ + VMInput: vmcommon.VMInput{ + CallerAddr: vm.DelegationManagerSCAddress, + Arguments: [][]byte{}, + CallValue: big.NewInt(0), + }, + ContractCode: vm.DelegationManagerSCAddress, + ContractCodeMetadata: codeMetaData.ToBytes(), + } + + vmOutput, err := s.systemVM.RunSmartContractCreate(vmInput) + if err != nil { + return err + } + if vmOutput.ReturnCode != vmcommon.Ok { + return epochStart.ErrCouldNotInitDelegationSystemSC + } + + err = s.processSCOutputAccounts(vmOutput) + if err != nil { + return err + } + + err = s.updateSystemSCContractsCode(vmInput.ContractCodeMetadata) + if err != nil { + return err + } + + return nil +} + +func (s *legacySystemSCProcessor) updateSystemSCContractsCode(contractMetadata []byte) error { + contractsToUpdate := make([][]byte, 0) + contractsToUpdate = append(contractsToUpdate, vm.StakingSCAddress) + contractsToUpdate = append(contractsToUpdate, vm.ValidatorSCAddress) + contractsToUpdate = append(contractsToUpdate, vm.GovernanceSCAddress) + contractsToUpdate = append(contractsToUpdate, vm.ESDTSCAddress) + contractsToUpdate = append(contractsToUpdate, vm.DelegationManagerSCAddress) + contractsToUpdate = append(contractsToUpdate, vm.FirstDelegationSCAddress) + + for _, address := range contractsToUpdate { + userAcc, err := s.getUserAccount(address) + if err != nil { + return err + } + + userAcc.SetOwnerAddress(address) + userAcc.SetCodeMetadata(contractMetadata) + userAcc.SetCode(address) + + err = s.userAccountsDB.SaveAccount(userAcc) + if err != nil { + return err + } + } + + return nil +} + +func (s *legacySystemSCProcessor) cleanAdditionalQueue() error { + sw := core.NewStopWatch() + sw.Start("systemSCProcessor") + defer func() { + sw.Stop("systemSCProcessor") + log.Debug("systemSCProcessor.cleanAdditionalQueue time measurements", sw.GetMeasurements()...) 
+ }() + + vmInput := &vmcommon.ContractCallInput{ + VMInput: vmcommon.VMInput{ + CallerAddr: vm.EndOfEpochAddress, + CallValue: big.NewInt(0), + Arguments: [][]byte{}, + }, + RecipientAddr: vm.StakingSCAddress, + Function: "cleanAdditionalQueue", + } + vmOutput, errRun := s.systemVM.RunSmartContractCall(vmInput) + if errRun != nil { + return fmt.Errorf("%w when cleaning additional queue", errRun) + } + if vmOutput.ReturnCode != vmcommon.Ok { + return fmt.Errorf("got return code %s, return message %s when cleaning additional queue", vmOutput.ReturnCode, vmOutput.ReturnMessage) + } + + err := s.processSCOutputAccounts(vmOutput) + if err != nil { + return err + } + + // returnData format is list(address - all blsKeys which were unstaked for that) + addressLength := len(s.endOfEpochCallerAddress) + mapOwnersKeys := make(map[string][][]byte) + currentOwner := "" + for _, returnData := range vmOutput.ReturnData { + if len(returnData) == addressLength { + currentOwner = string(returnData) + continue + } + + if len(currentOwner) != addressLength { + continue + } + + mapOwnersKeys[currentOwner] = append(mapOwnersKeys[currentOwner], returnData) + } + + err = s.updateDelegationContracts(mapOwnersKeys) + if err != nil { + log.Error("update delegation contracts failed after cleaning additional queue", "error", err.Error()) + return err + } + + return nil +} + +func (s *legacySystemSCProcessor) stakeNodesFromQueue( + validatorsInfoMap state.ShardValidatorsInfoMapHandler, + nodesToStake uint32, + nonce uint64, + list common.PeerType, +) error { + if nodesToStake == 0 { + return nil + } + + nodesToStakeAsBigInt := big.NewInt(0).SetUint64(uint64(nodesToStake)) + vmInput := &vmcommon.ContractCallInput{ + VMInput: vmcommon.VMInput{ + CallerAddr: vm.EndOfEpochAddress, + CallValue: big.NewInt(0), + Arguments: [][]byte{nodesToStakeAsBigInt.Bytes()}, + }, + RecipientAddr: vm.StakingSCAddress, + Function: "stakeNodesFromQueue", + } + vmOutput, errRun := s.systemVM.RunSmartContractCall(vmInput) + if errRun != nil { + return fmt.Errorf("%w when staking nodes from waiting list", errRun) + } + if vmOutput.ReturnCode != vmcommon.Ok { + return fmt.Errorf("got return code %s when staking nodes from waiting list", vmOutput.ReturnCode) + } + if len(vmOutput.ReturnData)%2 != 0 { + return fmt.Errorf("%w return data must be divisible by 2 when staking nodes from waiting list", epochStart.ErrInvalidSystemSCReturn) + } + + err := s.processSCOutputAccounts(vmOutput) + if err != nil { + return err + } + + err = s.addNewlyStakedNodesToValidatorTrie(validatorsInfoMap, vmOutput.ReturnData, nonce, list) + if err != nil { + return err + } + + return nil +} + +func (s *legacySystemSCProcessor) addNewlyStakedNodesToValidatorTrie( + validatorsInfoMap state.ShardValidatorsInfoMapHandler, + returnData [][]byte, + nonce uint64, + list common.PeerType, +) error { + for i := 0; i < len(returnData); i += 2 { + blsKey := returnData[i] + rewardAddress := returnData[i+1] + + peerAcc, err := s.getPeerAccount(blsKey) + if err != nil { + return err + } + + err = peerAcc.SetRewardAddress(rewardAddress) + if err != nil { + return err + } + + err = peerAcc.SetBLSPublicKey(blsKey) + if err != nil { + return err + } + + peerAcc.SetListAndIndex(peerAcc.GetShardId(), string(list), uint32(nonce), s.enableEpochsHandler.IsFlagEnabled(common.StakingV4StartedFlag)) + peerAcc.SetTempRating(s.startRating) + peerAcc.SetUnStakedEpoch(common.DefaultUnstakedEpoch) + + err = s.peerAccountsDB.SaveAccount(peerAcc) + if err != nil { + return err + } + + validatorInfo := 
&state.ValidatorInfo{ + PublicKey: blsKey, + ShardId: peerAcc.GetShardId(), + List: string(list), + Index: uint32(nonce), + TempRating: s.startRating, + Rating: s.startRating, + RewardAddress: rewardAddress, + AccumulatedFees: big.NewInt(0), + } + + existingValidator := validatorsInfoMap.GetValidator(validatorInfo.GetPublicKey()) + // This fix is not backwards incompatible + if !check.IfNil(existingValidator) && s.enableEpochsHandler.IsFlagEnabled(common.StakingV4StartedFlag) { + err = validatorsInfoMap.Delete(existingValidator) + if err != nil { + return err + } + } + + err = validatorsInfoMap.Add(validatorInfo) + if err != nil { + return err + } + } + + return nil +} + +func (s *legacySystemSCProcessor) initESDT() error { + currentConfigValues, err := s.extractConfigFromESDTContract() + if err != nil { + return err + } + + return s.changeESDTOwner(currentConfigValues) +} + +func (s *legacySystemSCProcessor) extractConfigFromESDTContract() ([][]byte, error) { + vmInput := &vmcommon.ContractCallInput{ + VMInput: vmcommon.VMInput{ + CallerAddr: s.endOfEpochCallerAddress, + Arguments: [][]byte{}, + CallValue: big.NewInt(0), + GasProvided: math.MaxUint64, + }, + Function: "getContractConfig", + RecipientAddr: vm.ESDTSCAddress, + } + + output, err := s.systemVM.RunSmartContractCall(vmInput) + if err != nil { + return nil, err + } + if len(output.ReturnData) != 4 { + return nil, fmt.Errorf("%w getContractConfig should have returned 4 values", epochStart.ErrInvalidSystemSCReturn) + } + + return output.ReturnData, nil +} + +func (s *legacySystemSCProcessor) changeESDTOwner(currentConfigValues [][]byte) error { + baseIssuingCost := currentConfigValues[1] + minTokenNameLength := currentConfigValues[2] + maxTokenNameLength := currentConfigValues[3] + + vmInput := &vmcommon.ContractCallInput{ + VMInput: vmcommon.VMInput{ + CallerAddr: s.endOfEpochCallerAddress, + Arguments: [][]byte{s.esdtOwnerAddressBytes, baseIssuingCost, minTokenNameLength, maxTokenNameLength}, + CallValue: big.NewInt(0), + GasProvided: math.MaxUint64, + }, + Function: "configChange", + RecipientAddr: vm.ESDTSCAddress, + } + + output, err := s.systemVM.RunSmartContractCall(vmInput) + if err != nil { + return err + } + if output.ReturnCode != vmcommon.Ok { + return fmt.Errorf("%w changeESDTOwner should have returned Ok", epochStart.ErrInvalidSystemSCReturn) + } + + return s.processSCOutputAccounts(output) +} + +func getRewardsMiniBlockForMeta(miniBlocks block.MiniBlockSlice) *block.MiniBlock { + for _, miniBlock := range miniBlocks { + if miniBlock.Type != block.RewardsBlock { + continue + } + if miniBlock.ReceiverShardID != core.MetachainShardId { + continue + } + return miniBlock + } + return nil +} + +func (s *legacySystemSCProcessor) legacyEpochConfirmed(epoch uint32) { + s.flagChangeMaxNodesEnabled.SetValue(false) + for _, maxNodesConfig := range s.maxNodesChangeConfigProvider.GetAllNodesConfig() { + if epoch == maxNodesConfig.EpochEnable { + s.flagChangeMaxNodesEnabled.SetValue(true) + break + } + } + s.maxNodes = s.maxNodesChangeConfigProvider.GetCurrentNodesConfig().MaxNumNodes + + log.Debug("legacySystemSC: change of maximum number of nodes and/or shuffling percentage", + "enabled", s.flagChangeMaxNodesEnabled.IsSet(), + "epoch", epoch, + "maxNodes", s.maxNodes, + ) +} diff --git a/epochStart/metachain/rewards.go b/epochStart/metachain/rewards.go index 3620070a6e0..0b279d56c32 100644 --- a/epochStart/metachain/rewards.go +++ b/epochStart/metachain/rewards.go @@ -50,7 +50,7 @@ func NewRewardsCreator(args 
ArgsNewRewardsCreator) (*rewardsCreator, error) { // CreateRewardsMiniBlocks creates the rewards miniblocks according to economics data and validator info func (rc *rewardsCreator) CreateRewardsMiniBlocks( metaBlock data.MetaHeaderHandler, - validatorsInfo map[uint32][]*state.ValidatorInfo, + validatorsInfo state.ShardValidatorsInfoMapHandler, computedEconomics *block.Economics, ) (block.MiniBlockSlice, error) { if check.IfNil(metaBlock) { @@ -116,7 +116,7 @@ func (rc *rewardsCreator) adjustProtocolSustainabilityRewards(protocolSustainabi } func (rc *rewardsCreator) addValidatorRewardsToMiniBlocks( - validatorsInfo map[uint32][]*state.ValidatorInfo, + validatorsInfo state.ShardValidatorsInfoMapHandler, metaBlock data.HeaderHandler, miniBlocks block.MiniBlockSlice, protocolSustainabilityRwdTx *rewardTx.RewardTx, @@ -162,41 +162,40 @@ func (rc *rewardsCreator) addValidatorRewardsToMiniBlocks( } func (rc *rewardsCreator) computeValidatorInfoPerRewardAddress( - validatorsInfo map[uint32][]*state.ValidatorInfo, + validatorsInfo state.ShardValidatorsInfoMapHandler, protocolSustainabilityRwd *rewardTx.RewardTx, epoch uint32, ) map[string]*rewardInfoData { rwdAddrValidatorInfo := make(map[string]*rewardInfoData) - for _, shardValidatorsInfo := range validatorsInfo { - for _, validatorInfo := range shardValidatorsInfo { - rewardsPerBlockPerNodeForShard := rc.mapBaseRewardsPerBlockPerValidator[validatorInfo.ShardId] - protocolRewardValue := big.NewInt(0).Mul(rewardsPerBlockPerNodeForShard, big.NewInt(0).SetUint64(uint64(validatorInfo.NumSelectedInSuccessBlocks))) + for _, validatorInfo := range validatorsInfo.GetAllValidatorsInfo() { + rewardsPerBlockPerNodeForShard := rc.mapBaseRewardsPerBlockPerValidator[validatorInfo.GetShardId()] + protocolRewardValue := big.NewInt(0).Mul(rewardsPerBlockPerNodeForShard, big.NewInt(0).SetUint64(uint64(validatorInfo.GetNumSelectedInSuccessBlocks()))) - isFix1Enabled := rc.isRewardsFix1Enabled(epoch) - if isFix1Enabled && validatorInfo.LeaderSuccess == 0 && validatorInfo.ValidatorSuccess == 0 { - protocolSustainabilityRwd.Value.Add(protocolSustainabilityRwd.Value, protocolRewardValue) - continue - } - if !isFix1Enabled && validatorInfo.LeaderSuccess == 0 && validatorInfo.ValidatorFailure == 0 { - protocolSustainabilityRwd.Value.Add(protocolSustainabilityRwd.Value, protocolRewardValue) - continue - } + isFix1Enabled := rc.isRewardsFix1Enabled(epoch) + if isFix1Enabled && validatorInfo.GetLeaderSuccess() == 0 && validatorInfo.GetValidatorSuccess() == 0 { + protocolSustainabilityRwd.Value.Add(protocolSustainabilityRwd.Value, protocolRewardValue) + continue + } + if !isFix1Enabled && validatorInfo.GetLeaderSuccess() == 0 && validatorInfo.GetValidatorFailure() == 0 { + protocolSustainabilityRwd.Value.Add(protocolSustainabilityRwd.Value, protocolRewardValue) + continue + } - rwdInfo, ok := rwdAddrValidatorInfo[string(validatorInfo.RewardAddress)] - if !ok { - rwdInfo = &rewardInfoData{ - accumulatedFees: big.NewInt(0), - rewardsFromProtocol: big.NewInt(0), - address: string(validatorInfo.RewardAddress), - } - rwdAddrValidatorInfo[string(validatorInfo.RewardAddress)] = rwdInfo + rwdInfo, ok := rwdAddrValidatorInfo[string(validatorInfo.GetRewardAddress())] + if !ok { + rwdInfo = &rewardInfoData{ + accumulatedFees: big.NewInt(0), + rewardsFromProtocol: big.NewInt(0), + address: string(validatorInfo.GetRewardAddress()), } - - rwdInfo.accumulatedFees.Add(rwdInfo.accumulatedFees, validatorInfo.AccumulatedFees) - rwdInfo.rewardsFromProtocol.Add(rwdInfo.rewardsFromProtocol, 
protocolRewardValue) + rwdAddrValidatorInfo[string(validatorInfo.GetRewardAddress())] = rwdInfo } + + rwdInfo.accumulatedFees.Add(rwdInfo.accumulatedFees, validatorInfo.GetAccumulatedFees()) + rwdInfo.rewardsFromProtocol.Add(rwdInfo.rewardsFromProtocol, protocolRewardValue) + } return rwdAddrValidatorInfo @@ -205,7 +204,7 @@ func (rc *rewardsCreator) computeValidatorInfoPerRewardAddress( // VerifyRewardsMiniBlocks verifies if received rewards miniblocks are correct func (rc *rewardsCreator) VerifyRewardsMiniBlocks( metaBlock data.MetaHeaderHandler, - validatorsInfo map[uint32][]*state.ValidatorInfo, + validatorsInfo state.ShardValidatorsInfoMapHandler, computedEconomics *block.Economics, ) error { if check.IfNil(metaBlock) { diff --git a/epochStart/metachain/rewardsCreatorProxy.go b/epochStart/metachain/rewardsCreatorProxy.go index 6c183f43f7b..0e770c69629 100644 --- a/epochStart/metachain/rewardsCreatorProxy.go +++ b/epochStart/metachain/rewardsCreatorProxy.go @@ -64,7 +64,7 @@ func NewRewardsCreatorProxy(args RewardsCreatorProxyArgs) (*rewardsCreatorProxy, // CreateRewardsMiniBlocks proxies the CreateRewardsMiniBlocks method of the configured rewardsCreator instance func (rcp *rewardsCreatorProxy) CreateRewardsMiniBlocks( metaBlock data.MetaHeaderHandler, - validatorsInfo map[uint32][]*state.ValidatorInfo, + validatorsInfo state.ShardValidatorsInfoMapHandler, computedEconomics *block.Economics, ) (block.MiniBlockSlice, error) { err := rcp.changeRewardCreatorIfNeeded(metaBlock.GetEpoch()) @@ -77,7 +77,7 @@ func (rcp *rewardsCreatorProxy) CreateRewardsMiniBlocks( // VerifyRewardsMiniBlocks proxies the same method of the configured rewardsCreator instance func (rcp *rewardsCreatorProxy) VerifyRewardsMiniBlocks( metaBlock data.MetaHeaderHandler, - validatorsInfo map[uint32][]*state.ValidatorInfo, + validatorsInfo state.ShardValidatorsInfoMapHandler, computedEconomics *block.Economics, ) error { err := rcp.changeRewardCreatorIfNeeded(metaBlock.GetEpoch()) diff --git a/epochStart/metachain/rewardsCreatorProxy_test.go b/epochStart/metachain/rewardsCreatorProxy_test.go index 6de5ac93a49..e41730d34f1 100644 --- a/epochStart/metachain/rewardsCreatorProxy_test.go +++ b/epochStart/metachain/rewardsCreatorProxy_test.go @@ -15,9 +15,11 @@ import ( "github.com/multiversx/mx-chain-go/epochStart" "github.com/multiversx/mx-chain-go/epochStart/mock" "github.com/multiversx/mx-chain-go/state" + "github.com/multiversx/mx-chain-go/testscommon" "github.com/multiversx/mx-chain-go/testscommon/economicsmocks" "github.com/multiversx/mx-chain-go/testscommon/enableEpochsHandlerMock" "github.com/multiversx/mx-chain-go/testscommon/hashingMocks" + "github.com/multiversx/mx-chain-go/testscommon/stakingcommon" "github.com/stretchr/testify/require" ) @@ -55,9 +57,9 @@ func TestRewardsCreatorProxy_CreateRewardsMiniBlocksWithError(t *testing.T) { t.Parallel() expectedErr := fmt.Errorf("expectedError") - rewardCreatorV1 := &mock.RewardsCreatorStub{ + rewardCreatorV1 := &testscommon.RewardsCreatorStub{ CreateRewardsMiniBlocksCalled: func( - metaBlock data.MetaHeaderHandler, validatorsInfo map[uint32][]*state.ValidatorInfo, computedEconomics *block.Economics, + metaBlock data.MetaHeaderHandler, validatorsInfo state.ShardValidatorsInfoMapHandler, computedEconomics *block.Economics, ) (block.MiniBlockSlice, error) { return nil, expectedErr }, @@ -74,9 +76,9 @@ func TestRewardsCreatorProxy_CreateRewardsMiniBlocksWithError(t *testing.T) { func TestRewardsCreatorProxy_CreateRewardsMiniBlocksOK(t *testing.T) { t.Parallel() - 
rewardCreatorV1 := &mock.RewardsCreatorStub{ + rewardCreatorV1 := &testscommon.RewardsCreatorStub{ CreateRewardsMiniBlocksCalled: func( - metaBlock data.MetaHeaderHandler, validatorsInfo map[uint32][]*state.ValidatorInfo, computedEconomics *block.Economics, + metaBlock data.MetaHeaderHandler, validatorsInfo state.ShardValidatorsInfoMapHandler, computedEconomics *block.Economics, ) (block.MiniBlockSlice, error) { return make(block.MiniBlockSlice, 2), nil }, @@ -93,9 +95,9 @@ func TestRewardsCreatorProxy_CreateRewardsMiniBlocksOK(t *testing.T) { func TestRewardsCreatorProxy_CreateRewardsMiniBlocksWithSwitchToRewardsCreatorV2(t *testing.T) { t.Parallel() - rewardCreatorV1 := &mock.RewardsCreatorStub{ + rewardCreatorV1 := &testscommon.RewardsCreatorStub{ CreateRewardsMiniBlocksCalled: func( - metaBlock data.MetaHeaderHandler, validatorsInfo map[uint32][]*state.ValidatorInfo, computedEconomics *block.Economics, + metaBlock data.MetaHeaderHandler, validatorsInfo state.ShardValidatorsInfoMapHandler, computedEconomics *block.Economics, ) (block.MiniBlockSlice, error) { return make(block.MiniBlockSlice, 2), nil }, @@ -125,9 +127,9 @@ func TestRewardsCreatorProxy_CreateRewardsMiniBlocksWithSwitchToRewardsCreatorV2 func TestRewardsCreatorProxy_CreateRewardsMiniBlocksWithSwitchToRewardsCreatorV1(t *testing.T) { t.Parallel() - rewardCreatorV2 := &mock.RewardsCreatorStub{ + rewardCreatorV2 := &testscommon.RewardsCreatorStub{ CreateRewardsMiniBlocksCalled: func( - metaBlock data.MetaHeaderHandler, validatorsInfo map[uint32][]*state.ValidatorInfo, computedEconomics *block.Economics, + metaBlock data.MetaHeaderHandler, validatorsInfo state.ShardValidatorsInfoMapHandler, computedEconomics *block.Economics, ) (block.MiniBlockSlice, error) { return make(block.MiniBlockSlice, 2), nil }, @@ -159,9 +161,9 @@ func TestRewardsCreatorProxy_VerifyRewardsMiniBlocksWithError(t *testing.T) { t.Parallel() expectedErr := fmt.Errorf("expectedError") - rewardCreatorV1 := &mock.RewardsCreatorStub{ + rewardCreatorV1 := &testscommon.RewardsCreatorStub{ VerifyRewardsMiniBlocksCalled: func( - metaBlock data.MetaHeaderHandler, validatorsInfo map[uint32][]*state.ValidatorInfo, computedEconomics *block.Economics) error { + metaBlock data.MetaHeaderHandler, validatorsInfo state.ShardValidatorsInfoMapHandler, computedEconomics *block.Economics) error { return expectedErr }, } @@ -176,9 +178,9 @@ func TestRewardsCreatorProxy_VerifyRewardsMiniBlocksWithError(t *testing.T) { func TestRewardsCreatorProxy_VerifyRewardsMiniBlocksOK(t *testing.T) { t.Parallel() - rewardCreatorV1 := &mock.RewardsCreatorStub{ + rewardCreatorV1 := &testscommon.RewardsCreatorStub{ VerifyRewardsMiniBlocksCalled: func( - metaBlock data.MetaHeaderHandler, validatorsInfo map[uint32][]*state.ValidatorInfo, computedEconomics *block.Economics) error { + metaBlock data.MetaHeaderHandler, validatorsInfo state.ShardValidatorsInfoMapHandler, computedEconomics *block.Economics) error { return nil }, } @@ -194,7 +196,7 @@ func TestRewardsCreatorProxy_GetProtocolSustainabilityRewards(t *testing.T) { t.Parallel() expectedValue := big.NewInt(12345) - rewardCreatorV1 := &mock.RewardsCreatorStub{ + rewardCreatorV1 := &testscommon.RewardsCreatorStub{ GetProtocolSustainabilityRewardsCalled: func() *big.Int { return expectedValue }, @@ -210,7 +212,7 @@ func TestRewardsCreatorProxy_GetLocalTxCache(t *testing.T) { t.Parallel() expectedValue := &mock.TxForCurrentBlockStub{} - rewardCreatorV1 := &mock.RewardsCreatorStub{ + rewardCreatorV1 := &testscommon.RewardsCreatorStub{ 
GetLocalTxCacheCalled: func() epochStart.TransactionCacher { return expectedValue }, @@ -228,7 +230,7 @@ func TestRewardsCreatorProxy_CreateMarshalizedData(t *testing.T) { expectedValue := make(map[string][][]byte) blockBody := createDefaultBlockBody() - rewardCreatorV1 := &mock.RewardsCreatorStub{ + rewardCreatorV1 := &testscommon.RewardsCreatorStub{ CreateMarshalledDataCalled: func(body *block.Body) map[string][][]byte { if blockBody == body { return expectedValue @@ -252,7 +254,7 @@ func TestRewardsCreatorProxy_GetRewardsTxs(t *testing.T) { } blockBody := createDefaultBlockBody() - rewardCreatorV1 := &mock.RewardsCreatorStub{ + rewardCreatorV1 := &testscommon.RewardsCreatorStub{ GetRewardsTxsCalled: func(body *block.Body) map[string]data.TransactionHandler { if blockBody == body { return expectedValue @@ -273,7 +275,7 @@ func TestRewardsCreatorProxy_SaveTxBlockToStorage(t *testing.T) { blockBody := createDefaultBlockBody() functionCalled := false - rewardCreatorV1 := &mock.RewardsCreatorStub{ + rewardCreatorV1 := &testscommon.RewardsCreatorStub{ SaveBlockDataToStorageCalled: func(metaBlock data.MetaHeaderHandler, body *block.Body) { functionCalled = true }, @@ -291,7 +293,7 @@ func TestRewardsCreatorProxy_DeleteTxsFromStorage(t *testing.T) { blockBody := createDefaultBlockBody() functionCalled := false - rewardCreatorV1 := &mock.RewardsCreatorStub{ + rewardCreatorV1 := &testscommon.RewardsCreatorStub{ DeleteBlockDataFromStorageCalled: func(metaBlock data.MetaHeaderHandler, body *block.Body) { functionCalled = true }, @@ -309,7 +311,7 @@ func TestRewardsCreatorProxy_RemoveBlockDataFromPools(t *testing.T) { blockBody := createDefaultBlockBody() functionCalled := false - rewardCreatorV1 := &mock.RewardsCreatorStub{ + rewardCreatorV1 := &testscommon.RewardsCreatorStub{ RemoveBlockDataFromPoolsCalled: func(metaBlock data.MetaHeaderHandler, body *block.Body) { functionCalled = true }, @@ -327,13 +329,13 @@ func TestRewardsCreatorProxy_IsInterfaceNil(t *testing.T) { var rewardsCreatorProxy epochStart.RewardsCreator require.True(t, check.IfNil(rewardsCreatorProxy)) - rewardCreatorV1 := &mock.RewardsCreatorStub{} + rewardCreatorV1 := &testscommon.RewardsCreatorStub{} rewardsCreatorProxy, _, _ = createTestData(rewardCreatorV1, rCreatorV1) require.False(t, check.IfNil(rewardsCreatorProxy)) } -func createTestData(rewardCreator *mock.RewardsCreatorStub, rcType configuredRewardsCreator) (*rewardsCreatorProxy, map[uint32][]*state.ValidatorInfo, *block.MetaBlock) { +func createTestData(rewardCreator epochStart.RewardsCreator, rcType configuredRewardsCreator) (*rewardsCreatorProxy, state.ShardValidatorsInfoMapHandler, *block.MetaBlock) { args := createDefaultRewardsCreatorProxyArgs() rewardsCreatorProxy := &rewardsCreatorProxy{ rc: rewardCreator, @@ -380,7 +382,7 @@ func createDefaultRewardsCreatorProxyArgs() RewardsCreatorProxyArgs { return RewardsCreatorProxyArgs{ BaseRewardsCreatorArgs: getBaseRewardsArguments(), - StakingDataProvider: &mock.StakingDataProviderStub{}, + StakingDataProvider: &stakingcommon.StakingDataProviderStub{}, EconomicsDataProvider: NewEpochEconomicsStatistics(), RewardsHandler: rewardsHandler, } diff --git a/epochStart/metachain/rewardsV2.go b/epochStart/metachain/rewardsV2.go index 371f577b875..ddfc05abcfe 100644 --- a/epochStart/metachain/rewardsV2.go +++ b/epochStart/metachain/rewardsV2.go @@ -25,7 +25,7 @@ type nodeRewardsData struct { fullRewards *big.Int topUpStake *big.Int powerInShard *big.Int - valInfo *state.ValidatorInfo + valInfo state.ValidatorInfoHandler } // 
RewardsCreatorArgsV2 holds the data required to create end of epoch rewards @@ -75,7 +75,7 @@ func NewRewardsCreatorV2(args RewardsCreatorArgsV2) (*rewardsCreatorV2, error) { // stake top-up values per node func (rc *rewardsCreatorV2) CreateRewardsMiniBlocks( metaBlock data.MetaHeaderHandler, - validatorsInfo map[uint32][]*state.ValidatorInfo, + validatorsInfo state.ShardValidatorsInfoMapHandler, computedEconomics *block.Economics, ) (block.MiniBlockSlice, error) { if check.IfNil(metaBlock) { @@ -151,7 +151,7 @@ func (rc *rewardsCreatorV2) adjustProtocolSustainabilityRewards(protocolSustaina // VerifyRewardsMiniBlocks verifies if received rewards miniblocks are correct func (rc *rewardsCreatorV2) VerifyRewardsMiniBlocks( metaBlock data.MetaHeaderHandler, - validatorsInfo map[uint32][]*state.ValidatorInfo, + validatorsInfo state.ShardValidatorsInfoMapHandler, computedEconomics *block.Economics, ) error { if check.IfNil(metaBlock) { @@ -222,23 +222,23 @@ func (rc *rewardsCreatorV2) computeValidatorInfoPerRewardAddress( for _, nodeInfoList := range nodesRewardInfo { for _, nodeInfo := range nodeInfoList { - if nodeInfo.valInfo.LeaderSuccess == 0 && nodeInfo.valInfo.ValidatorSuccess == 0 { + if nodeInfo.valInfo.GetLeaderSuccess() == 0 && nodeInfo.valInfo.GetValidatorSuccess() == 0 { accumulatedUnassigned.Add(accumulatedUnassigned, nodeInfo.fullRewards) continue } - rwdInfo, ok := rwdAddrValidatorInfo[string(nodeInfo.valInfo.RewardAddress)] + rwdInfo, ok := rwdAddrValidatorInfo[string(nodeInfo.valInfo.GetRewardAddress())] if !ok { rwdInfo = &rewardInfoData{ accumulatedFees: big.NewInt(0), rewardsFromProtocol: big.NewInt(0), - address: string(nodeInfo.valInfo.RewardAddress), + address: string(nodeInfo.valInfo.GetRewardAddress()), } - rwdAddrValidatorInfo[string(nodeInfo.valInfo.RewardAddress)] = rwdInfo + rwdAddrValidatorInfo[string(nodeInfo.valInfo.GetRewardAddress())] = rwdInfo } - distributedLeaderFees.Add(distributedLeaderFees, nodeInfo.valInfo.AccumulatedFees) - rwdInfo.accumulatedFees.Add(rwdInfo.accumulatedFees, nodeInfo.valInfo.AccumulatedFees) + distributedLeaderFees.Add(distributedLeaderFees, nodeInfo.valInfo.GetAccumulatedFees()) + rwdInfo.accumulatedFees.Add(rwdInfo.accumulatedFees, nodeInfo.valInfo.GetAccumulatedFees()) rwdInfo.rewardsFromProtocol.Add(rwdInfo.rewardsFromProtocol, nodeInfo.fullRewards) } } @@ -263,7 +263,7 @@ func (rc *rewardsCreatorV2) IsInterfaceNil() bool { } func (rc *rewardsCreatorV2) computeRewardsPerNode( - validatorsInfo map[uint32][]*state.ValidatorInfo, + validatorsInfo state.ShardValidatorsInfoMapHandler, ) (map[uint32][]*nodeRewardsData, *big.Int) { var baseRewardsPerBlock *big.Int @@ -302,11 +302,11 @@ func (rc *rewardsCreatorV2) computeRewardsPerNode( } func (rc *rewardsCreatorV2) initNodesRewardsInfo( - validatorsInfo map[uint32][]*state.ValidatorInfo, + validatorsInfo state.ShardValidatorsInfoMapHandler, ) map[uint32][]*nodeRewardsData { nodesRewardsInfo := make(map[uint32][]*nodeRewardsData) - for shardID, valInfoList := range validatorsInfo { + for shardID, valInfoList := range validatorsInfo.GetShardValidatorsInfoMap() { nodesRewardsInfo[shardID] = make([]*nodeRewardsData, 0, len(valInfoList)) for _, valInfo := range valInfoList { if validatorInfo.WasEligibleInCurrentEpoch(valInfo) { @@ -336,7 +336,7 @@ func (rc *rewardsCreatorV2) computeBaseRewardsPerNode( for _, nodeRewardsInfo := range nodeRewardsInfoList { nodeRewardsInfo.baseReward = big.NewInt(0).Mul( rc.mapBaseRewardsPerBlockPerValidator[shardID], - 
big.NewInt(int64(nodeRewardsInfo.valInfo.NumSelectedInSuccessBlocks))) + big.NewInt(int64(nodeRewardsInfo.valInfo.GetNumSelectedInSuccessBlocks()))) accumulatedRewards.Add(accumulatedRewards, nodeRewardsInfo.baseReward) } } @@ -507,13 +507,13 @@ func computeNodesPowerInShard( // power in epoch is computed as nbBlocks*nodeTopUp, where nbBlocks represents the number of blocks the node // participated at creation/validation -func computeNodePowerInShard(nodeInfo *state.ValidatorInfo, nodeTopUp *big.Int) *big.Int { +func computeNodePowerInShard(nodeInfo state.ValidatorInfoHandler, nodeTopUp *big.Int) *big.Int { // if node was offline, it had no power, so the rewards should go to the others - if nodeInfo.LeaderSuccess == 0 && nodeInfo.ValidatorSuccess == 0 { + if nodeInfo.GetLeaderSuccess() == 0 && nodeInfo.GetValidatorSuccess() == 0 { return big.NewInt(0) } - nbBlocks := big.NewInt(0).SetUint64(uint64(nodeInfo.NumSelectedInSuccessBlocks)) + nbBlocks := big.NewInt(0).SetUint64(uint64(nodeInfo.GetNumSelectedInSuccessBlocks())) return big.NewInt(0).Mul(nbBlocks, nodeTopUp) } diff --git a/epochStart/metachain/rewardsV2_test.go b/epochStart/metachain/rewardsV2_test.go index 48d9564b7aa..7abea51dea3 100644 --- a/epochStart/metachain/rewardsV2_test.go +++ b/epochStart/metachain/rewardsV2_test.go @@ -19,6 +19,7 @@ import ( "github.com/multiversx/mx-chain-go/sharding" "github.com/multiversx/mx-chain-go/state" "github.com/multiversx/mx-chain-go/testscommon/economicsmocks" + "github.com/multiversx/mx-chain-go/testscommon/stakingcommon" "github.com/stretchr/testify/require" ) @@ -106,12 +107,12 @@ func TestNewRewardsCreatorV2_initNodesRewardsInfo(t *testing.T) { valInfoEligibleWithExtra := addNonEligibleValidatorInfo(100, valInfoEligible, string(common.WaitingList)) nodesRewardInfo := rwd.initNodesRewardsInfo(valInfoEligibleWithExtra) - require.Equal(t, len(valInfoEligible), len(nodesRewardInfo)) + require.Equal(t, len(valInfoEligible.GetShardValidatorsInfoMap()), len(nodesRewardInfo)) for shardID, nodeInfoList := range nodesRewardInfo { - require.Equal(t, len(nodeInfoList), len(valInfoEligible[shardID])) + require.Equal(t, len(nodeInfoList), len(valInfoEligible.GetShardValidatorsInfoMap()[shardID])) for i, nodeInfo := range nodeInfoList { - require.True(t, valInfoEligible[shardID][i] == nodeInfo.valInfo) + require.True(t, valInfoEligible.GetShardValidatorsInfoMap()[shardID][i] == nodeInfo.valInfo) require.Equal(t, zero, nodeInfo.topUpStake) require.Equal(t, zero, nodeInfo.powerInShard) require.Equal(t, zero, nodeInfo.baseReward) @@ -126,7 +127,7 @@ func TestNewRewardsCreatorV2_getTopUpForAllEligibleNodes(t *testing.T) { args := getRewardsCreatorV2Arguments() topUpVal, _ := big.NewInt(0).SetString("100000000000000000000", 10) - args.StakingDataProvider = &mock.StakingDataProviderStub{ + args.StakingDataProvider = &stakingcommon.StakingDataProviderStub{ GetNodeStakedTopUpCalled: func(blsKey []byte) (*big.Int, error) { topUp := big.NewInt(0).Set(topUpVal) return topUp, nil @@ -155,7 +156,7 @@ func TestNewRewardsCreatorV2_getTopUpForAllEligibleSomeBLSKeysNotFoundZeroed(t * args := getRewardsCreatorV2Arguments() topUpVal, _ := big.NewInt(0).SetString("100000000000000000000", 10) notFoundKey := []byte("notFound") - args.StakingDataProvider = &mock.StakingDataProviderStub{ + args.StakingDataProvider = &stakingcommon.StakingDataProviderStub{ GetNodeStakedTopUpCalled: func(blsKey []byte) (*big.Int, error) { if bytes.Equal(blsKey, notFoundKey) { return nil, fmt.Errorf("not found") @@ -170,9 +171,9 @@ func 
TestNewRewardsCreatorV2_getTopUpForAllEligibleSomeBLSKeysNotFoundZeroed(t * nodesPerShard := uint32(10) valInfo := createDefaultValidatorInfo(nodesPerShard, args.ShardCoordinator, args.NodesConfigProvider, 100, defaultBlocksPerShard) - for _, valList := range valInfo { - valList[0].PublicKey = notFoundKey - valList[1].PublicKey = notFoundKey + for _, valList := range valInfo.GetShardValidatorsInfoMap() { + valList[0].SetPublicKey(notFoundKey) + valList[1].SetPublicKey(notFoundKey) } nodesRewardInfo := rwd.initNodesRewardsInfo(valInfo) @@ -387,7 +388,7 @@ func TestNewRewardsCreatorV2_computeNodesPowerInShard(t *testing.T) { for _, nodeInfoList := range nodesRewardInfo { for _, nodeInfo := range nodeInfoList { - blocks := nodeInfo.valInfo.NumSelectedInSuccessBlocks + blocks := nodeInfo.valInfo.GetNumSelectedInSuccessBlocks() topUp := nodeInfo.topUpStake require.Equal(t, big.NewInt(0).Mul(big.NewInt(int64(blocks)), topUp), nodeInfo.powerInShard) } @@ -607,11 +608,11 @@ func TestNewRewardsCreatorV2_computeTopUpRewardsPerNode(t *testing.T) { nodesRewardInfo := dummyRwd.initNodesRewardsInfo(vInfo) _, _ = setDummyValuesInNodesRewardInfo(nodesRewardInfo, nbEligiblePerShard, tuStake, 0) - args.StakingDataProvider = &mock.StakingDataProviderStub{ + args.StakingDataProvider = &stakingcommon.StakingDataProviderStub{ GetNodeStakedTopUpCalled: func(blsKey []byte) (*big.Int, error) { - for shardID, vList := range vInfo { + for shardID, vList := range vInfo.GetShardValidatorsInfoMap() { for i, v := range vList { - if bytes.Equal(v.PublicKey, blsKey) { + if bytes.Equal(v.GetPublicKey(), blsKey) { return nodesRewardInfo[shardID][i].topUpStake, nil } } @@ -653,7 +654,7 @@ func TestNewRewardsCreatorV2_computeTopUpRewardsPerNodeNotFoundBLSKeys(t *testin args := getRewardsCreatorV2Arguments() nbEligiblePerShard := uint32(400) vInfo := createDefaultValidatorInfo(nbEligiblePerShard, args.ShardCoordinator, args.NodesConfigProvider, 100, defaultBlocksPerShard) - args.StakingDataProvider = &mock.StakingDataProviderStub{ + args.StakingDataProvider = &stakingcommon.StakingDataProviderStub{ GetNodeStakedTopUpCalled: func(blsKey []byte) (*big.Int, error) { return nil, fmt.Errorf("not found") }, @@ -737,15 +738,15 @@ func TestNewRewardsCreatorV2_computeRewardsPerNode(t *testing.T) { nodesRewardInfo := dummyRwd.initNodesRewardsInfo(vInfo) _, totalTopUpStake := setDummyValuesInNodesRewardInfo(nodesRewardInfo, nbEligiblePerShard, tuStake, 0) - args.StakingDataProvider = &mock.StakingDataProviderStub{ + args.StakingDataProvider = &stakingcommon.StakingDataProviderStub{ GetTotalTopUpStakeEligibleNodesCalled: func() *big.Int { topUpStake := big.NewInt(0).Set(totalTopUpStake) return topUpStake }, GetNodeStakedTopUpCalled: func(blsKey []byte) (*big.Int, error) { - for shardID, vList := range vInfo { + for shardID, vList := range vInfo.GetShardValidatorsInfoMap() { for i, v := range vList { - if bytes.Equal(v.PublicKey, blsKey) { + if bytes.Equal(v.GetPublicKey(), blsKey) { return nodesRewardInfo[shardID][i].topUpStake, nil } } @@ -1042,7 +1043,7 @@ func TestNewRewardsCreatorV35_computeRewardsPer3200NodesWithDifferentTopups(t *t nodesRewardInfo, _ := setupNodeRewardInfo(setupResult, vInfo, topupStakePerNode, tt.validatorTopupStake) setupResult.EconomicsDataProvider.SetRewardsToBeDistributedForBlocks(setupResult.rewardsForBlocks) - setupResult.RewardsCreatorArgsV2.StakingDataProvider = &mock.StakingDataProviderStub{ + setupResult.RewardsCreatorArgsV2.StakingDataProvider = &stakingcommon.StakingDataProviderStub{ 
GetTotalTopUpStakeEligibleNodesCalled: func() *big.Int { return topupEligibleStake }, @@ -1050,9 +1051,9 @@ func TestNewRewardsCreatorV35_computeRewardsPer3200NodesWithDifferentTopups(t *t return baseEligibleStake }, GetNodeStakedTopUpCalled: func(blsKey []byte) (*big.Int, error) { - for shardID, vList := range vInfo { + for shardID, vList := range vInfo.GetShardValidatorsInfoMap() { for i, v := range vList { - if bytes.Equal(v.PublicKey, blsKey) { + if bytes.Equal(v.GetPublicKey(), blsKey) { return nodesRewardInfo[shardID][i].topUpStake, nil } } @@ -1149,7 +1150,7 @@ func TestNewRewardsCreatorV2_computeRewardsPer3200NodesWithDifferentTopups(t *te nodesRewardInfo, _ := setupNodeRewardInfo(setupResult, vInfo, topupStakePerNode, tt.validatorTopupStake) setupResult.EconomicsDataProvider.SetRewardsToBeDistributedForBlocks(setupResult.rewardsForBlocks) - setupResult.RewardsCreatorArgsV2.StakingDataProvider = &mock.StakingDataProviderStub{ + setupResult.RewardsCreatorArgsV2.StakingDataProvider = &stakingcommon.StakingDataProviderStub{ GetTotalTopUpStakeEligibleNodesCalled: func() *big.Int { return topupEligibleStake }, @@ -1157,9 +1158,9 @@ func TestNewRewardsCreatorV2_computeRewardsPer3200NodesWithDifferentTopups(t *te return baseEligibleStake }, GetNodeStakedTopUpCalled: func(blsKey []byte) (*big.Int, error) { - for shardID, vList := range vInfo { + for shardID, vList := range vInfo.GetShardValidatorsInfoMap() { for i, v := range vList { - if bytes.Equal(v.PublicKey, blsKey) { + if bytes.Equal(v.GetPublicKey(), blsKey) { return nodesRewardInfo[shardID][i].topUpStake, nil } } @@ -1200,7 +1201,7 @@ func TestNewRewardsCreatorV2_computeRewardsPer3200NodesWithDifferentTopups(t *te func setupNodeRewardInfo( setupResult SetupRewardsResult, - vInfo map[uint32][]*state.ValidatorInfo, + vInfo state.ShardValidatorsInfoMapHandler, topupStakePerNode *big.Int, validatorTopupStake *big.Int, ) (map[uint32][]*nodeRewardsData, error) { @@ -1267,7 +1268,7 @@ func computeRewardsAndDust(nbEligiblePerShard uint32, args SetupRewardsResult, t totalEligibleStake, _ := big.NewInt(0).SetString("4000000"+"000000000000000000", 10) - args.StakingDataProvider = &mock.StakingDataProviderStub{ + args.StakingDataProvider = &stakingcommon.StakingDataProviderStub{ GetTotalTopUpStakeEligibleNodesCalled: func() *big.Int { return totalTopUpStake }, @@ -1275,9 +1276,9 @@ func computeRewardsAndDust(nbEligiblePerShard uint32, args SetupRewardsResult, t return totalEligibleStake }, GetNodeStakedTopUpCalled: func(blsKey []byte) (*big.Int, error) { - for shardID, vList := range vInfo { + for shardID, vList := range vInfo.GetShardValidatorsInfoMap() { for i, v := range vList { - if bytes.Equal(v.PublicKey, blsKey) { + if bytes.Equal(v.GetPublicKey(), blsKey) { return nodesRewardInfo[shardID][i].topUpStake, nil } } @@ -1360,11 +1361,11 @@ func TestNewRewardsCreatorV2_computeValidatorInfoPerRewardAddressWithOfflineVali nbShards := int64(args.ShardCoordinator.NumberOfShards()) + 1 args.EconomicsDataProvider.SetLeadersFees(big.NewInt(0).Mul(big.NewInt(int64(proposerFee)), big.NewInt(int64(nbEligiblePerShard-nbOfflinePerShard)*nbShards))) valInfo := createDefaultValidatorInfo(nbEligiblePerShard, args.ShardCoordinator, args.NodesConfigProvider, proposerFee, defaultBlocksPerShard) - for _, valList := range valInfo { + for _, valList := range valInfo.GetShardValidatorsInfoMap() { for i := 0; i < int(nbOfflinePerShard); i++ { - valList[i].LeaderSuccess = 0 - valList[i].ValidatorSuccess = 0 - valList[i].AccumulatedFees = big.NewInt(0) + 
valList[i].SetLeaderSuccess(0) + valList[i].SetValidatorSuccess(0) + valList[i].SetAccumulatedFees(big.NewInt(0)) } } @@ -1412,9 +1413,9 @@ func TestNewRewardsCreatorV2_computeValidatorInfoPerRewardAddressWithLeavingVali nbShards := int64(args.ShardCoordinator.NumberOfShards()) + 1 args.EconomicsDataProvider.SetLeadersFees(big.NewInt(0).Mul(big.NewInt(int64(proposerFee)), big.NewInt(int64(nbEligiblePerShard)*nbShards))) valInfo := createDefaultValidatorInfo(nbEligiblePerShard, args.ShardCoordinator, args.NodesConfigProvider, proposerFee, defaultBlocksPerShard) - for _, valList := range valInfo { + for _, valList := range valInfo.GetShardValidatorsInfoMap() { for i := 0; i < int(nbLeavingPerShard); i++ { - valList[i].List = string(common.LeavingList) + valList[i].SetList(string(common.LeavingList)) } } @@ -1500,10 +1501,8 @@ func TestNewRewardsCreatorV2_addValidatorRewardsToMiniBlocks(t *testing.T) { DevFeesInEpoch: big.NewInt(0), } sumFees := big.NewInt(0) - for _, vInfoList := range valInfo { - for _, vInfo := range vInfoList { - sumFees.Add(sumFees, vInfo.AccumulatedFees) - } + for _, vInfo := range valInfo.GetAllValidatorsInfo() { + sumFees.Add(sumFees, vInfo.GetAccumulatedFees()) } accumulatedDust, err := rwd.addValidatorRewardsToMiniBlocks(metaBlock, miniBlocks, nodesRewardInfo) @@ -1548,12 +1547,12 @@ func TestNewRewardsCreatorV2_addValidatorRewardsToMiniBlocksAddressInMetaChainDe nbAddrInMetachainPerShard := 2 sumFees := big.NewInt(0) - for _, vInfoList := range valInfo { + for _, vInfoList := range valInfo.GetShardValidatorsInfoMap() { for i, vInfo := range vInfoList { if i < nbAddrInMetachainPerShard { - vInfo.RewardAddress = addrInMeta + vInfo.SetRewardAddress(addrInMeta) } - sumFees.Add(sumFees, vInfo.AccumulatedFees) + sumFees.Add(sumFees, vInfo.GetAccumulatedFees()) } } @@ -1585,15 +1584,15 @@ func TestNewRewardsCreatorV2_CreateRewardsMiniBlocks(t *testing.T) { nodesRewardInfo := dummyRwd.initNodesRewardsInfo(vInfo) _, _ = setDummyValuesInNodesRewardInfo(nodesRewardInfo, nbEligiblePerShard, tuStake, 0) - args.StakingDataProvider = &mock.StakingDataProviderStub{ + args.StakingDataProvider = &stakingcommon.StakingDataProviderStub{ GetTotalTopUpStakeEligibleNodesCalled: func() *big.Int { totalTopUpStake, _ := big.NewInt(0).SetString("3000000000000000000000000", 10) return totalTopUpStake }, GetNodeStakedTopUpCalled: func(blsKey []byte) (*big.Int, error) { - for shardID, vList := range vInfo { + for shardID, vList := range vInfo.GetShardValidatorsInfoMap() { for i, v := range vList { - if bytes.Equal(v.PublicKey, blsKey) { + if bytes.Equal(v.GetPublicKey(), blsKey) { return nodesRewardInfo[shardID][i].topUpStake, nil } } @@ -1637,10 +1636,8 @@ func TestNewRewardsCreatorV2_CreateRewardsMiniBlocks(t *testing.T) { } sumFees := big.NewInt(0) - for _, vInfoList := range vInfo { - for _, v := range vInfoList { - sumFees.Add(sumFees, v.AccumulatedFees) - } + for _, v := range vInfo.GetAllValidatorsInfo() { + sumFees.Add(sumFees, v.GetAccumulatedFees()) } totalRws := rwd.economicsDataProvider.RewardsToBeDistributedForBlocks() @@ -1683,14 +1680,14 @@ func TestNewRewardsCreatorV2_CreateRewardsMiniBlocks2169Nodes(t *testing.T) { topupValue.Mul(topupValue, multiplier) _, totalTopupStake := setValuesInNodesRewardInfo(nodesRewardInfo, topupValue, tuStake) - args.StakingDataProvider = &mock.StakingDataProviderStub{ + args.StakingDataProvider = &stakingcommon.StakingDataProviderStub{ GetTotalTopUpStakeEligibleNodesCalled: func() *big.Int { return totalTopupStake }, GetNodeStakedTopUpCalled: 
func(blsKey []byte) (*big.Int, error) { - for shardID, vList := range vInfo { + for shardID, vList := range vInfo.GetShardValidatorsInfoMap() { for i, v := range vList { - if bytes.Equal(v.PublicKey, blsKey) { + if bytes.Equal(v.GetPublicKey(), blsKey) { return nodesRewardInfo[shardID][i].topUpStake, nil } } @@ -1734,10 +1731,8 @@ func TestNewRewardsCreatorV2_CreateRewardsMiniBlocks2169Nodes(t *testing.T) { } sumFees := big.NewInt(0) - for _, vInfoList := range vInfo { - for _, v := range vInfoList { - sumFees.Add(sumFees, v.AccumulatedFees) - } + for _, v := range vInfo.GetAllValidatorsInfo() { + sumFees.Add(sumFees, v.GetAccumulatedFees()) } totalRws := rwd.economicsDataProvider.RewardsToBeDistributedForBlocks() @@ -1781,7 +1776,7 @@ func getRewardsCreatorV2Arguments() RewardsCreatorArgsV2 { } return RewardsCreatorArgsV2{ BaseRewardsCreatorArgs: getBaseRewardsArguments(), - StakingDataProvider: &mock.StakingDataProviderStub{}, + StakingDataProvider: &stakingcommon.StakingDataProviderStub{}, EconomicsDataProvider: NewEpochEconomicsStatistics(), RewardsHandler: rewardsHandler, } @@ -1801,7 +1796,7 @@ func getRewardsCreatorV35Arguments() RewardsCreatorArgsV2 { } return RewardsCreatorArgsV2{ BaseRewardsCreatorArgs: getBaseRewardsArguments(), - StakingDataProvider: &mock.StakingDataProviderStub{}, + StakingDataProvider: &stakingcommon.StakingDataProviderStub{}, EconomicsDataProvider: NewEpochEconomicsStatistics(), RewardsHandler: rewardsHandler, } @@ -1877,7 +1872,7 @@ func createDefaultValidatorInfo( nodesConfigProvider epochStart.NodesConfigProvider, proposerFeesPerNode uint32, nbBlocksPerShard uint32, -) map[uint32][]*state.ValidatorInfo { +) state.ShardValidatorsInfoMapHandler { cGrShard := uint32(nodesConfigProvider.ConsensusGroupSize(0)) cGrMeta := uint32(nodesConfigProvider.ConsensusGroupSize(core.MetachainShardId)) nbBlocksSelectedNodeInShard := nbBlocksPerShard * cGrShard / eligibleNodesPerShard @@ -1886,9 +1881,8 @@ func createDefaultValidatorInfo( shardsMap := createShardsMap(shardCoordinator) var nbBlocksSelected uint32 - validators := make(map[uint32][]*state.ValidatorInfo) + validators := state.NewShardValidatorsInfoMap() for shardID := range shardsMap { - validators[shardID] = make([]*state.ValidatorInfo, eligibleNodesPerShard) nbBlocksSelected = nbBlocksSelectedNodeInShard if shardID == core.MetachainShardId { nbBlocksSelected = nbBlocksSelectedNodeInMeta @@ -1900,7 +1894,7 @@ func createDefaultValidatorInfo( _ = hex.Encode(addrHex, []byte(str)) leaderSuccess := uint32(20) - validators[shardID][i] = &state.ValidatorInfo{ + _ = validators.Add(&state.ValidatorInfo{ PublicKey: []byte(fmt.Sprintf("pubKeyBLS%d%d", shardID, i)), ShardId: shardID, RewardAddress: addrHex, @@ -1909,7 +1903,7 @@ func createDefaultValidatorInfo( NumSelectedInSuccessBlocks: nbBlocksSelected, AccumulatedFees: big.NewInt(int64(proposerFeesPerNode)), List: string(common.EligibleList), - } + }) } } @@ -1918,13 +1912,14 @@ func createDefaultValidatorInfo( func addNonEligibleValidatorInfo( nonEligiblePerShard uint32, - validatorsInfo map[uint32][]*state.ValidatorInfo, + validatorsInfo state.ShardValidatorsInfoMapHandler, list string, -) map[uint32][]*state.ValidatorInfo { - resultedValidatorsInfo := make(map[uint32][]*state.ValidatorInfo) - for shardID, valInfoList := range validatorsInfo { +) state.ShardValidatorsInfoMapHandler { + resultedValidatorsInfo := state.NewShardValidatorsInfoMap() + for shardID, valInfoList := range validatorsInfo.GetShardValidatorsInfoMap() { + _ = 
resultedValidatorsInfo.SetValidatorsInShard(shardID, valInfoList) for i := uint32(0); i < nonEligiblePerShard; i++ { - vInfo := &state.ValidatorInfo{ + _ = resultedValidatorsInfo.Add(&state.ValidatorInfo{ PublicKey: []byte(fmt.Sprintf("pubKeyBLSExtra%d", i)), ShardId: shardID, RewardAddress: []byte(fmt.Sprintf("addrRewardsExtra%d", i)), @@ -1933,8 +1928,7 @@ func addNonEligibleValidatorInfo( NumSelectedInSuccessBlocks: 1, AccumulatedFees: big.NewInt(int64(10)), List: list, - } - resultedValidatorsInfo[shardID] = append(valInfoList, vInfo) + }) } } diff --git a/epochStart/metachain/rewards_test.go b/epochStart/metachain/rewards_test.go index a41355bef67..b40fe8882e9 100644 --- a/epochStart/metachain/rewards_test.go +++ b/epochStart/metachain/rewards_test.go @@ -136,14 +136,12 @@ func TestRewardsCreator_CreateRewardsMiniBlocks(t *testing.T) { EpochStart: getDefaultEpochStart(), DevFeesInEpoch: big.NewInt(0), } - valInfo := make(map[uint32][]*state.ValidatorInfo) - valInfo[0] = []*state.ValidatorInfo{ - { - PublicKey: []byte("pubkey"), - ShardId: 0, - AccumulatedFees: big.NewInt(100), - }, - } + valInfo := state.NewShardValidatorsInfoMap() + _ = valInfo.Add(&state.ValidatorInfo{ + PublicKey: []byte("pubkey"), + ShardId: 0, + AccumulatedFees: big.NewInt(100), + }) bdy, err := rwd.CreateRewardsMiniBlocks(mb, valInfo, &mb.EpochStart.Economics) assert.Nil(t, err) assert.NotNil(t, bdy) @@ -178,14 +176,12 @@ func TestRewardsCreator_VerifyRewardsMiniBlocksHashDoesNotMatch(t *testing.T) { }, DevFeesInEpoch: big.NewInt(0), } - valInfo := make(map[uint32][]*state.ValidatorInfo) - valInfo[0] = []*state.ValidatorInfo{ - { - PublicKey: []byte("pubkey"), - ShardId: 0, - AccumulatedFees: big.NewInt(100), - }, - } + valInfo := state.NewShardValidatorsInfoMap() + _ = valInfo.Add(&state.ValidatorInfo{ + PublicKey: []byte("pubkey"), + ShardId: 0, + AccumulatedFees: big.NewInt(100), + }) err := rwd.VerifyRewardsMiniBlocks(mb, valInfo, &mb.EpochStart.Economics) assert.Equal(t, epochStart.ErrRewardMiniBlockHashDoesNotMatch, err) @@ -236,15 +232,13 @@ func TestRewardsCreator_VerifyRewardsMiniBlocksRewardsMbNumDoesNotMatch(t *testi mbh.Hash = mbHash mb.MiniBlockHeaders = []block.MiniBlockHeader{mbh, mbh} - valInfo := make(map[uint32][]*state.ValidatorInfo) - valInfo[0] = []*state.ValidatorInfo{ - { - PublicKey: []byte("pubkey"), - ShardId: 0, - AccumulatedFees: big.NewInt(100), - LeaderSuccess: 1, - }, - } + valInfo := state.NewShardValidatorsInfoMap() + _ = valInfo.Add(&state.ValidatorInfo{ + PublicKey: []byte("pubkey"), + ShardId: 0, + AccumulatedFees: big.NewInt(100), + LeaderSuccess: 1, + }) err := rwd.VerifyRewardsMiniBlocks(mb, valInfo, &mb.EpochStart.Economics) assert.Equal(t, epochStart.ErrRewardMiniBlocksNumDoesNotMatch, err) @@ -393,15 +387,13 @@ func TestRewardsCreator_VerifyRewardsMiniBlocksShouldWork(t *testing.T) { mb.EpochStart.Economics.RewardsForProtocolSustainability.Set(protocolSustainabilityRewardTx.Value) mb.EpochStart.Economics.TotalToDistribute.Set(big.NewInt(0).Add(rwdTx.Value, protocolSustainabilityRewardTx.Value)) - valInfo := make(map[uint32][]*state.ValidatorInfo) - valInfo[0] = []*state.ValidatorInfo{ - { - PublicKey: []byte("pubkey"), - ShardId: 0, - AccumulatedFees: big.NewInt(100), - LeaderSuccess: 1, - }, - } + valInfo := state.NewShardValidatorsInfoMap() + _ = valInfo.Add(&state.ValidatorInfo{ + PublicKey: []byte("pubkey"), + ShardId: 0, + AccumulatedFees: big.NewInt(100), + LeaderSuccess: 1, + }) err := rwd.VerifyRewardsMiniBlocks(mb, valInfo, &mb.EpochStart.Economics) 
assert.Nil(t, err) @@ -463,15 +455,13 @@ func TestRewardsCreator_VerifyRewardsMiniBlocksShouldWorkEvenIfNotAllShardsHaveR mb.EpochStart.Economics.RewardsForProtocolSustainability.Set(protocolSustainabilityRewardTx.Value) mb.EpochStart.Economics.TotalToDistribute.Set(big.NewInt(0).Add(rwdTx.Value, protocolSustainabilityRewardTx.Value)) - valInfo := make(map[uint32][]*state.ValidatorInfo) - valInfo[0] = []*state.ValidatorInfo{ - { - PublicKey: []byte("pubkey"), - ShardId: receivedShardID, - AccumulatedFees: big.NewInt(100), - LeaderSuccess: 1, - }, - } + valInfo := state.NewShardValidatorsInfoMap() + _ = valInfo.Add(&state.ValidatorInfo{ + PublicKey: []byte("pubkey"), + ShardId: receivedShardID, + AccumulatedFees: big.NewInt(100), + LeaderSuccess: 1, + }) err := rwd.VerifyRewardsMiniBlocks(mb, valInfo, &mb.EpochStart.Economics) assert.Nil(t, err) @@ -487,14 +477,12 @@ func TestRewardsCreator_CreateMarshalizedData(t *testing.T) { EpochStart: getDefaultEpochStart(), DevFeesInEpoch: big.NewInt(0), } - valInfo := make(map[uint32][]*state.ValidatorInfo) - valInfo[0] = []*state.ValidatorInfo{ - { - PublicKey: []byte("pubkey"), - ShardId: 0, - AccumulatedFees: big.NewInt(100), - }, - } + valInfo := state.NewShardValidatorsInfoMap() + _ = valInfo.Add(&state.ValidatorInfo{ + PublicKey: []byte("pubkey"), + ShardId: 0, + AccumulatedFees: big.NewInt(100), + }) _, _ = rwd.CreateRewardsMiniBlocks(mb, valInfo, &mb.EpochStart.Economics) rwdTx := rewardTx.RewardTx{ @@ -544,15 +532,13 @@ func TestRewardsCreator_SaveTxBlockToStorage(t *testing.T) { EpochStart: getDefaultEpochStart(), DevFeesInEpoch: big.NewInt(0), } - valInfo := make(map[uint32][]*state.ValidatorInfo) - valInfo[0] = []*state.ValidatorInfo{ - { - PublicKey: []byte("pubkey"), - ShardId: 0, - AccumulatedFees: big.NewInt(100), - LeaderSuccess: 1, - }, - } + valInfo := state.NewShardValidatorsInfoMap() + _ = valInfo.Add(&state.ValidatorInfo{ + PublicKey: []byte("pubkey"), + ShardId: 0, + AccumulatedFees: big.NewInt(100), + LeaderSuccess: 1, + }) _, _ = rwd.CreateRewardsMiniBlocks(mb, valInfo, &mb.EpochStart.Economics) mb2 := block.MetaBlock{ @@ -613,15 +599,13 @@ func TestRewardsCreator_addValidatorRewardsToMiniBlocks(t *testing.T) { expectedRwdTxHash, _ := core.CalculateHash(&marshal.JsonMarshalizer{}, &hashingMocks.HasherMock{}, expectedRwdTx) cloneMb.TxHashes = append(cloneMb.TxHashes, expectedRwdTxHash) - valInfo := make(map[uint32][]*state.ValidatorInfo) - valInfo[0] = []*state.ValidatorInfo{ - { - PublicKey: []byte("pubkey"), - ShardId: 0, - AccumulatedFees: big.NewInt(100), - LeaderSuccess: 1, - }, - } + valInfo := state.NewShardValidatorsInfoMap() + _ = valInfo.Add(&state.ValidatorInfo{ + PublicKey: []byte("pubkey"), + ShardId: 0, + AccumulatedFees: big.NewInt(100), + LeaderSuccess: 1, + }) rwdc.fillBaseRewardsPerBlockPerNode(mb.EpochStart.Economics.RewardsPerBlock) err := rwdc.addValidatorRewardsToMiniBlocks(valInfo, mb, miniBlocks, &rewardTx.RewardTx{}) @@ -648,25 +632,21 @@ func TestRewardsCreator_ProtocolRewardsForValidatorFromMultipleShards(t *testing } pubkey := "pubkey" - valInfo := make(map[uint32][]*state.ValidatorInfo) - valInfo[0] = []*state.ValidatorInfo{ - { - RewardAddress: []byte(pubkey), - ShardId: 0, - AccumulatedFees: big.NewInt(100), - NumSelectedInSuccessBlocks: 100, - LeaderSuccess: 1, - }, - } - valInfo[core.MetachainShardId] = []*state.ValidatorInfo{ - { - RewardAddress: []byte(pubkey), - ShardId: core.MetachainShardId, - AccumulatedFees: big.NewInt(100), - NumSelectedInSuccessBlocks: 200, - LeaderSuccess: 1, - }, - } 
+ valInfo := state.NewShardValidatorsInfoMap() + _ = valInfo.Add(&state.ValidatorInfo{ + RewardAddress: []byte(pubkey), + ShardId: 0, + AccumulatedFees: big.NewInt(100), + NumSelectedInSuccessBlocks: 100, + LeaderSuccess: 1, + }) + _ = valInfo.Add(&state.ValidatorInfo{ + RewardAddress: []byte(pubkey), + ShardId: core.MetachainShardId, + AccumulatedFees: big.NewInt(100), + NumSelectedInSuccessBlocks: 200, + LeaderSuccess: 1, + }) rwdc.fillBaseRewardsPerBlockPerNode(mb.EpochStart.Economics.RewardsPerBlock) rwdInfoData := rwdc.computeValidatorInfoPerRewardAddress(valInfo, &rewardTx.RewardTx{}, 0) @@ -675,8 +655,8 @@ func TestRewardsCreator_ProtocolRewardsForValidatorFromMultipleShards(t *testing assert.Equal(t, rwdInfo.address, pubkey) assert.Equal(t, rwdInfo.accumulatedFees.Cmp(big.NewInt(200)), 0) - protocolRewards := uint64(valInfo[0][0].NumSelectedInSuccessBlocks) * (mb.EpochStart.Economics.RewardsPerBlock.Uint64() / uint64(args.NodesConfigProvider.ConsensusGroupSize(0))) - protocolRewards += uint64(valInfo[core.MetachainShardId][0].NumSelectedInSuccessBlocks) * (mb.EpochStart.Economics.RewardsPerBlock.Uint64() / uint64(args.NodesConfigProvider.ConsensusGroupSize(core.MetachainShardId))) + protocolRewards := uint64(valInfo.GetShardValidatorsInfoMap()[0][0].GetNumSelectedInSuccessBlocks()) * (mb.EpochStart.Economics.RewardsPerBlock.Uint64() / uint64(args.NodesConfigProvider.ConsensusGroupSize(0))) + protocolRewards += uint64(valInfo.GetShardValidatorsInfoMap()[core.MetachainShardId][0].GetNumSelectedInSuccessBlocks()) * (mb.EpochStart.Economics.RewardsPerBlock.Uint64() / uint64(args.NodesConfigProvider.ConsensusGroupSize(core.MetachainShardId))) assert.Equal(t, rwdInfo.rewardsFromProtocol.Uint64(), protocolRewards) } @@ -730,7 +710,7 @@ func TestRewardsCreator_AddProtocolSustainabilityRewardToMiniBlocks(t *testing.T metaBlk.EpochStart.Economics.RewardsForProtocolSustainability.Set(expectedRewardTx.Value) metaBlk.EpochStart.Economics.TotalToDistribute.Set(expectedRewardTx.Value) - miniBlocks, err := rwdc.CreateRewardsMiniBlocks(metaBlk, make(map[uint32][]*state.ValidatorInfo), &metaBlk.EpochStart.Economics) + miniBlocks, err := rwdc.CreateRewardsMiniBlocks(metaBlk, state.NewShardValidatorsInfoMap(), &metaBlk.EpochStart.Economics) assert.Nil(t, err) assert.Equal(t, cloneMb, miniBlocks[0]) } @@ -747,23 +727,21 @@ func TestRewardsCreator_ValidatorInfoWithMetaAddressAddedToProtocolSustainabilit DevFeesInEpoch: big.NewInt(0), } metaBlk.EpochStart.Economics.TotalToDistribute = big.NewInt(20250) - valInfo := make(map[uint32][]*state.ValidatorInfo) - valInfo[0] = []*state.ValidatorInfo{ - { - RewardAddress: vm.StakingSCAddress, - ShardId: 0, - AccumulatedFees: big.NewInt(100), - NumSelectedInSuccessBlocks: 1, - LeaderSuccess: 1, - }, - { - RewardAddress: vm.FirstDelegationSCAddress, - ShardId: 0, - AccumulatedFees: big.NewInt(100), - NumSelectedInSuccessBlocks: 1, - LeaderSuccess: 1, - }, - } + valInfo := state.NewShardValidatorsInfoMap() + _ = valInfo.Add(&state.ValidatorInfo{ + RewardAddress: vm.StakingSCAddress, + ShardId: 0, + AccumulatedFees: big.NewInt(100), + NumSelectedInSuccessBlocks: 1, + LeaderSuccess: 1, + }) + _ = valInfo.Add(&state.ValidatorInfo{ + RewardAddress: vm.FirstDelegationSCAddress, + ShardId: 0, + AccumulatedFees: big.NewInt(100), + NumSelectedInSuccessBlocks: 1, + LeaderSuccess: 1, + }) acc, _ := args.UserAccountsDB.LoadAccount(vm.FirstDelegationSCAddress) userAcc, _ := acc.(state.UserAccountHandler) diff --git a/epochStart/metachain/stakingDataProvider.go 
b/epochStart/metachain/stakingDataProvider.go index bf3faf572b3..722a838193f 100644 --- a/epochStart/metachain/stakingDataProvider.go +++ b/epochStart/metachain/stakingDataProvider.go @@ -16,46 +16,67 @@ import ( ) type ownerStats struct { - numEligible int - numStakedNodes int64 - topUpValue *big.Int - totalStaked *big.Int - eligibleBaseStake *big.Int - eligibleTopUpStake *big.Int - topUpPerNode *big.Int - blsKeys [][]byte + numEligible int + numStakedNodes int64 + numActiveNodes int64 + totalTopUp *big.Int + topUpPerNode *big.Int + totalStaked *big.Int + eligibleBaseStake *big.Int + eligibleTopUpStake *big.Int + eligibleTopUpPerNode *big.Int + blsKeys [][]byte + auctionList []state.ValidatorInfoHandler + qualified bool +} + +type ownerInfoSC struct { + topUpValue *big.Int + totalStakedValue *big.Int + numStakedWaiting *big.Int + blsKeys [][]byte } type stakingDataProvider struct { - mutStakingData sync.RWMutex - cache map[string]*ownerStats - systemVM vmcommon.VMExecutionHandler - totalEligibleStake *big.Int - totalEligibleTopUpStake *big.Int - minNodePrice *big.Int + mutStakingData sync.RWMutex + cache map[string]*ownerStats + systemVM vmcommon.VMExecutionHandler + totalEligibleStake *big.Int + totalEligibleTopUpStake *big.Int + minNodePrice *big.Int + numOfValidatorsInCurrEpoch uint32 + enableEpochsHandler common.EnableEpochsHandler +} + +// StakingDataProviderArgs is a struct placeholder for all arguments required to create a NewStakingDataProvider +type StakingDataProviderArgs struct { + EnableEpochsHandler common.EnableEpochsHandler + SystemVM vmcommon.VMExecutionHandler + MinNodePrice string } // NewStakingDataProvider will create a new instance of a staking data provider able to aid in the final rewards // computation as this will retrieve the staking data from the system VM -func NewStakingDataProvider( - systemVM vmcommon.VMExecutionHandler, - minNodePrice string, -) (*stakingDataProvider, error) { - if check.IfNil(systemVM) { +func NewStakingDataProvider(args StakingDataProviderArgs) (*stakingDataProvider, error) { + if check.IfNil(args.SystemVM) { return nil, epochStart.ErrNilSystemVmInstance } + if check.IfNil(args.EnableEpochsHandler) { + return nil, epochStart.ErrNilEnableEpochsHandler + } - nodePrice, ok := big.NewInt(0).SetString(minNodePrice, 10) + nodePrice, ok := big.NewInt(0).SetString(args.MinNodePrice, 10) if !ok || nodePrice.Cmp(big.NewInt(0)) <= 0 { return nil, epochStart.ErrInvalidMinNodePrice } sdp := &stakingDataProvider{ - systemVM: systemVM, + systemVM: args.SystemVM, cache: make(map[string]*ownerStats), minNodePrice: nodePrice, totalEligibleStake: big.NewInt(0), totalEligibleTopUpStake: big.NewInt(0), + enableEpochsHandler: args.EnableEpochsHandler, } return sdp, nil @@ -67,6 +88,7 @@ func (sdp *stakingDataProvider) Clean() { sdp.cache = make(map[string]*ownerStats) sdp.totalEligibleStake.SetInt64(0) sdp.totalEligibleTopUpStake.SetInt64(0) + sdp.numOfValidatorsInCurrEpoch = 0 sdp.mutStakingData.Unlock() } @@ -91,7 +113,7 @@ func (sdp *stakingDataProvider) GetTotalTopUpStakeEligibleNodes() *big.Int { // GetNodeStakedTopUp returns the owner of provided bls key staking stats for the current epoch func (sdp *stakingDataProvider) GetNodeStakedTopUp(blsKey []byte) (*big.Int, error) { - owner, err := sdp.getBlsKeyOwner(blsKey) + owner, err := sdp.GetBlsKeyOwner(blsKey) if err != nil { log.Debug("GetOwnerStakingStats", "key", hex.EncodeToString(blsKey), "error", err) return nil, err @@ -102,19 +124,17 @@ func (sdp *stakingDataProvider) GetNodeStakedTopUp(blsKey 
[]byte) (*big.Int, err
 		return nil, epochStart.ErrOwnerDoesntHaveEligibleNodesInEpoch
 	}
-	return ownerInfo.topUpPerNode, nil
+	return ownerInfo.eligibleTopUpPerNode, nil
 }

-// PrepareStakingDataForRewards prepares the staking data for the given map of node keys per shard
-func (sdp *stakingDataProvider) PrepareStakingDataForRewards(keys map[uint32][][]byte) error {
+// PrepareStakingData prepares the staking data for the given validators info map
+func (sdp *stakingDataProvider) PrepareStakingData(validatorsMap state.ShardValidatorsInfoMapHandler) error {
 	sdp.Clean()

-	for _, keysList := range keys {
-		for _, blsKey := range keysList {
-			err := sdp.loadDataForBlsKey(blsKey)
-			if err != nil {
-				return err
-			}
+	for _, validator := range validatorsMap.GetAllValidatorsInfo() {
+		err := sdp.loadDataForBlsKey(validator)
+		if err != nil {
+			return err
 		}
 	}

@@ -146,7 +166,7 @@ func (sdp *stakingDataProvider) processStakingData() {
 		totalEligibleStake.Add(totalEligibleStake, ownerEligibleStake)
 		totalEligibleTopUpStake.Add(totalEligibleTopUpStake, owner.eligibleTopUpStake)
-		owner.topUpPerNode = big.NewInt(0).Div(owner.eligibleTopUpStake, ownerEligibleNodes)
+		owner.eligibleTopUpPerNode = big.NewInt(0).Div(owner.eligibleTopUpStake, ownerEligibleNodes)
 	}

 	sdp.totalEligibleTopUpStake = totalEligibleTopUpStake
@@ -154,40 +174,48 @@
 }

 // FillValidatorInfo will fill the validator info for the bls key if it was not already filled
-func (sdp *stakingDataProvider) FillValidatorInfo(blsKey []byte) error {
+func (sdp *stakingDataProvider) FillValidatorInfo(validator state.ValidatorInfoHandler) error {
 	sdp.mutStakingData.Lock()
 	defer sdp.mutStakingData.Unlock()

-	_, err := sdp.getAndFillOwnerStatsFromSC(blsKey)
+	_, err := sdp.getAndFillOwnerStats(validator)
 	return err
 }

-func (sdp *stakingDataProvider) getAndFillOwnerStatsFromSC(blsKey []byte) (*ownerStats, error) {
-	owner, err := sdp.getBlsKeyOwner(blsKey)
+func (sdp *stakingDataProvider) getAndFillOwnerStats(validator state.ValidatorInfoHandler) (*ownerStats, error) {
+	blsKey := validator.GetPublicKey()
+	owner, err := sdp.GetBlsKeyOwner(blsKey)
 	if err != nil {
 		log.Debug("error fill owner stats", "step", "get owner from bls", "key", hex.EncodeToString(blsKey), "error", err)
 		return nil, err
 	}

-	ownerData, err := sdp.getValidatorData(owner)
+	ownerData, err := sdp.fillOwnerData(owner, validator)
 	if err != nil {
 		log.Debug("error fill owner stats", "step", "get owner data", "key", hex.EncodeToString(blsKey), "owner", hex.EncodeToString([]byte(owner)), "error", err)
 		return nil, err
 	}

+	if isValidator(validator) {
+		sdp.numOfValidatorsInCurrEpoch++
+	}
+
 	return ownerData, nil
 }

 // loadDataForBlsKey will be called for each BLS key that took part in the consensus (no matter the shard ID) so the
 // staking data can be recovered from the staking system smart contracts.
 // The function will error if something went wrong. It does change the inner state of the called instance.
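(Editorial aside before the loadDataForBlsKey hunk below: taken together, the changes above alter how callers drive the provider — construction goes through StakingDataProviderArgs and the old per-shard map of BLS keys is replaced by a single validators-info handler. A minimal usage sketch under the assumptions implied by this diff: import paths are inferred from the file paths, the stub system VM only mimics the two contract calls the provider issues per key, and all numeric values are illustrative, not the node's real wiring.)

package main

import (
	"errors"
	"fmt"
	"math/big"

	"github.com/multiversx/mx-chain-go/common"
	"github.com/multiversx/mx-chain-go/epochStart/metachain"
	"github.com/multiversx/mx-chain-go/epochStart/mock"
	"github.com/multiversx/mx-chain-go/state"
	"github.com/multiversx/mx-chain-go/testscommon/enableEpochsHandlerMock"
	vmcommon "github.com/multiversx/mx-chain-vm-common-go"
)

func main() {
	// Hypothetical wiring: the stub answers "getOwner" and "getTotalStakedTopUpStakedBlsKeys",
	// the two system-contract calls made per BLS key; the real node passes the metachain system VM.
	args := metachain.StakingDataProviderArgs{
		EnableEpochsHandler: &enableEpochsHandlerMock.EnableEpochsHandlerStub{},
		MinNodePrice:        "2500",
		SystemVM: &mock.VMExecutionHandlerStub{
			RunSmartContractCallCalled: func(input *vmcommon.ContractCallInput) (*vmcommon.VMOutput, error) {
				switch input.Function {
				case "getOwner":
					return &vmcommon.VMOutput{ReturnData: [][]byte{[]byte("owner address")}}, nil
				case "getTotalStakedTopUpStakedBlsKeys":
					// top-up, total staked, number of staked nodes (illustrative values)
					return &vmcommon.VMOutput{ReturnData: [][]byte{
						big.NewInt(500).Bytes(), big.NewInt(3000).Bytes(), big.NewInt(1).Bytes(),
					}}, nil
				}
				return nil, errors.New("unexpected call")
			},
		},
	}

	sdp, err := metachain.NewStakingDataProvider(args)
	if err != nil {
		fmt.Println("constructor error:", err)
		return
	}

	// A single flat validators-info map replaces the old per-shard map of BLS keys.
	validatorsMap := state.NewShardValidatorsInfoMap()
	_ = validatorsMap.Add(&state.ValidatorInfo{
		PublicKey: []byte("bls key"),
		ShardId:   0,
		List:      string(common.EligibleList),
	})

	if err = sdp.PrepareStakingData(validatorsMap); err != nil {
		fmt.Println("prepare error:", err)
		return
	}

	fmt.Println("validators in current epoch:", sdp.GetNumOfValidatorsInCurrentEpoch())
	fmt.Println("owners tracked:", len(sdp.GetOwnersData()))
}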
-func (sdp *stakingDataProvider) loadDataForBlsKey(blsKey []byte) error { +func (sdp *stakingDataProvider) loadDataForBlsKey(validator state.ValidatorInfoHandler) error { sdp.mutStakingData.Lock() defer sdp.mutStakingData.Unlock() - ownerData, err := sdp.getAndFillOwnerStatsFromSC(blsKey) + ownerData, err := sdp.getAndFillOwnerStats(validator) if err != nil { - log.Debug("error computing rewards for bls key", "step", "get owner data", "key", hex.EncodeToString(blsKey), "error", err) + log.Debug("error computing rewards for bls key", + "step", "get owner data", + "key", hex.EncodeToString(validator.GetPublicKey()), + "error", err) return err } ownerData.numEligible++ @@ -195,7 +223,29 @@ func (sdp *stakingDataProvider) loadDataForBlsKey(blsKey []byte) error { return nil } -func (sdp *stakingDataProvider) getBlsKeyOwner(blsKey []byte) (string, error) { +// GetOwnersData returns all owner stats +func (sdp *stakingDataProvider) GetOwnersData() map[string]*epochStart.OwnerData { + sdp.mutStakingData.RLock() + defer sdp.mutStakingData.RUnlock() + + ret := make(map[string]*epochStart.OwnerData) + for owner, ownerData := range sdp.cache { + ret[owner] = &epochStart.OwnerData{ + NumActiveNodes: ownerData.numActiveNodes, + NumStakedNodes: ownerData.numStakedNodes, + TotalTopUp: big.NewInt(0).SetBytes(ownerData.totalTopUp.Bytes()), + TopUpPerNode: big.NewInt(0).SetBytes(ownerData.topUpPerNode.Bytes()), + AuctionList: make([]state.ValidatorInfoHandler, len(ownerData.auctionList)), + Qualified: ownerData.qualified, + } + copy(ret[owner].AuctionList, ownerData.auctionList) + } + + return ret +} + +// GetBlsKeyOwner returns the owner's public key of the provided bls key +func (sdp *stakingDataProvider) GetBlsKeyOwner(blsKey []byte) (string, error) { vmInput := &vmcommon.ContractCallInput{ VMInput: vmcommon.VMInput{ CallerAddr: vm.ValidatorSCAddress, @@ -221,48 +271,109 @@ func (sdp *stakingDataProvider) getBlsKeyOwner(blsKey []byte) (string, error) { return string(data[0]), nil } -func (sdp *stakingDataProvider) getValidatorData(validatorAddress string) (*ownerStats, error) { - ownerData, exists := sdp.cache[validatorAddress] +func (sdp *stakingDataProvider) fillOwnerData(owner string, validator state.ValidatorInfoHandler) (*ownerStats, error) { + var err error + ownerData, exists := sdp.cache[owner] if exists { - return ownerData, nil + updateOwnerData(ownerData, validator) + } else { + ownerData, err = sdp.getAndFillOwnerDataFromSC(owner, validator) + if err != nil { + return nil, err + } + sdp.cache[owner] = ownerData } - return sdp.getValidatorDataFromStakingSC(validatorAddress) + return ownerData, nil +} + +func updateOwnerData(ownerData *ownerStats, validator state.ValidatorInfoHandler) { + if isInAuction(validator) { + ownerData.numActiveNodes-- + ownerData.auctionList = append(ownerData.auctionList, validator.ShallowClone()) + } } -func (sdp *stakingDataProvider) getValidatorDataFromStakingSC(validatorAddress string) (*ownerStats, error) { - topUpValue, totalStakedValue, numStakedWaiting, blsKeys, err := sdp.getValidatorInfoFromSC(validatorAddress) +func (sdp *stakingDataProvider) getAndFillOwnerDataFromSC(owner string, validator state.ValidatorInfoHandler) (*ownerStats, error) { + ownerInfo, err := sdp.getOwnerInfoFromSC(owner) if err != nil { return nil, err } - ownerData := &ownerStats{ - numEligible: 0, - numStakedNodes: numStakedWaiting.Int64(), - topUpValue: topUpValue, - totalStaked: totalStakedValue, - eligibleBaseStake: big.NewInt(0).Set(sdp.minNodePrice), - eligibleTopUpStake: 
big.NewInt(0), - topUpPerNode: big.NewInt(0), + topUpPerNode := big.NewInt(0) + numStakedNodes := ownerInfo.numStakedWaiting.Int64() + if numStakedNodes == 0 { + log.Debug("stakingDataProvider.fillOwnerData", + "message", epochStart.ErrOwnerHasNoStakedNode, + "owner", hex.EncodeToString([]byte(owner)), + "validator", hex.EncodeToString(validator.GetPublicKey()), + ) + } else { + topUpPerNode = big.NewInt(0).Div(ownerInfo.topUpValue, ownerInfo.numStakedWaiting) } - ownerData.blsKeys = make([][]byte, len(blsKeys)) - copy(ownerData.blsKeys, blsKeys) + ownerData := &ownerStats{ + numEligible: 0, + numStakedNodes: numStakedNodes, + numActiveNodes: numStakedNodes, + totalTopUp: ownerInfo.topUpValue, + topUpPerNode: topUpPerNode, + totalStaked: ownerInfo.totalStakedValue, + eligibleBaseStake: big.NewInt(0).Set(sdp.minNodePrice), + eligibleTopUpStake: big.NewInt(0), + eligibleTopUpPerNode: big.NewInt(0), + qualified: true, + } + err = sdp.checkAndFillOwnerValidatorAuctionData([]byte(owner), ownerData, validator) + if err != nil { + return nil, err + } - sdp.cache[validatorAddress] = ownerData + ownerData.blsKeys = make([][]byte, len(ownerInfo.blsKeys)) + copy(ownerData.blsKeys, ownerInfo.blsKeys) return ownerData, nil } -func (sdp *stakingDataProvider) getValidatorInfoFromSC(validatorAddress string) (*big.Int, *big.Int, *big.Int, [][]byte, error) { - validatorAddressBytes := []byte(validatorAddress) +func (sdp *stakingDataProvider) checkAndFillOwnerValidatorAuctionData( + ownerPubKey []byte, + ownerData *ownerStats, + validator state.ValidatorInfoHandler, +) error { + validatorInAuction := isInAuction(validator) + if !validatorInAuction { + return nil + } + if ownerData.numStakedNodes == 0 { + return fmt.Errorf("stakingDataProvider.checkAndFillOwnerValidatorAuctionData for validator in auction error: %w, owner: %s, node: %s", + epochStart.ErrOwnerHasNoStakedNode, + hex.EncodeToString(ownerPubKey), + hex.EncodeToString(validator.GetPublicKey()), + ) + } + if !sdp.enableEpochsHandler.IsFlagEnabled(common.StakingV4StartedFlag) { + return fmt.Errorf("stakingDataProvider.checkAndFillOwnerValidatorAuctionData for validator in auction error: %w, owner: %s, node: %s", + epochStart.ErrReceivedAuctionValidatorsBeforeStakingV4, + hex.EncodeToString(ownerPubKey), + hex.EncodeToString(validator.GetPublicKey()), + ) + } + + ownerData.numActiveNodes -= 1 + ownerData.auctionList = []state.ValidatorInfoHandler{validator} + + return nil +} + +func (sdp *stakingDataProvider) getOwnerInfoFromSC(owner string) (*ownerInfoSC, error) { + ownerAddressBytes := []byte(owner) vmInput := &vmcommon.ContractCallInput{ VMInput: vmcommon.VMInput{ CallerAddr: vm.EndOfEpochAddress, CallValue: big.NewInt(0), GasProvided: math.MaxInt64, - Arguments: [][]byte{validatorAddressBytes}, + Arguments: [][]byte{ownerAddressBytes}, }, RecipientAddr: vm.ValidatorSCAddress, Function: "getTotalStakedTopUpStakedBlsKeys", @@ -270,41 +381,50 @@ func (sdp *stakingDataProvider) getValidatorInfoFromSC(validatorAddress string) vmOutput, err := sdp.systemVM.RunSmartContractCall(vmInput) if err != nil { - return nil, nil, nil, nil, err + return nil, err } if vmOutput.ReturnCode != vmcommon.Ok { - return nil, nil, nil, nil, fmt.Errorf("%w, error: %v message: %s", epochStart.ErrExecutingSystemScCode, vmOutput.ReturnCode, vmOutput.ReturnMessage) + return nil, fmt.Errorf("%w, error: %v message: %s", epochStart.ErrExecutingSystemScCode, vmOutput.ReturnCode, vmOutput.ReturnMessage) } if len(vmOutput.ReturnData) < 3 { - return nil, nil, nil, nil, 
fmt.Errorf("%w, getTotalStakedTopUpStakedBlsKeys function should have at least three values", epochStart.ErrExecutingSystemScCode) + return nil, fmt.Errorf("%w, getTotalStakedTopUpStakedBlsKeys function should have at least three values", epochStart.ErrExecutingSystemScCode) } topUpValue := big.NewInt(0).SetBytes(vmOutput.ReturnData[0]) totalStakedValue := big.NewInt(0).SetBytes(vmOutput.ReturnData[1]) numStakedWaiting := big.NewInt(0).SetBytes(vmOutput.ReturnData[2]) - return topUpValue, totalStakedValue, numStakedWaiting, vmOutput.ReturnData[3:], nil + return &ownerInfoSC{ + topUpValue: topUpValue, + totalStakedValue: totalStakedValue, + numStakedWaiting: numStakedWaiting, + blsKeys: vmOutput.ReturnData[3:], + }, nil } // ComputeUnQualifiedNodes will compute which nodes are not qualified - do not have enough tokens to be validators -func (sdp *stakingDataProvider) ComputeUnQualifiedNodes(validatorInfos map[uint32][]*state.ValidatorInfo) ([][]byte, map[string][][]byte, error) { +func (sdp *stakingDataProvider) ComputeUnQualifiedNodes(validatorsInfo state.ShardValidatorsInfoMapHandler) ([][]byte, map[string][][]byte, error) { sdp.mutStakingData.Lock() defer sdp.mutStakingData.Unlock() mapOwnersKeys := make(map[string][][]byte) keysToUnStake := make([][]byte, 0) - mapBLSKeyStatus := createMapBLSKeyStatus(validatorInfos) + mapBLSKeyStatus, err := sdp.createMapBLSKeyStatus(validatorsInfo) + if err != nil { + return nil, nil, err + } + for ownerAddress, stakingInfo := range sdp.cache { maxQualified := big.NewInt(0).Div(stakingInfo.totalStaked, sdp.minNodePrice) if maxQualified.Int64() >= stakingInfo.numStakedNodes { continue } - sortedKeys := arrangeBlsKeysByStatus(mapBLSKeyStatus, stakingInfo.blsKeys) + sortedKeys := sdp.arrangeBlsKeysByStatus(mapBLSKeyStatus, stakingInfo.blsKeys) numKeysToUnStake := stakingInfo.numStakedNodes - maxQualified.Int64() - selectedKeys := selectKeysToUnStake(sortedKeys, numKeysToUnStake) + selectedKeys, numRemovedValidators := sdp.selectKeysToUnStake(sortedKeys, numKeysToUnStake) if len(selectedKeys) == 0 { continue } @@ -313,31 +433,44 @@ func (sdp *stakingDataProvider) ComputeUnQualifiedNodes(validatorInfos map[uint3 mapOwnersKeys[ownerAddress] = make([][]byte, len(selectedKeys)) copy(mapOwnersKeys[ownerAddress], selectedKeys) + + stakingInfo.qualified = false + sdp.numOfValidatorsInCurrEpoch -= uint32(numRemovedValidators) } return keysToUnStake, mapOwnersKeys, nil } -func createMapBLSKeyStatus(validatorInfos map[uint32][]*state.ValidatorInfo) map[string]string { +func (sdp *stakingDataProvider) createMapBLSKeyStatus(validatorsInfo state.ShardValidatorsInfoMapHandler) (map[string]string, error) { mapBLSKeyStatus := make(map[string]string) - for _, validatorsInfoSlice := range validatorInfos { - for _, validatorInfo := range validatorsInfoSlice { - mapBLSKeyStatus[string(validatorInfo.PublicKey)] = validatorInfo.List + for _, validator := range validatorsInfo.GetAllValidatorsInfo() { + list := validator.GetList() + pubKey := validator.GetPublicKey() + + if sdp.enableEpochsHandler.IsFlagEnabled(common.StakingV4Step2Flag) && list == string(common.NewList) { + return nil, fmt.Errorf("%w, bls key = %s", + epochStart.ErrReceivedNewListNodeInStakingV4, + hex.EncodeToString(pubKey), + ) } + + mapBLSKeyStatus[string(pubKey)] = list } - return mapBLSKeyStatus + return mapBLSKeyStatus, nil } -func selectKeysToUnStake(sortedKeys map[string][][]byte, numToSelect int64) [][]byte { +func (sdp *stakingDataProvider) selectKeysToUnStake(sortedKeys map[string][][]byte, 
numToSelect int64) ([][]byte, int) { selectedKeys := make([][]byte, 0) - newKeys := sortedKeys[string(common.NewList)] + newNodesList := sdp.getNewNodesList() + + newKeys := sortedKeys[newNodesList] if len(newKeys) > 0 { selectedKeys = append(selectedKeys, newKeys...) } if int64(len(selectedKeys)) >= numToSelect { - return selectedKeys[:numToSelect] + return selectedKeys[:numToSelect], 0 } waitingKeys := sortedKeys[string(common.WaitingList)] @@ -346,7 +479,9 @@ func selectKeysToUnStake(sortedKeys map[string][][]byte, numToSelect int64) [][] } if int64(len(selectedKeys)) >= numToSelect { - return selectedKeys[:numToSelect] + overFlowKeys := len(selectedKeys) - int(numToSelect) + removedWaiting := len(waitingKeys) - overFlowKeys + return selectedKeys[:numToSelect], removedWaiting } eligibleKeys := sortedKeys[string(common.EligibleList)] @@ -355,18 +490,22 @@ func selectKeysToUnStake(sortedKeys map[string][][]byte, numToSelect int64) [][] } if int64(len(selectedKeys)) >= numToSelect { - return selectedKeys[:numToSelect] + overFlowKeys := len(selectedKeys) - int(numToSelect) + removedEligible := len(eligibleKeys) - overFlowKeys + return selectedKeys[:numToSelect], removedEligible + len(waitingKeys) } - return selectedKeys + return selectedKeys, len(eligibleKeys) + len(waitingKeys) } -func arrangeBlsKeysByStatus(mapBlsKeyStatus map[string]string, blsKeys [][]byte) map[string][][]byte { +func (sdp *stakingDataProvider) arrangeBlsKeysByStatus(mapBlsKeyStatus map[string]string, blsKeys [][]byte) map[string][][]byte { sortedKeys := make(map[string][][]byte) + newNodesList := sdp.getNewNodesList() + for _, blsKey := range blsKeys { - blsKeyStatus, ok := mapBlsKeyStatus[string(blsKey)] - if !ok { - sortedKeys[string(common.NewList)] = append(sortedKeys[string(common.NewList)], blsKey) + blsKeyStatus, found := mapBlsKeyStatus[string(blsKey)] + if !found { + sortedKeys[newNodesList] = append(sortedKeys[newNodesList], blsKey) continue } @@ -376,6 +515,23 @@ func arrangeBlsKeysByStatus(mapBlsKeyStatus map[string]string, blsKeys [][]byte) return sortedKeys } +func (sdp *stakingDataProvider) getNewNodesList() string { + newNodesList := string(common.NewList) + if sdp.enableEpochsHandler.IsFlagEnabled(common.StakingV4Step2Flag) { + newNodesList = string(common.AuctionList) + } + + return newNodesList +} + +// GetNumOfValidatorsInCurrentEpoch returns the number of validators(eligible + waiting) in current epoch +func (sdp *stakingDataProvider) GetNumOfValidatorsInCurrentEpoch() uint32 { + sdp.mutStakingData.RLock() + defer sdp.mutStakingData.RUnlock() + + return sdp.numOfValidatorsInCurrEpoch +} + // IsInterfaceNil return true if underlying object is nil func (sdp *stakingDataProvider) IsInterfaceNil() bool { return sdp == nil diff --git a/epochStart/metachain/stakingDataProvider_test.go b/epochStart/metachain/stakingDataProvider_test.go index 257485e8f0c..e3bfc1e6259 100644 --- a/epochStart/metachain/stakingDataProvider_test.go +++ b/epochStart/metachain/stakingDataProvider_test.go @@ -17,28 +17,49 @@ import ( "github.com/multiversx/mx-chain-go/epochStart/mock" "github.com/multiversx/mx-chain-go/state" "github.com/multiversx/mx-chain-go/testscommon" + "github.com/multiversx/mx-chain-go/testscommon/enableEpochsHandlerMock" "github.com/multiversx/mx-chain-go/vm" vmcommon "github.com/multiversx/mx-chain-vm-common-go" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) -func TestNewStakingDataProvider_NilSystemVMShouldErr(t *testing.T) { - t.Parallel() - - sdp, err := 
NewStakingDataProvider(nil, "100000") +const stakingV4Step1EnableEpoch = 444 +const stakingV4Step2EnableEpoch = 445 - assert.True(t, check.IfNil(sdp)) - assert.Equal(t, epochStart.ErrNilSystemVmInstance, err) +func createStakingDataProviderArgs() StakingDataProviderArgs { + return StakingDataProviderArgs{ + EnableEpochsHandler: &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, + SystemVM: &mock.VMExecutionHandlerStub{}, + MinNodePrice: "2500", + } } -func TestNewStakingDataProvider_ShouldWork(t *testing.T) { +func TestNewStakingDataProvider_NilInputPointersShouldErr(t *testing.T) { t.Parallel() - sdp, err := NewStakingDataProvider(&mock.VMExecutionHandlerStub{}, "100000") + t.Run("nil system vm", func(t *testing.T) { + args := createStakingDataProviderArgs() + args.SystemVM = nil + sdp, err := NewStakingDataProvider(args) + assert.True(t, check.IfNil(sdp)) + assert.Equal(t, epochStart.ErrNilSystemVmInstance, err) + }) - assert.False(t, check.IfNil(sdp)) - assert.Nil(t, err) + t.Run("nil epoch notifier", func(t *testing.T) { + args := createStakingDataProviderArgs() + args.EnableEpochsHandler = nil + sdp, err := NewStakingDataProvider(args) + assert.True(t, check.IfNil(sdp)) + assert.Equal(t, vm.ErrNilEnableEpochsHandler, err) + }) + + t.Run("should work", func(t *testing.T) { + args := createStakingDataProviderArgs() + sdp, err := NewStakingDataProvider(args) + assert.False(t, check.IfNil(sdp)) + assert.Nil(t, err) + }) } func TestStakingDataProvider_PrepareDataForBlsKeyGetBlsKeyOwnerErrorsShouldErr(t *testing.T) { @@ -46,7 +67,8 @@ func TestStakingDataProvider_PrepareDataForBlsKeyGetBlsKeyOwnerErrorsShouldErr(t numCall := 0 expectedErr := errors.New("expected error") - sdp, _ := NewStakingDataProvider(&mock.VMExecutionHandlerStub{ + args := createStakingDataProviderArgs() + args.SystemVM = &mock.VMExecutionHandlerStub{ RunSmartContractCallCalled: func(input *vmcommon.ContractCallInput) (*vmcommon.VMOutput, error) { numCall++ if numCall == 1 { @@ -65,17 +87,18 @@ func TestStakingDataProvider_PrepareDataForBlsKeyGetBlsKeyOwnerErrorsShouldErr(t return nil, nil }, - }, "100000") + } + sdp, _ := NewStakingDataProvider(args) - err := sdp.loadDataForBlsKey([]byte("bls key")) + err := sdp.loadDataForBlsKey(&state.ValidatorInfo{PublicKey: []byte("bls key")}) assert.Equal(t, expectedErr, err) - err = sdp.loadDataForBlsKey([]byte("bls key")) + err = sdp.loadDataForBlsKey(&state.ValidatorInfo{PublicKey: []byte("bls key")}) assert.NotNil(t, err) assert.True(t, strings.Contains(err.Error(), epochStart.ErrExecutingSystemScCode.Error())) assert.True(t, strings.Contains(err.Error(), vmcommon.UserError.String())) - err = sdp.loadDataForBlsKey([]byte("bls key")) + err = sdp.loadDataForBlsKey(&state.ValidatorInfo{PublicKey: []byte("bls key")}) assert.NotNil(t, err) assert.True(t, strings.Contains(err.Error(), epochStart.ErrExecutingSystemScCode.Error())) assert.True(t, strings.Contains(err.Error(), "returned exactly one value: the owner address")) @@ -87,7 +110,8 @@ func TestStakingDataProvider_PrepareDataForBlsKeyLoadOwnerDataErrorsShouldErr(t numCall := 0 owner := []byte("owner") expectedErr := errors.New("expected error") - sdp, _ := NewStakingDataProvider(&mock.VMExecutionHandlerStub{ + args := createStakingDataProviderArgs() + args.SystemVM = &mock.VMExecutionHandlerStub{ RunSmartContractCallCalled: func(input *vmcommon.ContractCallInput) (*vmcommon.VMOutput, error) { if input.Function == "getOwner" { return &vmcommon.VMOutput{ @@ -111,17 +135,18 @@ func 
TestStakingDataProvider_PrepareDataForBlsKeyLoadOwnerDataErrorsShouldErr(t } return nil, nil }, - }, "100000") + } + sdp, _ := NewStakingDataProvider(args) - err := sdp.loadDataForBlsKey([]byte("bls key")) + err := sdp.loadDataForBlsKey(&state.ValidatorInfo{PublicKey: []byte("bls key")}) assert.Equal(t, expectedErr, err) - err = sdp.loadDataForBlsKey([]byte("bls key")) + err = sdp.loadDataForBlsKey(&state.ValidatorInfo{PublicKey: []byte("bls key")}) assert.NotNil(t, err) assert.True(t, strings.Contains(err.Error(), epochStart.ErrExecutingSystemScCode.Error())) assert.True(t, strings.Contains(err.Error(), vmcommon.UserError.String())) - err = sdp.loadDataForBlsKey([]byte("bls key")) + err = sdp.loadDataForBlsKey(&state.ValidatorInfo{PublicKey: []byte("bls key")}) assert.NotNil(t, err) assert.True(t, strings.Contains(err.Error(), epochStart.ErrExecutingSystemScCode.Error())) assert.True(t, strings.Contains(err.Error(), "getTotalStakedTopUpStakedBlsKeys function should have at least three values")) @@ -138,12 +163,12 @@ func TestStakingDataProvider_PrepareDataForBlsKeyFromSCShouldWork(t *testing.T) sdp := createStakingDataProviderWithMockArgs(t, owner, topUpVal, stakeVal, &numRunContractCalls) - err := sdp.loadDataForBlsKey([]byte("bls key")) + err := sdp.loadDataForBlsKey(&state.ValidatorInfo{PublicKey: []byte("bls key")}) assert.Nil(t, err) assert.Equal(t, 2, numRunContractCalls) ownerData := sdp.GetFromCache(owner) require.NotNil(t, ownerData) - assert.Equal(t, topUpVal, ownerData.topUpValue) + assert.Equal(t, topUpVal, ownerData.totalTopUp) assert.Equal(t, 1, ownerData.numEligible) } @@ -158,16 +183,16 @@ func TestStakingDataProvider_PrepareDataForBlsKeyCachedResponseShouldWork(t *tes sdp := createStakingDataProviderWithMockArgs(t, owner, topUpVal, stakeVal, &numRunContractCalls) - err := sdp.loadDataForBlsKey([]byte("bls key")) + err := sdp.loadDataForBlsKey(&state.ValidatorInfo{PublicKey: []byte("bls key")}) assert.Nil(t, err) - err = sdp.loadDataForBlsKey([]byte("bls key2")) + err = sdp.loadDataForBlsKey(&state.ValidatorInfo{PublicKey: []byte("bls key2")}) assert.Nil(t, err) assert.Equal(t, 3, numRunContractCalls) ownerData := sdp.GetFromCache(owner) require.NotNil(t, ownerData) - assert.Equal(t, topUpVal, ownerData.topUpValue) + assert.Equal(t, topUpVal, ownerData.totalTopUp) assert.Equal(t, 2, ownerData.numEligible) } @@ -179,11 +204,11 @@ func TestStakingDataProvider_PrepareDataForBlsKeyWithRealSystemVmShouldWork(t *t blsKey := []byte("bls key") sdp := createStakingDataProviderWithRealArgs(t, owner, blsKey, topUpVal) - err := sdp.loadDataForBlsKey(blsKey) + err := sdp.loadDataForBlsKey(&state.ValidatorInfo{PublicKey: blsKey}) assert.Nil(t, err) ownerData := sdp.GetFromCache(owner) require.NotNil(t, ownerData) - assert.Equal(t, topUpVal, ownerData.topUpValue) + assert.Equal(t, topUpVal, ownerData.totalTopUp) assert.Equal(t, 1, ownerData.numEligible) } @@ -224,6 +249,39 @@ func TestStakingDataProvider_ComputeUnQualifiedNodes(t *testing.T) { require.Zero(t, len(ownersWithNotEnoughFunds)) } +func TestStakingDataProvider_ComputeUnQualifiedNodesWithStakingV4ReceivedNewListNode(t *testing.T) { + v0 := &state.ValidatorInfo{ + PublicKey: []byte("blsKey0"), + List: string(common.EligibleList), + RewardAddress: []byte("address0"), + } + v1 := &state.ValidatorInfo{ + PublicKey: []byte("blsKey1"), + List: string(common.NewList), + RewardAddress: []byte("address0"), + } + v2 := &state.ValidatorInfo{ + PublicKey: []byte("blsKey2"), + List: string(common.AuctionList), + RewardAddress: 
[]byte("address1"), + } + + valInfo := state.NewShardValidatorsInfoMap() + _ = valInfo.Add(v0) + _ = valInfo.Add(v1) + _ = valInfo.Add(v2) + + sdp := createStakingDataProviderAndUpdateCache(t, valInfo, big.NewInt(0)) + sdp.enableEpochsHandler = enableEpochsHandlerMock.NewEnableEpochsHandlerStub(common.StakingV4Step2Flag) + + keysToUnStake, ownersWithNotEnoughFunds, err := sdp.ComputeUnQualifiedNodes(valInfo) + require.Error(t, err) + require.True(t, strings.Contains(err.Error(), epochStart.ErrReceivedNewListNodeInStakingV4.Error())) + require.True(t, strings.Contains(err.Error(), hex.EncodeToString(v1.PublicKey))) + require.Empty(t, keysToUnStake) + require.Empty(t, ownersWithNotEnoughFunds) +} + func TestStakingDataProvider_ComputeUnQualifiedNodesWithOwnerNotEnoughFunds(t *testing.T) { nbShards := uint32(3) nbEligible := make(map[uint32]uint32) @@ -259,6 +317,39 @@ func TestStakingDataProvider_ComputeUnQualifiedNodesWithOwnerNotEnoughFunds(t *t require.Equal(t, 1, len(ownersWithNotEnoughFunds)) } +func TestStakingDataProvider_ComputeUnQualifiedNodesWithOwnerNotEnoughFundsWithStakingV4(t *testing.T) { + owner := "address0" + v0 := &state.ValidatorInfo{ + PublicKey: []byte("blsKey0"), + List: string(common.EligibleList), + RewardAddress: []byte(owner), + } + v1 := &state.ValidatorInfo{ + PublicKey: []byte("blsKey1"), + List: string(common.AuctionList), + RewardAddress: []byte(owner), + } + + valInfo := state.NewShardValidatorsInfoMap() + _ = valInfo.Add(v0) + _ = valInfo.Add(v1) + + sdp := createStakingDataProviderAndUpdateCache(t, valInfo, big.NewInt(0)) + sdp.enableEpochsHandler = enableEpochsHandlerMock.NewEnableEpochsHandlerStub(common.StakingV4Step2Flag) + + sdp.cache[owner].blsKeys = append(sdp.cache[owner].blsKeys, []byte("newKey")) + sdp.cache[owner].totalStaked = big.NewInt(2500) + sdp.cache[owner].numStakedNodes++ + + keysToUnStake, ownersWithNotEnoughFunds, err := sdp.ComputeUnQualifiedNodes(valInfo) + require.Nil(t, err) + + expectedUnStakedKeys := [][]byte{[]byte("blsKey1"), []byte("newKey")} + expectedOwnerWithNotEnoughFunds := map[string][][]byte{owner: expectedUnStakedKeys} + require.Equal(t, expectedUnStakedKeys, keysToUnStake) + require.Equal(t, expectedOwnerWithNotEnoughFunds, ownersWithNotEnoughFunds) +} + func TestStakingDataProvider_GetTotalStakeEligibleNodes(t *testing.T) { t.Parallel() @@ -345,13 +436,13 @@ func TestStakingDataProvider_GetNodeStakedTopUpShouldWork(t *testing.T) { sdp := createStakingDataProviderWithMockArgs(t, owner, topUpVal, stakeVal, &numRunContractCalls) expectedOwnerStats := &ownerStats{ - topUpPerNode: big.NewInt(37), + eligibleTopUpPerNode: big.NewInt(37), } sdp.SetInCache(owner, expectedOwnerStats) res, err := sdp.GetNodeStakedTopUp(owner) require.NoError(t, err) - require.Equal(t, expectedOwnerStats.topUpPerNode, res) + require.Equal(t, expectedOwnerStats.eligibleTopUpPerNode, res) } func TestStakingDataProvider_PrepareStakingDataForRewards(t *testing.T) { @@ -365,9 +456,9 @@ func TestStakingDataProvider_PrepareStakingDataForRewards(t *testing.T) { sdp := createStakingDataProviderWithMockArgs(t, owner, topUpVal, stakeVal, &numRunContractCalls) - keys := make(map[uint32][][]byte) - keys[0] = append(keys[0], []byte("owner")) - err := sdp.PrepareStakingDataForRewards(keys) + validatorsMap := state.NewShardValidatorsInfoMap() + _ = validatorsMap.Add(&state.ValidatorInfo{PublicKey: owner, ShardId: 0}) + err := sdp.PrepareStakingData(validatorsMap) require.NoError(t, err) } @@ -382,10 +473,144 @@ func TestStakingDataProvider_FillValidatorInfo(t 
*testing.T) { sdp := createStakingDataProviderWithMockArgs(t, owner, topUpVal, stakeVal, &numRunContractCalls) - err := sdp.FillValidatorInfo([]byte("owner")) + err := sdp.FillValidatorInfo(&state.ValidatorInfo{PublicKey: []byte("bls key")}) require.NoError(t, err) } +func TestCheckAndFillOwnerValidatorAuctionData(t *testing.T) { + t.Parallel() + + t.Run("validator not in auction, expect no error, no owner data update", func(t *testing.T) { + t.Parallel() + args := createStakingDataProviderArgs() + sdp, _ := NewStakingDataProvider(args) + + ownerData := &ownerStats{} + err := sdp.checkAndFillOwnerValidatorAuctionData([]byte("owner"), ownerData, &state.ValidatorInfo{List: string(common.NewList)}) + require.Nil(t, err) + require.Equal(t, &ownerStats{}, ownerData) + }) + + t.Run("validator in auction, but no staked node, expect error", func(t *testing.T) { + t.Parallel() + args := createStakingDataProviderArgs() + sdp, _ := NewStakingDataProvider(args) + + owner := []byte("owner") + ownerData := &ownerStats{numStakedNodes: 0} + validator := &state.ValidatorInfo{PublicKey: []byte("validatorPubKey"), List: string(common.AuctionList)} + + err := sdp.checkAndFillOwnerValidatorAuctionData(owner, ownerData, validator) + require.Error(t, err) + require.True(t, strings.Contains(err.Error(), epochStart.ErrOwnerHasNoStakedNode.Error())) + require.True(t, strings.Contains(err.Error(), hex.EncodeToString(owner))) + require.True(t, strings.Contains(err.Error(), hex.EncodeToString(validator.PublicKey))) + require.Equal(t, &ownerStats{numStakedNodes: 0}, ownerData) + }) + + t.Run("validator in auction, staking v4 not enabled yet, expect error", func(t *testing.T) { + t.Parallel() + args := createStakingDataProviderArgs() + sdp, _ := NewStakingDataProvider(args) + + owner := []byte("owner") + ownerData := &ownerStats{numStakedNodes: 1} + validator := &state.ValidatorInfo{PublicKey: []byte("validatorPubKey"), List: string(common.AuctionList)} + + err := sdp.checkAndFillOwnerValidatorAuctionData(owner, ownerData, validator) + require.Error(t, err) + require.True(t, strings.Contains(err.Error(), epochStart.ErrReceivedAuctionValidatorsBeforeStakingV4.Error())) + require.True(t, strings.Contains(err.Error(), hex.EncodeToString(owner))) + require.True(t, strings.Contains(err.Error(), hex.EncodeToString(validator.PublicKey))) + require.Equal(t, &ownerStats{numStakedNodes: 1}, ownerData) + }) + + t.Run("should update owner's data", func(t *testing.T) { + t.Parallel() + args := createStakingDataProviderArgs() + sdp, _ := NewStakingDataProvider(args) + sdp.enableEpochsHandler = enableEpochsHandlerMock.NewEnableEpochsHandlerStub(common.StakingV4StartedFlag) + + owner := []byte("owner") + ownerData := &ownerStats{numStakedNodes: 3, numActiveNodes: 3} + validator := &state.ValidatorInfo{PublicKey: []byte("validatorPubKey"), List: string(common.AuctionList)} + + err := sdp.checkAndFillOwnerValidatorAuctionData(owner, ownerData, validator) + require.Nil(t, err) + require.Equal(t, &ownerStats{ + numStakedNodes: 3, + numActiveNodes: 2, + auctionList: []state.ValidatorInfoHandler{validator}, + }, ownerData) + }) +} + +func TestSelectKeysToUnStake(t *testing.T) { + t.Parallel() + + t.Run("no validator removed", func(t *testing.T) { + t.Parallel() + args := createStakingDataProviderArgs() + sdp, _ := NewStakingDataProvider(args) + sdp.enableEpochsHandler = enableEpochsHandlerMock.NewEnableEpochsHandlerStub(common.StakingV4Step2Flag) + + sortedKeys := map[string][][]byte{ + string(common.AuctionList): {[]byte("pk0")}, + } + 
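// Editorial note, a hedged reading of the reworked selectKeysToUnStake exercised by these
// sub-tests: keys are drained from the auction/new list first, then waiting, then eligible,
// and the second return value counts only the waiting and eligible keys that end up selected,
// i.e. nodes that are validators in the current epoch. Selecting a single auction-list key,
// as below, is therefore expected to report zero removed validators.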
unStakedKeys, removedValidators := sdp.selectKeysToUnStake(sortedKeys, 2) + require.Equal(t, [][]byte{[]byte("pk0")}, unStakedKeys) + require.Equal(t, 0, removedValidators) + }) + + t.Run("overflow from waiting", func(t *testing.T) { + t.Parallel() + args := createStakingDataProviderArgs() + sdp, _ := NewStakingDataProvider(args) + sdp.enableEpochsHandler = enableEpochsHandlerMock.NewEnableEpochsHandlerStub(common.StakingV4Step2Flag) + + sortedKeys := map[string][][]byte{ + string(common.AuctionList): {[]byte("pk0")}, + string(common.EligibleList): {[]byte("pk2")}, + string(common.WaitingList): {[]byte("pk3"), []byte("pk4"), []byte("pk5")}, + } + unStakedKeys, removedValidators := sdp.selectKeysToUnStake(sortedKeys, 2) + require.Equal(t, [][]byte{[]byte("pk0"), []byte("pk3")}, unStakedKeys) + require.Equal(t, 1, removedValidators) + }) + + t.Run("overflow from eligible", func(t *testing.T) { + t.Parallel() + args := createStakingDataProviderArgs() + sdp, _ := NewStakingDataProvider(args) + sdp.enableEpochsHandler = enableEpochsHandlerMock.NewEnableEpochsHandlerStub(common.StakingV4Step2Flag) + + sortedKeys := map[string][][]byte{ + string(common.AuctionList): {[]byte("pk0")}, + string(common.EligibleList): {[]byte("pk1"), []byte("pk2")}, + string(common.WaitingList): {[]byte("pk4"), []byte("pk5")}, + } + unStakedKeys, removedValidators := sdp.selectKeysToUnStake(sortedKeys, 4) + require.Equal(t, [][]byte{[]byte("pk0"), []byte("pk4"), []byte("pk5"), []byte("pk1")}, unStakedKeys) + require.Equal(t, 3, removedValidators) + }) + + t.Run("no overflow", func(t *testing.T) { + t.Parallel() + args := createStakingDataProviderArgs() + sdp, _ := NewStakingDataProvider(args) + sdp.enableEpochsHandler = enableEpochsHandlerMock.NewEnableEpochsHandlerStub(common.StakingV4Step2Flag) + + sortedKeys := map[string][][]byte{ + string(common.AuctionList): {[]byte("pk0")}, + string(common.EligibleList): {[]byte("pk1")}, + string(common.WaitingList): {[]byte("pk2")}, + } + unStakedKeys, removedValidators := sdp.selectKeysToUnStake(sortedKeys, 3) + require.Equal(t, [][]byte{[]byte("pk0"), []byte("pk2"), []byte("pk1")}, unStakedKeys) + require.Equal(t, 2, removedValidators) + }) +} + func createStakingDataProviderWithMockArgs( t *testing.T, owner []byte, @@ -393,7 +618,8 @@ func createStakingDataProviderWithMockArgs( stakingVal *big.Int, numRunContractCalls *int, ) *stakingDataProvider { - sdp, err := NewStakingDataProvider(&mock.VMExecutionHandlerStub{ + args := createStakingDataProviderArgs() + args.SystemVM = &mock.VMExecutionHandlerStub{ RunSmartContractCallCalled: func(input *vmcommon.ContractCallInput) (*vmcommon.VMOutput, error) { *numRunContractCalls++ switch input.Function { @@ -417,7 +643,8 @@ func createStakingDataProviderWithMockArgs( return nil, errors.New("unexpected call") }, - }, "100000") + } + sdp, err := NewStakingDataProvider(args) require.Nil(t, err) return sdp @@ -435,7 +662,9 @@ func createStakingDataProviderWithRealArgs(t *testing.T, owner []byte, blsKey [] doStake(t, s.systemVM, s.userAccountsDB, owner, big.NewInt(0).Add(big.NewInt(1000), topUpVal), blsKey) - sdp, _ := NewStakingDataProvider(s.systemVM, "100000") + argsStakingDataProvider := createStakingDataProviderArgs() + argsStakingDataProvider.SystemVM = s.systemVM + sdp, _ := NewStakingDataProvider(argsStakingDataProvider) return sdp } @@ -464,27 +693,28 @@ func saveOutputAccounts(t *testing.T, accountsDB state.AccountsAdapter, vmOutput require.Nil(t, err) } -func createStakingDataProviderAndUpdateCache(t *testing.T, validatorsInfo 
map[uint32][]*state.ValidatorInfo, topUpValue *big.Int) *stakingDataProvider { - +func createStakingDataProviderAndUpdateCache(t *testing.T, validatorsInfo state.ShardValidatorsInfoMapHandler, topUpValue *big.Int) *stakingDataProvider { args, _ := createFullArgumentsForSystemSCProcessing(config.EnableEpochs{ StakingV2EnableEpoch: 1, }, testscommon.CreateMemUnit()) args.EpochNotifier.CheckEpoch(&testscommon.HeaderHandlerStub{ EpochField: 1, }) - sdp, _ := NewStakingDataProvider(args.SystemVM, "2500") + + argsStakingDataProvider := createStakingDataProviderArgs() + argsStakingDataProvider.SystemVM = args.SystemVM + sdp, _ := NewStakingDataProvider(argsStakingDataProvider) args.StakingDataProvider = sdp s, _ := NewSystemSCProcessor(args) require.NotNil(t, s) - for _, valsList := range validatorsInfo { - for _, valInfo := range valsList { - stake := big.NewInt(0).Add(big.NewInt(2500), topUpValue) - if valInfo.List != string(common.LeavingList) && valInfo.List != string(common.InactiveList) { - doStake(t, s.systemVM, s.userAccountsDB, valInfo.RewardAddress, stake, valInfo.PublicKey) - } - updateCache(sdp, valInfo.RewardAddress, valInfo.PublicKey, valInfo.List, stake) + for _, valInfo := range validatorsInfo.GetAllValidatorsInfo() { + stake := big.NewInt(0).Add(big.NewInt(2500), topUpValue) + if valInfo.GetList() != string(common.LeavingList) && valInfo.GetList() != string(common.InactiveList) { + doStake(t, s.systemVM, s.userAccountsDB, valInfo.GetRewardAddress(), stake, valInfo.GetPublicKey()) } + updateCache(sdp, valInfo.GetRewardAddress(), valInfo.GetPublicKey(), valInfo.GetList(), stake) + } return sdp @@ -495,14 +725,14 @@ func updateCache(sdp *stakingDataProvider, ownerAddress []byte, blsKey []byte, l if owner == nil { owner = &ownerStats{ - numEligible: 0, - numStakedNodes: 0, - topUpValue: big.NewInt(0), - totalStaked: big.NewInt(0), - eligibleBaseStake: big.NewInt(0), - eligibleTopUpStake: big.NewInt(0), - topUpPerNode: big.NewInt(0), - blsKeys: nil, + numEligible: 0, + numStakedNodes: 0, + totalTopUp: big.NewInt(0), + totalStaked: big.NewInt(0), + eligibleBaseStake: big.NewInt(0), + eligibleTopUpStake: big.NewInt(0), + eligibleTopUpPerNode: big.NewInt(0), + blsKeys: nil, } } @@ -518,12 +748,12 @@ func updateCache(sdp *stakingDataProvider, ownerAddress []byte, blsKey []byte, l sdp.cache[string(ownerAddress)] = owner } -func createValidatorsInfo(nbShards uint32, nbEligible, nbWaiting, nbLeaving, nbInactive map[uint32]uint32) map[uint32][]*state.ValidatorInfo { - validatorsInfo := make(map[uint32][]*state.ValidatorInfo) +func createValidatorsInfo(nbShards uint32, nbEligible, nbWaiting, nbLeaving, nbInactive map[uint32]uint32) state.ShardValidatorsInfoMapHandler { + validatorsInfo := state.NewShardValidatorsInfoMap() shardMap := shardsMap(nbShards) for shardID := range shardMap { - valInfoList := make([]*state.ValidatorInfo, 0) + valInfoList := make([]state.ValidatorInfoHandler, 0) for eligible := uint32(0); eligible < nbEligible[shardID]; eligible++ { vInfo := &state.ValidatorInfo{ PublicKey: []byte(fmt.Sprintf("blsKey%s%d%d", common.EligibleList, shardID, eligible)), @@ -561,7 +791,7 @@ func createValidatorsInfo(nbShards uint32, nbEligible, nbWaiting, nbLeaving, nbI } valInfoList = append(valInfoList, vInfo) } - validatorsInfo[shardID] = valInfoList + _ = validatorsInfo.SetValidatorsInShard(shardID, valInfoList) } return validatorsInfo } diff --git a/epochStart/metachain/systemSCs.go b/epochStart/metachain/systemSCs.go index 39bfa4c2e41..a0bd2a3402d 100644 --- 
a/epochStart/metachain/systemSCs.go +++ b/epochStart/metachain/systemSCs.go @@ -1,30 +1,21 @@ package metachain import ( - "bytes" - "context" "fmt" "math" "math/big" - "sort" "github.com/multiversx/mx-chain-core-go/core" - "github.com/multiversx/mx-chain-core-go/core/atomic" "github.com/multiversx/mx-chain-core-go/core/check" "github.com/multiversx/mx-chain-core-go/data" - "github.com/multiversx/mx-chain-core-go/data/block" "github.com/multiversx/mx-chain-core-go/marshal" "github.com/multiversx/mx-chain-go/common" - "github.com/multiversx/mx-chain-go/common/errChan" - vInfo "github.com/multiversx/mx-chain-go/common/validatorInfo" - "github.com/multiversx/mx-chain-go/config" "github.com/multiversx/mx-chain-go/epochStart" "github.com/multiversx/mx-chain-go/process" "github.com/multiversx/mx-chain-go/sharding" "github.com/multiversx/mx-chain-go/sharding/nodesCoordinator" "github.com/multiversx/mx-chain-go/state" "github.com/multiversx/mx-chain-go/vm" - "github.com/multiversx/mx-chain-go/vm/systemSmartContracts" vmcommon "github.com/multiversx/mx-chain-vm-common-go" ) @@ -41,107 +32,41 @@ type ArgsNewEpochStartSystemSCProcessing struct { EndOfEpochCallerAddress []byte StakingSCAddress []byte - MaxNodesEnableConfig []config.MaxNodesChangeConfig ESDTOwnerAddressBytes []byte - GenesisNodesConfig sharding.GenesisNodesSetupHandler - EpochNotifier process.EpochNotifier - NodesConfigProvider epochStart.NodesConfigProvider - StakingDataProvider epochStart.StakingDataProvider - EnableEpochsHandler common.EnableEpochsHandler + GenesisNodesConfig sharding.GenesisNodesSetupHandler + EpochNotifier process.EpochNotifier + NodesConfigProvider epochStart.NodesConfigProvider + StakingDataProvider epochStart.StakingDataProvider + AuctionListSelector epochStart.AuctionListSelector + MaxNodesChangeConfigProvider epochStart.MaxNodesChangeConfigProvider + EnableEpochsHandler common.EnableEpochsHandler } type systemSCProcessor struct { - systemVM vmcommon.VMExecutionHandler - userAccountsDB state.AccountsAdapter - marshalizer marshal.Marshalizer - peerAccountsDB state.AccountsAdapter - chanceComputer nodesCoordinator.ChanceComputer - shardCoordinator sharding.Coordinator - startRating uint32 - validatorInfoCreator epochStart.ValidatorInfoCreator - genesisNodesConfig sharding.GenesisNodesSetupHandler - nodesConfigProvider epochStart.NodesConfigProvider - stakingDataProvider epochStart.StakingDataProvider - endOfEpochCallerAddress []byte - stakingSCAddress []byte - maxNodesEnableConfig []config.MaxNodesChangeConfig - maxNodes uint32 - flagChangeMaxNodesEnabled atomic.Flag - esdtOwnerAddressBytes []byte - mapNumSwitchedPerShard map[uint32]uint32 - mapNumSwitchablePerShard map[uint32]uint32 - enableEpochsHandler common.EnableEpochsHandler -} - -type validatorList []*state.ValidatorInfo - -// Len will return the length of the validatorList -func (v validatorList) Len() int { return len(v) } - -// Swap will interchange the objects on input indexes -func (v validatorList) Swap(i, j int) { v[i], v[j] = v[j], v[i] } - -// Less will return true if object on index i should appear before object in index j -// Sorting of validators should be by index and public key -func (v validatorList) Less(i, j int) bool { - if v[i].TempRating == v[j].TempRating { - if v[i].Index == v[j].Index { - return bytes.Compare(v[i].PublicKey, v[j].PublicKey) < 0 - } - return v[i].Index < v[j].Index - } - return v[i].TempRating < v[j].TempRating + *legacySystemSCProcessor + auctionListSelector epochStart.AuctionListSelector + enableEpochsHandler 
common.EnableEpochsHandler } // NewSystemSCProcessor creates the end of epoch system smart contract processor func NewSystemSCProcessor(args ArgsNewEpochStartSystemSCProcessing) (*systemSCProcessor, error) { - if check.IfNilReflect(args.SystemVM) { - return nil, epochStart.ErrNilSystemVM - } - if check.IfNil(args.UserAccountsDB) { - return nil, epochStart.ErrNilAccountsDB - } - if check.IfNil(args.PeerAccountsDB) { - return nil, epochStart.ErrNilAccountsDB - } - if check.IfNil(args.Marshalizer) { - return nil, epochStart.ErrNilMarshalizer - } - if check.IfNil(args.ValidatorInfoCreator) { - return nil, epochStart.ErrNilValidatorInfoProcessor - } - if len(args.EndOfEpochCallerAddress) == 0 { - return nil, epochStart.ErrNilEndOfEpochCallerAddress - } - if len(args.StakingSCAddress) == 0 { - return nil, epochStart.ErrNilStakingSCAddress - } - if check.IfNil(args.ChanceComputer) { - return nil, epochStart.ErrNilChanceComputer - } if check.IfNil(args.EpochNotifier) { return nil, epochStart.ErrNilEpochStartNotifier } - if check.IfNil(args.GenesisNodesConfig) { - return nil, epochStart.ErrNilGenesisNodesConfig - } - if check.IfNil(args.NodesConfigProvider) { - return nil, epochStart.ErrNilNodesConfigProvider + if check.IfNil(args.AuctionListSelector) { + return nil, epochStart.ErrNilAuctionListSelector } - if check.IfNil(args.StakingDataProvider) { - return nil, epochStart.ErrNilStakingDataProvider - } - if check.IfNil(args.ShardCoordinator) { - return nil, epochStart.ErrNilShardCoordinator - } - if len(args.ESDTOwnerAddressBytes) == 0 { - return nil, epochStart.ErrEmptyESDTOwnerAddress + + legacy, err := newLegacySystemSCProcessor(args) + if err != nil { + return nil, err } if check.IfNil(args.EnableEpochsHandler) { return nil, epochStart.ErrNilEnableEpochsHandler } - err := core.CheckHandlerCompatibility(args.EnableEpochsHandler, []core.EnableEpochFlag{ + + err = core.CheckHandlerCompatibility(args.EnableEpochsHandler, []core.EnableEpochFlag{ common.SwitchHysteresisForMinNodesFlagInSpecificEpochOnly, common.StakingV2OwnerFlagInSpecificEpochOnly, common.CorrectLastUnJailedFlagInSpecificEpochOnly, @@ -152,133 +77,91 @@ func NewSystemSCProcessor(args ArgsNewEpochStartSystemSCProcessing) (*systemSCPr common.ESDTFlagInSpecificEpochOnly, common.GovernanceFlag, common.SaveJailedAlwaysFlag, + common.StakingV4Step1Flag, + common.StakingV4Step2Flag, + common.StakingQueueFlag, + common.StakingV4StartedFlag, + common.DelegationSmartContractFlagInSpecificEpochOnly, + common.GovernanceFlagInSpecificEpochOnly, }) if err != nil { return nil, err } s := &systemSCProcessor{ - systemVM: args.SystemVM, - userAccountsDB: args.UserAccountsDB, - peerAccountsDB: args.PeerAccountsDB, - marshalizer: args.Marshalizer, - startRating: args.StartRating, - validatorInfoCreator: args.ValidatorInfoCreator, - genesisNodesConfig: args.GenesisNodesConfig, - endOfEpochCallerAddress: args.EndOfEpochCallerAddress, - stakingSCAddress: args.StakingSCAddress, - chanceComputer: args.ChanceComputer, - mapNumSwitchedPerShard: make(map[uint32]uint32), - mapNumSwitchablePerShard: make(map[uint32]uint32), - stakingDataProvider: args.StakingDataProvider, - nodesConfigProvider: args.NodesConfigProvider, - shardCoordinator: args.ShardCoordinator, - esdtOwnerAddressBytes: args.ESDTOwnerAddressBytes, - enableEpochsHandler: args.EnableEpochsHandler, + legacySystemSCProcessor: legacy, + auctionListSelector: args.AuctionListSelector, + enableEpochsHandler: args.EnableEpochsHandler, } - s.maxNodesEnableConfig = make([]config.MaxNodesChangeConfig, 
len(args.MaxNodesEnableConfig)) - copy(s.maxNodesEnableConfig, args.MaxNodesEnableConfig) - sort.Slice(s.maxNodesEnableConfig, func(i, j int) bool { - return s.maxNodesEnableConfig[i].EpochEnable < s.maxNodesEnableConfig[j].EpochEnable - }) - args.EpochNotifier.RegisterNotifyHandler(s) return s, nil } // ProcessSystemSmartContract does all the processing at end of epoch in case of system smart contract func (s *systemSCProcessor) ProcessSystemSmartContract( - validatorInfos map[uint32][]*state.ValidatorInfo, - nonce uint64, - epoch uint32, + validatorsInfoMap state.ShardValidatorsInfoMapHandler, + header data.HeaderHandler, ) error { - if s.enableEpochsHandler.IsFlagEnabled(common.SwitchHysteresisForMinNodesFlagInSpecificEpochOnly) { - err := s.updateSystemSCConfigMinNodes() - if err != nil { - return err - } + err := checkNilInputValues(validatorsInfoMap, header) + if err != nil { + return err } - if s.enableEpochsHandler.IsFlagEnabled(common.StakingV2OwnerFlagInSpecificEpochOnly) { - err := s.updateOwnersForBlsKeys() - if err != nil { - return err - } + err = s.processLegacy(validatorsInfoMap, header.GetNonce(), header.GetEpoch()) + if err != nil { + return err } + return s.processWithNewFlags(validatorsInfoMap, header) +} - if s.flagChangeMaxNodesEnabled.IsSet() { - err := s.updateMaxNodes(validatorInfos, nonce) - if err != nil { - return err - } +func checkNilInputValues(validatorsInfoMap state.ShardValidatorsInfoMapHandler, header data.HeaderHandler) error { + if check.IfNil(header) { + return process.ErrNilHeaderHandler } - - if s.enableEpochsHandler.IsFlagEnabled(common.CorrectLastUnJailedFlagInSpecificEpochOnly) { - err := s.resetLastUnJailed() - if err != nil { - return err - } + if validatorsInfoMap == nil { + return fmt.Errorf("systemSCProcessor.ProcessSystemSmartContract : %w, header nonce: %d ", + errNilValidatorsInfoMap, header.GetNonce()) } - if s.enableEpochsHandler.IsFlagEnabled(common.DelegationSmartContractFlag) { - err := s.initDelegationSystemSC() - if err != nil { - return err - } - } + return nil +} - if s.enableEpochsHandler.IsFlagEnabled(common.CorrectLastUnJailedFlag) { - err := s.cleanAdditionalQueue() +func (s *systemSCProcessor) processWithNewFlags( + validatorsInfoMap state.ShardValidatorsInfoMapHandler, + header data.HeaderHandler, +) error { + if s.enableEpochsHandler.IsFlagEnabled(common.GovernanceFlagInSpecificEpochOnly) { + err := s.updateToGovernanceV2() if err != nil { return err } } - if s.enableEpochsHandler.IsFlagEnabled(common.SwitchJailWaitingFlag) { - err := s.computeNumWaitingPerShard(validatorInfos) - if err != nil { - return err - } - - err = s.swapJailedWithWaiting(validatorInfos) + if s.enableEpochsHandler.IsFlagEnabled(common.StakingV4Step1Flag) { + err := s.stakeNodesFromQueue(validatorsInfoMap, math.MaxUint32, header.GetNonce(), common.AuctionList) if err != nil { return err } } - if s.enableEpochsHandler.IsFlagEnabled(common.StakingV2Flag) { - err := s.prepareRewardsData(validatorInfos) - if err != nil { - return err - } - - err = s.fillStakingDataForNonEligible(validatorInfos) + if s.enableEpochsHandler.IsFlagEnabled(common.StakingV4Step2Flag) { + err := s.prepareStakingDataForEligibleNodes(validatorsInfoMap) if err != nil { return err } - numUnStaked, err := s.unStakeNodesWithNotEnoughFunds(validatorInfos, epoch) + err = s.fillStakingDataForNonEligible(validatorsInfoMap) if err != nil { return err } - err = s.stakeNodesFromQueue(validatorInfos, numUnStaked, nonce) + err = s.unStakeNodesWithNotEnoughFundsWithStakingV4(validatorsInfoMap, 
header.GetEpoch()) if err != nil { return err } - } - - if s.enableEpochsHandler.IsFlagEnabled(common.ESDTFlagInSpecificEpochOnly) { - err := s.initESDT() - if err != nil { - //not a critical error - log.Error("error while initializing ESDT", "err", err) - } - } - if s.enableEpochsHandler.IsFlagEnabled(common.GovernanceFlag) { - err := s.updateToGovernanceV2() + err = s.auctionListSelector.SelectNodesFromAuctionList(validatorsInfoMap, header.GetPrevRandSeed()) if err != nil { return err } @@ -287,1162 +170,73 @@ func (s *systemSCProcessor) ProcessSystemSmartContract( return nil } -// ToggleUnStakeUnBond will pause/unPause the unStake/unBond functions on the validator system sc -func (s *systemSCProcessor) ToggleUnStakeUnBond(value bool) error { - if !s.enableEpochsHandler.IsFlagEnabled(common.StakingV2Flag) { - return nil - } - - vmInput := &vmcommon.ContractCallInput{ - VMInput: vmcommon.VMInput{ - CallerAddr: s.endOfEpochCallerAddress, - Arguments: nil, - CallValue: big.NewInt(0), - }, - RecipientAddr: vm.ValidatorSCAddress, - Function: "unPauseUnStakeUnBond", - } - - if value { - vmInput.Function = "pauseUnStakeUnBond" - } - - vmOutput, err := s.systemVM.RunSmartContractCall(vmInput) - if err != nil { - return err - } - - if vmOutput.ReturnCode != vmcommon.Ok { - return epochStart.ErrSystemValidatorSCCall - } - - err = s.processSCOutputAccounts(vmOutput) - if err != nil { - return err - } - - return nil -} - -func (s *systemSCProcessor) unStakeNodesWithNotEnoughFunds( - validatorInfos map[uint32][]*state.ValidatorInfo, +func (s *systemSCProcessor) unStakeNodesWithNotEnoughFundsWithStakingV4( + validatorsInfoMap state.ShardValidatorsInfoMapHandler, epoch uint32, -) (uint32, error) { - nodesToUnStake, mapOwnersKeys, err := s.stakingDataProvider.ComputeUnQualifiedNodes(validatorInfos) +) error { + nodesToUnStake, mapOwnersKeys, err := s.stakingDataProvider.ComputeUnQualifiedNodes(validatorsInfoMap) if err != nil { - return 0, err + return err } - nodesUnStakedFromAdditionalQueue := uint32(0) - log.Debug("unStake nodes with not enough funds", "num", len(nodesToUnStake)) for _, blsKey := range nodesToUnStake { log.Debug("unStake at end of epoch for node", "blsKey", blsKey) err = s.unStakeOneNode(blsKey, epoch) - if err != nil { - return 0, err - } - - validatorInfo := getValidatorInfoWithBLSKey(validatorInfos, blsKey) - if validatorInfo == nil { - nodesUnStakedFromAdditionalQueue++ - log.Debug("unStaked node which was in additional queue", "blsKey", blsKey) - continue - } - - validatorInfo.List = string(common.LeavingList) - } - - err = s.updateDelegationContracts(mapOwnersKeys) - if err != nil { - return 0, err - } - - nodesToStakeFromQueue := uint32(len(nodesToUnStake)) - if s.enableEpochsHandler.IsFlagEnabled(common.CorrectLastUnJailedFlag) { - nodesToStakeFromQueue -= nodesUnStakedFromAdditionalQueue - } - - log.Debug("stake nodes from waiting list", "num", nodesToStakeFromQueue) - return nodesToStakeFromQueue, nil -} - -func (s *systemSCProcessor) unStakeOneNode(blsKey []byte, epoch uint32) error { - vmInput := &vmcommon.ContractCallInput{ - VMInput: vmcommon.VMInput{ - CallerAddr: s.endOfEpochCallerAddress, - Arguments: [][]byte{blsKey}, - CallValue: big.NewInt(0), - }, - RecipientAddr: s.stakingSCAddress, - Function: "unStakeAtEndOfEpoch", - } - - vmOutput, err := s.systemVM.RunSmartContractCall(vmInput) - if err != nil { - return err - } - if vmOutput.ReturnCode != vmcommon.Ok { - log.Debug("unStakeOneNode", "returnMessage", vmOutput.ReturnMessage, "returnCode", 
vmOutput.ReturnCode.String()) - return epochStart.ErrUnStakeExecuteError - } - - err = s.processSCOutputAccounts(vmOutput) - if err != nil { - return err - } - - account, errExists := s.peerAccountsDB.GetExistingAccount(blsKey) - if errExists != nil { - return nil - } - - peerAccount, ok := account.(state.PeerAccountHandler) - if !ok { - return epochStart.ErrWrongTypeAssertion - } - - peerAccount.SetListAndIndex(peerAccount.GetShardId(), string(common.LeavingList), peerAccount.GetIndexInList()) - peerAccount.SetUnStakedEpoch(epoch) - err = s.peerAccountsDB.SaveAccount(peerAccount) - if err != nil { - return err - } - - return nil -} - -func (s *systemSCProcessor) updateDelegationContracts(mapOwnerKeys map[string][][]byte) error { - sortedDelegationsSCs := make([]string, 0, len(mapOwnerKeys)) - for address := range mapOwnerKeys { - shardId := s.shardCoordinator.ComputeId([]byte(address)) - if shardId != core.MetachainShardId { - continue - } - sortedDelegationsSCs = append(sortedDelegationsSCs, address) - } - - sort.Slice(sortedDelegationsSCs, func(i, j int) bool { - return sortedDelegationsSCs[i] < sortedDelegationsSCs[j] - }) - - for _, address := range sortedDelegationsSCs { - vmInput := &vmcommon.ContractCallInput{ - VMInput: vmcommon.VMInput{ - CallerAddr: s.endOfEpochCallerAddress, - Arguments: mapOwnerKeys[address], - CallValue: big.NewInt(0), - }, - RecipientAddr: []byte(address), - Function: "unStakeAtEndOfEpoch", - } - - vmOutput, err := s.systemVM.RunSmartContractCall(vmInput) - if err != nil { - return err - } - if vmOutput.ReturnCode != vmcommon.Ok { - log.Debug("unStakeAtEndOfEpoch", "returnMessage", vmOutput.ReturnMessage, "returnCode", vmOutput.ReturnCode.String()) - return epochStart.ErrUnStakeExecuteError - } - - err = s.processSCOutputAccounts(vmOutput) if err != nil { return err } - } - - return nil -} - -func getValidatorInfoWithBLSKey(validatorInfos map[uint32][]*state.ValidatorInfo, blsKey []byte) *state.ValidatorInfo { - for _, validatorsInfoSlice := range validatorInfos { - for _, validatorInfo := range validatorsInfoSlice { - if bytes.Equal(validatorInfo.PublicKey, blsKey) { - return validatorInfo - } - } - } - return nil -} - -func (s *systemSCProcessor) fillStakingDataForNonEligible(validatorInfos map[uint32][]*state.ValidatorInfo) error { - for shId, validatorsInfoSlice := range validatorInfos { - newList := make([]*state.ValidatorInfo, 0, len(validatorsInfoSlice)) - deleteCalled := false - - for _, validatorInfo := range validatorsInfoSlice { - if vInfo.WasEligibleInCurrentEpoch(validatorInfo) { - newList = append(newList, validatorInfo) - continue - } - - err := s.stakingDataProvider.FillValidatorInfo(validatorInfo.PublicKey) - if err != nil { - deleteCalled = true - - log.Error("fillStakingDataForNonEligible", "error", err) - if len(validatorInfo.List) > 0 { - return err - } - - err = s.peerAccountsDB.RemoveAccount(validatorInfo.PublicKey) - if err != nil { - log.Error("fillStakingDataForNonEligible removeAccount", "error", err) - } - - continue - } - - newList = append(newList, validatorInfo) - } - - if deleteCalled { - validatorInfos[shId] = newList - } - } - - return nil -} - -func (s *systemSCProcessor) prepareRewardsData( - validatorsInfo map[uint32][]*state.ValidatorInfo, -) error { - eligibleNodesKeys := s.getEligibleNodesKeyMapOfType(validatorsInfo) - err := s.prepareStakingDataForRewards(eligibleNodesKeys) - if err != nil { - return err - } - - return nil -} - -func (s *systemSCProcessor) prepareStakingDataForRewards(eligibleNodesKeys 
map[uint32][][]byte) error { - sw := core.NewStopWatch() - sw.Start("prepareStakingDataForRewards") - defer func() { - sw.Stop("prepareStakingDataForRewards") - log.Debug("systemSCProcessor.prepareStakingDataForRewards time measurements", sw.GetMeasurements()...) - }() - - return s.stakingDataProvider.PrepareStakingDataForRewards(eligibleNodesKeys) -} - -func (s *systemSCProcessor) getEligibleNodesKeyMapOfType( - validatorsInfo map[uint32][]*state.ValidatorInfo, -) map[uint32][][]byte { - eligibleNodesKeys := make(map[uint32][][]byte) - for shardID, validatorsInfoSlice := range validatorsInfo { - eligibleNodesKeys[shardID] = make([][]byte, 0, s.nodesConfigProvider.ConsensusGroupSize(shardID)) - for _, validatorInfo := range validatorsInfoSlice { - if vInfo.WasEligibleInCurrentEpoch(validatorInfo) { - eligibleNodesKeys[shardID] = append(eligibleNodesKeys[shardID], validatorInfo.PublicKey) - } - } - } - - return eligibleNodesKeys -} - -func getRewardsMiniBlockForMeta(miniBlocks block.MiniBlockSlice) *block.MiniBlock { - for _, miniBlock := range miniBlocks { - if miniBlock.Type != block.RewardsBlock { - continue - } - if miniBlock.ReceiverShardID != core.MetachainShardId { - continue - } - return miniBlock - } - return nil -} - -// ProcessDelegationRewards will process the rewards which are directed towards the delegation system smart contracts -func (s *systemSCProcessor) ProcessDelegationRewards( - miniBlocks block.MiniBlockSlice, - txCache epochStart.TransactionCacher, -) error { - if txCache == nil { - return epochStart.ErrNilLocalTxCache - } - - rwdMb := getRewardsMiniBlockForMeta(miniBlocks) - if rwdMb == nil { - return nil - } - for _, txHash := range rwdMb.TxHashes { - rwdTx, err := txCache.GetTx(txHash) - if err != nil { - return err + validatorInfo := validatorsInfoMap.GetValidator(blsKey) + if validatorInfo == nil { + return fmt.Errorf( + "%w in systemSCProcessor.unStakeNodesWithNotEnoughFundsWithStakingV4 because validator might be in additional queue after staking v4", + epochStart.ErrNilValidatorInfo) } - err = s.executeRewardTx(rwdTx) + validatorLeaving := validatorInfo.ShallowClone() + validatorLeaving.SetListAndIndex(string(common.LeavingList), validatorLeaving.GetIndex(), true) + err = validatorsInfoMap.Replace(validatorInfo, validatorLeaving) if err != nil { return err } } - return nil -} - -func (s *systemSCProcessor) executeRewardTx(rwdTx data.TransactionHandler) error { - vmInput := &vmcommon.ContractCallInput{ - VMInput: vmcommon.VMInput{ - CallerAddr: s.endOfEpochCallerAddress, - Arguments: nil, - CallValue: rwdTx.GetValue(), - }, - RecipientAddr: rwdTx.GetRcvAddr(), - Function: "updateRewards", - } - - vmOutput, err := s.systemVM.RunSmartContractCall(vmInput) - if err != nil { - return err - } - - if vmOutput.ReturnCode != vmcommon.Ok { - return epochStart.ErrSystemDelegationCall - } - - err = s.processSCOutputAccounts(vmOutput) - if err != nil { - return err - } - - return nil -} - -// updates the configuration of the system SC if the flags permit -func (s *systemSCProcessor) updateSystemSCConfigMinNodes() error { - minNumberOfNodesWithHysteresis := s.genesisNodesConfig.MinNumberOfNodesWithHysteresis() - err := s.setMinNumberOfNodes(minNumberOfNodesWithHysteresis) - - return err + return s.updateDelegationContracts(mapOwnersKeys) } -func (s *systemSCProcessor) resetLastUnJailed() error { +func (s *systemSCProcessor) updateToGovernanceV2() error { vmInput := &vmcommon.ContractCallInput{ VMInput: vmcommon.VMInput{ - CallerAddr: s.endOfEpochCallerAddress, - Arguments: 
[][]byte{}, + CallerAddr: vm.GovernanceSCAddress, CallValue: big.NewInt(0), + Arguments: [][]byte{}, }, - RecipientAddr: s.stakingSCAddress, - Function: "resetLastUnJailedFromQueue", + RecipientAddr: vm.GovernanceSCAddress, + Function: "initV2", } - - vmOutput, err := s.systemVM.RunSmartContractCall(vmInput) - if err != nil { - return err + vmOutput, errRun := s.systemVM.RunSmartContractCall(vmInput) + if errRun != nil { + return fmt.Errorf("%w when updating to governanceV2", errRun) } - if vmOutput.ReturnCode != vmcommon.Ok { - return epochStart.ErrResetLastUnJailedFromQueue - } - - err = s.processSCOutputAccounts(vmOutput) - if err != nil { - return err + return fmt.Errorf("got return code %s when updating to governanceV2", vmOutput.ReturnCode) } - return nil -} - -// updates the configuration of the system SC if the flags permit -func (s *systemSCProcessor) updateMaxNodes(validatorInfos map[uint32][]*state.ValidatorInfo, nonce uint64) error { - sw := core.NewStopWatch() - sw.Start("total") - defer func() { - sw.Stop("total") - log.Debug("systemSCProcessor.updateMaxNodes", sw.GetMeasurements()...) - }() - - maxNumberOfNodes := s.maxNodes - sw.Start("setMaxNumberOfNodes") - prevMaxNumberOfNodes, err := s.setMaxNumberOfNodes(maxNumberOfNodes) - sw.Stop("setMaxNumberOfNodes") + err := s.processSCOutputAccounts(vmOutput) if err != nil { return err } - if maxNumberOfNodes < prevMaxNumberOfNodes { - return epochStart.ErrInvalidMaxNumberOfNodes - } - - sw.Start("stakeNodesFromQueue") - err = s.stakeNodesFromQueue(validatorInfos, maxNumberOfNodes-prevMaxNumberOfNodes, nonce) - sw.Stop("stakeNodesFromQueue") - if err != nil { - return err - } return nil } -func (s *systemSCProcessor) computeNumWaitingPerShard(validatorInfos map[uint32][]*state.ValidatorInfo) error { - for shardID, validatorInfoList := range validatorInfos { - totalInWaiting := uint32(0) - for _, validatorInfo := range validatorInfoList { - switch validatorInfo.List { - case string(common.WaitingList): - totalInWaiting++ - } - } - s.mapNumSwitchablePerShard[shardID] = totalInWaiting - s.mapNumSwitchedPerShard[shardID] = 0 - } - return nil -} - -func (s *systemSCProcessor) swapJailedWithWaiting(validatorInfos map[uint32][]*state.ValidatorInfo) error { - jailedValidators := s.getSortedJailedNodes(validatorInfos) - - log.Debug("number of jailed validators", "num", len(jailedValidators)) - - newValidators := make(map[string]struct{}) - for _, jailedValidator := range jailedValidators { - if _, ok := newValidators[string(jailedValidator.PublicKey)]; ok { - continue - } - if isValidator(jailedValidator) && s.mapNumSwitchablePerShard[jailedValidator.ShardId] <= s.mapNumSwitchedPerShard[jailedValidator.ShardId] { - log.Debug("cannot switch in this epoch anymore for this shard as switched num waiting", - "shardID", jailedValidator.ShardId, - "numSwitched", s.mapNumSwitchedPerShard[jailedValidator.ShardId]) - continue - } - - vmInput := &vmcommon.ContractCallInput{ - VMInput: vmcommon.VMInput{ - CallerAddr: s.endOfEpochCallerAddress, - Arguments: [][]byte{jailedValidator.PublicKey}, - CallValue: big.NewInt(0), - }, - RecipientAddr: s.stakingSCAddress, - Function: "switchJailedWithWaiting", - } - - vmOutput, err := s.systemVM.RunSmartContractCall(vmInput) - if err != nil { - return err - } - - log.Debug("switchJailedWithWaiting called for", - "key", jailedValidator.PublicKey, - "returnMessage", vmOutput.ReturnMessage) - if vmOutput.ReturnCode != vmcommon.Ok { - continue - } - - newValidator, err := 
s.stakingToValidatorStatistics(validatorInfos, jailedValidator, vmOutput) - if err != nil { - return err - } - - if len(newValidator) != 0 { - newValidators[string(newValidator)] = struct{}{} - } - } - - return nil -} - -func (s *systemSCProcessor) stakingToValidatorStatistics( - validatorInfos map[uint32][]*state.ValidatorInfo, - jailedValidator *state.ValidatorInfo, - vmOutput *vmcommon.VMOutput, -) ([]byte, error) { - stakingSCOutput, ok := vmOutput.OutputAccounts[string(s.stakingSCAddress)] - if !ok { - return nil, epochStart.ErrStakingSCOutputAccountNotFound - } - - var activeStorageUpdate *vmcommon.StorageUpdate - for _, storageUpdate := range stakingSCOutput.StorageUpdates { - isNewValidatorKey := len(storageUpdate.Offset) == len(jailedValidator.PublicKey) && - !bytes.Equal(storageUpdate.Offset, jailedValidator.PublicKey) - if isNewValidatorKey { - activeStorageUpdate = storageUpdate - break - } - } - if activeStorageUpdate == nil { - log.Debug("no one in waiting suitable for switch") - if s.enableEpochsHandler.IsFlagEnabled(common.SaveJailedAlwaysFlag) { - err := s.processSCOutputAccounts(vmOutput) - if err != nil { - return nil, err - } - } - - return nil, nil - } - - err := s.processSCOutputAccounts(vmOutput) - if err != nil { - return nil, err - } - - var stakingData systemSmartContracts.StakedDataV2_0 - err = s.marshalizer.Unmarshal(&stakingData, activeStorageUpdate.Data) - if err != nil { - return nil, err - } - - blsPubKey := activeStorageUpdate.Offset - log.Debug("staking validator key who switches with the jailed one", "blsKey", blsPubKey) - - account, isNew, err := state.GetPeerAccountAndReturnIfNew(s.peerAccountsDB, blsPubKey) - if err != nil { - return nil, err - } - - if !bytes.Equal(account.GetRewardAddress(), stakingData.RewardAddress) { - err = account.SetRewardAddress(stakingData.RewardAddress) - if err != nil { - return nil, err - } - } - - if !isNew { - // old jailed validator getting switched back after unJail with stake - must remove first from exported map - deleteNewValidatorIfExistsFromMap(validatorInfos, blsPubKey, account.GetShardId()) - } - - account.SetListAndIndex(jailedValidator.ShardId, string(common.NewList), uint32(stakingData.StakedNonce)) - account.SetTempRating(s.startRating) - account.SetUnStakedEpoch(common.DefaultUnstakedEpoch) - - err = s.peerAccountsDB.SaveAccount(account) - if err != nil { - return nil, err - } - - jailedAccount, err := s.getPeerAccount(jailedValidator.PublicKey) - if err != nil { - return nil, err - } - - jailedAccount.SetListAndIndex(jailedValidator.ShardId, string(common.JailedList), jailedValidator.Index) - jailedAccount.ResetAtNewEpoch() - err = s.peerAccountsDB.SaveAccount(jailedAccount) - if err != nil { - return nil, err - } - - if isValidator(jailedValidator) { - s.mapNumSwitchedPerShard[jailedValidator.ShardId]++ - } - - newValidatorInfo := s.validatorInfoCreator.PeerAccountToValidatorInfo(account) - switchJailedWithNewValidatorInMap(validatorInfos, jailedValidator, newValidatorInfo) - - return blsPubKey, nil -} - -func isValidator(validator *state.ValidatorInfo) bool { - return validator.List == string(common.WaitingList) || validator.List == string(common.EligibleList) -} - -func deleteNewValidatorIfExistsFromMap( - validatorInfos map[uint32][]*state.ValidatorInfo, - blsPubKey []byte, - shardID uint32, -) { - for index, validatorInfo := range validatorInfos[shardID] { - if bytes.Equal(validatorInfo.PublicKey, blsPubKey) { - length := len(validatorInfos[shardID]) - validatorInfos[shardID][index] = 
validatorInfos[shardID][length-1] - validatorInfos[shardID][length-1] = nil - validatorInfos[shardID] = validatorInfos[shardID][:length-1] - break - } - } -} - -func switchJailedWithNewValidatorInMap( - validatorInfos map[uint32][]*state.ValidatorInfo, - jailedValidator *state.ValidatorInfo, - newValidator *state.ValidatorInfo, -) { - for index, validatorInfo := range validatorInfos[jailedValidator.ShardId] { - if bytes.Equal(validatorInfo.PublicKey, jailedValidator.PublicKey) { - validatorInfos[jailedValidator.ShardId][index] = newValidator - break - } - } -} - -func (s *systemSCProcessor) getUserAccount(address []byte) (state.UserAccountHandler, error) { - acnt, err := s.userAccountsDB.LoadAccount(address) - if err != nil { - return nil, err - } - - stAcc, ok := acnt.(state.UserAccountHandler) - if !ok { - return nil, process.ErrWrongTypeAssertion - } - - return stAcc, nil -} - -// save account changes in state from vmOutput - protected by VM - every output can be treated as is. -func (s *systemSCProcessor) processSCOutputAccounts( - vmOutput *vmcommon.VMOutput, -) error { - - outputAccounts := process.SortVMOutputInsideData(vmOutput) - for _, outAcc := range outputAccounts { - acc, err := s.getUserAccount(outAcc.Address) - if err != nil { - return err - } - - storageUpdates := process.GetSortedStorageUpdates(outAcc) - for _, storeUpdate := range storageUpdates { - err = acc.SaveKeyValue(storeUpdate.Offset, storeUpdate.Data) - if err != nil { - return err - } - } - - if outAcc.BalanceDelta != nil && outAcc.BalanceDelta.Cmp(zero) != 0 { - err = acc.AddToBalance(outAcc.BalanceDelta) - if err != nil { - return err - } - } - - err = s.userAccountsDB.SaveAccount(acc) - if err != nil { - return err - } - } - - return nil -} - -func (s *systemSCProcessor) getSortedJailedNodes(validatorInfos map[uint32][]*state.ValidatorInfo) []*state.ValidatorInfo { - newJailedValidators := make([]*state.ValidatorInfo, 0) - oldJailedValidators := make([]*state.ValidatorInfo, 0) - - minChance := s.chanceComputer.GetChance(0) - for _, listValidators := range validatorInfos { - for _, validatorInfo := range listValidators { - if validatorInfo.List == string(common.JailedList) { - oldJailedValidators = append(oldJailedValidators, validatorInfo) - } else if s.chanceComputer.GetChance(validatorInfo.TempRating) < minChance { - newJailedValidators = append(newJailedValidators, validatorInfo) - } - } - } - - sort.Sort(validatorList(oldJailedValidators)) - sort.Sort(validatorList(newJailedValidators)) - - return append(oldJailedValidators, newJailedValidators...) 
-} - -func (s *systemSCProcessor) getPeerAccount(key []byte) (state.PeerAccountHandler, error) { - account, err := s.peerAccountsDB.LoadAccount(key) - if err != nil { - return nil, err - } - - peerAcc, ok := account.(state.PeerAccountHandler) - if !ok { - return nil, epochStart.ErrWrongTypeAssertion - } - - return peerAcc, nil -} - -func (s *systemSCProcessor) setMinNumberOfNodes(minNumNodes uint32) error { - vmInput := &vmcommon.ContractCallInput{ - VMInput: vmcommon.VMInput{ - CallerAddr: s.endOfEpochCallerAddress, - Arguments: [][]byte{big.NewInt(int64(minNumNodes)).Bytes()}, - CallValue: big.NewInt(0), - }, - RecipientAddr: s.stakingSCAddress, - Function: "updateConfigMinNodes", - } - - vmOutput, err := s.systemVM.RunSmartContractCall(vmInput) - if err != nil { - return err - } - - log.Debug("setMinNumberOfNodes called with", - "minNumNodes", minNumNodes, - "returnMessage", vmOutput.ReturnMessage) - - if vmOutput.ReturnCode != vmcommon.Ok { - return epochStart.ErrInvalidMinNumberOfNodes - } - - err = s.processSCOutputAccounts(vmOutput) - if err != nil { - return err - } - - return nil -} - -func (s *systemSCProcessor) setMaxNumberOfNodes(maxNumNodes uint32) (uint32, error) { - vmInput := &vmcommon.ContractCallInput{ - VMInput: vmcommon.VMInput{ - CallerAddr: s.endOfEpochCallerAddress, - Arguments: [][]byte{big.NewInt(int64(maxNumNodes)).Bytes()}, - CallValue: big.NewInt(0), - }, - RecipientAddr: s.stakingSCAddress, - Function: "updateConfigMaxNodes", - } - - vmOutput, err := s.systemVM.RunSmartContractCall(vmInput) - if err != nil { - return 0, err - } - - log.Debug("setMaxNumberOfNodes called with", - "maxNumNodes", maxNumNodes, - "returnMessage", vmOutput.ReturnMessage) - - if vmOutput.ReturnCode != vmcommon.Ok { - return 0, epochStart.ErrInvalidMaxNumberOfNodes - } - if len(vmOutput.ReturnData) != 1 { - return 0, epochStart.ErrInvalidSystemSCReturn - } - - err = s.processSCOutputAccounts(vmOutput) - if err != nil { - return 0, err - } - - prevMaxNumNodes := big.NewInt(0).SetBytes(vmOutput.ReturnData[0]).Uint64() - return uint32(prevMaxNumNodes), nil -} - -func (s *systemSCProcessor) updateOwnersForBlsKeys() error { - sw := core.NewStopWatch() - sw.Start("systemSCProcessor") - defer func() { - sw.Stop("systemSCProcessor") - log.Debug("systemSCProcessor.updateOwnersForBlsKeys time measurements", sw.GetMeasurements()...) 
- }() - - sw.Start("getValidatorSystemAccount") - userValidatorAccount, err := s.getValidatorSystemAccount() - sw.Stop("getValidatorSystemAccount") - if err != nil { - return err - } - - sw.Start("getArgumentsForSetOwnerFunctionality") - arguments, err := s.getArgumentsForSetOwnerFunctionality(userValidatorAccount) - sw.Stop("getArgumentsForSetOwnerFunctionality") - if err != nil { - return err - } - - sw.Start("callSetOwnersOnAddresses") - err = s.callSetOwnersOnAddresses(arguments) - sw.Stop("callSetOwnersOnAddresses") - if err != nil { - return err - } - - return nil -} - -func (s *systemSCProcessor) updateToGovernanceV2() error { - vmInput := &vmcommon.ContractCallInput{ - VMInput: vmcommon.VMInput{ - CallerAddr: vm.GovernanceSCAddress, - CallValue: big.NewInt(0), - Arguments: [][]byte{}, - }, - RecipientAddr: vm.GovernanceSCAddress, - Function: "initV2", - } - vmOutput, errRun := s.systemVM.RunSmartContractCall(vmInput) - if errRun != nil { - return fmt.Errorf("%w when updating to governanceV2", errRun) - } - if vmOutput.ReturnCode != vmcommon.Ok { - return fmt.Errorf("got return code %s when updating to governanceV2", vmOutput.ReturnCode) - } - - err := s.processSCOutputAccounts(vmOutput) - if err != nil { - return err - } - - return nil -} - -func (s *systemSCProcessor) getValidatorSystemAccount() (state.UserAccountHandler, error) { - validatorAccount, err := s.userAccountsDB.LoadAccount(vm.ValidatorSCAddress) - if err != nil { - return nil, fmt.Errorf("%w when loading validator account", err) - } - - userValidatorAccount, ok := validatorAccount.(state.UserAccountHandler) - if !ok { - return nil, fmt.Errorf("%w when loading validator account", epochStart.ErrWrongTypeAssertion) - } - - if check.IfNil(userValidatorAccount.DataTrie()) { - return nil, epochStart.ErrNilDataTrie - } - - return userValidatorAccount, nil -} - -func (s *systemSCProcessor) getArgumentsForSetOwnerFunctionality(userValidatorAccount state.UserAccountHandler) ([][]byte, error) { - arguments := make([][]byte, 0) - - leavesChannels := &common.TrieIteratorChannels{ - LeavesChan: make(chan core.KeyValueHolder, common.TrieLeavesChannelDefaultCapacity), - ErrChan: errChan.NewErrChanWrapper(), - } - err := userValidatorAccount.GetAllLeaves(leavesChannels, context.Background()) - if err != nil { - return nil, err - } - for leaf := range leavesChannels.LeavesChan { - validatorData := &systemSmartContracts.ValidatorDataV2{} - - err = s.marshalizer.Unmarshal(validatorData, leaf.Value()) - if err != nil { - continue - } - for _, blsKey := range validatorData.BlsPubKeys { - arguments = append(arguments, blsKey) - arguments = append(arguments, leaf.Key()) - } - } - - err = leavesChannels.ErrChan.ReadFromChanNonBlocking() - if err != nil { - return nil, err - } - - return arguments, nil -} - -func (s *systemSCProcessor) callSetOwnersOnAddresses(arguments [][]byte) error { - vmInput := &vmcommon.ContractCallInput{ - VMInput: vmcommon.VMInput{ - CallerAddr: vm.EndOfEpochAddress, - CallValue: big.NewInt(0), - Arguments: arguments, - }, - RecipientAddr: vm.StakingSCAddress, - Function: "setOwnersOnAddresses", - } - - vmOutput, errRun := s.systemVM.RunSmartContractCall(vmInput) - if errRun != nil { - return fmt.Errorf("%w when calling setOwnersOnAddresses function", errRun) - } - if vmOutput.ReturnCode != vmcommon.Ok { - return fmt.Errorf("got return code %s when calling setOwnersOnAddresses", vmOutput.ReturnCode) - } - - return s.processSCOutputAccounts(vmOutput) -} - -func (s *systemSCProcessor) initDelegationSystemSC() error { - 
codeMetaData := &vmcommon.CodeMetadata{ - Upgradeable: false, - Payable: false, - Readable: true, - } - - vmInput := &vmcommon.ContractCreateInput{ - VMInput: vmcommon.VMInput{ - CallerAddr: vm.DelegationManagerSCAddress, - Arguments: [][]byte{}, - CallValue: big.NewInt(0), - }, - ContractCode: vm.DelegationManagerSCAddress, - ContractCodeMetadata: codeMetaData.ToBytes(), - } - - vmOutput, err := s.systemVM.RunSmartContractCreate(vmInput) - if err != nil { - return err - } - if vmOutput.ReturnCode != vmcommon.Ok { - return epochStart.ErrCouldNotInitDelegationSystemSC - } - - err = s.processSCOutputAccounts(vmOutput) - if err != nil { - return err - } - - err = s.updateSystemSCContractsCode(vmInput.ContractCodeMetadata) - if err != nil { - return err - } - - return nil -} - -func (s *systemSCProcessor) updateSystemSCContractsCode(contractMetadata []byte) error { - contractsToUpdate := make([][]byte, 0) - contractsToUpdate = append(contractsToUpdate, vm.StakingSCAddress) - contractsToUpdate = append(contractsToUpdate, vm.ValidatorSCAddress) - contractsToUpdate = append(contractsToUpdate, vm.GovernanceSCAddress) - contractsToUpdate = append(contractsToUpdate, vm.ESDTSCAddress) - contractsToUpdate = append(contractsToUpdate, vm.DelegationManagerSCAddress) - contractsToUpdate = append(contractsToUpdate, vm.FirstDelegationSCAddress) - - for _, address := range contractsToUpdate { - userAcc, err := s.getUserAccount(address) - if err != nil { - return err - } - - userAcc.SetOwnerAddress(address) - userAcc.SetCodeMetadata(contractMetadata) - userAcc.SetCode(address) - - err = s.userAccountsDB.SaveAccount(userAcc) - if err != nil { - return err - } - } - - return nil -} - -func (s *systemSCProcessor) cleanAdditionalQueue() error { - sw := core.NewStopWatch() - sw.Start("systemSCProcessor") - defer func() { - sw.Stop("systemSCProcessor") - log.Debug("systemSCProcessor.cleanAdditionalQueue time measurements", sw.GetMeasurements()...) 
- }() - - vmInput := &vmcommon.ContractCallInput{ - VMInput: vmcommon.VMInput{ - CallerAddr: vm.EndOfEpochAddress, - CallValue: big.NewInt(0), - Arguments: [][]byte{}, - }, - RecipientAddr: vm.StakingSCAddress, - Function: "cleanAdditionalQueue", - } - vmOutput, errRun := s.systemVM.RunSmartContractCall(vmInput) - if errRun != nil { - return fmt.Errorf("%w when cleaning additional queue", errRun) - } - if vmOutput.ReturnCode != vmcommon.Ok { - return fmt.Errorf("got return code %s, return message %s when cleaning additional queue", vmOutput.ReturnCode, vmOutput.ReturnMessage) - } - - err := s.processSCOutputAccounts(vmOutput) - if err != nil { - return err - } - - // returnData format is list(address - all blsKeys which were unstaked for that) - addressLength := len(s.endOfEpochCallerAddress) - mapOwnersKeys := make(map[string][][]byte) - currentOwner := "" - for _, returnData := range vmOutput.ReturnData { - if len(returnData) == addressLength { - currentOwner = string(returnData) - continue - } - - if len(currentOwner) != addressLength { - continue - } - - mapOwnersKeys[currentOwner] = append(mapOwnersKeys[currentOwner], returnData) - } - - err = s.updateDelegationContracts(mapOwnersKeys) - if err != nil { - log.Error("update delegation contracts failed after cleaning additional queue", "error", err.Error()) - return err - } - - return nil -} - -func (s *systemSCProcessor) stakeNodesFromQueue( - validatorInfos map[uint32][]*state.ValidatorInfo, - nodesToStake uint32, - nonce uint64, -) error { - if nodesToStake == 0 { - return nil - } - - nodesToStakeAsBigInt := big.NewInt(0).SetUint64(uint64(nodesToStake)) - vmInput := &vmcommon.ContractCallInput{ - VMInput: vmcommon.VMInput{ - CallerAddr: vm.EndOfEpochAddress, - CallValue: big.NewInt(0), - Arguments: [][]byte{nodesToStakeAsBigInt.Bytes()}, - }, - RecipientAddr: vm.StakingSCAddress, - Function: "stakeNodesFromQueue", - } - vmOutput, errRun := s.systemVM.RunSmartContractCall(vmInput) - if errRun != nil { - return fmt.Errorf("%w when staking nodes from waiting list", errRun) - } - if vmOutput.ReturnCode != vmcommon.Ok { - return fmt.Errorf("got return code %s when staking nodes from waiting list", vmOutput.ReturnCode) - } - if len(vmOutput.ReturnData)%2 != 0 { - return fmt.Errorf("%w return data must be divisible by 2 when staking nodes from waiting list", epochStart.ErrInvalidSystemSCReturn) - } - - err := s.processSCOutputAccounts(vmOutput) - if err != nil { - return err - } - - err = s.addNewlyStakedNodesToValidatorTrie(validatorInfos, vmOutput.ReturnData, nonce) - if err != nil { - return err - } - - return nil -} - -func (s *systemSCProcessor) addNewlyStakedNodesToValidatorTrie( - validatorInfos map[uint32][]*state.ValidatorInfo, - returnData [][]byte, - nonce uint64, -) error { - for i := 0; i < len(returnData); i += 2 { - blsKey := returnData[i] - rewardAddress := returnData[i+1] - - peerAcc, err := s.getPeerAccount(blsKey) - if err != nil { - return err - } - - err = peerAcc.SetRewardAddress(rewardAddress) - if err != nil { - return err - } - - peerAcc.SetListAndIndex(peerAcc.GetShardId(), string(common.NewList), uint32(nonce)) - peerAcc.SetTempRating(s.startRating) - peerAcc.SetUnStakedEpoch(common.DefaultUnstakedEpoch) - - err = s.peerAccountsDB.SaveAccount(peerAcc) - if err != nil { - return err - } - - validatorInfo := &state.ValidatorInfo{ - PublicKey: blsKey, - ShardId: peerAcc.GetShardId(), - List: string(common.NewList), - Index: uint32(nonce), - TempRating: s.startRating, - Rating: s.startRating, - RewardAddress: 
rewardAddress, - AccumulatedFees: big.NewInt(0), - } - validatorInfos[peerAcc.GetShardId()] = append(validatorInfos[peerAcc.GetShardId()], validatorInfo) - } - - return nil -} - -func (s *systemSCProcessor) initESDT() error { - currentConfigValues, err := s.extractConfigFromESDTContract() - if err != nil { - return err - } - - return s.changeESDTOwner(currentConfigValues) -} - -func (s *systemSCProcessor) extractConfigFromESDTContract() ([][]byte, error) { - vmInput := &vmcommon.ContractCallInput{ - VMInput: vmcommon.VMInput{ - CallerAddr: s.endOfEpochCallerAddress, - Arguments: [][]byte{}, - CallValue: big.NewInt(0), - GasProvided: math.MaxInt64, - }, - Function: "getContractConfig", - RecipientAddr: vm.ESDTSCAddress, - } - - output, err := s.systemVM.RunSmartContractCall(vmInput) - if err != nil { - return nil, err - } - if len(output.ReturnData) != 4 { - return nil, fmt.Errorf("%w getContractConfig should have returned 4 values", epochStart.ErrInvalidSystemSCReturn) - } - - return output.ReturnData, nil -} - -func (s *systemSCProcessor) changeESDTOwner(currentConfigValues [][]byte) error { - baseIssuingCost := currentConfigValues[1] - minTokenNameLength := currentConfigValues[2] - maxTokenNameLength := currentConfigValues[3] - - vmInput := &vmcommon.ContractCallInput{ - VMInput: vmcommon.VMInput{ - CallerAddr: s.endOfEpochCallerAddress, - Arguments: [][]byte{s.esdtOwnerAddressBytes, baseIssuingCost, minTokenNameLength, maxTokenNameLength}, - CallValue: big.NewInt(0), - GasProvided: math.MaxInt64, - }, - Function: "configChange", - RecipientAddr: vm.ESDTSCAddress, - } - - output, err := s.systemVM.RunSmartContractCall(vmInput) - if err != nil { - return err - } - if output.ReturnCode != vmcommon.Ok { - return fmt.Errorf("%w changeESDTOwner should have returned Ok", epochStart.ErrInvalidSystemSCReturn) - } - - return s.processSCOutputAccounts(output) -} - -// IsInterfaceNil returns true if underlying object is nil -func (s *systemSCProcessor) IsInterfaceNil() bool { - return s == nil +// IsInterfaceNil returns true if underlying object is nil +func (s *systemSCProcessor) IsInterfaceNil() bool { + return s == nil } // EpochConfirmed is called whenever a new epoch is confirmed func (s *systemSCProcessor) EpochConfirmed(epoch uint32, _ uint64) { - s.flagChangeMaxNodesEnabled.SetValue(false) - for _, maxNodesConfig := range s.maxNodesEnableConfig { - if epoch == maxNodesConfig.EpochEnable { - s.flagChangeMaxNodesEnabled.SetValue(true) - s.maxNodes = maxNodesConfig.MaxNumNodes - break - } - } - - log.Debug("systemSCProcessor: change of maximum number of nodes and/or shuffling percentage", - "enabled", s.flagChangeMaxNodesEnabled.IsSet(), - "epoch", epoch, - "maxNodes", s.maxNodes, - ) + s.legacyEpochConfirmed(epoch) } diff --git a/epochStart/metachain/systemSCs_test.go b/epochStart/metachain/systemSCs_test.go index 37896873f27..d203a2c1075 100644 --- a/epochStart/metachain/systemSCs_test.go +++ b/epochStart/metachain/systemSCs_test.go @@ -8,7 +8,7 @@ import ( "math" "math/big" "os" - "strconv" + "strings" "testing" "github.com/multiversx/mx-chain-core-go/core" @@ -27,9 +27,9 @@ import ( "github.com/multiversx/mx-chain-go/dataRetriever/dataPool" "github.com/multiversx/mx-chain-go/epochStart" "github.com/multiversx/mx-chain-go/epochStart/mock" + "github.com/multiversx/mx-chain-go/epochStart/notifier" "github.com/multiversx/mx-chain-go/genesis/process/disabled" "github.com/multiversx/mx-chain-go/process" - economicsHandler "github.com/multiversx/mx-chain-go/process/economics" vmFactory 
"github.com/multiversx/mx-chain-go/process/factory" metaProcess "github.com/multiversx/mx-chain-go/process/factory/metachain" "github.com/multiversx/mx-chain-go/process/peer" @@ -48,7 +48,11 @@ import ( dataRetrieverMock "github.com/multiversx/mx-chain-go/testscommon/dataRetriever" "github.com/multiversx/mx-chain-go/testscommon/enableEpochsHandlerMock" "github.com/multiversx/mx-chain-go/testscommon/epochNotifier" + "github.com/multiversx/mx-chain-go/testscommon/genesisMocks" + "github.com/multiversx/mx-chain-go/testscommon/marshallerMock" "github.com/multiversx/mx-chain-go/testscommon/shardingMocks" + "github.com/multiversx/mx-chain-go/testscommon/stakingcommon" + stateMock "github.com/multiversx/mx-chain-go/testscommon/state" statusHandlerMock "github.com/multiversx/mx-chain-go/testscommon/statusHandler" storageMock "github.com/multiversx/mx-chain-go/testscommon/storage" "github.com/multiversx/mx-chain-go/trie" @@ -98,6 +102,29 @@ func createPhysicalUnit(t *testing.T) (storage.Storer, string) { return unit, dir } +func createMockArgsForSystemSCProcessor() ArgsNewEpochStartSystemSCProcessing { + return ArgsNewEpochStartSystemSCProcessing{ + SystemVM: &mock.VMExecutionHandlerStub{}, + UserAccountsDB: &stateMock.AccountsStub{}, + PeerAccountsDB: &stateMock.AccountsStub{}, + Marshalizer: &marshallerMock.MarshalizerStub{}, + StartRating: 0, + ValidatorInfoCreator: &testscommon.ValidatorStatisticsProcessorStub{}, + ChanceComputer: &mock.ChanceComputerStub{}, + ShardCoordinator: &testscommon.ShardsCoordinatorMock{}, + EndOfEpochCallerAddress: vm.EndOfEpochAddress, + StakingSCAddress: vm.StakingSCAddress, + ESDTOwnerAddressBytes: vm.ESDTSCAddress, + GenesisNodesConfig: &genesisMocks.NodesSetupStub{}, + EpochNotifier: &epochNotifier.EpochNotifierStub{}, + NodesConfigProvider: &shardingMocks.NodesCoordinatorStub{}, + StakingDataProvider: &stakingcommon.StakingDataProviderStub{}, + AuctionListSelector: &stakingcommon.AuctionListSelectorStub{}, + MaxNodesChangeConfigProvider: &testscommon.MaxNodesChangeConfigProviderStub{}, + EnableEpochsHandler: &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, + } +} + func TestNewSystemSCProcessor(t *testing.T) { t.Parallel() @@ -198,7 +225,7 @@ func TestSystemSCProcessor_ProcessSystemSmartContract(t *testing.T) { jailedAcc, _ := args.PeerAccountsDB.LoadAccount([]byte("jailedPubKey0")) _ = args.PeerAccountsDB.SaveAccount(jailedAcc) - validatorInfos := make(map[uint32][]*state.ValidatorInfo) + validatorsInfo := state.NewShardValidatorsInfoMap() vInfo := &state.ValidatorInfo{ PublicKey: []byte("jailedPubKey0"), ShardId: 0, @@ -207,13 +234,13 @@ func TestSystemSCProcessor_ProcessSystemSmartContract(t *testing.T) { RewardAddress: []byte("address"), AccumulatedFees: big.NewInt(0), } - validatorInfos[0] = append(validatorInfos[0], vInfo) - err := s.ProcessSystemSmartContract(validatorInfos, 0, 0) - assert.Nil(t, err) + _ = validatorsInfo.Add(vInfo) + err := s.ProcessSystemSmartContract(validatorsInfo, &block.Header{}) + require.Nil(t, err) - assert.Equal(t, len(validatorInfos[0]), 1) - newValidatorInfo := validatorInfos[0][0] - assert.Equal(t, newValidatorInfo.List, string(common.NewList)) + require.Len(t, validatorsInfo.GetShardValidatorsInfoMap()[0], 1) + newValidatorInfo := validatorsInfo.GetShardValidatorsInfoMap()[0][0] + require.Equal(t, newValidatorInfo.GetList(), string(common.NewList)) } func TestSystemSCProcessor_JailedNodesShouldNotBeSwappedAllAtOnce(t *testing.T) { @@ -243,7 +270,7 @@ func testSystemSCProcessorJailedNodesShouldNotBeSwappedAllAtOnce(t 
*testing.T, s numEligible := 9 numWaiting := 5 numJailed := 8 - stakingScAcc := loadSCAccount(args.UserAccountsDB, vm.StakingSCAddress) + stakingScAcc := stakingcommon.LoadUserAccount(args.UserAccountsDB, vm.StakingSCAddress) createEligibleNodes(numEligible, stakingScAcc, args.Marshalizer) _ = createWaitingNodes(numWaiting, stakingScAcc, args.UserAccountsDB, args.Marshalizer) jailed := createJailedNodes(numJailed, stakingScAcc, args.UserAccountsDB, args.PeerAccountsDB, args.Marshalizer) @@ -251,25 +278,25 @@ func testSystemSCProcessorJailedNodesShouldNotBeSwappedAllAtOnce(t *testing.T, s _ = s.userAccountsDB.SaveAccount(stakingScAcc) _, _ = s.userAccountsDB.Commit() - addValidatorData(args.UserAccountsDB, []byte("ownerForAll"), [][]byte{[]byte("stakedPubKey1"), []byte("stakedPubKey2"), []byte("stakedPubKey3")}, big.NewInt(900000), args.Marshalizer) + stakingcommon.AddValidatorData(args.UserAccountsDB, []byte("ownerForAll"), [][]byte{[]byte("stakedPubKey1"), []byte("stakedPubKey2"), []byte("stakedPubKey3")}, big.NewInt(900000), args.Marshalizer) - validatorsInfo := make(map[uint32][]*state.ValidatorInfo) - validatorsInfo[0] = append(validatorsInfo[0], jailed...) + validatorsInfo := state.NewShardValidatorsInfoMap() + _ = validatorsInfo.SetValidatorsInShard(0, jailed) - err := s.ProcessSystemSmartContract(validatorsInfo, 0, 0) - assert.Nil(t, err) + err := s.ProcessSystemSmartContract(validatorsInfo, &block.Header{}) + require.Nil(t, err) for i := 0; i < numWaiting; i++ { - assert.Equal(t, string(common.NewList), validatorsInfo[0][i].List) + require.Equal(t, string(common.NewList), validatorsInfo.GetShardValidatorsInfoMap()[0][i].GetList()) } for i := numWaiting; i < numJailed; i++ { - assert.Equal(t, string(common.JailedList), validatorsInfo[0][i].List) + require.Equal(t, string(common.JailedList), validatorsInfo.GetShardValidatorsInfoMap()[0][i].GetList()) } newJailedNodes := jailed[numWaiting:numJailed] checkNodesStatusInSystemSCDataTrie(t, newJailedNodes, args.UserAccountsDB, args.Marshalizer, saveJailedAlwaysEnableEpoch == 0) } -func checkNodesStatusInSystemSCDataTrie(t *testing.T, nodes []*state.ValidatorInfo, accounts state.AccountsAdapter, marshalizer marshal.Marshalizer, jailed bool) { +func checkNodesStatusInSystemSCDataTrie(t *testing.T, nodes []state.ValidatorInfoHandler, accounts state.AccountsAdapter, marshalizer marshal.Marshalizer, jailed bool) { account, err := accounts.LoadAccount(vm.StakingSCAddress) require.Nil(t, err) @@ -277,7 +304,7 @@ func checkNodesStatusInSystemSCDataTrie(t *testing.T, nodes []*state.ValidatorIn systemScAccount, ok := account.(state.UserAccountHandler) require.True(t, ok) for _, nodeInfo := range nodes { - buff, _, err = systemScAccount.RetrieveValue(nodeInfo.PublicKey) + buff, _, err = systemScAccount.RetrieveValue(nodeInfo.GetPublicKey()) require.Nil(t, err) require.True(t, len(buff) > 0) @@ -315,7 +342,7 @@ func TestSystemSCProcessor_NobodyToSwapWithStakingV2(t *testing.T) { _ = s.initDelegationSystemSC() doStake(t, s.systemVM, s.userAccountsDB, owner1, big.NewInt(1000), blsKeys...) doUnStake(t, s.systemVM, s.userAccountsDB, owner1, blsKeys[:3]...) 
- validatorsInfo := make(map[uint32][]*state.ValidatorInfo) + validatorsInfo := state.NewShardValidatorsInfoMap() jailed := &state.ValidatorInfo{ PublicKey: blsKeys[0], ShardId: 0, @@ -324,13 +351,13 @@ func TestSystemSCProcessor_NobodyToSwapWithStakingV2(t *testing.T) { RewardAddress: []byte("owner1"), AccumulatedFees: big.NewInt(0), } - validatorsInfo[0] = append(validatorsInfo[0], jailed) + _ = validatorsInfo.Add(jailed) - err := s.ProcessSystemSmartContract(validatorsInfo, 0, 0) + err := s.ProcessSystemSmartContract(validatorsInfo, &block.Header{}) assert.Nil(t, err) - for _, vInfo := range validatorsInfo[0] { - assert.Equal(t, string(common.JailedList), vInfo.List) + for _, vInfo := range validatorsInfo.GetShardValidatorsInfoMap()[0] { + assert.Equal(t, string(common.JailedList), vInfo.GetList()) } nodesToUnStake, mapOwnersKeys, err := s.stakingDataProvider.ComputeUnQualifiedNodes(validatorsInfo) @@ -541,13 +568,6 @@ func doUnStake(t *testing.T, systemVm vmcommon.VMExecutionHandler, accountsDB st saveOutputAccounts(t, accountsDB, vmOutput) } -func loadSCAccount(accountsDB state.AccountsAdapter, address []byte) state.UserAccountHandler { - acc, _ := accountsDB.LoadAccount(address) - stakingSCAcc := acc.(state.UserAccountHandler) - - return stakingSCAcc -} - func createEligibleNodes(numNodes int, stakingSCAcc state.UserAccountHandler, marshalizer marshal.Marshalizer) { for i := 0; i < numNodes; i++ { stakedData := &systemSmartContracts.StakedDataV2_0{ @@ -563,8 +583,8 @@ func createEligibleNodes(numNodes int, stakingSCAcc state.UserAccountHandler, ma } } -func createJailedNodes(numNodes int, stakingSCAcc state.UserAccountHandler, userAccounts state.AccountsAdapter, peerAccounts state.AccountsAdapter, marshalizer marshal.Marshalizer) []*state.ValidatorInfo { - validatorInfos := make([]*state.ValidatorInfo, 0) +func createJailedNodes(numNodes int, stakingSCAcc state.UserAccountHandler, userAccounts state.AccountsAdapter, peerAccounts state.AccountsAdapter, marshalizer marshal.Marshalizer) []state.ValidatorInfoHandler { + validatorInfos := make([]state.ValidatorInfoHandler, 0) for i := 0; i < numNodes; i++ { stakedData := &systemSmartContracts.StakedDataV2_0{ @@ -603,8 +623,8 @@ func addValidatorDataWithUnStakedKey( nodePrice *big.Int, marshalizer marshal.Marshalizer, ) { - stakingAccount := loadSCAccount(accountsDB, vm.StakingSCAddress) - validatorAccount := loadSCAccount(accountsDB, vm.ValidatorSCAddress) + stakingAccount := stakingcommon.LoadUserAccount(accountsDB, vm.StakingSCAddress) + validatorAccount := stakingcommon.LoadUserAccount(accountsDB, vm.ValidatorSCAddress) validatorData := &systemSmartContracts.ValidatorDataV2{ RegisterNonce: 0, @@ -705,50 +725,6 @@ func createWaitingNodes(numNodes int, stakingSCAcc state.UserAccountHandler, use return validatorInfos } -func addValidatorData( - accountsDB state.AccountsAdapter, - ownerKey []byte, - registeredKeys [][]byte, - totalStake *big.Int, - marshalizer marshal.Marshalizer, -) { - validatorSC := loadSCAccount(accountsDB, vm.ValidatorSCAddress) - validatorData := &systemSmartContracts.ValidatorDataV2{ - RegisterNonce: 0, - Epoch: 0, - RewardAddress: ownerKey, - TotalStakeValue: totalStake, - LockedStake: big.NewInt(0), - TotalUnstaked: big.NewInt(0), - BlsPubKeys: registeredKeys, - NumRegistered: uint32(len(registeredKeys)), - } - - marshaledData, _ := marshalizer.Marshal(validatorData) - _ = validatorSC.SaveKeyValue(ownerKey, marshaledData) - - _ = accountsDB.SaveAccount(validatorSC) -} - -func addStakedData( - accountsDB 
state.AccountsAdapter, - stakedKey []byte, - ownerKey []byte, - marshalizer marshal.Marshalizer, -) { - stakingSCAcc := loadSCAccount(accountsDB, vm.StakingSCAddress) - stakedData := &systemSmartContracts.StakedDataV2_0{ - Staked: true, - RewardAddress: ownerKey, - OwnerAddress: ownerKey, - StakeValue: big.NewInt(0), - } - marshaledData, _ := marshalizer.Marshal(stakedData) - _ = stakingSCAcc.SaveKeyValue(stakedKey, marshaledData) - - _ = accountsDB.SaveAccount(stakingSCAcc) -} - func prepareStakingContractWithData( accountsDB state.AccountsAdapter, stakedKey []byte, @@ -757,139 +733,14 @@ func prepareStakingContractWithData( rewardAddress []byte, ownerAddress []byte, ) { - stakingSCAcc := loadSCAccount(accountsDB, vm.StakingSCAddress) - - stakedData := &systemSmartContracts.StakedDataV2_0{ - Staked: true, - RewardAddress: rewardAddress, - OwnerAddress: ownerAddress, - StakeValue: big.NewInt(100), - } - marshaledData, _ := marshalizer.Marshal(stakedData) - _ = stakingSCAcc.SaveKeyValue(stakedKey, marshaledData) - _ = accountsDB.SaveAccount(stakingSCAcc) + stakingcommon.AddStakingData(accountsDB, ownerAddress, rewardAddress, [][]byte{stakedKey}, marshalizer) + stakingcommon.AddKeysToWaitingList(accountsDB, [][]byte{waitingKey}, marshalizer, rewardAddress, ownerAddress) + stakingcommon.AddValidatorData(accountsDB, rewardAddress, [][]byte{stakedKey, waitingKey}, big.NewInt(10000000000), marshalizer) - saveOneKeyToWaitingList(accountsDB, waitingKey, marshalizer, rewardAddress, ownerAddress) - - validatorSC := loadSCAccount(accountsDB, vm.ValidatorSCAddress) - validatorData := &systemSmartContracts.ValidatorDataV2{ - RegisterNonce: 0, - Epoch: 0, - RewardAddress: rewardAddress, - TotalStakeValue: big.NewInt(10000000000), - LockedStake: big.NewInt(10000000000), - TotalUnstaked: big.NewInt(0), - NumRegistered: 2, - BlsPubKeys: [][]byte{stakedKey, waitingKey}, - } - - marshaledData, _ = marshalizer.Marshal(validatorData) - _ = validatorSC.SaveKeyValue(rewardAddress, marshaledData) - - _ = accountsDB.SaveAccount(validatorSC) _, err := accountsDB.Commit() log.LogIfError(err) } -func saveOneKeyToWaitingList( - accountsDB state.AccountsAdapter, - waitingKey []byte, - marshalizer marshal.Marshalizer, - rewardAddress []byte, - ownerAddress []byte, -) { - stakingSCAcc := loadSCAccount(accountsDB, vm.StakingSCAddress) - stakedData := &systemSmartContracts.StakedDataV2_0{ - Waiting: true, - RewardAddress: rewardAddress, - OwnerAddress: ownerAddress, - StakeValue: big.NewInt(100), - } - marshaledData, _ := marshalizer.Marshal(stakedData) - _ = stakingSCAcc.SaveKeyValue(waitingKey, marshaledData) - - waitingKeyInList := []byte("w_" + string(waitingKey)) - waitingListHead := &systemSmartContracts.WaitingList{ - FirstKey: waitingKeyInList, - LastKey: waitingKeyInList, - Length: 1, - } - marshaledData, _ = marshalizer.Marshal(waitingListHead) - _ = stakingSCAcc.SaveKeyValue([]byte("waitingList"), marshaledData) - - waitingListElement := &systemSmartContracts.ElementInList{ - BLSPublicKey: waitingKey, - PreviousKey: waitingKeyInList, - NextKey: make([]byte, 0), - } - marshaledData, _ = marshalizer.Marshal(waitingListElement) - _ = stakingSCAcc.SaveKeyValue(waitingKeyInList, marshaledData) - - _ = accountsDB.SaveAccount(stakingSCAcc) -} - -func addKeysToWaitingList( - accountsDB state.AccountsAdapter, - waitingKeys [][]byte, - marshalizer marshal.Marshalizer, - rewardAddress []byte, - ownerAddress []byte, -) { - stakingSCAcc := loadSCAccount(accountsDB, vm.StakingSCAddress) - - for _, waitingKey := range 
waitingKeys { - stakedData := &systemSmartContracts.StakedDataV2_0{ - Waiting: true, - RewardAddress: rewardAddress, - OwnerAddress: ownerAddress, - StakeValue: big.NewInt(100), - } - marshaledData, _ := marshalizer.Marshal(stakedData) - _ = stakingSCAcc.SaveKeyValue(waitingKey, marshaledData) - } - - marshaledData, _, _ := stakingSCAcc.RetrieveValue([]byte("waitingList")) - waitingListHead := &systemSmartContracts.WaitingList{} - _ = marshalizer.Unmarshal(waitingListHead, marshaledData) - waitingListHead.Length += uint32(len(waitingKeys)) - lastKeyInList := []byte("w_" + string(waitingKeys[len(waitingKeys)-1])) - waitingListHead.LastKey = lastKeyInList - - marshaledData, _ = marshalizer.Marshal(waitingListHead) - _ = stakingSCAcc.SaveKeyValue([]byte("waitingList"), marshaledData) - - numWaitingKeys := len(waitingKeys) - previousKey := waitingListHead.FirstKey - for i, waitingKey := range waitingKeys { - - waitingKeyInList := []byte("w_" + string(waitingKey)) - waitingListElement := &systemSmartContracts.ElementInList{ - BLSPublicKey: waitingKey, - PreviousKey: previousKey, - NextKey: make([]byte, 0), - } - - if i < numWaitingKeys-1 { - nextKey := []byte("w_" + string(waitingKeys[i+1])) - waitingListElement.NextKey = nextKey - } - - marshaledData, _ = marshalizer.Marshal(waitingListElement) - _ = stakingSCAcc.SaveKeyValue(waitingKeyInList, marshaledData) - - previousKey = waitingKeyInList - } - - marshaledData, _, _ = stakingSCAcc.RetrieveValue(waitingListHead.FirstKey) - waitingListElement := &systemSmartContracts.ElementInList{} - _ = marshalizer.Unmarshal(waitingListElement, marshaledData) - waitingListElement.NextKey = []byte("w_" + string(waitingKeys[0])) - marshaledData, _ = marshalizer.Marshal(waitingListElement) - _ = stakingSCAcc.SaveKeyValue(waitingListHead.FirstKey, marshaledData) - - _ = accountsDB.SaveAccount(stakingSCAcc) -} - func createAccountsDB( hasher hashing.Hasher, marshaller marshal.Marshalizer, @@ -936,6 +787,9 @@ func createFullArgumentsForSystemSCProcessing(enableEpochsConfig config.EnableEp accCreator, _ := factory.NewAccountCreator(argsAccCreator) peerAccCreator := factory.NewPeerAccountCreator() en := forking.NewGenericEpochNotifier() + enableEpochsConfig.StakeLimitsEnableEpoch = 10 + enableEpochsConfig.StakingV4Step1EnableEpoch = 444 + enableEpochsConfig.StakingV4Step2EnableEpoch = 445 epochsConfig := &config.EpochConfig{ EnableEpochs: enableEpochsConfig, } @@ -953,7 +807,7 @@ func createFullArgumentsForSystemSCProcessing(enableEpochsConfig config.EnableEp PeerAdapter: peerAccountsDB, Rater: &mock.RaterStub{}, RewardsHandler: &mock.RewardsHandlerStub{}, - NodesSetup: &mock.NodesSetupStub{}, + NodesSetup: &genesisMocks.NodesSetupStub{}, MaxComputableRounds: 1, MaxConsecutiveRoundsOfRatingDecrease: 2000, EnableEpochsHandler: enableEpochsHandler, @@ -961,15 +815,14 @@ func createFullArgumentsForSystemSCProcessing(enableEpochsConfig config.EnableEp vCreator, _ := peer.NewValidatorStatisticsProcessor(argsValidatorsProcessor) blockChain, _ := blockchain.NewMetaChain(&statusHandlerMock.AppStatusHandlerStub{}) + gasSchedule := wasmConfig.MakeGasMapForTests() + gasScheduleNotifier := testscommon.NewGasScheduleNotifierMock(gasSchedule) testDataPool := dataRetrieverMock.NewPoolsHolderMock() - gasSchedule := wasmConfig.MakeGasMapForTests() defaults.FillGasMapInternal(gasSchedule, 1) signVerifer, _ := disabled.NewMessageSignVerifier(&cryptoMocks.KeyGenStub{}) - gasScheduleNotifier := testscommon.NewGasScheduleNotifierMock(gasSchedule) - - nodesSetup := 
&mock.NodesSetupStub{} + nodesSetup := &genesisMocks.NodesSetupStub{} argsHook := hooks.ArgBlockChainHook{ Accounts: userAccountsDB, @@ -979,10 +832,10 @@ func createFullArgumentsForSystemSCProcessing(enableEpochsConfig config.EnableEp ShardCoordinator: &mock.ShardCoordinatorStub{}, Marshalizer: marshalizer, Uint64Converter: &mock.Uint64ByteSliceConverterMock{}, - BuiltInFunctions: vmcommonBuiltInFunctions.NewBuiltInFunctionContainer(), NFTStorageHandler: &testscommon.SimpleNFTStorageHandlerStub{}, - GlobalSettingsHandler: &testscommon.ESDTGlobalSettingsHandlerStub{}, + BuiltInFunctions: vmcommonBuiltInFunctions.NewBuiltInFunctionContainer(), DataPool: testDataPool, + GlobalSettingsHandler: &testscommon.ESDTGlobalSettingsHandlerStub{}, CompiledSCPool: testDataPool.SmartContracts(), EpochNotifier: en, EnableEpochsHandler: enableEpochsHandler, @@ -992,11 +845,13 @@ func createFullArgumentsForSystemSCProcessing(enableEpochsConfig config.EnableEp MissingTrieNodesNotifier: &testscommon.MissingTrieNodesNotifierStub{}, } + defaults.FillGasMapInternal(gasSchedule, 1) + blockChainHookImpl, _ := hooks.NewBlockChainHookImpl(argsHook) argsNewVMContainerFactory := metaProcess.ArgsNewVMContainerFactory{ BlockChainHook: blockChainHookImpl, PubkeyConv: argsHook.PubkeyConv, - Economics: createEconomicsData(), + Economics: stakingcommon.CreateEconomicsData(), MessageSignVerifier: signVerifer, GasSchedule: gasScheduleNotifier, NodesConfigProvider: nodesSetup, @@ -1032,6 +887,8 @@ func createFullArgumentsForSystemSCProcessing(enableEpochsConfig config.EnableEp MaxNumberOfNodesForStake: 5, ActivateBLSPubKeyMessageVerification: false, MinUnstakeTokensValue: "1", + StakeLimitPercentage: 100.0, + NodeLimitPercentage: 100.0, }, DelegationManagerSystemSCConfig: config.DelegationManagerSystemSCConfig{ MinCreationDeposit: "100", @@ -1042,21 +899,57 @@ func createFullArgumentsForSystemSCProcessing(enableEpochsConfig config.EnableEp MinServiceFee: 0, MaxServiceFee: 100, }, + SoftAuctionConfig: config.SoftAuctionConfig{ + TopUpStep: "10", + MinTopUp: "1", + MaxTopUp: "32000000", + MaxNumberOfIterations: 100000, + }, }, ValidatorAccountsDB: peerAccountsDB, UserAccountsDB: userAccountsDB, ChanceComputer: &mock.ChanceComputerStub{}, ShardCoordinator: &mock.ShardCoordinatorStub{}, EnableEpochsHandler: enableEpochsHandler, + NodesCoordinator: &shardingMocks.NodesCoordinatorStub{}, + ArgBlockChainHook: argsHook, } metaVmFactory, _ := metaProcess.NewVMContainerFactory(argsNewVMContainerFactory) vmContainer, _ := metaVmFactory.Create() systemVM, _ := vmContainer.Get(vmFactory.SystemVirtualMachine) - stakingSCprovider, _ := NewStakingDataProvider(systemVM, "1000") + argsStakingDataProvider := StakingDataProviderArgs{ + EnableEpochsHandler: enableEpochsHandler, + SystemVM: systemVM, + MinNodePrice: "1000", + } + stakingSCProvider, _ := NewStakingDataProvider(argsStakingDataProvider) shardCoordinator, _ := sharding.NewMultiShardCoordinator(3, core.MetachainShardId) + nodesConfigProvider, _ := notifier.NewNodesConfigProvider(en, nil) + auctionCfg := config.SoftAuctionConfig{ + TopUpStep: "10", + MinTopUp: "1", + MaxTopUp: "32000000", + MaxNumberOfIterations: 100000, + } + ald, _ := NewAuctionListDisplayer(ArgsAuctionListDisplayer{ + TableDisplayHandler: NewTableDisplayer(), + ValidatorPubKeyConverter: &testscommon.PubkeyConverterMock{}, + AddressPubKeyConverter: &testscommon.PubkeyConverterMock{}, + AuctionConfig: auctionCfg, + Denomination: 0, + }) + argsAuctionListSelector := AuctionListSelectorArgs{ + ShardCoordinator: 
shardCoordinator, + StakingDataProvider: stakingSCProvider, + MaxNodesChangeConfigProvider: nodesConfigProvider, + AuctionListDisplayHandler: ald, + SoftAuctionConfig: auctionCfg, + } + als, _ := NewAuctionListSelector(argsAuctionListSelector) + args := ArgsNewEpochStartSystemSCProcessing{ SystemVM: systemVM, UserAccountsDB: userAccountsDB, @@ -1069,7 +962,8 @@ func createFullArgumentsForSystemSCProcessing(enableEpochsConfig config.EnableEp ChanceComputer: &mock.ChanceComputerStub{}, EpochNotifier: en, GenesisNodesConfig: nodesSetup, - StakingDataProvider: stakingSCprovider, + StakingDataProvider: stakingSCProvider, + AuctionListSelector: als, NodesConfigProvider: &shardingMocks.NodesCoordinatorStub{ ConsensusGroupSizeCalled: func(shardID uint32) int { if shardID == core.MetachainShardId { @@ -1078,87 +972,189 @@ func createFullArgumentsForSystemSCProcessing(enableEpochsConfig config.EnableEp return 63 }, }, - ShardCoordinator: shardCoordinator, - ESDTOwnerAddressBytes: bytes.Repeat([]byte{1}, 32), - EnableEpochsHandler: enableEpochsHandler, + ShardCoordinator: shardCoordinator, + ESDTOwnerAddressBytes: bytes.Repeat([]byte{1}, 32), + MaxNodesChangeConfigProvider: nodesConfigProvider, + EnableEpochsHandler: enableEpochsHandler, } return args, metaVmFactory.SystemSmartContractContainer() } -func createEconomicsData() process.EconomicsDataHandler { - maxGasLimitPerBlock := strconv.FormatUint(1500000000, 10) - minGasPrice := strconv.FormatUint(10, 10) - minGasLimit := strconv.FormatUint(10, 10) - - argsNewEconomicsData := economicsHandler.ArgsNewEconomicsData{ - Economics: &config.EconomicsConfig{ - GlobalSettings: config.GlobalSettings{ - GenesisTotalSupply: "2000000000000000000000", - MinimumInflation: 0, - YearSettings: []*config.YearSetting{ - { - Year: 0, - MaximumInflation: 0.01, - }, - }, +func TestSystemSCProcessor_ProcessSystemSmartContractInitDelegationMgr(t *testing.T) { + t.Parallel() + + expectedErr := errors.New("expected error") + t.Run("flag not active", func(t *testing.T) { + args := createMockArgsForSystemSCProcessor() + args.EnableEpochsHandler = &enableEpochsHandlerMock.EnableEpochsHandlerStub{ + IsFlagEnabledCalled: func(flag core.EnableEpochFlag) bool { + if flag == common.GovernanceFlagInSpecificEpochOnly || + flag == common.StakingV4Step1Flag || + flag == common.StakingV4Step2Flag || + flag == common.SwitchHysteresisForMinNodesFlagInSpecificEpochOnly || + flag == common.StakingV2OwnerFlagInSpecificEpochOnly || + flag == common.CorrectLastUnJailedFlagInSpecificEpochOnly || + flag == common.DelegationSmartContractFlagInSpecificEpochOnly || + flag == common.CorrectLastUnJailedFlag || + flag == common.SwitchJailWaitingFlag || + flag == common.StakingV2Flag || + flag == common.ESDTFlagInSpecificEpochOnly { + + return false + } + + return true }, - RewardsSettings: config.RewardsSettings{ - RewardsConfigByEpoch: []config.EpochRewardSettings{ - { - LeaderPercentage: 0.1, - DeveloperPercentage: 0.1, - ProtocolSustainabilityPercentage: 0.1, - ProtocolSustainabilityAddress: "protocol", - TopUpGradientPoint: "300000000000000000000", - TopUpFactor: 0.25, - }, - }, + } + args.SystemVM = &mock.VMExecutionHandlerStub{ + RunSmartContractCreateCalled: func(input *vmcommon.ContractCreateInput) (*vmcommon.VMOutput, error) { + assert.Fail(t, "should have not called") + + return nil, fmt.Errorf("should have not called") }, - FeeSettings: config.FeeSettings{ - GasLimitSettings: []config.GasLimitSetting{ - { - MaxGasLimitPerBlock: maxGasLimitPerBlock, - MaxGasLimitPerMiniBlock: 
maxGasLimitPerBlock, - MaxGasLimitPerMetaBlock: maxGasLimitPerBlock, - MaxGasLimitPerMetaMiniBlock: maxGasLimitPerBlock, - MaxGasLimitPerTx: maxGasLimitPerBlock, - MinGasLimit: minGasLimit, - ExtraGasLimitGuardedTx: "50000", - }, - }, - MinGasPrice: minGasPrice, - GasPerDataByte: "1", - GasPriceModifier: 1.0, - MaxGasPriceSetGuardian: "100000", + } + processor, _ := NewSystemSCProcessor(args) + require.NotNil(t, processor) + + validatorsInfo := state.NewShardValidatorsInfoMap() + err := processor.ProcessSystemSmartContract(validatorsInfo, &block.Header{}) + require.Nil(t, err) + }) + t.Run("flag active", func(t *testing.T) { + args := createMockArgsForSystemSCProcessor() + args.EnableEpochsHandler = &enableEpochsHandlerMock.EnableEpochsHandlerStub{ + IsFlagEnabledCalled: func(flag core.EnableEpochFlag) bool { + return flag == common.DelegationSmartContractFlagInSpecificEpochOnly }, - }, - EpochNotifier: &epochNotifier.EpochNotifierStub{}, - EnableEpochsHandler: &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, - BuiltInFunctionsCostHandler: &mock.BuiltInCostHandlerStub{}, - TxVersionChecker: &testscommon.TxVersionCheckerStub{}, - } - economicsData, _ := economicsHandler.NewEconomicsData(argsNewEconomicsData) - return economicsData + } + runSmartContractCreateCalled := false + args.SystemVM = &mock.VMExecutionHandlerStub{ + RunSmartContractCreateCalled: func(input *vmcommon.ContractCreateInput) (*vmcommon.VMOutput, error) { + runSmartContractCreateCalled = true + + return &vmcommon.VMOutput{}, nil + }, + } + processor, _ := NewSystemSCProcessor(args) + require.NotNil(t, processor) + + validatorsInfo := state.NewShardValidatorsInfoMap() + err := processor.ProcessSystemSmartContract(validatorsInfo, &block.Header{}) + require.Nil(t, err) + require.True(t, runSmartContractCreateCalled) + }) + t.Run("flag active but contract create call errors, should error", func(t *testing.T) { + args := createMockArgsForSystemSCProcessor() + args.EnableEpochsHandler = &enableEpochsHandlerMock.EnableEpochsHandlerStub{ + IsFlagEnabledCalled: func(flag core.EnableEpochFlag) bool { + return flag == common.DelegationSmartContractFlagInSpecificEpochOnly + }, + } + runSmartContractCreateCalled := false + args.SystemVM = &mock.VMExecutionHandlerStub{ + RunSmartContractCreateCalled: func(input *vmcommon.ContractCreateInput) (*vmcommon.VMOutput, error) { + runSmartContractCreateCalled = true + + return nil, expectedErr + }, + } + processor, _ := NewSystemSCProcessor(args) + require.NotNil(t, processor) + + validatorsInfo := state.NewShardValidatorsInfoMap() + err := processor.ProcessSystemSmartContract(validatorsInfo, &block.Header{}) + require.ErrorIs(t, err, expectedErr) + require.True(t, runSmartContractCreateCalled) + }) } -func TestSystemSCProcessor_ProcessSystemSmartContractInitDelegationMgr(t *testing.T) { +func TestSystemSCProcessor_ProcessSystemSmartContractInitGovernance(t *testing.T) { t.Parallel() - args, _ := createFullArgumentsForSystemSCProcessing(config.EnableEpochs{ - StakingV2EnableEpoch: 1000, - }, testscommon.CreateMemUnit()) - s, _ := NewSystemSCProcessor(args) + expectedErr := errors.New("expected error") + t.Run("flag not active", func(t *testing.T) { + args := createMockArgsForSystemSCProcessor() + args.EnableEpochsHandler = &enableEpochsHandlerMock.EnableEpochsHandlerStub{ + IsFlagEnabledCalled: func(flag core.EnableEpochFlag) bool { + if flag == common.GovernanceFlagInSpecificEpochOnly || + flag == common.StakingV4Step1Flag || + flag == common.StakingV4Step2Flag || + flag == 
common.SwitchHysteresisForMinNodesFlagInSpecificEpochOnly || + flag == common.StakingV2OwnerFlagInSpecificEpochOnly || + flag == common.CorrectLastUnJailedFlagInSpecificEpochOnly || + flag == common.DelegationSmartContractFlagInSpecificEpochOnly || + flag == common.CorrectLastUnJailedFlag || + flag == common.SwitchJailWaitingFlag || + flag == common.StakingV2Flag || + flag == common.ESDTFlagInSpecificEpochOnly { + + return false + } - validatorInfos := make(map[uint32][]*state.ValidatorInfo) - err := s.ProcessSystemSmartContract(validatorInfos, 0, 0) - assert.Nil(t, err) + return true + }, + } + args.SystemVM = &mock.VMExecutionHandlerStub{ + RunSmartContractCallCalled: func(input *vmcommon.ContractCallInput) (*vmcommon.VMOutput, error) { + assert.Fail(t, "should have not called") - acc, err := s.userAccountsDB.GetExistingAccount(vm.DelegationManagerSCAddress) - assert.Nil(t, err) + return nil, fmt.Errorf("should have not called") + }, + } + processor, _ := NewSystemSCProcessor(args) + require.NotNil(t, processor) - userAcc, _ := acc.(state.UserAccountHandler) - assert.Equal(t, userAcc.GetOwnerAddress(), vm.DelegationManagerSCAddress) - assert.NotNil(t, userAcc.GetCodeMetadata()) + validatorsInfo := state.NewShardValidatorsInfoMap() + err := processor.ProcessSystemSmartContract(validatorsInfo, &block.Header{}) + require.Nil(t, err) + }) + t.Run("flag active", func(t *testing.T) { + args := createMockArgsForSystemSCProcessor() + args.EnableEpochsHandler = &enableEpochsHandlerMock.EnableEpochsHandlerStub{ + IsFlagEnabledCalled: func(flag core.EnableEpochFlag) bool { + return flag == common.GovernanceFlagInSpecificEpochOnly + }, + } + runSmartContractCreateCalled := false + args.SystemVM = &mock.VMExecutionHandlerStub{ + RunSmartContractCallCalled: func(input *vmcommon.ContractCallInput) (*vmcommon.VMOutput, error) { + runSmartContractCreateCalled = true + + return &vmcommon.VMOutput{}, nil + }, + } + processor, _ := NewSystemSCProcessor(args) + require.NotNil(t, processor) + + validatorsInfo := state.NewShardValidatorsInfoMap() + err := processor.ProcessSystemSmartContract(validatorsInfo, &block.Header{}) + require.Nil(t, err) + require.True(t, runSmartContractCreateCalled) + }) + t.Run("flag active but contract call errors, should error", func(t *testing.T) { + args := createMockArgsForSystemSCProcessor() + args.EnableEpochsHandler = &enableEpochsHandlerMock.EnableEpochsHandlerStub{ + IsFlagEnabledCalled: func(flag core.EnableEpochFlag) bool { + return flag == common.GovernanceFlagInSpecificEpochOnly + }, + } + runSmartContractCreateCalled := false + args.SystemVM = &mock.VMExecutionHandlerStub{ + RunSmartContractCallCalled: func(input *vmcommon.ContractCallInput) (*vmcommon.VMOutput, error) { + runSmartContractCreateCalled = true + + return nil, expectedErr + }, + } + processor, _ := NewSystemSCProcessor(args) + require.NotNil(t, processor) + + validatorsInfo := state.NewShardValidatorsInfoMap() + err := processor.ProcessSystemSmartContract(validatorsInfo, &block.Header{}) + require.ErrorIs(t, err, expectedErr) + require.Contains(t, err.Error(), "governanceV2") + require.True(t, runSmartContractCreateCalled) + }) } func TestSystemSCProcessor_ProcessDelegationRewardsNothingToExecute(t *testing.T) { @@ -1294,7 +1290,8 @@ func TestSystemSCProcessor_ProcessSystemSmartContractMaxNodesStakedFromQueue(t * t.Parallel() args, _ := createFullArgumentsForSystemSCProcessing(config.EnableEpochs{}, testscommon.CreateMemUnit()) - args.MaxNodesEnableConfig = []config.MaxNodesChangeConfig{{EpochEnable: 0, 
MaxNumNodes: 10}} + nodesConfigProvider, _ := notifier.NewNodesConfigProvider(args.EpochNotifier, []config.MaxNodesChangeConfig{{EpochEnable: 0, MaxNumNodes: 10}}) + args.MaxNodesChangeConfigProvider = nodesConfigProvider s, _ := NewSystemSCProcessor(args) prepareStakingContractWithData( @@ -1306,8 +1303,8 @@ func TestSystemSCProcessor_ProcessSystemSmartContractMaxNodesStakedFromQueue(t * []byte("rewardAddress"), ) - validatorInfos := make(map[uint32][]*state.ValidatorInfo) - err := s.ProcessSystemSmartContract(validatorInfos, 0, 0) + validatorsInfo := state.NewShardValidatorsInfoMap() + err := s.ProcessSystemSmartContract(validatorsInfo, &block.Header{}) assert.Nil(t, err) peerAcc, err := s.getPeerAccount([]byte("waitingPubKey")) @@ -1341,10 +1338,12 @@ func getTotalNumberOfRegisteredNodes(t *testing.T, s *systemSCProcessor) int { func TestSystemSCProcessor_ProcessSystemSmartContractMaxNodesStakedFromQueueOwnerNotSet(t *testing.T) { t.Parallel() + maxNodesChangeConfig := []config.MaxNodesChangeConfig{{EpochEnable: 10, MaxNumNodes: 10}} args, _ := createFullArgumentsForSystemSCProcessing(config.EnableEpochs{ - StakingV2EnableEpoch: 10, + MaxNodesChangeEnableEpoch: maxNodesChangeConfig, + StakingV2EnableEpoch: 10, }, testscommon.CreateMemUnit()) - args.MaxNodesEnableConfig = []config.MaxNodesChangeConfig{{EpochEnable: 10, MaxNumNodes: 10}} + args.MaxNodesChangeConfigProvider, _ = notifier.NewNodesConfigProvider(args.EpochNotifier, maxNodesChangeConfig) s, _ := NewSystemSCProcessor(args) prepareStakingContractWithData( @@ -1359,8 +1358,8 @@ func TestSystemSCProcessor_ProcessSystemSmartContractMaxNodesStakedFromQueueOwne args.EpochNotifier.CheckEpoch(&testscommon.HeaderHandlerStub{ EpochField: 10, }) - validatorInfos := make(map[uint32][]*state.ValidatorInfo) - err := s.ProcessSystemSmartContract(validatorInfos, 0, 10) + validatorsInfo := state.NewShardValidatorsInfoMap() + err := s.ProcessSystemSmartContract(validatorsInfo, &block.Header{Epoch: 10}) assert.Nil(t, err) peerAcc, err := s.getPeerAccount([]byte("waitingPubKey")) @@ -1387,7 +1386,7 @@ func TestSystemSCProcessor_ESDTInitShouldWork(t *testing.T) { require.Equal(t, 4, len(initialContractConfig)) require.Equal(t, []byte("aaaaaa"), initialContractConfig[0]) - err = s.ProcessSystemSmartContract(nil, 1, 1) + err = s.ProcessSystemSmartContract(state.NewShardValidatorsInfoMap(), &block.Header{Nonce: 1, Epoch: 1}) require.Nil(t, err) @@ -1415,47 +1414,48 @@ func TestSystemSCProcessor_ProcessSystemSmartContractUnStakeOneNodeStakeOthers(t []byte("rewardAddress"), []byte("rewardAddress"), ) + stakingcommon.RegisterValidatorKeys(args.UserAccountsDB, + []byte("ownerKey"), + []byte("ownerKey"), + [][]byte{[]byte("stakedPubKey1"), []byte("stakedPubKey2"), []byte("stakedPubKey3")}, + big.NewInt(2000), + args.Marshalizer, + ) - addStakedData(args.UserAccountsDB, []byte("stakedPubKey1"), []byte("ownerKey"), args.Marshalizer) - addStakedData(args.UserAccountsDB, []byte("stakedPubKey2"), []byte("ownerKey"), args.Marshalizer) - addStakedData(args.UserAccountsDB, []byte("stakedPubKey3"), []byte("ownerKey"), args.Marshalizer) - addValidatorData(args.UserAccountsDB, []byte("ownerKey"), [][]byte{[]byte("stakedPubKey1"), []byte("stakedPubKey2"), []byte("stakedPubKey3")}, big.NewInt(2000), args.Marshalizer) - _, _ = args.UserAccountsDB.Commit() - - validatorInfos := make(map[uint32][]*state.ValidatorInfo) - validatorInfos[0] = append(validatorInfos[0], &state.ValidatorInfo{ + validatorsInfo := state.NewShardValidatorsInfoMap() + _ = 
validatorsInfo.Add(&state.ValidatorInfo{ PublicKey: []byte("stakedPubKey0"), List: string(common.EligibleList), RewardAddress: []byte("rewardAddress"), AccumulatedFees: big.NewInt(0), }) - validatorInfos[0] = append(validatorInfos[0], &state.ValidatorInfo{ + _ = validatorsInfo.Add(&state.ValidatorInfo{ PublicKey: []byte("stakedPubKey1"), List: string(common.EligibleList), RewardAddress: []byte("rewardAddress"), AccumulatedFees: big.NewInt(0), }) - validatorInfos[0] = append(validatorInfos[0], &state.ValidatorInfo{ + _ = validatorsInfo.Add(&state.ValidatorInfo{ PublicKey: []byte("stakedPubKey2"), List: string(common.EligibleList), RewardAddress: []byte("rewardAddress"), AccumulatedFees: big.NewInt(0), }) - validatorInfos[0] = append(validatorInfos[0], &state.ValidatorInfo{ + _ = validatorsInfo.Add(&state.ValidatorInfo{ PublicKey: []byte("stakedPubKey3"), List: string(common.EligibleList), RewardAddress: []byte("rewardAddress"), AccumulatedFees: big.NewInt(0), }) - for _, vInfo := range validatorInfos[0] { - jailedAcc, _ := args.PeerAccountsDB.LoadAccount(vInfo.PublicKey) + for _, vInfo := range validatorsInfo.GetShardValidatorsInfoMap()[0] { + jailedAcc, _ := args.PeerAccountsDB.LoadAccount(vInfo.GetPublicKey()) _ = args.PeerAccountsDB.SaveAccount(jailedAcc) } args.EpochNotifier.CheckEpoch(&testscommon.HeaderHandlerStub{ EpochField: 1, // disable stakingV2OwnerFlag }) - err := s.ProcessSystemSmartContract(validatorInfos, 0, 0) + err := s.ProcessSystemSmartContract(validatorsInfo, &block.Header{}) assert.Nil(t, err) peerAcc, err := s.getPeerAccount([]byte("waitingPubKey")) @@ -1466,10 +1466,10 @@ func TestSystemSCProcessor_ProcessSystemSmartContractUnStakeOneNodeStakeOthers(t peerAcc, _ = s.getPeerAccount([]byte("stakedPubKey1")) assert.Equal(t, peerAcc.GetList(), string(common.LeavingList)) - assert.Equal(t, string(common.LeavingList), validatorInfos[0][1].List) + assert.Equal(t, string(common.LeavingList), validatorsInfo.GetShardValidatorsInfoMap()[0][1].GetList()) - assert.Equal(t, 5, len(validatorInfos[0])) - assert.Equal(t, string(common.NewList), validatorInfos[0][4].List) + assert.Len(t, validatorsInfo.GetShardValidatorsInfoMap()[0], 5) + assert.Equal(t, string(common.NewList), validatorsInfo.GetShardValidatorsInfoMap()[0][4].GetList()) } func TestSystemSCProcessor_ProcessSystemSmartContractUnStakeTheOnlyNodeShouldWork(t *testing.T) { @@ -1487,18 +1487,18 @@ func TestSystemSCProcessor_ProcessSystemSmartContractUnStakeTheOnlyNodeShouldWor []byte("rewardAddress"), ) - addStakedData(args.UserAccountsDB, []byte("stakedPubKey1"), []byte("ownerKey"), args.Marshalizer) + stakingcommon.AddStakingData(args.UserAccountsDB, []byte("ownerKey"), []byte("ownerKey"), [][]byte{[]byte("stakedPubKey1")}, args.Marshalizer) addValidatorDataWithUnStakedKey(args.UserAccountsDB, []byte("ownerKey"), [][]byte{[]byte("stakedPubKey1")}, big.NewInt(1000), args.Marshalizer) _, _ = args.UserAccountsDB.Commit() - validatorInfos := make(map[uint32][]*state.ValidatorInfo) - validatorInfos[0] = append(validatorInfos[0], &state.ValidatorInfo{ + validatorsInfo := state.NewShardValidatorsInfoMap() + _ = validatorsInfo.Add(&state.ValidatorInfo{ PublicKey: []byte("stakedPubKey0"), List: string(common.EligibleList), RewardAddress: []byte("rewardAddress"), AccumulatedFees: big.NewInt(0), }) - validatorInfos[0] = append(validatorInfos[0], &state.ValidatorInfo{ + _ = validatorsInfo.Add(&state.ValidatorInfo{ PublicKey: []byte("stakedPubKey1"), List: string(common.EligibleList), RewardAddress: []byte("rewardAddress"), @@ -1508,7 
+1508,7 @@ func TestSystemSCProcessor_ProcessSystemSmartContractUnStakeTheOnlyNodeShouldWor args.EpochNotifier.CheckEpoch(&testscommon.HeaderHandlerStub{ EpochField: 1, // disable stakingV2OwnerFlag }) - err := s.ProcessSystemSmartContract(validatorInfos, 0, 0) + err := s.ProcessSystemSmartContract(validatorsInfo, &block.Header{}) assert.Nil(t, err) } @@ -1518,7 +1518,7 @@ func addDelegationData( stakedKeys [][]byte, marshalizer marshal.Marshalizer, ) { - delegatorSC := loadSCAccount(accountsDB, delegation) + delegatorSC := stakingcommon.LoadUserAccount(accountsDB, delegation) dStatus := &systemSmartContracts.DelegationContractStatus{ StakedKeys: make([]*systemSmartContracts.NodesData, 0), NotStakedKeys: make([]*systemSmartContracts.NodesData, 0), @@ -1546,68 +1546,71 @@ func TestSystemSCProcessor_ProcessSystemSmartContractUnStakeFromDelegationContra contract, _ := scContainer.Get(vm.FirstDelegationSCAddress) _ = scContainer.Add(delegationAddr, contract) - prepareStakingContractWithData( + stakingcommon.AddStakingData( args.UserAccountsDB, - []byte("stakedPubKey0"), - []byte("waitingPubKey"), + delegationAddr, + delegationAddr, + [][]byte{[]byte("stakedPubKey1"), []byte("stakedPubKey2"), []byte("stakedPubKey3")}, args.Marshalizer, + ) + allKeys := [][]byte{[]byte("stakedPubKey0"), []byte("waitingPubKey"), []byte("stakedPubKey1"), []byte("stakedPubKey2"), []byte("stakedPubKey3")} + stakingcommon.RegisterValidatorKeys( + args.UserAccountsDB, delegationAddr, delegationAddr, + allKeys, + big.NewInt(3000), + args.Marshalizer, ) - addStakedData(args.UserAccountsDB, []byte("stakedPubKey1"), delegationAddr, args.Marshalizer) - addStakedData(args.UserAccountsDB, []byte("stakedPubKey2"), delegationAddr, args.Marshalizer) - addStakedData(args.UserAccountsDB, []byte("stakedPubKey3"), delegationAddr, args.Marshalizer) - allKeys := [][]byte{[]byte("stakedPubKey0"), []byte("waitingPubKey"), []byte("stakedPubKey1"), []byte("stakedPubKey2"), []byte("stakedPubKey3")} - addValidatorData(args.UserAccountsDB, delegationAddr, allKeys, big.NewInt(3000), args.Marshalizer) addDelegationData(args.UserAccountsDB, delegationAddr, allKeys, args.Marshalizer) _, _ = args.UserAccountsDB.Commit() - validatorInfos := make(map[uint32][]*state.ValidatorInfo) - validatorInfos[0] = append(validatorInfos[0], &state.ValidatorInfo{ + validatorsInfo := state.NewShardValidatorsInfoMap() + _ = validatorsInfo.Add(&state.ValidatorInfo{ PublicKey: []byte("stakedPubKey0"), List: string(common.EligibleList), RewardAddress: delegationAddr, AccumulatedFees: big.NewInt(0), }) - validatorInfos[0] = append(validatorInfos[0], &state.ValidatorInfo{ + _ = validatorsInfo.Add(&state.ValidatorInfo{ PublicKey: []byte("stakedPubKey1"), List: string(common.EligibleList), RewardAddress: delegationAddr, AccumulatedFees: big.NewInt(0), }) - validatorInfos[0] = append(validatorInfos[0], &state.ValidatorInfo{ + _ = validatorsInfo.Add(&state.ValidatorInfo{ PublicKey: []byte("stakedPubKey2"), List: string(common.WaitingList), RewardAddress: delegationAddr, AccumulatedFees: big.NewInt(0), }) - validatorInfos[0] = append(validatorInfos[0], &state.ValidatorInfo{ + _ = validatorsInfo.Add(&state.ValidatorInfo{ PublicKey: []byte("stakedPubKey3"), List: string(common.WaitingList), RewardAddress: delegationAddr, AccumulatedFees: big.NewInt(0), }) - for _, vInfo := range validatorInfos[0] { - jailedAcc, _ := args.PeerAccountsDB.LoadAccount(vInfo.PublicKey) + for _, vInfo := range validatorsInfo.GetShardValidatorsInfoMap()[0] { + jailedAcc, _ := 
args.PeerAccountsDB.LoadAccount(vInfo.GetPublicKey()) _ = args.PeerAccountsDB.SaveAccount(jailedAcc) } args.EpochNotifier.CheckEpoch(&testscommon.HeaderHandlerStub{ EpochField: 1, // disable stakingV2OwnerFlag }) - err := s.ProcessSystemSmartContract(validatorInfos, 0, 0) + err := s.ProcessSystemSmartContract(validatorsInfo, &block.Header{}) assert.Nil(t, err) - for _, vInfo := range validatorInfos[0] { - assert.NotEqual(t, string(common.NewList), vInfo.List) + for _, vInfo := range validatorsInfo.GetShardValidatorsInfoMap()[0] { + assert.NotEqual(t, string(common.NewList), vInfo.GetList()) } peerAcc, _ := s.getPeerAccount([]byte("stakedPubKey2")) assert.Equal(t, peerAcc.GetList(), string(common.LeavingList)) - assert.Equal(t, 4, len(validatorInfos[0])) + assert.Len(t, validatorsInfo.GetShardValidatorsInfoMap()[0], 4) - delegationSC := loadSCAccount(args.UserAccountsDB, delegationAddr) + delegationSC := stakingcommon.LoadUserAccount(args.UserAccountsDB, delegationAddr) marshalledData, _, err := delegationSC.RetrieveValue([]byte("delegationStatus")) assert.Nil(t, err) dStatus := &systemSmartContracts.DelegationContractStatus{ @@ -1634,67 +1637,55 @@ func TestSystemSCProcessor_ProcessSystemSmartContractShouldUnStakeFromAdditional contract, _ := scContainer.Get(vm.FirstDelegationSCAddress) _ = scContainer.Add(delegationAddr, contract) - prepareStakingContractWithData( - args.UserAccountsDB, - []byte("stakedPubKey0"), - []byte("waitingPubKey"), - args.Marshalizer, - delegationAddr, - delegationAddr, - ) - - addStakedData(args.UserAccountsDB, []byte("stakedPubKey1"), delegationAddr, args.Marshalizer) - addStakedData(args.UserAccountsDB, []byte("stakedPubKey2"), delegationAddr, args.Marshalizer) - addStakedData(args.UserAccountsDB, []byte("stakedPubKey3"), delegationAddr, args.Marshalizer) + listOfKeysInWaiting := [][]byte{[]byte("waitingPubKey"), []byte("waitingPubKe1"), []byte("waitingPubKe2"), []byte("waitingPubKe3"), []byte("waitingPubKe4")} + allStakedKeys := append(listOfKeysInWaiting, []byte("stakedPubKey0"), []byte("stakedPubKey1"), []byte("stakedPubKey2"), []byte("stakedPubKey3")) - listOfKeysInWaiting := [][]byte{[]byte("waitingPubKe1"), []byte("waitingPubKe2"), []byte("waitingPubKe3"), []byte("waitingPubKe4")} - allStakedKeys := append(listOfKeysInWaiting, []byte("waitingPubKey"), []byte("stakedPubKey0"), []byte("stakedPubKey1"), []byte("stakedPubKey2"), []byte("stakedPubKey3")) - addKeysToWaitingList(args.UserAccountsDB, listOfKeysInWaiting, args.Marshalizer, delegationAddr, delegationAddr) - addValidatorData(args.UserAccountsDB, delegationAddr, allStakedKeys, big.NewInt(4000), args.Marshalizer) + stakingcommon.RegisterValidatorKeys(args.UserAccountsDB, delegationAddr, delegationAddr, allStakedKeys, big.NewInt(4000), args.Marshalizer) + stakingcommon.AddKeysToWaitingList(args.UserAccountsDB, listOfKeysInWaiting, args.Marshalizer, delegationAddr, delegationAddr) addDelegationData(args.UserAccountsDB, delegationAddr, allStakedKeys, args.Marshalizer) _, _ = args.UserAccountsDB.Commit() - validatorInfos := make(map[uint32][]*state.ValidatorInfo) - validatorInfos[0] = append(validatorInfos[0], &state.ValidatorInfo{ + validatorsInfo := state.NewShardValidatorsInfoMap() + _ = validatorsInfo.Add(&state.ValidatorInfo{ PublicKey: []byte("stakedPubKey0"), List: string(common.EligibleList), RewardAddress: delegationAddr, AccumulatedFees: big.NewInt(0), }) - validatorInfos[0] = append(validatorInfos[0], &state.ValidatorInfo{ + _ = validatorsInfo.Add(&state.ValidatorInfo{ PublicKey: 
[]byte("stakedPubKey1"), List: string(common.EligibleList), RewardAddress: delegationAddr, AccumulatedFees: big.NewInt(0), }) - validatorInfos[0] = append(validatorInfos[0], &state.ValidatorInfo{ + _ = validatorsInfo.Add(&state.ValidatorInfo{ PublicKey: []byte("stakedPubKey2"), List: string(common.EligibleList), RewardAddress: delegationAddr, AccumulatedFees: big.NewInt(0), }) - validatorInfos[0] = append(validatorInfos[0], &state.ValidatorInfo{ + _ = validatorsInfo.Add(&state.ValidatorInfo{ PublicKey: []byte("stakedPubKey3"), List: string(common.EligibleList), RewardAddress: delegationAddr, AccumulatedFees: big.NewInt(0), }) - for _, vInfo := range validatorInfos[0] { - jailedAcc, _ := args.PeerAccountsDB.LoadAccount(vInfo.PublicKey) + for _, vInfo := range validatorsInfo.GetShardValidatorsInfoMap()[0] { + jailedAcc, _ := args.PeerAccountsDB.LoadAccount(vInfo.GetPublicKey()) _ = args.PeerAccountsDB.SaveAccount(jailedAcc) } args.EpochNotifier.CheckEpoch(&testscommon.HeaderHandlerStub{ EpochField: 1, // disable stakingV2OwnerFlag }) - err := s.ProcessSystemSmartContract(validatorInfos, 0, 0) + err := s.ProcessSystemSmartContract(validatorsInfo, &block.Header{}) assert.Nil(t, err) - for _, vInfo := range validatorInfos[0] { - assert.Equal(t, string(common.EligibleList), vInfo.List) + for _, vInfo := range validatorsInfo.GetShardValidatorsInfoMap()[0] { + assert.Equal(t, string(common.EligibleList), vInfo.GetList()) } - delegationSC := loadSCAccount(args.UserAccountsDB, delegationAddr) + delegationSC := stakingcommon.LoadUserAccount(args.UserAccountsDB, delegationAddr) marshalledData, _, err := delegationSC.RetrieveValue([]byte("delegationStatus")) assert.Nil(t, err) dStatus := &systemSmartContracts.DelegationContractStatus{ @@ -1729,10 +1720,14 @@ func TestSystemSCProcessor_ProcessSystemSmartContractUnStakeFromAdditionalQueue( delegationAddr, ) - addStakedData(args.UserAccountsDB, []byte("stakedPubKey1"), delegationAddr, args.Marshalizer) - addStakedData(args.UserAccountsDB, []byte("stakedPubKey2"), delegationAddr, args.Marshalizer) - addStakedData(args.UserAccountsDB, []byte("stakedPubKey3"), delegationAddr, args.Marshalizer) - addValidatorData(args.UserAccountsDB, delegationAddr, [][]byte{[]byte("stakedPubKey1"), []byte("stakedPubKey2"), []byte("stakedPubKey3"), []byte("waitingPubKey")}, big.NewInt(10000), args.Marshalizer) + stakingcommon.AddStakingData(args.UserAccountsDB, + delegationAddr, + delegationAddr, + [][]byte{[]byte("stakedPubKey1"), []byte("stakedPubKey2"), []byte("stakedPubKey3")}, + args.Marshalizer, + ) + + stakingcommon.AddValidatorData(args.UserAccountsDB, delegationAddr, [][]byte{[]byte("stakedPubKey1"), []byte("stakedPubKey2"), []byte("stakedPubKey3"), []byte("waitingPubKey")}, big.NewInt(10000), args.Marshalizer) addDelegationData(args.UserAccountsDB, delegationAddr, [][]byte{[]byte("stakedPubKey1"), []byte("stakedPubKey2"), []byte("stakedPubKey3"), []byte("waitingPubKey")}, args.Marshalizer) _, _ = args.UserAccountsDB.Commit() @@ -1741,47 +1736,47 @@ func TestSystemSCProcessor_ProcessSystemSmartContractUnStakeFromAdditionalQueue( _ = scContainer.Add(delegationAddr2, contract) listOfKeysInWaiting := [][]byte{[]byte("waitingPubKe1"), []byte("waitingPubKe2"), []byte("waitingPubKe3"), []byte("waitingPubKe4")} - addKeysToWaitingList(args.UserAccountsDB, listOfKeysInWaiting, args.Marshalizer, delegationAddr2, delegationAddr2) - addValidatorData(args.UserAccountsDB, delegationAddr2, listOfKeysInWaiting, big.NewInt(2000), args.Marshalizer) + 
stakingcommon.AddKeysToWaitingList(args.UserAccountsDB, listOfKeysInWaiting, args.Marshalizer, delegationAddr2, delegationAddr2) + stakingcommon.AddValidatorData(args.UserAccountsDB, delegationAddr2, listOfKeysInWaiting, big.NewInt(2000), args.Marshalizer) addDelegationData(args.UserAccountsDB, delegationAddr2, listOfKeysInWaiting, args.Marshalizer) _, _ = args.UserAccountsDB.Commit() - validatorInfos := make(map[uint32][]*state.ValidatorInfo) - validatorInfos[0] = append(validatorInfos[0], &state.ValidatorInfo{ + validatorsInfo := state.NewShardValidatorsInfoMap() + _ = validatorsInfo.Add(&state.ValidatorInfo{ PublicKey: []byte("stakedPubKey0"), List: string(common.EligibleList), RewardAddress: delegationAddr, AccumulatedFees: big.NewInt(0), }) - validatorInfos[0] = append(validatorInfos[0], &state.ValidatorInfo{ + _ = validatorsInfo.Add(&state.ValidatorInfo{ PublicKey: []byte("stakedPubKey1"), List: string(common.EligibleList), RewardAddress: delegationAddr, AccumulatedFees: big.NewInt(0), }) - validatorInfos[0] = append(validatorInfos[0], &state.ValidatorInfo{ + _ = validatorsInfo.Add(&state.ValidatorInfo{ PublicKey: []byte("stakedPubKey2"), List: string(common.EligibleList), RewardAddress: delegationAddr, AccumulatedFees: big.NewInt(0), }) - validatorInfos[0] = append(validatorInfos[0], &state.ValidatorInfo{ + _ = validatorsInfo.Add(&state.ValidatorInfo{ PublicKey: []byte("stakedPubKey3"), List: string(common.EligibleList), RewardAddress: delegationAddr, AccumulatedFees: big.NewInt(0), }) - for _, vInfo := range validatorInfos[0] { - peerAcc, _ := args.PeerAccountsDB.LoadAccount(vInfo.PublicKey) + for _, vInfo := range validatorsInfo.GetShardValidatorsInfoMap()[0] { + peerAcc, _ := args.PeerAccountsDB.LoadAccount(vInfo.GetPublicKey()) _ = args.PeerAccountsDB.SaveAccount(peerAcc) } args.EpochNotifier.CheckEpoch(&testscommon.HeaderHandlerStub{ EpochField: 1, // disable stakingV2OwnerFlag }) - err := s.ProcessSystemSmartContract(validatorInfos, 0, 0) + err := s.ProcessSystemSmartContract(validatorsInfo, &block.Header{}) assert.Nil(t, err) - delegationSC := loadSCAccount(args.UserAccountsDB, delegationAddr2) + delegationSC := stakingcommon.LoadUserAccount(args.UserAccountsDB, delegationAddr2) marshalledData, _, err := delegationSC.RetrieveValue([]byte("delegationStatus")) assert.Nil(t, err) dStatus := &systemSmartContracts.DelegationContractStatus{ @@ -1797,7 +1792,7 @@ func TestSystemSCProcessor_ProcessSystemSmartContractUnStakeFromAdditionalQueue( assert.Equal(t, []byte("waitingPubKe4"), dStatus.UnStakedKeys[0].BLSKey) assert.Equal(t, []byte("waitingPubKe3"), dStatus.UnStakedKeys[1].BLSKey) - stakingSCAcc := loadSCAccount(args.UserAccountsDB, vm.StakingSCAddress) + stakingSCAcc := stakingcommon.LoadUserAccount(args.UserAccountsDB, vm.StakingSCAddress) marshaledData, _, _ := stakingSCAcc.RetrieveValue([]byte("waitingList")) waitingListHead := &systemSmartContracts.WaitingList{} _ = args.Marshalizer.Unmarshal(waitingListHead, marshaledData) @@ -1819,42 +1814,42 @@ func TestSystemSCProcessor_ProcessSystemSmartContractWrongValidatorInfoShouldBeC []byte("oneAddress1"), ) - validatorInfos := make(map[uint32][]*state.ValidatorInfo) - validatorInfos[0] = append(validatorInfos[0], &state.ValidatorInfo{ + validatorsInfo := state.NewShardValidatorsInfoMap() + _ = validatorsInfo.Add(&state.ValidatorInfo{ PublicKey: []byte("stakedPubKey0"), List: "", RewardAddress: []byte("stakedPubKey0"), AccumulatedFees: big.NewInt(0), }) - validatorInfos[0] = append(validatorInfos[0], &state.ValidatorInfo{ + _ = 
validatorsInfo.Add(&state.ValidatorInfo{ PublicKey: []byte("stakedPubKey1"), List: "", RewardAddress: []byte("stakedPubKey0"), AccumulatedFees: big.NewInt(0), }) - validatorInfos[0] = append(validatorInfos[0], &state.ValidatorInfo{ + _ = validatorsInfo.Add(&state.ValidatorInfo{ PublicKey: []byte("stakedPubKey2"), List: "", RewardAddress: []byte("stakedPubKey0"), AccumulatedFees: big.NewInt(0), }) - validatorInfos[0] = append(validatorInfos[0], &state.ValidatorInfo{ + _ = validatorsInfo.Add(&state.ValidatorInfo{ PublicKey: []byte("stakedPubKey3"), List: "", RewardAddress: []byte("stakedPubKey0"), AccumulatedFees: big.NewInt(0), }) - validatorInfos[0] = append(validatorInfos[0], &state.ValidatorInfo{ + _ = validatorsInfo.Add(&state.ValidatorInfo{ PublicKey: []byte("oneAddress1"), List: string(common.EligibleList), RewardAddress: []byte("oneAddress1"), AccumulatedFees: big.NewInt(0), }) - err := s.ProcessSystemSmartContract(validatorInfos, 0, 0) + err := s.ProcessSystemSmartContract(validatorsInfo, &block.Header{}) assert.Nil(t, err) - assert.Equal(t, len(validatorInfos[0]), 1) + assert.Len(t, validatorsInfo.GetShardValidatorsInfoMap()[0], 1) } func TestSystemSCProcessor_TogglePauseUnPause(t *testing.T) { @@ -1866,14 +1861,14 @@ func TestSystemSCProcessor_TogglePauseUnPause(t *testing.T) { err := s.ToggleUnStakeUnBond(true) assert.Nil(t, err) - validatorSC := loadSCAccount(s.userAccountsDB, vm.ValidatorSCAddress) + validatorSC := stakingcommon.LoadUserAccount(s.userAccountsDB, vm.ValidatorSCAddress) value, _, _ := validatorSC.RetrieveValue([]byte("unStakeUnBondPause")) assert.True(t, value[0] == 1) err = s.ToggleUnStakeUnBond(false) assert.Nil(t, err) - validatorSC = loadSCAccount(s.userAccountsDB, vm.ValidatorSCAddress) + validatorSC = stakingcommon.LoadUserAccount(s.userAccountsDB, vm.ValidatorSCAddress) value, _, _ = validatorSC.RetrieveValue([]byte("unStakeUnBondPause")) assert.True(t, value[0] == 0) } @@ -1905,58 +1900,60 @@ func TestSystemSCProcessor_ProcessSystemSmartContractJailAndUnStake(t *testing.T args, _ := createFullArgumentsForSystemSCProcessing(config.EnableEpochs{}, testscommon.CreateMemUnit()) s, _ := NewSystemSCProcessor(args) - addStakedData(args.UserAccountsDB, []byte("stakedPubKey0"), []byte("ownerKey"), args.Marshalizer) - addStakedData(args.UserAccountsDB, []byte("stakedPubKey1"), []byte("ownerKey"), args.Marshalizer) - addStakedData(args.UserAccountsDB, []byte("stakedPubKey2"), []byte("ownerKey"), args.Marshalizer) - addStakedData(args.UserAccountsDB, []byte("stakedPubKey3"), []byte("ownerKey"), args.Marshalizer) - saveOneKeyToWaitingList(args.UserAccountsDB, []byte("waitingPubKey"), args.Marshalizer, []byte("ownerKey"), []byte("ownerKey")) - addValidatorData(args.UserAccountsDB, []byte("ownerKey"), [][]byte{[]byte("stakedPubKey0"), []byte("stakedPubKey1"), []byte("stakedPubKey2"), []byte("stakedPubKey3"), []byte("waitingPubKey")}, big.NewInt(0), args.Marshalizer) + stakingcommon.AddStakingData(args.UserAccountsDB, + []byte("ownerKey"), + []byte("ownerKey"), + [][]byte{[]byte("stakedPubKey0"), []byte("stakedPubKey1"), []byte("stakedPubKey2"), []byte("stakedPubKey3")}, + args.Marshalizer, + ) + stakingcommon.AddKeysToWaitingList(args.UserAccountsDB, [][]byte{[]byte("waitingPubKey")}, args.Marshalizer, []byte("ownerKey"), []byte("ownerKey")) + stakingcommon.AddValidatorData(args.UserAccountsDB, []byte("ownerKey"), [][]byte{[]byte("stakedPubKey0"), []byte("stakedPubKey1"), []byte("stakedPubKey2"), []byte("stakedPubKey3"), []byte("waitingPubKey")}, big.NewInt(0), 
args.Marshalizer) _, _ = args.UserAccountsDB.Commit() - validatorInfos := make(map[uint32][]*state.ValidatorInfo) - validatorInfos[0] = append(validatorInfos[0], &state.ValidatorInfo{ + validatorsInfo := state.NewShardValidatorsInfoMap() + _ = validatorsInfo.Add(&state.ValidatorInfo{ PublicKey: []byte("stakedPubKey0"), List: string(common.EligibleList), RewardAddress: []byte("ownerKey"), AccumulatedFees: big.NewInt(0), }) - validatorInfos[0] = append(validatorInfos[0], &state.ValidatorInfo{ + _ = validatorsInfo.Add(&state.ValidatorInfo{ PublicKey: []byte("stakedPubKey1"), List: string(common.EligibleList), RewardAddress: []byte("ownerKey"), AccumulatedFees: big.NewInt(0), }) - validatorInfos[0] = append(validatorInfos[0], &state.ValidatorInfo{ + _ = validatorsInfo.Add(&state.ValidatorInfo{ PublicKey: []byte("stakedPubKey2"), List: string(common.EligibleList), RewardAddress: []byte("ownerKey"), AccumulatedFees: big.NewInt(0), }) - validatorInfos[0] = append(validatorInfos[0], &state.ValidatorInfo{ + _ = validatorsInfo.Add(&state.ValidatorInfo{ PublicKey: []byte("stakedPubKey3"), List: string(common.EligibleList), RewardAddress: []byte("ownerKey"), AccumulatedFees: big.NewInt(0), }) - for _, vInfo := range validatorInfos[0] { - jailedAcc, _ := args.PeerAccountsDB.LoadAccount(vInfo.PublicKey) + for _, vInfo := range validatorsInfo.GetShardValidatorsInfoMap()[0] { + jailedAcc, _ := args.PeerAccountsDB.LoadAccount(vInfo.GetPublicKey()) _ = args.PeerAccountsDB.SaveAccount(jailedAcc) } args.EpochNotifier.CheckEpoch(&testscommon.HeaderHandlerStub{ EpochField: 1, // disable stakingV2OwnerFlag }) - err := s.ProcessSystemSmartContract(validatorInfos, 0, 0) + err := s.ProcessSystemSmartContract(validatorsInfo, &block.Header{}) assert.Nil(t, err) _, err = s.peerAccountsDB.GetExistingAccount([]byte("waitingPubKey")) assert.NotNil(t, err) - assert.Equal(t, 4, len(validatorInfos[0])) - for _, vInfo := range validatorInfos[0] { - assert.Equal(t, vInfo.List, string(common.LeavingList)) - peerAcc, _ := s.getPeerAccount(vInfo.PublicKey) + assert.Len(t, validatorsInfo.GetShardValidatorsInfoMap()[0], 4) + for _, vInfo := range validatorsInfo.GetShardValidatorsInfoMap()[0] { + assert.Equal(t, vInfo.GetList(), string(common.LeavingList)) + peerAcc, _ := s.getPeerAccount(vInfo.GetPublicKey()) assert.Equal(t, peerAcc.GetList(), string(common.LeavingList)) } } @@ -1986,28 +1983,488 @@ func TestSystemSCProcessor_ProcessSystemSmartContractSwapJailedWithWaiting(t *te jailedAcc, _ := args.PeerAccountsDB.LoadAccount([]byte("jailedPubKey0")) _ = args.PeerAccountsDB.SaveAccount(jailedAcc) - validatorInfos := make(map[uint32][]*state.ValidatorInfo) - vInfo := &state.ValidatorInfo{ + validatorsInfo := state.NewShardValidatorsInfoMap() + _ = validatorsInfo.Add(&state.ValidatorInfo{ PublicKey: []byte("jailedPubKey0"), ShardId: 0, List: string(common.JailedList), TempRating: 1, RewardAddress: []byte("address"), AccumulatedFees: big.NewInt(0), - } - validatorInfos[0] = append(validatorInfos[0], vInfo) - - vInfo1 := &state.ValidatorInfo{ + }) + _ = validatorsInfo.Add(&state.ValidatorInfo{ PublicKey: []byte("waitingPubKey"), ShardId: 0, List: string(common.WaitingList), - } - validatorInfos[0] = append(validatorInfos[0], vInfo1) + }) - err := s.ProcessSystemSmartContract(validatorInfos, 0, 0) + err := s.ProcessSystemSmartContract(validatorsInfo, &block.Header{}) assert.Nil(t, err) - assert.Equal(t, 2, len(validatorInfos[0])) - newValidatorInfo := validatorInfos[0][0] - assert.Equal(t, newValidatorInfo.List, string(common.NewList)) 
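+	// After processing, the jailed validator is swapped with the key from the waiting list, so the entry at index 0 is expected to be on the NewList.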
+ require.Len(t, validatorsInfo.GetShardValidatorsInfoMap()[0], 2) + newValidatorInfo := validatorsInfo.GetShardValidatorsInfoMap()[0][0] + require.Equal(t, newValidatorInfo.GetList(), string(common.NewList)) +} + +func TestSystemSCProcessor_ProcessSystemSmartContractStakingV4Init(t *testing.T) { + t.Parallel() + + args, _ := createFullArgumentsForSystemSCProcessing(config.EnableEpochs{}, testscommon.CreateMemUnit()) + s, _ := NewSystemSCProcessor(args) + + owner1 := []byte("owner1") + owner2 := []byte("owner2") + owner3 := []byte("owner3") + + owner1ListPubKeysWaiting := [][]byte{[]byte("waitingPubKe0"), []byte("waitingPubKe1"), []byte("waitingPubKe2")} + owner1ListPubKeysStaked := [][]byte{[]byte("stakedPubKey0"), []byte("stakedPubKey1")} + owner1AllPubKeys := append(owner1ListPubKeysWaiting, owner1ListPubKeysStaked...) + + owner2ListPubKeysWaiting := [][]byte{[]byte("waitingPubKe3"), []byte("waitingPubKe4")} + owner2ListPubKeysStaked := [][]byte{[]byte("stakedPubKey2")} + owner2AllPubKeys := append(owner2ListPubKeysWaiting, owner2ListPubKeysStaked...) + + owner3ListPubKeysWaiting := [][]byte{[]byte("waitingPubKe5"), []byte("waitingPubKe6")} + + // Owner1 has 2 staked nodes (one eligible, one waiting) in shard0 + 3 nodes in staking queue. + // It has enough stake so that all his staking queue nodes will be selected in the auction list + stakingcommon.AddKeysToWaitingList(args.UserAccountsDB, owner1ListPubKeysWaiting, args.Marshalizer, owner1, owner1) + stakingcommon.RegisterValidatorKeys(args.UserAccountsDB, owner1, owner1, owner1AllPubKeys, big.NewInt(5000), args.Marshalizer) + + // Owner2 has 1 staked node (eligible) in shard1 + 2 nodes in staking queue. + // It has enough stake for only ONE node from staking queue to be selected in the auction list + stakingcommon.AddKeysToWaitingList(args.UserAccountsDB, owner2ListPubKeysWaiting, args.Marshalizer, owner2, owner2) + stakingcommon.RegisterValidatorKeys(args.UserAccountsDB, owner2, owner2, owner2AllPubKeys, big.NewInt(2500), args.Marshalizer) + + // Owner3 has 0 staked node + 2 nodes in staking queue. 
+ // It has enough stake so that all his staking queue nodes will be selected in the auction list + stakingcommon.AddKeysToWaitingList(args.UserAccountsDB, owner3ListPubKeysWaiting, args.Marshalizer, owner3, owner3) + stakingcommon.RegisterValidatorKeys(args.UserAccountsDB, owner3, owner3, owner3ListPubKeysWaiting, big.NewInt(2000), args.Marshalizer) + + validatorsInfo := state.NewShardValidatorsInfoMap() + _ = validatorsInfo.Add(createValidatorInfo(owner1ListPubKeysStaked[0], common.EligibleList, "", 0, owner1)) + _ = validatorsInfo.Add(createValidatorInfo(owner1ListPubKeysStaked[1], common.WaitingList, "", 0, owner1)) + _ = validatorsInfo.Add(createValidatorInfo(owner2ListPubKeysStaked[0], common.EligibleList, "", 1, owner2)) + + args.EpochNotifier.CheckEpoch(&block.Header{Epoch: stakingV4Step1EnableEpoch}) + err := s.ProcessSystemSmartContract(validatorsInfo, &block.Header{}) + require.Nil(t, err) + + expectedValidatorsInfo := map[uint32][]state.ValidatorInfoHandler{ + 0: { + createValidatorInfo(owner1ListPubKeysStaked[0], common.EligibleList, "", 0, owner1), + createValidatorInfo(owner1ListPubKeysStaked[1], common.WaitingList, "", 0, owner1), + createValidatorInfo(owner1ListPubKeysWaiting[0], common.AuctionList, "", 0, owner1), + createValidatorInfo(owner1ListPubKeysWaiting[1], common.AuctionList, "", 0, owner1), + createValidatorInfo(owner1ListPubKeysWaiting[2], common.AuctionList, "", 0, owner1), + + createValidatorInfo(owner2ListPubKeysWaiting[0], common.AuctionList, "", 0, owner2), + + createValidatorInfo(owner3ListPubKeysWaiting[0], common.AuctionList, "", 0, owner3), + createValidatorInfo(owner3ListPubKeysWaiting[1], common.AuctionList, "", 0, owner3), + }, + 1: { + createValidatorInfo(owner2ListPubKeysStaked[0], common.EligibleList, "", 1, owner2), + }, + } + + require.Equal(t, expectedValidatorsInfo, validatorsInfo.GetShardValidatorsInfoMap()) +} + +func TestSystemSCProcessor_ProcessSystemSmartContractStakingV4EnabledCannotPrepareStakingData(t *testing.T) { + t.Parallel() + + args, _ := createFullArgumentsForSystemSCProcessing(config.EnableEpochs{}, testscommon.CreateMemUnit()) + + errProcessStakingData := errors.New("error processing staking data") + args.StakingDataProvider = &stakingcommon.StakingDataProviderStub{ + PrepareStakingDataCalled: func(validatorsMap state.ShardValidatorsInfoMapHandler) error { + return errProcessStakingData + }, + } + + owner := []byte("owner") + ownerStakedKeys := [][]byte{[]byte("pubKey0"), []byte("pubKey1")} + stakingcommon.RegisterValidatorKeys(args.UserAccountsDB, owner, owner, ownerStakedKeys, big.NewInt(2000), args.Marshalizer) + + validatorsInfo := state.NewShardValidatorsInfoMap() + _ = validatorsInfo.Add(createValidatorInfo(ownerStakedKeys[0], common.AuctionList, "", 0, owner)) + _ = validatorsInfo.Add(createValidatorInfo(ownerStakedKeys[1], common.AuctionList, "", 0, owner)) + + s, _ := NewSystemSCProcessor(args) + s.EpochConfirmed(stakingV4Step2EnableEpoch, 0) + + err := s.ProcessSystemSmartContract(validatorsInfo, &block.Header{}) + require.Equal(t, errProcessStakingData, err) +} + +func TestSystemSCProcessor_ProcessSystemSmartContractStakingV4Enabled(t *testing.T) { + t.Parallel() + + args, _ := createFullArgumentsForSystemSCProcessing(config.EnableEpochs{}, testscommon.CreateMemUnit()) + nodesConfigProvider, _ := notifier.NewNodesConfigProvider(args.EpochNotifier, []config.MaxNodesChangeConfig{{MaxNumNodes: 8}}) + + auctionCfg := config.SoftAuctionConfig{ + TopUpStep: "10", + MinTopUp: "1", + MaxTopUp: "32000000", + 
MaxNumberOfIterations: 100000, + } + ald, _ := NewAuctionListDisplayer(ArgsAuctionListDisplayer{ + TableDisplayHandler: NewTableDisplayer(), + ValidatorPubKeyConverter: &testscommon.PubkeyConverterMock{}, + AddressPubKeyConverter: &testscommon.PubkeyConverterMock{}, + AuctionConfig: auctionCfg, + Denomination: 0, + }) + + argsAuctionListSelector := AuctionListSelectorArgs{ + ShardCoordinator: args.ShardCoordinator, + StakingDataProvider: args.StakingDataProvider, + MaxNodesChangeConfigProvider: nodesConfigProvider, + SoftAuctionConfig: auctionCfg, + AuctionListDisplayHandler: ald, + } + als, _ := NewAuctionListSelector(argsAuctionListSelector) + args.AuctionListSelector = als + + owner1 := []byte("owner1") + owner2 := []byte("owner2") + owner3 := []byte("owner3") + owner4 := []byte("owner4") + owner5 := []byte("owner5") + owner6 := []byte("owner6") + owner7 := []byte("owner7") + + owner1StakedKeys := [][]byte{[]byte("pubKey0"), []byte("pubKey1"), []byte("pubKey2")} + owner2StakedKeys := [][]byte{[]byte("pubKey3"), []byte("pubKey4"), []byte("pubKey5")} + owner3StakedKeys := [][]byte{[]byte("pubKey6"), []byte("pubKey7")} + owner4StakedKeys := [][]byte{[]byte("pubKey8"), []byte("pubKey9"), []byte("pubKe10"), []byte("pubKe11")} + owner5StakedKeys := [][]byte{[]byte("pubKe12"), []byte("pubKe13")} + owner6StakedKeys := [][]byte{[]byte("pubKe14"), []byte("pubKe15")} + owner7StakedKeys := [][]byte{[]byte("pubKe16"), []byte("pubKe17")} + + stakingcommon.RegisterValidatorKeys(args.UserAccountsDB, owner1, owner1, owner1StakedKeys, big.NewInt(6666), args.Marshalizer) + stakingcommon.RegisterValidatorKeys(args.UserAccountsDB, owner2, owner2, owner2StakedKeys, big.NewInt(5555), args.Marshalizer) + stakingcommon.RegisterValidatorKeys(args.UserAccountsDB, owner3, owner3, owner3StakedKeys, big.NewInt(4444), args.Marshalizer) + stakingcommon.RegisterValidatorKeys(args.UserAccountsDB, owner4, owner4, owner4StakedKeys, big.NewInt(6666), args.Marshalizer) + stakingcommon.RegisterValidatorKeys(args.UserAccountsDB, owner5, owner5, owner5StakedKeys, big.NewInt(1500), args.Marshalizer) + stakingcommon.RegisterValidatorKeys(args.UserAccountsDB, owner6, owner6, owner6StakedKeys, big.NewInt(1500), args.Marshalizer) + stakingcommon.RegisterValidatorKeys(args.UserAccountsDB, owner7, owner7, owner7StakedKeys, big.NewInt(1500), args.Marshalizer) + + validatorsInfo := state.NewShardValidatorsInfoMap() + _ = validatorsInfo.Add(createValidatorInfo(owner1StakedKeys[0], common.EligibleList, "", 0, owner1)) + _ = validatorsInfo.Add(createValidatorInfo(owner1StakedKeys[1], common.WaitingList, "", 0, owner1)) + _ = validatorsInfo.Add(createValidatorInfo(owner1StakedKeys[2], common.AuctionList, "", 0, owner1)) + + _ = validatorsInfo.Add(createValidatorInfo(owner2StakedKeys[0], common.EligibleList, "", 1, owner2)) + _ = validatorsInfo.Add(createValidatorInfo(owner2StakedKeys[1], common.AuctionList, "", 1, owner2)) + _ = validatorsInfo.Add(createValidatorInfo(owner2StakedKeys[2], common.AuctionList, "", 1, owner2)) + + _ = validatorsInfo.Add(createValidatorInfo(owner3StakedKeys[0], common.LeavingList, "", 1, owner3)) + _ = validatorsInfo.Add(createValidatorInfo(owner3StakedKeys[1], common.AuctionList, "", 1, owner3)) + + _ = validatorsInfo.Add(createValidatorInfo(owner4StakedKeys[0], common.JailedList, "", 1, owner4)) + _ = validatorsInfo.Add(createValidatorInfo(owner4StakedKeys[1], common.AuctionList, "", 1, owner4)) + _ = validatorsInfo.Add(createValidatorInfo(owner4StakedKeys[2], common.AuctionList, "", 1, owner4)) + _ = 
validatorsInfo.Add(createValidatorInfo(owner4StakedKeys[3], common.AuctionList, "", 1, owner4)) + + _ = validatorsInfo.Add(createValidatorInfo(owner5StakedKeys[0], common.EligibleList, "", 1, owner5)) + _ = validatorsInfo.Add(createValidatorInfo(owner5StakedKeys[1], common.AuctionList, "", 1, owner5)) + + _ = validatorsInfo.Add(createValidatorInfo(owner6StakedKeys[0], common.AuctionList, "", 1, owner6)) + _ = validatorsInfo.Add(createValidatorInfo(owner6StakedKeys[1], common.AuctionList, "", 1, owner6)) + + _ = validatorsInfo.Add(createValidatorInfo(owner7StakedKeys[0], common.EligibleList, "", 2, owner7)) + _ = validatorsInfo.Add(createValidatorInfo(owner7StakedKeys[1], common.EligibleList, "", 2, owner7)) + + s, _ := NewSystemSCProcessor(args) + args.EpochNotifier.CheckEpoch(&block.Header{Epoch: stakingV4Step2EnableEpoch}) + err := s.ProcessSystemSmartContract(validatorsInfo, &block.Header{PrevRandSeed: []byte("pubKey7")}) + require.Nil(t, err) + + /* + - owner5 does not have enough stake for 2 nodes=> his auction node (pubKe13) will be unStaked at the end of the epoch => + will not participate in auction selection + - owner6 does not have enough stake for 2 nodes => one of his auction nodes(pubKey14) will be unStaked at the end of the epoch => + his other auction node(pubKey15) will not participate in auction selection + - MaxNumNodes = 8 + - EligibleBlsKeys = 5 (pubKey0, pubKey1, pubKey3, pubKe13, pubKey17) + - QualifiedAuctionBlsKeys = 7 (pubKey2, pubKey4, pubKey5, pubKey7, pubKey9, pubKey10, pubKey11) + We can only select (MaxNumNodes - EligibleBlsKeys = 3) bls keys from AuctionList to be added to NewList + + -> Initial nodes config in auction list is: + +--------+------------------+------------------+-------------------+--------------+-----------------+---------------------------+ + | Owner | Num staked nodes | Num active nodes | Num auction nodes | Total top up | Top up per node | Auction list nodes | + +--------+------------------+------------------+-------------------+--------------+-----------------+---------------------------+ + | owner3 | 2 | 1 | 1 | 2444 | 1222 | pubKey7 | + | owner4 | 4 | 1 | 3 | 2666 | 666 | pubKey9, pubKe10, pubKe11 | + | owner1 | 3 | 2 | 1 | 3666 | 1222 | pubKey2 | + | owner2 | 3 | 1 | 2 | 2555 | 851 | pubKey4, pubKey5 | + +--------+------------------+------------------+-------------------+--------------+-----------------+---------------------------+ + -> Min possible topUp = 666; max possible topUp = 1333, min required topUp = 1216 + -> Selected nodes config in auction list. 
For each owner's auction nodes, qualified ones are selected by sorting the bls keys + +--------+------------------+----------------+--------------+-------------------+-----------------------------+------------------+---------------------------+-----------------------------+ + | Owner | Num staked nodes | TopUp per node | Total top up | Num auction nodes | Num qualified auction nodes | Num active nodes | Qualified top up per node | Selected auction list nodes | + +--------+------------------+----------------+--------------+-------------------+-----------------------------+------------------+---------------------------+-----------------------------+ + | owner1 | 3 | 1222 | 3666 | 1 | 1 | 2 | 1222 | pubKey2 | + | owner2 | 3 | 851 | 2555 | 2 | 1 | 1 | 1277 | pubKey5 | + | owner3 | 2 | 1222 | 2444 | 1 | 1 | 1 | 1222 | pubKey7 | + | owner4 | 4 | 666 | 2666 | 3 | 1 | 1 | 1333 | pubKey9 | + +--------+------------------+----------------+--------------+-------------------+-----------------------------+------------------+---------------------------+-----------------------------+ + -> Final selected nodes from auction list + +--------+----------------+--------------------------+ + | Owner | Registered key | Qualified TopUp per node | + +--------+----------------+--------------------------+ + | owner4 | pubKey9 | 1333 | + | owner2 | pubKey5 | 1277 | + | owner1 | pubKey2 | 1222 | + +--------+----------------+--------------------------+ + | owner3 | pubKey7 | 1222 | + +--------+----------------+--------------------------+ + + The following have 1222 top up per node: + - owner1 with 1 bls key = pubKey2 + - owner3 with 1 bls key = pubKey7 + + Since randomness = []byte("pubKey7"), nodes will be sorted based on blsKey XOR randomness, therefore: + - XOR1 = []byte("pubKey2") XOR []byte("pubKey7") = [0 0 0 0 0 0 5] + - XOR3 = []byte("pubKey7") XOR []byte("pubKey7") = [0 0 0 0 0 0 0] + */ + requireTopUpPerNodes(t, s.stakingDataProvider, owner1StakedKeys, big.NewInt(1222)) + requireTopUpPerNodes(t, s.stakingDataProvider, owner2StakedKeys, big.NewInt(851)) + requireTopUpPerNodes(t, s.stakingDataProvider, owner3StakedKeys, big.NewInt(1222)) + requireTopUpPerNodes(t, s.stakingDataProvider, owner4StakedKeys, big.NewInt(666)) + requireTopUpPerNodes(t, s.stakingDataProvider, owner5StakedKeys, big.NewInt(0)) + requireTopUpPerNodes(t, s.stakingDataProvider, owner6StakedKeys, big.NewInt(0)) + requireTopUpPerNodes(t, s.stakingDataProvider, owner7StakedKeys, big.NewInt(0)) + + expectedValidatorsInfo := map[uint32][]state.ValidatorInfoHandler{ + 0: { + createValidatorInfo(owner1StakedKeys[0], common.EligibleList, "", 0, owner1), + createValidatorInfo(owner1StakedKeys[1], common.WaitingList, "", 0, owner1), + createValidatorInfo(owner1StakedKeys[2], common.SelectedFromAuctionList, common.AuctionList, 0, owner1), + }, + 1: { + createValidatorInfo(owner2StakedKeys[0], common.EligibleList, "", 1, owner2), + createValidatorInfo(owner2StakedKeys[1], common.AuctionList, "", 1, owner2), + createValidatorInfo(owner2StakedKeys[2], common.SelectedFromAuctionList, common.AuctionList, 1, owner2), + + createValidatorInfo(owner3StakedKeys[0], common.LeavingList, "", 1, owner3), + createValidatorInfo(owner3StakedKeys[1], common.AuctionList, "", 1, owner3), + + createValidatorInfo(owner4StakedKeys[0], common.JailedList, "", 1, owner4), + createValidatorInfo(owner4StakedKeys[1], common.SelectedFromAuctionList, common.AuctionList, 1, owner4), + createValidatorInfo(owner4StakedKeys[2], common.AuctionList, "", 1, owner4), + 
createValidatorInfo(owner4StakedKeys[3], common.AuctionList, "", 1, owner4), + + createValidatorInfo(owner5StakedKeys[0], common.EligibleList, "", 1, owner5), + createValidatorInfo(owner5StakedKeys[1], common.LeavingList, common.AuctionList, 1, owner5), + + createValidatorInfo(owner6StakedKeys[0], common.LeavingList, common.AuctionList, 1, owner6), + createValidatorInfo(owner6StakedKeys[1], common.AuctionList, "", 1, owner6), + }, + 2: { + createValidatorInfo(owner7StakedKeys[0], common.LeavingList, common.EligibleList, 2, owner7), + createValidatorInfo(owner7StakedKeys[1], common.EligibleList, "", 2, owner7), + }, + } + + require.Equal(t, expectedValidatorsInfo, validatorsInfo.GetShardValidatorsInfoMap()) +} + +func TestSystemSCProcessor_LegacyEpochConfirmedCorrectMaxNumNodesAfterNodeRestart(t *testing.T) { + t.Parallel() + + args, _ := createFullArgumentsForSystemSCProcessing(config.EnableEpochs{}, testscommon.CreateMemUnit()) + nodesConfigEpoch0 := config.MaxNodesChangeConfig{ + EpochEnable: 0, + MaxNumNodes: 36, + NodesToShufflePerShard: 4, + } + nodesConfigEpoch1 := config.MaxNodesChangeConfig{ + EpochEnable: 1, + MaxNumNodes: 56, + NodesToShufflePerShard: 2, + } + nodesConfigEpoch6 := config.MaxNodesChangeConfig{ + EpochEnable: 6, + MaxNumNodes: 48, + NodesToShufflePerShard: 1, + } + nodesConfigProvider, _ := notifier.NewNodesConfigProvider( + args.EpochNotifier, + []config.MaxNodesChangeConfig{ + nodesConfigEpoch0, + nodesConfigEpoch1, + nodesConfigEpoch6, + }) + args.MaxNodesChangeConfigProvider = nodesConfigProvider + args.EnableEpochsHandler = enableEpochsHandlerMock.NewEnableEpochsHandlerStub(common.StakingV2Flag) + validatorsInfoMap := state.NewShardValidatorsInfoMap() + s, _ := NewSystemSCProcessor(args) + + args.EpochNotifier.CheckEpoch(&block.Header{Epoch: 0, Nonce: 0}) + require.True(t, s.flagChangeMaxNodesEnabled.IsSet()) + err := s.processLegacy(validatorsInfoMap, 0, 0) + require.Nil(t, err) + require.Equal(t, nodesConfigEpoch0.MaxNumNodes, s.maxNodes) + + args.EpochNotifier.CheckEpoch(&block.Header{Epoch: 1, Nonce: 1}) + require.True(t, s.flagChangeMaxNodesEnabled.IsSet()) + err = s.processLegacy(validatorsInfoMap, 1, 1) + require.Nil(t, err) + require.Equal(t, nodesConfigEpoch1.MaxNumNodes, s.maxNodes) + + for epoch := uint32(2); epoch <= 5; epoch++ { + args.EpochNotifier.CheckEpoch(&block.Header{Epoch: epoch, Nonce: uint64(epoch)}) + require.False(t, s.flagChangeMaxNodesEnabled.IsSet()) + err = s.processLegacy(validatorsInfoMap, uint64(epoch), epoch) + require.Nil(t, err) + require.Equal(t, nodesConfigEpoch1.MaxNumNodes, s.maxNodes) + } + + // simulate restart + args.EpochNotifier.CheckEpoch(&block.Header{Epoch: 0, Nonce: 0}) + args.EpochNotifier.CheckEpoch(&block.Header{Epoch: 5, Nonce: 5}) + require.False(t, s.flagChangeMaxNodesEnabled.IsSet()) + err = s.processLegacy(validatorsInfoMap, 5, 5) + require.Nil(t, err) + require.Equal(t, nodesConfigEpoch1.MaxNumNodes, s.maxNodes) + + args.EpochNotifier.CheckEpoch(&block.Header{Epoch: 6, Nonce: 6}) + require.True(t, s.flagChangeMaxNodesEnabled.IsSet()) + err = s.processLegacy(validatorsInfoMap, 6, 6) + require.Nil(t, err) + require.Equal(t, nodesConfigEpoch6.MaxNumNodes, s.maxNodes) + + // simulate restart + args.EpochNotifier.CheckEpoch(&block.Header{Epoch: 0, Nonce: 0}) + args.EpochNotifier.CheckEpoch(&block.Header{Epoch: 6, Nonce: 6}) + require.True(t, s.flagChangeMaxNodesEnabled.IsSet()) + err = s.processLegacy(validatorsInfoMap, 6, 6) + require.Nil(t, err) + require.Equal(t, nodesConfigEpoch6.MaxNumNodes, s.maxNodes) 
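+	// No MaxNodesChangeConfig entry activates after epoch 6, so the epoch-6 value is expected to be kept for every later epoch checked below.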
+ + for epoch := uint32(7); epoch <= 20; epoch++ { + args.EpochNotifier.CheckEpoch(&block.Header{Epoch: epoch, Nonce: uint64(epoch)}) + require.False(t, s.flagChangeMaxNodesEnabled.IsSet()) + err = s.processLegacy(validatorsInfoMap, uint64(epoch), epoch) + require.Nil(t, err) + require.Equal(t, nodesConfigEpoch6.MaxNumNodes, s.maxNodes) + } + + // simulate restart + args.EpochNotifier.CheckEpoch(&block.Header{Epoch: 1, Nonce: 1}) + args.EpochNotifier.CheckEpoch(&block.Header{Epoch: 21, Nonce: 21}) + require.False(t, s.flagChangeMaxNodesEnabled.IsSet()) + err = s.processLegacy(validatorsInfoMap, 21, 21) + require.Nil(t, err) + require.Equal(t, nodesConfigEpoch6.MaxNumNodes, s.maxNodes) +} + +func TestSystemSCProcessor_ProcessSystemSmartContractNilInputValues(t *testing.T) { + t.Parallel() + + args, _ := createFullArgumentsForSystemSCProcessing(config.EnableEpochs{}, testscommon.CreateMemUnit()) + s, _ := NewSystemSCProcessor(args) + + t.Run("nil validators info map, expect error", func(t *testing.T) { + t.Parallel() + + blockHeader := &block.Header{Nonce: 4} + err := s.ProcessSystemSmartContract(nil, blockHeader) + require.True(t, strings.Contains(err.Error(), errNilValidatorsInfoMap.Error())) + require.True(t, strings.Contains(err.Error(), fmt.Sprintf("%d", blockHeader.GetNonce()))) + }) + + t.Run("nil header, expect error", func(t *testing.T) { + t.Parallel() + + validatorsInfoMap := state.NewShardValidatorsInfoMap() + err := s.ProcessSystemSmartContract(validatorsInfoMap, nil) + require.Equal(t, process.ErrNilHeaderHandler, err) + }) +} + +func TestLegacySystemSCProcessor_addNewlyStakedNodesToValidatorTrie(t *testing.T) { + t.Parallel() + + args, _ := createFullArgumentsForSystemSCProcessing(config.EnableEpochs{}, testscommon.CreateMemUnit()) + sysProc, _ := NewSystemSCProcessor(args) + + pubKey := []byte("pubKey") + existingValidator := &state.ValidatorInfo{ + PublicKey: pubKey, + List: "inactive", + } + + nonce := uint64(4) + newList := common.AuctionList + newlyAddedValidator := &state.ValidatorInfo{ + PublicKey: pubKey, + List: string(newList), + Index: uint32(nonce), + TempRating: sysProc.startRating, + Rating: sysProc.startRating, + RewardAddress: pubKey, + AccumulatedFees: big.NewInt(0), + } + + // Check before stakingV4, we should have both validators + validatorsInfo := state.NewShardValidatorsInfoMap() + _ = validatorsInfo.Add(existingValidator) + args.EpochNotifier.CheckEpoch(&block.Header{Epoch: stakingV4Step1EnableEpoch - 1, Nonce: 1}) + err := sysProc.addNewlyStakedNodesToValidatorTrie( + validatorsInfo, + [][]byte{pubKey, pubKey}, + nonce, + newList, + ) + require.Nil(t, err) + require.Equal(t, map[uint32][]state.ValidatorInfoHandler{ + 0: {existingValidator, newlyAddedValidator}, + }, validatorsInfo.GetShardValidatorsInfoMap()) + + // Check after stakingV4, we should only have the new one + validatorsInfo = state.NewShardValidatorsInfoMap() + _ = validatorsInfo.Add(existingValidator) + args.EpochNotifier.CheckEpoch(&block.Header{Epoch: stakingV4Step1EnableEpoch, Nonce: 1}) + err = sysProc.addNewlyStakedNodesToValidatorTrie( + validatorsInfo, + [][]byte{pubKey, pubKey}, + nonce, + newList, + ) + require.Nil(t, err) + require.Equal(t, map[uint32][]state.ValidatorInfoHandler{ + 0: {newlyAddedValidator}, + }, validatorsInfo.GetShardValidatorsInfoMap()) +} + +func requireTopUpPerNodes(t *testing.T, s epochStart.StakingDataProvider, stakedPubKeys [][]byte, topUp *big.Int) { + for _, pubKey := range stakedPubKeys { + owner, err := s.GetBlsKeyOwner(pubKey) + require.Nil(t, err) + 
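+		// The helper derives the top-up per node by dividing the owner's total top-up by the number of keys passed in.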
+ totalTopUp := s.GetOwnersData()[owner].TotalTopUp + topUpPerNode := big.NewInt(0).Div(totalTopUp, big.NewInt(int64(len(stakedPubKeys)))) + require.Equal(t, topUp, topUpPerNode) + } +} + +// This func sets rating and temp rating with the start rating value used in createFullArgumentsForSystemSCProcessing +func createValidatorInfo(pubKey []byte, list common.PeerType, previousList common.PeerType, shardID uint32, owner []byte) *state.ValidatorInfo { + rating := uint32(5) + + return &state.ValidatorInfo{ + PublicKey: pubKey, + List: string(list), + PreviousList: string(previousList), + ShardId: shardID, + RewardAddress: owner, + AccumulatedFees: zero, + Rating: rating, + TempRating: rating, + } } diff --git a/epochStart/metachain/tableDisplayer.go b/epochStart/metachain/tableDisplayer.go new file mode 100644 index 00000000000..275805489dc --- /dev/null +++ b/epochStart/metachain/tableDisplayer.go @@ -0,0 +1,32 @@ +package metachain + +import ( + "fmt" + + "github.com/multiversx/mx-chain-core-go/display" +) + +type tableDisplayer struct { +} + +// NewTableDisplayer will create a component able to display tables in logger +func NewTableDisplayer() *tableDisplayer { + return &tableDisplayer{} +} + +// DisplayTable will display a table in the log +func (tb *tableDisplayer) DisplayTable(tableHeader []string, lines []*display.LineData, message string) { + table, err := display.CreateTableString(tableHeader, lines) + if err != nil { + log.Error("could not create table", "tableHeader", tableHeader, "error", err) + return + } + + msg := fmt.Sprintf("%s\n%s", message, table) + log.Debug(msg) +} + +// IsInterfaceNil checks if the underlying pointer is nil +func (tb *tableDisplayer) IsInterfaceNil() bool { + return tb == nil +} diff --git a/epochStart/metachain/validatorList.go b/epochStart/metachain/validatorList.go new file mode 100644 index 00000000000..75c38a1b3c2 --- /dev/null +++ b/epochStart/metachain/validatorList.go @@ -0,0 +1,27 @@ +package metachain + +import ( + "bytes" + + "github.com/multiversx/mx-chain-go/state" +) + +type validatorList []state.ValidatorInfoHandler + +// Len will return the length of the validatorList +func (v validatorList) Len() int { return len(v) } + +// Swap will interchange the objects on input indexes +func (v validatorList) Swap(i, j int) { v[i], v[j] = v[j], v[i] } + +// Less will return true if object on index i should appear before object in index j +// Sorting of validators should be by index and public key +func (v validatorList) Less(i, j int) bool { + if v[i].GetTempRating() == v[j].GetTempRating() { + if v[i].GetIndex() == v[j].GetIndex() { + return bytes.Compare(v[i].GetPublicKey(), v[j].GetPublicKey()) < 0 + } + return v[i].GetIndex() < v[j].GetIndex() + } + return v[i].GetTempRating() < v[j].GetTempRating() +} diff --git a/epochStart/metachain/validators.go b/epochStart/metachain/validators.go index 081944230db..e8eff547a09 100644 --- a/epochStart/metachain/validators.go +++ b/epochStart/metachain/validators.go @@ -93,7 +93,7 @@ func NewValidatorInfoCreator(args ArgsNewValidatorInfoCreator) (*validatorInfoCr } // CreateValidatorInfoMiniBlocks creates the validatorInfo mini blocks according to the provided validatorInfo map -func (vic *validatorInfoCreator) CreateValidatorInfoMiniBlocks(validatorsInfo map[uint32][]*state.ValidatorInfo) (block.MiniBlockSlice, error) { +func (vic *validatorInfoCreator) CreateValidatorInfoMiniBlocks(validatorsInfo state.ShardValidatorsInfoMapHandler) (block.MiniBlockSlice, error) { if validatorsInfo == nil { return nil, 
epochStart.ErrNilValidatorInfo } @@ -102,8 +102,9 @@ func (vic *validatorInfoCreator) CreateValidatorInfoMiniBlocks(validatorsInfo ma miniBlocks := make([]*block.MiniBlock, 0) + validatorsMap := validatorsInfo.GetShardValidatorsInfoMap() for shardId := uint32(0); shardId < vic.shardCoordinator.NumberOfShards(); shardId++ { - validators := validatorsInfo[shardId] + validators := validatorsMap[shardId] if len(validators) == 0 { continue } @@ -116,7 +117,7 @@ func (vic *validatorInfoCreator) CreateValidatorInfoMiniBlocks(validatorsInfo ma miniBlocks = append(miniBlocks, miniBlock) } - validators := validatorsInfo[core.MetachainShardId] + validators := validatorsMap[core.MetachainShardId] if len(validators) == 0 { return miniBlocks, nil } @@ -131,19 +132,19 @@ func (vic *validatorInfoCreator) CreateValidatorInfoMiniBlocks(validatorsInfo ma return miniBlocks, nil } -func (vic *validatorInfoCreator) createMiniBlock(validatorsInfo []*state.ValidatorInfo) (*block.MiniBlock, error) { +func (vic *validatorInfoCreator) createMiniBlock(validatorsInfo []state.ValidatorInfoHandler) (*block.MiniBlock, error) { miniBlock := &block.MiniBlock{} miniBlock.SenderShardID = vic.shardCoordinator.SelfId() miniBlock.ReceiverShardID = core.AllShardId miniBlock.TxHashes = make([][]byte, len(validatorsInfo)) miniBlock.Type = block.PeerBlock - validatorsCopy := make([]*state.ValidatorInfo, len(validatorsInfo)) - copy(validatorsCopy, validatorsInfo) + validatorCopy := make([]state.ValidatorInfoHandler, len(validatorsInfo)) + copy(validatorCopy, validatorsInfo) - vic.sortValidators(validatorsCopy) + vic.sortValidators(validatorCopy) - for index, validator := range validatorsCopy { + for index, validator := range validatorCopy { shardValidatorInfo := createShardValidatorInfo(validator) shardValidatorInfoData, err := vic.getShardValidatorInfoData(shardValidatorInfo) @@ -157,7 +158,7 @@ func (vic *validatorInfoCreator) createMiniBlock(validatorsInfo []*state.Validat return miniBlock, nil } -func (vic *validatorInfoCreator) sortValidators(validators []*state.ValidatorInfo) { +func (vic *validatorInfoCreator) sortValidators(validators []state.ValidatorInfoHandler) { if vic.enableEpochsHandler.IsFlagEnabled(common.DeterministicSortOnValidatorsInfoFixFlag) { vic.deterministicSortValidators(validators) return @@ -166,9 +167,9 @@ func (vic *validatorInfoCreator) sortValidators(validators []*state.ValidatorInf vic.legacySortValidators(validators) } -func (vic *validatorInfoCreator) deterministicSortValidators(validators []*state.ValidatorInfo) { +func (vic *validatorInfoCreator) deterministicSortValidators(validators []state.ValidatorInfoHandler) { sort.SliceStable(validators, func(a, b int) bool { - result := bytes.Compare(validators[a].PublicKey, validators[b].PublicKey) + result := bytes.Compare(validators[a].GetPublicKey(), validators[b].GetPublicKey()) if result != 0 { return result < 0 } @@ -177,7 +178,8 @@ func (vic *validatorInfoCreator) deterministicSortValidators(validators []*state bValidatorString := validators[b].GoString() // possible issues as we have 2 entries with the same public key. 
Print & assure deterministic sorting log.Warn("found 2 entries in validatorInfoCreator.deterministicSortValidators with the same public key", - "validator a", aValidatorString, "validator b", bValidatorString) + "validator a", aValidatorString, "validator b", bValidatorString, + "validator a pub key", validators[a].GetPublicKey(), "validator b pub key", validators[b].GetPublicKey()) // since the GoString will include all fields, we do not need to marshal the struct again. Strings comparison will // suffice in this case. @@ -185,12 +187,12 @@ func (vic *validatorInfoCreator) deterministicSortValidators(validators []*state }) } -func (vic *validatorInfoCreator) legacySortValidators(validators []*state.ValidatorInfo) { +func (vic *validatorInfoCreator) legacySortValidators(validators []state.ValidatorInfoHandler) { swap := func(a, b int) { validators[a], validators[b] = validators[b], validators[a] } less := func(a, b int) bool { - return bytes.Compare(validators[a].PublicKey, validators[b].PublicKey) < 0 + return bytes.Compare(validators[a].GetPublicKey(), validators[b].GetPublicKey()) < 0 } compatibility.SortSlice(swap, less, len(validators)) } @@ -219,18 +221,23 @@ func (vic *validatorInfoCreator) getShardValidatorInfoHash(shardValidatorInfo *s return shardValidatorInfoHash, nil } -func createShardValidatorInfo(validator *state.ValidatorInfo) *state.ShardValidatorInfo { +func createShardValidatorInfo(validator state.ValidatorInfoHandler) *state.ShardValidatorInfo { return &state.ShardValidatorInfo{ - PublicKey: validator.PublicKey, - ShardId: validator.ShardId, - List: validator.List, - Index: validator.Index, - TempRating: validator.TempRating, + PublicKey: validator.GetPublicKey(), + ShardId: validator.GetShardId(), + List: validator.GetList(), + PreviousList: validator.GetPreviousList(), + Index: validator.GetIndex(), + PreviousIndex: validator.GetPreviousIndex(), + TempRating: validator.GetTempRating(), } } // VerifyValidatorInfoMiniBlocks verifies if received validator info mini blocks are correct -func (vic *validatorInfoCreator) VerifyValidatorInfoMiniBlocks(miniBlocks []*block.MiniBlock, validatorsInfo map[uint32][]*state.ValidatorInfo) error { +func (vic *validatorInfoCreator) VerifyValidatorInfoMiniBlocks( + miniBlocks []*block.MiniBlock, + validatorsInfo state.ShardValidatorsInfoMapHandler, +) error { if len(miniBlocks) == 0 { return epochStart.ErrNilMiniblocks } diff --git a/epochStart/metachain/validators_test.go b/epochStart/metachain/validators_test.go index 9589943162f..662b0192044 100644 --- a/epochStart/metachain/validators_test.go +++ b/epochStart/metachain/validators_test.go @@ -30,90 +30,90 @@ import ( "github.com/stretchr/testify/require" ) -func createMockValidatorInfo() map[uint32][]*state.ValidatorInfo { - validatorInfo := map[uint32][]*state.ValidatorInfo{ - 0: { - &state.ValidatorInfo{ - PublicKey: []byte("a1"), - ShardId: 0, - List: "eligible", - Index: 1, - TempRating: 100, - Rating: 1000, - RewardAddress: []byte("rewardA1"), - LeaderSuccess: 1, - LeaderFailure: 2, - ValidatorSuccess: 3, - ValidatorFailure: 4, - TotalLeaderSuccess: 10, - TotalLeaderFailure: 20, - TotalValidatorSuccess: 30, - TotalValidatorFailure: 40, - NumSelectedInSuccessBlocks: 5, - AccumulatedFees: big.NewInt(100), - }, - &state.ValidatorInfo{ - PublicKey: []byte("a2"), - ShardId: 0, - List: "waiting", - Index: 2, - TempRating: 101, - Rating: 1001, - RewardAddress: []byte("rewardA2"), - LeaderSuccess: 6, - LeaderFailure: 7, - ValidatorSuccess: 8, - ValidatorFailure: 9, - TotalLeaderSuccess: 
60, - TotalLeaderFailure: 70, - TotalValidatorSuccess: 80, - TotalValidatorFailure: 90, - NumSelectedInSuccessBlocks: 10, - AccumulatedFees: big.NewInt(101), - }, - }, - core.MetachainShardId: { - &state.ValidatorInfo{ - PublicKey: []byte("m1"), - ShardId: core.MetachainShardId, - List: "eligible", - Index: 1, - TempRating: 100, - Rating: 1000, - RewardAddress: []byte("rewardM1"), - LeaderSuccess: 1, - LeaderFailure: 2, - ValidatorSuccess: 3, - ValidatorFailure: 4, - TotalLeaderSuccess: 10, - TotalLeaderFailure: 20, - TotalValidatorSuccess: 30, - TotalValidatorFailure: 40, - NumSelectedInSuccessBlocks: 5, - AccumulatedFees: big.NewInt(100), - }, - &state.ValidatorInfo{ - PublicKey: []byte("m0"), - ShardId: core.MetachainShardId, - List: "waiting", - Index: 2, - TempRating: 101, - Rating: 1001, - RewardAddress: []byte("rewardM2"), - LeaderSuccess: 6, - LeaderFailure: 7, - ValidatorSuccess: 8, - ValidatorFailure: 9, - TotalLeaderSuccess: 60, - TotalLeaderFailure: 70, - TotalValidatorSuccess: 80, - TotalValidatorFailure: 90, - NumSelectedInSuccessBlocks: 10, - AccumulatedFees: big.NewInt(101), - }, - }, - } - return validatorInfo +func createMockValidatorInfo() state.ShardValidatorsInfoMapHandler { + validatorsInfo := state.NewShardValidatorsInfoMap() + + _ = validatorsInfo.Add(&state.ValidatorInfo{ + PublicKey: []byte("a1"), + ShardId: 0, + List: "eligible", + Index: 1, + TempRating: 100, + Rating: 1000, + RewardAddress: []byte("rewardA1"), + LeaderSuccess: 1, + LeaderFailure: 2, + ValidatorSuccess: 3, + ValidatorFailure: 4, + TotalLeaderSuccess: 10, + TotalLeaderFailure: 20, + TotalValidatorSuccess: 30, + TotalValidatorFailure: 40, + NumSelectedInSuccessBlocks: 5, + AccumulatedFees: big.NewInt(100), + }) + + _ = validatorsInfo.Add(&state.ValidatorInfo{ + PublicKey: []byte("a2"), + ShardId: 0, + List: "waiting", + Index: 2, + TempRating: 101, + Rating: 1001, + RewardAddress: []byte("rewardA2"), + LeaderSuccess: 6, + LeaderFailure: 7, + ValidatorSuccess: 8, + ValidatorFailure: 9, + TotalLeaderSuccess: 60, + TotalLeaderFailure: 70, + TotalValidatorSuccess: 80, + TotalValidatorFailure: 90, + NumSelectedInSuccessBlocks: 10, + AccumulatedFees: big.NewInt(101), + }) + + _ = validatorsInfo.Add(&state.ValidatorInfo{ + PublicKey: []byte("m1"), + ShardId: core.MetachainShardId, + List: "eligible", + Index: 1, + TempRating: 100, + Rating: 1000, + RewardAddress: []byte("rewardM1"), + LeaderSuccess: 1, + LeaderFailure: 2, + ValidatorSuccess: 3, + ValidatorFailure: 4, + TotalLeaderSuccess: 10, + TotalLeaderFailure: 20, + TotalValidatorSuccess: 30, + TotalValidatorFailure: 40, + NumSelectedInSuccessBlocks: 5, + AccumulatedFees: big.NewInt(100), + }) + + _ = validatorsInfo.Add(&state.ValidatorInfo{ + PublicKey: []byte("m0"), + ShardId: core.MetachainShardId, + List: "waiting", + Index: 2, + TempRating: 101, + Rating: 1001, + RewardAddress: []byte("rewardM2"), + LeaderSuccess: 6, + LeaderFailure: 7, + ValidatorSuccess: 8, + ValidatorFailure: 9, + TotalLeaderSuccess: 60, + TotalLeaderFailure: 70, + TotalValidatorSuccess: 80, + TotalValidatorFailure: 90, + NumSelectedInSuccessBlocks: 10, + AccumulatedFees: big.NewInt(101), + }) + + return validatorsInfo } func createMockEpochValidatorInfoCreatorsArguments() ArgsNewValidatorInfoCreator { @@ -145,7 +145,7 @@ func createMockEpochValidatorInfoCreatorsArguments() ArgsNewValidatorInfoCreator return argsNewEpochEconomics } -func verifyMiniBlocks(bl *block.MiniBlock, infos []*state.ValidatorInfo, marshalledShardValidatorsInfo [][]byte, marshalizer marshal.Marshalizer) 
bool { +func verifyMiniBlocks(bl *block.MiniBlock, infos []state.ValidatorInfoHandler, marshalledShardValidatorsInfo [][]byte, marshalizer marshal.Marshalizer) bool { if bl.SenderShardID != core.MetachainShardId || bl.ReceiverShardID != core.AllShardId || len(bl.TxHashes) == 0 || @@ -153,10 +153,10 @@ func verifyMiniBlocks(bl *block.MiniBlock, infos []*state.ValidatorInfo, marshal return false } - validatorCopy := make([]*state.ValidatorInfo, len(infos)) + validatorCopy := make([]state.ValidatorInfoHandler, len(infos)) copy(validatorCopy, infos) sort.Slice(validatorCopy, func(a, b int) bool { - return bytes.Compare(validatorCopy[a].PublicKey, validatorCopy[b].PublicKey) < 0 + return bytes.Compare(validatorCopy[a].GetPublicKey(), validatorCopy[b].GetPublicKey()) < 0 }) for i, marshalledShardValidatorInfo := range marshalledShardValidatorsInfo { @@ -304,22 +304,22 @@ func TestEpochValidatorInfoCreator_CreateValidatorInfoMiniBlocksShouldBeCorrect( vic, _ := NewValidatorInfoCreator(arguments) mbs, _ := vic.CreateValidatorInfoMiniBlocks(validatorInfo) - shardValidatorInfo := make([]*state.ShardValidatorInfo, len(validatorInfo[0])) - marshalledShardValidatorInfo := make([][]byte, len(validatorInfo[0])) - for i := 0; i < len(validatorInfo[0]); i++ { - shardValidatorInfo[i] = createShardValidatorInfo(validatorInfo[0][i]) + shardValidatorInfo := make([]*state.ShardValidatorInfo, len(validatorInfo.GetShardValidatorsInfoMap()[0])) + marshalledShardValidatorInfo := make([][]byte, len(validatorInfo.GetShardValidatorsInfoMap()[0])) + for i := 0; i < len(validatorInfo.GetShardValidatorsInfoMap()[0]); i++ { + shardValidatorInfo[i] = createShardValidatorInfo(validatorInfo.GetShardValidatorsInfoMap()[0][i]) marshalledShardValidatorInfo[i], _ = arguments.Marshalizer.Marshal(shardValidatorInfo[i]) } - correctMB0 := verifyMiniBlocks(mbs[0], validatorInfo[0], marshalledShardValidatorInfo, arguments.Marshalizer) + correctMB0 := verifyMiniBlocks(mbs[0], validatorInfo.GetShardValidatorsInfoMap()[0], marshalledShardValidatorInfo, arguments.Marshalizer) require.True(t, correctMB0) - shardValidatorInfo = make([]*state.ShardValidatorInfo, len(validatorInfo[core.MetachainShardId])) - marshalledShardValidatorInfo = make([][]byte, len(validatorInfo[core.MetachainShardId])) - for i := 0; i < len(validatorInfo[core.MetachainShardId]); i++ { - shardValidatorInfo[i] = createShardValidatorInfo(validatorInfo[core.MetachainShardId][i]) + shardValidatorInfo = make([]*state.ShardValidatorInfo, len(validatorInfo.GetShardValidatorsInfoMap()[core.MetachainShardId])) + marshalledShardValidatorInfo = make([][]byte, len(validatorInfo.GetShardValidatorsInfoMap()[core.MetachainShardId])) + for i := 0; i < len(validatorInfo.GetShardValidatorsInfoMap()[core.MetachainShardId]); i++ { + shardValidatorInfo[i] = createShardValidatorInfo(validatorInfo.GetShardValidatorsInfoMap()[core.MetachainShardId][i]) marshalledShardValidatorInfo[i], _ = arguments.Marshalizer.Marshal(shardValidatorInfo[i]) } - correctMbMeta := verifyMiniBlocks(mbs[1], validatorInfo[core.MetachainShardId], marshalledShardValidatorInfo, arguments.Marshalizer) + correctMbMeta := verifyMiniBlocks(mbs[1], validatorInfo.GetShardValidatorsInfoMap()[core.MetachainShardId], marshalledShardValidatorInfo, arguments.Marshalizer) require.True(t, correctMbMeta) } @@ -398,11 +398,11 @@ func TestEpochValidatorInfoCreator_VerifyValidatorInfoMiniBlocksNilOneMiniblock( } func createValidatorInfoMiniBlocks( - validatorInfo map[uint32][]*state.ValidatorInfo, + validatorInfo 
state.ShardValidatorsInfoMapHandler, arguments ArgsNewValidatorInfoCreator, ) []*block.MiniBlock { miniblocks := make([]*block.MiniBlock, 0) - for _, validators := range validatorInfo { + for _, validators := range validatorInfo.GetShardValidatorsInfoMap() { if len(validators) == 0 { continue } @@ -413,10 +413,10 @@ func createValidatorInfoMiniBlocks( miniBlock.TxHashes = make([][]byte, len(validators)) miniBlock.Type = block.PeerBlock - validatorCopy := make([]*state.ValidatorInfo, len(validators)) + validatorCopy := make([]state.ValidatorInfoHandler, len(validators)) copy(validatorCopy, validators) sort.Slice(validatorCopy, func(a, b int) bool { - return bytes.Compare(validatorCopy[a].PublicKey, validatorCopy[b].PublicKey) < 0 + return bytes.Compare(validatorCopy[a].GetPublicKey(), validatorCopy[b].GetPublicKey()) < 0 }) for index, validator := range validatorCopy { @@ -1129,7 +1129,7 @@ func testCreateMiniblockBackwardsCompatibility(t *testing.T, deterministFixEnabl require.Equal(t, len(input), len(expected)) - validators := make([]*state.ValidatorInfo, 0, len(input)) + validators := state.NewShardValidatorsInfoMap() marshaller := &marshal.GogoProtoMarshalizer{} for _, marshalledData := range input { vinfo := &state.ValidatorInfo{} @@ -1139,7 +1139,8 @@ func testCreateMiniblockBackwardsCompatibility(t *testing.T, deterministFixEnabl err = marshaller.Unmarshal(vinfo, buffMarshalledData) require.Nil(t, err) - validators = append(validators, vinfo) + err = validators.Add(vinfo) + require.Nil(t, err) } arguments := createMockEpochValidatorInfoCreatorsArguments() @@ -1157,7 +1158,7 @@ func testCreateMiniblockBackwardsCompatibility(t *testing.T, deterministFixEnabl arguments.ValidatorInfoStorage = storer vic, _ := NewValidatorInfoCreator(arguments) - mb, err := vic.createMiniBlock(validators) + mb, err := vic.createMiniBlock(validators.GetAllValidatorsInfo()) require.Nil(t, err) // test all generated miniblock's "txhashes" are the same with the expected ones @@ -1274,7 +1275,7 @@ func TestValidatorInfoCreator_sortValidators(t *testing.T) { } vic, _ := NewValidatorInfoCreator(arguments) - list := []*state.ValidatorInfo{thirdValidator, secondValidator, firstValidator} + list := []state.ValidatorInfoHandler{thirdValidator, secondValidator, firstValidator} vic.sortValidators(list) assert.Equal(t, list[0], secondValidator) // order not changed for the ones with same public key @@ -1292,7 +1293,7 @@ func TestValidatorInfoCreator_sortValidators(t *testing.T) { } vic, _ := NewValidatorInfoCreator(arguments) - list := []*state.ValidatorInfo{thirdValidator, secondValidator, firstValidator} + list := []state.ValidatorInfoHandler{thirdValidator, secondValidator, firstValidator} vic.sortValidators(list) assert.Equal(t, list[0], firstValidator) // proper sorting diff --git a/epochStart/mock/builtInCostHandlerStub.go b/epochStart/mock/builtInCostHandlerStub.go deleted file mode 100644 index 4ee3b23b062..00000000000 --- a/epochStart/mock/builtInCostHandlerStub.go +++ /dev/null @@ -1,24 +0,0 @@ -package mock - -import ( - "github.com/multiversx/mx-chain-core-go/data" -) - -// BuiltInCostHandlerStub - -type BuiltInCostHandlerStub struct { -} - -// ComputeBuiltInCost - -func (b *BuiltInCostHandlerStub) ComputeBuiltInCost(_ data.TransactionWithFeeHandler) uint64 { - return 1 -} - -// IsBuiltInFuncCall - -func (b *BuiltInCostHandlerStub) IsBuiltInFuncCall(_ data.TransactionWithFeeHandler) bool { - return false -} - -// IsInterfaceNil - -func (b *BuiltInCostHandlerStub) IsInterfaceNil() bool { - return b == nil -} 
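The validator-info refactor above (epochStart/metachain/validators.go and its test) replaces the raw map[uint32][]*state.ValidatorInfo with the state.ShardValidatorsInfoMapHandler abstraction, so mini-block creation works against the ValidatorInfoHandler interface instead of concrete structs. Below is a minimal, self-contained sketch of that handler as exercised in this diff; it uses only the methods visible above (NewShardValidatorsInfoMap, Add, GetShardValidatorsInfoMap, GetAllValidatorsInfo), and the field values are illustrative, not real network data.

package main

import (
	"fmt"

	"github.com/multiversx/mx-chain-core-go/core"
	"github.com/multiversx/mx-chain-go/state"
)

func main() {
	validatorsInfo := state.NewShardValidatorsInfoMap()

	// Validators are added one by one; the handler groups them per shard internally.
	_ = validatorsInfo.Add(&state.ValidatorInfo{
		PublicKey: []byte("a1"),
		ShardId:   0,
		List:      "eligible",
		Index:     1,
	})
	_ = validatorsInfo.Add(&state.ValidatorInfo{
		PublicKey: []byte("m1"),
		ShardId:   core.MetachainShardId,
		List:      "waiting",
		Index:     2,
	})

	// Per-shard view, as consumed by CreateValidatorInfoMiniBlocks.
	perShard := validatorsInfo.GetShardValidatorsInfoMap()
	fmt.Println("validators in shard 0:", len(perShard[0]))
	fmt.Println("validators in metachain:", len(perShard[core.MetachainShardId]))

	// Flat view, as consumed by the backwards-compatibility test when calling createMiniBlock.
	fmt.Println("all validators:", len(validatorsInfo.GetAllValidatorsInfo()))
}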
diff --git a/epochStart/mock/nodesSetupStub.go b/epochStart/mock/nodesSetupStub.go deleted file mode 100644 index 8a79c4330cb..00000000000 --- a/epochStart/mock/nodesSetupStub.go +++ /dev/null @@ -1,191 +0,0 @@ -package mock - -import ( - "github.com/multiversx/mx-chain-go/sharding/nodesCoordinator" -) - -// NodesSetupStub - -type NodesSetupStub struct { - InitialNodesInfoForShardCalled func(shardId uint32) ([]nodesCoordinator.GenesisNodeInfoHandler, []nodesCoordinator.GenesisNodeInfoHandler, error) - InitialNodesInfoCalled func() (map[uint32][]nodesCoordinator.GenesisNodeInfoHandler, map[uint32][]nodesCoordinator.GenesisNodeInfoHandler) - GetStartTimeCalled func() int64 - GetRoundDurationCalled func() uint64 - GetShardConsensusGroupSizeCalled func() uint32 - GetMetaConsensusGroupSizeCalled func() uint32 - NumberOfShardsCalled func() uint32 - MinNumberOfNodesCalled func() uint32 - AllInitialNodesCalled func() []nodesCoordinator.GenesisNodeInfoHandler - GetAdaptivityCalled func() bool - GetHysteresisCalled func() float32 - GetShardIDForPubKeyCalled func(pubkey []byte) (uint32, error) - InitialEligibleNodesPubKeysForShardCalled func(shardId uint32) ([]string, error) - InitialNodesPubKeysCalled func() map[uint32][]string - MinNumberOfMetaNodesCalled func() uint32 - MinNumberOfShardNodesCalled func() uint32 - MinNumberOfNodesWithHysteresisCalled func() uint32 - MinShardHysteresisNodesCalled func() uint32 - MinMetaHysteresisNodesCalled func() uint32 -} - -// MinNumberOfNodes - -func (n *NodesSetupStub) MinNumberOfNodes() uint32 { - if n.MinNumberOfNodesCalled != nil { - return n.MinNumberOfNodesCalled() - } - return 1 -} - -// GetStartTime - -func (n *NodesSetupStub) GetStartTime() int64 { - if n.GetStartTimeCalled != nil { - return n.GetStartTimeCalled() - } - return 0 -} - -// GetRoundDuration - -func (n *NodesSetupStub) GetRoundDuration() uint64 { - if n.GetRoundDurationCalled != nil { - return n.GetRoundDurationCalled() - } - return 0 -} - -// GetShardConsensusGroupSize - -func (n *NodesSetupStub) GetShardConsensusGroupSize() uint32 { - if n.GetShardConsensusGroupSizeCalled != nil { - return n.GetShardConsensusGroupSizeCalled() - } - return 0 -} - -// GetMetaConsensusGroupSize - -func (n *NodesSetupStub) GetMetaConsensusGroupSize() uint32 { - if n.GetMetaConsensusGroupSizeCalled != nil { - return n.GetMetaConsensusGroupSizeCalled() - } - return 0 -} - -// NumberOfShards - -func (n *NodesSetupStub) NumberOfShards() uint32 { - if n.NumberOfShardsCalled != nil { - return n.NumberOfShardsCalled() - } - return 0 -} - -// InitialNodesInfoForShard - -func (n *NodesSetupStub) InitialNodesInfoForShard(shardId uint32) ([]nodesCoordinator.GenesisNodeInfoHandler, []nodesCoordinator.GenesisNodeInfoHandler, error) { - if n.InitialNodesInfoForShardCalled != nil { - return n.InitialNodesInfoForShardCalled(shardId) - } - return nil, nil, nil -} - -// InitialNodesInfo - -func (n *NodesSetupStub) InitialNodesInfo() (map[uint32][]nodesCoordinator.GenesisNodeInfoHandler, map[uint32][]nodesCoordinator.GenesisNodeInfoHandler) { - if n.InitialNodesInfoCalled != nil { - return n.InitialNodesInfoCalled() - } - return nil, nil -} - -// AllInitialNodes - -func (n *NodesSetupStub) AllInitialNodes() []nodesCoordinator.GenesisNodeInfoHandler { - if n.AllInitialNodesCalled != nil { - return n.AllInitialNodesCalled() - } - return nil -} - -// GetAdaptivity - -func (n *NodesSetupStub) GetAdaptivity() bool { - if n.GetAdaptivityCalled != nil { - return n.GetAdaptivityCalled() - } - - return false -} - -// GetHysteresis - 
-func (n *NodesSetupStub) GetHysteresis() float32 { - if n.GetHysteresisCalled != nil { - return n.GetHysteresisCalled() - } - - return 0 -} - -// GetShardIDForPubKey - -func (n *NodesSetupStub) GetShardIDForPubKey(pubkey []byte) (uint32, error) { - if n.GetShardIDForPubKeyCalled != nil { - return n.GetShardIDForPubKeyCalled(pubkey) - } - return 0, nil -} - -// InitialEligibleNodesPubKeysForShard - -func (n *NodesSetupStub) InitialEligibleNodesPubKeysForShard(shardId uint32) ([]string, error) { - if n.InitialEligibleNodesPubKeysForShardCalled != nil { - return n.InitialEligibleNodesPubKeysForShardCalled(shardId) - } - - return []string{"val1", "val2"}, nil -} - -// InitialNodesPubKeys - -func (n *NodesSetupStub) InitialNodesPubKeys() map[uint32][]string { - if n.InitialNodesPubKeysCalled != nil { - return n.InitialNodesPubKeysCalled() - } - - return map[uint32][]string{0: {"val1", "val2"}} -} - -// MinNumberOfMetaNodes - -func (n *NodesSetupStub) MinNumberOfMetaNodes() uint32 { - if n.MinNumberOfMetaNodesCalled != nil { - return n.MinNumberOfMetaNodesCalled() - } - - return 1 -} - -// MinNumberOfShardNodes - -func (n *NodesSetupStub) MinNumberOfShardNodes() uint32 { - if n.MinNumberOfShardNodesCalled != nil { - return n.MinNumberOfShardNodesCalled() - } - - return 1 -} - -// MinNumberOfNodesWithHysteresis - -func (n *NodesSetupStub) MinNumberOfNodesWithHysteresis() uint32 { - if n.MinNumberOfNodesWithHysteresisCalled != nil { - return n.MinNumberOfNodesWithHysteresisCalled() - } - return n.MinNumberOfNodes() -} - -// MinShardHysteresisNodes - -func (n *NodesSetupStub) MinShardHysteresisNodes() uint32 { - if n.MinShardHysteresisNodesCalled != nil { - return n.MinShardHysteresisNodesCalled() - } - return 1 -} - -// MinMetaHysteresisNodes - -func (n *NodesSetupStub) MinMetaHysteresisNodes() uint32 { - if n.MinMetaHysteresisNodesCalled != nil { - return n.MinMetaHysteresisNodesCalled() - } - return 1 -} - -// IsInterfaceNil - -func (n *NodesSetupStub) IsInterfaceNil() bool { - return n == nil -} diff --git a/epochStart/mock/validatorStatisticsProcessorStub.go b/epochStart/mock/validatorStatisticsProcessorStub.go deleted file mode 100644 index 4bb574a5ba5..00000000000 --- a/epochStart/mock/validatorStatisticsProcessorStub.go +++ /dev/null @@ -1,38 +0,0 @@ -package mock - -import ( - "github.com/multiversx/mx-chain-core-go/data" -) - -// ValidatorStatisticsProcessorStub - -type ValidatorStatisticsProcessorStub struct { - ProcessCalled func(validatorInfo data.ShardValidatorInfoHandler) error - CommitCalled func() ([]byte, error) - IsInterfaceNilCalled func() bool -} - -// Process - -func (pm *ValidatorStatisticsProcessorStub) Process(validatorInfo data.ShardValidatorInfoHandler) error { - if pm.ProcessCalled != nil { - return pm.ProcessCalled(validatorInfo) - } - - return nil -} - -// Commit - -func (pm *ValidatorStatisticsProcessorStub) Commit() ([]byte, error) { - if pm.CommitCalled != nil { - return pm.CommitCalled() - } - - return nil, nil -} - -// IsInterfaceNil - -func (pm *ValidatorStatisticsProcessorStub) IsInterfaceNil() bool { - if pm.IsInterfaceNilCalled != nil { - return pm.IsInterfaceNilCalled() - } - return false -} diff --git a/epochStart/notifier/errors.go b/epochStart/notifier/errors.go new file mode 100644 index 00000000000..eba24016fa1 --- /dev/null +++ b/epochStart/notifier/errors.go @@ -0,0 +1,5 @@ +package notifier + +import "errors" + +var errNoMaxNodesConfigChangeForStakingV4 = errors.New("no MaxNodesChangeEnableEpoch config found for EpochEnable = 
StakingV4Step3EnableEpoch") diff --git a/epochStart/notifier/nodesConfigProvider.go b/epochStart/notifier/nodesConfigProvider.go new file mode 100644 index 00000000000..273f750ae44 --- /dev/null +++ b/epochStart/notifier/nodesConfigProvider.go @@ -0,0 +1,82 @@ +package notifier + +import ( + "sort" + "sync" + + "github.com/multiversx/mx-chain-core-go/core/check" + "github.com/multiversx/mx-chain-go/config" + "github.com/multiversx/mx-chain-go/epochStart" + "github.com/multiversx/mx-chain-go/process" +) + +type nodesConfigProvider struct { + mutex sync.RWMutex + currentEpoch uint32 + currentNodesConfig config.MaxNodesChangeConfig + allNodesConfigs []config.MaxNodesChangeConfig +} + +// NewNodesConfigProvider returns a new instance of nodesConfigProvider, which provides the current +// config.MaxNodesChangeConfig based on the current epoch +func NewNodesConfigProvider( + epochNotifier process.EpochNotifier, + maxNodesEnableConfig []config.MaxNodesChangeConfig, +) (*nodesConfigProvider, error) { + if check.IfNil(epochNotifier) { + return nil, epochStart.ErrNilEpochNotifier + } + + ncp := &nodesConfigProvider{ + allNodesConfigs: make([]config.MaxNodesChangeConfig, len(maxNodesEnableConfig)), + } + copy(ncp.allNodesConfigs, maxNodesEnableConfig) + ncp.sortConfigs() + epochNotifier.RegisterNotifyHandler(ncp) + + return ncp, nil +} + +func (ncp *nodesConfigProvider) sortConfigs() { + ncp.mutex.Lock() + defer ncp.mutex.Unlock() + + sort.Slice(ncp.allNodesConfigs, func(i, j int) bool { + return ncp.allNodesConfigs[i].EpochEnable < ncp.allNodesConfigs[j].EpochEnable + }) +} + +// GetAllNodesConfig returns all config.MaxNodesChangeConfig +func (ncp *nodesConfigProvider) GetAllNodesConfig() []config.MaxNodesChangeConfig { + ncp.mutex.RLock() + defer ncp.mutex.RUnlock() + + return ncp.allNodesConfigs +} + +// GetCurrentNodesConfig returns the current config.MaxNodesChangeConfig, based on epoch +func (ncp *nodesConfigProvider) GetCurrentNodesConfig() config.MaxNodesChangeConfig { + ncp.mutex.RLock() + defer ncp.mutex.RUnlock() + + return ncp.currentNodesConfig +} + +// EpochConfirmed is called whenever a new epoch is confirmed +func (ncp *nodesConfigProvider) EpochConfirmed(epoch uint32, _ uint64) { + ncp.mutex.Lock() + defer ncp.mutex.Unlock() + + for _, maxNodesConfig := range ncp.allNodesConfigs { + if epoch >= maxNodesConfig.EpochEnable { + ncp.currentNodesConfig = maxNodesConfig + } + } + + ncp.currentEpoch = epoch +} + +// IsInterfaceNil checks if the underlying pointer is nil +func (ncp *nodesConfigProvider) IsInterfaceNil() bool { + return ncp == nil +} diff --git a/epochStart/notifier/nodesConfigProviderAPI.go b/epochStart/notifier/nodesConfigProviderAPI.go new file mode 100644 index 00000000000..3db0d028ece --- /dev/null +++ b/epochStart/notifier/nodesConfigProviderAPI.go @@ -0,0 +1,71 @@ +package notifier + +import ( + "fmt" + + "github.com/multiversx/mx-chain-go/config" + "github.com/multiversx/mx-chain-go/process" +) + +type nodesConfigProviderAPI struct { + *nodesConfigProvider + stakingV4Step2Epoch uint32 + stakingV4Step3MaxNodesConfig config.MaxNodesChangeConfig +} + +// NewNodesConfigProviderAPI returns a new instance of nodes config provider for API calls only, which provides the current +// max nodes change config based on the current epoch +func NewNodesConfigProviderAPI( + epochNotifier process.EpochNotifier, + cfg config.EnableEpochs, +) (*nodesConfigProviderAPI, error) { + nodesCfgProvider, err := NewNodesConfigProvider(epochNotifier, cfg.MaxNodesChangeEnableEpoch) + if err != nil 
{ + return nil, err + } + + stakingV4Step3MaxNodesConfig, err := getStakingV4Step3MaxNodesConfig(nodesCfgProvider.allNodesConfigs, cfg.StakingV4Step3EnableEpoch) + if err != nil { + return nil, err + } + + return &nodesConfigProviderAPI{ + nodesConfigProvider: nodesCfgProvider, + stakingV4Step2Epoch: cfg.StakingV4Step2EnableEpoch, + stakingV4Step3MaxNodesConfig: stakingV4Step3MaxNodesConfig, + }, nil +} + +func getStakingV4Step3MaxNodesConfig( + allNodesConfigs []config.MaxNodesChangeConfig, + stakingV4Step3EnableEpoch uint32, +) (config.MaxNodesChangeConfig, error) { + for _, cfg := range allNodesConfigs { + if cfg.EpochEnable == stakingV4Step3EnableEpoch { + return cfg, nil + } + } + + return config.MaxNodesChangeConfig{}, fmt.Errorf("%w when creating api nodes config provider", errNoMaxNodesConfigChangeForStakingV4) +} + +// GetCurrentNodesConfig retrieves the current configuration of nodes. However, when invoked during epoch stakingV4 step 2 +// through API calls, it will provide the nodes configuration as it will appear in epoch stakingV4 step 3. This adjustment +// is made because, with the transition to step 3 at the epoch change, the maximum number of nodes will be reduced. +// Therefore, calling this API during step 2 aims to offer a preview of the upcoming epoch, accurately reflecting the +// adjusted number of nodes that will qualify from the auction. +func (ncp *nodesConfigProviderAPI) GetCurrentNodesConfig() config.MaxNodesChangeConfig { + ncp.mutex.RLock() + defer ncp.mutex.RUnlock() + + if ncp.currentEpoch == ncp.stakingV4Step2Epoch { + return ncp.stakingV4Step3MaxNodesConfig + } + + return ncp.currentNodesConfig +} + +// IsInterfaceNil checks if the underlying pointer is nil +func (ncp *nodesConfigProviderAPI) IsInterfaceNil() bool { + return ncp == nil +} diff --git a/epochStart/notifier/nodesConfigProviderAPI_test.go b/epochStart/notifier/nodesConfigProviderAPI_test.go new file mode 100644 index 00000000000..5438d533741 --- /dev/null +++ b/epochStart/notifier/nodesConfigProviderAPI_test.go @@ -0,0 +1,95 @@ +package notifier + +import ( + "testing" + + "github.com/multiversx/mx-chain-core-go/data/block" + "github.com/multiversx/mx-chain-go/common/forking" + "github.com/multiversx/mx-chain-go/config" + "github.com/multiversx/mx-chain-go/process" + epochNotifierMock "github.com/multiversx/mx-chain-go/testscommon/epochNotifier" + "github.com/stretchr/testify/require" +) + +func getEnableEpochCfg() config.EnableEpochs { + return config.EnableEpochs{ + StakingV4Step1EnableEpoch: 2, + StakingV4Step2EnableEpoch: 3, + StakingV4Step3EnableEpoch: 4, + MaxNodesChangeEnableEpoch: []config.MaxNodesChangeConfig{ + { + EpochEnable: 0, + MaxNumNodes: 36, + NodesToShufflePerShard: 4, + }, + { + EpochEnable: 1, + MaxNumNodes: 64, + NodesToShufflePerShard: 2, + }, + { + EpochEnable: 4, + MaxNumNodes: 56, + NodesToShufflePerShard: 2, + }, + }, + } +} + +func TestNewNodesConfigProviderAPI(t *testing.T) { + t.Parallel() + + t.Run("nil epoch notifier, should return error", func(t *testing.T) { + ncp, err := NewNodesConfigProviderAPI(nil, config.EnableEpochs{}) + require.Equal(t, process.ErrNilEpochNotifier, err) + require.Nil(t, ncp) + }) + + t.Run("no nodes config for staking v4 step 3, should return error", func(t *testing.T) { + ncp, err := NewNodesConfigProviderAPI(&epochNotifierMock.EpochNotifierStub{}, config.EnableEpochs{}) + require.ErrorIs(t, err, errNoMaxNodesConfigChangeForStakingV4) + require.Nil(t, ncp) + }) + + t.Run("should work", func(t *testing.T) { + ncp, err := 
NewNodesConfigProviderAPI(&epochNotifierMock.EpochNotifierStub{}, getEnableEpochCfg()) + require.Nil(t, err) + require.False(t, ncp.IsInterfaceNil()) + }) +} + +func TestNodesConfigProviderAPI_GetCurrentNodesConfig(t *testing.T) { + t.Parallel() + + epochNotifier := forking.NewGenericEpochNotifier() + enableEpochCfg := getEnableEpochCfg() + ncp, _ := NewNodesConfigProviderAPI(epochNotifier, enableEpochCfg) + + maxNodesConfig1 := enableEpochCfg.MaxNodesChangeEnableEpoch[0] + maxNodesConfig2 := enableEpochCfg.MaxNodesChangeEnableEpoch[1] + maxNodesConfigStakingV4Step3 := enableEpochCfg.MaxNodesChangeEnableEpoch[2] + + require.Equal(t, maxNodesConfig1, ncp.GetCurrentNodesConfig()) + + epochNotifier.CheckEpoch(&block.Header{Epoch: enableEpochCfg.StakingV4Step1EnableEpoch}) + require.Equal(t, maxNodesConfig2, ncp.GetCurrentNodesConfig()) + + epochNotifier.CheckEpoch(&block.Header{Epoch: enableEpochCfg.StakingV4Step2EnableEpoch}) + require.Equal(t, maxNodesConfigStakingV4Step3, ncp.GetCurrentNodesConfig()) + + epochNotifier.CheckEpoch(&block.Header{Epoch: enableEpochCfg.StakingV4Step3EnableEpoch}) + require.Equal(t, maxNodesConfigStakingV4Step3, ncp.GetCurrentNodesConfig()) + + epochNotifier.CheckEpoch(&block.Header{Epoch: enableEpochCfg.StakingV4Step3EnableEpoch + 1}) + require.Equal(t, maxNodesConfigStakingV4Step3, ncp.GetCurrentNodesConfig()) + + // simulate restart + epochNotifier.CheckEpoch(&block.Header{Epoch: 0}) + epochNotifier.CheckEpoch(&block.Header{Epoch: enableEpochCfg.StakingV4Step2EnableEpoch}) + require.Equal(t, maxNodesConfigStakingV4Step3, ncp.GetCurrentNodesConfig()) + + // simulate restart + epochNotifier.CheckEpoch(&block.Header{Epoch: 0}) + epochNotifier.CheckEpoch(&block.Header{Epoch: enableEpochCfg.StakingV4Step3EnableEpoch}) + require.Equal(t, maxNodesConfigStakingV4Step3, ncp.GetCurrentNodesConfig()) +} diff --git a/epochStart/notifier/nodesConfigProvider_test.go b/epochStart/notifier/nodesConfigProvider_test.go new file mode 100644 index 00000000000..a813ff4b48d --- /dev/null +++ b/epochStart/notifier/nodesConfigProvider_test.go @@ -0,0 +1,121 @@ +package notifier + +import ( + "testing" + + "github.com/multiversx/mx-chain-core-go/data/block" + "github.com/multiversx/mx-chain-go/common/forking" + "github.com/multiversx/mx-chain-go/config" + "github.com/multiversx/mx-chain-go/process" + "github.com/stretchr/testify/require" +) + +func TestNewNodesConfigProvider(t *testing.T) { + t.Parallel() + + ncp, err := NewNodesConfigProvider(nil, nil) + require.Equal(t, process.ErrNilEpochNotifier, err) + require.True(t, ncp.IsInterfaceNil()) + + epochNotifier := forking.NewGenericEpochNotifier() + ncp, err = NewNodesConfigProvider(epochNotifier, nil) + require.Nil(t, err) + require.False(t, ncp.IsInterfaceNil()) +} + +func TestNodesConfigProvider_GetAllNodesConfigSorted(t *testing.T) { + t.Parallel() + + nodesConfigEpoch0 := config.MaxNodesChangeConfig{ + EpochEnable: 0, + MaxNumNodes: 36, + NodesToShufflePerShard: 4, + } + nodesConfigEpoch1 := config.MaxNodesChangeConfig{ + EpochEnable: 1, + MaxNumNodes: 56, + NodesToShufflePerShard: 2, + } + nodesConfigEpoch6 := config.MaxNodesChangeConfig{ + EpochEnable: 6, + MaxNumNodes: 48, + NodesToShufflePerShard: 1, + } + + unsortedNodesConfig := []config.MaxNodesChangeConfig{ + nodesConfigEpoch6, + nodesConfigEpoch0, + nodesConfigEpoch1, + } + sortedNodesConfig := []config.MaxNodesChangeConfig{ + nodesConfigEpoch0, + nodesConfigEpoch1, + nodesConfigEpoch6, + } + + epochNotifier := forking.NewGenericEpochNotifier() + ncp, _ := 
NewNodesConfigProvider(epochNotifier, unsortedNodesConfig) + require.Equal(t, sortedNodesConfig, ncp.GetAllNodesConfig()) +} + +func TestNodesConfigProvider_EpochConfirmedCorrectMaxNumNodesAfterNodeRestart(t *testing.T) { + t.Parallel() + + nodesConfigEpoch0 := config.MaxNodesChangeConfig{ + EpochEnable: 0, + MaxNumNodes: 36, + NodesToShufflePerShard: 4, + } + nodesConfigEpoch1 := config.MaxNodesChangeConfig{ + EpochEnable: 1, + MaxNumNodes: 56, + NodesToShufflePerShard: 2, + } + nodesConfigEpoch6 := config.MaxNodesChangeConfig{ + EpochEnable: 6, + MaxNumNodes: 48, + NodesToShufflePerShard: 1, + } + + allNodesConfig := []config.MaxNodesChangeConfig{ + nodesConfigEpoch0, + nodesConfigEpoch1, + nodesConfigEpoch6, + } + epochNotifier := forking.NewGenericEpochNotifier() + ncp, _ := NewNodesConfigProvider(epochNotifier, allNodesConfig) + + epochNotifier.CheckEpoch(&block.Header{Epoch: 0}) + require.Equal(t, nodesConfigEpoch0, ncp.GetCurrentNodesConfig()) + + epochNotifier.CheckEpoch(&block.Header{Epoch: 1}) + require.Equal(t, nodesConfigEpoch1, ncp.GetCurrentNodesConfig()) + + for epoch := uint32(2); epoch <= 5; epoch++ { + epochNotifier.CheckEpoch(&block.Header{Epoch: epoch}) + require.Equal(t, nodesConfigEpoch1, ncp.GetCurrentNodesConfig()) + } + + // simulate restart + epochNotifier.CheckEpoch(&block.Header{Epoch: 0}) + epochNotifier.CheckEpoch(&block.Header{Epoch: 5}) + require.Equal(t, nodesConfigEpoch1, ncp.GetCurrentNodesConfig()) + + epochNotifier.CheckEpoch(&block.Header{Epoch: 6}) + require.Equal(t, nodesConfigEpoch6, ncp.GetCurrentNodesConfig()) + + // simulate restart + epochNotifier.CheckEpoch(&block.Header{Epoch: 0}) + epochNotifier.CheckEpoch(&block.Header{Epoch: 6}) + require.Equal(t, nodesConfigEpoch6, ncp.GetCurrentNodesConfig()) + + for epoch := uint32(7); epoch <= 20; epoch++ { + epochNotifier.CheckEpoch(&block.Header{Epoch: epoch}) + require.Equal(t, nodesConfigEpoch6, ncp.GetCurrentNodesConfig()) + } + + // simulate restart + epochNotifier.CheckEpoch(&block.Header{Epoch: 1}) + epochNotifier.CheckEpoch(&block.Header{Epoch: 21}) + require.Equal(t, nodesConfigEpoch6, ncp.GetCurrentNodesConfig()) +} diff --git a/epochStart/shardchain/trigger.go b/epochStart/shardchain/trigger.go index e3f09fdf2a0..496702b8d81 100644 --- a/epochStart/shardchain/trigger.go +++ b/epochStart/shardchain/trigger.go @@ -46,14 +46,15 @@ type ArgsShardEpochStartTrigger struct { HeaderValidator epochStart.HeaderValidator Uint64Converter typeConverters.Uint64ByteSliceConverter - DataPool dataRetriever.PoolsHolder - Storage dataRetriever.StorageService - RequestHandler epochStart.RequestHandler - EpochStartNotifier epochStart.Notifier - PeerMiniBlocksSyncer process.ValidatorInfoSyncer - RoundHandler process.RoundHandler - AppStatusHandler core.AppStatusHandler - EnableEpochsHandler common.EnableEpochsHandler + DataPool dataRetriever.PoolsHolder + Storage dataRetriever.StorageService + RequestHandler epochStart.RequestHandler + EpochStartNotifier epochStart.Notifier + PeerMiniBlocksSyncer process.ValidatorInfoSyncer + RoundHandler process.RoundHandler + AppStatusHandler core.AppStatusHandler + EnableEpochsHandler common.EnableEpochsHandler + ExtraDelayForRequestBlockInfo time.Duration Epoch uint32 Validity uint64 @@ -112,6 +113,8 @@ type trigger struct { mutMissingMiniBlocks sync.RWMutex mutMissingValidatorsInfo sync.RWMutex cancelFunc func() + + extraDelayForRequestBlockInfo time.Duration } type metaInfo struct { @@ -221,10 +224,14 @@ func NewEpochStartTrigger(args *ArgsShardEpochStartTrigger) (*trigger, 
error) { return nil, err } - trigggerStateKey := common.TriggerRegistryInitialKeyPrefix + fmt.Sprintf("%d", args.Epoch) + if args.ExtraDelayForRequestBlockInfo != common.ExtraDelayForRequestBlockInfo { + log.Warn("different delay for request block info: the epoch change trigger might not behave normally", + "value from config", args.ExtraDelayForRequestBlockInfo.String(), "expected", common.ExtraDelayForRequestBlockInfo.String()) + } + triggerStateKey := common.TriggerRegistryInitialKeyPrefix + fmt.Sprintf("%d", args.Epoch) t := &trigger{ - triggerStateKey: []byte(trigggerStateKey), + triggerStateKey: []byte(triggerStateKey), epoch: args.Epoch, metaEpoch: args.Epoch, currentRoundIndex: 0, @@ -260,6 +267,7 @@ func NewEpochStartTrigger(args *ArgsShardEpochStartTrigger) (*trigger, error) { appStatusHandler: args.AppStatusHandler, roundHandler: args.RoundHandler, enableEpochsHandler: args.EnableEpochsHandler, + extraDelayForRequestBlockInfo: args.ExtraDelayForRequestBlockInfo, } t.headersPool.RegisterHandler(t.receivedMetaBlock) @@ -586,7 +594,7 @@ func (t *trigger) receivedMetaBlock(headerHandler data.HeaderHandler, metaBlockH t.newEpochHdrReceived = true t.mapEpochStartHdrs[string(metaBlockHash)] = metaHdr // waiting for late broadcast of mini blocks and transactions to be done and received - wait := common.ExtraDelayForRequestBlockInfo + wait := t.extraDelayForRequestBlockInfo roundDifferences := t.roundHandler.Index() - int64(headerHandler.GetRound()) if roundDifferences > 1 { wait = 0 diff --git a/errors/errors.go b/errors/errors.go index 81f547d8bea..771c65adc07 100644 --- a/errors/errors.go +++ b/errors/errors.go @@ -593,5 +593,5 @@ var ErrEmptyAddress = errors.New("empty Address") // ErrInvalidNodeOperationMode signals that an invalid node operation mode has been provided var ErrInvalidNodeOperationMode = errors.New("invalid node operation mode") -// ErrNilTxExecutionOrderHandler signals that a nil tx execution order handler has been provided -var ErrNilTxExecutionOrderHandler = errors.New("nil tx execution order handler") +// ErrNilSentSignatureTracker defines the error for setting a nil SentSignatureTracker +var ErrNilSentSignatureTracker = errors.New("nil sent signature tracker") diff --git a/facade/initial/initialNodeFacade.go b/facade/initial/initialNodeFacade.go index 1d45caace60..7411a2078e9 100644 --- a/facade/initial/initialNodeFacade.go +++ b/facade/initial/initialNodeFacade.go @@ -156,6 +156,11 @@ func (inf *initialNodeFacade) ValidatorStatisticsApi() (map[string]*validator.Va return nil, errNodeStarting } +// AuctionListApi returns nil and error +func (inf *initialNodeFacade) AuctionListApi() ([]*common.AuctionListValidatorAPIResponse, error) { + return nil, errNodeStarting +} + // SendBulkTransactions returns 0 and error func (inf *initialNodeFacade) SendBulkTransactions(_ []*transaction.Transaction) (uint64, error) { return uint64(0), errNodeStarting @@ -426,6 +431,11 @@ func (inf *initialNodeFacade) GetManagedKeys() []string { return nil } +// GetLoadedKeys returns nil +func (inf *initialNodeFacade) GetLoadedKeys() []string { + return nil +} + // GetEligibleManagedKeys returns nil and error func (inf *initialNodeFacade) GetEligibleManagedKeys() ([]string, error) { return nil, errNodeStarting diff --git a/facade/initial/initialNodeFacade_test.go b/facade/initial/initialNodeFacade_test.go index 2633349d69f..294f0accfca 100644 --- a/facade/initial/initialNodeFacade_test.go +++ b/facade/initial/initialNodeFacade_test.go @@ -95,6 +95,10 @@ func 
TestInitialNodeFacade_AllMethodsShouldNotPanic(t *testing.T) { assert.Nil(t, v1) assert.Equal(t, errNodeStarting, err) + v2, err := inf.AuctionListApi() + assert.Nil(t, v2) + assert.Equal(t, errNodeStarting, err) + u1, err := inf.SendBulkTransactions(nil) assert.Equal(t, uint64(0), u1) assert.Equal(t, errNodeStarting, err) @@ -329,12 +333,15 @@ func TestInitialNodeFacade_AllMethodsShouldNotPanic(t *testing.T) { assert.Nil(t, txPoolGaps) assert.Equal(t, errNodeStarting, err) - cnt := inf.GetManagedKeysCount() - assert.Zero(t, cnt) + count := inf.GetManagedKeysCount() + assert.Zero(t, count) keys := inf.GetManagedKeys() assert.Nil(t, keys) + keys = inf.GetLoadedKeys() + assert.Nil(t, keys) + keys, err = inf.GetEligibleManagedKeys() assert.Nil(t, keys) assert.Equal(t, errNodeStarting, err) diff --git a/facade/interface.go b/facade/interface.go index 4c782e6a574..07488622a96 100644 --- a/facade/interface.go +++ b/facade/interface.go @@ -5,6 +5,7 @@ import ( "math/big" "github.com/multiversx/mx-chain-core-go/core" + coreData "github.com/multiversx/mx-chain-core-go/data" "github.com/multiversx/mx-chain-core-go/data/alteredAccount" "github.com/multiversx/mx-chain-core-go/data/api" "github.com/multiversx/mx-chain-core-go/data/esdt" @@ -86,6 +87,8 @@ type NodeHandler interface { // ValidatorStatisticsApi return the statistics for all the validators ValidatorStatisticsApi() (map[string]*validator.ValidatorStatistics, error) + + AuctionListApi() ([]*common.AuctionListValidatorAPIResponse, error) DirectTrigger(epoch uint32, withEarlyEndOfEpoch bool) error IsSelfTrigger() bool @@ -106,7 +109,7 @@ type NodeHandler interface { // TransactionSimulatorProcessor defines the actions which a transaction simulator processor has to implement type TransactionSimulatorProcessor interface { - ProcessTx(tx *transaction.Transaction) (*txSimData.SimulationResultsWithVMOutput, error) + ProcessTx(tx *transaction.Transaction, currentHeader coreData.HeaderHandler) (*txSimData.SimulationResultsWithVMOutput, error) IsInterfaceNil() bool } @@ -142,6 +145,7 @@ type ApiResolver interface { GetGasConfigs() map[string]map[string]uint64 GetManagedKeysCount() int GetManagedKeys() []string + GetLoadedKeys() []string GetEligibleManagedKeys() ([]string, error) GetWaitingManagedKeys() ([]string, error) GetWaitingEpochsLeftForPublicKey(publicKey string) (uint32, error) diff --git a/facade/mock/apiResolverStub.go b/facade/mock/apiResolverStub.go index e2ab9aa3707..33bae8518aa 100644 --- a/facade/mock/apiResolverStub.go +++ b/facade/mock/apiResolverStub.go @@ -46,6 +46,7 @@ type ApiResolverStub struct { GetGasConfigsCalled func() map[string]map[string]uint64 GetManagedKeysCountCalled func() int GetManagedKeysCalled func() []string + GetLoadedKeysCalled func() []string GetEligibleManagedKeysCalled func() ([]string, error) GetWaitingManagedKeysCalled func() ([]string, error) GetWaitingEpochsLeftForPublicKeyCalled func(publicKey string) (uint32, error) @@ -309,6 +310,14 @@ func (ars *ApiResolverStub) GetManagedKeys() []string { return make([]string, 0) } +// GetLoadedKeys - +func (ars *ApiResolverStub) GetLoadedKeys() []string { + if ars.GetLoadedKeysCalled != nil { + return ars.GetLoadedKeysCalled() + } + return make([]string, 0) +} + // GetEligibleManagedKeys - func (ars *ApiResolverStub) GetEligibleManagedKeys() ([]string, error) { if ars.GetEligibleManagedKeysCalled != nil { diff --git a/facade/mock/nodeStub.go b/facade/mock/nodeStub.go index 254f92218ba..74c9cbea536 100644 --- a/facade/mock/nodeStub.go +++ b/facade/mock/nodeStub.go @@ 
-54,6 +54,7 @@ type NodeStub struct { VerifyProofCalled func(rootHash string, address string, proof [][]byte) (bool, error) GetTokenSupplyCalled func(token string) (*api.ESDTSupply, error) IsDataTrieMigratedCalled func(address string, options api.AccountQueryOptions) (bool, error) + AuctionListApiCalled func() ([]*common.AuctionListValidatorAPIResponse, error) } // GetProof - @@ -139,7 +140,11 @@ func (ns *NodeStub) DecodeAddressPubkey(pk string) ([]byte, error) { // GetBalance - func (ns *NodeStub) GetBalance(address string, options api.AccountQueryOptions) (*big.Int, api.BlockInfo, error) { - return ns.GetBalanceCalled(address, options) + if ns.GetBalanceCalled != nil { + return ns.GetBalanceCalled(address, options) + } + + return nil, api.BlockInfo{}, nil } // CreateTransaction - @@ -150,22 +155,38 @@ func (ns *NodeStub) CreateTransaction(txArgs *external.ArgsCreateTransaction) (* // ValidateTransaction - func (ns *NodeStub) ValidateTransaction(tx *transaction.Transaction) error { - return ns.ValidateTransactionHandler(tx) + if ns.ValidateTransactionHandler != nil { + return ns.ValidateTransactionHandler(tx) + } + + return nil } // ValidateTransactionForSimulation - func (ns *NodeStub) ValidateTransactionForSimulation(tx *transaction.Transaction, bypassSignature bool) error { - return ns.ValidateTransactionForSimulationCalled(tx, bypassSignature) + if ns.ValidateTransactionForSimulationCalled != nil { + return ns.ValidateTransactionForSimulationCalled(tx, bypassSignature) + } + + return nil } // SendBulkTransactions - func (ns *NodeStub) SendBulkTransactions(txs []*transaction.Transaction) (uint64, error) { - return ns.SendBulkTransactionsHandler(txs) + if ns.SendBulkTransactionsHandler != nil { + return ns.SendBulkTransactionsHandler(txs) + } + + return 0, nil } // GetAccount - func (ns *NodeStub) GetAccount(address string, options api.AccountQueryOptions) (api.AccountResponse, api.BlockInfo, error) { - return ns.GetAccountCalled(address, options) + if ns.GetAccountCalled != nil { + return ns.GetAccountCalled(address, options) + } + + return api.AccountResponse{}, api.BlockInfo{}, nil } // GetCode - @@ -179,22 +200,47 @@ func (ns *NodeStub) GetCode(codeHash []byte, options api.AccountQueryOptions) ([ // GetHeartbeats - func (ns *NodeStub) GetHeartbeats() []data.PubKeyHeartbeat { - return ns.GetHeartbeatsHandler() + if ns.GetHeartbeatsHandler != nil { + return ns.GetHeartbeatsHandler() + } + + return nil } // ValidatorStatisticsApi - func (ns *NodeStub) ValidatorStatisticsApi() (map[string]*validator.ValidatorStatistics, error) { - return ns.ValidatorStatisticsApiCalled() + if ns.ValidatorStatisticsApiCalled != nil { + return ns.ValidatorStatisticsApiCalled() + } + + return nil, nil +} + +// AuctionListApi - +func (ns *NodeStub) AuctionListApi() ([]*common.AuctionListValidatorAPIResponse, error) { + if ns.AuctionListApiCalled != nil { + return ns.AuctionListApiCalled() + } + + return nil, nil } // DirectTrigger - func (ns *NodeStub) DirectTrigger(epoch uint32, withEarlyEndOfEpoch bool) error { - return ns.DirectTriggerCalled(epoch, withEarlyEndOfEpoch) + if ns.DirectTriggerCalled != nil { + return ns.DirectTriggerCalled(epoch, withEarlyEndOfEpoch) + } + + return nil } // IsSelfTrigger - func (ns *NodeStub) IsSelfTrigger() bool { - return ns.IsSelfTriggerCalled() + if ns.IsSelfTriggerCalled != nil { + return ns.IsSelfTriggerCalled() + } + + return false } // GetQueryHandler - diff --git a/facade/nodeFacade.go b/facade/nodeFacade.go index f91a405a96c..03dd77c76b7 100644 --- 
a/facade/nodeFacade.go +++ b/facade/nodeFacade.go @@ -163,7 +163,7 @@ func (nf *nodeFacade) RestAPIServerDebugMode() bool { // RestApiInterface returns the interface on which the rest API should start on, based on the config file provided. // The API will start on the DefaultRestInterface value unless a correct value is passed or -// the value is explicitly set to off, in which case it will not start at all +// // the value is explicitly set to off, in which case it will not start at all func (nf *nodeFacade) RestApiInterface() string { if nf.config.RestApiInterface == "" { return DefaultRestInterface @@ -284,6 +284,11 @@ func (nf *nodeFacade) ValidatorStatisticsApi() (map[string]*validator.ValidatorS return nf.node.ValidatorStatisticsApi() } +// AuctionListApi will return the data about the validators in the auction list +func (nf *nodeFacade) AuctionListApi() ([]*common.AuctionListValidatorAPIResponse, error) { + return nf.node.AuctionListApi() +} + // SendBulkTransactions will send a bulk of transactions on the topic channel func (nf *nodeFacade) SendBulkTransactions(txs []*transaction.Transaction) (uint64, error) { return nf.node.SendBulkTransactions(txs) @@ -590,11 +595,16 @@ func (nf *nodeFacade) GetManagedKeysCount() int { return nf.apiResolver.GetManagedKeysCount() } -// GetManagedKeys returns all keys managed by the current node when running in multikey mode +// GetManagedKeys returns all keys that should act as validator(main or backup that took over) and will be managed by this node func (nf *nodeFacade) GetManagedKeys() []string { return nf.apiResolver.GetManagedKeys() } +// GetLoadedKeys returns all keys that were loaded by this node +func (nf *nodeFacade) GetLoadedKeys() []string { + return nf.apiResolver.GetLoadedKeys() +} + // GetEligibleManagedKeys returns the eligible managed keys when node is running in multikey mode func (nf *nodeFacade) GetEligibleManagedKeys() ([]string, error) { return nf.apiResolver.GetEligibleManagedKeys() diff --git a/facade/nodeFacade_test.go b/facade/nodeFacade_test.go index 299fa1cfb50..21823b60b6e 100644 --- a/facade/nodeFacade_test.go +++ b/facade/nodeFacade_test.go @@ -1317,6 +1317,22 @@ func TestNodeFacade_GetEligibleManagedKeys(t *testing.T) { assert.Equal(t, expectedResult, result) } +func TestNodeFacade_GetLoadedKeys(t *testing.T) { + t.Parallel() + + providedLoadedKeys := []string{"pk1", "pk2"} + arg := createMockArguments() + arg.ApiResolver = &mock.ApiResolverStub{ + GetLoadedKeysCalled: func() []string { + return providedLoadedKeys + }, + } + nf, _ := NewNodeFacade(arg) + + keys := nf.GetLoadedKeys() + require.Equal(t, providedLoadedKeys, keys) +} + func TestNodeFacade_GetWaitingEpochsLeftForPublicKey(t *testing.T) { t.Parallel() diff --git a/factory/api/apiResolverFactory.go b/factory/api/apiResolverFactory.go index 157fe12642d..852cfd9ce09 100644 --- a/factory/api/apiResolverFactory.go +++ b/factory/api/apiResolverFactory.go @@ -11,6 +11,7 @@ import ( "github.com/multiversx/mx-chain-core-go/marshal" "github.com/multiversx/mx-chain-go/common" "github.com/multiversx/mx-chain-go/common/disabled" + "github.com/multiversx/mx-chain-go/common/operationmodes" "github.com/multiversx/mx-chain-go/config" "github.com/multiversx/mx-chain-go/dataRetriever" "github.com/multiversx/mx-chain-go/dataRetriever/blockchain" @@ -71,40 +72,42 @@ type ApiResolverArgs struct { } type scQueryServiceArgs struct { - generalConfig *config.Config - epochConfig *config.EpochConfig - coreComponents factory.CoreComponentsHolder - stateComponents 
factory.StateComponentsHolder - dataComponents factory.DataComponentsHolder - processComponents factory.ProcessComponentsHolder - statusCoreComponents factory.StatusCoreComponentsHolder - gasScheduleNotifier core.GasScheduleNotifier - messageSigVerifier vm.MessageSignVerifier - systemSCConfig *config.SystemSmartContractsConfig - bootstrapper process.Bootstrapper - guardedAccountHandler process.GuardedAccountHandler - allowVMQueriesChan chan struct{} - workingDir string - processingMode common.NodeProcessingMode + generalConfig *config.Config + epochConfig *config.EpochConfig + coreComponents factory.CoreComponentsHolder + stateComponents factory.StateComponentsHolder + dataComponents factory.DataComponentsHolder + processComponents factory.ProcessComponentsHolder + statusCoreComponents factory.StatusCoreComponentsHolder + gasScheduleNotifier core.GasScheduleNotifier + messageSigVerifier vm.MessageSignVerifier + systemSCConfig *config.SystemSmartContractsConfig + bootstrapper process.Bootstrapper + guardedAccountHandler process.GuardedAccountHandler + allowVMQueriesChan chan struct{} + workingDir string + processingMode common.NodeProcessingMode + isInHistoricalBalancesMode bool } type scQueryElementArgs struct { - generalConfig *config.Config - epochConfig *config.EpochConfig - coreComponents factory.CoreComponentsHolder - stateComponents factory.StateComponentsHolder - dataComponents factory.DataComponentsHolder - processComponents factory.ProcessComponentsHolder - statusCoreComponents factory.StatusCoreComponentsHolder - gasScheduleNotifier core.GasScheduleNotifier - messageSigVerifier vm.MessageSignVerifier - systemSCConfig *config.SystemSmartContractsConfig - bootstrapper process.Bootstrapper - guardedAccountHandler process.GuardedAccountHandler - allowVMQueriesChan chan struct{} - workingDir string - index int - processingMode common.NodeProcessingMode + generalConfig *config.Config + epochConfig *config.EpochConfig + coreComponents factory.CoreComponentsHolder + stateComponents factory.StateComponentsHolder + dataComponents factory.DataComponentsHolder + processComponents factory.ProcessComponentsHolder + statusCoreComponents factory.StatusCoreComponentsHolder + gasScheduleNotifier core.GasScheduleNotifier + messageSigVerifier vm.MessageSignVerifier + systemSCConfig *config.SystemSmartContractsConfig + bootstrapper process.Bootstrapper + guardedAccountHandler process.GuardedAccountHandler + allowVMQueriesChan chan struct{} + workingDir string + index int + processingMode common.NodeProcessingMode + isInHistoricalBalancesMode bool } // CreateApiResolver is able to create an ApiResolver instance that will solve the REST API requests through the node facade @@ -112,24 +115,25 @@ type scQueryElementArgs struct { func CreateApiResolver(args *ApiResolverArgs) (facade.ApiResolver, error) { apiWorkingDir := filepath.Join(args.Configs.FlagsConfig.WorkingDir, common.TemporaryPath) argsSCQuery := &scQueryServiceArgs{ - generalConfig: args.Configs.GeneralConfig, - epochConfig: args.Configs.EpochConfig, - coreComponents: args.CoreComponents, - dataComponents: args.DataComponents, - stateComponents: args.StateComponents, - processComponents: args.ProcessComponents, - statusCoreComponents: args.StatusCoreComponents, - gasScheduleNotifier: args.GasScheduleNotifier, - messageSigVerifier: args.CryptoComponents.MessageSignVerifier(), - systemSCConfig: args.Configs.SystemSCConfig, - bootstrapper: args.Bootstrapper, - guardedAccountHandler: args.BootstrapComponents.GuardedAccountHandler(), - 
allowVMQueriesChan: args.AllowVMQueriesChan, - workingDir: apiWorkingDir, - processingMode: args.ProcessingMode, - } - - scQueryService, err := createScQueryService(argsSCQuery) + generalConfig: args.Configs.GeneralConfig, + epochConfig: args.Configs.EpochConfig, + coreComponents: args.CoreComponents, + dataComponents: args.DataComponents, + stateComponents: args.StateComponents, + processComponents: args.ProcessComponents, + statusCoreComponents: args.StatusCoreComponents, + gasScheduleNotifier: args.GasScheduleNotifier, + messageSigVerifier: args.CryptoComponents.MessageSignVerifier(), + systemSCConfig: args.Configs.SystemSCConfig, + bootstrapper: args.Bootstrapper, + guardedAccountHandler: args.BootstrapComponents.GuardedAccountHandler(), + allowVMQueriesChan: args.AllowVMQueriesChan, + workingDir: apiWorkingDir, + processingMode: args.ProcessingMode, + isInHistoricalBalancesMode: operationmodes.IsInHistoricalBalancesMode(args.Configs), + } + + scQueryService, storageManagers, err := createScQueryService(argsSCQuery) if err != nil { return nil, err } @@ -271,7 +275,9 @@ func CreateApiResolver(args *ApiResolverArgs) (facade.ApiResolver, error) { AccountsParser: args.ProcessComponents.AccountsParser(), GasScheduleNotifier: args.GasScheduleNotifier, ManagedPeersMonitor: args.StatusComponents.ManagedPeersMonitor(), + PublicKey: args.CryptoComponents.PublicKeyString(), NodesCoordinator: args.ProcessComponents.NodesCoordinator(), + StorageManagers: storageManagers, } return external.NewNodeApiResolver(argsApiResolver) @@ -279,75 +285,91 @@ func CreateApiResolver(args *ApiResolverArgs) (facade.ApiResolver, error) { func createScQueryService( args *scQueryServiceArgs, -) (process.SCQueryService, error) { +) (process.SCQueryService, []common.StorageManager, error) { numConcurrentVms := args.generalConfig.VirtualMachine.Querying.NumConcurrentVMs if numConcurrentVms < 1 { - return nil, fmt.Errorf("VirtualMachine.Querying.NumConcurrentVms should be a positive number more than 1") + return nil, nil, fmt.Errorf("VirtualMachine.Querying.NumConcurrentVms should be a positive number more than 1") } argsQueryElem := &scQueryElementArgs{ - generalConfig: args.generalConfig, - epochConfig: args.epochConfig, - coreComponents: args.coreComponents, - stateComponents: args.stateComponents, - dataComponents: args.dataComponents, - processComponents: args.processComponents, - statusCoreComponents: args.statusCoreComponents, - gasScheduleNotifier: args.gasScheduleNotifier, - messageSigVerifier: args.messageSigVerifier, - systemSCConfig: args.systemSCConfig, - bootstrapper: args.bootstrapper, - guardedAccountHandler: args.guardedAccountHandler, - allowVMQueriesChan: args.allowVMQueriesChan, - workingDir: args.workingDir, - index: 0, - processingMode: args.processingMode, + generalConfig: args.generalConfig, + epochConfig: args.epochConfig, + coreComponents: args.coreComponents, + stateComponents: args.stateComponents, + dataComponents: args.dataComponents, + processComponents: args.processComponents, + statusCoreComponents: args.statusCoreComponents, + gasScheduleNotifier: args.gasScheduleNotifier, + messageSigVerifier: args.messageSigVerifier, + systemSCConfig: args.systemSCConfig, + bootstrapper: args.bootstrapper, + guardedAccountHandler: args.guardedAccountHandler, + allowVMQueriesChan: args.allowVMQueriesChan, + workingDir: args.workingDir, + index: 0, + processingMode: args.processingMode, + isInHistoricalBalancesMode: args.isInHistoricalBalancesMode, } var err error var scQueryService process.SCQueryService 
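+	// One SC query service is built for every concurrent VM configured via VirtualMachine.Querying.NumConcurrentVMs.
+	// The storage manager created for each element is collected as well, so the whole slice can be returned to the
+	// caller and forwarded to the node API resolver (see the StorageManagers field populated above).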
+ var storageManager common.StorageManager + storageManagers := make([]common.StorageManager, 0, numConcurrentVms) list := make([]process.SCQueryService, 0, numConcurrentVms) for i := 0; i < numConcurrentVms; i++ { argsQueryElem.index = i - scQueryService, err = createScQueryElement(argsQueryElem) + scQueryService, storageManager, err = createScQueryElement(*argsQueryElem) if err != nil { - return nil, err + return nil, nil, err } list = append(list, scQueryService) + storageManagers = append(storageManagers, storageManager) } sqQueryDispatcher, err := smartContract.NewScQueryServiceDispatcher(list) if err != nil { - return nil, err + return nil, nil, err } - return sqQueryDispatcher, nil + return sqQueryDispatcher, storageManagers, nil } func createScQueryElement( - args *scQueryElementArgs, -) (process.SCQueryService, error) { + args scQueryElementArgs, +) (process.SCQueryService, common.StorageManager, error) { var err error + selfShardID := args.processComponents.ShardCoordinator().SelfId() + pkConverter := args.coreComponents.AddressPubKeyConverter() automaticCrawlerAddressesStrings := args.generalConfig.BuiltInFunctions.AutomaticCrawlerAddresses convertedAddresses, errDecode := factory.DecodeAddresses(pkConverter, automaticCrawlerAddressesStrings) if errDecode != nil { - return nil, errDecode + return nil, nil, errDecode } dnsV2AddressesStrings := args.generalConfig.BuiltInFunctions.DNSV2Addresses convertedDNSV2Addresses, errDecode := factory.DecodeAddresses(pkConverter, dnsV2AddressesStrings) if errDecode != nil { - return nil, errDecode + return nil, nil, errDecode + } + + apiBlockchain, err := createBlockchainForScQuery(selfShardID) + if err != nil { + return nil, nil, err + } + + accountsAdapterApi, storageManager, err := createNewAccountsAdapterApi(args, apiBlockchain) + if err != nil { + return nil, nil, err } builtInFuncFactory, err := createBuiltinFuncs( args.gasScheduleNotifier, args.coreComponents.InternalMarshalizer(), - args.stateComponents.AccountsAdapterAPI(), + accountsAdapterApi, args.processComponents.ShardCoordinator(), args.coreComponents.EpochNotifier(), args.coreComponents.EnableEpochsHandler(), @@ -357,13 +379,13 @@ func createScQueryElement( convertedDNSV2Addresses, ) if err != nil { - return nil, err + return nil, nil, err } cacherCfg := storageFactory.GetCacherFromConfig(args.generalConfig.SmartContractDataPool) smartContractsCache, err := storageunit.NewCache(cacherCfg) if err != nil { - return nil, err + return nil, nil, err } scStorage := args.generalConfig.SmartContractsStorageForSCQuery @@ -387,76 +409,76 @@ func createScQueryElement( GasSchedule: args.gasScheduleNotifier, Counter: counters.NewDisabledCounter(), MissingTrieNodesNotifier: syncer.NewMissingTrieNodesNotifier(), + Accounts: accountsAdapterApi, + BlockChain: apiBlockchain, } - var apiBlockchain data.ChainHandler var vmFactory process.VirtualMachinesContainerFactory maxGasForVmQueries := args.generalConfig.VirtualMachine.GasConfig.ShardMaxGasPerVmQuery - if args.processComponents.ShardCoordinator().SelfId() == core.MetachainShardId { + if selfShardID == core.MetachainShardId { maxGasForVmQueries = args.generalConfig.VirtualMachine.GasConfig.MetaMaxGasPerVmQuery - apiBlockchain, vmFactory, err = createMetaVmContainerFactory(args, argsHook) + vmFactory, err = createMetaVmContainerFactory(args, argsHook) } else { - apiBlockchain, vmFactory, err = createShardVmContainerFactory(args, argsHook) + vmFactory, err = createShardVmContainerFactory(args, argsHook) } if err != nil { - return nil, err + 
return nil, nil, err } log.Debug("maximum gas per VM Query", "value", maxGasForVmQueries) vmContainer, err := vmFactory.Create() if err != nil { - return nil, err + return nil, nil, err } err = vmFactory.BlockChainHookImpl().SetVMContainer(vmContainer) if err != nil { - return nil, err + return nil, nil, err } err = builtInFuncFactory.SetPayableHandler(vmFactory.BlockChainHookImpl()) if err != nil { - return nil, err + return nil, nil, err } argsNewSCQueryService := smartContract.ArgsNewSCQueryService{ - VmContainer: vmContainer, - EconomicsFee: args.coreComponents.EconomicsData(), - BlockChainHook: vmFactory.BlockChainHookImpl(), - MainBlockChain: args.dataComponents.Blockchain(), - APIBlockChain: apiBlockchain, - WasmVMChangeLocker: args.coreComponents.WasmVMChangeLocker(), - Bootstrapper: args.bootstrapper, - AllowExternalQueriesChan: args.allowVMQueriesChan, - MaxGasLimitPerQuery: maxGasForVmQueries, - HistoryRepository: args.processComponents.HistoryRepository(), - ShardCoordinator: args.processComponents.ShardCoordinator(), - StorageService: args.dataComponents.StorageService(), - Marshaller: args.coreComponents.InternalMarshalizer(), - Hasher: args.coreComponents.Hasher(), - Uint64ByteSliceConverter: args.coreComponents.Uint64ByteSliceConverter(), - } - - return smartContract.NewSCQueryService(argsNewSCQueryService) + VmContainer: vmContainer, + EconomicsFee: args.coreComponents.EconomicsData(), + BlockChainHook: vmFactory.BlockChainHookImpl(), + MainBlockChain: args.dataComponents.Blockchain(), + APIBlockChain: apiBlockchain, + WasmVMChangeLocker: args.coreComponents.WasmVMChangeLocker(), + Bootstrapper: args.bootstrapper, + AllowExternalQueriesChan: args.allowVMQueriesChan, + MaxGasLimitPerQuery: maxGasForVmQueries, + HistoryRepository: args.processComponents.HistoryRepository(), + ShardCoordinator: args.processComponents.ShardCoordinator(), + StorageService: args.dataComponents.StorageService(), + Marshaller: args.coreComponents.InternalMarshalizer(), + Hasher: args.coreComponents.Hasher(), + Uint64ByteSliceConverter: args.coreComponents.Uint64ByteSliceConverter(), + IsInHistoricalBalancesMode: args.isInHistoricalBalancesMode, + } + + scQueryService, err := smartContract.NewSCQueryService(argsNewSCQueryService) + + return scQueryService, storageManager, err } -func createMetaVmContainerFactory(args *scQueryElementArgs, argsHook hooks.ArgBlockChainHook) (data.ChainHandler, process.VirtualMachinesContainerFactory, error) { - apiBlockchain, err := blockchain.NewMetaChain(disabled.NewAppStatusHandler()) - if err != nil { - return nil, nil, err +func createBlockchainForScQuery(selfShardID uint32) (data.ChainHandler, error) { + isMetachain := selfShardID == core.MetachainShardId + if isMetachain { + return blockchain.NewMetaChain(disabled.NewAppStatusHandler()) } - accountsAdapterApi, err := createNewAccountsAdapterApi(args, apiBlockchain) - if err != nil { - return nil, nil, err - } - - argsHook.BlockChain = apiBlockchain - argsHook.Accounts = accountsAdapterApi + return blockchain.NewBlockChain(disabled.NewAppStatusHandler()) +} +func createMetaVmContainerFactory(args scQueryElementArgs, argsHook hooks.ArgBlockChainHook) (process.VirtualMachinesContainerFactory, error) { blockChainHookImpl, errBlockChainHook := hooks.NewBlockChainHookImpl(argsHook) if errBlockChainHook != nil { - return nil, nil, errBlockChainHook + return nil, errBlockChainHook } argsNewVmFactory := metachain.ArgsNewVMContainerFactory{ @@ -474,38 +496,26 @@ func createMetaVmContainerFactory(args *scQueryElementArgs, 
argsHook hooks.ArgBl ChanceComputer: args.coreComponents.Rater(), ShardCoordinator: args.processComponents.ShardCoordinator(), EnableEpochsHandler: args.coreComponents.EnableEpochsHandler(), + NodesCoordinator: args.processComponents.NodesCoordinator(), } vmFactory, err := metachain.NewVMContainerFactory(argsNewVmFactory) if err != nil { - return nil, nil, err + return nil, err } - return apiBlockchain, vmFactory, nil + return vmFactory, nil } -func createShardVmContainerFactory(args *scQueryElementArgs, argsHook hooks.ArgBlockChainHook) (data.ChainHandler, process.VirtualMachinesContainerFactory, error) { - apiBlockchain, err := blockchain.NewBlockChain(disabled.NewAppStatusHandler()) - if err != nil { - return nil, nil, err - } - - accountsAdapterApi, err := createNewAccountsAdapterApi(args, apiBlockchain) - if err != nil { - return nil, nil, err - } - - argsHook.BlockChain = apiBlockchain - argsHook.Accounts = accountsAdapterApi - +func createShardVmContainerFactory(args scQueryElementArgs, argsHook hooks.ArgBlockChainHook) (process.VirtualMachinesContainerFactory, error) { queryVirtualMachineConfig := args.generalConfig.VirtualMachine.Querying.VirtualMachineConfig esdtTransferParser, errParser := parsers.NewESDTTransferParser(args.coreComponents.InternalMarshalizer()) if errParser != nil { - return nil, nil, errParser + return nil, errParser } blockChainHookImpl, errBlockChainHook := hooks.NewBlockChainHookImpl(argsHook) if errBlockChainHook != nil { - return nil, nil, errBlockChainHook + return nil, errBlockChainHook } argsNewVMFactory := shard.ArgVMContainerFactory{ @@ -527,13 +537,13 @@ func createShardVmContainerFactory(args *scQueryElementArgs, argsHook hooks.ArgB vmFactory, err := shard.NewVMContainerFactory(argsNewVMFactory) if err != nil { - return nil, nil, err + return nil, err } - return apiBlockchain, vmFactory, nil + return vmFactory, nil } -func createNewAccountsAdapterApi(args *scQueryElementArgs, chainHandler data.ChainHandler) (state.AccountsAdapterAPI, error) { +func createNewAccountsAdapterApi(args scQueryElementArgs, chainHandler data.ChainHandler) (state.AccountsAdapterAPI, common.StorageManager, error) { argsAccCreator := factoryState.ArgsAccountCreator{ Hasher: args.coreComponents.Hasher(), Marshaller: args.coreComponents.InternalMarshalizer(), @@ -541,17 +551,17 @@ func createNewAccountsAdapterApi(args *scQueryElementArgs, chainHandler data.Cha } accountFactory, err := factoryState.NewAccountCreator(argsAccCreator) if err != nil { - return nil, err + return nil, nil, err } storagePruning, err := newStoragePruningManager(args) if err != nil { - return nil, err + return nil, nil, err } storageService := args.dataComponents.StorageService() trieStorer, err := storageService.GetStorer(dataRetriever.UserAccountsUnit) if err != nil { - return nil, err + return nil, nil, err } trieFactoryArgs := trieFactory.TrieFactoryArgs{ @@ -562,7 +572,7 @@ func createNewAccountsAdapterApi(args *scQueryElementArgs, chainHandler data.Cha } trFactory, err := trieFactory.NewTrieFactory(trieFactoryArgs) if err != nil { - return nil, err + return nil, nil, err } trieCreatorArgs := trieFactory.TrieCreateArgs{ @@ -575,9 +585,9 @@ func createNewAccountsAdapterApi(args *scQueryElementArgs, chainHandler data.Cha EnableEpochsHandler: args.coreComponents.EnableEpochsHandler(), StatsCollector: args.statusCoreComponents.StateStatsHandler(), } - _, merkleTrie, err := trFactory.Create(trieCreatorArgs) + trieStorageManager, merkleTrie, err := trFactory.Create(trieCreatorArgs) if err != nil { - 
return nil, err + return nil, nil, err } argsAPIAccountsDB := state.ArgsAccountsDB{ @@ -593,18 +603,20 @@ func createNewAccountsAdapterApi(args *scQueryElementArgs, chainHandler data.Cha provider, err := blockInfoProviders.NewCurrentBlockInfo(chainHandler) if err != nil { - return nil, err + return nil, nil, err } accounts, err := state.NewAccountsDB(argsAPIAccountsDB) if err != nil { - return nil, err + return nil, nil, err } - return state.NewAccountsDBApi(accounts, provider) + accountsDB, err := state.NewAccountsDBApi(accounts, provider) + + return accountsDB, trieStorageManager, err } -func newStoragePruningManager(args *scQueryElementArgs) (state.StoragePruningManager, error) { +func newStoragePruningManager(args scQueryElementArgs) (state.StoragePruningManager, error) { argsMemEviction := evictionWaitingList.MemoryEvictionWaitingListArgs{ RootHashesSize: args.generalConfig.EvictionWaitingList.RootHashesSize, HashesSize: args.generalConfig.EvictionWaitingList.HashesSize, diff --git a/factory/api/apiResolverFactory_test.go b/factory/api/apiResolverFactory_test.go index 591ea31b79f..e929d66e701 100644 --- a/factory/api/apiResolverFactory_test.go +++ b/factory/api/apiResolverFactory_test.go @@ -1,6 +1,7 @@ package api_test import ( + "fmt" "strings" "sync" "testing" @@ -26,6 +27,7 @@ import ( epochNotifierMock "github.com/multiversx/mx-chain-go/testscommon/epochNotifier" "github.com/multiversx/mx-chain-go/testscommon/factory" "github.com/multiversx/mx-chain-go/testscommon/genericMocks" + "github.com/multiversx/mx-chain-go/testscommon/genesisMocks" "github.com/multiversx/mx-chain-go/testscommon/guardianMocks" "github.com/multiversx/mx-chain-go/testscommon/mainFactoryMocks" "github.com/multiversx/mx-chain-go/testscommon/marshallerMock" @@ -70,7 +72,7 @@ func createMockArgs(t *testing.T) *api.ApiResolverArgs { cryptoComponents := componentsMock.GetCryptoComponents(coreComponents) networkComponents := componentsMock.GetNetworkComponents(cryptoComponents) dataComponents := componentsMock.GetDataComponents(coreComponents, shardCoordinator) - stateComponents := componentsMock.GetStateComponents(coreComponents) + stateComponents := componentsMock.GetStateComponents(coreComponents, componentsMock.GetStatusCoreComponents()) processComponents := componentsMock.GetProcessComponents(shardCoordinator, coreComponents, networkComponents, dataComponents, cryptoComponents, stateComponents) argsB := componentsMock.GetBootStrapFactoryArgs() @@ -327,7 +329,7 @@ func createMockSCQueryElementArgs() api.SCQueryElementArgs { EnableEpochsHandlerField: &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, UInt64ByteSliceConv: &testsMocks.Uint64ByteSliceConverterMock{}, EconomicsHandler: &economicsmocks.EconomicsHandlerStub{}, - NodesConfig: &testscommon.NodesSetupStub{}, + NodesConfig: &genesisMocks.NodesSetupStub{}, Hash: &testscommon.HasherStub{}, RatingHandler: &testscommon.RaterMock{}, WasmVMChangeLockerInternal: &sync.RWMutex{}, @@ -346,6 +348,7 @@ func createMockSCQueryElementArgs() api.SCQueryElementArgs { AppStatusHandlerCalled: func() core.AppStatusHandler { return &statusHandler.AppStatusHandlerStub{} }, + StateStatsHandlerField: &testscommon.StateStatisticsHandlerStub{}, }, DataComponents: &mock.DataComponentsMock{ Storage: genericMocks.NewChainStorerMock(0), @@ -379,9 +382,10 @@ func TestCreateApiResolver_createScQueryElement(t *testing.T) { args := createMockSCQueryElementArgs() args.GuardedAccountHandler = nil - scQueryService, err := api.CreateScQueryElement(args) + scQueryService, storageManager, err 
:= api.CreateScQueryElement(args) require.Equal(t, process.ErrNilGuardedAccountHandler, err) require.Nil(t, scQueryService) + require.Nil(t, storageManager) }) t.Run("DecodeAddresses fails", func(t *testing.T) { t.Parallel() @@ -390,10 +394,11 @@ func TestCreateApiResolver_createScQueryElement(t *testing.T) { args.CoreComponents = &mock.CoreComponentsMock{ AddrPubKeyConv: nil, } - scQueryService, err := api.CreateScQueryElement(args) + scQueryService, storageManager, err := api.CreateScQueryElement(args) require.NotNil(t, err) require.True(t, strings.Contains(strings.ToLower(err.Error()), "public key converter")) require.Nil(t, scQueryService) + require.Nil(t, storageManager) }) t.Run("createBuiltinFuncs fails", func(t *testing.T) { t.Parallel() @@ -401,10 +406,11 @@ func TestCreateApiResolver_createScQueryElement(t *testing.T) { args := createMockSCQueryElementArgs() coreCompMock := args.CoreComponents.(*mock.CoreComponentsMock) coreCompMock.IntMarsh = nil - scQueryService, err := api.CreateScQueryElement(args) + scQueryService, storageManager, err := api.CreateScQueryElement(args) require.NotNil(t, err) require.True(t, strings.Contains(strings.ToLower(err.Error()), "marshalizer")) require.Nil(t, scQueryService) + require.Nil(t, storageManager) }) t.Run("NewCache fails", func(t *testing.T) { t.Parallel() @@ -414,10 +420,11 @@ func TestCreateApiResolver_createScQueryElement(t *testing.T) { Type: "LRU", SizeInBytes: 1, } - scQueryService, err := api.CreateScQueryElement(args) + scQueryService, storageManager, err := api.CreateScQueryElement(args) require.NotNil(t, err) require.True(t, strings.Contains(strings.ToLower(err.Error()), "lru")) require.Nil(t, scQueryService) + require.Nil(t, storageManager) }) t.Run("metachain - NewVMContainerFactory fails", func(t *testing.T) { t.Parallel() @@ -432,10 +439,11 @@ func TestCreateApiResolver_createScQueryElement(t *testing.T) { } coreCompMock := args.CoreComponents.(*mock.CoreComponentsMock) coreCompMock.Hash = nil - scQueryService, err := api.CreateScQueryElement(args) + scQueryService, storageManager, err := api.CreateScQueryElement(args) require.NotNil(t, err) require.True(t, strings.Contains(strings.ToLower(err.Error()), "hasher")) require.Nil(t, scQueryService) + require.Nil(t, storageManager) }) t.Run("shard - NewVMContainerFactory fails", func(t *testing.T) { t.Parallel() @@ -443,10 +451,30 @@ func TestCreateApiResolver_createScQueryElement(t *testing.T) { args := createMockSCQueryElementArgs() coreCompMock := args.CoreComponents.(*mock.CoreComponentsMock) coreCompMock.Hash = nil - scQueryService, err := api.CreateScQueryElement(args) + scQueryService, storageManager, err := api.CreateScQueryElement(args) require.NotNil(t, err) require.True(t, strings.Contains(strings.ToLower(err.Error()), "hasher")) require.Nil(t, scQueryService) + require.Nil(t, storageManager) }) +} + +func TestCreateApiResolver_createBlockchainForScQuery(t *testing.T) { + t.Parallel() + + t.Run("for metachain", func(t *testing.T) { + t.Parallel() + apiBlockchain, err := api.CreateBlockchainForScQuery(core.MetachainShardId) + require.NoError(t, err) + require.Equal(t, "*blockchain.metaChain", fmt.Sprintf("%T", apiBlockchain)) + }) + + t.Run("for shard", func(t *testing.T) { + t.Parallel() + + apiBlockchain, err := api.CreateBlockchainForScQuery(0) + require.NoError(t, err) + require.Equal(t, "*blockchain.blockChain", fmt.Sprintf("%T", apiBlockchain)) + }) } diff --git a/factory/api/export_test.go b/factory/api/export_test.go index 0164c0c2b10..a17ddfad30c 100644 --- 
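The helper consolidated into createBlockchainForScQuery reduces the choice of API blockchain to a single shard-ID check, which the new unit test exercises by comparing the concrete types. A reduced sketch of that selection follows; the local metaChain/blockChain types are stand-ins for the real blockchain package, and the metachain shard ID is assumed to be math.MaxUint32 as defined in mx-chain-core-go.

package main

import (
	"fmt"
	"math"
)

// Stand-ins for data.ChainHandler and the two concrete chain types.
type chainHandler interface{ Description() string }

type metaChain struct{}

func (m *metaChain) Description() string { return "*blockchain.metaChain (sketch)" }

type blockChain struct{}

func (b *blockChain) Description() string { return "*blockchain.blockChain (sketch)" }

// metachainShardID mirrors core.MetachainShardId (assumed to be math.MaxUint32).
const metachainShardID = uint32(math.MaxUint32)

// createBlockchainForScQuery picks the chain handler flavour from the node's own shard.
func createBlockchainForScQuery(selfShardID uint32) (chainHandler, error) {
	if selfShardID == metachainShardID {
		return &metaChain{}, nil
	}
	return &blockChain{}, nil
}

func main() {
	forShard, _ := createBlockchainForScQuery(0)
	forMeta, _ := createBlockchainForScQuery(metachainShardID)
	fmt.Println(forShard.Description()) // shard flavour
	fmt.Println(forMeta.Description())  // metachain flavour
}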
a/factory/api/export_test.go +++ b/factory/api/export_test.go @@ -2,6 +2,8 @@ package api import ( "github.com/multiversx/mx-chain-core-go/core" + "github.com/multiversx/mx-chain-core-go/data" + "github.com/multiversx/mx-chain-go/common" "github.com/multiversx/mx-chain-go/config" "github.com/multiversx/mx-chain-go/factory" "github.com/multiversx/mx-chain-go/process" @@ -28,8 +30,8 @@ type SCQueryElementArgs struct { } // CreateScQueryElement - -func CreateScQueryElement(args SCQueryElementArgs) (process.SCQueryService, error) { - return createScQueryElement(&scQueryElementArgs{ +func CreateScQueryElement(args SCQueryElementArgs) (process.SCQueryService, common.StorageManager, error) { + return createScQueryElement(scQueryElementArgs{ generalConfig: args.GeneralConfig, epochConfig: args.EpochConfig, coreComponents: args.CoreComponents, @@ -47,3 +49,8 @@ func CreateScQueryElement(args SCQueryElementArgs) (process.SCQueryService, erro guardedAccountHandler: args.GuardedAccountHandler, }) } + +// CreateBlockchainForScQuery - +func CreateBlockchainForScQuery(selfShardID uint32) (data.ChainHandler, error) { + return createBlockchainForScQuery(selfShardID) +} diff --git a/factory/bootstrap/bootstrapComponents.go b/factory/bootstrap/bootstrapComponents.go index 988b72764e0..a9ef7851ccb 100644 --- a/factory/bootstrap/bootstrapComponents.go +++ b/factory/bootstrap/bootstrapComponents.go @@ -18,6 +18,7 @@ import ( "github.com/multiversx/mx-chain-go/process/headerCheck" "github.com/multiversx/mx-chain-go/process/smartContract" "github.com/multiversx/mx-chain-go/sharding" + "github.com/multiversx/mx-chain-go/sharding/nodesCoordinator" "github.com/multiversx/mx-chain-go/storage" "github.com/multiversx/mx-chain-go/storage/directoryhandler" storageFactory "github.com/multiversx/mx-chain-go/storage/factory" @@ -55,14 +56,15 @@ type bootstrapComponentsFactory struct { } type bootstrapComponents struct { - epochStartBootstrapper factory.EpochStartBootstrapper - bootstrapParamsHolder factory.BootstrapParamsHolder - nodeType core.NodeType - shardCoordinator sharding.Coordinator - headerVersionHandler nodeFactory.HeaderVersionHandler - versionedHeaderFactory nodeFactory.VersionedHeaderFactory - headerIntegrityVerifier nodeFactory.HeaderIntegrityVerifierHandler - guardedAccountHandler process.GuardedAccountHandler + epochStartBootstrapper factory.EpochStartBootstrapper + bootstrapParamsHolder factory.BootstrapParamsHolder + nodeType core.NodeType + shardCoordinator sharding.Coordinator + headerVersionHandler nodeFactory.HeaderVersionHandler + versionedHeaderFactory nodeFactory.VersionedHeaderFactory + headerIntegrityVerifier nodeFactory.HeaderIntegrityVerifierHandler + guardedAccountHandler process.GuardedAccountHandler + nodesCoordinatorRegistryFactory nodesCoordinator.NodesCoordinatorRegistryFactory } // NewBootstrapComponentsFactory creates an instance of bootstrapComponentsFactory @@ -70,6 +72,9 @@ func NewBootstrapComponentsFactory(args BootstrapComponentsFactoryArgs) (*bootst if check.IfNil(args.CoreComponents) { return nil, errors.ErrNilCoreComponentsHolder } + if check.IfNil(args.CoreComponents.EnableEpochsHandler()) { + return nil, errors.ErrNilEnableEpochsHandler + } if check.IfNil(args.CryptoComponents) { return nil, errors.ErrNilCryptoComponentsHolder } @@ -185,31 +190,40 @@ func (bcf *bootstrapComponentsFactory) Create() (*bootstrapComponents, error) { return nil, err } + nodesCoordinatorRegistryFactory, err := nodesCoordinator.NewNodesCoordinatorRegistryFactory( + 
bcf.coreComponents.InternalMarshalizer(), + bcf.coreComponents.EnableEpochsHandler().GetActivationEpoch(common.StakingV4Step2Flag), + ) + if err != nil { + return nil, err + } + epochStartBootstrapArgs := bootstrap.ArgsEpochStartBootstrap{ - CoreComponentsHolder: bcf.coreComponents, - CryptoComponentsHolder: bcf.cryptoComponents, - MainMessenger: bcf.networkComponents.NetworkMessenger(), - FullArchiveMessenger: bcf.networkComponents.FullArchiveNetworkMessenger(), - GeneralConfig: bcf.config, - PrefsConfig: bcf.prefConfig.Preferences, - FlagsConfig: bcf.flagsConfig, - EconomicsData: bcf.coreComponents.EconomicsData(), - GenesisNodesConfig: bcf.coreComponents.GenesisNodesSetup(), - GenesisShardCoordinator: genesisShardCoordinator, - StorageUnitOpener: unitOpener, - Rater: bcf.coreComponents.Rater(), - DestinationShardAsObserver: destShardIdAsObserver, - NodeShuffler: bcf.coreComponents.NodesShuffler(), - RoundHandler: bcf.coreComponents.RoundHandler(), - LatestStorageDataProvider: latestStorageDataProvider, - ArgumentsParser: smartContract.NewArgumentParser(), - StatusHandler: bcf.statusCoreComponents.AppStatusHandler(), - HeaderIntegrityVerifier: headerIntegrityVerifier, - DataSyncerCreator: dataSyncerFactory, - ScheduledSCRsStorer: nil, // will be updated after sync from network - TrieSyncStatisticsProvider: tss, - NodeProcessingMode: common.GetNodeProcessingMode(&bcf.importDbConfig), - StateStatsHandler: bcf.statusCoreComponents.StateStatsHandler(), + CoreComponentsHolder: bcf.coreComponents, + CryptoComponentsHolder: bcf.cryptoComponents, + MainMessenger: bcf.networkComponents.NetworkMessenger(), + FullArchiveMessenger: bcf.networkComponents.FullArchiveNetworkMessenger(), + GeneralConfig: bcf.config, + PrefsConfig: bcf.prefConfig.Preferences, + FlagsConfig: bcf.flagsConfig, + EconomicsData: bcf.coreComponents.EconomicsData(), + GenesisNodesConfig: bcf.coreComponents.GenesisNodesSetup(), + GenesisShardCoordinator: genesisShardCoordinator, + StorageUnitOpener: unitOpener, + Rater: bcf.coreComponents.Rater(), + DestinationShardAsObserver: destShardIdAsObserver, + NodeShuffler: bcf.coreComponents.NodesShuffler(), + RoundHandler: bcf.coreComponents.RoundHandler(), + LatestStorageDataProvider: latestStorageDataProvider, + ArgumentsParser: smartContract.NewArgumentParser(), + StatusHandler: bcf.statusCoreComponents.AppStatusHandler(), + HeaderIntegrityVerifier: headerIntegrityVerifier, + DataSyncerCreator: dataSyncerFactory, + ScheduledSCRsStorer: nil, // will be updated after sync from network + TrieSyncStatisticsProvider: tss, + NodeProcessingMode: common.GetNodeProcessingMode(&bcf.importDbConfig), + StateStatsHandler: bcf.statusCoreComponents.StateStatsHandler(), + NodesCoordinatorRegistryFactory: nodesCoordinatorRegistryFactory, } var epochStartBootstrapper factory.EpochStartBootstrapper @@ -260,12 +274,13 @@ func (bcf *bootstrapComponentsFactory) Create() (*bootstrapComponents, error) { bootstrapParamsHolder: &bootstrapParams{ bootstrapParams: bootstrapParameters, }, - nodeType: nodeType, - shardCoordinator: shardCoordinator, - headerVersionHandler: headerVersionHandler, - headerIntegrityVerifier: headerIntegrityVerifier, - versionedHeaderFactory: versionedHeaderFactory, - guardedAccountHandler: guardedAccountHandler, + nodeType: nodeType, + shardCoordinator: shardCoordinator, + headerVersionHandler: headerVersionHandler, + headerIntegrityVerifier: headerIntegrityVerifier, + versionedHeaderFactory: versionedHeaderFactory, + guardedAccountHandler: guardedAccountHandler, + 
nodesCoordinatorRegistryFactory: nodesCoordinatorRegistryFactory, }, nil } diff --git a/factory/bootstrap/bootstrapComponentsHandler.go b/factory/bootstrap/bootstrapComponentsHandler.go index bda412e2759..7401f4834f4 100644 --- a/factory/bootstrap/bootstrapComponentsHandler.go +++ b/factory/bootstrap/bootstrapComponentsHandler.go @@ -8,6 +8,7 @@ import ( "github.com/multiversx/mx-chain-go/errors" "github.com/multiversx/mx-chain-go/factory" "github.com/multiversx/mx-chain-go/process" + "github.com/multiversx/mx-chain-go/sharding/nodesCoordinator" ) var _ factory.ComponentHandler = (*managedBootstrapComponents)(nil) @@ -118,6 +119,18 @@ func (mbf *managedBootstrapComponents) EpochBootstrapParams() factory.BootstrapP return mbf.bootstrapComponents.bootstrapParamsHolder } +// NodesCoordinatorRegistryFactory returns the NodesCoordinatorRegistryFactory +func (mbf *managedBootstrapComponents) NodesCoordinatorRegistryFactory() nodesCoordinator.NodesCoordinatorRegistryFactory { + mbf.mutBootstrapComponents.RLock() + defer mbf.mutBootstrapComponents.RUnlock() + + if mbf.bootstrapComponents == nil { + return nil + } + + return mbf.bootstrapComponents.nodesCoordinatorRegistryFactory +} + // IsInterfaceNil returns true if the underlying object is nil func (mbf *managedBootstrapComponents) IsInterfaceNil() bool { return mbf == nil diff --git a/factory/bootstrap/bootstrapComponents_test.go b/factory/bootstrap/bootstrapComponents_test.go index 85c22017b28..180315b1f36 100644 --- a/factory/bootstrap/bootstrapComponents_test.go +++ b/factory/bootstrap/bootstrapComponents_test.go @@ -8,6 +8,7 @@ import ( "github.com/multiversx/mx-chain-core-go/core" "github.com/multiversx/mx-chain-core-go/core/check" + "github.com/multiversx/mx-chain-go/common" "github.com/multiversx/mx-chain-go/config" errorsMx "github.com/multiversx/mx-chain-go/errors" "github.com/multiversx/mx-chain-go/factory/bootstrap" @@ -38,6 +39,19 @@ func TestNewBootstrapComponentsFactory(t *testing.T) { require.Nil(t, bcf) require.Equal(t, errorsMx.ErrNilCoreComponentsHolder, err) }) + t.Run("nil enable epochs handler should error", func(t *testing.T) { + t.Parallel() + + argsCopy := args + argsCopy.CoreComponents = &factory.CoreComponentsHolderStub{ + EnableEpochsHandlerCalled: func() common.EnableEpochsHandler { + return nil + }, + } + bcf, err := bootstrap.NewBootstrapComponentsFactory(argsCopy) + require.Nil(t, bcf) + require.Equal(t, errorsMx.ErrNilEnableEpochsHandler, err) + }) t.Run("nil crypto components should error", func(t *testing.T) { t.Parallel() @@ -218,7 +232,8 @@ func TestBootstrapComponentsFactory_Create(t *testing.T) { coreComponents := componentsMock.GetDefaultCoreComponents() args.CoreComponents = coreComponents coreComponents.RatingHandler = nil - bcf, _ := bootstrap.NewBootstrapComponentsFactory(args) + bcf, err := bootstrap.NewBootstrapComponentsFactory(args) + require.Nil(t, err) require.NotNil(t, bcf) bc, err := bcf.Create() diff --git a/factory/bootstrap/bootstrapParameters.go b/factory/bootstrap/bootstrapParameters.go index 5002f597e55..0002beb1f62 100644 --- a/factory/bootstrap/bootstrapParameters.go +++ b/factory/bootstrap/bootstrapParameters.go @@ -25,7 +25,7 @@ func (bph *bootstrapParams) NumOfShards() uint32 { } // NodesConfig returns the nodes coordinator config after bootstrap -func (bph *bootstrapParams) NodesConfig() *nodesCoordinator.NodesCoordinatorRegistry { +func (bph *bootstrapParams) NodesConfig() nodesCoordinator.NodesCoordinatorRegistryHandler { return bph.bootstrapParams.NodesConfig } diff --git 
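With NodesConfig() now returning the nodesCoordinator.NodesCoordinatorRegistryHandler interface and the bootstrap components owning a NodesCoordinatorRegistryFactory built from the internal marshaller and the StakingV4Step2Flag activation epoch, callers read registry data through getters instead of concrete struct fields. The sketch below illustrates that idea under the assumption that the factory's job is to pick a registry format based on the activation epoch it was configured with; every type here is a simplified stand-in, not the real registry API.

package main

import "fmt"

// Hypothetical stand-in for nodesCoordinator.NodesCoordinatorRegistryHandler:
// callers read the per-epoch config through getters instead of struct fields.
type registryHandler interface {
	GetEpochsConfig() map[string][]string // epoch -> eligible validator pub keys (simplified)
}

type legacyRegistry struct{ epochs map[string][]string }

func (r *legacyRegistry) GetEpochsConfig() map[string][]string { return r.epochs }

type stakingV4Registry struct{ epochs map[string][]string }

func (r *stakingV4Registry) GetEpochsConfig() map[string][]string { return r.epochs }

// registryFactory mirrors the idea behind NewNodesCoordinatorRegistryFactory:
// it is parameterised with the staking v4 step 2 activation epoch and decides
// which registry format to produce for a given epoch.
type registryFactory struct{ stakingV4Step2Epoch uint32 }

func (f *registryFactory) CreateForEpoch(epoch uint32, epochs map[string][]string) registryHandler {
	if epoch >= f.stakingV4Step2Epoch {
		return &stakingV4Registry{epochs: epochs}
	}
	return &legacyRegistry{epochs: epochs}
}

func main() {
	f := &registryFactory{stakingV4Step2Epoch: 10}
	cfg := map[string][]string{"0": {"pubkey-A", "pubkey-B"}}
	r := f.CreateForEpoch(3, cfg)
	fmt.Printf("%T eligible in epoch 0: %v\n", r, r.GetEpochsConfig()["0"])
}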
a/factory/bootstrap/shardingFactory.go b/factory/bootstrap/shardingFactory.go index 40472acae1e..6662129299b 100644 --- a/factory/bootstrap/shardingFactory.go +++ b/factory/bootstrap/shardingFactory.go @@ -113,6 +113,7 @@ func CreateNodesCoordinator( nodeTypeProvider core.NodeTypeProviderHandler, enableEpochsHandler common.EnableEpochsHandler, validatorInfoCacher epochStart.ValidatorInfoCacher, + nodesCoordinatorRegistryFactory nodesCoordinator.NodesCoordinatorRegistryFactory, ) (nodesCoordinator.NodesCoordinator, error) { if check.IfNil(nodeShufflerOut) { return nil, errErd.ErrNilShuffleOutCloser @@ -165,15 +166,15 @@ func CreateNodesCoordinator( if bootstrapParameters.NodesConfig() != nil { nodeRegistry := bootstrapParameters.NodesConfig() currentEpoch = bootstrapParameters.Epoch() - epochsConfig, ok := nodeRegistry.EpochsConfig[fmt.Sprintf("%d", currentEpoch)] + epochsConfig, ok := nodeRegistry.GetEpochsConfig()[fmt.Sprintf("%d", currentEpoch)] if ok { - eligibles := epochsConfig.EligibleValidators + eligibles := epochsConfig.GetEligibleValidators() eligibleValidators, err = nodesCoordinator.SerializableValidatorsToValidators(eligibles) if err != nil { return nil, err } - waitings := epochsConfig.WaitingValidators + waitings := epochsConfig.GetWaitingValidators() waitingValidators, err = nodesCoordinator.SerializableValidatorsToValidators(waitings) if err != nil { return nil, err @@ -197,28 +198,29 @@ func CreateNodesCoordinator( } argumentsNodesCoordinator := nodesCoordinator.ArgNodesCoordinator{ - ShardConsensusGroupSize: shardConsensusGroupSize, - MetaConsensusGroupSize: metaConsensusGroupSize, - Marshalizer: marshalizer, - Hasher: hasher, - Shuffler: nodeShuffler, - EpochStartNotifier: epochStartNotifier, - BootStorer: bootStorer, - ShardIDAsObserver: shardIDAsObserver, - NbShards: nbShards, - EligibleNodes: eligibleValidators, - WaitingNodes: waitingValidators, - SelfPublicKey: pubKeyBytes, - ConsensusGroupCache: consensusGroupCache, - ShuffledOutHandler: shuffledOutHandler, - Epoch: currentEpoch, - StartEpoch: startEpoch, - ChanStopNode: chanNodeStop, - NodeTypeProvider: nodeTypeProvider, - IsFullArchive: prefsConfig.FullArchive, - EnableEpochsHandler: enableEpochsHandler, - ValidatorInfoCacher: validatorInfoCacher, - GenesisNodesSetupHandler: nodesConfig, + ShardConsensusGroupSize: shardConsensusGroupSize, + MetaConsensusGroupSize: metaConsensusGroupSize, + Marshalizer: marshalizer, + Hasher: hasher, + Shuffler: nodeShuffler, + EpochStartNotifier: epochStartNotifier, + BootStorer: bootStorer, + ShardIDAsObserver: shardIDAsObserver, + NbShards: nbShards, + EligibleNodes: eligibleValidators, + WaitingNodes: waitingValidators, + SelfPublicKey: pubKeyBytes, + ConsensusGroupCache: consensusGroupCache, + ShuffledOutHandler: shuffledOutHandler, + Epoch: currentEpoch, + StartEpoch: startEpoch, + ChanStopNode: chanNodeStop, + NodeTypeProvider: nodeTypeProvider, + IsFullArchive: prefsConfig.FullArchive, + EnableEpochsHandler: enableEpochsHandler, + ValidatorInfoCacher: validatorInfoCacher, + GenesisNodesSetupHandler: nodesConfig, + NodesCoordinatorRegistryFactory: nodesCoordinatorRegistryFactory, } baseNodesCoordinator, err := nodesCoordinator.NewIndexHashedNodesCoordinator(argumentsNodesCoordinator) diff --git a/factory/bootstrap/shardingFactory_test.go b/factory/bootstrap/shardingFactory_test.go index 0df777933b0..c7a54e077f4 100644 --- a/factory/bootstrap/shardingFactory_test.go +++ b/factory/bootstrap/shardingFactory_test.go @@ -17,6 +17,7 @@ import ( 
"github.com/multiversx/mx-chain-go/testscommon/bootstrapMocks" "github.com/multiversx/mx-chain-go/testscommon/cryptoMocks" "github.com/multiversx/mx-chain-go/testscommon/enableEpochsHandlerMock" + "github.com/multiversx/mx-chain-go/testscommon/genesisMocks" "github.com/multiversx/mx-chain-go/testscommon/marshallerMock" "github.com/multiversx/mx-chain-go/testscommon/nodeTypeProviderMock" "github.com/multiversx/mx-chain-go/testscommon/shardingMocks" @@ -41,7 +42,7 @@ func TestCreateShardCoordinator(t *testing.T) { t.Run("nil pub key should error", func(t *testing.T) { t.Parallel() - shardC, nodeType, err := CreateShardCoordinator(&testscommon.NodesSetupStub{}, nil, config.PreferencesConfig{}, nil) + shardC, nodeType, err := CreateShardCoordinator(&genesisMocks.NodesSetupStub{}, nil, config.PreferencesConfig{}, nil) require.Equal(t, errErd.ErrNilPublicKey, err) require.Empty(t, nodeType) require.True(t, check.IfNil(shardC)) @@ -49,7 +50,7 @@ func TestCreateShardCoordinator(t *testing.T) { t.Run("nil logger should error", func(t *testing.T) { t.Parallel() - shardC, nodeType, err := CreateShardCoordinator(&testscommon.NodesSetupStub{}, &cryptoMocks.PublicKeyStub{}, config.PreferencesConfig{}, nil) + shardC, nodeType, err := CreateShardCoordinator(&genesisMocks.NodesSetupStub{}, &cryptoMocks.PublicKeyStub{}, config.PreferencesConfig{}, nil) require.Equal(t, errErd.ErrNilLogger, err) require.Empty(t, nodeType) require.True(t, check.IfNil(shardC)) @@ -58,7 +59,7 @@ func TestCreateShardCoordinator(t *testing.T) { t.Parallel() shardC, nodeType, err := CreateShardCoordinator( - &testscommon.NodesSetupStub{}, + &genesisMocks.NodesSetupStub{}, &cryptoMocks.PublicKeyStub{ ToByteArrayStub: func() ([]byte, error) { return nil, expectedErr @@ -75,7 +76,7 @@ func TestCreateShardCoordinator(t *testing.T) { t.Parallel() shardC, nodeType, err := CreateShardCoordinator( - &testscommon.NodesSetupStub{}, + &genesisMocks.NodesSetupStub{}, &cryptoMocks.PublicKeyStub{ ToByteArrayStub: func() ([]byte, error) { return nil, sharding.ErrPublicKeyNotFoundInGenesis // force this error here @@ -95,7 +96,7 @@ func TestCreateShardCoordinator(t *testing.T) { counter := 0 shardC, nodeType, err := CreateShardCoordinator( - &testscommon.NodesSetupStub{ + &genesisMocks.NodesSetupStub{ GetShardIDForPubKeyCalled: func(pubKey []byte) (uint32, error) { return 0, sharding.ErrPublicKeyNotFoundInGenesis // force this error }, @@ -123,7 +124,7 @@ func TestCreateShardCoordinator(t *testing.T) { t.Parallel() shardC, nodeType, err := CreateShardCoordinator( - &testscommon.NodesSetupStub{ + &genesisMocks.NodesSetupStub{ GetShardIDForPubKeyCalled: func(pubKey []byte) (uint32, error) { return 0, sharding.ErrPublicKeyNotFoundInGenesis // force this error }, @@ -149,7 +150,7 @@ func TestCreateShardCoordinator(t *testing.T) { t.Parallel() shardC, nodeType, err := CreateShardCoordinator( - &testscommon.NodesSetupStub{ + &genesisMocks.NodesSetupStub{ GetShardIDForPubKeyCalled: func(pubKey []byte) (uint32, error) { return core.MetachainShardId, nil }, @@ -169,7 +170,7 @@ func TestCreateShardCoordinator(t *testing.T) { t.Parallel() shardC, nodeType, err := CreateShardCoordinator( - &testscommon.NodesSetupStub{ + &genesisMocks.NodesSetupStub{ GetShardIDForPubKeyCalled: func(pubKey []byte) (uint32, error) { return core.MetachainShardId, nil }, @@ -192,7 +193,7 @@ func TestCreateNodesCoordinator(t *testing.T) { nodesC, err := CreateNodesCoordinator( nil, - &testscommon.NodesSetupStub{}, + &genesisMocks.NodesSetupStub{}, config.PreferencesConfig{}, 
&mock.EpochStartNotifierStub{}, &cryptoMocks.PublicKeyStub{}, @@ -208,6 +209,7 @@ func TestCreateNodesCoordinator(t *testing.T) { &nodeTypeProviderMock.NodeTypeProviderStub{}, &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, &validatorInfoCacherMocks.ValidatorInfoCacherStub{}, + &shardingMocks.NodesCoordinatorRegistryFactoryMock{}, ) require.Equal(t, errErd.ErrNilShuffleOutCloser, err) require.True(t, check.IfNil(nodesC)) @@ -233,6 +235,7 @@ func TestCreateNodesCoordinator(t *testing.T) { &nodeTypeProviderMock.NodeTypeProviderStub{}, &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, &validatorInfoCacherMocks.ValidatorInfoCacherStub{}, + &shardingMocks.NodesCoordinatorRegistryFactoryMock{}, ) require.Equal(t, errErd.ErrNilGenesisNodesSetupHandler, err) require.True(t, check.IfNil(nodesC)) @@ -242,7 +245,7 @@ func TestCreateNodesCoordinator(t *testing.T) { nodesC, err := CreateNodesCoordinator( &testscommon.ShuffleOutCloserStub{}, - &testscommon.NodesSetupStub{}, + &genesisMocks.NodesSetupStub{}, config.PreferencesConfig{}, nil, &cryptoMocks.PublicKeyStub{}, @@ -258,6 +261,7 @@ func TestCreateNodesCoordinator(t *testing.T) { &nodeTypeProviderMock.NodeTypeProviderStub{}, &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, &validatorInfoCacherMocks.ValidatorInfoCacherStub{}, + &shardingMocks.NodesCoordinatorRegistryFactoryMock{}, ) require.Equal(t, errErd.ErrNilEpochStartNotifier, err) require.True(t, check.IfNil(nodesC)) @@ -267,7 +271,7 @@ func TestCreateNodesCoordinator(t *testing.T) { nodesC, err := CreateNodesCoordinator( &testscommon.ShuffleOutCloserStub{}, - &testscommon.NodesSetupStub{}, + &genesisMocks.NodesSetupStub{}, config.PreferencesConfig{}, &mock.EpochStartNotifierStub{}, nil, @@ -283,6 +287,7 @@ func TestCreateNodesCoordinator(t *testing.T) { &nodeTypeProviderMock.NodeTypeProviderStub{}, &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, &validatorInfoCacherMocks.ValidatorInfoCacherStub{}, + &shardingMocks.NodesCoordinatorRegistryFactoryMock{}, ) require.Equal(t, errErd.ErrNilPublicKey, err) require.True(t, check.IfNil(nodesC)) @@ -292,7 +297,7 @@ func TestCreateNodesCoordinator(t *testing.T) { nodesC, err := CreateNodesCoordinator( &testscommon.ShuffleOutCloserStub{}, - &testscommon.NodesSetupStub{}, + &genesisMocks.NodesSetupStub{}, config.PreferencesConfig{}, &mock.EpochStartNotifierStub{}, &cryptoMocks.PublicKeyStub{}, @@ -308,6 +313,7 @@ func TestCreateNodesCoordinator(t *testing.T) { &nodeTypeProviderMock.NodeTypeProviderStub{}, &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, &validatorInfoCacherMocks.ValidatorInfoCacherStub{}, + &shardingMocks.NodesCoordinatorRegistryFactoryMock{}, ) require.Equal(t, errErd.ErrNilBootstrapParamsHandler, err) require.True(t, check.IfNil(nodesC)) @@ -317,7 +323,7 @@ func TestCreateNodesCoordinator(t *testing.T) { nodesC, err := CreateNodesCoordinator( &testscommon.ShuffleOutCloserStub{}, - &testscommon.NodesSetupStub{}, + &genesisMocks.NodesSetupStub{}, config.PreferencesConfig{}, &mock.EpochStartNotifierStub{}, &cryptoMocks.PublicKeyStub{}, @@ -333,6 +339,7 @@ func TestCreateNodesCoordinator(t *testing.T) { &nodeTypeProviderMock.NodeTypeProviderStub{}, &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, &validatorInfoCacherMocks.ValidatorInfoCacherStub{}, + &shardingMocks.NodesCoordinatorRegistryFactoryMock{}, ) require.Equal(t, nodesCoordinator.ErrNilNodeStopChannel, err) require.True(t, check.IfNil(nodesC)) @@ -342,7 +349,7 @@ func TestCreateNodesCoordinator(t *testing.T) { nodesC, err := CreateNodesCoordinator( 
&testscommon.ShuffleOutCloserStub{}, - &testscommon.NodesSetupStub{}, + &genesisMocks.NodesSetupStub{}, config.PreferencesConfig{ DestinationShardAsObserver: "", }, @@ -360,6 +367,7 @@ func TestCreateNodesCoordinator(t *testing.T) { &nodeTypeProviderMock.NodeTypeProviderStub{}, &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, &validatorInfoCacherMocks.ValidatorInfoCacherStub{}, + &shardingMocks.NodesCoordinatorRegistryFactoryMock{}, ) require.NotNil(t, err) require.True(t, check.IfNil(nodesC)) @@ -369,7 +377,7 @@ func TestCreateNodesCoordinator(t *testing.T) { nodesC, err := CreateNodesCoordinator( &testscommon.ShuffleOutCloserStub{}, - &testscommon.NodesSetupStub{}, + &genesisMocks.NodesSetupStub{}, config.PreferencesConfig{ DestinationShardAsObserver: "disabled", }, @@ -391,6 +399,7 @@ func TestCreateNodesCoordinator(t *testing.T) { &nodeTypeProviderMock.NodeTypeProviderStub{}, &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, &validatorInfoCacherMocks.ValidatorInfoCacherStub{}, + &shardingMocks.NodesCoordinatorRegistryFactoryMock{}, ) require.True(t, errors.Is(err, expectedErr)) require.True(t, check.IfNil(nodesC)) @@ -400,7 +409,7 @@ func TestCreateNodesCoordinator(t *testing.T) { nodesC, err := CreateNodesCoordinator( &testscommon.ShuffleOutCloserStub{}, - &testscommon.NodesSetupStub{}, + &genesisMocks.NodesSetupStub{}, config.PreferencesConfig{ DestinationShardAsObserver: "0", }, @@ -422,6 +431,7 @@ func TestCreateNodesCoordinator(t *testing.T) { &nodeTypeProviderMock.NodeTypeProviderStub{}, &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, &validatorInfoCacherMocks.ValidatorInfoCacherStub{}, + &shardingMocks.NodesCoordinatorRegistryFactoryMock{}, ) require.True(t, errors.Is(err, expectedErr)) require.True(t, check.IfNil(nodesC)) @@ -431,7 +441,7 @@ func TestCreateNodesCoordinator(t *testing.T) { nodesC, err := CreateNodesCoordinator( &testscommon.ShuffleOutCloserStub{}, - &testscommon.NodesSetupStub{}, + &genesisMocks.NodesSetupStub{}, config.PreferencesConfig{ DestinationShardAsObserver: "0", }, @@ -453,6 +463,7 @@ func TestCreateNodesCoordinator(t *testing.T) { &nodeTypeProviderMock.NodeTypeProviderStub{}, &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, &validatorInfoCacherMocks.ValidatorInfoCacherStub{}, + &shardingMocks.NodesCoordinatorRegistryFactoryMock{}, ) require.NotNil(t, err) require.True(t, check.IfNil(nodesC)) @@ -462,7 +473,7 @@ func TestCreateNodesCoordinator(t *testing.T) { nodesC, err := CreateNodesCoordinator( &testscommon.ShuffleOutCloserStub{}, - &testscommon.NodesSetupStub{}, + &genesisMocks.NodesSetupStub{}, config.PreferencesConfig{ DestinationShardAsObserver: "0", }, @@ -484,6 +495,7 @@ func TestCreateNodesCoordinator(t *testing.T) { &nodeTypeProviderMock.NodeTypeProviderStub{}, &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, &validatorInfoCacherMocks.ValidatorInfoCacherStub{}, + &shardingMocks.NodesCoordinatorRegistryFactoryMock{}, ) require.NotNil(t, err) require.True(t, check.IfNil(nodesC)) @@ -493,7 +505,7 @@ func TestCreateNodesCoordinator(t *testing.T) { nodesC, err := CreateNodesCoordinator( &testscommon.ShuffleOutCloserStub{}, - &testscommon.NodesSetupStub{}, + &genesisMocks.NodesSetupStub{}, config.PreferencesConfig{ DestinationShardAsObserver: "0", }, @@ -510,7 +522,7 @@ func TestCreateNodesCoordinator(t *testing.T) { &shardingMocks.NodeShufflerMock{}, 0, &bootstrapMocks.BootstrapParamsHandlerMock{ - NodesConfigCalled: func() *nodesCoordinator.NodesCoordinatorRegistry { + NodesConfigCalled: func() 
nodesCoordinator.NodesCoordinatorRegistryHandler { return &nodesCoordinator.NodesCoordinatorRegistry{ EpochsConfig: map[string]*nodesCoordinator.EpochValidators{ "0": { @@ -536,6 +548,7 @@ func TestCreateNodesCoordinator(t *testing.T) { &nodeTypeProviderMock.NodeTypeProviderStub{}, &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, &validatorInfoCacherMocks.ValidatorInfoCacherStub{}, + &shardingMocks.NodesCoordinatorRegistryFactoryMock{}, ) require.NotNil(t, err) require.True(t, check.IfNil(nodesC)) @@ -545,7 +558,7 @@ func TestCreateNodesCoordinator(t *testing.T) { nodesC, err := CreateNodesCoordinator( &testscommon.ShuffleOutCloserStub{}, - &testscommon.NodesSetupStub{}, + &genesisMocks.NodesSetupStub{}, config.PreferencesConfig{ DestinationShardAsObserver: "disabled", }, @@ -562,7 +575,7 @@ func TestCreateNodesCoordinator(t *testing.T) { &shardingMocks.NodeShufflerMock{}, 0, &bootstrapMocks.BootstrapParamsHandlerMock{ - NodesConfigCalled: func() *nodesCoordinator.NodesCoordinatorRegistry { + NodesConfigCalled: func() nodesCoordinator.NodesCoordinatorRegistryHandler { return &nodesCoordinator.NodesCoordinatorRegistry{ EpochsConfig: map[string]*nodesCoordinator.EpochValidators{ "0": { @@ -588,6 +601,7 @@ func TestCreateNodesCoordinator(t *testing.T) { &nodeTypeProviderMock.NodeTypeProviderStub{}, &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, &validatorInfoCacherMocks.ValidatorInfoCacherStub{}, + &shardingMocks.NodesCoordinatorRegistryFactoryMock{}, ) require.Nil(t, err) require.False(t, check.IfNil(nodesC)) @@ -608,7 +622,7 @@ func TestCreateNodesShuffleOut(t *testing.T) { t.Parallel() shuffler, err := CreateNodesShuffleOut( - &testscommon.NodesSetupStub{}, + &genesisMocks.NodesSetupStub{}, config.EpochStartConfig{ MaxShuffledOutRestartThreshold: 5.0, }, @@ -621,7 +635,7 @@ func TestCreateNodesShuffleOut(t *testing.T) { t.Parallel() shuffler, err := CreateNodesShuffleOut( - &testscommon.NodesSetupStub{}, + &genesisMocks.NodesSetupStub{}, config.EpochStartConfig{ MinShuffledOutRestartThreshold: 5.0, }, @@ -634,7 +648,7 @@ func TestCreateNodesShuffleOut(t *testing.T) { t.Parallel() shuffler, err := CreateNodesShuffleOut( - &testscommon.NodesSetupStub{}, + &genesisMocks.NodesSetupStub{}, config.EpochStartConfig{}, nil, // force NewShuffleOutCloser to fail ) @@ -645,7 +659,7 @@ func TestCreateNodesShuffleOut(t *testing.T) { t.Parallel() shuffler, err := CreateNodesShuffleOut( - &testscommon.NodesSetupStub{ + &genesisMocks.NodesSetupStub{ GetRoundDurationCalled: func() uint64 { return 4000 }, diff --git a/factory/consensus/consensusComponents.go b/factory/consensus/consensusComponents.go index a2dc7a3e1bf..decdb7c85fa 100644 --- a/factory/consensus/consensusComponents.go +++ b/factory/consensus/consensusComponents.go @@ -261,11 +261,6 @@ func (ccf *consensusComponentsFactory) Create() (*consensusComponents, error) { return nil, err } - sentSignaturesHandler, err := spos.NewSentSignaturesTracker(ccf.cryptoComponents.KeysHandler()) - if err != nil { - return nil, err - } - fct, err := sposFactory.GetSubroundsFactory( consensusDataContainer, consensusState, @@ -273,7 +268,7 @@ func (ccf *consensusComponentsFactory) Create() (*consensusComponents, error) { ccf.config.Consensus.Type, ccf.statusCoreComponents.AppStatusHandler(), ccf.statusComponents.OutportHandler(), - sentSignaturesHandler, + ccf.processComponents.SentSignaturesTracker(), []byte(ccf.coreComponents.ChainID()), ccf.networkComponents.NetworkMessenger().ID(), ) diff --git a/factory/consensus/consensusComponents_test.go 
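In the consensus components, the locally built spos sent-signatures tracker is replaced by the one exposed through the process components holder, so consensus and block processing share a single instance. Below is a minimal sketch of that holder-based sharing, with hypothetical stand-in interfaces; the method set shown here is illustrative, not the real process.SentSignaturesTracker contract.

package main

import "fmt"

// Hypothetical stand-in for process.SentSignaturesTracker.
type sentSignaturesTracker interface {
	SignatureSent(pkBytes []byte)
	NumSent() int
}

type countingTracker struct{ sent int }

func (c *countingTracker) SignatureSent(_ []byte) { c.sent++ }
func (c *countingTracker) NumSent() int           { return c.sent }

// processComponentsHolder stands in for the ProcessComponentsHolder interface,
// which now exposes the tracker so other component factories reuse it.
type processComponentsHolder interface {
	SentSignaturesTracker() sentSignaturesTracker
}

type processComponents struct{ tracker sentSignaturesTracker }

func (p *processComponents) SentSignaturesTracker() sentSignaturesTracker { return p.tracker }

// newConsensusComponents no longer builds its own tracker; it borrows the shared one.
func newConsensusComponents(holder processComponentsHolder) sentSignaturesTracker {
	return holder.SentSignaturesTracker()
}

func main() {
	shared := &countingTracker{}
	holder := &processComponents{tracker: shared}

	consensusTracker := newConsensusComponents(holder)
	consensusTracker.SignatureSent([]byte("validator-pk"))

	// Both views count the same signature, because there is a single instance.
	fmt.Println(shared.NumSent(), consensusTracker.NumSent()) // 1 1
}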
b/factory/consensus/consensusComponents_test.go index 67f551acf1d..a7b00e6a347 100644 --- a/factory/consensus/consensusComponents_test.go +++ b/factory/consensus/consensusComponents_test.go @@ -29,6 +29,7 @@ import ( "github.com/multiversx/mx-chain-go/testscommon/epochNotifier" factoryMocks "github.com/multiversx/mx-chain-go/testscommon/factory" "github.com/multiversx/mx-chain-go/testscommon/genericMocks" + "github.com/multiversx/mx-chain-go/testscommon/genesisMocks" "github.com/multiversx/mx-chain-go/testscommon/marshallerMock" outportMocks "github.com/multiversx/mx-chain-go/testscommon/outport" "github.com/multiversx/mx-chain-go/testscommon/p2pmocks" @@ -57,7 +58,7 @@ func createMockConsensusComponentsFactoryArgs() consensusComp.ConsensusComponent AlarmSch: &testscommon.AlarmSchedulerStub{}, NtpSyncTimer: &testscommon.SyncTimerStub{}, GenesisBlockTime: time.Time{}, - NodesConfig: &testscommon.NodesSetupStub{ + NodesConfig: &genesisMocks.NodesSetupStub{ GetShardConsensusGroupSizeCalled: func() uint32 { return 2 }, @@ -139,6 +140,7 @@ func createMockConsensusComponentsFactoryArgs() consensusComp.ConsensusComponent HeaderSigVerif: &testsMocks.HeaderSigVerifierStub{}, HeaderIntegrVerif: &mock.HeaderIntegrityVerifierStub{}, FallbackHdrValidator: &testscommon.FallBackHeaderValidatorStub{}, + SentSignaturesTrackerInternal: &testscommon.SentSignatureTrackerStub{}, }, StateComponents: &factoryMocks.StateComponentsMock{ StorageManagers: map[string]common.StorageManager{ diff --git a/factory/core/coreComponents.go b/factory/core/coreComponents.go index f04afe47d61..247ee7e05f8 100644 --- a/factory/core/coreComponents.go +++ b/factory/core/coreComponents.go @@ -33,7 +33,6 @@ import ( "github.com/multiversx/mx-chain-go/process" "github.com/multiversx/mx-chain-go/process/economics" "github.com/multiversx/mx-chain-go/process/rating" - "github.com/multiversx/mx-chain-go/process/smartContract" "github.com/multiversx/mx-chain-go/sharding" "github.com/multiversx/mx-chain-go/sharding/nodesCoordinator" "github.com/multiversx/mx-chain-go/statusHandler" @@ -244,35 +243,15 @@ func (ccf *coreComponentsFactory) Create() (*coreComponents, error) { } wasmVMChangeLocker := &sync.RWMutex{} - gasScheduleConfigurationFolderName := ccf.configPathsHolder.GasScheduleDirectoryName - argsGasScheduleNotifier := forking.ArgsNewGasScheduleNotifier{ - GasScheduleConfig: ccf.epochConfig.GasSchedule, - ConfigDir: gasScheduleConfigurationFolderName, - EpochNotifier: epochNotifier, - WasmVMChangeLocker: wasmVMChangeLocker, - } - gasScheduleNotifier, err := forking.NewGasScheduleNotifier(argsGasScheduleNotifier) - if err != nil { - return nil, err - } - - builtInCostHandler, err := economics.NewBuiltInFunctionsCost(&economics.ArgsBuiltInFunctionCost{ - ArgsParser: smartContract.NewArgumentParser(), - GasSchedule: gasScheduleNotifier, - }) - if err != nil { - return nil, err - } txVersionChecker := versioning.NewTxVersionChecker(ccf.config.GeneralSettings.MinTransactionVersion) log.Trace("creating economics data components") argsNewEconomicsData := economics.ArgsNewEconomicsData{ - Economics: &ccf.economicsConfig, - EpochNotifier: epochNotifier, - EnableEpochsHandler: enableEpochsHandler, - BuiltInFunctionsCostHandler: builtInCostHandler, - TxVersionChecker: txVersionChecker, + Economics: &ccf.economicsConfig, + EpochNotifier: epochNotifier, + EnableEpochsHandler: enableEpochsHandler, + TxVersionChecker: txVersionChecker, } economicsData, err := economics.NewEconomicsData(argsNewEconomicsData) if err != nil { @@ -311,6 +290,7 @@ func 
(ccf *coreComponentsFactory) Create() (*coreComponents, error) { ShuffleBetweenShards: true, MaxNodesEnableConfig: ccf.epochConfig.EnableEpochs.MaxNodesChangeEnableEpoch, EnableEpochsHandler: enableEpochsHandler, + EnableEpochs: ccf.epochConfig.EnableEpochs, } nodesShuffler, err := nodesCoordinator.NewHashValidatorsShuffler(argsNodesShuffler) diff --git a/factory/core/coreComponents_test.go b/factory/core/coreComponents_test.go index 79aba4a2532..d88a8a2284e 100644 --- a/factory/core/coreComponents_test.go +++ b/factory/core/coreComponents_test.go @@ -248,18 +248,6 @@ func TestCoreComponentsFactory_CreateCoreComponentsInvalidRoundConfigShouldErr(t require.NotNil(t, err) } -func TestCoreComponentsFactory_CreateCoreComponentsInvalidEpochConfigShouldErr(t *testing.T) { - t.Parallel() - - args := componentsMock.GetCoreArgs() - args.EpochConfig = config.EpochConfig{} - ccf, _ := coreComp.NewCoreComponentsFactory(args) - - cc, err := ccf.Create() - require.Nil(t, cc) - require.NotNil(t, err) -} - func TestCoreComponentsFactory_CreateCoreComponentsInvalidGenesisMaxNumberOfShardsShouldErr(t *testing.T) { t.Parallel() diff --git a/factory/disabled/auctionListDisplayer.go b/factory/disabled/auctionListDisplayer.go new file mode 100644 index 00000000000..ec2d2f0774b --- /dev/null +++ b/factory/disabled/auctionListDisplayer.go @@ -0,0 +1,35 @@ +package disabled + +import ( + "github.com/multiversx/mx-chain-go/epochStart/metachain" + "github.com/multiversx/mx-chain-go/state" +) + +type auctionListDisplayer struct { +} + +// NewDisabledAuctionListDisplayer creates a disabled auction list displayer +func NewDisabledAuctionListDisplayer() *auctionListDisplayer { + return &auctionListDisplayer{} +} + +// DisplayOwnersData does nothing +func (ald *auctionListDisplayer) DisplayOwnersData(_ map[string]*metachain.OwnerAuctionData) { +} + +// DisplayOwnersSelectedNodes does nothing +func (ald *auctionListDisplayer) DisplayOwnersSelectedNodes(_ map[string]*metachain.OwnerAuctionData) { +} + +// DisplayAuctionList does nothing +func (ald *auctionListDisplayer) DisplayAuctionList( + _ []state.ValidatorInfoHandler, + _ map[string]*metachain.OwnerAuctionData, + _ uint32, +) { +} + +// IsInterfaceNil checks if the underlying pointer is nil +func (ald *auctionListDisplayer) IsInterfaceNil() bool { + return ald == nil +} diff --git a/factory/disabled/auctionListSelector.go b/factory/disabled/auctionListSelector.go new file mode 100644 index 00000000000..281102a4a7f --- /dev/null +++ b/factory/disabled/auctionListSelector.go @@ -0,0 +1,21 @@ +package disabled + +import "github.com/multiversx/mx-chain-go/state" + +type auctionListSelector struct { +} + +// NewDisabledAuctionListSelector returns a new instance of a disabled auction list selector +func NewDisabledAuctionListSelector() *auctionListSelector { + return &auctionListSelector{} +} + +// SelectNodesFromAuctionList returns nil +func (als *auctionListSelector) SelectNodesFromAuctionList(state.ShardValidatorsInfoMapHandler, []byte) error { + return nil +} + +// IsInterfaceNil returns true if the underlying pointer is nil +func (als *auctionListSelector) IsInterfaceNil() bool { + return als == nil +} diff --git a/factory/disabled/stakingDataProvider.go b/factory/disabled/stakingDataProvider.go new file mode 100644 index 00000000000..f24b7b735b2 --- /dev/null +++ b/factory/disabled/stakingDataProvider.go @@ -0,0 +1,38 @@ +package disabled + +import ( + "github.com/multiversx/mx-chain-go/epochStart" + "github.com/multiversx/mx-chain-go/state" +) + +type 
stakingDataProvider struct { +} + +// NewDisabledStakingDataProvider returns a new instance of stakingDataProvider +func NewDisabledStakingDataProvider() *stakingDataProvider { + return &stakingDataProvider{} +} + +// FillValidatorInfo returns a nil error +func (s *stakingDataProvider) FillValidatorInfo(state.ValidatorInfoHandler) error { + return nil +} + +// ComputeUnQualifiedNodes returns nil values +func (s *stakingDataProvider) ComputeUnQualifiedNodes(_ state.ShardValidatorsInfoMapHandler) ([][]byte, map[string][][]byte, error) { + return nil, nil, nil +} + +// GetOwnersData returns nil +func (s *stakingDataProvider) GetOwnersData() map[string]*epochStart.OwnerData { + return nil +} + +// Clean does nothing +func (s *stakingDataProvider) Clean() { +} + +// IsInterfaceNil returns true if there is no value under the interface +func (s *stakingDataProvider) IsInterfaceNil() bool { + return s == nil +} diff --git a/factory/heartbeat/heartbeatV2Components.go b/factory/heartbeat/heartbeatV2Components.go index a551f22e869..97164a7240e 100644 --- a/factory/heartbeat/heartbeatV2Components.go +++ b/factory/heartbeat/heartbeatV2Components.go @@ -272,32 +272,6 @@ func (hcf *heartbeatV2ComponentsFactory) Create() (*heartbeatV2Components, error return nil, err } - argsMainCrossShardPeerTopicNotifier := monitor.ArgsCrossShardPeerTopicNotifier{ - ShardCoordinator: hcf.processComponents.ShardCoordinator(), - PeerShardMapper: hcf.processComponents.PeerShardMapper(), - } - mainCrossShardPeerTopicNotifier, err := monitor.NewCrossShardPeerTopicNotifier(argsMainCrossShardPeerTopicNotifier) - if err != nil { - return nil, err - } - err = hcf.networkComponents.NetworkMessenger().AddPeerTopicNotifier(mainCrossShardPeerTopicNotifier) - if err != nil { - return nil, err - } - - argsFullArchiveCrossShardPeerTopicNotifier := monitor.ArgsCrossShardPeerTopicNotifier{ - ShardCoordinator: hcf.processComponents.ShardCoordinator(), - PeerShardMapper: hcf.processComponents.FullArchivePeerShardMapper(), - } - fullArchiveCrossShardPeerTopicNotifier, err := monitor.NewCrossShardPeerTopicNotifier(argsFullArchiveCrossShardPeerTopicNotifier) - if err != nil { - return nil, err - } - err = hcf.networkComponents.FullArchiveNetworkMessenger().AddPeerTopicNotifier(fullArchiveCrossShardPeerTopicNotifier) - if err != nil { - return nil, err - } - return &heartbeatV2Components{ sender: heartbeatV2Sender, peerAuthRequestsProcessor: paRequestsProcessor, diff --git a/factory/heartbeat/heartbeatV2Components_test.go b/factory/heartbeat/heartbeatV2Components_test.go index f013294a7d1..6b5088cab5b 100644 --- a/factory/heartbeat/heartbeatV2Components_test.go +++ b/factory/heartbeat/heartbeatV2Components_test.go @@ -11,7 +11,6 @@ import ( errorsMx "github.com/multiversx/mx-chain-go/errors" heartbeatComp "github.com/multiversx/mx-chain-go/factory/heartbeat" testsMocks "github.com/multiversx/mx-chain-go/integrationTests/mock" - "github.com/multiversx/mx-chain-go/p2p" "github.com/multiversx/mx-chain-go/sharding" "github.com/multiversx/mx-chain-go/storage" "github.com/multiversx/mx-chain-go/testscommon" @@ -504,26 +503,6 @@ func TestHeartbeatV2Components_Create(t *testing.T) { assert.Nil(t, hc) assert.Error(t, err) }) - t.Run("AddPeerTopicNotifier fails should error", func(t *testing.T) { - t.Parallel() - - args := createMockHeartbeatV2ComponentsFactoryArgs() - args.NetworkComponents = &testsMocks.NetworkComponentsStub{ - Messenger: &p2pmocks.MessengerStub{ - AddPeerTopicNotifierCalled: func(notifier p2p.PeerTopicNotifier) error { - return 
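The new files under factory/disabled all follow the same no-op pattern: a stateless struct that satisfies a production interface with methods doing nothing or returning zero values, plus the repository's usual IsInterfaceNil guard. A condensed, self-contained instance of that pattern is shown below, written against a hypothetical interface rather than the real epoch-start ones.

package main

import "fmt"

// Hypothetical consumer-side interface; the real ones live in epochStart/metachain.
type auctionListSelector interface {
	SelectNodesFromAuctionList(validators []string, randomness []byte) error
	IsInterfaceNil() bool
}

// disabledAuctionListSelector mirrors the factory/disabled style: no state, no behaviour.
type disabledAuctionListSelector struct{}

// NewDisabledAuctionListSelector returns a selector that silently accepts everything.
func NewDisabledAuctionListSelector() *disabledAuctionListSelector {
	return &disabledAuctionListSelector{}
}

// SelectNodesFromAuctionList does nothing and reports success.
func (d *disabledAuctionListSelector) SelectNodesFromAuctionList(_ []string, _ []byte) error {
	return nil
}

// IsInterfaceNil follows the repository-wide convention for nil-interface checks.
func (d *disabledAuctionListSelector) IsInterfaceNil() bool {
	return d == nil
}

func main() {
	var selector auctionListSelector = NewDisabledAuctionListSelector()
	err := selector.SelectNodesFromAuctionList([]string{"node-1"}, []byte("rnd"))
	fmt.Println("disabled selector error:", err, "| nil interface:", selector.IsInterfaceNil())
}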
expectedErr - }, - }, - FullArchiveNetworkMessengerField: &p2pmocks.MessengerStub{}, - } - hcf, err := heartbeatComp.NewHeartbeatV2ComponentsFactory(args) - assert.NotNil(t, hcf) - assert.NoError(t, err) - - hc, err := hcf.Create() - assert.Nil(t, hc) - assert.Equal(t, expectedErr, err) - }) t.Run("should work", func(t *testing.T) { t.Parallel() diff --git a/factory/interface.go b/factory/interface.go index 2498cc916c4..ede9f39089b 100644 --- a/factory/interface.go +++ b/factory/interface.go @@ -309,6 +309,7 @@ type ProcessComponentsHolder interface { ESDTDataStorageHandlerForAPI() vmcommon.ESDTNFTStorageHandler AccountsParser() genesis.AccountsParser ReceiptsRepository() ReceiptsRepository + SentSignaturesTracker() process.SentSignaturesTracker IsInterfaceNil() bool } @@ -435,7 +436,7 @@ type BootstrapParamsHolder interface { Epoch() uint32 SelfShardID() uint32 NumOfShards() uint32 - NodesConfig() *nodesCoordinator.NodesCoordinatorRegistry + NodesConfig() nodesCoordinator.NodesCoordinatorRegistryHandler IsInterfaceNil() bool } @@ -456,6 +457,7 @@ type BootstrapComponentsHolder interface { HeaderVersionHandler() factory.HeaderVersionHandler HeaderIntegrityVerifier() factory.HeaderIntegrityVerifierHandler GuardedAccountHandler() process.GuardedAccountHandler + NodesCoordinatorRegistryFactory() nodesCoordinator.NodesCoordinatorRegistryFactory IsInterfaceNil() bool } diff --git a/factory/mock/nodesSetupStub.go b/factory/mock/nodesSetupStub.go deleted file mode 100644 index 835ad9fc0d8..00000000000 --- a/factory/mock/nodesSetupStub.go +++ /dev/null @@ -1,142 +0,0 @@ -package mock - -import "github.com/multiversx/mx-chain-go/sharding/nodesCoordinator" - -// NodesSetupStub - -type NodesSetupStub struct { - InitialNodesInfoForShardCalled func(shardId uint32) ([]nodesCoordinator.GenesisNodeInfoHandler, []nodesCoordinator.GenesisNodeInfoHandler, error) - InitialNodesInfoCalled func() (map[uint32][]nodesCoordinator.GenesisNodeInfoHandler, map[uint32][]nodesCoordinator.GenesisNodeInfoHandler) - GetStartTimeCalled func() int64 - GetRoundDurationCalled func() uint64 - GetChainIdCalled func() string - GetMinTransactionVersionCalled func() uint32 - GetShardConsensusGroupSizeCalled func() uint32 - GetMetaConsensusGroupSizeCalled func() uint32 - NumberOfShardsCalled func() uint32 - MinNumberOfNodesCalled func() uint32 - MinNumberOfShardNodesCalled func() uint32 - MinNumberOfMetaNodesCalled func() uint32 - GetHysteresisCalled func() float32 - GetAdaptivityCalled func() bool -} - -// MinNumberOfShardNodes - -func (n *NodesSetupStub) MinNumberOfShardNodes() uint32 { - if n.MinNumberOfShardNodesCalled != nil { - return n.MinNumberOfShardNodesCalled() - } - - return 1 -} - -// MinNumberOfMetaNodes - -func (n *NodesSetupStub) MinNumberOfMetaNodes() uint32 { - if n.MinNumberOfMetaNodesCalled != nil { - return n.MinNumberOfMetaNodesCalled() - } - - return 1 -} - -// GetHysteresis - -func (n *NodesSetupStub) GetHysteresis() float32 { - if n.GetHysteresisCalled != nil { - return n.GetHysteresisCalled() - } - - return 0 -} - -// GetAdaptivity - -func (n *NodesSetupStub) GetAdaptivity() bool { - if n.GetAdaptivityCalled != nil { - return n.GetAdaptivityCalled() - } - - return false -} - -// MinNumberOfNodes - -func (n *NodesSetupStub) MinNumberOfNodes() uint32 { - if n.MinNumberOfNodesCalled != nil { - return n.MinNumberOfNodesCalled() - } - return 2 -} - -// GetStartTime - -func (n *NodesSetupStub) GetStartTime() int64 { - if n.GetStartTimeCalled != nil { - return n.GetStartTimeCalled() - } - return 0 -} - -// 
GetRoundDuration - -func (n *NodesSetupStub) GetRoundDuration() uint64 { - if n.GetRoundDurationCalled != nil { - return n.GetRoundDurationCalled() - } - return 0 -} - -// GetChainId - -func (n *NodesSetupStub) GetChainId() string { - if n.GetChainIdCalled != nil { - return n.GetChainIdCalled() - } - return "chainID" -} - -// GetMinTransactionVersion - -func (n *NodesSetupStub) GetMinTransactionVersion() uint32 { - if n.GetMinTransactionVersionCalled != nil { - return n.GetMinTransactionVersionCalled() - } - return 1 -} - -// GetShardConsensusGroupSize - -func (n *NodesSetupStub) GetShardConsensusGroupSize() uint32 { - if n.GetShardConsensusGroupSizeCalled != nil { - return n.GetShardConsensusGroupSizeCalled() - } - return 0 -} - -// GetMetaConsensusGroupSize - -func (n *NodesSetupStub) GetMetaConsensusGroupSize() uint32 { - if n.GetMetaConsensusGroupSizeCalled != nil { - return n.GetMetaConsensusGroupSizeCalled() - } - return 0 -} - -// NumberOfShards - -func (n *NodesSetupStub) NumberOfShards() uint32 { - if n.NumberOfShardsCalled != nil { - return n.NumberOfShardsCalled() - } - return 0 -} - -// InitialNodesInfoForShard - -func (n *NodesSetupStub) InitialNodesInfoForShard(shardId uint32) ([]nodesCoordinator.GenesisNodeInfoHandler, []nodesCoordinator.GenesisNodeInfoHandler, error) { - if n.InitialNodesInfoForShardCalled != nil { - return n.InitialNodesInfoForShardCalled(shardId) - } - return nil, nil, nil -} - -// InitialNodesInfo - -func (n *NodesSetupStub) InitialNodesInfo() (map[uint32][]nodesCoordinator.GenesisNodeInfoHandler, map[uint32][]nodesCoordinator.GenesisNodeInfoHandler) { - if n.InitialNodesInfoCalled != nil { - return n.InitialNodesInfoCalled() - } - return nil, nil -} - -// IsInterfaceNil - -func (n *NodesSetupStub) IsInterfaceNil() bool { - return n == nil -} diff --git a/factory/mock/processComponentsStub.go b/factory/mock/processComponentsStub.go index 51265a22997..e646958281c 100644 --- a/factory/mock/processComponentsStub.go +++ b/factory/mock/processComponentsStub.go @@ -56,6 +56,7 @@ type ProcessComponentsMock struct { ESDTDataStorageHandlerForAPIInternal vmcommon.ESDTNFTStorageHandler AccountsParserInternal genesis.AccountsParser ReceiptsRepositoryInternal factory.ReceiptsRepository + SentSignaturesTrackerInternal process.SentSignaturesTracker } // Create - @@ -278,6 +279,11 @@ func (pcm *ProcessComponentsMock) ReceiptsRepository() factory.ReceiptsRepositor return pcm.ReceiptsRepositoryInternal } +// SentSignaturesTracker - +func (pcm *ProcessComponentsMock) SentSignaturesTracker() process.SentSignaturesTracker { + return pcm.SentSignaturesTrackerInternal +} + // IsInterfaceNil - func (pcm *ProcessComponentsMock) IsInterfaceNil() bool { return pcm == nil diff --git a/factory/mock/validatorStatisticsProcessorStub.go b/factory/mock/validatorStatisticsProcessorStub.go deleted file mode 100644 index 1cb51e79f41..00000000000 --- a/factory/mock/validatorStatisticsProcessorStub.go +++ /dev/null @@ -1,130 +0,0 @@ -package mock - -import ( - "github.com/multiversx/mx-chain-core-go/data" - "github.com/multiversx/mx-chain-go/state" -) - -// ValidatorStatisticsProcessorStub - -type ValidatorStatisticsProcessorStub struct { - UpdatePeerStateCalled func(header data.MetaHeaderHandler) ([]byte, error) - RevertPeerStateCalled func(header data.MetaHeaderHandler) error - GetPeerAccountCalled func(address []byte) (state.PeerAccountHandler, error) - RootHashCalled func() ([]byte, error) - ResetValidatorStatisticsAtNewEpochCalled func(vInfos map[uint32][]*state.ValidatorInfo) error - 
GetValidatorInfoForRootHashCalled func(rootHash []byte) (map[uint32][]*state.ValidatorInfo, error) - ProcessRatingsEndOfEpochCalled func(validatorInfos map[uint32][]*state.ValidatorInfo, epoch uint32) error - ProcessCalled func(validatorInfo data.ShardValidatorInfoHandler) error - CommitCalled func() ([]byte, error) - PeerAccountToValidatorInfoCalled func(peerAccount state.PeerAccountHandler) *state.ValidatorInfo - SaveNodesCoordinatorUpdatesCalled func(epoch uint32) (bool, error) -} - -// PeerAccountToValidatorInfo - -func (vsp *ValidatorStatisticsProcessorStub) PeerAccountToValidatorInfo(peerAccount state.PeerAccountHandler) *state.ValidatorInfo { - if vsp.PeerAccountToValidatorInfoCalled != nil { - return vsp.PeerAccountToValidatorInfoCalled(peerAccount) - } - return nil -} - -// Process - -func (vsp *ValidatorStatisticsProcessorStub) Process(validatorInfo data.ShardValidatorInfoHandler) error { - if vsp.ProcessCalled != nil { - return vsp.ProcessCalled(validatorInfo) - } - - return nil -} - -// Commit - -func (vsp *ValidatorStatisticsProcessorStub) Commit() ([]byte, error) { - if vsp.CommitCalled != nil { - return vsp.CommitCalled() - } - - return nil, nil -} - -// ResetValidatorStatisticsAtNewEpoch - -func (vsp *ValidatorStatisticsProcessorStub) ResetValidatorStatisticsAtNewEpoch(vInfos map[uint32][]*state.ValidatorInfo) error { - if vsp.ResetValidatorStatisticsAtNewEpochCalled != nil { - return vsp.ResetValidatorStatisticsAtNewEpochCalled(vInfos) - } - return nil -} - -// GetValidatorInfoForRootHash - -func (vsp *ValidatorStatisticsProcessorStub) GetValidatorInfoForRootHash(rootHash []byte) (map[uint32][]*state.ValidatorInfo, error) { - if vsp.GetValidatorInfoForRootHashCalled != nil { - return vsp.GetValidatorInfoForRootHashCalled(rootHash) - } - return nil, nil -} - -// UpdatePeerState - -func (vsp *ValidatorStatisticsProcessorStub) UpdatePeerState(header data.MetaHeaderHandler, _ map[string]data.HeaderHandler) ([]byte, error) { - if vsp.UpdatePeerStateCalled != nil { - return vsp.UpdatePeerStateCalled(header) - } - return nil, nil -} - -// ProcessRatingsEndOfEpoch - -func (vsp *ValidatorStatisticsProcessorStub) ProcessRatingsEndOfEpoch(validatorInfos map[uint32][]*state.ValidatorInfo, epoch uint32) error { - if vsp.ProcessRatingsEndOfEpochCalled != nil { - return vsp.ProcessRatingsEndOfEpochCalled(validatorInfos, epoch) - } - return nil -} - -// RevertPeerState - -func (vsp *ValidatorStatisticsProcessorStub) RevertPeerState(header data.MetaHeaderHandler) error { - if vsp.RevertPeerStateCalled != nil { - return vsp.RevertPeerStateCalled(header) - } - return nil -} - -// RootHash - -func (vsp *ValidatorStatisticsProcessorStub) RootHash() ([]byte, error) { - if vsp.RootHashCalled != nil { - return vsp.RootHashCalled() - } - return nil, nil -} - -// SetLastFinalizedRootHash - -func (vsp *ValidatorStatisticsProcessorStub) SetLastFinalizedRootHash(_ []byte) { -} - -// LastFinalizedRootHash - -func (vsp *ValidatorStatisticsProcessorStub) LastFinalizedRootHash() []byte { - return nil -} - -// GetPeerAccount - -func (vsp *ValidatorStatisticsProcessorStub) GetPeerAccount(address []byte) (state.PeerAccountHandler, error) { - if vsp.GetPeerAccountCalled != nil { - return vsp.GetPeerAccountCalled(address) - } - - return nil, nil -} - -// DisplayRatings - -func (vsp *ValidatorStatisticsProcessorStub) DisplayRatings(_ uint32) { -} - -// SaveNodesCoordinatorUpdates - -func (vsp *ValidatorStatisticsProcessorStub) SaveNodesCoordinatorUpdates(epoch uint32) (bool, error) { - if 
vsp.SaveNodesCoordinatorUpdatesCalled != nil { - return vsp.SaveNodesCoordinatorUpdatesCalled(epoch) - } - return false, nil -} - -// IsInterfaceNil - -func (vsp *ValidatorStatisticsProcessorStub) IsInterfaceNil() bool { - return false -} diff --git a/factory/mock/validatorsProviderStub.go b/factory/mock/validatorsProviderStub.go deleted file mode 100644 index 98ea652340b..00000000000 --- a/factory/mock/validatorsProviderStub.go +++ /dev/null @@ -1,28 +0,0 @@ -package mock - -import ( - "github.com/multiversx/mx-chain-core-go/data/validator" -) - -// ValidatorsProviderStub - -type ValidatorsProviderStub struct { - GetLatestValidatorsCalled func() map[string]*validator.ValidatorStatistics -} - -// GetLatestValidators - -func (vp *ValidatorsProviderStub) GetLatestValidators() map[string]*validator.ValidatorStatistics { - if vp.GetLatestValidatorsCalled != nil { - return vp.GetLatestValidatorsCalled() - } - return nil -} - -// Close - -func (vp *ValidatorsProviderStub) Close() error { - return nil -} - -// IsInterfaceNil - -func (vp *ValidatorsProviderStub) IsInterfaceNil() bool { - return vp == nil -} diff --git a/factory/processing/blockProcessorCreator.go b/factory/processing/blockProcessorCreator.go index 7bccd5d8af0..7db9e20cf7d 100644 --- a/factory/processing/blockProcessorCreator.go +++ b/factory/processing/blockProcessorCreator.go @@ -12,7 +12,9 @@ import ( debugFactory "github.com/multiversx/mx-chain-go/debug/factory" "github.com/multiversx/mx-chain-go/epochStart" metachainEpochStart "github.com/multiversx/mx-chain-go/epochStart/metachain" + "github.com/multiversx/mx-chain-go/epochStart/notifier" mainFactory "github.com/multiversx/mx-chain-go/factory" + factoryDisabled "github.com/multiversx/mx-chain-go/factory/disabled" "github.com/multiversx/mx-chain-go/genesis" "github.com/multiversx/mx-chain-go/outport" processOutport "github.com/multiversx/mx-chain-go/outport/process" @@ -65,6 +67,7 @@ func (pcf *processComponentsFactory) newBlockProcessor( receiptsRepository mainFactory.ReceiptsRepository, blockCutoffProcessingHandler cutoff.BlockProcessingCutoffHandler, missingTrieNodesNotifier common.MissingTrieNodesNotifier, + sentSignaturesTracker process.SentSignaturesTracker, ) (*blockProcessorAndVmFactories, error) { shardCoordinator := pcf.bootstrapComponents.ShardCoordinator() if shardCoordinator.SelfId() < shardCoordinator.NumberOfShards() { @@ -82,6 +85,7 @@ func (pcf *processComponentsFactory) newBlockProcessor( receiptsRepository, blockCutoffProcessingHandler, missingTrieNodesNotifier, + sentSignaturesTracker, ) } if shardCoordinator.SelfId() == core.MetachainShardId { @@ -99,6 +103,7 @@ func (pcf *processComponentsFactory) newBlockProcessor( processedMiniBlocksTracker, receiptsRepository, blockCutoffProcessingHandler, + sentSignaturesTracker, ) } @@ -121,6 +126,7 @@ func (pcf *processComponentsFactory) newShardBlockProcessor( receiptsRepository mainFactory.ReceiptsRepository, blockProcessingCutoffHandler cutoff.BlockProcessingCutoffHandler, missingTrieNodesNotifier common.MissingTrieNodesNotifier, + sentSignaturesTracker process.SentSignaturesTracker, ) (*blockProcessorAndVmFactories, error) { argsParser := smartContract.NewArgumentParser() @@ -226,11 +232,7 @@ func (pcf *processComponentsFactory) newShardBlockProcessor( return nil, err } - txFeeHandler, err := postprocess.NewFeeAccumulator() - if err != nil { - return nil, err - } - + txFeeHandler := postprocess.NewFeeAccumulator() argsNewScProcessor := scrCommon.ArgsNewSmartContractProcessor{ VmContainer: vmContainer, 
ArgsParser: argsParser, @@ -432,6 +434,7 @@ func (pcf *processComponentsFactory) newShardBlockProcessor( OutportDataProvider: outportDataProvider, BlockProcessingCutoffHandler: blockProcessingCutoffHandler, ManagedPeersHolder: pcf.crypto.ManagedPeersHolder(), + SentSignaturesTracker: sentSignaturesTracker, } arguments := block.ArgShardProcessor{ ArgBaseProcessor: argumentsBaseProcessor, @@ -447,10 +450,15 @@ func (pcf *processComponentsFactory) newShardBlockProcessor( return nil, err } - return &blockProcessorAndVmFactories{ + blockProcessorComponents := &blockProcessorAndVmFactories{ blockProcessor: blockProcessor, vmFactoryForProcessing: vmFactory, - }, nil + } + + pcf.stakingDataProviderAPI = factoryDisabled.NewDisabledStakingDataProvider() + pcf.auctionListSelectorAPI = factoryDisabled.NewDisabledAuctionListSelector() + + return blockProcessorComponents, nil } func (pcf *processComponentsFactory) newMetaBlockProcessor( @@ -467,6 +475,7 @@ func (pcf *processComponentsFactory) newMetaBlockProcessor( processedMiniBlocksTracker process.ProcessedMiniBlocksTracker, receiptsRepository mainFactory.ReceiptsRepository, blockProcessingCutoffhandler cutoff.BlockProcessingCutoffHandler, + sentSignaturesTracker process.SentSignaturesTracker, ) (*blockProcessorAndVmFactories, error) { builtInFuncFactory, err := pcf.createBuiltInFunctionContainer(pcf.state.AccountsAdapter(), make(map[string]struct{})) if err != nil { @@ -550,11 +559,7 @@ func (pcf *processComponentsFactory) newMetaBlockProcessor( return nil, err } - txFeeHandler, err := postprocess.NewFeeAccumulator() - if err != nil { - return nil, err - } - + txFeeHandler := postprocess.NewFeeAccumulator() enableEpochs := pcf.epochConfig.EnableEpochs argsNewScProcessor := scrCommon.ArgsNewSmartContractProcessor{ VmContainer: vmContainer, @@ -754,8 +759,14 @@ func (pcf *processComponentsFactory) newMetaBlockProcessor( return nil, err } + argsStakingDataProvider := metachainEpochStart.StakingDataProviderArgs{ + EnableEpochsHandler: pcf.coreData.EnableEpochsHandler(), + SystemVM: systemVM, + MinNodePrice: pcf.systemSCConfig.StakingSystemSCConfig.GenesisNodePrice, + } + // TODO: in case of changing the minimum node price, make sure to update the staking data provider - stakingDataProvider, err := metachainEpochStart.NewStakingDataProvider(systemVM, pcf.systemSCConfig.StakingSystemSCConfig.GenesisNodePrice) + stakingDataProvider, err := metachainEpochStart.NewStakingDataProvider(argsStakingDataProvider) if err != nil { return nil, err } @@ -770,6 +781,13 @@ func (pcf *processComponentsFactory) newMetaBlockProcessor( return nil, err } + stakingDataProviderAPI, err := metachainEpochStart.NewStakingDataProvider(argsStakingDataProvider) + if err != nil { + return nil, err + } + + pcf.stakingDataProviderAPI = stakingDataProviderAPI + argsEpochRewards := metachainEpochStart.RewardsCreatorProxyArgs{ BaseRewardsCreatorArgs: metachainEpochStart.BaseRewardsCreatorArgs{ ShardCoordinator: pcf.bootstrapComponents.ShardCoordinator(), @@ -852,6 +870,7 @@ func (pcf *processComponentsFactory) newMetaBlockProcessor( OutportDataProvider: outportDataProvider, BlockProcessingCutoffHandler: blockProcessingCutoffhandler, ManagedPeersHolder: pcf.crypto.ManagedPeersHolder(), + SentSignaturesTracker: sentSignaturesTracker, } esdtOwnerAddress, err := pcf.coreData.AddressPubKeyConverter().Decode(pcf.systemSCConfig.ESDTSystemSCConfig.OwnerAddress) @@ -860,25 +879,79 @@ func (pcf *processComponentsFactory) newMetaBlockProcessor( "in processComponentsFactory.newMetaBlockProcessor", 
err) } + maxNodesChangeConfigProvider, err := notifier.NewNodesConfigProvider( + pcf.epochNotifier, + enableEpochs.MaxNodesChangeEnableEpoch, + ) + if err != nil { + return nil, err + } + + argsAuctionListDisplayer := metachainEpochStart.ArgsAuctionListDisplayer{ + TableDisplayHandler: metachainEpochStart.NewTableDisplayer(), + ValidatorPubKeyConverter: pcf.coreData.ValidatorPubKeyConverter(), + AddressPubKeyConverter: pcf.coreData.AddressPubKeyConverter(), + AuctionConfig: pcf.systemSCConfig.SoftAuctionConfig, + Denomination: pcf.economicsConfig.GlobalSettings.Denomination, + } + auctionListDisplayer, err := metachainEpochStart.NewAuctionListDisplayer(argsAuctionListDisplayer) + if err != nil { + return nil, err + } + + argsAuctionListSelector := metachainEpochStart.AuctionListSelectorArgs{ + ShardCoordinator: pcf.bootstrapComponents.ShardCoordinator(), + StakingDataProvider: stakingDataProvider, + MaxNodesChangeConfigProvider: maxNodesChangeConfigProvider, + AuctionListDisplayHandler: auctionListDisplayer, + SoftAuctionConfig: pcf.systemSCConfig.SoftAuctionConfig, + Denomination: pcf.economicsConfig.GlobalSettings.Denomination, + } + auctionListSelector, err := metachainEpochStart.NewAuctionListSelector(argsAuctionListSelector) + if err != nil { + return nil, err + } + + maxNodesChangeConfigProviderAPI, err := notifier.NewNodesConfigProviderAPI(pcf.epochNotifier, pcf.epochConfig.EnableEpochs) + if err != nil { + return nil, err + } + argsAuctionListSelectorAPI := metachainEpochStart.AuctionListSelectorArgs{ + ShardCoordinator: pcf.bootstrapComponents.ShardCoordinator(), + StakingDataProvider: stakingDataProviderAPI, + MaxNodesChangeConfigProvider: maxNodesChangeConfigProviderAPI, + SoftAuctionConfig: pcf.systemSCConfig.SoftAuctionConfig, + Denomination: pcf.economicsConfig.GlobalSettings.Denomination, + AuctionListDisplayHandler: factoryDisabled.NewDisabledAuctionListDisplayer(), + } + auctionListSelectorAPI, err := metachainEpochStart.NewAuctionListSelector(argsAuctionListSelectorAPI) + if err != nil { + return nil, err + } + + pcf.auctionListSelectorAPI = auctionListSelectorAPI + argsEpochSystemSC := metachainEpochStart.ArgsNewEpochStartSystemSCProcessing{ - SystemVM: systemVM, - UserAccountsDB: pcf.state.AccountsAdapter(), - PeerAccountsDB: pcf.state.PeerAccounts(), - Marshalizer: pcf.coreData.InternalMarshalizer(), - StartRating: pcf.coreData.RatingsData().StartRating(), - ValidatorInfoCreator: validatorStatisticsProcessor, - EndOfEpochCallerAddress: vm.EndOfEpochAddress, - StakingSCAddress: vm.StakingSCAddress, - ChanceComputer: pcf.coreData.Rater(), - EpochNotifier: pcf.coreData.EpochNotifier(), - GenesisNodesConfig: pcf.coreData.GenesisNodesSetup(), - MaxNodesEnableConfig: enableEpochs.MaxNodesChangeEnableEpoch, - StakingDataProvider: stakingDataProvider, - NodesConfigProvider: pcf.nodesCoordinator, - ShardCoordinator: pcf.bootstrapComponents.ShardCoordinator(), - ESDTOwnerAddressBytes: esdtOwnerAddress, - EnableEpochsHandler: pcf.coreData.EnableEpochsHandler(), + SystemVM: systemVM, + UserAccountsDB: pcf.state.AccountsAdapter(), + PeerAccountsDB: pcf.state.PeerAccounts(), + Marshalizer: pcf.coreData.InternalMarshalizer(), + StartRating: pcf.coreData.RatingsData().StartRating(), + ValidatorInfoCreator: validatorStatisticsProcessor, + EndOfEpochCallerAddress: vm.EndOfEpochAddress, + StakingSCAddress: vm.StakingSCAddress, + ChanceComputer: pcf.coreData.Rater(), + EpochNotifier: pcf.coreData.EpochNotifier(), + GenesisNodesConfig: pcf.coreData.GenesisNodesSetup(), + 
StakingDataProvider: stakingDataProvider, + NodesConfigProvider: pcf.nodesCoordinator, + ShardCoordinator: pcf.bootstrapComponents.ShardCoordinator(), + ESDTOwnerAddressBytes: esdtOwnerAddress, + EnableEpochsHandler: pcf.coreData.EnableEpochsHandler(), + MaxNodesChangeConfigProvider: maxNodesChangeConfigProvider, + AuctionListSelector: auctionListSelector, } + epochStartSystemSCProcessor, err := metachainEpochStart.NewSystemSCProcessor(argsEpochSystemSC) if err != nil { return nil, err @@ -1068,6 +1141,7 @@ func (pcf *processComponentsFactory) createVMFactoryMeta( ChanceComputer: pcf.coreData.Rater(), ShardCoordinator: pcf.bootstrapComponents.ShardCoordinator(), EnableEpochsHandler: pcf.coreData.EnableEpochsHandler(), + NodesCoordinator: pcf.nodesCoordinator, } return metachain.NewVMContainerFactory(argsNewVMContainer) } diff --git a/factory/processing/blockProcessorCreator_test.go b/factory/processing/blockProcessorCreator_test.go index fc104af5ed5..92941e4778e 100644 --- a/factory/processing/blockProcessorCreator_test.go +++ b/factory/processing/blockProcessorCreator_test.go @@ -20,6 +20,7 @@ import ( "github.com/multiversx/mx-chain-go/testscommon" componentsMock "github.com/multiversx/mx-chain-go/testscommon/components" "github.com/multiversx/mx-chain-go/testscommon/hashingMocks" + "github.com/multiversx/mx-chain-go/testscommon/processMocks" stateMock "github.com/multiversx/mx-chain-go/testscommon/state" storageManager "github.com/multiversx/mx-chain-go/testscommon/storage" trieMock "github.com/multiversx/mx-chain-go/testscommon/trie" @@ -41,10 +42,10 @@ func Test_newBlockProcessorCreatorForShard(t *testing.T) { bp, err := pcf.NewBlockProcessor( &testscommon.RequestHandlerStub{}, - &mock.ForkDetectorStub{}, + &processMocks.ForkDetectorStub{}, &mock.EpochStartTriggerStub{}, &mock.BoostrapStorerStub{}, - &mock.ValidatorStatisticsProcessorStub{}, + &testscommon.ValidatorStatisticsProcessorStub{}, &mock.HeaderValidatorStub{}, &mock.BlockTrackerStub{}, &mock.PendingMiniBlocksHandlerStub{}, @@ -54,6 +55,7 @@ func Test_newBlockProcessorCreatorForShard(t *testing.T) { &testscommon.ReceiptsRepositoryStub{}, &testscommon.BlockProcessingCutoffStub{}, &testscommon.MissingTrieNodesNotifierStub{}, + &testscommon.SentSignatureTrackerStub{}, ) require.NoError(t, err) @@ -166,10 +168,10 @@ func Test_newBlockProcessorCreatorForMeta(t *testing.T) { bp, err := pcf.NewBlockProcessor( &testscommon.RequestHandlerStub{}, - &mock.ForkDetectorStub{}, + &processMocks.ForkDetectorStub{}, &mock.EpochStartTriggerStub{}, &mock.BoostrapStorerStub{}, - &mock.ValidatorStatisticsProcessorStub{}, + &testscommon.ValidatorStatisticsProcessorStub{}, &mock.HeaderValidatorStub{}, &mock.BlockTrackerStub{}, &mock.PendingMiniBlocksHandlerStub{}, @@ -179,6 +181,7 @@ func Test_newBlockProcessorCreatorForMeta(t *testing.T) { &testscommon.ReceiptsRepositoryStub{}, &testscommon.BlockProcessingCutoffStub{}, &testscommon.MissingTrieNodesNotifierStub{}, + &testscommon.SentSignatureTrackerStub{}, ) require.NoError(t, err) diff --git a/factory/processing/export_test.go b/factory/processing/export_test.go index 3187bd729b1..50c5123634c 100644 --- a/factory/processing/export_test.go +++ b/factory/processing/export_test.go @@ -24,6 +24,7 @@ func (pcf *processComponentsFactory) NewBlockProcessor( receiptsRepository factory.ReceiptsRepository, blockProcessingCutoff cutoff.BlockProcessingCutoffHandler, missingTrieNodesNotifier common.MissingTrieNodesNotifier, + sentSignaturesTracker process.SentSignaturesTracker, ) (process.BlockProcessor, 
error) { blockProcessorComponents, err := pcf.newBlockProcessor( requestHandler, @@ -40,6 +41,7 @@ func (pcf *processComponentsFactory) NewBlockProcessor( receiptsRepository, blockProcessingCutoff, missingTrieNodesNotifier, + sentSignaturesTracker, ) if err != nil { return nil, err diff --git a/factory/processing/processComponents.go b/factory/processing/processComponents.go index 3669e9a29c5..72d75c69dc3 100644 --- a/factory/processing/processComponents.go +++ b/factory/processing/processComponents.go @@ -130,6 +130,7 @@ type processComponents struct { esdtDataStorageForApi vmcommon.ESDTNFTStorageHandler accountsParser genesis.AccountsParser receiptsRepository mainFactory.ReceiptsRepository + sentSignaturesTracker process.SentSignaturesTracker } // ProcessComponentsFactoryArgs holds the arguments needed to create a process components factory @@ -139,6 +140,7 @@ type ProcessComponentsFactoryArgs struct { EpochConfig config.EpochConfig PrefConfigs config.Preferences ImportDBConfig config.ImportDbConfig + EconomicsConfig config.EconomicsConfig AccountsParser genesis.AccountsParser SmartContractParser genesis.InitialSmartContractParser GasSchedule core.GasScheduleNotifier @@ -161,6 +163,9 @@ type ProcessComponentsFactoryArgs struct { StatusComponents factory.StatusComponentsHolder StatusCoreComponents factory.StatusCoreComponentsHolder TxExecutionOrderHandler common.TxExecutionOrderHandler + + GenesisNonce uint64 + GenesisRound uint64 } type processComponentsFactory struct { @@ -169,6 +174,7 @@ type processComponentsFactory struct { epochConfig config.EpochConfig prefConfigs config.Preferences importDBConfig config.ImportDbConfig + economicsConfig config.EconomicsConfig accountsParser genesis.AccountsParser smartContractParser genesis.InitialSmartContractParser gasSchedule core.GasScheduleNotifier @@ -185,6 +191,8 @@ type processComponentsFactory struct { importHandler update.ImportHandler flagsConfig config.ContextFlagsConfig esdtNftStorage vmcommon.ESDTNFTStorageHandler + stakingDataProviderAPI peer.StakingDataProviderAPI + auctionListSelectorAPI epochStart.AuctionListSelector data factory.DataComponentsHolder coreData factory.CoreComponentsHolder @@ -195,6 +203,9 @@ type processComponentsFactory struct { statusComponents factory.StatusComponentsHolder statusCoreComponents factory.StatusCoreComponentsHolder txExecutionOrderHandler common.TxExecutionOrderHandler + + genesisNonce uint64 + genesisRound uint64 } // NewProcessComponentsFactory will return a new instance of processComponentsFactory @@ -209,6 +220,7 @@ func NewProcessComponentsFactory(args ProcessComponentsFactoryArgs) (*processCom epochConfig: args.EpochConfig, prefConfigs: args.PrefConfigs, importDBConfig: args.ImportDBConfig, + economicsConfig: args.EconomicsConfig, accountsParser: args.AccountsParser, smartContractParser: args.SmartContractParser, gasSchedule: args.GasSchedule, @@ -231,6 +243,9 @@ func NewProcessComponentsFactory(args ProcessComponentsFactoryArgs) (*processCom statusCoreComponents: args.StatusCoreComponents, flagsConfig: args.FlagsConfig, txExecutionOrderHandler: args.TxExecutionOrderHandler, + genesisNonce: args.GenesisNonce, + genesisRound: args.GenesisRound, + roundConfig: args.RoundConfig, }, nil } @@ -402,30 +417,6 @@ func (pcf *processComponentsFactory) Create() (*processComponents, error) { return nil, err } - startEpochNum := pcf.bootstrapComponents.EpochBootstrapParams().Epoch() - if startEpochNum == 0 { - err = pcf.indexGenesisBlocks(genesisBlocks, initialTxs, genesisAccounts) - if err != nil { - 
return nil, err - } - } - - cacheRefreshDuration := time.Duration(pcf.config.ValidatorStatistics.CacheRefreshIntervalInSec) * time.Second - argVSP := peer.ArgValidatorsProvider{ - NodesCoordinator: pcf.nodesCoordinator, - StartEpoch: startEpochNum, - EpochStartEventNotifier: pcf.coreData.EpochStartNotifierWithConfirm(), - CacheRefreshIntervalDurationInSec: cacheRefreshDuration, - ValidatorStatistics: validatorStatisticsProcessor, - MaxRating: pcf.maxRating, - PubKeyConverter: pcf.coreData.ValidatorPubKeyConverter(), - } - - validatorsProvider, err := peer.NewValidatorsProvider(argVSP) - if err != nil { - return nil, err - } - epochStartTrigger, err := pcf.newEpochStartTrigger(requestHandler) if err != nil { return nil, err @@ -606,6 +597,11 @@ func (pcf *processComponentsFactory) Create() (*processComponents, error) { return nil, err } + sentSignaturesTracker, err := track.NewSentSignaturesTracker(pcf.crypto.KeysHandler()) + if err != nil { + return nil, fmt.Errorf("%w when assembling components for the sent signatures tracker", err) + } + blockProcessorComponents, err := pcf.newBlockProcessor( requestHandler, forkDetector, @@ -621,11 +617,39 @@ func (pcf *processComponentsFactory) Create() (*processComponents, error) { receiptsRepository, blockCutoffProcessingHandler, pcf.state.MissingTrieNodesNotifier(), + sentSignaturesTracker, ) if err != nil { return nil, err } + startEpochNum := pcf.bootstrapComponents.EpochBootstrapParams().Epoch() + if startEpochNum == 0 { + err = pcf.indexGenesisBlocks(genesisBlocks, initialTxs, genesisAccounts) + if err != nil { + return nil, err + } + } + + cacheRefreshDuration := time.Duration(pcf.config.ValidatorStatistics.CacheRefreshIntervalInSec) * time.Second + argVSP := peer.ArgValidatorsProvider{ + NodesCoordinator: pcf.nodesCoordinator, + StartEpoch: startEpochNum, + EpochStartEventNotifier: pcf.coreData.EpochStartNotifierWithConfirm(), + CacheRefreshIntervalDurationInSec: cacheRefreshDuration, + ValidatorStatistics: validatorStatisticsProcessor, + MaxRating: pcf.maxRating, + ValidatorPubKeyConverter: pcf.coreData.ValidatorPubKeyConverter(), + AddressPubKeyConverter: pcf.coreData.AddressPubKeyConverter(), + AuctionListSelector: pcf.auctionListSelectorAPI, + StakingDataProvider: pcf.stakingDataProviderAPI, + } + + validatorsProvider, err := peer.NewValidatorsProvider(argVSP) + if err != nil { + return nil, err + } + conversionBase := 10 genesisNodePrice, ok := big.NewInt(0).SetString(pcf.systemSCConfig.StakingSystemSCConfig.GenesisNodePrice, conversionBase) if !ok { @@ -734,11 +758,11 @@ func (pcf *processComponentsFactory) Create() (*processComponents, error) { esdtDataStorageForApi: pcf.esdtNftStorage, accountsParser: pcf.accountsParser, receiptsRepository: receiptsRepository, + sentSignaturesTracker: sentSignaturesTracker, }, nil } func (pcf *processComponentsFactory) newValidatorStatisticsProcessor() (process.ValidatorStatisticsProcessor, error) { - storageService := pcf.data.StorageService() var peerDataPool peer.DataPool = pcf.data.Datapool() @@ -802,21 +826,22 @@ func (pcf *processComponentsFactory) newEpochStartTrigger(requestHandler epochSt } argEpochStart := &shardchain.ArgsShardEpochStartTrigger{ - Marshalizer: pcf.coreData.InternalMarshalizer(), - Hasher: pcf.coreData.Hasher(), - HeaderValidator: headerValidator, - Uint64Converter: pcf.coreData.Uint64ByteSliceConverter(), - DataPool: pcf.data.Datapool(), - Storage: pcf.data.StorageService(), - RequestHandler: requestHandler, - Epoch: pcf.bootstrapComponents.EpochBootstrapParams().Epoch(), - 
EpochStartNotifier: pcf.coreData.EpochStartNotifierWithConfirm(), - Validity: process.MetaBlockValidity, - Finality: process.BlockFinality, - PeerMiniBlocksSyncer: peerMiniBlockSyncer, - RoundHandler: pcf.coreData.RoundHandler(), - AppStatusHandler: pcf.statusCoreComponents.AppStatusHandler(), - EnableEpochsHandler: pcf.coreData.EnableEpochsHandler(), + Marshalizer: pcf.coreData.InternalMarshalizer(), + Hasher: pcf.coreData.Hasher(), + HeaderValidator: headerValidator, + Uint64Converter: pcf.coreData.Uint64ByteSliceConverter(), + DataPool: pcf.data.Datapool(), + Storage: pcf.data.StorageService(), + RequestHandler: requestHandler, + Epoch: pcf.bootstrapComponents.EpochBootstrapParams().Epoch(), + EpochStartNotifier: pcf.coreData.EpochStartNotifierWithConfirm(), + Validity: process.MetaBlockValidity, + Finality: process.BlockFinality, + PeerMiniBlocksSyncer: peerMiniBlockSyncer, + RoundHandler: pcf.coreData.RoundHandler(), + AppStatusHandler: pcf.statusCoreComponents.AppStatusHandler(), + EnableEpochsHandler: pcf.coreData.EnableEpochsHandler(), + ExtraDelayForRequestBlockInfo: time.Duration(pcf.config.EpochStartConfig.ExtraDelayForRequestBlockInfoInMilliseconds) * time.Millisecond, } return shardchain.NewEpochStartTrigger(argEpochStart) } @@ -872,13 +897,17 @@ func (pcf *processComponentsFactory) generateGenesisHeadersAndApplyInitialBalanc HardForkConfig: pcf.config.Hardfork, TrieStorageManagers: pcf.state.TrieStorageManagers(), SystemSCConfig: *pcf.systemSCConfig, - RoundConfig: &pcf.roundConfig, - EpochConfig: &pcf.epochConfig, + RoundConfig: pcf.roundConfig, + EpochConfig: pcf.epochConfig, + HeaderVersionConfigs: pcf.config.Versions, BlockSignKeyGen: pcf.crypto.BlockSignKeyGen(), HistoryRepository: pcf.historyRepo, GenesisNodePrice: genesisNodePrice, GenesisString: pcf.config.GeneralSettings.GenesisString, TxExecutionOrderHandler: pcf.txExecutionOrderHandler, + GenesisEpoch: pcf.config.EpochStartConfig.GenesisEpoch, + GenesisNonce: pcf.genesisNonce, + GenesisRound: pcf.genesisRound, } gbc, err := processGenesis.NewGenesisBlockCreator(arg) @@ -1357,23 +1386,24 @@ func (pcf *processComponentsFactory) newShardResolverContainerFactory( } resolversContainerFactoryArgs := resolverscontainer.FactoryArgs{ - ShardCoordinator: pcf.bootstrapComponents.ShardCoordinator(), - MainMessenger: pcf.network.NetworkMessenger(), - FullArchiveMessenger: pcf.network.FullArchiveNetworkMessenger(), - Store: pcf.data.StorageService(), - Marshalizer: pcf.coreData.InternalMarshalizer(), - DataPools: pcf.data.Datapool(), - Uint64ByteSliceConverter: pcf.coreData.Uint64ByteSliceConverter(), - DataPacker: dataPacker, - TriesContainer: pcf.state.TriesContainer(), - SizeCheckDelta: pcf.config.Marshalizer.SizeCheckDelta, - InputAntifloodHandler: pcf.network.InputAntiFloodHandler(), - OutputAntifloodHandler: pcf.network.OutputAntiFloodHandler(), - NumConcurrentResolvingJobs: pcf.config.Antiflood.NumConcurrentResolverJobs, - IsFullHistoryNode: pcf.prefConfigs.Preferences.FullArchive, - MainPreferredPeersHolder: pcf.network.PreferredPeersHolderHandler(), - FullArchivePreferredPeersHolder: pcf.network.FullArchivePreferredPeersHolderHandler(), - PayloadValidator: payloadValidator, + ShardCoordinator: pcf.bootstrapComponents.ShardCoordinator(), + MainMessenger: pcf.network.NetworkMessenger(), + FullArchiveMessenger: pcf.network.FullArchiveNetworkMessenger(), + Store: pcf.data.StorageService(), + Marshalizer: pcf.coreData.InternalMarshalizer(), + DataPools: pcf.data.Datapool(), + Uint64ByteSliceConverter: 
pcf.coreData.Uint64ByteSliceConverter(), + DataPacker: dataPacker, + TriesContainer: pcf.state.TriesContainer(), + SizeCheckDelta: pcf.config.Marshalizer.SizeCheckDelta, + InputAntifloodHandler: pcf.network.InputAntiFloodHandler(), + OutputAntifloodHandler: pcf.network.OutputAntiFloodHandler(), + NumConcurrentResolvingJobs: pcf.config.Antiflood.NumConcurrentResolverJobs, + NumConcurrentResolvingTrieNodesJobs: pcf.config.Antiflood.NumConcurrentResolvingTrieNodesJobs, + IsFullHistoryNode: pcf.prefConfigs.Preferences.FullArchive, + MainPreferredPeersHolder: pcf.network.PreferredPeersHolderHandler(), + FullArchivePreferredPeersHolder: pcf.network.FullArchivePreferredPeersHolderHandler(), + PayloadValidator: payloadValidator, } resolversContainerFactory, err := resolverscontainer.NewShardResolversContainerFactory(resolversContainerFactoryArgs) if err != nil { @@ -1393,23 +1423,24 @@ func (pcf *processComponentsFactory) newMetaResolverContainerFactory( } resolversContainerFactoryArgs := resolverscontainer.FactoryArgs{ - ShardCoordinator: pcf.bootstrapComponents.ShardCoordinator(), - MainMessenger: pcf.network.NetworkMessenger(), - FullArchiveMessenger: pcf.network.FullArchiveNetworkMessenger(), - Store: pcf.data.StorageService(), - Marshalizer: pcf.coreData.InternalMarshalizer(), - DataPools: pcf.data.Datapool(), - Uint64ByteSliceConverter: pcf.coreData.Uint64ByteSliceConverter(), - DataPacker: dataPacker, - TriesContainer: pcf.state.TriesContainer(), - SizeCheckDelta: pcf.config.Marshalizer.SizeCheckDelta, - InputAntifloodHandler: pcf.network.InputAntiFloodHandler(), - OutputAntifloodHandler: pcf.network.OutputAntiFloodHandler(), - NumConcurrentResolvingJobs: pcf.config.Antiflood.NumConcurrentResolverJobs, - IsFullHistoryNode: pcf.prefConfigs.Preferences.FullArchive, - MainPreferredPeersHolder: pcf.network.PreferredPeersHolderHandler(), - FullArchivePreferredPeersHolder: pcf.network.FullArchivePreferredPeersHolderHandler(), - PayloadValidator: payloadValidator, + ShardCoordinator: pcf.bootstrapComponents.ShardCoordinator(), + MainMessenger: pcf.network.NetworkMessenger(), + FullArchiveMessenger: pcf.network.FullArchiveNetworkMessenger(), + Store: pcf.data.StorageService(), + Marshalizer: pcf.coreData.InternalMarshalizer(), + DataPools: pcf.data.Datapool(), + Uint64ByteSliceConverter: pcf.coreData.Uint64ByteSliceConverter(), + DataPacker: dataPacker, + TriesContainer: pcf.state.TriesContainer(), + SizeCheckDelta: pcf.config.Marshalizer.SizeCheckDelta, + InputAntifloodHandler: pcf.network.InputAntiFloodHandler(), + OutputAntifloodHandler: pcf.network.OutputAntiFloodHandler(), + NumConcurrentResolvingJobs: pcf.config.Antiflood.NumConcurrentResolverJobs, + NumConcurrentResolvingTrieNodesJobs: pcf.config.Antiflood.NumConcurrentResolvingTrieNodesJobs, + IsFullHistoryNode: pcf.prefConfigs.Preferences.FullArchive, + MainPreferredPeersHolder: pcf.network.PreferredPeersHolderHandler(), + FullArchivePreferredPeersHolder: pcf.network.FullArchivePreferredPeersHolderHandler(), + PayloadValidator: payloadValidator, } return resolverscontainer.NewMetaResolversContainerFactory(resolversContainerFactoryArgs) diff --git a/factory/processing/processComponentsHandler.go b/factory/processing/processComponentsHandler.go index b544ba901ef..a5b71ca3b28 100644 --- a/factory/processing/processComponentsHandler.go +++ b/factory/processing/processComponentsHandler.go @@ -55,7 +55,7 @@ func (m *managedProcessComponents) Create() error { return nil } -// Close will close all underlying sub-components +// Close will close all 
underlying subcomponents func (m *managedProcessComponents) Close() error { m.mutProcessComponents.Lock() defer m.mutProcessComponents.Unlock() @@ -174,6 +174,9 @@ func (m *managedProcessComponents) CheckSubcomponents() error { if check.IfNil(m.processComponents.esdtDataStorageForApi) { return errors.ErrNilESDTDataStorage } + if check.IfNil(m.processComponents.sentSignaturesTracker) { + return errors.ErrNilSentSignatureTracker + } return nil } @@ -658,6 +661,18 @@ func (m *managedProcessComponents) ReceiptsRepository() factory.ReceiptsReposito return m.processComponents.receiptsRepository } +// SentSignaturesTracker returns the signature tracker +func (m *managedProcessComponents) SentSignaturesTracker() process.SentSignaturesTracker { + m.mutProcessComponents.RLock() + defer m.mutProcessComponents.RUnlock() + + if m.processComponents == nil { + return nil + } + + return m.processComponents.sentSignaturesTracker +} + // IsInterfaceNil returns true if the interface is nil func (m *managedProcessComponents) IsInterfaceNil() bool { return m == nil diff --git a/factory/processing/processComponentsHandler_test.go b/factory/processing/processComponentsHandler_test.go index 152b7637dc6..36638afacfd 100644 --- a/factory/processing/processComponentsHandler_test.go +++ b/factory/processing/processComponentsHandler_test.go @@ -92,6 +92,7 @@ func TestManagedProcessComponents_Create(t *testing.T) { require.True(t, check.IfNil(managedProcessComponents.ReceiptsRepository())) require.True(t, check.IfNil(managedProcessComponents.FullArchivePeerShardMapper())) require.True(t, check.IfNil(managedProcessComponents.FullArchiveInterceptorsContainer())) + require.True(t, check.IfNil(managedProcessComponents.SentSignaturesTracker())) err := managedProcessComponents.Create() require.NoError(t, err) @@ -135,6 +136,7 @@ func TestManagedProcessComponents_Create(t *testing.T) { require.False(t, check.IfNil(managedProcessComponents.ReceiptsRepository())) require.False(t, check.IfNil(managedProcessComponents.FullArchivePeerShardMapper())) require.False(t, check.IfNil(managedProcessComponents.FullArchiveInterceptorsContainer())) + require.False(t, check.IfNil(managedProcessComponents.SentSignaturesTracker())) require.Equal(t, factory.ProcessComponentsName, managedProcessComponents.String()) }) diff --git a/factory/processing/processComponents_test.go b/factory/processing/processComponents_test.go index e264b185dac..a1654ce3ba3 100644 --- a/factory/processing/processComponents_test.go +++ b/factory/processing/processComponents_test.go @@ -44,6 +44,7 @@ import ( "github.com/multiversx/mx-chain-go/testscommon/epochNotifier" factoryMocks "github.com/multiversx/mx-chain-go/testscommon/factory" "github.com/multiversx/mx-chain-go/testscommon/genericMocks" + nodesSetupMock "github.com/multiversx/mx-chain-go/testscommon/genesisMocks" "github.com/multiversx/mx-chain-go/testscommon/guardianMocks" "github.com/multiversx/mx-chain-go/testscommon/mainFactoryMocks" "github.com/multiversx/mx-chain-go/testscommon/marshallerMock" @@ -79,8 +80,19 @@ var ( func createMockProcessComponentsFactoryArgs() processComp.ProcessComponentsFactoryArgs { args := processComp.ProcessComponentsFactoryArgs{ - Config: testscommon.GetGeneralConfig(), - EpochConfig: config.EpochConfig{}, + Config: testscommon.GetGeneralConfig(), + EpochConfig: config.EpochConfig{ + EnableEpochs: config.EnableEpochs{ + MaxNodesChangeEnableEpoch: []config.MaxNodesChangeConfig{ + { + EpochEnable: 0, + MaxNumNodes: 100, + NodesToShufflePerShard: 2, + }, + }, + }, + }, + 
RoundConfig: testscommon.GetDefaultRoundsConfig(), PrefConfigs: config.Preferences{}, ImportDBConfig: config.ImportDbConfig{}, FlagsConfig: config.ContextFlagsConfig{ @@ -127,7 +139,7 @@ func createMockProcessComponentsFactoryArgs() processComp.ProcessComponentsFacto OwnerAddress: "erd1vxy22x0fj4zv6hktmydg8vpfh6euv02cz4yg0aaws6rrad5a5awqgqky80", }, StakingSystemSCConfig: config.StakingSystemSCConfig{ - GenesisNodePrice: "2500000000000000000000", + GenesisNodePrice: "2500", MinStakeValue: "1", UnJailValue: "1", MinStepValue: "1", @@ -138,6 +150,8 @@ func createMockProcessComponentsFactoryArgs() processComp.ProcessComponentsFacto MaxNumberOfNodesForStake: 10, ActivateBLSPubKeyMessageVerification: false, MinUnstakeTokensValue: "1", + NodeLimitPercentage: 100.0, + StakeLimitPercentage: 100.0, }, DelegationManagerSystemSCConfig: config.DelegationManagerSystemSCConfig{ MinCreationDeposit: "100", @@ -148,6 +162,12 @@ func createMockProcessComponentsFactoryArgs() processComp.ProcessComponentsFacto MinServiceFee: 0, MaxServiceFee: 100, }, + SoftAuctionConfig: config.SoftAuctionConfig{ + TopUpStep: "10", + MinTopUp: "1", + MaxTopUp: "32000000", + MaxNumberOfIterations: 100000, + }, }, ImportStartHandler: &testscommon.ImportStartHandlerStub{}, HistoryRepo: &dblookupext.HistoryRepositoryStub{}, @@ -170,7 +190,7 @@ func createMockProcessComponentsFactoryArgs() processComp.ProcessComponentsFacto UInt64ByteSliceConv: &testsMocks.Uint64ByteSliceConverterMock{}, AddrPubKeyConv: addrPubKeyConv, ValPubKeyConv: valPubKeyConv, - NodesConfig: &testscommon.NodesSetupStub{ + NodesConfig: &nodesSetupMock.NodesSetupStub{ GetShardConsensusGroupSizeCalled: func() uint32 { return 2 }, @@ -216,6 +236,7 @@ func createMockProcessComponentsFactoryArgs() processComp.ProcessComponentsFacto PeerSignHandler: &cryptoMocks.PeerSignatureHandlerStub{}, MsgSigVerifier: &testscommon.MessageSignVerifierMock{}, ManagedPeersHolderField: &testscommon.ManagedPeersHolderStub{}, + KeysHandlerField: &testscommon.KeysHandlerStub{}, }, Network: &testsMocks.NetworkComponentsStub{ Messenger: &p2pmocks.MessengerStub{}, @@ -243,7 +264,7 @@ func createMockProcessComponentsFactoryArgs() processComp.ProcessComponentsFacto TxExecutionOrderHandler: &txExecOrderStub.TxExecutionOrderHandlerStub{}, } - args.State = components.GetStateComponents(args.CoreData) + args.State = components.GetStateComponents(args.CoreData, args.StatusCoreComponents) return args } @@ -352,7 +373,7 @@ func TestNewProcessComponentsFactory(t *testing.T) { args := createMockProcessComponentsFactoryArgs() args.CoreData = &mock.CoreComponentsMock{ EconomicsHandler: &economicsmocks.EconomicsHandlerStub{}, - NodesConfig: &testscommon.NodesSetupStub{}, + NodesConfig: &nodesSetupMock.NodesSetupStub{}, AddrPubKeyConv: nil, } pcf, err := processComp.NewProcessComponentsFactory(args) @@ -365,7 +386,7 @@ func TestNewProcessComponentsFactory(t *testing.T) { args := createMockProcessComponentsFactoryArgs() args.CoreData = &mock.CoreComponentsMock{ EconomicsHandler: &economicsmocks.EconomicsHandlerStub{}, - NodesConfig: &testscommon.NodesSetupStub{}, + NodesConfig: &nodesSetupMock.NodesSetupStub{}, AddrPubKeyConv: &testscommon.PubkeyConverterStub{}, EpochChangeNotifier: nil, } @@ -379,7 +400,7 @@ func TestNewProcessComponentsFactory(t *testing.T) { args := createMockProcessComponentsFactoryArgs() args.CoreData = &mock.CoreComponentsMock{ EconomicsHandler: &economicsmocks.EconomicsHandlerStub{}, - NodesConfig: &testscommon.NodesSetupStub{}, + NodesConfig: &nodesSetupMock.NodesSetupStub{}, 
AddrPubKeyConv: &testscommon.PubkeyConverterStub{}, EpochChangeNotifier: &epochNotifier.EpochNotifierStub{}, ValPubKeyConv: nil, @@ -394,7 +415,7 @@ func TestNewProcessComponentsFactory(t *testing.T) { args := createMockProcessComponentsFactoryArgs() args.CoreData = &mock.CoreComponentsMock{ EconomicsHandler: &economicsmocks.EconomicsHandlerStub{}, - NodesConfig: &testscommon.NodesSetupStub{}, + NodesConfig: &nodesSetupMock.NodesSetupStub{}, AddrPubKeyConv: &testscommon.PubkeyConverterStub{}, EpochChangeNotifier: &epochNotifier.EpochNotifierStub{}, ValPubKeyConv: &testscommon.PubkeyConverterStub{}, @@ -410,7 +431,7 @@ func TestNewProcessComponentsFactory(t *testing.T) { args := createMockProcessComponentsFactoryArgs() args.CoreData = &mock.CoreComponentsMock{ EconomicsHandler: &economicsmocks.EconomicsHandlerStub{}, - NodesConfig: &testscommon.NodesSetupStub{}, + NodesConfig: &nodesSetupMock.NodesSetupStub{}, AddrPubKeyConv: &testscommon.PubkeyConverterStub{}, EpochChangeNotifier: &epochNotifier.EpochNotifierStub{}, ValPubKeyConv: &testscommon.PubkeyConverterStub{}, @@ -731,7 +752,7 @@ func TestProcessComponentsFactory_Create(t *testing.T) { args := createMockProcessComponentsFactoryArgs() coreCompStub := factoryMocks.NewCoreComponentsHolderStubFromRealComponent(args.CoreData) coreCompStub.GenesisNodesSetupCalled = func() sharding.GenesisNodesSetupHandler { - return &testscommon.NodesSetupStub{ + return &nodesSetupMock.NodesSetupStub{ AllInitialNodesCalled: func() []nodesCoordinator.GenesisNodeInfoHandler { return []nodesCoordinator.GenesisNodeInfoHandler{ &genesisMocks.GenesisNodeInfoHandlerMock{ diff --git a/factory/processing/txSimulatorProcessComponents.go b/factory/processing/txSimulatorProcessComponents.go index 2a5e8c5a7a2..257a46af1a5 100644 --- a/factory/processing/txSimulatorProcessComponents.go +++ b/factory/processing/txSimulatorProcessComponents.go @@ -79,6 +79,7 @@ func (pcf *processComponentsFactory) createAPITransactionEvaluator() (factory.Tr Accounts: simulationAccountsDB, ShardCoordinator: pcf.bootstrapComponents.ShardCoordinator(), EnableEpochsHandler: pcf.coreData.EnableEpochsHandler(), + BlockChain: pcf.data.Blockchain(), }) return apiTransactionEvaluator, vmContainerFactory, err @@ -141,6 +142,8 @@ func (pcf *processComponentsFactory) createArgsTxSimulatorProcessorForMeta( return args, nil, nil, err } + args.BlockChainHook = vmContainerFactory.BlockChainHookImpl() + vmContainer, err := vmContainerFactory.Create() if err != nil { return args, nil, nil, err @@ -301,6 +304,8 @@ func (pcf *processComponentsFactory) createArgsTxSimulatorProcessorShard( return args, nil, nil, err } + args.BlockChainHook = vmContainerFactory.BlockChainHookImpl() + err = builtInFuncFactory.SetPayableHandler(vmContainerFactory.BlockChainHookImpl()) if err != nil { return args, nil, nil, err diff --git a/factory/state/stateComponentsHandler_test.go b/factory/state/stateComponentsHandler_test.go index ba552ed416a..e73600180ff 100644 --- a/factory/state/stateComponentsHandler_test.go +++ b/factory/state/stateComponentsHandler_test.go @@ -27,7 +27,7 @@ func TestNewManagedStateComponents(t *testing.T) { t.Parallel() coreComponents := componentsMock.GetCoreComponents() - args := componentsMock.GetStateFactoryArgs(coreComponents) + args := componentsMock.GetStateFactoryArgs(coreComponents, componentsMock.GetStatusCoreComponents()) stateComponentsFactory, _ := stateComp.NewStateComponentsFactory(args) managedStateComponents, err := stateComp.NewManagedStateComponents(stateComponentsFactory) 
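// The state-components tests in this diff now pass a second argument to
// componentsMock.GetStateFactoryArgs (the status core components obtained via
// componentsMock.GetStatusCoreComponents()). A minimal sketch of what the updated helper
// is assumed to look like follows; the parameter types and any fields beyond Config, Core
// and StatusCore (the only ones these tests exercise) are assumptions, not the
// repository's actual testscommon/components code.
func GetStateFactoryArgs(coreComponents factory.CoreComponentsHolder, statusCoreComponents factory.StatusCoreComponentsHolder) stateComp.StateComponentsFactoryArgs {
	return stateComp.StateComponentsFactoryArgs{
		Config:     testscommon.GetGeneralConfig(),
		Core:       coreComponents,
		StatusCore: statusCoreComponents,
	}
}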
require.NoError(t, err) @@ -42,7 +42,7 @@ func TestManagedStateComponents_Create(t *testing.T) { t.Parallel() coreComponents := componentsMock.GetCoreComponents() - args := componentsMock.GetStateFactoryArgs(coreComponents) + args := componentsMock.GetStateFactoryArgs(coreComponents, componentsMock.GetStatusCoreComponents()) stateComponentsFactory, _ := stateComp.NewStateComponentsFactory(args) managedStateComponents, err := stateComp.NewManagedStateComponents(stateComponentsFactory) require.NoError(t, err) @@ -56,7 +56,7 @@ func TestManagedStateComponents_Create(t *testing.T) { t.Parallel() coreComponents := componentsMock.GetCoreComponents() - args := componentsMock.GetStateFactoryArgs(coreComponents) + args := componentsMock.GetStateFactoryArgs(coreComponents, componentsMock.GetStatusCoreComponents()) stateComponentsFactory, _ := stateComp.NewStateComponentsFactory(args) managedStateComponents, err := stateComp.NewManagedStateComponents(stateComponentsFactory) require.NoError(t, err) @@ -87,7 +87,7 @@ func TestManagedStateComponents_Close(t *testing.T) { t.Parallel() coreComponents := componentsMock.GetCoreComponents() - args := componentsMock.GetStateFactoryArgs(coreComponents) + args := componentsMock.GetStateFactoryArgs(coreComponents, componentsMock.GetStatusCoreComponents()) stateComponentsFactory, _ := stateComp.NewStateComponentsFactory(args) managedStateComponents, _ := stateComp.NewManagedStateComponents(stateComponentsFactory) require.NoError(t, managedStateComponents.Close()) @@ -102,7 +102,7 @@ func TestManagedStateComponents_CheckSubcomponents(t *testing.T) { t.Parallel() coreComponents := componentsMock.GetCoreComponents() - args := componentsMock.GetStateFactoryArgs(coreComponents) + args := componentsMock.GetStateFactoryArgs(coreComponents, componentsMock.GetStatusCoreComponents()) stateComponentsFactory, _ := stateComp.NewStateComponentsFactory(args) managedStateComponents, _ := stateComp.NewManagedStateComponents(stateComponentsFactory) err := managedStateComponents.CheckSubcomponents() @@ -121,7 +121,7 @@ func TestManagedStateComponents_Setters(t *testing.T) { t.Parallel() coreComponents := componentsMock.GetCoreComponents() - args := componentsMock.GetStateFactoryArgs(coreComponents) + args := componentsMock.GetStateFactoryArgs(coreComponents, componentsMock.GetStatusCoreComponents()) stateComponentsFactory, _ := stateComp.NewStateComponentsFactory(args) managedStateComponents, _ := stateComp.NewManagedStateComponents(stateComponentsFactory) err := managedStateComponents.Create() @@ -153,7 +153,7 @@ func TestManagedStateComponents_IsInterfaceNil(t *testing.T) { require.True(t, managedStateComponents.IsInterfaceNil()) coreComponents := componentsMock.GetCoreComponents() - args := componentsMock.GetStateFactoryArgs(coreComponents) + args := componentsMock.GetStateFactoryArgs(coreComponents, componentsMock.GetStatusCoreComponents()) stateComponentsFactory, _ := stateComp.NewStateComponentsFactory(args) managedStateComponents, _ = stateComp.NewManagedStateComponents(stateComponentsFactory) require.False(t, managedStateComponents.IsInterfaceNil()) diff --git a/factory/state/stateComponents_test.go b/factory/state/stateComponents_test.go index 177407226d8..bf5068e8dd7 100644 --- a/factory/state/stateComponents_test.go +++ b/factory/state/stateComponents_test.go @@ -20,7 +20,7 @@ func TestNewStateComponentsFactory(t *testing.T) { t.Parallel() coreComponents := componentsMock.GetCoreComponents() - args := componentsMock.GetStateFactoryArgs(coreComponents) + args := 
componentsMock.GetStateFactoryArgs(coreComponents, componentsMock.GetStatusCoreComponents()) args.Core = nil scf, err := stateComp.NewStateComponentsFactory(args) @@ -31,7 +31,7 @@ func TestNewStateComponentsFactory(t *testing.T) { t.Parallel() coreComponents := componentsMock.GetCoreComponents() - args := componentsMock.GetStateFactoryArgs(coreComponents) + args := componentsMock.GetStateFactoryArgs(coreComponents, componentsMock.GetStatusCoreComponents()) args.StatusCore = nil scf, err := stateComp.NewStateComponentsFactory(args) @@ -42,7 +42,7 @@ func TestNewStateComponentsFactory(t *testing.T) { t.Parallel() coreComponents := componentsMock.GetCoreComponents() - args := componentsMock.GetStateFactoryArgs(coreComponents) + args := componentsMock.GetStateFactoryArgs(coreComponents, componentsMock.GetStatusCoreComponents()) scf, err := stateComp.NewStateComponentsFactory(args) require.NoError(t, err) @@ -57,7 +57,7 @@ func TestStateComponentsFactory_Create(t *testing.T) { t.Parallel() coreComponents := componentsMock.GetCoreComponents() - args := componentsMock.GetStateFactoryArgs(coreComponents) + args := componentsMock.GetStateFactoryArgs(coreComponents, componentsMock.GetStatusCoreComponents()) coreCompStub := factory.NewCoreComponentsHolderStubFromRealComponent(args.Core) coreCompStub.InternalMarshalizerCalled = func() marshal.Marshalizer { return nil @@ -73,7 +73,7 @@ func TestStateComponentsFactory_Create(t *testing.T) { t.Parallel() coreComponents := componentsMock.GetCoreComponents() - args := componentsMock.GetStateFactoryArgs(coreComponents) + args := componentsMock.GetStateFactoryArgs(coreComponents, componentsMock.GetStatusCoreComponents()) args.Config.EvictionWaitingList.RootHashesSize = 0 scf, _ := stateComp.NewStateComponentsFactory(args) @@ -85,7 +85,7 @@ func TestStateComponentsFactory_Create(t *testing.T) { t.Parallel() coreComponents := componentsMock.GetCoreComponents() - args := componentsMock.GetStateFactoryArgs(coreComponents) + args := componentsMock.GetStateFactoryArgs(coreComponents, componentsMock.GetStatusCoreComponents()) coreCompStub := factory.NewCoreComponentsHolderStubFromRealComponent(args.Core) cnt := 0 @@ -107,7 +107,7 @@ func TestStateComponentsFactory_Create(t *testing.T) { t.Parallel() coreComponents := componentsMock.GetCoreComponents() - args := componentsMock.GetStateFactoryArgs(coreComponents) + args := componentsMock.GetStateFactoryArgs(coreComponents, componentsMock.GetStatusCoreComponents()) coreCompStub := factory.NewCoreComponentsHolderStubFromRealComponent(args.Core) cnt := 0 @@ -129,7 +129,7 @@ func TestStateComponentsFactory_Create(t *testing.T) { t.Parallel() coreComponents := componentsMock.GetCoreComponents() - args := componentsMock.GetStateFactoryArgs(coreComponents) + args := componentsMock.GetStateFactoryArgs(coreComponents, componentsMock.GetStatusCoreComponents()) scf, _ := stateComp.NewStateComponentsFactory(args) sc, err := scf.Create() @@ -143,7 +143,7 @@ func TestStateComponents_Close(t *testing.T) { t.Parallel() coreComponents := componentsMock.GetCoreComponents() - args := componentsMock.GetStateFactoryArgs(coreComponents) + args := componentsMock.GetStateFactoryArgs(coreComponents, componentsMock.GetStatusCoreComponents()) scf, _ := stateComp.NewStateComponentsFactory(args) sc, err := scf.Create() diff --git a/factory/status/statusComponentsHandler_test.go b/factory/status/statusComponentsHandler_test.go index ee81a353e31..c7252cbf6de 100644 --- a/factory/status/statusComponentsHandler_test.go +++ 
b/factory/status/statusComponentsHandler_test.go @@ -16,18 +16,14 @@ import ( ) func TestNewManagedStatusComponents(t *testing.T) { - t.Parallel() + // no t.Parallel for these tests as they create real components t.Run("nil factory should error", func(t *testing.T) { - t.Parallel() - managedStatusComponents, err := statusComp.NewManagedStatusComponents(nil) require.Equal(t, errorsMx.ErrNilStatusComponentsFactory, err) require.Nil(t, managedStatusComponents) }) t.Run("should work", func(t *testing.T) { - t.Parallel() - scf, err := statusComp.NewStatusComponentsFactory(createMockStatusComponentsFactoryArgs()) require.Nil(t, err) managedStatusComponents, err := statusComp.NewManagedStatusComponents(scf) @@ -37,11 +33,9 @@ func TestNewManagedStatusComponents(t *testing.T) { } func TestManagedStatusComponents_Create(t *testing.T) { - t.Parallel() + // no t.Parallel for these tests as they create real components t.Run("invalid params should error", func(t *testing.T) { - t.Parallel() - args := createMockStatusComponentsFactoryArgs() args.StatusCoreComponents = &factoryMocks.StatusCoreComponentsStub{ AppStatusHandlerField: nil, @@ -56,8 +50,6 @@ func TestManagedStatusComponents_Create(t *testing.T) { require.Error(t, err) }) t.Run("should work with getters", func(t *testing.T) { - t.Parallel() - scf, err := statusComp.NewStatusComponentsFactory(createMockStatusComponentsFactoryArgs()) require.Nil(t, err) managedStatusComponents, err := statusComp.NewManagedStatusComponents(scf) @@ -78,7 +70,7 @@ func TestManagedStatusComponents_Create(t *testing.T) { } func TestManagedStatusComponents_Close(t *testing.T) { - t.Parallel() + // no t.Parallel for these tests as they create real components scf, _ := statusComp.NewStatusComponentsFactory(createMockStatusComponentsFactoryArgs()) managedStatusComponents, _ := statusComp.NewManagedStatusComponents(scf) @@ -96,7 +88,7 @@ func TestManagedStatusComponents_Close(t *testing.T) { } func TestManagedStatusComponents_CheckSubcomponents(t *testing.T) { - t.Parallel() + // no t.Parallel for these tests as they create real components scf, _ := statusComp.NewStatusComponentsFactory(createMockStatusComponentsFactoryArgs()) managedStatusComponents, _ := statusComp.NewManagedStatusComponents(scf) @@ -112,7 +104,7 @@ func TestManagedStatusComponents_CheckSubcomponents(t *testing.T) { } func TestManagedStatusComponents_SetForkDetector(t *testing.T) { - t.Parallel() + // no t.Parallel for these tests as they create real components scf, _ := statusComp.NewStatusComponentsFactory(createMockStatusComponentsFactoryArgs()) managedStatusComponents, _ := statusComp.NewManagedStatusComponents(scf) @@ -126,11 +118,9 @@ func TestManagedStatusComponents_SetForkDetector(t *testing.T) { } func TestManagedStatusComponents_StartPolling(t *testing.T) { - t.Parallel() + // no t.Parallel for these tests as they create real components t.Run("NewAppStatusPolling fails should error", func(t *testing.T) { - t.Parallel() - args := createMockStatusComponentsFactoryArgs() args.Config.GeneralSettings.StatusPollingIntervalSec = 0 scf, _ := statusComp.NewStatusComponentsFactory(args) @@ -142,8 +132,6 @@ func TestManagedStatusComponents_StartPolling(t *testing.T) { require.Equal(t, errorsMx.ErrStatusPollingInit, err) }) t.Run("RegisterPollingFunc fails should error", func(t *testing.T) { - t.Parallel() - args := createMockStatusComponentsFactoryArgs() args.Config.GeneralSettings.StatusPollingIntervalSec = 0 scf, _ := statusComp.NewStatusComponentsFactory(args) @@ -155,8 +143,6 @@ func 
TestManagedStatusComponents_StartPolling(t *testing.T) { require.Equal(t, errorsMx.ErrStatusPollingInit, err) }) t.Run("should work", func(t *testing.T) { - t.Parallel() - scf, _ := statusComp.NewStatusComponentsFactory(createMockStatusComponentsFactoryArgs()) managedStatusComponents, _ := statusComp.NewManagedStatusComponents(scf) err := managedStatusComponents.Create() @@ -168,7 +154,7 @@ func TestManagedStatusComponents_StartPolling(t *testing.T) { } func TestComputeNumConnectedPeers(t *testing.T) { - t.Parallel() + // no t.Parallel for these tests as they create real components t.Run("main network", testComputeNumConnectedPeers("")) t.Run("full archive network", testComputeNumConnectedPeers(common.FullArchiveMetricSuffix)) @@ -176,8 +162,6 @@ func TestComputeNumConnectedPeers(t *testing.T) { func testComputeNumConnectedPeers(suffix string) func(t *testing.T) { return func(t *testing.T) { - t.Parallel() - netMes := &p2pmocks.MessengerStub{ ConnectedAddressesCalled: func() []string { return []string{"addr1", "addr2", "addr3"} @@ -195,7 +179,7 @@ func testComputeNumConnectedPeers(suffix string) func(t *testing.T) { } func TestComputeConnectedPeers(t *testing.T) { - t.Parallel() + // no t.Parallel for these tests as they create real components t.Run("main network", testComputeConnectedPeers("")) t.Run("full archive network", testComputeConnectedPeers(common.FullArchiveMetricSuffix)) @@ -203,8 +187,6 @@ func TestComputeConnectedPeers(t *testing.T) { func testComputeConnectedPeers(suffix string) func(t *testing.T) { return func(t *testing.T) { - t.Parallel() - netMes := &p2pmocks.MessengerStub{ GetConnectedPeersInfoCalled: func() *p2p.ConnectedPeersInfo { return &p2p.ConnectedPeersInfo{ @@ -294,7 +276,7 @@ func testComputeConnectedPeers(suffix string) func(t *testing.T) { } func TestManagedStatusComponents_IsInterfaceNil(t *testing.T) { - t.Parallel() + // no t.Parallel for these tests as they create real components managedStatusComponents, _ := statusComp.NewManagedStatusComponents(nil) require.True(t, managedStatusComponents.IsInterfaceNil()) diff --git a/factory/status/statusComponents_test.go b/factory/status/statusComponents_test.go index 35c7041d844..3e1c0f8ba53 100644 --- a/factory/status/statusComponents_test.go +++ b/factory/status/statusComponents_test.go @@ -2,6 +2,7 @@ package status_test import ( "errors" + "runtime" "testing" "github.com/multiversx/mx-chain-communication-go/websocket/data" @@ -15,6 +16,7 @@ import ( componentsMock "github.com/multiversx/mx-chain-go/testscommon/components" "github.com/multiversx/mx-chain-go/testscommon/epochNotifier" "github.com/multiversx/mx-chain-go/testscommon/factory" + "github.com/multiversx/mx-chain-go/testscommon/genesisMocks" "github.com/multiversx/mx-chain-go/testscommon/shardingMocks" "github.com/multiversx/mx-chain-go/testscommon/statusHandler" "github.com/stretchr/testify/require" @@ -45,7 +47,7 @@ func createMockStatusComponentsFactoryArgs() statusComp.StatusComponentsFactoryA NodesCoordinator: &shardingMocks.NodesCoordinatorMock{}, EpochStartNotifier: &mock.EpochStartNotifierStub{}, CoreComponents: &mock.CoreComponentsMock{ - NodesConfig: &testscommon.NodesSetupStub{ + NodesConfig: &genesisMocks.NodesSetupStub{ GetRoundDurationCalled: func() uint64 { return 1000 }, @@ -66,11 +68,9 @@ func createMockStatusComponentsFactoryArgs() statusComp.StatusComponentsFactoryA } func TestNewStatusComponentsFactory(t *testing.T) { - t.Parallel() + // no t.Parallel for these tests as they create real components t.Run("nil CoreComponents should 
error", func(t *testing.T) { - t.Parallel() - args := createMockStatusComponentsFactoryArgs() args.CoreComponents = nil scf, err := statusComp.NewStatusComponentsFactory(args) @@ -78,8 +78,6 @@ func TestNewStatusComponentsFactory(t *testing.T) { require.Equal(t, errorsMx.ErrNilCoreComponentsHolder, err) }) t.Run("CoreComponents with nil GenesisNodesSetup should error", func(t *testing.T) { - t.Parallel() - args := createMockStatusComponentsFactoryArgs() args.CoreComponents = &mock.CoreComponentsMock{ NodesConfig: nil, @@ -89,8 +87,6 @@ func TestNewStatusComponentsFactory(t *testing.T) { require.Equal(t, errorsMx.ErrNilGenesisNodesSetupHandler, err) }) t.Run("nil NetworkComponents should error", func(t *testing.T) { - t.Parallel() - args := createMockStatusComponentsFactoryArgs() args.NetworkComponents = nil scf, err := statusComp.NewStatusComponentsFactory(args) @@ -98,8 +94,6 @@ func TestNewStatusComponentsFactory(t *testing.T) { require.Equal(t, errorsMx.ErrNilNetworkComponentsHolder, err) }) t.Run("nil ShardCoordinator should error", func(t *testing.T) { - t.Parallel() - args := createMockStatusComponentsFactoryArgs() args.ShardCoordinator = nil scf, err := statusComp.NewStatusComponentsFactory(args) @@ -107,8 +101,6 @@ func TestNewStatusComponentsFactory(t *testing.T) { require.Equal(t, errorsMx.ErrNilShardCoordinator, err) }) t.Run("nil NodesCoordinator should error", func(t *testing.T) { - t.Parallel() - args := createMockStatusComponentsFactoryArgs() args.NodesCoordinator = nil scf, err := statusComp.NewStatusComponentsFactory(args) @@ -116,8 +108,6 @@ func TestNewStatusComponentsFactory(t *testing.T) { require.Equal(t, errorsMx.ErrNilNodesCoordinator, err) }) t.Run("nil EpochStartNotifier should error", func(t *testing.T) { - t.Parallel() - args := createMockStatusComponentsFactoryArgs() args.EpochStartNotifier = nil scf, err := statusComp.NewStatusComponentsFactory(args) @@ -125,8 +115,6 @@ func TestNewStatusComponentsFactory(t *testing.T) { require.Equal(t, errorsMx.ErrNilEpochStartNotifier, err) }) t.Run("nil StatusCoreComponents should error", func(t *testing.T) { - t.Parallel() - args := createMockStatusComponentsFactoryArgs() args.StatusCoreComponents = nil scf, err := statusComp.NewStatusComponentsFactory(args) @@ -134,8 +122,6 @@ func TestNewStatusComponentsFactory(t *testing.T) { require.Equal(t, errorsMx.ErrNilStatusCoreComponents, err) }) t.Run("nil CryptoComponents should error", func(t *testing.T) { - t.Parallel() - args := createMockStatusComponentsFactoryArgs() args.CryptoComponents = nil scf, err := statusComp.NewStatusComponentsFactory(args) @@ -143,8 +129,6 @@ func TestNewStatusComponentsFactory(t *testing.T) { require.Equal(t, errorsMx.ErrNilCryptoComponents, err) }) t.Run("should work", func(t *testing.T) { - t.Parallel() - scf, err := statusComp.NewStatusComponentsFactory(createMockStatusComponentsFactoryArgs()) require.NotNil(t, scf) require.NoError(t, err) @@ -152,11 +136,9 @@ func TestNewStatusComponentsFactory(t *testing.T) { } func TestStatusComponentsFactory_Create(t *testing.T) { - t.Parallel() + // no t.Parallel for these tests as they create real components t.Run("NewSoftwareVersionFactory fails should return error", func(t *testing.T) { - t.Parallel() - args := createMockStatusComponentsFactoryArgs() args.StatusCoreComponents = &factory.StatusCoreComponentsStub{ AppStatusHandlerField: nil, // make NewSoftwareVersionFactory fail @@ -169,8 +151,6 @@ func TestStatusComponentsFactory_Create(t *testing.T) { require.Nil(t, sc) }) 
t.Run("softwareVersionCheckerFactory.Create fails should return error", func(t *testing.T) { - t.Parallel() - args := createMockStatusComponentsFactoryArgs() args.Config.SoftwareVersionConfig.PollingIntervalInMinutes = 0 scf, _ := statusComp.NewStatusComponentsFactory(args) @@ -181,11 +161,9 @@ func TestStatusComponentsFactory_Create(t *testing.T) { require.Nil(t, sc) }) t.Run("invalid round duration should error", func(t *testing.T) { - t.Parallel() - args := createMockStatusComponentsFactoryArgs() args.CoreComponents = &mock.CoreComponentsMock{ - NodesConfig: &testscommon.NodesSetupStub{ + NodesConfig: &genesisMocks.NodesSetupStub{ GetRoundDurationCalled: func() uint64 { return 0 }, @@ -199,8 +177,6 @@ func TestStatusComponentsFactory_Create(t *testing.T) { require.Nil(t, sc) }) t.Run("makeWebSocketDriverArgs fails due to invalid marshaller type should error", func(t *testing.T) { - t.Parallel() - args := createMockStatusComponentsFactoryArgs() args.ExternalConfig.HostDriversConfig[0].Enabled = true args.ExternalConfig.HostDriversConfig[0].MarshallerType = "invalid type" @@ -212,7 +188,9 @@ func TestStatusComponentsFactory_Create(t *testing.T) { require.Nil(t, sc) }) t.Run("should work", func(t *testing.T) { - t.Parallel() + if runtime.GOOS == "darwin" && runtime.GOARCH == "amd64" { + t.Skip("skipping test on darwin amd64") + } shardCoordinator := mock.NewMultiShardsCoordinatorMock(2) shardCoordinator.SelfIDCalled = func() uint32 { @@ -232,7 +210,7 @@ func TestStatusComponentsFactory_Create(t *testing.T) { } func TestStatusComponentsFactory_epochStartEventHandler(t *testing.T) { - t.Parallel() + // no t.Parallel for these tests as they create real components args := createMockStatusComponentsFactoryArgs() args.NodesCoordinator = &shardingMocks.NodesCoordinatorStub{ @@ -252,7 +230,7 @@ func TestStatusComponentsFactory_epochStartEventHandler(t *testing.T) { } func TestStatusComponentsFactory_IsInterfaceNil(t *testing.T) { - t.Parallel() + // no t.Parallel for these tests as they create real components args := createMockStatusComponentsFactoryArgs() args.CoreComponents = nil @@ -264,7 +242,7 @@ func TestStatusComponentsFactory_IsInterfaceNil(t *testing.T) { } func TestStatusComponents_Close(t *testing.T) { - t.Parallel() + // no t.Parallel for these tests as they create real components scf, _ := statusComp.NewStatusComponentsFactory(createMockStatusComponentsFactoryArgs()) cc, err := scf.Create() @@ -275,7 +253,7 @@ func TestStatusComponents_Close(t *testing.T) { } func TestMakeHostDriversArgs(t *testing.T) { - t.Parallel() + // no t.Parallel for these tests as they create real components args := createMockStatusComponentsFactoryArgs() args.ExternalConfig.HostDriversConfig = []config.HostDriversConfig{ diff --git a/genesis/interface.go b/genesis/interface.go index 1a618a44efe..e58708a236f 100644 --- a/genesis/interface.go +++ b/genesis/interface.go @@ -84,7 +84,7 @@ type InitialSmartContractHandler interface { } // InitialSmartContractParser contains the parsed genesis initial smart contracts -//json file and has some functionality regarding processed data +// json file and has some functionality regarding processed data type InitialSmartContractParser interface { InitialSmartContractsSplitOnOwnersShards(shardCoordinator sharding.Coordinator) (map[uint32][]InitialSmartContractHandler, error) GetDeployedSCAddresses(scType string) (map[string]struct{}, error) @@ -115,3 +115,9 @@ type DeployProcessor interface { Deploy(sc InitialSmartContractHandler) ([][]byte, error) IsInterfaceNil() bool } + 
+// VersionedHeaderFactory creates versioned headers +type VersionedHeaderFactory interface { + Create(epoch uint32) data.HeaderHandler + IsInterfaceNil() bool +} diff --git a/genesis/process/argGenesisBlockCreator.go b/genesis/process/argGenesisBlockCreator.go index e4374b7f6f0..19b5fc9adcc 100644 --- a/genesis/process/argGenesisBlockCreator.go +++ b/genesis/process/argGenesisBlockCreator.go @@ -44,7 +44,10 @@ type dataComponentsHandler interface { // ArgsGenesisBlockCreator holds the arguments which are needed to create a genesis block type ArgsGenesisBlockCreator struct { GenesisTime uint64 + GenesisNonce uint64 + GenesisRound uint64 StartEpochNum uint32 + GenesisEpoch uint32 Data dataComponentsHandler Core coreComponentsHandler Accounts state.AccountsAdapter @@ -60,8 +63,9 @@ type ArgsGenesisBlockCreator struct { HardForkConfig config.HardforkConfig TrieStorageManagers map[string]common.StorageManager SystemSCConfig config.SystemSmartContractsConfig - RoundConfig *config.RoundConfig - EpochConfig *config.EpochConfig + RoundConfig config.RoundConfig + EpochConfig config.EpochConfig + HeaderVersionConfigs config.VersionsConfig WorkingDir string BlockSignKeyGen crypto.KeyGenerator HistoryRepository dblookupext.HistoryRepository @@ -69,6 +73,8 @@ type ArgsGenesisBlockCreator struct { GenesisNodePrice *big.Int GenesisString string + // created components - importHandler update.ImportHandler + importHandler update.ImportHandler + versionedHeaderFactory genesis.VersionedHeaderFactory } diff --git a/genesis/process/disabled/nodesCoordinator.go b/genesis/process/disabled/nodesCoordinator.go new file mode 100644 index 00000000000..610230dd56f --- /dev/null +++ b/genesis/process/disabled/nodesCoordinator.go @@ -0,0 +1,15 @@ +package disabled + +// NodesCoordinator implements the NodesCoordinator interface, it does nothing as it is disabled +type NodesCoordinator struct { +} + +// GetNumTotalEligible - +func (n *NodesCoordinator) GetNumTotalEligible() uint64 { + return 1600 +} + +// IsInterfaceNil - +func (n *NodesCoordinator) IsInterfaceNil() bool { + return n == nil +} diff --git a/genesis/process/genesisBlockCreator.go b/genesis/process/genesisBlockCreator.go index 2e9b14d7db3..7c37922ae28 100644 --- a/genesis/process/genesisBlockCreator.go +++ b/genesis/process/genesisBlockCreator.go @@ -16,6 +16,7 @@ import ( "github.com/multiversx/mx-chain-go/config" "github.com/multiversx/mx-chain-go/dataRetriever" "github.com/multiversx/mx-chain-go/dataRetriever/blockchain" + factoryBlock "github.com/multiversx/mx-chain-go/factory/block" "github.com/multiversx/mx-chain-go/genesis" "github.com/multiversx/mx-chain-go/genesis/process/disabled" "github.com/multiversx/mx-chain-go/genesis/process/intermediate" @@ -82,7 +83,7 @@ func getGenesisBlocksRoundNonceEpoch(arg ArgsGenesisBlockCreator) (uint64, uint6 if arg.HardForkConfig.AfterHardFork { return arg.HardForkConfig.StartRound, arg.HardForkConfig.StartNonce, arg.HardForkConfig.StartEpoch } - return 0, 0, 0 + return arg.GenesisRound, arg.GenesisNonce, arg.GenesisEpoch } func (gbc *genesisBlockCreator) createHardForkImportHandler() error { @@ -195,12 +196,6 @@ func checkArgumentsForBlockCreator(arg ArgsGenesisBlockCreator) error { if arg.TrieStorageManagers == nil { return genesis.ErrNilTrieStorageManager } - if arg.EpochConfig == nil { - return genesis.ErrNilEpochConfig - } - if arg.RoundConfig == nil { - return genesis.ErrNilRoundConfig - } if check.IfNil(arg.HistoryRepository) { return process.ErrNilHistoryRepository } @@ -212,7 +207,7 @@ func 
checkArgumentsForBlockCreator(arg ArgsGenesisBlockCreator) error { } func mustDoGenesisProcess(arg ArgsGenesisBlockCreator) bool { - genesisEpoch := uint32(0) + genesisEpoch := arg.GenesisEpoch if arg.HardForkConfig.AfterHardFork { genesisEpoch = arg.HardForkConfig.StartEpoch } @@ -225,7 +220,7 @@ func mustDoGenesisProcess(arg ArgsGenesisBlockCreator) bool { } func (gbc *genesisBlockCreator) createEmptyGenesisBlocks() (map[uint32]data.HeaderHandler, error) { - err := gbc.computeDNSAddresses(createGenesisConfig()) + err := gbc.computeDNSAddresses(createGenesisConfig(gbc.arg.EpochConfig.EnableEpochs)) if err != nil { return nil, err } @@ -486,12 +481,17 @@ func (gbc *genesisBlockCreator) getNewArgForShard(shardID uint32) (ArgsGenesisBl var err error isCurrentShard := shardID == gbc.arg.ShardCoordinator.SelfId() + newArgument := gbc.arg // copy the arguments + newArgument.versionedHeaderFactory, err = gbc.createVersionedHeaderFactory() + if err != nil { + return ArgsGenesisBlockCreator{}, fmt.Errorf("'%w' while generating a VersionedHeaderFactory instance for shard %d", + err, shardID) + } + if isCurrentShard { - newArgument := gbc.arg // copy the arguments newArgument.Data = newArgument.Data.Clone().(dataComponentsHandler) return newArgument, nil } - newArgument := gbc.arg // copy the arguments argsAccCreator := factoryState.ArgsAccountCreator{ Hasher: newArgument.Core.Hasher(), @@ -530,6 +530,25 @@ func (gbc *genesisBlockCreator) getNewArgForShard(shardID uint32) (ArgsGenesisBl return newArgument, err } +func (gbc *genesisBlockCreator) createVersionedHeaderFactory() (genesis.VersionedHeaderFactory, error) { + cacheConfig := factory.GetCacherFromConfig(gbc.arg.HeaderVersionConfigs.Cache) + cache, err := storageunit.NewCache(cacheConfig) + if err != nil { + return nil, err + } + + headerVersionHandler, err := factoryBlock.NewHeaderVersionHandler( + gbc.arg.HeaderVersionConfigs.VersionsByEpochs, + gbc.arg.HeaderVersionConfigs.DefaultVersion, + cache, + ) + if err != nil { + return nil, err + } + + return factoryBlock.NewShardHeaderFactory(headerVersionHandler) +} + func (gbc *genesisBlockCreator) saveGenesisBlock(header data.HeaderHandler) error { blockBuff, err := gbc.arg.Core.InternalMarshalizer().Marshal(header) if err != nil { diff --git a/genesis/process/genesisBlockCreator_test.go b/genesis/process/genesisBlockCreator_test.go index 90b46757a86..7553025f369 100644 --- a/genesis/process/genesisBlockCreator_test.go +++ b/genesis/process/genesisBlockCreator_test.go @@ -13,6 +13,8 @@ import ( "testing" "github.com/multiversx/mx-chain-core-go/core" + "github.com/multiversx/mx-chain-core-go/data" + "github.com/multiversx/mx-chain-core-go/data/block" "github.com/multiversx/mx-chain-go/common" "github.com/multiversx/mx-chain-go/config" "github.com/multiversx/mx-chain-go/dataRetriever" @@ -151,6 +153,8 @@ func createMockArgument( MaxNumberOfNodesForStake: 10, ActivateBLSPubKeyMessageVerification: false, MinUnstakeTokensValue: "1", + StakeLimitPercentage: 100.0, + NodeLimitPercentage: 100.0, }, DelegationManagerSystemSCConfig: config.DelegationManagerSystemSCConfig{ MinCreationDeposit: "100", @@ -161,27 +165,33 @@ func createMockArgument( MinServiceFee: 0, MaxServiceFee: 100, }, + SoftAuctionConfig: config.SoftAuctionConfig{ + TopUpStep: "10", + MinTopUp: "1", + MaxTopUp: "32000000", + MaxNumberOfIterations: 100000, + }, }, TrieStorageManagers: trieStorageManagers, BlockSignKeyGen: &mock.KeyGenMock{}, GenesisNodePrice: nodePrice, - EpochConfig: &config.EpochConfig{ + EpochConfig: 
config.EpochConfig{ EnableEpochs: config.EnableEpochs{ - BuiltInFunctionsEnableEpoch: 0, - SCDeployEnableEpoch: 0, - RelayedTransactionsEnableEpoch: 0, - PenalizedTooMuchGasEnableEpoch: 0, - }, - }, - RoundConfig: &config.RoundConfig{ - RoundActivations: map[string]config.ActivationRoundByName{ - "DisableAsyncCallV1": { - Round: "18446744073709551615", - }, + SCDeployEnableEpoch: unreachableEpoch, + CleanUpInformativeSCRsEnableEpoch: unreachableEpoch, + SCProcessorV2EnableEpoch: unreachableEpoch, + StakeLimitsEnableEpoch: 10, }, }, + RoundConfig: testscommon.GetDefaultRoundsConfig(), + HeaderVersionConfigs: testscommon.GetDefaultHeaderVersionConfig(), HistoryRepository: &dblookupext.HistoryRepositoryStub{}, TxExecutionOrderHandler: &commonMocks.TxExecutionOrderHandlerStub{}, + versionedHeaderFactory: &testscommon.VersionedHeaderFactoryStub{ + CreateCalled: func(epoch uint32) data.HeaderHandler { + return &block.Header{} + }, + }, } arg.ShardCoordinator = &mock.ShardCoordinatorMock{ @@ -427,16 +437,6 @@ func TestNewGenesisBlockCreator(t *testing.T) { require.True(t, errors.Is(err, genesis.ErrNilTrieStorageManager)) require.Nil(t, gbc) }) - t.Run("nil EpochConfig should error", func(t *testing.T) { - t.Parallel() - - arg := createMockArgument(t, "testdata/genesisTest1.json", &mock.InitialNodesHandlerStub{}, big.NewInt(22000)) - arg.EpochConfig = nil - - gbc, err := NewGenesisBlockCreator(arg) - require.True(t, errors.Is(err, genesis.ErrNilEpochConfig)) - require.Nil(t, gbc) - }) t.Run("invalid GenesisNodePrice should error", func(t *testing.T) { t.Parallel() diff --git a/genesis/process/metaGenesisBlockCreator.go b/genesis/process/metaGenesisBlockCreator.go index 40b5f606241..de3500d2e2f 100644 --- a/genesis/process/metaGenesisBlockCreator.go +++ b/genesis/process/metaGenesisBlockCreator.go @@ -48,9 +48,6 @@ import ( "github.com/multiversx/mx-chain-vm-common-go/parsers" ) -const unreachableEpoch = ^uint32(0) -const unreachableRound = ^uint64(0) - // CreateMetaGenesisBlock will create a metachain genesis block func CreateMetaGenesisBlock( arg ArgsGenesisBlockCreator, @@ -70,7 +67,11 @@ func CreateMetaGenesisBlock( DeployInitialScTxs: make([]data.TransactionHandler, 0), } - processors, err := createProcessorsForMetaGenesisBlock(arg, createGenesisConfig(), createGenesisRoundConfig()) + processors, err := createProcessorsForMetaGenesisBlock( + arg, + createGenesisConfig(arg.EpochConfig.EnableEpochs), + createGenesisRoundConfig(arg.RoundConfig), + ) if err != nil { return nil, nil, nil, err } @@ -295,7 +296,7 @@ func saveGenesisMetaToStorage( return nil } -func createProcessorsForMetaGenesisBlock(arg ArgsGenesisBlockCreator, enableEpochsConfig config.EnableEpochs, roundConfig *config.RoundConfig) (*genesisProcessors, error) { +func createProcessorsForMetaGenesisBlock(arg ArgsGenesisBlockCreator, enableEpochsConfig config.EnableEpochs, roundConfig config.RoundConfig) (*genesisProcessors, error) { epochNotifier := forking.NewGenericEpochNotifier() temporaryMetaHeader := &block.MetaBlock{ Epoch: arg.StartEpochNum, @@ -308,7 +309,7 @@ func createProcessorsForMetaGenesisBlock(arg ArgsGenesisBlockCreator, enableEpoc epochNotifier.CheckEpoch(temporaryMetaHeader) roundNotifier := forking.NewGenericRoundNotifier() - enableRoundsHandler, err := enablers.NewEnableRoundsHandler(*roundConfig, roundNotifier) + enableRoundsHandler, err := enablers.NewEnableRoundsHandler(roundConfig, roundNotifier) if err != nil { return nil, err } @@ -360,6 +361,7 @@ func createProcessorsForMetaGenesisBlock(arg 
ArgsGenesisBlockCreator, enableEpoc ChanceComputer: &disabled.Rater{}, ShardCoordinator: arg.ShardCoordinator, EnableEpochsHandler: enableEpochsHandler, + NodesCoordinator: &disabled.NodesCoordinator{}, } virtualMachineFactory, err := metachain.NewVMContainerFactory(argsNewVMContainerFactory) if err != nil { diff --git a/genesis/process/shardGenesisBlockCreator.go b/genesis/process/shardGenesisBlockCreator.go index 9fef8f05569..3c7e47070c7 100644 --- a/genesis/process/shardGenesisBlockCreator.go +++ b/genesis/process/shardGenesisBlockCreator.go @@ -5,7 +5,6 @@ import ( "fmt" "math" "math/big" - "strconv" "sync" "github.com/multiversx/mx-chain-core-go/core/check" @@ -45,8 +44,9 @@ import ( "github.com/multiversx/mx-chain-vm-common-go/parsers" ) -var log = logger.GetOrCreate("genesis/process") +const unreachableEpoch = ^uint32(0) +var log = logger.GetOrCreate("genesis/process") var zero = big.NewInt(0) type deployedScMetrics struct { @@ -54,112 +54,26 @@ type deployedScMetrics struct { numOtherTypes int } -func createGenesisConfig() config.EnableEpochs { - blsMultiSignerEnableEpoch := []config.MultiSignerConfig{ +func createGenesisConfig(providedEnableEpochs config.EnableEpochs) config.EnableEpochs { + clonedConfig := providedEnableEpochs + clonedConfig.BuiltInFunctionsEnableEpoch = 0 + clonedConfig.PenalizedTooMuchGasEnableEpoch = unreachableEpoch + clonedConfig.MaxNodesChangeEnableEpoch = []config.MaxNodesChangeConfig{ { - EnableEpoch: 0, - Type: "no-KOSK", + EpochEnable: unreachableEpoch, + MaxNumNodes: 0, + NodesToShufflePerShard: 0, }, } + clonedConfig.DoubleKeyProtectionEnableEpoch = 0 - return config.EnableEpochs{ - SCDeployEnableEpoch: unreachableEpoch, - BuiltInFunctionsEnableEpoch: 0, - RelayedTransactionsEnableEpoch: unreachableEpoch, - PenalizedTooMuchGasEnableEpoch: unreachableEpoch, - SwitchJailWaitingEnableEpoch: unreachableEpoch, - SwitchHysteresisForMinNodesEnableEpoch: unreachableEpoch, - BelowSignedThresholdEnableEpoch: unreachableEpoch, - TransactionSignedWithTxHashEnableEpoch: unreachableEpoch, - MetaProtectionEnableEpoch: unreachableEpoch, - AheadOfTimeGasUsageEnableEpoch: unreachableEpoch, - GasPriceModifierEnableEpoch: unreachableEpoch, - RepairCallbackEnableEpoch: unreachableEpoch, - MaxNodesChangeEnableEpoch: []config.MaxNodesChangeConfig{ - { - EpochEnable: unreachableEpoch, - MaxNumNodes: 0, - NodesToShufflePerShard: 0, - }, - }, - BlockGasAndFeesReCheckEnableEpoch: unreachableEpoch, - StakingV2EnableEpoch: unreachableEpoch, - StakeEnableEpoch: unreachableEpoch, // no need to enable this, we have builtin exceptions in staking system SC - DoubleKeyProtectionEnableEpoch: 0, - ESDTEnableEpoch: unreachableEpoch, - GovernanceEnableEpoch: unreachableEpoch, - DelegationManagerEnableEpoch: unreachableEpoch, - DelegationSmartContractEnableEpoch: unreachableEpoch, - CorrectLastUnjailedEnableEpoch: unreachableEpoch, - BalanceWaitingListsEnableEpoch: unreachableEpoch, - ReturnDataToLastTransferEnableEpoch: unreachableEpoch, - SenderInOutTransferEnableEpoch: unreachableEpoch, - RelayedTransactionsV2EnableEpoch: unreachableEpoch, - UnbondTokensV2EnableEpoch: unreachableEpoch, - SaveJailedAlwaysEnableEpoch: unreachableEpoch, - ValidatorToDelegationEnableEpoch: unreachableEpoch, - ReDelegateBelowMinCheckEnableEpoch: unreachableEpoch, - WaitingListFixEnableEpoch: unreachableEpoch, - IncrementSCRNonceInMultiTransferEnableEpoch: unreachableEpoch, - ESDTMultiTransferEnableEpoch: unreachableEpoch, - GlobalMintBurnDisableEpoch: unreachableEpoch, - ESDTTransferRoleEnableEpoch: 
unreachableEpoch, - BuiltInFunctionOnMetaEnableEpoch: unreachableEpoch, - ComputeRewardCheckpointEnableEpoch: unreachableEpoch, - SCRSizeInvariantCheckEnableEpoch: unreachableEpoch, - BackwardCompSaveKeyValueEnableEpoch: unreachableEpoch, - ESDTNFTCreateOnMultiShardEnableEpoch: unreachableEpoch, - MetaESDTSetEnableEpoch: unreachableEpoch, - AddTokensToDelegationEnableEpoch: unreachableEpoch, - MultiESDTTransferFixOnCallBackOnEnableEpoch: unreachableEpoch, - OptimizeGasUsedInCrossMiniBlocksEnableEpoch: unreachableEpoch, - CorrectFirstQueuedEpoch: unreachableEpoch, - CorrectJailedNotUnstakedEmptyQueueEpoch: unreachableEpoch, - FixOOGReturnCodeEnableEpoch: unreachableEpoch, - RemoveNonUpdatedStorageEnableEpoch: unreachableEpoch, - DeleteDelegatorAfterClaimRewardsEnableEpoch: unreachableEpoch, - OptimizeNFTStoreEnableEpoch: unreachableEpoch, - CreateNFTThroughExecByCallerEnableEpoch: unreachableEpoch, - StopDecreasingValidatorRatingWhenStuckEnableEpoch: unreachableEpoch, - FrontRunningProtectionEnableEpoch: unreachableEpoch, - IsPayableBySCEnableEpoch: unreachableEpoch, - CleanUpInformativeSCRsEnableEpoch: unreachableEpoch, - StorageAPICostOptimizationEnableEpoch: unreachableEpoch, - TransformToMultiShardCreateEnableEpoch: unreachableEpoch, - ESDTRegisterAndSetAllRolesEnableEpoch: unreachableEpoch, - ScheduledMiniBlocksEnableEpoch: unreachableEpoch, - FailExecutionOnEveryAPIErrorEnableEpoch: unreachableEpoch, - AddFailedRelayedTxToInvalidMBsDisableEpoch: unreachableEpoch, - SCRSizeInvariantOnBuiltInResultEnableEpoch: unreachableEpoch, - ManagedCryptoAPIsEnableEpoch: unreachableEpoch, - CheckCorrectTokenIDForTransferRoleEnableEpoch: unreachableEpoch, - DisableExecByCallerEnableEpoch: unreachableEpoch, - RefactorContextEnableEpoch: unreachableEpoch, - CheckFunctionArgumentEnableEpoch: unreachableEpoch, - CheckExecuteOnReadOnlyEnableEpoch: unreachableEpoch, - MiniBlockPartialExecutionEnableEpoch: unreachableEpoch, - ESDTMetadataContinuousCleanupEnableEpoch: unreachableEpoch, - FixAsyncCallBackArgsListEnableEpoch: unreachableEpoch, - FixOldTokenLiquidityEnableEpoch: unreachableEpoch, - SetSenderInEeiOutputTransferEnableEpoch: unreachableEpoch, - RefactorPeersMiniBlocksEnableEpoch: unreachableEpoch, - SCProcessorV2EnableEpoch: unreachableEpoch, - DoNotReturnOldBlockInBlockchainHookEnableEpoch: unreachableEpoch, - MaxBlockchainHookCountersEnableEpoch: unreachableEpoch, - BLSMultiSignerEnableEpoch: blsMultiSignerEnableEpoch, - SetGuardianEnableEpoch: unreachableEpoch, - ScToScLogEventEnableEpoch: unreachableEpoch, - } + return clonedConfig } -func createGenesisRoundConfig() *config.RoundConfig { - return &config.RoundConfig{ - RoundActivations: map[string]config.ActivationRoundByName{ - "DisableAsyncCallV1": { - Round: strconv.FormatUint(unreachableRound, 10), - }, - }, - } +func createGenesisRoundConfig(providedEnableRounds config.RoundConfig) config.RoundConfig { + clonedConfig := providedEnableRounds + + return clonedConfig } // CreateShardGenesisBlock will create a shard genesis block @@ -181,7 +95,11 @@ func CreateShardGenesisBlock( DeployInitialScTxs: make([]data.TransactionHandler, 0), } - processors, err := createProcessorsForShardGenesisBlock(arg, createGenesisConfig(), createGenesisRoundConfig()) + processors, err := createProcessorsForShardGenesisBlock( + arg, + createGenesisConfig(arg.EpochConfig.EnableEpochs), + createGenesisRoundConfig(arg.RoundConfig), + ) if err != nil { return nil, nil, nil, err } @@ -241,22 +159,10 @@ func CreateShardGenesisBlock( ) round, nonce, epoch := 
getGenesisBlocksRoundNonceEpoch(arg) - header := &block.Header{ - Epoch: epoch, - Round: round, - Nonce: nonce, - ShardID: arg.ShardCoordinator.SelfId(), - BlockBodyType: block.StateBlock, - PubKeysBitmap: []byte{1}, - Signature: rootHash, - RootHash: rootHash, - PrevRandSeed: rootHash, - RandSeed: rootHash, - TimeStamp: arg.GenesisTime, - AccumulatedFees: big.NewInt(0), - DeveloperFees: big.NewInt(0), - ChainID: []byte(arg.Core.ChainID()), - SoftwareVersion: []byte(""), + headerHandler := arg.versionedHeaderFactory.Create(epoch) + err = setInitialDataInHeader(headerHandler, arg, epoch, nonce, round, rootHash) + if err != nil { + return nil, nil, nil, err } err = processors.vmContainer.Close() @@ -269,7 +175,46 @@ func CreateShardGenesisBlock( return nil, nil, nil, err } - return header, scAddresses, indexingData, nil + return headerHandler, scAddresses, indexingData, nil +} + +func setInitialDataInHeader( + headerHandler data.HeaderHandler, + arg ArgsGenesisBlockCreator, + epoch uint32, + nonce uint64, + round uint64, + rootHash []byte, +) error { + shardHeaderHandler, ok := headerHandler.(data.ShardHeaderHandler) + if !ok { + return process.ErrWrongTypeAssertion + } + + setErrors := make([]error, 0) + setErrors = append(setErrors, shardHeaderHandler.SetEpoch(epoch)) + setErrors = append(setErrors, shardHeaderHandler.SetNonce(nonce)) + setErrors = append(setErrors, shardHeaderHandler.SetRound(round)) + setErrors = append(setErrors, shardHeaderHandler.SetShardID(arg.ShardCoordinator.SelfId())) + setErrors = append(setErrors, shardHeaderHandler.SetBlockBodyTypeInt32(int32(block.StateBlock))) + setErrors = append(setErrors, shardHeaderHandler.SetPubKeysBitmap([]byte{1})) + setErrors = append(setErrors, shardHeaderHandler.SetSignature(rootHash)) + setErrors = append(setErrors, shardHeaderHandler.SetRootHash(rootHash)) + setErrors = append(setErrors, shardHeaderHandler.SetPrevRandSeed(rootHash)) + setErrors = append(setErrors, shardHeaderHandler.SetRandSeed(rootHash)) + setErrors = append(setErrors, shardHeaderHandler.SetTimeStamp(arg.GenesisTime)) + setErrors = append(setErrors, shardHeaderHandler.SetAccumulatedFees(big.NewInt(0))) + setErrors = append(setErrors, shardHeaderHandler.SetDeveloperFees(big.NewInt(0))) + setErrors = append(setErrors, shardHeaderHandler.SetChainID([]byte(arg.Core.ChainID()))) + setErrors = append(setErrors, shardHeaderHandler.SetSoftwareVersion([]byte(""))) + + for _, err := range setErrors { + if err != nil { + return err + } + } + + return nil } func createShardGenesisBlockAfterHardFork( @@ -399,7 +344,7 @@ func setBalanceToTrie(arg ArgsGenesisBlockCreator, accnt genesis.InitialAccountH return arg.Accounts.SaveAccount(account) } -func createProcessorsForShardGenesisBlock(arg ArgsGenesisBlockCreator, enableEpochsConfig config.EnableEpochs, roundConfig *config.RoundConfig) (*genesisProcessors, error) { +func createProcessorsForShardGenesisBlock(arg ArgsGenesisBlockCreator, enableEpochsConfig config.EnableEpochs, roundConfig config.RoundConfig) (*genesisProcessors, error) { genesisWasmVMLocker := &sync.RWMutex{} // use a local instance as to not run in concurrent issues when doing bootstrap epochNotifier := forking.NewGenericEpochNotifier() enableEpochsHandler, err := enablers.NewEnableEpochsHandler(enableEpochsConfig, epochNotifier) @@ -408,7 +353,7 @@ func createProcessorsForShardGenesisBlock(arg ArgsGenesisBlockCreator, enableEpo } roundNotifier := forking.NewGenericRoundNotifier() - enableRoundsHandler, err := enablers.NewEnableRoundsHandler(*roundConfig, 
roundNotifier) + enableRoundsHandler, err := enablers.NewEnableRoundsHandler(roundConfig, roundNotifier) if err != nil { return nil, err } diff --git a/go.mod b/go.mod index 9f27d2e1ffd..86225522dcc 100644 --- a/go.mod +++ b/go.mod @@ -12,19 +12,20 @@ require ( github.com/gogo/protobuf v1.3.2 github.com/google/gops v0.3.18 github.com/gorilla/websocket v1.5.0 + github.com/klauspost/cpuid/v2 v2.2.5 github.com/mitchellh/mapstructure v1.5.0 - github.com/multiversx/mx-chain-communication-go v1.0.13-0.20231129114230-d280af707381 - github.com/multiversx/mx-chain-core-go v1.2.19-0.20231214115026-a1e7279b14f1 - github.com/multiversx/mx-chain-crypto-go v1.2.10-0.20231129101537-ef355850e34b - github.com/multiversx/mx-chain-es-indexer-go v1.4.18-0.20231228064619-e3b0caf29058 - github.com/multiversx/mx-chain-logger-go v1.0.14-0.20231215125130-a3bed6e76040 - github.com/multiversx/mx-chain-scenario-go v1.2.2-0.20231129113427-ad3056f45296 - github.com/multiversx/mx-chain-storage-go v1.0.15-0.20231213110622-e222ba96a9f4 - github.com/multiversx/mx-chain-vm-common-go v1.5.10-0.20231228070003-ae14e1e0adfa - github.com/multiversx/mx-chain-vm-go v1.5.23-0.20231228064104-964359cb8dd3 - github.com/multiversx/mx-chain-vm-v1_2-go v1.2.65-0.20231228071026-eed2cb19c216 - github.com/multiversx/mx-chain-vm-v1_3-go v1.3.66-0.20231228071108-6b89bcebab14 - github.com/multiversx/mx-chain-vm-v1_4-go v1.4.92-0.20231228071246-c1b45eae5955 + github.com/multiversx/mx-chain-communication-go v1.0.13-0.20240126121117-627adccf10ad + github.com/multiversx/mx-chain-core-go v1.2.19-0.20240222081523-011c96ab2548 + github.com/multiversx/mx-chain-crypto-go v1.2.10-0.20231206065052-38843c1f1479 + github.com/multiversx/mx-chain-es-indexer-go v1.4.19-0.20240129150813-a772c480d33a + github.com/multiversx/mx-chain-logger-go v1.0.14-0.20240129144507-d00e967c890c + github.com/multiversx/mx-chain-scenario-go v1.4.3-0.20240212160120-cc32d1580157 + github.com/multiversx/mx-chain-storage-go v1.0.15-0.20240129144933-b1c0d642d7f8 + github.com/multiversx/mx-chain-vm-common-go v1.5.12-0.20240305123516-2231c71162a2 + github.com/multiversx/mx-chain-vm-go v1.5.28-0.20240307121727-b8d371971d9a + github.com/multiversx/mx-chain-vm-v1_2-go v1.2.66-0.20240308085208-3b5a4ab4dd34 + github.com/multiversx/mx-chain-vm-v1_3-go v1.3.67-0.20240308082903-132f9002736b + github.com/multiversx/mx-chain-vm-v1_4-go v1.4.96-0.20240308082831-f05004a05b35 github.com/pelletier/go-toml v1.9.3 github.com/pkg/errors v0.9.1 github.com/prometheus/client_golang v1.14.0 @@ -48,7 +49,7 @@ require ( github.com/chenzhuoyu/base64x v0.0.0-20221115062448-fe3a3abad311 // indirect github.com/containerd/cgroups v1.1.0 // indirect github.com/coreos/go-systemd/v22 v22.5.0 // indirect - github.com/cpuguy83/go-md2man/v2 v2.0.0 // indirect + github.com/cpuguy83/go-md2man/v2 v2.0.2 // indirect github.com/davidlazar/go-crypto v0.0.0-20200604182044-b73af7476f6c // indirect github.com/decred/dcrd/dcrec/secp256k1/v4 v4.2.0 // indirect github.com/denisbrodbeck/machineid v1.0.1 // indirect @@ -92,7 +93,6 @@ require ( github.com/jbenet/goprocess v0.1.4 // indirect github.com/json-iterator/go v1.1.12 // indirect github.com/klauspost/compress v1.16.5 // indirect - github.com/klauspost/cpuid/v2 v2.2.5 // indirect github.com/koron/go-ssdp v0.0.4 // indirect github.com/leodido/go-urn v1.2.4 // indirect github.com/libp2p/go-buffer-pool v0.1.0 // indirect @@ -150,8 +150,7 @@ require ( github.com/quic-go/quic-go v0.33.0 // indirect github.com/quic-go/webtransport-go v0.5.3 // indirect github.com/raulk/go-watchdog 
v1.3.0 // indirect - github.com/russross/blackfriday/v2 v2.0.1 // indirect - github.com/shurcooL/sanitized_anchor_name v1.0.0 // indirect + github.com/russross/blackfriday/v2 v2.1.0 // indirect github.com/smartystreets/assertions v1.13.1 // indirect github.com/spaolacci/murmur3 v1.1.0 // indirect github.com/syndtr/goleveldb v1.0.1-0.20220721030215-126854af5e6d // indirect diff --git a/go.sum b/go.sum index 0375c025713..f12ab723392 100644 --- a/go.sum +++ b/go.sum @@ -72,8 +72,9 @@ github.com/coreos/go-systemd/v22 v22.1.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+ github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8iXXhfZs= github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= -github.com/cpuguy83/go-md2man/v2 v2.0.0 h1:EoUDS0afbrsXAZ9YQ9jdu/mZ2sXgT1/2yyNng4PGlyM= github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= +github.com/cpuguy83/go-md2man/v2 v2.0.2 h1:p1EgwI/C7NhT0JmVkwCD2ZBK8j4aeHQX2pMHHBfMQ6w= +github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/davecgh/go-spew v0.0.0-20171005155431-ecdeabc65495/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= @@ -384,30 +385,30 @@ github.com/multiformats/go-varint v0.0.7 h1:sWSGR+f/eu5ABZA2ZpYKBILXTTs9JWpdEM/n github.com/multiformats/go-varint v0.0.7/go.mod h1:r8PUYw/fD/SjBCiKOoDlGF6QawOELpZAu9eioSos/OU= github.com/multiversx/concurrent-map v0.1.4 h1:hdnbM8VE4b0KYJaGY5yJS2aNIW9TFFsUYwbO0993uPI= github.com/multiversx/concurrent-map v0.1.4/go.mod h1:8cWFRJDOrWHOTNSqgYCUvwT7c7eFQ4U2vKMOp4A/9+o= -github.com/multiversx/mx-chain-communication-go v1.0.13-0.20231129114230-d280af707381 h1:M4JNeubA+zq7NaH2LP5YsWUVeKn9hNL+HgSw2kqwWUc= -github.com/multiversx/mx-chain-communication-go v1.0.13-0.20231129114230-d280af707381/go.mod h1:n4E8BWIV0g3AcNGe1gf+vcjUC8A2QCJ4ARQSbiUDGrI= -github.com/multiversx/mx-chain-core-go v1.2.19-0.20231214115026-a1e7279b14f1 h1:8rz1ZpRAsWVxSEBy7PJIUStQMKiHs3I4mvpRmHUpsbI= -github.com/multiversx/mx-chain-core-go v1.2.19-0.20231214115026-a1e7279b14f1/go.mod h1:B5zU4MFyJezmEzCsAHE9YNULmGCm2zbPHvl9hazNxmE= -github.com/multiversx/mx-chain-crypto-go v1.2.10-0.20231129101537-ef355850e34b h1:TIE6it719ZIW0E1bFgPAgE+U3zPSkPfAloFYEIeOL3U= -github.com/multiversx/mx-chain-crypto-go v1.2.10-0.20231129101537-ef355850e34b/go.mod h1:Ap6p7QZFtwPlb++OvCG+85BfuZ+bLP/JtQp6EwjWJsI= -github.com/multiversx/mx-chain-es-indexer-go v1.4.18-0.20231228064619-e3b0caf29058 h1:6XH7ua4vUqhbE4NMzs8K63b7A/9KMO4H8XZfYjyy778= -github.com/multiversx/mx-chain-es-indexer-go v1.4.18-0.20231228064619-e3b0caf29058/go.mod h1:9BzrDTbIjruFXN6YcDOBsnOP0cUHhQobRUlmNOwkDME= -github.com/multiversx/mx-chain-logger-go v1.0.14-0.20231215125130-a3bed6e76040 h1:rsEflKFn5StRh0ADxElUkI/9wZV0Lbig+b0671LmjTk= -github.com/multiversx/mx-chain-logger-go v1.0.14-0.20231215125130-a3bed6e76040/go.mod h1:fH/fR/GEBsDjPkBoZDVJMoYo2HhlA7++DP6QfITJ1N8= -github.com/multiversx/mx-chain-scenario-go v1.2.2-0.20231129113427-ad3056f45296 h1:jDGGEubkiTJfEFcbErUYCYM2Z6wKapgZyGaICScpynk= -github.com/multiversx/mx-chain-scenario-go v1.2.2-0.20231129113427-ad3056f45296/go.mod h1:WocyahfHCC3oGILEVdRe7I4/+q/TLCORoTo1X4wGmF4= 
-github.com/multiversx/mx-chain-storage-go v1.0.15-0.20231213110622-e222ba96a9f4 h1:2RJ6T31pLN75l4xfhTicGZ+gVOPMxSGPip+O1XYVYac= -github.com/multiversx/mx-chain-storage-go v1.0.15-0.20231213110622-e222ba96a9f4/go.mod h1:ioCT2oHQ+TyHQYpgjxzlUdy7dCdv56+w5HnBg9z96eY= -github.com/multiversx/mx-chain-vm-common-go v1.5.10-0.20231228070003-ae14e1e0adfa h1:xdDeUC4yOfiUwctkYioYMjjigBZoZo5RZq1e5WoCVRs= -github.com/multiversx/mx-chain-vm-common-go v1.5.10-0.20231228070003-ae14e1e0adfa/go.mod h1:7jjGRykSfLeMs6iQdszlE0lGK2xp9/cctiVdeKbQLLM= -github.com/multiversx/mx-chain-vm-go v1.5.23-0.20231228064104-964359cb8dd3 h1:qfzeTPI2oSgxnw52KiVWc2fHMem6FZIkX1Azwy64098= -github.com/multiversx/mx-chain-vm-go v1.5.23-0.20231228064104-964359cb8dd3/go.mod h1:4kcpwq70UB3Clnc6Q0krGA8hgQ26JTQpmCP+4y5aiV0= -github.com/multiversx/mx-chain-vm-v1_2-go v1.2.65-0.20231228071026-eed2cb19c216 h1:CDSn4hgiGwoOSSLmajgOvjdoRxfJSXjEu/CfXiqihwo= -github.com/multiversx/mx-chain-vm-v1_2-go v1.2.65-0.20231228071026-eed2cb19c216/go.mod h1:h87SKR/p66XP0Er2Mx2KfjzS6mLmW6l3tDWyO1oNr94= -github.com/multiversx/mx-chain-vm-v1_3-go v1.3.66-0.20231228071108-6b89bcebab14 h1:7r2zQiAfqGjN7U8j5obXIoRSh+vnoupBhxBgQGUA2ck= -github.com/multiversx/mx-chain-vm-v1_3-go v1.3.66-0.20231228071108-6b89bcebab14/go.mod h1:MnpQOi/P4K744ZJl8pQksulsHazmN6YRzJ4amgtZ0OQ= -github.com/multiversx/mx-chain-vm-v1_4-go v1.4.92-0.20231228071246-c1b45eae5955 h1:5b0+UeSbcyh+9z9x/6Nql3cYwaNWzTwj+KIfH4YaASs= -github.com/multiversx/mx-chain-vm-v1_4-go v1.4.92-0.20231228071246-c1b45eae5955/go.mod h1:+DLltGV0h3/H9bJaz01JyeapKNki3Rh4o5VGpjd2ZNc= +github.com/multiversx/mx-chain-communication-go v1.0.13-0.20240126121117-627adccf10ad h1:izxTyKCxvT7z2mhXCWAZibSxwRVgLmq/kDovs4Nx/6Y= +github.com/multiversx/mx-chain-communication-go v1.0.13-0.20240126121117-627adccf10ad/go.mod h1:n4E8BWIV0g3AcNGe1gf+vcjUC8A2QCJ4ARQSbiUDGrI= +github.com/multiversx/mx-chain-core-go v1.2.19-0.20240222081523-011c96ab2548 h1:WQoVgQG9YWiYM5Q3MmnbnxeoQkfHr63iFJZScFYsMxk= +github.com/multiversx/mx-chain-core-go v1.2.19-0.20240222081523-011c96ab2548/go.mod h1:B5zU4MFyJezmEzCsAHE9YNULmGCm2zbPHvl9hazNxmE= +github.com/multiversx/mx-chain-crypto-go v1.2.10-0.20231206065052-38843c1f1479 h1:beVIhs5ysylwNplQ/bZ0h5DoDlqKNWgpWE/NMHHNmAw= +github.com/multiversx/mx-chain-crypto-go v1.2.10-0.20231206065052-38843c1f1479/go.mod h1:Ap6p7QZFtwPlb++OvCG+85BfuZ+bLP/JtQp6EwjWJsI= +github.com/multiversx/mx-chain-es-indexer-go v1.4.19-0.20240129150813-a772c480d33a h1:mOMUhbsjTq7n5oAv4KkVnL67ngS0+wkqmkiv1XJfBIY= +github.com/multiversx/mx-chain-es-indexer-go v1.4.19-0.20240129150813-a772c480d33a/go.mod h1:3aSGRJNvfUuPQkZUGHWuF11rPPxphsKGuAuIB+eD3is= +github.com/multiversx/mx-chain-logger-go v1.0.14-0.20240129144507-d00e967c890c h1:QIUOn8FgNRa5cir4BCWHZi/Qcr6Gg0eGNhns4+jy6+k= +github.com/multiversx/mx-chain-logger-go v1.0.14-0.20240129144507-d00e967c890c/go.mod h1:fH/fR/GEBsDjPkBoZDVJMoYo2HhlA7++DP6QfITJ1N8= +github.com/multiversx/mx-chain-scenario-go v1.4.3-0.20240212160120-cc32d1580157 h1:ydzN3f+Y7H0InXuxAcNUSyVc+omNYL8uYtLqVzqaaX4= +github.com/multiversx/mx-chain-scenario-go v1.4.3-0.20240212160120-cc32d1580157/go.mod h1:ndk45i9J9McuCJpTcgiaK4ocd0yhnBBCPrlFwO6GRcs= +github.com/multiversx/mx-chain-storage-go v1.0.15-0.20240129144933-b1c0d642d7f8 h1:/EYv/HGX0OKbeNFt667J0yZRtuJiZH0lEK8YtobuH/c= +github.com/multiversx/mx-chain-storage-go v1.0.15-0.20240129144933-b1c0d642d7f8/go.mod h1:zl1A6teNe39T8yhdZlkX3ckm5aLYrMIJJZ6Ord1E71M= +github.com/multiversx/mx-chain-vm-common-go v1.5.12-0.20240305123516-2231c71162a2 
h1:sBH1Zf5jdMqS+1LDfXBmsIdmol8CFloPzjDCtmBZGEc= +github.com/multiversx/mx-chain-vm-common-go v1.5.12-0.20240305123516-2231c71162a2/go.mod h1:OUyhCFqZKqUk1uaPsenyPDwO1830SlHNDU7Q7b6CBVI= +github.com/multiversx/mx-chain-vm-go v1.5.28-0.20240307121727-b8d371971d9a h1:QvIC6R5sf0koeSwAs+Ye8J+CjNkAdaosTMSNTVBB8sA= +github.com/multiversx/mx-chain-vm-go v1.5.28-0.20240307121727-b8d371971d9a/go.mod h1:Xs0xFsPv+c1p8pwurLV7VBS7bEpIN/0jZrCwXVU26zw= +github.com/multiversx/mx-chain-vm-v1_2-go v1.2.66-0.20240308085208-3b5a4ab4dd34 h1:aLJhYiDBtWW4yjizhvQgTU00KfkK3oL3GnEh7pVUPRs= +github.com/multiversx/mx-chain-vm-v1_2-go v1.2.66-0.20240308085208-3b5a4ab4dd34/go.mod h1:8uugq3HUeDiE6G4AS3F8/B3zA1Pabzbl7SSD6Cebwz8= +github.com/multiversx/mx-chain-vm-v1_3-go v1.3.67-0.20240308082903-132f9002736b h1:iDDarqnGFZBXxqpaPWp8ePOqhG5G3DeAoopGgRLteu0= +github.com/multiversx/mx-chain-vm-v1_3-go v1.3.67-0.20240308082903-132f9002736b/go.mod h1:4uezxguZiX42kUaYMK/x46LLbgpYqn/iQXbcGM7zdM0= +github.com/multiversx/mx-chain-vm-v1_4-go v1.4.96-0.20240308082831-f05004a05b35 h1:yRfY/Mj1CXPoGd21F3y84cqBIKsktSgPuxz/5a7FA3w= +github.com/multiversx/mx-chain-vm-v1_4-go v1.4.96-0.20240308082831-f05004a05b35/go.mod h1:Nvanb5BZVhqnFFlWUtn7PQ/GIsl72zPVcMEw/ZvYiQA= github.com/multiversx/mx-components-big-int v1.0.0 h1:Wkr8lSzK2nDqixOrrBa47VNuqdhV1m/aJhaP1EMaiS8= github.com/multiversx/mx-components-big-int v1.0.0/go.mod h1:maIEMgHlNE2u78JaDD0oLzri+ShgU4okHfzP3LWGdQM= github.com/multiversx/protobuf v1.3.2 h1:RaNkxvGTGbA0lMcnHAN24qE1G1i+Xs5yHA6MDvQ4mSM= @@ -486,8 +487,9 @@ github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTE github.com/rogpeppe/go-internal v1.8.0/go.mod h1:WmiCO8CzOY8rg0OYDC4/i/2WRWAB6poM+XZ2dLUbcbE= github.com/rogpeppe/go-internal v1.9.0 h1:73kH8U+JUqXU8lRuOHeVHaa/SZPifC7BkcraZVejAe8= github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= -github.com/russross/blackfriday/v2 v2.0.1 h1:lPqVAte+HuHNfhJ/0LC98ESWRz8afy9tM/0RK8m9o+Q= github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk= +github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= github.com/shirou/gopsutil v3.21.11+incompatible h1:+1+c1VGhc88SSonWP6foOcLhvnKlUeu/erjjvaPEYiI= github.com/shirou/gopsutil v3.21.11+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA= @@ -512,7 +514,6 @@ github.com/shurcooL/notifications v0.0.0-20181007000457-627ab5aea122/go.mod h1:b github.com/shurcooL/octicon v0.0.0-20181028054416-fa4f57f9efb2/go.mod h1:eWdoE5JD4R5UVWDucdOPg1g2fqQRq78IQa9zlOV1vpQ= github.com/shurcooL/reactions v0.0.0-20181006231557-f2e0b4ca5b82/go.mod h1:TCR1lToEk4d2s07G3XGfz2QrgHXg4RJBvjrOozvoWfk= github.com/shurcooL/sanitized_anchor_name v0.0.0-20170918181015-86672fcb3f95/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= -github.com/shurcooL/sanitized_anchor_name v1.0.0 h1:PdmoCO6wvbs+7yrJyMORt4/BmY5IYyJwS/kOiWx8mHo= github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= github.com/shurcooL/users v0.0.0-20180125191416-49c67e49c537/go.mod h1:QJTqeLYEDaXHZDBsXlPCDqdhQuJkuw4NOtaxYe3xii4= github.com/shurcooL/webdavfs v0.0.0-20170829043945-18c3829fa133/go.mod h1:hKmq5kWdCj2z2KEozexVbfEZIWiTjhE0+UjmZgPqehw= diff --git a/heartbeat/interface.go b/heartbeat/interface.go index 
12eb29a5d61..3652170d8ba 100644 --- a/heartbeat/interface.go +++ b/heartbeat/interface.go @@ -83,6 +83,7 @@ type ManagedPeersHolder interface { IncrementRoundsWithoutReceivedMessages(pkBytes []byte) ResetRoundsWithoutReceivedMessages(pkBytes []byte, pid core.PeerID) GetManagedKeysByCurrentNode() map[string]crypto.PrivateKey + GetLoadedKeysByCurrentNode() [][]byte IsKeyManagedByCurrentNode(pkBytes []byte) bool IsKeyRegistered(pkBytes []byte) bool IsPidManagedByCurrentNode(pid core.PeerID) bool diff --git a/heartbeat/monitor/crossShardPeerTopicNotifier.go b/heartbeat/monitor/crossShardPeerTopicNotifier.go deleted file mode 100644 index aa25995fc71..00000000000 --- a/heartbeat/monitor/crossShardPeerTopicNotifier.go +++ /dev/null @@ -1,111 +0,0 @@ -package monitor - -import ( - "fmt" - "strconv" - "strings" - - "github.com/multiversx/mx-chain-core-go/core" - "github.com/multiversx/mx-chain-core-go/core/check" - "github.com/multiversx/mx-chain-go/common" - "github.com/multiversx/mx-chain-go/heartbeat" - "github.com/multiversx/mx-chain-go/sharding" -) - -const topicSeparator = "_" - -// ArgsCrossShardPeerTopicNotifier represents the arguments for the cross shard peer topic notifier -type ArgsCrossShardPeerTopicNotifier struct { - ShardCoordinator sharding.Coordinator - PeerShardMapper heartbeat.PeerShardMapper -} - -type crossShardPeerTopicNotifier struct { - shardCoordinator sharding.Coordinator - peerShardMapper heartbeat.PeerShardMapper -} - -// NewCrossShardPeerTopicNotifier create a new cross shard peer topic notifier instance -func NewCrossShardPeerTopicNotifier(args ArgsCrossShardPeerTopicNotifier) (*crossShardPeerTopicNotifier, error) { - err := checkArgsCrossShardPeerTopicNotifier(args) - if err != nil { - return nil, err - } - - notifier := &crossShardPeerTopicNotifier{ - shardCoordinator: args.ShardCoordinator, - peerShardMapper: args.PeerShardMapper, - } - - return notifier, nil -} - -func checkArgsCrossShardPeerTopicNotifier(args ArgsCrossShardPeerTopicNotifier) error { - if check.IfNil(args.PeerShardMapper) { - return heartbeat.ErrNilPeerShardMapper - } - if check.IfNil(args.ShardCoordinator) { - return heartbeat.ErrNilShardCoordinator - } - - return nil -} - -// NewPeerFound is called whenever a new peer was found -func (notifier *crossShardPeerTopicNotifier) NewPeerFound(pid core.PeerID, topic string) { - splt := strings.Split(topic, topicSeparator) - if len(splt) != 3 { - // not a cross shard peer or the topic is global - return - } - - shardID1, err := notifier.getShardID(splt[1]) - if err != nil { - log.Error("failed to extract first shard for topic", "topic", topic, "error", err.Error()) - return - } - - shardID2, err := notifier.getShardID(splt[2]) - if err != nil { - log.Error("failed to extract second shard for topic", "topic", topic, "error", err.Error()) - return - } - if shardID1 == shardID2 { - return - } - notifier.checkAndAddShardID(pid, shardID1, topic, shardID2) - notifier.checkAndAddShardID(pid, shardID2, topic, shardID1) -} - -// TODO make a standalone component out of this -func (notifier *crossShardPeerTopicNotifier) getShardID(data string) (uint32, error) { - if data == common.MetachainTopicIdentifier { - return common.MetachainShardId, nil - } - val, err := strconv.Atoi(data) - if err != nil { - return 0, err - } - if uint32(val) >= notifier.shardCoordinator.NumberOfShards() || val < 0 { - return 0, fmt.Errorf("invalid value in crossShardPeerTopicNotifier.getShardID %d", val) - } - - return uint32(val), nil -} - -func (notifier *crossShardPeerTopicNotifier) 
checkAndAddShardID(pid core.PeerID, shardID1 uint32, topic string, shardID2 uint32) { - if shardID1 != notifier.shardCoordinator.SelfId() { - return - } - - log.Trace("crossShardPeerTopicNotifier.NewPeerFound found a cross shard peer", - "topic", topic, - "pid", pid.Pretty(), - "shard", shardID2) - notifier.peerShardMapper.PutPeerIdShardId(pid, shardID2) -} - -// IsInterfaceNil returns true if there is no value under the interface -func (notifier *crossShardPeerTopicNotifier) IsInterfaceNil() bool { - return notifier == nil -} diff --git a/heartbeat/monitor/crossShardPeerTopicNotifier_test.go b/heartbeat/monitor/crossShardPeerTopicNotifier_test.go deleted file mode 100644 index e4951586852..00000000000 --- a/heartbeat/monitor/crossShardPeerTopicNotifier_test.go +++ /dev/null @@ -1,273 +0,0 @@ -package monitor - -import ( - "math" - "testing" - - "github.com/multiversx/mx-chain-core-go/core" - "github.com/multiversx/mx-chain-core-go/core/check" - "github.com/multiversx/mx-chain-go/common" - "github.com/multiversx/mx-chain-go/heartbeat" - "github.com/multiversx/mx-chain-go/process/mock" - "github.com/multiversx/mx-chain-go/testscommon" - "github.com/stretchr/testify/assert" -) - -func createMockArgsCrossShardPeerTopicNotifier() ArgsCrossShardPeerTopicNotifier { - return ArgsCrossShardPeerTopicNotifier{ - ShardCoordinator: &testscommon.ShardsCoordinatorMock{ - NoShards: 3, - CurrentShard: 1, - }, - PeerShardMapper: &mock.PeerShardMapperStub{}, - } -} - -func TestNewCrossShardPeerTopicNotifier(t *testing.T) { - t.Parallel() - - t.Run("nil sharding coordinator should error", func(t *testing.T) { - t.Parallel() - - args := createMockArgsCrossShardPeerTopicNotifier() - args.ShardCoordinator = nil - - notifier, err := NewCrossShardPeerTopicNotifier(args) - assert.True(t, check.IfNil(notifier)) - assert.Equal(t, heartbeat.ErrNilShardCoordinator, err) - }) - t.Run("nil peer shard mapper should error", func(t *testing.T) { - t.Parallel() - - args := createMockArgsCrossShardPeerTopicNotifier() - args.PeerShardMapper = nil - - notifier, err := NewCrossShardPeerTopicNotifier(args) - assert.True(t, check.IfNil(notifier)) - assert.Equal(t, heartbeat.ErrNilPeerShardMapper, err) - }) - t.Run("should work", func(t *testing.T) { - t.Parallel() - - args := createMockArgsCrossShardPeerTopicNotifier() - - notifier, err := NewCrossShardPeerTopicNotifier(args) - assert.False(t, check.IfNil(notifier)) - assert.Nil(t, err) - }) -} - -func TestCrossShardPeerTopicNotifier_NewPeerFound(t *testing.T) { - t.Parallel() - - testTopic := "test" - t.Run("global topic should not notice", func(t *testing.T) { - t.Parallel() - - args := createMockArgsCrossShardPeerTopicNotifier() - args.PeerShardMapper = &mock.PeerShardMapperStub{ - PutPeerIdShardIdCalled: func(pid core.PeerID, shardId uint32) { - assert.Fail(t, "should have not called PutPeerIdShardId") - }, - } - - notifier, _ := NewCrossShardPeerTopicNotifier(args) - notifier.NewPeerFound("pid", "random topic") - }) - t.Run("intra-shard topic should not notice", func(t *testing.T) { - t.Parallel() - - args := createMockArgsCrossShardPeerTopicNotifier() - args.PeerShardMapper = &mock.PeerShardMapperStub{ - PutPeerIdShardIdCalled: func(pid core.PeerID, shardId uint32) { - assert.Fail(t, "should have not called PutPeerIdShardId") - }, - } - - notifier, _ := NewCrossShardPeerTopicNotifier(args) - topic := testTopic + core.CommunicationIdentifierBetweenShards(0, 0) - notifier.NewPeerFound("pid", topic) - }) - t.Run("cross-shard topic but not relevant to current node should not 
notice", func(t *testing.T) { - t.Parallel() - - args := createMockArgsCrossShardPeerTopicNotifier() - args.PeerShardMapper = &mock.PeerShardMapperStub{ - PutPeerIdShardIdCalled: func(pid core.PeerID, shardId uint32) { - assert.Fail(t, "should have not called PutPeerIdShardId") - }, - } - - notifier, _ := NewCrossShardPeerTopicNotifier(args) - topic := testTopic + core.CommunicationIdentifierBetweenShards(0, 2) - notifier.NewPeerFound("pid", topic) - }) - t.Run("first shard ID is a NaN should not notice", func(t *testing.T) { - t.Parallel() - - args := createMockArgsCrossShardPeerTopicNotifier() - args.PeerShardMapper = &mock.PeerShardMapperStub{ - PutPeerIdShardIdCalled: func(pid core.PeerID, shardId uint32) { - assert.Fail(t, "should have not called PutPeerIdShardId") - }, - } - - notifier, _ := NewCrossShardPeerTopicNotifier(args) - topic := testTopic + "_NaN_1" - notifier.NewPeerFound("pid", topic) - }) - t.Run("second shard ID is a NaN should not notice", func(t *testing.T) { - t.Parallel() - - args := createMockArgsCrossShardPeerTopicNotifier() - args.PeerShardMapper = &mock.PeerShardMapperStub{ - PutPeerIdShardIdCalled: func(pid core.PeerID, shardId uint32) { - assert.Fail(t, "should have not called PutPeerIdShardId") - }, - } - - notifier, _ := NewCrossShardPeerTopicNotifier(args) - topic := testTopic + "_1_NaN" - notifier.NewPeerFound("pid", topic) - }) - t.Run("second shard ID is a negative value should not notice", func(t *testing.T) { - t.Parallel() - - args := createMockArgsCrossShardPeerTopicNotifier() - args.PeerShardMapper = &mock.PeerShardMapperStub{ - PutPeerIdShardIdCalled: func(pid core.PeerID, shardId uint32) { - assert.Fail(t, "should have not called PutPeerIdShardId") - }, - } - - notifier, _ := NewCrossShardPeerTopicNotifier(args) - topic := testTopic + "_1_-1" - notifier.NewPeerFound("pid", topic) - }) - t.Run("second shard ID is an out of range value should not notice", func(t *testing.T) { - t.Parallel() - - args := createMockArgsCrossShardPeerTopicNotifier() - args.PeerShardMapper = &mock.PeerShardMapperStub{ - PutPeerIdShardIdCalled: func(pid core.PeerID, shardId uint32) { - assert.Fail(t, "should have not called PutPeerIdShardId") - }, - } - - notifier, _ := NewCrossShardPeerTopicNotifier(args) - topic := testTopic + "_1_4" - notifier.NewPeerFound("pid", topic) - }) - t.Run("same shard IDs should not notice", func(t *testing.T) { - t.Parallel() - - args := createMockArgsCrossShardPeerTopicNotifier() - args.PeerShardMapper = &mock.PeerShardMapperStub{ - PutPeerIdShardIdCalled: func(pid core.PeerID, shardId uint32) { - assert.Fail(t, "should have not called PutPeerIdShardId") - }, - } - - notifier, _ := NewCrossShardPeerTopicNotifier(args) - topic := testTopic + "_0_0" - notifier.NewPeerFound("pid", topic) - }) - t.Run("cross-shard between 0 and 1 should notice", func(t *testing.T) { - t.Parallel() - - expectedPid := core.PeerID("pid") - notifiedShardID := uint32(math.MaxUint32) - args := createMockArgsCrossShardPeerTopicNotifier() - args.PeerShardMapper = &mock.PeerShardMapperStub{ - PutPeerIdShardIdCalled: func(pid core.PeerID, shardId uint32) { - assert.Equal(t, pid, expectedPid) - notifiedShardID = shardId - }, - } - - notifier, _ := NewCrossShardPeerTopicNotifier(args) - topic := testTopic + core.CommunicationIdentifierBetweenShards(0, 1) - notifier.NewPeerFound("pid", topic) - assert.Equal(t, uint32(0), notifiedShardID) - }) - t.Run("cross-shard between 1 and 2 should notice", func(t *testing.T) { - t.Parallel() - - expectedPid := core.PeerID("pid") - 
notifiedShardID := uint32(math.MaxUint32) - args := createMockArgsCrossShardPeerTopicNotifier() - args.PeerShardMapper = &mock.PeerShardMapperStub{ - PutPeerIdShardIdCalled: func(pid core.PeerID, shardId uint32) { - assert.Equal(t, pid, expectedPid) - notifiedShardID = shardId - }, - } - - notifier, _ := NewCrossShardPeerTopicNotifier(args) - topic := testTopic + core.CommunicationIdentifierBetweenShards(1, 2) - notifier.NewPeerFound("pid", topic) - assert.Equal(t, uint32(2), notifiedShardID) - }) - t.Run("cross-shard between 1 and META should notice", func(t *testing.T) { - t.Parallel() - - expectedPid := core.PeerID("pid") - notifiedShardID := uint32(math.MaxUint32) - args := createMockArgsCrossShardPeerTopicNotifier() - args.PeerShardMapper = &mock.PeerShardMapperStub{ - PutPeerIdShardIdCalled: func(pid core.PeerID, shardId uint32) { - assert.Equal(t, pid, expectedPid) - notifiedShardID = shardId - }, - } - - notifier, _ := NewCrossShardPeerTopicNotifier(args) - topic := testTopic + core.CommunicationIdentifierBetweenShards(1, common.MetachainShardId) - notifier.NewPeerFound("pid", topic) - assert.Equal(t, common.MetachainShardId, notifiedShardID) - }) - t.Run("cross-shard between META and 1 should notice", func(t *testing.T) { - t.Parallel() - - expectedPid := core.PeerID("pid") - notifiedShardID := uint32(math.MaxUint32) - args := createMockArgsCrossShardPeerTopicNotifier() - args.ShardCoordinator = &testscommon.ShardsCoordinatorMock{ - NoShards: 3, - CurrentShard: common.MetachainShardId, - } - args.PeerShardMapper = &mock.PeerShardMapperStub{ - PutPeerIdShardIdCalled: func(pid core.PeerID, shardId uint32) { - assert.Equal(t, pid, expectedPid) - notifiedShardID = shardId - }, - } - - notifier, _ := NewCrossShardPeerTopicNotifier(args) - topic := testTopic + core.CommunicationIdentifierBetweenShards(common.MetachainShardId, 1) - notifier.NewPeerFound("pid", topic) - assert.Equal(t, uint32(1), notifiedShardID) - }) -} - -func BenchmarkCrossShardPeerTopicNotifier_NewPeerFound(b *testing.B) { - args := createMockArgsCrossShardPeerTopicNotifier() - notifier, _ := NewCrossShardPeerTopicNotifier(args) - - for i := 0; i < b.N; i++ { - switch i % 6 { - case 0: - notifier.NewPeerFound("pid", "global") - case 2: - notifier.NewPeerFound("pid", "intrashard_1") - case 3: - notifier.NewPeerFound("pid", "crossshard_1_2") - case 4: - notifier.NewPeerFound("pid", "crossshard_1_META") - case 5: - notifier.NewPeerFound("pid", "crossshard_META_1") - case 6: - notifier.NewPeerFound("pid", "crossshard_2_META") - } - } -} diff --git a/integrationTests/chainSimulator/interface.go b/integrationTests/chainSimulator/interface.go new file mode 100644 index 00000000000..6d66b9d62c0 --- /dev/null +++ b/integrationTests/chainSimulator/interface.go @@ -0,0 +1,24 @@ +package chainSimulator + +import ( + "math/big" + + "github.com/multiversx/mx-chain-core-go/data/api" + "github.com/multiversx/mx-chain-core-go/data/transaction" + "github.com/multiversx/mx-chain-go/node/chainSimulator/dtos" + "github.com/multiversx/mx-chain-go/node/chainSimulator/process" +) + +// ChainSimulator defines the operations for an entity that can simulate operations of a chain +type ChainSimulator interface { + GenerateBlocks(numOfBlocks int) error + GenerateBlocksUntilEpochIsReached(targetEpoch int32) error + AddValidatorKeys(validatorsPrivateKeys [][]byte) error + GetNodeHandler(shardID uint32) process.NodeHandler + SendTxAndGenerateBlockTilTxIsExecuted(txToSend *transaction.Transaction, maxNumOfBlockToGenerateWhenExecutingTx int) 
(*transaction.ApiTransactionResult, error) + SendTxsAndGenerateBlocksTilAreExecuted(txsToSend []*transaction.Transaction, maxNumOfBlocksToGenerateWhenExecutingTx int) ([]*transaction.ApiTransactionResult, error) + SetStateMultiple(stateSlice []*dtos.AddressState) error + GenerateAndMintWalletAddress(targetShardID uint32, value *big.Int) (dtos.WalletAddress, error) + GetInitialWalletKeys() *dtos.InitialWalletKeys + GetAccount(address dtos.WalletAddress) (api.AccountResponse, error) +} diff --git a/integrationTests/chainSimulator/staking/delegation_test.go b/integrationTests/chainSimulator/staking/delegation_test.go new file mode 100644 index 00000000000..b7e2e628d98 --- /dev/null +++ b/integrationTests/chainSimulator/staking/delegation_test.go @@ -0,0 +1,1725 @@ +package staking + +import ( + "crypto/rand" + "encoding/hex" + "fmt" + "math/big" + "strings" + "testing" + "time" + + "github.com/multiversx/mx-chain-core-go/core" + "github.com/multiversx/mx-chain-core-go/data/transaction" + "github.com/multiversx/mx-chain-core-go/data/validator" + dataVm "github.com/multiversx/mx-chain-core-go/data/vm" + "github.com/multiversx/mx-chain-crypto-go/signing" + "github.com/multiversx/mx-chain-crypto-go/signing/mcl" + mclsig "github.com/multiversx/mx-chain-crypto-go/signing/mcl/singlesig" + "github.com/multiversx/mx-chain-go/common" + "github.com/multiversx/mx-chain-go/config" + chainSimulatorIntegrationTests "github.com/multiversx/mx-chain-go/integrationTests/chainSimulator" + "github.com/multiversx/mx-chain-go/node/chainSimulator" + "github.com/multiversx/mx-chain-go/node/chainSimulator/components/api" + "github.com/multiversx/mx-chain-go/node/chainSimulator/configs" + "github.com/multiversx/mx-chain-go/node/chainSimulator/dtos" + chainSimulatorProcess "github.com/multiversx/mx-chain-go/node/chainSimulator/process" + "github.com/multiversx/mx-chain-go/process" + "github.com/multiversx/mx-chain-go/vm" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +const mockBLSSignature = "010101" +const gasLimitForStakeOperation = 50_000_000 +const gasLimitForConvertOperation = 510_000_000 +const gasLimitForDelegationContractCreationOperation = 500_000_000 +const gasLimitForAddNodesOperation = 500_000_000 +const gasLimitForUndelegateOperation = 500_000_000 +const gasLimitForMergeOperation = 600_000_000 +const gasLimitForDelegate = 12_000_000 +const gasLimitForUnBond = 12_000_000 +const minGasPrice = 1000000000 +const txVersion = 1 +const mockTxSignature = "sig" +const queuedStatus = "queued" +const stakedStatus = "staked" +const notStakedStatus = "notStaked" +const unStakedStatus = "unStaked" +const auctionStatus = "auction" +const okReturnCode = "ok" +const maxCap = "00" // no cap +const hexServiceFee = "0ea1" // 37.45% +const walletAddressBytesLen = 32 + +var initialDelegationValue = big.NewInt(0).Mul(oneEGLD, big.NewInt(1250)) +var zeroValue = big.NewInt(0) +var oneEGLD = big.NewInt(1000000000000000000) +var minimumStakeValue = big.NewInt(0).Mul(oneEGLD, big.NewInt(2500)) + +// Test description: +// Test that delegation contract created with MakeNewContractFromValidatorData works properly +// Also check that delegate and undelegate works properly and the top-up remain the same if every delegator undelegates. 
+// Test that the top-up from normal stake will be transferred after creating the contract and will be used in auction list computing + +// Internal test scenario #10 +func TestChainSimulator_MakeNewContractFromValidatorData(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + + roundDurationInMillis := uint64(6000) + roundsPerEpoch := core.OptionalUint64{ + HasValue: true, + Value: 30, + } + + // Test scenario done in staking 3.5 phase (staking v4 is not active) + // 1. Add a new validator private key in the multi key handler + // 2. Set the initial state for the owner and the 2 delegators + // 3. Do a stake transaction for the validator key and test that the new key is on queue and topup is 500 + // 4. Execute the MakeNewContractFromValidatorData transaction and test that the key is on queue and topup is 500 + // 5. Execute 2 delegation operations of 100 EGLD each, check the topup is 700 + // 6. Execute 2 unDelegate operations of 100 EGLD each, check the topup is back to 500 + t.Run("staking ph 4 is not active", func(t *testing.T) { + cs, err := chainSimulator.NewChainSimulator(chainSimulator.ArgsChainSimulator{ + BypassTxSignatureCheck: false, + TempDir: t.TempDir(), + PathToInitialConfig: defaultPathToInitialConfig, + NumOfShards: 3, + GenesisTimestamp: time.Now().Unix(), + RoundDurationInMillis: roundDurationInMillis, + RoundsPerEpoch: roundsPerEpoch, + ApiInterface: api.NewNoApiInterface(), + MinNodesPerShard: 3, + MetaChainMinNodes: 3, + NumNodesWaitingListMeta: 3, + NumNodesWaitingListShard: 3, + AlterConfigsFunction: func(cfg *config.Configs) { + cfg.EpochConfig.EnableEpochs.StakingV4Step1EnableEpoch = 100 + cfg.EpochConfig.EnableEpochs.StakingV4Step2EnableEpoch = 101 + cfg.EpochConfig.EnableEpochs.StakingV4Step3EnableEpoch = 102 + + cfg.EpochConfig.EnableEpochs.MaxNodesChangeEnableEpoch[2].EpochEnable = 102 + }, + }) + require.Nil(t, err) + require.NotNil(t, cs) + + defer cs.Close() + + testChainSimulatorMakeNewContractFromValidatorData(t, cs, 1) + }) + + // Test scenario done in staking v4 phase step 1 + // 1. Add a new validator private key in the multi key handler + // 2. Set the initial state for the owner and the 2 delegators + // 3. Do a stake transaction for the validator key and test that the new key is on auction list and topup is 500 + // 4. Execute the MakeNewContractFromValidatorData transaction and test that the key is on auction list and topup is 500 + // 5. Execute 2 delegation operations of 100 EGLD each, check the topup is 700 + // 6. 
Execute 2 unDelegate operations of 100 EGLD each, check the topup is back to 500 + t.Run("staking ph 4 step 1 is active", func(t *testing.T) { + cs, err := chainSimulator.NewChainSimulator(chainSimulator.ArgsChainSimulator{ + BypassTxSignatureCheck: false, + TempDir: t.TempDir(), + PathToInitialConfig: defaultPathToInitialConfig, + NumOfShards: 3, + GenesisTimestamp: time.Now().Unix(), + RoundDurationInMillis: roundDurationInMillis, + RoundsPerEpoch: roundsPerEpoch, + ApiInterface: api.NewNoApiInterface(), + MinNodesPerShard: 3, + MetaChainMinNodes: 3, + NumNodesWaitingListMeta: 3, + NumNodesWaitingListShard: 3, + AlterConfigsFunction: func(cfg *config.Configs) { + cfg.EpochConfig.EnableEpochs.StakingV4Step1EnableEpoch = 2 + cfg.EpochConfig.EnableEpochs.StakingV4Step2EnableEpoch = 3 + cfg.EpochConfig.EnableEpochs.StakingV4Step3EnableEpoch = 4 + + cfg.EpochConfig.EnableEpochs.MaxNodesChangeEnableEpoch[2].EpochEnable = 4 + }, + }) + require.Nil(t, err) + require.NotNil(t, cs) + + defer cs.Close() + + testChainSimulatorMakeNewContractFromValidatorData(t, cs, 2) + }) + + // Test scenario done in staking v4 phase step 2 + // 1. Add a new validator private key in the multi key handler + // 2. Set the initial state for the owner and the 2 delegators + // 3. Do a stake transaction for the validator key and test that the new key is on auction list and topup is 500 + // 4. Execute the MakeNewContractFromValidatorData transaction and test that the key is on auction list and topup is 500 + // 5. Execute 2 delegation operations of 100 EGLD each, check the topup is 700 + // 6. Execute 2 unDelegate operations of 100 EGLD each, check the topup is back to 500 + t.Run("staking ph 4 step 2 is active", func(t *testing.T) { + cs, err := chainSimulator.NewChainSimulator(chainSimulator.ArgsChainSimulator{ + BypassTxSignatureCheck: false, + TempDir: t.TempDir(), + PathToInitialConfig: defaultPathToInitialConfig, + NumOfShards: 3, + GenesisTimestamp: time.Now().Unix(), + RoundDurationInMillis: roundDurationInMillis, + RoundsPerEpoch: roundsPerEpoch, + ApiInterface: api.NewNoApiInterface(), + MinNodesPerShard: 3, + MetaChainMinNodes: 3, + NumNodesWaitingListMeta: 3, + NumNodesWaitingListShard: 3, + AlterConfigsFunction: func(cfg *config.Configs) { + cfg.EpochConfig.EnableEpochs.StakingV4Step1EnableEpoch = 2 + cfg.EpochConfig.EnableEpochs.StakingV4Step2EnableEpoch = 3 + cfg.EpochConfig.EnableEpochs.StakingV4Step3EnableEpoch = 4 + + cfg.EpochConfig.EnableEpochs.MaxNodesChangeEnableEpoch[2].EpochEnable = 4 + }, + }) + require.Nil(t, err) + require.NotNil(t, cs) + + defer cs.Close() + + testChainSimulatorMakeNewContractFromValidatorData(t, cs, 3) + }) + + // Test scenario done in staking v4 phase step 3 + // 1. Add a new validator private key in the multi key handler + // 2. Set the initial state for the owner and the 2 delegators + // 3. Do a stake transaction for the validator key and test that the new key is on auction list and topup is 500 + // 4. Execute the MakeNewContractFromValidatorData transaction and test that the key is on auction list and topup is 500 + // 5. Execute 2 delegation operations of 100 EGLD each, check the topup is 700 + // 6. 
Execute 2 unDelegate operations of 100 EGLD each, check the topup is back to 500 + t.Run("staking ph 4 step 3 is active", func(t *testing.T) { + cs, err := chainSimulator.NewChainSimulator(chainSimulator.ArgsChainSimulator{ + BypassTxSignatureCheck: false, + TempDir: t.TempDir(), + PathToInitialConfig: defaultPathToInitialConfig, + NumOfShards: 3, + GenesisTimestamp: time.Now().Unix(), + RoundDurationInMillis: roundDurationInMillis, + RoundsPerEpoch: roundsPerEpoch, + ApiInterface: api.NewNoApiInterface(), + MinNodesPerShard: 3, + MetaChainMinNodes: 3, + NumNodesWaitingListMeta: 3, + NumNodesWaitingListShard: 3, + AlterConfigsFunction: func(cfg *config.Configs) { + cfg.EpochConfig.EnableEpochs.StakingV4Step1EnableEpoch = 2 + cfg.EpochConfig.EnableEpochs.StakingV4Step2EnableEpoch = 3 + cfg.EpochConfig.EnableEpochs.StakingV4Step3EnableEpoch = 4 + + cfg.EpochConfig.EnableEpochs.MaxNodesChangeEnableEpoch[2].EpochEnable = 4 + }, + }) + require.Nil(t, err) + require.NotNil(t, cs) + + defer cs.Close() + + testChainSimulatorMakeNewContractFromValidatorData(t, cs, 4) + }) +} + +func testChainSimulatorMakeNewContractFromValidatorData(t *testing.T, cs chainSimulatorIntegrationTests.ChainSimulator, targetEpoch int32) { + err := cs.GenerateBlocksUntilEpochIsReached(targetEpoch) + require.Nil(t, err) + + log.Info("Step 1. Add a new validator private key in the multi key handler") + privateKey, blsKeys, err := chainSimulator.GenerateBlsPrivateKeys(1) + require.Nil(t, err) + + err = cs.AddValidatorKeys(privateKey) + require.Nil(t, err) + metachainNode := cs.GetNodeHandler(core.MetachainShardId) + + log.Info("Step 2. Set the initial state for the owner and the 2 delegators") + mintValue := big.NewInt(3010) + mintValue = mintValue.Mul(oneEGLD, mintValue) + + validatorOwner, err := cs.GenerateAndMintWalletAddress(core.AllShardId, mintValue) + require.Nil(t, err) + + delegator1, err := cs.GenerateAndMintWalletAddress(core.AllShardId, mintValue) + require.Nil(t, err) + + delegator2, err := cs.GenerateAndMintWalletAddress(core.AllShardId, mintValue) + require.Nil(t, err) + + log.Info("working with the following addresses", + "newValidatorOwner", validatorOwner.Bech32, "delegator1", delegator1.Bech32, "delegator2", delegator2.Bech32) + + log.Info("Step 3. Do a stake transaction for the validator key and test that the new key is on queue / auction list and the correct topup") + stakeValue := big.NewInt(0).Set(minimumStakeValue) + addedStakedValue := big.NewInt(0).Mul(oneEGLD, big.NewInt(500)) + stakeValue.Add(stakeValue, addedStakedValue) + txDataField := fmt.Sprintf("stake@01@%s@%s", blsKeys[0], mockBLSSignature) + txStake := generateTransaction(validatorOwner.Bytes, 0, vm.ValidatorSCAddress, stakeValue, txDataField, gasLimitForStakeOperation) + stakeTx, err := cs.SendTxAndGenerateBlockTilTxIsExecuted(txStake, maxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, stakeTx) + + err = cs.GenerateBlocks(2) // allow the metachain to finalize the block that contains the staking of the node + assert.Nil(t, err) + + testBLSKeyIsInQueueOrAuction(t, metachainNode, validatorOwner.Bytes, blsKeys[0], addedStakedValue, 1) + + log.Info("Step 4. 
Execute the MakeNewContractFromValidatorData transaction and test that the key is on queue / auction list and the correct topup") + txDataField = fmt.Sprintf("makeNewContractFromValidatorData@%s@%s", maxCap, hexServiceFee) + txConvert := generateTransaction(validatorOwner.Bytes, 1, vm.DelegationManagerSCAddress, zeroValue, txDataField, gasLimitForConvertOperation) + convertTx, err := cs.SendTxAndGenerateBlockTilTxIsExecuted(txConvert, maxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, convertTx) + + delegationAddress := convertTx.Logs.Events[0].Topics[1] + delegationAddressBech32 := metachainNode.GetCoreComponents().AddressPubKeyConverter().SilentEncode(delegationAddress, log) + log.Info("generated delegation address", "address", delegationAddressBech32) + + err = metachainNode.GetProcessComponents().ValidatorsProvider().ForceUpdate() + require.Nil(t, err) + + testBLSKeyIsInQueueOrAuction(t, metachainNode, delegationAddress, blsKeys[0], addedStakedValue, 1) + + log.Info("Step 5. Execute 2 delegation operations of 100 EGLD each, check the topup is 700") + delegateValue := big.NewInt(0).Mul(oneEGLD, big.NewInt(100)) + txDelegate1 := generateTransaction(delegator1.Bytes, 0, delegationAddress, delegateValue, "delegate", gasLimitForDelegate) + delegate1Tx, err := cs.SendTxAndGenerateBlockTilTxIsExecuted(txDelegate1, maxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, delegate1Tx) + + txDelegate2 := generateTransaction(delegator2.Bytes, 0, delegationAddress, delegateValue, "delegate", gasLimitForDelegate) + delegate2Tx, err := cs.SendTxAndGenerateBlockTilTxIsExecuted(txDelegate2, maxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, delegate2Tx) + + expectedTopUp := big.NewInt(0).Mul(oneEGLD, big.NewInt(700)) + testBLSKeyIsInQueueOrAuction(t, metachainNode, delegationAddress, blsKeys[0], expectedTopUp, 1) + + log.Info("6. 
Execute 2 unDelegate operations of 100 EGLD each, check the topup is back to 500") + unDelegateValue := big.NewInt(0).Mul(oneEGLD, big.NewInt(100)) + txDataField = fmt.Sprintf("unDelegate@%s", hex.EncodeToString(unDelegateValue.Bytes())) + txUnDelegate1 := generateTransaction(delegator1.Bytes, 1, delegationAddress, zeroValue, txDataField, gasLimitForDelegate) + unDelegate1Tx, err := cs.SendTxAndGenerateBlockTilTxIsExecuted(txUnDelegate1, maxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, unDelegate1Tx) + + txDataField = fmt.Sprintf("unDelegate@%s", hex.EncodeToString(unDelegateValue.Bytes())) + txUnDelegate2 := generateTransaction(delegator2.Bytes, 1, delegationAddress, zeroValue, txDataField, gasLimitForDelegate) + unDelegate2Tx, err := cs.SendTxAndGenerateBlockTilTxIsExecuted(txUnDelegate2, maxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, unDelegate2Tx) + + expectedTopUp = big.NewInt(0).Mul(oneEGLD, big.NewInt(500)) + testBLSKeyIsInQueueOrAuction(t, metachainNode, delegationAddress, blsKeys[0], expectedTopUp, 1) +} + +func testBLSKeyIsInQueueOrAuction(t *testing.T, metachainNode chainSimulatorProcess.NodeHandler, address []byte, blsKey string, expectedTopUp *big.Int, actionListSize int) { + decodedBLSKey, _ := hex.DecodeString(blsKey) + err := metachainNode.GetProcessComponents().ValidatorsProvider().ForceUpdate() + require.Nil(t, err) + statistics, err := metachainNode.GetFacadeHandler().ValidatorStatisticsApi() + require.Nil(t, err) + assert.Equal(t, expectedTopUp, getBLSTopUpValue(t, metachainNode, address)) + + activationEpoch := metachainNode.GetCoreComponents().EnableEpochsHandler().GetActivationEpoch(common.StakingV4Step1Flag) + if activationEpoch <= metachainNode.GetCoreComponents().EnableEpochsHandler().GetCurrentEpoch() { + testBLSKeyIsInAuction(t, metachainNode, decodedBLSKey, blsKey, expectedTopUp, actionListSize, statistics, 1, address) + return + } + + // in staking ph 2/3.5 we do not find the bls key on the validator statistics + _, found := statistics[blsKey] + require.False(t, found) + require.Equal(t, queuedStatus, getBLSKeyStatus(t, metachainNode, decodedBLSKey)) +} + +func testBLSKeyIsInAuction( + t *testing.T, + metachainNode chainSimulatorProcess.NodeHandler, + blsKeyBytes []byte, + blsKey string, + topUpInAuctionList *big.Int, + actionListSize int, + validatorStatistics map[string]*validator.ValidatorStatistics, + numNodes int, + owner []byte, +) { + require.Equal(t, stakedStatus, getBLSKeyStatus(t, metachainNode, blsKeyBytes)) + + err := metachainNode.GetProcessComponents().ValidatorsProvider().ForceUpdate() + require.Nil(t, err) + auctionList, err := metachainNode.GetProcessComponents().ValidatorsProvider().GetAuctionList() + require.Nil(t, err) + + currentEpoch := metachainNode.GetCoreComponents().EnableEpochsHandler().GetCurrentEpoch() + if metachainNode.GetCoreComponents().EnableEpochsHandler().GetActivationEpoch(common.StakingV4Step2Flag) == currentEpoch { + // starting from phase 2, we have the shuffled out nodes from the previous epoch in the action list + actionListSize += 8 + } + if metachainNode.GetCoreComponents().EnableEpochsHandler().GetActivationEpoch(common.StakingV4Step3Flag) <= currentEpoch { + // starting from phase 3, we have the shuffled out nodes from the previous epoch in the action list + actionListSize += 4 + } + + require.Equal(t, actionListSize, len(auctionList)) + ownerAsBech32, err := metachainNode.GetCoreComponents().AddressPubKeyConverter().Encode(owner) + require.Nil(t, 
err) + if actionListSize != 0 { + nodeWasFound := false + for _, item := range auctionList { + if item.Owner != ownerAsBech32 { + continue + } + + require.Equal(t, numNodes, len(auctionList[0].Nodes)) + for _, node := range item.Nodes { + if node.BlsKey == blsKey { + require.Equal(t, topUpInAuctionList.String(), item.TopUpPerNode) + nodeWasFound = true + } + } + } + require.True(t, nodeWasFound) + } + + // in staking ph 4 we should find the key in the validator statistics + validatorInfo, found := validatorStatistics[blsKey] + require.True(t, found) + require.Equal(t, auctionStatus, validatorInfo.ValidatorStatus) +} + +func testBLSKeysAreInQueueOrAuction(t *testing.T, metachainNode chainSimulatorProcess.NodeHandler, address []byte, blsKeys []string, totalTopUp *big.Int, actionListSize int) { + err := metachainNode.GetProcessComponents().ValidatorsProvider().ForceUpdate() + require.Nil(t, err) + statistics, err := metachainNode.GetFacadeHandler().ValidatorStatisticsApi() + require.Nil(t, err) + assert.Equal(t, totalTopUp, getBLSTopUpValue(t, metachainNode, address)) + + individualTopup := big.NewInt(0).Set(totalTopUp) + individualTopup.Div(individualTopup, big.NewInt(int64(len(blsKeys)))) + + for _, blsKey := range blsKeys { + decodedBLSKey, _ := hex.DecodeString(blsKey) + activationEpoch := metachainNode.GetCoreComponents().EnableEpochsHandler().GetActivationEpoch(common.StakingV4Step1Flag) + if activationEpoch <= metachainNode.GetCoreComponents().EnableEpochsHandler().GetCurrentEpoch() { + testBLSKeyIsInAuction(t, metachainNode, decodedBLSKey, blsKey, individualTopup, actionListSize, statistics, len(blsKeys), address) + continue + } + + // in staking ph 2/3.5 we do not find the bls key on the validator statistics + _, found := statistics[blsKey] + require.False(t, found) + require.Equal(t, queuedStatus, getBLSKeyStatus(t, metachainNode, decodedBLSKey)) + } +} + +// Test description: +// Test that 2 different contracts with different topups that came from the normal stake will be considered in auction list computing in the correct order +// 1. Add 2 new validator private keys in the multi key handler +// 2. Set the initial state for 2 owners (mint 2 new wallets) +// 3. Do 2 stake transactions and test that the new keys are on queue / auction list and have the correct topup - 100 and 200 EGLD, respectively +// 4. Convert both validators into staking providers and test that the new keys are on queue / auction list and have the correct topup +// 5. 
If the staking v4 is activated (regardless the steps), check that the auction list sorted the 2 BLS keys based on topup + +// Internal test scenario #11 +func TestChainSimulator_MakeNewContractFromValidatorDataWith2StakingContracts(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + + roundDurationInMillis := uint64(6000) + roundsPerEpoch := core.OptionalUint64{ + HasValue: true, + Value: 30, + } + + t.Run("staking ph 4 is not active", func(t *testing.T) { + cs, err := chainSimulator.NewChainSimulator(chainSimulator.ArgsChainSimulator{ + BypassTxSignatureCheck: false, + TempDir: t.TempDir(), + PathToInitialConfig: defaultPathToInitialConfig, + NumOfShards: 3, + GenesisTimestamp: time.Now().Unix(), + RoundDurationInMillis: roundDurationInMillis, + RoundsPerEpoch: roundsPerEpoch, + ApiInterface: api.NewNoApiInterface(), + MinNodesPerShard: 3, + MetaChainMinNodes: 3, + NumNodesWaitingListMeta: 3, + NumNodesWaitingListShard: 3, + AlterConfigsFunction: func(cfg *config.Configs) { + cfg.EpochConfig.EnableEpochs.StakingV4Step1EnableEpoch = 100 + cfg.EpochConfig.EnableEpochs.StakingV4Step2EnableEpoch = 101 + cfg.EpochConfig.EnableEpochs.StakingV4Step3EnableEpoch = 102 + + cfg.EpochConfig.EnableEpochs.MaxNodesChangeEnableEpoch[2].EpochEnable = 102 + }, + }) + require.Nil(t, err) + require.NotNil(t, cs) + + defer cs.Close() + + testChainSimulatorMakeNewContractFromValidatorDataWith2StakingContracts(t, cs, 1) + }) + t.Run("staking ph 4 step 1 is active", func(t *testing.T) { + cs, err := chainSimulator.NewChainSimulator(chainSimulator.ArgsChainSimulator{ + BypassTxSignatureCheck: false, + TempDir: t.TempDir(), + PathToInitialConfig: defaultPathToInitialConfig, + NumOfShards: 3, + GenesisTimestamp: time.Now().Unix(), + RoundDurationInMillis: roundDurationInMillis, + RoundsPerEpoch: roundsPerEpoch, + ApiInterface: api.NewNoApiInterface(), + MinNodesPerShard: 3, + MetaChainMinNodes: 3, + NumNodesWaitingListMeta: 3, + NumNodesWaitingListShard: 3, + AlterConfigsFunction: func(cfg *config.Configs) { + cfg.EpochConfig.EnableEpochs.StakingV4Step1EnableEpoch = 2 + cfg.EpochConfig.EnableEpochs.StakingV4Step2EnableEpoch = 3 + cfg.EpochConfig.EnableEpochs.StakingV4Step3EnableEpoch = 4 + + cfg.EpochConfig.EnableEpochs.MaxNodesChangeEnableEpoch[2].EpochEnable = 4 + }, + }) + require.Nil(t, err) + require.NotNil(t, cs) + + defer cs.Close() + + testChainSimulatorMakeNewContractFromValidatorDataWith2StakingContracts(t, cs, 2) + }) + t.Run("staking ph 4 step 2 is active", func(t *testing.T) { + cs, err := chainSimulator.NewChainSimulator(chainSimulator.ArgsChainSimulator{ + BypassTxSignatureCheck: false, + TempDir: t.TempDir(), + PathToInitialConfig: defaultPathToInitialConfig, + NumOfShards: 3, + GenesisTimestamp: time.Now().Unix(), + RoundDurationInMillis: roundDurationInMillis, + RoundsPerEpoch: roundsPerEpoch, + ApiInterface: api.NewNoApiInterface(), + MinNodesPerShard: 3, + MetaChainMinNodes: 3, + NumNodesWaitingListMeta: 3, + NumNodesWaitingListShard: 3, + AlterConfigsFunction: func(cfg *config.Configs) { + cfg.EpochConfig.EnableEpochs.StakingV4Step1EnableEpoch = 2 + cfg.EpochConfig.EnableEpochs.StakingV4Step2EnableEpoch = 3 + cfg.EpochConfig.EnableEpochs.StakingV4Step3EnableEpoch = 4 + + cfg.EpochConfig.EnableEpochs.MaxNodesChangeEnableEpoch[2].EpochEnable = 4 + }, + }) + require.Nil(t, err) + require.NotNil(t, cs) + + defer cs.Close() + + testChainSimulatorMakeNewContractFromValidatorDataWith2StakingContracts(t, cs, 3) + }) + t.Run("staking ph 4 step 3 is active", func(t 
*testing.T) { + cs, err := chainSimulator.NewChainSimulator(chainSimulator.ArgsChainSimulator{ + BypassTxSignatureCheck: false, + TempDir: t.TempDir(), + PathToInitialConfig: defaultPathToInitialConfig, + NumOfShards: 3, + GenesisTimestamp: time.Now().Unix(), + RoundDurationInMillis: roundDurationInMillis, + RoundsPerEpoch: roundsPerEpoch, + ApiInterface: api.NewNoApiInterface(), + MinNodesPerShard: 3, + MetaChainMinNodes: 3, + NumNodesWaitingListMeta: 3, + NumNodesWaitingListShard: 3, + AlterConfigsFunction: func(cfg *config.Configs) { + cfg.EpochConfig.EnableEpochs.StakingV4Step1EnableEpoch = 2 + cfg.EpochConfig.EnableEpochs.StakingV4Step2EnableEpoch = 3 + cfg.EpochConfig.EnableEpochs.StakingV4Step3EnableEpoch = 4 + + cfg.EpochConfig.EnableEpochs.MaxNodesChangeEnableEpoch[2].EpochEnable = 4 + }, + }) + require.Nil(t, err) + require.NotNil(t, cs) + + defer cs.Close() + + testChainSimulatorMakeNewContractFromValidatorDataWith2StakingContracts(t, cs, 4) + }) +} + +func testChainSimulatorMakeNewContractFromValidatorDataWith2StakingContracts(t *testing.T, cs chainSimulatorIntegrationTests.ChainSimulator, targetEpoch int32) { + err := cs.GenerateBlocksUntilEpochIsReached(targetEpoch) + require.Nil(t, err) + + log.Info("Step 1. Add 2 new validator private keys in the multi key handler") + privateKeys, blsKeys, err := chainSimulator.GenerateBlsPrivateKeys(2) + require.Nil(t, err) + + err = cs.AddValidatorKeys(privateKeys) + require.Nil(t, err) + metachainNode := cs.GetNodeHandler(core.MetachainShardId) + + log.Info("Step 2. Set the initial state for 2 owners") + mintValue := big.NewInt(3010) + mintValue = mintValue.Mul(oneEGLD, mintValue) + + validatorOwnerA, err := cs.GenerateAndMintWalletAddress(core.AllShardId, mintValue) + require.Nil(t, err) + + validatorOwnerB, err := cs.GenerateAndMintWalletAddress(core.AllShardId, mintValue) + require.Nil(t, err) + + log.Info("working with the following addresses", + "validatorOwnerA", validatorOwnerA.Bech32, "validatorOwnerB", validatorOwnerB.Bech32) + + log.Info("Step 3. Do 2 stake transactions and test that the new keys are on queue / auction list and have the correct topup") + + topupA := big.NewInt(0).Mul(oneEGLD, big.NewInt(100)) + stakeValueA := big.NewInt(0).Add(minimumStakeValue, topupA) + txStakeA := generateStakeTransaction(t, cs, validatorOwnerA, blsKeys[0], stakeValueA) + + topupB := big.NewInt(0).Mul(oneEGLD, big.NewInt(200)) + stakeValueB := big.NewInt(0).Add(minimumStakeValue, topupB) + txStakeB := generateStakeTransaction(t, cs, validatorOwnerB, blsKeys[1], stakeValueB) + + stakeTxs, err := cs.SendTxsAndGenerateBlocksTilAreExecuted([]*transaction.Transaction{txStakeA, txStakeB}, maxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.Equal(t, 2, len(stakeTxs)) + + err = cs.GenerateBlocks(2) // allow the metachain to finalize the block that contains the staking of the node + assert.Nil(t, err) + + testBLSKeyIsInQueueOrAuction(t, metachainNode, validatorOwnerA.Bytes, blsKeys[0], topupA, 2) + testBLSKeyIsInQueueOrAuction(t, metachainNode, validatorOwnerB.Bytes, blsKeys[1], topupB, 2) + + log.Info("Step 4. 
Convert both validators into staking providers and test that the new keys are on queue / auction list and have the correct topup") + + txConvertA := generateConvertToStakingProviderTransaction(t, cs, validatorOwnerA) + txConvertB := generateConvertToStakingProviderTransaction(t, cs, validatorOwnerB) + + convertTxs, err := cs.SendTxsAndGenerateBlocksTilAreExecuted([]*transaction.Transaction{txConvertA, txConvertB}, maxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.Equal(t, 2, len(convertTxs)) + + err = cs.GenerateBlocks(2) // allow the metachain to finalize the block that contains the staking of the node + assert.Nil(t, err) + + delegationAddressA := convertTxs[0].Logs.Events[0].Topics[1] + delegationAddressB := convertTxs[1].Logs.Events[0].Topics[1] + + testBLSKeyIsInQueueOrAuction(t, metachainNode, delegationAddressA, blsKeys[0], topupA, 2) + testBLSKeyIsInQueueOrAuction(t, metachainNode, delegationAddressB, blsKeys[1], topupB, 2) + + log.Info("Step 5. If the staking v4 is activated, check that the auction list sorted the 2 BLS keys based on topup") + step1ActivationEpoch := metachainNode.GetCoreComponents().EnableEpochsHandler().GetActivationEpoch(common.StakingV4Step1Flag) + if step1ActivationEpoch > metachainNode.GetCoreComponents().EnableEpochsHandler().GetCurrentEpoch() { + // we are in staking v3.5, the test ends here + return + } + + auctionList, err := metachainNode.GetProcessComponents().ValidatorsProvider().GetAuctionList() + require.Nil(t, err) + + firstAuctionPosition := auctionList[0] + secondAuctionPosition := auctionList[1] + // check the correct order of the nodes in the auction list based on topup + require.Equal(t, blsKeys[1], firstAuctionPosition.Nodes[0].BlsKey) + require.Equal(t, topupB.String(), firstAuctionPosition.TopUpPerNode) + + require.Equal(t, blsKeys[0], secondAuctionPosition.Nodes[0].BlsKey) + require.Equal(t, topupA.String(), secondAuctionPosition.TopUpPerNode) +} + +// Test description: +// Test that 1 contract having 3 BLS keys properly handles the stakeNodes-unstakeNodes-unBondNodes sequence for 2 of the BLS keys +// 1. Add 3 new validator private keys in the multi key handler +// 2. Set the initial state for 1 owner and 1 delegator +// 3. Do a stake transaction and test that the new key is on queue / auction list and has the correct topup +// 4. Convert the validator into a staking provider and test that the key is on queue / auction list and has the correct topup +// 5. Add 2 nodes in the staking contract +// 6. Delegate 5000 EGLD to the contract +// 7. Stake the 2 nodes +// 8. UnStake 2 nodes (latest staked) +// 9. 
Unbond the 2 nodes (that were un staked) + +// Internal test scenario #85 +func TestWIP(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + + roundDurationInMillis := uint64(6000) + roundsPerEpoch := core.OptionalUint64{ + HasValue: true, + Value: 80, + } + + t.Run("staking ph 4 is not active", func(t *testing.T) { + cs, err := chainSimulator.NewChainSimulator(chainSimulator.ArgsChainSimulator{ + BypassTxSignatureCheck: false, + TempDir: t.TempDir(), + PathToInitialConfig: defaultPathToInitialConfig, + NumOfShards: 3, + GenesisTimestamp: time.Now().Unix(), + RoundDurationInMillis: roundDurationInMillis, + RoundsPerEpoch: roundsPerEpoch, + ApiInterface: api.NewNoApiInterface(), + MinNodesPerShard: 3, + MetaChainMinNodes: 3, + NumNodesWaitingListMeta: 3, + NumNodesWaitingListShard: 3, + AlterConfigsFunction: func(cfg *config.Configs) { + cfg.EpochConfig.EnableEpochs.StakingV4Step1EnableEpoch = 100 + cfg.EpochConfig.EnableEpochs.StakingV4Step2EnableEpoch = 101 + cfg.EpochConfig.EnableEpochs.StakingV4Step3EnableEpoch = 102 + + cfg.EpochConfig.EnableEpochs.MaxNodesChangeEnableEpoch[2].EpochEnable = 102 + }, + }) + require.Nil(t, err) + require.NotNil(t, cs) + + defer cs.Close() + + // unbond succeeded because the nodes were on queue + testChainSimulatorMakeNewContractFromValidatorDataWith1StakingContractUnstakeAndUnbond(t, cs, 1, notStakedStatus) + }) + t.Run("staking ph 4 step 1 is active", func(t *testing.T) { + cs, err := chainSimulator.NewChainSimulator(chainSimulator.ArgsChainSimulator{ + BypassTxSignatureCheck: false, + TempDir: t.TempDir(), + PathToInitialConfig: defaultPathToInitialConfig, + NumOfShards: 3, + GenesisTimestamp: time.Now().Unix(), + RoundDurationInMillis: roundDurationInMillis, + RoundsPerEpoch: roundsPerEpoch, + ApiInterface: api.NewNoApiInterface(), + MinNodesPerShard: 3, + MetaChainMinNodes: 3, + NumNodesWaitingListMeta: 3, + NumNodesWaitingListShard: 3, + AlterConfigsFunction: func(cfg *config.Configs) { + cfg.EpochConfig.EnableEpochs.StakingV4Step1EnableEpoch = 2 + cfg.EpochConfig.EnableEpochs.StakingV4Step2EnableEpoch = 3 + cfg.EpochConfig.EnableEpochs.StakingV4Step3EnableEpoch = 4 + cfg.EpochConfig.EnableEpochs.AlwaysMergeContextsInEEIEnableEpoch = 1 + + cfg.EpochConfig.EnableEpochs.MaxNodesChangeEnableEpoch[2].EpochEnable = 4 + }, + }) + require.Nil(t, err) + require.NotNil(t, cs) + + defer cs.Close() + + testChainSimulatorMakeNewContractFromValidatorDataWith1StakingContractUnstakeAndUnbond(t, cs, 2, unStakedStatus) + }) + t.Run("staking ph 4 step 2 is active", func(t *testing.T) { + cs, err := chainSimulator.NewChainSimulator(chainSimulator.ArgsChainSimulator{ + BypassTxSignatureCheck: false, + TempDir: t.TempDir(), + PathToInitialConfig: defaultPathToInitialConfig, + NumOfShards: 3, + GenesisTimestamp: time.Now().Unix(), + RoundDurationInMillis: roundDurationInMillis, + RoundsPerEpoch: roundsPerEpoch, + ApiInterface: api.NewNoApiInterface(), + MinNodesPerShard: 3, + MetaChainMinNodes: 3, + NumNodesWaitingListMeta: 3, + NumNodesWaitingListShard: 3, + AlterConfigsFunction: func(cfg *config.Configs) { + cfg.EpochConfig.EnableEpochs.StakingV4Step1EnableEpoch = 2 + cfg.EpochConfig.EnableEpochs.StakingV4Step2EnableEpoch = 3 + cfg.EpochConfig.EnableEpochs.StakingV4Step3EnableEpoch = 4 + cfg.EpochConfig.EnableEpochs.AlwaysMergeContextsInEEIEnableEpoch = 1 + + cfg.EpochConfig.EnableEpochs.MaxNodesChangeEnableEpoch[2].EpochEnable = 4 + }, + }) + require.Nil(t, err) + require.NotNil(t, cs) + + defer cs.Close() + + 
testChainSimulatorMakeNewContractFromValidatorDataWith1StakingContractUnstakeAndUnbond(t, cs, 3, unStakedStatus) + }) + t.Run("staking ph 4 step 3 is active", func(t *testing.T) { + cs, err := chainSimulator.NewChainSimulator(chainSimulator.ArgsChainSimulator{ + BypassTxSignatureCheck: false, + TempDir: t.TempDir(), + PathToInitialConfig: defaultPathToInitialConfig, + NumOfShards: 3, + GenesisTimestamp: time.Now().Unix(), + RoundDurationInMillis: roundDurationInMillis, + RoundsPerEpoch: roundsPerEpoch, + ApiInterface: api.NewNoApiInterface(), + MinNodesPerShard: 3, + MetaChainMinNodes: 3, + NumNodesWaitingListMeta: 3, + NumNodesWaitingListShard: 3, + AlterConfigsFunction: func(cfg *config.Configs) { + cfg.EpochConfig.EnableEpochs.StakingV4Step1EnableEpoch = 2 + cfg.EpochConfig.EnableEpochs.StakingV4Step2EnableEpoch = 3 + cfg.EpochConfig.EnableEpochs.StakingV4Step3EnableEpoch = 4 + cfg.EpochConfig.EnableEpochs.AlwaysMergeContextsInEEIEnableEpoch = 1 + + cfg.EpochConfig.EnableEpochs.MaxNodesChangeEnableEpoch[2].EpochEnable = 4 + }, + }) + require.Nil(t, err) + require.NotNil(t, cs) + + defer cs.Close() + + testChainSimulatorMakeNewContractFromValidatorDataWith1StakingContractUnstakeAndUnbond(t, cs, 4, unStakedStatus) + }) +} + +func testChainSimulatorMakeNewContractFromValidatorDataWith1StakingContractUnstakeAndUnbond( + t *testing.T, + cs chainSimulatorIntegrationTests.ChainSimulator, + targetEpoch int32, + nodesStatusAfterUnBondTx string, +) { + err := cs.GenerateBlocksUntilEpochIsReached(targetEpoch) + require.Nil(t, err) + + log.Info("Step 1. Add 3 new validator private keys in the multi key handler") + privateKeys, blsKeys, err := chainSimulator.GenerateBlsPrivateKeys(3) + require.Nil(t, err) + + err = cs.AddValidatorKeys(privateKeys) + require.Nil(t, err) + metachainNode := cs.GetNodeHandler(core.MetachainShardId) + + log.Info("Step 2. Set the initial state for 1 owner and 1 delegator") + mintValue := big.NewInt(10001) + mintValue = mintValue.Mul(oneEGLD, mintValue) + + owner, err := cs.GenerateAndMintWalletAddress(core.AllShardId, mintValue) + require.Nil(t, err) + + delegator, err := cs.GenerateAndMintWalletAddress(core.AllShardId, mintValue) + require.Nil(t, err) + + log.Info("working with the following addresses", + "owner", owner.Bech32, "delegator", delegator.Bech32) + + log.Info("Step 3. Do a stake transaction and test that the new key is on queue / auction list and has the correct topup") + + topup := big.NewInt(0).Mul(oneEGLD, big.NewInt(99)) + stakeValue := big.NewInt(0).Add(minimumStakeValue, topup) + txStake := generateStakeTransaction(t, cs, owner, blsKeys[0], stakeValue) + + stakeTxs, err := cs.SendTxsAndGenerateBlocksTilAreExecuted([]*transaction.Transaction{txStake}, maxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.Equal(t, 1, len(stakeTxs)) + + err = cs.GenerateBlocks(2) // allow the metachain to finalize the block that contains the staking of the node + assert.Nil(t, err) + + testBLSKeyIsInQueueOrAuction(t, metachainNode, owner.Bytes, blsKeys[0], topup, 1) + + log.Info("Step 4. 
Convert the validator into a staking providers and test that the key is on queue / auction list and has the correct topup") + + txConvert := generateConvertToStakingProviderTransaction(t, cs, owner) + + convertTxs, err := cs.SendTxsAndGenerateBlocksTilAreExecuted([]*transaction.Transaction{txConvert}, maxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.Equal(t, 1, len(convertTxs)) + + err = cs.GenerateBlocks(2) // allow the metachain to finalize the block that contains the staking of the node + assert.Nil(t, err) + + delegationAddress := convertTxs[0].Logs.Events[0].Topics[1] + + testBLSKeyIsInQueueOrAuction(t, metachainNode, delegationAddress, blsKeys[0], topup, 1) + + log.Info("Step 5. Add 2 nodes in the staking contract") + txDataFieldAddNodes := fmt.Sprintf("addNodes@%s@%s@%s@%s", blsKeys[1], mockBLSSignature+"02", blsKeys[2], mockBLSSignature+"03") + ownerNonce := getNonce(t, cs, owner) + txAddNodes := generateTransaction(owner.Bytes, ownerNonce, delegationAddress, big.NewInt(0), txDataFieldAddNodes, gasLimitForStakeOperation) + + addNodesTxs, err := cs.SendTxsAndGenerateBlocksTilAreExecuted([]*transaction.Transaction{txAddNodes}, maxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.Equal(t, 1, len(addNodesTxs)) + + log.Info("Step 6. Delegate 5000 EGLD to the contract") + delegateValue := big.NewInt(0).Mul(oneEGLD, big.NewInt(5000)) + txDataFieldDelegate := "delegate" + delegatorNonce := getNonce(t, cs, delegator) + txDelegate := generateTransaction(delegator.Bytes, delegatorNonce, delegationAddress, delegateValue, txDataFieldDelegate, gasLimitForStakeOperation) + + delegateTxs, err := cs.SendTxsAndGenerateBlocksTilAreExecuted([]*transaction.Transaction{txDelegate}, maxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.Equal(t, 1, len(delegateTxs)) + + log.Info("Step 7. Stake the 2 nodes") + txDataFieldStakeNodes := fmt.Sprintf("stakeNodes@%s@%s", blsKeys[1], blsKeys[2]) + ownerNonce = getNonce(t, cs, owner) + txStakeNodes := generateTransaction(owner.Bytes, ownerNonce, delegationAddress, big.NewInt(0), txDataFieldStakeNodes, gasLimitForStakeOperation) + + stakeNodesTxs, err := cs.SendTxsAndGenerateBlocksTilAreExecuted([]*transaction.Transaction{txStakeNodes}, maxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.Equal(t, 1, len(stakeNodesTxs)) + + err = cs.GenerateBlocks(2) // allow the metachain to finalize the block that contains the staking of the nodes + assert.Nil(t, err) + + // all 3 nodes should be staked (auction list is 1 as there is one delegation SC with 3 BLS keys in the auction list) + testBLSKeysAreInQueueOrAuction(t, metachainNode, delegationAddress, blsKeys, topup, 1) + + log.Info("Step 8. 
UnStake 2 nodes (latest staked)") + + txDataFieldUnStakeNodes := fmt.Sprintf("unStakeNodes@%s@%s", blsKeys[1], blsKeys[2]) + ownerNonce = getNonce(t, cs, owner) + txUnStakeNodes := generateTransaction(owner.Bytes, ownerNonce, delegationAddress, big.NewInt(0), txDataFieldUnStakeNodes, gasLimitForStakeOperation) + + unStakeNodesTxs, err := cs.SendTxsAndGenerateBlocksTilAreExecuted([]*transaction.Transaction{txUnStakeNodes}, maxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.Equal(t, 1, len(unStakeNodesTxs)) + + err = cs.GenerateBlocks(2) // allow the metachain to finalize the block that contains the staking of the nodes + assert.Nil(t, err) + + // check that only one node is staked (auction list is 1 as there is one delegation SC with 1 BLS key in the auction list) + expectedTopUp := big.NewInt(0) + expectedTopUp.Add(topup, delegateValue) // 99 + 5000 = 5099 + testBLSKeyIsInQueueOrAuction(t, metachainNode, delegationAddress, blsKeys[0], expectedTopUp, 1) + + log.Info("Step 9. Unbond the 2 nodes (that were unStaked)") + + txDataFieldUnBondNodes := fmt.Sprintf("unBondNodes@%s@%s", blsKeys[1], blsKeys[2]) + ownerNonce = getNonce(t, cs, owner) + txUnBondNodes := generateTransaction(owner.Bytes, ownerNonce, delegationAddress, big.NewInt(0), txDataFieldUnBondNodes, gasLimitForStakeOperation) + + unBondNodesTxs, err := cs.SendTxsAndGenerateBlocksTilAreExecuted([]*transaction.Transaction{txUnBondNodes}, maxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.Equal(t, 1, len(unBondNodesTxs)) + + err = cs.GenerateBlocks(2) // allow the metachain to finalize the block that contains the staking of the nodes + assert.Nil(t, err) + + keyStatus := getAllNodeStates(t, metachainNode, delegationAddress) + require.Equal(t, len(blsKeys), len(keyStatus)) + // key[0] should be staked + require.Equal(t, stakedStatus, keyStatus[blsKeys[0]]) + // key[1] and key[2] should have the expected status after the unBond transaction: notStaked if the unBond succeeded (the nodes were still in the queue), unStaked otherwise + require.Equal(t, nodesStatusAfterUnBondTx, keyStatus[blsKeys[1]]) + require.Equal(t, nodesStatusAfterUnBondTx, keyStatus[blsKeys[2]]) +} + +func getNonce(t *testing.T, cs chainSimulatorIntegrationTests.ChainSimulator, address dtos.WalletAddress) uint64 { + account, err := cs.GetAccount(address) + require.Nil(t, err) + + return account.Nonce +} + +func getAllNodeStates(t *testing.T, metachainNode chainSimulatorProcess.NodeHandler, address []byte) map[string]string { + scQuery := &process.SCQuery{ + ScAddress: address, + FuncName: "getAllNodeStates", + CallerAddr: vm.StakingSCAddress, + CallValue: big.NewInt(0), + } + result, _, err := metachainNode.GetFacadeHandler().ExecuteSCQuery(scQuery) + require.Nil(t, err) + require.Equal(t, okReturnCode, result.ReturnCode) + + m := make(map[string]string) + status := "" + for _, resultData := range result.ReturnData { + if len(resultData) != 96 { + // not a BLS key + status = string(resultData) + continue + } + + m[hex.EncodeToString(resultData)] = status + } + + return m +} + +func generateStakeTransaction( + t *testing.T, + cs chainSimulatorIntegrationTests.ChainSimulator, + owner dtos.WalletAddress, + blsKeyHex string, + stakeValue *big.Int, +) *transaction.Transaction { + account, err := cs.GetAccount(owner) + require.Nil(t, err) + + txDataField := fmt.Sprintf("stake@01@%s@%s", blsKeyHex, mockBLSSignature) + return generateTransaction(owner.Bytes, account.Nonce, vm.ValidatorSCAddress, stakeValue, txDataField, gasLimitForStakeOperation) +} + +func generateConvertToStakingProviderTransaction( + t *testing.T, + cs 
chainSimulatorIntegrationTests.ChainSimulator, + owner dtos.WalletAddress, +) *transaction.Transaction { + account, err := cs.GetAccount(owner) + require.Nil(t, err) + + txDataField := fmt.Sprintf("makeNewContractFromValidatorData@%s@%s", maxCap, hexServiceFee) + return generateTransaction(owner.Bytes, account.Nonce, vm.DelegationManagerSCAddress, zeroValue, txDataField, gasLimitForConvertOperation) +} + +// Test description +// Test the creation of a new delegation contract, adding nodes to it, delegating, and undelegating. + +// Test scenario +// 1. Initialize the chain simulator +// 2. Generate blocks to activate staking phases +// 3. Create a new delegation contract +// 4. Add validator nodes to the delegation contract +// 5. Perform delegation operations +// 6. Perform undelegation operations +// 7. Validate the results at each step +func TestChainSimulator_CreateNewDelegationContract(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + + roundDurationInMillis := uint64(6000) + roundsPerEpoch := core.OptionalUint64{ + HasValue: true, + Value: 30, + } + + // Test scenario done in staking 3.5 phase (staking v4 is not active) + // 1. Add a new validator private key in the multi key handler + // 2. Set the initial state for the owner and the 2 delegators + // 3. Create a new delegation contract with 1250 egld + // 3. Add node to the delegation contract + // 4. Execute 2 delegation operations of 1250 EGLD each, check the topup is 3750 + // 5. Stake node, check the topup is 1250, check the node is staked + // 5. Execute 2 unDelegate operations of 100 EGLD each, check the topup is back to 1250 + // 6. Check the node is unstaked in the next epoch + t.Run("staking ph 4 is not active", func(t *testing.T) { + cs, err := chainSimulator.NewChainSimulator(chainSimulator.ArgsChainSimulator{ + BypassTxSignatureCheck: false, + TempDir: t.TempDir(), + PathToInitialConfig: defaultPathToInitialConfig, + NumOfShards: 3, + GenesisTimestamp: time.Now().Unix(), + RoundDurationInMillis: roundDurationInMillis, + RoundsPerEpoch: roundsPerEpoch, + ApiInterface: api.NewNoApiInterface(), + MinNodesPerShard: 3, + MetaChainMinNodes: 3, + NumNodesWaitingListMeta: 3, + NumNodesWaitingListShard: 3, + AlterConfigsFunction: func(cfg *config.Configs) { + cfg.EpochConfig.EnableEpochs.StakingV4Step1EnableEpoch = 100 + cfg.EpochConfig.EnableEpochs.StakingV4Step2EnableEpoch = 101 + cfg.EpochConfig.EnableEpochs.StakingV4Step3EnableEpoch = 102 + + cfg.EpochConfig.EnableEpochs.MaxNodesChangeEnableEpoch[2].EpochEnable = 102 + }, + }) + require.Nil(t, err) + require.NotNil(t, cs) + + defer cs.Close() + + testChainSimulatorCreateNewDelegationContract(t, cs, 1) + }) + + // Test scenario done in staking v4 phase step 1 + // 1. Add a new validator private key in the multi key handler + // 2. Set the initial state for the owner and the 2 delegators + // 3. Create a new delegation contract with 1250 egld + // 3. Add node to the delegation contract + // 4. Execute 2 delegation operations of 1250 EGLD each, check the topup is 3750 + // 5. Stake node, check the topup is 1250, check the node is in action list + // 5. Execute 2 unDelegate operations of 100 EGLD each, check the topup is back to 1250 + // 6. 
Check the node is unstaked in the next epoch + t.Run("staking ph 4 step 1 is active", func(t *testing.T) { + cs, err := chainSimulator.NewChainSimulator(chainSimulator.ArgsChainSimulator{ + BypassTxSignatureCheck: false, + TempDir: t.TempDir(), + PathToInitialConfig: defaultPathToInitialConfig, + NumOfShards: 3, + GenesisTimestamp: time.Now().Unix(), + RoundDurationInMillis: roundDurationInMillis, + RoundsPerEpoch: roundsPerEpoch, + ApiInterface: api.NewNoApiInterface(), + MinNodesPerShard: 3, + MetaChainMinNodes: 3, + NumNodesWaitingListMeta: 3, + NumNodesWaitingListShard: 3, + AlterConfigsFunction: func(cfg *config.Configs) { + cfg.EpochConfig.EnableEpochs.StakingV4Step1EnableEpoch = 2 + cfg.EpochConfig.EnableEpochs.StakingV4Step2EnableEpoch = 3 + cfg.EpochConfig.EnableEpochs.StakingV4Step3EnableEpoch = 4 + + cfg.EpochConfig.EnableEpochs.MaxNodesChangeEnableEpoch[2].EpochEnable = 4 + }, + }) + require.Nil(t, err) + require.NotNil(t, cs) + + defer cs.Close() + + testChainSimulatorCreateNewDelegationContract(t, cs, 2) + }) + + // Test scenario done in staking v4 phase step 2 + // 1. Add a new validator private key in the multi key handler + // 2. Set the initial state for the owner and the 2 delegators + // 3. Create a new delegation contract with 1250 egld + // 3. Add node to the delegation contract + // 4. Execute 2 delegation operations of 1250 EGLD each, check the topup is 3750 + // 5. Stake node, check the topup is 1250, check the node is in action list + // 5. Execute 2 unDelegate operations of 100 EGLD each, check the topup is back to 1250 + // 6. Check the node is unstaked in the next epoch + t.Run("staking ph 4 step 2 is active", func(t *testing.T) { + cs, err := chainSimulator.NewChainSimulator(chainSimulator.ArgsChainSimulator{ + BypassTxSignatureCheck: false, + TempDir: t.TempDir(), + PathToInitialConfig: defaultPathToInitialConfig, + NumOfShards: 3, + GenesisTimestamp: time.Now().Unix(), + RoundDurationInMillis: roundDurationInMillis, + RoundsPerEpoch: roundsPerEpoch, + ApiInterface: api.NewNoApiInterface(), + MinNodesPerShard: 3, + MetaChainMinNodes: 3, + NumNodesWaitingListMeta: 3, + NumNodesWaitingListShard: 3, + AlterConfigsFunction: func(cfg *config.Configs) { + cfg.EpochConfig.EnableEpochs.StakingV4Step1EnableEpoch = 2 + cfg.EpochConfig.EnableEpochs.StakingV4Step2EnableEpoch = 3 + cfg.EpochConfig.EnableEpochs.StakingV4Step3EnableEpoch = 4 + + cfg.EpochConfig.EnableEpochs.MaxNodesChangeEnableEpoch[2].EpochEnable = 4 + }, + }) + require.Nil(t, err) + require.NotNil(t, cs) + + defer cs.Close() + + testChainSimulatorCreateNewDelegationContract(t, cs, 3) + }) + + // Test scenario done in staking v4 phase step 3 + // 1. Add a new validator private key in the multi key handler + // 2. Set the initial state for the owner and the 2 delegators + // 3. Create a new delegation contract with 1250 egld + // 3. Add node to the delegation contract + // 4. Execute 2 delegation operations of 1250 EGLD each, check the topup is 3750 + // 5. Stake node, check the topup is 1250, check the node is in action list + // 5. Execute 2 unDelegate operations of 100 EGLD each, check the topup is back to 1250 + // 6. 
Check the node is unstaked in the next epoch + t.Run("staking ph 4 step 3 is active", func(t *testing.T) { + cs, err := chainSimulator.NewChainSimulator(chainSimulator.ArgsChainSimulator{ + BypassTxSignatureCheck: false, + TempDir: t.TempDir(), + PathToInitialConfig: defaultPathToInitialConfig, + NumOfShards: 3, + GenesisTimestamp: time.Now().Unix(), + RoundDurationInMillis: roundDurationInMillis, + RoundsPerEpoch: roundsPerEpoch, + ApiInterface: api.NewNoApiInterface(), + MinNodesPerShard: 3, + MetaChainMinNodes: 3, + NumNodesWaitingListMeta: 3, + NumNodesWaitingListShard: 3, + AlterConfigsFunction: func(cfg *config.Configs) { + cfg.EpochConfig.EnableEpochs.StakingV4Step1EnableEpoch = 2 + cfg.EpochConfig.EnableEpochs.StakingV4Step2EnableEpoch = 3 + cfg.EpochConfig.EnableEpochs.StakingV4Step3EnableEpoch = 4 + + cfg.EpochConfig.EnableEpochs.MaxNodesChangeEnableEpoch[2].EpochEnable = 4 + }, + }) + require.Nil(t, err) + require.NotNil(t, cs) + + defer cs.Close() + + testChainSimulatorCreateNewDelegationContract(t, cs, 4) + }) + +} + +func testChainSimulatorCreateNewDelegationContract(t *testing.T, cs chainSimulatorIntegrationTests.ChainSimulator, targetEpoch int32) { + err := cs.GenerateBlocksUntilEpochIsReached(targetEpoch) + require.Nil(t, err) + metachainNode := cs.GetNodeHandler(core.MetachainShardId) + + // Create new validator owner and delegators with initial funds + validatorOwnerBytes := generateWalletAddressBytes() + validatorOwner, _ := cs.GetNodeHandler(0).GetCoreComponents().AddressPubKeyConverter().Encode(validatorOwnerBytes) + delegator1Bytes := generateWalletAddressBytes() + delegator1, _ := cs.GetNodeHandler(0).GetCoreComponents().AddressPubKeyConverter().Encode(delegator1Bytes) + delegator2Bytes := generateWalletAddressBytes() + delegator2, _ := cs.GetNodeHandler(0).GetCoreComponents().AddressPubKeyConverter().Encode(delegator2Bytes) + initialFunds := big.NewInt(0).Mul(oneEGLD, big.NewInt(10000)) // 10000 EGLD for each + addresses := []*dtos.AddressState{ + {Address: validatorOwner, Balance: initialFunds.String()}, + {Address: delegator1, Balance: initialFunds.String()}, + {Address: delegator2, Balance: initialFunds.String()}, + } + err = cs.SetStateMultiple(addresses) + require.Nil(t, err) + + // Step 3: Create a new delegation contract + maxDelegationCap := big.NewInt(0).Mul(oneEGLD, big.NewInt(51000)) // 51000 EGLD cap + txCreateDelegationContract := generateTransaction(validatorOwnerBytes, 0, vm.DelegationManagerSCAddress, initialDelegationValue, + fmt.Sprintf("createNewDelegationContract@%s@%s", hex.EncodeToString(maxDelegationCap.Bytes()), hexServiceFee), + gasLimitForDelegationContractCreationOperation) + createDelegationContractTx, err := cs.SendTxAndGenerateBlockTilTxIsExecuted(txCreateDelegationContract, maxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, createDelegationContractTx) + + // Check delegation contract creation was successful + data := createDelegationContractTx.SmartContractResults[0].Data + parts := strings.Split(data, "@") + require.Equal(t, 3, len(parts)) + + require.Equal(t, hex.EncodeToString([]byte("ok")), parts[1]) + delegationContractAddressHex, _ := hex.DecodeString(parts[2]) + delegationContractAddress, _ := cs.GetNodeHandler(0).GetCoreComponents().AddressPubKeyConverter().Encode(delegationContractAddressHex) + + output, err := executeQuery(cs, core.MetachainShardId, vm.DelegationManagerSCAddress, "getAllContractAddresses", nil) + require.Nil(t, err) + returnAddress, err := 
cs.GetNodeHandler(0).GetCoreComponents().AddressPubKeyConverter().Encode(output.ReturnData[0]) + require.Nil(t, err) + require.Equal(t, delegationContractAddress, returnAddress) + delegationContractAddressBytes := output.ReturnData[0] + + // Step 2: Add validator nodes to the delegation contract + // This step requires generating BLS keys for validators, signing messages, and sending the "addNodes" transaction. + // Add checks to verify nodes are added successfully. + validatorSecretKeysBytes, blsKeys, err := chainSimulator.GenerateBlsPrivateKeys(1) + require.Nil(t, err) + err = cs.AddValidatorKeys(validatorSecretKeysBytes) + require.Nil(t, err) + + signatures := getSignatures(delegationContractAddressBytes, validatorSecretKeysBytes) + txAddNodes := generateTransaction(validatorOwnerBytes, 1, delegationContractAddressBytes, zeroValue, addNodesTxData(blsKeys, signatures), gasLimitForAddNodesOperation) + addNodesTx, err := cs.SendTxAndGenerateBlockTilTxIsExecuted(txAddNodes, maxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, addNodesTx) + + output, err = executeQuery(cs, core.MetachainShardId, delegationContractAddressBytes, "getAllNodeStates", nil) + require.Nil(t, err) + stakedKeys, notStakedKeys, unStakedKeys := getNodesFromContract(output.ReturnData) + require.Equal(t, 0, len(stakedKeys)) + require.Equal(t, 1, len(notStakedKeys)) + require.Equal(t, blsKeys[0], hex.EncodeToString(notStakedKeys[0])) + require.Equal(t, 0, len(unStakedKeys)) + + expectedTopUp := big.NewInt(0).Set(initialDelegationValue) + expectedTotalStaked := big.NewInt(0).Set(initialDelegationValue) + output, err = executeQuery(cs, core.MetachainShardId, delegationContractAddressBytes, "getTotalActiveStake", nil) + require.Nil(t, err) + require.Equal(t, expectedTotalStaked, big.NewInt(0).SetBytes(output.ReturnData[0])) + require.Equal(t, expectedTopUp, getBLSTopUpValue(t, metachainNode, delegationContractAddressBytes)) + + output, err = executeQuery(cs, core.MetachainShardId, delegationContractAddressBytes, "getUserActiveStake", [][]byte{validatorOwnerBytes}) + require.Nil(t, err) + require.Equal(t, initialDelegationValue, big.NewInt(0).SetBytes(output.ReturnData[0])) + + // Step 3: Perform delegation operations + txDelegate1 := generateTransaction(delegator1Bytes, 0, delegationContractAddressBytes, initialDelegationValue, "delegate", gasLimitForDelegate) + delegate1Tx, err := cs.SendTxAndGenerateBlockTilTxIsExecuted(txDelegate1, maxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, delegate1Tx) + + expectedTopUp = expectedTopUp.Add(expectedTopUp, initialDelegationValue) + expectedTotalStaked = expectedTotalStaked.Add(expectedTotalStaked, initialDelegationValue) + output, err = executeQuery(cs, core.MetachainShardId, delegationContractAddressBytes, "getTotalActiveStake", nil) + require.Nil(t, err) + require.Equal(t, expectedTotalStaked, big.NewInt(0).SetBytes(output.ReturnData[0])) + require.Equal(t, expectedTopUp, getBLSTopUpValue(t, metachainNode, delegationContractAddressBytes)) + + output, err = executeQuery(cs, core.MetachainShardId, delegationContractAddressBytes, "getUserActiveStake", [][]byte{delegator1Bytes}) + require.Nil(t, err) + require.Equal(t, initialDelegationValue, big.NewInt(0).SetBytes(output.ReturnData[0])) + + txDelegate2 := generateTransaction(delegator2Bytes, 0, delegationContractAddressBytes, initialDelegationValue, "delegate", gasLimitForDelegate) + delegate2Tx, err := cs.SendTxAndGenerateBlockTilTxIsExecuted(txDelegate2, 
maxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, delegate2Tx) + + expectedTopUp = expectedTopUp.Add(expectedTopUp, initialDelegationValue) + expectedTotalStaked = expectedTotalStaked.Add(expectedTotalStaked, initialDelegationValue) + output, err = executeQuery(cs, core.MetachainShardId, delegationContractAddressBytes, "getTotalActiveStake", nil) + require.Nil(t, err) + require.Equal(t, expectedTotalStaked, big.NewInt(0).SetBytes(output.ReturnData[0])) + require.Equal(t, expectedTopUp, getBLSTopUpValue(t, metachainNode, delegationContractAddressBytes)) + + output, err = executeQuery(cs, core.MetachainShardId, delegationContractAddressBytes, "getUserActiveStake", [][]byte{delegator2Bytes}) + require.Nil(t, err) + require.Equal(t, initialDelegationValue, big.NewInt(0).SetBytes(output.ReturnData[0])) + + // Step 4: Perform stakeNodes + + txStakeNodes := generateTransaction(validatorOwnerBytes, 2, delegationContractAddressBytes, zeroValue, fmt.Sprintf("stakeNodes@%s", blsKeys[0]), gasLimitForStakeOperation) + stakeNodesTx, err := cs.SendTxAndGenerateBlockTilTxIsExecuted(txStakeNodes, maxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, stakeNodesTx) + + expectedTopUp = expectedTopUp.Sub(expectedTopUp, initialDelegationValue) + expectedTopUp = expectedTopUp.Sub(expectedTopUp, initialDelegationValue) + require.Equal(t, expectedTopUp, getBLSTopUpValue(t, metachainNode, delegationContractAddressBytes)) + + output, err = executeQuery(cs, core.MetachainShardId, delegationContractAddressBytes, "getAllNodeStates", nil) + require.Nil(t, err) + stakedKeys, notStakedKeys, unStakedKeys = getNodesFromContract(output.ReturnData) + require.Equal(t, 1, len(stakedKeys)) + require.Equal(t, blsKeys[0], hex.EncodeToString(stakedKeys[0])) + require.Equal(t, 0, len(notStakedKeys)) + require.Equal(t, 0, len(unStakedKeys)) + + // Make block finalized + err = cs.GenerateBlocks(1) + require.Nil(t, err) + + testBLSKeyIsInQueueOrAuction(t, metachainNode, delegationContractAddressBytes, blsKeys[0], expectedTopUp, 1) + + // Step 5: Perform unDelegate from 1 user + // The nodes should remain in the staked state + // The total active stake should be reduced by the amount undelegated + + txUndelegate1 := generateTransaction(delegator1Bytes, 1, delegationContractAddressBytes, zeroValue, fmt.Sprintf("unDelegate@%s", hex.EncodeToString(initialDelegationValue.Bytes())), gasLimitForUndelegateOperation) + undelegate1Tx, err := cs.SendTxAndGenerateBlockTilTxIsExecuted(txUndelegate1, maxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, undelegate1Tx) + + expectedTopUp = expectedTopUp.Sub(expectedTopUp, initialDelegationValue) + expectedTotalStaked = expectedTotalStaked.Sub(expectedTotalStaked, initialDelegationValue) + output, err = executeQuery(cs, core.MetachainShardId, delegationContractAddressBytes, "getTotalActiveStake", nil) + require.Nil(t, err) + require.Equal(t, expectedTotalStaked, big.NewInt(0).SetBytes(output.ReturnData[0])) + require.Equal(t, expectedTopUp.String(), getBLSTopUpValue(t, metachainNode, delegationContractAddressBytes).String()) + + output, err = executeQuery(cs, core.MetachainShardId, delegationContractAddressBytes, "getUserActiveStake", [][]byte{delegator1Bytes}) + require.Nil(t, err) + require.Equal(t, zeroValue, big.NewInt(0).SetBytes(output.ReturnData[0])) + + output, err = executeQuery(cs, core.MetachainShardId, delegationContractAddressBytes, "getAllNodeStates", nil) + require.Nil(t, err) + stakedKeys, 
notStakedKeys, unStakedKeys = getNodesFromContract(output.ReturnData) + require.Equal(t, 1, len(stakedKeys)) + require.Equal(t, blsKeys[0], hex.EncodeToString(stakedKeys[0])) + require.Equal(t, 0, len(notStakedKeys)) + require.Equal(t, 0, len(unStakedKeys)) + + // Step 6: Perform unDelegate from last user + // The nodes should remain in the unStaked state + // The total active stake should be reduced by the amount undelegated + + txUndelegate2 := generateTransaction(delegator2Bytes, 1, delegationContractAddressBytes, zeroValue, fmt.Sprintf("unDelegate@%s", hex.EncodeToString(initialDelegationValue.Bytes())), gasLimitForUndelegateOperation) + undelegate2Tx, err := cs.SendTxAndGenerateBlockTilTxIsExecuted(txUndelegate2, maxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, undelegate2Tx) + + output, err = executeQuery(cs, core.MetachainShardId, delegationContractAddressBytes, "getTotalActiveStake", nil) + require.Nil(t, err) + require.Equal(t, "1250000000000000000000", big.NewInt(0).SetBytes(output.ReturnData[0]).String()) + require.Equal(t, zeroValue, getBLSTopUpValue(t, metachainNode, delegationContractAddressBytes)) + + output, err = executeQuery(cs, core.MetachainShardId, delegationContractAddressBytes, "getUserActiveStake", [][]byte{delegator2Bytes}) + require.Nil(t, err) + require.Equal(t, "0", big.NewInt(0).SetBytes(output.ReturnData[0]).String()) + + // still staked until epoch change + output, err = executeQuery(cs, core.MetachainShardId, delegationContractAddressBytes, "getAllNodeStates", nil) + require.Nil(t, err) + stakedKeys, notStakedKeys, unStakedKeys = getNodesFromContract(output.ReturnData) + require.Equal(t, 1, len(stakedKeys)) + require.Equal(t, blsKeys[0], hex.EncodeToString(stakedKeys[0])) + require.Equal(t, 0, len(notStakedKeys)) + require.Equal(t, 0, len(unStakedKeys)) + + err = cs.GenerateBlocksUntilEpochIsReached(targetEpoch + 1) + require.Nil(t, err) + + output, err = executeQuery(cs, core.MetachainShardId, delegationContractAddressBytes, "getAllNodeStates", nil) + require.Nil(t, err) + stakedKeys, notStakedKeys, unStakedKeys = getNodesFromContract(output.ReturnData) + require.Equal(t, 0, len(stakedKeys)) + require.Equal(t, 0, len(notStakedKeys)) + require.Equal(t, 1, len(unStakedKeys)) + require.Equal(t, blsKeys[0], hex.EncodeToString(unStakedKeys[0])) +} + +func generateWalletAddressBytes() []byte { + buff := make([]byte, walletAddressBytesLen) + _, _ = rand.Read(buff) + + return buff +} + +func executeQuery(cs chainSimulatorIntegrationTests.ChainSimulator, shardID uint32, scAddress []byte, funcName string, args [][]byte) (*dataVm.VMOutputApi, error) { + output, _, err := cs.GetNodeHandler(shardID).GetFacadeHandler().ExecuteSCQuery(&process.SCQuery{ + ScAddress: scAddress, + FuncName: funcName, + Arguments: args, + }) + return output, err +} + +func addNodesTxData(blsKeys []string, sigs [][]byte) string { + txData := "addNodes" + + for i := range blsKeys { + txData = txData + "@" + blsKeys[i] + "@" + hex.EncodeToString(sigs[i]) + } + + return txData +} + +func getSignatures(msg []byte, blsKeys [][]byte) [][]byte { + signer := mclsig.NewBlsSigner() + + signatures := make([][]byte, len(blsKeys)) + for i, blsKey := range blsKeys { + sk, _ := signing.NewKeyGenerator(mcl.NewSuiteBLS12()).PrivateKeyFromByteArray(blsKey) + signatures[i], _ = signer.Sign(sk, msg) + } + + return signatures +} + +func getNodesFromContract(returnData [][]byte) ([][]byte, [][]byte, [][]byte) { + var stakedKeys, notStakedKeys, unStakedKeys [][]byte + + for i := 0; i 
< len(returnData); i += 2 { + switch string(returnData[i]) { + case "staked": + stakedKeys = append(stakedKeys, returnData[i+1]) + case "notStaked": + notStakedKeys = append(notStakedKeys, returnData[i+1]) + case "unStaked": + unStakedKeys = append(unStakedKeys, returnData[i+1]) + } + } + return stakedKeys, notStakedKeys, unStakedKeys +} + +func getBLSKeyStatus(t *testing.T, metachainNode chainSimulatorProcess.NodeHandler, blsKey []byte) string { + scQuery := &process.SCQuery{ + ScAddress: vm.StakingSCAddress, + FuncName: "getBLSKeyStatus", + CallerAddr: vm.StakingSCAddress, + CallValue: big.NewInt(0), + Arguments: [][]byte{blsKey}, + } + result, _, err := metachainNode.GetFacadeHandler().ExecuteSCQuery(scQuery) + require.Nil(t, err) + require.Equal(t, okReturnCode, result.ReturnCode) + + return string(result.ReturnData[0]) +} + +func getBLSTopUpValue(t *testing.T, metachainNode chainSimulatorProcess.NodeHandler, address []byte) *big.Int { + scQuery := &process.SCQuery{ + ScAddress: vm.ValidatorSCAddress, + FuncName: "getTotalStakedTopUpStakedBlsKeys", + CallerAddr: vm.StakingSCAddress, + CallValue: big.NewInt(0), + Arguments: [][]byte{address}, + } + result, _, err := metachainNode.GetFacadeHandler().ExecuteSCQuery(scQuery) + require.Nil(t, err) + require.Equal(t, okReturnCode, result.ReturnCode) + + if len(result.ReturnData[0]) == 0 { + return big.NewInt(0) + } + + return big.NewInt(0).SetBytes(result.ReturnData[0]) +} + +func generateTransaction(sender []byte, nonce uint64, receiver []byte, value *big.Int, data string, gasLimit uint64) *transaction.Transaction { + return &transaction.Transaction{ + Nonce: nonce, + Value: value, + SndAddr: sender, + RcvAddr: receiver, + Data: []byte(data), + GasLimit: gasLimit, + GasPrice: minGasPrice, + ChainID: []byte(configs.ChainID), + Version: txVersion, + Signature: []byte(mockTxSignature), + } +} + +// Test description: +// Test that merging delegation with whiteListForMerge and mergeValidatorToDelegationWithWhitelist contracts still works properly +// Test that their topups will merge too and will be used by auction list computing. +// +// Internal test scenario #12 +func TestChainSimulator_MergeDelegation(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + + roundDurationInMillis := uint64(6000) + roundsPerEpoch := core.OptionalUint64{ + HasValue: true, + Value: 30, + } + + // Test steps: + // 1. User A - Stake 1 node to have 100 egld more than minimum required stake value + // 2. User A - Execute `makeNewContractFromValidatorData` to create delegation contract based on User A account + // 3. User B - Stake 1 node with more than 2500 egld + // 4. User A - Execute `whiteListForMerge@addressA` in order to whitelist for merge User B + // 5. User B - Execute `mergeValidatorToDelegationWithWhitelist@delegationContract` in order to merge User B to delegation contract created at step 2. 
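For reference before the sub-tests that follow: the two system-SC calls at the heart of steps 4 and 5 are plain `function@hexEncodedArg` data fields. A minimal sketch of the payloads, written with the same variable names that the shared test body (testChainSimulatorMergingDelegation) defines further down; this is illustrative only and not part of the diff:

	// Step 4: sent by User A to the delegation contract created at step 2, carrying User B's address.
	whitelistData := fmt.Sprintf("whitelistForMerge@%s", hex.EncodeToString(validatorB.Bytes))
	// Step 5: sent by User B to the delegation manager system SC, carrying the delegation contract address.
	mergeData := fmt.Sprintf("mergeValidatorToDelegationWithWhitelist@%s", hex.EncodeToString(delegationAddress))
	// After the merge, the two 100 EGLD top-ups are expected to be combined into 200 EGLD on the delegation contract.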
+ + t.Run("staking ph 4 is not active", func(t *testing.T) { + cs, err := chainSimulator.NewChainSimulator(chainSimulator.ArgsChainSimulator{ + BypassTxSignatureCheck: false, + TempDir: t.TempDir(), + PathToInitialConfig: defaultPathToInitialConfig, + NumOfShards: 3, + GenesisTimestamp: time.Now().Unix(), + RoundDurationInMillis: roundDurationInMillis, + RoundsPerEpoch: roundsPerEpoch, + ApiInterface: api.NewNoApiInterface(), + MinNodesPerShard: 3, + MetaChainMinNodes: 3, + NumNodesWaitingListMeta: 3, + NumNodesWaitingListShard: 3, + AlterConfigsFunction: func(cfg *config.Configs) { + cfg.EpochConfig.EnableEpochs.StakingV4Step1EnableEpoch = 100 + cfg.EpochConfig.EnableEpochs.StakingV4Step2EnableEpoch = 101 + cfg.EpochConfig.EnableEpochs.StakingV4Step3EnableEpoch = 102 + + cfg.EpochConfig.EnableEpochs.MaxNodesChangeEnableEpoch[2].EpochEnable = 102 + }, + }) + require.Nil(t, err) + require.NotNil(t, cs) + + defer cs.Close() + + testChainSimulatorMergingDelegation(t, cs, 1) + }) + + t.Run("staking ph 4 step 1 is active", func(t *testing.T) { + cs, err := chainSimulator.NewChainSimulator(chainSimulator.ArgsChainSimulator{ + BypassTxSignatureCheck: false, + TempDir: t.TempDir(), + PathToInitialConfig: defaultPathToInitialConfig, + NumOfShards: 3, + GenesisTimestamp: time.Now().Unix(), + RoundDurationInMillis: roundDurationInMillis, + RoundsPerEpoch: roundsPerEpoch, + ApiInterface: api.NewNoApiInterface(), + MinNodesPerShard: 3, + MetaChainMinNodes: 3, + NumNodesWaitingListMeta: 3, + NumNodesWaitingListShard: 3, + AlterConfigsFunction: func(cfg *config.Configs) { + cfg.EpochConfig.EnableEpochs.StakingV4Step1EnableEpoch = 2 + cfg.EpochConfig.EnableEpochs.StakingV4Step2EnableEpoch = 3 + cfg.EpochConfig.EnableEpochs.StakingV4Step3EnableEpoch = 4 + + cfg.EpochConfig.EnableEpochs.MaxNodesChangeEnableEpoch[2].EpochEnable = 4 + }, + }) + require.Nil(t, err) + require.NotNil(t, cs) + + defer cs.Close() + + testChainSimulatorMergingDelegation(t, cs, 2) + }) + + t.Run("staking ph 4 step 2 is active", func(t *testing.T) { + cs, err := chainSimulator.NewChainSimulator(chainSimulator.ArgsChainSimulator{ + BypassTxSignatureCheck: false, + TempDir: t.TempDir(), + PathToInitialConfig: defaultPathToInitialConfig, + NumOfShards: 3, + GenesisTimestamp: time.Now().Unix(), + RoundDurationInMillis: roundDurationInMillis, + RoundsPerEpoch: roundsPerEpoch, + ApiInterface: api.NewNoApiInterface(), + MinNodesPerShard: 3, + MetaChainMinNodes: 3, + NumNodesWaitingListMeta: 3, + NumNodesWaitingListShard: 3, + AlterConfigsFunction: func(cfg *config.Configs) { + cfg.EpochConfig.EnableEpochs.StakingV4Step1EnableEpoch = 2 + cfg.EpochConfig.EnableEpochs.StakingV4Step2EnableEpoch = 3 + cfg.EpochConfig.EnableEpochs.StakingV4Step3EnableEpoch = 4 + + cfg.EpochConfig.EnableEpochs.MaxNodesChangeEnableEpoch[2].EpochEnable = 4 + }, + }) + require.Nil(t, err) + require.NotNil(t, cs) + + defer cs.Close() + + testChainSimulatorMergingDelegation(t, cs, 3) + }) + + t.Run("staking ph 4 step 3 is active", func(t *testing.T) { + cs, err := chainSimulator.NewChainSimulator(chainSimulator.ArgsChainSimulator{ + BypassTxSignatureCheck: false, + TempDir: t.TempDir(), + PathToInitialConfig: defaultPathToInitialConfig, + NumOfShards: 3, + GenesisTimestamp: time.Now().Unix(), + RoundDurationInMillis: roundDurationInMillis, + RoundsPerEpoch: roundsPerEpoch, + ApiInterface: api.NewNoApiInterface(), + MinNodesPerShard: 3, + MetaChainMinNodes: 3, + NumNodesWaitingListMeta: 3, + NumNodesWaitingListShard: 3, + AlterConfigsFunction: func(cfg *config.Configs) 
{ + cfg.EpochConfig.EnableEpochs.StakingV4Step1EnableEpoch = 2 + cfg.EpochConfig.EnableEpochs.StakingV4Step2EnableEpoch = 3 + cfg.EpochConfig.EnableEpochs.StakingV4Step3EnableEpoch = 4 + + cfg.EpochConfig.EnableEpochs.MaxNodesChangeEnableEpoch[2].EpochEnable = 4 + }, + }) + require.Nil(t, err) + require.NotNil(t, cs) + + defer cs.Close() + + testChainSimulatorMergingDelegation(t, cs, 4) + }) +} + +func testChainSimulatorMergingDelegation(t *testing.T, cs chainSimulatorIntegrationTests.ChainSimulator, targetEpoch int32) { + err := cs.GenerateBlocksUntilEpochIsReached(targetEpoch) + require.Nil(t, err) + + privateKeys, blsKeys, err := chainSimulator.GenerateBlsPrivateKeys(3) + require.Nil(t, err) + + err = cs.AddValidatorKeys(privateKeys) + require.Nil(t, err) + metachainNode := cs.GetNodeHandler(core.MetachainShardId) + + mintValue := big.NewInt(3000) + mintValue = mintValue.Mul(oneEGLD, mintValue) + + validatorA, err := cs.GenerateAndMintWalletAddress(core.AllShardId, mintValue) + require.Nil(t, err) + + validatorB, err := cs.GenerateAndMintWalletAddress(core.AllShardId, mintValue) + require.Nil(t, err) + + log.Info("Step 1. User A: - stake 1 node to have 100 egld more than minimum stake value") + stakeValue := big.NewInt(0).Set(minimumStakeValue) + addedStakedValue := big.NewInt(0).Mul(oneEGLD, big.NewInt(100)) + stakeValue.Add(stakeValue, addedStakedValue) + txDataField := fmt.Sprintf("stake@01@%s@%s", blsKeys[0], mockBLSSignature) + txStake := generateTransaction(validatorA.Bytes, 0, vm.ValidatorSCAddress, stakeValue, txDataField, gasLimitForStakeOperation) + stakeTx, err := cs.SendTxAndGenerateBlockTilTxIsExecuted(txStake, maxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, stakeTx) + + err = cs.GenerateBlocks(2) // allow the metachain to finalize the block that contains the staking of the node + require.Nil(t, err) + + testBLSKeyIsInQueueOrAuction(t, metachainNode, validatorA.Bytes, blsKeys[0], addedStakedValue, 1) + require.Equal(t, addedStakedValue, getBLSTopUpValue(t, metachainNode, validatorA.Bytes)) + + log.Info("Step 2. Execute MakeNewContractFromValidatorData for User A") + txDataField = fmt.Sprintf("makeNewContractFromValidatorData@%s@%s", maxCap, hexServiceFee) + txConvert := generateTransaction(validatorA.Bytes, 1, vm.DelegationManagerSCAddress, zeroValue, txDataField, gasLimitForConvertOperation) + convertTx, err := cs.SendTxAndGenerateBlockTilTxIsExecuted(txConvert, maxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, convertTx) + + delegationAddress := convertTx.Logs.Events[0].Topics[1] + + err = cs.GenerateBlocks(2) // allow the metachain to finalize the block that contains the staking of the node + require.Nil(t, err) + + testBLSKeyIsInQueueOrAuction(t, metachainNode, delegationAddress, blsKeys[0], addedStakedValue, 1) + + log.Info("Step 3. 
User B: - stake 1 node to have 100 egld more") + stakeValue = big.NewInt(0).Set(minimumStakeValue) + addedStakedValue = big.NewInt(0).Mul(oneEGLD, big.NewInt(100)) + stakeValue.Add(stakeValue, addedStakedValue) + txDataField = fmt.Sprintf("stake@01@%s@%s", blsKeys[1], mockBLSSignature) + txStake = generateTransaction(validatorB.Bytes, 0, vm.ValidatorSCAddress, stakeValue, txDataField, gasLimitForStakeOperation) + stakeTx, err = cs.SendTxAndGenerateBlockTilTxIsExecuted(txStake, maxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, stakeTx) + + err = cs.GenerateBlocks(2) // allow the metachain to finalize the block that contains the staking of the node + require.Nil(t, err) + + testBLSKeyIsInQueueOrAuction(t, metachainNode, validatorB.Bytes, blsKeys[1], addedStakedValue, 2) + require.Equal(t, addedStakedValue, getBLSTopUpValue(t, metachainNode, validatorB.Bytes)) + + decodedBLSKey0, _ := hex.DecodeString(blsKeys[0]) + require.Equal(t, delegationAddress, getBLSKeyOwner(t, metachainNode, decodedBLSKey0)) + + decodedBLSKey1, _ := hex.DecodeString(blsKeys[1]) + require.Equal(t, validatorB.Bytes, getBLSKeyOwner(t, metachainNode, decodedBLSKey1)) + + log.Info("Step 4. User A : whitelistForMerge@addressB") + txDataField = fmt.Sprintf("whitelistForMerge@%s", hex.EncodeToString(validatorB.Bytes)) + whitelistForMerge := generateTransaction(validatorA.Bytes, 2, delegationAddress, zeroValue, txDataField, gasLimitForDelegate) + whitelistForMergeTx, err := cs.SendTxAndGenerateBlockTilTxIsExecuted(whitelistForMerge, maxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, whitelistForMergeTx) + + err = cs.GenerateBlocks(2) // allow the metachain to finalize the block that contains the staking of the node + require.Nil(t, err) + + log.Info("Step 5. 
User A : mergeValidatorToDelegationWithWhitelist") + txDataField = fmt.Sprintf("mergeValidatorToDelegationWithWhitelist@%s", hex.EncodeToString(delegationAddress)) + + txConvert = generateTransaction(validatorB.Bytes, 1, vm.DelegationManagerSCAddress, zeroValue, txDataField, gasLimitForMergeOperation) + convertTx, err = cs.SendTxAndGenerateBlockTilTxIsExecuted(txConvert, maxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, convertTx) + + err = cs.GenerateBlocks(2) // allow the metachain to finalize the block that contains the staking of the node + require.Nil(t, err) + + decodedBLSKey0, _ = hex.DecodeString(blsKeys[0]) + require.Equal(t, delegationAddress, getBLSKeyOwner(t, metachainNode, decodedBLSKey0)) + + decodedBLSKey1, _ = hex.DecodeString(blsKeys[1]) + require.Equal(t, delegationAddress, getBLSKeyOwner(t, metachainNode, decodedBLSKey1)) + + expectedTopUpValue := big.NewInt(0).Mul(oneEGLD, big.NewInt(200)) + require.Equal(t, expectedTopUpValue, getBLSTopUpValue(t, metachainNode, delegationAddress)) +} + +func getBLSKeyOwner(t *testing.T, metachainNode chainSimulatorProcess.NodeHandler, blsKey []byte) []byte { + scQuery := &process.SCQuery{ + ScAddress: vm.StakingSCAddress, + FuncName: "getOwner", + CallerAddr: vm.ValidatorSCAddress, + CallValue: big.NewInt(0), + Arguments: [][]byte{blsKey}, + } + result, _, err := metachainNode.GetFacadeHandler().ExecuteSCQuery(scQuery) + require.Nil(t, err) + require.Equal(t, okReturnCode, result.ReturnCode) + + return result.ReturnData[0] +} diff --git a/integrationTests/chainSimulator/staking/jail_test.go b/integrationTests/chainSimulator/staking/jail_test.go new file mode 100644 index 00000000000..c2e6b13e9d1 --- /dev/null +++ b/integrationTests/chainSimulator/staking/jail_test.go @@ -0,0 +1,256 @@ +package staking + +import ( + "encoding/hex" + "fmt" + "math/big" + "testing" + "time" + + "github.com/multiversx/mx-chain-core-go/core" + "github.com/multiversx/mx-chain-core-go/data/transaction" + "github.com/multiversx/mx-chain-go/config" + chainSimulatorIntegrationTests "github.com/multiversx/mx-chain-go/integrationTests/chainSimulator" + "github.com/multiversx/mx-chain-go/node/chainSimulator" + "github.com/multiversx/mx-chain-go/node/chainSimulator/components/api" + "github.com/multiversx/mx-chain-go/node/chainSimulator/configs" + "github.com/multiversx/mx-chain-go/vm" + "github.com/stretchr/testify/require" +) + +const ( + stakingV4JailUnJailStep1EnableEpoch = 5 + + epochWhenNodeIsJailed = 4 +) + +// Test description +// All test cases will do a stake transaction and wait till the new node is jailed +// testcase1 -- unJail transaction will be sent when staking v3.5 is still action --> node status should be `new` after unjail +// testcase2 -- unJail transaction will be sent when staking v4 step1 is action --> node status should be `auction` after unjail +// testcase3 -- unJail transaction will be sent when staking v4 step2 is action --> node status should be `auction` after unjail +// testcase4 -- unJail transaction will be sent when staking v4 step3 is action --> node status should be `auction` after unjail +func TestChainSimulator_ValidatorJailUnJail(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + + t.Run("staking ph 4 is not active", func(t *testing.T) { + testChainSimulatorJailAndUnJail(t, 4, "new") + }) + + t.Run("staking ph 4 step 1 active", func(t *testing.T) { + testChainSimulatorJailAndUnJail(t, 5, "auction") + }) + + t.Run("staking ph 4 step 2 active", func(t *testing.T) { + 
testChainSimulatorJailAndUnJail(t, 6, "auction") + }) + + t.Run("staking ph 4 step 3 active", func(t *testing.T) { + testChainSimulatorJailAndUnJail(t, 7, "auction") + }) +} + +func testChainSimulatorJailAndUnJail(t *testing.T, targetEpoch int32, nodeStatusAfterUnJail string) { + startTime := time.Now().Unix() + roundDurationInMillis := uint64(6000) + roundsPerEpoch := core.OptionalUint64{ + HasValue: true, + Value: 20, + } + + numOfShards := uint32(3) + + cs, err := chainSimulator.NewChainSimulator(chainSimulator.ArgsChainSimulator{ + BypassTxSignatureCheck: false, + TempDir: t.TempDir(), + PathToInitialConfig: defaultPathToInitialConfig, + NumOfShards: numOfShards, + GenesisTimestamp: startTime, + RoundDurationInMillis: roundDurationInMillis, + RoundsPerEpoch: roundsPerEpoch, + ApiInterface: api.NewNoApiInterface(), + MinNodesPerShard: 2, + MetaChainMinNodes: 2, + AlterConfigsFunction: func(cfg *config.Configs) { + configs.SetStakingV4ActivationEpochs(cfg, stakingV4JailUnJailStep1EnableEpoch) + newNumNodes := cfg.SystemSCConfig.StakingSystemSCConfig.MaxNumberOfNodesForStake + 8 // 8 nodes until new nodes will be placed on queue + configs.SetMaxNumberOfNodesInConfigs(cfg, newNumNodes, numOfShards) + configs.SetQuickJailRatingConfig(cfg) + }, + }) + require.Nil(t, err) + require.NotNil(t, cs) + defer cs.Close() + + metachainNode := cs.GetNodeHandler(core.MetachainShardId) + err = cs.GenerateBlocksUntilEpochIsReached(1) + require.Nil(t, err) + + _, blsKeys, err := chainSimulator.GenerateBlsPrivateKeys(1) + require.Nil(t, err) + + mintValue := big.NewInt(0).Mul(oneEGLD, big.NewInt(3000)) + walletAddress, err := cs.GenerateAndMintWalletAddress(core.AllShardId, mintValue) + require.Nil(t, err) + + txDataField := fmt.Sprintf("stake@01@%s@%s", blsKeys[0], mockBLSSignature) + txStake := generateTransaction(walletAddress.Bytes, 0, vm.ValidatorSCAddress, minimumStakeValue, txDataField, gasLimitForStakeOperation) + stakeTx, err := cs.SendTxAndGenerateBlockTilTxIsExecuted(txStake, maxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, stakeTx) + + // wait node to be jailed + err = cs.GenerateBlocksUntilEpochIsReached(epochWhenNodeIsJailed) + require.Nil(t, err) + + decodedBLSKey, _ := hex.DecodeString(blsKeys[0]) + status := getBLSKeyStatus(t, metachainNode, decodedBLSKey) + require.Equal(t, "jailed", status) + + // do an unjail transaction + unJailValue, _ := big.NewInt(0).SetString("2500000000000000000", 10) + txUnJailDataField := fmt.Sprintf("unJail@%s", blsKeys[0]) + txUnJail := generateTransaction(walletAddress.Bytes, 1, vm.ValidatorSCAddress, unJailValue, txUnJailDataField, gasLimitForStakeOperation) + + err = cs.GenerateBlocksUntilEpochIsReached(targetEpoch) + require.Nil(t, err) + + unJailTx, err := cs.SendTxAndGenerateBlockTilTxIsExecuted(txUnJail, maxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, unJailTx) + require.Equal(t, transaction.TxStatusSuccess, unJailTx.Status) + + err = cs.GenerateBlocks(1) + require.Nil(t, err) + + status = getBLSKeyStatus(t, metachainNode, decodedBLSKey) + require.Equal(t, "staked", status) + + checkValidatorStatus(t, cs, blsKeys[0], nodeStatusAfterUnJail) + + err = cs.GenerateBlocksUntilEpochIsReached(targetEpoch + 1) + require.Nil(t, err) + + checkValidatorStatus(t, cs, blsKeys[0], "waiting") + + err = cs.GenerateBlocksUntilEpochIsReached(targetEpoch + 2) + require.Nil(t, err) + + checkValidatorStatus(t, cs, blsKeys[0], "eligible") +} + +// Test description +// Add a new node and wait until the node 
get jailed +// Add a second node to take the place of the jailed node +// UnJail the first node --> should go in queue +// Activate staking v4 step 1 --> node should be moved from queue to auction list + +// Internal test scenario #2 +func TestChainSimulator_FromQueueToAuctionList(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + + startTime := time.Now().Unix() + roundDurationInMillis := uint64(6000) + roundsPerEpoch := core.OptionalUint64{ + HasValue: true, + Value: 20, + } + + numOfShards := uint32(3) + + cs, err := chainSimulator.NewChainSimulator(chainSimulator.ArgsChainSimulator{ + BypassTxSignatureCheck: false, + TempDir: t.TempDir(), + PathToInitialConfig: defaultPathToInitialConfig, + NumOfShards: numOfShards, + GenesisTimestamp: startTime, + RoundDurationInMillis: roundDurationInMillis, + RoundsPerEpoch: roundsPerEpoch, + ApiInterface: api.NewNoApiInterface(), + MinNodesPerShard: 3, + MetaChainMinNodes: 3, + AlterConfigsFunction: func(cfg *config.Configs) { + configs.SetStakingV4ActivationEpochs(cfg, stakingV4JailUnJailStep1EnableEpoch) + configs.SetQuickJailRatingConfig(cfg) + + newNumNodes := cfg.SystemSCConfig.StakingSystemSCConfig.MaxNumberOfNodesForStake + 1 + configs.SetMaxNumberOfNodesInConfigs(cfg, newNumNodes, numOfShards) + }, + }) + require.Nil(t, err) + require.NotNil(t, cs) + defer cs.Close() + + metachainNode := cs.GetNodeHandler(core.MetachainShardId) + err = cs.GenerateBlocks(30) + require.Nil(t, err) + + privateKeys, blsKeys, err := chainSimulator.GenerateBlsPrivateKeys(2) + require.Nil(t, err) + + err = cs.AddValidatorKeys([][]byte{privateKeys[1]}) + require.Nil(t, err) + + mintValue := big.NewInt(0).Mul(oneEGLD, big.NewInt(6000)) + walletAddress, err := cs.GenerateAndMintWalletAddress(core.AllShardId, mintValue) + require.Nil(t, err) + + txDataField := fmt.Sprintf("stake@01@%s@%s", blsKeys[0], mockBLSSignature) + txStake := generateTransaction(walletAddress.Bytes, 0, vm.ValidatorSCAddress, minimumStakeValue, txDataField, gasLimitForStakeOperation) + stakeTx, err := cs.SendTxAndGenerateBlockTilTxIsExecuted(txStake, maxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, stakeTx) + + // wait node to be jailed + err = cs.GenerateBlocksUntilEpochIsReached(epochWhenNodeIsJailed) + require.Nil(t, err) + + decodedBLSKey0, _ := hex.DecodeString(blsKeys[0]) + status := getBLSKeyStatus(t, metachainNode, decodedBLSKey0) + require.Equal(t, "jailed", status) + + // add one more node + txDataField = fmt.Sprintf("stake@01@%s@%s", blsKeys[1], mockBLSSignature) + txStake = generateTransaction(walletAddress.Bytes, 1, vm.ValidatorSCAddress, minimumStakeValue, txDataField, gasLimitForStakeOperation) + stakeTx, err = cs.SendTxAndGenerateBlockTilTxIsExecuted(txStake, maxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, stakeTx) + + decodedBLSKey1, _ := hex.DecodeString(blsKeys[1]) + status = getBLSKeyStatus(t, metachainNode, decodedBLSKey1) + require.Equal(t, "staked", status) + + // unJail the first node + unJailValue, _ := big.NewInt(0).SetString("2500000000000000000", 10) + txUnJailDataField := fmt.Sprintf("unJail@%s", blsKeys[0]) + txUnJail := generateTransaction(walletAddress.Bytes, 2, vm.ValidatorSCAddress, unJailValue, txUnJailDataField, gasLimitForStakeOperation) + + unJailTx, err := cs.SendTxAndGenerateBlockTilTxIsExecuted(txUnJail, maxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, unJailTx) + require.Equal(t, transaction.TxStatusSuccess, unJailTx.Status) + 
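	// With staking v4 not yet active at this point, and the single extra stake slot granted by the
	// config above already taken by the second node, the unjailed key is expected to be placed back
	// in the staking queue rather than becoming staked directly. Once the staking v4 step 1 enable
	// epoch is reached, the queue is dissolved and the same key should be reported as staked,
	// appearing in the auction list.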
+ status = getBLSKeyStatus(t, metachainNode, decodedBLSKey0) + require.Equal(t, "queued", status) + + err = cs.GenerateBlocksUntilEpochIsReached(stakingV4JailUnJailStep1EnableEpoch) + require.Nil(t, err) + + status = getBLSKeyStatus(t, metachainNode, decodedBLSKey0) + require.Equal(t, "staked", status) + + checkValidatorStatus(t, cs, blsKeys[0], "auction") +} + +func checkValidatorStatus(t *testing.T, cs chainSimulatorIntegrationTests.ChainSimulator, blsKey string, expectedStatus string) { + err := cs.GetNodeHandler(core.MetachainShardId).GetProcessComponents().ValidatorsProvider().ForceUpdate() + require.Nil(t, err) + + validatorsStatistics, err := cs.GetNodeHandler(core.MetachainShardId).GetFacadeHandler().ValidatorStatisticsApi() + require.Nil(t, err) + require.Equal(t, expectedStatus, validatorsStatistics[blsKey].ValidatorStatus) +} diff --git a/integrationTests/chainSimulator/staking/simpleStake_test.go b/integrationTests/chainSimulator/staking/simpleStake_test.go new file mode 100644 index 00000000000..6439e14d623 --- /dev/null +++ b/integrationTests/chainSimulator/staking/simpleStake_test.go @@ -0,0 +1,267 @@ +package staking + +import ( + "encoding/hex" + "fmt" + "math/big" + "testing" + "time" + + "github.com/multiversx/mx-chain-core-go/core" + "github.com/multiversx/mx-chain-core-go/data/transaction" + "github.com/multiversx/mx-chain-go/common" + "github.com/multiversx/mx-chain-go/config" + "github.com/multiversx/mx-chain-go/node/chainSimulator" + "github.com/multiversx/mx-chain-go/node/chainSimulator/components/api" + "github.com/multiversx/mx-chain-go/node/chainSimulator/configs" + "github.com/multiversx/mx-chain-go/node/chainSimulator/process" + "github.com/multiversx/mx-chain-go/vm" + "github.com/stretchr/testify/require" +) + +// Test scenarios +// Do 3 stake transactions from 3 different wallets - tx value 2499, 2500, 2501 +// testcase1 -- staking v3.5 --> tx1 fail, tx2 - node in queue, tx3 - node in queue with topUp 1 +// testcase2 -- staking v4 step1 --> tx1 fail, tx2 - node in auction, tx3 - node in auction with topUp 1 +// testcase3 -- staking v4 step2 --> tx1 fail, tx2 - node in auction, tx3 - node in auction with topUp 1 +// testcase4 -- staking v3.step3 --> tx1 fail, tx2 - node in auction, tx3 - node in auction with topUp 1 + +// // Internal test scenario #3 +func TestChainSimulator_SimpleStake(t *testing.T) { + t.Run("staking ph 4 is not active", func(t *testing.T) { + testChainSimulatorSimpleStake(t, 1, "queued") + }) + + t.Run("staking ph 4 step1", func(t *testing.T) { + testChainSimulatorSimpleStake(t, 2, "auction") + }) + + t.Run("staking ph 4 step2", func(t *testing.T) { + testChainSimulatorSimpleStake(t, 3, "auction") + }) + + t.Run("staking ph 4 step3", func(t *testing.T) { + testChainSimulatorSimpleStake(t, 4, "auction") + }) +} + +func testChainSimulatorSimpleStake(t *testing.T, targetEpoch int32, nodesStatus string) { + if testing.Short() { + t.Skip("this is not a short test") + } + startTime := time.Now().Unix() + roundDurationInMillis := uint64(6000) + roundsPerEpoch := core.OptionalUint64{ + HasValue: true, + Value: 20, + } + + numOfShards := uint32(3) + + cs, err := chainSimulator.NewChainSimulator(chainSimulator.ArgsChainSimulator{ + BypassTxSignatureCheck: false, + TempDir: t.TempDir(), + PathToInitialConfig: defaultPathToInitialConfig, + NumOfShards: numOfShards, + GenesisTimestamp: startTime, + RoundDurationInMillis: roundDurationInMillis, + RoundsPerEpoch: roundsPerEpoch, + ApiInterface: api.NewNoApiInterface(), + MinNodesPerShard: 3, + 
MetaChainMinNodes: 3, + NumNodesWaitingListMeta: 3, + NumNodesWaitingListShard: 3, + AlterConfigsFunction: func(cfg *config.Configs) { + configs.SetStakingV4ActivationEpochs(cfg, 2) + }, + }) + require.Nil(t, err) + require.NotNil(t, cs) + defer cs.Close() + + mintValue := big.NewInt(0).Mul(oneEGLD, big.NewInt(3000)) + wallet1, err := cs.GenerateAndMintWalletAddress(0, mintValue) + require.Nil(t, err) + wallet2, err := cs.GenerateAndMintWalletAddress(0, mintValue) + require.Nil(t, err) + wallet3, err := cs.GenerateAndMintWalletAddress(0, mintValue) + require.Nil(t, err) + + _, blsKeys, err := chainSimulator.GenerateBlsPrivateKeys(3) + require.Nil(t, err) + + err = cs.GenerateBlocksUntilEpochIsReached(targetEpoch) + require.Nil(t, err) + + dataFieldTx1 := fmt.Sprintf("stake@01@%s@%s", blsKeys[0], mockBLSSignature) + tx1Value := big.NewInt(0).Mul(big.NewInt(2499), oneEGLD) + tx1 := generateTransaction(wallet1.Bytes, 0, vm.ValidatorSCAddress, tx1Value, dataFieldTx1, gasLimitForStakeOperation) + + dataFieldTx2 := fmt.Sprintf("stake@01@%s@%s", blsKeys[1], mockBLSSignature) + tx2 := generateTransaction(wallet3.Bytes, 0, vm.ValidatorSCAddress, minimumStakeValue, dataFieldTx2, gasLimitForStakeOperation) + + dataFieldTx3 := fmt.Sprintf("stake@01@%s@%s", blsKeys[2], mockBLSSignature) + tx3Value := big.NewInt(0).Mul(big.NewInt(2501), oneEGLD) + tx3 := generateTransaction(wallet2.Bytes, 0, vm.ValidatorSCAddress, tx3Value, dataFieldTx3, gasLimitForStakeOperation) + + results, err := cs.SendTxsAndGenerateBlocksTilAreExecuted([]*transaction.Transaction{tx1, tx2, tx3}, maxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.Equal(t, 3, len(results)) + require.NotNil(t, results) + + // tx1 should fail + require.Equal(t, "insufficient stake value: expected 2500000000000000000000, got 2499000000000000000000", string(results[0].Logs.Events[0].Topics[1])) + + _ = cs.GenerateBlocks(1) + + metachainNode := cs.GetNodeHandler(core.MetachainShardId) + if targetEpoch < 2 { + bls1, _ := hex.DecodeString(blsKeys[1]) + bls2, _ := hex.DecodeString(blsKeys[2]) + + blsKeyStatus := getBLSKeyStatus(t, metachainNode, bls1) + require.Equal(t, nodesStatus, blsKeyStatus) + + blsKeyStatus = getBLSKeyStatus(t, metachainNode, bls2) + require.Equal(t, nodesStatus, blsKeyStatus) + } else { + // tx2 -- validator should be in queue + checkValidatorStatus(t, cs, blsKeys[1], nodesStatus) + // tx3 -- validator should be in queue + checkValidatorStatus(t, cs, blsKeys[2], nodesStatus) + } +} + +// Test auction list api calls during stakingV4 step 2 and onwards. +// Nodes configuration at genesis consisting of a total of 32 nodes, distributed on 3 shards + meta: +// - 4 eligible nodes/shard +// - 4 waiting nodes/shard +// - 2 nodes to shuffle per shard +// - max num nodes config for stakingV4 step3 = 24 (being downsized from previously 32 nodes) +// Steps: +// 1. Stake 1 node and check that in stakingV4 step1 it is found in auction +// 2. 
From stakingV4 step2 onwards, check that api returns 8 qualified + 1 unqualified nodes +func TestChainSimulator_StakingV4Step2APICalls(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + + stakingV4Step1Epoch := uint32(2) + stakingV4Step2Epoch := uint32(3) + stakingV4Step3Epoch := uint32(4) + + cs, err := chainSimulator.NewChainSimulator(chainSimulator.ArgsChainSimulator{ + BypassTxSignatureCheck: false, + TempDir: t.TempDir(), + PathToInitialConfig: defaultPathToInitialConfig, + NumOfShards: 3, + GenesisTimestamp: time.Now().Unix(), + RoundDurationInMillis: uint64(6000), + RoundsPerEpoch: core.OptionalUint64{ + HasValue: true, + Value: 30, + }, + ApiInterface: api.NewNoApiInterface(), + MinNodesPerShard: 4, + MetaChainMinNodes: 4, + NumNodesWaitingListMeta: 4, + NumNodesWaitingListShard: 4, + AlterConfigsFunction: func(cfg *config.Configs) { + cfg.EpochConfig.EnableEpochs.StakingV4Step1EnableEpoch = stakingV4Step1Epoch + cfg.EpochConfig.EnableEpochs.StakingV4Step2EnableEpoch = stakingV4Step2Epoch + cfg.EpochConfig.EnableEpochs.StakingV4Step3EnableEpoch = stakingV4Step3Epoch + + cfg.EpochConfig.EnableEpochs.MaxNodesChangeEnableEpoch[1].MaxNumNodes = 32 + cfg.EpochConfig.EnableEpochs.MaxNodesChangeEnableEpoch[1].NodesToShufflePerShard = 2 + + cfg.EpochConfig.EnableEpochs.MaxNodesChangeEnableEpoch[2].EpochEnable = stakingV4Step3Epoch + cfg.EpochConfig.EnableEpochs.MaxNodesChangeEnableEpoch[2].MaxNumNodes = 24 + cfg.EpochConfig.EnableEpochs.MaxNodesChangeEnableEpoch[2].NodesToShufflePerShard = 2 + }, + }) + require.Nil(t, err) + require.NotNil(t, cs) + + defer cs.Close() + + privateKey, blsKeys, err := chainSimulator.GenerateBlsPrivateKeys(1) + require.Nil(t, err) + err = cs.AddValidatorKeys(privateKey) + require.Nil(t, err) + + mintValue := big.NewInt(0).Add(minimumStakeValue, oneEGLD) + validatorOwner, err := cs.GenerateAndMintWalletAddress(core.AllShardId, mintValue) + require.Nil(t, err) + + // Stake a new validator that should end up in auction in step 1 + txDataField := fmt.Sprintf("stake@01@%s@%s", blsKeys[0], mockBLSSignature) + txStake := generateTransaction(validatorOwner.Bytes, 0, vm.ValidatorSCAddress, minimumStakeValue, txDataField, gasLimitForStakeOperation) + stakeTx, err := cs.SendTxAndGenerateBlockTilTxIsExecuted(txStake, maxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, stakeTx) + + metachainNode := cs.GetNodeHandler(core.MetachainShardId) + err = cs.GenerateBlocksUntilEpochIsReached(int32(stakingV4Step1Epoch)) + require.Nil(t, err) + err = cs.GenerateBlocks(2) + require.Nil(t, err) + + // In step 1, only the previously staked node should be in auction list + err = metachainNode.GetProcessComponents().ValidatorsProvider().ForceUpdate() + require.Nil(t, err) + auctionList, err := metachainNode.GetProcessComponents().ValidatorsProvider().GetAuctionList() + require.Nil(t, err) + require.Equal(t, []*common.AuctionListValidatorAPIResponse{ + { + Owner: validatorOwner.Bech32, + NumStakedNodes: 1, + TotalTopUp: "0", + TopUpPerNode: "0", + QualifiedTopUp: "0", + Nodes: []*common.AuctionNode{ + { + BlsKey: blsKeys[0], + Qualified: true, + }, + }, + }, + }, auctionList) + + // For steps 2,3 and onwards, when making API calls, we'll be using the api nodes config provider to mimic the max number of + // nodes as it will be in step 3. This means we'll see the 8 nodes that were shuffled out from the eligible list, + // plus the additional node that was staked manually. 
+ // Since those 8 shuffled out nodes will be replaced only with another 8 nodes, and the auction list size = 9, + // the outcome should show 8 nodes qualifying and 1 node not qualifying + for epochToSimulate := int32(stakingV4Step2Epoch); epochToSimulate < int32(stakingV4Step3Epoch)+3; epochToSimulate++ { + err = cs.GenerateBlocksUntilEpochIsReached(epochToSimulate) + require.Nil(t, err) + err = cs.GenerateBlocks(2) + require.Nil(t, err) + + numQualified, numUnQualified := getNumQualifiedAndUnqualified(t, metachainNode) + require.Equal(t, 8, numQualified) + require.Equal(t, 1, numUnQualified) + } +} + +func getNumQualifiedAndUnqualified(t *testing.T, metachainNode process.NodeHandler) (int, int) { + err := metachainNode.GetProcessComponents().ValidatorsProvider().ForceUpdate() + require.Nil(t, err) + auctionList, err := metachainNode.GetProcessComponents().ValidatorsProvider().GetAuctionList() + require.Nil(t, err) + + numQualified := 0 + numUnQualified := 0 + + for _, auctionOwnerData := range auctionList { + for _, auctionNode := range auctionOwnerData.Nodes { + if auctionNode.Qualified { + numQualified++ + } else { + numUnQualified++ + } + } + } + + return numQualified, numUnQualified +} diff --git a/integrationTests/chainSimulator/staking/stakeAndUnStake_test.go b/integrationTests/chainSimulator/staking/stakeAndUnStake_test.go new file mode 100644 index 00000000000..34ab9c44f78 --- /dev/null +++ b/integrationTests/chainSimulator/staking/stakeAndUnStake_test.go @@ -0,0 +1,2289 @@ +package staking + +import ( + "encoding/hex" + "fmt" + "math/big" + "testing" + "time" + + "github.com/multiversx/mx-chain-core-go/core" + coreAPI "github.com/multiversx/mx-chain-core-go/data/api" + "github.com/multiversx/mx-chain-core-go/data/transaction" + "github.com/multiversx/mx-chain-core-go/data/validator" + "github.com/multiversx/mx-chain-go/common" + "github.com/multiversx/mx-chain-go/config" + chainSimulatorIntegrationTests "github.com/multiversx/mx-chain-go/integrationTests/chainSimulator" + "github.com/multiversx/mx-chain-go/node/chainSimulator" + "github.com/multiversx/mx-chain-go/node/chainSimulator/components/api" + "github.com/multiversx/mx-chain-go/node/chainSimulator/configs" + "github.com/multiversx/mx-chain-go/node/chainSimulator/dtos" + chainSimulatorProcess "github.com/multiversx/mx-chain-go/node/chainSimulator/process" + "github.com/multiversx/mx-chain-go/process" + "github.com/multiversx/mx-chain-go/vm" + logger "github.com/multiversx/mx-chain-logger-go" + "github.com/stretchr/testify/require" +) + +const ( + defaultPathToInitialConfig = "../../../cmd/node/config/" + maxNumOfBlockToGenerateWhenExecutingTx = 7 +) + +var log = logger.GetOrCreate("integrationTests/chainSimulator") + +// TODO scenarios +// Make a staking provider with max num of nodes +// DO a merge transaction + +// Test scenario +// 1. Add a new validator private key in the multi key handler +// 2. Do a stake transaction for the validator key +// 3. Do an unstake transaction (to make a place for the new validator) +// 4. 
Check if the new validator has generated rewards +func TestChainSimulator_AddValidatorKey(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + + startTime := time.Now().Unix() + roundDurationInMillis := uint64(6000) + roundsPerEpoch := core.OptionalUint64{ + HasValue: true, + Value: 20, + } + + numOfShards := uint32(3) + cs, err := chainSimulator.NewChainSimulator(chainSimulator.ArgsChainSimulator{ + BypassTxSignatureCheck: false, + TempDir: t.TempDir(), + PathToInitialConfig: defaultPathToInitialConfig, + NumOfShards: numOfShards, + GenesisTimestamp: startTime, + RoundDurationInMillis: roundDurationInMillis, + RoundsPerEpoch: roundsPerEpoch, + ApiInterface: api.NewNoApiInterface(), + MinNodesPerShard: 3, + MetaChainMinNodes: 3, + NumNodesWaitingListMeta: 0, + NumNodesWaitingListShard: 0, + AlterConfigsFunction: func(cfg *config.Configs) { + newNumNodes := cfg.SystemSCConfig.StakingSystemSCConfig.MaxNumberOfNodesForStake + 8 // 8 nodes until new nodes will be placed on queue + configs.SetMaxNumberOfNodesInConfigs(cfg, newNumNodes, numOfShards) + }, + }) + require.Nil(t, err) + require.NotNil(t, cs) + + defer cs.Close() + + err = cs.GenerateBlocks(30) + require.Nil(t, err) + + // Step 1 --- add a new validator key in the chain simulator + privateKey, blsKeys, err := chainSimulator.GenerateBlsPrivateKeys(1) + require.Nil(t, err) + + err = cs.AddValidatorKeys(privateKey) + require.Nil(t, err) + + newValidatorOwner := "erd1l6xt0rqlyzw56a3k8xwwshq2dcjwy3q9cppucvqsmdyw8r98dz3sae0kxl" + newValidatorOwnerBytes, _ := cs.GetNodeHandler(0).GetCoreComponents().AddressPubKeyConverter().Decode(newValidatorOwner) + rcv := "erd1qqqqqqqqqqqqqqqpqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqplllst77y4l" + rcvAddrBytes, _ := cs.GetNodeHandler(0).GetCoreComponents().AddressPubKeyConverter().Decode(rcv) + + // Step 2 --- set an initial balance for the address that will initialize all the transactions + err = cs.SetStateMultiple([]*dtos.AddressState{ + { + Address: "erd1l6xt0rqlyzw56a3k8xwwshq2dcjwy3q9cppucvqsmdyw8r98dz3sae0kxl", + Balance: "10000000000000000000000", + }, + }) + require.Nil(t, err) + + // Step 3 --- generate and send a stake transaction with the BLS key of the validator key that was added at step 1 + stakeValue, _ := big.NewInt(0).SetString("2500000000000000000000", 10) + tx := &transaction.Transaction{ + Nonce: 0, + Value: stakeValue, + SndAddr: newValidatorOwnerBytes, + RcvAddr: rcvAddrBytes, + Data: []byte(fmt.Sprintf("stake@01@%s@010101", blsKeys[0])), + GasLimit: 50_000_000, + GasPrice: 1000000000, + Signature: []byte("dummy"), + ChainID: []byte(configs.ChainID), + Version: 1, + } + stakeTx, err := cs.SendTxAndGenerateBlockTilTxIsExecuted(tx, maxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, stakeTx) + + shardIDValidatorOwner := cs.GetNodeHandler(0).GetShardCoordinator().ComputeId(newValidatorOwnerBytes) + accountValidatorOwner, _, err := cs.GetNodeHandler(shardIDValidatorOwner).GetFacadeHandler().GetAccount(newValidatorOwner, coreAPI.AccountQueryOptions{}) + require.Nil(t, err) + balanceBeforeActiveValidator := accountValidatorOwner.Balance + + // Step 5 --- create an unStake transaction with the bls key of an initial validator and execute the transaction to make place for the validator that was added at step 3 + firstValidatorKey, err := cs.GetValidatorPrivateKeys()[0].GeneratePublic().ToByteArray() + require.Nil(t, err) + + initialAddressWithValidators := cs.GetInitialWalletKeys().StakeWallets[0].Address + shardID := 
cs.GetNodeHandler(0).GetShardCoordinator().ComputeId(initialAddressWithValidators.Bytes) + initialAccount, _, err := cs.GetNodeHandler(shardID).GetFacadeHandler().GetAccount(initialAddressWithValidators.Bech32, coreAPI.AccountQueryOptions{}) + require.Nil(t, err) + tx = &transaction.Transaction{ + Nonce: initialAccount.Nonce, + Value: big.NewInt(0), + SndAddr: initialAddressWithValidators.Bytes, + RcvAddr: rcvAddrBytes, + Data: []byte(fmt.Sprintf("unStake@%s", hex.EncodeToString(firstValidatorKey))), + GasLimit: 50_000_000, + GasPrice: 1000000000, + Signature: []byte("dummy"), + ChainID: []byte(configs.ChainID), + Version: 1, + } + _, err = cs.SendTxAndGenerateBlockTilTxIsExecuted(tx, maxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + + // Step 6 --- generate 8 epochs to get rewards + err = cs.GenerateBlocksUntilEpochIsReached(8) + require.Nil(t, err) + + metachainNode := cs.GetNodeHandler(core.MetachainShardId) + err = metachainNode.GetProcessComponents().ValidatorsProvider().ForceUpdate() + require.Nil(t, err) + validatorStatistics, err := metachainNode.GetFacadeHandler().ValidatorStatisticsApi() + require.Nil(t, err) + checkValidatorsRating(t, validatorStatistics) + + accountValidatorOwner, _, err = cs.GetNodeHandler(shardIDValidatorOwner).GetFacadeHandler().GetAccount(newValidatorOwner, coreAPI.AccountQueryOptions{}) + require.Nil(t, err) + balanceAfterActiveValidator := accountValidatorOwner.Balance + + log.Info("balance before validator", "value", balanceBeforeActiveValidator) + log.Info("balance after validator", "value", balanceAfterActiveValidator) + + balanceBeforeBig, _ := big.NewInt(0).SetString(balanceBeforeActiveValidator, 10) + balanceAfterBig, _ := big.NewInt(0).SetString(balanceAfterActiveValidator, 10) + diff := balanceAfterBig.Sub(balanceAfterBig, balanceBeforeBig) + log.Info("difference", "value", diff.String()) + + // Step 7 --- check the balance of the validator owner has been increased + require.True(t, diff.Cmp(big.NewInt(0)) > 0) +} + +func TestChainSimulator_AddANewValidatorAfterStakingV4(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + + startTime := time.Now().Unix() + roundDurationInMillis := uint64(6000) + roundsPerEpoch := core.OptionalUint64{ + HasValue: true, + Value: 20, + } + numOfShards := uint32(3) + cs, err := chainSimulator.NewChainSimulator(chainSimulator.ArgsChainSimulator{ + BypassTxSignatureCheck: false, + TempDir: t.TempDir(), + PathToInitialConfig: defaultPathToInitialConfig, + NumOfShards: numOfShards, + GenesisTimestamp: startTime, + RoundDurationInMillis: roundDurationInMillis, + RoundsPerEpoch: roundsPerEpoch, + ApiInterface: api.NewNoApiInterface(), + MinNodesPerShard: 100, + MetaChainMinNodes: 100, + AlterConfigsFunction: func(cfg *config.Configs) { + cfg.SystemSCConfig.StakingSystemSCConfig.NodeLimitPercentage = 1 + cfg.GeneralConfig.ValidatorStatistics.CacheRefreshIntervalInSec = 1 + newNumNodes := cfg.SystemSCConfig.StakingSystemSCConfig.MaxNumberOfNodesForStake + 8 // 8 nodes until new nodes will be placed on queue + configs.SetMaxNumberOfNodesInConfigs(cfg, newNumNodes, numOfShards) + }, + }) + require.Nil(t, err) + require.NotNil(t, cs) + + defer cs.Close() + + err = cs.GenerateBlocks(150) + require.Nil(t, err) + + // Step 1 --- add a new validator key in the chain simulator + numOfNodes := 20 + validatorSecretKeysBytes, blsKeys, err := chainSimulator.GenerateBlsPrivateKeys(numOfNodes) + require.Nil(t, err) + err = cs.AddValidatorKeys(validatorSecretKeysBytes) + require.Nil(t, err) + + 
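	// The single stake transaction sent at step 3 below registers all 20 keys at once. Its data field
	// has the shape stake@<numNodes hex>@<blsKey1>@<sig1>@<blsKey2>@<sig2>@..., where "010101" is used
	// as a placeholder signature for each key. The 51000 EGLD value covers the 2500 EGLD base stake
	// per node (50000 EGLD in total) plus 1000 EGLD of top-up.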
newValidatorOwner := "erd1l6xt0rqlyzw56a3k8xwwshq2dcjwy3q9cppucvqsmdyw8r98dz3sae0kxl" + newValidatorOwnerBytes, _ := cs.GetNodeHandler(0).GetCoreComponents().AddressPubKeyConverter().Decode(newValidatorOwner) + rcv := "erd1qqqqqqqqqqqqqqqpqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqplllst77y4l" + rcvAddrBytes, _ := cs.GetNodeHandler(0).GetCoreComponents().AddressPubKeyConverter().Decode(rcv) + + // Step 2 --- set an initial balance for the address that will initialize all the transactions + err = cs.SetStateMultiple([]*dtos.AddressState{ + { + Address: "erd1l6xt0rqlyzw56a3k8xwwshq2dcjwy3q9cppucvqsmdyw8r98dz3sae0kxl", + Balance: "1000000000000000000000000", + }, + }) + require.Nil(t, err) + + // Step 3 --- generate and send a stake transaction with the BLS keys of the validators key that were added at step 1 + validatorData := "" + for _, blsKey := range blsKeys { + validatorData += fmt.Sprintf("@%s@010101", blsKey) + } + + numOfNodesHex := hex.EncodeToString(big.NewInt(int64(numOfNodes)).Bytes()) + stakeValue, _ := big.NewInt(0).SetString("51000000000000000000000", 10) + tx := &transaction.Transaction{ + Nonce: 0, + Value: stakeValue, + SndAddr: newValidatorOwnerBytes, + RcvAddr: rcvAddrBytes, + Data: []byte(fmt.Sprintf("stake@%s%s", numOfNodesHex, validatorData)), + GasLimit: 500_000_000, + GasPrice: 1000000000, + Signature: []byte("dummy"), + ChainID: []byte(configs.ChainID), + Version: 1, + } + + txFromNetwork, err := cs.SendTxAndGenerateBlockTilTxIsExecuted(tx, maxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, txFromNetwork) + + err = cs.GenerateBlocks(1) + require.Nil(t, err) + + metachainNode := cs.GetNodeHandler(core.MetachainShardId) + err = metachainNode.GetProcessComponents().ValidatorsProvider().ForceUpdate() + require.Nil(t, err) + results, err := metachainNode.GetFacadeHandler().AuctionListApi() + require.Nil(t, err) + require.Equal(t, newValidatorOwner, results[0].Owner) + require.Equal(t, 20, len(results[0].Nodes)) + checkTotalQualified(t, results, 8) + + err = cs.GenerateBlocks(100) + require.Nil(t, err) + + results, err = cs.GetNodeHandler(core.MetachainShardId).GetFacadeHandler().AuctionListApi() + require.Nil(t, err) + checkTotalQualified(t, results, 0) +} + +// Internal test scenario #4 #5 #6 +// do stake +// do unStake +// do unBondNodes +// do unBondTokens +func TestChainSimulatorStakeUnStakeUnBond(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + + t.Run("staking ph 4 is not active", func(t *testing.T) { + testStakeUnStakeUnBond(t, 1) + }) + + t.Run("staking ph 4 step 1 active", func(t *testing.T) { + testStakeUnStakeUnBond(t, 4) + }) + + t.Run("staking ph 4 step 2 active", func(t *testing.T) { + testStakeUnStakeUnBond(t, 5) + }) + + t.Run("staking ph 4 step 3 active", func(t *testing.T) { + testStakeUnStakeUnBond(t, 6) + }) +} + +func testStakeUnStakeUnBond(t *testing.T, targetEpoch int32) { + startTime := time.Now().Unix() + roundDurationInMillis := uint64(6000) + roundsPerEpoch := core.OptionalUint64{ + HasValue: true, + Value: 30, + } + numOfShards := uint32(3) + cs, err := chainSimulator.NewChainSimulator(chainSimulator.ArgsChainSimulator{ + BypassTxSignatureCheck: false, + TempDir: t.TempDir(), + PathToInitialConfig: defaultPathToInitialConfig, + NumOfShards: numOfShards, + GenesisTimestamp: startTime, + RoundDurationInMillis: roundDurationInMillis, + RoundsPerEpoch: roundsPerEpoch, + ApiInterface: api.NewNoApiInterface(), + MinNodesPerShard: 3, + MetaChainMinNodes: 3, + AlterConfigsFunction: func(cfg 
*config.Configs) { + cfg.SystemSCConfig.StakingSystemSCConfig.UnBondPeriod = 1 + cfg.SystemSCConfig.StakingSystemSCConfig.UnBondPeriodInEpochs = 1 + newNumNodes := cfg.SystemSCConfig.StakingSystemSCConfig.MaxNumberOfNodesForStake + 10 + configs.SetMaxNumberOfNodesInConfigs(cfg, newNumNodes, numOfShards) + }, + }) + require.Nil(t, err) + require.NotNil(t, cs) + + defer cs.Close() + + err = cs.GenerateBlocksUntilEpochIsReached(targetEpoch) + require.Nil(t, err) + + privateKeys, blsKeys, err := chainSimulator.GenerateBlsPrivateKeys(1) + require.Nil(t, err) + err = cs.AddValidatorKeys(privateKeys) + require.Nil(t, err) + + mintValue := big.NewInt(0).Mul(oneEGLD, big.NewInt(2600)) + walletAddressShardID := uint32(0) + walletAddress, err := cs.GenerateAndMintWalletAddress(walletAddressShardID, mintValue) + require.Nil(t, err) + + txDataField := fmt.Sprintf("stake@01@%s@%s", blsKeys[0], mockBLSSignature) + txStake := generateTransaction(walletAddress.Bytes, 0, vm.ValidatorSCAddress, minimumStakeValue, txDataField, gasLimitForStakeOperation) + stakeTx, err := cs.SendTxAndGenerateBlockTilTxIsExecuted(txStake, maxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, stakeTx) + + metachainNode := cs.GetNodeHandler(core.MetachainShardId) + bls0, _ := hex.DecodeString(blsKeys[0]) + blsKeyStatus := getBLSKeyStatus(t, metachainNode, bls0) + require.Equal(t, "staked", blsKeyStatus) + + // do unStake + txUnStake := generateTransaction(walletAddress.Bytes, 1, vm.ValidatorSCAddress, zeroValue, fmt.Sprintf("unStake@%s", blsKeys[0]), gasLimitForStakeOperation) + unStakeTx, err := cs.SendTxAndGenerateBlockTilTxIsExecuted(txUnStake, maxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, unStakeTx) + + blsKeyStatus = getBLSKeyStatus(t, metachainNode, bls0) + require.Equal(t, "unStaked", blsKeyStatus) + + err = cs.GenerateBlocksUntilEpochIsReached(targetEpoch + 1) + require.Nil(t, err) + + // do unBond + txUnBond := generateTransaction(walletAddress.Bytes, 2, vm.ValidatorSCAddress, zeroValue, fmt.Sprintf("unBondNodes@%s", blsKeys[0]), gasLimitForStakeOperation) + unBondTx, err := cs.SendTxAndGenerateBlockTilTxIsExecuted(txUnBond, maxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, unBondTx) + + // do claim + txClaim := generateTransaction(walletAddress.Bytes, 3, vm.ValidatorSCAddress, zeroValue, "unBondTokens", gasLimitForStakeOperation) + claimTx, err := cs.SendTxAndGenerateBlockTilTxIsExecuted(txClaim, maxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, claimTx) + + err = cs.GenerateBlocks(5) + require.Nil(t, err) + + // check tokens are in the wallet balance + walletAccount, _, err := cs.GetNodeHandler(walletAddressShardID).GetFacadeHandler().GetAccount(walletAddress.Bech32, coreAPI.AccountQueryOptions{}) + require.Nil(t, err) + walletBalanceBig, _ := big.NewInt(0).SetString(walletAccount.Balance, 10) + require.True(t, walletBalanceBig.Cmp(minimumStakeValue) > 0) +} + +func checkTotalQualified(t *testing.T, auctionList []*common.AuctionListValidatorAPIResponse, expected int) { + totalQualified := 0 + for _, res := range auctionList { + for _, node := range res.Nodes { + if node.Qualified { + totalQualified++ + } + } + } + require.Equal(t, expected, totalQualified) +} + +func checkValidatorsRating(t *testing.T, validatorStatistics map[string]*validator.ValidatorStatistics) { + countRatingIncreased := 0 + for _, validatorInfo := range validatorStatistics { + validatorSignedAtLeastOneBlock := 
validatorInfo.NumValidatorSuccess > 0 || validatorInfo.NumLeaderSuccess > 0 + if !validatorSignedAtLeastOneBlock { + continue + } + countRatingIncreased++ + require.Greater(t, validatorInfo.TempRating, validatorInfo.Rating) + } + require.Greater(t, countRatingIncreased, 0) +} + +// Test description +// Stake funds - happy flow +// +// Preconditions: have an account with egld and 2 staked nodes (2500 stake per node) - directly staked, and no unstake +// +// 1. Check the stake amount for the owner of the staked nodes with the vmquery "getTotalStaked", and the account current EGLD balance +// 2. Create from the owner of staked nodes a transaction to stake 1 EGLD and send it to the network +// 3. Check the outcome of the TX & verify new stake state with vmquery + +// Internal test scenario #24 +func TestChainSimulator_DirectStakingNodes_StakeFunds(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + + roundDurationInMillis := uint64(6000) + roundsPerEpoch := core.OptionalUint64{ + HasValue: true, + Value: 30, + } + + t.Run("staking ph 4 is not active", func(t *testing.T) { + cs, err := chainSimulator.NewChainSimulator(chainSimulator.ArgsChainSimulator{ + BypassTxSignatureCheck: false, + TempDir: t.TempDir(), + PathToInitialConfig: defaultPathToInitialConfig, + NumOfShards: 3, + GenesisTimestamp: time.Now().Unix(), + RoundDurationInMillis: roundDurationInMillis, + RoundsPerEpoch: roundsPerEpoch, + ApiInterface: api.NewNoApiInterface(), + MinNodesPerShard: 3, + MetaChainMinNodes: 3, + NumNodesWaitingListMeta: 3, + NumNodesWaitingListShard: 3, + AlterConfigsFunction: func(cfg *config.Configs) { + cfg.EpochConfig.EnableEpochs.StakingV4Step1EnableEpoch = 100 + cfg.EpochConfig.EnableEpochs.StakingV4Step2EnableEpoch = 101 + cfg.EpochConfig.EnableEpochs.StakingV4Step3EnableEpoch = 102 + + cfg.EpochConfig.EnableEpochs.MaxNodesChangeEnableEpoch[2].EpochEnable = 102 + }, + }) + require.Nil(t, err) + require.NotNil(t, cs) + + defer cs.Close() + + testChainSimulatorDirectStakedNodesStakingFunds(t, cs, 1) + }) + + t.Run("staking ph 4 step 1 is active", func(t *testing.T) { + cs, err := chainSimulator.NewChainSimulator(chainSimulator.ArgsChainSimulator{ + BypassTxSignatureCheck: false, + TempDir: t.TempDir(), + PathToInitialConfig: defaultPathToInitialConfig, + NumOfShards: 3, + GenesisTimestamp: time.Now().Unix(), + RoundDurationInMillis: roundDurationInMillis, + RoundsPerEpoch: roundsPerEpoch, + ApiInterface: api.NewNoApiInterface(), + MinNodesPerShard: 3, + MetaChainMinNodes: 3, + NumNodesWaitingListMeta: 3, + NumNodesWaitingListShard: 3, + AlterConfigsFunction: func(cfg *config.Configs) { + cfg.EpochConfig.EnableEpochs.StakingV4Step1EnableEpoch = 2 + cfg.EpochConfig.EnableEpochs.StakingV4Step2EnableEpoch = 3 + cfg.EpochConfig.EnableEpochs.StakingV4Step3EnableEpoch = 4 + + cfg.EpochConfig.EnableEpochs.MaxNodesChangeEnableEpoch[2].EpochEnable = 4 + }, + }) + require.Nil(t, err) + require.NotNil(t, cs) + + defer cs.Close() + + testChainSimulatorDirectStakedNodesStakingFunds(t, cs, 2) + }) + + t.Run("staking ph 4 step 2 is active", func(t *testing.T) { + cs, err := chainSimulator.NewChainSimulator(chainSimulator.ArgsChainSimulator{ + BypassTxSignatureCheck: false, + TempDir: t.TempDir(), + PathToInitialConfig: defaultPathToInitialConfig, + NumOfShards: 3, + GenesisTimestamp: time.Now().Unix(), + RoundDurationInMillis: roundDurationInMillis, + RoundsPerEpoch: roundsPerEpoch, + ApiInterface: api.NewNoApiInterface(), + MinNodesPerShard: 3, + MetaChainMinNodes: 3, + 
NumNodesWaitingListMeta: 3, + NumNodesWaitingListShard: 3, + AlterConfigsFunction: func(cfg *config.Configs) { + cfg.EpochConfig.EnableEpochs.StakingV4Step1EnableEpoch = 2 + cfg.EpochConfig.EnableEpochs.StakingV4Step2EnableEpoch = 3 + cfg.EpochConfig.EnableEpochs.StakingV4Step3EnableEpoch = 4 + + cfg.EpochConfig.EnableEpochs.MaxNodesChangeEnableEpoch[2].EpochEnable = 4 + }, + }) + require.Nil(t, err) + require.NotNil(t, cs) + + defer cs.Close() + + testChainSimulatorDirectStakedNodesStakingFunds(t, cs, 3) + }) + + t.Run("staking ph 4 step 3 is active", func(t *testing.T) { + cs, err := chainSimulator.NewChainSimulator(chainSimulator.ArgsChainSimulator{ + BypassTxSignatureCheck: false, + TempDir: t.TempDir(), + PathToInitialConfig: defaultPathToInitialConfig, + NumOfShards: 3, + GenesisTimestamp: time.Now().Unix(), + RoundDurationInMillis: roundDurationInMillis, + RoundsPerEpoch: roundsPerEpoch, + ApiInterface: api.NewNoApiInterface(), + MinNodesPerShard: 3, + MetaChainMinNodes: 3, + NumNodesWaitingListMeta: 3, + NumNodesWaitingListShard: 3, + AlterConfigsFunction: func(cfg *config.Configs) { + cfg.EpochConfig.EnableEpochs.StakingV4Step1EnableEpoch = 2 + cfg.EpochConfig.EnableEpochs.StakingV4Step2EnableEpoch = 3 + cfg.EpochConfig.EnableEpochs.StakingV4Step3EnableEpoch = 4 + + cfg.EpochConfig.EnableEpochs.MaxNodesChangeEnableEpoch[2].EpochEnable = 4 + }, + }) + require.Nil(t, err) + require.NotNil(t, cs) + + defer cs.Close() + + testChainSimulatorDirectStakedNodesStakingFunds(t, cs, 4) + }) +} + +func testChainSimulatorDirectStakedNodesStakingFunds(t *testing.T, cs chainSimulatorIntegrationTests.ChainSimulator, targetEpoch int32) { + err := cs.GenerateBlocksUntilEpochIsReached(targetEpoch) + require.Nil(t, err) + + log.Info("Preconditions. Have an account with 2 staked nodes") + privateKeys, blsKeys, err := chainSimulator.GenerateBlsPrivateKeys(2) + require.Nil(t, err) + + err = cs.AddValidatorKeys(privateKeys) + require.Nil(t, err) + metachainNode := cs.GetNodeHandler(core.MetachainShardId) + + mintValue := big.NewInt(5010) + mintValue = mintValue.Mul(oneEGLD, mintValue) + + validatorOwner, err := cs.GenerateAndMintWalletAddress(core.AllShardId, mintValue) + require.Nil(t, err) + + stakeValue := big.NewInt(0).Set(minimumStakeValue) + txDataField := fmt.Sprintf("stake@01@%s@%s", blsKeys[0], mockBLSSignature) + txStake := generateTransaction(validatorOwner.Bytes, 0, vm.ValidatorSCAddress, stakeValue, txDataField, gasLimitForStakeOperation) + stakeTx, err := cs.SendTxAndGenerateBlockTilTxIsExecuted(txStake, maxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, stakeTx) + + err = cs.GenerateBlocks(2) // allow the metachain to finalize the block that contains the staking of the node + require.Nil(t, err) + + stakeValue = big.NewInt(0).Set(minimumStakeValue) + txDataField = fmt.Sprintf("stake@01@%s@%s", blsKeys[1], mockBLSSignature) + txStake = generateTransaction(validatorOwner.Bytes, 1, vm.ValidatorSCAddress, stakeValue, txDataField, gasLimitForStakeOperation) + stakeTx, err = cs.SendTxAndGenerateBlockTilTxIsExecuted(txStake, maxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, stakeTx) + + err = cs.GenerateBlocks(2) // allow the metachain to finalize the block that contains the staking of the node + require.Nil(t, err) + + log.Info("Step 1. Check the stake amount for the owner of the staked nodes") + checkExpectedStakedValue(t, metachainNode, validatorOwner.Bytes, 5000) + + log.Info("Step 2. 
Create from the owner of the staked nodes a tx to stake 1 EGLD") + + stakeValue = big.NewInt(0).Mul(oneEGLD, big.NewInt(1)) + txDataField = fmt.Sprintf("stake@01@%s@%s", blsKeys[0], mockBLSSignature) + txStake = generateTransaction(validatorOwner.Bytes, 2, vm.ValidatorSCAddress, stakeValue, txDataField, gasLimitForStakeOperation) + stakeTx, err = cs.SendTxAndGenerateBlockTilTxIsExecuted(txStake, maxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, stakeTx) + + err = cs.GenerateBlocks(2) // allow the metachain to finalize the block that contains the staking of the node + require.Nil(t, err) + + log.Info("Step 3. Check the stake amount for the owner of the staked nodes") + checkExpectedStakedValue(t, metachainNode, validatorOwner.Bytes, 5001) +} + +func checkExpectedStakedValue(t *testing.T, metachainNode chainSimulatorProcess.NodeHandler, blsKey []byte, expectedValue int64) { + totalStaked := getTotalStaked(t, metachainNode, blsKey) + + expectedStaked := big.NewInt(expectedValue) + expectedStaked = expectedStaked.Mul(oneEGLD, expectedStaked) + require.Equal(t, expectedStaked.String(), string(totalStaked)) +} + +func getTotalStaked(t *testing.T, metachainNode chainSimulatorProcess.NodeHandler, blsKey []byte) []byte { + scQuery := &process.SCQuery{ + ScAddress: vm.ValidatorSCAddress, + FuncName: "getTotalStaked", + CallerAddr: vm.ValidatorSCAddress, + CallValue: big.NewInt(0), + Arguments: [][]byte{blsKey}, + } + result, _, err := metachainNode.GetFacadeHandler().ExecuteSCQuery(scQuery) + require.Nil(t, err) + require.Equal(t, okReturnCode, result.ReturnCode) + + return result.ReturnData[0] +} + +// Test description: +// Unstake funds with deactivation of node if below 2500 -> the rest of funds are distributed as topup at epoch change +// +// Internal test scenario #26 +func TestChainSimulator_DirectStakingNodes_UnstakeFundsWithDeactivation(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + + roundDurationInMillis := uint64(6000) + roundsPerEpoch := core.OptionalUint64{ + HasValue: true, + Value: 30, + } + + // Test Steps + // 1. Check the stake amount and number of nodes for the owner of the staked nodes with the vmquery "getTotalStaked", and the account current EGLD balance + // 2. Create from the owner of staked nodes a transaction to unstake 10 EGLD and send it to the network + // 3. Check the outcome of the TX & verify new stake state with vmquery "getTotalStaked" and "getUnStakedTokensList" + // 4. 
Wait for change of epoch and check the outcome + + t.Run("staking ph 4 is not active", func(t *testing.T) { + cs, err := chainSimulator.NewChainSimulator(chainSimulator.ArgsChainSimulator{ + BypassTxSignatureCheck: false, + TempDir: t.TempDir(), + PathToInitialConfig: defaultPathToInitialConfig, + NumOfShards: 3, + GenesisTimestamp: time.Now().Unix(), + RoundDurationInMillis: roundDurationInMillis, + RoundsPerEpoch: roundsPerEpoch, + ApiInterface: api.NewNoApiInterface(), + MinNodesPerShard: 3, + MetaChainMinNodes: 3, + NumNodesWaitingListMeta: 3, + NumNodesWaitingListShard: 3, + AlterConfigsFunction: func(cfg *config.Configs) { + cfg.EpochConfig.EnableEpochs.StakingV4Step1EnableEpoch = 100 + cfg.EpochConfig.EnableEpochs.StakingV4Step2EnableEpoch = 101 + cfg.EpochConfig.EnableEpochs.StakingV4Step3EnableEpoch = 102 + + cfg.EpochConfig.EnableEpochs.MaxNodesChangeEnableEpoch[2].EpochEnable = 102 + }, + }) + require.Nil(t, err) + require.NotNil(t, cs) + + defer cs.Close() + + testChainSimulatorDirectStakedUnstakeFundsWithDeactivation(t, cs, 1) + }) + + t.Run("staking ph 4 step 1 is active", func(t *testing.T) { + cs, err := chainSimulator.NewChainSimulator(chainSimulator.ArgsChainSimulator{ + BypassTxSignatureCheck: false, + TempDir: t.TempDir(), + PathToInitialConfig: defaultPathToInitialConfig, + NumOfShards: 3, + GenesisTimestamp: time.Now().Unix(), + RoundDurationInMillis: roundDurationInMillis, + RoundsPerEpoch: roundsPerEpoch, + ApiInterface: api.NewNoApiInterface(), + MinNodesPerShard: 3, + MetaChainMinNodes: 3, + NumNodesWaitingListMeta: 3, + NumNodesWaitingListShard: 3, + AlterConfigsFunction: func(cfg *config.Configs) { + cfg.EpochConfig.EnableEpochs.StakingV4Step1EnableEpoch = 2 + cfg.EpochConfig.EnableEpochs.StakingV4Step2EnableEpoch = 3 + cfg.EpochConfig.EnableEpochs.StakingV4Step3EnableEpoch = 4 + + cfg.EpochConfig.EnableEpochs.MaxNodesChangeEnableEpoch[2].EpochEnable = 4 + }, + }) + require.Nil(t, err) + require.NotNil(t, cs) + + defer cs.Close() + + testChainSimulatorDirectStakedUnstakeFundsWithDeactivation(t, cs, 2) + }) + + t.Run("staking ph 4 step 2 is active", func(t *testing.T) { + cs, err := chainSimulator.NewChainSimulator(chainSimulator.ArgsChainSimulator{ + BypassTxSignatureCheck: false, + TempDir: t.TempDir(), + PathToInitialConfig: defaultPathToInitialConfig, + NumOfShards: 3, + GenesisTimestamp: time.Now().Unix(), + RoundDurationInMillis: roundDurationInMillis, + RoundsPerEpoch: roundsPerEpoch, + ApiInterface: api.NewNoApiInterface(), + MinNodesPerShard: 3, + MetaChainMinNodes: 3, + NumNodesWaitingListMeta: 3, + NumNodesWaitingListShard: 3, + AlterConfigsFunction: func(cfg *config.Configs) { + cfg.EpochConfig.EnableEpochs.StakingV4Step1EnableEpoch = 2 + cfg.EpochConfig.EnableEpochs.StakingV4Step2EnableEpoch = 3 + cfg.EpochConfig.EnableEpochs.StakingV4Step3EnableEpoch = 4 + + cfg.EpochConfig.EnableEpochs.MaxNodesChangeEnableEpoch[2].EpochEnable = 4 + }, + }) + require.Nil(t, err) + require.NotNil(t, cs) + + defer cs.Close() + + testChainSimulatorDirectStakedUnstakeFundsWithDeactivation(t, cs, 3) + }) + + t.Run("staking ph 4 step 3 is active", func(t *testing.T) { + cs, err := chainSimulator.NewChainSimulator(chainSimulator.ArgsChainSimulator{ + BypassTxSignatureCheck: false, + TempDir: t.TempDir(), + PathToInitialConfig: defaultPathToInitialConfig, + NumOfShards: 3, + GenesisTimestamp: time.Now().Unix(), + RoundDurationInMillis: roundDurationInMillis, + RoundsPerEpoch: roundsPerEpoch, + ApiInterface: api.NewNoApiInterface(), + MinNodesPerShard: 3, + 
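// clarifying note (added): the AlterConfigsFunction below enables staking v4 steps 1, 2 and 3 at epochs 2, 3 and 4; the scenario body then advances the simulator to targetEpoch = 4, i.e. with step 3 already active. +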
MetaChainMinNodes: 3, + NumNodesWaitingListMeta: 3, + NumNodesWaitingListShard: 3, + AlterConfigsFunction: func(cfg *config.Configs) { + cfg.EpochConfig.EnableEpochs.StakingV4Step1EnableEpoch = 2 + cfg.EpochConfig.EnableEpochs.StakingV4Step2EnableEpoch = 3 + cfg.EpochConfig.EnableEpochs.StakingV4Step3EnableEpoch = 4 + + cfg.EpochConfig.EnableEpochs.MaxNodesChangeEnableEpoch[2].EpochEnable = 4 + }, + }) + require.Nil(t, err) + require.NotNil(t, cs) + + defer cs.Close() + + testChainSimulatorDirectStakedUnstakeFundsWithDeactivation(t, cs, 4) + }) +} + +func testChainSimulatorDirectStakedUnstakeFundsWithDeactivation(t *testing.T, cs chainSimulatorIntegrationTests.ChainSimulator, targetEpoch int32) { + err := cs.GenerateBlocksUntilEpochIsReached(targetEpoch) + require.Nil(t, err) + + privateKeys, blsKeys, err := chainSimulator.GenerateBlsPrivateKeys(2) + require.Nil(t, err) + + err = cs.AddValidatorKeys(privateKeys) + require.Nil(t, err) + metachainNode := cs.GetNodeHandler(core.MetachainShardId) + + mintValue := big.NewInt(5010) + mintValue = mintValue.Mul(oneEGLD, mintValue) + + validatorOwner, err := cs.GenerateAndMintWalletAddress(core.AllShardId, mintValue) + require.Nil(t, err) + + stakeValue := big.NewInt(0).Set(minimumStakeValue) + txDataField := fmt.Sprintf("stake@01@%s@%s", blsKeys[0], mockBLSSignature) + txStake := generateTransaction(validatorOwner.Bytes, 0, vm.ValidatorSCAddress, stakeValue, txDataField, gasLimitForStakeOperation) + stakeTx, err := cs.SendTxAndGenerateBlockTilTxIsExecuted(txStake, maxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, stakeTx) + + err = cs.GenerateBlocks(2) // allow the metachain to finalize the block that contains the staking of the node + require.Nil(t, err) + + testBLSKeyStaked(t, cs, metachainNode, blsKeys[0], targetEpoch) + + stakeValue = big.NewInt(0).Set(minimumStakeValue) + txDataField = fmt.Sprintf("stake@01@%s@%s", blsKeys[1], mockBLSSignature) + txStake = generateTransaction(validatorOwner.Bytes, 1, vm.ValidatorSCAddress, stakeValue, txDataField, gasLimitForStakeOperation) + stakeTx, err = cs.SendTxAndGenerateBlockTilTxIsExecuted(txStake, maxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, stakeTx) + + err = cs.GenerateBlocks(2) // allow the metachain to finalize the block that contains the staking of the node + require.Nil(t, err) + + testBLSKeyStaked(t, cs, metachainNode, blsKeys[1], targetEpoch) + + log.Info("Step 1. Check the stake amount for the owner of the staked nodes") + checkExpectedStakedValue(t, metachainNode, validatorOwner.Bytes, 5000) + + log.Info("Step 2. Create from the owner of staked nodes a transaction to unstake 10 EGLD and send it to the network") + + unStakeValue := big.NewInt(10) + unStakeValue = unStakeValue.Mul(oneEGLD, unStakeValue) + txDataField = fmt.Sprintf("unStakeTokens@%s", hex.EncodeToString(unStakeValue.Bytes())) + txUnStake := generateTransaction(validatorOwner.Bytes, 2, vm.ValidatorSCAddress, zeroValue, txDataField, gasLimitForStakeOperation) + unStakeTx, err := cs.SendTxAndGenerateBlockTilTxIsExecuted(txUnStake, maxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, unStakeTx) + + err = cs.GenerateBlocks(2) + require.Nil(t, err) + + log.Info("Step 3. 
Check the outcome of the TX & verify new stake state with vmquery getTotalStaked and getUnStakedTokensList") + checkExpectedStakedValue(t, metachainNode, validatorOwner.Bytes, 4990) + + unStakedTokensAmount := getUnStakedTokensList(t, metachainNode, validatorOwner.Bytes) + + expectedUnStaked := big.NewInt(10) + expectedUnStaked = expectedUnStaked.Mul(oneEGLD, expectedUnStaked) + require.Equal(t, expectedUnStaked.String(), big.NewInt(0).SetBytes(unStakedTokensAmount).String()) + + log.Info("Step 4. Wait for change of epoch and check the outcome") + err = cs.GenerateBlocksUntilEpochIsReached(targetEpoch + 1) + require.Nil(t, err) + + checkOneOfTheNodesIsUnstaked(t, metachainNode, blsKeys[:2]) +} + +func getUnStakedTokensList(t *testing.T, metachainNode chainSimulatorProcess.NodeHandler, blsKey []byte) []byte { + scQuery := &process.SCQuery{ + ScAddress: vm.ValidatorSCAddress, + FuncName: "getUnStakedTokensList", + CallerAddr: vm.ValidatorSCAddress, + CallValue: big.NewInt(0), + Arguments: [][]byte{blsKey}, + } + result, _, err := metachainNode.GetFacadeHandler().ExecuteSCQuery(scQuery) + require.Nil(t, err) + require.Equal(t, okReturnCode, result.ReturnCode) + + return result.ReturnData[0] +} + +func checkOneOfTheNodesIsUnstaked(t *testing.T, + metachainNode chainSimulatorProcess.NodeHandler, + blsKeys []string, +) { + decodedBLSKey0, _ := hex.DecodeString(blsKeys[0]) + keyStatus0 := getBLSKeyStatus(t, metachainNode, decodedBLSKey0) + log.Info("Key info", "key", blsKeys[0], "status", keyStatus0) + + isNotStaked0 := keyStatus0 == unStakedStatus + + decodedBLSKey1, _ := hex.DecodeString(blsKeys[1]) + keyStatus1 := getBLSKeyStatus(t, metachainNode, decodedBLSKey1) + log.Info("Key info", "key", blsKeys[1], "status", keyStatus1) + + isNotStaked1 := keyStatus1 == unStakedStatus + + require.True(t, isNotStaked0 != isNotStaked1) +} + +func testBLSKeyStaked(t *testing.T, + cs chainSimulatorIntegrationTests.ChainSimulator, + metachainNode chainSimulatorProcess.NodeHandler, + blsKey string, targetEpoch int32, +) { + decodedBLSKey, _ := hex.DecodeString(blsKey) + err := metachainNode.GetProcessComponents().ValidatorsProvider().ForceUpdate() + require.Nil(t, err) + + validatorStatistics, err := metachainNode.GetFacadeHandler().ValidatorStatisticsApi() + require.Nil(t, err) + + activationEpoch := metachainNode.GetCoreComponents().EnableEpochsHandler().GetActivationEpoch(common.StakingV4Step1Flag) + if activationEpoch <= metachainNode.GetCoreComponents().EnableEpochsHandler().GetCurrentEpoch() { + require.Equal(t, stakedStatus, getBLSKeyStatus(t, metachainNode, decodedBLSKey)) + return + } + + // in staking ph 2/3.5 we do not find the bls key on the validator statistics + _, found := validatorStatistics[blsKey] + require.False(t, found) + require.Equal(t, queuedStatus, getBLSKeyStatus(t, metachainNode, decodedBLSKey)) +} + +// Test description: +// Unstake funds with deactivation of node, followed by stake with sufficient ammount does not unstake node at end of epoch +// +// Internal test scenario #27 +func TestChainSimulator_DirectStakingNodes_UnstakeFundsWithDeactivation_WithReactivation(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + + roundDurationInMillis := uint64(6000) + roundsPerEpoch := core.OptionalUint64{ + HasValue: true, + Value: 30, + } + + // Test Steps + // 1. Check the stake amount and number of nodes for the owner of the staked nodes with the vmquery "getTotalStaked", and the account current EGLD balance + // 2. 
Create from the owner of staked nodes a transaction to unstake 10 EGLD and send it to the network + // 3. Check the outcome of the TX & verify new stake state with vmquery + // 4. Create from the owner of staked nodes a transaction to stake 10 EGLD and send it to the network + // 5. Check the outcome of the TX & verify new stake state with vmquery + // 6. Wait for change of epoch and check the outcome + + t.Run("staking ph 4 is not active", func(t *testing.T) { + cs, err := chainSimulator.NewChainSimulator(chainSimulator.ArgsChainSimulator{ + BypassTxSignatureCheck: false, + TempDir: t.TempDir(), + PathToInitialConfig: defaultPathToInitialConfig, + NumOfShards: 3, + GenesisTimestamp: time.Now().Unix(), + RoundDurationInMillis: roundDurationInMillis, + RoundsPerEpoch: roundsPerEpoch, + ApiInterface: api.NewNoApiInterface(), + MinNodesPerShard: 3, + MetaChainMinNodes: 3, + NumNodesWaitingListMeta: 3, + NumNodesWaitingListShard: 3, + AlterConfigsFunction: func(cfg *config.Configs) { + cfg.EpochConfig.EnableEpochs.StakingV4Step1EnableEpoch = 100 + cfg.EpochConfig.EnableEpochs.StakingV4Step2EnableEpoch = 101 + cfg.EpochConfig.EnableEpochs.StakingV4Step3EnableEpoch = 102 + + cfg.EpochConfig.EnableEpochs.MaxNodesChangeEnableEpoch[2].EpochEnable = 102 + }, + }) + require.Nil(t, err) + require.NotNil(t, cs) + + defer cs.Close() + + testChainSimulatorDirectStakedUnstakeFundsWithDeactivationAndReactivation(t, cs, 1) + }) + + t.Run("staking ph 4 step 1 is active", func(t *testing.T) { + cs, err := chainSimulator.NewChainSimulator(chainSimulator.ArgsChainSimulator{ + BypassTxSignatureCheck: false, + TempDir: t.TempDir(), + PathToInitialConfig: defaultPathToInitialConfig, + NumOfShards: 3, + GenesisTimestamp: time.Now().Unix(), + RoundDurationInMillis: roundDurationInMillis, + RoundsPerEpoch: roundsPerEpoch, + ApiInterface: api.NewNoApiInterface(), + MinNodesPerShard: 3, + MetaChainMinNodes: 3, + NumNodesWaitingListMeta: 3, + NumNodesWaitingListShard: 3, + AlterConfigsFunction: func(cfg *config.Configs) { + cfg.EpochConfig.EnableEpochs.StakingV4Step1EnableEpoch = 2 + cfg.EpochConfig.EnableEpochs.StakingV4Step2EnableEpoch = 3 + cfg.EpochConfig.EnableEpochs.StakingV4Step3EnableEpoch = 4 + + cfg.EpochConfig.EnableEpochs.MaxNodesChangeEnableEpoch[2].EpochEnable = 4 + }, + }) + require.Nil(t, err) + require.NotNil(t, cs) + + defer cs.Close() + + testChainSimulatorDirectStakedUnstakeFundsWithDeactivationAndReactivation(t, cs, 2) + }) + + t.Run("staking ph 4 step 2 is active", func(t *testing.T) { + cs, err := chainSimulator.NewChainSimulator(chainSimulator.ArgsChainSimulator{ + BypassTxSignatureCheck: false, + TempDir: t.TempDir(), + PathToInitialConfig: defaultPathToInitialConfig, + NumOfShards: 3, + GenesisTimestamp: time.Now().Unix(), + RoundDurationInMillis: roundDurationInMillis, + RoundsPerEpoch: roundsPerEpoch, + ApiInterface: api.NewNoApiInterface(), + MinNodesPerShard: 3, + MetaChainMinNodes: 3, + NumNodesWaitingListMeta: 3, + NumNodesWaitingListShard: 3, + AlterConfigsFunction: func(cfg *config.Configs) { + cfg.EpochConfig.EnableEpochs.StakingV4Step1EnableEpoch = 2 + cfg.EpochConfig.EnableEpochs.StakingV4Step2EnableEpoch = 3 + cfg.EpochConfig.EnableEpochs.StakingV4Step3EnableEpoch = 4 + + cfg.EpochConfig.EnableEpochs.MaxNodesChangeEnableEpoch[2].EpochEnable = 4 + }, + }) + require.Nil(t, err) + require.NotNil(t, cs) + + defer cs.Close() + + testChainSimulatorDirectStakedUnstakeFundsWithDeactivationAndReactivation(t, cs, 3) + }) + + t.Run("staking ph 4 step 3 is active", func(t *testing.T) { + cs, 
err := chainSimulator.NewChainSimulator(chainSimulator.ArgsChainSimulator{ + BypassTxSignatureCheck: false, + TempDir: t.TempDir(), + PathToInitialConfig: defaultPathToInitialConfig, + NumOfShards: 3, + GenesisTimestamp: time.Now().Unix(), + RoundDurationInMillis: roundDurationInMillis, + RoundsPerEpoch: roundsPerEpoch, + ApiInterface: api.NewNoApiInterface(), + MinNodesPerShard: 3, + MetaChainMinNodes: 3, + NumNodesWaitingListMeta: 3, + NumNodesWaitingListShard: 3, + AlterConfigsFunction: func(cfg *config.Configs) { + cfg.EpochConfig.EnableEpochs.StakingV4Step1EnableEpoch = 2 + cfg.EpochConfig.EnableEpochs.StakingV4Step2EnableEpoch = 3 + cfg.EpochConfig.EnableEpochs.StakingV4Step3EnableEpoch = 4 + + cfg.EpochConfig.EnableEpochs.MaxNodesChangeEnableEpoch[2].EpochEnable = 4 + }, + }) + require.Nil(t, err) + require.NotNil(t, cs) + + defer cs.Close() + + testChainSimulatorDirectStakedUnstakeFundsWithDeactivationAndReactivation(t, cs, 4) + }) +} + +func testChainSimulatorDirectStakedUnstakeFundsWithDeactivationAndReactivation(t *testing.T, cs chainSimulatorIntegrationTests.ChainSimulator, targetEpoch int32) { + err := cs.GenerateBlocksUntilEpochIsReached(targetEpoch) + require.Nil(t, err) + + privateKeys, blsKeys, err := chainSimulator.GenerateBlsPrivateKeys(2) + require.Nil(t, err) + + err = cs.AddValidatorKeys(privateKeys) + require.Nil(t, err) + metachainNode := cs.GetNodeHandler(core.MetachainShardId) + + mintValue := big.NewInt(6000) + mintValue = mintValue.Mul(oneEGLD, mintValue) + + validatorOwner, err := cs.GenerateAndMintWalletAddress(core.AllShardId, mintValue) + require.Nil(t, err) + + stakeValue := big.NewInt(0).Set(minimumStakeValue) + txDataField := fmt.Sprintf("stake@01@%s@%s", blsKeys[0], mockBLSSignature) + txStake := generateTransaction(validatorOwner.Bytes, 0, vm.ValidatorSCAddress, stakeValue, txDataField, gasLimitForStakeOperation) + stakeTx, err := cs.SendTxAndGenerateBlockTilTxIsExecuted(txStake, maxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, stakeTx) + + err = cs.GenerateBlocks(2) // allow the metachain to finalize the block that contains the staking of the node + require.Nil(t, err) + + testBLSKeyStaked(t, cs, metachainNode, blsKeys[0], targetEpoch) + + stakeValue = big.NewInt(0).Set(minimumStakeValue) + txDataField = fmt.Sprintf("stake@01@%s@%s", blsKeys[1], mockBLSSignature) + txStake = generateTransaction(validatorOwner.Bytes, 1, vm.ValidatorSCAddress, stakeValue, txDataField, gasLimitForStakeOperation) + stakeTx, err = cs.SendTxAndGenerateBlockTilTxIsExecuted(txStake, maxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, stakeTx) + + err = cs.GenerateBlocks(2) // allow the metachain to finalize the block that contains the staking of the node + require.Nil(t, err) + + testBLSKeyStaked(t, cs, metachainNode, blsKeys[1], targetEpoch) + + log.Info("Step 1. Check the stake amount for the owner of the staked nodes") + checkExpectedStakedValue(t, metachainNode, validatorOwner.Bytes, 5000) + + log.Info("Step 2. 
Create from the owner of staked nodes a transaction to unstake 10 EGLD and send it to the network") + + unStakeValue := big.NewInt(10) + unStakeValue = unStakeValue.Mul(oneEGLD, unStakeValue) + txDataField = fmt.Sprintf("unStakeTokens@%s", hex.EncodeToString(unStakeValue.Bytes())) + txUnStake := generateTransaction(validatorOwner.Bytes, 2, vm.ValidatorSCAddress, zeroValue, txDataField, gasLimitForStakeOperation) + unStakeTx, err := cs.SendTxAndGenerateBlockTilTxIsExecuted(txUnStake, maxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, unStakeTx) + + err = cs.GenerateBlocks(2) + require.Nil(t, err) + + log.Info("Step 3. Check the outcome of the TX & verify new stake state with vmquery getTotalStaked and getUnStakedTokensList") + checkExpectedStakedValue(t, metachainNode, validatorOwner.Bytes, 4990) + + unStakedTokensAmount := getUnStakedTokensList(t, metachainNode, validatorOwner.Bytes) + + expectedUnStaked := big.NewInt(10) + expectedUnStaked = expectedUnStaked.Mul(oneEGLD, expectedUnStaked) + require.Equal(t, expectedUnStaked.String(), big.NewInt(0).SetBytes(unStakedTokensAmount).String()) + + log.Info("Step 4. Create from the owner of staked nodes a transaction to stake 10 EGLD and send it to the network") + + newStakeValue := big.NewInt(10) + newStakeValue = newStakeValue.Mul(oneEGLD, newStakeValue) + txDataField = fmt.Sprintf("stake@01@%s@%s", blsKeys[0], mockBLSSignature) + txStake = generateTransaction(validatorOwner.Bytes, 3, vm.ValidatorSCAddress, newStakeValue, txDataField, gasLimitForStakeOperation) + stakeTx, err = cs.SendTxAndGenerateBlockTilTxIsExecuted(txStake, maxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, stakeTx) + + err = cs.GenerateBlocks(2) + require.Nil(t, err) + + log.Info("5. Check the outcome of the TX & verify new stake state with vmquery") + checkExpectedStakedValue(t, metachainNode, validatorOwner.Bytes, 5000) + + log.Info("Step 6. Wait for change of epoch and check the outcome") + err = cs.GenerateBlocksUntilEpochIsReached(targetEpoch + 1) + require.Nil(t, err) + + testBLSKeyStaked(t, cs, metachainNode, blsKeys[0], targetEpoch) + testBLSKeyStaked(t, cs, metachainNode, blsKeys[1], targetEpoch) +} + +// Test description: +// Withdraw unstaked funds before unbonding period should return error +// +// Internal test scenario #28 +func TestChainSimulator_DirectStakingNodes_WithdrawUnstakedFundsBeforeUnbonding(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + + roundDurationInMillis := uint64(6000) + roundsPerEpoch := core.OptionalUint64{ + HasValue: true, + Value: 30, + } + + // Test Steps + // 1. Create from the owner of staked nodes a transaction to withdraw the unstaked funds + // 2. 
Check the outcome of the TX & verify new stake state with vmquery ("getUnStakedTokensList") + + t.Run("staking ph 4 is not active", func(t *testing.T) { + cs, err := chainSimulator.NewChainSimulator(chainSimulator.ArgsChainSimulator{ + BypassTxSignatureCheck: false, + TempDir: t.TempDir(), + PathToInitialConfig: defaultPathToInitialConfig, + NumOfShards: 3, + GenesisTimestamp: time.Now().Unix(), + RoundDurationInMillis: roundDurationInMillis, + RoundsPerEpoch: roundsPerEpoch, + ApiInterface: api.NewNoApiInterface(), + MinNodesPerShard: 3, + MetaChainMinNodes: 3, + NumNodesWaitingListMeta: 3, + NumNodesWaitingListShard: 3, + AlterConfigsFunction: func(cfg *config.Configs) { + cfg.EpochConfig.EnableEpochs.StakingV4Step1EnableEpoch = 100 + cfg.EpochConfig.EnableEpochs.StakingV4Step2EnableEpoch = 101 + cfg.EpochConfig.EnableEpochs.StakingV4Step3EnableEpoch = 102 + + cfg.EpochConfig.EnableEpochs.MaxNodesChangeEnableEpoch[2].EpochEnable = 102 + }, + }) + require.Nil(t, err) + require.NotNil(t, cs) + + defer cs.Close() + + testChainSimulatorDirectStakedWithdrawUnstakedFundsBeforeUnbonding(t, cs, 1) + }) + + t.Run("staking ph 4 step 1 is active", func(t *testing.T) { + cs, err := chainSimulator.NewChainSimulator(chainSimulator.ArgsChainSimulator{ + BypassTxSignatureCheck: false, + TempDir: t.TempDir(), + PathToInitialConfig: defaultPathToInitialConfig, + NumOfShards: 3, + GenesisTimestamp: time.Now().Unix(), + RoundDurationInMillis: roundDurationInMillis, + RoundsPerEpoch: roundsPerEpoch, + ApiInterface: api.NewNoApiInterface(), + MinNodesPerShard: 3, + MetaChainMinNodes: 3, + NumNodesWaitingListMeta: 3, + NumNodesWaitingListShard: 3, + AlterConfigsFunction: func(cfg *config.Configs) { + cfg.EpochConfig.EnableEpochs.StakingV4Step1EnableEpoch = 2 + cfg.EpochConfig.EnableEpochs.StakingV4Step2EnableEpoch = 3 + cfg.EpochConfig.EnableEpochs.StakingV4Step3EnableEpoch = 4 + + cfg.EpochConfig.EnableEpochs.MaxNodesChangeEnableEpoch[2].EpochEnable = 4 + }, + }) + require.Nil(t, err) + require.NotNil(t, cs) + + defer cs.Close() + + testChainSimulatorDirectStakedWithdrawUnstakedFundsBeforeUnbonding(t, cs, 2) + }) + + t.Run("staking ph 4 step 2 is active", func(t *testing.T) { + cs, err := chainSimulator.NewChainSimulator(chainSimulator.ArgsChainSimulator{ + BypassTxSignatureCheck: false, + TempDir: t.TempDir(), + PathToInitialConfig: defaultPathToInitialConfig, + NumOfShards: 3, + GenesisTimestamp: time.Now().Unix(), + RoundDurationInMillis: roundDurationInMillis, + RoundsPerEpoch: roundsPerEpoch, + ApiInterface: api.NewNoApiInterface(), + MinNodesPerShard: 3, + MetaChainMinNodes: 3, + NumNodesWaitingListMeta: 3, + NumNodesWaitingListShard: 3, + AlterConfigsFunction: func(cfg *config.Configs) { + cfg.EpochConfig.EnableEpochs.StakingV4Step1EnableEpoch = 2 + cfg.EpochConfig.EnableEpochs.StakingV4Step2EnableEpoch = 3 + cfg.EpochConfig.EnableEpochs.StakingV4Step3EnableEpoch = 4 + + cfg.EpochConfig.EnableEpochs.MaxNodesChangeEnableEpoch[2].EpochEnable = 4 + }, + }) + require.Nil(t, err) + require.NotNil(t, cs) + + defer cs.Close() + + testChainSimulatorDirectStakedWithdrawUnstakedFundsBeforeUnbonding(t, cs, 3) + }) + + t.Run("staking ph 4 step 3 is active", func(t *testing.T) { + cs, err := chainSimulator.NewChainSimulator(chainSimulator.ArgsChainSimulator{ + BypassTxSignatureCheck: false, + TempDir: t.TempDir(), + PathToInitialConfig: defaultPathToInitialConfig, + NumOfShards: 3, + GenesisTimestamp: time.Now().Unix(), + RoundDurationInMillis: roundDurationInMillis, + RoundsPerEpoch: roundsPerEpoch, + 
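// clarifying note (added): RoundDurationInMillis = 6000 with 30 rounds per epoch means an epoch change only takes on the order of 30 generated blocks in the simulator, keeping the unbonding-related epoch transitions in this test fast. +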
ApiInterface: api.NewNoApiInterface(), + MinNodesPerShard: 3, + MetaChainMinNodes: 3, + NumNodesWaitingListMeta: 3, + NumNodesWaitingListShard: 3, + AlterConfigsFunction: func(cfg *config.Configs) { + cfg.EpochConfig.EnableEpochs.StakingV4Step1EnableEpoch = 2 + cfg.EpochConfig.EnableEpochs.StakingV4Step2EnableEpoch = 3 + cfg.EpochConfig.EnableEpochs.StakingV4Step3EnableEpoch = 4 + + cfg.EpochConfig.EnableEpochs.MaxNodesChangeEnableEpoch[2].EpochEnable = 4 + }, + }) + require.Nil(t, err) + require.NotNil(t, cs) + + defer cs.Close() + + testChainSimulatorDirectStakedWithdrawUnstakedFundsBeforeUnbonding(t, cs, 4) + }) +} + +func testChainSimulatorDirectStakedWithdrawUnstakedFundsBeforeUnbonding(t *testing.T, cs chainSimulatorIntegrationTests.ChainSimulator, targetEpoch int32) { + err := cs.GenerateBlocksUntilEpochIsReached(targetEpoch) + require.Nil(t, err) + + privateKey, blsKeys, err := chainSimulator.GenerateBlsPrivateKeys(1) + require.Nil(t, err) + + err = cs.AddValidatorKeys(privateKey) + require.Nil(t, err) + metachainNode := cs.GetNodeHandler(core.MetachainShardId) + + mintValue := big.NewInt(10000) + mintValue = mintValue.Mul(oneEGLD, mintValue) + + validatorOwner, err := cs.GenerateAndMintWalletAddress(core.AllShardId, mintValue) + require.Nil(t, err) + + stakeValue := big.NewInt(0).Mul(oneEGLD, big.NewInt(2600)) + txDataField := fmt.Sprintf("stake@01@%s@%s", blsKeys[0], mockBLSSignature) + txStake := generateTransaction(validatorOwner.Bytes, 0, vm.ValidatorSCAddress, stakeValue, txDataField, gasLimitForStakeOperation) + stakeTx, err := cs.SendTxAndGenerateBlockTilTxIsExecuted(txStake, maxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, stakeTx) + + err = cs.GenerateBlocks(2) // allow the metachain to finalize the block that contains the staking of the node + require.Nil(t, err) + + testBLSKeyStaked(t, cs, metachainNode, blsKeys[0], targetEpoch) + + shardIDValidatorOwner := cs.GetNodeHandler(0).GetShardCoordinator().ComputeId(validatorOwner.Bytes) + accountValidatorOwner, _, err := cs.GetNodeHandler(shardIDValidatorOwner).GetFacadeHandler().GetAccount(validatorOwner.Bech32, coreAPI.AccountQueryOptions{}) + require.Nil(t, err) + balanceBeforeUnbonding, _ := big.NewInt(0).SetString(accountValidatorOwner.Balance, 10) + + log.Info("Step 1. Create from the owner of staked nodes a transaction to withdraw the unstaked funds") + + unStakeValue := big.NewInt(10) + unStakeValue = unStakeValue.Mul(oneEGLD, unStakeValue) + txDataField = fmt.Sprintf("unStakeTokens@%s", hex.EncodeToString(unStakeValue.Bytes())) + txUnStake := generateTransaction(validatorOwner.Bytes, 1, vm.ValidatorSCAddress, zeroValue, txDataField, gasLimitForStakeOperation) + unStakeTx, err := cs.SendTxAndGenerateBlockTilTxIsExecuted(txUnStake, maxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, unStakeTx) + + err = cs.GenerateBlocks(2) + require.Nil(t, err) + + // check bls key is still staked + testBLSKeyStaked(t, cs, metachainNode, blsKeys[0], targetEpoch) + + txDataField = fmt.Sprintf("unBondTokens@%s", blsKeys[0]) + txUnBond := generateTransaction(validatorOwner.Bytes, 2, vm.ValidatorSCAddress, zeroValue, txDataField, gasLimitForUnBond) + unBondTx, err := cs.SendTxAndGenerateBlockTilTxIsExecuted(txUnBond, maxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, unBondTx) + + err = cs.GenerateBlocks(2) + require.Nil(t, err) + + log.Info("Step 2. 
Check the outcome of the TX & verify new stake state with vmquery (`getUnStakedTokensList`)") + + scQuery := &process.SCQuery{ + ScAddress: vm.ValidatorSCAddress, + FuncName: "getUnStakedTokensList", + CallerAddr: vm.ValidatorSCAddress, + CallValue: big.NewInt(0), + Arguments: [][]byte{validatorOwner.Bytes}, + } + result, _, err := metachainNode.GetFacadeHandler().ExecuteSCQuery(scQuery) + require.Nil(t, err) + require.Equal(t, okReturnCode, result.ReturnCode) + + expectedUnStaked := big.NewInt(10) + expectedUnStaked = expectedUnStaked.Mul(oneEGLD, expectedUnStaked) + require.Equal(t, expectedUnStaked.String(), big.NewInt(0).SetBytes(result.ReturnData[0]).String()) + + // the owner balance should decrease only with the txs fee + accountValidatorOwner, _, err = cs.GetNodeHandler(shardIDValidatorOwner).GetFacadeHandler().GetAccount(validatorOwner.Bech32, coreAPI.AccountQueryOptions{}) + require.Nil(t, err) + balanceAfterUnbonding, _ := big.NewInt(0).SetString(accountValidatorOwner.Balance, 10) + + txsFee, _ := big.NewInt(0).SetString(unBondTx.Fee, 10) + balanceAfterUnbondingWithFee := big.NewInt(0).Add(balanceAfterUnbonding, txsFee) + + txsFee, _ = big.NewInt(0).SetString(unStakeTx.Fee, 10) + balanceAfterUnbondingWithFee.Add(balanceAfterUnbondingWithFee, txsFee) + + txsFee, _ = big.NewInt(0).SetString(stakeTx.Fee, 10) + balanceAfterUnbondingWithFee.Add(balanceAfterUnbondingWithFee, txsFee) + + require.Equal(t, 1, balanceAfterUnbondingWithFee.Cmp(balanceBeforeUnbonding)) +} + +// Test description: +// Withdraw unstaked funds in first available withdraw epoch +// +// Internal test scenario #29 +func TestChainSimulator_DirectStakingNodes_WithdrawUnstakedInWithdrawEpoch(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + + roundDurationInMillis := uint64(6000) + roundsPerEpoch := core.OptionalUint64{ + HasValue: true, + Value: 30, + } + + // Test Steps + // 1. Wait for the unbonding epoch to start + // 2. Create from the owner of staked nodes a transaction to withdraw the unstaked funds + // 3. 
Check the outcome of the TX & verify new stake state with vmquery ("getUnStakedTokensList") + + t.Run("staking ph 4 is not active", func(t *testing.T) { + cs, err := chainSimulator.NewChainSimulator(chainSimulator.ArgsChainSimulator{ + BypassTxSignatureCheck: false, + TempDir: t.TempDir(), + PathToInitialConfig: defaultPathToInitialConfig, + NumOfShards: 3, + GenesisTimestamp: time.Now().Unix(), + RoundDurationInMillis: roundDurationInMillis, + RoundsPerEpoch: roundsPerEpoch, + ApiInterface: api.NewNoApiInterface(), + MinNodesPerShard: 3, + MetaChainMinNodes: 3, + NumNodesWaitingListMeta: 3, + NumNodesWaitingListShard: 3, + AlterConfigsFunction: func(cfg *config.Configs) { + cfg.EpochConfig.EnableEpochs.StakingV4Step1EnableEpoch = 100 + cfg.EpochConfig.EnableEpochs.StakingV4Step2EnableEpoch = 101 + cfg.EpochConfig.EnableEpochs.StakingV4Step3EnableEpoch = 102 + + cfg.EpochConfig.EnableEpochs.MaxNodesChangeEnableEpoch[2].EpochEnable = 102 + }, + }) + require.Nil(t, err) + require.NotNil(t, cs) + + defer cs.Close() + + testChainSimulatorDirectStakedWithdrawUnstakedFundsInFirstEpoch(t, cs, 1) + }) + + t.Run("staking ph 4 step 1 is active", func(t *testing.T) { + cs, err := chainSimulator.NewChainSimulator(chainSimulator.ArgsChainSimulator{ + BypassTxSignatureCheck: false, + TempDir: t.TempDir(), + PathToInitialConfig: defaultPathToInitialConfig, + NumOfShards: 3, + GenesisTimestamp: time.Now().Unix(), + RoundDurationInMillis: roundDurationInMillis, + RoundsPerEpoch: roundsPerEpoch, + ApiInterface: api.NewNoApiInterface(), + MinNodesPerShard: 3, + MetaChainMinNodes: 3, + NumNodesWaitingListMeta: 3, + NumNodesWaitingListShard: 3, + AlterConfigsFunction: func(cfg *config.Configs) { + cfg.EpochConfig.EnableEpochs.StakingV4Step1EnableEpoch = 2 + cfg.EpochConfig.EnableEpochs.StakingV4Step2EnableEpoch = 3 + cfg.EpochConfig.EnableEpochs.StakingV4Step3EnableEpoch = 4 + + cfg.EpochConfig.EnableEpochs.MaxNodesChangeEnableEpoch[2].EpochEnable = 4 + }, + }) + require.Nil(t, err) + require.NotNil(t, cs) + + defer cs.Close() + + testChainSimulatorDirectStakedWithdrawUnstakedFundsInFirstEpoch(t, cs, 2) + }) + + t.Run("staking ph 4 step 2 is active", func(t *testing.T) { + cs, err := chainSimulator.NewChainSimulator(chainSimulator.ArgsChainSimulator{ + BypassTxSignatureCheck: false, + TempDir: t.TempDir(), + PathToInitialConfig: defaultPathToInitialConfig, + NumOfShards: 3, + GenesisTimestamp: time.Now().Unix(), + RoundDurationInMillis: roundDurationInMillis, + RoundsPerEpoch: roundsPerEpoch, + ApiInterface: api.NewNoApiInterface(), + MinNodesPerShard: 3, + MetaChainMinNodes: 3, + NumNodesWaitingListMeta: 3, + NumNodesWaitingListShard: 3, + AlterConfigsFunction: func(cfg *config.Configs) { + cfg.EpochConfig.EnableEpochs.StakingV4Step1EnableEpoch = 2 + cfg.EpochConfig.EnableEpochs.StakingV4Step2EnableEpoch = 3 + cfg.EpochConfig.EnableEpochs.StakingV4Step3EnableEpoch = 4 + + cfg.EpochConfig.EnableEpochs.MaxNodesChangeEnableEpoch[2].EpochEnable = 4 + }, + }) + require.Nil(t, err) + require.NotNil(t, cs) + + defer cs.Close() + + testChainSimulatorDirectStakedWithdrawUnstakedFundsInFirstEpoch(t, cs, 3) + }) + + t.Run("staking ph 4 step 3 is active", func(t *testing.T) { + cs, err := chainSimulator.NewChainSimulator(chainSimulator.ArgsChainSimulator{ + BypassTxSignatureCheck: false, + TempDir: t.TempDir(), + PathToInitialConfig: defaultPathToInitialConfig, + NumOfShards: 3, + GenesisTimestamp: time.Now().Unix(), + RoundDurationInMillis: roundDurationInMillis, + RoundsPerEpoch: roundsPerEpoch, + ApiInterface: 
api.NewNoApiInterface(), + MinNodesPerShard: 3, + MetaChainMinNodes: 3, + NumNodesWaitingListMeta: 3, + NumNodesWaitingListShard: 3, + AlterConfigsFunction: func(cfg *config.Configs) { + cfg.EpochConfig.EnableEpochs.StakingV4Step1EnableEpoch = 2 + cfg.EpochConfig.EnableEpochs.StakingV4Step2EnableEpoch = 3 + cfg.EpochConfig.EnableEpochs.StakingV4Step3EnableEpoch = 4 + + cfg.EpochConfig.EnableEpochs.MaxNodesChangeEnableEpoch[2].EpochEnable = 4 + }, + }) + require.Nil(t, err) + require.NotNil(t, cs) + + defer cs.Close() + + testChainSimulatorDirectStakedWithdrawUnstakedFundsInFirstEpoch(t, cs, 4) + }) +} + +func testChainSimulatorDirectStakedWithdrawUnstakedFundsInFirstEpoch(t *testing.T, cs chainSimulatorIntegrationTests.ChainSimulator, targetEpoch int32) { + err := cs.GenerateBlocksUntilEpochIsReached(targetEpoch) + require.Nil(t, err) + + privateKey, blsKeys, err := chainSimulator.GenerateBlsPrivateKeys(1) + require.Nil(t, err) + + err = cs.AddValidatorKeys(privateKey) + require.Nil(t, err) + metachainNode := cs.GetNodeHandler(core.MetachainShardId) + + mintValue := big.NewInt(10000) + mintValue = mintValue.Mul(oneEGLD, mintValue) + + validatorOwner, err := cs.GenerateAndMintWalletAddress(core.AllShardId, mintValue) + require.Nil(t, err) + + stakeValue := big.NewInt(0).Mul(oneEGLD, big.NewInt(2600)) + txDataField := fmt.Sprintf("stake@01@%s@%s", blsKeys[0], mockBLSSignature) + txStake := generateTransaction(validatorOwner.Bytes, 0, vm.ValidatorSCAddress, stakeValue, txDataField, gasLimitForStakeOperation) + stakeTx, err := cs.SendTxAndGenerateBlockTilTxIsExecuted(txStake, maxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, stakeTx) + + err = cs.GenerateBlocks(2) // allow the metachain to finalize the block that contains the staking of the node + require.Nil(t, err) + + testBLSKeyStaked(t, cs, metachainNode, blsKeys[0], targetEpoch) + + shardIDValidatorOwner := cs.GetNodeHandler(0).GetShardCoordinator().ComputeId(validatorOwner.Bytes) + accountValidatorOwner, _, err := cs.GetNodeHandler(shardIDValidatorOwner).GetFacadeHandler().GetAccount(validatorOwner.Bech32, coreAPI.AccountQueryOptions{}) + require.Nil(t, err) + balanceBeforeUnbonding, _ := big.NewInt(0).SetString(accountValidatorOwner.Balance, 10) + + unStakeValue := big.NewInt(10) + unStakeValue = unStakeValue.Mul(oneEGLD, unStakeValue) + txDataField = fmt.Sprintf("unStakeTokens@%s", hex.EncodeToString(unStakeValue.Bytes())) + txUnStake := generateTransaction(validatorOwner.Bytes, 1, vm.ValidatorSCAddress, zeroValue, txDataField, gasLimitForStakeOperation) + unStakeTx, err := cs.SendTxAndGenerateBlockTilTxIsExecuted(txUnStake, maxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, unStakeTx) + + err = cs.GenerateBlocks(2) + require.Nil(t, err) + + // check bls key is still staked + testBLSKeyStaked(t, cs, metachainNode, blsKeys[0], targetEpoch) + + scQuery := &process.SCQuery{ + ScAddress: vm.ValidatorSCAddress, + FuncName: "getUnStakedTokensList", + CallerAddr: vm.ValidatorSCAddress, + CallValue: big.NewInt(0), + Arguments: [][]byte{validatorOwner.Bytes}, + } + result, _, err := metachainNode.GetFacadeHandler().ExecuteSCQuery(scQuery) + require.Nil(t, err) + require.Equal(t, okReturnCode, result.ReturnCode) + + expectedUnStaked := big.NewInt(10) + expectedUnStaked = expectedUnStaked.Mul(oneEGLD, expectedUnStaked) + require.Equal(t, expectedUnStaked.String(), big.NewInt(0).SetBytes(result.ReturnData[0]).String()) + + log.Info("Step 1. 
Wait for the unbonding epoch to start") + + err = cs.GenerateBlocksUntilEpochIsReached(targetEpoch + 1) + require.Nil(t, err) + + log.Info("Step 2. Create from the owner of staked nodes a transaction to withdraw the unstaked funds") + + txDataField = fmt.Sprintf("unBondTokens@%s", blsKeys[0]) + txUnBond := generateTransaction(validatorOwner.Bytes, 2, vm.ValidatorSCAddress, zeroValue, txDataField, gasLimitForUnBond) + unBondTx, err := cs.SendTxAndGenerateBlockTilTxIsExecuted(txUnBond, maxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, unBondTx) + + err = cs.GenerateBlocks(2) + require.Nil(t, err) + + log.Info("Step 3. Check the outcome of the TX & verify new stake state with vmquery (`getUnStakedTokensList`)") + + scQuery = &process.SCQuery{ + ScAddress: vm.ValidatorSCAddress, + FuncName: "getTotalStaked", + CallerAddr: vm.ValidatorSCAddress, + CallValue: big.NewInt(0), + Arguments: [][]byte{validatorOwner.Bytes}, + } + result, _, err = metachainNode.GetFacadeHandler().ExecuteSCQuery(scQuery) + require.Nil(t, err) + require.Equal(t, okReturnCode, result.ReturnCode) + + expectedStaked := big.NewInt(2590) + expectedStaked = expectedStaked.Mul(oneEGLD, expectedStaked) + require.Equal(t, expectedStaked.String(), string(result.ReturnData[0])) + + // the owner balance should increase with the (10 EGLD - tx fee) + accountValidatorOwner, _, err = cs.GetNodeHandler(shardIDValidatorOwner).GetFacadeHandler().GetAccount(validatorOwner.Bech32, coreAPI.AccountQueryOptions{}) + require.Nil(t, err) + balanceAfterUnbonding, _ := big.NewInt(0).SetString(accountValidatorOwner.Balance, 10) + + // subtract unbonding value + balanceAfterUnbonding.Sub(balanceAfterUnbonding, unStakeValue) + + txsFee, _ := big.NewInt(0).SetString(unBondTx.Fee, 10) + balanceAfterUnbondingWithFee := big.NewInt(0).Add(balanceAfterUnbonding, txsFee) + + txsFee, _ = big.NewInt(0).SetString(unStakeTx.Fee, 10) + balanceAfterUnbondingWithFee.Add(balanceAfterUnbondingWithFee, txsFee) + + txsFee, _ = big.NewInt(0).SetString(stakeTx.Fee, 10) + balanceAfterUnbondingWithFee.Add(balanceAfterUnbondingWithFee, txsFee) + + require.Equal(t, 1, balanceAfterUnbondingWithFee.Cmp(balanceBeforeUnbonding)) +} + +// Test description: +// Unstaking funds in different batches allows correct withdrawal for each batch +// at the corresponding epoch. +// +// Internal test scenario #30 +func TestChainSimulator_DirectStakingNodes_WithdrawUnstakedInBatches(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + + roundDurationInMillis := uint64(6000) + roundsPerEpoch := core.OptionalUint64{ + HasValue: true, + Value: 30, + } + + // Test Steps + // 1. Create 3 transactions for unstaking: first one unstaking 11 egld, second one unstaking 12 egld and third one unstaking 13 egld. + // 2. Send the transactions in consecutive epochs, one TX in each epoch. + // 3. Wait for the epoch when first tx unbonding period ends. + // 4. Create a transaction for withdraw and send it to the network + // 5. Wait for an epoch + // 6. Create another transaction for withdraw and send it to the network + // 7. Wait for an epoch + // 8. 
Create another transasction for withdraw and send it to the network + + t.Run("staking ph 4 is not active", func(t *testing.T) { + cs, err := chainSimulator.NewChainSimulator(chainSimulator.ArgsChainSimulator{ + BypassTxSignatureCheck: false, + TempDir: t.TempDir(), + PathToInitialConfig: defaultPathToInitialConfig, + NumOfShards: 3, + GenesisTimestamp: time.Now().Unix(), + RoundDurationInMillis: roundDurationInMillis, + RoundsPerEpoch: roundsPerEpoch, + ApiInterface: api.NewNoApiInterface(), + MinNodesPerShard: 3, + MetaChainMinNodes: 3, + NumNodesWaitingListMeta: 3, + NumNodesWaitingListShard: 3, + AlterConfigsFunction: func(cfg *config.Configs) { + cfg.EpochConfig.EnableEpochs.StakingV4Step1EnableEpoch = 100 + cfg.EpochConfig.EnableEpochs.StakingV4Step2EnableEpoch = 101 + cfg.EpochConfig.EnableEpochs.StakingV4Step3EnableEpoch = 102 + + cfg.EpochConfig.EnableEpochs.MaxNodesChangeEnableEpoch[2].EpochEnable = 102 + + cfg.SystemSCConfig.StakingSystemSCConfig.UnBondPeriodInEpochs = 6 + }, + }) + require.Nil(t, err) + require.NotNil(t, cs) + + defer cs.Close() + + testChainSimulatorDirectStakedWithdrawUnstakedFundsInBatches(t, cs, 1) + }) + + t.Run("staking ph 4 step 1 is active", func(t *testing.T) { + cs, err := chainSimulator.NewChainSimulator(chainSimulator.ArgsChainSimulator{ + BypassTxSignatureCheck: false, + TempDir: t.TempDir(), + PathToInitialConfig: defaultPathToInitialConfig, + NumOfShards: 3, + GenesisTimestamp: time.Now().Unix(), + RoundDurationInMillis: roundDurationInMillis, + RoundsPerEpoch: roundsPerEpoch, + ApiInterface: api.NewNoApiInterface(), + MinNodesPerShard: 3, + MetaChainMinNodes: 3, + NumNodesWaitingListMeta: 3, + NumNodesWaitingListShard: 3, + AlterConfigsFunction: func(cfg *config.Configs) { + cfg.EpochConfig.EnableEpochs.StakingV4Step1EnableEpoch = 2 + cfg.EpochConfig.EnableEpochs.StakingV4Step2EnableEpoch = 3 + cfg.EpochConfig.EnableEpochs.StakingV4Step3EnableEpoch = 4 + + cfg.EpochConfig.EnableEpochs.MaxNodesChangeEnableEpoch[2].EpochEnable = 4 + + cfg.SystemSCConfig.StakingSystemSCConfig.UnBondPeriodInEpochs = 6 + }, + }) + require.Nil(t, err) + require.NotNil(t, cs) + + defer cs.Close() + + testChainSimulatorDirectStakedWithdrawUnstakedFundsInBatches(t, cs, 2) + }) + + t.Run("staking ph 4 step 2 is active", func(t *testing.T) { + cs, err := chainSimulator.NewChainSimulator(chainSimulator.ArgsChainSimulator{ + BypassTxSignatureCheck: false, + TempDir: t.TempDir(), + PathToInitialConfig: defaultPathToInitialConfig, + NumOfShards: 3, + GenesisTimestamp: time.Now().Unix(), + RoundDurationInMillis: roundDurationInMillis, + RoundsPerEpoch: roundsPerEpoch, + ApiInterface: api.NewNoApiInterface(), + MinNodesPerShard: 3, + MetaChainMinNodes: 3, + NumNodesWaitingListMeta: 3, + NumNodesWaitingListShard: 3, + AlterConfigsFunction: func(cfg *config.Configs) { + cfg.EpochConfig.EnableEpochs.StakingV4Step1EnableEpoch = 2 + cfg.EpochConfig.EnableEpochs.StakingV4Step2EnableEpoch = 3 + cfg.EpochConfig.EnableEpochs.StakingV4Step3EnableEpoch = 4 + + cfg.EpochConfig.EnableEpochs.MaxNodesChangeEnableEpoch[2].EpochEnable = 4 + + cfg.SystemSCConfig.StakingSystemSCConfig.UnBondPeriodInEpochs = 6 + }, + }) + require.Nil(t, err) + require.NotNil(t, cs) + + defer cs.Close() + + testChainSimulatorDirectStakedWithdrawUnstakedFundsInBatches(t, cs, 3) + }) + + t.Run("staking ph 4 step 3 is active", func(t *testing.T) { + cs, err := chainSimulator.NewChainSimulator(chainSimulator.ArgsChainSimulator{ + BypassTxSignatureCheck: false, + TempDir: t.TempDir(), + PathToInitialConfig: 
defaultPathToInitialConfig, + NumOfShards: 3, + GenesisTimestamp: time.Now().Unix(), + RoundDurationInMillis: roundDurationInMillis, + RoundsPerEpoch: roundsPerEpoch, + ApiInterface: api.NewNoApiInterface(), + MinNodesPerShard: 3, + MetaChainMinNodes: 3, + NumNodesWaitingListMeta: 3, + NumNodesWaitingListShard: 3, + AlterConfigsFunction: func(cfg *config.Configs) { + cfg.EpochConfig.EnableEpochs.StakingV4Step1EnableEpoch = 2 + cfg.EpochConfig.EnableEpochs.StakingV4Step2EnableEpoch = 3 + cfg.EpochConfig.EnableEpochs.StakingV4Step3EnableEpoch = 4 + + cfg.EpochConfig.EnableEpochs.MaxNodesChangeEnableEpoch[2].EpochEnable = 4 + + cfg.SystemSCConfig.StakingSystemSCConfig.UnBondPeriodInEpochs = 6 + }, + }) + require.Nil(t, err) + require.NotNil(t, cs) + + defer cs.Close() + + testChainSimulatorDirectStakedWithdrawUnstakedFundsInBatches(t, cs, 4) + }) +} + +func testChainSimulatorDirectStakedWithdrawUnstakedFundsInBatches(t *testing.T, cs chainSimulatorIntegrationTests.ChainSimulator, targetEpoch int32) { + err := cs.GenerateBlocksUntilEpochIsReached(targetEpoch) + require.Nil(t, err) + + privateKey, blsKeys, err := chainSimulator.GenerateBlsPrivateKeys(1) + require.Nil(t, err) + + err = cs.AddValidatorKeys(privateKey) + require.Nil(t, err) + metachainNode := cs.GetNodeHandler(core.MetachainShardId) + + mintValue := big.NewInt(2700) + mintValue = mintValue.Mul(oneEGLD, mintValue) + + validatorOwner, err := cs.GenerateAndMintWalletAddress(core.AllShardId, mintValue) + require.Nil(t, err) + + stakeValue := big.NewInt(0).Mul(oneEGLD, big.NewInt(2600)) + txDataField := fmt.Sprintf("stake@01@%s@%s", blsKeys[0], mockBLSSignature) + txStake := generateTransaction(validatorOwner.Bytes, 0, vm.ValidatorSCAddress, stakeValue, txDataField, gasLimitForStakeOperation) + stakeTx, err := cs.SendTxAndGenerateBlockTilTxIsExecuted(txStake, maxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, stakeTx) + + stakeTxFee, _ := big.NewInt(0).SetString(stakeTx.Fee, 10) + + err = cs.GenerateBlocks(2) + require.Nil(t, err) + + testBLSKeyStaked(t, cs, metachainNode, blsKeys[0], targetEpoch) + + shardIDValidatorOwner := cs.GetNodeHandler(0).GetShardCoordinator().ComputeId(validatorOwner.Bytes) + accountValidatorOwner, _, err := cs.GetNodeHandler(shardIDValidatorOwner).GetFacadeHandler().GetAccount(validatorOwner.Bech32, coreAPI.AccountQueryOptions{}) + require.Nil(t, err) + balanceBeforeUnbonding, _ := big.NewInt(0).SetString(accountValidatorOwner.Balance, 10) + + log.Info("Step 1. Create 3 transactions for unstaking: first one unstaking 11 egld, second one unstaking 12 egld and third one unstaking 13 egld.") + log.Info("Step 2. 
Send the transactions in consecutive epochs, one TX in each epoch.") + + unStakeValue1 := big.NewInt(11) + unStakeValue1 = unStakeValue1.Mul(oneEGLD, unStakeValue1) + txDataField = fmt.Sprintf("unStakeTokens@%s", hex.EncodeToString(unStakeValue1.Bytes())) + txUnStake := generateTransaction(validatorOwner.Bytes, 1, vm.ValidatorSCAddress, zeroValue, txDataField, gasLimitForStakeOperation) + unStakeTx, err := cs.SendTxAndGenerateBlockTilTxIsExecuted(txUnStake, maxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, unStakeTx) + + unStakeTxFee, _ := big.NewInt(0).SetString(unStakeTx.Fee, 10) + + testEpoch := targetEpoch + 1 + err = cs.GenerateBlocksUntilEpochIsReached(testEpoch) + require.Nil(t, err) + + unStakeValue2 := big.NewInt(12) + unStakeValue2 = unStakeValue2.Mul(oneEGLD, unStakeValue2) + txDataField = fmt.Sprintf("unStakeTokens@%s", hex.EncodeToString(unStakeValue2.Bytes())) + txUnStake = generateTransaction(validatorOwner.Bytes, 2, vm.ValidatorSCAddress, zeroValue, txDataField, gasLimitForStakeOperation) + unStakeTx, err = cs.SendTxAndGenerateBlockTilTxIsExecuted(txUnStake, maxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, unStakeTx) + + testEpoch++ + err = cs.GenerateBlocksUntilEpochIsReached(testEpoch) + require.Nil(t, err) + + unStakeValue3 := big.NewInt(13) + unStakeValue3 = unStakeValue3.Mul(oneEGLD, unStakeValue3) + txDataField = fmt.Sprintf("unStakeTokens@%s", hex.EncodeToString(unStakeValue3.Bytes())) + txUnStake = generateTransaction(validatorOwner.Bytes, 3, vm.ValidatorSCAddress, zeroValue, txDataField, gasLimitForStakeOperation) + unStakeTx, err = cs.SendTxAndGenerateBlockTilTxIsExecuted(txUnStake, maxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, unStakeTx) + + testEpoch++ + err = cs.GenerateBlocksUntilEpochIsReached(testEpoch) + require.Nil(t, err) + + // check bls key is still staked + testBLSKeyStaked(t, cs, metachainNode, blsKeys[0], targetEpoch) + + scQuery := &process.SCQuery{ + ScAddress: vm.ValidatorSCAddress, + FuncName: "getUnStakedTokensList", + CallerAddr: vm.ValidatorSCAddress, + CallValue: big.NewInt(0), + Arguments: [][]byte{validatorOwner.Bytes}, + } + result, _, err := metachainNode.GetFacadeHandler().ExecuteSCQuery(scQuery) + require.Nil(t, err) + require.Equal(t, okReturnCode, result.ReturnCode) + + expectedUnStaked := big.NewInt(11) + expectedUnStaked = expectedUnStaked.Mul(oneEGLD, expectedUnStaked) + require.Equal(t, expectedUnStaked.String(), big.NewInt(0).SetBytes(result.ReturnData[0]).String()) + + scQuery = &process.SCQuery{ + ScAddress: vm.ValidatorSCAddress, + FuncName: "getTotalStaked", + CallerAddr: vm.ValidatorSCAddress, + CallValue: big.NewInt(0), + Arguments: [][]byte{validatorOwner.Bytes}, + } + result, _, err = metachainNode.GetFacadeHandler().ExecuteSCQuery(scQuery) + require.Nil(t, err) + require.Equal(t, okReturnCode, result.ReturnCode) + + expectedStaked := big.NewInt(2600 - 11 - 12 - 13) + expectedStaked = expectedStaked.Mul(oneEGLD, expectedStaked) + require.Equal(t, expectedStaked.String(), string(result.ReturnData[0])) + + log.Info("Step 3. Wait for the unbonding epoch to start") + + testEpoch += 3 + err = cs.GenerateBlocksUntilEpochIsReached(testEpoch) + require.Nil(t, err) + + log.Info("Step 4.1. 
Create from the owner of staked nodes a transaction to withdraw the unstaked funds") + + txDataField = fmt.Sprintf("unBondTokens@%s", blsKeys[0]) + txUnBond := generateTransaction(validatorOwner.Bytes, 4, vm.ValidatorSCAddress, zeroValue, txDataField, gasLimitForUnBond) + unBondTx, err := cs.SendTxAndGenerateBlockTilTxIsExecuted(txUnBond, maxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, unBondTx) + + unBondTxFee, _ := big.NewInt(0).SetString(unBondTx.Fee, 10) + + err = cs.GenerateBlocks(2) + require.Nil(t, err) + + // the owner balance should increase with the (11 EGLD - tx fee) + accountValidatorOwner, _, err = cs.GetNodeHandler(shardIDValidatorOwner).GetFacadeHandler().GetAccount(validatorOwner.Bech32, coreAPI.AccountQueryOptions{}) + require.Nil(t, err) + balanceAfterUnbonding, _ := big.NewInt(0).SetString(accountValidatorOwner.Balance, 10) + + balanceAfterUnbonding.Sub(balanceAfterUnbonding, unStakeValue1) + + txsFee := big.NewInt(0) + + txsFee.Add(txsFee, stakeTxFee) + txsFee.Add(txsFee, unBondTxFee) + txsFee.Add(txsFee, unStakeTxFee) + txsFee.Add(txsFee, unStakeTxFee) + txsFee.Add(txsFee, unStakeTxFee) + + balanceAfterUnbonding.Add(balanceAfterUnbonding, txsFee) + + require.Equal(t, 1, balanceAfterUnbonding.Cmp(balanceBeforeUnbonding)) + + log.Info("Step 4.2. Create from the owner of staked nodes a transaction to withdraw the unstaked funds") + + testEpoch++ + err = cs.GenerateBlocksUntilEpochIsReached(testEpoch) + require.Nil(t, err) + + txDataField = fmt.Sprintf("unBondTokens@%s", blsKeys[0]) + txUnBond = generateTransaction(validatorOwner.Bytes, 5, vm.ValidatorSCAddress, zeroValue, txDataField, gasLimitForUnBond) + unBondTx, err = cs.SendTxAndGenerateBlockTilTxIsExecuted(txUnBond, maxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, unBondTx) + + err = cs.GenerateBlocks(2) + require.Nil(t, err) + + // the owner balance should increase with the (11+12 EGLD - tx fee) + accountValidatorOwner, _, err = cs.GetNodeHandler(shardIDValidatorOwner).GetFacadeHandler().GetAccount(validatorOwner.Bech32, coreAPI.AccountQueryOptions{}) + require.Nil(t, err) + balanceAfterUnbonding, _ = big.NewInt(0).SetString(accountValidatorOwner.Balance, 10) + + balanceAfterUnbonding.Sub(balanceAfterUnbonding, unStakeValue1) + balanceAfterUnbonding.Sub(balanceAfterUnbonding, unStakeValue2) + + txsFee.Add(txsFee, unBondTxFee) + balanceAfterUnbonding.Add(balanceAfterUnbonding, txsFee) + + require.Equal(t, 1, balanceAfterUnbonding.Cmp(balanceBeforeUnbonding)) + + log.Info("Step 4.3. 
Create from the owner of staked nodes a transaction to withdraw the unstaked funds") + + testEpoch++ + err = cs.GenerateBlocksUntilEpochIsReached(testEpoch) + require.Nil(t, err) + + txDataField = fmt.Sprintf("unBondTokens@%s", blsKeys[0]) + txUnBond = generateTransaction(validatorOwner.Bytes, 6, vm.ValidatorSCAddress, zeroValue, txDataField, gasLimitForUnBond) + unBondTx, err = cs.SendTxAndGenerateBlockTilTxIsExecuted(txUnBond, maxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, unBondTx) + + err = cs.GenerateBlocks(2) + require.Nil(t, err) + + // the owner balance should increase with the (11+12+13 EGLD - tx fee) + accountValidatorOwner, _, err = cs.GetNodeHandler(shardIDValidatorOwner).GetFacadeHandler().GetAccount(validatorOwner.Bech32, coreAPI.AccountQueryOptions{}) + require.Nil(t, err) + balanceAfterUnbonding, _ = big.NewInt(0).SetString(accountValidatorOwner.Balance, 10) + + balanceAfterUnbonding.Sub(balanceAfterUnbonding, unStakeValue1) + balanceAfterUnbonding.Sub(balanceAfterUnbonding, unStakeValue2) + balanceAfterUnbonding.Sub(balanceAfterUnbonding, unStakeValue3) + + txsFee.Add(txsFee, unBondTxFee) + balanceAfterUnbonding.Add(balanceAfterUnbonding, txsFee) + + require.Equal(t, 1, balanceAfterUnbonding.Cmp(balanceBeforeUnbonding)) +} + +// Test description: +// Unstake funds in different batches in the same epoch allows correct withdrawal in the correct epoch +// +// Internal test scenario #31 +func TestChainSimulator_DirectStakingNodes_WithdrawUnstakedInEpoch(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + + roundDurationInMillis := uint64(6000) + roundsPerEpoch := core.OptionalUint64{ + HasValue: true, + Value: 30, + } + + // Test Steps + // 1. Create 3 transactions for unstaking: first one unstaking 11 egld each, second one unstaking 12 egld and third one unstaking 13 egld. + // 2. Send the transactions consecutively in the same epoch + // 3. Wait for the epoch when unbonding period ends. + // 4. 
Create a transaction for withdraw and send it to the network + + t.Run("staking ph 4 is not active", func(t *testing.T) { + cs, err := chainSimulator.NewChainSimulator(chainSimulator.ArgsChainSimulator{ + BypassTxSignatureCheck: false, + TempDir: t.TempDir(), + PathToInitialConfig: defaultPathToInitialConfig, + NumOfShards: 3, + GenesisTimestamp: time.Now().Unix(), + RoundDurationInMillis: roundDurationInMillis, + RoundsPerEpoch: roundsPerEpoch, + ApiInterface: api.NewNoApiInterface(), + MinNodesPerShard: 3, + MetaChainMinNodes: 3, + NumNodesWaitingListMeta: 3, + NumNodesWaitingListShard: 3, + AlterConfigsFunction: func(cfg *config.Configs) { + cfg.EpochConfig.EnableEpochs.StakingV4Step1EnableEpoch = 100 + cfg.EpochConfig.EnableEpochs.StakingV4Step2EnableEpoch = 101 + cfg.EpochConfig.EnableEpochs.StakingV4Step3EnableEpoch = 102 + + cfg.EpochConfig.EnableEpochs.MaxNodesChangeEnableEpoch[2].EpochEnable = 102 + + cfg.SystemSCConfig.StakingSystemSCConfig.UnBondPeriodInEpochs = 3 + }, + }) + require.Nil(t, err) + require.NotNil(t, cs) + + defer cs.Close() + + testChainSimulatorDirectStakedWithdrawUnstakedFundsInEpoch(t, cs, 1) + }) + + t.Run("staking ph 4 step 1 is active", func(t *testing.T) { + cs, err := chainSimulator.NewChainSimulator(chainSimulator.ArgsChainSimulator{ + BypassTxSignatureCheck: false, + TempDir: t.TempDir(), + PathToInitialConfig: defaultPathToInitialConfig, + NumOfShards: 3, + GenesisTimestamp: time.Now().Unix(), + RoundDurationInMillis: roundDurationInMillis, + RoundsPerEpoch: roundsPerEpoch, + ApiInterface: api.NewNoApiInterface(), + MinNodesPerShard: 3, + MetaChainMinNodes: 3, + NumNodesWaitingListMeta: 3, + NumNodesWaitingListShard: 3, + AlterConfigsFunction: func(cfg *config.Configs) { + cfg.EpochConfig.EnableEpochs.StakingV4Step1EnableEpoch = 2 + cfg.EpochConfig.EnableEpochs.StakingV4Step2EnableEpoch = 3 + cfg.EpochConfig.EnableEpochs.StakingV4Step3EnableEpoch = 4 + + cfg.EpochConfig.EnableEpochs.MaxNodesChangeEnableEpoch[2].EpochEnable = 4 + + cfg.SystemSCConfig.StakingSystemSCConfig.UnBondPeriodInEpochs = 3 + }, + }) + require.Nil(t, err) + require.NotNil(t, cs) + + defer cs.Close() + + testChainSimulatorDirectStakedWithdrawUnstakedFundsInEpoch(t, cs, 2) + }) + + t.Run("staking ph 4 step 2 is active", func(t *testing.T) { + cs, err := chainSimulator.NewChainSimulator(chainSimulator.ArgsChainSimulator{ + BypassTxSignatureCheck: false, + TempDir: t.TempDir(), + PathToInitialConfig: defaultPathToInitialConfig, + NumOfShards: 3, + GenesisTimestamp: time.Now().Unix(), + RoundDurationInMillis: roundDurationInMillis, + RoundsPerEpoch: roundsPerEpoch, + ApiInterface: api.NewNoApiInterface(), + MinNodesPerShard: 3, + MetaChainMinNodes: 3, + NumNodesWaitingListMeta: 3, + NumNodesWaitingListShard: 3, + AlterConfigsFunction: func(cfg *config.Configs) { + cfg.EpochConfig.EnableEpochs.StakingV4Step1EnableEpoch = 2 + cfg.EpochConfig.EnableEpochs.StakingV4Step2EnableEpoch = 3 + cfg.EpochConfig.EnableEpochs.StakingV4Step3EnableEpoch = 4 + + cfg.EpochConfig.EnableEpochs.MaxNodesChangeEnableEpoch[2].EpochEnable = 4 + + cfg.SystemSCConfig.StakingSystemSCConfig.UnBondPeriodInEpochs = 3 + }, + }) + require.Nil(t, err) + require.NotNil(t, cs) + + defer cs.Close() + + testChainSimulatorDirectStakedWithdrawUnstakedFundsInEpoch(t, cs, 3) + }) + + t.Run("staking ph 4 step 3 is active", func(t *testing.T) { + cs, err := chainSimulator.NewChainSimulator(chainSimulator.ArgsChainSimulator{ + BypassTxSignatureCheck: false, + TempDir: t.TempDir(), + PathToInitialConfig: 
defaultPathToInitialConfig, + NumOfShards: 3, + GenesisTimestamp: time.Now().Unix(), + RoundDurationInMillis: roundDurationInMillis, + RoundsPerEpoch: roundsPerEpoch, + ApiInterface: api.NewNoApiInterface(), + MinNodesPerShard: 3, + MetaChainMinNodes: 3, + NumNodesWaitingListMeta: 3, + NumNodesWaitingListShard: 3, + AlterConfigsFunction: func(cfg *config.Configs) { + cfg.EpochConfig.EnableEpochs.StakingV4Step1EnableEpoch = 2 + cfg.EpochConfig.EnableEpochs.StakingV4Step2EnableEpoch = 3 + cfg.EpochConfig.EnableEpochs.StakingV4Step3EnableEpoch = 4 + + cfg.EpochConfig.EnableEpochs.MaxNodesChangeEnableEpoch[2].EpochEnable = 4 + + cfg.SystemSCConfig.StakingSystemSCConfig.UnBondPeriodInEpochs = 3 + }, + }) + require.Nil(t, err) + require.NotNil(t, cs) + + defer cs.Close() + + testChainSimulatorDirectStakedWithdrawUnstakedFundsInEpoch(t, cs, 4) + }) +} + +func testChainSimulatorDirectStakedWithdrawUnstakedFundsInEpoch(t *testing.T, cs chainSimulatorIntegrationTests.ChainSimulator, targetEpoch int32) { + err := cs.GenerateBlocksUntilEpochIsReached(targetEpoch) + require.Nil(t, err) + + privateKey, blsKeys, err := chainSimulator.GenerateBlsPrivateKeys(1) + require.Nil(t, err) + + err = cs.AddValidatorKeys(privateKey) + require.Nil(t, err) + metachainNode := cs.GetNodeHandler(core.MetachainShardId) + + mintValue := big.NewInt(2700) + mintValue = mintValue.Mul(oneEGLD, mintValue) + + validatorOwner, err := cs.GenerateAndMintWalletAddress(core.AllShardId, mintValue) + require.Nil(t, err) + + stakeValue := big.NewInt(0).Mul(oneEGLD, big.NewInt(2600)) + txDataField := fmt.Sprintf("stake@01@%s@%s", blsKeys[0], mockBLSSignature) + txStake := generateTransaction(validatorOwner.Bytes, 0, vm.ValidatorSCAddress, stakeValue, txDataField, gasLimitForStakeOperation) + stakeTx, err := cs.SendTxAndGenerateBlockTilTxIsExecuted(txStake, maxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, stakeTx) + + stakeTxFee, _ := big.NewInt(0).SetString(stakeTx.Fee, 10) + + err = cs.GenerateBlocks(2) + require.Nil(t, err) + + testBLSKeyStaked(t, cs, metachainNode, blsKeys[0], targetEpoch) + + shardIDValidatorOwner := cs.GetNodeHandler(0).GetShardCoordinator().ComputeId(validatorOwner.Bytes) + accountValidatorOwner, _, err := cs.GetNodeHandler(shardIDValidatorOwner).GetFacadeHandler().GetAccount(validatorOwner.Bech32, coreAPI.AccountQueryOptions{}) + require.Nil(t, err) + balanceBeforeUnbonding, _ := big.NewInt(0).SetString(accountValidatorOwner.Balance, 10) + + log.Info("Step 1. Create 3 transactions for unstaking: first one unstaking 11 egld each, second one unstaking 12 egld and third one unstaking 13 egld.") + log.Info("Step 2. 
Send the transactions consecutively, in the same epoch.") + + unStakeValue1 := big.NewInt(11) + unStakeValue1 = unStakeValue1.Mul(oneEGLD, unStakeValue1) + txDataField = fmt.Sprintf("unStakeTokens@%s", hex.EncodeToString(unStakeValue1.Bytes())) + txUnStake := generateTransaction(validatorOwner.Bytes, 1, vm.ValidatorSCAddress, zeroValue, txDataField, gasLimitForStakeOperation) + unStakeTx, err := cs.SendTxAndGenerateBlockTilTxIsExecuted(txUnStake, maxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, unStakeTx) + + unStakeTxFee, _ := big.NewInt(0).SetString(unStakeTx.Fee, 10) + + unStakeValue2 := big.NewInt(12) + unStakeValue2 = unStakeValue2.Mul(oneEGLD, unStakeValue2) + txDataField = fmt.Sprintf("unStakeTokens@%s", hex.EncodeToString(unStakeValue2.Bytes())) + txUnStake = generateTransaction(validatorOwner.Bytes, 2, vm.ValidatorSCAddress, zeroValue, txDataField, gasLimitForStakeOperation) + unStakeTx, err = cs.SendTxAndGenerateBlockTilTxIsExecuted(txUnStake, maxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, unStakeTx) + + unStakeValue3 := big.NewInt(13) + unStakeValue3 = unStakeValue3.Mul(oneEGLD, unStakeValue3) + txDataField = fmt.Sprintf("unStakeTokens@%s", hex.EncodeToString(unStakeValue3.Bytes())) + txUnStake = generateTransaction(validatorOwner.Bytes, 3, vm.ValidatorSCAddress, zeroValue, txDataField, gasLimitForStakeOperation) + unStakeTx, err = cs.SendTxAndGenerateBlockTilTxIsExecuted(txUnStake, maxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, unStakeTx) + + // check bls key is still staked + testBLSKeyStaked(t, cs, metachainNode, blsKeys[0], targetEpoch) + + scQuery := &process.SCQuery{ + ScAddress: vm.ValidatorSCAddress, + FuncName: "getUnStakedTokensList", + CallerAddr: vm.ValidatorSCAddress, + CallValue: big.NewInt(0), + Arguments: [][]byte{validatorOwner.Bytes}, + } + result, _, err := metachainNode.GetFacadeHandler().ExecuteSCQuery(scQuery) + require.Nil(t, err) + require.Equal(t, okReturnCode, result.ReturnCode) + + expectedUnStaked := big.NewInt(11 + 12 + 13) + expectedUnStaked = expectedUnStaked.Mul(oneEGLD, expectedUnStaked) + require.Equal(t, expectedUnStaked.String(), big.NewInt(0).SetBytes(result.ReturnData[0]).String()) + + scQuery = &process.SCQuery{ + ScAddress: vm.ValidatorSCAddress, + FuncName: "getTotalStaked", + CallerAddr: vm.ValidatorSCAddress, + CallValue: big.NewInt(0), + Arguments: [][]byte{validatorOwner.Bytes}, + } + result, _, err = metachainNode.GetFacadeHandler().ExecuteSCQuery(scQuery) + require.Nil(t, err) + require.Equal(t, okReturnCode, result.ReturnCode) + + expectedStaked := big.NewInt(2600 - 11 - 12 - 13) + expectedStaked = expectedStaked.Mul(oneEGLD, expectedStaked) + require.Equal(t, expectedStaked.String(), string(result.ReturnData[0])) + + log.Info("Step 3. Wait for the unbonding epoch to start") + + testEpoch := targetEpoch + 3 + err = cs.GenerateBlocksUntilEpochIsReached(testEpoch) + require.Nil(t, err) + + log.Info("Step 4.1. 
Create, from the owner of the staked nodes, a transaction to withdraw the unstaked funds") + + txDataField = fmt.Sprintf("unBondTokens@%s", blsKeys[0]) + txUnBond := generateTransaction(validatorOwner.Bytes, 4, vm.ValidatorSCAddress, zeroValue, txDataField, gasLimitForUnBond) + unBondTx, err := cs.SendTxAndGenerateBlockTilTxIsExecuted(txUnBond, maxNumOfBlockToGenerateWhenExecutingTx) + require.Nil(t, err) + require.NotNil(t, unBondTx) + + unBondTxFee, _ := big.NewInt(0).SetString(unBondTx.Fee, 10) + + err = cs.GenerateBlocks(2) + require.Nil(t, err) + + // the owner balance should increase by the (11+12+13 EGLD - tx fees) + accountValidatorOwner, _, err = cs.GetNodeHandler(shardIDValidatorOwner).GetFacadeHandler().GetAccount(validatorOwner.Bech32, coreAPI.AccountQueryOptions{}) + require.Nil(t, err) + balanceAfterUnbonding, _ := big.NewInt(0).SetString(accountValidatorOwner.Balance, 10) + + balanceAfterUnbonding.Sub(balanceAfterUnbonding, unStakeValue1) + balanceAfterUnbonding.Sub(balanceAfterUnbonding, unStakeValue2) + balanceAfterUnbonding.Sub(balanceAfterUnbonding, unStakeValue3) + + txsFee := big.NewInt(0) + + txsFee.Add(txsFee, stakeTxFee) + txsFee.Add(txsFee, unBondTxFee) + txsFee.Add(txsFee, unStakeTxFee) + txsFee.Add(txsFee, unStakeTxFee) + txsFee.Add(txsFee, unStakeTxFee) + + balanceAfterUnbonding.Add(balanceAfterUnbonding, txsFee) + + require.Equal(t, 1, balanceAfterUnbonding.Cmp(balanceBeforeUnbonding)) +} diff --git a/integrationTests/common.go b/integrationTests/common.go new file mode 100644 index 00000000000..e4365471cd7 --- /dev/null +++ b/integrationTests/common.go @@ -0,0 +1,38 @@ +package integrationTests + +import ( + "github.com/multiversx/mx-chain-go/process" + "github.com/multiversx/mx-chain-go/state" + "github.com/multiversx/mx-chain-go/testscommon/stakingcommon" + vmcommon "github.com/multiversx/mx-chain-vm-common-go" +) + +// ProcessSCOutputAccounts will save account changes in accounts db from vmOutput +func ProcessSCOutputAccounts(vmOutput *vmcommon.VMOutput, accountsDB state.AccountsAdapter) error { + outputAccounts := process.SortVMOutputInsideData(vmOutput) + for _, outAcc := range outputAccounts { + acc := stakingcommon.LoadUserAccount(accountsDB, outAcc.Address) + + storageUpdates := process.GetSortedStorageUpdates(outAcc) + for _, storeUpdate := range storageUpdates { + err := acc.SaveKeyValue(storeUpdate.Offset, storeUpdate.Data) + if err != nil { + return err + } + + if outAcc.BalanceDelta != nil && outAcc.BalanceDelta.Cmp(zero) != 0 { + err = acc.AddToBalance(outAcc.BalanceDelta) + if err != nil { + return err + } + } + + err = accountsDB.SaveAccount(acc) + if err != nil { + return err + } + } + } + + return nil +} diff --git a/integrationTests/factory/consensusComponents/consensusComponents_test.go b/integrationTests/factory/consensusComponents/consensusComponents_test.go index 5be694c740d..f560f099705 100644 --- a/integrationTests/factory/consensusComponents/consensusComponents_test.go +++ b/integrationTests/factory/consensusComponents/consensusComponents_test.go @@ -67,6 +67,7 @@ func TestConsensusComponents_Close_ShouldWork(t *testing.T) { managedCoreComponents.NodeTypeProvider(), managedCoreComponents.EnableEpochsHandler(), managedDataComponents.Datapool().CurrentEpochValidatorInfo(), + managedBootstrapComponents.NodesCoordinatorRegistryFactory(), ) require.Nil(t, err) managedStatusComponents, err := nr.CreateManagedStatusComponents( diff --git a/integrationTests/factory/heartbeatComponents/heartbeatComponents_test.go 
b/integrationTests/factory/heartbeatComponents/heartbeatComponents_test.go index 0bd34fd45e4..9082ce63c06 100644 --- a/integrationTests/factory/heartbeatComponents/heartbeatComponents_test.go +++ b/integrationTests/factory/heartbeatComponents/heartbeatComponents_test.go @@ -67,6 +67,7 @@ func TestHeartbeatComponents_Close_ShouldWork(t *testing.T) { managedCoreComponents.NodeTypeProvider(), managedCoreComponents.EnableEpochsHandler(), managedDataComponents.Datapool().CurrentEpochValidatorInfo(), + managedBootstrapComponents.NodesCoordinatorRegistryFactory(), ) require.Nil(t, err) managedStatusComponents, err := nr.CreateManagedStatusComponents( diff --git a/integrationTests/factory/processComponents/processComponents_test.go b/integrationTests/factory/processComponents/processComponents_test.go index d81d921e74c..2f2c859bc94 100644 --- a/integrationTests/factory/processComponents/processComponents_test.go +++ b/integrationTests/factory/processComponents/processComponents_test.go @@ -68,6 +68,7 @@ func TestProcessComponents_Close_ShouldWork(t *testing.T) { managedCoreComponents.NodeTypeProvider(), managedCoreComponents.EnableEpochsHandler(), managedDataComponents.Datapool().CurrentEpochValidatorInfo(), + managedBootstrapComponents.NodesCoordinatorRegistryFactory(), ) require.Nil(t, err) managedStatusComponents, err := nr.CreateManagedStatusComponents( diff --git a/integrationTests/factory/statusComponents/statusComponents_test.go b/integrationTests/factory/statusComponents/statusComponents_test.go index 9865ce593ce..62e2ad1e289 100644 --- a/integrationTests/factory/statusComponents/statusComponents_test.go +++ b/integrationTests/factory/statusComponents/statusComponents_test.go @@ -68,6 +68,7 @@ func TestStatusComponents_Create_Close_ShouldWork(t *testing.T) { managedCoreComponents.NodeTypeProvider(), managedCoreComponents.EnableEpochsHandler(), managedDataComponents.Datapool().CurrentEpochValidatorInfo(), + managedBootstrapComponents.NodesCoordinatorRegistryFactory(), ) require.Nil(t, err) managedStatusComponents, err := nr.CreateManagedStatusComponents( diff --git a/integrationTests/interface.go b/integrationTests/interface.go index abe0b1a7be8..e4be7fe388c 100644 --- a/integrationTests/interface.go +++ b/integrationTests/interface.go @@ -96,6 +96,7 @@ type Facade interface { EncodeAddressPubkey(pk []byte) (string, error) GetThrottlerForEndpoint(endpoint string) (core.Throttler, bool) ValidatorStatisticsApi() (map[string]*validator.ValidatorStatistics, error) + AuctionListApi() ([]*common.AuctionListValidatorAPIResponse, error) ExecuteSCQuery(*process.SCQuery) (*vm.VMOutputApi, api.BlockInfo, error) DecodeAddressPubkey(pk string) ([]byte, error) GetProof(rootHash string, address string) (*common.GetProofResponse, error) @@ -113,6 +114,7 @@ type Facade interface { IsDataTrieMigrated(address string, options api.AccountQueryOptions) (bool, error) GetManagedKeysCount() int GetManagedKeys() []string + GetLoadedKeys() []string GetEligibleManagedKeys() ([]string, error) GetWaitingManagedKeys() ([]string, error) GetWaitingEpochsLeftForPublicKey(publicKey string) (uint32, error) diff --git a/integrationTests/miniNetwork.go b/integrationTests/miniNetwork.go new file mode 100644 index 00000000000..e9c64f5606d --- /dev/null +++ b/integrationTests/miniNetwork.go @@ -0,0 +1,113 @@ +package integrationTests + +import ( + "encoding/hex" + "fmt" + "math/big" + "testing" + + "github.com/multiversx/mx-chain-core-go/data/transaction" +) + +// MiniNetwork is a mini network, useful for some integration tests 
+type MiniNetwork struct { + Round uint64 + Nonce uint64 + + Nodes []*TestProcessorNode + ShardNode *TestProcessorNode + MetachainNode *TestProcessorNode + Users map[string]*TestWalletAccount +} + +// NewMiniNetwork creates a MiniNetwork +func NewMiniNetwork() *MiniNetwork { + n := &MiniNetwork{} + + nodes := CreateNodes( + 1, + 1, + 1, + ) + + n.Nodes = nodes + n.ShardNode = nodes[0] + n.MetachainNode = nodes[1] + n.Users = make(map[string]*TestWalletAccount) + + return n +} + +// Stop stops the mini network +func (n *MiniNetwork) Stop() { + n.ShardNode.Close() + n.MetachainNode.Close() +} + +// FundAccount funds an account +func (n *MiniNetwork) FundAccount(address []byte, value *big.Int) { + shard := n.MetachainNode.ShardCoordinator.ComputeId(address) + + if shard == n.MetachainNode.ShardCoordinator.SelfId() { + MintAddress(n.MetachainNode.AccntState, address, value) + } else { + MintAddress(n.ShardNode.AccntState, address, value) + } +} + +// AddUser adds a user (account) to the mini network +func (n *MiniNetwork) AddUser(balance *big.Int) *TestWalletAccount { + user := CreateTestWalletAccount(n.ShardNode.ShardCoordinator, 0) + n.Users[string(user.Address)] = user + n.FundAccount(user.Address, balance) + return user +} + +// Start starts the mini network +func (n *MiniNetwork) Start() { + n.Round = 1 + n.Nonce = 1 +} + +// Continue advances processing with a number of rounds +func (n *MiniNetwork) Continue(t *testing.T, numRounds int) { + idxProposers := []int{0, 1} + + for i := int64(0); i < int64(numRounds); i++ { + n.Nonce, n.Round = ProposeAndSyncOneBlock(t, n.Nodes, idxProposers, n.Round, n.Nonce) + } +} + +// SendTransaction sends a transaction +func (n *MiniNetwork) SendTransaction( + senderPubkey []byte, + receiverPubkey []byte, + value *big.Int, + data string, + additionalGasLimit uint64, +) (string, error) { + sender, ok := n.Users[string(senderPubkey)] + if !ok { + return "", fmt.Errorf("unknown sender: %s", hex.EncodeToString(senderPubkey)) + } + + tx := &transaction.Transaction{ + Nonce: sender.Nonce, + Value: new(big.Int).Set(value), + SndAddr: sender.Address, + RcvAddr: receiverPubkey, + Data: []byte(data), + GasPrice: MinTxGasPrice, + GasLimit: MinTxGasLimit + uint64(len(data)) + additionalGasLimit, + ChainID: ChainID, + Version: MinTransactionVersion, + } + + txBuff, _ := tx.GetDataForSigning(TestAddressPubkeyConverter, TestTxSignMarshalizer, TestTxSignHasher) + tx.Signature, _ = sender.SingleSigner.Sign(sender.SkTxSign, txBuff) + txHash, err := n.ShardNode.SendTransaction(tx) + + sender.Nonce++ + + return txHash, err +} diff --git a/integrationTests/mock/builtInCostHandlerStub.go b/integrationTests/mock/builtInCostHandlerStub.go deleted file mode 100644 index 4ee3b23b062..00000000000 --- a/integrationTests/mock/builtInCostHandlerStub.go +++ /dev/null @@ -1,24 +0,0 @@ -package mock - -import ( - "github.com/multiversx/mx-chain-core-go/data" -) - -// BuiltInCostHandlerStub - -type BuiltInCostHandlerStub struct { -} - -// ComputeBuiltInCost - -func (b *BuiltInCostHandlerStub) ComputeBuiltInCost(_ data.TransactionWithFeeHandler) uint64 { - return 1 -} - -// IsBuiltInFuncCall - -func (b *BuiltInCostHandlerStub) IsBuiltInFuncCall(_ data.TransactionWithFeeHandler) bool { - return false -} - -// IsInterfaceNil - -func (b *BuiltInCostHandlerStub) IsInterfaceNil() bool { - return b == nil -} diff --git a/integrationTests/mock/epochRewardsCreatorStub.go b/integrationTests/mock/epochRewardsCreatorStub.go deleted file mode 100644 index 22c425f3e41..00000000000 --- 
a/integrationTests/mock/epochRewardsCreatorStub.go +++ /dev/null @@ -1,109 +0,0 @@ -package mock - -import ( - "math/big" - - "github.com/multiversx/mx-chain-core-go/data" - "github.com/multiversx/mx-chain-core-go/data/block" - "github.com/multiversx/mx-chain-go/epochStart" - "github.com/multiversx/mx-chain-go/state" -) - -// EpochRewardsCreatorStub - -type EpochRewardsCreatorStub struct { - CreateRewardsMiniBlocksCalled func( - metaBlock data.MetaHeaderHandler, validatorsInfo map[uint32][]*state.ValidatorInfo, computedEconomics *block.Economics, - ) (block.MiniBlockSlice, error) - VerifyRewardsMiniBlocksCalled func( - metaBlock data.MetaHeaderHandler, validatorsInfo map[uint32][]*state.ValidatorInfo, computedEconomics *block.Economics, - ) error - CreateMarshalledDataCalled func(body *block.Body) map[string][][]byte - SaveBlockDataToStorageCalled func(metaBlock data.MetaHeaderHandler, body *block.Body) - DeleteBlockDataFromStorageCalled func(metaBlock data.MetaHeaderHandler, body *block.Body) - RemoveBlockDataFromPoolsCalled func(metaBlock data.MetaHeaderHandler, body *block.Body) - GetRewardsTxsCalled func(body *block.Body) map[string]data.TransactionHandler - GetProtocolSustainCalled func() *big.Int - GetLocalTxCacheCalled func() epochStart.TransactionCacher -} - -// GetProtocolSustainabilityRewards - -func (e *EpochRewardsCreatorStub) GetProtocolSustainabilityRewards() *big.Int { - if e.GetProtocolSustainCalled != nil { - return e.GetProtocolSustainCalled() - } - return big.NewInt(0) -} - -// GetLocalTxCache - -func (e *EpochRewardsCreatorStub) GetLocalTxCache() epochStart.TransactionCacher { - if e.GetLocalTxCacheCalled != nil { - return e.GetLocalTxCacheCalled() - } - return &TxForCurrentBlockStub{} -} - -// CreateRewardsMiniBlocks - -func (e *EpochRewardsCreatorStub) CreateRewardsMiniBlocks( - metaBlock data.MetaHeaderHandler, - validatorsInfo map[uint32][]*state.ValidatorInfo, - computedEconomics *block.Economics, -) (block.MiniBlockSlice, error) { - if e.CreateRewardsMiniBlocksCalled != nil { - return e.CreateRewardsMiniBlocksCalled(metaBlock, validatorsInfo, computedEconomics) - } - return nil, nil -} - -// GetRewardsTxs - -func (e *EpochRewardsCreatorStub) GetRewardsTxs(body *block.Body) map[string]data.TransactionHandler { - if e.GetRewardsTxsCalled != nil { - return e.GetRewardsTxsCalled(body) - } - return nil -} - -// VerifyRewardsMiniBlocks - -func (e *EpochRewardsCreatorStub) VerifyRewardsMiniBlocks( - metaBlock data.MetaHeaderHandler, - validatorsInfo map[uint32][]*state.ValidatorInfo, - computedEconomics *block.Economics, -) error { - if e.VerifyRewardsMiniBlocksCalled != nil { - return e.VerifyRewardsMiniBlocksCalled(metaBlock, validatorsInfo, computedEconomics) - } - return nil -} - -// CreateMarshalledData - -func (e *EpochRewardsCreatorStub) CreateMarshalledData(body *block.Body) map[string][][]byte { - if e.CreateMarshalledDataCalled != nil { - return e.CreateMarshalledDataCalled(body) - } - return nil -} - -// SaveBlockDataToStorage - -func (e *EpochRewardsCreatorStub) SaveBlockDataToStorage(metaBlock data.MetaHeaderHandler, body *block.Body) { - if e.SaveBlockDataToStorageCalled != nil { - e.SaveBlockDataToStorageCalled(metaBlock, body) - } -} - -// DeleteBlockDataFromStorage - -func (e *EpochRewardsCreatorStub) DeleteBlockDataFromStorage(metaBlock data.MetaHeaderHandler, body *block.Body) { - if e.DeleteBlockDataFromStorageCalled != nil { - e.DeleteBlockDataFromStorageCalled(metaBlock, body) - } -} - -// IsInterfaceNil - -func (e *EpochRewardsCreatorStub) 
IsInterfaceNil() bool { - return e == nil -} - -// RemoveBlockDataFromPools - -func (e *EpochRewardsCreatorStub) RemoveBlockDataFromPools(metaBlock data.MetaHeaderHandler, body *block.Body) { - if e.RemoveBlockDataFromPoolsCalled != nil { - e.RemoveBlockDataFromPoolsCalled(metaBlock, body) - } -} diff --git a/integrationTests/mock/processComponentsStub.go b/integrationTests/mock/processComponentsStub.go index e5a94dd78c1..e0407b5d6f9 100644 --- a/integrationTests/mock/processComponentsStub.go +++ b/integrationTests/mock/processComponentsStub.go @@ -59,6 +59,7 @@ type ProcessComponentsStub struct { ProcessedMiniBlocksTrackerInternal process.ProcessedMiniBlocksTracker ReceiptsRepositoryInternal factory.ReceiptsRepository ESDTDataStorageHandlerForAPIInternal vmcommon.ESDTNFTStorageHandler + SentSignaturesTrackerInternal process.SentSignaturesTracker } // Create - @@ -290,6 +291,11 @@ func (pcs *ProcessComponentsStub) ESDTDataStorageHandlerForAPI() vmcommon.ESDTNF return pcs.ESDTDataStorageHandlerForAPIInternal } +// SentSignaturesTracker - +func (pcs *ProcessComponentsStub) SentSignaturesTracker() process.SentSignaturesTracker { + return pcs.SentSignaturesTrackerInternal +} + // IsInterfaceNil - func (pcs *ProcessComponentsStub) IsInterfaceNil() bool { return pcs == nil diff --git a/integrationTests/mock/validatorStatisticsProcessorStub.go b/integrationTests/mock/validatorStatisticsProcessorStub.go deleted file mode 100644 index 34a0e35cad1..00000000000 --- a/integrationTests/mock/validatorStatisticsProcessorStub.go +++ /dev/null @@ -1,130 +0,0 @@ -package mock - -import ( - "github.com/multiversx/mx-chain-core-go/data" - "github.com/multiversx/mx-chain-go/state" -) - -// ValidatorStatisticsProcessorStub - -type ValidatorStatisticsProcessorStub struct { - UpdatePeerStateCalled func(header data.MetaHeaderHandler) ([]byte, error) - RevertPeerStateCalled func(header data.MetaHeaderHandler) error - GetPeerAccountCalled func(address []byte) (state.PeerAccountHandler, error) - RootHashCalled func() ([]byte, error) - ResetValidatorStatisticsAtNewEpochCalled func(vInfos map[uint32][]*state.ValidatorInfo) error - GetValidatorInfoForRootHashCalled func(rootHash []byte) (map[uint32][]*state.ValidatorInfo, error) - ProcessRatingsEndOfEpochCalled func(validatorInfos map[uint32][]*state.ValidatorInfo, epoch uint32) error - ProcessCalled func(validatorInfo data.ShardValidatorInfoHandler) error - CommitCalled func() ([]byte, error) - PeerAccountToValidatorInfoCalled func(peerAccount state.PeerAccountHandler) *state.ValidatorInfo - SaveNodesCoordinatorUpdatesCalled func(epoch uint32) (bool, error) -} - -// SaveNodesCoordinatorUpdates - -func (vsp *ValidatorStatisticsProcessorStub) SaveNodesCoordinatorUpdates(epoch uint32) (bool, error) { - if vsp.SaveNodesCoordinatorUpdatesCalled != nil { - return vsp.SaveNodesCoordinatorUpdatesCalled(epoch) - } - return false, nil -} - -// PeerAccountToValidatorInfo - -func (vsp *ValidatorStatisticsProcessorStub) PeerAccountToValidatorInfo(peerAccount state.PeerAccountHandler) *state.ValidatorInfo { - if vsp.PeerAccountToValidatorInfoCalled != nil { - return vsp.PeerAccountToValidatorInfoCalled(peerAccount) - } - return nil -} - -// Process - -func (vsp *ValidatorStatisticsProcessorStub) Process(validatorInfo data.ShardValidatorInfoHandler) error { - if vsp.ProcessCalled != nil { - return vsp.ProcessCalled(validatorInfo) - } - - return nil -} - -// Commit - -func (vsp *ValidatorStatisticsProcessorStub) Commit() ([]byte, error) { - if vsp.CommitCalled != nil { - return 
vsp.CommitCalled() - } - - return nil, nil -} - -// ProcessRatingsEndOfEpoch - -func (vsp *ValidatorStatisticsProcessorStub) ProcessRatingsEndOfEpoch(validatorInfos map[uint32][]*state.ValidatorInfo, epoch uint32) error { - if vsp.ProcessRatingsEndOfEpochCalled != nil { - return vsp.ProcessRatingsEndOfEpochCalled(validatorInfos, epoch) - } - return nil -} - -// ResetValidatorStatisticsAtNewEpoch - -func (vsp *ValidatorStatisticsProcessorStub) ResetValidatorStatisticsAtNewEpoch(vInfos map[uint32][]*state.ValidatorInfo) error { - if vsp.ResetValidatorStatisticsAtNewEpochCalled != nil { - return vsp.ResetValidatorStatisticsAtNewEpochCalled(vInfos) - } - return nil -} - -// GetValidatorInfoForRootHash - -func (vsp *ValidatorStatisticsProcessorStub) GetValidatorInfoForRootHash(rootHash []byte) (map[uint32][]*state.ValidatorInfo, error) { - if vsp.GetValidatorInfoForRootHashCalled != nil { - return vsp.GetValidatorInfoForRootHashCalled(rootHash) - } - return nil, nil -} - -// UpdatePeerState - -func (vsp *ValidatorStatisticsProcessorStub) UpdatePeerState(header data.MetaHeaderHandler, _ map[string]data.HeaderHandler) ([]byte, error) { - if vsp.UpdatePeerStateCalled != nil { - return vsp.UpdatePeerStateCalled(header) - } - return nil, nil -} - -// RevertPeerState - -func (vsp *ValidatorStatisticsProcessorStub) RevertPeerState(header data.MetaHeaderHandler) error { - if vsp.RevertPeerStateCalled != nil { - return vsp.RevertPeerStateCalled(header) - } - return nil -} - -// RootHash - -func (vsp *ValidatorStatisticsProcessorStub) RootHash() ([]byte, error) { - if vsp.RootHashCalled != nil { - return vsp.RootHashCalled() - } - return nil, nil -} - -// GetExistingPeerAccount - -func (vsp *ValidatorStatisticsProcessorStub) GetExistingPeerAccount(address []byte) (state.PeerAccountHandler, error) { - if vsp.GetPeerAccountCalled != nil { - return vsp.GetPeerAccountCalled(address) - } - - return nil, nil -} - -// DisplayRatings - -func (vsp *ValidatorStatisticsProcessorStub) DisplayRatings(_ uint32) { -} - -// SetLastFinalizedRootHash - -func (vsp *ValidatorStatisticsProcessorStub) SetLastFinalizedRootHash(_ []byte) { -} - -// LastFinalizedRootHash - -func (vsp *ValidatorStatisticsProcessorStub) LastFinalizedRootHash() []byte { - return nil -} - -// IsInterfaceNil - -func (vsp *ValidatorStatisticsProcessorStub) IsInterfaceNil() bool { - return false -} diff --git a/integrationTests/mock/validatorsProviderStub.go b/integrationTests/mock/validatorsProviderStub.go deleted file mode 100644 index 98ea652340b..00000000000 --- a/integrationTests/mock/validatorsProviderStub.go +++ /dev/null @@ -1,28 +0,0 @@ -package mock - -import ( - "github.com/multiversx/mx-chain-core-go/data/validator" -) - -// ValidatorsProviderStub - -type ValidatorsProviderStub struct { - GetLatestValidatorsCalled func() map[string]*validator.ValidatorStatistics -} - -// GetLatestValidators - -func (vp *ValidatorsProviderStub) GetLatestValidators() map[string]*validator.ValidatorStatistics { - if vp.GetLatestValidatorsCalled != nil { - return vp.GetLatestValidatorsCalled() - } - return nil -} - -// Close - -func (vp *ValidatorsProviderStub) Close() error { - return nil -} - -// IsInterfaceNil - -func (vp *ValidatorsProviderStub) IsInterfaceNil() bool { - return vp == nil -} diff --git a/integrationTests/multiShard/block/executingMiniblocks/executingMiniblocks_test.go b/integrationTests/multiShard/block/executingMiniblocks/executingMiniblocks_test.go index cf104b736db..eec61878296 100644 --- 
a/integrationTests/multiShard/block/executingMiniblocks/executingMiniblocks_test.go +++ b/integrationTests/multiShard/block/executingMiniblocks/executingMiniblocks_test.go @@ -5,7 +5,6 @@ import ( "encoding/hex" "fmt" "math/big" - "sync" "testing" "time" @@ -14,13 +13,14 @@ import ( "github.com/multiversx/mx-chain-core-go/data/block" "github.com/multiversx/mx-chain-core-go/data/transaction" "github.com/multiversx/mx-chain-crypto-go" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/multiversx/mx-chain-go/dataRetriever" "github.com/multiversx/mx-chain-go/integrationTests" "github.com/multiversx/mx-chain-go/process/factory" "github.com/multiversx/mx-chain-go/sharding" "github.com/multiversx/mx-chain-go/state" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" ) func TestShouldProcessBlocksInMultiShardArchitecture(t *testing.T) { @@ -61,15 +61,15 @@ func TestShouldProcessBlocksInMultiShardArchitecture(t *testing.T) { proposerNode := nodes[0] - //sender shard keys, receivers keys + // sender shard keys, receivers keys sendersPrivateKeys := make([]crypto.PrivateKey, 3) receiversPublicKeys := make(map[uint32][]crypto.PublicKey) for i := 0; i < txToGenerateInEachMiniBlock; i++ { sendersPrivateKeys[i], _, _ = integrationTests.GenerateSkAndPkInShard(generateCoordinator, senderShard) - //receivers in same shard with the sender + // receivers in same shard with the sender _, pk, _ := integrationTests.GenerateSkAndPkInShard(generateCoordinator, senderShard) receiversPublicKeys[senderShard] = append(receiversPublicKeys[senderShard], pk) - //receivers in other shards + // receivers in other shards for _, shardId := range recvShards { _, pk, _ = integrationTests.GenerateSkAndPkInShard(generateCoordinator, shardId) receiversPublicKeys[shardId] = append(receiversPublicKeys[shardId], pk) @@ -111,13 +111,13 @@ func TestShouldProcessBlocksInMultiShardArchitecture(t *testing.T) { continue } - //test sender balances + // test sender balances for _, sk := range sendersPrivateKeys { valTransferred := big.NewInt(0).Mul(totalValuePerTx, big.NewInt(int64(len(receiversPublicKeys)))) valRemaining := big.NewInt(0).Sub(valMinting, valTransferred) integrationTests.TestPrivateKeyHasBalance(t, n, sk, valRemaining) } - //test receiver balances from same shard + // test receiver balances from same shard for _, pk := range receiversPublicKeys[proposerNode.ShardCoordinator.SelfId()] { integrationTests.TestPublicKeyHasBalance(t, n, pk, valToTransferPerTx) } @@ -136,7 +136,7 @@ func TestShouldProcessBlocksInMultiShardArchitecture(t *testing.T) { continue } - //test receiver balances from same shard + // test receiver balances from same shard for _, pk := range receiversPublicKeys[n.ShardCoordinator.SelfId()] { integrationTests.TestPublicKeyHasBalance(t, n, pk, valToTransferPerTx) } @@ -352,87 +352,6 @@ func TestSimpleTransactionsWithMoreValueThanBalanceYieldReceiptsInMultiShardedEn } } -func TestExecuteBlocksWithGapsBetweenBlocks(t *testing.T) { - //TODO fix this test - t.Skip("TODO fix this test") - if testing.Short() { - t.Skip("this is not a short test") - } - nodesPerShard := 2 - shardConsensusGroupSize := 2 - nbMetaNodes := 400 - nbShards := 1 - consensusGroupSize := 400 - - cacheMut := &sync.Mutex{} - - putCounter := 0 - cacheMap := make(map[string]interface{}) - - // create map of shard - testNodeProcessors for metachain and shard chain - nodesMap := integrationTests.CreateNodesWithNodesCoordinatorWithCacher( - nodesPerShard, - nbMetaNodes, - nbShards, 
- shardConsensusGroupSize, - consensusGroupSize, - ) - - roundsPerEpoch := uint64(1000) - maxGasLimitPerBlock := uint64(100000) - gasPrice := uint64(10) - gasLimit := uint64(100) - for _, nodes := range nodesMap { - integrationTests.SetEconomicsParameters(nodes, maxGasLimitPerBlock, gasPrice, gasLimit) - integrationTests.DisplayAndStartNodes(nodes[0:1]) - - for _, node := range nodes { - node.EpochStartTrigger.SetRoundsPerEpoch(roundsPerEpoch) - } - } - - defer func() { - for _, nodes := range nodesMap { - for _, n := range nodes { - n.Close() - } - } - }() - - round := uint64(1) - roundDifference := 10 - nonce := uint64(1) - - firstNodeOnMeta := nodesMap[core.MetachainShardId][0] - body, header, _ := firstNodeOnMeta.ProposeBlock(round, nonce) - - // set bitmap for all consensus nodes signing - bitmap := make([]byte, consensusGroupSize/8+1) - for i := range bitmap { - bitmap[i] = 0xFF - } - - bitmap[consensusGroupSize/8] >>= uint8(8 - (consensusGroupSize % 8)) - err := header.SetPubKeysBitmap(bitmap) - assert.Nil(t, err) - - firstNodeOnMeta.CommitBlock(body, header) - - round += uint64(roundDifference) - nonce++ - putCounter = 0 - - cacheMut.Lock() - for k := range cacheMap { - delete(cacheMap, k) - } - cacheMut.Unlock() - - firstNodeOnMeta.ProposeBlock(round, nonce) - - assert.Equal(t, roundDifference, putCounter) -} - // TestShouldSubtractTheCorrectTxFee uses the mock VM as it's gas model is predictable // The test checks the tx fee subtraction from the sender account when deploying a SC // It also checks the fee obtained by the leader is correct diff --git a/integrationTests/multiShard/endOfEpoch/epochStartChangeWithContinuousTransactionsInMultiShardedEnvironment/epochStartChangeWithContinuousTransactionsInMultiShardedEnvironment_test.go b/integrationTests/multiShard/endOfEpoch/epochStartChangeWithContinuousTransactionsInMultiShardedEnvironment/epochStartChangeWithContinuousTransactionsInMultiShardedEnvironment_test.go index d89abd3aae5..dd964aeb745 100644 --- a/integrationTests/multiShard/endOfEpoch/epochStartChangeWithContinuousTransactionsInMultiShardedEnvironment/epochStartChangeWithContinuousTransactionsInMultiShardedEnvironment_test.go +++ b/integrationTests/multiShard/endOfEpoch/epochStartChangeWithContinuousTransactionsInMultiShardedEnvironment/epochStartChangeWithContinuousTransactionsInMultiShardedEnvironment_test.go @@ -23,6 +23,9 @@ func TestEpochStartChangeWithContinuousTransactionsInMultiShardedEnvironment(t * StakingV2EnableEpoch: integrationTests.UnreachableEpoch, ScheduledMiniBlocksEnableEpoch: integrationTests.UnreachableEpoch, MiniBlockPartialExecutionEnableEpoch: integrationTests.UnreachableEpoch, + StakingV4Step1EnableEpoch: integrationTests.UnreachableEpoch, + StakingV4Step2EnableEpoch: integrationTests.UnreachableEpoch, + StakingV4Step3EnableEpoch: integrationTests.UnreachableEpoch, } nodes := integrationTests.CreateNodesWithEnableEpochs( diff --git a/integrationTests/multiShard/endOfEpoch/epochStartChangeWithoutTransactionInMultiShardedEnvironment/epochStartChangeWithoutTransactionInMultiShardedEnvironment_test.go b/integrationTests/multiShard/endOfEpoch/epochStartChangeWithoutTransactionInMultiShardedEnvironment/epochStartChangeWithoutTransactionInMultiShardedEnvironment_test.go index b7b658e4ca2..d14eb086de6 100644 --- a/integrationTests/multiShard/endOfEpoch/epochStartChangeWithoutTransactionInMultiShardedEnvironment/epochStartChangeWithoutTransactionInMultiShardedEnvironment_test.go +++ 
b/integrationTests/multiShard/endOfEpoch/epochStartChangeWithoutTransactionInMultiShardedEnvironment/epochStartChangeWithoutTransactionInMultiShardedEnvironment_test.go @@ -22,6 +22,9 @@ func TestEpochStartChangeWithoutTransactionInMultiShardedEnvironment(t *testing. StakingV2EnableEpoch: integrationTests.UnreachableEpoch, ScheduledMiniBlocksEnableEpoch: integrationTests.UnreachableEpoch, MiniBlockPartialExecutionEnableEpoch: integrationTests.UnreachableEpoch, + StakingV4Step1EnableEpoch: integrationTests.UnreachableEpoch, + StakingV4Step2EnableEpoch: integrationTests.UnreachableEpoch, + StakingV4Step3EnableEpoch: integrationTests.UnreachableEpoch, } nodes := integrationTests.CreateNodesWithEnableEpochs( diff --git a/integrationTests/multiShard/endOfEpoch/startInEpoch/startInEpoch_test.go b/integrationTests/multiShard/endOfEpoch/startInEpoch/startInEpoch_test.go index 8ce1b1a72ec..ce933a22666 100644 --- a/integrationTests/multiShard/endOfEpoch/startInEpoch/startInEpoch_test.go +++ b/integrationTests/multiShard/endOfEpoch/startInEpoch/startInEpoch_test.go @@ -33,6 +33,8 @@ import ( "github.com/multiversx/mx-chain-go/testscommon" epochNotifierMock "github.com/multiversx/mx-chain-go/testscommon/epochNotifier" "github.com/multiversx/mx-chain-go/testscommon/genericMocks" + "github.com/multiversx/mx-chain-go/testscommon/genesisMocks" + "github.com/multiversx/mx-chain-go/testscommon/marshallerMock" "github.com/multiversx/mx-chain-go/testscommon/nodeTypeProviderMock" "github.com/multiversx/mx-chain-go/testscommon/p2pmocks" "github.com/multiversx/mx-chain-go/testscommon/scheduledDataSyncer" @@ -67,6 +69,9 @@ func testNodeStartsInEpoch(t *testing.T, shardID uint32, expectedHighestRound ui ScheduledMiniBlocksEnableEpoch: integrationTests.UnreachableEpoch, MiniBlockPartialExecutionEnableEpoch: integrationTests.UnreachableEpoch, RefactorPeersMiniBlocksEnableEpoch: integrationTests.UnreachableEpoch, + StakingV4Step1EnableEpoch: integrationTests.UnreachableEpoch, + StakingV4Step2EnableEpoch: integrationTests.UnreachableEpoch, + StakingV4Step3EnableEpoch: integrationTests.UnreachableEpoch, } nodes := integrationTests.CreateNodesWithEnableEpochs( @@ -149,7 +154,7 @@ func testNodeStartsInEpoch(t *testing.T, shardID uint32, expectedHighestRound ui pksBytes := integrationTests.CreatePkBytes(uint32(numOfShards)) address := []byte("afafafafafafafafafafafafafafafaf") - nodesConfig := &mock.NodesSetupStub{ + nodesConfig := &genesisMocks.NodesSetupStub{ InitialNodesInfoCalled: func() (m map[uint32][]nodesCoordinator.GenesisNodeInfoHandler, m2 map[uint32][]nodesCoordinator.GenesisNodeInfoHandler) { oneMap := make(map[uint32][]nodesCoordinator.GenesisNodeInfoHandler) for i := uint32(0); i < uint32(numOfShards); i++ { @@ -181,7 +186,6 @@ func testNodeStartsInEpoch(t *testing.T, shardID uint32, expectedHighestRound ui return integrationTests.MinTransactionVersion }, } - defer func() { errRemoveDir := os.RemoveAll("Epoch_0") assert.NoError(t, errRemoveDir) @@ -217,7 +221,7 @@ func testNodeStartsInEpoch(t *testing.T, shardID uint32, expectedHighestRound ui cryptoComponents.BlKeyGen = &mock.KeyGenMock{} cryptoComponents.TxKeyGen = &mock.KeyGenMock{} - coreComponents := integrationTests.GetDefaultCoreComponents() + coreComponents := integrationTests.GetDefaultCoreComponents(integrationTests.CreateEnableEpochsConfig()) coreComponents.InternalMarshalizerField = integrationTests.TestMarshalizer coreComponents.TxMarshalizerField = integrationTests.TestMarshalizer coreComponents.HasherField = integrationTests.TestHasher @@ 
-231,12 +235,17 @@ func testNodeStartsInEpoch(t *testing.T, shardID uint32, expectedHighestRound ui coreComponents.ChanStopNodeProcessField = endProcess.GetDummyEndProcessChannel() coreComponents.HardforkTriggerPubKeyField = []byte("provided hardfork pub key") + nodesCoordinatorRegistryFactory, _ := nodesCoordinator.NewNodesCoordinatorRegistryFactory( + &marshallerMock.MarshalizerMock{}, + 444, + ) argsBootstrapHandler := bootstrap.ArgsEpochStartBootstrap{ - CryptoComponentsHolder: cryptoComponents, - CoreComponentsHolder: coreComponents, - MainMessenger: nodeToJoinLate.MainMessenger, - FullArchiveMessenger: nodeToJoinLate.FullArchiveMessenger, - GeneralConfig: generalConfig, + NodesCoordinatorRegistryFactory: nodesCoordinatorRegistryFactory, + CryptoComponentsHolder: cryptoComponents, + CoreComponentsHolder: coreComponents, + MainMessenger: nodeToJoinLate.MainMessenger, + FullArchiveMessenger: nodeToJoinLate.FullArchiveMessenger, + GeneralConfig: generalConfig, PrefsConfig: config.PreferencesConfig{ FullArchive: false, }, diff --git a/integrationTests/multiShard/hardFork/hardFork_test.go b/integrationTests/multiShard/hardFork/hardFork_test.go index 6b11b95a439..6686aa5b5c2 100644 --- a/integrationTests/multiShard/hardFork/hardFork_test.go +++ b/integrationTests/multiShard/hardFork/hardFork_test.go @@ -21,6 +21,7 @@ import ( "github.com/multiversx/mx-chain-go/integrationTests/vm/wasm" vmFactory "github.com/multiversx/mx-chain-go/process/factory" "github.com/multiversx/mx-chain-go/state" + "github.com/multiversx/mx-chain-go/testscommon" commonMocks "github.com/multiversx/mx-chain-go/testscommon/common" "github.com/multiversx/mx-chain-go/testscommon/cryptoMocks" "github.com/multiversx/mx-chain-go/testscommon/dblookupext" @@ -387,7 +388,7 @@ func hardForkImport( defaults.FillGasMapInternal(gasSchedule, 1) log.Warn("started import process") - coreComponents := integrationTests.GetDefaultCoreComponents() + coreComponents := integrationTests.GetDefaultCoreComponents(integrationTests.CreateEnableEpochsConfig()) coreComponents.InternalMarshalizerField = integrationTests.TestMarshalizer coreComponents.TxMarshalizerField = integrationTests.TestMarshalizer coreComponents.HasherField = integrationTests.TestHasher @@ -406,8 +407,6 @@ func hardForkImport( dataComponents.DataPool = node.DataPool dataComponents.BlockChain = node.BlockChain - roundConfig := integrationTests.GetDefaultRoundsConfig() - argsGenesis := process.ArgsGenesisBlockCreator{ GenesisTime: 0, StartEpochNum: 100, @@ -465,6 +464,8 @@ func hardForkImport( MaxNumberOfNodesForStake: 100, ActivateBLSPubKeyMessageVerification: false, MinUnstakeTokensValue: "1", + StakeLimitPercentage: 100.0, + NodeLimitPercentage: 100.0, }, DelegationManagerSystemSCConfig: config.DelegationManagerSystemSCConfig{ MinCreationDeposit: "100", @@ -475,11 +476,17 @@ func hardForkImport( MinServiceFee: 0, MaxServiceFee: 100, }, + SoftAuctionConfig: config.SoftAuctionConfig{ + TopUpStep: "10", + MinTopUp: "1", + MaxTopUp: "32000000", + MaxNumberOfIterations: 100000, + }, }, AccountsParser: &genesisMocks.AccountsParserStub{}, SmartContractParser: &mock.SmartContractParserStub{}, BlockSignKeyGen: &mock.KeyGenMock{}, - EpochConfig: &config.EpochConfig{ + EpochConfig: config.EpochConfig{ EnableEpochs: config.EnableEpochs{ BuiltInFunctionsEnableEpoch: 0, SCDeployEnableEpoch: 0, @@ -491,7 +498,8 @@ func hardForkImport( DelegationSmartContractEnableEpoch: 0, }, }, - RoundConfig: &roundConfig, + RoundConfig: testscommon.GetDefaultRoundsConfig(), + HeaderVersionConfigs: 
testscommon.GetDefaultHeaderVersionConfig(), HistoryRepository: &dblookupext.HistoryRepositoryStub{}, TxExecutionOrderHandler: &commonMocks.TxExecutionOrderHandlerStub{}, } @@ -559,7 +567,7 @@ func createHardForkExporter( returnedConfigs[node.ShardCoordinator.SelfId()] = append(returnedConfigs[node.ShardCoordinator.SelfId()], exportConfig) returnedConfigs[node.ShardCoordinator.SelfId()] = append(returnedConfigs[node.ShardCoordinator.SelfId()], keysConfig) - coreComponents := integrationTests.GetDefaultCoreComponents() + coreComponents := integrationTests.GetDefaultCoreComponents(integrationTests.CreateEnableEpochsConfig()) coreComponents.InternalMarshalizerField = integrationTests.TestMarshalizer coreComponents.TxMarshalizerField = integrationTests.TestTxSignMarshalizer coreComponents.HasherField = integrationTests.TestHasher diff --git a/integrationTests/multiShard/smartContract/polynetworkbridge/bridge_test.go b/integrationTests/multiShard/smartContract/polynetworkbridge/bridge_test.go index d01f900d5e2..e09c0fe12c2 100644 --- a/integrationTests/multiShard/smartContract/polynetworkbridge/bridge_test.go +++ b/integrationTests/multiShard/smartContract/polynetworkbridge/bridge_test.go @@ -29,7 +29,6 @@ func TestBridgeSetupAndBurn(t *testing.T) { enableEpochs := config.EnableEpochs{ GlobalMintBurnDisableEpoch: integrationTests.UnreachableEpoch, - BuiltInFunctionOnMetaEnableEpoch: integrationTests.UnreachableEpoch, SCProcessorV2EnableEpoch: integrationTests.UnreachableEpoch, FixAsyncCallBackArgsListEnableEpoch: integrationTests.UnreachableEpoch, } diff --git a/integrationTests/multiShard/softfork/scDeploy_test.go b/integrationTests/multiShard/softfork/scDeploy_test.go index dc735b26abd..8af125f5797 100644 --- a/integrationTests/multiShard/softfork/scDeploy_test.go +++ b/integrationTests/multiShard/softfork/scDeploy_test.go @@ -26,7 +26,6 @@ func TestScDeploy(t *testing.T) { t.Skip("this is not a short test") } - builtinEnableEpoch := uint32(0) deployEnableEpoch := uint32(1) relayedTxEnableEpoch := uint32(0) penalizedTooMuchGasEnableEpoch := uint32(0) @@ -34,11 +33,13 @@ func TestScDeploy(t *testing.T) { scProcessorV2EnableEpoch := integrationTests.UnreachableEpoch enableEpochs := integrationTests.CreateEnableEpochsConfig() - enableEpochs.BuiltInFunctionOnMetaEnableEpoch = builtinEnableEpoch enableEpochs.SCDeployEnableEpoch = deployEnableEpoch enableEpochs.RelayedTransactionsEnableEpoch = relayedTxEnableEpoch enableEpochs.PenalizedTooMuchGasEnableEpoch = penalizedTooMuchGasEnableEpoch enableEpochs.SCProcessorV2EnableEpoch = scProcessorV2EnableEpoch + enableEpochs.StakingV4Step1EnableEpoch = integrationTests.StakingV4Step1EnableEpoch + enableEpochs.StakingV4Step2EnableEpoch = integrationTests.StakingV4Step2EnableEpoch + enableEpochs.StakingV4Step3EnableEpoch = integrationTests.StakingV4Step3EnableEpoch shardNode := integrationTests.NewTestProcessorNode(integrationTests.ArgTestProcessorNode{ MaxShards: 1, diff --git a/integrationTests/node/getAccount/getAccount_test.go b/integrationTests/node/getAccount/getAccount_test.go index 388ef74c5a3..16fa37909c3 100644 --- a/integrationTests/node/getAccount/getAccount_test.go +++ b/integrationTests/node/getAccount/getAccount_test.go @@ -37,7 +37,7 @@ func TestNode_GetAccountAccountDoesNotExistsShouldRetEmpty(t *testing.T) { accDB, _ := integrationTests.CreateAccountsDB(0, trieStorage) rootHash, _ := accDB.Commit() - coreComponents := integrationTests.GetDefaultCoreComponents() + coreComponents := 
integrationTests.GetDefaultCoreComponents(integrationTests.CreateEnableEpochsConfig()) coreComponents.AddressPubKeyConverterField = integrationTests.TestAddressPubkeyConverter dataComponents := integrationTests.GetDefaultDataComponents() @@ -77,7 +77,7 @@ func TestNode_GetAccountAccountExistsShouldReturn(t *testing.T) { testPubkey := integrationTests.CreateAccount(accDB, testNonce, testBalance) rootHash, _ := accDB.Commit() - coreComponents := integrationTests.GetDefaultCoreComponents() + coreComponents := integrationTests.GetDefaultCoreComponents(integrationTests.CreateEnableEpochsConfig()) coreComponents.AddressPubKeyConverterField = testscommon.RealWorldBech32PubkeyConverter dataComponents := integrationTests.GetDefaultDataComponents() diff --git a/integrationTests/nodesCoordinatorFactory.go b/integrationTests/nodesCoordinatorFactory.go index 08bf6f0f3dd..28267d44c5a 100644 --- a/integrationTests/nodesCoordinatorFactory.go +++ b/integrationTests/nodesCoordinatorFactory.go @@ -11,8 +11,8 @@ import ( "github.com/multiversx/mx-chain-go/sharding" "github.com/multiversx/mx-chain-go/sharding/nodesCoordinator" "github.com/multiversx/mx-chain-go/storage" - "github.com/multiversx/mx-chain-go/testscommon" "github.com/multiversx/mx-chain-go/testscommon/enableEpochsHandlerMock" + "github.com/multiversx/mx-chain-go/testscommon/genesisMocks" "github.com/multiversx/mx-chain-go/testscommon/nodeTypeProviderMock" vic "github.com/multiversx/mx-chain-go/testscommon/validatorInfoCacher" ) @@ -54,7 +54,12 @@ func (tpn *IndexHashedNodesCoordinatorFactory) CreateNodesCoordinator(arg ArgInd MaxNodesEnableConfig: nil, EnableEpochsHandler: &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, } + nodeShuffler, _ := nodesCoordinator.NewHashValidatorsShuffler(nodeShufflerArgs) + nodesCoordinatorRegistryFactory, _ := nodesCoordinator.NewNodesCoordinatorRegistryFactory( + TestMarshalizer, + StakingV4Step2EnableEpoch, + ) argumentsNodesCoordinator := nodesCoordinator.ArgNodesCoordinator{ ShardConsensusGroupSize: arg.shardConsensusGroupSize, MetaConsensusGroupSize: arg.metaConsensusGroupSize, @@ -75,14 +80,15 @@ func (tpn *IndexHashedNodesCoordinatorFactory) CreateNodesCoordinator(arg ArgInd IsFullArchive: false, EnableEpochsHandler: &enableEpochsHandlerMock.EnableEpochsHandlerStub{ GetActivationEpochCalled: func(flag core.EnableEpochFlag) uint32 { - if flag == common.RefactorPeersMiniBlocksFlag { + if flag == common.RefactorPeersMiniBlocksFlag || flag == common.StakingV4Step2Flag { return UnreachableEpoch } return 0 }, }, - ValidatorInfoCacher: &vic.ValidatorInfoCacherStub{}, - GenesisNodesSetupHandler: &testscommon.NodesSetupStub{}, + ValidatorInfoCacher: &vic.ValidatorInfoCacherStub{}, + GenesisNodesSetupHandler: &genesisMocks.NodesSetupStub{}, + NodesCoordinatorRegistryFactory: nodesCoordinatorRegistryFactory, } nodesCoord, err := nodesCoordinator.NewIndexHashedNodesCoordinator(argumentsNodesCoordinator) if err != nil { @@ -114,7 +120,12 @@ func (ihncrf *IndexHashedNodesCoordinatorWithRaterFactory) CreateNodesCoordinato MaxNodesEnableConfig: nil, EnableEpochsHandler: &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, } + nodeShuffler, _ := nodesCoordinator.NewHashValidatorsShuffler(shufflerArgs) + nodesCoordinatorRegistryFactory, _ := nodesCoordinator.NewNodesCoordinatorRegistryFactory( + TestMarshalizer, + StakingV4Step2EnableEpoch, + ) argumentsNodesCoordinator := nodesCoordinator.ArgNodesCoordinator{ ShardConsensusGroupSize: arg.shardConsensusGroupSize, MetaConsensusGroupSize: arg.metaConsensusGroupSize, @@ 
-141,8 +152,9 @@ func (ihncrf *IndexHashedNodesCoordinatorWithRaterFactory) CreateNodesCoordinato return 0 }, }, - ValidatorInfoCacher: &vic.ValidatorInfoCacherStub{}, - GenesisNodesSetupHandler: &testscommon.NodesSetupStub{}, + ValidatorInfoCacher: &vic.ValidatorInfoCacherStub{}, + GenesisNodesSetupHandler: &genesisMocks.NodesSetupStub{}, + NodesCoordinatorRegistryFactory: nodesCoordinatorRegistryFactory, } baseCoordinator, err := nodesCoordinator.NewIndexHashedNodesCoordinator(argumentsNodesCoordinator) diff --git a/integrationTests/oneNodeNetwork.go b/integrationTests/oneNodeNetwork.go deleted file mode 100644 index 720ff0529c6..00000000000 --- a/integrationTests/oneNodeNetwork.go +++ /dev/null @@ -1,70 +0,0 @@ -package integrationTests - -import ( - "math/big" - "testing" - - "github.com/multiversx/mx-chain-core-go/core" - "github.com/multiversx/mx-chain-core-go/data/transaction" - "github.com/multiversx/mx-chain-go/process" -) - -type oneNodeNetwork struct { - Round uint64 - Nonce uint64 - - Node *TestProcessorNode -} - -// NewOneNodeNetwork creates a one-node network, useful for some integration tests -func NewOneNodeNetwork() *oneNodeNetwork { - n := &oneNodeNetwork{} - - nodes := CreateNodes( - 1, - 1, - 0, - ) - - n.Node = nodes[0] - return n -} - -// Stop stops the test network -func (n *oneNodeNetwork) Stop() { - n.Node.Close() -} - -// Mint mints the given address -func (n *oneNodeNetwork) Mint(address []byte, value *big.Int) { - MintAddress(n.Node.AccntState, address, value) -} - -// GetMinGasPrice returns the min gas price -func (n *oneNodeNetwork) GetMinGasPrice() uint64 { - return n.Node.EconomicsData.GetMinGasPrice() -} - -// MaxGasLimitPerBlock returns the max gas per block -func (n *oneNodeNetwork) MaxGasLimitPerBlock() uint64 { - return n.Node.EconomicsData.MaxGasLimitPerBlock(0) - 1 -} - -// GoToRoundOne advances processing to block and round 1 -func (n *oneNodeNetwork) GoToRoundOne() { - n.Round = IncrementAndPrintRound(n.Round) - n.Nonce++ -} - -// Continue advances processing with a number of rounds -func (n *oneNodeNetwork) Continue(t *testing.T, numRounds int) { - n.Nonce, n.Round = WaitOperationToBeDone(t, []*TestProcessorNode{n.Node}, numRounds, n.Nonce, n.Round, []int{0}) -} - -// AddTxToPool adds a transaction to the pool (skips signature checks and interceptors) -func (n *oneNodeNetwork) AddTxToPool(tx *transaction.Transaction) { - txHash, _ := core.CalculateHash(TestMarshalizer, TestHasher, tx) - sourceShard := n.Node.ShardCoordinator.ComputeId(tx.SndAddr) - cacheIdentifier := process.ShardCacherIdentifier(sourceShard, sourceShard) - n.Node.DataPool.Transactions().AddData(txHash, tx, tx.Size(), cacheIdentifier) -} diff --git a/integrationTests/realcomponents/processorRunner.go b/integrationTests/realcomponents/processorRunner.go index 6881284899b..f788de20f84 100644 --- a/integrationTests/realcomponents/processorRunner.go +++ b/integrationTests/realcomponents/processorRunner.go @@ -304,6 +304,7 @@ func (pr *ProcessorRunner) createStatusComponents(tb testing.TB) { pr.CoreComponents.NodeTypeProvider(), pr.CoreComponents.EnableEpochsHandler(), pr.DataComponents.Datapool().CurrentEpochValidatorInfo(), + pr.BootstrapComponents.NodesCoordinatorRegistryFactory(), ) require.Nil(tb, err) @@ -406,6 +407,7 @@ func (pr *ProcessorRunner) createProcessComponents(tb testing.TB) { argsProcess := factoryProcessing.ProcessComponentsFactoryArgs{ Config: *pr.Config.GeneralConfig, EpochConfig: *pr.Config.EpochConfig, + RoundConfig: *pr.Config.RoundConfig, PrefConfigs: 
*pr.Config.PreferencesConfig, ImportDBConfig: *pr.Config.ImportDbConfig, FlagsConfig: config.ContextFlagsConfig{ diff --git a/integrationTests/realcomponents/processorRunner_test.go b/integrationTests/realcomponents/processorRunner_test.go index 55951b63831..78d0013597e 100644 --- a/integrationTests/realcomponents/processorRunner_test.go +++ b/integrationTests/realcomponents/processorRunner_test.go @@ -4,6 +4,7 @@ import ( "testing" "github.com/multiversx/mx-chain-go/testscommon" + "github.com/stretchr/testify/require" ) func TestNewProcessorRunnerAndClose(t *testing.T) { @@ -11,7 +12,9 @@ func TestNewProcessorRunnerAndClose(t *testing.T) { t.Skip("this is not a short test") } - cfg := testscommon.CreateTestConfigs(t, "../../cmd/node/config") + cfg, err := testscommon.CreateTestConfigs(t.TempDir(), "../../cmd/node/config") + require.Nil(t, err) + pr := NewProcessorRunner(t, *cfg) pr.Close(t) } diff --git a/integrationTests/realcomponents/txsimulator/componentConstruction_test.go b/integrationTests/realcomponents/txsimulator/componentConstruction_test.go index 7aa899e5afa..fe162c5a2d5 100644 --- a/integrationTests/realcomponents/txsimulator/componentConstruction_test.go +++ b/integrationTests/realcomponents/txsimulator/componentConstruction_test.go @@ -23,7 +23,9 @@ func TestTransactionSimulationComponentConstructionOnMetachain(t *testing.T) { t.Skip("this is not a short test") } - cfg := testscommon.CreateTestConfigs(t, "../../../cmd/node/config") + cfg, err := testscommon.CreateTestConfigs(t.TempDir(), "../../../cmd/node/config") + require.Nil(t, err) + cfg.EpochConfig.EnableEpochs.ESDTEnableEpoch = 0 cfg.EpochConfig.EnableEpochs.BuiltInFunctionsEnableEpoch = 0 cfg.PreferencesConfig.Preferences.DestinationShardAsObserver = "metachain" // the problem was only on the metachain @@ -72,7 +74,9 @@ func TestTransactionSimulationComponentConstructionOnShard(t *testing.T) { t.Skip("this is not a short test") } - cfg := testscommon.CreateTestConfigs(t, "../../../cmd/node/config") + cfg, err := testscommon.CreateTestConfigs(t.TempDir(), "../../../cmd/node/config") + require.Nil(t, err) + cfg.EpochConfig.EnableEpochs.SCDeployEnableEpoch = 0 cfg.PreferencesConfig.Preferences.DestinationShardAsObserver = "0" cfg.GeneralConfig.VirtualMachine.Execution.WasmVMVersions = []config.WasmVMVersionByEpoch{ @@ -98,7 +102,7 @@ func TestTransactionSimulationComponentConstructionOnShard(t *testing.T) { // deploy the contract txDeploy, hash := pr.CreateDeploySCTx(t, alice, "../testdata/adder/adder.wasm", 3000000, []string{"01"}) - err := pr.ExecuteTransactionAsScheduled(t, txDeploy) + err = pr.ExecuteTransactionAsScheduled(t, txDeploy) require.Nil(t, err) // get the contract address from logs diff --git a/integrationTests/state/stateTrie/stateTrie_test.go b/integrationTests/state/stateTrie/stateTrie_test.go index 3b8ab5a722a..0ac4b86c7ee 100644 --- a/integrationTests/state/stateTrie/stateTrie_test.go +++ b/integrationTests/state/stateTrie/stateTrie_test.go @@ -2,6 +2,7 @@ package stateTrie import ( "bytes" + "context" "encoding/base64" "encoding/binary" "encoding/hex" @@ -24,12 +25,14 @@ import ( "github.com/multiversx/mx-chain-core-go/hashing/sha256" crypto "github.com/multiversx/mx-chain-crypto-go" "github.com/multiversx/mx-chain-go/common" + "github.com/multiversx/mx-chain-go/common/errChan" "github.com/multiversx/mx-chain-go/common/statistics" "github.com/multiversx/mx-chain-go/config" "github.com/multiversx/mx-chain-go/dataRetriever" "github.com/multiversx/mx-chain-go/epochStart" 
"github.com/multiversx/mx-chain-go/integrationTests" "github.com/multiversx/mx-chain-go/integrationTests/mock" + esdtCommon "github.com/multiversx/mx-chain-go/integrationTests/vm/esdt" "github.com/multiversx/mx-chain-go/sharding" "github.com/multiversx/mx-chain-go/state" "github.com/multiversx/mx-chain-go/state/factory" @@ -219,15 +222,15 @@ func TestAccountsDB_CommitTwoOkAccountsShouldWork(t *testing.T) { acc, err := adb.LoadAccount(adr2) require.Nil(t, err) - stateMock := acc.(state.UserAccountHandler) - _ = stateMock.AddToBalance(balance2) + userAccount := acc.(state.UserAccountHandler) + _ = userAccount.AddToBalance(balance2) key := []byte("ABC") val := []byte("123") - _ = stateMock.SaveKeyValue(key, val) + _ = userAccount.SaveKeyValue(key, val) _ = adb.SaveAccount(state1) - _ = adb.SaveAccount(stateMock) + _ = adb.SaveAccount(userAccount) // states are now prepared, committing @@ -308,15 +311,15 @@ func TestAccountsDB_CommitTwoOkAccountsWithRecreationFromStorageShouldWork(t *te acc, err := adb.LoadAccount(adr2) require.Nil(t, err) - stateMock := acc.(state.UserAccountHandler) - _ = stateMock.AddToBalance(balance2) + userAccount := acc.(state.UserAccountHandler) + _ = userAccount.AddToBalance(balance2) key := []byte("ABC") val := []byte("123") - _ = stateMock.SaveKeyValue(key, val) + _ = userAccount.SaveKeyValue(key, val) _ = adb.SaveAccount(state1) - _ = adb.SaveAccount(stateMock) + _ = adb.SaveAccount(userAccount) // states are now prepared, committing @@ -449,9 +452,9 @@ func TestAccountsDB_RevertNonceStepByStepAccountDataShouldWork(t *testing.T) { fmt.Printf("State root - created 1-st account: %v\n", hrCreated1) - stateMock, err := adb.LoadAccount(adr2) + userAccount, err := adb.LoadAccount(adr2) require.Nil(t, err) - _ = adb.SaveAccount(stateMock) + _ = adb.SaveAccount(userAccount) snapshotCreated2 := adb.JournalLen() rootHash, err = adb.RootHash() require.Nil(t, err) @@ -475,8 +478,8 @@ func TestAccountsDB_RevertNonceStepByStepAccountDataShouldWork(t *testing.T) { hrWithNonce1 := base64.StdEncoding.EncodeToString(rootHash) fmt.Printf("State root - account with nonce 40: %v\n", hrWithNonce1) - stateMock.(state.UserAccountHandler).IncreaseNonce(50) - _ = adb.SaveAccount(stateMock) + userAccount.(state.UserAccountHandler).IncreaseNonce(50) + _ = adb.SaveAccount(userAccount) rootHash, err = adb.RootHash() require.Nil(t, err) @@ -526,9 +529,9 @@ func TestAccountsDB_RevertBalanceStepByStepAccountDataShouldWork(t *testing.T) { fmt.Printf("State root - created 1-st account: %v\n", hrCreated1) - stateMock, err := adb.LoadAccount(adr2) + userAccount, err := adb.LoadAccount(adr2) require.Nil(t, err) - _ = adb.SaveAccount(stateMock) + _ = adb.SaveAccount(userAccount) snapshotCreated2 := adb.JournalLen() rootHash, err = adb.RootHash() @@ -553,8 +556,8 @@ func TestAccountsDB_RevertBalanceStepByStepAccountDataShouldWork(t *testing.T) { hrWithBalance1 := base64.StdEncoding.EncodeToString(rootHash) fmt.Printf("State root - account with balance 40: %v\n", hrWithBalance1) - _ = stateMock.(state.UserAccountHandler).AddToBalance(big.NewInt(50)) - _ = adb.SaveAccount(stateMock) + _ = userAccount.(state.UserAccountHandler).AddToBalance(big.NewInt(50)) + _ = adb.SaveAccount(userAccount) rootHash, err = adb.RootHash() require.Nil(t, err) @@ -607,10 +610,10 @@ func TestAccountsDB_RevertCodeStepByStepAccountDataShouldWork(t *testing.T) { fmt.Printf("State root - created 1-st account: %v\n", hrCreated1) - stateMock, err := adb.LoadAccount(adr2) + userAccount, err := adb.LoadAccount(adr2) require.Nil(t, err) 
- stateMock.(state.UserAccountHandler).SetCode(code) - _ = adb.SaveAccount(stateMock) + userAccount.(state.UserAccountHandler).SetCode(code) + _ = adb.SaveAccount(userAccount) snapshotCreated2 := adb.JournalLen() rootHash, err = adb.RootHash() @@ -682,10 +685,10 @@ func TestAccountsDB_RevertDataStepByStepAccountDataShouldWork(t *testing.T) { fmt.Printf("State root - created 1-st account: %v\n", hrCreated1) fmt.Printf("data root - 1-st account: %v\n", hrRoot1) - stateMock, err := adb.LoadAccount(adr2) + userAccount, err := adb.LoadAccount(adr2) require.Nil(t, err) - _ = stateMock.(state.UserAccountHandler).SaveKeyValue(key, val) - err = adb.SaveAccount(stateMock) + _ = userAccount.(state.UserAccountHandler).SaveKeyValue(key, val) + err = adb.SaveAccount(userAccount) require.Nil(t, err) snapshotCreated2 := adb.JournalLen() rootHash, err = adb.RootHash() @@ -761,16 +764,16 @@ func TestAccountsDB_RevertDataStepByStepWithCommitsAccountDataShouldWork(t *test fmt.Printf("State root - created 1-st account: %v\n", hrCreated1) fmt.Printf("data root - 1-st account: %v\n", hrRoot1) - stateMock, err := adb.LoadAccount(adr2) + userAccount, err := adb.LoadAccount(adr2) require.Nil(t, err) - _ = stateMock.(state.UserAccountHandler).SaveKeyValue(key, val) - err = adb.SaveAccount(stateMock) + _ = userAccount.(state.UserAccountHandler).SaveKeyValue(key, val) + err = adb.SaveAccount(userAccount) require.Nil(t, err) snapshotCreated2 := adb.JournalLen() rootHash, err = adb.RootHash() require.Nil(t, err) hrCreated2 := base64.StdEncoding.EncodeToString(rootHash) - rootHash, err = stateMock.(state.UserAccountHandler).DataTrie().RootHash() + rootHash, err = userAccount.(state.UserAccountHandler).DataTrie().RootHash() require.Nil(t, err) hrRoot2 := base64.StdEncoding.EncodeToString(rootHash) @@ -792,15 +795,15 @@ func TestAccountsDB_RevertDataStepByStepWithCommitsAccountDataShouldWork(t *test // Step 4. 
2-nd account changes its data snapshotMod := adb.JournalLen() - stateMock, err = adb.LoadAccount(adr2) + userAccount, err = adb.LoadAccount(adr2) require.Nil(t, err) - _ = stateMock.(state.UserAccountHandler).SaveKeyValue(key, newVal) - err = adb.SaveAccount(stateMock) + _ = userAccount.(state.UserAccountHandler).SaveKeyValue(key, newVal) + err = adb.SaveAccount(userAccount) require.Nil(t, err) rootHash, err = adb.RootHash() require.Nil(t, err) hrCreated2p1 := base64.StdEncoding.EncodeToString(rootHash) - rootHash, err = stateMock.(state.UserAccountHandler).DataTrie().RootHash() + rootHash, err = userAccount.(state.UserAccountHandler).DataTrie().RootHash() require.Nil(t, err) hrRoot2p1 := base64.StdEncoding.EncodeToString(rootHash) @@ -820,9 +823,9 @@ func TestAccountsDB_RevertDataStepByStepWithCommitsAccountDataShouldWork(t *test require.Nil(t, err) hrCreated2Rev := base64.StdEncoding.EncodeToString(rootHash) - stateMock, err = adb.LoadAccount(adr2) + userAccount, err = adb.LoadAccount(adr2) require.Nil(t, err) - rootHash, err = stateMock.(state.UserAccountHandler).DataTrie().RootHash() + rootHash, err = userAccount.(state.UserAccountHandler).DataTrie().RootHash() require.Nil(t, err) hrRoot2Rev := base64.StdEncoding.EncodeToString(rootHash) fmt.Printf("State root - reverted 2-nd account: %v\n", hrCreated2Rev) @@ -1246,17 +1249,17 @@ func TestTrieDbPruning_GetDataTrieTrackerAfterPruning(t *testing.T) { _ = adb.SaveAccount(state1) acc2, _ := adb.LoadAccount(address2) - stateMock := acc2.(state.UserAccountHandler) - _ = stateMock.SaveKeyValue(key1, value1) - _ = stateMock.SaveKeyValue(key2, value1) - _ = adb.SaveAccount(stateMock) + userAccount := acc2.(state.UserAccountHandler) + _ = userAccount.SaveKeyValue(key1, value1) + _ = userAccount.SaveKeyValue(key2, value1) + _ = adb.SaveAccount(userAccount) oldRootHash, _ := adb.Commit() acc2, _ = adb.LoadAccount(address2) - stateMock = acc2.(state.UserAccountHandler) - _ = stateMock.SaveKeyValue(key1, value2) - _ = adb.SaveAccount(stateMock) + userAccount = acc2.(state.UserAccountHandler) + _ = userAccount.SaveKeyValue(key1, value2) + _ = adb.SaveAccount(userAccount) newRootHash, _ := adb.Commit() adb.PruneTrie(oldRootHash, state.OldRoot, state.NewPruningHandler(state.EnableDataRemoval)) @@ -1268,13 +1271,13 @@ func TestTrieDbPruning_GetDataTrieTrackerAfterPruning(t *testing.T) { require.Nil(t, err) collapseTrie(state1, t) - collapseTrie(stateMock, t) + collapseTrie(userAccount, t) val, _, err := state1.RetrieveValue(key1) require.Nil(t, err) require.Equal(t, value1, val) - val, _, err = stateMock.RetrieveValue(key2) + val, _, err = userAccount.RetrieveValue(key2) require.Nil(t, err) require.Equal(t, value1, val) } @@ -2338,6 +2341,224 @@ func Test_SnapshotStateRemovesLastSnapshotStartedAfterSnapshotFinished(t *testin assert.NotNil(t, err) } +func TestMigrateDataTrieBuiltinFunc(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + + t.Run("migrate shard 0 system account", func(t *testing.T) { + shardId := byte(0) + nodes, idxProposers, nonce, round := startNodesAndIssueToken(t, 2, shardId) + defer func() { + for _, n := range nodes { + n.Close() + } + }() + + valuesBeforeMigration := getValuesFromAccount(t, nodes[shardId].AccntState, core.SystemAccountAddress) + migrateDataTrieBuiltInFunc(t, nodes, shardId, core.SystemAccountAddress, nonce, round, idxProposers) + valuesAfterMigration := getValuesFromAccount(t, nodes[shardId].AccntState, core.SystemAccountAddress) + + require.Equal(t, len(valuesBeforeMigration), 
len(valuesAfterMigration)) + require.True(t, len(valuesAfterMigration) > 0) + for i := range valuesBeforeMigration { + require.Equal(t, valuesBeforeMigration[i], valuesAfterMigration[i]) + } + }) + t.Run("migrate shard 0 user account", func(t *testing.T) { + shardId := byte(0) + nodes, idxProposers, nonce, round := startNodesAndIssueToken(t, 2, shardId) + defer func() { + for _, n := range nodes { + n.Close() + } + }() + + migrationAddress := nodes[shardId].OwnAccount.Address + valuesBeforeMigration := getValuesFromAccount(t, nodes[shardId].AccntState, migrationAddress) + migrateDataTrieBuiltInFunc(t, nodes, shardId, migrationAddress, nonce, round, idxProposers) + valuesAfterMigration := getValuesFromAccount(t, nodes[shardId].AccntState, migrationAddress) + + require.Equal(t, len(valuesBeforeMigration), len(valuesAfterMigration)) + require.True(t, len(valuesAfterMigration) > 0) + for i := range valuesBeforeMigration { + require.Equal(t, valuesBeforeMigration[i], valuesAfterMigration[i]) + } + }) + t.Run("migrate shard 1 system account", func(t *testing.T) { + shardId := byte(1) + nodes, idxProposers, nonce, round := startNodesAndIssueToken(t, 2, shardId) + defer func() { + for _, n := range nodes { + n.Close() + } + }() + + valuesBeforeMigration := getValuesFromAccount(t, nodes[shardId].AccntState, core.SystemAccountAddress) + migrateDataTrieBuiltInFunc(t, nodes, shardId, core.SystemAccountAddress, nonce, round, idxProposers) + valuesAfterMigration := getValuesFromAccount(t, nodes[shardId].AccntState, core.SystemAccountAddress) + + require.Equal(t, len(valuesBeforeMigration), len(valuesAfterMigration)) + require.True(t, len(valuesAfterMigration) > 0) + for i := range valuesBeforeMigration { + require.Equal(t, valuesBeforeMigration[i], valuesAfterMigration[i]) + } + }) + t.Run("migrate shard 1 user account", func(t *testing.T) { + shardId := byte(1) + nodes, idxProposers, nonce, round := startNodesAndIssueToken(t, 2, shardId) + defer func() { + for _, n := range nodes { + n.Close() + } + }() + + migrationAddress := nodes[shardId].OwnAccount.Address + valuesBeforeMigration := getValuesFromAccount(t, nodes[shardId].AccntState, migrationAddress) + migrateDataTrieBuiltInFunc(t, nodes, shardId, nodes[shardId].OwnAccount.Address, nonce, round, idxProposers) + valuesAfterMigration := getValuesFromAccount(t, nodes[shardId].AccntState, migrationAddress) + + require.Equal(t, len(valuesBeforeMigration), len(valuesAfterMigration)) + require.True(t, len(valuesAfterMigration) > 0) + for i := range valuesBeforeMigration { + require.Equal(t, valuesBeforeMigration[i], valuesAfterMigration[i]) + } + }) +} + +func getValuesFromAccount(t *testing.T, adb state.AccountsAdapter, address []byte) [][]byte { + account, err := adb.GetExistingAccount(address) + require.Nil(t, err) + + chLeaves := &common.TrieIteratorChannels{ + LeavesChan: make(chan core.KeyValueHolder, common.TrieLeavesChannelDefaultCapacity), + ErrChan: errChan.NewErrChanWrapper(), + } + err = account.(state.UserAccountHandler).GetAllLeaves(chLeaves, context.Background()) + require.Nil(t, err) + + values := make([][]byte, 0) + for leaf := range chLeaves.LeavesChan { + values = append(values, leaf.Value()) + } + + err = chLeaves.ErrChan.ReadFromChanNonBlocking() + require.Nil(t, err) + + return values +} + +func migrateDataTrieBuiltInFunc( + t *testing.T, + nodes []*integrationTests.TestProcessorNode, + shardId byte, + migrationAddress []byte, + nonce uint64, + round uint64, + idxProposers []int, +) { + require.True(t, 
nodes[shardId].EnableEpochsHandler.IsFlagEnabled(common.AutoBalanceDataTriesFlag)) + isMigrated := getAddressMigrationStatus(t, nodes[shardId].AccntState, migrationAddress) + require.False(t, isMigrated) + + integrationTests.CreateAndSendTransactionWithSenderAccount(nodes[shardId], nodes, big.NewInt(0), nodes[shardId].OwnAccount, getDestAccountAddress(migrationAddress, shardId), core.BuiltInFunctionMigrateDataTrie, 1000000) + + time.Sleep(time.Second) + nrRoundsToPropagate := 5 + _, _ = integrationTests.WaitOperationToBeDone(t, nodes, nrRoundsToPropagate, nonce, round, idxProposers) + + isMigrated = getAddressMigrationStatus(t, nodes[shardId].AccntState, migrationAddress) + require.True(t, isMigrated) +} + +func startNodesAndIssueToken( + t *testing.T, + numOfShards int, + issuerShardId byte, +) ([]*integrationTests.TestProcessorNode, []int, uint64, uint64) { + nodesPerShard := 1 + numMetachainNodes := 1 + + enableEpochs := config.EnableEpochs{ + GlobalMintBurnDisableEpoch: integrationTests.UnreachableEpoch, + OptimizeGasUsedInCrossMiniBlocksEnableEpoch: integrationTests.UnreachableEpoch, + ScheduledMiniBlocksEnableEpoch: integrationTests.UnreachableEpoch, + MiniBlockPartialExecutionEnableEpoch: integrationTests.UnreachableEpoch, + StakingV2EnableEpoch: integrationTests.UnreachableEpoch, + StakeLimitsEnableEpoch: integrationTests.UnreachableEpoch, + StakingV4Step1EnableEpoch: integrationTests.UnreachableEpoch, + StakingV4Step2EnableEpoch: integrationTests.UnreachableEpoch, + StakingV4Step3EnableEpoch: integrationTests.UnreachableEpoch, + AutoBalanceDataTriesEnableEpoch: 1, + } + nodes := integrationTests.CreateNodesWithEnableEpochs( + numOfShards, + nodesPerShard, + numMetachainNodes, + enableEpochs, + ) + + roundsPerEpoch := uint64(5) + for _, node := range nodes { + node.EpochStartTrigger.SetRoundsPerEpoch(roundsPerEpoch) + } + + idxProposers := make([]int, numOfShards+1) + for i := 0; i < numOfShards; i++ { + idxProposers[i] = i * nodesPerShard + } + idxProposers[numOfShards] = numOfShards * nodesPerShard + + integrationTests.DisplayAndStartNodes(nodes) + + initialVal := int64(10000000000) + integrationTests.MintAllNodes(nodes, big.NewInt(initialVal)) + + round := uint64(0) + nonce := uint64(0) + round = integrationTests.IncrementAndPrintRound(round) + nonce++ + + // send token issue + initialSupply := int64(10000000000) + ticker := "TCK" + esdtCommon.IssueTestTokenWithIssuerAccount(nodes, nodes[issuerShardId].OwnAccount, initialSupply, ticker) + + time.Sleep(time.Second) + nrRoundsToPropagate := 8 + nonce, round = integrationTests.WaitOperationToBeDone(t, nodes, nrRoundsToPropagate, nonce, round, idxProposers) + time.Sleep(time.Second) + + tokenIdentifier := string(integrationTests.GetTokenIdentifier(nodes, []byte(ticker))) + + esdtCommon.CheckAddressHasTokens(t, nodes[issuerShardId].OwnAccount.Address, nodes, []byte(tokenIdentifier), 0, initialSupply) + + return nodes, idxProposers, nonce, round +} + +func getDestAccountAddress(migrationAddress []byte, shardId byte) []byte { + if bytes.Equal(migrationAddress, core.SystemAccountAddress) && shardId == 0 { + systemAccountAddress := bytes.Repeat([]byte{255}, 30) + systemAccountAddress = append(systemAccountAddress, []byte{0, 0}...) 
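+		// The crafted destination keeps the system account's 0xFF prefix but ends in two zero bytes,
+		// presumably so the built-in function call resolves to shard 0 rather than the shard derived
+		// from the unmodified all-0xFF system account address.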
+ return systemAccountAddress + } + + return migrationAddress +} + +func getAddressMigrationStatus(t *testing.T, adb state.AccountsAdapter, address []byte) bool { + account, err := adb.LoadAccount(address) + require.Nil(t, err) + + userAccount, ok := account.(state.UserAccountHandler) + require.True(t, ok) + + isMigrated, err := userAccount.DataTrie().IsMigratedToLatestVersion() + require.Nil(t, err) + + return isMigrated +} + func addDataTriesForAccountsStartingWithIndex( startIndex uint32, nbAccounts uint32, diff --git a/integrationTests/testConsensusNode.go b/integrationTests/testConsensusNode.go index 1d9b2d505b0..5f5987b11cf 100644 --- a/integrationTests/testConsensusNode.go +++ b/integrationTests/testConsensusNode.go @@ -43,6 +43,7 @@ import ( dataRetrieverMock "github.com/multiversx/mx-chain-go/testscommon/dataRetriever" "github.com/multiversx/mx-chain-go/testscommon/enableEpochsHandlerMock" testFactory "github.com/multiversx/mx-chain-go/testscommon/factory" + "github.com/multiversx/mx-chain-go/testscommon/genesisMocks" "github.com/multiversx/mx-chain-go/testscommon/nodeTypeProviderMock" "github.com/multiversx/mx-chain-go/testscommon/p2pmocks" "github.com/multiversx/mx-chain-go/testscommon/shardingMocks" @@ -234,7 +235,7 @@ func (tcn *TestConsensusNode) initNode(args ArgsTestConsensusNode) { tcn.initAccountsDB() - coreComponents := GetDefaultCoreComponents() + coreComponents := GetDefaultCoreComponents(CreateEnableEpochsConfig()) coreComponents.SyncTimerField = syncer coreComponents.RoundHandlerField = roundHandler coreComponents.InternalMarshalizerField = TestMarshalizer @@ -244,7 +245,7 @@ func (tcn *TestConsensusNode) initNode(args ArgsTestConsensusNode) { return string(ChainID) } coreComponents.GenesisTimeField = time.Unix(args.StartTime, 0) - coreComponents.GenesisNodesSetupField = &testscommon.NodesSetupStub{ + coreComponents.GenesisNodesSetupField = &genesisMocks.NodesSetupStub{ GetShardConsensusGroupSizeCalled: func() uint32 { return uint32(args.ConsensusSize) }, @@ -320,6 +321,7 @@ func (tcn *TestConsensusNode) initNode(args ArgsTestConsensusNode) { processComponents.RoundHandlerField = roundHandler processComponents.ScheduledTxsExecutionHandlerInternal = &testscommon.ScheduledTxsExecutionStub{} processComponents.ProcessedMiniBlocksTrackerInternal = &testscommon.ProcessedMiniBlocksTrackerStub{} + processComponents.SentSignaturesTrackerInternal = &testscommon.SentSignatureTrackerStub{} dataComponents := GetDefaultDataComponents() dataComponents.BlockChain = tcn.ChainHandler @@ -366,26 +368,27 @@ func (tcn *TestConsensusNode) initNodesCoordinator( cache storage.Cacher, ) { argumentsNodesCoordinator := nodesCoordinator.ArgNodesCoordinator{ - ShardConsensusGroupSize: consensusSize, - MetaConsensusGroupSize: consensusSize, - Marshalizer: TestMarshalizer, - Hasher: hasher, - Shuffler: &shardingMocks.NodeShufflerMock{}, - EpochStartNotifier: epochStartRegistrationHandler, - BootStorer: CreateMemUnit(), - NbShards: maxShards, - EligibleNodes: eligibleMap, - WaitingNodes: waitingMap, - SelfPublicKey: pkBytes, - ConsensusGroupCache: cache, - ShuffledOutHandler: &chainShardingMocks.ShuffledOutHandlerStub{}, - ChanStopNode: endProcess.GetDummyEndProcessChannel(), - NodeTypeProvider: &nodeTypeProviderMock.NodeTypeProviderStub{}, - IsFullArchive: false, - EnableEpochsHandler: &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, - ValidatorInfoCacher: &vic.ValidatorInfoCacherStub{}, - ShardIDAsObserver: tcn.ShardCoordinator.SelfId(), - GenesisNodesSetupHandler: 
&testscommon.NodesSetupStub{}, + ShardConsensusGroupSize: consensusSize, + MetaConsensusGroupSize: consensusSize, + Marshalizer: TestMarshalizer, + Hasher: hasher, + Shuffler: &shardingMocks.NodeShufflerMock{}, + EpochStartNotifier: epochStartRegistrationHandler, + BootStorer: CreateMemUnit(), + NbShards: maxShards, + EligibleNodes: eligibleMap, + WaitingNodes: waitingMap, + SelfPublicKey: pkBytes, + ConsensusGroupCache: cache, + ShuffledOutHandler: &chainShardingMocks.ShuffledOutHandlerStub{}, + ChanStopNode: endProcess.GetDummyEndProcessChannel(), + NodeTypeProvider: &nodeTypeProviderMock.NodeTypeProviderStub{}, + IsFullArchive: false, + EnableEpochsHandler: &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, + ValidatorInfoCacher: &vic.ValidatorInfoCacherStub{}, + ShardIDAsObserver: tcn.ShardCoordinator.SelfId(), + GenesisNodesSetupHandler: &genesisMocks.NodesSetupStub{}, + NodesCoordinatorRegistryFactory: &shardingMocks.NodesCoordinatorRegistryFactoryMock{}, } tcn.NodesCoordinator, _ = nodesCoordinator.NewIndexHashedNodesCoordinator(argumentsNodesCoordinator) diff --git a/integrationTests/testHeartbeatNode.go b/integrationTests/testHeartbeatNode.go index 25ab4a21e6e..1ba488b9e12 100644 --- a/integrationTests/testHeartbeatNode.go +++ b/integrationTests/testHeartbeatNode.go @@ -29,7 +29,6 @@ import ( "github.com/multiversx/mx-chain-go/dataRetriever/factory/resolverscontainer" "github.com/multiversx/mx-chain-go/dataRetriever/requestHandlers" "github.com/multiversx/mx-chain-go/epochStart/notifier" - "github.com/multiversx/mx-chain-go/heartbeat/monitor" "github.com/multiversx/mx-chain-go/heartbeat/processor" "github.com/multiversx/mx-chain-go/heartbeat/sender" "github.com/multiversx/mx-chain-go/integrationTests/mock" @@ -52,6 +51,7 @@ import ( "github.com/multiversx/mx-chain-go/testscommon/cryptoMocks" dataRetrieverMock "github.com/multiversx/mx-chain-go/testscommon/dataRetriever" "github.com/multiversx/mx-chain-go/testscommon/enableEpochsHandlerMock" + "github.com/multiversx/mx-chain-go/testscommon/genesisMocks" "github.com/multiversx/mx-chain-go/testscommon/nodeTypeProviderMock" "github.com/multiversx/mx-chain-go/testscommon/p2pmocks" "github.com/multiversx/mx-chain-go/testscommon/shardingMocks" @@ -349,27 +349,28 @@ func CreateNodesWithTestHeartbeatNode( suCache, _ := storageunit.NewCache(cacherCfg) for shardId, validatorList := range validatorsMap { argumentsNodesCoordinator := nodesCoordinator.ArgNodesCoordinator{ - ShardConsensusGroupSize: shardConsensusGroupSize, - MetaConsensusGroupSize: metaConsensusGroupSize, - Marshalizer: TestMarshalizer, - Hasher: TestHasher, - ShardIDAsObserver: shardId, - NbShards: uint32(numShards), - EligibleNodes: validatorsForNodesCoordinator, - SelfPublicKey: []byte(strconv.Itoa(int(shardId))), - ConsensusGroupCache: suCache, - Shuffler: &shardingMocks.NodeShufflerMock{}, - BootStorer: CreateMemUnit(), - WaitingNodes: make(map[uint32][]nodesCoordinator.Validator), - Epoch: 0, - EpochStartNotifier: notifier.NewEpochStartSubscriptionHandler(), - ShuffledOutHandler: &mock.ShuffledOutHandlerStub{}, - ChanStopNode: endProcess.GetDummyEndProcessChannel(), - NodeTypeProvider: &nodeTypeProviderMock.NodeTypeProviderStub{}, - IsFullArchive: false, - EnableEpochsHandler: &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, - ValidatorInfoCacher: &vic.ValidatorInfoCacherStub{}, - GenesisNodesSetupHandler: &testscommon.NodesSetupStub{}, + ShardConsensusGroupSize: shardConsensusGroupSize, + MetaConsensusGroupSize: metaConsensusGroupSize, + Marshalizer: TestMarshalizer, 
+ Hasher: TestHasher, + ShardIDAsObserver: shardId, + NbShards: uint32(numShards), + EligibleNodes: validatorsForNodesCoordinator, + SelfPublicKey: []byte(strconv.Itoa(int(shardId))), + ConsensusGroupCache: suCache, + Shuffler: &shardingMocks.NodeShufflerMock{}, + BootStorer: CreateMemUnit(), + WaitingNodes: make(map[uint32][]nodesCoordinator.Validator), + Epoch: 0, + EpochStartNotifier: notifier.NewEpochStartSubscriptionHandler(), + ShuffledOutHandler: &mock.ShuffledOutHandlerStub{}, + ChanStopNode: endProcess.GetDummyEndProcessChannel(), + NodeTypeProvider: &nodeTypeProviderMock.NodeTypeProviderStub{}, + IsFullArchive: false, + EnableEpochsHandler: &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, + ValidatorInfoCacher: &vic.ValidatorInfoCacherStub{}, + GenesisNodesSetupHandler: &genesisMocks.NodesSetupStub{}, + NodesCoordinatorRegistryFactory: &shardingMocks.NodesCoordinatorRegistryFactoryMock{}, } nodesCoordinatorInstance, err := nodesCoordinator.NewIndexHashedNodesCoordinator(argumentsNodesCoordinator) log.LogIfError(err) @@ -396,27 +397,28 @@ func CreateNodesWithTestHeartbeatNode( } argumentsNodesCoordinator := nodesCoordinator.ArgNodesCoordinator{ - ShardConsensusGroupSize: shardConsensusGroupSize, - MetaConsensusGroupSize: metaConsensusGroupSize, - Marshalizer: TestMarshalizer, - Hasher: TestHasher, - ShardIDAsObserver: shardId, - NbShards: uint32(numShards), - EligibleNodes: validatorsForNodesCoordinator, - SelfPublicKey: []byte(strconv.Itoa(int(shardId))), - ConsensusGroupCache: suCache, - Shuffler: &shardingMocks.NodeShufflerMock{}, - BootStorer: CreateMemUnit(), - WaitingNodes: make(map[uint32][]nodesCoordinator.Validator), - Epoch: 0, - EpochStartNotifier: notifier.NewEpochStartSubscriptionHandler(), - ShuffledOutHandler: &mock.ShuffledOutHandlerStub{}, - ChanStopNode: endProcess.GetDummyEndProcessChannel(), - NodeTypeProvider: &nodeTypeProviderMock.NodeTypeProviderStub{}, - IsFullArchive: false, - EnableEpochsHandler: &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, - ValidatorInfoCacher: &vic.ValidatorInfoCacherStub{}, - GenesisNodesSetupHandler: &testscommon.NodesSetupStub{}, + ShardConsensusGroupSize: shardConsensusGroupSize, + MetaConsensusGroupSize: metaConsensusGroupSize, + Marshalizer: TestMarshalizer, + Hasher: TestHasher, + ShardIDAsObserver: shardId, + NbShards: uint32(numShards), + EligibleNodes: validatorsForNodesCoordinator, + SelfPublicKey: []byte(strconv.Itoa(int(shardId))), + ConsensusGroupCache: suCache, + Shuffler: &shardingMocks.NodeShufflerMock{}, + BootStorer: CreateMemUnit(), + WaitingNodes: make(map[uint32][]nodesCoordinator.Validator), + Epoch: 0, + EpochStartNotifier: notifier.NewEpochStartSubscriptionHandler(), + ShuffledOutHandler: &mock.ShuffledOutHandlerStub{}, + ChanStopNode: endProcess.GetDummyEndProcessChannel(), + NodeTypeProvider: &nodeTypeProviderMock.NodeTypeProviderStub{}, + IsFullArchive: false, + EnableEpochsHandler: &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, + ValidatorInfoCacher: &vic.ValidatorInfoCacherStub{}, + GenesisNodesSetupHandler: &genesisMocks.NodesSetupStub{}, + NodesCoordinatorRegistryFactory: &shardingMocks.NodesCoordinatorRegistryFactoryMock{}, } nodesCoordinatorInstance, err := nodesCoordinator.NewIndexHashedNodesCoordinator(argumentsNodesCoordinator) log.LogIfError(err) @@ -447,7 +449,6 @@ func (thn *TestHeartbeatNode) InitTestHeartbeatNode(tb testing.TB, minPeersWaiti thn.initResolversAndRequesters() thn.initInterceptors() thn.initShardSender(tb) - thn.initCrossShardPeerTopicNotifier(tb) 
thn.initDirectConnectionProcessor(tb) for len(thn.MainMessenger.Peers()) < minPeersWaiting { @@ -527,13 +528,14 @@ func (thn *TestHeartbeatNode) initResolversAndRequesters() { return &trieMock.TrieStub{} }, }, - SizeCheckDelta: 100, - InputAntifloodHandler: &mock.NilAntifloodHandler{}, - OutputAntifloodHandler: &mock.NilAntifloodHandler{}, - NumConcurrentResolvingJobs: 10, - MainPreferredPeersHolder: &p2pmocks.PeersHolderStub{}, - FullArchivePreferredPeersHolder: &p2pmocks.PeersHolderStub{}, - PayloadValidator: payloadValidator, + SizeCheckDelta: 100, + InputAntifloodHandler: &mock.NilAntifloodHandler{}, + OutputAntifloodHandler: &mock.NilAntifloodHandler{}, + NumConcurrentResolvingJobs: 10, + NumConcurrentResolvingTrieNodesJobs: 3, + MainPreferredPeersHolder: &p2pmocks.PeersHolderStub{}, + FullArchivePreferredPeersHolder: &p2pmocks.PeersHolderStub{}, + PayloadValidator: payloadValidator, } requestersContainerFactoryArgs := requesterscontainer.FactoryArgs{ @@ -793,29 +795,6 @@ func (thn *TestHeartbeatNode) initDirectConnectionProcessor(tb testing.TB) { require.Nil(tb, err) } -func (thn *TestHeartbeatNode) initCrossShardPeerTopicNotifier(tb testing.TB) { - argsCrossShardPeerTopicNotifier := monitor.ArgsCrossShardPeerTopicNotifier{ - ShardCoordinator: thn.ShardCoordinator, - PeerShardMapper: thn.MainPeerShardMapper, - } - crossShardPeerTopicNotifier, err := monitor.NewCrossShardPeerTopicNotifier(argsCrossShardPeerTopicNotifier) - require.Nil(tb, err) - - err = thn.MainMessenger.AddPeerTopicNotifier(crossShardPeerTopicNotifier) - require.Nil(tb, err) - - argsCrossShardPeerTopicNotifier = monitor.ArgsCrossShardPeerTopicNotifier{ - ShardCoordinator: thn.ShardCoordinator, - PeerShardMapper: thn.FullArchivePeerShardMapper, - } - fullArchiveCrossShardPeerTopicNotifier, err := monitor.NewCrossShardPeerTopicNotifier(argsCrossShardPeerTopicNotifier) - require.Nil(tb, err) - - err = thn.FullArchiveMessenger.AddPeerTopicNotifier(fullArchiveCrossShardPeerTopicNotifier) - require.Nil(tb, err) - -} - // ConnectOnMain will try to initiate a connection to the provided parameter on the main messenger func (thn *TestHeartbeatNode) ConnectOnMain(connectable Connectable) error { if check.IfNil(connectable) { @@ -861,13 +840,19 @@ func MakeDisplayTableForHeartbeatNodes(nodes map[uint32][]*TestHeartbeatNode) st for _, n := range nodesList { buffPk, _ := n.NodeKeys.MainKey.Pk.ToByteArray() + validatorMarker := "" + v, _, _ := n.NodesCoordinator.GetValidatorWithPublicKey(buffPk) + if v != nil { + validatorMarker = "*" + } + peerInfo := n.MainMessenger.GetConnectedPeersInfo() pid := n.MainMessenger.ID().Pretty() lineData := display.NewLineData( false, []string{ - core.GetTrimmedPk(hex.EncodeToString(buffPk)), + core.GetTrimmedPk(hex.EncodeToString(buffPk)) + validatorMarker, pid[len(pid)-6:], fmt.Sprintf("%d", shardId), fmt.Sprintf("%d", n.CountGlobalMessages()), diff --git a/integrationTests/testInitializer.go b/integrationTests/testInitializer.go index 98173eaac0c..76a826e70aa 100644 --- a/integrationTests/testInitializer.go +++ b/integrationTests/testInitializer.go @@ -69,6 +69,7 @@ import ( "github.com/multiversx/mx-chain-go/testscommon/guardianMocks" "github.com/multiversx/mx-chain-go/testscommon/marshallerMock" "github.com/multiversx/mx-chain-go/testscommon/p2pmocks" + "github.com/multiversx/mx-chain-go/testscommon/stakingcommon" testStorage "github.com/multiversx/mx-chain-go/testscommon/state" statusHandlerMock "github.com/multiversx/mx-chain-go/testscommon/statusHandler" testcommonStorage 
"github.com/multiversx/mx-chain-go/testscommon/storage" @@ -109,7 +110,6 @@ const ( adaptivity = false hysteresis = float32(0.2) maxTrieLevelInMemory = uint(5) - delegationManagementKey = "delegationManagement" delegationContractsList = "delegationContracts" ) @@ -648,7 +648,7 @@ func CreateFullGenesisBlocks( gasSchedule := wasmConfig.MakeGasMapForTests() defaults.FillGasMapInternal(gasSchedule, 1) - coreComponents := GetDefaultCoreComponents() + coreComponents := GetDefaultCoreComponents(enableEpochsConfig) coreComponents.InternalMarshalizerField = TestMarshalizer coreComponents.TxMarshalizerField = TestTxSignMarshalizer coreComponents.HasherField = TestHasher @@ -666,8 +666,6 @@ func CreateFullGenesisBlocks( dataComponents.DataPool = dataPool dataComponents.BlockChain = blkc - roundsConfig := GetDefaultRoundsConfig() - argsGenesis := genesisProcess.ArgsGenesisBlockCreator{ Core: coreComponents, Data: dataComponents, @@ -716,6 +714,8 @@ func CreateFullGenesisBlocks( MaxNumberOfNodesForStake: 100, ActivateBLSPubKeyMessageVerification: false, MinUnstakeTokensValue: "1", + StakeLimitPercentage: 100.0, + NodeLimitPercentage: 100.0, }, DelegationManagerSystemSCConfig: config.DelegationManagerSystemSCConfig{ MinCreationDeposit: "100", @@ -726,14 +726,21 @@ func CreateFullGenesisBlocks( MinServiceFee: 0, MaxServiceFee: 100, }, + SoftAuctionConfig: config.SoftAuctionConfig{ + TopUpStep: "10", + MinTopUp: "1", + MaxTopUp: "32000000", + MaxNumberOfIterations: 100000, + }, }, AccountsParser: accountsParser, SmartContractParser: smartContractParser, BlockSignKeyGen: &mock.KeyGenMock{}, - EpochConfig: &config.EpochConfig{ + EpochConfig: config.EpochConfig{ EnableEpochs: enableEpochsConfig, }, - RoundConfig: &roundsConfig, + RoundConfig: testscommon.GetDefaultRoundsConfig(), + HeaderVersionConfigs: testscommon.GetDefaultHeaderVersionConfig(), HistoryRepository: &dblookupext.HistoryRepositoryStub{}, TxExecutionOrderHandler: &commonMocks.TxExecutionOrderHandlerStub{}, } @@ -764,7 +771,7 @@ func CreateGenesisMetaBlock( gasSchedule := wasmConfig.MakeGasMapForTests() defaults.FillGasMapInternal(gasSchedule, 1) - coreComponents := GetDefaultCoreComponents() + coreComponents := GetDefaultCoreComponents(enableEpochsConfig) coreComponents.InternalMarshalizerField = marshalizer coreComponents.HasherField = hasher coreComponents.Uint64ByteSliceConverterField = uint64Converter @@ -823,6 +830,8 @@ func CreateGenesisMetaBlock( MaxNumberOfNodesForStake: 100, ActivateBLSPubKeyMessageVerification: false, MinUnstakeTokensValue: "1", + StakeLimitPercentage: 100.0, + NodeLimitPercentage: 100.0, }, DelegationManagerSystemSCConfig: config.DelegationManagerSystemSCConfig{ MinCreationDeposit: "100", @@ -833,12 +842,20 @@ func CreateGenesisMetaBlock( MinServiceFee: 0, MaxServiceFee: 100, }, + SoftAuctionConfig: config.SoftAuctionConfig{ + TopUpStep: "10", + MinTopUp: "1", + MaxTopUp: "32000000", + MaxNumberOfIterations: 100000, + }, }, BlockSignKeyGen: &mock.KeyGenMock{}, GenesisNodePrice: big.NewInt(1000), - EpochConfig: &config.EpochConfig{ + EpochConfig: config.EpochConfig{ EnableEpochs: enableEpochsConfig, }, + RoundConfig: testscommon.GetDefaultRoundsConfig(), + HeaderVersionConfigs: testscommon.GetDefaultHeaderVersionConfig(), HistoryRepository: &dblookupext.HistoryRepositoryStub{}, TxExecutionOrderHandler: &commonMocks.TxExecutionOrderHandlerStub{}, } @@ -1380,7 +1397,7 @@ func CreateNodesWithEnableEpochsAndVmConfig( nodesPerShard, numMetaChainNodes, epochConfig, - GetDefaultRoundsConfig(), + 
testscommon.GetDefaultRoundsConfig(), vmConfig, ) } @@ -1520,6 +1537,9 @@ func CreateNodesWithFullGenesis( ) ([]*TestProcessorNode, *TestProcessorNode) { enableEpochsConfig := GetDefaultEnableEpochsConfig() enableEpochsConfig.StakingV2EnableEpoch = UnreachableEpoch + enableEpochsConfig.StakingV4Step1EnableEpoch = UnreachableEpoch + enableEpochsConfig.StakingV4Step2EnableEpoch = UnreachableEpoch + enableEpochsConfig.StakingV4Step3EnableEpoch = UnreachableEpoch return CreateNodesWithFullGenesisCustomEnableEpochs(numOfShards, nodesPerShard, numMetaChainNodes, genesisFile, enableEpochsConfig) } @@ -2167,7 +2187,7 @@ func generateValidTx( _ = accnts.SaveAccount(acc) _, _ = accnts.Commit() - coreComponents := GetDefaultCoreComponents() + coreComponents := GetDefaultCoreComponents(CreateEnableEpochsConfig()) coreComponents.InternalMarshalizerField = TestMarshalizer coreComponents.TxMarshalizerField = TestTxSignMarshalizer coreComponents.VmMarshalizerField = TestMarshalizer @@ -2608,18 +2628,7 @@ func SaveDelegationManagerConfig(nodes []*TestProcessorNode) { continue } - acc, _ := n.AccntState.LoadAccount(vm.DelegationManagerSCAddress) - userAcc, _ := acc.(state.UserAccountHandler) - - managementData := &systemSmartContracts.DelegationManagement{ - MinDeposit: big.NewInt(100), - LastAddress: vm.FirstDelegationSCAddress, - MinDelegationAmount: big.NewInt(1), - } - marshaledData, _ := TestMarshalizer.Marshal(managementData) - _ = userAcc.SaveKeyValue([]byte(delegationManagementKey), marshaledData) - _ = n.AccntState.SaveAccount(userAcc) - _, _ = n.AccntState.Commit() + stakingcommon.SaveDelegationManagerConfig(n.AccntState, TestMarshalizer) } } diff --git a/integrationTests/testNetwork.go b/integrationTests/testNetwork.go index e22222d41a7..a08b3aa85c7 100644 --- a/integrationTests/testNetwork.go +++ b/integrationTests/testNetwork.go @@ -34,7 +34,7 @@ type GasScheduleMap = map[string]map[string]uint64 // TestNetwork wraps a set of TestProcessorNodes along with a set of test // Wallets, instantiates them, controls them and provides operations with them; // designed to be used in integration tests. 
-// TODO combine TestNetwork with the preexisting TestContext and OneNodeNetwork +// TODO combine TestNetwork with the preexisting TestContext and MiniNetwork // into a single struct containing the functionality of all three type TestNetwork struct { NumShards int diff --git a/integrationTests/testProcessorNode.go b/integrationTests/testProcessorNode.go index 5b59fedb896..b52cc3585a8 100644 --- a/integrationTests/testProcessorNode.go +++ b/integrationTests/testProcessorNode.go @@ -115,6 +115,7 @@ import ( "github.com/multiversx/mx-chain-go/testscommon/outport" "github.com/multiversx/mx-chain-go/testscommon/p2pmocks" "github.com/multiversx/mx-chain-go/testscommon/shardingMocks" + "github.com/multiversx/mx-chain-go/testscommon/stakingcommon" stateMock "github.com/multiversx/mx-chain-go/testscommon/state" statusHandlerMock "github.com/multiversx/mx-chain-go/testscommon/statusHandler" storageStubs "github.com/multiversx/mx-chain-go/testscommon/storage" @@ -222,6 +223,18 @@ const sizeCheckDelta = 100 // UnreachableEpoch defines an unreachable epoch for integration tests const UnreachableEpoch = uint32(1000000) +// StakingV4Step1EnableEpoch defines the epoch for integration tests when stakingV4 init is enabled +const StakingV4Step1EnableEpoch = 4443 + +// StakingV4Step2EnableEpoch defines the epoch for integration tests when stakingV4 is enabled; should be greater than StakingV2Epoch +const StakingV4Step2EnableEpoch = 4444 + +// StakingV4Step3EnableEpoch defines the epoch for integration tests when nodes distribution from auction to waiting list is enabled in staking v4 +const StakingV4Step3EnableEpoch = 4445 + +// ScheduledMiniBlocksEnableEpoch defines the epoch for integration tests when scheduled mini blocks are enabled +const ScheduledMiniBlocksEnableEpoch = 1000 + // TestSingleSigner defines a Ed25519Signer var TestSingleSigner = &ed25519SingleSig.Ed25519Signer{} @@ -483,7 +496,7 @@ func newBaseTestProcessorNode(args ArgTestProcessorNode) *TestProcessorNode { } if args.RoundsConfig == nil { - defaultRoundsConfig := GetDefaultRoundsConfig() + defaultRoundsConfig := testscommon.GetDefaultRoundsConfig() args.RoundsConfig = &defaultRoundsConfig } genericRoundNotifier := forking.NewGenericRoundNotifier() @@ -653,7 +666,7 @@ func (tpn *TestProcessorNode) initValidatorStatistics() { rater, _ := rating.NewBlockSigningRater(tpn.RatingsData) if check.IfNil(tpn.NodesSetup) { - tpn.NodesSetup = &mock.NodesSetupStub{ + tpn.NodesSetup = &genesisMocks.NodesSetupStub{ MinNumberOfNodesCalled: func() uint32 { return tpn.ShardCoordinator.NumberOfShards() * 2 }, @@ -942,6 +955,8 @@ func (tpn *TestProcessorNode) createFullSCQueryService(gasMap map[string]map[str MaxNumberOfNodesForStake: 100, ActivateBLSPubKeyMessageVerification: false, MinUnstakeTokensValue: "1", + StakeLimitPercentage: 100.0, + NodeLimitPercentage: 100.0, }, DelegationManagerSystemSCConfig: config.DelegationManagerSystemSCConfig{ MinCreationDeposit: "100", @@ -952,12 +967,19 @@ func (tpn *TestProcessorNode) createFullSCQueryService(gasMap map[string]map[str MinServiceFee: 0, MaxServiceFee: 100000, }, + SoftAuctionConfig: config.SoftAuctionConfig{ + TopUpStep: "10", + MinTopUp: "1", + MaxTopUp: "32000000", + MaxNumberOfIterations: 100000, + }, }, ValidatorAccountsDB: tpn.PeerState, UserAccountsDB: tpn.AccntState, ChanceComputer: tpn.NodesCoordinator, ShardCoordinator: tpn.ShardCoordinator, EnableEpochsHandler: tpn.EnableEpochsHandler, + NodesCoordinator: tpn.NodesCoordinator, } tpn.EpochNotifier.CheckEpoch(&testscommon.HeaderHandlerStub{
EpochField: tpn.EnableEpochs.DelegationSmartContractEnableEpoch, @@ -1081,11 +1103,10 @@ func (tpn *TestProcessorNode) initChainHandler() { func (tpn *TestProcessorNode) initEconomicsData(economicsConfig *config.EconomicsConfig) { tpn.EnableEpochs.PenalizedTooMuchGasEnableEpoch = 0 argsNewEconomicsData := economics.ArgsNewEconomicsData{ - Economics: economicsConfig, - EpochNotifier: tpn.EpochNotifier, - EnableEpochsHandler: tpn.EnableEpochsHandler, - BuiltInFunctionsCostHandler: &mock.BuiltInCostHandlerStub{}, - TxVersionChecker: &testscommon.TxVersionCheckerStub{}, + Economics: economicsConfig, + EpochNotifier: tpn.EpochNotifier, + EnableEpochsHandler: tpn.EnableEpochsHandler, + TxVersionChecker: &testscommon.TxVersionCheckerStub{}, } economicsData, _ := economics.NewEconomicsData(argsNewEconomicsData) tpn.EconomicsData = economics.NewTestEconomicsData(economicsData) @@ -1240,7 +1261,7 @@ func (tpn *TestProcessorNode) initInterceptors(heartbeatPk string) { tpn.EpochStartNotifier = notifier.NewEpochStartSubscriptionHandler() } - coreComponents := GetDefaultCoreComponents() + coreComponents := GetDefaultCoreComponents(CreateEnableEpochsConfig()) coreComponents.InternalMarshalizerField = TestMarshalizer coreComponents.TxMarshalizerField = TestTxSignMarshalizer coreComponents.HasherField = TestHasher @@ -1434,22 +1455,23 @@ func (tpn *TestProcessorNode) initResolvers() { fullArchivePreferredPeersHolder, _ := p2pFactory.NewPeersHolder([]string{}) resolverContainerFactory := resolverscontainer.FactoryArgs{ - ShardCoordinator: tpn.ShardCoordinator, - MainMessenger: tpn.MainMessenger, - FullArchiveMessenger: tpn.FullArchiveMessenger, - Store: tpn.Storage, - Marshalizer: TestMarshalizer, - DataPools: tpn.DataPool, - Uint64ByteSliceConverter: TestUint64Converter, - DataPacker: dataPacker, - TriesContainer: tpn.TrieContainer, - SizeCheckDelta: 100, - InputAntifloodHandler: &mock.NilAntifloodHandler{}, - OutputAntifloodHandler: &mock.NilAntifloodHandler{}, - NumConcurrentResolvingJobs: 10, - MainPreferredPeersHolder: preferredPeersHolder, - FullArchivePreferredPeersHolder: fullArchivePreferredPeersHolder, - PayloadValidator: payloadValidator, + ShardCoordinator: tpn.ShardCoordinator, + MainMessenger: tpn.MainMessenger, + FullArchiveMessenger: tpn.FullArchiveMessenger, + Store: tpn.Storage, + Marshalizer: TestMarshalizer, + DataPools: tpn.DataPool, + Uint64ByteSliceConverter: TestUint64Converter, + DataPacker: dataPacker, + TriesContainer: tpn.TrieContainer, + SizeCheckDelta: 100, + InputAntifloodHandler: &mock.NilAntifloodHandler{}, + OutputAntifloodHandler: &mock.NilAntifloodHandler{}, + NumConcurrentResolvingJobs: 10, + NumConcurrentResolvingTrieNodesJobs: 3, + MainPreferredPeersHolder: preferredPeersHolder, + FullArchivePreferredPeersHolder: fullArchivePreferredPeersHolder, + PayloadValidator: payloadValidator, } var err error @@ -1526,7 +1548,7 @@ func (tpn *TestProcessorNode) initInnerProcessors(gasMap map[string]map[string]u } if tpn.ValidatorStatisticsProcessor == nil { - tpn.ValidatorStatisticsProcessor = &mock.ValidatorStatisticsProcessorStub{ + tpn.ValidatorStatisticsProcessor = &testscommon.ValidatorStatisticsProcessorStub{ UpdatePeerStateCalled: func(header data.MetaHeaderHandler) ([]byte, error) { return []byte("validator statistics root hash"), nil }, @@ -1651,7 +1673,7 @@ func (tpn *TestProcessorNode) initInnerProcessors(gasMap map[string]map[string]u mockVM.GasForOperation = OpGasValueForMockVm _ = tpn.VMContainer.Add(procFactory.InternalTestingVM, mockVM) - tpn.FeeAccumulator, _ = 
postprocess.NewFeeAccumulator() + tpn.FeeAccumulator = postprocess.NewFeeAccumulator() tpn.ArgsParser = smartContract.NewArgumentParser() argsTxTypeHandler := coordinator.ArgNewTxTypeHandler{ @@ -1897,6 +1919,8 @@ func (tpn *TestProcessorNode) initMetaInnerProcessors(gasMap map[string]map[stri MaxNumberOfNodesForStake: 100, ActivateBLSPubKeyMessageVerification: false, MinUnstakeTokensValue: "1", + StakeLimitPercentage: 100.0, + NodeLimitPercentage: 100.0, }, DelegationManagerSystemSCConfig: config.DelegationManagerSystemSCConfig{ MinCreationDeposit: "100", @@ -1907,12 +1931,19 @@ func (tpn *TestProcessorNode) initMetaInnerProcessors(gasMap map[string]map[stri MinServiceFee: 0, MaxServiceFee: 100000, }, + SoftAuctionConfig: config.SoftAuctionConfig{ + TopUpStep: "10", + MinTopUp: "1", + MaxTopUp: "32000000", + MaxNumberOfIterations: 100000, + }, }, ValidatorAccountsDB: tpn.PeerState, UserAccountsDB: tpn.AccntState, ChanceComputer: &mock.RaterMock{}, ShardCoordinator: tpn.ShardCoordinator, EnableEpochsHandler: tpn.EnableEpochsHandler, + NodesCoordinator: tpn.NodesCoordinator, } vmFactory, _ := metaProcess.NewVMContainerFactory(argsVMContainerFactory) @@ -1922,7 +1953,7 @@ func (tpn *TestProcessorNode) initMetaInnerProcessors(gasMap map[string]map[stri tpn.SystemSCFactory = vmFactory.SystemSmartContractContainerFactory() tpn.addMockVm(tpn.BlockchainHook) - tpn.FeeAccumulator, _ = postprocess.NewFeeAccumulator() + tpn.FeeAccumulator = postprocess.NewFeeAccumulator() tpn.ArgsParser = smartContract.NewArgumentParser() esdtTransferParser, _ := parsers.NewESDTTransferParser(TestMarshalizer) argsTxTypeHandler := coordinator.ArgNewTxTypeHandler{ @@ -2069,7 +2100,7 @@ func (tpn *TestProcessorNode) InitDelegationManager() { log.Error("error while initializing system SC", "return code", vmOutput.ReturnCode) } - err = tpn.processSCOutputAccounts(vmOutput) + err = ProcessSCOutputAccounts(vmOutput, tpn.AccntState) log.LogIfError(err) err = tpn.updateSystemSCContractsCode(vmInput.ContractCodeMetadata, vm.DelegationManagerSCAddress) @@ -2092,39 +2123,6 @@ func (tpn *TestProcessorNode) updateSystemSCContractsCode(contractMetadata []byt return tpn.AccntState.SaveAccount(userAcc) } -// save account changes in state from vmOutput - protected by VM - every output can be treated as is. 
-func (tpn *TestProcessorNode) processSCOutputAccounts(vmOutput *vmcommon.VMOutput) error { - outputAccounts := process.SortVMOutputInsideData(vmOutput) - for _, outAcc := range outputAccounts { - acc, err := tpn.getUserAccount(outAcc.Address) - if err != nil { - return err - } - - storageUpdates := process.GetSortedStorageUpdates(outAcc) - for _, storeUpdate := range storageUpdates { - err = acc.SaveKeyValue(storeUpdate.Offset, storeUpdate.Data) - if err != nil { - return err - } - } - - if outAcc.BalanceDelta != nil && outAcc.BalanceDelta.Cmp(zero) != 0 { - err = acc.AddToBalance(outAcc.BalanceDelta) - if err != nil { - return err - } - } - - err = tpn.AccntState.SaveAccount(acc) - if err != nil { - return err - } - } - - return nil -} - func (tpn *TestProcessorNode) getUserAccount(address []byte) (state.UserAccountHandler, error) { acnt, err := tpn.AccntState.LoadAccount(address) if err != nil { @@ -2159,7 +2157,7 @@ func (tpn *TestProcessorNode) initBlockProcessor() { accountsDb[state.UserAccountsState] = tpn.AccntState accountsDb[state.PeerAccountsState] = tpn.PeerState - coreComponents := GetDefaultCoreComponents() + coreComponents := GetDefaultCoreComponents(CreateEnableEpochsConfig()) coreComponents.InternalMarshalizerField = TestMarshalizer coreComponents.HasherField = TestHasher coreComponents.Uint64ByteSliceConverterField = TestUint64Converter @@ -2212,6 +2210,7 @@ func (tpn *TestProcessorNode) initBlockProcessor() { OutportDataProvider: &outport.OutportDataProviderStub{}, BlockProcessingCutoffHandler: &testscommon.BlockProcessingCutoffStub{}, ManagedPeersHolder: &testscommon.ManagedPeersHolderStub{}, + SentSignaturesTracker: &testscommon.SentSignatureTrackerStub{}, } if check.IfNil(tpn.EpochStartNotifier) { @@ -2286,7 +2285,13 @@ func (tpn *TestProcessorNode) initBlockProcessor() { if errGet != nil { log.Error("initBlockProcessor tpn.VMContainer.Get", "error", errGet) } - stakingDataProvider, errRsp := metachain.NewStakingDataProvider(systemVM, "1000") + + argsStakingDataProvider := metachain.StakingDataProviderArgs{ + EnableEpochsHandler: coreComponents.EnableEpochsHandler(), + SystemVM: systemVM, + MinNodePrice: "1000", + } + stakingDataProvider, errRsp := metachain.NewStakingDataProvider(argsStakingDataProvider) if errRsp != nil { log.Error("initBlockProcessor NewRewardsStakingProvider", "error", errRsp) } @@ -2325,23 +2330,52 @@ func (tpn *TestProcessorNode) initBlockProcessor() { EnableEpochsHandler: tpn.EnableEpochsHandler, } epochStartValidatorInfo, _ := metachain.NewValidatorInfoCreator(argsEpochValidatorInfo) + + maxNodesChangeConfigProvider, _ := notifier.NewNodesConfigProvider( + tpn.EpochNotifier, + nil, + ) + auctionCfg := config.SoftAuctionConfig{ + TopUpStep: "10", + MinTopUp: "1", + MaxTopUp: "32000000", + MaxNumberOfIterations: 100000, + } + ald, _ := metachain.NewAuctionListDisplayer(metachain.ArgsAuctionListDisplayer{ + TableDisplayHandler: metachain.NewTableDisplayer(), + ValidatorPubKeyConverter: &testscommon.PubkeyConverterMock{}, + AddressPubKeyConverter: &testscommon.PubkeyConverterMock{}, + AuctionConfig: auctionCfg, + }) + + argsAuctionListSelector := metachain.AuctionListSelectorArgs{ + ShardCoordinator: tpn.ShardCoordinator, + StakingDataProvider: stakingDataProvider, + MaxNodesChangeConfigProvider: maxNodesChangeConfigProvider, + AuctionListDisplayHandler: ald, + SoftAuctionConfig: auctionCfg, + } + auctionListSelector, _ := metachain.NewAuctionListSelector(argsAuctionListSelector) + argsEpochSystemSC := 
metachain.ArgsNewEpochStartSystemSCProcessing{ - SystemVM: systemVM, - UserAccountsDB: tpn.AccntState, - PeerAccountsDB: tpn.PeerState, - Marshalizer: TestMarshalizer, - StartRating: tpn.RatingsData.StartRating(), - ValidatorInfoCreator: tpn.ValidatorStatisticsProcessor, - EndOfEpochCallerAddress: vm.EndOfEpochAddress, - StakingSCAddress: vm.StakingSCAddress, - ChanceComputer: tpn.NodesCoordinator, - EpochNotifier: tpn.EpochNotifier, - GenesisNodesConfig: tpn.NodesSetup, - StakingDataProvider: stakingDataProvider, - NodesConfigProvider: tpn.NodesCoordinator, - ShardCoordinator: tpn.ShardCoordinator, - ESDTOwnerAddressBytes: vm.EndOfEpochAddress, - EnableEpochsHandler: tpn.EnableEpochsHandler, + SystemVM: systemVM, + UserAccountsDB: tpn.AccntState, + PeerAccountsDB: tpn.PeerState, + Marshalizer: TestMarshalizer, + StartRating: tpn.RatingsData.StartRating(), + ValidatorInfoCreator: tpn.ValidatorStatisticsProcessor, + EndOfEpochCallerAddress: vm.EndOfEpochAddress, + StakingSCAddress: vm.StakingSCAddress, + ChanceComputer: tpn.NodesCoordinator, + EpochNotifier: tpn.EpochNotifier, + GenesisNodesConfig: tpn.NodesSetup, + StakingDataProvider: stakingDataProvider, + NodesConfigProvider: tpn.NodesCoordinator, + ShardCoordinator: tpn.ShardCoordinator, + ESDTOwnerAddressBytes: vm.EndOfEpochAddress, + EnableEpochsHandler: tpn.EnableEpochsHandler, + AuctionListSelector: auctionListSelector, + MaxNodesChangeConfigProvider: maxNodesChangeConfigProvider, } epochStartSystemSCProcessor, _ := metachain.NewSystemSCProcessor(argsEpochSystemSC) tpn.EpochStartSystemSCProcessor = epochStartSystemSCProcessor @@ -2425,7 +2459,7 @@ func (tpn *TestProcessorNode) initNode() { AppStatusHandlerField: tpn.AppStatusHandler, } - coreComponents := GetDefaultCoreComponents() + coreComponents := GetDefaultCoreComponents(CreateEnableEpochsConfig()) coreComponents.InternalMarshalizerField = TestMarshalizer coreComponents.VmMarshalizerField = TestVmMarshalizer coreComponents.TxMarshalizerField = TestTxSignMarshalizer @@ -3063,14 +3097,14 @@ func (tpn *TestProcessorNode) createHeartbeatWithHardforkTrigger() { processComponents.ShardCoord = tpn.ShardCoordinator processComponents.IntContainer = tpn.MainInterceptorsContainer processComponents.FullArchiveIntContainer = tpn.FullArchiveInterceptorsContainer - processComponents.ValidatorStatistics = &mock.ValidatorStatisticsProcessorStub{ - GetValidatorInfoForRootHashCalled: func(_ []byte) (map[uint32][]*state.ValidatorInfo, error) { - return map[uint32][]*state.ValidatorInfo{ - 0: {{PublicKey: []byte("pk0")}}, - }, nil + processComponents.ValidatorStatistics = &testscommon.ValidatorStatisticsProcessorStub{ + GetValidatorInfoForRootHashCalled: func(_ []byte) (state.ShardValidatorsInfoMapHandler, error) { + ret := state.NewShardValidatorsInfoMap() + _ = ret.Add(&state.ValidatorInfo{PublicKey: []byte("pk0")}) + return ret, nil }, } - processComponents.ValidatorProvider = &mock.ValidatorsProviderStub{} + processComponents.ValidatorProvider = &stakingcommon.ValidatorsProviderStub{} processComponents.EpochTrigger = tpn.EpochStartTrigger processComponents.EpochNotifier = tpn.EpochStartNotifier processComponents.WhiteListerVerifiedTxsInternal = tpn.WhiteListerVerifiedTxs @@ -3183,12 +3217,10 @@ func CreateEnableEpochsConfig() config.EnableEpochs { SaveJailedAlwaysEnableEpoch: UnreachableEpoch, ValidatorToDelegationEnableEpoch: UnreachableEpoch, ReDelegateBelowMinCheckEnableEpoch: UnreachableEpoch, - WaitingListFixEnableEpoch: UnreachableEpoch, IncrementSCRNonceInMultiTransferEnableEpoch: 
UnreachableEpoch, ESDTMultiTransferEnableEpoch: UnreachableEpoch, GlobalMintBurnDisableEpoch: UnreachableEpoch, ESDTTransferRoleEnableEpoch: UnreachableEpoch, - BuiltInFunctionOnMetaEnableEpoch: UnreachableEpoch, ComputeRewardCheckpointEnableEpoch: UnreachableEpoch, SCRSizeInvariantCheckEnableEpoch: UnreachableEpoch, BackwardCompSaveKeyValueEnableEpoch: UnreachableEpoch, @@ -3223,10 +3255,9 @@ func CreateEnableEpochsConfig() config.EnableEpochs { } // GetDefaultCoreComponents - -func GetDefaultCoreComponents() *mock.CoreComponentsStub { - enableEpochsCfg := CreateEnableEpochsConfig() +func GetDefaultCoreComponents(enableEpochsConfig config.EnableEpochs) *mock.CoreComponentsStub { genericEpochNotifier := forking.NewGenericEpochNotifier() - enableEpochsHandler, _ := enablers.NewEnableEpochsHandler(enableEpochsCfg, genericEpochNotifier) + enableEpochsHandler, _ := enablers.NewEnableEpochsHandler(enableEpochsConfig, genericEpochNotifier) return &mock.CoreComponentsStub{ InternalMarshalizerField: TestMarshalizer, @@ -3252,7 +3283,7 @@ func GetDefaultCoreComponents() *mock.CoreComponentsStub { EconomicsDataField: &economicsmocks.EconomicsHandlerStub{}, RatingsDataField: &testscommon.RatingsInfoMock{}, RaterField: &testscommon.RaterMock{}, - GenesisNodesSetupField: &testscommon.NodesSetupStub{}, + GenesisNodesSetupField: &genesisMocks.NodesSetupStub{}, GenesisTimeField: time.Time{}, EpochNotifierField: genericEpochNotifier, EnableRoundsHandlerField: &testscommon.EnableRoundsHandlerStub{}, @@ -3281,8 +3312,8 @@ func GetDefaultProcessComponents() *mock.ProcessComponentsStub { BootSore: &mock.BoostrapStorerMock{}, HeaderSigVerif: &mock.HeaderSigVerifierStub{}, HeaderIntegrVerif: &mock.HeaderIntegrityVerifierStub{}, - ValidatorStatistics: &mock.ValidatorStatisticsProcessorStub{}, - ValidatorProvider: &mock.ValidatorsProviderStub{}, + ValidatorStatistics: &testscommon.ValidatorStatisticsProcessorStub{}, + ValidatorProvider: &stakingcommon.ValidatorsProviderStub{}, BlockTrack: &mock.BlockTrackerStub{}, PendingMiniBlocksHdl: &mock.PendingMiniBlocksHandlerStub{}, ReqHandler: &testscommon.RequestHandlerStub{}, @@ -3469,7 +3500,7 @@ func getDefaultVMConfig() *config.VirtualMachineConfig { } func getDefaultNodesSetup(maxShards, numNodes uint32, address []byte, pksBytes map[uint32][]byte) sharding.GenesisNodesSetupHandler { - return &mock.NodesSetupStub{ + return &genesisMocks.NodesSetupStub{ InitialNodesInfoCalled: func() (m map[uint32][]nodesCoordinator.GenesisNodeInfoHandler, m2 map[uint32][]nodesCoordinator.GenesisNodeInfoHandler) { oneMap := make(map[uint32][]nodesCoordinator.GenesisNodeInfoHandler) for i := uint32(0); i < maxShards; i++ { @@ -3492,7 +3523,7 @@ func getDefaultNodesSetup(maxShards, numNodes uint32, address []byte, pksBytes m func getDefaultNodesCoordinator(maxShards uint32, pksBytes map[uint32][]byte) nodesCoordinator.NodesCoordinator { return &shardingMocks.NodesCoordinatorStub{ - ComputeValidatorsGroupCalled: func(randomness []byte, round uint64, shardId uint32, epoch uint32) (validators []nodesCoordinator.Validator, err error) { + ComputeConsensusGroupCalled: func(randomness []byte, round uint64, shardId uint32, epoch uint32) (validators []nodesCoordinator.Validator, err error) { v, _ := nodesCoordinator.NewValidator(pksBytes[shardId], 1, defaultChancesSelection) return []nodesCoordinator.Validator{v}, nil }, @@ -3522,16 +3553,8 @@ func GetDefaultEnableEpochsConfig() *config.EnableEpochs { MiniBlockPartialExecutionEnableEpoch: UnreachableEpoch, FailExecutionOnEveryAPIErrorEnableEpoch: 
UnreachableEpoch, DynamicGasCostForDataTrieStorageLoadEnableEpoch: UnreachableEpoch, - } -} - -// GetDefaultRoundsConfig - -func GetDefaultRoundsConfig() config.RoundConfig { - return config.RoundConfig{ - RoundActivations: map[string]config.ActivationRoundByName{ - "DisableAsyncCallV1": { - Round: "18446744073709551615", - }, - }, + StakingV4Step1EnableEpoch: UnreachableEpoch, + StakingV4Step2EnableEpoch: UnreachableEpoch, + StakingV4Step3EnableEpoch: UnreachableEpoch, } } diff --git a/integrationTests/testProcessorNodeWithCoordinator.go b/integrationTests/testProcessorNodeWithCoordinator.go index 71f6c3afd51..63392658a76 100644 --- a/integrationTests/testProcessorNodeWithCoordinator.go +++ b/integrationTests/testProcessorNodeWithCoordinator.go @@ -13,8 +13,8 @@ import ( "github.com/multiversx/mx-chain-go/sharding" "github.com/multiversx/mx-chain-go/sharding/nodesCoordinator" "github.com/multiversx/mx-chain-go/storage/cache" - "github.com/multiversx/mx-chain-go/testscommon" "github.com/multiversx/mx-chain-go/testscommon/enableEpochsHandlerMock" + "github.com/multiversx/mx-chain-go/testscommon/genesisMocks" vic "github.com/multiversx/mx-chain-go/testscommon/validatorInfoCacher" ) @@ -48,7 +48,7 @@ func CreateProcessorNodesWithNodesCoordinator( waitingMap := GenValidatorsFromPubKeys(pubKeysWaiting, nbShards) waitingMapForNodesCoordinator, _ := nodesCoordinator.NodesInfoToValidators(waitingMap) - nodesSetup := &mock.NodesSetupStub{InitialNodesInfoCalled: func() (m map[uint32][]nodesCoordinator.GenesisNodeInfoHandler, m2 map[uint32][]nodesCoordinator.GenesisNodeInfoHandler) { + nodesSetup := &genesisMocks.NodesSetupStub{InitialNodesInfoCalled: func() (m map[uint32][]nodesCoordinator.GenesisNodeInfoHandler, m2 map[uint32][]nodesCoordinator.GenesisNodeInfoHandler) { return validatorsMap, waitingMap }} @@ -76,7 +76,7 @@ func CreateProcessorNodesWithNodesCoordinator( IsFullArchive: false, EnableEpochsHandler: &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, ValidatorInfoCacher: &vic.ValidatorInfoCacherStub{}, - GenesisNodesSetupHandler: &testscommon.NodesSetupStub{}, + GenesisNodesSetupHandler: &genesisMocks.NodesSetupStub{}, } nodesCoordinatorInstance, err := nodesCoordinator.NewIndexHashedNodesCoordinator(argumentsNodesCoordinator) diff --git a/integrationTests/testProcessorNodeWithMultisigner.go b/integrationTests/testProcessorNodeWithMultisigner.go index d7de5cc05cc..42f08a62b39 100644 --- a/integrationTests/testProcessorNodeWithMultisigner.go +++ b/integrationTests/testProcessorNodeWithMultisigner.go @@ -31,6 +31,8 @@ import ( "github.com/multiversx/mx-chain-go/testscommon" "github.com/multiversx/mx-chain-go/testscommon/cryptoMocks" "github.com/multiversx/mx-chain-go/testscommon/enableEpochsHandlerMock" + "github.com/multiversx/mx-chain-go/testscommon/genesisMocks" + "github.com/multiversx/mx-chain-go/testscommon/marshallerMock" "github.com/multiversx/mx-chain-go/testscommon/nodeTypeProviderMock" "github.com/multiversx/mx-chain-go/testscommon/shardingMocks" vic "github.com/multiversx/mx-chain-go/testscommon/validatorInfoCacher" @@ -89,7 +91,7 @@ func CreateNodesWithNodesCoordinatorAndTxKeys( } waitingMapForNodesCoordinator[core.MetachainShardId] = make([]nodesCoordinator.Validator, 0) - nodesSetup := &mock.NodesSetupStub{InitialNodesInfoCalled: func() (m map[uint32][]nodesCoordinator.GenesisNodeInfoHandler, m2 map[uint32][]nodesCoordinator.GenesisNodeInfoHandler) { + nodesSetup := &genesisMocks.NodesSetupStub{InitialNodesInfoCalled: func() (m 
map[uint32][]nodesCoordinator.GenesisNodeInfoHandler, m2 map[uint32][]nodesCoordinator.GenesisNodeInfoHandler) { return validatorsMap, waitingMap }} @@ -221,7 +223,7 @@ func CreateNodesWithNodesCoordinatorFactory( numNodes := nbShards*nodesPerShard + nbMetaNodes - nodesSetup := &mock.NodesSetupStub{ + nodesSetup := &genesisMocks.NodesSetupStub{ InitialNodesInfoCalled: func() (m map[uint32][]nodesCoordinator.GenesisNodeInfoHandler, m2 map[uint32][]nodesCoordinator.GenesisNodeInfoHandler) { return validatorsMap, waitingMap }, @@ -236,6 +238,9 @@ func CreateNodesWithNodesCoordinatorFactory( MiniBlockPartialExecutionEnableEpoch: UnreachableEpoch, RefactorPeersMiniBlocksEnableEpoch: UnreachableEpoch, DynamicGasCostForDataTrieStorageLoadEnableEpoch: UnreachableEpoch, + StakingV4Step1EnableEpoch: UnreachableEpoch, + StakingV4Step2EnableEpoch: UnreachableEpoch, + StakingV4Step3EnableEpoch: UnreachableEpoch, } nodesMap := make(map[uint32][]*TestProcessorNode) @@ -406,34 +411,39 @@ func CreateNodesWithNodesCoordinatorAndHeaderSigVerifier( epochStartSubscriber := notifier.NewEpochStartSubscriptionHandler() bootStorer := CreateMemUnit() - nodesSetup := &mock.NodesSetupStub{InitialNodesInfoCalled: func() (m map[uint32][]nodesCoordinator.GenesisNodeInfoHandler, m2 map[uint32][]nodesCoordinator.GenesisNodeInfoHandler) { + nodesSetup := &genesisMocks.NodesSetupStub{InitialNodesInfoCalled: func() (m map[uint32][]nodesCoordinator.GenesisNodeInfoHandler, m2 map[uint32][]nodesCoordinator.GenesisNodeInfoHandler) { return validatorsMap, nil }} + nodesCoordinatorRegistryFactory, _ := nodesCoordinator.NewNodesCoordinatorRegistryFactory( + &marshallerMock.MarshalizerMock{}, + StakingV4Step2EnableEpoch, + ) completeNodesList := make([]Connectable, 0) for shardId, validatorList := range validatorsMap { consensusCache, _ := cache.NewLRUCache(10000) argumentsNodesCoordinator := nodesCoordinator.ArgNodesCoordinator{ - ShardConsensusGroupSize: shardConsensusGroupSize, - MetaConsensusGroupSize: metaConsensusGroupSize, - Marshalizer: TestMarshalizer, - Hasher: TestHasher, - Shuffler: nodeShuffler, - BootStorer: bootStorer, - EpochStartNotifier: epochStartSubscriber, - ShardIDAsObserver: shardId, - NbShards: uint32(nbShards), - EligibleNodes: validatorsMapForNodesCoordinator, - WaitingNodes: make(map[uint32][]nodesCoordinator.Validator), - SelfPublicKey: []byte(strconv.Itoa(int(shardId))), - ConsensusGroupCache: consensusCache, - ShuffledOutHandler: &mock.ShuffledOutHandlerStub{}, - ChanStopNode: endProcess.GetDummyEndProcessChannel(), - NodeTypeProvider: &nodeTypeProviderMock.NodeTypeProviderStub{}, - IsFullArchive: false, - EnableEpochsHandler: &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, - ValidatorInfoCacher: &vic.ValidatorInfoCacherStub{}, - GenesisNodesSetupHandler: &testscommon.NodesSetupStub{}, + ShardConsensusGroupSize: shardConsensusGroupSize, + MetaConsensusGroupSize: metaConsensusGroupSize, + Marshalizer: TestMarshalizer, + Hasher: TestHasher, + Shuffler: nodeShuffler, + BootStorer: bootStorer, + EpochStartNotifier: epochStartSubscriber, + ShardIDAsObserver: shardId, + NbShards: uint32(nbShards), + EligibleNodes: validatorsMapForNodesCoordinator, + WaitingNodes: make(map[uint32][]nodesCoordinator.Validator), + SelfPublicKey: []byte(strconv.Itoa(int(shardId))), + ConsensusGroupCache: consensusCache, + ShuffledOutHandler: &mock.ShuffledOutHandlerStub{}, + ChanStopNode: endProcess.GetDummyEndProcessChannel(), + NodeTypeProvider: &nodeTypeProviderMock.NodeTypeProviderStub{}, + IsFullArchive: false, + 
EnableEpochsHandler: &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, + ValidatorInfoCacher: &vic.ValidatorInfoCacherStub{}, + GenesisNodesSetupHandler: &genesisMocks.NodesSetupStub{}, + NodesCoordinatorRegistryFactory: nodesCoordinatorRegistryFactory, } nodesCoordinatorInstance, err := nodesCoordinator.NewIndexHashedNodesCoordinator(argumentsNodesCoordinator) @@ -519,37 +529,42 @@ func CreateNodesWithNodesCoordinatorKeygenAndSingleSigner( epochStartSubscriber := notifier.NewEpochStartSubscriptionHandler() nodeShuffler := &shardingMocks.NodeShufflerMock{} - nodesSetup := &mock.NodesSetupStub{ + nodesSetup := &genesisMocks.NodesSetupStub{ InitialNodesInfoCalled: func() (m map[uint32][]nodesCoordinator.GenesisNodeInfoHandler, m2 map[uint32][]nodesCoordinator.GenesisNodeInfoHandler) { return validatorsMap, waitingMap }, } + nodesCoordinatorRegistryFactory, _ := nodesCoordinator.NewNodesCoordinatorRegistryFactory( + &marshallerMock.MarshalizerMock{}, + StakingV4Step2EnableEpoch, + ) completeNodesList := make([]Connectable, 0) for shardId, validatorList := range validatorsMap { bootStorer := CreateMemUnit() lruCache, _ := cache.NewLRUCache(10000) argumentsNodesCoordinator := nodesCoordinator.ArgNodesCoordinator{ - ShardConsensusGroupSize: shardConsensusGroupSize, - MetaConsensusGroupSize: metaConsensusGroupSize, - Marshalizer: TestMarshalizer, - Hasher: TestHasher, - Shuffler: nodeShuffler, - EpochStartNotifier: epochStartSubscriber, - BootStorer: bootStorer, - ShardIDAsObserver: shardId, - NbShards: uint32(nbShards), - EligibleNodes: validatorsMapForNodesCoordinator, - WaitingNodes: waitingMapForNodesCoordinator, - SelfPublicKey: []byte(strconv.Itoa(int(shardId))), - ConsensusGroupCache: lruCache, - ShuffledOutHandler: &mock.ShuffledOutHandlerStub{}, - ChanStopNode: endProcess.GetDummyEndProcessChannel(), - NodeTypeProvider: &nodeTypeProviderMock.NodeTypeProviderStub{}, - IsFullArchive: false, - EnableEpochsHandler: &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, - ValidatorInfoCacher: &vic.ValidatorInfoCacherStub{}, - GenesisNodesSetupHandler: &testscommon.NodesSetupStub{}, + ShardConsensusGroupSize: shardConsensusGroupSize, + MetaConsensusGroupSize: metaConsensusGroupSize, + Marshalizer: TestMarshalizer, + Hasher: TestHasher, + Shuffler: nodeShuffler, + EpochStartNotifier: epochStartSubscriber, + BootStorer: bootStorer, + ShardIDAsObserver: shardId, + NbShards: uint32(nbShards), + EligibleNodes: validatorsMapForNodesCoordinator, + WaitingNodes: waitingMapForNodesCoordinator, + SelfPublicKey: []byte(strconv.Itoa(int(shardId))), + ConsensusGroupCache: lruCache, + ShuffledOutHandler: &mock.ShuffledOutHandlerStub{}, + ChanStopNode: endProcess.GetDummyEndProcessChannel(), + NodeTypeProvider: &nodeTypeProviderMock.NodeTypeProviderStub{}, + IsFullArchive: false, + EnableEpochsHandler: &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, + ValidatorInfoCacher: &vic.ValidatorInfoCacherStub{}, + GenesisNodesSetupHandler: &genesisMocks.NodesSetupStub{}, + NodesCoordinatorRegistryFactory: nodesCoordinatorRegistryFactory, } nodesCoord, err := nodesCoordinator.NewIndexHashedNodesCoordinator(argumentsNodesCoordinator) diff --git a/integrationTests/testProcessorNodeWithTestWebServer.go b/integrationTests/testProcessorNodeWithTestWebServer.go index f1a11c9d72a..592d7d1bdba 100644 --- a/integrationTests/testProcessorNodeWithTestWebServer.go +++ b/integrationTests/testProcessorNodeWithTestWebServer.go @@ -101,7 +101,7 @@ func createFacadeArg(tpn *TestProcessorNode) nodeFacade.ArgNodeFacade { func 
createTestApiConfig() config.ApiRoutesConfig { routes := map[string][]string{ - "node": {"/status", "/metrics", "/heartbeatstatus", "/statistics", "/p2pstatus", "/debug", "/peerinfo", "/bootstrapstatus", "/connected-peers-ratings", "/managed-keys/count", "/managed-keys", "/managed-keys/eligible", "/managed-keys/waiting", "/waiting-epochs-left/:key"}, + "node": {"/status", "/metrics", "/heartbeatstatus", "/statistics", "/p2pstatus", "/debug", "/peerinfo", "/bootstrapstatus", "/connected-peers-ratings", "/managed-keys/count", "/managed-keys", "/loaded-keys", "/managed-keys/eligible", "/managed-keys/waiting", "/waiting-epochs-left/:key"}, "address": {"/:address", "/:address/balance", "/:address/username", "/:address/code-hash", "/:address/key/:key", "/:address/esdt", "/:address/esdt/:tokenIdentifier"}, "hardfork": {"/trigger"}, "network": {"/status", "/total-staked", "/economics", "/config"}, @@ -179,6 +179,7 @@ func createFacadeComponents(tpn *TestProcessorNode) nodeFacade.ApiResolver { Hasher: TestHasher, VMOutputCacher: &testscommon.CacherMock{}, DataFieldParser: dataFieldParser, + BlockChainHook: tpn.BlockchainHook, } txSimulator, err := transactionEvaluator.NewTransactionSimulator(argSimulator) @@ -194,6 +195,7 @@ func createFacadeComponents(tpn *TestProcessorNode) nodeFacade.ApiResolver { Accounts: wrappedAccounts, ShardCoordinator: tpn.ShardCoordinator, EnableEpochsHandler: tpn.EnableEpochsHandler, + BlockChain: tpn.BlockChain, } apiTransactionEvaluator, err := transactionEvaluator.NewAPITransactionEvaluator(argsTransactionEvaluator) log.LogIfError(err) @@ -273,7 +275,7 @@ func createFacadeComponents(tpn *TestProcessorNode) nodeFacade.ApiResolver { APITransactionHandler: apiTransactionHandler, APIBlockHandler: blockAPIHandler, APIInternalBlockHandler: apiInternalBlockProcessor, - GenesisNodesSetupHandler: &mock.NodesSetupStub{}, + GenesisNodesSetupHandler: &genesisMocks.NodesSetupStub{}, ValidatorPubKeyConverter: &testscommon.PubkeyConverterMock{}, AccountsParser: &genesisMocks.AccountsParserStub{}, GasScheduleNotifier: &testscommon.GasScheduleNotifierMock{}, diff --git a/integrationTests/testSyncNode.go b/integrationTests/testSyncNode.go index 6512c5a95e6..b28d5e3f953 100644 --- a/integrationTests/testSyncNode.go +++ b/integrationTests/testSyncNode.go @@ -45,7 +45,7 @@ func (tpn *TestProcessorNode) initBlockProcessorWithSync() { accountsDb[state.UserAccountsState] = tpn.AccntState accountsDb[state.PeerAccountsState] = tpn.PeerState - coreComponents := GetDefaultCoreComponents() + coreComponents := GetDefaultCoreComponents(CreateEnableEpochsConfig()) coreComponents.InternalMarshalizerField = TestMarshalizer coreComponents.HasherField = TestHasher coreComponents.Uint64ByteSliceConverterField = TestUint64Converter @@ -104,6 +104,7 @@ func (tpn *TestProcessorNode) initBlockProcessorWithSync() { OutportDataProvider: &outport.OutportDataProviderStub{}, BlockProcessingCutoffHandler: &testscommon.BlockProcessingCutoffStub{}, ManagedPeersHolder: &testscommon.ManagedPeersHolderStub{}, + SentSignaturesTracker: &testscommon.SentSignatureTrackerStub{}, } if tpn.ShardCoordinator.SelfId() == core.MetachainShardId { @@ -116,14 +117,14 @@ func (tpn *TestProcessorNode) initBlockProcessorWithSync() { PendingMiniBlocksHandler: &mock.PendingMiniBlocksHandlerStub{}, EpochStartDataCreator: &mock.EpochStartDataCreatorStub{}, EpochEconomics: &mock.EpochEconomicsStub{}, - EpochRewardsCreator: &mock.EpochRewardsCreatorStub{}, - EpochValidatorInfoCreator: &mock.EpochValidatorInfoCreatorStub{}, - 
ValidatorStatisticsProcessor: &mock.ValidatorStatisticsProcessorStub{ + EpochRewardsCreator: &testscommon.RewardsCreatorStub{}, + EpochValidatorInfoCreator: &testscommon.EpochValidatorInfoCreatorStub{}, + ValidatorStatisticsProcessor: &testscommon.ValidatorStatisticsProcessorStub{ UpdatePeerStateCalled: func(header data.MetaHeaderHandler) ([]byte, error) { return []byte("validator stats root hash"), nil }, }, - EpochSystemSCProcessor: &mock.EpochStartSystemSCStub{}, + EpochSystemSCProcessor: &testscommon.EpochStartSystemSCStub{}, } tpn.BlockProcessor, err = block.NewMetaProcessor(arguments) diff --git a/integrationTests/vm/esdt/common.go b/integrationTests/vm/esdt/common.go index 3287641d0e6..2d04331a85f 100644 --- a/integrationTests/vm/esdt/common.go +++ b/integrationTests/vm/esdt/common.go @@ -16,6 +16,7 @@ import ( "github.com/multiversx/mx-chain-go/process" vmFactory "github.com/multiversx/mx-chain-go/process/factory" "github.com/multiversx/mx-chain-go/state" + "github.com/multiversx/mx-chain-go/testscommon" "github.com/multiversx/mx-chain-go/testscommon/txDataBuilder" "github.com/multiversx/mx-chain-go/vm" vmcommon "github.com/multiversx/mx-chain-vm-common-go" @@ -170,7 +171,7 @@ func CreateNodesAndPrepareBalances(numOfShards int) ([]*integrationTests.TestPro ScheduledMiniBlocksEnableEpoch: integrationTests.UnreachableEpoch, MiniBlockPartialExecutionEnableEpoch: integrationTests.UnreachableEpoch, } - roundsConfig := integrationTests.GetDefaultRoundsConfig() + roundsConfig := testscommon.GetDefaultRoundsConfig() return CreateNodesAndPrepareBalancesWithEpochsAndRoundsConfig( numOfShards, enableEpochs, @@ -178,7 +179,7 @@ func CreateNodesAndPrepareBalances(numOfShards int) ([]*integrationTests.TestPro ) } -// CreateNodesAndPrepareBalances - +// CreateNodesAndPrepareBalancesWithEpochsAndRoundsConfig - func CreateNodesAndPrepareBalancesWithEpochsAndRoundsConfig(numOfShards int, enableEpochs config.EnableEpochs, roundsConfig config.RoundConfig) ([]*integrationTests.TestProcessorNode, []int) { nodesPerShard := 1 numMetachainNodes := 1 @@ -230,6 +231,7 @@ func IssueTestToken(nodes []*integrationTests.TestProcessorNode, initialSupply i issueTestToken(nodes, initialSupply, ticker, core.MinMetaTxExtraGasCost) } +// IssueTestTokenWithIssuerAccount - func IssueTestTokenWithIssuerAccount(nodes []*integrationTests.TestProcessorNode, issuerAccount *integrationTests.TestWalletAccount, initialSupply int64, ticker string) { issueTestTokenWithIssuerAccount(nodes, issuerAccount, initialSupply, ticker, core.MinMetaTxExtraGasCost) } @@ -302,6 +304,7 @@ func CheckNumCallBacks( } } +// CheckForwarderRawSavedCallbackArgs - func CheckForwarderRawSavedCallbackArgs( t *testing.T, address []byte, @@ -338,13 +341,14 @@ func CheckForwarderRawSavedCallbackArgs( } } -/// ForwarderRawSavedPaymentInfo contains token data to be checked in the forwarder-raw contract. +// ForwarderRawSavedPaymentInfo contains token data to be checked in the forwarder-raw contract. 
type ForwarderRawSavedPaymentInfo struct { TokenId string Nonce uint64 Payment *big.Int } +// CheckForwarderRawSavedCallbackPayments - func CheckForwarderRawSavedCallbackPayments( t *testing.T, address []byte, diff --git a/integrationTests/vm/esdt/process/esdtProcess_test.go b/integrationTests/vm/esdt/process/esdtProcess_test.go index cee94a6132b..d580847067a 100644 --- a/integrationTests/vm/esdt/process/esdtProcess_test.go +++ b/integrationTests/vm/esdt/process/esdtProcess_test.go @@ -42,7 +42,6 @@ func TestESDTIssueAndTransactionsOnMultiShardEnvironment(t *testing.T) { enableEpochs := config.EnableEpochs{ GlobalMintBurnDisableEpoch: integrationTests.UnreachableEpoch, - BuiltInFunctionOnMetaEnableEpoch: integrationTests.UnreachableEpoch, OptimizeGasUsedInCrossMiniBlocksEnableEpoch: integrationTests.UnreachableEpoch, ScheduledMiniBlocksEnableEpoch: integrationTests.UnreachableEpoch, MiniBlockPartialExecutionEnableEpoch: integrationTests.UnreachableEpoch, @@ -174,7 +173,6 @@ func TestESDTCallBurnOnANonBurnableToken(t *testing.T) { enableEpochs := config.EnableEpochs{ GlobalMintBurnDisableEpoch: integrationTests.UnreachableEpoch, - BuiltInFunctionOnMetaEnableEpoch: integrationTests.UnreachableEpoch, OptimizeGasUsedInCrossMiniBlocksEnableEpoch: integrationTests.UnreachableEpoch, ScheduledMiniBlocksEnableEpoch: integrationTests.UnreachableEpoch, MiniBlockPartialExecutionEnableEpoch: integrationTests.UnreachableEpoch, @@ -1410,7 +1408,6 @@ func TestExecOnDestWithTokenTransferFromScAtoScBWithIntermediaryExecOnDest_NotEn enableEpochs := config.EnableEpochs{ GlobalMintBurnDisableEpoch: integrationTests.UnreachableEpoch, - BuiltInFunctionOnMetaEnableEpoch: integrationTests.UnreachableEpoch, SCProcessorV2EnableEpoch: integrationTests.UnreachableEpoch, FailExecutionOnEveryAPIErrorEnableEpoch: integrationTests.UnreachableEpoch, } @@ -2106,7 +2103,6 @@ func TestIssueAndBurnESDT_MaxGasPerBlockExceeded(t *testing.T) { enableEpochs := config.EnableEpochs{ GlobalMintBurnDisableEpoch: integrationTests.UnreachableEpoch, - BuiltInFunctionOnMetaEnableEpoch: integrationTests.UnreachableEpoch, MaxBlockchainHookCountersEnableEpoch: integrationTests.UnreachableEpoch, } nodes := integrationTests.CreateNodesWithEnableEpochs( diff --git a/integrationTests/vm/staking/baseTestMetaProcessor.go b/integrationTests/vm/staking/baseTestMetaProcessor.go new file mode 100644 index 00000000000..0ae2b5ed2d8 --- /dev/null +++ b/integrationTests/vm/staking/baseTestMetaProcessor.go @@ -0,0 +1,394 @@ +package staking + +import ( + "fmt" + "math/big" + "strconv" + "strings" + "testing" + "time" + + "github.com/multiversx/mx-chain-core-go/core" + "github.com/multiversx/mx-chain-core-go/data" + "github.com/multiversx/mx-chain-core-go/data/block" + "github.com/multiversx/mx-chain-core-go/display" + "github.com/multiversx/mx-chain-core-go/marshal" + "github.com/multiversx/mx-chain-go/common" + "github.com/multiversx/mx-chain-go/config" + "github.com/multiversx/mx-chain-go/dataRetriever" + "github.com/multiversx/mx-chain-go/epochStart" + "github.com/multiversx/mx-chain-go/epochStart/metachain" + "github.com/multiversx/mx-chain-go/factory" + "github.com/multiversx/mx-chain-go/integrationTests" + "github.com/multiversx/mx-chain-go/process" + vmFactory "github.com/multiversx/mx-chain-go/process/factory" + "github.com/multiversx/mx-chain-go/sharding/nodesCoordinator" + "github.com/multiversx/mx-chain-go/state" + "github.com/multiversx/mx-chain-go/testscommon" + dataRetrieverMock "github.com/multiversx/mx-chain-go/testscommon/dataRetriever" + 
"github.com/multiversx/mx-chain-go/testscommon/stakingcommon" + statusHandlerMock "github.com/multiversx/mx-chain-go/testscommon/statusHandler" + "github.com/multiversx/mx-chain-go/vm/systemSmartContracts/defaults" + vmcommon "github.com/multiversx/mx-chain-vm-common-go" + arwenConfig "github.com/multiversx/mx-chain-vm-v1_4-go/config" + "github.com/stretchr/testify/require" +) + +const ( + stakingV4Step1EnableEpoch = 1 + stakingV4Step2EnableEpoch = 2 + stakingV4Step3EnableEpoch = 3 + addressLength = 15 + nodePrice = 1000 +) + +func haveTime() bool { return true } +func noTime() bool { return false } + +type nodesConfig struct { + eligible map[uint32][][]byte + waiting map[uint32][][]byte + leaving map[uint32][][]byte + shuffledOut map[uint32][][]byte + queue [][]byte + auction [][]byte + new [][]byte +} + +// TestMetaProcessor - +type TestMetaProcessor struct { + MetaBlockProcessor process.BlockProcessor + NodesCoordinator nodesCoordinator.NodesCoordinator + ValidatorStatistics process.ValidatorStatisticsProcessor + EpochStartTrigger integrationTests.TestEpochStartTrigger + BlockChainHandler data.ChainHandler + NodesConfig nodesConfig + AccountsAdapter state.AccountsAdapter + Marshaller marshal.Marshalizer + TxCacher dataRetriever.TransactionCacher + TxCoordinator process.TransactionCoordinator + SystemVM vmcommon.VMExecutionHandler + BlockChainHook process.BlockChainHookHandler + StakingDataProvider epochStart.StakingDataProvider + + currentRound uint64 +} + +func newTestMetaProcessor( + coreComponents factory.CoreComponentsHolder, + dataComponents factory.DataComponentsHolder, + bootstrapComponents factory.BootstrapComponentsHolder, + statusComponents factory.StatusComponentsHolder, + stateComponents factory.StateComponentsHandler, + nc nodesCoordinator.NodesCoordinator, + maxNodesConfig []config.MaxNodesChangeConfig, + queue [][]byte, +) *TestMetaProcessor { + saveNodesConfig( + stateComponents.AccountsAdapter(), + coreComponents.InternalMarshalizer(), + nc, + maxNodesConfig, + ) + + stakingcommon.SaveDelegationManagerConfig( + stateComponents.AccountsAdapter(), + coreComponents.InternalMarshalizer(), + ) + + gasScheduleNotifier := createGasScheduleNotifier() + argsBlockChainHook, blockChainHook := createBlockChainHook( + dataComponents, + coreComponents, + stateComponents.AccountsAdapter(), + bootstrapComponents.ShardCoordinator(), + gasScheduleNotifier, + ) + + metaVmFactory := createVMContainerFactory( + coreComponents, + gasScheduleNotifier, + blockChainHook, + argsBlockChainHook, + stateComponents, + bootstrapComponents.ShardCoordinator(), + nc, + maxNodesConfig[0].MaxNumNodes, + ) + vmContainer, _ := metaVmFactory.Create() + systemVM, _ := vmContainer.Get(vmFactory.SystemVirtualMachine) + + validatorStatisticsProcessor := createValidatorStatisticsProcessor( + dataComponents, + coreComponents, + nc, + bootstrapComponents.ShardCoordinator(), + stateComponents.PeerAccounts(), + ) + stakingDataProvider := createStakingDataProvider( + coreComponents.EnableEpochsHandler(), + systemVM, + ) + scp := createSystemSCProcessor( + nc, + coreComponents, + stateComponents, + bootstrapComponents.ShardCoordinator(), + maxNodesConfig, + validatorStatisticsProcessor, + systemVM, + stakingDataProvider, + ) + + txCoordinator := &testscommon.TransactionCoordinatorMock{} + epochStartTrigger := createEpochStartTrigger(coreComponents, dataComponents.StorageService()) + + eligible, _ := nc.GetAllEligibleValidatorsPublicKeys(0) + waiting, _ := nc.GetAllWaitingValidatorsPublicKeys(0) + shuffledOut, _ := 
nc.GetAllShuffledOutValidatorsPublicKeys(0) + + return &TestMetaProcessor{ + AccountsAdapter: stateComponents.AccountsAdapter(), + Marshaller: coreComponents.InternalMarshalizer(), + NodesConfig: nodesConfig{ + eligible: eligible, + waiting: waiting, + shuffledOut: shuffledOut, + queue: queue, + auction: make([][]byte, 0), + }, + MetaBlockProcessor: createMetaBlockProcessor( + nc, + scp, + coreComponents, + dataComponents, + bootstrapComponents, + statusComponents, + stateComponents, + validatorStatisticsProcessor, + blockChainHook, + metaVmFactory, + epochStartTrigger, + vmContainer, + txCoordinator, + ), + currentRound: 1, + NodesCoordinator: nc, + ValidatorStatistics: validatorStatisticsProcessor, + EpochStartTrigger: epochStartTrigger, + BlockChainHandler: dataComponents.Blockchain(), + TxCacher: dataComponents.Datapool().CurrentBlockTxs(), + TxCoordinator: txCoordinator, + SystemVM: systemVM, + BlockChainHook: blockChainHook, + StakingDataProvider: stakingDataProvider, + } +} + +func saveNodesConfig( + accountsDB state.AccountsAdapter, + marshaller marshal.Marshalizer, + nc nodesCoordinator.NodesCoordinator, + maxNodesConfig []config.MaxNodesChangeConfig, +) { + eligibleMap, _ := nc.GetAllEligibleValidatorsPublicKeys(0) + waitingMap, _ := nc.GetAllWaitingValidatorsPublicKeys(0) + allStakedNodes := int64(len(getAllPubKeys(eligibleMap)) + len(getAllPubKeys(waitingMap))) + + maxNumNodes := allStakedNodes + if len(maxNodesConfig) > 0 { + maxNumNodes = int64(maxNodesConfig[0].MaxNumNodes) + } + + stakingcommon.SaveNodesConfig( + accountsDB, + marshaller, + allStakedNodes, + 1, + maxNumNodes, + ) +} + +func createGasScheduleNotifier() core.GasScheduleNotifier { + gasSchedule := arwenConfig.MakeGasMapForTests() + defaults.FillGasMapInternal(gasSchedule, 1) + return testscommon.NewGasScheduleNotifierMock(gasSchedule) +} + +func createEpochStartTrigger( + coreComponents factory.CoreComponentsHolder, + storageService dataRetriever.StorageService, +) integrationTests.TestEpochStartTrigger { + argsEpochStart := &metachain.ArgsNewMetaEpochStartTrigger{ + Settings: &config.EpochStartConfig{ + MinRoundsBetweenEpochs: 10, + RoundsPerEpoch: 10, + }, + Epoch: 0, + EpochStartNotifier: coreComponents.EpochStartNotifierWithConfirm(), + Storage: storageService, + Marshalizer: coreComponents.InternalMarshalizer(), + Hasher: coreComponents.Hasher(), + AppStatusHandler: &statusHandlerMock.AppStatusHandlerStub{}, + DataPool: dataRetrieverMock.NewPoolsHolderMock(), + } + + epochStartTrigger, _ := metachain.NewEpochStartTrigger(argsEpochStart) + testTrigger := &metachain.TestTrigger{} + testTrigger.SetTrigger(epochStartTrigger) + + return testTrigger +} + +// Process - +func (tmp *TestMetaProcessor) Process(t *testing.T, numOfRounds uint64) { + for r := tmp.currentRound; r < tmp.currentRound+numOfRounds; r++ { + header := tmp.createNewHeader(t, r) + tmp.createAndCommitBlock(t, header, haveTime) + } + + tmp.currentRound += numOfRounds +} + +func (tmp *TestMetaProcessor) createNewHeader(t *testing.T, round uint64) *block.MetaBlock { + _, err := tmp.MetaBlockProcessor.CreateNewHeader(round, round) + require.Nil(t, err) + + epoch := tmp.EpochStartTrigger.Epoch() + printNewHeaderRoundEpoch(round, epoch) + + currentHeader, currentHash := tmp.getCurrentHeaderInfo() + header := createMetaBlockToCommit( + epoch, + round, + currentHash, + currentHeader.GetRandSeed(), + tmp.NodesCoordinator.ConsensusGroupSize(core.MetachainShardId), + ) + + return header +} + +func (tmp *TestMetaProcessor) createAndCommitBlock(t 
*testing.T, header data.HeaderHandler, haveTime func() bool) {
+	newHeader, blockBody, err := tmp.MetaBlockProcessor.CreateBlock(header, haveTime)
+	require.Nil(t, err)
+
+	err = tmp.MetaBlockProcessor.CommitBlock(newHeader, blockBody)
+	require.Nil(t, err)
+
+	time.Sleep(time.Millisecond * 50)
+	tmp.updateNodesConfig(header.GetEpoch())
+	tmp.displayConfig(tmp.NodesConfig)
+}
+
+func printNewHeaderRoundEpoch(round uint64, epoch uint32) {
+	headline := display.Headline(
+		fmt.Sprintf("Committing header in epoch %v round %v", epoch, round),
+		"",
+		delimiter,
+	)
+	fmt.Println(headline)
+}
+
+func (tmp *TestMetaProcessor) getCurrentHeaderInfo() (data.HeaderHandler, []byte) {
+	currentHeader := tmp.BlockChainHandler.GetCurrentBlockHeader()
+	currentHash := tmp.BlockChainHandler.GetCurrentBlockHeaderHash()
+	if currentHeader == nil {
+		currentHeader = tmp.BlockChainHandler.GetGenesisHeader()
+		currentHash = tmp.BlockChainHandler.GetGenesisHeaderHash()
+	}
+
+	return currentHeader, currentHash
+}
+
+func createMetaBlockToCommit(
+	epoch uint32,
+	round uint64,
+	prevHash []byte,
+	prevRandSeed []byte,
+	consensusSize int,
+) *block.MetaBlock {
+	roundStr := strconv.Itoa(int(round))
+	hdr := block.MetaBlock{
+		Epoch: epoch,
+		Nonce: round,
+		Round: round,
+		PrevHash: prevHash,
+		Signature: []byte("signature"),
+		PubKeysBitmap: []byte(strings.Repeat("f", consensusSize)),
+		RootHash: []byte("roothash" + roundStr),
+		ShardInfo: make([]block.ShardData, 0),
+		TxCount: 1,
+		PrevRandSeed: prevRandSeed,
+		RandSeed: []byte("randseed" + roundStr),
+		AccumulatedFeesInEpoch: big.NewInt(0),
+		AccumulatedFees: big.NewInt(0),
+		DevFeesInEpoch: big.NewInt(0),
+		DeveloperFees: big.NewInt(0),
+	}
+
+	shardMiniBlockHeaders := make([]block.MiniBlockHeader, 0)
+	shardMiniBlockHeader := block.MiniBlockHeader{
+		Hash: []byte("mb_hash" + roundStr),
+		ReceiverShardID: 0,
+		SenderShardID: 0,
+		TxCount: 1,
+	}
+	shardMiniBlockHeaders = append(shardMiniBlockHeaders, shardMiniBlockHeader)
+	shardData := block.ShardData{
+		Nonce: round,
+		ShardID: 0,
+		HeaderHash: []byte("hdr_hash" + roundStr),
+		TxCount: 1,
+		ShardMiniBlockHeaders: shardMiniBlockHeaders,
+		DeveloperFees: big.NewInt(0),
+		AccumulatedFees: big.NewInt(0),
+	}
+	hdr.ShardInfo = append(hdr.ShardInfo, shardData)
+
+	return &hdr
+}
+
+func (tmp *TestMetaProcessor) updateNodesConfig(epoch uint32) {
+	eligible, _ := tmp.NodesCoordinator.GetAllEligibleValidatorsPublicKeys(epoch)
+	waiting, _ := tmp.NodesCoordinator.GetAllWaitingValidatorsPublicKeys(epoch)
+	leaving, _ := tmp.NodesCoordinator.GetAllLeavingValidatorsPublicKeys(epoch)
+	shuffledOut, _ := tmp.NodesCoordinator.GetAllShuffledOutValidatorsPublicKeys(epoch)
+
+	rootHash, _ := tmp.ValidatorStatistics.RootHash()
+	validatorsInfoMap, _ := tmp.ValidatorStatistics.GetValidatorInfoForRootHash(rootHash)
+
+	auction := make([][]byte, 0)
+	newList := make([][]byte, 0)
+	for _, validator := range validatorsInfoMap.GetAllValidatorsInfo() {
+		if validator.GetList() == string(common.AuctionList) {
+			auction = append(auction, validator.GetPublicKey())
+		}
+		if validator.GetList() == string(common.NewList) {
+			newList = append(newList, validator.GetPublicKey())
+		}
+	}
+
+	tmp.NodesConfig.eligible = eligible
+	tmp.NodesConfig.waiting = waiting
+	tmp.NodesConfig.shuffledOut = shuffledOut
+	tmp.NodesConfig.leaving = leaving
+	tmp.NodesConfig.auction = auction
+	tmp.NodesConfig.new = newList
+	tmp.NodesConfig.queue = tmp.getWaitingListKeys()
+}
+
+func generateAddresses(startIdx, n uint32) [][]byte {
+	ret := make([][]byte, 0,
n) + + for i := startIdx; i < n+startIdx; i++ { + ret = append(ret, generateAddress(i)) + } + + return ret +} + +func generateAddress(identifier uint32) []byte { + uniqueIdentifier := fmt.Sprintf("address-%d", identifier) + return []byte(strings.Repeat("0", addressLength-len(uniqueIdentifier)) + uniqueIdentifier) +} diff --git a/integrationTests/vm/staking/componentsHolderCreator.go b/integrationTests/vm/staking/componentsHolderCreator.go new file mode 100644 index 00000000000..e3673b08ec7 --- /dev/null +++ b/integrationTests/vm/staking/componentsHolderCreator.go @@ -0,0 +1,221 @@ +package staking + +import ( + "time" + + "github.com/multiversx/mx-chain-core-go/core" + "github.com/multiversx/mx-chain-core-go/core/nodetype" + "github.com/multiversx/mx-chain-core-go/data" + "github.com/multiversx/mx-chain-core-go/data/block" + "github.com/multiversx/mx-chain-core-go/data/endProcess" + "github.com/multiversx/mx-chain-core-go/data/typeConverters/uint64ByteSlice" + "github.com/multiversx/mx-chain-core-go/hashing/sha256" + "github.com/multiversx/mx-chain-core-go/marshal" + "github.com/multiversx/mx-chain-go/common" + "github.com/multiversx/mx-chain-go/common/enablers" + "github.com/multiversx/mx-chain-go/common/forking" + "github.com/multiversx/mx-chain-go/common/statistics/disabled" + "github.com/multiversx/mx-chain-go/config" + "github.com/multiversx/mx-chain-go/dataRetriever" + "github.com/multiversx/mx-chain-go/dataRetriever/blockchain" + "github.com/multiversx/mx-chain-go/epochStart/notifier" + "github.com/multiversx/mx-chain-go/factory" + "github.com/multiversx/mx-chain-go/integrationTests" + integrationMocks "github.com/multiversx/mx-chain-go/integrationTests/mock" + mockFactory "github.com/multiversx/mx-chain-go/node/mock/factory" + "github.com/multiversx/mx-chain-go/process/mock" + "github.com/multiversx/mx-chain-go/sharding" + "github.com/multiversx/mx-chain-go/sharding/nodesCoordinator" + "github.com/multiversx/mx-chain-go/state" + stateFactory "github.com/multiversx/mx-chain-go/state/factory" + "github.com/multiversx/mx-chain-go/state/storagePruningManager" + "github.com/multiversx/mx-chain-go/state/storagePruningManager/evictionWaitingList" + "github.com/multiversx/mx-chain-go/statusHandler" + "github.com/multiversx/mx-chain-go/testscommon" + dataRetrieverMock "github.com/multiversx/mx-chain-go/testscommon/dataRetriever" + notifierMocks "github.com/multiversx/mx-chain-go/testscommon/epochNotifier" + factoryTests "github.com/multiversx/mx-chain-go/testscommon/factory" + "github.com/multiversx/mx-chain-go/testscommon/mainFactoryMocks" + "github.com/multiversx/mx-chain-go/testscommon/outport" + "github.com/multiversx/mx-chain-go/testscommon/stakingcommon" + stateTests "github.com/multiversx/mx-chain-go/testscommon/state" + statusHandlerMock "github.com/multiversx/mx-chain-go/testscommon/statusHandler" + "github.com/multiversx/mx-chain-go/trie" +) + +const hashSize = 32 + +func createComponentHolders(numOfShards uint32) ( + factory.CoreComponentsHolder, + factory.DataComponentsHolder, + factory.BootstrapComponentsHolder, + factory.StatusComponentsHolder, + factory.StateComponentsHandler, +) { + coreComponents := createCoreComponents() + statusComponents := createStatusComponents() + stateComponents := createStateComponents(coreComponents) + dataComponents := createDataComponents(coreComponents, numOfShards) + bootstrapComponents := createBootstrapComponents(coreComponents.InternalMarshalizer(), numOfShards) + + return coreComponents, dataComponents, bootstrapComponents, 
statusComponents, stateComponents
+}
+
+func createCoreComponents() factory.CoreComponentsHolder {
+	epochNotifier := forking.NewGenericEpochNotifier()
+	configEnableEpochs := config.EnableEpochs{
+		StakingV4Step1EnableEpoch: stakingV4Step1EnableEpoch,
+		StakingV4Step2EnableEpoch: stakingV4Step2EnableEpoch,
+		StakingV4Step3EnableEpoch: stakingV4Step3EnableEpoch,
+		GovernanceEnableEpoch: integrationTests.UnreachableEpoch,
+		RefactorPeersMiniBlocksEnableEpoch: integrationTests.UnreachableEpoch,
+	}
+
+	enableEpochsHandler, _ := enablers.NewEnableEpochsHandler(configEnableEpochs, epochNotifier)
+
+	return &integrationMocks.CoreComponentsStub{
+		InternalMarshalizerField: &marshal.GogoProtoMarshalizer{},
+		HasherField: sha256.NewSha256(),
+		Uint64ByteSliceConverterField: uint64ByteSlice.NewBigEndianConverter(),
+		StatusHandlerField: statusHandler.NewStatusMetrics(),
+		RoundHandlerField: &mock.RoundHandlerMock{RoundTimeDuration: time.Second},
+		EpochStartNotifierWithConfirmField: notifier.NewEpochStartSubscriptionHandler(),
+		EpochNotifierField: epochNotifier,
+		RaterField: &testscommon.RaterMock{Chance: 5},
+		AddressPubKeyConverterField: testscommon.NewPubkeyConverterMock(addressLength),
+		EconomicsDataField: stakingcommon.CreateEconomicsData(),
+		ChanStopNodeProcessField: endProcess.GetDummyEndProcessChannel(),
+		NodeTypeProviderField: nodetype.NewNodeTypeProvider(core.NodeTypeValidator),
+		ProcessStatusHandlerInternal: statusHandler.NewProcessStatusHandler(),
+		EnableEpochsHandlerField: enableEpochsHandler,
+		EnableRoundsHandlerField: &testscommon.EnableRoundsHandlerStub{},
+		RoundNotifierField: &notifierMocks.RoundNotifierStub{},
+	}
+}
+
+func createDataComponents(coreComponents factory.CoreComponentsHolder, numOfShards uint32) factory.DataComponentsHolder {
+	genesisBlock := createGenesisMetaBlock()
+	genesisBlockHash, _ := coreComponents.InternalMarshalizer().Marshal(genesisBlock)
+	genesisBlockHash = coreComponents.Hasher().Compute(string(genesisBlockHash))
+
+	blockChain, _ := blockchain.NewMetaChain(&statusHandlerMock.AppStatusHandlerStub{})
+	_ = blockChain.SetGenesisHeader(createGenesisMetaBlock())
+	blockChain.SetGenesisHeaderHash(genesisBlockHash)
+
+	chainStorer := dataRetriever.NewChainStorer()
+	chainStorer.AddStorer(dataRetriever.BootstrapUnit, integrationTests.CreateMemUnit())
+	chainStorer.AddStorer(dataRetriever.MetaHdrNonceHashDataUnit, integrationTests.CreateMemUnit())
+	chainStorer.AddStorer(dataRetriever.MetaBlockUnit, integrationTests.CreateMemUnit())
+	chainStorer.AddStorer(dataRetriever.MiniBlockUnit, integrationTests.CreateMemUnit())
+	chainStorer.AddStorer(dataRetriever.BlockHeaderUnit, integrationTests.CreateMemUnit())
+	for i := uint32(0); i < numOfShards; i++ {
+		unit := dataRetriever.ShardHdrNonceHashDataUnit + dataRetriever.UnitType(i)
+		chainStorer.AddStorer(unit, integrationTests.CreateMemUnit())
+	}
+
+	return &mockFactory.DataComponentsMock{
+		Store: chainStorer,
+		DataPool: dataRetrieverMock.NewPoolsHolderMock(),
+		BlockChain: blockChain,
+		EconomicsData: coreComponents.EconomicsData(),
+	}
+}
+
+func createBootstrapComponents(
+	marshaller marshal.Marshalizer,
+	numOfShards uint32,
+) factory.BootstrapComponentsHolder {
+	shardCoordinator, _ := sharding.NewMultiShardCoordinator(numOfShards, core.MetachainShardId)
+	ncr, _ := nodesCoordinator.NewNodesCoordinatorRegistryFactory(
+		marshaller,
+		stakingV4Step2EnableEpoch,
+	)
+
+	return &mainFactoryMocks.BootstrapComponentsStub{
+		ShCoordinator: shardCoordinator,
+		HdrIntegrityVerifier:
&mock.HeaderIntegrityVerifierStub{}, + VersionedHdrFactory: &testscommon.VersionedHeaderFactoryStub{ + CreateCalled: func(epoch uint32) data.HeaderHandler { + return &block.MetaBlock{Epoch: epoch} + }, + }, + NodesCoordinatorRegistryFactoryField: ncr, + } +} + +func createStatusComponents() factory.StatusComponentsHolder { + return &integrationMocks.StatusComponentsStub{ + Outport: &outport.OutportStub{}, + SoftwareVersionCheck: &integrationMocks.SoftwareVersionCheckerMock{}, + ManagedPeersMonitorField: &testscommon.ManagedPeersMonitorStub{}, + } +} + +func createStateComponents(coreComponents factory.CoreComponentsHolder) factory.StateComponentsHandler { + tsmArgs := getNewTrieStorageManagerArgs(coreComponents) + tsm, _ := trie.CreateTrieStorageManager(tsmArgs, trie.StorageManagerOptions{}) + trieFactoryManager, _ := trie.NewTrieStorageManagerWithoutPruning(tsm) + + argsAccCreator := stateFactory.ArgsAccountCreator{ + Hasher: coreComponents.Hasher(), + Marshaller: coreComponents.InternalMarshalizer(), + EnableEpochsHandler: coreComponents.EnableEpochsHandler(), + } + + accCreator, _ := stateFactory.NewAccountCreator(argsAccCreator) + + userAccountsDB := createAccountsDB(coreComponents, accCreator, trieFactoryManager) + peerAccountsDB := createAccountsDB(coreComponents, stateFactory.NewPeerAccountCreator(), trieFactoryManager) + + _ = userAccountsDB.SetSyncer(&mock.AccountsDBSyncerStub{}) + _ = peerAccountsDB.SetSyncer(&mock.AccountsDBSyncerStub{}) + + return &factoryTests.StateComponentsMock{ + PeersAcc: peerAccountsDB, + Accounts: userAccountsDB, + } +} + +func getNewTrieStorageManagerArgs(coreComponents factory.CoreComponentsHolder) trie.NewTrieStorageManagerArgs { + return trie.NewTrieStorageManagerArgs{ + MainStorer: testscommon.CreateMemUnit(), + Marshalizer: coreComponents.InternalMarshalizer(), + Hasher: coreComponents.Hasher(), + GeneralConfig: config.TrieStorageManagerConfig{SnapshotsGoroutineNum: 1}, + IdleProvider: &testscommon.ProcessStatusHandlerStub{}, + Identifier: "id", + StatsCollector: disabled.NewStateStatistics(), + } +} + +func createAccountsDB( + coreComponents factory.CoreComponentsHolder, + accountFactory state.AccountFactory, + trieStorageManager common.StorageManager, +) *state.AccountsDB { + tr, _ := trie.NewTrie( + trieStorageManager, + coreComponents.InternalMarshalizer(), + coreComponents.Hasher(), + coreComponents.EnableEpochsHandler(), + 5, + ) + + argsEvictionWaitingList := evictionWaitingList.MemoryEvictionWaitingListArgs{ + RootHashesSize: 10, + HashesSize: hashSize, + } + ewl, _ := evictionWaitingList.NewMemoryEvictionWaitingList(argsEvictionWaitingList) + spm, _ := storagePruningManager.NewStoragePruningManager(ewl, 10) + argsAccountsDb := state.ArgsAccountsDB{ + Trie: tr, + Hasher: coreComponents.Hasher(), + Marshaller: coreComponents.InternalMarshalizer(), + AccountFactory: accountFactory, + StoragePruningManager: spm, + AddressConverter: coreComponents.AddressPubKeyConverter(), + SnapshotsManager: &stateTests.SnapshotsManagerStub{}, + } + adb, _ := state.NewAccountsDB(argsAccountsDb) + return adb +} diff --git a/integrationTests/vm/staking/configDisplayer.go b/integrationTests/vm/staking/configDisplayer.go new file mode 100644 index 00000000000..3ea2a402f7f --- /dev/null +++ b/integrationTests/vm/staking/configDisplayer.go @@ -0,0 +1,132 @@ +package staking + +import ( + "bytes" + "fmt" + "sort" + "strconv" + + "github.com/multiversx/mx-chain-core-go/core" + "github.com/multiversx/mx-chain-core-go/display" + "github.com/multiversx/mx-chain-go/state" 
+) + +const ( + delimiter = "#" + maxPubKeysListLen = 6 +) + +// TODO: Make a subcomponent which will register to epoch notifier to display config only upon epoch change + +func getAllPubKeys(validatorsMap map[uint32][][]byte) [][]byte { + allValidators := make([][]byte, 0) + for _, validatorsInShard := range validatorsMap { + allValidators = append(allValidators, validatorsInShard...) + } + + return allValidators +} + +func getShortPubKeysList(pubKeys [][]byte) [][]byte { + pubKeysToDisplay := pubKeys + sort.SliceStable(pubKeysToDisplay, func(i, j int) bool { + return string(pubKeysToDisplay[i]) < string(pubKeysToDisplay[j]) + }) + + if len(pubKeys) > maxPubKeysListLen { + pubKeysToDisplay = make([][]byte, 0) + pubKeysToDisplay = append(pubKeysToDisplay, pubKeys[:maxPubKeysListLen/2]...) + pubKeysToDisplay = append(pubKeysToDisplay, [][]byte{[]byte("...")}...) + pubKeysToDisplay = append(pubKeysToDisplay, pubKeys[len(pubKeys)-maxPubKeysListLen/2:]...) + } + + return pubKeysToDisplay +} + +func (tmp *TestMetaProcessor) getAllNodeKeys() state.ShardValidatorsInfoMapHandler { + rootHash, _ := tmp.ValidatorStatistics.RootHash() + validatorsMap, _ := tmp.ValidatorStatistics.GetValidatorInfoForRootHash(rootHash) + return validatorsMap +} + +func (tmp *TestMetaProcessor) displayConfig(config nodesConfig) { + lines := make([]*display.LineData, 0) + + allNodes := tmp.getAllNodeKeys() + _ = tmp.StakingDataProvider.PrepareStakingData(allNodes) + + numShards := uint32(len(config.eligible)) + for shardId := uint32(0); shardId < numShards; shardId++ { + shard := getShardId(shardId, numShards) + + lines = append(lines, tmp.getDisplayableValidatorsInShard("eligible", config.eligible[shard], shard)...) + lines = append(lines, tmp.getDisplayableValidatorsInShard("waiting", config.waiting[shard], shard)...) + lines = append(lines, tmp.getDisplayableValidatorsInShard("leaving", config.leaving[shard], shard)...) + lines = append(lines, tmp.getDisplayableValidatorsInShard("shuffled", config.shuffledOut[shard], shard)...) 
+ lines = append(lines, display.NewLineData(true, []string{})) + } + lines = append(lines, display.NewLineData(true, []string{"eligible", fmt.Sprintf("Total: %d", len(getAllPubKeys(config.eligible))), "", "", "All shards"})) + lines = append(lines, display.NewLineData(true, []string{"waiting", fmt.Sprintf("Total: %d", len(getAllPubKeys(config.waiting))), "", "", "All shards"})) + lines = append(lines, display.NewLineData(true, []string{"leaving", fmt.Sprintf("Total: %d", len(getAllPubKeys(config.leaving))), "", "", "All shards"})) + lines = append(lines, display.NewLineData(true, []string{"shuffled", fmt.Sprintf("Total: %d", len(getAllPubKeys(config.shuffledOut))), "", "", "All shards"})) + + tableHeader := []string{"List", "BLS key", "Owner", "TopUp", "Shard ID"} + table, _ := display.CreateTableString(tableHeader, lines) + headline := display.Headline("Nodes config", "", delimiter) + fmt.Printf("%s\n%s\n", headline, table) + + tmp.displayValidators("New", config.new) + tmp.displayValidators("Auction", config.auction) + tmp.displayValidators("Queue", config.queue) + + tmp.StakingDataProvider.Clean() +} + +func getShardId(shardId, numShards uint32) uint32 { + if shardId == numShards-1 { + return core.MetachainShardId + } + + return shardId +} + +func (tmp *TestMetaProcessor) getDisplayableValidatorsInShard(list string, pubKeys [][]byte, shardID uint32) []*display.LineData { + pubKeysToDisplay := getShortPubKeysList(pubKeys) + + lines := make([]*display.LineData, 0) + for idx, pk := range pubKeysToDisplay { + horizontalLineAfter := idx == len(pubKeysToDisplay)-1 + owner, _ := tmp.StakingDataProvider.GetBlsKeyOwner(pk) + topUp, _ := tmp.StakingDataProvider.GetNodeStakedTopUp(pk) + if bytes.Equal(pk, []byte("...")) { + lines = append(lines, display.NewLineData(horizontalLineAfter, []string{list, string(pk), "...", "...", strconv.Itoa(int(shardID))})) + } else { + lines = append(lines, display.NewLineData(horizontalLineAfter, []string{list, string(pk), owner, topUp.String(), strconv.Itoa(int(shardID))})) + } + } + lines = append(lines, display.NewLineData(true, []string{list, fmt.Sprintf("Total: %d", len(pubKeys)), "", "", strconv.Itoa(int(shardID))})) + + return lines +} + +func (tmp *TestMetaProcessor) displayValidators(list string, pubKeys [][]byte) { + pubKeysToDisplay := getShortPubKeysList(pubKeys) + + lines := make([]*display.LineData, 0) + tableHeader := []string{"List", "BLS key", "Owner", "TopUp"} + for idx, pk := range pubKeysToDisplay { + horizontalLineAfter := idx == len(pubKeysToDisplay)-1 + owner, _ := tmp.StakingDataProvider.GetBlsKeyOwner(pk) + topUp, _ := tmp.StakingDataProvider.GetNodeStakedTopUp(pk) + if bytes.Equal(pk, []byte("...")) { + lines = append(lines, display.NewLineData(horizontalLineAfter, []string{list, string(pk), "...", "..."})) + } else { + lines = append(lines, display.NewLineData(horizontalLineAfter, []string{list, string(pk), owner, topUp.String()})) + } + } + lines = append(lines, display.NewLineData(true, []string{list, fmt.Sprintf("Total: %d", len(pubKeys))})) + + headline := display.Headline(fmt.Sprintf("%s list", list), "", delimiter) + table, _ := display.CreateTableString(tableHeader, lines) + fmt.Printf("%s \n%s\n", headline, table) +} diff --git a/integrationTests/vm/staking/metaBlockProcessorCreator.go b/integrationTests/vm/staking/metaBlockProcessorCreator.go new file mode 100644 index 00000000000..759458cf30e --- /dev/null +++ b/integrationTests/vm/staking/metaBlockProcessorCreator.go @@ -0,0 +1,245 @@ +package staking + +import ( + "math/big" 
+ + "github.com/multiversx/mx-chain-core-go/core" + "github.com/multiversx/mx-chain-core-go/data" + "github.com/multiversx/mx-chain-core-go/data/block" + "github.com/multiversx/mx-chain-go/dataRetriever" + "github.com/multiversx/mx-chain-go/epochStart" + "github.com/multiversx/mx-chain-go/epochStart/metachain" + "github.com/multiversx/mx-chain-go/factory" + integrationMocks "github.com/multiversx/mx-chain-go/integrationTests/mock" + "github.com/multiversx/mx-chain-go/process" + blproc "github.com/multiversx/mx-chain-go/process/block" + "github.com/multiversx/mx-chain-go/process/block/bootstrapStorage" + "github.com/multiversx/mx-chain-go/process/block/postprocess" + "github.com/multiversx/mx-chain-go/process/block/processedMb" + "github.com/multiversx/mx-chain-go/process/mock" + "github.com/multiversx/mx-chain-go/process/scToProtocol" + "github.com/multiversx/mx-chain-go/process/smartContract" + "github.com/multiversx/mx-chain-go/sharding" + "github.com/multiversx/mx-chain-go/sharding/nodesCoordinator" + "github.com/multiversx/mx-chain-go/state" + "github.com/multiversx/mx-chain-go/testscommon" + "github.com/multiversx/mx-chain-go/testscommon/dblookupext" + factory2 "github.com/multiversx/mx-chain-go/testscommon/factory" + "github.com/multiversx/mx-chain-go/testscommon/integrationtests" + "github.com/multiversx/mx-chain-go/testscommon/outport" + statusHandlerMock "github.com/multiversx/mx-chain-go/testscommon/statusHandler" +) + +func createMetaBlockProcessor( + nc nodesCoordinator.NodesCoordinator, + systemSCProcessor process.EpochStartSystemSCProcessor, + coreComponents factory.CoreComponentsHolder, + dataComponents factory.DataComponentsHolder, + bootstrapComponents factory.BootstrapComponentsHolder, + statusComponents factory.StatusComponentsHolder, + stateComponents factory.StateComponentsHandler, + validatorsInfoCreator process.ValidatorStatisticsProcessor, + blockChainHook process.BlockChainHookHandler, + metaVMFactory process.VirtualMachinesContainerFactory, + epochStartHandler process.EpochStartTriggerHandler, + vmContainer process.VirtualMachinesContainer, + txCoordinator process.TransactionCoordinator, +) process.BlockProcessor { + blockTracker := createBlockTracker( + dataComponents.Blockchain().GetGenesisHeader(), + bootstrapComponents.ShardCoordinator(), + ) + epochStartDataCreator := createEpochStartDataCreator( + coreComponents, + dataComponents, + bootstrapComponents.ShardCoordinator(), + epochStartHandler, + blockTracker, + ) + + accountsDb := make(map[state.AccountsDbIdentifier]state.AccountsAdapter) + accountsDb[state.UserAccountsState] = stateComponents.AccountsAdapter() + accountsDb[state.PeerAccountsState] = stateComponents.PeerAccounts() + + bootStrapStorer, _ := dataComponents.StorageService().GetStorer(dataRetriever.BootstrapUnit) + bootStorer, _ := bootstrapStorage.NewBootstrapStorer( + coreComponents.InternalMarshalizer(), + bootStrapStorer, + ) + + headerValidator := createHeaderValidator(coreComponents) + valInfoCreator := createValidatorInfoCreator(coreComponents, dataComponents, bootstrapComponents.ShardCoordinator()) + stakingToPeer := createSCToProtocol(coreComponents, stateComponents, dataComponents.Datapool().CurrentBlockTxs()) + + args := blproc.ArgMetaProcessor{ + ArgBaseProcessor: blproc.ArgBaseProcessor{ + CoreComponents: coreComponents, + DataComponents: dataComponents, + BootstrapComponents: bootstrapComponents, + StatusComponents: statusComponents, + StatusCoreComponents: &factory2.StatusCoreComponentsStub{ + AppStatusHandlerField: 
&statusHandlerMock.AppStatusHandlerStub{}, + }, + AccountsDB: accountsDb, + ForkDetector: &integrationMocks.ForkDetectorStub{}, + NodesCoordinator: nc, + FeeHandler: postprocess.NewFeeAccumulator(), + RequestHandler: &testscommon.RequestHandlerStub{}, + BlockChainHook: blockChainHook, + TxCoordinator: txCoordinator, + EpochStartTrigger: epochStartHandler, + HeaderValidator: headerValidator, + BootStorer: bootStorer, + BlockTracker: blockTracker, + BlockSizeThrottler: &mock.BlockSizeThrottlerStub{}, + HistoryRepository: &dblookupext.HistoryRepositoryStub{}, + VMContainersFactory: metaVMFactory, + VmContainer: vmContainer, + GasHandler: &mock.GasHandlerMock{}, + ScheduledTxsExecutionHandler: &testscommon.ScheduledTxsExecutionStub{}, + ScheduledMiniBlocksEnableEpoch: 10000, + ProcessedMiniBlocksTracker: processedMb.NewProcessedMiniBlocksTracker(), + OutportDataProvider: &outport.OutportDataProviderStub{}, + ReceiptsRepository: &testscommon.ReceiptsRepositoryStub{}, + ManagedPeersHolder: &testscommon.ManagedPeersHolderStub{}, + BlockProcessingCutoffHandler: &testscommon.BlockProcessingCutoffStub{}, + SentSignaturesTracker: &testscommon.SentSignatureTrackerStub{}, + }, + SCToProtocol: stakingToPeer, + PendingMiniBlocksHandler: &mock.PendingMiniBlocksHandlerStub{}, + EpochStartDataCreator: epochStartDataCreator, + EpochEconomics: &mock.EpochEconomicsStub{}, + EpochRewardsCreator: &testscommon.RewardsCreatorStub{ + GetLocalTxCacheCalled: func() epochStart.TransactionCacher { + return dataComponents.Datapool().CurrentBlockTxs() + }, + }, + EpochValidatorInfoCreator: valInfoCreator, + ValidatorStatisticsProcessor: validatorsInfoCreator, + EpochSystemSCProcessor: systemSCProcessor, + } + + metaProc, _ := blproc.NewMetaProcessor(args) + return metaProc +} + +func createValidatorInfoCreator( + coreComponents factory.CoreComponentsHolder, + dataComponents factory.DataComponentsHolder, + shardCoordinator sharding.Coordinator, +) process.EpochStartValidatorInfoCreator { + mbStorer, _ := dataComponents.StorageService().GetStorer(dataRetriever.MiniBlockUnit) + + args := metachain.ArgsNewValidatorInfoCreator{ + ShardCoordinator: shardCoordinator, + MiniBlockStorage: mbStorer, + Hasher: coreComponents.Hasher(), + Marshalizer: coreComponents.InternalMarshalizer(), + DataPool: dataComponents.Datapool(), + EnableEpochsHandler: coreComponents.EnableEpochsHandler(), + ValidatorInfoStorage: integrationtests.CreateMemUnit(), + } + + valInfoCreator, _ := metachain.NewValidatorInfoCreator(args) + return valInfoCreator +} + +func createEpochStartDataCreator( + coreComponents factory.CoreComponentsHolder, + dataComponents factory.DataComponentsHolder, + shardCoordinator sharding.Coordinator, + epochStartTrigger process.EpochStartTriggerHandler, + blockTracker process.BlockTracker, +) process.EpochStartDataCreator { + argsEpochStartDataCreator := metachain.ArgsNewEpochStartData{ + Marshalizer: coreComponents.InternalMarshalizer(), + Hasher: coreComponents.Hasher(), + Store: dataComponents.StorageService(), + DataPool: dataComponents.Datapool(), + BlockTracker: blockTracker, + ShardCoordinator: shardCoordinator, + EpochStartTrigger: epochStartTrigger, + RequestHandler: &testscommon.RequestHandlerStub{}, + GenesisEpoch: 0, + EnableEpochsHandler: coreComponents.EnableEpochsHandler(), + } + epochStartDataCreator, _ := metachain.NewEpochStartData(argsEpochStartDataCreator) + return epochStartDataCreator +} + +func createBlockTracker( + genesisMetaHeader data.HeaderHandler, + shardCoordinator sharding.Coordinator, +) 
process.BlockTracker { + genesisBlocks := make(map[uint32]data.HeaderHandler) + for ShardID := uint32(0); ShardID < shardCoordinator.NumberOfShards(); ShardID++ { + genesisBlocks[ShardID] = createGenesisBlock(ShardID) + } + + genesisBlocks[core.MetachainShardId] = genesisMetaHeader + return mock.NewBlockTrackerMock(shardCoordinator, genesisBlocks) +} + +func createGenesisBlock(shardID uint32) *block.Header { + rootHash := []byte("roothash") + return &block.Header{ + Nonce: 0, + Round: 0, + Signature: rootHash, + RandSeed: rootHash, + PrevRandSeed: rootHash, + ShardID: shardID, + PubKeysBitmap: rootHash, + RootHash: rootHash, + PrevHash: rootHash, + AccumulatedFees: big.NewInt(0), + DeveloperFees: big.NewInt(0), + } +} + +func createGenesisMetaBlock() *block.MetaBlock { + rootHash := []byte("roothash") + return &block.MetaBlock{ + Nonce: 0, + Round: 0, + Signature: rootHash, + RandSeed: rootHash, + PrevRandSeed: rootHash, + PubKeysBitmap: rootHash, + RootHash: rootHash, + PrevHash: rootHash, + AccumulatedFees: big.NewInt(0), + DeveloperFees: big.NewInt(0), + AccumulatedFeesInEpoch: big.NewInt(0), + DevFeesInEpoch: big.NewInt(0), + } +} + +func createHeaderValidator(coreComponents factory.CoreComponentsHolder) epochStart.HeaderValidator { + argsHeaderValidator := blproc.ArgsHeaderValidator{ + Hasher: coreComponents.Hasher(), + Marshalizer: coreComponents.InternalMarshalizer(), + } + headerValidator, _ := blproc.NewHeaderValidator(argsHeaderValidator) + return headerValidator +} + +func createSCToProtocol( + coreComponents factory.CoreComponentsHolder, + stateComponents factory.StateComponentsHandler, + txCacher dataRetriever.TransactionCacher, +) process.SmartContractToProtocolHandler { + args := scToProtocol.ArgStakingToPeer{ + PubkeyConv: coreComponents.AddressPubKeyConverter(), + Hasher: coreComponents.Hasher(), + Marshalizer: coreComponents.InternalMarshalizer(), + PeerState: stateComponents.PeerAccounts(), + BaseState: stateComponents.AccountsAdapter(), + ArgParser: smartContract.NewArgumentParser(), + CurrTxs: txCacher, + RatingsData: &mock.RatingsInfoMock{}, + EnableEpochsHandler: coreComponents.EnableEpochsHandler(), + } + stakingToPeer, _ := scToProtocol.NewStakingToPeer(args) + return stakingToPeer +} diff --git a/integrationTests/vm/staking/nodesCoordiantorCreator.go b/integrationTests/vm/staking/nodesCoordiantorCreator.go new file mode 100644 index 00000000000..27a54719521 --- /dev/null +++ b/integrationTests/vm/staking/nodesCoordiantorCreator.go @@ -0,0 +1,232 @@ +package staking + +import ( + "math/big" + + "github.com/multiversx/mx-chain-core-go/core" + "github.com/multiversx/mx-chain-core-go/marshal" + "github.com/multiversx/mx-chain-go/common" + "github.com/multiversx/mx-chain-go/config" + "github.com/multiversx/mx-chain-go/dataRetriever/dataPool" + "github.com/multiversx/mx-chain-go/factory" + integrationMocks "github.com/multiversx/mx-chain-go/integrationTests/mock" + "github.com/multiversx/mx-chain-go/sharding/nodesCoordinator" + "github.com/multiversx/mx-chain-go/state/accounts" + "github.com/multiversx/mx-chain-go/storage" + nodesSetupMock "github.com/multiversx/mx-chain-go/testscommon/genesisMocks" + "github.com/multiversx/mx-chain-go/testscommon/stakingcommon" + "github.com/multiversx/mx-chain-storage-go/lrucache" +) + +const ( + shuffleBetweenShards = false + adaptivity = false + hysteresis = float32(0.2) + initialRating = 5 +) + +func createNodesCoordinator( + eligibleMap map[uint32][]nodesCoordinator.Validator, + waitingMap map[uint32][]nodesCoordinator.Validator, 
+ numOfMetaNodes uint32, + numOfShards uint32, + numOfEligibleNodesPerShard uint32, + shardConsensusGroupSize int, + metaConsensusGroupSize int, + coreComponents factory.CoreComponentsHolder, + bootStorer storage.Storer, + nodesCoordinatorRegistryFactory nodesCoordinator.NodesCoordinatorRegistryFactory, + maxNodesConfig []config.MaxNodesChangeConfig, +) nodesCoordinator.NodesCoordinator { + shufflerArgs := &nodesCoordinator.NodesShufflerArgs{ + NodesShard: numOfEligibleNodesPerShard, + NodesMeta: numOfMetaNodes, + Hysteresis: hysteresis, + Adaptivity: adaptivity, + ShuffleBetweenShards: shuffleBetweenShards, + MaxNodesEnableConfig: maxNodesConfig, + EnableEpochs: config.EnableEpochs{ + StakingV4Step2EnableEpoch: stakingV4Step2EnableEpoch, + StakingV4Step3EnableEpoch: stakingV4Step3EnableEpoch, + }, + EnableEpochsHandler: coreComponents.EnableEpochsHandler(), + } + nodeShuffler, _ := nodesCoordinator.NewHashValidatorsShuffler(shufflerArgs) + cache, _ := lrucache.NewCache(10000) + argumentsNodesCoordinator := nodesCoordinator.ArgNodesCoordinator{ + ShardConsensusGroupSize: shardConsensusGroupSize, + MetaConsensusGroupSize: metaConsensusGroupSize, + Marshalizer: coreComponents.InternalMarshalizer(), + Hasher: coreComponents.Hasher(), + ShardIDAsObserver: core.MetachainShardId, + NbShards: numOfShards, + EligibleNodes: eligibleMap, + WaitingNodes: waitingMap, + SelfPublicKey: eligibleMap[core.MetachainShardId][0].PubKey(), + ConsensusGroupCache: cache, + ShuffledOutHandler: &integrationMocks.ShuffledOutHandlerStub{}, + ChanStopNode: coreComponents.ChanStopNodeProcess(), + IsFullArchive: false, + Shuffler: nodeShuffler, + BootStorer: bootStorer, + EpochStartNotifier: coreComponents.EpochStartNotifierWithConfirm(), + NodesCoordinatorRegistryFactory: nodesCoordinatorRegistryFactory, + NodeTypeProvider: coreComponents.NodeTypeProvider(), + EnableEpochsHandler: coreComponents.EnableEpochsHandler(), + ValidatorInfoCacher: dataPool.NewCurrentEpochValidatorInfoPool(), + GenesisNodesSetupHandler: &nodesSetupMock.NodesSetupStub{}, + } + + baseNodesCoordinator, _ := nodesCoordinator.NewIndexHashedNodesCoordinator(argumentsNodesCoordinator) + nodesCoord, _ := nodesCoordinator.NewIndexHashedNodesCoordinatorWithRater(baseNodesCoordinator, coreComponents.Rater()) + return nodesCoord +} + +func createGenesisNodes( + numOfMetaNodes uint32, + numOfShards uint32, + numOfNodesPerShard uint32, + numOfWaitingNodesPerShard uint32, + marshaller marshal.Marshalizer, + stateComponents factory.StateComponentsHandler, +) (map[uint32][]nodesCoordinator.Validator, map[uint32][]nodesCoordinator.Validator) { + addressStartIdx := uint32(0) + eligibleGenesisNodes := generateGenesisNodeInfoMap(numOfMetaNodes, numOfShards, numOfNodesPerShard, addressStartIdx) + eligibleValidators, _ := nodesCoordinator.NodesInfoToValidators(eligibleGenesisNodes) + + addressStartIdx = numOfMetaNodes + numOfShards*numOfNodesPerShard + waitingGenesisNodes := generateGenesisNodeInfoMap(numOfWaitingNodesPerShard, numOfShards, numOfWaitingNodesPerShard, addressStartIdx) + waitingValidators, _ := nodesCoordinator.NodesInfoToValidators(waitingGenesisNodes) + + registerValidators(eligibleValidators, stateComponents, marshaller, common.EligibleList) + registerValidators(waitingValidators, stateComponents, marshaller, common.WaitingList) + + return eligibleValidators, waitingValidators +} + +func createGenesisNodesWithCustomConfig( + owners map[string]*OwnerStats, + marshaller marshal.Marshalizer, + stateComponents factory.StateComponentsHandler, +) 
(map[uint32][]nodesCoordinator.Validator, map[uint32][]nodesCoordinator.Validator) { + eligibleGenesis := make(map[uint32][]nodesCoordinator.GenesisNodeInfoHandler) + waitingGenesis := make(map[uint32][]nodesCoordinator.GenesisNodeInfoHandler) + + for owner, ownerStats := range owners { + registerOwnerKeys( + []byte(owner), + ownerStats.EligibleBlsKeys, + ownerStats.TotalStake, + stateComponents, + marshaller, + common.EligibleList, + eligibleGenesis, + ) + + registerOwnerKeys( + []byte(owner), + ownerStats.WaitingBlsKeys, + ownerStats.TotalStake, + stateComponents, + marshaller, + common.WaitingList, + waitingGenesis, + ) + } + + eligible, _ := nodesCoordinator.NodesInfoToValidators(eligibleGenesis) + waiting, _ := nodesCoordinator.NodesInfoToValidators(waitingGenesis) + + return eligible, waiting +} + +func generateGenesisNodeInfoMap( + numOfMetaNodes uint32, + numOfShards uint32, + numOfNodesPerShard uint32, + addressStartIdx uint32, +) map[uint32][]nodesCoordinator.GenesisNodeInfoHandler { + validatorsMap := make(map[uint32][]nodesCoordinator.GenesisNodeInfoHandler) + id := addressStartIdx + for shardId := uint32(0); shardId < numOfShards; shardId++ { + for n := uint32(0); n < numOfNodesPerShard; n++ { + addr := generateAddress(id) + validator := integrationMocks.NewNodeInfo(addr, addr, shardId, initialRating) + validatorsMap[shardId] = append(validatorsMap[shardId], validator) + id++ + } + } + + for n := uint32(0); n < numOfMetaNodes; n++ { + addr := generateAddress(id) + validator := integrationMocks.NewNodeInfo(addr, addr, core.MetachainShardId, initialRating) + validatorsMap[core.MetachainShardId] = append(validatorsMap[core.MetachainShardId], validator) + id++ + } + + return validatorsMap +} + +func registerOwnerKeys( + owner []byte, + ownerPubKeys map[uint32][][]byte, + totalStake *big.Int, + stateComponents factory.StateComponentsHolder, + marshaller marshal.Marshalizer, + list common.PeerType, + allNodes map[uint32][]nodesCoordinator.GenesisNodeInfoHandler, +) { + for shardID, pubKeysInShard := range ownerPubKeys { + for _, pubKey := range pubKeysInShard { + validator := integrationMocks.NewNodeInfo(pubKey, pubKey, shardID, initialRating) + allNodes[shardID] = append(allNodes[shardID], validator) + + savePeerAcc(stateComponents, pubKey, shardID, list) + } + stakingcommon.RegisterValidatorKeys( + stateComponents.AccountsAdapter(), + owner, + owner, + pubKeysInShard, + totalStake, + marshaller, + ) + } +} + +func registerValidators( + validators map[uint32][]nodesCoordinator.Validator, + stateComponents factory.StateComponentsHolder, + marshaller marshal.Marshalizer, + list common.PeerType, +) { + for shardID, validatorsInShard := range validators { + for idx, val := range validatorsInShard { + pubKey := val.PubKey() + savePeerAcc(stateComponents, pubKey, shardID, list) + + stakingcommon.RegisterValidatorKeys( + stateComponents.AccountsAdapter(), + pubKey, + pubKey, + [][]byte{pubKey}, + big.NewInt(nodePrice+int64(idx)), + marshaller, + ) + } + } +} + +func savePeerAcc( + stateComponents factory.StateComponentsHolder, + pubKey []byte, + shardID uint32, + list common.PeerType, +) { + peerAccount, _ := accounts.NewPeerAccount(pubKey) + peerAccount.SetTempRating(initialRating) + peerAccount.ShardId = shardID + peerAccount.BLSPublicKey = pubKey + peerAccount.List = string(list) + _ = stateComponents.PeerAccounts().SaveAccount(peerAccount) +} diff --git a/integrationTests/vm/staking/stakingQueue.go b/integrationTests/vm/staking/stakingQueue.go new file mode 100644 index 
00000000000..7544e18cf40 --- /dev/null +++ b/integrationTests/vm/staking/stakingQueue.go @@ -0,0 +1,121 @@ +package staking + +import ( + "math/big" + + "github.com/multiversx/mx-chain-core-go/marshal" + "github.com/multiversx/mx-chain-go/state" + "github.com/multiversx/mx-chain-go/testscommon/stakingcommon" + "github.com/multiversx/mx-chain-go/vm" + "github.com/multiversx/mx-chain-go/vm/systemSmartContracts" +) + +func createStakingQueue( + numOfNodesInStakingQueue uint32, + totalNumOfNodes uint32, + marshaller marshal.Marshalizer, + accountsAdapter state.AccountsAdapter, +) [][]byte { + ownerWaitingNodes := make([][]byte, 0) + if numOfNodesInStakingQueue == 0 { + return ownerWaitingNodes + } + + owner := generateAddress(totalNumOfNodes) + totalNumOfNodes += 1 + for i := totalNumOfNodes; i < totalNumOfNodes+numOfNodesInStakingQueue; i++ { + ownerWaitingNodes = append(ownerWaitingNodes, generateAddress(i)) + } + + stakingcommon.AddKeysToWaitingList( + accountsAdapter, + ownerWaitingNodes, + marshaller, + owner, + owner, + ) + + stakingcommon.RegisterValidatorKeys( + accountsAdapter, + owner, + owner, + ownerWaitingNodes, + big.NewInt(int64(2*nodePrice*numOfNodesInStakingQueue)), + marshaller, + ) + + return ownerWaitingNodes +} + +func createStakingQueueCustomNodes( + owners map[string]*OwnerStats, + marshaller marshal.Marshalizer, + accountsAdapter state.AccountsAdapter, +) [][]byte { + queue := make([][]byte, 0) + + for owner, ownerStats := range owners { + stakingcommon.RegisterValidatorKeys( + accountsAdapter, + []byte(owner), + []byte(owner), + ownerStats.StakingQueueKeys, + ownerStats.TotalStake, + marshaller, + ) + + stakingcommon.AddKeysToWaitingList( + accountsAdapter, + ownerStats.StakingQueueKeys, + marshaller, + []byte(owner), + []byte(owner), + ) + + queue = append(queue, ownerStats.StakingQueueKeys...) 
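+ // NOTE: the owners map is iterated here, so the ordering of keys in the returned queue is nondeterministic; callers compare it with order-insensitive helpers.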
+ } + + return queue +} + +func (tmp *TestMetaProcessor) getWaitingListKeys() [][]byte { + stakingSCAcc := stakingcommon.LoadUserAccount(tmp.AccountsAdapter, vm.StakingSCAddress) + + waitingList := &systemSmartContracts.WaitingList{ + FirstKey: make([]byte, 0), + LastKey: make([]byte, 0), + Length: 0, + LastJailedKey: make([]byte, 0), + } + marshaledData, _, _ := stakingSCAcc.RetrieveValue([]byte("waitingList")) + if len(marshaledData) == 0 { + return nil + } + + err := tmp.Marshaller.Unmarshal(waitingList, marshaledData) + if err != nil { + return nil + } + + index := uint32(1) + nextKey := make([]byte, len(waitingList.FirstKey)) + copy(nextKey, waitingList.FirstKey) + + allPubKeys := make([][]byte, 0) + for len(nextKey) != 0 && index <= waitingList.Length { + allPubKeys = append(allPubKeys, nextKey[2:]) // remove "w_" prefix + + element, errGet := stakingcommon.GetWaitingListElement(stakingSCAcc, tmp.Marshaller, nextKey) + if errGet != nil { + return nil + } + + nextKey = make([]byte, len(element.NextKey)) + if len(element.NextKey) == 0 { + break + } + index++ + copy(nextKey, element.NextKey) + } + return allPubKeys +} diff --git a/integrationTests/vm/staking/stakingV4_test.go b/integrationTests/vm/staking/stakingV4_test.go new file mode 100644 index 00000000000..45cc1bcd85e --- /dev/null +++ b/integrationTests/vm/staking/stakingV4_test.go @@ -0,0 +1,1484 @@ +package staking + +import ( + "bytes" + "math/big" + "testing" + + "github.com/multiversx/mx-chain-core-go/core" + "github.com/multiversx/mx-chain-core-go/marshal" + "github.com/multiversx/mx-chain-go/config" + "github.com/multiversx/mx-chain-go/state" + "github.com/multiversx/mx-chain-go/testscommon/stakingcommon" + "github.com/multiversx/mx-chain-go/vm" + "github.com/multiversx/mx-chain-go/vm/systemSmartContracts" + "github.com/stretchr/testify/require" +) + +func requireSliceContains(t *testing.T, s1, s2 [][]byte) { + for _, elemInS2 := range s2 { + require.Contains(t, s1, elemInS2) + } +} + +func requireSliceContainsNumOfElements(t *testing.T, s1, s2 [][]byte, numOfElements int) { + foundCt := 0 + for _, elemInS2 := range s2 { + if searchInSlice(s1, elemInS2) { + foundCt++ + } + } + + require.Equal(t, numOfElements, foundCt) +} + +func requireSameSliceDifferentOrder(t *testing.T, s1, s2 [][]byte) { + require.Equal(t, len(s1), len(s2)) + + for _, elemInS1 := range s1 { + require.Contains(t, s2, elemInS1) + } +} + +func searchInSlice(s1 [][]byte, s2 []byte) bool { + for _, elemInS1 := range s1 { + if bytes.Equal(elemInS1, s2) { + return true + } + } + + return false +} + +func searchInMap(validatorMap map[uint32][][]byte, pk []byte) bool { + for _, validatorsInShard := range validatorMap { + for _, val := range validatorsInShard { + if bytes.Equal(val, pk) { + return true + } + } + } + return false +} + +func requireMapContains(t *testing.T, m map[uint32][][]byte, s [][]byte) { + for _, elemInSlice := range s { + require.True(t, searchInMap(m, elemInSlice)) + } +} + +func requireMapDoesNotContain(t *testing.T, m map[uint32][][]byte, s [][]byte) { + for _, elemInSlice := range s { + require.False(t, searchInMap(m, elemInSlice)) + } +} + +// remove will remove the item from slice without keeping the order of the original slice +func remove(slice [][]byte, elem []byte) [][]byte { + ret := slice + for i, e := range slice { + if bytes.Equal(elem, e) { + ret[i] = ret[len(slice)-1] + return ret[:len(slice)-1] + } + } + + return ret +} + +func getIntersection(slice1, slice2 [][]byte) [][]byte { + ret := make([][]byte, 0) + for _, value := 
range slice2 { + if searchInSlice(slice1, value) { + copiedVal := make([]byte, len(value)) + copy(copiedVal, value) + ret = append(ret, copiedVal) + } + } + + return ret +} + +func unStake(t *testing.T, owner []byte, accountsDB state.AccountsAdapter, marshaller marshal.Marshalizer, stake *big.Int) { + validatorSC := stakingcommon.LoadUserAccount(accountsDB, vm.ValidatorSCAddress) + ownerStoredData, _, err := validatorSC.RetrieveValue(owner) + require.Nil(t, err) + + validatorData := &systemSmartContracts.ValidatorDataV2{} + err = marshaller.Unmarshal(validatorData, ownerStoredData) + require.Nil(t, err) + + validatorData.TotalStakeValue.Sub(validatorData.TotalStakeValue, stake) + marshaledData, _ := marshaller.Marshal(validatorData) + err = validatorSC.SaveKeyValue(owner, marshaledData) + require.Nil(t, err) + + err = accountsDB.SaveAccount(validatorSC) + require.Nil(t, err) + _, err = accountsDB.Commit() + require.Nil(t, err) +} + +type configNum struct { + eligible map[uint32]int + waiting map[uint32]int + leaving map[uint32]int + shuffledOut map[uint32]int + queue int + auction int + new int +} + +func checkConfig(t *testing.T, expectedConfig *configNum, nodesConfig nodesConfig) { + checkNumNodes(t, expectedConfig.eligible, nodesConfig.eligible) + checkNumNodes(t, expectedConfig.waiting, nodesConfig.waiting) + checkNumNodes(t, expectedConfig.leaving, nodesConfig.leaving) + checkNumNodes(t, expectedConfig.shuffledOut, nodesConfig.shuffledOut) + + require.Equal(t, expectedConfig.queue, len(nodesConfig.queue)) + require.Equal(t, expectedConfig.auction, len(nodesConfig.auction)) + require.Equal(t, expectedConfig.new, len(nodesConfig.new)) +} + +func checkNumNodes(t *testing.T, expectedNumNodes map[uint32]int, actualNodes map[uint32][][]byte) { + for shardID, numNodesInShard := range expectedNumNodes { + require.Equal(t, numNodesInShard, len(actualNodes[shardID])) + } +} + +func checkShuffledOutNodes(t *testing.T, currNodesConfig, prevNodesConfig nodesConfig, numShuffledOutNodes int, numRemainingEligible int) { + // Shuffled nodes from previous eligible are sent to waiting and previous waiting list nodes are replacing shuffled nodes + requireSliceContainsNumOfElements(t, getAllPubKeys(currNodesConfig.eligible), getAllPubKeys(prevNodesConfig.waiting), numShuffledOutNodes) + requireSliceContainsNumOfElements(t, getAllPubKeys(currNodesConfig.eligible), getAllPubKeys(prevNodesConfig.eligible), numRemainingEligible) + requireSliceContainsNumOfElements(t, getAllPubKeys(currNodesConfig.waiting), getAllPubKeys(prevNodesConfig.eligible), numShuffledOutNodes) +} + +func checkStakingV4EpochChangeFlow( + t *testing.T, + currNodesConfig, prevNodesConfig nodesConfig, + numOfShuffledOut, numOfUnselectedNodesFromAuction, numOfSelectedNodesFromAuction int) { + + // Nodes which are now in eligible are from previous waiting list + requireSliceContainsNumOfElements(t, getAllPubKeys(currNodesConfig.eligible), getAllPubKeys(prevNodesConfig.waiting), numOfShuffledOut) + + // New auction list also contains unselected nodes from previous auction list + requireSliceContainsNumOfElements(t, currNodesConfig.auction, prevNodesConfig.auction, numOfUnselectedNodesFromAuction) + + // All shuffled out are from previous eligible config + requireMapContains(t, prevNodesConfig.eligible, getAllPubKeys(currNodesConfig.shuffledOut)) + + // All shuffled out are now in auction + requireSliceContains(t, currNodesConfig.auction, getAllPubKeys(currNodesConfig.shuffledOut)) + + // Nodes which have been selected from previous auction 
list are now in waiting + requireSliceContainsNumOfElements(t, getAllPubKeys(currNodesConfig.waiting), prevNodesConfig.auction, numOfSelectedNodesFromAuction) +} + +func TestStakingV4(t *testing.T) { + t.Parallel() + + numOfMetaNodes := uint32(400) + numOfShards := uint32(3) + numOfEligibleNodesPerShard := uint32(400) + numOfWaitingNodesPerShard := uint32(400) + numOfNodesToShufflePerShard := uint32(80) + shardConsensusGroupSize := 266 + metaConsensusGroupSize := 266 + numOfNodesInStakingQueue := uint32(60) + + totalEligible := int(numOfEligibleNodesPerShard*numOfShards) + int(numOfMetaNodes) // 1600 + totalWaiting := int(numOfWaitingNodesPerShard*numOfShards) + int(numOfMetaNodes) // 1600 + + node := NewTestMetaProcessor( + numOfMetaNodes, + numOfShards, + numOfEligibleNodesPerShard, + numOfWaitingNodesPerShard, + numOfNodesToShufflePerShard, + shardConsensusGroupSize, + metaConsensusGroupSize, + numOfNodesInStakingQueue, + ) + node.EpochStartTrigger.SetRoundsPerEpoch(4) + + // 1. Check initial config is correct + initialNodes := node.NodesConfig + require.Len(t, getAllPubKeys(initialNodes.eligible), totalEligible) + require.Len(t, getAllPubKeys(initialNodes.waiting), totalWaiting) + require.Len(t, initialNodes.queue, int(numOfNodesInStakingQueue)) + require.Empty(t, initialNodes.shuffledOut) + require.Empty(t, initialNodes.auction) + + // 2. Check config after staking v4 initialization + node.Process(t, 5) + nodesConfigStakingV4Step1 := node.NodesConfig + require.Len(t, getAllPubKeys(nodesConfigStakingV4Step1.eligible), totalEligible) + require.Len(t, getAllPubKeys(nodesConfigStakingV4Step1.waiting), totalWaiting) + require.Empty(t, nodesConfigStakingV4Step1.queue) + require.Empty(t, nodesConfigStakingV4Step1.shuffledOut) + requireSameSliceDifferentOrder(t, initialNodes.queue, nodesConfigStakingV4Step1.auction) + + // 3. 
Check config after first staking v4 epoch, WITHOUT distribution from auction -> waiting + node.Process(t, 6) + nodesConfigStakingV4Step2 := node.NodesConfig + require.Len(t, getAllPubKeys(nodesConfigStakingV4Step2.eligible), totalEligible) // 1600 + + numOfShuffledOut := int((numOfShards + 1) * numOfNodesToShufflePerShard) // 320 + require.Len(t, getAllPubKeys(nodesConfigStakingV4Step2.shuffledOut), numOfShuffledOut) + + newWaiting := totalWaiting - numOfShuffledOut // 1280 (1600 - 320) + require.Len(t, getAllPubKeys(nodesConfigStakingV4Step2.waiting), newWaiting) + + // 380 (320 from shuffled out + 60 from initial staking queue -> auction from stakingV4 init) + auctionListSize := numOfShuffledOut + len(nodesConfigStakingV4Step1.auction) + require.Len(t, nodesConfigStakingV4Step2.auction, auctionListSize) + requireSliceContains(t, nodesConfigStakingV4Step2.auction, nodesConfigStakingV4Step1.auction) + + require.Empty(t, nodesConfigStakingV4Step2.queue) + require.Empty(t, nodesConfigStakingV4Step2.leaving) + + // 320 nodes which are now in eligible are from previous waiting list + requireSliceContainsNumOfElements(t, getAllPubKeys(nodesConfigStakingV4Step2.eligible), getAllPubKeys(nodesConfigStakingV4Step1.waiting), numOfShuffledOut) + + // All shuffled out are from previous staking v4 init eligible + requireMapContains(t, nodesConfigStakingV4Step1.eligible, getAllPubKeys(nodesConfigStakingV4Step2.shuffledOut)) + + // All shuffled out are in auction + requireSliceContains(t, nodesConfigStakingV4Step2.auction, getAllPubKeys(nodesConfigStakingV4Step2.shuffledOut)) + + // No auction node from previous epoch has been moved to waiting + requireMapDoesNotContain(t, nodesConfigStakingV4Step2.waiting, nodesConfigStakingV4Step1.auction) + + epochs := 0 + prevConfig := nodesConfigStakingV4Step2 + numOfSelectedNodesFromAuction := numOfShuffledOut // 320, since we will always fill shuffled out nodes with this config + numOfUnselectedNodesFromAuction := auctionListSize - numOfShuffledOut // 60 = 380 - 320 + for epochs < 10 { + node.Process(t, 5) + newNodeConfig := node.NodesConfig + + require.Len(t, getAllPubKeys(newNodeConfig.eligible), totalEligible) // 1600 + require.Len(t, getAllPubKeys(newNodeConfig.waiting), newWaiting) // 1280 + require.Len(t, getAllPubKeys(newNodeConfig.shuffledOut), numOfShuffledOut) // 320 + require.Len(t, newNodeConfig.auction, auctionListSize) // 380 + require.Empty(t, newNodeConfig.queue) + require.Empty(t, newNodeConfig.leaving) + + checkStakingV4EpochChangeFlow(t, newNodeConfig, prevConfig, numOfShuffledOut, numOfUnselectedNodesFromAuction, numOfSelectedNodesFromAuction) + prevConfig = newNodeConfig + epochs++ + } +} + +func TestStakingV4MetaProcessor_ProcessMultipleNodesWithSameSetupExpectSameRootHash(t *testing.T) { + t.Parallel() + + numOfMetaNodes := uint32(6) + numOfShards := uint32(3) + numOfEligibleNodesPerShard := uint32(6) + numOfWaitingNodesPerShard := uint32(6) + numOfNodesToShufflePerShard := uint32(2) + shardConsensusGroupSize := 2 + metaConsensusGroupSize := 2 + numOfNodesInStakingQueue := uint32(2) + + nodes := make([]*TestMetaProcessor, 0, numOfMetaNodes) + for i := uint32(0); i < numOfMetaNodes; i++ { + nodes = append(nodes, NewTestMetaProcessor( + numOfMetaNodes, + numOfShards, + numOfEligibleNodesPerShard, + numOfWaitingNodesPerShard, + numOfNodesToShufflePerShard, + shardConsensusGroupSize, + metaConsensusGroupSize, + numOfNodesInStakingQueue, + )) + nodes[i].EpochStartTrigger.SetRoundsPerEpoch(4) + } + + numOfEpochs := uint32(15) + rootHashes := 
make(map[uint32][][]byte) + for currEpoch := uint32(1); currEpoch <= numOfEpochs; currEpoch++ { + for _, node := range nodes { + rootHash, _ := node.ValidatorStatistics.RootHash() + rootHashes[currEpoch] = append(rootHashes[currEpoch], rootHash) + + node.Process(t, 5) + require.Equal(t, currEpoch, node.EpochStartTrigger.Epoch()) + } + } + + for _, rootHashesInEpoch := range rootHashes { + firstNodeRootHashInEpoch := rootHashesInEpoch[0] + for _, rootHash := range rootHashesInEpoch { + require.Equal(t, firstNodeRootHashInEpoch, rootHash) + } + } +} + +func TestStakingV4_UnStakeNodesWithNotEnoughFunds(t *testing.T) { + t.Parallel() + + pubKeys := generateAddresses(0, 20) + + // Owner1 has 8 nodes, but enough stake for just 7 nodes. At the end of the epoch(staking v4 init), + // the last node from staking queue should be unStaked + owner1 := "owner1" + owner1Stats := &OwnerStats{ + EligibleBlsKeys: map[uint32][][]byte{ + core.MetachainShardId: pubKeys[:3], + }, + WaitingBlsKeys: map[uint32][][]byte{ + 0: pubKeys[3:6], + }, + StakingQueueKeys: pubKeys[6:8], + TotalStake: big.NewInt(7 * nodePrice), + } + + // Owner2 has 6 nodes, but enough stake for just 5 nodes. At the end of the epoch(staking v4 init), + // one node from waiting list should be unStaked + owner2 := "owner2" + owner2Stats := &OwnerStats{ + EligibleBlsKeys: map[uint32][][]byte{ + 0: pubKeys[8:11], + }, + WaitingBlsKeys: map[uint32][][]byte{ + core.MetachainShardId: pubKeys[11:14], + }, + TotalStake: big.NewInt(5 * nodePrice), + } + + // Owner3 has 2 nodes in staking queue with topUp = nodePrice + owner3 := "owner3" + owner3Stats := &OwnerStats{ + StakingQueueKeys: pubKeys[14:16], + TotalStake: big.NewInt(3 * nodePrice), + } + + // Owner4 has 1 node in staking queue with topUp = nodePrice + owner4 := "owner4" + owner4Stats := &OwnerStats{ + StakingQueueKeys: pubKeys[16:17], + TotalStake: big.NewInt(2 * nodePrice), + } + + cfg := &InitialNodesConfig{ + MetaConsensusGroupSize: 2, + ShardConsensusGroupSize: 2, + MinNumberOfEligibleShardNodes: 3, + MinNumberOfEligibleMetaNodes: 3, + NumOfShards: 1, + Owners: map[string]*OwnerStats{ + owner1: owner1Stats, + owner2: owner2Stats, + owner3: owner3Stats, + owner4: owner4Stats, + }, + MaxNodesChangeConfig: []config.MaxNodesChangeConfig{ + { + EpochEnable: 0, + MaxNumNodes: 12, + NodesToShufflePerShard: 1, + }, + { + EpochEnable: stakingV4Step3EnableEpoch, + MaxNumNodes: 10, + NodesToShufflePerShard: 1, + }, + }, + } + node := NewTestMetaProcessorWithCustomNodes(cfg) + node.EpochStartTrigger.SetRoundsPerEpoch(4) + + // 1. 
Check initial config is correct + currNodesConfig := node.NodesConfig + require.Len(t, getAllPubKeys(currNodesConfig.eligible), 6) + require.Len(t, getAllPubKeys(currNodesConfig.waiting), 6) + require.Len(t, currNodesConfig.eligible[core.MetachainShardId], 3) + require.Len(t, currNodesConfig.waiting[core.MetachainShardId], 3) + require.Len(t, currNodesConfig.eligible[0], 3) + require.Len(t, currNodesConfig.waiting[0], 3) + + requireSliceContainsNumOfElements(t, currNodesConfig.eligible[core.MetachainShardId], owner1Stats.EligibleBlsKeys[core.MetachainShardId], 3) + requireSliceContainsNumOfElements(t, currNodesConfig.waiting[core.MetachainShardId], owner2Stats.WaitingBlsKeys[core.MetachainShardId], 3) + requireSliceContainsNumOfElements(t, currNodesConfig.eligible[0], owner2Stats.EligibleBlsKeys[0], 3) + requireSliceContainsNumOfElements(t, currNodesConfig.waiting[0], owner1Stats.WaitingBlsKeys[0], 3) + + owner1StakingQueue := owner1Stats.StakingQueueKeys + owner3StakingQueue := owner3Stats.StakingQueueKeys + owner4StakingQueue := owner4Stats.StakingQueueKeys + queue := make([][]byte, 0) + queue = append(queue, owner1StakingQueue...) + queue = append(queue, owner3StakingQueue...) + queue = append(queue, owner4StakingQueue...) + require.Len(t, currNodesConfig.queue, 5) + requireSameSliceDifferentOrder(t, currNodesConfig.queue, queue) + + require.Empty(t, currNodesConfig.shuffledOut) + require.Empty(t, currNodesConfig.auction) + + // 2. Check config after staking v4 initialization + node.Process(t, 5) + currNodesConfig = node.NodesConfig + require.Len(t, getAllPubKeys(currNodesConfig.eligible), 6) + require.Len(t, getAllPubKeys(currNodesConfig.waiting), 5) + require.Len(t, currNodesConfig.eligible[core.MetachainShardId], 3) + require.Len(t, currNodesConfig.waiting[core.MetachainShardId], 2) + require.Len(t, currNodesConfig.eligible[0], 3) + require.Len(t, currNodesConfig.waiting[0], 3) + + // Owner1 will have the second node from queue removed, before adding all the nodes to auction list + queue = remove(queue, owner1StakingQueue[1]) + require.Empty(t, currNodesConfig.queue) + require.Len(t, currNodesConfig.auction, 4) + requireSameSliceDifferentOrder(t, currNodesConfig.auction, queue) + + // Owner2 will have one of the nodes in waiting list removed + require.Len(t, getAllPubKeys(currNodesConfig.leaving), 1) + requireSliceContainsNumOfElements(t, getAllPubKeys(currNodesConfig.leaving), getAllPubKeys(owner2Stats.WaitingBlsKeys), 1) + + // Owner1 will unStake some EGLD => at the end of next epoch, he should have the other node from queue(now auction list) removed + unStake(t, []byte(owner1), node.AccountsAdapter, node.Marshaller, big.NewInt(0.1*nodePrice)) + + // 3. 
Check config in epoch = staking v4 + node.Process(t, 5) + currNodesConfig = node.NodesConfig + require.Len(t, getAllPubKeys(currNodesConfig.eligible), 6) + require.Len(t, getAllPubKeys(currNodesConfig.waiting), 3) + require.Len(t, getAllPubKeys(currNodesConfig.shuffledOut), 2) + + require.Len(t, currNodesConfig.eligible[core.MetachainShardId], 3) + require.Len(t, currNodesConfig.waiting[core.MetachainShardId], 1) + require.Len(t, currNodesConfig.shuffledOut[core.MetachainShardId], 1) + require.Len(t, currNodesConfig.eligible[0], 3) + require.Len(t, currNodesConfig.waiting[0], 2) + require.Len(t, currNodesConfig.shuffledOut[0], 1) + + // Owner1 will have the last node from auction list removed + queue = remove(queue, owner1StakingQueue[0]) + require.Len(t, currNodesConfig.auction, 3) + requireSameSliceDifferentOrder(t, currNodesConfig.auction, queue) + require.Len(t, getAllPubKeys(currNodesConfig.leaving), 1) + require.Equal(t, getAllPubKeys(currNodesConfig.leaving)[0], owner1StakingQueue[0]) + + // Owner3 will unStake EGLD => he will have negative top-up at the selection time => one of his nodes will be unStaked. + // His other node should not have been selected => remains in auction. + // Meanwhile, owner4 had never unStaked EGLD => his node from auction list will be distributed to waiting + unStake(t, []byte(owner3), node.AccountsAdapter, node.Marshaller, big.NewInt(2*nodePrice)) + + // 4. Check config in epoch = staking v4 step3 + node.Process(t, 5) + currNodesConfig = node.NodesConfig + requireSliceContainsNumOfElements(t, getAllPubKeys(currNodesConfig.leaving), owner3StakingQueue, 1) + requireSliceContainsNumOfElements(t, currNodesConfig.auction, owner3StakingQueue, 1) + requireSliceContainsNumOfElements(t, getAllPubKeys(currNodesConfig.waiting), owner4StakingQueue, 1) +} + +func TestStakingV4_StakeNewNodes(t *testing.T) { + t.Parallel() + + pubKeys := generateAddresses(0, 20) + + // Owner1 has 6 nodes, zero top up + owner1 := "owner1" + owner1Stats := &OwnerStats{ + EligibleBlsKeys: map[uint32][][]byte{ + core.MetachainShardId: pubKeys[:2], + }, + WaitingBlsKeys: map[uint32][][]byte{ + 0: pubKeys[2:4], + }, + StakingQueueKeys: pubKeys[4:6], + TotalStake: big.NewInt(6 * nodePrice), + } + + // Owner2 has 4 nodes, zero top up + owner2 := "owner2" + owner2Stats := &OwnerStats{ + EligibleBlsKeys: map[uint32][][]byte{ + 0: pubKeys[6:8], + }, + WaitingBlsKeys: map[uint32][][]byte{ + core.MetachainShardId: pubKeys[8:10], + }, + TotalStake: big.NewInt(4 * nodePrice), + } + // Owner3 has 1 node in staking queue with topUp = nodePrice + owner3 := "owner3" + owner3Stats := &OwnerStats{ + StakingQueueKeys: pubKeys[10:11], + TotalStake: big.NewInt(2 * nodePrice), + } + + cfg := &InitialNodesConfig{ + MetaConsensusGroupSize: 1, + ShardConsensusGroupSize: 1, + MinNumberOfEligibleShardNodes: 1, + MinNumberOfEligibleMetaNodes: 1, + NumOfShards: 1, + Owners: map[string]*OwnerStats{ + owner1: owner1Stats, + owner2: owner2Stats, + owner3: owner3Stats, + }, + MaxNodesChangeConfig: []config.MaxNodesChangeConfig{ + { + EpochEnable: 0, + MaxNumNodes: 8, + NodesToShufflePerShard: 1, + }, + }, + } + node := NewTestMetaProcessorWithCustomNodes(cfg) + node.EpochStartTrigger.SetRoundsPerEpoch(4) + + // 1.1 Check initial config is correct + currNodesConfig := node.NodesConfig + require.Len(t, getAllPubKeys(currNodesConfig.eligible), 4) + require.Len(t, getAllPubKeys(currNodesConfig.waiting), 4) + require.Len(t, currNodesConfig.eligible[core.MetachainShardId], 2) + require.Len(t, 
currNodesConfig.waiting[core.MetachainShardId], 2) + require.Len(t, currNodesConfig.eligible[0], 2) + require.Len(t, currNodesConfig.waiting[0], 2) + + owner1StakingQueue := owner1Stats.StakingQueueKeys + owner3StakingQueue := owner3Stats.StakingQueueKeys + queue := make([][]byte, 0) + queue = append(queue, owner1StakingQueue...) + queue = append(queue, owner3StakingQueue...) + require.Len(t, currNodesConfig.queue, 3) + requireSameSliceDifferentOrder(t, currNodesConfig.queue, queue) + + require.Empty(t, currNodesConfig.shuffledOut) + require.Empty(t, currNodesConfig.auction) + + // NewOwner0 stakes 1 node with top up = 0 before staking v4; should be sent to staking queue + newOwner0 := "newOwner0" + newNodes0 := map[string]*NodesRegisterData{ + newOwner0: { + BLSKeys: [][]byte{generateAddress(333)}, + TotalStake: big.NewInt(nodePrice), + }, + } + + // 1.2 Check staked node before staking v4 is sent to staking queue + node.ProcessStake(t, newNodes0) + queue = append(queue, newNodes0[newOwner0].BLSKeys...) + currNodesConfig = node.NodesConfig + require.Len(t, currNodesConfig.queue, 4) + requireSameSliceDifferentOrder(t, currNodesConfig.queue, queue) + + // NewOwner1 stakes 1 node with top up = 2*node price; should be sent to auction list + newOwner1 := "newOwner1" + newNodes1 := map[string]*NodesRegisterData{ + newOwner1: { + BLSKeys: [][]byte{generateAddress(444)}, + TotalStake: big.NewInt(3 * nodePrice), + }, + } + // 2. Check config after staking v4 init when a new node is staked + node.Process(t, 4) + node.ProcessStake(t, newNodes1) + currNodesConfig = node.NodesConfig + queue = append(queue, newNodes1[newOwner1].BLSKeys...) + require.Empty(t, currNodesConfig.queue) + require.Empty(t, currNodesConfig.leaving) + require.Len(t, currNodesConfig.auction, 5) + requireSameSliceDifferentOrder(t, currNodesConfig.auction, queue) + + // NewOwner2 stakes 2 node with top up = 2*node price; should be sent to auction list + newOwner2 := "newOwner2" + newNodes2 := map[string]*NodesRegisterData{ + newOwner2: { + BLSKeys: [][]byte{generateAddress(555), generateAddress(666)}, + TotalStake: big.NewInt(4 * nodePrice), + }, + } + // 2. Check in epoch = staking v4 step2 when 2 new nodes are staked + node.Process(t, 4) + node.ProcessStake(t, newNodes2) + currNodesConfig = node.NodesConfig + queue = append(queue, newNodes2[newOwner2].BLSKeys...) + require.Empty(t, currNodesConfig.queue) + requireSliceContainsNumOfElements(t, currNodesConfig.auction, queue, 7) + + // 3. Epoch = staking v4 step3 + // Only the new 2 owners + owner3 had enough top up to be distributed to waiting. 
+ // Meanwhile, owner1 which had 0 top up, still has his bls keys in auction, along with newOwner0 + node.Process(t, 5) + currNodesConfig = node.NodesConfig + require.Empty(t, currNodesConfig.queue) + requireMapContains(t, currNodesConfig.waiting, newNodes1[newOwner1].BLSKeys) + requireMapContains(t, currNodesConfig.waiting, newNodes2[newOwner2].BLSKeys) + requireMapContains(t, currNodesConfig.waiting, owner3StakingQueue) + requireSliceContains(t, currNodesConfig.auction, owner1StakingQueue) + requireSliceContains(t, currNodesConfig.auction, newNodes0[newOwner0].BLSKeys) +} + +func TestStakingV4_UnStakeNodes(t *testing.T) { + t.Parallel() + + pubKeys := generateAddresses(0, 20) + + owner1 := "owner1" + owner1Stats := &OwnerStats{ + EligibleBlsKeys: map[uint32][][]byte{ + core.MetachainShardId: pubKeys[:2], + }, + WaitingBlsKeys: map[uint32][][]byte{ + 0: pubKeys[2:4], + }, + StakingQueueKeys: pubKeys[4:6], + TotalStake: big.NewInt(10 * nodePrice), + } + + owner2 := "owner2" + owner2Stats := &OwnerStats{ + EligibleBlsKeys: map[uint32][][]byte{ + 0: pubKeys[6:8], + }, + WaitingBlsKeys: map[uint32][][]byte{ + core.MetachainShardId: pubKeys[8:12], + }, + StakingQueueKeys: pubKeys[12:15], + TotalStake: big.NewInt(10 * nodePrice), + } + + owner3 := "owner3" + owner3Stats := &OwnerStats{ + StakingQueueKeys: pubKeys[15:17], + TotalStake: big.NewInt(6 * nodePrice), + } + + cfg := &InitialNodesConfig{ + MetaConsensusGroupSize: 1, + ShardConsensusGroupSize: 1, + MinNumberOfEligibleShardNodes: 2, + MinNumberOfEligibleMetaNodes: 2, + NumOfShards: 1, + Owners: map[string]*OwnerStats{ + owner1: owner1Stats, + owner2: owner2Stats, + owner3: owner3Stats, + }, + MaxNodesChangeConfig: []config.MaxNodesChangeConfig{ + { + EpochEnable: 0, + MaxNumNodes: 10, + NodesToShufflePerShard: 1, + }, + }, + } + node := NewTestMetaProcessorWithCustomNodes(cfg) + node.EpochStartTrigger.SetRoundsPerEpoch(4) + + // 1. Check initial config is correct + currNodesConfig := node.NodesConfig + require.Len(t, getAllPubKeys(currNodesConfig.eligible), 4) + require.Len(t, getAllPubKeys(currNodesConfig.waiting), 6) + require.Len(t, currNodesConfig.eligible[core.MetachainShardId], 2) + require.Len(t, currNodesConfig.waiting[core.MetachainShardId], 4) + require.Len(t, currNodesConfig.eligible[0], 2) + require.Len(t, currNodesConfig.waiting[0], 2) + require.Empty(t, currNodesConfig.shuffledOut) + require.Empty(t, currNodesConfig.auction) + + owner1StakingQueue := owner1Stats.StakingQueueKeys + owner2StakingQueue := owner2Stats.StakingQueueKeys + owner3StakingQueue := owner3Stats.StakingQueueKeys + queue := make([][]byte, 0) + queue = append(queue, owner1StakingQueue...) + queue = append(queue, owner2StakingQueue...) + queue = append(queue, owner3StakingQueue...) + require.Len(t, currNodesConfig.queue, 7) + requireSameSliceDifferentOrder(t, currNodesConfig.queue, queue) + + // 1.1 Owner2 unStakes one of his staking queue nodes. Node should be removed from staking queue list + node.ProcessUnStake(t, map[string][][]byte{ + owner2: {owner2Stats.StakingQueueKeys[0]}, + }) + currNodesConfig = node.NodesConfig + queue = remove(queue, owner2Stats.StakingQueueKeys[0]) + require.Len(t, currNodesConfig.queue, 6) + requireSameSliceDifferentOrder(t, currNodesConfig.queue, queue) + require.Empty(t, currNodesConfig.new) + require.Empty(t, currNodesConfig.auction) + + // 1.2 Owner2 unStakes one of his waiting list keys. First node from staking queue should be added to fill its place. 
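+ // The promoted key should show up in the `new` list (exactly one entry, taken from the queue), and the staking queue should shrink from 6 to 5 entries.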
+ copy(queue, currNodesConfig.queue) // copy queue to local variable so we have the queue in same order + node.ProcessUnStake(t, map[string][][]byte{ + owner2: {owner2Stats.WaitingBlsKeys[core.MetachainShardId][0]}, + }) + currNodesConfig = node.NodesConfig + require.Len(t, currNodesConfig.new, 1) + requireSliceContains(t, queue, currNodesConfig.new) + require.Empty(t, currNodesConfig.auction) + queue = remove(queue, currNodesConfig.new[0]) + require.Len(t, currNodesConfig.queue, 5) + requireSameSliceDifferentOrder(t, queue, currNodesConfig.queue) + + // 2. Check config after staking v4 step1 + node.Process(t, 3) + currNodesConfig = node.NodesConfig + require.Len(t, getAllPubKeys(currNodesConfig.eligible), 4) + require.Len(t, getAllPubKeys(currNodesConfig.waiting), 6) + // Owner2's node from waiting list which was unStaked in previous epoch is now leaving + require.Len(t, currNodesConfig.leaving, 1) + require.Equal(t, owner2Stats.WaitingBlsKeys[core.MetachainShardId][0], currNodesConfig.leaving[core.MetachainShardId][0]) + require.Len(t, currNodesConfig.auction, 5) + // All nodes from queue have been moved to auction + requireSameSliceDifferentOrder(t, queue, currNodesConfig.auction) + + // 2.1 Owner3 unStakes one of his nodes from auction + node.ProcessUnStake(t, map[string][][]byte{ + owner3: {owner3StakingQueue[1]}, + }) + unStakedNodesInStakingV4Step1Epoch := make([][]byte, 0) + unStakedNodesInStakingV4Step1Epoch = append(unStakedNodesInStakingV4Step1Epoch, owner3StakingQueue[1]) + currNodesConfig = node.NodesConfig + queue = remove(queue, owner3StakingQueue[1]) + require.Len(t, currNodesConfig.auction, 4) + requireSameSliceDifferentOrder(t, queue, currNodesConfig.auction) + require.Empty(t, currNodesConfig.queue) + require.Empty(t, currNodesConfig.new) + + // 2.2 Owner1 unStakes 2 nodes: one from auction + one active + node.ProcessUnStake(t, map[string][][]byte{ + owner1: {owner1StakingQueue[1], owner1Stats.WaitingBlsKeys[0][0]}, + }) + unStakedNodesInStakingV4Step1Epoch = append(unStakedNodesInStakingV4Step1Epoch, owner1StakingQueue[1]) + unStakedNodesInStakingV4Step1Epoch = append(unStakedNodesInStakingV4Step1Epoch, owner1Stats.WaitingBlsKeys[0][0]) + currNodesConfig = node.NodesConfig + queue = remove(queue, owner1StakingQueue[1]) + require.Len(t, currNodesConfig.auction, 3) + requireSameSliceDifferentOrder(t, queue, currNodesConfig.auction) + require.Empty(t, currNodesConfig.queue) + require.Empty(t, currNodesConfig.new) + + // 3. Check config in epoch = staking v4 step2 + node.Process(t, 3) + currNodesConfig = node.NodesConfig + require.Len(t, getAllPubKeys(currNodesConfig.eligible), 4) + require.Len(t, getAllPubKeys(currNodesConfig.waiting), 4) + require.Len(t, getAllPubKeys(currNodesConfig.leaving), 3) + // All unStaked nodes in previous epoch are now leaving + requireMapContains(t, currNodesConfig.leaving, unStakedNodesInStakingV4Step1Epoch) + // 3.1 Owner2 unStakes one of his nodes from auction + node.ProcessUnStake(t, map[string][][]byte{ + owner2: {owner2StakingQueue[1]}, + }) + currNodesConfig = node.NodesConfig + queue = remove(queue, owner2StakingQueue[1]) + shuffledOutNodes := getAllPubKeys(currNodesConfig.shuffledOut) + require.Len(t, currNodesConfig.auction, len(shuffledOutNodes)+len(queue)) + requireSliceContains(t, currNodesConfig.auction, shuffledOutNodes) + requireSliceContains(t, currNodesConfig.auction, queue) + + // 4. 
Check config after whole staking v4 chain is ready, when one of the owners unStakes a node + node.Process(t, 4) + currNodesConfig = node.NodesConfig + node.ProcessUnStake(t, map[string][][]byte{ + owner2: {owner2Stats.EligibleBlsKeys[0][0]}, + }) + node.Process(t, 4) + currNodesConfig = node.NodesConfig + require.Len(t, getAllPubKeys(currNodesConfig.leaving), 1) + requireMapContains(t, currNodesConfig.leaving, [][]byte{owner2Stats.EligibleBlsKeys[0][0]}) + require.Empty(t, currNodesConfig.new) + require.Empty(t, currNodesConfig.queue) + + // 4.1 NewOwner stakes 1 node, should be sent to auction + newOwner := "newOwner1" + newNode := map[string]*NodesRegisterData{ + newOwner: { + BLSKeys: [][]byte{generateAddress(444)}, + TotalStake: big.NewInt(2 * nodePrice), + }, + } + node.ProcessStake(t, newNode) + currNodesConfig = node.NodesConfig + requireSliceContains(t, currNodesConfig.auction, newNode[newOwner].BLSKeys) + + // 4.2 NewOwner unStakes his node, he should not be in auction anymore + set to leaving + node.ProcessUnStake(t, map[string][][]byte{ + newOwner: {newNode[newOwner].BLSKeys[0]}, + }) + currNodesConfig = node.NodesConfig + requireSliceContainsNumOfElements(t, currNodesConfig.auction, newNode[newOwner].BLSKeys, 0) + node.Process(t, 3) + currNodesConfig = node.NodesConfig + requireMapContains(t, currNodesConfig.leaving, newNode[newOwner].BLSKeys) +} + +func TestStakingV4_JailAndUnJailNodes(t *testing.T) { + t.Parallel() + + pubKeys := generateAddresses(0, 20) + + owner1 := "owner1" + owner1Stats := &OwnerStats{ + EligibleBlsKeys: map[uint32][][]byte{ + core.MetachainShardId: pubKeys[:2], + }, + WaitingBlsKeys: map[uint32][][]byte{ + 0: pubKeys[2:4], + }, + StakingQueueKeys: pubKeys[4:6], + TotalStake: big.NewInt(10 * nodePrice), + } + + owner2 := "owner2" + owner2Stats := &OwnerStats{ + EligibleBlsKeys: map[uint32][][]byte{ + 0: pubKeys[6:8], + }, + WaitingBlsKeys: map[uint32][][]byte{ + core.MetachainShardId: pubKeys[8:12], + }, + StakingQueueKeys: pubKeys[12:15], + TotalStake: big.NewInt(10 * nodePrice), + } + + cfg := &InitialNodesConfig{ + MetaConsensusGroupSize: 1, + ShardConsensusGroupSize: 1, + MinNumberOfEligibleShardNodes: 2, + MinNumberOfEligibleMetaNodes: 2, + NumOfShards: 1, + Owners: map[string]*OwnerStats{ + owner1: owner1Stats, + owner2: owner2Stats, + }, + MaxNodesChangeConfig: []config.MaxNodesChangeConfig{ + { + EpochEnable: 0, + MaxNumNodes: 10, + NodesToShufflePerShard: 1, + }, + { + EpochEnable: stakingV4Step3EnableEpoch, + MaxNumNodes: 4, + NodesToShufflePerShard: 1, + }, + }, + } + node := NewTestMetaProcessorWithCustomNodes(cfg) + node.EpochStartTrigger.SetRoundsPerEpoch(4) + + // 1. Check initial config is correct + currNodesConfig := node.NodesConfig + require.Len(t, getAllPubKeys(currNodesConfig.eligible), 4) + require.Len(t, getAllPubKeys(currNodesConfig.waiting), 6) + require.Len(t, currNodesConfig.eligible[core.MetachainShardId], 2) + require.Len(t, currNodesConfig.waiting[core.MetachainShardId], 4) + require.Len(t, currNodesConfig.eligible[0], 2) + require.Len(t, currNodesConfig.waiting[0], 2) + require.Empty(t, currNodesConfig.shuffledOut) + require.Empty(t, currNodesConfig.auction) + + owner1StakingQueue := owner1Stats.StakingQueueKeys + owner2StakingQueue := owner2Stats.StakingQueueKeys + queue := make([][]byte, 0) + queue = append(queue, owner1StakingQueue...) + queue = append(queue, owner2StakingQueue...) 
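+ // The initial staking queue should hold all 5 queued keys: 2 from owner1 and 3 from owner2, in any order.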
+ require.Len(t, currNodesConfig.queue, 5) + requireSameSliceDifferentOrder(t, currNodesConfig.queue, queue) + + // 1.1 Jail 4 nodes: + // - 2 nodes from waiting list shard = 0 + // - 2 nodes from waiting list shard = meta chain + jailedNodes := make([][]byte, 0) + jailedNodes = append(jailedNodes, owner1Stats.WaitingBlsKeys[0]...) + jailedNodes = append(jailedNodes, owner2Stats.WaitingBlsKeys[core.MetachainShardId][:2]...) + node.ProcessJail(t, jailedNodes) + + // 1.2 UnJail 2 nodes from initial jailed nodes: + // - 1 node from waiting list shard = 0 + // - 1 node from waiting list shard = meta chain + unJailedNodes := make([][]byte, 0) + unJailedNodes = append(unJailedNodes, owner1Stats.WaitingBlsKeys[0][0]) + unJailedNodes = append(unJailedNodes, owner2Stats.WaitingBlsKeys[core.MetachainShardId][0]) + jailedNodes = remove(jailedNodes, unJailedNodes[0]) + jailedNodes = remove(jailedNodes, unJailedNodes[1]) + node.ProcessUnJail(t, unJailedNodes) + + // 2. Two jailed nodes are now leaving; the other two unJailed nodes are re-staked and distributed on waiting list + node.Process(t, 3) + currNodesConfig = node.NodesConfig + requireMapContains(t, currNodesConfig.leaving, jailedNodes) + requireMapContains(t, currNodesConfig.waiting, unJailedNodes) + requireSameSliceDifferentOrder(t, currNodesConfig.auction, queue) + require.Len(t, getAllPubKeys(currNodesConfig.eligible), 4) + require.Len(t, getAllPubKeys(currNodesConfig.waiting), 4) + require.Empty(t, currNodesConfig.queue) + + // 2.1 Epoch = stakingV4Step1; unJail one of the jailed nodes and expect it is sent to auction + node.ProcessUnJail(t, jailedNodes[:1]) + currNodesConfig = node.NodesConfig + queue = append(queue, jailedNodes[0]) + require.Empty(t, currNodesConfig.queue) + requireSameSliceDifferentOrder(t, currNodesConfig.auction, queue) + + // 3. Epoch = stakingV4Step2; unJail the other jailed node and expect it is sent to auction + node.Process(t, 4) + node.ProcessUnJail(t, jailedNodes[1:]) + currNodesConfig = node.NodesConfig + queue = append(queue, jailedNodes[1]) + queue = append(queue, getAllPubKeys(currNodesConfig.shuffledOut)...) + require.Empty(t, currNodesConfig.queue) + requireSameSliceDifferentOrder(t, currNodesConfig.auction, queue) + + // 3.1 Jail a random node from waiting list + newJailed := getAllPubKeys(currNodesConfig.waiting)[:1] + node.ProcessJail(t, newJailed) + + // 4. Epoch = stakingV4Step3; + // 4.1 Expect jailed node from waiting list is now leaving + node.Process(t, 4) + currNodesConfig = node.NodesConfig + requireMapContains(t, currNodesConfig.leaving, newJailed) + requireSliceContainsNumOfElements(t, currNodesConfig.auction, newJailed, 0) + require.Empty(t, currNodesConfig.queue) + + // 4.2 UnJail previous node and expect it is sent to auction + node.ProcessUnJail(t, newJailed) + currNodesConfig = node.NodesConfig + requireSliceContains(t, currNodesConfig.auction, newJailed) + require.Empty(t, currNodesConfig.queue) + + // 5. 
Epoch is now after whole staking v4 chain is activated + node.Process(t, 4) + currNodesConfig = node.NodesConfig + queue = currNodesConfig.auction + newJailed = queue[:1] + newUnJailed := newJailed[0] + + // 5.1 Take a random node from auction and jail it; expect it is removed from auction list + node.ProcessJail(t, newJailed) + queue = remove(queue, newJailed[0]) + currNodesConfig = node.NodesConfig + requireSameSliceDifferentOrder(t, queue, currNodesConfig.auction) + + // 5.2 UnJail previous node; expect it is sent back to auction + node.ProcessUnJail(t, [][]byte{newUnJailed}) + queue = append(queue, newUnJailed) + currNodesConfig = node.NodesConfig + requireSameSliceDifferentOrder(t, queue, currNodesConfig.auction) + require.Empty(t, node.NodesConfig.queue) +} + +func TestStakingV4_DifferentEdgeCasesWithNotEnoughNodesInWaitingShouldSendShuffledToToWaiting(t *testing.T) { + t.Parallel() + + pubKeys := generateAddresses(0, 20) + + owner1 := "owner1" + owner1Stats := &OwnerStats{ + EligibleBlsKeys: map[uint32][][]byte{ + core.MetachainShardId: pubKeys[:4], + 0: pubKeys[4:8], + }, + WaitingBlsKeys: map[uint32][][]byte{ + core.MetachainShardId: pubKeys[8:9], + 0: pubKeys[9:10], + }, + TotalStake: big.NewInt(20 * nodePrice), + } + + cfg := &InitialNodesConfig{ + MetaConsensusGroupSize: 2, + ShardConsensusGroupSize: 2, + MinNumberOfEligibleShardNodes: 4, + MinNumberOfEligibleMetaNodes: 4, + NumOfShards: 1, + Owners: map[string]*OwnerStats{ + owner1: owner1Stats, + }, + MaxNodesChangeConfig: []config.MaxNodesChangeConfig{ + { + EpochEnable: 0, + MaxNumNodes: 12, + NodesToShufflePerShard: 1, + }, + { + EpochEnable: stakingV4Step3EnableEpoch, // epoch 3 + MaxNumNodes: 10, + NodesToShufflePerShard: 1, + }, + { + EpochEnable: 6, + MaxNumNodes: 12, + NodesToShufflePerShard: 1, + }, + }, + } + node := NewTestMetaProcessorWithCustomNodes(cfg) + node.EpochStartTrigger.SetRoundsPerEpoch(4) + + // 1. 
Check initial config is correct + expectedNodesNum := &configNum{ + eligible: map[uint32]int{ + core.MetachainShardId: 4, + 0: 4, + }, + waiting: map[uint32]int{ + core.MetachainShardId: 1, + 0: 1, + }, + } + currNodesConfig := node.NodesConfig + checkConfig(t, expectedNodesNum, currNodesConfig) + + // During these 9 epochs, we will always have: + // - 10 activeNodes (8 eligible + 2 waiting) + // - 1 node to shuffle out per shard + // Meanwhile, maxNumNodes changes from 12-10-12 + // Since activeNodes <= maxNumNodes, shuffled out nodes will always be sent directly to waiting list, + // instead of auction(there is no reason to send them to auction, they will be selected anyway) + epoch := uint32(0) + numOfShuffledOut := 2 + numRemainingEligible := 6 + prevNodesConfig := currNodesConfig + for epoch < 9 { + node.Process(t, 5) + + currNodesConfig = node.NodesConfig + checkConfig(t, expectedNodesNum, currNodesConfig) + checkShuffledOutNodes(t, currNodesConfig, prevNodesConfig, numOfShuffledOut, numRemainingEligible) + + prevNodesConfig = currNodesConfig + epoch++ + } + + require.Equal(t, epoch, node.EpochStartTrigger.Epoch()) + + // Epoch = 9 with: + // - activeNodes = 10 + // - maxNumNodes = 12 + // Owner2 stakes 2 nodes, which should be initially sent to auction list + owner2Nodes := pubKeys[10:12] + node.ProcessStake(t, map[string]*NodesRegisterData{ + "owner2": { + BLSKeys: owner2Nodes, + TotalStake: big.NewInt(5 * nodePrice), + }, + }) + currNodesConfig = node.NodesConfig + expectedNodesNum.auction = 2 + checkConfig(t, expectedNodesNum, currNodesConfig) + requireSameSliceDifferentOrder(t, currNodesConfig.auction, owner2Nodes) + + // Epoch = 10 with: + // - activeNodes = 12 + // - maxNumNodes = 12 + // Owner2's new nodes are selected from auction and distributed to waiting list + node.Process(t, 5) + currNodesConfig = node.NodesConfig + expectedNodesNum.waiting[core.MetachainShardId]++ + expectedNodesNum.waiting[0]++ + expectedNodesNum.auction = 0 + checkConfig(t, expectedNodesNum, currNodesConfig) + checkShuffledOutNodes(t, currNodesConfig, prevNodesConfig, numOfShuffledOut, numRemainingEligible) + requireSliceContains(t, getAllPubKeys(currNodesConfig.waiting), owner2Nodes) + + // During epochs 10-13, we will have: + // - activeNodes = 12 + // - maxNumNodes = 12 + // Since activeNodes == maxNumNodes, shuffled out nodes will always be sent directly to waiting list, instead of auction + epoch = 10 + require.Equal(t, epoch, node.EpochStartTrigger.Epoch()) + prevNodesConfig = currNodesConfig + for epoch < 13 { + node.Process(t, 5) + + currNodesConfig = node.NodesConfig + checkConfig(t, expectedNodesNum, currNodesConfig) + checkShuffledOutNodes(t, currNodesConfig, prevNodesConfig, numOfShuffledOut, numRemainingEligible) + + prevNodesConfig = currNodesConfig + epoch++ + } + + // Epoch = 13 with: + // - activeNodes = 12 + // - maxNumNodes = 12 + // Owner3 stakes 2 nodes, which should be initially sent to auction list + owner3Nodes := pubKeys[12:14] + node.ProcessStake(t, map[string]*NodesRegisterData{ + "owner3": { + BLSKeys: owner3Nodes, + TotalStake: big.NewInt(5 * nodePrice), + }, + }) + currNodesConfig = node.NodesConfig + expectedNodesNum.auction = 2 + checkConfig(t, expectedNodesNum, currNodesConfig) + requireSameSliceDifferentOrder(t, currNodesConfig.auction, owner3Nodes) + + // During epochs 14-18, we will have: + // - activeNodes = 14 + // - maxNumNodes = 12 + // Since activeNodes > maxNumNodes, shuffled out nodes (2) will be sent to auction list + node.Process(t, 5) + prevNodesConfig = 
node.NodesConfig + epoch = 14 + require.Equal(t, epoch, node.EpochStartTrigger.Epoch()) + + numOfUnselectedNodesFromAuction := 0 + numOfSelectedNodesFromAuction := 2 + for epoch < 18 { + checkConfig(t, expectedNodesNum, currNodesConfig) + + node.Process(t, 5) + currNodesConfig = node.NodesConfig + checkStakingV4EpochChangeFlow(t, currNodesConfig, prevNodesConfig, numOfShuffledOut, numOfUnselectedNodesFromAuction, numOfSelectedNodesFromAuction) + + prevNodesConfig = currNodesConfig + epoch++ + } + + // Epoch = 18, with: + // - activeNodes = 14 + // - maxNumNodes = 12 + // Owner3 unStakes one of his nodes + node.ProcessUnStake(t, map[string][][]byte{ + "owner3": {owner3Nodes[0]}, + }) + + // Epoch = 19, with: + // - activeNodes = 13 + // - maxNumNodes = 12 + // Owner3's unStaked node is now leaving + node.Process(t, 5) + currNodesConfig = node.NodesConfig + require.Len(t, currNodesConfig.leaving, 1) + requireMapContains(t, currNodesConfig.leaving, [][]byte{owner3Nodes[0]}) + + epoch = 19 + require.Equal(t, epoch, node.EpochStartTrigger.Epoch()) + + // During epochs 19-23, we will have: + // - activeNodes = 13 + // - maxNumNodes = 12 + // Since activeNodes > maxNumNodes: + // - shuffled out nodes (2) will be sent to auction list + // - waiting lists will be unbalanced (3 in total: 1 + 2 per shard) + // - no node will spend extra epochs in eligible/waiting, since waiting lists will always be refilled + prevNodesConfig = node.NodesConfig + for epoch < 23 { + require.Len(t, getAllPubKeys(currNodesConfig.eligible), 8) + require.Len(t, getAllPubKeys(currNodesConfig.waiting), 3) + require.Len(t, currNodesConfig.eligible[core.MetachainShardId], 4) + require.Len(t, currNodesConfig.eligible[0], 4) + require.Len(t, currNodesConfig.auction, 2) + + node.Process(t, 5) + + currNodesConfig = node.NodesConfig + checkStakingV4EpochChangeFlow(t, currNodesConfig, prevNodesConfig, numOfShuffledOut, numOfUnselectedNodesFromAuction, numOfSelectedNodesFromAuction) + + prevNodesConfig = currNodesConfig + epoch++ + } +} + +func TestStakingV4_NewlyStakedNodesInStakingV4Step2ShouldBeSentToWaitingIfListIsTooLow(t *testing.T) { + t.Parallel() + + pubKeys := generateAddresses(0, 20) + + owner1 := "owner1" + owner1Stats := &OwnerStats{ + EligibleBlsKeys: map[uint32][][]byte{ + core.MetachainShardId: pubKeys[:4], + 0: pubKeys[4:8], + }, + WaitingBlsKeys: map[uint32][][]byte{ + core.MetachainShardId: pubKeys[8:9], + 0: pubKeys[9:10], + }, + TotalStake: big.NewInt(20 * nodePrice), + } + + cfg := &InitialNodesConfig{ + MetaConsensusGroupSize: 2, + ShardConsensusGroupSize: 2, + MinNumberOfEligibleShardNodes: 4, + MinNumberOfEligibleMetaNodes: 4, + NumOfShards: 1, + Owners: map[string]*OwnerStats{ + owner1: owner1Stats, + }, + MaxNodesChangeConfig: []config.MaxNodesChangeConfig{ + { + EpochEnable: 0, + MaxNumNodes: 20, + NodesToShufflePerShard: 1, + }, + { + EpochEnable: stakingV4Step3EnableEpoch, + MaxNumNodes: 18, + NodesToShufflePerShard: 1, + }, + }, + } + node := NewTestMetaProcessorWithCustomNodes(cfg) + node.EpochStartTrigger.SetRoundsPerEpoch(4) + + // 1. 
Check initial config is correct + expectedNodesNum := &configNum{ + eligible: map[uint32]int{ + core.MetachainShardId: 4, + 0: 4, + }, + waiting: map[uint32]int{ + core.MetachainShardId: 1, + 0: 1, + }, + } + currNodesConfig := node.NodesConfig + checkConfig(t, expectedNodesNum, currNodesConfig) + + // Epoch = 0, before staking v4, owner2 stakes 2 nodes + // - maxNumNodes = 20 + // - activeNumNodes = 10 + // Newly staked nodes should be sent to new list + owner2Nodes := pubKeys[12:14] + node.ProcessStake(t, map[string]*NodesRegisterData{ + "owner2": { + BLSKeys: owner2Nodes, + TotalStake: big.NewInt(2 * nodePrice), + }, + }) + currNodesConfig = node.NodesConfig + expectedNodesNum.new = 2 + checkConfig(t, expectedNodesNum, currNodesConfig) + requireSameSliceDifferentOrder(t, currNodesConfig.new, owner2Nodes) + + // Epoch = 1, staking v4 step 1 + // - maxNumNodes = 20 + // - activeNumNodes = 12 + // Owner2's new nodes should have been sent to waiting + node.Process(t, 5) + currNodesConfig = node.NodesConfig + expectedNodesNum.new = 0 + expectedNodesNum.waiting[0]++ + expectedNodesNum.waiting[core.MetachainShardId]++ + checkConfig(t, expectedNodesNum, currNodesConfig) + requireSliceContainsNumOfElements(t, getAllPubKeys(currNodesConfig.waiting), owner2Nodes, 2) + + // Epoch = 1, before staking v4, owner3 stakes 2 nodes + // - maxNumNodes = 20 + // - activeNumNodes = 12 + // Newly staked nodes should be sent to auction list + owner3Nodes := pubKeys[15:17] + node.ProcessStake(t, map[string]*NodesRegisterData{ + "owner3": { + BLSKeys: owner3Nodes, + TotalStake: big.NewInt(2 * nodePrice), + }, + }) + currNodesConfig = node.NodesConfig + expectedNodesNum.auction = 2 + checkConfig(t, expectedNodesNum, currNodesConfig) + requireSameSliceDifferentOrder(t, currNodesConfig.auction, owner3Nodes) + + // Epoch = 2, staking v4 step 2 + // - maxNumNodes = 20 + // - activeNumNodes = 14 + // Owner3's auction nodes should have been sent to waiting + node.Process(t, 5) + currNodesConfig = node.NodesConfig + expectedNodesNum.auction = 0 + expectedNodesNum.waiting[0]++ + expectedNodesNum.waiting[core.MetachainShardId]++ + checkConfig(t, expectedNodesNum, currNodesConfig) + requireSliceContainsNumOfElements(t, getAllPubKeys(currNodesConfig.waiting), owner3Nodes, 2) + + // During epochs 2-6, we will have: + // - activeNodes = 14 + // - maxNumNodes = 18-20 + // Since activeNodes < maxNumNodes, shuffled out nodes will always be sent directly to waiting list, instead of auction + epoch := uint32(2) + require.Equal(t, epoch, node.EpochStartTrigger.Epoch()) + + numOfShuffledOut := 2 + numRemainingEligible := 6 + numOfUnselectedNodesFromAuction := 0 + numOfSelectedNodesFromAuction := 0 + + prevNodesConfig := currNodesConfig + for epoch < 6 { + node.Process(t, 5) + + currNodesConfig = node.NodesConfig + checkConfig(t, expectedNodesNum, currNodesConfig) + checkShuffledOutNodes(t, currNodesConfig, prevNodesConfig, numOfShuffledOut, numRemainingEligible) + checkStakingV4EpochChangeFlow(t, currNodesConfig, prevNodesConfig, numOfShuffledOut, numOfUnselectedNodesFromAuction, numOfSelectedNodesFromAuction) + + prevNodesConfig = currNodesConfig + epoch++ + } +} + +func TestStakingV4_LeavingNodesEdgeCases(t *testing.T) { + t.Parallel() + + pubKeys := generateAddresses(0, 20) + + owner1 := "owner1" + owner1Stats := &OwnerStats{ + EligibleBlsKeys: map[uint32][][]byte{ + core.MetachainShardId: pubKeys[:3], + 0: pubKeys[3:6], + 1: pubKeys[6:9], + 2: pubKeys[9:12], + }, + TotalStake: big.NewInt(12 * nodePrice), + } + + cfg := 
&InitialNodesConfig{ + MetaConsensusGroupSize: 3, + ShardConsensusGroupSize: 3, + MinNumberOfEligibleShardNodes: 3, + MinNumberOfEligibleMetaNodes: 3, + NumOfShards: 3, + Owners: map[string]*OwnerStats{ + owner1: owner1Stats, + }, + MaxNodesChangeConfig: []config.MaxNodesChangeConfig{ + { + EpochEnable: 0, + MaxNumNodes: 16, + NodesToShufflePerShard: 4, + }, + { + EpochEnable: 1, + MaxNumNodes: 18, + NodesToShufflePerShard: 2, + }, + { + EpochEnable: stakingV4Step3EnableEpoch, + MaxNumNodes: 12, + NodesToShufflePerShard: 2, + }, + }, + } + node := NewTestMetaProcessorWithCustomNodes(cfg) + node.EpochStartTrigger.SetRoundsPerEpoch(5) + + // 1. Check initial config is correct + currNodesConfig := node.NodesConfig + require.Len(t, getAllPubKeys(currNodesConfig.eligible), 12) + require.Len(t, getAllPubKeys(currNodesConfig.waiting), 0) + require.Len(t, currNodesConfig.eligible[core.MetachainShardId], 3) + require.Len(t, currNodesConfig.waiting[core.MetachainShardId], 0) + require.Len(t, currNodesConfig.eligible[0], 3) + require.Len(t, currNodesConfig.waiting[0], 0) + require.Len(t, currNodesConfig.eligible[1], 3) + require.Len(t, currNodesConfig.waiting[1], 0) + require.Len(t, currNodesConfig.eligible[2], 3) + require.Len(t, currNodesConfig.waiting[2], 0) + require.Empty(t, currNodesConfig.shuffledOut) + require.Empty(t, currNodesConfig.auction) + + // NewOwner0 stakes 1 node with top up = 0 before staking v4; should be sent to new nodes, since there are enough slots + newOwner0 := "newOwner0" + newOwner0BlsKeys := [][]byte{generateAddress(101)} + node.ProcessStake(t, map[string]*NodesRegisterData{ + newOwner0: { + BLSKeys: newOwner0BlsKeys, + TotalStake: big.NewInt(nodePrice), + }, + }) + currNodesConfig = node.NodesConfig + requireSameSliceDifferentOrder(t, currNodesConfig.new, newOwner0BlsKeys) + + // UnStake one of the initial nodes + node.ProcessUnStake(t, map[string][][]byte{ + owner1: {owner1Stats.EligibleBlsKeys[core.MetachainShardId][0]}, + }) + + // Fast-forward few epochs such that the whole staking v4 is activated. + // We should have same 12 initial nodes + 1 extra node (because of legacy code where all leaving nodes were + // considered to be eligible and the unStaked node was forced to remain eligible) + node.Process(t, 49) + currNodesConfig = node.NodesConfig + require.Len(t, getAllPubKeys(currNodesConfig.eligible), 12) + require.Len(t, getAllPubKeys(currNodesConfig.waiting), 1) + + // Stake 10 extra nodes and check that they are sent to auction + newOwner1 := "newOwner1" + newOwner1BlsKeys := generateAddresses(303, 10) + node.ProcessStake(t, map[string]*NodesRegisterData{ + newOwner1: { + BLSKeys: newOwner1BlsKeys, + TotalStake: big.NewInt(nodePrice * 10), + }, + }) + currNodesConfig = node.NodesConfig + requireSameSliceDifferentOrder(t, currNodesConfig.auction, newOwner1BlsKeys) + + // After 2 epochs, unStake all previously staked keys. Some of them have been already sent to eligible/waiting, but most + // of them are still in auction. UnStaked nodes' status from auction should be: leaving now, but their previous list was auction. + // We should not force his auction nodes as being eligible in the next epoch. We should only force his existing active + // nodes to remain in the system. 
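+ // getIntersection below is used to snapshot which of newOwner1's keys are still in auction and which are already active (eligible/waiting), so the leaving/remaining expectations can be checked per group after the unStake.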
+ node.Process(t, 10) + currNodesConfig = node.NodesConfig + newOwner1AuctionNodes := getIntersection(currNodesConfig.auction, newOwner1BlsKeys) + newOwner1EligibleNodes := getIntersection(getAllPubKeys(currNodesConfig.eligible), newOwner1BlsKeys) + newOwner1WaitingNodes := getIntersection(getAllPubKeys(currNodesConfig.waiting), newOwner1BlsKeys) + newOwner1ActiveNodes := append(newOwner1EligibleNodes, newOwner1WaitingNodes...) + require.Equal(t, len(newOwner1AuctionNodes)+len(newOwner1ActiveNodes), len(newOwner1BlsKeys)) // sanity check + + node.ClearStoredMbs() + node.ProcessUnStake(t, map[string][][]byte{ + newOwner1: newOwner1BlsKeys, + }) + + node.Process(t, 5) + currNodesConfig = node.NodesConfig + require.Len(t, getAllPubKeys(currNodesConfig.eligible), 12) + requireMapContains(t, currNodesConfig.leaving, newOwner1AuctionNodes) + requireMapDoesNotContain(t, currNodesConfig.eligible, newOwner1AuctionNodes) + requireMapDoesNotContain(t, currNodesConfig.waiting, newOwner1AuctionNodes) + + allCurrentActiveNodes := append(getAllPubKeys(currNodesConfig.eligible), getAllPubKeys(currNodesConfig.waiting)...) + owner1NodesThatAreStillForcedToRemain := getIntersection(allCurrentActiveNodes, newOwner1ActiveNodes) + require.NotZero(t, len(owner1NodesThatAreStillForcedToRemain)) + + // Fast-forward some epochs, no error should occur, and we should have our initial config of: + // - 12 eligible nodes + // - 1 waiting list + // - some forced nodes to remain from newOwner1 + node.Process(t, 10) + currNodesConfig = node.NodesConfig + require.Len(t, getAllPubKeys(currNodesConfig.eligible), 12) + require.Len(t, getAllPubKeys(currNodesConfig.waiting), 1) + allCurrentActiveNodes = append(getAllPubKeys(currNodesConfig.eligible), getAllPubKeys(currNodesConfig.waiting)...) + owner1NodesThatAreStillForcedToRemain = getIntersection(allCurrentActiveNodes, newOwner1ActiveNodes) + require.NotZero(t, len(owner1NodesThatAreStillForcedToRemain)) + + // Stake 10 extra nodes such that the forced eligible nodes from previous newOwner1 can leave the system + // and are replaced by new nodes + newOwner2 := "newOwner2" + newOwner2BlsKeys := generateAddresses(403, 10) + node.ProcessStake(t, map[string]*NodesRegisterData{ + newOwner2: { + BLSKeys: newOwner2BlsKeys, + TotalStake: big.NewInt(nodePrice * 10), + }, + }) + currNodesConfig = node.NodesConfig + requireSliceContains(t, currNodesConfig.auction, newOwner2BlsKeys) + + // Fast-forward multiple epochs and check that newOwner1's forced nodes from previous epochs left + node.Process(t, 20) + currNodesConfig = node.NodesConfig + allCurrentNodesInSystem := append(getAllPubKeys(currNodesConfig.eligible), getAllPubKeys(currNodesConfig.waiting)...) + allCurrentNodesInSystem = append(allCurrentNodesInSystem, getAllPubKeys(currNodesConfig.leaving)...) + allCurrentNodesInSystem = append(allCurrentNodesInSystem, currNodesConfig.auction...) 
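+ // After enough epochs, none of newOwner1's previously forced-to-remain nodes should still be present in any list (eligible, waiting, leaving or auction).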
+ owner1LeftNodes := getIntersection(owner1NodesThatAreStillForcedToRemain, allCurrentNodesInSystem) + require.Zero(t, len(owner1LeftNodes)) +} diff --git a/integrationTests/vm/staking/systemSCCreator.go b/integrationTests/vm/staking/systemSCCreator.go new file mode 100644 index 00000000000..cf18140797a --- /dev/null +++ b/integrationTests/vm/staking/systemSCCreator.go @@ -0,0 +1,264 @@ +package staking + +import ( + "bytes" + "strconv" + + "github.com/multiversx/mx-chain-core-go/core" + "github.com/multiversx/mx-chain-go/common" + "github.com/multiversx/mx-chain-go/config" + "github.com/multiversx/mx-chain-go/epochStart" + "github.com/multiversx/mx-chain-go/epochStart/metachain" + epochStartMock "github.com/multiversx/mx-chain-go/epochStart/mock" + "github.com/multiversx/mx-chain-go/epochStart/notifier" + "github.com/multiversx/mx-chain-go/factory" + "github.com/multiversx/mx-chain-go/genesis/process/disabled" + "github.com/multiversx/mx-chain-go/process" + metaProcess "github.com/multiversx/mx-chain-go/process/factory/metachain" + "github.com/multiversx/mx-chain-go/process/peer" + "github.com/multiversx/mx-chain-go/process/smartContract/builtInFunctions" + "github.com/multiversx/mx-chain-go/process/smartContract/hooks" + "github.com/multiversx/mx-chain-go/process/smartContract/hooks/counters" + "github.com/multiversx/mx-chain-go/sharding" + "github.com/multiversx/mx-chain-go/sharding/nodesCoordinator" + "github.com/multiversx/mx-chain-go/state" + "github.com/multiversx/mx-chain-go/testscommon" + "github.com/multiversx/mx-chain-go/testscommon/cryptoMocks" + "github.com/multiversx/mx-chain-go/testscommon/genesisMocks" + "github.com/multiversx/mx-chain-go/testscommon/guardianMocks" + "github.com/multiversx/mx-chain-go/vm" + vmcommon "github.com/multiversx/mx-chain-vm-common-go" + vmcommonMock "github.com/multiversx/mx-chain-vm-common-go/mock" +) + +func createSystemSCProcessor( + nc nodesCoordinator.NodesCoordinator, + coreComponents factory.CoreComponentsHolder, + stateComponents factory.StateComponentsHandler, + shardCoordinator sharding.Coordinator, + maxNodesConfig []config.MaxNodesChangeConfig, + validatorStatisticsProcessor process.ValidatorStatisticsProcessor, + systemVM vmcommon.VMExecutionHandler, + stakingDataProvider epochStart.StakingDataProvider, +) process.EpochStartSystemSCProcessor { + maxNodesChangeConfigProvider, _ := notifier.NewNodesConfigProvider( + coreComponents.EpochNotifier(), + maxNodesConfig, + ) + + auctionCfg := config.SoftAuctionConfig{ + TopUpStep: "10", + MinTopUp: "1", + MaxTopUp: "32000000", + MaxNumberOfIterations: 100000, + } + ald, _ := metachain.NewAuctionListDisplayer(metachain.ArgsAuctionListDisplayer{ + TableDisplayHandler: metachain.NewTableDisplayer(), + ValidatorPubKeyConverter: &testscommon.PubkeyConverterMock{}, + AddressPubKeyConverter: &testscommon.PubkeyConverterMock{}, + AuctionConfig: auctionCfg, + }) + + argsAuctionListSelector := metachain.AuctionListSelectorArgs{ + ShardCoordinator: shardCoordinator, + StakingDataProvider: stakingDataProvider, + MaxNodesChangeConfigProvider: maxNodesChangeConfigProvider, + AuctionListDisplayHandler: ald, + SoftAuctionConfig: auctionCfg, + } + auctionListSelector, _ := metachain.NewAuctionListSelector(argsAuctionListSelector) + + args := metachain.ArgsNewEpochStartSystemSCProcessing{ + SystemVM: systemVM, + UserAccountsDB: stateComponents.AccountsAdapter(), + PeerAccountsDB: stateComponents.PeerAccounts(), + Marshalizer: coreComponents.InternalMarshalizer(), + StartRating: initialRating, + 
ValidatorInfoCreator: validatorStatisticsProcessor, + EndOfEpochCallerAddress: vm.EndOfEpochAddress, + StakingSCAddress: vm.StakingSCAddress, + ChanceComputer: &epochStartMock.ChanceComputerStub{}, + EpochNotifier: coreComponents.EpochNotifier(), + GenesisNodesConfig: &genesisMocks.NodesSetupStub{}, + StakingDataProvider: stakingDataProvider, + NodesConfigProvider: nc, + ShardCoordinator: shardCoordinator, + ESDTOwnerAddressBytes: bytes.Repeat([]byte{1}, 32), + EnableEpochsHandler: coreComponents.EnableEpochsHandler(), + MaxNodesChangeConfigProvider: maxNodesChangeConfigProvider, + AuctionListSelector: auctionListSelector, + } + + systemSCProcessor, _ := metachain.NewSystemSCProcessor(args) + return systemSCProcessor +} + +func createStakingDataProvider( + enableEpochsHandler common.EnableEpochsHandler, + systemVM vmcommon.VMExecutionHandler, +) epochStart.StakingDataProvider { + argsStakingDataProvider := metachain.StakingDataProviderArgs{ + EnableEpochsHandler: enableEpochsHandler, + SystemVM: systemVM, + MinNodePrice: strconv.Itoa(nodePrice), + } + stakingSCProvider, _ := metachain.NewStakingDataProvider(argsStakingDataProvider) + + return stakingSCProvider +} + +func createValidatorStatisticsProcessor( + dataComponents factory.DataComponentsHolder, + coreComponents factory.CoreComponentsHolder, + nc nodesCoordinator.NodesCoordinator, + shardCoordinator sharding.Coordinator, + peerAccounts state.AccountsAdapter, +) process.ValidatorStatisticsProcessor { + argsValidatorsProcessor := peer.ArgValidatorStatisticsProcessor{ + Marshalizer: coreComponents.InternalMarshalizer(), + NodesCoordinator: nc, + ShardCoordinator: shardCoordinator, + DataPool: dataComponents.Datapool(), + StorageService: dataComponents.StorageService(), + PubkeyConv: coreComponents.AddressPubKeyConverter(), + PeerAdapter: peerAccounts, + Rater: coreComponents.Rater(), + RewardsHandler: &epochStartMock.RewardsHandlerStub{}, + NodesSetup: &genesisMocks.NodesSetupStub{}, + MaxComputableRounds: 1, + MaxConsecutiveRoundsOfRatingDecrease: 2000, + EnableEpochsHandler: coreComponents.EnableEpochsHandler(), + } + validatorStatisticsProcessor, _ := peer.NewValidatorStatisticsProcessor(argsValidatorsProcessor) + return validatorStatisticsProcessor +} + +func createBlockChainHook( + dataComponents factory.DataComponentsHolder, + coreComponents factory.CoreComponentsHolder, + accountsAdapter state.AccountsAdapter, + shardCoordinator sharding.Coordinator, + gasScheduleNotifier core.GasScheduleNotifier, +) (hooks.ArgBlockChainHook, process.BlockChainHookWithAccountsAdapter) { + argsBuiltIn := builtInFunctions.ArgsCreateBuiltInFunctionContainer{ + GasSchedule: gasScheduleNotifier, + MapDNSAddresses: make(map[string]struct{}), + Marshalizer: coreComponents.InternalMarshalizer(), + Accounts: accountsAdapter, + ShardCoordinator: shardCoordinator, + EpochNotifier: coreComponents.EpochNotifier(), + EnableEpochsHandler: coreComponents.EnableEpochsHandler(), + AutomaticCrawlerAddresses: [][]byte{core.SystemAccountAddress}, + MaxNumNodesInTransferRole: 1, + GuardedAccountHandler: &guardianMocks.GuardedAccountHandlerStub{}, + MapDNSV2Addresses: make(map[string]struct{}), + } + + builtInFunctionsContainer, _ := builtInFunctions.CreateBuiltInFunctionsFactory(argsBuiltIn) + _ = builtInFunctionsContainer.CreateBuiltInFunctionContainer() + builtInFunctionsContainer.BuiltInFunctionContainer() + + argsHook := hooks.ArgBlockChainHook{ + Accounts: accountsAdapter, + PubkeyConv: coreComponents.AddressPubKeyConverter(), + StorageService: 
dataComponents.StorageService(), + BlockChain: dataComponents.Blockchain(), + ShardCoordinator: shardCoordinator, + Marshalizer: coreComponents.InternalMarshalizer(), + Uint64Converter: coreComponents.Uint64ByteSliceConverter(), + NFTStorageHandler: &testscommon.SimpleNFTStorageHandlerStub{}, + BuiltInFunctions: builtInFunctionsContainer.BuiltInFunctionContainer(), + DataPool: dataComponents.Datapool(), + CompiledSCPool: dataComponents.Datapool().SmartContracts(), + EpochNotifier: coreComponents.EpochNotifier(), + GlobalSettingsHandler: &vmcommonMock.GlobalSettingsHandlerStub{}, + NilCompiledSCStore: true, + EnableEpochsHandler: coreComponents.EnableEpochsHandler(), + GasSchedule: gasScheduleNotifier, + Counter: counters.NewDisabledCounter(), + MissingTrieNodesNotifier: &testscommon.MissingTrieNodesNotifierStub{}, + } + + blockChainHook, _ := hooks.NewBlockChainHookImpl(argsHook) + return argsHook, blockChainHook +} + +func createVMContainerFactory( + coreComponents factory.CoreComponentsHolder, + gasScheduleNotifier core.GasScheduleNotifier, + blockChainHook process.BlockChainHookWithAccountsAdapter, + argsBlockChainHook hooks.ArgBlockChainHook, + stateComponents factory.StateComponentsHandler, + shardCoordinator sharding.Coordinator, + nc nodesCoordinator.NodesCoordinator, + maxNumNodes uint32, +) process.VirtualMachinesContainerFactory { + signVerifer, _ := disabled.NewMessageSignVerifier(&cryptoMocks.KeyGenStub{}) + + argsNewVMContainerFactory := metaProcess.ArgsNewVMContainerFactory{ + BlockChainHook: blockChainHook, + PubkeyConv: coreComponents.AddressPubKeyConverter(), + Economics: coreComponents.EconomicsData(), + MessageSignVerifier: signVerifer, + GasSchedule: gasScheduleNotifier, + NodesConfigProvider: &genesisMocks.NodesSetupStub{}, + Hasher: coreComponents.Hasher(), + Marshalizer: coreComponents.InternalMarshalizer(), + SystemSCConfig: &config.SystemSmartContractsConfig{ + ESDTSystemSCConfig: config.ESDTSystemSCConfig{ + BaseIssuingCost: "1000", + OwnerAddress: "aaaaaa", + }, + GovernanceSystemSCConfig: config.GovernanceSystemSCConfig{ + V1: config.GovernanceSystemSCConfigV1{ + NumNodes: 2000, + ProposalCost: "500", + MinQuorum: 50, + MinPassThreshold: 10, + MinVetoThreshold: 10, + }, + OwnerAddress: "3132333435363738393031323334353637383930313233343536373839303234", + }, + StakingSystemSCConfig: config.StakingSystemSCConfig{ + GenesisNodePrice: strconv.Itoa(nodePrice), + UnJailValue: "10", + MinStepValue: "10", + MinStakeValue: "1", + UnBondPeriod: 1, + NumRoundsWithoutBleed: 1, + MaximumPercentageToBleed: 1, + BleedPercentagePerRound: 1, + MaxNumberOfNodesForStake: uint64(maxNumNodes), + ActivateBLSPubKeyMessageVerification: false, + MinUnstakeTokensValue: "1", + StakeLimitPercentage: 100.0, + NodeLimitPercentage: 100.0, + }, + DelegationManagerSystemSCConfig: config.DelegationManagerSystemSCConfig{ + MinCreationDeposit: "100", + MinStakeAmount: "100", + ConfigChangeAddress: "3132333435363738393031323334353637383930313233343536373839303234", + }, + DelegationSystemSCConfig: config.DelegationSystemSCConfig{ + MinServiceFee: 0, + MaxServiceFee: 100, + }, + SoftAuctionConfig: config.SoftAuctionConfig{ + TopUpStep: "10", + MinTopUp: "1", + MaxTopUp: "32000000", + MaxNumberOfIterations: 100000, + }, + }, + ValidatorAccountsDB: stateComponents.PeerAccounts(), + ChanceComputer: coreComponents.Rater(), + EnableEpochsHandler: coreComponents.EnableEpochsHandler(), + ShardCoordinator: shardCoordinator, + NodesCoordinator: nc, + UserAccountsDB: stateComponents.AccountsAdapter(), + 
ArgBlockChainHook: argsBlockChainHook, + } + + metaVmFactory, _ := metaProcess.NewVMContainerFactory(argsNewVMContainerFactory) + return metaVmFactory +} diff --git a/integrationTests/vm/staking/testMetaProcessor.go b/integrationTests/vm/staking/testMetaProcessor.go new file mode 100644 index 00000000000..168287b66bc --- /dev/null +++ b/integrationTests/vm/staking/testMetaProcessor.go @@ -0,0 +1,99 @@ +package staking + +import ( + "github.com/multiversx/mx-chain-go/config" + "github.com/multiversx/mx-chain-go/dataRetriever" +) + +// NewTestMetaProcessor - +func NewTestMetaProcessor( + numOfMetaNodes uint32, + numOfShards uint32, + numOfEligibleNodesPerShard uint32, + numOfWaitingNodesPerShard uint32, + numOfNodesToShufflePerShard uint32, + shardConsensusGroupSize int, + metaConsensusGroupSize int, + numOfNodesInStakingQueue uint32, +) *TestMetaProcessor { + coreComponents, dataComponents, bootstrapComponents, statusComponents, stateComponents := createComponentHolders(numOfShards) + + maxNodesConfig := createMaxNodesConfig( + numOfMetaNodes, + numOfShards, + numOfEligibleNodesPerShard, + numOfWaitingNodesPerShard, + numOfNodesToShufflePerShard, + ) + + queue := createStakingQueue( + numOfNodesInStakingQueue, + maxNodesConfig[0].MaxNumNodes, + coreComponents.InternalMarshalizer(), + stateComponents.AccountsAdapter(), + ) + + eligibleMap, waitingMap := createGenesisNodes( + numOfMetaNodes, + numOfShards, + numOfEligibleNodesPerShard, + numOfWaitingNodesPerShard, + coreComponents.InternalMarshalizer(), + stateComponents, + ) + + bootStrapStorer, _ := dataComponents.StorageService().GetStorer(dataRetriever.BootstrapUnit) + nc := createNodesCoordinator( + eligibleMap, + waitingMap, + numOfMetaNodes, + numOfShards, + numOfEligibleNodesPerShard, + shardConsensusGroupSize, + metaConsensusGroupSize, + coreComponents, + bootStrapStorer, + bootstrapComponents.NodesCoordinatorRegistryFactory(), + maxNodesConfig, + ) + + return newTestMetaProcessor( + coreComponents, + dataComponents, + bootstrapComponents, + statusComponents, + stateComponents, + nc, + maxNodesConfig, + queue, + ) +} + +func createMaxNodesConfig( + numOfMetaNodes uint32, + numOfShards uint32, + numOfEligibleNodesPerShard uint32, + numOfWaitingNodesPerShard uint32, + numOfNodesToShufflePerShard uint32, +) []config.MaxNodesChangeConfig { + totalEligible := numOfMetaNodes + numOfShards*numOfEligibleNodesPerShard + totalWaiting := (numOfShards + 1) * numOfWaitingNodesPerShard + totalNodes := totalEligible + totalWaiting + + maxNodesConfig := make([]config.MaxNodesChangeConfig, 0) + maxNodesConfig = append(maxNodesConfig, config.MaxNodesChangeConfig{ + EpochEnable: 0, + MaxNumNodes: totalNodes, + NodesToShufflePerShard: numOfNodesToShufflePerShard, + }, + ) + + maxNodesConfig = append(maxNodesConfig, config.MaxNodesChangeConfig{ + EpochEnable: stakingV4Step3EnableEpoch, + MaxNumNodes: totalNodes - numOfNodesToShufflePerShard*(numOfShards+1), + NodesToShufflePerShard: numOfNodesToShufflePerShard, + }, + ) + + return maxNodesConfig +} diff --git a/integrationTests/vm/staking/testMetaProcessorWithCustomNodesConfig.go b/integrationTests/vm/staking/testMetaProcessorWithCustomNodesConfig.go new file mode 100644 index 00000000000..a966a499454 --- /dev/null +++ b/integrationTests/vm/staking/testMetaProcessorWithCustomNodesConfig.go @@ -0,0 +1,303 @@ +package staking + +import ( + "bytes" + "encoding/hex" + "math/big" + "testing" + + "github.com/multiversx/mx-chain-core-go/core" + "github.com/multiversx/mx-chain-core-go/data" + 
"github.com/multiversx/mx-chain-core-go/data/block" + "github.com/multiversx/mx-chain-core-go/data/smartContractResult" + "github.com/multiversx/mx-chain-core-go/marshal" + "github.com/multiversx/mx-chain-go/config" + "github.com/multiversx/mx-chain-go/dataRetriever" + "github.com/multiversx/mx-chain-go/integrationTests" + "github.com/multiversx/mx-chain-go/process" + "github.com/multiversx/mx-chain-go/process/smartContract" + "github.com/multiversx/mx-chain-go/testscommon" + "github.com/multiversx/mx-chain-go/vm" + vmcommon "github.com/multiversx/mx-chain-vm-common-go" + "github.com/stretchr/testify/require" +) + +// OwnerStats - +type OwnerStats struct { + EligibleBlsKeys map[uint32][][]byte + WaitingBlsKeys map[uint32][][]byte + StakingQueueKeys [][]byte + TotalStake *big.Int +} + +// InitialNodesConfig - +type InitialNodesConfig struct { + Owners map[string]*OwnerStats + MaxNodesChangeConfig []config.MaxNodesChangeConfig + NumOfShards uint32 + MinNumberOfEligibleShardNodes uint32 + MinNumberOfEligibleMetaNodes uint32 + ShardConsensusGroupSize int + MetaConsensusGroupSize int +} + +// NewTestMetaProcessorWithCustomNodes - +func NewTestMetaProcessorWithCustomNodes(config *InitialNodesConfig) *TestMetaProcessor { + coreComponents, dataComponents, bootstrapComponents, statusComponents, stateComponents := createComponentHolders(config.NumOfShards) + + queue := createStakingQueueCustomNodes( + config.Owners, + coreComponents.InternalMarshalizer(), + stateComponents.AccountsAdapter(), + ) + + eligibleMap, waitingMap := createGenesisNodesWithCustomConfig( + config.Owners, + coreComponents.InternalMarshalizer(), + stateComponents, + ) + + bootstrapStorer, _ := dataComponents.StorageService().GetStorer(dataRetriever.BootstrapUnit) + nc := createNodesCoordinator( + eligibleMap, + waitingMap, + config.MinNumberOfEligibleMetaNodes, + config.NumOfShards, + config.MinNumberOfEligibleShardNodes, + config.ShardConsensusGroupSize, + config.MetaConsensusGroupSize, + coreComponents, + bootstrapStorer, + bootstrapComponents.NodesCoordinatorRegistryFactory(), + config.MaxNodesChangeConfig, + ) + + return newTestMetaProcessor( + coreComponents, + dataComponents, + bootstrapComponents, + statusComponents, + stateComponents, + nc, + config.MaxNodesChangeConfig, + queue, + ) +} + +// NodesRegisterData - +type NodesRegisterData struct { + BLSKeys [][]byte + TotalStake *big.Int +} + +// ProcessStake will create a block containing mini blocks with staking txs using provided nodes. +// Block will be committed + call to validator system sc will be made to stake all nodes +func (tmp *TestMetaProcessor) ProcessStake(t *testing.T, nodes map[string]*NodesRegisterData) { + header := tmp.createNewHeader(t, tmp.currentRound) + tmp.BlockChainHook.SetCurrentHeader(header) + + txHashes := make([][]byte, 0) + for owner, registerData := range nodes { + scrs := tmp.doStake(t, []byte(owner), registerData) + txHashes = append(txHashes, tmp.addTxsToCacher(scrs)...) 
+ } + + tmp.commitBlockTxs(t, txHashes, header) +} + +func (tmp *TestMetaProcessor) doStake( + t *testing.T, + owner []byte, + registerData *NodesRegisterData, +) map[string]*smartContractResult.SmartContractResult { + arguments := &vmcommon.ContractCallInput{ + VMInput: vmcommon.VMInput{ + CallerAddr: owner, + Arguments: createStakeArgs(registerData.BLSKeys), + CallValue: registerData.TotalStake, + GasProvided: 10, + }, + RecipientAddr: vm.ValidatorSCAddress, + Function: "stake", + } + + return tmp.runSC(t, arguments) +} + +func createStakeArgs(blsKeys [][]byte) [][]byte { + numBLSKeys := int64(len(blsKeys)) + numBLSKeysBytes := big.NewInt(numBLSKeys).Bytes() + argsStake := [][]byte{numBLSKeysBytes} + + for _, blsKey := range blsKeys { + signature := append([]byte("signature-"), blsKey...) + argsStake = append(argsStake, blsKey, signature) + } + + return argsStake +} + +// ProcessUnStake will create a block containing mini blocks with unStaking txs using provided nodes. +// Block will be committed + call to validator system sc will be made to unStake all nodes +func (tmp *TestMetaProcessor) ProcessUnStake(t *testing.T, nodes map[string][][]byte) { + header := tmp.createNewHeader(t, tmp.currentRound) + tmp.BlockChainHook.SetCurrentHeader(header) + + txHashes := make([][]byte, 0) + for owner, blsKeys := range nodes { + scrs := tmp.doUnStake(t, []byte(owner), blsKeys) + txHashes = append(txHashes, tmp.addTxsToCacher(scrs)...) + } + + tmp.commitBlockTxs(t, txHashes, header) +} + +func (tmp *TestMetaProcessor) doUnStake( + t *testing.T, + owner []byte, + blsKeys [][]byte, +) map[string]*smartContractResult.SmartContractResult { + arguments := &vmcommon.ContractCallInput{ + VMInput: vmcommon.VMInput{ + CallerAddr: owner, + Arguments: blsKeys, + CallValue: big.NewInt(0), + GasProvided: 100, + }, + RecipientAddr: vm.ValidatorSCAddress, + Function: "unStake", + } + + return tmp.runSC(t, arguments) +} + +// ProcessJail will create a block containing mini blocks with jail txs using provided nodes. +// Block will be committed + call to validator system sc will be made to jail all nodes +func (tmp *TestMetaProcessor) ProcessJail(t *testing.T, blsKeys [][]byte) { + header := tmp.createNewHeader(t, tmp.currentRound) + tmp.BlockChainHook.SetCurrentHeader(header) + + scrs := tmp.doJail(t, blsKeys) + txHashes := tmp.addTxsToCacher(scrs) + tmp.commitBlockTxs(t, txHashes, header) +} + +func (tmp *TestMetaProcessor) doJail( + t *testing.T, + blsKeys [][]byte, +) map[string]*smartContractResult.SmartContractResult { + arguments := &vmcommon.ContractCallInput{ + VMInput: vmcommon.VMInput{ + CallerAddr: vm.JailingAddress, + Arguments: blsKeys, + CallValue: big.NewInt(0), + GasProvided: 10, + }, + RecipientAddr: vm.StakingSCAddress, + Function: "jail", + } + + return tmp.runSC(t, arguments) +} + +// ProcessUnJail will create a block containing mini blocks with unJail txs using provided nodes. +// Block will be committed + call to validator system sc will be made to unJail all nodes +func (tmp *TestMetaProcessor) ProcessUnJail(t *testing.T, blsKeys [][]byte) { + header := tmp.createNewHeader(t, tmp.currentRound) + tmp.BlockChainHook.SetCurrentHeader(header) + + txHashes := make([][]byte, 0) + for _, blsKey := range blsKeys { + scrs := tmp.doUnJail(t, blsKey) + txHashes = append(txHashes, tmp.addTxsToCacher(scrs)...) 
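Aside (not part of the patch): `createStakeArgs` (defined above) packs the validator SC `stake` arguments as the number of BLS keys followed by key/signature pairs, with a dummy `"signature-"` prefix per key. A small illustration with two hypothetical keys:

```go
// Illustration only: for two keys the layout is
// [numKeys, key1, "signature-"+key1, key2, "signature-"+key2].
args := createStakeArgs([][]byte{[]byte("key1"), []byte("key2")})
// args[0] == big.NewInt(2).Bytes(); len(args) == 5
```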
+ } + + tmp.commitBlockTxs(t, txHashes, header) +} + +func (tmp *TestMetaProcessor) ClearStoredMbs() { + txCoordMock, _ := tmp.TxCoordinator.(*testscommon.TransactionCoordinatorMock) + txCoordMock.ClearStoredMbs() +} + +func (tmp *TestMetaProcessor) doUnJail( + t *testing.T, + blsKey []byte, +) map[string]*smartContractResult.SmartContractResult { + arguments := &vmcommon.ContractCallInput{ + VMInput: vmcommon.VMInput{ + CallerAddr: vm.ValidatorSCAddress, + Arguments: [][]byte{blsKey}, + CallValue: big.NewInt(0), + GasProvided: 10, + }, + RecipientAddr: vm.StakingSCAddress, + Function: "unJail", + } + + return tmp.runSC(t, arguments) +} + +func (tmp *TestMetaProcessor) addTxsToCacher(scrs map[string]*smartContractResult.SmartContractResult) [][]byte { + txHashes := make([][]byte, 0) + for scrHash, scr := range scrs { + txHashes = append(txHashes, []byte(scrHash)) + tmp.TxCacher.AddTx([]byte(scrHash), scr) + } + + return txHashes +} + +func (tmp *TestMetaProcessor) commitBlockTxs(t *testing.T, txHashes [][]byte, header data.HeaderHandler) { + _, err := tmp.AccountsAdapter.Commit() + require.Nil(t, err) + + miniBlocks := block.MiniBlockSlice{ + { + TxHashes: txHashes, + SenderShardID: core.MetachainShardId, + ReceiverShardID: core.MetachainShardId, + Type: block.SmartContractResultBlock, + }, + } + tmp.TxCoordinator.AddTxsFromMiniBlocks(miniBlocks) + tmp.createAndCommitBlock(t, header, noTime) + tmp.currentRound += 1 +} + +func (tmp *TestMetaProcessor) runSC(t *testing.T, arguments *vmcommon.ContractCallInput) map[string]*smartContractResult.SmartContractResult { + vmOutput, err := tmp.SystemVM.RunSmartContractCall(arguments) + require.Nil(t, err) + require.Equal(t, vmcommon.Ok, vmOutput.ReturnCode) + + err = integrationTests.ProcessSCOutputAccounts(vmOutput, tmp.AccountsAdapter) + require.Nil(t, err) + + return createSCRsFromStakingSCOutput(vmOutput, tmp.Marshaller) +} + +func createSCRsFromStakingSCOutput( + vmOutput *vmcommon.VMOutput, + marshaller marshal.Marshalizer, +) map[string]*smartContractResult.SmartContractResult { + allSCR := make(map[string]*smartContractResult.SmartContractResult) + parser := smartContract.NewArgumentParser() + outputAccounts := process.SortVMOutputInsideData(vmOutput) + for _, outAcc := range outputAccounts { + storageUpdates := process.GetSortedStorageUpdates(outAcc) + + if bytes.Equal(outAcc.Address, vm.StakingSCAddress) { + scrData := parser.CreateDataFromStorageUpdate(storageUpdates) + scr := &smartContractResult.SmartContractResult{ + RcvAddr: vm.StakingSCAddress, + Data: []byte(scrData), + } + scrBytes, _ := marshaller.Marshal(scr) + scrHash := hex.EncodeToString(scrBytes) + + allSCR[scrHash] = scr + } + } + + return allSCR +} diff --git a/integrationTests/vm/systemVM/stakingSC_test.go b/integrationTests/vm/systemVM/stakingSC_test.go index 69ad5d15a6e..75e958f926b 100644 --- a/integrationTests/vm/systemVM/stakingSC_test.go +++ b/integrationTests/vm/systemVM/stakingSC_test.go @@ -35,6 +35,9 @@ func TestStakingUnstakingAndUnbondingOnMultiShardEnvironment(t *testing.T) { StakingV2EnableEpoch: integrationTests.UnreachableEpoch, ScheduledMiniBlocksEnableEpoch: integrationTests.UnreachableEpoch, MiniBlockPartialExecutionEnableEpoch: integrationTests.UnreachableEpoch, + StakingV4Step1EnableEpoch: integrationTests.UnreachableEpoch, + StakingV4Step2EnableEpoch: integrationTests.UnreachableEpoch, + StakingV4Step3EnableEpoch: integrationTests.UnreachableEpoch, } nodes := integrationTests.CreateNodesWithEnableEpochs( diff --git 
a/integrationTests/vm/testInitializer.go b/integrationTests/vm/testInitializer.go index 0c9fa15b273..7d44d945e14 100644 --- a/integrationTests/vm/testInitializer.go +++ b/integrationTests/vm/testInitializer.go @@ -60,6 +60,7 @@ import ( "github.com/multiversx/mx-chain-go/testscommon/economicsmocks" "github.com/multiversx/mx-chain-go/testscommon/enableEpochsHandlerMock" "github.com/multiversx/mx-chain-go/testscommon/epochNotifier" + "github.com/multiversx/mx-chain-go/testscommon/genesisMocks" "github.com/multiversx/mx-chain-go/testscommon/integrationtests" "github.com/multiversx/mx-chain-go/testscommon/shardingMocks" storageStubs "github.com/multiversx/mx-chain-go/testscommon/storage" @@ -322,11 +323,6 @@ func createEconomicsData(enableEpochsConfig config.EnableEpochs) (process.Econom minGasLimit := strconv.FormatUint(1, 10) testProtocolSustainabilityAddress := "erd1932eft30w753xyvme8d49qejgkjc09n5e49w4mwdjtm0neld797su0dlxp" - builtInCost, _ := economics.NewBuiltInFunctionsCost(&economics.ArgsBuiltInFunctionCost{ - ArgsParser: smartContract.NewArgumentParser(), - GasSchedule: mock.NewGasScheduleNotifierMock(defaults.FillGasMapInternal(map[string]map[string]uint64{}, 1)), - }) - realEpochNotifier := forking.NewGenericEpochNotifier() enableEpochsHandler, _ := enablers.NewEnableEpochsHandler(enableEpochsConfig, realEpochNotifier) @@ -371,10 +367,9 @@ func createEconomicsData(enableEpochsConfig config.EnableEpochs) (process.Econom MaxGasPriceSetGuardian: "2000000000", }, }, - EpochNotifier: realEpochNotifier, - EnableEpochsHandler: enableEpochsHandler, - BuiltInFunctionsCostHandler: builtInCost, - TxVersionChecker: versioning.NewTxVersionChecker(minTransactionVersion), + EpochNotifier: realEpochNotifier, + EnableEpochsHandler: enableEpochsHandler, + TxVersionChecker: versioning.NewTxVersionChecker(minTransactionVersion), } return economics.NewEconomicsData(argsNewEconomicsData) @@ -702,7 +697,8 @@ func CreateVMAndBlockchainHookMeta( Economics: economicsData, MessageSignVerifier: &mock.MessageSignVerifierMock{}, GasSchedule: gasSchedule, - NodesConfigProvider: &mock.NodesSetupStub{}, + ArgBlockChainHook: args, + NodesConfigProvider: &genesisMocks.NodesSetupStub{}, Hasher: integrationtests.TestHasher, Marshalizer: integrationtests.TestMarshalizer, SystemSCConfig: createSystemSCConfig(), @@ -711,6 +707,7 @@ func CreateVMAndBlockchainHookMeta( ChanceComputer: &shardingMocks.NodesCoordinatorMock{}, ShardCoordinator: mock.NewMultiShardsCoordinatorMock(1), EnableEpochsHandler: enableEpochsHandler, + NodesCoordinator: &shardingMocks.NodesCoordinatorMock{}, } vmFactory, err := metachain.NewVMContainerFactory(argVMContainer) if err != nil { @@ -764,6 +761,8 @@ func createSystemSCConfig() *config.SystemSmartContractsConfig { BleedPercentagePerRound: 0.00001, MaxNumberOfNodesForStake: 36, ActivateBLSPubKeyMessageVerification: false, + StakeLimitPercentage: 100.0, + NodeLimitPercentage: 100.0, }, DelegationManagerSystemSCConfig: config.DelegationManagerSystemSCConfig{ MinCreationDeposit: "1250000000000000000000", @@ -774,6 +773,12 @@ func createSystemSCConfig() *config.SystemSmartContractsConfig { MinServiceFee: 1, MaxServiceFee: 20, }, + SoftAuctionConfig: config.SoftAuctionConfig{ + TopUpStep: "10", + MinTopUp: "1", + MaxTopUp: "32000000", + MaxNumberOfIterations: 100000, + }, } } @@ -818,6 +823,7 @@ func CreateTxProcessorWithOneSCExecutorWithVMs( epochNotifierInstance process.EpochNotifier, guardianChecker process.GuardianChecker, roundNotifierInstance process.RoundNotifier, + chainHandler 
data.ChainHandler, ) (*ResultsCreateTxProcessor, error) { if check.IfNil(poolsHolder) { poolsHolder = dataRetrieverMock.NewPoolsHolderMock() @@ -980,6 +986,7 @@ func CreateTxProcessorWithOneSCExecutorWithVMs( Marshalizer: integrationtests.TestMarshalizer, Hasher: integrationtests.TestHasher, DataFieldParser: dataFieldParser, + BlockChainHook: blockChainHook, } argsNewSCProcessor.VMOutputCacher = txSimulatorProcessorArgs.VMOutputCacher @@ -1006,6 +1013,7 @@ func CreateTxProcessorWithOneSCExecutorWithVMs( Accounts: simulationAccountsDB, ShardCoordinator: shardCoordinator, EnableEpochsHandler: argsNewSCProcessor.EnableEpochsHandler, + BlockChain: chainHandler, } apiTransactionEvaluator, err := transactionEvaluator.NewAPITransactionEvaluator(argsTransactionEvaluator) if err != nil { @@ -1077,7 +1085,7 @@ func CreatePreparedTxProcessorAndAccountsWithVMs( senderAddressBytes, senderBalance, enableEpochsConfig, - integrationTests.GetDefaultRoundsConfig()) + testscommon.GetDefaultRoundsConfig()) } // CreatePreparedTxProcessorAndAccountsWithVMsWithRoundsConfig - @@ -1088,7 +1096,7 @@ func CreatePreparedTxProcessorAndAccountsWithVMsWithRoundsConfig( enableEpochsConfig config.EnableEpochs, roundsConfig config.RoundConfig, ) (*VMTestContext, error) { - feeAccumulator, _ := postprocess.NewFeeAccumulator() + feeAccumulator := postprocess.NewFeeAccumulator() accounts := integrationtests.CreateInMemoryShardAccountsDB() _, _ = CreateAccount(accounts, senderAddressBytes, senderNonce, senderBalance) vmConfig := createDefaultVMConfig() @@ -1128,6 +1136,7 @@ func CreatePreparedTxProcessorAndAccountsWithVMsWithRoundsConfig( epochNotifierInstance, guardedAccountHandler, roundNotifierInstance, + chainHandler, ) if err != nil { return nil, err @@ -1174,13 +1183,13 @@ func CreatePreparedTxProcessorWithVMsAndCustomGasSchedule( mock.NewMultiShardsCoordinatorMock(2), integrationtests.CreateMemUnit(), createMockGasScheduleNotifierWithCustomGasSchedule(updateGasSchedule), - integrationTests.GetDefaultRoundsConfig(), + testscommon.GetDefaultRoundsConfig(), ) } // CreatePreparedTxProcessorWithVMsWithShardCoordinator - func CreatePreparedTxProcessorWithVMsWithShardCoordinator(enableEpochsConfig config.EnableEpochs, shardCoordinator sharding.Coordinator) (*VMTestContext, error) { - return CreatePreparedTxProcessorWithVMsWithShardCoordinatorAndRoundConfig(enableEpochsConfig, integrationTests.GetDefaultRoundsConfig(), shardCoordinator) + return CreatePreparedTxProcessorWithVMsWithShardCoordinatorAndRoundConfig(enableEpochsConfig, testscommon.GetDefaultRoundsConfig(), shardCoordinator) } // CreatePreparedTxProcessorWithVMsWithShardCoordinatorAndRoundConfig - @@ -1207,7 +1216,7 @@ func CreatePreparedTxProcessorWithVMsWithShardCoordinatorDBAndGas( shardCoordinator, db, gasScheduleNotifier, - integrationTests.GetDefaultRoundsConfig(), + testscommon.GetDefaultRoundsConfig(), vmConfig, ) } @@ -1240,7 +1249,7 @@ func CreatePreparedTxProcessorWithVMConfigWithShardCoordinatorDBAndGasAndRoundCo roundsConfig config.RoundConfig, vmConfig *config.VirtualMachineConfig, ) (*VMTestContext, error) { - feeAccumulator, _ := postprocess.NewFeeAccumulator() + feeAccumulator := postprocess.NewFeeAccumulator() epochNotifierInstance := forking.NewGenericEpochNotifier() enableEpochsHandler, _ := enablers.NewEnableEpochsHandler(enableEpochsConfig, epochNotifierInstance) accounts := integrationtests.CreateAccountsDB(db, enableEpochsHandler) @@ -1279,6 +1288,7 @@ func CreatePreparedTxProcessorWithVMConfigWithShardCoordinatorDBAndGasAndRoundCo 
epochNotifierInstance, guardedAccountHandler, roundNotifierInstance, + chainHandler, ) if err != nil { return nil, err @@ -1319,7 +1329,7 @@ func CreateTxProcessorWasmVMWithGasSchedule( senderBalance, gasScheduleMap, enableEpochsConfig, - integrationTests.GetDefaultRoundsConfig(), + testscommon.GetDefaultRoundsConfig(), ) } @@ -1332,7 +1342,7 @@ func CreateTxProcessorArwenVMWithGasScheduleAndRoundConfig( enableEpochsConfig config.EnableEpochs, roundsConfig config.RoundConfig, ) (*VMTestContext, error) { - feeAccumulator, _ := postprocess.NewFeeAccumulator() + feeAccumulator := postprocess.NewFeeAccumulator() accounts := integrationtests.CreateInMemoryShardAccountsDB() _, _ = CreateAccount(accounts, senderAddressBytes, senderNonce, senderBalance) vmConfig := createDefaultVMConfig() @@ -1374,6 +1384,7 @@ func CreateTxProcessorArwenVMWithGasScheduleAndRoundConfig( epochNotifierInstance, guardedAccountHandler, roundNotifierInstance, + chainHandler, ) if err != nil { return nil, err @@ -1403,7 +1414,7 @@ func CreateTxProcessorWasmVMWithVMConfig( ) (*VMTestContext, error) { return CreateTxProcessorArwenWithVMConfigAndRoundConfig( enableEpochsConfig, - integrationTests.GetDefaultRoundsConfig(), + testscommon.GetDefaultRoundsConfig(), vmConfig, gasSchedule, ) @@ -1416,7 +1427,7 @@ func CreateTxProcessorArwenWithVMConfigAndRoundConfig( vmConfig *config.VirtualMachineConfig, gasSchedule map[string]map[string]uint64, ) (*VMTestContext, error) { - feeAccumulator, _ := postprocess.NewFeeAccumulator() + feeAccumulator := postprocess.NewFeeAccumulator() accounts := integrationtests.CreateInMemoryShardAccountsDB() wasmVMChangeLocker := &sync.RWMutex{} gasScheduleNotifier := mock.NewGasScheduleNotifierMock(gasSchedule) @@ -1455,6 +1466,7 @@ func CreateTxProcessorArwenWithVMConfigAndRoundConfig( epochNotifierInstance, guardedAccountHandler, roundNotifierInstance, + chainHandler, ) if err != nil { return nil, err @@ -1492,7 +1504,7 @@ func CreatePreparedTxProcessorAndAccountsWithMockedVM( senderAddressBytes, senderBalance, enableEpochs, - integrationTests.GetDefaultRoundsConfig(), + testscommon.GetDefaultRoundsConfig(), wasmVMChangeLocker, ) } @@ -1823,7 +1835,7 @@ func GetNodeIndex(nodeList []*integrationTests.TestProcessorNode, node *integrat // CreatePreparedTxProcessorWithVMsMultiShard - func CreatePreparedTxProcessorWithVMsMultiShard(selfShardID uint32, enableEpochsConfig config.EnableEpochs) (*VMTestContext, error) { - return CreatePreparedTxProcessorWithVMsMultiShardAndRoundConfig(selfShardID, enableEpochsConfig, integrationTests.GetDefaultRoundsConfig()) + return CreatePreparedTxProcessorWithVMsMultiShardAndRoundConfig(selfShardID, enableEpochsConfig, testscommon.GetDefaultRoundsConfig()) } // CreatePreparedTxProcessorWithVMsMultiShardAndRoundConfig - @@ -1840,7 +1852,7 @@ func CreatePreparedTxProcessorWithVMsMultiShardRoundVMConfig( ) (*VMTestContext, error) { shardCoordinator, _ := sharding.NewMultiShardCoordinator(3, selfShardID) - feeAccumulator, _ := postprocess.NewFeeAccumulator() + feeAccumulator := postprocess.NewFeeAccumulator() accounts := integrationtests.CreateInMemoryShardAccountsDB() wasmVMChangeLocker := &sync.RWMutex{} @@ -1885,6 +1897,7 @@ func CreatePreparedTxProcessorWithVMsMultiShardRoundVMConfig( epochNotifierInstance, guardedAccountHandler, roundNotifierInstance, + chainHandler, ) if err != nil { return nil, err diff --git a/integrationTests/vm/txsFee/asyncCall_test.go b/integrationTests/vm/txsFee/asyncCall_test.go index cedf9ad825b..78030ff6b39 100644 --- 
a/integrationTests/vm/txsFee/asyncCall_test.go +++ b/integrationTests/vm/txsFee/asyncCall_test.go @@ -8,6 +8,7 @@ import ( "encoding/hex" "fmt" "math/big" + "runtime" "strings" "testing" @@ -22,6 +23,7 @@ import ( "github.com/multiversx/mx-chain-go/integrationTests/vm/wasm" "github.com/multiversx/mx-chain-go/process" "github.com/multiversx/mx-chain-go/sharding" + "github.com/multiversx/mx-chain-go/testscommon" "github.com/multiversx/mx-chain-go/testscommon/integrationtests" vmcommon "github.com/multiversx/mx-chain-vm-common-go" wasmConfig "github.com/multiversx/mx-chain-vm-go/config" @@ -140,6 +142,10 @@ func TestMinterContractWithAsyncCalls(t *testing.T) { } func TestAsyncCallsOnInitFunctionOnUpgrade(t *testing.T) { + if runtime.GOARCH == "arm64" { + t.Skip("skipping test on arm64") + } + firstContractCode := wasm.GetSCCode("./testdata/first/output/first.wasm") newContractCode := wasm.GetSCCode("./testdata/asyncOnInit/asyncOnInitAndUpgrade.wasm") @@ -191,7 +197,7 @@ func testAsyncCallsOnInitFunctionOnUpgrade( shardCoordinatorForShard1, integrationtests.CreateMemUnit(), gasScheduleNotifier, - integrationTests.GetDefaultRoundsConfig(), + testscommon.GetDefaultRoundsConfig(), vm.CreateVMConfigWithVersion("v1.4"), ) require.Nil(t, err) @@ -200,7 +206,7 @@ func testAsyncCallsOnInitFunctionOnUpgrade( shardCoordinatorForShardMeta, integrationtests.CreateMemUnit(), gasScheduleNotifier, - integrationTests.GetDefaultRoundsConfig(), + testscommon.GetDefaultRoundsConfig(), vm.CreateVMConfigWithVersion("v1.4"), ) require.Nil(t, err) @@ -275,6 +281,10 @@ func testAsyncCallsOnInitFunctionOnUpgrade( } func TestAsyncCallsOnInitFunctionOnDeploy(t *testing.T) { + if runtime.GOARCH == "arm64" { + t.Skip("skipping test on arm64") + } + firstSCCode := wasm.GetSCCode("./testdata/first/output/first.wasm") pathToSecondSC := "./testdata/asyncOnInit/asyncOnInitAndUpgrade.wasm" secondSCCode := wasm.GetSCCode(pathToSecondSC) @@ -325,7 +335,7 @@ func testAsyncCallsOnInitFunctionOnDeploy(t *testing.T, shardCoordinatorForShard1, integrationtests.CreateMemUnit(), gasScheduleNotifier, - integrationTests.GetDefaultRoundsConfig(), + testscommon.GetDefaultRoundsConfig(), vm.CreateVMConfigWithVersion("v1.4"), ) require.Nil(t, err) @@ -334,7 +344,7 @@ func testAsyncCallsOnInitFunctionOnDeploy(t *testing.T, shardCoordinatorForShardMeta, integrationtests.CreateMemUnit(), gasScheduleNotifier, - integrationTests.GetDefaultRoundsConfig(), + testscommon.GetDefaultRoundsConfig(), vm.CreateVMConfigWithVersion("v1.4"), ) require.Nil(t, err) diff --git a/integrationTests/vm/txsFee/backwardsCompatibility_test.go b/integrationTests/vm/txsFee/backwardsCompatibility_test.go index b4a73596edb..94735de21a5 100644 --- a/integrationTests/vm/txsFee/backwardsCompatibility_test.go +++ b/integrationTests/vm/txsFee/backwardsCompatibility_test.go @@ -18,11 +18,10 @@ import ( // minGasPrice = 1, gasPerDataByte = 1, minGasLimit = 1 func TestMoveBalanceSelfShouldWorkAndConsumeTxFeeWhenAllFlagsAreDisabled(t *testing.T) { testContext, err := vm.CreatePreparedTxProcessorWithVMs(config.EnableEpochs{ - PenalizedTooMuchGasEnableEpoch: 100, - BuiltInFunctionOnMetaEnableEpoch: 100, - SCDeployEnableEpoch: 100, - MetaProtectionEnableEpoch: 100, - RelayedTransactionsEnableEpoch: 100, + PenalizedTooMuchGasEnableEpoch: 100, + SCDeployEnableEpoch: 100, + MetaProtectionEnableEpoch: 100, + RelayedTransactionsEnableEpoch: 100, }) require.Nil(t, err) defer testContext.Close() diff --git a/integrationTests/vm/txsFee/builtInFunctions_test.go 
b/integrationTests/vm/txsFee/builtInFunctions_test.go index 6a9b31bb674..3f5bec54e51 100644 --- a/integrationTests/vm/txsFee/builtInFunctions_test.go +++ b/integrationTests/vm/txsFee/builtInFunctions_test.go @@ -20,6 +20,7 @@ import ( "github.com/multiversx/mx-chain-go/process" "github.com/multiversx/mx-chain-go/sharding" "github.com/multiversx/mx-chain-go/state" + "github.com/multiversx/mx-chain-go/testscommon" "github.com/multiversx/mx-chain-go/testscommon/integrationtests" vmcommon "github.com/multiversx/mx-chain-vm-common-go" "github.com/stretchr/testify/assert" @@ -321,7 +322,7 @@ func TestBuildInFunctionSaveKeyValue_NotEnoughGasForTheSameKeyValue(t *testing.T shardCoord, integrationtests.CreateMemUnit(), gasScheduleNotifier, - integrationTests.GetDefaultRoundsConfig(), + testscommon.GetDefaultRoundsConfig(), vm.CreateVMConfigWithVersion("v1.5"), ) require.Nil(t, err) diff --git a/integrationTests/vm/txsFee/dns_test.go b/integrationTests/vm/txsFee/dns_test.go index 53c6644b679..6a2b9315162 100644 --- a/integrationTests/vm/txsFee/dns_test.go +++ b/integrationTests/vm/txsFee/dns_test.go @@ -8,6 +8,7 @@ import ( "encoding/hex" "fmt" "math/big" + "runtime" "testing" "unicode/utf8" @@ -19,6 +20,7 @@ import ( "github.com/multiversx/mx-chain-go/integrationTests/vm" "github.com/multiversx/mx-chain-go/integrationTests/vm/txsFee/utils" "github.com/multiversx/mx-chain-go/state" + "github.com/multiversx/mx-chain-go/testscommon" logger "github.com/multiversx/mx-chain-logger-go" vmcommon "github.com/multiversx/mx-chain-vm-common-go" "github.com/stretchr/testify/assert" @@ -115,6 +117,10 @@ func TestDeployDNSContract_TestRegisterAndResolveAndSendTxWithSndAndRcvUserName( // relayer address is in shard 2, creates a transaction on the behalf of the user from shard 2, that will call the DNS contract // from shard 1. 
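Aside (not part of the patch): two mechanical changes recur across the test files in this diff — skipping on arm64 (where the Wasm VM shared libraries are unavailable) and taking the default rounds config from `testscommon` instead of `integrationTests`. A condensed, illustrative-only view with a hypothetical test name (requires the `runtime`, `testing` and `testscommon` imports):

```go
func TestSomethingOnAllPlatforms(t *testing.T) { // hypothetical test name
	if runtime.GOARCH == "arm64" {
		t.Skip("skipping test on arm64")
	}

	roundsConfig := testscommon.GetDefaultRoundsConfig()
	_ = roundsConfig // used when building the VM test context
}
```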
func TestDeployDNSContract_TestGasWhenSaveUsernameFailsCrossShardBackwardsCompatibility(t *testing.T) { + if runtime.GOARCH == "arm64" { + t.Skip("skipping test on arm64") + } + enableEpochs := config.EnableEpochs{ ChangeUsernameEnableEpoch: 1000, // flag disabled, backwards compatibility SCProcessorV2EnableEpoch: 1000, @@ -124,7 +130,7 @@ func TestDeployDNSContract_TestGasWhenSaveUsernameFailsCrossShardBackwardsCompat testContextForDNSContract, err := vm.CreatePreparedTxProcessorWithVMsMultiShardRoundVMConfig( 1, enableEpochs, - integrationTests.GetDefaultRoundsConfig(), + testscommon.GetDefaultRoundsConfig(), vmConfig, ) require.Nil(t, err) @@ -133,7 +139,7 @@ func TestDeployDNSContract_TestGasWhenSaveUsernameFailsCrossShardBackwardsCompat testContextForRelayerAndUser, err := vm.CreatePreparedTxProcessorWithVMsMultiShardRoundVMConfig( 2, enableEpochs, - integrationTests.GetDefaultRoundsConfig(), + testscommon.GetDefaultRoundsConfig(), vmConfig, ) require.Nil(t, err) diff --git a/integrationTests/vm/txsFee/guardAccount_test.go b/integrationTests/vm/txsFee/guardAccount_test.go index 2baa497f991..58542a72e79 100644 --- a/integrationTests/vm/txsFee/guardAccount_test.go +++ b/integrationTests/vm/txsFee/guardAccount_test.go @@ -99,14 +99,13 @@ func prepareTestContextForGuardedAccounts(tb testing.TB) *vm.VMTestContext { testContext, err := vm.CreatePreparedTxProcessorWithVMsWithShardCoordinatorDBAndGasAndRoundConfig( config.EnableEpochs{ GovernanceEnableEpoch: unreachableEpoch, - WaitingListFixEnableEpoch: unreachableEpoch, SetSenderInEeiOutputTransferEnableEpoch: unreachableEpoch, RefactorPeersMiniBlocksEnableEpoch: unreachableEpoch, }, testscommon.NewMultiShardsCoordinatorMock(2), db, gasScheduleNotifier, - integrationTests.GetDefaultRoundsConfig(), + testscommon.GetDefaultRoundsConfig(), ) require.Nil(tb, err) diff --git a/integrationTests/vm/txsFee/migrateDataTrie_test.go b/integrationTests/vm/txsFee/migrateDataTrie_test.go index a4bc4ad1e0f..9c62a4f30fd 100644 --- a/integrationTests/vm/txsFee/migrateDataTrie_test.go +++ b/integrationTests/vm/txsFee/migrateDataTrie_test.go @@ -215,7 +215,8 @@ func generateDataTrie( for i := 1; i < numLeaves; i++ { key := keyGenerator(i) - err := tr.UpdateWithVersion(key, key, core.NotSpecified) + value := getValWithAppendedData(key, key, accAddr) + err := tr.UpdateWithVersion(key, value, core.NotSpecified) require.Nil(t, err) keys[i] = key @@ -226,6 +227,13 @@ func generateDataTrie( return rootHash, keys } +func getValWithAppendedData(key, val, address []byte) []byte { + suffix := append(key, address...) + val = append(val, suffix...) 
+ + return val +} + func initDataTrie( t *testing.T, testContext *vm.VMTestContext, diff --git a/integrationTests/vm/txsFee/multiShard/asyncCall_test.go b/integrationTests/vm/txsFee/multiShard/asyncCall_test.go index 181d937e55e..e799fd3efc6 100644 --- a/integrationTests/vm/txsFee/multiShard/asyncCall_test.go +++ b/integrationTests/vm/txsFee/multiShard/asyncCall_test.go @@ -11,6 +11,7 @@ import ( "github.com/multiversx/mx-chain-go/integrationTests" "github.com/multiversx/mx-chain-go/integrationTests/vm" "github.com/multiversx/mx-chain-go/integrationTests/vm/txsFee/utils" + "github.com/multiversx/mx-chain-go/testscommon" vmcommon "github.com/multiversx/mx-chain-vm-common-go" "github.com/stretchr/testify/require" ) @@ -128,7 +129,7 @@ func TestAsyncCallDisabled(t *testing.T) { SCProcessorV2EnableEpoch: integrationTests.UnreachableEpoch, } - roundsConfig := integrationTests.GetDefaultRoundsConfig() + roundsConfig := testscommon.GetDefaultRoundsConfig() activationRound := roundsConfig.RoundActivations["DisableAsyncCallV1"] activationRound.Round = "0" roundsConfig.RoundActivations["DisableAsyncCallV1"] = activationRound diff --git a/integrationTests/vm/txsFee/scCalls_test.go b/integrationTests/vm/txsFee/scCalls_test.go index db01a33cd11..a4529d959a2 100644 --- a/integrationTests/vm/txsFee/scCalls_test.go +++ b/integrationTests/vm/txsFee/scCalls_test.go @@ -23,6 +23,7 @@ import ( "github.com/multiversx/mx-chain-go/integrationTests/vm" "github.com/multiversx/mx-chain-go/integrationTests/vm/txsFee/utils" "github.com/multiversx/mx-chain-go/process" + "github.com/multiversx/mx-chain-go/testscommon" "github.com/multiversx/mx-chain-go/testscommon/integrationtests" "github.com/multiversx/mx-chain-go/testscommon/txDataBuilder" "github.com/multiversx/mx-chain-go/vm/systemSmartContracts/defaults" @@ -60,7 +61,6 @@ func prepareTestContextForEpoch836(tb testing.TB) (*vm.VMTestContext, []byte) { testContext, err := vm.CreatePreparedTxProcessorWithVMsWithShardCoordinatorDBAndGasAndRoundConfig( config.EnableEpochs{ GovernanceEnableEpoch: unreachableEpoch, - WaitingListFixEnableEpoch: unreachableEpoch, SetSenderInEeiOutputTransferEnableEpoch: unreachableEpoch, RefactorPeersMiniBlocksEnableEpoch: unreachableEpoch, MaxBlockchainHookCountersEnableEpoch: unreachableEpoch, @@ -69,7 +69,7 @@ func prepareTestContextForEpoch836(tb testing.TB) (*vm.VMTestContext, []byte) { mock.NewMultiShardsCoordinatorMock(2), db, gasScheduleNotifier, - integrationTests.GetDefaultRoundsConfig(), + testscommon.GetDefaultRoundsConfig(), ) require.Nil(tb, err) @@ -368,7 +368,6 @@ func prepareTestContextForEpoch460(tb testing.TB) (*vm.VMTestContext, []byte) { testContext, err := vm.CreatePreparedTxProcessorWithVMs(config.EnableEpochs{ GovernanceEnableEpoch: unreachableEpoch, - WaitingListFixEnableEpoch: unreachableEpoch, ScheduledMiniBlocksEnableEpoch: unreachableEpoch, CorrectJailedNotUnstakedEmptyQueueEpoch: unreachableEpoch, OptimizeNFTStoreEnableEpoch: unreachableEpoch, diff --git a/integrationTests/vm/txsFee/validatorSC_test.go b/integrationTests/vm/txsFee/validatorSC_test.go index 31fbaea8dae..ca4ff9271de 100644 --- a/integrationTests/vm/txsFee/validatorSC_test.go +++ b/integrationTests/vm/txsFee/validatorSC_test.go @@ -10,12 +10,12 @@ import ( "github.com/multiversx/mx-chain-core-go/data/block" "github.com/multiversx/mx-chain-core-go/data/smartContractResult" "github.com/multiversx/mx-chain-core-go/data/transaction" - "github.com/multiversx/mx-chain-core-go/marshal" "github.com/multiversx/mx-chain-go/config" 
"github.com/multiversx/mx-chain-go/integrationTests/vm" "github.com/multiversx/mx-chain-go/integrationTests/vm/txsFee/utils" "github.com/multiversx/mx-chain-go/process/smartContract/hooks" "github.com/multiversx/mx-chain-go/state" + "github.com/multiversx/mx-chain-go/testscommon/stakingcommon" vmAddr "github.com/multiversx/mx-chain-go/vm" "github.com/multiversx/mx-chain-go/vm/systemSmartContracts" vmcommon "github.com/multiversx/mx-chain-vm-common-go" @@ -28,6 +28,9 @@ const ( validatorStakeData = "stake@01@" + validatorBLSKey + "@0b823739887c40e9331f70c5a140623dfaf4558a9138b62f4473b26bbafdd4f58cb5889716a71c561c9e20e7a280e985@b2a11555ce521e4944e09ab17549d85b487dcd26c84b5017a39e31a3670889ba" cannotUnBondTokensMessage = "cannot unBond tokens, the validator would remain without min deposit, nodes are still active" noTokensToUnBondMessage = "no tokens that can be unbond at this time" + delegationManagementKey = "delegationManagement" + stakingV4Step1EnableEpoch = 4443 + stakingV4Step2EnableEpoch = 4444 ) var ( @@ -36,8 +39,6 @@ var ( value200EGLD, _ = big.NewInt(0).SetString("200000000000000000000", 10) ) -const delegationManagementKey = "delegationManagement" - func saveDelegationManagerConfig(testContext *vm.VMTestContext) { acc, _ := testContext.Accounts.LoadAccount(vmAddr.DelegationManagerSCAddress) userAcc, _ := acc.(state.UserAccountHandler) @@ -54,7 +55,7 @@ func TestValidatorsSC_DoStakePutInQueueUnStakeAndUnBondShouldRefund(t *testing.T require.Nil(t, err) defer testContextMeta.Close() - saveNodesConfig(t, testContextMeta, 1, 1, 1) + stakingcommon.SaveNodesConfig(testContextMeta.Accounts, testContextMeta.Marshalizer, 1, 1, 1) testContextMeta.BlockchainHook.(*hooks.BlockChainHookImpl).SetCurrentHeader(&block.MetaBlock{Epoch: 1}) saveDelegationManagerConfig(testContextMeta) @@ -105,12 +106,18 @@ func checkReturnLog(t *testing.T, testContextMeta *vm.VMTestContext, subStr stri } func TestValidatorsSC_DoStakePutInQueueUnStakeAndUnBondTokensShouldRefund(t *testing.T) { - testContextMeta, err := vm.CreatePreparedTxProcessorWithVMsMultiShard(core.MetachainShardId, config.EnableEpochs{}) + testContextMeta, err := vm.CreatePreparedTxProcessorWithVMsMultiShard( + core.MetachainShardId, + config.EnableEpochs{ + StakingV4Step1EnableEpoch: stakingV4Step1EnableEpoch, + StakingV4Step2EnableEpoch: stakingV4Step2EnableEpoch, + }, + ) require.Nil(t, err) defer testContextMeta.Close() - saveNodesConfig(t, testContextMeta, 1, 1, 1) + stakingcommon.SaveNodesConfig(testContextMeta.Accounts, testContextMeta.Marshalizer, 1, 1, 1) saveDelegationManagerConfig(testContextMeta) testContextMeta.BlockchainHook.(*hooks.BlockChainHookImpl).SetCurrentHeader(&block.MetaBlock{Epoch: 1}) @@ -137,11 +144,15 @@ func TestValidatorsSC_DoStakePutInQueueUnStakeAndUnBondTokensShouldRefund(t *tes func TestValidatorsSC_DoStakeWithTopUpValueTryToUnStakeTokensAndUnBondTokens(t *testing.T) { argUnbondTokensV1 := config.EnableEpochs{ UnbondTokensV2EnableEpoch: 20000, + StakingV4Step1EnableEpoch: stakingV4Step1EnableEpoch, + StakingV4Step2EnableEpoch: stakingV4Step2EnableEpoch, } testValidatorsSCDoStakeWithTopUpValueTryToUnStakeTokensAndUnBondTokens(t, argUnbondTokensV1) argUnbondTokensV2 := config.EnableEpochs{ UnbondTokensV2EnableEpoch: 0, + StakingV4Step1EnableEpoch: stakingV4Step1EnableEpoch, + StakingV4Step2EnableEpoch: stakingV4Step2EnableEpoch, } testValidatorsSCDoStakeWithTopUpValueTryToUnStakeTokensAndUnBondTokens(t, argUnbondTokensV2) } @@ -152,7 +163,7 @@ func 
testValidatorsSCDoStakeWithTopUpValueTryToUnStakeTokensAndUnBondTokens(t *t require.Nil(t, err) defer testContextMeta.Close() - saveNodesConfig(t, testContextMeta, 1, 1, 1) + stakingcommon.SaveNodesConfig(testContextMeta.Accounts, testContextMeta.Marshalizer, 1, 1, 1) saveDelegationManagerConfig(testContextMeta) testContextMeta.BlockchainHook.(*hooks.BlockChainHookImpl).SetCurrentHeader(&block.MetaBlock{Epoch: 0}) @@ -174,12 +185,18 @@ func testValidatorsSCDoStakeWithTopUpValueTryToUnStakeTokensAndUnBondTokens(t *t } func TestValidatorsSC_ToStakePutInQueueUnStakeAndUnBondShouldRefundUnBondTokens(t *testing.T) { - testContextMeta, err := vm.CreatePreparedTxProcessorWithVMsMultiShard(core.MetachainShardId, config.EnableEpochs{}) + testContextMeta, err := vm.CreatePreparedTxProcessorWithVMsMultiShard( + core.MetachainShardId, + config.EnableEpochs{ + StakingV4Step1EnableEpoch: stakingV4Step1EnableEpoch, + StakingV4Step2EnableEpoch: stakingV4Step2EnableEpoch, + }, + ) require.Nil(t, err) defer testContextMeta.Close() - saveNodesConfig(t, testContextMeta, 1, 1, 1) + stakingcommon.SaveNodesConfig(testContextMeta.Accounts, testContextMeta.Marshalizer, 1, 1, 1) saveDelegationManagerConfig(testContextMeta) testContextMeta.BlockchainHook.(*hooks.BlockChainHookImpl).SetCurrentHeader(&block.MetaBlock{Epoch: 1}) @@ -220,12 +237,18 @@ func TestValidatorsSC_ToStakePutInQueueUnStakeAndUnBondShouldRefundUnBondTokens( } func TestValidatorsSC_ToStakePutInQueueUnStakeNodesAndUnBondNodesShouldRefund(t *testing.T) { - testContextMeta, err := vm.CreatePreparedTxProcessorWithVMsMultiShard(core.MetachainShardId, config.EnableEpochs{}) + testContextMeta, err := vm.CreatePreparedTxProcessorWithVMsMultiShard( + core.MetachainShardId, + config.EnableEpochs{ + StakingV4Step1EnableEpoch: stakingV4Step1EnableEpoch, + StakingV4Step2EnableEpoch: stakingV4Step2EnableEpoch, + }, + ) require.Nil(t, err) defer testContextMeta.Close() - saveNodesConfig(t, testContextMeta, 1, 1, 1) + stakingcommon.SaveNodesConfig(testContextMeta.Accounts, testContextMeta.Marshalizer, 1, 1, 1) saveDelegationManagerConfig(testContextMeta) testContextMeta.BlockchainHook.(*hooks.BlockChainHookImpl).SetCurrentHeader(&block.MetaBlock{Epoch: 1}) @@ -278,22 +301,3 @@ func executeTxAndCheckResults( require.Equal(t, vmCodeExpected, recCode) require.Equal(t, expectedErr, err) } - -func saveNodesConfig(t *testing.T, testContext *vm.VMTestContext, stakedNodes, minNumNodes, maxNumNodes int64) { - protoMarshalizer := &marshal.GogoProtoMarshalizer{} - - account, err := testContext.Accounts.LoadAccount(vmAddr.StakingSCAddress) - require.Nil(t, err) - userAccount, _ := account.(state.UserAccountHandler) - - nodesConfigData := &systemSmartContracts.StakingNodesConfig{ - StakedNodes: stakedNodes, - MinNumNodes: minNumNodes, - MaxNumNodes: maxNumNodes, - } - nodesDataBytes, _ := protoMarshalizer.Marshal(nodesConfigData) - - _ = userAccount.SaveKeyValue([]byte("nodesConfig"), nodesDataBytes) - _ = testContext.Accounts.SaveAccount(account) - _, _ = testContext.Accounts.Commit() -} diff --git a/integrationTests/vm/wasm/queries/queries_test.go b/integrationTests/vm/wasm/queries/queries_test.go new file mode 100644 index 00000000000..7c51f04b325 --- /dev/null +++ b/integrationTests/vm/wasm/queries/queries_test.go @@ -0,0 +1,246 @@ +//go:build !race + +// TODO remove build condition above to allow -race -short, after Wasm VM fix + +package queries + +import ( + "context" + "encoding/hex" + "fmt" + "math/big" + "testing" + + "github.com/multiversx/mx-chain-core-go/core" + 
"github.com/multiversx/mx-chain-go/integrationTests" + "github.com/multiversx/mx-chain-go/integrationTests/vm/wasm" + "github.com/multiversx/mx-chain-go/process" + "github.com/multiversx/mx-chain-go/process/factory" + "github.com/multiversx/mx-chain-go/vm" + vmcommon "github.com/multiversx/mx-chain-vm-common-go" + + "github.com/stretchr/testify/require" +) + +type now struct { + blockNonce uint64 + stateRootHash []byte +} + +func TestQueries(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + + snapshotsOfGetNow := make(map[uint64]now) + snapshotsOfGetState := make(map[uint64]int) + historyOfGetNow := make(map[uint64]now) + historyOfGetState := make(map[uint64]int) + + network := integrationTests.NewMiniNetwork() + defer network.Stop() + + scOwner := network.AddUser(big.NewInt(10000000000000)) + + network.Start() + + // Block 1 + + scAddress := deploy(t, network, scOwner.Address, "../testdata/history/output/history.wasm") + network.Continue(t, 1) + + // Block 2 + + now := queryHistoryGetNow(t, network.ShardNode, scAddress, core.OptionalUint64{}) + snapshotsOfGetNow[1] = now + network.Continue(t, 1) + + // Block 3 + + now = queryHistoryGetNow(t, network.ShardNode, scAddress, core.OptionalUint64{}) + snapshotsOfGetNow[2] = now + setState(t, network, scAddress, scOwner.Address, 42) + network.Continue(t, 1) + + // Block 4 + + state := getState(t, network.ShardNode, scAddress, core.OptionalUint64{}) + snapshotsOfGetState[3] = state + now = queryHistoryGetNow(t, network.ShardNode, scAddress, core.OptionalUint64{}) + snapshotsOfGetNow[3] = now + setState(t, network, scAddress, scOwner.Address, 43) + network.Continue(t, 1) + + // Block 4 + + state = getState(t, network.ShardNode, scAddress, core.OptionalUint64{}) + snapshotsOfGetState[4] = state + now = queryHistoryGetNow(t, network.ShardNode, scAddress, core.OptionalUint64{}) + snapshotsOfGetNow[4] = now + network.Continue(t, 1) + + // Check snapshots + block1, _ := network.ShardNode.GetShardHeader(1) + block2, _ := network.ShardNode.GetShardHeader(2) + block3, _ := network.ShardNode.GetShardHeader(3) + + require.Equal(t, uint64(1), snapshotsOfGetNow[1].blockNonce) + require.Equal(t, uint64(2), snapshotsOfGetNow[2].blockNonce) + require.Equal(t, uint64(3), snapshotsOfGetNow[3].blockNonce) + require.Equal(t, uint64(4), snapshotsOfGetNow[4].blockNonce) + + require.Equal(t, block1.GetRootHash(), snapshotsOfGetNow[1].stateRootHash) + require.Equal(t, block1.GetRootHash(), snapshotsOfGetNow[2].stateRootHash) + require.NotEqual(t, block2.GetRootHash(), snapshotsOfGetNow[3].stateRootHash) + require.NotEqual(t, block3.GetRootHash(), snapshotsOfGetNow[4].stateRootHash) + + require.Equal(t, 42, snapshotsOfGetState[3]) + require.Equal(t, 43, snapshotsOfGetState[4]) + + // Check history + historyOfGetState[1] = getState(t, network.ShardNode, scAddress, core.OptionalUint64{HasValue: true, Value: 1}) + historyOfGetNow[1] = queryHistoryGetNow(t, network.ShardNode, scAddress, core.OptionalUint64{HasValue: true, Value: 1}) + + historyOfGetState[2] = getState(t, network.ShardNode, scAddress, core.OptionalUint64{HasValue: true, Value: 2}) + historyOfGetNow[2] = queryHistoryGetNow(t, network.ShardNode, scAddress, core.OptionalUint64{HasValue: true, Value: 2}) + + historyOfGetState[3] = getState(t, network.ShardNode, scAddress, core.OptionalUint64{HasValue: true, Value: 3}) + historyOfGetNow[3] = queryHistoryGetNow(t, network.ShardNode, scAddress, core.OptionalUint64{HasValue: true, Value: 3}) + + historyOfGetState[4] = getState(t, 
network.ShardNode, scAddress, core.OptionalUint64{HasValue: true, Value: 4}) + historyOfGetNow[4] = queryHistoryGetNow(t, network.ShardNode, scAddress, core.OptionalUint64{HasValue: true, Value: 4}) + + require.Equal(t, snapshotsOfGetState[1], historyOfGetState[1]) + require.Equal(t, snapshotsOfGetNow[1].blockNonce, historyOfGetNow[1].blockNonce) + + require.Equal(t, snapshotsOfGetState[2], historyOfGetState[2]) + require.Equal(t, snapshotsOfGetNow[2].blockNonce, historyOfGetNow[2].blockNonce) + + require.Equal(t, snapshotsOfGetState[3], historyOfGetState[3]) + require.Equal(t, snapshotsOfGetNow[3].blockNonce, historyOfGetNow[3].blockNonce) + + require.Equal(t, snapshotsOfGetState[4], historyOfGetState[4]) + require.Equal(t, snapshotsOfGetNow[4].blockNonce, historyOfGetNow[4].blockNonce) +} + +func deploy(t *testing.T, network *integrationTests.MiniNetwork, sender []byte, codePath string) []byte { + code := wasm.GetSCCode(codePath) + data := fmt.Sprintf("%s@%s@0100", code, hex.EncodeToString(factory.WasmVirtualMachine)) + + _, err := network.SendTransaction( + sender, + make([]byte, 32), + big.NewInt(0), + data, + 1000, + ) + require.NoError(t, err) + + scAddress, _ := network.ShardNode.BlockchainHook.NewAddress(sender, 0, factory.WasmVirtualMachine) + return scAddress +} + +func setState(t *testing.T, network *integrationTests.MiniNetwork, scAddress []byte, sender []byte, value uint64) { + data := fmt.Sprintf("setState@%x", value) + + _, err := network.SendTransaction( + sender, + scAddress, + big.NewInt(0), + data, + 1000, + ) + + require.NoError(t, err) +} + +func getState(t *testing.T, node *integrationTests.TestProcessorNode, scAddress []byte, blockNonce core.OptionalUint64) int { + vmOutput, _, err := node.SCQueryService.ExecuteQuery(&process.SCQuery{ + ScAddress: scAddress, + FuncName: "getState", + Arguments: [][]byte{}, + BlockNonce: blockNonce, + }) + + require.Nil(t, err) + require.Equal(t, vmcommon.Ok, vmOutput.ReturnCode) + data := vmOutput.ReturnData + + return int(big.NewInt(0).SetBytes(data[0]).Uint64()) +} + +func queryHistoryGetNow(t *testing.T, node *integrationTests.TestProcessorNode, scAddress []byte, blockNonce core.OptionalUint64) now { + vmOutput, _, err := node.SCQueryService.ExecuteQuery(&process.SCQuery{ + ScAddress: scAddress, + FuncName: "getNow", + Arguments: [][]byte{}, + BlockNonce: blockNonce, + }) + + require.Nil(t, err) + require.Equal(t, vmcommon.Ok, vmOutput.ReturnCode) + data := vmOutput.ReturnData + + return now{ + blockNonce: big.NewInt(0).SetBytes(data[0]).Uint64(), + stateRootHash: data[1], + } +} + +func TestQueries_Metachain(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + + network := integrationTests.NewMiniNetwork() + defer network.Stop() + + network.Start() + + alice := network.AddUser(big.NewInt(10000000000000)) + + // Issue fungible token + issueCost := big.NewInt(1000) + tokenNameHex := hex.EncodeToString([]byte("Test")) + tokenTickerHex := hex.EncodeToString([]byte("TEST")) + txData := fmt.Sprintf("issue@%s@%s@64@00", tokenNameHex, tokenTickerHex) + + _, err := network.SendTransaction( + alice.Address, + vm.ESDTSCAddress, + issueCost, + txData, + core.MinMetaTxExtraGasCost, + ) + + require.NoError(t, err) + network.Continue(t, 5) + + tokens, err := network.MetachainNode.Node.GetAllIssuedESDTs(core.FungibleESDT, context.Background()) + require.NoError(t, err) + require.Len(t, tokens, 1) + + // Query token on older block (should fail) + vmOutput, _, err := 
network.MetachainNode.SCQueryService.ExecuteQuery(&process.SCQuery{ + ScAddress: vm.ESDTSCAddress, + FuncName: "getTokenProperties", + Arguments: [][]byte{[]byte(tokens[0])}, + BlockNonce: core.OptionalUint64{HasValue: true, Value: 2}, + }) + + require.Nil(t, err) + require.Equal(t, vmcommon.UserError, vmOutput.ReturnCode) + require.Equal(t, "no ticker with given name", vmOutput.ReturnMessage) + + // Query token on newer block (should succeed) + vmOutput, _, err = network.MetachainNode.SCQueryService.ExecuteQuery(&process.SCQuery{ + ScAddress: vm.ESDTSCAddress, + FuncName: "getTokenProperties", + Arguments: [][]byte{[]byte(tokens[0])}, + BlockNonce: core.OptionalUint64{HasValue: true, Value: 4}, + }) + + require.Nil(t, err) + require.Equal(t, vmcommon.Ok, vmOutput.ReturnCode) + require.Equal(t, "Test", string(vmOutput.ReturnData[0])) +} diff --git a/integrationTests/vm/wasm/testdata/history/history.c b/integrationTests/vm/wasm/testdata/history/history.c new file mode 100644 index 00000000000..322e216aca8 --- /dev/null +++ b/integrationTests/vm/wasm/testdata/history/history.c @@ -0,0 +1,51 @@ +typedef unsigned char byte; +typedef unsigned int i32; +typedef unsigned long long i64; + +int getArgument(int argumentIndex, byte *argument); +long long int64getArgument(int argumentIndex); +long long getBlockNonce(); +long long getBlockEpoch(); +void getStateRootHash(byte *hash); + +int int64storageStore(byte *key, int keyLength, long long value); +long long int64storageLoad(byte *key, int keyLength); + +void finish(byte *data, int length); +void int64finish(long long value); + +byte zero32_buffer_a[32] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; +byte zero32_buffer_b[32] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; +byte zero32_buffer_c[32] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; +byte storageKey[] = "state"; + +void init() +{ +} + +void upgrade() +{ +} + +void setState() +{ + i64 state = int64getArgument(0); + int64storageStore(storageKey, sizeof(storageKey) - 1, state); +} + +void getState() +{ + i64 state = int64storageLoad(storageKey, sizeof(storageKey) - 1); + int64finish(state); +} + +void getNow() +{ + i64 nonce = getBlockNonce(); + + byte *stateRootHash = zero32_buffer_a; + getStateRootHash(stateRootHash); + + int64finish(nonce); + finish(stateRootHash, 32); +} diff --git a/integrationTests/vm/wasm/testdata/history/history.export b/integrationTests/vm/wasm/testdata/history/history.export new file mode 100644 index 00000000000..b6646aa3aef --- /dev/null +++ b/integrationTests/vm/wasm/testdata/history/history.export @@ -0,0 +1,5 @@ +init +upgrade +getNow +setState +getState diff --git a/integrationTests/vm/wasm/testdata/history/output/history.wasm b/integrationTests/vm/wasm/testdata/history/output/history.wasm new file mode 100755 index 00000000000..5e34d9a0ab0 Binary files /dev/null and b/integrationTests/vm/wasm/testdata/history/output/history.wasm differ diff --git a/integrationTests/vm/wasm/testdata/transferValue/output/transferValue.wasm b/integrationTests/vm/wasm/testdata/transferValue/output/transferValue.wasm new file mode 100755 index 00000000000..cea133a3b2f Binary files /dev/null and b/integrationTests/vm/wasm/testdata/transferValue/output/transferValue.wasm differ diff --git a/integrationTests/vm/wasm/testdata/transferValue/transferValue.c b/integrationTests/vm/wasm/testdata/transferValue/transferValue.c new file 
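The two metachain queries above differ only in the BlockNonce they pass, so the pattern can be factored into a small wrapper. The helper below is a hypothetical convenience function, not part of the change; the SCQuery fields and the three return values of ExecuteQuery are the ones visible in this diff.

package queries

import (
	"github.com/multiversx/mx-chain-core-go/core"
	"github.com/multiversx/mx-chain-go/integrationTests"
	"github.com/multiversx/mx-chain-go/process"
	vmcommon "github.com/multiversx/mx-chain-vm-common-go"
)

// queryAtBlock runs a view function either against the current state (empty nonce)
// or against the state recorded at the given block nonce.
func queryAtBlock(
	node *integrationTests.TestProcessorNode,
	scAddress []byte,
	function string,
	nonce core.OptionalUint64,
	arguments [][]byte,
) (*vmcommon.VMOutput, error) {
	vmOutput, _, err := node.SCQueryService.ExecuteQuery(&process.SCQuery{
		ScAddress:  scAddress,
		FuncName:   function,
		Arguments:  arguments,
		BlockNonce: nonce,
	})
	return vmOutput, err
}
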
mode 100644 index 00000000000..e82fc4054d8 --- /dev/null +++ b/integrationTests/vm/wasm/testdata/transferValue/transferValue.c @@ -0,0 +1,58 @@ +typedef unsigned char byte; +typedef unsigned int i32; +typedef unsigned long long i64; + +int getArgument(int argumentIndex, byte *argument); +int transferValueExecute(byte *destination, byte *value, long long gas, byte *function, int functionLength, int numArguments, byte *argumentsLengths, byte *arguments); +void getCaller(byte *callerAddress); +i32 createAsyncCall(byte *destination, byte *value, byte *data, int dataLength, byte *success, int successLength, byte *error, int errorLength, long long gas, long long extraGasForCallback); + +byte zero32_a[32] = {0}; +byte zero32_b[32] = {0}; +byte zero32_c[32] = {0}; + +byte oneAtomOfEGLD[32] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1}; +byte functionNameAskMoney[] = "askMoney"; +byte functionNameMyCallback[] = "myCallback"; + +void init() +{ +} + +void upgrade() +{ +} + +void fund() +{ +} + +void forwardAskMoney() +{ + byte *otherContract = zero32_a; + getArgument(0, otherContract); + + createAsyncCall( + otherContract, + 0, + functionNameAskMoney, + sizeof(functionNameAskMoney) - 1, + functionNameMyCallback, + sizeof(functionNameMyCallback) - 1, + functionNameMyCallback, + sizeof(functionNameMyCallback) - 1, + 15000000, + 0); +} + +void askMoney() +{ + byte *caller = zero32_a; + + getCaller(caller); + transferValueExecute(caller, oneAtomOfEGLD, 0, 0, 0, 0, 0, 0); +} + +void myCallback() +{ +} diff --git a/integrationTests/vm/wasm/testdata/transferValue/transferValue.export b/integrationTests/vm/wasm/testdata/transferValue/transferValue.export new file mode 100644 index 00000000000..c9613a09af3 --- /dev/null +++ b/integrationTests/vm/wasm/testdata/transferValue/transferValue.export @@ -0,0 +1,6 @@ +init +upgrade +fund +forwardAskMoney +askMoney +myCallback diff --git a/integrationTests/vm/wasm/transfers/transfers_test.go b/integrationTests/vm/wasm/transfers/transfers_test.go new file mode 100644 index 00000000000..98e0a416a89 --- /dev/null +++ b/integrationTests/vm/wasm/transfers/transfers_test.go @@ -0,0 +1,62 @@ +//go:build !race + +package transfers + +import ( + "encoding/hex" + "fmt" + "math/big" + "testing" + + "github.com/multiversx/mx-chain-go/integrationTests/vm/wasm" + "github.com/stretchr/testify/require" +) + +func TestTransfers_DuplicatedTransferValueEvents(t *testing.T) { + context := wasm.SetupTestContext(t) + defer context.Close() + + err := context.DeploySC("../testdata/transferValue/output/transferValue.wasm", "") + require.Nil(t, err) + vault := context.ScAddress + + err = context.DeploySC("../testdata/transferValue/output/transferValue.wasm", "") + require.Nil(t, err) + forwarder := context.ScAddress + + // Add money to the vault + context.ScAddress = vault + err = context.ExecuteSCWithValue(&context.Owner, "fund", big.NewInt(42)) + require.Nil(t, err) + + // Ask money from the vault, via the forwarder + context.ScAddress = forwarder + err = context.ExecuteSC(&context.Owner, fmt.Sprintf("forwardAskMoney@%s", hex.EncodeToString(vault))) + require.Nil(t, err) + require.Len(t, context.LastLogs, 1) + require.Len(t, context.LastLogs[0].GetLogEvents(), 5) + + events := context.LastLogs[0].GetLogEvents() + + require.Equal(t, "transferValueOnly", string(events[0].GetIdentifier())) + require.Equal(t, "AsyncCall", string(events[0].GetData())) + require.Equal(t, []byte{}, events[0].GetTopics()[0]) + require.Equal(t, forwarder, 
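A side note on the call data used by these tests (forwardAskMoney@..., setState@...): arguments are hex-encoded and appended to the function name with '@' separators. A minimal, self-contained illustration; the address bytes are a placeholder.

package main

import (
	"encoding/hex"
	"fmt"
)

func main() {
	// Placeholder 32-byte contract address.
	vault := []byte("vault-contract-address-32-bytes!")

	// Function name first, then each argument hex-encoded, joined with '@'.
	txData := fmt.Sprintf("forwardAskMoney@%s", hex.EncodeToString(vault))
	fmt.Println(txData)
}
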
events[0].GetAddress()) + require.Equal(t, vault, events[0].GetTopics()[1]) + + require.Equal(t, "transferValueOnly", string(events[1].GetIdentifier())) + require.Equal(t, "BackTransfer", string(events[1].GetData())) + require.Equal(t, []byte{0x01}, events[1].GetTopics()[0]) + require.Equal(t, vault, events[1].GetAddress()) + require.Equal(t, forwarder, events[1].GetTopics()[1]) + + // Duplicated "transferValueOnly" events are fixed in #5936. + require.Equal(t, "transferValueOnly", string(events[2].GetIdentifier())) + require.Equal(t, "AsyncCallback", string(events[2].GetData())) + require.Equal(t, []byte{}, events[2].GetTopics()[0]) + require.Equal(t, vault, events[2].GetAddress()) + require.Equal(t, forwarder, events[2].GetTopics()[1]) + + require.Equal(t, "writeLog", string(events[3].GetIdentifier())) + require.Equal(t, "completedTxEvent", string(events[4].GetIdentifier())) +} diff --git a/integrationTests/vm/wasm/upgrades/upgrades_test.go b/integrationTests/vm/wasm/upgrades/upgrades_test.go index c989498c955..514507b0c04 100644 --- a/integrationTests/vm/wasm/upgrades/upgrades_test.go +++ b/integrationTests/vm/wasm/upgrades/upgrades_test.go @@ -10,9 +10,7 @@ import ( "math/big" "testing" - "github.com/multiversx/mx-chain-core-go/data/transaction" "github.com/multiversx/mx-chain-go/integrationTests" - "github.com/multiversx/mx-chain-go/integrationTests/vm" "github.com/multiversx/mx-chain-go/integrationTests/vm/wasm" "github.com/multiversx/mx-chain-go/process" "github.com/multiversx/mx-chain-go/process/factory" @@ -172,61 +170,56 @@ func TestUpgrades_HelloTrialAndError(t *testing.T) { t.Skip("this is not a short test") } - network := integrationTests.NewOneNodeNetwork() + network := integrationTests.NewMiniNetwork() defer network.Stop() - alice := []byte("aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa") - bob := []byte("bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb") - network.Mint(alice, big.NewInt(10000000000000)) - network.Mint(bob, big.NewInt(10000000000000)) + alice := network.AddUser(big.NewInt(10000000000000)) + bob := network.AddUser(big.NewInt(10000000000000)) - network.GoToRoundOne() + network.Start() deployTxData := fmt.Sprintf("%s@%s@0100", wasm.GetSCCode("../testdata/hello-v1/output/answer.wasm"), hex.EncodeToString(factory.WasmVirtualMachine)) upgradeTxData := fmt.Sprintf("upgradeContract@%s@0100", wasm.GetSCCode("../testdata/hello-v2/output/answer.wasm")) // Deploy the smart contract. 
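When an assertion like the ones above fails, it helps to print every event the call produced. The TestContext changes later in this diff expose them via LastLogs; the helper below is hypothetical and only uses accessors that already appear in the test (GetLogEvents, GetIdentifier, GetAddress, GetData, GetTopics).

package transfers

import (
	"fmt"

	"github.com/multiversx/mx-chain-go/integrationTests/vm/wasm"
)

// dumpLastEvents prints the events recorded for the last processed transaction(s).
func dumpLastEvents(context *wasm.TestContext) {
	for _, logEntry := range context.LastLogs {
		for _, event := range logEntry.GetLogEvents() {
			fmt.Printf("event=%s address=%x data=%s topics=%d\n",
				event.GetIdentifier(), event.GetAddress(), event.GetData(), len(event.GetTopics()))
		}
	}
}
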
Alice is the owner - network.AddTxToPool(&transaction.Transaction{ - Nonce: 0, - Value: big.NewInt(0), - RcvAddr: vm.CreateEmptyAddress(), - SndAddr: alice, - GasPrice: network.GetMinGasPrice(), - GasLimit: network.MaxGasLimitPerBlock(), - Data: []byte(deployTxData), - }) + _, err := network.SendTransaction( + alice.Address, + make([]byte, 32), + big.NewInt(0), + deployTxData, + 1000, + ) + require.Nil(t, err) - scAddress, _ := network.Node.BlockchainHook.NewAddress(alice, 0, factory.WasmVirtualMachine) + scAddress, _ := network.ShardNode.BlockchainHook.NewAddress(alice.Address, 0, factory.WasmVirtualMachine) network.Continue(t, 1) - require.Equal(t, []byte{24}, query(t, network.Node, scAddress, "getUltimateAnswer")) + require.Equal(t, []byte{24}, query(t, network.ShardNode, scAddress, "getUltimateAnswer")) // Upgrade as Bob - upgrade should fail, since Alice is the owner - network.AddTxToPool(&transaction.Transaction{ - Nonce: 0, - Value: big.NewInt(0), - RcvAddr: scAddress, - SndAddr: bob, - GasPrice: network.GetMinGasPrice(), - GasLimit: network.MaxGasLimitPerBlock(), - Data: []byte(upgradeTxData), - }) + _, err = network.SendTransaction( + bob.Address, + scAddress, + big.NewInt(0), + upgradeTxData, + 1000, + ) + require.Nil(t, err) network.Continue(t, 1) - require.Equal(t, []byte{24}, query(t, network.Node, scAddress, "getUltimateAnswer")) + require.Equal(t, []byte{24}, query(t, network.ShardNode, scAddress, "getUltimateAnswer")) // Now upgrade as Alice, should work - network.AddTxToPool(&transaction.Transaction{ - Nonce: 1, - Value: big.NewInt(0), - RcvAddr: scAddress, - SndAddr: alice, - GasPrice: network.GetMinGasPrice(), - GasLimit: network.MaxGasLimitPerBlock(), - Data: []byte(upgradeTxData), - }) + _, err = network.SendTransaction( + alice.Address, + scAddress, + big.NewInt(0), + upgradeTxData, + 1000, + ) + require.Nil(t, err) network.Continue(t, 1) - require.Equal(t, []byte{42}, query(t, network.Node, scAddress, "getUltimateAnswer")) + require.Equal(t, []byte{42}, query(t, network.ShardNode, scAddress, "getUltimateAnswer")) } func TestUpgrades_CounterTrialAndError(t *testing.T) { @@ -234,75 +227,69 @@ func TestUpgrades_CounterTrialAndError(t *testing.T) { t.Skip("this is not a short test") } - network := integrationTests.NewOneNodeNetwork() + network := integrationTests.NewMiniNetwork() defer network.Stop() - alice := []byte("aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa") - bob := []byte("bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb") - network.Mint(alice, big.NewInt(10000000000000)) - network.Mint(bob, big.NewInt(10000000000000)) + alice := network.AddUser(big.NewInt(10000000000000)) + bob := network.AddUser(big.NewInt(10000000000000)) - network.GoToRoundOne() + network.Start() deployTxData := fmt.Sprintf("%s@%s@0100", wasm.GetSCCode("../testdata/counter/output/counter.wasm"), hex.EncodeToString(factory.WasmVirtualMachine)) upgradeTxData := fmt.Sprintf("upgradeContract@%s@0100", wasm.GetSCCode("../testdata/counter/output/counter.wasm")) // Deploy the smart contract. 
Alice is the owner - network.AddTxToPool(&transaction.Transaction{ - Nonce: 0, - Value: big.NewInt(0), - RcvAddr: vm.CreateEmptyAddress(), - SndAddr: alice, - GasPrice: network.GetMinGasPrice(), - GasLimit: network.MaxGasLimitPerBlock(), - Data: []byte(deployTxData), - }) + _, err := network.SendTransaction( + alice.Address, + make([]byte, 32), + big.NewInt(0), + deployTxData, + 1000, + ) + require.Nil(t, err) - scAddress, _ := network.Node.BlockchainHook.NewAddress(alice, 0, factory.WasmVirtualMachine) + scAddress, _ := network.ShardNode.BlockchainHook.NewAddress(alice.Address, 0, factory.WasmVirtualMachine) network.Continue(t, 1) - require.Equal(t, []byte{1}, query(t, network.Node, scAddress, "get")) + require.Equal(t, []byte{1}, query(t, network.ShardNode, scAddress, "get")) // Increment the counter (could be either Bob or Alice) - network.AddTxToPool(&transaction.Transaction{ - Nonce: 1, - Value: big.NewInt(0), - RcvAddr: scAddress, - SndAddr: alice, - GasPrice: network.GetMinGasPrice(), - GasLimit: network.MaxGasLimitPerBlock(), - Data: []byte("increment"), - }) + _, err = network.SendTransaction( + alice.Address, + scAddress, + big.NewInt(0), + "increment", + 1000, + ) + require.Nil(t, err) network.Continue(t, 1) - require.Equal(t, []byte{2}, query(t, network.Node, scAddress, "get")) + require.Equal(t, []byte{2}, query(t, network.ShardNode, scAddress, "get")) // Upgrade as Bob - upgrade should fail, since Alice is the owner (counter.init() not executed, state not reset) - network.AddTxToPool(&transaction.Transaction{ - Nonce: 0, - Value: big.NewInt(0), - RcvAddr: scAddress, - SndAddr: bob, - GasPrice: network.GetMinGasPrice(), - GasLimit: network.MaxGasLimitPerBlock(), - Data: []byte(upgradeTxData), - }) + _, err = network.SendTransaction( + bob.Address, + scAddress, + big.NewInt(0), + upgradeTxData, + 1000, + ) + require.Nil(t, err) network.Continue(t, 1) - require.Equal(t, []byte{2}, query(t, network.Node, scAddress, "get")) + require.Equal(t, []byte{2}, query(t, network.ShardNode, scAddress, "get")) // Now upgrade as Alice, should work (state is reset by counter.init()) - network.AddTxToPool(&transaction.Transaction{ - Nonce: 2, - Value: big.NewInt(0), - RcvAddr: scAddress, - SndAddr: alice, - GasPrice: network.GetMinGasPrice(), - GasLimit: network.MaxGasLimitPerBlock(), - Data: []byte(upgradeTxData), - }) + _, err = network.SendTransaction( + alice.Address, + scAddress, + big.NewInt(0), + upgradeTxData, + 1000, + ) + require.Nil(t, err) network.Continue(t, 1) - require.Equal(t, []byte{1}, query(t, network.Node, scAddress, "get")) + require.Equal(t, []byte{1}, query(t, network.ShardNode, scAddress, "get")) } func query(t *testing.T, node *integrationTests.TestProcessorNode, scAddress []byte, function string) []byte { diff --git a/integrationTests/vm/wasm/utils.go b/integrationTests/vm/wasm/utils.go index e58d3e25c7b..d4f4207662d 100644 --- a/integrationTests/vm/wasm/utils.go +++ b/integrationTests/vm/wasm/utils.go @@ -16,6 +16,7 @@ import ( "github.com/multiversx/mx-chain-core-go/core" "github.com/multiversx/mx-chain-core-go/core/pubkeyConverter" + "github.com/multiversx/mx-chain-core-go/data" "github.com/multiversx/mx-chain-core-go/data/block" "github.com/multiversx/mx-chain-core-go/data/rewardTx" "github.com/multiversx/mx-chain-core-go/data/smartContractResult" @@ -102,6 +103,7 @@ type TestContext struct { ScAddress []byte ScCodeMetadata vmcommon.CodeMetadata Accounts *state.AccountsDB + TxLogsProcessor process.TransactionLogProcessor TxProcessor process.TransactionProcessor 
ScProcessor scrCommon.TestSmartContractProcessor QueryService external.SCQueryService @@ -112,6 +114,7 @@ type TestContext struct { LastTxHash []byte SCRForwarder *mock.IntermediateTransactionHandlerMock LastSCResults []*smartContractResult.SmartContractResult + LastLogs []*data.LogData } type testParticipant struct { @@ -154,7 +157,7 @@ func SetupTestContextWithGasSchedule(t *testing.T, gasSchedule map[string]map[st DynamicGasCostForDataTrieStorageLoadEnableEpoch: integrationTests.UnreachableEpoch, }, context.EpochNotifier) context.RoundNotifier = &epochNotifier.RoundNotifierStub{} - context.EnableRoundsHandler, _ = enablers.NewEnableRoundsHandler(integrationTests.GetDefaultRoundsConfig(), context.RoundNotifier) + context.EnableRoundsHandler, _ = enablers.NewEnableRoundsHandler(testscommon.GetDefaultRoundsConfig(), context.RoundNotifier) context.WasmVMChangeLocker = &sync.RWMutex{} context.initAccounts() @@ -164,7 +167,7 @@ func SetupTestContextWithGasSchedule(t *testing.T, gasSchedule map[string]map[st context.initFeeHandlers() context.initVMAndBlockchainHook() context.initTxProcessorWithOneSCExecutorWithVMs() - context.ScAddress, _ = context.BlockchainHook.NewAddress(context.Owner.Address, context.Owner.Nonce, factory.WasmVirtualMachine) + argsNewSCQueryService := smartContract.ArgsNewSCQueryService{ VmContainer: context.VMContainer, EconomicsFee: context.EconomicsFee, @@ -247,10 +250,9 @@ func (context *TestContext) initFeeHandlers() { MaxGasPriceSetGuardian: "2000000000", }, }, - EpochNotifier: context.EpochNotifier, - EnableEpochsHandler: context.EnableEpochsHandler, - BuiltInFunctionsCostHandler: &mock.BuiltInCostHandlerStub{}, - TxVersionChecker: &testscommon.TxVersionCheckerStub{}, + EpochNotifier: context.EpochNotifier, + EnableEpochsHandler: context.EnableEpochsHandler, + TxVersionChecker: &testscommon.TxVersionCheckerStub{}, } economicsData, _ := economics.NewEconomicsData(argsNewEconomicsData) @@ -364,8 +366,11 @@ func (context *TestContext) initTxProcessorWithOneSCExecutorWithVMs() { defaults.FillGasMapInternal(gasSchedule, 1) argsLogProcessor := transactionLog.ArgTxLogProcessor{Marshalizer: marshalizer} - logsProcessor, _ := transactionLog.NewTxLogProcessor(argsLogProcessor) + context.TxLogsProcessor, err = transactionLog.NewTxLogProcessor(argsLogProcessor) + require.Nil(context.T, err) + context.SCRForwarder = &mock.IntermediateTransactionHandlerMock{} + argsNewSCProcessor := scrCommon.ArgsNewSmartContractProcessor{ VmContainer: context.VMContainer, ArgsParser: smartContract.NewArgumentParser(), @@ -385,14 +390,14 @@ func (context *TestContext) initTxProcessorWithOneSCExecutorWithVMs() { SetGasRefundedCalled: func(gasRefunded uint64, hash []byte) {}, }, GasSchedule: mock.NewGasScheduleNotifierMock(gasSchedule), - TxLogsProcessor: logsProcessor, + TxLogsProcessor: context.TxLogsProcessor, EnableRoundsHandler: context.EnableRoundsHandler, EnableEpochsHandler: context.EnableEpochsHandler, WasmVMChangeLocker: context.WasmVMChangeLocker, VMOutputCacher: txcache.NewDisabledCache(), } - context.ScProcessor, _ = processProxy.NewTestSmartContractProcessorProxy(argsNewSCProcessor, context.EpochNotifier) + context.ScProcessor, err = processProxy.NewTestSmartContractProcessorProxy(argsNewSCProcessor, context.EpochNotifier) require.Nil(context.T, err) argsNewTxProcessor := processTransaction.ArgsNewTxProcessor{ @@ -414,7 +419,7 @@ func (context *TestContext) initTxProcessorWithOneSCExecutorWithVMs() { EnableEpochsHandler: context.EnableEpochsHandler, TxVersionChecker: 
&testscommon.TxVersionCheckerStub{}, GuardianChecker: &guardianMocks.GuardedAccountHandlerStub{}, - TxLogsProcessor: logsProcessor, + TxLogsProcessor: context.TxLogsProcessor, } context.TxProcessor, err = processTransaction.NewTxProcessor(argsNewTxProcessor) @@ -544,20 +549,20 @@ func (context *TestContext) DeploySC(wasmPath string, parametersString string) e return err } + context.ScAddress, _ = context.BlockchainHook.NewAddress(context.Owner.Address, context.Owner.Nonce, factory.WasmVirtualMachine) + owner.Nonce++ _, err = context.Accounts.Commit() if err != nil { return err } - err = context.GetCompositeTestError() + err = context.acquireOutcome() if err != nil { return err } - _ = context.UpdateLastSCResults() - - return nil + return context.GetCompositeTestError() } // UpgradeSC - @@ -604,14 +609,12 @@ func (context *TestContext) UpgradeSC(wasmPath string, parametersString string) return err } - err = context.GetCompositeTestError() + err = context.acquireOutcome() if err != nil { return err } - _ = context.UpdateLastSCResults() - - return nil + return context.GetCompositeTestError() } // GetSCCode - @@ -680,18 +683,16 @@ func (context *TestContext) ExecuteSCWithValue(sender *testParticipant, txData s return err } - err = context.GetCompositeTestError() + err = context.acquireOutcome() if err != nil { return err } - _ = context.UpdateLastSCResults() - - return nil + return context.GetCompositeTestError() } -// UpdateLastSCResults -- -func (context *TestContext) UpdateLastSCResults() error { +// acquireOutcome - +func (context *TestContext) acquireOutcome() error { transactions := context.SCRForwarder.GetIntermediateTransactions() context.LastSCResults = make([]*smartContractResult.SmartContractResult, len(transactions)) for i, tx := range transactions { @@ -703,6 +704,8 @@ func (context *TestContext) UpdateLastSCResults() error { } } + context.LastLogs = context.TxLogsProcessor.GetAllCurrentLogs() + return nil } diff --git a/integrationTests/vm/wasm/wasmvm/mockContracts.go b/integrationTests/vm/wasm/wasmvm/mockContracts.go index 21c6e6cae55..4e1b2b2b2c2 100644 --- a/integrationTests/vm/wasm/wasmvm/mockContracts.go +++ b/integrationTests/vm/wasm/wasmvm/mockContracts.go @@ -17,14 +17,15 @@ import ( "github.com/multiversx/mx-chain-go/testscommon/hashingMocks" "github.com/multiversx/mx-chain-go/testscommon/marshallerMock" "github.com/multiversx/mx-chain-go/testscommon/txDataBuilder" + worldmock "github.com/multiversx/mx-chain-scenario-go/worldmock" "github.com/multiversx/mx-chain-vm-go/executor" contextmock "github.com/multiversx/mx-chain-vm-go/mock/context" - worldmock "github.com/multiversx/mx-chain-vm-go/mock/world" "github.com/multiversx/mx-chain-vm-go/testcommon" "github.com/multiversx/mx-chain-vm-go/vmhost" "github.com/stretchr/testify/require" ) +// MockInitialBalance represents a mock balance var MockInitialBalance = big.NewInt(10_000_000) // WalletAddressPrefix is the prefix of any smart contract address used for testing. @@ -191,6 +192,7 @@ func makeTestAddress(_ []byte, identifier string) []byte { return append(leftBytes, rightBytes...) 
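With the refactoring above, DeploySC, UpgradeSC and ExecuteSCWithValue first capture the intermediate transactions and logs (acquireOutcome) and only then report the composite error, so a test can inspect both LastSCResults and LastLogs after a call. A hedged usage sketch; the wasm path and the called function are placeholders.

package wasm_test

import (
	"testing"

	"github.com/multiversx/mx-chain-go/integrationTests/vm/wasm"
	"github.com/stretchr/testify/require"
)

func TestInspectOutcome(t *testing.T) {
	context := wasm.SetupTestContext(t)
	defer context.Close()

	err := context.DeploySC("../testdata/some-contract/output/contract.wasm", "")
	require.Nil(t, err)

	err = context.ExecuteSC(&context.Owner, "doSomething")
	require.Nil(t, err)

	// Both the smart contract results and the transaction logs of the last call
	// are available after execution.
	require.NotNil(t, context.LastSCResults)
	require.NotNil(t, context.LastLogs)
}
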
} +// CreateHostAndInstanceBuilder creates a new host and instance builder func CreateHostAndInstanceBuilder(t *testing.T, net *integrationTests.TestNetwork, vmContainer process.VirtualMachinesContainer, diff --git a/integrationTests/vm/wasm/wasmvm/scenariosConverter/scenariosConverter.go b/integrationTests/vm/wasm/wasmvm/scenariosConverter/scenariosConverter.go index 64a8bde201f..36a4fb8e51b 100644 --- a/integrationTests/vm/wasm/wasmvm/scenariosConverter/scenariosConverter.go +++ b/integrationTests/vm/wasm/wasmvm/scenariosConverter/scenariosConverter.go @@ -10,15 +10,15 @@ import ( "github.com/multiversx/mx-chain-go/process/factory" "github.com/multiversx/mx-chain-go/state" "github.com/multiversx/mx-chain-go/testscommon/txDataBuilder" - mge "github.com/multiversx/mx-chain-scenario-go/scenario-exporter" - mgutil "github.com/multiversx/mx-chain-scenario-go/util" + "github.com/multiversx/mx-chain-scenario-go/scenario/exporter" + scenmodel "github.com/multiversx/mx-chain-scenario-go/scenario/model" vmcommon "github.com/multiversx/mx-chain-vm-common-go" ) var errReturnCodeNotOk = errors.New("returnCode is not 0(Ok)") // CreateAccountsFromScenariosAccs uses scenariosAccounts to populate the AccountsAdapter -func CreateAccountsFromScenariosAccs(tc *vm.VMTestContext, scenariosUserAccounts []*mge.TestAccount) error { +func CreateAccountsFromScenariosAccs(tc *vm.VMTestContext, scenariosUserAccounts []*exporter.TestAccount) error { for _, scenariosAcc := range scenariosUserAccounts { acc, err := tc.Accounts.LoadAccount(scenariosAcc.GetAddress()) if err != nil { @@ -60,7 +60,7 @@ func CreateAccountsFromScenariosAccs(tc *vm.VMTestContext, scenariosUserAccounts } // CreateTransactionsFromScenariosTxs converts scenarios transactions intro trasnsactions that can be processed by the txProcessor -func CreateTransactionsFromScenariosTxs(scenariosTxs []*mge.Transaction) (transactions []*transaction.Transaction) { +func CreateTransactionsFromScenariosTxs(scenariosTxs []*exporter.Transaction) (transactions []*transaction.Transaction) { var data []byte transactions = make([]*transaction.Transaction, 0) @@ -70,7 +70,7 @@ func CreateTransactionsFromScenariosTxs(scenariosTxs []*mge.Transaction) (transa endpointName := scenariosTx.GetCallFunction() args := scenariosTx.GetCallArguments() if len(esdtTransfers) != 0 { - data = mgutil.CreateMultiTransferData(scenariosTx.GetReceiverAddress(), esdtTransfers, endpointName, args) + data = scenmodel.CreateMultiTransferData(scenariosTx.GetReceiverAddress(), esdtTransfers, endpointName, args) } else { data = createData(endpointName, args) } @@ -92,7 +92,7 @@ func CreateTransactionsFromScenariosTxs(scenariosTxs []*mge.Transaction) (transa } // DeploySCsFromScenariosDeployTxs deploys all smartContracts correspondent to "scDeploy" in a scenarios test, then replaces with the correct computed address in all the transactions. 
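The import churn above comes from the mx-chain-scenario-go module reorganizing its packages. Under the new layout the aliases chosen in this diff look as follows; the snippet only references symbols that appear in the surrounding hunks and exists purely as a compile-time illustration.

package scenariosConverter

import (
	"github.com/multiversx/mx-chain-scenario-go/scenario/exporter"
	scenmodel "github.com/multiversx/mx-chain-scenario-go/scenario/model"
)

// exporter.* replaces the former mge.* alias (scenario-exporter package) and
// scenmodel.* replaces mgutil.* (util package).
var (
	_ []*exporter.TestAccount
	_ []*exporter.Transaction
	_ = exporter.InvalidBenchmarkTxPos
	_ = scenmodel.CreateMultiTransferData
)
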
-func DeploySCsFromScenariosDeployTxs(testContext *vm.VMTestContext, deployScenariosTxs []*mge.Transaction) ([][]byte, error) { +func DeploySCsFromScenariosDeployTxs(testContext *vm.VMTestContext, deployScenariosTxs []*exporter.Transaction) ([][]byte, error) { newScAddresses := make([][]byte, 0) for _, deployScenariosTransaction := range deployScenariosTxs { deployedScAddress, err := deploySC(testContext, deployScenariosTransaction) @@ -105,7 +105,7 @@ func DeploySCsFromScenariosDeployTxs(testContext *vm.VMTestContext, deployScenar } // ReplaceScenariosScAddressesWithNewScAddresses corrects the Scenarios SC Addresses, with the new Addresses obtained from deploying the SCs -func ReplaceScenariosScAddressesWithNewScAddresses(deployedScAccounts []*mge.TestAccount, newScAddresses [][]byte, scenariosTxs []*mge.Transaction) { +func ReplaceScenariosScAddressesWithNewScAddresses(deployedScAccounts []*exporter.TestAccount, newScAddresses [][]byte, scenariosTxs []*exporter.Transaction) { for _, newScAddr := range newScAddresses { addressToBeReplaced := deployedScAccounts[0].GetAddress() for _, scenariosTx := range scenariosTxs { @@ -126,7 +126,7 @@ func createData(functionName string, arguments [][]byte) []byte { return builder.ToBytes() } -func deploySC(testContext *vm.VMTestContext, deployScenariosTx *mge.Transaction) (scAddress []byte, err error) { +func deploySC(testContext *vm.VMTestContext, deployScenariosTx *exporter.Transaction) (scAddress []byte, err error) { gasLimit, gasPrice := deployScenariosTx.GetGasLimitAndPrice() ownerAddr := deployScenariosTx.GetSenderAddress() deployData := deployScenariosTx.GetDeployData() diff --git a/integrationTests/vm/wasm/wasmvm/scenariosConverter/scenariosConverterUtils.go b/integrationTests/vm/wasm/wasmvm/scenariosConverter/scenariosConverterUtils.go index a701d090e95..2d3d15f681d 100644 --- a/integrationTests/vm/wasm/wasmvm/scenariosConverter/scenariosConverterUtils.go +++ b/integrationTests/vm/wasm/wasmvm/scenariosConverter/scenariosConverterUtils.go @@ -8,8 +8,8 @@ import ( "github.com/multiversx/mx-chain-go/integrationTests/vm" "github.com/multiversx/mx-chain-go/state" logger "github.com/multiversx/mx-chain-logger-go" - mge "github.com/multiversx/mx-chain-scenario-go/scenario-exporter" - mgutil "github.com/multiversx/mx-chain-scenario-go/util" + "github.com/multiversx/mx-chain-scenario-go/scenario/exporter" + scenmodel "github.com/multiversx/mx-chain-scenario-go/scenario/model" vmcommon "github.com/multiversx/mx-chain-vm-common-go" "github.com/stretchr/testify/require" ) @@ -17,7 +17,7 @@ import ( var log = logger.GetOrCreate("scenariosConverter") // CheckAccounts will verify if scenariosAccounts correspond to AccountsAdapter accounts -func CheckAccounts(t *testing.T, accAdapter state.AccountsAdapter, scenariosAccounts []*mge.TestAccount) { +func CheckAccounts(t *testing.T, accAdapter state.AccountsAdapter, scenariosAccounts []*exporter.TestAccount) { for _, scenariosAcc := range scenariosAccounts { accHandler, err := accAdapter.LoadAccount(scenariosAcc.GetAddress()) require.Nil(t, err) @@ -56,7 +56,7 @@ func CheckStorage(t *testing.T, dataTrie state.UserAccountHandler, scenariosAccS } // CheckTransactions checks if the transactions correspond with the scenariosTransactions -func CheckTransactions(t *testing.T, transactions []*transaction.Transaction, scenariosTransactions []*mge.Transaction) { +func CheckTransactions(t *testing.T, transactions []*transaction.Transaction, scenariosTransactions []*exporter.Transaction) { expectedLength := 
len(scenariosTransactions) require.Equal(t, expectedLength, len(transactions)) for i := 0; i < expectedLength; i++ { @@ -77,7 +77,7 @@ func CheckTransactions(t *testing.T, transactions []*transaction.Transaction, sc var expectedData []byte if len(expectedEsdtTransfers) != 0 { - expectedData = mgutil.CreateMultiTransferData(expectedReceiver, expectedEsdtTransfers, expectedCallFunction, expectedCallArguments) + expectedData = scenmodel.CreateMultiTransferData(expectedReceiver, expectedEsdtTransfers, expectedCallFunction, expectedCallArguments) require.Equal(t, expectedSender, transactions[i].GetRcvAddr()) } else { require.Equal(t, expectedReceiver, transactions[i].GetRcvAddr()) @@ -97,7 +97,7 @@ func BenchmarkScenariosSpecificTx(b *testing.B, scenariosTestPath string) { return } defer testContext.Close() - if benchmarkTxPos == mge.InvalidBenchmarkTxPos { + if benchmarkTxPos == exporter.InvalidBenchmarkTxPos { log.Trace("no transactions marked for benchmarking") } if len(transactions) > 1 { @@ -115,21 +115,21 @@ func BenchmarkScenariosSpecificTx(b *testing.B, scenariosTestPath string) { // SetStateFromScenariosTest recieves path to scenariosTest, returns a VMTestContext with the specified accounts, an array with the specified transactions and an error func SetStateFromScenariosTest(scenariosTestPath string) (testContext *vm.VMTestContext, transactions []*transaction.Transaction, bechmarkTxPos int, err error) { - stateAndBenchmarkInfo, err := mge.GetAccountsAndTransactionsFromScenarios(scenariosTestPath) + stateAndBenchmarkInfo, err := exporter.GetAccountsAndTransactionsFromScenarios(scenariosTestPath) if err != nil { - return nil, nil, mge.InvalidBenchmarkTxPos, err + return nil, nil, exporter.InvalidBenchmarkTxPos, err } testContext, err = vm.CreatePreparedTxProcessorWithVMs(config.EnableEpochs{}) if err != nil { - return nil, nil, mge.InvalidBenchmarkTxPos, err + return nil, nil, exporter.InvalidBenchmarkTxPos, err } err = CreateAccountsFromScenariosAccs(testContext, stateAndBenchmarkInfo.Accs) if err != nil { - return nil, nil, mge.InvalidBenchmarkTxPos, err + return nil, nil, exporter.InvalidBenchmarkTxPos, err } newAddresses, err := DeploySCsFromScenariosDeployTxs(testContext, stateAndBenchmarkInfo.DeployTxs) if err != nil { - return nil, nil, mge.InvalidBenchmarkTxPos, err + return nil, nil, exporter.InvalidBenchmarkTxPos, err } ReplaceScenariosScAddressesWithNewScAddresses(stateAndBenchmarkInfo.DeployedAccs, newAddresses, stateAndBenchmarkInfo.Txs) transactions = CreateTransactionsFromScenariosTxs(stateAndBenchmarkInfo.Txs) @@ -138,7 +138,7 @@ func SetStateFromScenariosTest(scenariosTestPath string) (testContext *vm.VMTest // CheckConverter - func CheckConverter(t *testing.T, scenariosTestPath string) { - stateAndBenchmarkInfo, err := mge.GetAccountsAndTransactionsFromScenarios(scenariosTestPath) + stateAndBenchmarkInfo, err := exporter.GetAccountsAndTransactionsFromScenarios(scenariosTestPath) require.Nil(t, err) testContext, err := vm.CreatePreparedTxProcessorWithVMs(config.EnableEpochs{}) require.Nil(t, err) diff --git a/keysManagement/export_test.go b/keysManagement/export_test.go index b9e80ddcc66..42d1ee00317 100644 --- a/keysManagement/export_test.go +++ b/keysManagement/export_test.go @@ -6,6 +6,12 @@ import ( "github.com/multiversx/mx-chain-go/common" ) +// exported constants +const ( + RedundancyReasonForOneKey = redundancyReasonForOneKey + RedundancyReasonForMultipleKeys = redundancyReasonForMultipleKeys +) + // GetRoundsOfInactivity - func (pInfo *peerInfo) 
GetRoundsOfInactivity() int { pInfo.mutChangeableData.RLock() diff --git a/keysManagement/keysHandler.go b/keysManagement/keysHandler.go index 109b05fc712..1b4b83c2e6f 100644 --- a/keysManagement/keysHandler.go +++ b/keysManagement/keysHandler.go @@ -120,6 +120,11 @@ func (handler *keysHandler) ResetRoundsWithoutReceivedMessages(pkBytes []byte, p handler.managedPeersHolder.ResetRoundsWithoutReceivedMessages(pkBytes, pid) } +// GetRedundancyStepInReason returns the reason if the current node stepped in as a redundancy node +func (handler *keysHandler) GetRedundancyStepInReason() string { + return handler.managedPeersHolder.GetRedundancyStepInReason() +} + // IsInterfaceNil returns true if there is no value under the interface func (handler *keysHandler) IsInterfaceNil() bool { return handler == nil diff --git a/keysManagement/keysHandler_test.go b/keysManagement/keysHandler_test.go index fecfddf3a29..886053a1b94 100644 --- a/keysManagement/keysHandler_test.go +++ b/keysManagement/keysHandler_test.go @@ -268,3 +268,18 @@ func TestKeysHandler_ResetRoundsWithoutReceivedMessages(t *testing.T) { assert.Equal(t, 1, len(mapResetCalled)) assert.Equal(t, 1, mapResetCalled[string(randomPublicKeyBytes)]) } + +func TestKeysHandler_GetRedundancyStepInReason(t *testing.T) { + t.Parallel() + + expectedString := "expected string" + args := createMockArgsKeysHandler() + args.ManagedPeersHolder = &testscommon.ManagedPeersHolderStub{ + GetRedundancyStepInReasonCalled: func() string { + return expectedString + }, + } + + handler, _ := keysManagement.NewKeysHandler(args) + assert.Equal(t, expectedString, handler.GetRedundancyStepInReason()) +} diff --git a/keysManagement/managedPeersHolder.go b/keysManagement/managedPeersHolder.go index 93e48fa2e30..8156b64c8eb 100644 --- a/keysManagement/managedPeersHolder.go +++ b/keysManagement/managedPeersHolder.go @@ -5,6 +5,7 @@ import ( "crypto/rand" "encoding/hex" "fmt" + "sort" "sync" "time" @@ -19,6 +20,11 @@ import ( var log = logger.GetOrCreate("keysManagement") +const ( + redundancyReasonForOneKey = "multikey node stepped in with one key" + redundancyReasonForMultipleKeys = "multikey node stepped in with %d keys" +) + type managedPeersHolder struct { mut sync.RWMutex defaultPeerInfoCurrentIndex int @@ -276,7 +282,7 @@ func (holder *managedPeersHolder) ResetRoundsWithoutReceivedMessages(pkBytes []b pInfo.resetRoundsWithoutReceivedMessages() } -// GetManagedKeysByCurrentNode returns all keys that will be managed by this node +// GetManagedKeysByCurrentNode returns all keys that should act as validator(main or backup that took over) and will be managed by this node func (holder *managedPeersHolder) GetManagedKeysByCurrentNode() map[string]crypto.PrivateKey { holder.mut.RLock() defer holder.mut.RUnlock() @@ -294,6 +300,23 @@ func (holder *managedPeersHolder) GetManagedKeysByCurrentNode() map[string]crypt return allManagedKeys } +// GetLoadedKeysByCurrentNode returns all keys that were loaded and will be managed by this node +func (holder *managedPeersHolder) GetLoadedKeysByCurrentNode() [][]byte { + holder.mut.RLock() + defer holder.mut.RUnlock() + + allLoadedKeys := make([][]byte, 0, len(holder.data)) + for pk := range holder.data { + allLoadedKeys = append(allLoadedKeys, []byte(pk)) + } + + sort.Slice(allLoadedKeys, func(i, j int) bool { + return string(allLoadedKeys[i]) < string(allLoadedKeys[j]) + }) + + return allLoadedKeys +} + // IsKeyManagedByCurrentNode returns true if the key is managed by the current node func (holder *managedPeersHolder) 
IsKeyManagedByCurrentNode(pkBytes []byte) bool { pInfo := holder.getPeerInfo(pkBytes) @@ -369,6 +392,26 @@ func (holder *managedPeersHolder) IsMultiKeyMode() bool { return len(holder.data) > 0 } +// GetRedundancyStepInReason returns the reason if the current node stepped in as a redundancy node +// Returns empty string if the current node is the main multikey machine, the machine is not running in multikey mode +// or the machine is acting as a backup but the main machine is acting accordingly +func (holder *managedPeersHolder) GetRedundancyStepInReason() string { + if holder.isMainMachine { + return "" + } + + numManagedKeys := len(holder.GetManagedKeysByCurrentNode()) + if numManagedKeys == 0 { + return "" + } + + if numManagedKeys == 1 { + return redundancyReasonForOneKey + } + + return fmt.Sprintf(redundancyReasonForMultipleKeys, numManagedKeys) +} + // IsInterfaceNil returns true if there is no value under the interface func (holder *managedPeersHolder) IsInterfaceNil() bool { return holder == nil diff --git a/keysManagement/managedPeersHolder_test.go b/keysManagement/managedPeersHolder_test.go index 7c2d278f9cd..9a8c66fb849 100644 --- a/keysManagement/managedPeersHolder_test.go +++ b/keysManagement/managedPeersHolder_test.go @@ -6,6 +6,7 @@ import ( "encoding/hex" "errors" "fmt" + "runtime" "strings" "sync" "testing" @@ -13,7 +14,7 @@ import ( "github.com/multiversx/mx-chain-core-go/core" "github.com/multiversx/mx-chain-core-go/core/check" - "github.com/multiversx/mx-chain-crypto-go" + crypto "github.com/multiversx/mx-chain-crypto-go" "github.com/multiversx/mx-chain-go/common" "github.com/multiversx/mx-chain-go/config" "github.com/multiversx/mx-chain-go/keysManagement" @@ -751,6 +752,24 @@ func TestManagedPeersHolder_GetManagedKeysByCurrentNode(t *testing.T) { }) } +func TestManagedPeersHolder_GetLoadedKeysByCurrentNode(t *testing.T) { + t.Parallel() + + args := createMockArgsManagedPeersHolder() + holder, _ := keysManagement.NewManagedPeersHolder(args) + _ = holder.AddManagedPeer(skBytes1) + _ = holder.AddManagedPeer(skBytes0) + + for i := 0; i < 10; i++ { + holder.IncrementRoundsWithoutReceivedMessages(pkBytes0) + } + + result := holder.GetLoadedKeysByCurrentNode() + assert.Equal(t, 2, len(result)) + assert.Equal(t, pkBytes0, result[0]) + assert.Equal(t, pkBytes1, result[1]) +} + func TestManagedPeersHolder_IsKeyManagedByCurrentNode(t *testing.T) { t.Parallel() @@ -887,6 +906,10 @@ func TestManagedPeersHolder_IsKeyValidator(t *testing.T) { } func TestManagedPeersHolder_GetNextPeerAuthenticationTime(t *testing.T) { + if runtime.GOOS == "darwin" { + t.Skip("skipping on darwin") + } + t.Parallel() holder, _ := keysManagement.NewManagedPeersHolder(createMockArgsManagedPeersHolder()) @@ -935,6 +958,65 @@ func TestManagedPeersHolder_IsMultiKeyMode(t *testing.T) { }) } +func TestManagedPeersHolder_GetRedundancyStepInReason(t *testing.T) { + t.Parallel() + + t.Run("main machine mode", func(t *testing.T) { + args := createMockArgsManagedPeersHolder() + holder, _ := keysManagement.NewManagedPeersHolder(args) + assert.Empty(t, holder.GetRedundancyStepInReason()) + }) + t.Run("redundancy machine mode but no managed keys", func(t *testing.T) { + args := createMockArgsManagedPeersHolder() + args.MaxRoundsOfInactivity = 2 + holder, _ := keysManagement.NewManagedPeersHolder(args) + assert.Empty(t, holder.GetRedundancyStepInReason()) + }) + t.Run("redundancy machine mode with one managed key, main active", func(t *testing.T) { + args := createMockArgsManagedPeersHolder() + 
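To make the behaviour of GetRedundancyStepInReason concrete: it returns an empty string on the main machine and on a backup whose main is still active (no keys taken over yet), and otherwise reports how many keys were stepped in for. A small, self-contained consumer sketch; the interface and stub are hypothetical, and the reason string is one of the constants defined above.

package main

import "fmt"

// redundancyReporter is a minimal local interface; the method is the one added in this diff.
type redundancyReporter interface {
	GetRedundancyStepInReason() string
}

type stubHolder struct{ reason string }

func (s stubHolder) GetRedundancyStepInReason() string { return s.reason }

// reportStepIn prints something only when the backup actually stepped in.
func reportStepIn(holder redundancyReporter) {
	if reason := holder.GetRedundancyStepInReason(); reason != "" {
		fmt.Println("redundancy step-in:", reason)
	}
}

func main() {
	reportStepIn(stubHolder{})                                                // prints nothing
	reportStepIn(stubHolder{reason: "multikey node stepped in with one key"}) // prints the reason
}
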
args.MaxRoundsOfInactivity = 2 + holder, _ := keysManagement.NewManagedPeersHolder(args) + _ = holder.AddManagedPeer(skBytes0) + + assert.Empty(t, holder.GetRedundancyStepInReason()) + }) + t.Run("redundancy machine mode with one managed key, main inactive", func(t *testing.T) { + args := createMockArgsManagedPeersHolder() + args.MaxRoundsOfInactivity = 2 + holder, _ := keysManagement.NewManagedPeersHolder(args) + _ = holder.AddManagedPeer(skBytes0) + for i := 0; i < args.MaxRoundsOfInactivity+1; i++ { + holder.IncrementRoundsWithoutReceivedMessages(pkBytes0) + } + + assert.Equal(t, keysManagement.RedundancyReasonForOneKey, holder.GetRedundancyStepInReason()) + }) + t.Run("redundancy machine mode with 2 managed keys, main active", func(t *testing.T) { + args := createMockArgsManagedPeersHolder() + args.MaxRoundsOfInactivity = 2 + holder, _ := keysManagement.NewManagedPeersHolder(args) + _ = holder.AddManagedPeer(skBytes0) + _ = holder.AddManagedPeer(skBytes1) + + assert.Empty(t, holder.GetRedundancyStepInReason()) + }) + t.Run("redundancy machine mode with 2 managed keys, main inactive", func(t *testing.T) { + args := createMockArgsManagedPeersHolder() + args.MaxRoundsOfInactivity = 2 + holder, _ := keysManagement.NewManagedPeersHolder(args) + _ = holder.AddManagedPeer(skBytes0) + _ = holder.AddManagedPeer(skBytes1) + + for i := 0; i < args.MaxRoundsOfInactivity+1; i++ { + holder.IncrementRoundsWithoutReceivedMessages(pkBytes0) + holder.IncrementRoundsWithoutReceivedMessages(pkBytes1) + } + + expectedReason := fmt.Sprintf(keysManagement.RedundancyReasonForMultipleKeys, 2) + assert.Equal(t, expectedReason, holder.GetRedundancyStepInReason()) + }) +} + func TestManagedPeersHolder_ParallelOperationsShouldNotPanic(t *testing.T) { defer func() { r := recover() @@ -984,10 +1066,12 @@ func TestManagedPeersHolder_ParallelOperationsShouldNotPanic(t *testing.T) { _, _ = holder.GetNextPeerAuthenticationTime(pkBytes0) case 13: holder.SetNextPeerAuthenticationTime(pkBytes0, time.Now()) + case 14: + _ = holder.GetRedundancyStepInReason() } wg.Done() - }(i % 14) + }(i % 15) } wg.Wait() diff --git a/keysManagement/managedPeersMonitor.go b/keysManagement/managedPeersMonitor.go index 2c2eef290b4..5f9f117cc2b 100644 --- a/keysManagement/managedPeersMonitor.go +++ b/keysManagement/managedPeersMonitor.go @@ -60,7 +60,7 @@ func (monitor *managedPeersMonitor) GetManagedKeysCount() int { return len(monitor.managedPeersHolder.GetManagedKeysByCurrentNode()) } -// GetManagedKeys returns all keys managed by the current node +// GetManagedKeys returns all keys that should act as validator(main or backup that took over) and will be managed by this node func (monitor *managedPeersMonitor) GetManagedKeys() [][]byte { managedKeysMap := monitor.managedPeersHolder.GetManagedKeysByCurrentNode() managedKeys := make([][]byte, 0, len(managedKeysMap)) @@ -75,6 +75,11 @@ func (monitor *managedPeersMonitor) GetManagedKeys() [][]byte { return managedKeys } +// GetLoadedKeys returns all keys that were loaded and will be managed by this node +func (monitor *managedPeersMonitor) GetLoadedKeys() [][]byte { + return monitor.managedPeersHolder.GetLoadedKeysByCurrentNode() +} + // GetEligibleManagedKeys returns eligible keys that are managed by the current node in the current epoch func (monitor *managedPeersMonitor) GetEligibleManagedKeys() ([][]byte, error) { epoch := monitor.epochProvider.CurrentEpoch() diff --git a/keysManagement/managedPeersMonitor_test.go b/keysManagement/managedPeersMonitor_test.go index 9ec9dbcd8ad..4be6a5282ca 
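The monitor changes below distinguish loaded keys (everything configured on the node, returned sorted) from managed keys (only the ones the node should currently act for). A self-contained sketch of that distinction using a hypothetical stub; the two method names are the ones added in this diff.

package main

import "fmt"

// keysMonitor lists only the two methods relevant here.
type keysMonitor interface {
	GetManagedKeys() [][]byte
	GetLoadedKeys() [][]byte
}

type stubMonitor struct{ managed, loaded [][]byte }

func (s stubMonitor) GetManagedKeys() [][]byte { return s.managed }
func (s stubMonitor) GetLoadedKeys() [][]byte  { return s.loaded }

func main() {
	// A backup machine that has not stepped in yet: keys are loaded but not managed.
	var monitor keysMonitor = stubMonitor{
		loaded:  [][]byte{[]byte("pk0"), []byte("pk1")},
		managed: [][]byte{},
	}
	fmt.Printf("loaded=%d managed=%d\n", len(monitor.GetLoadedKeys()), len(monitor.GetManagedKeys()))
}
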
100644 --- a/keysManagement/managedPeersMonitor_test.go +++ b/keysManagement/managedPeersMonitor_test.go @@ -281,3 +281,20 @@ func TestManagedPeersMonitor_GetManagedKeys(t *testing.T) { keys := monitor.GetManagedKeys() require.Equal(t, expectedManagedKeys, keys) } + +func TestManagedPeersMonitor_GetLoadedKeys(t *testing.T) { + t.Parallel() + + loadedKeys := [][]byte{[]byte("pk1"), []byte("pk2"), []byte("pk3")} + args := createMockArgManagedPeersMonitor() + args.ManagedPeersHolder = &testscommon.ManagedPeersHolderStub{ + GetLoadedKeysByCurrentNodeCalled: func() [][]byte { + return loadedKeys + }, + } + monitor, err := NewManagedPeersMonitor(args) + require.NoError(t, err) + + keys := monitor.GetLoadedKeys() + require.Equal(t, loadedKeys, keys) +} diff --git a/node/chainSimulator/chainSimulator.go b/node/chainSimulator/chainSimulator.go new file mode 100644 index 00000000000..a5292d72e40 --- /dev/null +++ b/node/chainSimulator/chainSimulator.go @@ -0,0 +1,605 @@ +package chainSimulator + +import ( + "bytes" + "crypto/rand" + "encoding/hex" + "errors" + "fmt" + "math/big" + "sync" + "time" + + "github.com/multiversx/mx-chain-core-go/core" + "github.com/multiversx/mx-chain-core-go/core/check" + "github.com/multiversx/mx-chain-core-go/core/sharding" + "github.com/multiversx/mx-chain-core-go/data/api" + "github.com/multiversx/mx-chain-core-go/data/endProcess" + "github.com/multiversx/mx-chain-core-go/data/transaction" + crypto "github.com/multiversx/mx-chain-crypto-go" + "github.com/multiversx/mx-chain-crypto-go/signing" + "github.com/multiversx/mx-chain-crypto-go/signing/mcl" + "github.com/multiversx/mx-chain-go/config" + "github.com/multiversx/mx-chain-go/node/chainSimulator/components" + "github.com/multiversx/mx-chain-go/node/chainSimulator/configs" + "github.com/multiversx/mx-chain-go/node/chainSimulator/dtos" + "github.com/multiversx/mx-chain-go/node/chainSimulator/process" + mxChainSharding "github.com/multiversx/mx-chain-go/sharding" + logger "github.com/multiversx/mx-chain-logger-go" +) + +const delaySendTxs = time.Millisecond + +var log = logger.GetOrCreate("chainSimulator") + +type transactionWithResult struct { + hexHash string + tx *transaction.Transaction + result *transaction.ApiTransactionResult +} + +// ArgsChainSimulator holds the arguments needed to create a new instance of simulator +type ArgsChainSimulator struct { + BypassTxSignatureCheck bool + TempDir string + PathToInitialConfig string + NumOfShards uint32 + MinNodesPerShard uint32 + MetaChainMinNodes uint32 + NumNodesWaitingListShard uint32 + NumNodesWaitingListMeta uint32 + GenesisTimestamp int64 + InitialRound int64 + InitialEpoch uint32 + InitialNonce uint64 + RoundDurationInMillis uint64 + RoundsPerEpoch core.OptionalUint64 + ApiInterface components.APIConfigurator + AlterConfigsFunction func(cfg *config.Configs) +} + +type simulator struct { + chanStopNodeProcess chan endProcess.ArgEndProcess + syncedBroadcastNetwork components.SyncedBroadcastNetworkHandler + handlers []ChainHandler + initialWalletKeys *dtos.InitialWalletKeys + initialStakedKeys map[string]*dtos.BLSKey + validatorsPrivateKeys []crypto.PrivateKey + nodes map[uint32]process.NodeHandler + numOfShards uint32 + mutex sync.RWMutex +} + +// NewChainSimulator will create a new instance of simulator +func NewChainSimulator(args ArgsChainSimulator) (*simulator, error) { + syncedBroadcastNetwork := components.NewSyncedBroadcastNetwork() + + instance := &simulator{ + syncedBroadcastNetwork: syncedBroadcastNetwork, + nodes: make(map[uint32]process.NodeHandler), 
+ handlers: make([]ChainHandler, 0, args.NumOfShards+1), + numOfShards: args.NumOfShards, + chanStopNodeProcess: make(chan endProcess.ArgEndProcess), + mutex: sync.RWMutex{}, + initialStakedKeys: make(map[string]*dtos.BLSKey), + } + + err := instance.createChainHandlers(args) + if err != nil { + return nil, err + } + + return instance, nil +} + +func (s *simulator) createChainHandlers(args ArgsChainSimulator) error { + outputConfigs, err := configs.CreateChainSimulatorConfigs(configs.ArgsChainSimulatorConfigs{ + NumOfShards: args.NumOfShards, + OriginalConfigsPath: args.PathToInitialConfig, + GenesisTimeStamp: computeStartTimeBaseOnInitialRound(args), + RoundDurationInMillis: args.RoundDurationInMillis, + TempDir: args.TempDir, + MinNodesPerShard: args.MinNodesPerShard, + MetaChainMinNodes: args.MetaChainMinNodes, + RoundsPerEpoch: args.RoundsPerEpoch, + InitialEpoch: args.InitialEpoch, + AlterConfigsFunction: args.AlterConfigsFunction, + NumNodesWaitingListShard: args.NumNodesWaitingListShard, + NumNodesWaitingListMeta: args.NumNodesWaitingListMeta, + }) + if err != nil { + return err + } + + for idx := 0; idx < int(args.NumOfShards)+1; idx++ { + shardIDStr := fmt.Sprintf("%d", idx-1) + if idx == 0 { + shardIDStr = "metachain" + } + + node, errCreate := s.createTestNode(*outputConfigs, args, shardIDStr) + if errCreate != nil { + return errCreate + } + + chainHandler, errCreate := process.NewBlocksCreator(node) + if errCreate != nil { + return errCreate + } + + shardID := node.GetShardCoordinator().SelfId() + s.nodes[shardID] = node + s.handlers = append(s.handlers, chainHandler) + } + + s.initialWalletKeys = outputConfigs.InitialWallets + s.validatorsPrivateKeys = outputConfigs.ValidatorsPrivateKeys + + log.Info("running the chain simulator with the following parameters", + "number of shards (including meta)", args.NumOfShards+1, + "round per epoch", outputConfigs.Configs.GeneralConfig.EpochStartConfig.RoundsPerEpoch, + "round duration", time.Millisecond*time.Duration(args.RoundDurationInMillis), + "genesis timestamp", args.GenesisTimestamp, + "original config path", args.PathToInitialConfig, + "temporary path", args.TempDir) + + return nil +} + +func computeStartTimeBaseOnInitialRound(args ArgsChainSimulator) int64 { + return args.GenesisTimestamp + int64(args.RoundDurationInMillis/1000)*args.InitialRound +} + +func (s *simulator) createTestNode( + outputConfigs configs.ArgsConfigsSimulator, args ArgsChainSimulator, shardIDStr string, +) (process.NodeHandler, error) { + argsTestOnlyProcessorNode := components.ArgsTestOnlyProcessingNode{ + Configs: outputConfigs.Configs, + ChanStopNodeProcess: s.chanStopNodeProcess, + SyncedBroadcastNetwork: s.syncedBroadcastNetwork, + NumShards: s.numOfShards, + GasScheduleFilename: outputConfigs.GasScheduleFilename, + ShardIDStr: shardIDStr, + APIInterface: args.ApiInterface, + BypassTxSignatureCheck: args.BypassTxSignatureCheck, + InitialRound: args.InitialRound, + InitialNonce: args.InitialNonce, + MinNodesPerShard: args.MinNodesPerShard, + MinNodesMeta: args.MetaChainMinNodes, + RoundDurationInMillis: args.RoundDurationInMillis, + } + + return components.NewTestOnlyProcessingNode(argsTestOnlyProcessorNode) +} + +// GenerateBlocks will generate the provided number of blocks +func (s *simulator) GenerateBlocks(numOfBlocks int) error { + s.mutex.Lock() + defer s.mutex.Unlock() + + for idx := 0; idx < numOfBlocks; idx++ { + s.incrementRoundOnAllValidators() + err := s.allNodesCreateBlocks() + if err != nil { + return err + } + } + return nil +} + +// 
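For orientation, a hedged sketch of driving the simulator defined in this file: the paths, node counts and durations are placeholders, ApiInterface is omitted here (a real caller would pass a components.APIConfigurator), and the field and method names are the ones from ArgsChainSimulator and the simulator above.

package main

import (
	"fmt"
	"time"

	"github.com/multiversx/mx-chain-core-go/core"
	"github.com/multiversx/mx-chain-go/node/chainSimulator"
)

func main() {
	simulator, err := chainSimulator.NewChainSimulator(chainSimulator.ArgsChainSimulator{
		BypassTxSignatureCheck: true,
		TempDir:                "/tmp/chain-simulator-example", // placeholder
		PathToInitialConfig:    "../../cmd/node/config",        // placeholder
		NumOfShards:            3,
		MinNodesPerShard:       1,
		MetaChainMinNodes:      1,
		GenesisTimestamp:       time.Now().Unix(),
		RoundDurationInMillis:  6000,
		RoundsPerEpoch:         core.OptionalUint64{HasValue: true, Value: 20},
	})
	if err != nil {
		panic(err)
	}

	// Produce ten blocks on every shard, then keep producing until epoch 2 is
	// reached everywhere (both methods are defined in this file).
	if err = simulator.GenerateBlocks(10); err != nil {
		panic(err)
	}
	if err = simulator.GenerateBlocksUntilEpochIsReached(2); err != nil {
		panic(err)
	}

	fmt.Println("have metachain node:", simulator.GetNodeHandler(core.MetachainShardId) != nil)
}
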
GenerateBlocksUntilEpochIsReached will generate blocks until the epoch is reached +func (s *simulator) GenerateBlocksUntilEpochIsReached(targetEpoch int32) error { + s.mutex.Lock() + defer s.mutex.Unlock() + + maxNumberOfRounds := 10000 + for idx := 0; idx < maxNumberOfRounds; idx++ { + s.incrementRoundOnAllValidators() + err := s.allNodesCreateBlocks() + if err != nil { + return err + } + + epochReachedOnAllNodes, err := s.isTargetEpochReached(targetEpoch) + if err != nil { + return err + } + + if epochReachedOnAllNodes { + return nil + } + } + return fmt.Errorf("exceeded rounds to generate blocks") +} + +func (s *simulator) isTargetEpochReached(targetEpoch int32) (bool, error) { + metachainNode := s.nodes[core.MetachainShardId] + metachainEpoch := metachainNode.GetCoreComponents().EnableEpochsHandler().GetCurrentEpoch() + + for shardID, n := range s.nodes { + if shardID != core.MetachainShardId { + if int32(n.GetCoreComponents().EnableEpochsHandler().GetCurrentEpoch()) < int32(metachainEpoch-1) { + return false, fmt.Errorf("shard %d is with at least 2 epochs behind metachain shard node epoch %d, metachain node epoch %d", + shardID, n.GetCoreComponents().EnableEpochsHandler().GetCurrentEpoch(), metachainEpoch) + } + } + + if int32(n.GetCoreComponents().EnableEpochsHandler().GetCurrentEpoch()) < targetEpoch { + return false, nil + } + } + + return true, nil +} + +func (s *simulator) incrementRoundOnAllValidators() { + for _, node := range s.handlers { + node.IncrementRound() + } +} + +func (s *simulator) allNodesCreateBlocks() error { + for _, node := range s.handlers { + // TODO MX-15150 remove this when we remove all goroutines + time.Sleep(2 * time.Millisecond) + + err := node.CreateNewBlock() + if err != nil { + return err + } + } + + return nil +} + +// GetNodeHandler returns the node handler from the provided shardID +func (s *simulator) GetNodeHandler(shardID uint32) process.NodeHandler { + s.mutex.RLock() + defer s.mutex.RUnlock() + + return s.nodes[shardID] +} + +// GetRestAPIInterfaces will return a map with the rest api interfaces for every node +func (s *simulator) GetRestAPIInterfaces() map[uint32]string { + s.mutex.Lock() + defer s.mutex.Unlock() + + resMap := make(map[uint32]string) + for shardID, node := range s.nodes { + resMap[shardID] = node.GetFacadeHandler().RestApiInterface() + } + + return resMap +} + +// GetInitialWalletKeys will return the initial wallet keys +func (s *simulator) GetInitialWalletKeys() *dtos.InitialWalletKeys { + return s.initialWalletKeys +} + +// AddValidatorKeys will add the provided validators private keys in the keys handler on all nodes +func (s *simulator) AddValidatorKeys(validatorsPrivateKeys [][]byte) error { + s.mutex.Lock() + defer s.mutex.Unlock() + + for _, node := range s.nodes { + err := s.setValidatorKeysForNode(node, validatorsPrivateKeys) + if err != nil { + return err + } + } + + return nil +} + +// GenerateAndMintWalletAddress will generate an address in the provided shard and will mint that address with the provided value +// if the target shard ID value does not correspond to a node handled by the chain simulator, the address will be generated in a random shard ID +func (s *simulator) GenerateAndMintWalletAddress(targetShardID uint32, value *big.Int) (dtos.WalletAddress, error) { + addressConverter := s.nodes[core.MetachainShardId].GetCoreComponents().AddressPubKeyConverter() + nodeHandler := s.GetNodeHandler(targetShardID) + var buff []byte + if check.IfNil(nodeHandler) { + buff = generateAddress(addressConverter.Len()) + } 
else { + buff = generateAddressInShard(nodeHandler.GetShardCoordinator(), addressConverter.Len()) + } + + address, err := addressConverter.Encode(buff) + if err != nil { + return dtos.WalletAddress{}, err + } + + err = s.SetStateMultiple([]*dtos.AddressState{ + { + Address: address, + Balance: value.String(), + }, + }) + + return dtos.WalletAddress{ + Bech32: address, + Bytes: buff, + }, err +} + +func generateAddressInShard(shardCoordinator mxChainSharding.Coordinator, len int) []byte { + for { + buff := generateAddress(len) + shardID := shardCoordinator.ComputeId(buff) + if shardID == shardCoordinator.SelfId() { + return buff + } + } +} + +func generateAddress(len int) []byte { + buff := make([]byte, len) + _, _ = rand.Read(buff) + + return buff +} + +func (s *simulator) setValidatorKeysForNode(node process.NodeHandler, validatorsPrivateKeys [][]byte) error { + for idx, privateKey := range validatorsPrivateKeys { + + err := node.GetCryptoComponents().ManagedPeersHolder().AddManagedPeer(privateKey) + if err != nil { + return fmt.Errorf("cannot add private key for shard=%d, index=%d, error=%s", node.GetShardCoordinator().SelfId(), idx, err.Error()) + } + } + + return nil +} + +// GetValidatorPrivateKeys will return the initial validators private keys +func (s *simulator) GetValidatorPrivateKeys() []crypto.PrivateKey { + s.mutex.Lock() + defer s.mutex.Unlock() + + return s.validatorsPrivateKeys +} + +// SetKeyValueForAddress will set the provided state for a given address +func (s *simulator) SetKeyValueForAddress(address string, keyValueMap map[string]string) error { + s.mutex.Lock() + defer s.mutex.Unlock() + + addressConverter := s.nodes[core.MetachainShardId].GetCoreComponents().AddressPubKeyConverter() + addressBytes, err := addressConverter.Decode(address) + if err != nil { + return err + } + + if bytes.Equal(addressBytes, core.SystemAccountAddress) { + return s.setKeyValueSystemAccount(keyValueMap) + } + + shardID := sharding.ComputeShardID(addressBytes, s.numOfShards) + testNode, ok := s.nodes[shardID] + if !ok { + return fmt.Errorf("cannot find a test node for the computed shard id, computed shard id: %d", shardID) + } + + return testNode.SetKeyValueForAddress(addressBytes, keyValueMap) +} + +func (s *simulator) setKeyValueSystemAccount(keyValueMap map[string]string) error { + for shard, node := range s.nodes { + err := node.SetKeyValueForAddress(core.SystemAccountAddress, keyValueMap) + if err != nil { + return fmt.Errorf("%w for shard %d", err, shard) + } + } + + return nil +} + +// SetStateMultiple will set state for multiple addresses +func (s *simulator) SetStateMultiple(stateSlice []*dtos.AddressState) error { + s.mutex.Lock() + defer s.mutex.Unlock() + + addressConverter := s.nodes[core.MetachainShardId].GetCoreComponents().AddressPubKeyConverter() + for _, state := range stateSlice { + addressBytes, err := addressConverter.Decode(state.Address) + if err != nil { + return err + } + + if bytes.Equal(addressBytes, core.SystemAccountAddress) { + err = s.setStateSystemAccount(state) + } else { + shardID := sharding.ComputeShardID(addressBytes, s.numOfShards) + err = s.nodes[shardID].SetStateForAddress(addressBytes, state) + } + if err != nil { + return err + } + } + + return nil +} + +// SendTxAndGenerateBlockTilTxIsExecuted will send the provided transaction and generate block until the transaction is executed +func (s *simulator) SendTxAndGenerateBlockTilTxIsExecuted(txToSend *transaction.Transaction, maxNumOfBlocksToGenerateWhenExecutingTx int) 
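As a usage note for the state-setting API above: addresses are passed bech32-encoded, balances are decimal strings, and writes to the system account are fanned out to every shard. The sketch below is hypothetical; the addresses are placeholders, and the hex encoding of the storage key and value is an assumption, since this diff does not show how the per-node SetKeyValueForAddress interprets them.

package chainsimulatorexample

import (
	"math/big"

	"github.com/multiversx/mx-chain-go/node/chainSimulator/dtos"
)

// stateSetter lists only the methods used below; both are defined in this file.
type stateSetter interface {
	SetKeyValueForAddress(address string, keyValueMap map[string]string) error
	SetStateMultiple(stateSlice []*dtos.AddressState) error
}

// seedState mints a balance for one address and sets a storage entry for another.
func seedState(simulator stateSetter) error {
	err := simulator.SetStateMultiple([]*dtos.AddressState{
		{
			Address: "erd1...placeholder...",
			Balance: big.NewInt(1000000000000000000).String(),
		},
	})
	if err != nil {
		return err
	}

	return simulator.SetKeyValueForAddress(
		"erd1...another-placeholder...",
		map[string]string{"6b6579": "76616c7565"}, // hex "key" -> "value" (assumed encoding)
	)
}
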
(*transaction.ApiTransactionResult, error) {
+	result, err := s.SendTxsAndGenerateBlocksTilAreExecuted([]*transaction.Transaction{txToSend}, maxNumOfBlocksToGenerateWhenExecutingTx)
+	if err != nil {
+		return nil, err
+	}
+
+	return result[0], nil
+}
+
+// SendTxsAndGenerateBlocksTilAreExecuted will send the provided transactions and generate blocks until all transactions are executed
+func (s *simulator) SendTxsAndGenerateBlocksTilAreExecuted(txsToSend []*transaction.Transaction, maxNumOfBlocksToGenerateWhenExecutingTx int) ([]*transaction.ApiTransactionResult, error) {
+	if len(txsToSend) == 0 {
+		return nil, errEmptySliceOfTxs
+	}
+	if maxNumOfBlocksToGenerateWhenExecutingTx == 0 {
+		return nil, errInvalidMaxNumOfBlocks
+	}
+
+	transactionStatus := make([]*transactionWithResult, 0, len(txsToSend))
+	for idx, tx := range txsToSend {
+		if tx == nil {
+			return nil, fmt.Errorf("%w on position %d", errNilTransaction, idx)
+		}
+
+		txHashHex, err := s.sendTx(tx)
+		if err != nil {
+			return nil, err
+		}
+
+		transactionStatus = append(transactionStatus, &transactionWithResult{
+			hexHash: txHashHex,
+			tx:      tx,
+		})
+	}
+
+	time.Sleep(delaySendTxs)
+
+	for count := 0; count < maxNumOfBlocksToGenerateWhenExecutingTx; count++ {
+		err := s.GenerateBlocks(1)
+		if err != nil {
+			return nil, err
+		}
+
+		txsAreExecuted := s.computeTransactionsStatus(transactionStatus)
+		if txsAreExecuted {
+			return getApiTransactionsFromResult(transactionStatus), nil
+		}
+	}
+
+	return nil, errors.New("transaction(s) still pending after the maximum number of blocks was generated")
+}
+
+func (s *simulator) computeTransactionsStatus(txsWithResult []*transactionWithResult) bool {
+	allAreExecuted := true
+	for _, resultTx := range txsWithResult {
+		if resultTx.result != nil {
+			continue
+		}
+
+		sentTx := resultTx.tx
+		destinationShardID := s.GetNodeHandler(0).GetShardCoordinator().ComputeId(sentTx.RcvAddr)
+		result, errGet := s.GetNodeHandler(destinationShardID).GetFacadeHandler().GetTransaction(resultTx.hexHash, true)
+		if errGet == nil && result.Status != transaction.TxStatusPending {
+			log.Info("############## transaction was executed ##############", "txHash", resultTx.hexHash)
+			resultTx.result = result
+			continue
+		}
+
+		allAreExecuted = false
+	}
+
+	return allAreExecuted
+}
+
+func getApiTransactionsFromResult(txWithResult []*transactionWithResult) []*transaction.ApiTransactionResult {
+	result := make([]*transaction.ApiTransactionResult, 0, len(txWithResult))
+	for _, tx := range txWithResult {
+		result = append(result, tx.result)
+	}
+
+	return result
+}
+
+func (s *simulator) sendTx(tx *transaction.Transaction) (string, error) {
+	shardID := s.GetNodeHandler(0).GetShardCoordinator().ComputeId(tx.SndAddr)
+	err := s.GetNodeHandler(shardID).GetFacadeHandler().ValidateTransaction(tx)
+	if err != nil {
+		return "", err
+	}
+
+	node := s.GetNodeHandler(shardID)
+	txHash, err := core.CalculateHash(node.GetCoreComponents().InternalMarshalizer(), node.GetCoreComponents().Hasher(), tx)
+	if err != nil {
+		return "", err
+	}
+
+	txHashHex := hex.EncodeToString(txHash)
+	_, err = node.GetFacadeHandler().SendBulkTransactions([]*transaction.Transaction{tx})
+	if err != nil {
+		return "", err
+	}
+
+	for {
+		recoveredTx, _ := node.GetFacadeHandler().GetTransaction(txHashHex, false)
+		if recoveredTx != nil {
+			log.Info("############## send transaction ##############", "txHash", txHashHex)
+			return txHashHex, nil
+		}
+
+		time.Sleep(delaySendTxs)
+	}
+}
+
+func (s *simulator) setStateSystemAccount(state *dtos.AddressState) error {
+	for shard, node
:= range s.nodes { + err := node.SetStateForAddress(core.SystemAccountAddress, state) + if err != nil { + return fmt.Errorf("%w for shard %d", err, shard) + } + } + + return nil +} + +// GetAccount will fetch the account of the provided address +func (s *simulator) GetAccount(address dtos.WalletAddress) (api.AccountResponse, error) { + destinationShardID := s.GetNodeHandler(0).GetShardCoordinator().ComputeId(address.Bytes) + + account, _, err := s.GetNodeHandler(destinationShardID).GetFacadeHandler().GetAccount(address.Bech32, api.AccountQueryOptions{}) + return account, err +} + +// Close will stop and close the simulator +func (s *simulator) Close() { + s.mutex.Lock() + defer s.mutex.Unlock() + + var errorStrings []string + for _, n := range s.nodes { + err := n.Close() + if err != nil { + errorStrings = append(errorStrings, err.Error()) + } + } + + if len(errorStrings) != 0 { + log.Error("error closing chain simulator", "error", components.AggregateErrors(errorStrings, components.ErrClose)) + } +} + +// IsInterfaceNil returns true if there is no value under the interface +func (s *simulator) IsInterfaceNil() bool { + return s == nil +} + +// GenerateBlsPrivateKeys will generate bls keys +func GenerateBlsPrivateKeys(numOfKeys int) ([][]byte, []string, error) { + blockSigningGenerator := signing.NewKeyGenerator(mcl.NewSuiteBLS12()) + + secretKeysBytes := make([][]byte, 0, numOfKeys) + blsKeysHex := make([]string, 0, numOfKeys) + for idx := 0; idx < numOfKeys; idx++ { + secretKey, publicKey := blockSigningGenerator.GeneratePair() + + secretKeyBytes, err := secretKey.ToByteArray() + if err != nil { + return nil, nil, err + } + + secretKeysBytes = append(secretKeysBytes, secretKeyBytes) + + publicKeyBytes, err := publicKey.ToByteArray() + if err != nil { + return nil, nil, err + } + + blsKeysHex = append(blsKeysHex, hex.EncodeToString(publicKeyBytes)) + } + + return secretKeysBytes, blsKeysHex, nil +} diff --git a/node/chainSimulator/chainSimulator_test.go b/node/chainSimulator/chainSimulator_test.go new file mode 100644 index 00000000000..1a65b37ff78 --- /dev/null +++ b/node/chainSimulator/chainSimulator_test.go @@ -0,0 +1,442 @@ +package chainSimulator + +import ( + "encoding/base64" + "math/big" + "testing" + "time" + + "github.com/multiversx/mx-chain-core-go/core" + coreAPI "github.com/multiversx/mx-chain-core-go/data/api" + "github.com/multiversx/mx-chain-core-go/data/transaction" + "github.com/multiversx/mx-chain-go/node/chainSimulator/components/api" + "github.com/multiversx/mx-chain-go/node/chainSimulator/configs" + "github.com/multiversx/mx-chain-go/node/chainSimulator/dtos" + "github.com/multiversx/mx-chain-go/process" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +const ( + defaultPathToInitialConfig = "../../cmd/node/config/" +) + +func TestNewChainSimulator(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + + startTime := time.Now().Unix() + roundDurationInMillis := uint64(6000) + chainSimulator, err := NewChainSimulator(ArgsChainSimulator{ + BypassTxSignatureCheck: false, + TempDir: t.TempDir(), + PathToInitialConfig: defaultPathToInitialConfig, + NumOfShards: 3, + GenesisTimestamp: startTime, + RoundDurationInMillis: roundDurationInMillis, + RoundsPerEpoch: core.OptionalUint64{}, + ApiInterface: api.NewNoApiInterface(), + MinNodesPerShard: 1, + MetaChainMinNodes: 1, + }) + require.Nil(t, err) + require.NotNil(t, chainSimulator) + + time.Sleep(time.Second) + + chainSimulator.Close() +} + +func 
TestChainSimulator_GenerateBlocksShouldWork(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + + startTime := time.Now().Unix() + roundDurationInMillis := uint64(6000) + chainSimulator, err := NewChainSimulator(ArgsChainSimulator{ + BypassTxSignatureCheck: false, + TempDir: t.TempDir(), + PathToInitialConfig: defaultPathToInitialConfig, + NumOfShards: 3, + GenesisTimestamp: startTime, + RoundDurationInMillis: roundDurationInMillis, + RoundsPerEpoch: core.OptionalUint64{ + HasValue: true, + Value: 20, + }, + ApiInterface: api.NewNoApiInterface(), + MinNodesPerShard: 1, + MetaChainMinNodes: 1, + InitialRound: 200000000, + InitialEpoch: 100, + InitialNonce: 100, + }) + require.Nil(t, err) + require.NotNil(t, chainSimulator) + + defer chainSimulator.Close() + + time.Sleep(time.Second) + + err = chainSimulator.GenerateBlocks(50) + require.Nil(t, err) +} + +func TestChainSimulator_GenerateBlocksAndEpochChangeShouldWork(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + + startTime := time.Now().Unix() + roundDurationInMillis := uint64(6000) + roundsPerEpoch := core.OptionalUint64{ + HasValue: true, + Value: 20, + } + chainSimulator, err := NewChainSimulator(ArgsChainSimulator{ + BypassTxSignatureCheck: false, + TempDir: t.TempDir(), + PathToInitialConfig: defaultPathToInitialConfig, + NumOfShards: 3, + GenesisTimestamp: startTime, + RoundDurationInMillis: roundDurationInMillis, + RoundsPerEpoch: roundsPerEpoch, + ApiInterface: api.NewNoApiInterface(), + MinNodesPerShard: 100, + MetaChainMinNodes: 100, + }) + require.Nil(t, err) + require.NotNil(t, chainSimulator) + + defer chainSimulator.Close() + + facade, err := NewChainSimulatorFacade(chainSimulator) + require.Nil(t, err) + + genesisBalances := make(map[string]*big.Int) + for _, stakeWallet := range chainSimulator.initialWalletKeys.StakeWallets { + initialAccount, errGet := facade.GetExistingAccountFromBech32AddressString(stakeWallet.Address.Bech32) + require.Nil(t, errGet) + + genesisBalances[stakeWallet.Address.Bech32] = initialAccount.GetBalance() + } + + time.Sleep(time.Second) + + err = chainSimulator.GenerateBlocks(80) + require.Nil(t, err) + + numAccountsWithIncreasedBalances := 0 + for _, stakeWallet := range chainSimulator.initialWalletKeys.StakeWallets { + account, errGet := facade.GetExistingAccountFromBech32AddressString(stakeWallet.Address.Bech32) + require.Nil(t, errGet) + + if account.GetBalance().Cmp(genesisBalances[stakeWallet.Address.Bech32]) > 0 { + numAccountsWithIncreasedBalances++ + } + } + + assert.True(t, numAccountsWithIncreasedBalances > 0) +} + +func TestChainSimulator_SetState(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + + startTime := time.Now().Unix() + roundDurationInMillis := uint64(6000) + roundsPerEpoch := core.OptionalUint64{ + HasValue: true, + Value: 20, + } + chainSimulator, err := NewChainSimulator(ArgsChainSimulator{ + BypassTxSignatureCheck: false, + TempDir: t.TempDir(), + PathToInitialConfig: defaultPathToInitialConfig, + NumOfShards: 3, + GenesisTimestamp: startTime, + RoundDurationInMillis: roundDurationInMillis, + RoundsPerEpoch: roundsPerEpoch, + ApiInterface: api.NewNoApiInterface(), + MinNodesPerShard: 1, + MetaChainMinNodes: 1, + }) + require.Nil(t, err) + require.NotNil(t, chainSimulator) + + defer chainSimulator.Close() + + keyValueMap := map[string]string{ + "01": "01", + "02": "02", + } + + address := "erd1qtc600lryvytxuy4h7vn7xmsy5tw6vuw3tskr75cwnmv4mnyjgsq6e5zgj" + err = 
chainSimulator.SetKeyValueForAddress(address, keyValueMap) + require.Nil(t, err) + + err = chainSimulator.GenerateBlocks(1) + require.Nil(t, err) + + nodeHandler := chainSimulator.GetNodeHandler(0) + keyValuePairs, _, err := nodeHandler.GetFacadeHandler().GetKeyValuePairs(address, coreAPI.AccountQueryOptions{}) + require.Nil(t, err) + require.Equal(t, keyValueMap, keyValuePairs) +} + +func TestChainSimulator_SetEntireState(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + + startTime := time.Now().Unix() + roundDurationInMillis := uint64(6000) + roundsPerEpoch := core.OptionalUint64{ + HasValue: true, + Value: 20, + } + chainSimulator, err := NewChainSimulator(ArgsChainSimulator{ + BypassTxSignatureCheck: false, + TempDir: t.TempDir(), + PathToInitialConfig: defaultPathToInitialConfig, + NumOfShards: 3, + GenesisTimestamp: startTime, + RoundDurationInMillis: roundDurationInMillis, + RoundsPerEpoch: roundsPerEpoch, + ApiInterface: api.NewNoApiInterface(), + MinNodesPerShard: 1, + MetaChainMinNodes: 1, + }) + require.Nil(t, err) + require.NotNil(t, chainSimulator) + + defer chainSimulator.Close() + + balance := "431271308732096033771131" + contractAddress := "erd1qqqqqqqqqqqqqpgqmzzm05jeav6d5qvna0q2pmcllelkz8xddz3syjszx5" + accountState := &dtos.AddressState{ + Address: contractAddress, + Nonce: new(uint64), + Balance: balance, + Code: "0061736d010000000129086000006000017f60027f7f017f60027f7f0060017f0060037f7f7f017f60037f7f7f0060017f017f0290020b03656e7619626967496e74476574556e7369676e6564417267756d656e74000303656e760f6765744e756d417267756d656e7473000103656e760b7369676e616c4572726f72000303656e76126d42756666657253746f726167654c6f6164000203656e76176d427566666572546f426967496e74556e7369676e6564000203656e76196d42756666657246726f6d426967496e74556e7369676e6564000203656e76136d42756666657253746f7261676553746f7265000203656e760f6d4275666665725365744279746573000503656e760e636865636b4e6f5061796d656e74000003656e7614626967496e7446696e697368556e7369676e6564000403656e7609626967496e744164640006030b0a010104070301000000000503010003060f027f0041a080080b7f0041a080080b074607066d656d6f7279020004696e697400110667657453756d00120361646400130863616c6c4261636b00140a5f5f646174615f656e6403000b5f5f686561705f6261736503010aca010a0e01017f4100100c2200100020000b1901017f419c8008419c800828020041016b220036020020000b1400100120004604400f0b4180800841191002000b16002000100c220010031a2000100c220010041a20000b1401017f100c2202200110051a2000200210061a0b1301017f100c220041998008410310071a20000b1401017f10084101100d100b210010102000100f0b0e0010084100100d1010100e10090b2201037f10084101100d100b210110102202100e220020002001100a20022000100f0b0300010b0b2f0200418080080b1c77726f6e67206e756d626572206f6620617267756d656e747373756d00419c80080b049cffffff", + CodeHash: "n9EviPlHS6EV+3Xp0YqP28T0IUfeAFRFBIRC1Jw6pyU=", + RootHash: "76cr5Jhn6HmBcDUMIzikEpqFgZxIrOzgNkTHNatXzC4=", + CodeMetadata: "BQY=", + Owner: "erd1ss6u80ruas2phpmr82r42xnkd6rxy40g9jl69frppl4qez9w2jpsqj8x97", + DeveloperRewards: "5401004999998", + Keys: map[string]string{ + "73756d": "0a", + }, + } + + err = chainSimulator.SetStateMultiple([]*dtos.AddressState{accountState}) + require.Nil(t, err) + + err = chainSimulator.GenerateBlocks(30) + require.Nil(t, err) + + nodeHandler := chainSimulator.GetNodeHandler(1) + scAddress, _ := nodeHandler.GetCoreComponents().AddressPubKeyConverter().Decode(contractAddress) + res, _, err := nodeHandler.GetFacadeHandler().ExecuteSCQuery(&process.SCQuery{ + ScAddress: scAddress, + FuncName: "getSum", + CallerAddr: nil, + BlockNonce: 
core.OptionalUint64{}, + }) + require.Nil(t, err) + + counterValue := big.NewInt(0).SetBytes(res.ReturnData[0]).Int64() + require.Equal(t, 10, int(counterValue)) + + time.Sleep(time.Second) + + account, _, err := nodeHandler.GetFacadeHandler().GetAccount(contractAddress, coreAPI.AccountQueryOptions{}) + require.Nil(t, err) + require.Equal(t, accountState.Balance, account.Balance) + require.Equal(t, accountState.DeveloperRewards, account.DeveloperReward) + require.Equal(t, accountState.Code, account.Code) + require.Equal(t, accountState.CodeHash, base64.StdEncoding.EncodeToString(account.CodeHash)) + require.Equal(t, accountState.CodeMetadata, base64.StdEncoding.EncodeToString(account.CodeMetadata)) + require.Equal(t, accountState.Owner, account.OwnerAddress) + require.Equal(t, accountState.RootHash, base64.StdEncoding.EncodeToString(account.RootHash)) +} + +func TestChainSimulator_GetAccount(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + + startTime := time.Now().Unix() + roundDurationInMillis := uint64(6000) + roundsPerEpoch := core.OptionalUint64{ + HasValue: true, + Value: 20, + } + chainSimulator, err := NewChainSimulator(ArgsChainSimulator{ + BypassTxSignatureCheck: false, + TempDir: t.TempDir(), + PathToInitialConfig: defaultPathToInitialConfig, + NumOfShards: 3, + GenesisTimestamp: startTime, + RoundDurationInMillis: roundDurationInMillis, + RoundsPerEpoch: roundsPerEpoch, + ApiInterface: api.NewNoApiInterface(), + MinNodesPerShard: 1, + MetaChainMinNodes: 1, + }) + require.Nil(t, err) + require.NotNil(t, chainSimulator) + + // the facade's GetAccount method requires that at least one block was produced over the genesis block + _ = chainSimulator.GenerateBlocks(1) + + defer chainSimulator.Close() + + address := dtos.WalletAddress{ + Bech32: "erd1qtc600lryvytxuy4h7vn7xmsy5tw6vuw3tskr75cwnmv4mnyjgsq6e5zgj", + } + address.Bytes, err = chainSimulator.GetNodeHandler(0).GetCoreComponents().AddressPubKeyConverter().Decode(address.Bech32) + assert.Nil(t, err) + + account, err := chainSimulator.GetAccount(address) + assert.Nil(t, err) + assert.Equal(t, uint64(0), account.Nonce) + assert.Equal(t, "0", account.Balance) + + nonce := uint64(37) + err = chainSimulator.SetStateMultiple([]*dtos.AddressState{ + { + Address: address.Bech32, + Nonce: &nonce, + Balance: big.NewInt(38).String(), + }, + }) + assert.Nil(t, err) + + // without this call the test will fail because the latest produced block points to a state roothash that tells that + // the account has the nonce 0 + _ = chainSimulator.GenerateBlocks(1) + + account, err = chainSimulator.GetAccount(address) + assert.Nil(t, err) + assert.Equal(t, uint64(37), account.Nonce) + assert.Equal(t, "38", account.Balance) +} + +func TestSimulator_SendTransactions(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + + startTime := time.Now().Unix() + roundDurationInMillis := uint64(6000) + roundsPerEpoch := core.OptionalUint64{ + HasValue: true, + Value: 20, + } + chainSimulator, err := NewChainSimulator(ArgsChainSimulator{ + BypassTxSignatureCheck: false, + TempDir: t.TempDir(), + PathToInitialConfig: defaultPathToInitialConfig, + NumOfShards: 3, + GenesisTimestamp: startTime, + RoundDurationInMillis: roundDurationInMillis, + RoundsPerEpoch: roundsPerEpoch, + ApiInterface: api.NewNoApiInterface(), + MinNodesPerShard: 1, + MetaChainMinNodes: 1, + }) + require.Nil(t, err) + require.NotNil(t, chainSimulator) + + defer chainSimulator.Close() + + oneEgld := big.NewInt(1000000000000000000) + 
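+	// note: oneEgld above is 10^18 base units (the EGLD denomination), so initialMinting below is 100 EGLD and transferValue is 5 EGLD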
initialMinting := big.NewInt(0).Mul(oneEgld, big.NewInt(100)) + transferValue := big.NewInt(0).Mul(oneEgld, big.NewInt(5)) + + wallet0, err := chainSimulator.GenerateAndMintWalletAddress(0, initialMinting) + require.Nil(t, err) + + wallet1, err := chainSimulator.GenerateAndMintWalletAddress(1, initialMinting) + require.Nil(t, err) + + wallet2, err := chainSimulator.GenerateAndMintWalletAddress(2, initialMinting) + require.Nil(t, err) + + wallet3, err := chainSimulator.GenerateAndMintWalletAddress(2, initialMinting) + require.Nil(t, err) + + wallet4, err := chainSimulator.GenerateAndMintWalletAddress(2, initialMinting) + require.Nil(t, err) + + gasLimit := uint64(50000) + tx0 := generateTransaction(wallet0.Bytes, 0, wallet2.Bytes, transferValue, "", gasLimit) + tx1 := generateTransaction(wallet1.Bytes, 0, wallet2.Bytes, transferValue, "", gasLimit) + tx3 := generateTransaction(wallet3.Bytes, 0, wallet4.Bytes, transferValue, "", gasLimit) + + maxNumOfBlockToGenerateWhenExecutingTx := 15 + + t.Run("nil or empty slice of transactions should error", func(t *testing.T) { + sentTxs, errSend := chainSimulator.SendTxsAndGenerateBlocksTilAreExecuted(nil, 1) + assert.Equal(t, errEmptySliceOfTxs, errSend) + assert.Nil(t, sentTxs) + + sentTxs, errSend = chainSimulator.SendTxsAndGenerateBlocksTilAreExecuted(make([]*transaction.Transaction, 0), 1) + assert.Equal(t, errEmptySliceOfTxs, errSend) + assert.Nil(t, sentTxs) + }) + t.Run("invalid max number of blocks to generate should error", func(t *testing.T) { + sentTxs, errSend := chainSimulator.SendTxsAndGenerateBlocksTilAreExecuted([]*transaction.Transaction{tx0, tx1}, 0) + assert.Equal(t, errInvalidMaxNumOfBlocks, errSend) + assert.Nil(t, sentTxs) + }) + t.Run("nil transaction in slice should error", func(t *testing.T) { + sentTxs, errSend := chainSimulator.SendTxsAndGenerateBlocksTilAreExecuted([]*transaction.Transaction{nil}, 1) + assert.ErrorIs(t, errSend, errNilTransaction) + assert.Nil(t, sentTxs) + }) + t.Run("2 transactions from different shard should call send correctly", func(t *testing.T) { + sentTxs, errSend := chainSimulator.SendTxsAndGenerateBlocksTilAreExecuted([]*transaction.Transaction{tx0, tx1}, maxNumOfBlockToGenerateWhenExecutingTx) + assert.Equal(t, 2, len(sentTxs)) + assert.Nil(t, errSend) + + account, errGet := chainSimulator.GetAccount(wallet2) + assert.Nil(t, errGet) + expectedBalance := big.NewInt(0).Add(initialMinting, transferValue) + expectedBalance.Add(expectedBalance, transferValue) + assert.Equal(t, expectedBalance.String(), account.Balance) + }) + t.Run("1 transaction should be sent correctly", func(t *testing.T) { + _, errSend := chainSimulator.SendTxAndGenerateBlockTilTxIsExecuted(tx3, maxNumOfBlockToGenerateWhenExecutingTx) + assert.Nil(t, errSend) + + account, errGet := chainSimulator.GetAccount(wallet4) + assert.Nil(t, errGet) + expectedBalance := big.NewInt(0).Add(initialMinting, transferValue) + assert.Equal(t, expectedBalance.String(), account.Balance) + }) +} + +func generateTransaction(sender []byte, nonce uint64, receiver []byte, value *big.Int, data string, gasLimit uint64) *transaction.Transaction { + minGasPrice := uint64(1000000000) + txVersion := uint32(1) + mockTxSignature := "sig" + + transferValue := big.NewInt(0).Set(value) + return &transaction.Transaction{ + Nonce: nonce, + Value: transferValue, + SndAddr: sender, + RcvAddr: receiver, + Data: []byte(data), + GasLimit: gasLimit, + GasPrice: minGasPrice, + ChainID: []byte(configs.ChainID), + Version: txVersion, + Signature: []byte(mockTxSignature), + } 
+} diff --git a/node/chainSimulator/components/api/fixedAPIInterface.go b/node/chainSimulator/components/api/fixedAPIInterface.go new file mode 100644 index 00000000000..2848be6ad15 --- /dev/null +++ b/node/chainSimulator/components/api/fixedAPIInterface.go @@ -0,0 +1,21 @@ +package api + +import "fmt" + +type fixedPortAPIConfigurator struct { + restAPIInterface string + mapShardPort map[uint32]int +} + +// NewFixedPortAPIConfigurator will create a new instance of fixedPortAPIConfigurator +func NewFixedPortAPIConfigurator(restAPIInterface string, mapShardPort map[uint32]int) *fixedPortAPIConfigurator { + return &fixedPortAPIConfigurator{ + restAPIInterface: restAPIInterface, + mapShardPort: mapShardPort, + } +} + +// RestApiInterface will return the api interface for the provided shard +func (f *fixedPortAPIConfigurator) RestApiInterface(shardID uint32) string { + return fmt.Sprintf("%s:%d", f.restAPIInterface, f.mapShardPort[shardID]) +} diff --git a/node/chainSimulator/components/api/fixedAPIInterface_test.go b/node/chainSimulator/components/api/fixedAPIInterface_test.go new file mode 100644 index 00000000000..7348b717831 --- /dev/null +++ b/node/chainSimulator/components/api/fixedAPIInterface_test.go @@ -0,0 +1,20 @@ +package api + +import ( + "fmt" + "testing" + + "github.com/stretchr/testify/require" +) + +const apiInterface = "127.0.0.1:8080" + +func TestNewFixedPortAPIConfigurator(t *testing.T) { + t.Parallel() + + instance := NewFixedPortAPIConfigurator(apiInterface, map[uint32]int{0: 123}) + require.NotNil(t, instance) + + interf := instance.RestApiInterface(0) + require.Equal(t, fmt.Sprintf("%s:123", apiInterface), interf) +} diff --git a/node/chainSimulator/components/api/freeAPIInterface.go b/node/chainSimulator/components/api/freeAPIInterface.go new file mode 100644 index 00000000000..983ce0d93ca --- /dev/null +++ b/node/chainSimulator/components/api/freeAPIInterface.go @@ -0,0 +1,37 @@ +package api + +import ( + "fmt" + "net" +) + +type freePortAPIConfigurator struct { + restAPIInterface string +} + +// NewFreePortAPIConfigurator will create a new instance of freePortAPIConfigurator +func NewFreePortAPIConfigurator(restAPIInterface string) *freePortAPIConfigurator { + return &freePortAPIConfigurator{ + restAPIInterface: restAPIInterface, + } +} + +// RestApiInterface will return the rest api interface with a free port +func (f *freePortAPIConfigurator) RestApiInterface(_ uint32) string { + return fmt.Sprintf("%s:%d", f.restAPIInterface, getFreePort()) +} + +func getFreePort() int { + // Listen on port 0 to get a free port + l, err := net.Listen("tcp", "localhost:0") + if err != nil { + panic(err) + } + defer func() { + _ = l.Close() + }() + + // Get the port number that was assigned + addr := l.Addr().(*net.TCPAddr) + return addr.Port +} diff --git a/node/chainSimulator/components/api/freeAPIInterface_test.go b/node/chainSimulator/components/api/freeAPIInterface_test.go new file mode 100644 index 00000000000..0b215aa0a57 --- /dev/null +++ b/node/chainSimulator/components/api/freeAPIInterface_test.go @@ -0,0 +1,19 @@ +package api + +import ( + "fmt" + "strings" + "testing" + + "github.com/stretchr/testify/require" +) + +func TestNewFreePortAPIConfigurator(t *testing.T) { + t.Parallel() + + instance := NewFreePortAPIConfigurator(apiInterface) + require.NotNil(t, instance) + + interf := instance.RestApiInterface(0) + require.True(t, strings.Contains(interf, fmt.Sprintf("%s:", apiInterface))) +} diff --git a/node/chainSimulator/components/api/noApiInterface.go 
b/node/chainSimulator/components/api/noApiInterface.go new file mode 100644 index 00000000000..cd720c2511f --- /dev/null +++ b/node/chainSimulator/components/api/noApiInterface.go @@ -0,0 +1,15 @@ +package api + +import "github.com/multiversx/mx-chain-go/facade" + +type noAPIInterface struct{} + +// NewNoApiInterface will create a new instance of noAPIInterface +func NewNoApiInterface() *noAPIInterface { + return new(noAPIInterface) +} + +// RestApiInterface will return the value for disable api interface +func (n noAPIInterface) RestApiInterface(_ uint32) string { + return facade.DefaultRestPortOff +} diff --git a/node/chainSimulator/components/api/noApiInterface_test.go b/node/chainSimulator/components/api/noApiInterface_test.go new file mode 100644 index 00000000000..ee8efbc5783 --- /dev/null +++ b/node/chainSimulator/components/api/noApiInterface_test.go @@ -0,0 +1,18 @@ +package api + +import ( + "testing" + + "github.com/multiversx/mx-chain-go/facade" + "github.com/stretchr/testify/require" +) + +func TestNewNoApiInterface(t *testing.T) { + t.Parallel() + + instance := NewNoApiInterface() + require.NotNil(t, instance) + + interf := instance.RestApiInterface(0) + require.Equal(t, facade.DefaultRestPortOff, interf) +} diff --git a/node/chainSimulator/components/bootstrapComponents.go b/node/chainSimulator/components/bootstrapComponents.go new file mode 100644 index 00000000000..7e0190ded2e --- /dev/null +++ b/node/chainSimulator/components/bootstrapComponents.go @@ -0,0 +1,159 @@ +package components + +import ( + "fmt" + "io" + + "github.com/multiversx/mx-chain-core-go/core" + nodeFactory "github.com/multiversx/mx-chain-go/cmd/node/factory" + "github.com/multiversx/mx-chain-go/config" + "github.com/multiversx/mx-chain-go/factory" + bootstrapComp "github.com/multiversx/mx-chain-go/factory/bootstrap" + "github.com/multiversx/mx-chain-go/process" + "github.com/multiversx/mx-chain-go/sharding" + "github.com/multiversx/mx-chain-go/sharding/nodesCoordinator" +) + +// ArgsBootstrapComponentsHolder will hold the components needed for the bootstrap components holders +type ArgsBootstrapComponentsHolder struct { + CoreComponents factory.CoreComponentsHolder + CryptoComponents factory.CryptoComponentsHolder + NetworkComponents factory.NetworkComponentsHolder + StatusCoreComponents factory.StatusCoreComponentsHolder + WorkingDir string + FlagsConfig config.ContextFlagsConfig + ImportDBConfig config.ImportDbConfig + PrefsConfig config.Preferences + Config config.Config + ShardIDStr string +} + +type bootstrapComponentsHolder struct { + epochStartBootstrapper factory.EpochStartBootstrapper + epochBootstrapParams factory.BootstrapParamsHolder + nodeType core.NodeType + shardCoordinator sharding.Coordinator + versionedHeaderFactory nodeFactory.VersionedHeaderFactory + headerVersionHandler nodeFactory.HeaderVersionHandler + headerIntegrityVerifier nodeFactory.HeaderIntegrityVerifierHandler + guardedAccountHandler process.GuardedAccountHandler + nodesCoordinatorRegistryFactory nodesCoordinator.NodesCoordinatorRegistryFactory + managedBootstrapComponentsCloser io.Closer +} + +// CreateBootstrapComponents will create a new instance of bootstrap components holder +func CreateBootstrapComponents(args ArgsBootstrapComponentsHolder) (*bootstrapComponentsHolder, error) { + instance := &bootstrapComponentsHolder{} + + args.PrefsConfig.Preferences.DestinationShardAsObserver = args.ShardIDStr + + bootstrapComponentsFactoryArgs := bootstrapComp.BootstrapComponentsFactoryArgs{ + Config: args.Config, + PrefConfig: 
args.PrefsConfig, + ImportDbConfig: args.ImportDBConfig, + FlagsConfig: args.FlagsConfig, + WorkingDir: args.WorkingDir, + CoreComponents: args.CoreComponents, + CryptoComponents: args.CryptoComponents, + NetworkComponents: args.NetworkComponents, + StatusCoreComponents: args.StatusCoreComponents, + } + + bootstrapComponentsFactory, err := bootstrapComp.NewBootstrapComponentsFactory(bootstrapComponentsFactoryArgs) + if err != nil { + return nil, fmt.Errorf("NewBootstrapComponentsFactory failed: %w", err) + } + + managedBootstrapComponents, err := bootstrapComp.NewManagedBootstrapComponents(bootstrapComponentsFactory) + if err != nil { + return nil, err + } + + err = managedBootstrapComponents.Create() + if err != nil { + return nil, err + } + + instance.epochStartBootstrapper = managedBootstrapComponents.EpochStartBootstrapper() + instance.epochBootstrapParams = managedBootstrapComponents.EpochBootstrapParams() + instance.nodeType = managedBootstrapComponents.NodeType() + instance.shardCoordinator = managedBootstrapComponents.ShardCoordinator() + instance.versionedHeaderFactory = managedBootstrapComponents.VersionedHeaderFactory() + instance.headerVersionHandler = managedBootstrapComponents.HeaderVersionHandler() + instance.headerIntegrityVerifier = managedBootstrapComponents.HeaderIntegrityVerifier() + instance.guardedAccountHandler = managedBootstrapComponents.GuardedAccountHandler() + instance.nodesCoordinatorRegistryFactory = managedBootstrapComponents.NodesCoordinatorRegistryFactory() + instance.managedBootstrapComponentsCloser = managedBootstrapComponents + + return instance, nil +} + +// NodesCoordinatorRegistryFactory will return the nodes coordinator registry factory +func (b *bootstrapComponentsHolder) NodesCoordinatorRegistryFactory() nodesCoordinator.NodesCoordinatorRegistryFactory { + return b.nodesCoordinatorRegistryFactory +} + +// EpochStartBootstrapper will return the epoch start bootstrapper +func (b *bootstrapComponentsHolder) EpochStartBootstrapper() factory.EpochStartBootstrapper { + return b.epochStartBootstrapper +} + +// EpochBootstrapParams will return the epoch bootstrap params +func (b *bootstrapComponentsHolder) EpochBootstrapParams() factory.BootstrapParamsHolder { + return b.epochBootstrapParams +} + +// NodeType will return the node type +func (b *bootstrapComponentsHolder) NodeType() core.NodeType { + return b.nodeType +} + +// ShardCoordinator will return the shardCoordinator +func (b *bootstrapComponentsHolder) ShardCoordinator() sharding.Coordinator { + return b.shardCoordinator +} + +// VersionedHeaderFactory will return the versioned header factory +func (b *bootstrapComponentsHolder) VersionedHeaderFactory() nodeFactory.VersionedHeaderFactory { + return b.versionedHeaderFactory +} + +// HeaderVersionHandler will return header version handler +func (b *bootstrapComponentsHolder) HeaderVersionHandler() nodeFactory.HeaderVersionHandler { + return b.headerVersionHandler +} + +// HeaderIntegrityVerifier will return header integrity verifier +func (b *bootstrapComponentsHolder) HeaderIntegrityVerifier() nodeFactory.HeaderIntegrityVerifierHandler { + return b.headerIntegrityVerifier +} + +// GuardedAccountHandler will return guarded account handler +func (b *bootstrapComponentsHolder) GuardedAccountHandler() process.GuardedAccountHandler { + return b.guardedAccountHandler +} + +// Close will call the Close methods on all inner components +func (b *bootstrapComponentsHolder) Close() error { + return b.managedBootstrapComponentsCloser.Close() +} + +// 
IsInterfaceNil returns true if there is no value under the interface +func (b *bootstrapComponentsHolder) IsInterfaceNil() bool { + return b == nil +} + +// Create will do nothing +func (b *bootstrapComponentsHolder) Create() error { + return nil +} + +// CheckSubcomponents will do nothing +func (b *bootstrapComponentsHolder) CheckSubcomponents() error { + return nil +} + +// String will do nothing +func (b *bootstrapComponentsHolder) String() string { + return "" +} diff --git a/node/chainSimulator/components/bootstrapComponents_test.go b/node/chainSimulator/components/bootstrapComponents_test.go new file mode 100644 index 00000000000..7e4becdc52e --- /dev/null +++ b/node/chainSimulator/components/bootstrapComponents_test.go @@ -0,0 +1,200 @@ +package components + +import ( + "testing" + + "github.com/multiversx/mx-chain-core-go/core" + "github.com/multiversx/mx-chain-core-go/data/typeConverters" + "github.com/multiversx/mx-chain-core-go/hashing" + "github.com/multiversx/mx-chain-core-go/marshal" + "github.com/multiversx/mx-chain-go/common" + "github.com/multiversx/mx-chain-go/config" + "github.com/multiversx/mx-chain-go/consensus" + "github.com/multiversx/mx-chain-go/integrationTests/mock" + "github.com/multiversx/mx-chain-go/process" + "github.com/multiversx/mx-chain-go/sharding" + "github.com/multiversx/mx-chain-go/sharding/nodesCoordinator" + "github.com/multiversx/mx-chain-go/storage" + "github.com/multiversx/mx-chain-go/testscommon" + "github.com/multiversx/mx-chain-go/testscommon/cryptoMocks" + "github.com/multiversx/mx-chain-go/testscommon/economicsmocks" + "github.com/multiversx/mx-chain-go/testscommon/enableEpochsHandlerMock" + "github.com/multiversx/mx-chain-go/testscommon/epochNotifier" + "github.com/multiversx/mx-chain-go/testscommon/factory" + "github.com/multiversx/mx-chain-go/testscommon/genesisMocks" + "github.com/multiversx/mx-chain-go/testscommon/p2pmocks" + "github.com/multiversx/mx-chain-go/testscommon/shardingMocks" + "github.com/multiversx/mx-chain-go/testscommon/statusHandler" + "github.com/stretchr/testify/require" +) + +func createArgsBootstrapComponentsHolder() ArgsBootstrapComponentsHolder { + return ArgsBootstrapComponentsHolder{ + CoreComponents: &factory.CoreComponentsHolderStub{ + ChainIDCalled: func() string { + return "T" + }, + GenesisNodesSetupCalled: func() sharding.GenesisNodesSetupHandler { + return &genesisMocks.NodesSetupStub{} + }, + InternalMarshalizerCalled: func() marshal.Marshalizer { + return &testscommon.MarshallerStub{} + }, + EpochNotifierCalled: func() process.EpochNotifier { + return &epochNotifier.EpochNotifierStub{} + }, + EconomicsDataCalled: func() process.EconomicsDataHandler { + return &economicsmocks.EconomicsHandlerMock{} + }, + RaterCalled: func() sharding.PeerAccountListAndRatingHandler { + return &testscommon.RaterMock{} + }, + NodesShufflerCalled: func() nodesCoordinator.NodesShuffler { + return &shardingMocks.NodeShufflerMock{} + }, + RoundHandlerCalled: func() consensus.RoundHandler { + return &testscommon.RoundHandlerMock{} + }, + HasherCalled: func() hashing.Hasher { + return &testscommon.HasherStub{} + }, + PathHandlerCalled: func() storage.PathManagerHandler { + return &testscommon.PathManagerStub{} + }, + TxMarshalizerCalled: func() marshal.Marshalizer { + return &testscommon.MarshallerStub{} + }, + AddressPubKeyConverterCalled: func() core.PubkeyConverter { + return &testscommon.PubkeyConverterStub{} + }, + Uint64ByteSliceConverterCalled: func() typeConverters.Uint64ByteSliceConverter { + return 
&mock.Uint64ByteSliceConverterMock{} + }, + TxSignHasherCalled: func() hashing.Hasher { + return &testscommon.HasherStub{} + }, + EnableEpochsHandlerCalled: func() common.EnableEpochsHandler { + return &enableEpochsHandlerMock.EnableEpochsHandlerStub{} + }, + }, + CryptoComponents: &mock.CryptoComponentsStub{ + PubKey: &mock.PublicKeyMock{}, + BlockSig: &cryptoMocks.SingleSignerStub{}, + BlKeyGen: &cryptoMocks.KeyGenStub{}, + TxSig: &cryptoMocks.SingleSignerStub{}, + TxKeyGen: &cryptoMocks.KeyGenStub{}, + ManagedPeersHolderField: &testscommon.ManagedPeersHolderStub{}, + }, + NetworkComponents: &mock.NetworkComponentsStub{ + Messenger: &p2pmocks.MessengerStub{}, + FullArchiveNetworkMessengerField: &p2pmocks.MessengerStub{}, + }, + StatusCoreComponents: &factory.StatusCoreComponentsStub{ + TrieSyncStatisticsField: &testscommon.SizeSyncStatisticsHandlerStub{}, + AppStatusHandlerField: &statusHandler.AppStatusHandlerStub{}, + StateStatsHandlerField: &testscommon.StateStatisticsHandlerStub{}, + }, + WorkingDir: ".", + FlagsConfig: config.ContextFlagsConfig{}, + ImportDBConfig: config.ImportDbConfig{}, + PrefsConfig: config.Preferences{}, + Config: config.Config{ + EpochStartConfig: config.EpochStartConfig{ + MinNumConnectedPeersToStart: 1, + MinNumOfPeersToConsiderBlockValid: 1, + }, + TrieSync: config.TrieSyncConfig{ + MaxHardCapForMissingNodes: 1, + NumConcurrentTrieSyncers: 1, + }, + GeneralSettings: config.GeneralSettingsConfig{ + SetGuardianEpochsDelay: 1, + }, + Versions: config.VersionsConfig{ + Cache: config.CacheConfig{ + Type: "LRU", + Capacity: 123, + }, + DefaultVersion: "1", + VersionsByEpochs: []config.VersionByEpochs{ + { + StartEpoch: 0, + Version: "1", + }, + }, + }, + WhiteListPool: config.CacheConfig{ + Type: "LRU", + Capacity: 123, + }, + }, + ShardIDStr: "0", + } +} + +func TestCreateBootstrapComponents(t *testing.T) { + t.Parallel() + + t.Run("should work", func(t *testing.T) { + t.Parallel() + + comp, err := CreateBootstrapComponents(createArgsBootstrapComponentsHolder()) + require.NoError(t, err) + require.NotNil(t, comp) + + require.Nil(t, comp.Create()) + require.Nil(t, comp.Close()) + }) + t.Run("NewBootstrapComponentsFactory failure should error", func(t *testing.T) { + t.Parallel() + + args := createArgsBootstrapComponentsHolder() + args.StatusCoreComponents = &factory.StatusCoreComponentsStub{} + comp, err := CreateBootstrapComponents(args) + require.Error(t, err) + require.Nil(t, comp) + }) + t.Run("managedBootstrapCreate failure should error", func(t *testing.T) { + t.Parallel() + + args := createArgsBootstrapComponentsHolder() + args.StatusCoreComponents = &factory.StatusCoreComponentsStub{ + TrieSyncStatisticsField: &testscommon.SizeSyncStatisticsHandlerStub{}, + AppStatusHandlerField: &statusHandler.AppStatusHandlerStub{}, + } + comp, err := CreateBootstrapComponents(args) + require.Error(t, err) + require.Nil(t, comp) + }) +} + +func TestBootstrapComponentsHolder_IsInterfaceNil(t *testing.T) { + t.Parallel() + + var comp *bootstrapComponentsHolder + require.True(t, comp.IsInterfaceNil()) + + comp, _ = CreateBootstrapComponents(createArgsBootstrapComponentsHolder()) + require.False(t, comp.IsInterfaceNil()) + require.Nil(t, comp.Close()) +} + +func TestBootstrapComponentsHolder_Getters(t *testing.T) { + t.Parallel() + + comp, err := CreateBootstrapComponents(createArgsBootstrapComponentsHolder()) + require.NoError(t, err) + + require.NotNil(t, comp.EpochStartBootstrapper()) + require.NotNil(t, comp.EpochBootstrapParams()) + require.NotEmpty(t, comp.NodeType()) 
+ require.NotNil(t, comp.ShardCoordinator()) + require.NotNil(t, comp.VersionedHeaderFactory()) + require.NotNil(t, comp.HeaderVersionHandler()) + require.NotNil(t, comp.HeaderIntegrityVerifier()) + require.NotNil(t, comp.GuardedAccountHandler()) + require.NotNil(t, comp.NodesCoordinatorRegistryFactory()) + require.Nil(t, comp.CheckSubcomponents()) + require.Empty(t, comp.String()) + require.Nil(t, comp.Close()) +} diff --git a/node/chainSimulator/components/closeHandler.go b/node/chainSimulator/components/closeHandler.go new file mode 100644 index 00000000000..19615b50210 --- /dev/null +++ b/node/chainSimulator/components/closeHandler.go @@ -0,0 +1,82 @@ +package components + +import ( + "errors" + "fmt" + "io" + "runtime/debug" + "strings" + "sync" + + "github.com/multiversx/mx-chain-core-go/core/check" +) + +// ErrClose signals that a close error occurred +var ErrClose = errors.New("error while closing inner components") + +type errorlessCloser interface { + Close() +} + +type allCloser interface { + CloseAll() error +} + +type closeHandler struct { + mut sync.RWMutex + components []interface{} +} + +// NewCloseHandler create a new closeHandler instance +func NewCloseHandler() *closeHandler { + return &closeHandler{ + components: make([]interface{}, 0), + } +} + +// AddComponent will try to add a component to the inner list if that component is not nil +func (handler *closeHandler) AddComponent(component interface{}) { + if check.IfNilReflect(component) { + log.Error("programming error in closeHandler.AddComponent: nil component", "stack", string(debug.Stack())) + return + } + + handler.mut.Lock() + handler.components = append(handler.components, component) + handler.mut.Unlock() +} + +// Close will try to close all components, wrapping errors, if necessary +func (handler *closeHandler) Close() error { + handler.mut.RLock() + defer handler.mut.RUnlock() + + var errorStrings []string + for _, component := range handler.components { + var err error + + switch t := component.(type) { + case errorlessCloser: + t.Close() + case io.Closer: + err = t.Close() + case allCloser: + err = t.CloseAll() + } + + if err != nil { + errorStrings = append(errorStrings, fmt.Errorf("%w while closing the component of type %T", err, component).Error()) + } + } + + return AggregateErrors(errorStrings, ErrClose) +} + +// AggregateErrors can aggregate all provided error strings into a single error variable +func AggregateErrors(errorStrings []string, baseError error) error { + if len(errorStrings) == 0 { + return nil + } + + return fmt.Errorf("%w %s", baseError, strings.Join(errorStrings, ", ")) +} diff --git a/node/chainSimulator/components/closeHandler_test.go b/node/chainSimulator/components/closeHandler_test.go new file mode 100644 index 00000000000..f8a88576c3c --- /dev/null +++ b/node/chainSimulator/components/closeHandler_test.go @@ -0,0 +1,69 @@ +package components + +import ( + "strings" + "testing" + + "github.com/stretchr/testify/require" +) + +// localErrorlessCloser implements errorlessCloser interface +type localErrorlessCloser struct { + wasCalled bool +} + +// Close - +func (closer *localErrorlessCloser) Close() { + closer.wasCalled = true +} + +// localCloser implements io.Closer interface +type localCloser struct { + wasCalled bool + expectedError error +} + +// Close - +func (closer *localCloser) Close() error { + closer.wasCalled = true + return closer.expectedError +} + +// localCloseAllHandler implements allCloser interface +type localCloseAllHandler struct { + wasCalled bool + 
expectedError error +} + +// CloseAll - +func (closer *localCloseAllHandler) CloseAll() error { + closer.wasCalled = true + return closer.expectedError +} + +func TestCloseHandler(t *testing.T) { + t.Parallel() + + handler := NewCloseHandler() + require.NotNil(t, handler) + + handler.AddComponent(nil) // for coverage only + + lec := &localErrorlessCloser{} + handler.AddComponent(lec) + + lcNoError := &localCloser{} + handler.AddComponent(lcNoError) + + lcWithError := &localCloser{expectedError: expectedErr} + handler.AddComponent(lcWithError) + + lcahNoError := &localCloseAllHandler{} + handler.AddComponent(lcahNoError) + + lcahWithError := &localCloseAllHandler{expectedError: expectedErr} + handler.AddComponent(lcahWithError) + + err := handler.Close() + require.True(t, strings.Contains(err.Error(), expectedErr.Error())) +} diff --git a/node/chainSimulator/components/coreComponents.go b/node/chainSimulator/components/coreComponents.go new file mode 100644 index 00000000000..08c7105e0ef --- /dev/null +++ b/node/chainSimulator/components/coreComponents.go @@ -0,0 +1,457 @@ +package components + +import ( + "bytes" + "sync" + "time" + + "github.com/multiversx/mx-chain-core-go/core" + "github.com/multiversx/mx-chain-core-go/core/nodetype" + "github.com/multiversx/mx-chain-core-go/core/versioning" + "github.com/multiversx/mx-chain-core-go/core/watchdog" + "github.com/multiversx/mx-chain-core-go/data/endProcess" + "github.com/multiversx/mx-chain-core-go/data/typeConverters" + "github.com/multiversx/mx-chain-core-go/data/typeConverters/uint64ByteSlice" + "github.com/multiversx/mx-chain-core-go/hashing" + hashingFactory "github.com/multiversx/mx-chain-core-go/hashing/factory" + "github.com/multiversx/mx-chain-core-go/marshal" + marshalFactory "github.com/multiversx/mx-chain-core-go/marshal/factory" + "github.com/multiversx/mx-chain-go/common" + "github.com/multiversx/mx-chain-go/common/enablers" + factoryPubKey "github.com/multiversx/mx-chain-go/common/factory" + "github.com/multiversx/mx-chain-go/common/forking" + "github.com/multiversx/mx-chain-go/config" + "github.com/multiversx/mx-chain-go/consensus" + "github.com/multiversx/mx-chain-go/consensus/mock" + "github.com/multiversx/mx-chain-go/epochStart/notifier" + "github.com/multiversx/mx-chain-go/factory" + "github.com/multiversx/mx-chain-go/ntp" + "github.com/multiversx/mx-chain-go/process" + "github.com/multiversx/mx-chain-go/process/economics" + "github.com/multiversx/mx-chain-go/process/rating" + "github.com/multiversx/mx-chain-go/sharding" + "github.com/multiversx/mx-chain-go/sharding/nodesCoordinator" + "github.com/multiversx/mx-chain-go/statusHandler" + "github.com/multiversx/mx-chain-go/storage" + storageFactory "github.com/multiversx/mx-chain-go/storage/factory" + "github.com/multiversx/mx-chain-go/testscommon" +) + +type coreComponentsHolder struct { + closeHandler *closeHandler + internalMarshaller marshal.Marshalizer + txMarshaller marshal.Marshalizer + vmMarshaller marshal.Marshalizer + hasher hashing.Hasher + txSignHasher hashing.Hasher + uint64SliceConverter typeConverters.Uint64ByteSliceConverter + addressPubKeyConverter core.PubkeyConverter + validatorPubKeyConverter core.PubkeyConverter + pathHandler storage.PathManagerHandler + watchdog core.WatchdogTimer + alarmScheduler core.TimersScheduler + syncTimer ntp.SyncTimer + roundHandler consensus.RoundHandler + economicsData process.EconomicsDataHandler + apiEconomicsData process.EconomicsDataHandler + ratingsData process.RatingsInfoHandler + rater 
sharding.PeerAccountListAndRatingHandler + genesisNodesSetup sharding.GenesisNodesSetupHandler + nodesShuffler nodesCoordinator.NodesShuffler + epochNotifier process.EpochNotifier + enableRoundsHandler process.EnableRoundsHandler + roundNotifier process.RoundNotifier + epochStartNotifierWithConfirm factory.EpochStartNotifierWithConfirm + chanStopNodeProcess chan endProcess.ArgEndProcess + genesisTime time.Time + chainID string + minTransactionVersion uint32 + txVersionChecker process.TxVersionCheckerHandler + encodedAddressLen uint32 + nodeTypeProvider core.NodeTypeProviderHandler + wasmVMChangeLocker common.Locker + processStatusHandler common.ProcessStatusHandler + hardforkTriggerPubKey []byte + enableEpochsHandler common.EnableEpochsHandler +} + +// ArgsCoreComponentsHolder will hold arguments needed for the core components holder +type ArgsCoreComponentsHolder struct { + Config config.Config + EnableEpochsConfig config.EnableEpochs + RoundsConfig config.RoundConfig + EconomicsConfig config.EconomicsConfig + RatingConfig config.RatingsConfig + ChanStopNodeProcess chan endProcess.ArgEndProcess + InitialRound int64 + NodesSetupPath string + GasScheduleFilename string + NumShards uint32 + WorkingDir string + + MinNodesPerShard uint32 + MinNodesMeta uint32 + RoundDurationInMs uint64 +} + +// CreateCoreComponents will create a new instance of factory.CoreComponentsHolder +func CreateCoreComponents(args ArgsCoreComponentsHolder) (*coreComponentsHolder, error) { + var err error + instance := &coreComponentsHolder{ + closeHandler: NewCloseHandler(), + } + + instance.internalMarshaller, err = marshalFactory.NewMarshalizer(args.Config.Marshalizer.Type) + if err != nil { + return nil, err + } + instance.txMarshaller, err = marshalFactory.NewMarshalizer(args.Config.TxSignMarshalizer.Type) + if err != nil { + return nil, err + } + instance.vmMarshaller, err = marshalFactory.NewMarshalizer(args.Config.VmMarshalizer.Type) + if err != nil { + return nil, err + } + instance.hasher, err = hashingFactory.NewHasher(args.Config.Hasher.Type) + if err != nil { + return nil, err + } + instance.txSignHasher, err = hashingFactory.NewHasher(args.Config.TxSignHasher.Type) + if err != nil { + return nil, err + } + instance.uint64SliceConverter = uint64ByteSlice.NewBigEndianConverter() + instance.addressPubKeyConverter, err = factoryPubKey.NewPubkeyConverter(args.Config.AddressPubkeyConverter) + if err != nil { + return nil, err + } + instance.validatorPubKeyConverter, err = factoryPubKey.NewPubkeyConverter(args.Config.ValidatorPubkeyConverter) + if err != nil { + return nil, err + } + + instance.pathHandler, err = storageFactory.CreatePathManager( + storageFactory.ArgCreatePathManager{ + WorkingDir: args.WorkingDir, + ChainID: args.Config.GeneralSettings.ChainID, + }, + ) + if err != nil { + return nil, err + } + + instance.watchdog = &watchdog.DisabledWatchdog{} + instance.alarmScheduler = &mock.AlarmSchedulerStub{} + instance.syncTimer = &testscommon.SyncTimerStub{} + + instance.genesisNodesSetup, err = sharding.NewNodesSetup(args.NodesSetupPath, instance.addressPubKeyConverter, instance.validatorPubKeyConverter, args.NumShards) + if err != nil { + return nil, err + } + + roundDuration := time.Millisecond * time.Duration(instance.genesisNodesSetup.GetRoundDuration()) + instance.roundHandler = NewManualRoundHandler(instance.genesisNodesSetup.GetStartTime(), roundDuration, args.InitialRound) + + instance.wasmVMChangeLocker = &sync.RWMutex{} + instance.txVersionChecker = 
versioning.NewTxVersionChecker(args.Config.GeneralSettings.MinTransactionVersion)
+	instance.epochNotifier = forking.NewGenericEpochNotifier()
+	instance.enableEpochsHandler, err = enablers.NewEnableEpochsHandler(args.EnableEpochsConfig, instance.epochNotifier)
+	if err != nil {
+		return nil, err
+	}
+
+	argsEconomicsHandler := economics.ArgsNewEconomicsData{
+		TxVersionChecker:    instance.txVersionChecker,
+		Economics:           &args.EconomicsConfig,
+		EpochNotifier:       instance.epochNotifier,
+		EnableEpochsHandler: instance.enableEpochsHandler,
+	}
+
+	instance.economicsData, err = economics.NewEconomicsData(argsEconomicsHandler)
+	if err != nil {
+		return nil, err
+	}
+	instance.apiEconomicsData = instance.economicsData
+
+	// TODO fix this min nodes per shard to be configurable
+	instance.ratingsData, err = rating.NewRatingsData(rating.RatingsDataArg{
+		Config:                   args.RatingConfig,
+		ShardConsensusSize:       1,
+		MetaConsensusSize:        1,
+		ShardMinNodes:            args.MinNodesPerShard,
+		MetaMinNodes:             args.MinNodesMeta,
+		RoundDurationMiliseconds: args.RoundDurationInMs,
+	})
+	if err != nil {
+		return nil, err
+	}
+
+	instance.rater, err = rating.NewBlockSigningRater(instance.ratingsData)
+	if err != nil {
+		return nil, err
+	}
+
+	instance.nodesShuffler, err = nodesCoordinator.NewHashValidatorsShuffler(&nodesCoordinator.NodesShufflerArgs{
+		NodesShard:           args.MinNodesPerShard,
+		NodesMeta:            args.MinNodesMeta,
+		Hysteresis:           0,
+		Adaptivity:           false,
+		ShuffleBetweenShards: true,
+		MaxNodesEnableConfig: args.EnableEpochsConfig.MaxNodesChangeEnableEpoch,
+		EnableEpochsHandler:  instance.enableEpochsHandler,
+		EnableEpochs:         args.EnableEpochsConfig,
+	})
+	if err != nil {
+		return nil, err
+	}
+
+	instance.roundNotifier = forking.NewGenericRoundNotifier()
+	instance.enableRoundsHandler, err = enablers.NewEnableRoundsHandler(args.RoundsConfig, instance.roundNotifier)
+	if err != nil {
+		return nil, err
+	}
+
+	instance.epochStartNotifierWithConfirm = notifier.NewEpochStartSubscriptionHandler()
+	instance.chanStopNodeProcess = args.ChanStopNodeProcess
+	instance.genesisTime = time.Unix(instance.genesisNodesSetup.GetStartTime(), 0)
+	instance.chainID = args.Config.GeneralSettings.ChainID
+	instance.minTransactionVersion = args.Config.GeneralSettings.MinTransactionVersion
+	instance.encodedAddressLen, err = computeEncodedAddressLen(instance.addressPubKeyConverter)
+	if err != nil {
+		return nil, err
+	}
+
+	instance.nodeTypeProvider = nodetype.NewNodeTypeProvider(core.NodeTypeObserver)
+	instance.processStatusHandler = statusHandler.NewProcessStatusHandler()
+
+	pubKeyBytes, err := instance.validatorPubKeyConverter.Decode(args.Config.Hardfork.PublicKeyToListenFrom)
+	if err != nil {
+		return nil, err
+	}
+	instance.hardforkTriggerPubKey = pubKeyBytes
+
+	instance.collectClosableComponents()
+
+	return instance, nil
+}
+
+func computeEncodedAddressLen(converter core.PubkeyConverter) (uint32, error) {
+	emptyAddress := bytes.Repeat([]byte{0}, converter.Len())
+	encodedEmptyAddress, err := converter.Encode(emptyAddress)
+	if err != nil {
+		return 0, err
+	}
+
+	return uint32(len(encodedEmptyAddress)), nil
+}
+
+// InternalMarshalizer will return the internal marshaller
+func (c *coreComponentsHolder) InternalMarshalizer() marshal.Marshalizer {
+	return c.internalMarshaller
+}
+
+// SetInternalMarshalizer will set the internal marshaller
+func (c *coreComponentsHolder) SetInternalMarshalizer(marshalizer marshal.Marshalizer) error {
+	c.internalMarshaller = marshalizer
+	return nil
+}
+
+// 
TxMarshalizer will return the transaction marshaller +func (c *coreComponentsHolder) TxMarshalizer() marshal.Marshalizer { + return c.txMarshaller +} + +// VmMarshalizer will return the vm marshaller +func (c *coreComponentsHolder) VmMarshalizer() marshal.Marshalizer { + return c.vmMarshaller +} + +// Hasher will return the hasher +func (c *coreComponentsHolder) Hasher() hashing.Hasher { + return c.hasher +} + +// TxSignHasher will return the transaction sign hasher +func (c *coreComponentsHolder) TxSignHasher() hashing.Hasher { + return c.txSignHasher +} + +// Uint64ByteSliceConverter will return the uint64 to slice converter +func (c *coreComponentsHolder) Uint64ByteSliceConverter() typeConverters.Uint64ByteSliceConverter { + return c.uint64SliceConverter +} + +// AddressPubKeyConverter will return the address pub key converter +func (c *coreComponentsHolder) AddressPubKeyConverter() core.PubkeyConverter { + return c.addressPubKeyConverter +} + +// ValidatorPubKeyConverter will return the validator pub key converter +func (c *coreComponentsHolder) ValidatorPubKeyConverter() core.PubkeyConverter { + return c.validatorPubKeyConverter +} + +// PathHandler will return the path handler +func (c *coreComponentsHolder) PathHandler() storage.PathManagerHandler { + return c.pathHandler +} + +// Watchdog will return the watch dog +func (c *coreComponentsHolder) Watchdog() core.WatchdogTimer { + return c.watchdog +} + +// AlarmScheduler will return the alarm scheduler +func (c *coreComponentsHolder) AlarmScheduler() core.TimersScheduler { + return c.alarmScheduler +} + +// SyncTimer will return the sync timer +func (c *coreComponentsHolder) SyncTimer() ntp.SyncTimer { + return c.syncTimer +} + +// RoundHandler will return the round handler +func (c *coreComponentsHolder) RoundHandler() consensus.RoundHandler { + return c.roundHandler +} + +// EconomicsData will return the economics data handler +func (c *coreComponentsHolder) EconomicsData() process.EconomicsDataHandler { + return c.economicsData +} + +// APIEconomicsData will return the api economics data handler +func (c *coreComponentsHolder) APIEconomicsData() process.EconomicsDataHandler { + return c.apiEconomicsData +} + +// RatingsData will return the ratings data handler +func (c *coreComponentsHolder) RatingsData() process.RatingsInfoHandler { + return c.ratingsData +} + +// Rater will return the rater handler +func (c *coreComponentsHolder) Rater() sharding.PeerAccountListAndRatingHandler { + return c.rater +} + +// GenesisNodesSetup will return the genesis nodes setup handler +func (c *coreComponentsHolder) GenesisNodesSetup() sharding.GenesisNodesSetupHandler { + return c.genesisNodesSetup +} + +// NodesShuffler will return the nodes shuffler +func (c *coreComponentsHolder) NodesShuffler() nodesCoordinator.NodesShuffler { + return c.nodesShuffler +} + +// EpochNotifier will return the epoch notifier +func (c *coreComponentsHolder) EpochNotifier() process.EpochNotifier { + return c.epochNotifier +} + +// EnableRoundsHandler will return the enable rounds handler +func (c *coreComponentsHolder) EnableRoundsHandler() process.EnableRoundsHandler { + return c.enableRoundsHandler +} + +// RoundNotifier will return the round notifier +func (c *coreComponentsHolder) RoundNotifier() process.RoundNotifier { + return c.roundNotifier +} + +// EpochStartNotifierWithConfirm will return the epoch start notifier with confirm +func (c *coreComponentsHolder) EpochStartNotifierWithConfirm() factory.EpochStartNotifierWithConfirm { + return 
c.epochStartNotifierWithConfirm +} + +// ChanStopNodeProcess will return the channel for stop node process +func (c *coreComponentsHolder) ChanStopNodeProcess() chan endProcess.ArgEndProcess { + return c.chanStopNodeProcess +} + +// GenesisTime will return the genesis time +func (c *coreComponentsHolder) GenesisTime() time.Time { + return c.genesisTime +} + +// ChainID will return the chain id +func (c *coreComponentsHolder) ChainID() string { + return c.chainID +} + +// MinTransactionVersion will return the min transaction version +func (c *coreComponentsHolder) MinTransactionVersion() uint32 { + return c.minTransactionVersion +} + +// TxVersionChecker will return the tx version checker +func (c *coreComponentsHolder) TxVersionChecker() process.TxVersionCheckerHandler { + return c.txVersionChecker +} + +// EncodedAddressLen will return the len of encoded address +func (c *coreComponentsHolder) EncodedAddressLen() uint32 { + return c.encodedAddressLen +} + +// NodeTypeProvider will return the node type provider +func (c *coreComponentsHolder) NodeTypeProvider() core.NodeTypeProviderHandler { + return c.nodeTypeProvider +} + +// WasmVMChangeLocker will return the wasm vm change locker +func (c *coreComponentsHolder) WasmVMChangeLocker() common.Locker { + return c.wasmVMChangeLocker +} + +// ProcessStatusHandler will return the process status handler +func (c *coreComponentsHolder) ProcessStatusHandler() common.ProcessStatusHandler { + return c.processStatusHandler +} + +// HardforkTriggerPubKey will return the pub key for the hard fork trigger +func (c *coreComponentsHolder) HardforkTriggerPubKey() []byte { + return c.hardforkTriggerPubKey +} + +// EnableEpochsHandler will return the enable epoch handler +func (c *coreComponentsHolder) EnableEpochsHandler() common.EnableEpochsHandler { + return c.enableEpochsHandler +} + +func (c *coreComponentsHolder) collectClosableComponents() { + c.closeHandler.AddComponent(c.alarmScheduler) + c.closeHandler.AddComponent(c.syncTimer) +} + +// Close will call the Close methods on all inner components +func (c *coreComponentsHolder) Close() error { + return c.closeHandler.Close() +} + +// IsInterfaceNil returns true if there is no value under the interface +func (c *coreComponentsHolder) IsInterfaceNil() bool { + return c == nil +} + +// Create will do nothing +func (c *coreComponentsHolder) Create() error { + return nil +} + +// CheckSubcomponents will do nothing +func (c *coreComponentsHolder) CheckSubcomponents() error { + return nil +} + +// String will do nothing +func (c *coreComponentsHolder) String() string { + return "" +} diff --git a/node/chainSimulator/components/coreComponents_test.go b/node/chainSimulator/components/coreComponents_test.go new file mode 100644 index 00000000000..619eb9d3a2e --- /dev/null +++ b/node/chainSimulator/components/coreComponents_test.go @@ -0,0 +1,303 @@ +package components + +import ( + "encoding/hex" + "testing" + + "github.com/multiversx/mx-chain-core-go/data/endProcess" + "github.com/multiversx/mx-chain-go/config" + "github.com/stretchr/testify/require" +) + +func createArgsCoreComponentsHolder() ArgsCoreComponentsHolder { + return ArgsCoreComponentsHolder{ + Config: config.Config{ + Marshalizer: config.MarshalizerConfig{ + Type: "json", + }, + TxSignMarshalizer: config.TypeConfig{ + Type: "json", + }, + VmMarshalizer: config.TypeConfig{ + Type: "json", + }, + Hasher: config.TypeConfig{ + Type: "blake2b", + }, + TxSignHasher: config.TypeConfig{ + Type: "blake2b", + }, + AddressPubkeyConverter: 
config.PubkeyConfig{ + Length: 32, + Type: "hex", + }, + ValidatorPubkeyConverter: config.PubkeyConfig{ + Length: 128, + Type: "hex", + }, + GeneralSettings: config.GeneralSettingsConfig{ + ChainID: "T", + MinTransactionVersion: 1, + }, + Hardfork: config.HardforkConfig{ + PublicKeyToListenFrom: "41378f754e2c7b2745208c3ed21b151d297acdc84c3aca00b9e292cf28ec2d444771070157ea7760ed83c26f4fed387d0077e00b563a95825dac2cbc349fc0025ccf774e37b0a98ad9724d30e90f8c29b4091ccb738ed9ffc0573df776ee9ea30b3c038b55e532760ea4a8f152f2a52848020e5cee1cc537f2c2323399723081", + }, + }, + EnableEpochsConfig: config.EnableEpochs{}, + RoundsConfig: config.RoundConfig{ + RoundActivations: map[string]config.ActivationRoundByName{ + "DisableAsyncCallV1": { + Round: "18446744073709551615", + }, + }, + }, + EconomicsConfig: config.EconomicsConfig{ + GlobalSettings: config.GlobalSettings{ + GenesisTotalSupply: "2000000000000000000000", + MinimumInflation: 0, + YearSettings: []*config.YearSetting{ + { + Year: 0, + MaximumInflation: 0.01, + }, + }, + }, + FeeSettings: config.FeeSettings{ + GasLimitSettings: []config.GasLimitSetting{ + { + MaxGasLimitPerBlock: "10000000000", + MaxGasLimitPerMiniBlock: "10000000000", + MaxGasLimitPerMetaBlock: "10000000000", + MaxGasLimitPerMetaMiniBlock: "10000000000", + MaxGasLimitPerTx: "10000000000", + MinGasLimit: "10", + ExtraGasLimitGuardedTx: "50000", + }, + }, + GasPriceModifier: 0.01, + MinGasPrice: "100", + GasPerDataByte: "1", + MaxGasPriceSetGuardian: "100", + }, + RewardsSettings: config.RewardsSettings{ + RewardsConfigByEpoch: []config.EpochRewardSettings{ + { + LeaderPercentage: 0.1, + DeveloperPercentage: 0.1, + ProtocolSustainabilityPercentage: 0.1, + ProtocolSustainabilityAddress: "erd1932eft30w753xyvme8d49qejgkjc09n5e49w4mwdjtm0neld797su0dlxp", + TopUpGradientPoint: "300000000000000000000", + TopUpFactor: 0.25, + EpochEnable: 0, + }, + }, + }, + }, + RatingConfig: config.RatingsConfig{ + General: config.General{ + StartRating: 4000, + MaxRating: 10000, + MinRating: 1, + SignedBlocksThreshold: 0.025, + SelectionChances: []*config.SelectionChance{ + {MaxThreshold: 0, ChancePercent: 1}, + {MaxThreshold: 1, ChancePercent: 2}, + {MaxThreshold: 10000, ChancePercent: 4}, + }, + }, + ShardChain: config.ShardChain{ + RatingSteps: config.RatingSteps{ + HoursToMaxRatingFromStartRating: 2, + ProposerValidatorImportance: 1, + ProposerDecreaseFactor: -4, + ValidatorDecreaseFactor: -4, + ConsecutiveMissedBlocksPenalty: 1.2, + }, + }, + MetaChain: config.MetaChain{ + RatingSteps: config.RatingSteps{ + HoursToMaxRatingFromStartRating: 2, + ProposerValidatorImportance: 1, + ProposerDecreaseFactor: -4, + ValidatorDecreaseFactor: -4, + ConsecutiveMissedBlocksPenalty: 1.3, + }, + }, + }, + ChanStopNodeProcess: make(chan endProcess.ArgEndProcess), + InitialRound: 0, + NodesSetupPath: "../../../sharding/mock/testdata/nodesSetupMock.json", + GasScheduleFilename: "../../../cmd/node/config/gasSchedules/gasScheduleV7.toml", + NumShards: 3, + WorkingDir: ".", + MinNodesPerShard: 1, + MinNodesMeta: 1, + RoundDurationInMs: 6000, + } +} + +func TestCreateCoreComponents(t *testing.T) { + t.Parallel() + + t.Run("should work", func(t *testing.T) { + t.Parallel() + + comp, err := CreateCoreComponents(createArgsCoreComponentsHolder()) + require.NoError(t, err) + require.NotNil(t, comp) + + require.Nil(t, comp.Create()) + require.Nil(t, comp.Close()) + }) + t.Run("internal NewMarshalizer failure should error", func(t *testing.T) { + t.Parallel() + + args := createArgsCoreComponentsHolder() + 
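+			// Each failure sub-test below starts from the valid arguments returned by
+			// createArgsCoreComponentsHolder and invalidates exactly one field, expecting
+			// CreateCoreComponents to propagate the corresponding error and return a nil holder.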
args.Config.Marshalizer.Type = "invalid" + comp, err := CreateCoreComponents(args) + require.Error(t, err) + require.Nil(t, comp) + }) + t.Run("tx NewMarshalizer failure should error", func(t *testing.T) { + t.Parallel() + + args := createArgsCoreComponentsHolder() + args.Config.TxSignMarshalizer.Type = "invalid" + comp, err := CreateCoreComponents(args) + require.Error(t, err) + require.Nil(t, comp) + }) + t.Run("vm NewMarshalizer failure should error", func(t *testing.T) { + t.Parallel() + + args := createArgsCoreComponentsHolder() + args.Config.VmMarshalizer.Type = "invalid" + comp, err := CreateCoreComponents(args) + require.Error(t, err) + require.Nil(t, comp) + }) + t.Run("main NewHasher failure should error", func(t *testing.T) { + t.Parallel() + + args := createArgsCoreComponentsHolder() + args.Config.Hasher.Type = "invalid" + comp, err := CreateCoreComponents(args) + require.Error(t, err) + require.Nil(t, comp) + }) + t.Run("tx NewHasher failure should error", func(t *testing.T) { + t.Parallel() + + args := createArgsCoreComponentsHolder() + args.Config.TxSignHasher.Type = "invalid" + comp, err := CreateCoreComponents(args) + require.Error(t, err) + require.Nil(t, comp) + }) + t.Run("address NewPubkeyConverter failure should error", func(t *testing.T) { + t.Parallel() + + args := createArgsCoreComponentsHolder() + args.Config.AddressPubkeyConverter.Type = "invalid" + comp, err := CreateCoreComponents(args) + require.Error(t, err) + require.Nil(t, comp) + }) + t.Run("validator NewPubkeyConverter failure should error", func(t *testing.T) { + t.Parallel() + + args := createArgsCoreComponentsHolder() + args.Config.ValidatorPubkeyConverter.Type = "invalid" + comp, err := CreateCoreComponents(args) + require.Error(t, err) + require.Nil(t, comp) + }) + t.Run("NewNodesSetup failure should error", func(t *testing.T) { + t.Parallel() + + args := createArgsCoreComponentsHolder() + args.NumShards = 0 + comp, err := CreateCoreComponents(args) + require.Error(t, err) + require.Nil(t, comp) + }) + t.Run("NewEconomicsData failure should error", func(t *testing.T) { + t.Parallel() + + args := createArgsCoreComponentsHolder() + args.EconomicsConfig.GlobalSettings.MinimumInflation = -1.0 + comp, err := CreateCoreComponents(args) + require.Error(t, err) + require.Nil(t, comp) + }) + t.Run("validatorPubKeyConverter.Decode failure should error", func(t *testing.T) { + t.Parallel() + + args := createArgsCoreComponentsHolder() + args.Config.Hardfork.PublicKeyToListenFrom = "invalid" + comp, err := CreateCoreComponents(args) + require.Error(t, err) + require.Nil(t, comp) + }) +} + +func TestCoreComponentsHolder_IsInterfaceNil(t *testing.T) { + t.Parallel() + + var comp *coreComponentsHolder + require.True(t, comp.IsInterfaceNil()) + + comp, _ = CreateCoreComponents(createArgsCoreComponentsHolder()) + require.False(t, comp.IsInterfaceNil()) + require.Nil(t, comp.Close()) +} + +func TestCoreComponents_GettersSetters(t *testing.T) { + t.Parallel() + + comp, err := CreateCoreComponents(createArgsCoreComponentsHolder()) + require.NoError(t, err) + + require.NotNil(t, comp.InternalMarshalizer()) + require.Nil(t, comp.SetInternalMarshalizer(nil)) + require.Nil(t, comp.InternalMarshalizer()) + + require.NotNil(t, comp.TxMarshalizer()) + require.NotNil(t, comp.VmMarshalizer()) + require.NotNil(t, comp.Hasher()) + require.NotNil(t, comp.TxSignHasher()) + require.NotNil(t, comp.Uint64ByteSliceConverter()) + require.NotNil(t, comp.AddressPubKeyConverter()) + require.NotNil(t, comp.ValidatorPubKeyConverter()) + 
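+	// The assertions below only verify that every getter of coreComponentsHolder returns a
+	// usable value after a successful CreateCoreComponents call. Note that the expected
+	// EncodedAddressLen of 64 follows from the hex address converter configured over
+	// 32-byte keys (two hex characters per byte).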
require.NotNil(t, comp.PathHandler()) + require.NotNil(t, comp.Watchdog()) + require.NotNil(t, comp.AlarmScheduler()) + require.NotNil(t, comp.SyncTimer()) + require.NotNil(t, comp.RoundHandler()) + require.NotNil(t, comp.EconomicsData()) + require.NotNil(t, comp.APIEconomicsData()) + require.NotNil(t, comp.RatingsData()) + require.NotNil(t, comp.Rater()) + require.NotNil(t, comp.GenesisNodesSetup()) + require.NotNil(t, comp.NodesShuffler()) + require.NotNil(t, comp.EpochNotifier()) + require.NotNil(t, comp.EnableRoundsHandler()) + require.NotNil(t, comp.RoundNotifier()) + require.NotNil(t, comp.EpochStartNotifierWithConfirm()) + require.NotNil(t, comp.ChanStopNodeProcess()) + require.NotNil(t, comp.GenesisTime()) + require.Equal(t, "T", comp.ChainID()) + require.Equal(t, uint32(1), comp.MinTransactionVersion()) + require.NotNil(t, comp.TxVersionChecker()) + require.Equal(t, uint32(64), comp.EncodedAddressLen()) + hfPk, _ := hex.DecodeString("41378f754e2c7b2745208c3ed21b151d297acdc84c3aca00b9e292cf28ec2d444771070157ea7760ed83c26f4fed387d0077e00b563a95825dac2cbc349fc0025ccf774e37b0a98ad9724d30e90f8c29b4091ccb738ed9ffc0573df776ee9ea30b3c038b55e532760ea4a8f152f2a52848020e5cee1cc537f2c2323399723081") + require.Equal(t, hfPk, comp.HardforkTriggerPubKey()) + require.NotNil(t, comp.NodeTypeProvider()) + require.NotNil(t, comp.WasmVMChangeLocker()) + require.NotNil(t, comp.ProcessStatusHandler()) + require.NotNil(t, comp.ProcessStatusHandler()) + require.NotNil(t, comp.EnableEpochsHandler()) + require.Nil(t, comp.CheckSubcomponents()) + require.Empty(t, comp.String()) + require.Nil(t, comp.Close()) +} diff --git a/node/chainSimulator/components/cryptoComponents.go b/node/chainSimulator/components/cryptoComponents.go new file mode 100644 index 00000000000..3fcd7e205b7 --- /dev/null +++ b/node/chainSimulator/components/cryptoComponents.go @@ -0,0 +1,269 @@ +package components + +import ( + "fmt" + "io" + + "github.com/multiversx/mx-chain-core-go/core" + crypto "github.com/multiversx/mx-chain-crypto-go" + "github.com/multiversx/mx-chain-crypto-go/signing/disabled/singlesig" + "github.com/multiversx/mx-chain-go/common" + cryptoCommon "github.com/multiversx/mx-chain-go/common/crypto" + "github.com/multiversx/mx-chain-go/config" + "github.com/multiversx/mx-chain-go/consensus" + "github.com/multiversx/mx-chain-go/factory" + cryptoComp "github.com/multiversx/mx-chain-go/factory/crypto" + "github.com/multiversx/mx-chain-go/vm" +) + +// ArgsCryptoComponentsHolder holds all arguments needed to create a crypto components holder +type ArgsCryptoComponentsHolder struct { + Config config.Config + EnableEpochsConfig config.EnableEpochs + Preferences config.Preferences + CoreComponentsHolder factory.CoreComponentsHolder + AllValidatorKeysPemFileName string + BypassTxSignatureCheck bool +} + +type cryptoComponentsHolder struct { + publicKey crypto.PublicKey + privateKey crypto.PrivateKey + p2pPublicKey crypto.PublicKey + p2pPrivateKey crypto.PrivateKey + p2pSingleSigner crypto.SingleSigner + txSingleSigner crypto.SingleSigner + blockSigner crypto.SingleSigner + multiSignerContainer cryptoCommon.MultiSignerContainer + peerSignatureHandler crypto.PeerSignatureHandler + blockSignKeyGen crypto.KeyGenerator + txSignKeyGen crypto.KeyGenerator + p2pKeyGen crypto.KeyGenerator + messageSignVerifier vm.MessageSignVerifier + consensusSigningHandler consensus.SigningHandler + managedPeersHolder common.ManagedPeersHolder + keysHandler consensus.KeysHandler + publicKeyBytes []byte + publicKeyString string + 
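+	// managedCryptoComponentsCloser keeps a reference to the managed crypto components created
+	// by the factory, so that Close can release the underlying resources when the holder is torn down.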
managedCryptoComponentsCloser io.Closer +} + +// CreateCryptoComponents will create a new instance of cryptoComponentsHolder +func CreateCryptoComponents(args ArgsCryptoComponentsHolder) (*cryptoComponentsHolder, error) { + instance := &cryptoComponentsHolder{} + + cryptoComponentsHandlerArgs := cryptoComp.CryptoComponentsFactoryArgs{ + Config: args.Config, + EnableEpochs: args.EnableEpochsConfig, + PrefsConfig: args.Preferences, + CoreComponentsHolder: args.CoreComponentsHolder, + KeyLoader: core.NewKeyLoader(), + ActivateBLSPubKeyMessageVerification: false, + IsInImportMode: false, + ImportModeNoSigCheck: false, + // set validator key pem file with a file that doesn't exist to all validators key pem file + ValidatorKeyPemFileName: "missing.pem", + AllValidatorKeysPemFileName: args.AllValidatorKeysPemFileName, + } + + cryptoComponentsFactory, err := cryptoComp.NewCryptoComponentsFactory(cryptoComponentsHandlerArgs) + if err != nil { + return nil, fmt.Errorf("NewCryptoComponentsFactory failed: %w", err) + } + + managedCryptoComponents, err := cryptoComp.NewManagedCryptoComponents(cryptoComponentsFactory) + if err != nil { + return nil, err + } + + err = managedCryptoComponents.Create() + if err != nil { + return nil, err + } + + instance.publicKey = managedCryptoComponents.PublicKey() + instance.privateKey = managedCryptoComponents.PrivateKey() + instance.publicKeyBytes, err = instance.publicKey.ToByteArray() + if err != nil { + return nil, err + } + instance.publicKeyString, err = args.CoreComponentsHolder.ValidatorPubKeyConverter().Encode(instance.publicKeyBytes) + if err != nil { + return nil, err + } + + instance.p2pPublicKey = managedCryptoComponents.P2pPublicKey() + instance.p2pPrivateKey = managedCryptoComponents.P2pPrivateKey() + instance.p2pSingleSigner = managedCryptoComponents.P2pSingleSigner() + instance.blockSigner = managedCryptoComponents.BlockSigner() + + instance.multiSignerContainer = managedCryptoComponents.MultiSignerContainer() + instance.peerSignatureHandler = managedCryptoComponents.PeerSignatureHandler() + instance.blockSignKeyGen = managedCryptoComponents.BlockSignKeyGen() + instance.txSignKeyGen = managedCryptoComponents.TxSignKeyGen() + instance.p2pKeyGen = managedCryptoComponents.P2pKeyGen() + instance.messageSignVerifier = managedCryptoComponents.MessageSignVerifier() + instance.consensusSigningHandler = managedCryptoComponents.ConsensusSigningHandler() + instance.managedPeersHolder = managedCryptoComponents.ManagedPeersHolder() + instance.keysHandler = managedCryptoComponents.KeysHandler() + instance.managedCryptoComponentsCloser = managedCryptoComponents + + if args.BypassTxSignatureCheck { + instance.txSingleSigner = &singlesig.DisabledSingleSig{} + } else { + instance.txSingleSigner = managedCryptoComponents.TxSingleSigner() + } + + return instance, nil +} + +// PublicKey will return the public key +func (c *cryptoComponentsHolder) PublicKey() crypto.PublicKey { + return c.publicKey +} + +// PrivateKey will return the private key +func (c *cryptoComponentsHolder) PrivateKey() crypto.PrivateKey { + return c.privateKey +} + +// PublicKeyString will return the private key string +func (c *cryptoComponentsHolder) PublicKeyString() string { + return c.publicKeyString +} + +// PublicKeyBytes will return the public key bytes +func (c *cryptoComponentsHolder) PublicKeyBytes() []byte { + return c.publicKeyBytes +} + +// P2pPublicKey will return the p2p public key +func (c *cryptoComponentsHolder) P2pPublicKey() crypto.PublicKey { + return c.p2pPublicKey +} + +// 
P2pPrivateKey will return the p2p private key +func (c *cryptoComponentsHolder) P2pPrivateKey() crypto.PrivateKey { + return c.p2pPrivateKey +} + +// P2pSingleSigner will return the p2p single signer +func (c *cryptoComponentsHolder) P2pSingleSigner() crypto.SingleSigner { + return c.p2pSingleSigner +} + +// TxSingleSigner will return the transaction single signer +func (c *cryptoComponentsHolder) TxSingleSigner() crypto.SingleSigner { + return c.txSingleSigner +} + +// BlockSigner will return the block signer +func (c *cryptoComponentsHolder) BlockSigner() crypto.SingleSigner { + return c.blockSigner +} + +// SetMultiSignerContainer will set the multi signer container +func (c *cryptoComponentsHolder) SetMultiSignerContainer(container cryptoCommon.MultiSignerContainer) error { + c.multiSignerContainer = container + + return nil +} + +// MultiSignerContainer will return the multi signer container +func (c *cryptoComponentsHolder) MultiSignerContainer() cryptoCommon.MultiSignerContainer { + return c.multiSignerContainer +} + +// GetMultiSigner will return the multi signer by epoch +func (c *cryptoComponentsHolder) GetMultiSigner(epoch uint32) (crypto.MultiSigner, error) { + return c.MultiSignerContainer().GetMultiSigner(epoch) +} + +// PeerSignatureHandler will return the peer signature handler +func (c *cryptoComponentsHolder) PeerSignatureHandler() crypto.PeerSignatureHandler { + return c.peerSignatureHandler +} + +// BlockSignKeyGen will return the block signer key generator +func (c *cryptoComponentsHolder) BlockSignKeyGen() crypto.KeyGenerator { + return c.blockSignKeyGen +} + +// TxSignKeyGen will return the transaction sign key generator +func (c *cryptoComponentsHolder) TxSignKeyGen() crypto.KeyGenerator { + return c.txSignKeyGen +} + +// P2pKeyGen will return the p2p key generator +func (c *cryptoComponentsHolder) P2pKeyGen() crypto.KeyGenerator { + return c.p2pKeyGen +} + +// MessageSignVerifier will return the message signature verifier +func (c *cryptoComponentsHolder) MessageSignVerifier() vm.MessageSignVerifier { + return c.messageSignVerifier +} + +// ConsensusSigningHandler will return the consensus signing handler +func (c *cryptoComponentsHolder) ConsensusSigningHandler() consensus.SigningHandler { + return c.consensusSigningHandler +} + +// ManagedPeersHolder will return the managed peer holder +func (c *cryptoComponentsHolder) ManagedPeersHolder() common.ManagedPeersHolder { + return c.managedPeersHolder +} + +// KeysHandler will return the keys handler +func (c *cryptoComponentsHolder) KeysHandler() consensus.KeysHandler { + return c.keysHandler +} + +// Clone will clone the cryptoComponentsHolder +func (c *cryptoComponentsHolder) Clone() interface{} { + return &cryptoComponentsHolder{ + publicKey: c.PublicKey(), + privateKey: c.PrivateKey(), + p2pPublicKey: c.P2pPublicKey(), + p2pPrivateKey: c.P2pPrivateKey(), + p2pSingleSigner: c.P2pSingleSigner(), + txSingleSigner: c.TxSingleSigner(), + blockSigner: c.BlockSigner(), + multiSignerContainer: c.MultiSignerContainer(), + peerSignatureHandler: c.PeerSignatureHandler(), + blockSignKeyGen: c.BlockSignKeyGen(), + txSignKeyGen: c.TxSignKeyGen(), + p2pKeyGen: c.P2pKeyGen(), + messageSignVerifier: c.MessageSignVerifier(), + consensusSigningHandler: c.ConsensusSigningHandler(), + managedPeersHolder: c.ManagedPeersHolder(), + keysHandler: c.KeysHandler(), + publicKeyBytes: c.PublicKeyBytes(), + publicKeyString: c.PublicKeyString(), + managedCryptoComponentsCloser: c.managedCryptoComponentsCloser, + } +} + +func (c 
*cryptoComponentsHolder) IsInterfaceNil() bool { + return c == nil +} + +// Create will do nothing +func (c *cryptoComponentsHolder) Create() error { + return nil +} + +// CheckSubcomponents will do nothing +func (c *cryptoComponentsHolder) CheckSubcomponents() error { + return nil +} + +// String will do nothing +func (c *cryptoComponentsHolder) String() string { + return "" +} + +// Close will do nothing +func (c *cryptoComponentsHolder) Close() error { + return c.managedCryptoComponentsCloser.Close() +} diff --git a/node/chainSimulator/components/cryptoComponents_test.go b/node/chainSimulator/components/cryptoComponents_test.go new file mode 100644 index 00000000000..fc8087f5cd4 --- /dev/null +++ b/node/chainSimulator/components/cryptoComponents_test.go @@ -0,0 +1,168 @@ +package components + +import ( + "testing" + + "github.com/multiversx/mx-chain-core-go/core" + "github.com/multiversx/mx-chain-go/config" + "github.com/multiversx/mx-chain-go/testscommon" + "github.com/multiversx/mx-chain-go/testscommon/factory" + "github.com/stretchr/testify/require" +) + +func createArgsCryptoComponentsHolder() ArgsCryptoComponentsHolder { + return ArgsCryptoComponentsHolder{ + Config: config.Config{ + Consensus: config.ConsensusConfig{ + Type: "bls", + }, + MultisigHasher: config.TypeConfig{ + Type: "blake2b", + }, + PublicKeyPIDSignature: config.CacheConfig{ + Capacity: 1000, + Type: "LRU", + }, + }, + EnableEpochsConfig: config.EnableEpochs{ + BLSMultiSignerEnableEpoch: []config.MultiSignerConfig{ + { + EnableEpoch: 0, + Type: "no-KOSK", + }, + { + EnableEpoch: 10, + Type: "KOSK", + }, + }, + }, + Preferences: config.Preferences{}, + CoreComponentsHolder: &factory.CoreComponentsHolderStub{ + ValidatorPubKeyConverterCalled: func() core.PubkeyConverter { + return &testscommon.PubkeyConverterStub{ + EncodeCalled: func(pkBytes []byte) (string, error) { + return "public key", nil + }, + } + }, + }, + AllValidatorKeysPemFileName: "allValidatorKeys.pem", + BypassTxSignatureCheck: false, + } +} + +func TestCreateCryptoComponents(t *testing.T) { + t.Parallel() + + t.Run("should work", func(t *testing.T) { + t.Parallel() + + comp, err := CreateCryptoComponents(createArgsCryptoComponentsHolder()) + require.NoError(t, err) + require.NotNil(t, comp) + + require.Nil(t, comp.Create()) + require.Nil(t, comp.Close()) + }) + t.Run("should work with bypass tx sig check", func(t *testing.T) { + t.Parallel() + + args := createArgsCryptoComponentsHolder() + args.BypassTxSignatureCheck = true + comp, err := CreateCryptoComponents(args) + require.NoError(t, err) + require.NotNil(t, comp) + + require.Nil(t, comp.Create()) + require.Nil(t, comp.Close()) + }) + t.Run("NewCryptoComponentsFactory failure should error", func(t *testing.T) { + t.Parallel() + + args := createArgsCryptoComponentsHolder() + args.CoreComponentsHolder = &factory.CoreComponentsHolderStub{ + ValidatorPubKeyConverterCalled: func() core.PubkeyConverter { + return nil + }, + } + comp, err := CreateCryptoComponents(args) + require.Error(t, err) + require.Nil(t, comp) + }) + t.Run("managedCryptoComponents.Create failure should error", func(t *testing.T) { + t.Parallel() + + args := createArgsCryptoComponentsHolder() + args.CoreComponentsHolder = &factory.CoreComponentsHolderStub{ + ValidatorPubKeyConverterCalled: func() core.PubkeyConverter { + return &testscommon.PubkeyConverterStub{ + EncodeCalled: func(pkBytes []byte) (string, error) { + return "", expectedErr + }, + } + }, + } + comp, err := CreateCryptoComponents(args) + require.Error(t, err) + 
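+			// The stubbed ValidatorPubKeyConverter returns an error on Encode, so
+			// CreateCryptoComponents must fail and return a nil holder.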
require.Nil(t, comp) + }) +} + +func TestCryptoComponentsHolder_IsInterfaceNil(t *testing.T) { + t.Parallel() + + var comp *cryptoComponentsHolder + require.True(t, comp.IsInterfaceNil()) + + comp, _ = CreateCryptoComponents(createArgsCryptoComponentsHolder()) + require.False(t, comp.IsInterfaceNil()) + require.Nil(t, comp.Close()) +} + +func TestCryptoComponentsHolder_GettersSetters(t *testing.T) { + t.Parallel() + + comp, err := CreateCryptoComponents(createArgsCryptoComponentsHolder()) + require.NoError(t, err) + + require.NotNil(t, comp.PublicKey()) + require.NotNil(t, comp.PrivateKey()) + require.NotEmpty(t, comp.PublicKeyString()) + require.NotEmpty(t, comp.PublicKeyBytes()) + require.NotNil(t, comp.P2pPublicKey()) + require.NotNil(t, comp.P2pPrivateKey()) + require.NotNil(t, comp.P2pSingleSigner()) + require.NotNil(t, comp.TxSingleSigner()) + require.NotNil(t, comp.BlockSigner()) + container := comp.MultiSignerContainer() + require.NotNil(t, container) + require.Nil(t, comp.SetMultiSignerContainer(nil)) + require.Nil(t, comp.MultiSignerContainer()) + require.Nil(t, comp.SetMultiSignerContainer(container)) + signer, err := comp.GetMultiSigner(0) + require.NoError(t, err) + require.NotNil(t, signer) + require.NotNil(t, comp.PeerSignatureHandler()) + require.NotNil(t, comp.BlockSignKeyGen()) + require.NotNil(t, comp.TxSignKeyGen()) + require.NotNil(t, comp.P2pKeyGen()) + require.NotNil(t, comp.MessageSignVerifier()) + require.NotNil(t, comp.ConsensusSigningHandler()) + require.NotNil(t, comp.ManagedPeersHolder()) + require.NotNil(t, comp.KeysHandler()) + require.Nil(t, comp.CheckSubcomponents()) + require.Empty(t, comp.String()) + require.Nil(t, comp.Close()) +} + +func TestCryptoComponentsHolder_Clone(t *testing.T) { + t.Parallel() + + comp, err := CreateCryptoComponents(createArgsCryptoComponentsHolder()) + require.NoError(t, err) + + compClone := comp.Clone() + require.Equal(t, comp, compClone) + require.False(t, comp == compClone) // pointer testing + require.Nil(t, comp.Close()) +} diff --git a/node/chainSimulator/components/dataComponents.go b/node/chainSimulator/components/dataComponents.go new file mode 100644 index 00000000000..8f04c351509 --- /dev/null +++ b/node/chainSimulator/components/dataComponents.go @@ -0,0 +1,124 @@ +package components + +import ( + "github.com/multiversx/mx-chain-core-go/data" + "github.com/multiversx/mx-chain-core-go/marshal" + "github.com/multiversx/mx-chain-go/dataRetriever" + "github.com/multiversx/mx-chain-go/dataRetriever/provider" + "github.com/multiversx/mx-chain-go/factory" +) + +// ArgsDataComponentsHolder will hold the components needed for data components +type ArgsDataComponentsHolder struct { + Chain data.ChainHandler + StorageService dataRetriever.StorageService + DataPool dataRetriever.PoolsHolder + InternalMarshaller marshal.Marshalizer +} + +type dataComponentsHolder struct { + closeHandler *closeHandler + chain data.ChainHandler + storageService dataRetriever.StorageService + dataPool dataRetriever.PoolsHolder + miniBlockProvider factory.MiniBlockProvider +} + +// CreateDataComponents will create the data components holder +func CreateDataComponents(args ArgsDataComponentsHolder) (*dataComponentsHolder, error) { + miniBlockStorer, err := args.StorageService.GetStorer(dataRetriever.MiniBlockUnit) + if err != nil { + return nil, err + } + + arg := provider.ArgMiniBlockProvider{ + MiniBlockPool: args.DataPool.MiniBlocks(), + MiniBlockStorage: miniBlockStorer, + Marshalizer: args.InternalMarshaller, + } + + miniBlocksProvider, err := 
provider.NewMiniBlockProvider(arg) + if err != nil { + return nil, err + } + + instance := &dataComponentsHolder{ + closeHandler: NewCloseHandler(), + chain: args.Chain, + storageService: args.StorageService, + dataPool: args.DataPool, + miniBlockProvider: miniBlocksProvider, + } + + instance.collectClosableComponents() + + return instance, nil +} + +// Blockchain will return the blockchain handler +func (d *dataComponentsHolder) Blockchain() data.ChainHandler { + return d.chain +} + +// SetBlockchain will set the blockchain handler +func (d *dataComponentsHolder) SetBlockchain(chain data.ChainHandler) error { + d.chain = chain + + return nil +} + +// StorageService will return the storage service +func (d *dataComponentsHolder) StorageService() dataRetriever.StorageService { + return d.storageService +} + +// Datapool will return the data pool +func (d *dataComponentsHolder) Datapool() dataRetriever.PoolsHolder { + return d.dataPool +} + +// MiniBlocksProvider will return the mini blocks provider +func (d *dataComponentsHolder) MiniBlocksProvider() factory.MiniBlockProvider { + return d.miniBlockProvider +} + +// Clone will clone the data components holder +func (d *dataComponentsHolder) Clone() interface{} { + return &dataComponentsHolder{ + chain: d.chain, + storageService: d.storageService, + dataPool: d.dataPool, + miniBlockProvider: d.miniBlockProvider, + closeHandler: d.closeHandler, + } +} + +func (d *dataComponentsHolder) collectClosableComponents() { + d.closeHandler.AddComponent(d.storageService) + d.closeHandler.AddComponent(d.dataPool) +} + +// Close will call the Close methods on all inner components +func (d *dataComponentsHolder) Close() error { + return d.closeHandler.Close() +} + +// IsInterfaceNil returns true if there is no value under the interface +func (d *dataComponentsHolder) IsInterfaceNil() bool { + return d == nil +} + +// Create will do nothing +func (d *dataComponentsHolder) Create() error { + return nil +} + +// CheckSubcomponents will do nothing +func (d *dataComponentsHolder) CheckSubcomponents() error { + return nil +} + +// String will do nothing +func (d *dataComponentsHolder) String() string { + return "" +} diff --git a/node/chainSimulator/components/dataComponents_test.go b/node/chainSimulator/components/dataComponents_test.go new file mode 100644 index 00000000000..a74f0b751f6 --- /dev/null +++ b/node/chainSimulator/components/dataComponents_test.go @@ -0,0 +1,110 @@ +package components + +import ( + "testing" + + retriever "github.com/multiversx/mx-chain-go/dataRetriever" + chainStorage "github.com/multiversx/mx-chain-go/storage" + "github.com/multiversx/mx-chain-go/testscommon" + "github.com/multiversx/mx-chain-go/testscommon/dataRetriever" + "github.com/multiversx/mx-chain-go/testscommon/storage" + "github.com/stretchr/testify/require" +) + +func createArgsDataComponentsHolder() ArgsDataComponentsHolder { + return ArgsDataComponentsHolder{ + Chain: &testscommon.ChainHandlerStub{}, + StorageService: &storage.ChainStorerStub{ + GetStorerCalled: func(unitType retriever.UnitType) (chainStorage.Storer, error) { + return &storage.StorerStub{}, nil + }, + }, + DataPool: &dataRetriever.PoolsHolderStub{ + MiniBlocksCalled: func() chainStorage.Cacher { + return &testscommon.CacherStub{} + }, + }, + InternalMarshaller: &testscommon.MarshallerStub{}, + } +} + +func TestCreateDataComponents(t *testing.T) { + t.Parallel() + + t.Run("should work", func(t *testing.T) { + t.Parallel() + + comp, err := CreateDataComponents(createArgsDataComponentsHolder()) + 
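+		// createArgsDataComponentsHolder wires stub implementations for the chain handler, storage
+		// service, pools holder and internal marshaller, so the happy path only exercises the wiring
+		// performed by CreateDataComponents (including the creation of the mini blocks provider).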
require.NoError(t, err) + require.NotNil(t, comp) + + require.Nil(t, comp.Create()) + require.Nil(t, comp.Close()) + }) + t.Run("NewMiniBlockProvider failure should error", func(t *testing.T) { + t.Parallel() + + args := createArgsDataComponentsHolder() + args.DataPool = &dataRetriever.PoolsHolderStub{ + MiniBlocksCalled: func() chainStorage.Cacher { + return nil + }, + } + comp, err := CreateDataComponents(args) + require.Error(t, err) + require.Nil(t, comp) + }) + t.Run("GetStorer failure should error", func(t *testing.T) { + t.Parallel() + + args := createArgsDataComponentsHolder() + args.StorageService = &storage.ChainStorerStub{ + GetStorerCalled: func(unitType retriever.UnitType) (chainStorage.Storer, error) { + return nil, expectedErr + }, + } + comp, err := CreateDataComponents(args) + require.Equal(t, expectedErr, err) + require.Nil(t, comp) + }) +} + +func TestDataComponentsHolder_IsInterfaceNil(t *testing.T) { + t.Parallel() + + var comp *dataComponentsHolder + require.True(t, comp.IsInterfaceNil()) + + comp, _ = CreateDataComponents(createArgsDataComponentsHolder()) + require.False(t, comp.IsInterfaceNil()) + require.Nil(t, comp.Close()) +} + +func TestDataComponentsHolder_Getters(t *testing.T) { + t.Parallel() + + comp, err := CreateDataComponents(createArgsDataComponentsHolder()) + require.NoError(t, err) + + require.NotNil(t, comp.Blockchain()) + require.Nil(t, comp.SetBlockchain(nil)) + require.Nil(t, comp.Blockchain()) + require.NotNil(t, comp.StorageService()) + require.NotNil(t, comp.Datapool()) + require.NotNil(t, comp.MiniBlocksProvider()) + require.Nil(t, comp.CheckSubcomponents()) + require.Empty(t, comp.String()) + require.Nil(t, comp.Close()) +} + +func TestDataComponentsHolder_Clone(t *testing.T) { + t.Parallel() + + comp, err := CreateDataComponents(createArgsDataComponentsHolder()) + require.NoError(t, err) + + compClone := comp.Clone() + require.Equal(t, comp, compClone) + require.False(t, comp == compClone) // pointer testing + require.Nil(t, comp.Close()) +} diff --git a/node/chainSimulator/components/instantBroadcastMessenger.go b/node/chainSimulator/components/instantBroadcastMessenger.go new file mode 100644 index 00000000000..893fc4edbc7 --- /dev/null +++ b/node/chainSimulator/components/instantBroadcastMessenger.go @@ -0,0 +1,106 @@ +package components + +import ( + "strings" + + "github.com/multiversx/mx-chain-core-go/core" + "github.com/multiversx/mx-chain-core-go/core/check" + "github.com/multiversx/mx-chain-core-go/data" + "github.com/multiversx/mx-chain-go/common" + "github.com/multiversx/mx-chain-go/consensus" + "github.com/multiversx/mx-chain-go/errors" + "github.com/multiversx/mx-chain-go/sharding" +) + +type instantBroadcastMessenger struct { + consensus.BroadcastMessenger + shardCoordinator sharding.Coordinator +} + +// NewInstantBroadcastMessenger creates a new instance of type instantBroadcastMessenger +func NewInstantBroadcastMessenger(broadcastMessenger consensus.BroadcastMessenger, shardCoordinator sharding.Coordinator) (*instantBroadcastMessenger, error) { + if check.IfNil(broadcastMessenger) { + return nil, errors.ErrNilBroadcastMessenger + } + if check.IfNil(shardCoordinator) { + return nil, errors.ErrNilShardCoordinator + } + + return &instantBroadcastMessenger{ + BroadcastMessenger: broadcastMessenger, + shardCoordinator: shardCoordinator, + }, nil +} + +// BroadcastBlockDataLeader broadcasts the block data as consensus group leader +func (messenger *instantBroadcastMessenger) BroadcastBlockDataLeader(_ data.HeaderHandler, miniBlocks 
map[uint32][]byte, transactions map[string][][]byte, pkBytes []byte) error { + if messenger.shardCoordinator.SelfId() == common.MetachainShardId { + return messenger.broadcastMiniblockData(miniBlocks, transactions, pkBytes) + } + + return messenger.broadcastBlockDataLeaderWhenShard(miniBlocks, transactions, pkBytes) +} + +func (messenger *instantBroadcastMessenger) broadcastBlockDataLeaderWhenShard(miniBlocks map[uint32][]byte, transactions map[string][][]byte, pkBytes []byte) error { + if len(miniBlocks) == 0 { + return nil + } + + metaMiniBlocks, metaTransactions := messenger.extractMetaMiniBlocksAndTransactions(miniBlocks, transactions) + + return messenger.broadcastMiniblockData(metaMiniBlocks, metaTransactions, pkBytes) +} + +func (messenger *instantBroadcastMessenger) broadcastMiniblockData(miniBlocks map[uint32][]byte, transactions map[string][][]byte, pkBytes []byte) error { + if len(miniBlocks) > 0 { + err := messenger.BroadcastMiniBlocks(miniBlocks, pkBytes) + if err != nil { + log.Warn("instantBroadcastMessenger.BroadcastBlockData: broadcast miniblocks", "error", err.Error()) + } + } + + if len(transactions) > 0 { + err := messenger.BroadcastTransactions(transactions, pkBytes) + if err != nil { + log.Warn("instantBroadcastMessenger.BroadcastBlockData: broadcast transactions", "error", err.Error()) + } + } + + return nil +} + +func (messenger *instantBroadcastMessenger) extractMetaMiniBlocksAndTransactions( + miniBlocks map[uint32][]byte, + transactions map[string][][]byte, +) (map[uint32][]byte, map[string][][]byte) { + + metaMiniBlocks := make(map[uint32][]byte) + metaTransactions := make(map[string][][]byte) + + for shardID, mbsMarshalized := range miniBlocks { + if shardID != core.MetachainShardId { + continue + } + + metaMiniBlocks[shardID] = mbsMarshalized + delete(miniBlocks, shardID) + } + + identifier := messenger.shardCoordinator.CommunicationIdentifier(core.MetachainShardId) + + for broadcastTopic, txsMarshalized := range transactions { + if !strings.Contains(broadcastTopic, identifier) { + continue + } + + metaTransactions[broadcastTopic] = txsMarshalized + delete(transactions, broadcastTopic) + } + + return metaMiniBlocks, metaTransactions +} + +// IsInterfaceNil returns true if there is no value under the interface +func (messenger *instantBroadcastMessenger) IsInterfaceNil() bool { + return messenger == nil +} diff --git a/node/chainSimulator/components/instantBroadcastMessenger_test.go b/node/chainSimulator/components/instantBroadcastMessenger_test.go new file mode 100644 index 00000000000..361caa03bbc --- /dev/null +++ b/node/chainSimulator/components/instantBroadcastMessenger_test.go @@ -0,0 +1,134 @@ +package components + +import ( + "testing" + + "github.com/multiversx/mx-chain-go/common" + "github.com/multiversx/mx-chain-go/consensus/mock" + errorsMx "github.com/multiversx/mx-chain-go/errors" + "github.com/stretchr/testify/require" +) + +func TestNewInstantBroadcastMessenger(t *testing.T) { + t.Parallel() + + t.Run("nil broadcastMessenger should error", func(t *testing.T) { + t.Parallel() + + mes, err := NewInstantBroadcastMessenger(nil, nil) + require.Equal(t, errorsMx.ErrNilBroadcastMessenger, err) + require.Nil(t, mes) + }) + t.Run("nil shardCoordinator should error", func(t *testing.T) { + t.Parallel() + + mes, err := NewInstantBroadcastMessenger(&mock.BroadcastMessengerMock{}, nil) + require.Equal(t, errorsMx.ErrNilShardCoordinator, err) + require.Nil(t, mes) + }) + t.Run("should work", func(t *testing.T) { + t.Parallel() + + mes, err := 
NewInstantBroadcastMessenger(&mock.BroadcastMessengerMock{}, &mock.ShardCoordinatorMock{}) + require.NoError(t, err) + require.NotNil(t, mes) + }) +} + +func TestInstantBroadcastMessenger_IsInterfaceNil(t *testing.T) { + t.Parallel() + + var mes *instantBroadcastMessenger + require.True(t, mes.IsInterfaceNil()) + + mes, _ = NewInstantBroadcastMessenger(&mock.BroadcastMessengerMock{}, &mock.ShardCoordinatorMock{}) + require.False(t, mes.IsInterfaceNil()) +} + +func TestInstantBroadcastMessenger_BroadcastBlockDataLeader(t *testing.T) { + t.Parallel() + + t.Run("meta should work", func(t *testing.T) { + t.Parallel() + + providedMBs := map[uint32][]byte{ + 0: []byte("mb shard 0"), + 1: []byte("mb shard 1"), + common.MetachainShardId: []byte("mb shard meta"), + } + providedTxs := map[string][][]byte{ + "topic_0": {[]byte("txs topic 0")}, + "topic_1": {[]byte("txs topic 1")}, + } + mes, err := NewInstantBroadcastMessenger(&mock.BroadcastMessengerMock{ + BroadcastMiniBlocksCalled: func(mbs map[uint32][]byte, bytes []byte) error { + require.Equal(t, providedMBs, mbs) + return expectedErr // for coverage only + }, + BroadcastTransactionsCalled: func(txs map[string][][]byte, bytes []byte) error { + require.Equal(t, providedTxs, txs) + return expectedErr // for coverage only + }, + }, &mock.ShardCoordinatorMock{ + ShardID: common.MetachainShardId, + }) + require.NoError(t, err) + + err = mes.BroadcastBlockDataLeader(nil, providedMBs, providedTxs, []byte("pk")) + require.NoError(t, err) + }) + t.Run("shard should work", func(t *testing.T) { + t.Parallel() + + providedMBs := map[uint32][]byte{ + 0: []byte("mb shard 0"), // for coverage only + common.MetachainShardId: []byte("mb shard meta"), + } + expectedMBs := map[uint32][]byte{ + common.MetachainShardId: []byte("mb shard meta"), + } + providedTxs := map[string][][]byte{ + "topic_0": {[]byte("txs topic 1")}, // for coverage only + "topic_0_META": {[]byte("txs topic meta")}, + } + expectedTxs := map[string][][]byte{ + "topic_0_META": {[]byte("txs topic meta")}, + } + mes, err := NewInstantBroadcastMessenger(&mock.BroadcastMessengerMock{ + BroadcastMiniBlocksCalled: func(mbs map[uint32][]byte, bytes []byte) error { + require.Equal(t, expectedMBs, mbs) + return nil + }, + BroadcastTransactionsCalled: func(txs map[string][][]byte, bytes []byte) error { + require.Equal(t, expectedTxs, txs) + return nil + }, + }, &mock.ShardCoordinatorMock{ + ShardID: 0, + }) + require.NoError(t, err) + + err = mes.BroadcastBlockDataLeader(nil, providedMBs, providedTxs, []byte("pk")) + require.NoError(t, err) + }) + t.Run("shard, empty miniblocks should early exit", func(t *testing.T) { + t.Parallel() + + mes, err := NewInstantBroadcastMessenger(&mock.BroadcastMessengerMock{ + BroadcastMiniBlocksCalled: func(mbs map[uint32][]byte, bytes []byte) error { + require.Fail(t, "should have not been called") + return nil + }, + BroadcastTransactionsCalled: func(txs map[string][][]byte, bytes []byte) error { + require.Fail(t, "should have not been called") + return nil + }, + }, &mock.ShardCoordinatorMock{ + ShardID: 0, + }) + require.NoError(t, err) + + err = mes.BroadcastBlockDataLeader(nil, nil, nil, []byte("pk")) + require.NoError(t, err) + }) +} diff --git a/node/chainSimulator/components/interface.go b/node/chainSimulator/components/interface.go new file mode 100644 index 00000000000..4b1421341a0 --- /dev/null +++ b/node/chainSimulator/components/interface.go @@ -0,0 +1,18 @@ +package components + +import "github.com/multiversx/mx-chain-core-go/core" + +// 
SyncedBroadcastNetworkHandler defines the synced network interface +type SyncedBroadcastNetworkHandler interface { + RegisterMessageReceiver(handler messageReceiver, pid core.PeerID) + Broadcast(pid core.PeerID, topic string, buff []byte) + SendDirectly(from core.PeerID, topic string, buff []byte, to core.PeerID) error + GetConnectedPeers() []core.PeerID + GetConnectedPeersOnTopic(topic string) []core.PeerID + IsInterfaceNil() bool +} + +// APIConfigurator defines what an api configurator should be able to do +type APIConfigurator interface { + RestApiInterface(shardID uint32) string +} diff --git a/node/chainSimulator/components/manualRoundHandler.go b/node/chainSimulator/components/manualRoundHandler.go new file mode 100644 index 00000000000..479cf63a1f5 --- /dev/null +++ b/node/chainSimulator/components/manualRoundHandler.go @@ -0,0 +1,66 @@ +package components + +import ( + "sync/atomic" + "time" +) + +type manualRoundHandler struct { + index int64 + genesisTimeStamp int64 + roundDuration time.Duration + initialRound int64 +} + +// NewManualRoundHandler returns a manual round handler instance +func NewManualRoundHandler(genesisTimeStamp int64, roundDuration time.Duration, initialRound int64) *manualRoundHandler { + return &manualRoundHandler{ + genesisTimeStamp: genesisTimeStamp, + roundDuration: roundDuration, + index: initialRound, + initialRound: initialRound, + } +} + +// IncrementIndex will increment the current round index +func (handler *manualRoundHandler) IncrementIndex() { + atomic.AddInt64(&handler.index, 1) +} + +// Index returns the current index +func (handler *manualRoundHandler) Index() int64 { + return atomic.LoadInt64(&handler.index) +} + +// BeforeGenesis returns false +func (handler *manualRoundHandler) BeforeGenesis() bool { + return false +} + +// UpdateRound does nothing as this implementation does not work with real timers +func (handler *manualRoundHandler) UpdateRound(_ time.Time, _ time.Time) { +} + +// TimeStamp returns the time based of the genesis timestamp and the current round +func (handler *manualRoundHandler) TimeStamp() time.Time { + rounds := atomic.LoadInt64(&handler.index) + timeFromGenesis := handler.roundDuration * time.Duration(rounds) + timestamp := time.Unix(handler.genesisTimeStamp, 0).Add(timeFromGenesis) + timestamp = time.Unix(timestamp.Unix()-int64(handler.roundDuration.Seconds())*handler.initialRound, 0) + return timestamp +} + +// TimeDuration returns the provided time duration for this instance +func (handler *manualRoundHandler) TimeDuration() time.Duration { + return handler.roundDuration +} + +// RemainingTime returns the max time as the start time is not taken into account +func (handler *manualRoundHandler) RemainingTime(_ time.Time, maxTime time.Duration) time.Duration { + return maxTime +} + +// IsInterfaceNil returns true if there is no value under the interface +func (handler *manualRoundHandler) IsInterfaceNil() bool { + return handler == nil +} diff --git a/node/chainSimulator/components/manualRoundHandler_test.go b/node/chainSimulator/components/manualRoundHandler_test.go new file mode 100644 index 00000000000..8a866d6ccec --- /dev/null +++ b/node/chainSimulator/components/manualRoundHandler_test.go @@ -0,0 +1,44 @@ +package components + +import ( + "testing" + "time" + + "github.com/stretchr/testify/require" +) + +func TestNewManualRoundHandler(t *testing.T) { + t.Parallel() + + handler := NewManualRoundHandler(100, time.Second, 0) + require.NotNil(t, handler) +} + +func TestManualRoundHandler_IsInterfaceNil(t *testing.T) 
{ + t.Parallel() + + var handler *manualRoundHandler + require.True(t, handler.IsInterfaceNil()) + + handler = NewManualRoundHandler(100, time.Second, 0) + require.False(t, handler.IsInterfaceNil()) +} + +func TestManualRoundHandler_Operations(t *testing.T) { + t.Parallel() + + genesisTime := time.Now() + providedIndex := int64(0) + providedRoundDuration := time.Second + handler := NewManualRoundHandler(genesisTime.Unix(), providedRoundDuration, providedIndex) + require.Equal(t, providedIndex, handler.Index()) + handler.IncrementIndex() + require.Equal(t, providedIndex+1, handler.Index()) + expectedTimestamp := time.Unix(handler.genesisTimeStamp, 0).Add(providedRoundDuration) + require.Equal(t, expectedTimestamp, handler.TimeStamp()) + require.Equal(t, providedRoundDuration, handler.TimeDuration()) + providedMaxTime := time.Minute + require.Equal(t, providedMaxTime, handler.RemainingTime(time.Now(), providedMaxTime)) + require.False(t, handler.BeforeGenesis()) + handler.UpdateRound(time.Now(), time.Now()) // for coverage only +} diff --git a/node/chainSimulator/components/memoryComponents.go b/node/chainSimulator/components/memoryComponents.go new file mode 100644 index 00000000000..3b12e720756 --- /dev/null +++ b/node/chainSimulator/components/memoryComponents.go @@ -0,0 +1,82 @@ +package components + +import ( + "github.com/multiversx/mx-chain-core-go/core" + "github.com/multiversx/mx-chain-go/storage" + "github.com/multiversx/mx-chain-go/storage/database" + "github.com/multiversx/mx-chain-go/storage/storageunit" +) + +// CreateMemUnit creates a new in-memory storage unit +func CreateMemUnit() storage.Storer { + capacity := uint32(10) + shards := uint32(1) + sizeInBytes := uint64(0) + cache, _ := storageunit.NewCache(storageunit.CacheConfig{Type: storageunit.LRUCache, Capacity: capacity, Shards: shards, SizeInBytes: sizeInBytes}) + persist, _ := database.NewlruDB(100000) + unit, _ := storageunit.NewStorageUnit(cache, persist) + + return unit +} + +type trieStorage struct { + storage.Storer +} + +// CreateMemUnitForTries returns a special type of storer used on tries instances +func CreateMemUnitForTries() storage.Storer { + return &trieStorage{ + Storer: CreateMemUnit(), + } +} + +// SetEpochForPutOperation does nothing +func (store *trieStorage) SetEpochForPutOperation(_ uint32) { +} + +// GetFromOldEpochsWithoutAddingToCache tries to get directly the key +func (store *trieStorage) GetFromOldEpochsWithoutAddingToCache(key []byte) ([]byte, core.OptionalUint32, error) { + value, err := store.Get(key) + + return value, core.OptionalUint32{}, err +} + +// GetFromLastEpoch tries to get directly the key +func (store *trieStorage) GetFromLastEpoch(key []byte) ([]byte, error) { + return store.Get(key) +} + +// PutInEpoch will put the key directly +func (store *trieStorage) PutInEpoch(key []byte, data []byte, _ uint32) error { + return store.Put(key, data) +} + +// PutInEpochWithoutCache will put the key directly +func (store *trieStorage) PutInEpochWithoutCache(key []byte, data []byte, _ uint32) error { + return store.Put(key, data) +} + +// GetLatestStorageEpoch returns 0 +func (store *trieStorage) GetLatestStorageEpoch() (uint32, error) { + return 0, nil +} + +// GetFromCurrentEpoch tries to get directly the key +func (store *trieStorage) GetFromCurrentEpoch(key []byte) ([]byte, error) { + return store.Get(key) +} + +// GetFromEpoch tries to get directly the key +func (store *trieStorage) GetFromEpoch(key []byte, _ uint32) ([]byte, error) { + return store.Get(key) +} + +// 
RemoveFromCurrentEpoch removes directly the key +func (store *trieStorage) RemoveFromCurrentEpoch(key []byte) error { + return store.Remove(key) +} + +// RemoveFromAllActiveEpochs removes directly the key +func (store *trieStorage) RemoveFromAllActiveEpochs(key []byte) error { + return store.Remove(key) +} diff --git a/node/chainSimulator/components/memoryComponents_test.go b/node/chainSimulator/components/memoryComponents_test.go new file mode 100644 index 00000000000..b393bca7d47 --- /dev/null +++ b/node/chainSimulator/components/memoryComponents_test.go @@ -0,0 +1,55 @@ +package components + +import ( + "testing" + + "github.com/stretchr/testify/require" +) + +func TestCreateMemUnitForTries(t *testing.T) { + t.Parallel() + + memUnitStorer := CreateMemUnitForTries() + require.NotNil(t, memUnitStorer) + + memUnit, ok := memUnitStorer.(*trieStorage) + require.True(t, ok) + memUnit.SetEpochForPutOperation(0) // for coverage only + key := []byte("key") + data := []byte("data") + require.NoError(t, memUnit.Put(key, data)) + + require.NoError(t, memUnit.PutInEpoch(key, data, 0)) + require.NoError(t, memUnit.PutInEpochWithoutCache(key, data, 0)) + + value, _, err := memUnit.GetFromOldEpochsWithoutAddingToCache(key) + require.NoError(t, err) + require.Equal(t, data, value) + + latest, err := memUnit.GetLatestStorageEpoch() + require.NoError(t, err) + require.Zero(t, latest) + + value, err = memUnit.GetFromCurrentEpoch(key) + require.NoError(t, err) + require.Equal(t, data, value) + + value, err = memUnit.GetFromEpoch(key, 0) + require.NoError(t, err) + require.Equal(t, data, value) + + value, err = memUnit.GetFromLastEpoch(key) + require.NoError(t, err) + require.Equal(t, data, value) + + require.NoError(t, memUnit.RemoveFromCurrentEpoch(key)) + value, err = memUnit.GetFromCurrentEpoch(key) + require.Error(t, err) + require.Empty(t, value) + + require.NoError(t, memUnit.PutInEpoch(key, data, 0)) + require.NoError(t, memUnit.RemoveFromAllActiveEpochs(key)) + value, err = memUnit.GetFromCurrentEpoch(key) + require.Error(t, err) + require.Empty(t, value) +} diff --git a/node/chainSimulator/components/networkComponents.go b/node/chainSimulator/components/networkComponents.go new file mode 100644 index 00000000000..6b791f6927b --- /dev/null +++ b/node/chainSimulator/components/networkComponents.go @@ -0,0 +1,142 @@ +package components + +import ( + disabledBootstrap "github.com/multiversx/mx-chain-go/epochStart/bootstrap/disabled" + "github.com/multiversx/mx-chain-go/factory" + disabledFactory "github.com/multiversx/mx-chain-go/factory/disabled" + "github.com/multiversx/mx-chain-go/node/chainSimulator/disabled" + "github.com/multiversx/mx-chain-go/p2p" + disabledP2P "github.com/multiversx/mx-chain-go/p2p/disabled" + "github.com/multiversx/mx-chain-go/process" + disabledAntiflood "github.com/multiversx/mx-chain-go/process/throttle/antiflood/disabled" +) + +type networkComponentsHolder struct { + closeHandler *closeHandler + networkMessenger p2p.Messenger + inputAntiFloodHandler factory.P2PAntifloodHandler + outputAntiFloodHandler factory.P2PAntifloodHandler + pubKeyCacher process.TimeCacher + peerBlackListHandler process.PeerBlackListCacher + peerHonestyHandler factory.PeerHonestyHandler + preferredPeersHolderHandler factory.PreferredPeersHolderHandler + peersRatingHandler p2p.PeersRatingHandler + peersRatingMonitor p2p.PeersRatingMonitor + fullArchiveNetworkMessenger p2p.Messenger + fullArchivePreferredPeersHolderHandler factory.PreferredPeersHolderHandler +} + +// CreateNetworkComponents creates a 
new networkComponentsHolder instance +func CreateNetworkComponents(network SyncedBroadcastNetworkHandler) (*networkComponentsHolder, error) { + messenger, err := NewSyncedMessenger(network) + if err != nil { + return nil, err + } + + instance := &networkComponentsHolder{ + closeHandler: NewCloseHandler(), + networkMessenger: messenger, + inputAntiFloodHandler: disabled.NewAntiFlooder(), + outputAntiFloodHandler: disabled.NewAntiFlooder(), + pubKeyCacher: &disabledAntiflood.TimeCache{}, + peerBlackListHandler: &disabledAntiflood.PeerBlacklistCacher{}, + peerHonestyHandler: disabled.NewPeerHonesty(), + preferredPeersHolderHandler: disabledFactory.NewPreferredPeersHolder(), + peersRatingHandler: disabledBootstrap.NewDisabledPeersRatingHandler(), + peersRatingMonitor: disabled.NewPeersRatingMonitor(), + fullArchiveNetworkMessenger: disabledP2P.NewNetworkMessenger(), + fullArchivePreferredPeersHolderHandler: disabledFactory.NewPreferredPeersHolder(), + } + + instance.collectClosableComponents() + + return instance, nil +} + +// NetworkMessenger returns the network messenger +func (holder *networkComponentsHolder) NetworkMessenger() p2p.Messenger { + return holder.networkMessenger +} + +// InputAntiFloodHandler returns the input antiflooder +func (holder *networkComponentsHolder) InputAntiFloodHandler() factory.P2PAntifloodHandler { + return holder.inputAntiFloodHandler +} + +// OutputAntiFloodHandler returns the output antiflooder +func (holder *networkComponentsHolder) OutputAntiFloodHandler() factory.P2PAntifloodHandler { + return holder.outputAntiFloodHandler +} + +// PubKeyCacher returns the public key cacher +func (holder *networkComponentsHolder) PubKeyCacher() process.TimeCacher { + return holder.pubKeyCacher +} + +// PeerBlackListHandler returns the peer blacklist handler +func (holder *networkComponentsHolder) PeerBlackListHandler() process.PeerBlackListCacher { + return holder.peerBlackListHandler +} + +// PeerHonestyHandler returns the peer honesty handler +func (holder *networkComponentsHolder) PeerHonestyHandler() factory.PeerHonestyHandler { + return holder.peerHonestyHandler +} + +// PreferredPeersHolderHandler returns the preferred peers holder +func (holder *networkComponentsHolder) PreferredPeersHolderHandler() factory.PreferredPeersHolderHandler { + return holder.preferredPeersHolderHandler +} + +// PeersRatingHandler returns the peers rating handler +func (holder *networkComponentsHolder) PeersRatingHandler() p2p.PeersRatingHandler { + return holder.peersRatingHandler +} + +// PeersRatingMonitor returns the peers rating monitor +func (holder *networkComponentsHolder) PeersRatingMonitor() p2p.PeersRatingMonitor { + return holder.peersRatingMonitor +} + +// FullArchiveNetworkMessenger returns the full archive network messenger +func (holder *networkComponentsHolder) FullArchiveNetworkMessenger() p2p.Messenger { + return holder.fullArchiveNetworkMessenger +} + +// FullArchivePreferredPeersHolderHandler returns the full archive preferred peers holder +func (holder *networkComponentsHolder) FullArchivePreferredPeersHolderHandler() factory.PreferredPeersHolderHandler { + return holder.fullArchivePreferredPeersHolderHandler +} + +func (holder *networkComponentsHolder) collectClosableComponents() { + holder.closeHandler.AddComponent(holder.networkMessenger) + holder.closeHandler.AddComponent(holder.inputAntiFloodHandler) + holder.closeHandler.AddComponent(holder.outputAntiFloodHandler) + holder.closeHandler.AddComponent(holder.peerHonestyHandler) + 
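+	// The full archive network messenger registered below is also closable; the remaining fields
+	// are disabled/stub implementations and are not added to the close handler.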
holder.closeHandler.AddComponent(holder.fullArchiveNetworkMessenger) +} + +// Close will call the Close methods on all inner components +func (holder *networkComponentsHolder) Close() error { + return holder.closeHandler.Close() +} + +// IsInterfaceNil returns true if there is no value under the interface +func (holder *networkComponentsHolder) IsInterfaceNil() bool { + return holder == nil +} + +// Create will do nothing +func (holder *networkComponentsHolder) Create() error { + return nil +} + +// CheckSubcomponents will do nothing +func (holder *networkComponentsHolder) CheckSubcomponents() error { + return nil +} + +// String will do nothing +func (holder *networkComponentsHolder) String() string { + return "" +} diff --git a/node/chainSimulator/components/networkComponents_test.go b/node/chainSimulator/components/networkComponents_test.go new file mode 100644 index 00000000000..9c184d4d608 --- /dev/null +++ b/node/chainSimulator/components/networkComponents_test.go @@ -0,0 +1,62 @@ +package components + +import ( + "testing" + + "github.com/stretchr/testify/require" +) + +func TestCreateNetworkComponents(t *testing.T) { + t.Parallel() + + t.Run("should work", func(t *testing.T) { + t.Parallel() + + comp, err := CreateNetworkComponents(NewSyncedBroadcastNetwork()) + require.NoError(t, err) + require.NotNil(t, comp) + + require.Nil(t, comp.Create()) + require.Nil(t, comp.Close()) + }) + t.Run("nil network should error", func(t *testing.T) { + t.Parallel() + + comp, err := CreateNetworkComponents(nil) + require.Error(t, err) + require.Nil(t, comp) + }) +} + +func TestNetworkComponentsHolder_IsInterfaceNil(t *testing.T) { + t.Parallel() + + var comp *networkComponentsHolder + require.True(t, comp.IsInterfaceNil()) + + comp, _ = CreateNetworkComponents(NewSyncedBroadcastNetwork()) + require.False(t, comp.IsInterfaceNil()) + require.Nil(t, comp.Close()) +} + +func TestNetworkComponentsHolder_Getters(t *testing.T) { + t.Parallel() + + comp, err := CreateNetworkComponents(NewSyncedBroadcastNetwork()) + require.NoError(t, err) + + require.NotNil(t, comp.NetworkMessenger()) + require.NotNil(t, comp.InputAntiFloodHandler()) + require.NotNil(t, comp.OutputAntiFloodHandler()) + require.NotNil(t, comp.PubKeyCacher()) + require.NotNil(t, comp.PeerBlackListHandler()) + require.NotNil(t, comp.PeerHonestyHandler()) + require.NotNil(t, comp.PreferredPeersHolderHandler()) + require.NotNil(t, comp.PeersRatingHandler()) + require.NotNil(t, comp.PeersRatingMonitor()) + require.NotNil(t, comp.FullArchiveNetworkMessenger()) + require.NotNil(t, comp.FullArchivePreferredPeersHolderHandler()) + require.Nil(t, comp.CheckSubcomponents()) + require.Empty(t, comp.String()) + require.Nil(t, comp.Close()) +} diff --git a/node/chainSimulator/components/nodeFacade.go b/node/chainSimulator/components/nodeFacade.go new file mode 100644 index 00000000000..7ed67018579 --- /dev/null +++ b/node/chainSimulator/components/nodeFacade.go @@ -0,0 +1,190 @@ +package components + +import ( + "errors" + "fmt" + "strconv" + "time" + + "github.com/multiversx/mx-chain-core-go/core" + "github.com/multiversx/mx-chain-go/api/gin" + "github.com/multiversx/mx-chain-go/common" + "github.com/multiversx/mx-chain-go/common/forking" + "github.com/multiversx/mx-chain-go/config" + "github.com/multiversx/mx-chain-go/facade" + apiComp "github.com/multiversx/mx-chain-go/factory/api" + nodePack "github.com/multiversx/mx-chain-go/node" + "github.com/multiversx/mx-chain-go/node/metrics" + "github.com/multiversx/mx-chain-go/process/mock" +) + +func (node 
*testOnlyProcessingNode) createFacade(configs config.Configs, apiInterface APIConfigurator) error { + log.Debug("creating api resolver structure") + + err := node.createMetrics(configs) + if err != nil { + return err + } + + argsGasScheduleNotifier := forking.ArgsNewGasScheduleNotifier{ + GasScheduleConfig: configs.EpochConfig.GasSchedule, + ConfigDir: configs.ConfigurationPathsHolder.GasScheduleDirectoryName, + EpochNotifier: node.CoreComponentsHolder.EpochNotifier(), + WasmVMChangeLocker: node.CoreComponentsHolder.WasmVMChangeLocker(), + } + gasScheduleNotifier, err := forking.NewGasScheduleNotifier(argsGasScheduleNotifier) + if err != nil { + return err + } + + allowVMQueriesChan := make(chan struct{}) + go func() { + time.Sleep(time.Second) + close(allowVMQueriesChan) + node.StatusCoreComponents.AppStatusHandler().SetStringValue(common.MetricAreVMQueriesReady, strconv.FormatBool(true)) + }() + + apiResolverArgs := &apiComp.ApiResolverArgs{ + Configs: &configs, + CoreComponents: node.CoreComponentsHolder, + DataComponents: node.DataComponentsHolder, + StateComponents: node.StateComponentsHolder, + BootstrapComponents: node.BootstrapComponentsHolder, + CryptoComponents: node.CryptoComponentsHolder, + ProcessComponents: node.ProcessComponentsHolder, + StatusCoreComponents: node.StatusCoreComponents, + GasScheduleNotifier: gasScheduleNotifier, + Bootstrapper: &mock.BootstrapperStub{ + GetNodeStateCalled: func() common.NodeState { + return common.NsSynchronized + }, + }, + AllowVMQueriesChan: allowVMQueriesChan, + StatusComponents: node.StatusComponentsHolder, + ProcessingMode: common.GetNodeProcessingMode(configs.ImportDbConfig), + } + + apiResolver, err := apiComp.CreateApiResolver(apiResolverArgs) + if err != nil { + return err + } + + log.Debug("creating multiversx node facade") + + flagsConfig := configs.FlagsConfig + + nd, err := nodePack.NewNode( + nodePack.WithStatusCoreComponents(node.StatusCoreComponents), + nodePack.WithCoreComponents(node.CoreComponentsHolder), + nodePack.WithCryptoComponents(node.CryptoComponentsHolder), + nodePack.WithBootstrapComponents(node.BootstrapComponentsHolder), + nodePack.WithStateComponents(node.StateComponentsHolder), + nodePack.WithDataComponents(node.DataComponentsHolder), + nodePack.WithStatusComponents(node.StatusComponentsHolder), + nodePack.WithProcessComponents(node.ProcessComponentsHolder), + nodePack.WithNetworkComponents(node.NetworkComponentsHolder), + nodePack.WithInitialNodesPubKeys(node.CoreComponentsHolder.GenesisNodesSetup().InitialNodesPubKeys()), + nodePack.WithRoundDuration(node.CoreComponentsHolder.GenesisNodesSetup().GetRoundDuration()), + nodePack.WithConsensusGroupSize(int(node.CoreComponentsHolder.GenesisNodesSetup().GetShardConsensusGroupSize())), + nodePack.WithGenesisTime(node.CoreComponentsHolder.GenesisTime()), + nodePack.WithConsensusType(configs.GeneralConfig.Consensus.Type), + nodePack.WithRequestedItemsHandler(node.ProcessComponentsHolder.RequestedItemsHandler()), + nodePack.WithAddressSignatureSize(configs.GeneralConfig.AddressPubkeyConverter.SignatureLength), + nodePack.WithValidatorSignatureSize(configs.GeneralConfig.ValidatorPubkeyConverter.SignatureLength), + nodePack.WithPublicKeySize(configs.GeneralConfig.ValidatorPubkeyConverter.Length), + nodePack.WithNodeStopChannel(node.CoreComponentsHolder.ChanStopNodeProcess()), + nodePack.WithImportMode(configs.ImportDbConfig.IsImportDBMode), + nodePack.WithESDTNFTStorageHandler(node.ProcessComponentsHolder.ESDTDataStorageHandlerForAPI()), + ) + if err != nil { + return 
errors.New("error creating node: " + err.Error()) + } + + shardID := node.GetShardCoordinator().SelfId() + restApiInterface := apiInterface.RestApiInterface(shardID) + + argNodeFacade := facade.ArgNodeFacade{ + Node: nd, + ApiResolver: apiResolver, + RestAPIServerDebugMode: flagsConfig.EnableRestAPIServerDebugMode, + WsAntifloodConfig: configs.GeneralConfig.WebServerAntiflood, + FacadeConfig: config.FacadeConfig{ + RestApiInterface: restApiInterface, + PprofEnabled: flagsConfig.EnablePprof, + }, + ApiRoutesConfig: *configs.ApiRoutesConfig, + AccountsState: node.StateComponentsHolder.AccountsAdapter(), + PeerState: node.StateComponentsHolder.PeerAccounts(), + Blockchain: node.DataComponentsHolder.Blockchain(), + } + + ef, err := facade.NewNodeFacade(argNodeFacade) + if err != nil { + return fmt.Errorf("%w while creating NodeFacade", err) + } + + ef.SetSyncer(node.CoreComponentsHolder.SyncTimer()) + + node.facadeHandler = ef + + return nil +} + +func (node *testOnlyProcessingNode) createHttpServer(configs config.Configs) error { + httpServerArgs := gin.ArgsNewWebServer{ + Facade: node.facadeHandler, + ApiConfig: *configs.ApiRoutesConfig, + AntiFloodConfig: configs.GeneralConfig.WebServerAntiflood, + } + + httpServerWrapper, err := gin.NewGinWebServerHandler(httpServerArgs) + if err != nil { + return err + } + + err = httpServerWrapper.StartHttpServer() + if err != nil { + return err + } + + node.httpServer = httpServerWrapper + + return nil +} + +func (node *testOnlyProcessingNode) createMetrics(configs config.Configs) error { + err := metrics.InitMetrics( + node.StatusCoreComponents.AppStatusHandler(), + node.CryptoComponentsHolder.PublicKeyString(), + node.BootstrapComponentsHolder.NodeType(), + node.BootstrapComponentsHolder.ShardCoordinator(), + node.CoreComponentsHolder.GenesisNodesSetup(), + configs.FlagsConfig.Version, + configs.EconomicsConfig, + configs.GeneralConfig.EpochStartConfig.RoundsPerEpoch, + node.CoreComponentsHolder.MinTransactionVersion(), + ) + + if err != nil { + return err + } + + metrics.SaveStringMetric(node.StatusCoreComponents.AppStatusHandler(), common.MetricNodeDisplayName, configs.PreferencesConfig.Preferences.NodeDisplayName) + metrics.SaveStringMetric(node.StatusCoreComponents.AppStatusHandler(), common.MetricRedundancyLevel, fmt.Sprintf("%d", configs.PreferencesConfig.Preferences.RedundancyLevel)) + metrics.SaveStringMetric(node.StatusCoreComponents.AppStatusHandler(), common.MetricRedundancyIsMainActive, common.MetricValueNA) + metrics.SaveStringMetric(node.StatusCoreComponents.AppStatusHandler(), common.MetricChainId, node.CoreComponentsHolder.ChainID()) + metrics.SaveUint64Metric(node.StatusCoreComponents.AppStatusHandler(), common.MetricGasPerDataByte, node.CoreComponentsHolder.EconomicsData().GasPerDataByte()) + metrics.SaveUint64Metric(node.StatusCoreComponents.AppStatusHandler(), common.MetricMinGasPrice, node.CoreComponentsHolder.EconomicsData().MinGasPrice()) + metrics.SaveUint64Metric(node.StatusCoreComponents.AppStatusHandler(), common.MetricMinGasLimit, node.CoreComponentsHolder.EconomicsData().MinGasLimit()) + metrics.SaveUint64Metric(node.StatusCoreComponents.AppStatusHandler(), common.MetricExtraGasLimitGuardedTx, node.CoreComponentsHolder.EconomicsData().ExtraGasLimitGuardedTx()) + metrics.SaveStringMetric(node.StatusCoreComponents.AppStatusHandler(), common.MetricRewardsTopUpGradientPoint, node.CoreComponentsHolder.EconomicsData().RewardsTopUpGradientPoint().String()) + metrics.SaveStringMetric(node.StatusCoreComponents.AppStatusHandler(), 
common.MetricTopUpFactor, fmt.Sprintf("%g", node.CoreComponentsHolder.EconomicsData().RewardsTopUpFactor())) + metrics.SaveStringMetric(node.StatusCoreComponents.AppStatusHandler(), common.MetricGasPriceModifier, fmt.Sprintf("%g", node.CoreComponentsHolder.EconomicsData().GasPriceModifier())) + metrics.SaveUint64Metric(node.StatusCoreComponents.AppStatusHandler(), common.MetricMaxGasPerTransaction, node.CoreComponentsHolder.EconomicsData().MaxGasLimitPerTx()) + if configs.PreferencesConfig.Preferences.FullArchive { + metrics.SaveStringMetric(node.StatusCoreComponents.AppStatusHandler(), common.MetricPeerType, core.ObserverPeer.String()) + metrics.SaveStringMetric(node.StatusCoreComponents.AppStatusHandler(), common.MetricPeerSubType, core.FullHistoryObserver.String()) + } + + return nil +} diff --git a/node/chainSimulator/components/processComponents.go b/node/chainSimulator/components/processComponents.go new file mode 100644 index 00000000000..efa7af79c10 --- /dev/null +++ b/node/chainSimulator/components/processComponents.go @@ -0,0 +1,507 @@ +package components + +import ( + "fmt" + "io" + "math/big" + "path/filepath" + "time" + + "github.com/multiversx/mx-chain-go/common" + "github.com/multiversx/mx-chain-go/common/forking" + "github.com/multiversx/mx-chain-go/common/ordering" + "github.com/multiversx/mx-chain-go/config" + "github.com/multiversx/mx-chain-go/consensus" + "github.com/multiversx/mx-chain-go/dataRetriever" + "github.com/multiversx/mx-chain-go/dblookupext" + dbLookupFactory "github.com/multiversx/mx-chain-go/dblookupext/factory" + "github.com/multiversx/mx-chain-go/epochStart" + "github.com/multiversx/mx-chain-go/factory" + processComp "github.com/multiversx/mx-chain-go/factory/processing" + "github.com/multiversx/mx-chain-go/genesis" + "github.com/multiversx/mx-chain-go/genesis/parsing" + "github.com/multiversx/mx-chain-go/process" + "github.com/multiversx/mx-chain-go/process/interceptors/disabled" + "github.com/multiversx/mx-chain-go/sharding" + "github.com/multiversx/mx-chain-go/sharding/nodesCoordinator" + "github.com/multiversx/mx-chain-go/storage/cache" + "github.com/multiversx/mx-chain-go/update" + "github.com/multiversx/mx-chain-go/update/trigger" + vmcommon "github.com/multiversx/mx-chain-vm-common-go" +) + +// ArgsProcessComponentsHolder will hold the components needed for process components +type ArgsProcessComponentsHolder struct { + CoreComponents factory.CoreComponentsHolder + CryptoComponents factory.CryptoComponentsHolder + NetworkComponents factory.NetworkComponentsHolder + BootstrapComponents factory.BootstrapComponentsHolder + StateComponents factory.StateComponentsHolder + DataComponents factory.DataComponentsHolder + StatusComponents factory.StatusComponentsHolder + StatusCoreComponents factory.StatusCoreComponentsHolder + NodesCoordinator nodesCoordinator.NodesCoordinator + + EpochConfig config.EpochConfig + RoundConfig config.RoundConfig + ConfigurationPathsHolder config.ConfigurationPathsHolder + FlagsConfig config.ContextFlagsConfig + ImportDBConfig config.ImportDbConfig + PrefsConfig config.Preferences + Config config.Config + EconomicsConfig config.EconomicsConfig + SystemSCConfig config.SystemSmartContractsConfig + + GenesisNonce uint64 + GenesisRound uint64 +} + +type processComponentsHolder struct { + receiptsRepository factory.ReceiptsRepository + nodesCoordinator nodesCoordinator.NodesCoordinator + shardCoordinator sharding.Coordinator + interceptorsContainer process.InterceptorsContainer + fullArchiveInterceptorsContainer 
process.InterceptorsContainer + resolversContainer dataRetriever.ResolversContainer + requestersFinder dataRetriever.RequestersFinder + roundHandler consensus.RoundHandler + epochStartTrigger epochStart.TriggerHandler + epochStartNotifier factory.EpochStartNotifier + forkDetector process.ForkDetector + blockProcessor process.BlockProcessor + blackListHandler process.TimeCacher + bootStorer process.BootStorer + headerSigVerifier process.InterceptedHeaderSigVerifier + headerIntegrityVerifier process.HeaderIntegrityVerifier + validatorsStatistics process.ValidatorStatisticsProcessor + validatorsProvider process.ValidatorsProvider + blockTracker process.BlockTracker + pendingMiniBlocksHandler process.PendingMiniBlocksHandler + requestHandler process.RequestHandler + txLogsProcessor process.TransactionLogProcessorDatabase + headerConstructionValidator process.HeaderConstructionValidator + peerShardMapper process.NetworkShardingCollector + fullArchivePeerShardMapper process.NetworkShardingCollector + fallbackHeaderValidator process.FallbackHeaderValidator + apiTransactionEvaluator factory.TransactionEvaluator + whiteListHandler process.WhiteListHandler + whiteListerVerifiedTxs process.WhiteListHandler + historyRepository dblookupext.HistoryRepository + importStartHandler update.ImportStartHandler + requestedItemsHandler dataRetriever.RequestedItemsHandler + nodeRedundancyHandler consensus.NodeRedundancyHandler + currentEpochProvider process.CurrentNetworkEpochProviderHandler + scheduledTxsExecutionHandler process.ScheduledTxsExecutionHandler + txsSenderHandler process.TxsSenderHandler + hardforkTrigger factory.HardforkTrigger + processedMiniBlocksTracker process.ProcessedMiniBlocksTracker + esdtDataStorageHandlerForAPI vmcommon.ESDTNFTStorageHandler + accountsParser genesis.AccountsParser + sentSignatureTracker process.SentSignaturesTracker + managedProcessComponentsCloser io.Closer +} + +// CreateProcessComponents will create the process components holder +func CreateProcessComponents(args ArgsProcessComponentsHolder) (*processComponentsHolder, error) { + importStartHandler, err := trigger.NewImportStartHandler(filepath.Join(args.FlagsConfig.DbDir, common.DefaultDBPath), args.FlagsConfig.Version) + if err != nil { + return nil, err + } + totalSupply, ok := big.NewInt(0).SetString(args.EconomicsConfig.GlobalSettings.GenesisTotalSupply, 10) + if !ok { + return nil, fmt.Errorf("cannot parse total supply from economics.toml, %s is not a valid value", + args.EconomicsConfig.GlobalSettings.GenesisTotalSupply) + } + + mintingSenderAddress := args.EconomicsConfig.GlobalSettings.GenesisMintingSenderAddress + argsAccountsParser := genesis.AccountsParserArgs{ + GenesisFilePath: args.ConfigurationPathsHolder.Genesis, + EntireSupply: totalSupply, + MinterAddress: mintingSenderAddress, + PubkeyConverter: args.CoreComponents.AddressPubKeyConverter(), + KeyGenerator: args.CryptoComponents.TxSignKeyGen(), + Hasher: args.CoreComponents.Hasher(), + Marshalizer: args.CoreComponents.InternalMarshalizer(), + } + + accountsParser, err := parsing.NewAccountsParser(argsAccountsParser) + if err != nil { + return nil, err + } + + smartContractParser, err := parsing.NewSmartContractsParser( + args.ConfigurationPathsHolder.SmartContracts, + args.CoreComponents.AddressPubKeyConverter(), + args.CryptoComponents.TxSignKeyGen(), + ) + if err != nil { + return nil, err + } + + historyRepoFactoryArgs := &dbLookupFactory.ArgsHistoryRepositoryFactory{ + SelfShardID: args.BootstrapComponents.ShardCoordinator().SelfId(), + Config:
args.Config.DbLookupExtensions, + Hasher: args.CoreComponents.Hasher(), + Marshalizer: args.CoreComponents.InternalMarshalizer(), + Store: args.DataComponents.StorageService(), + Uint64ByteSliceConverter: args.CoreComponents.Uint64ByteSliceConverter(), + } + historyRepositoryFactory, err := dbLookupFactory.NewHistoryRepositoryFactory(historyRepoFactoryArgs) + if err != nil { + return nil, err + } + + whiteListRequest, err := disabled.NewDisabledWhiteListDataVerifier() + if err != nil { + return nil, err + } + + whiteListerVerifiedTxs, err := disabled.NewDisabledWhiteListDataVerifier() + if err != nil { + return nil, err + } + + historyRepository, err := historyRepositoryFactory.Create() + if err != nil { + return nil, err + } + + requestedItemsHandler := cache.NewTimeCache( + time.Duration(uint64(time.Millisecond) * args.CoreComponents.GenesisNodesSetup().GetRoundDuration())) + + txExecutionOrderHandler := ordering.NewOrderedCollection() + + argsGasScheduleNotifier := forking.ArgsNewGasScheduleNotifier{ + GasScheduleConfig: args.EpochConfig.GasSchedule, + ConfigDir: args.ConfigurationPathsHolder.GasScheduleDirectoryName, + EpochNotifier: args.CoreComponents.EpochNotifier(), + WasmVMChangeLocker: args.CoreComponents.WasmVMChangeLocker(), + } + gasScheduleNotifier, err := forking.NewGasScheduleNotifier(argsGasScheduleNotifier) + if err != nil { + return nil, err + } + + processArgs := processComp.ProcessComponentsFactoryArgs{ + Config: args.Config, + EpochConfig: args.EpochConfig, + RoundConfig: args.RoundConfig, + PrefConfigs: args.PrefsConfig, + ImportDBConfig: args.ImportDBConfig, + EconomicsConfig: args.EconomicsConfig, + AccountsParser: accountsParser, + SmartContractParser: smartContractParser, + GasSchedule: gasScheduleNotifier, + NodesCoordinator: args.NodesCoordinator, + RequestedItemsHandler: requestedItemsHandler, + WhiteListHandler: whiteListRequest, + WhiteListerVerifiedTxs: whiteListerVerifiedTxs, + MaxRating: 50, + SystemSCConfig: &args.SystemSCConfig, + ImportStartHandler: importStartHandler, + HistoryRepo: historyRepository, + FlagsConfig: args.FlagsConfig, + Data: args.DataComponents, + CoreData: args.CoreComponents, + Crypto: args.CryptoComponents, + State: args.StateComponents, + Network: args.NetworkComponents, + BootstrapComponents: args.BootstrapComponents, + StatusComponents: args.StatusComponents, + StatusCoreComponents: args.StatusCoreComponents, + TxExecutionOrderHandler: txExecutionOrderHandler, + GenesisNonce: args.GenesisNonce, + GenesisRound: args.GenesisRound, + } + processComponentsFactory, err := processComp.NewProcessComponentsFactory(processArgs) + if err != nil { + return nil, fmt.Errorf("NewProcessComponentsFactory failed: %w", err) + } + + managedProcessComponents, err := processComp.NewManagedProcessComponents(processComponentsFactory) + if err != nil { + return nil, err + } + + err = managedProcessComponents.Create() + if err != nil { + return nil, err + } + + instance := &processComponentsHolder{ + receiptsRepository: managedProcessComponents.ReceiptsRepository(), + nodesCoordinator: managedProcessComponents.NodesCoordinator(), + shardCoordinator: managedProcessComponents.ShardCoordinator(), + interceptorsContainer: managedProcessComponents.InterceptorsContainer(), + fullArchiveInterceptorsContainer: managedProcessComponents.FullArchiveInterceptorsContainer(), + resolversContainer: managedProcessComponents.ResolversContainer(), + requestersFinder: managedProcessComponents.RequestersFinder(), + roundHandler: managedProcessComponents.RoundHandler(), + 
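+ // note: each field of the holder simply captures the corresponding getter of the managed process components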
epochStartTrigger: managedProcessComponents.EpochStartTrigger(), + epochStartNotifier: managedProcessComponents.EpochStartNotifier(), + forkDetector: managedProcessComponents.ForkDetector(), + blockProcessor: managedProcessComponents.BlockProcessor(), + blackListHandler: managedProcessComponents.BlackListHandler(), + bootStorer: managedProcessComponents.BootStorer(), + headerSigVerifier: managedProcessComponents.HeaderSigVerifier(), + headerIntegrityVerifier: managedProcessComponents.HeaderIntegrityVerifier(), + validatorsStatistics: managedProcessComponents.ValidatorsStatistics(), + validatorsProvider: managedProcessComponents.ValidatorsProvider(), + blockTracker: managedProcessComponents.BlockTracker(), + pendingMiniBlocksHandler: managedProcessComponents.PendingMiniBlocksHandler(), + requestHandler: managedProcessComponents.RequestHandler(), + txLogsProcessor: managedProcessComponents.TxLogsProcessor(), + headerConstructionValidator: managedProcessComponents.HeaderConstructionValidator(), + peerShardMapper: managedProcessComponents.PeerShardMapper(), + fullArchivePeerShardMapper: managedProcessComponents.FullArchivePeerShardMapper(), + fallbackHeaderValidator: managedProcessComponents.FallbackHeaderValidator(), + apiTransactionEvaluator: managedProcessComponents.APITransactionEvaluator(), + whiteListHandler: managedProcessComponents.WhiteListHandler(), + whiteListerVerifiedTxs: managedProcessComponents.WhiteListerVerifiedTxs(), + historyRepository: managedProcessComponents.HistoryRepository(), + importStartHandler: managedProcessComponents.ImportStartHandler(), + requestedItemsHandler: managedProcessComponents.RequestedItemsHandler(), + nodeRedundancyHandler: managedProcessComponents.NodeRedundancyHandler(), + currentEpochProvider: managedProcessComponents.CurrentEpochProvider(), + scheduledTxsExecutionHandler: managedProcessComponents.ScheduledTxsExecutionHandler(), + txsSenderHandler: managedProcessComponents.TxsSenderHandler(), + hardforkTrigger: managedProcessComponents.HardforkTrigger(), + processedMiniBlocksTracker: managedProcessComponents.ProcessedMiniBlocksTracker(), + esdtDataStorageHandlerForAPI: managedProcessComponents.ESDTDataStorageHandlerForAPI(), + accountsParser: managedProcessComponents.AccountsParser(), + sentSignatureTracker: managedProcessComponents.SentSignaturesTracker(), + managedProcessComponentsCloser: managedProcessComponents, + } + + return instance, nil +} + +// SentSignaturesTracker will return the sent signature tracker +func (p *processComponentsHolder) SentSignaturesTracker() process.SentSignaturesTracker { + return p.sentSignatureTracker +} + +// NodesCoordinator will return the nodes coordinator +func (p *processComponentsHolder) NodesCoordinator() nodesCoordinator.NodesCoordinator { + return p.nodesCoordinator +} + +// ShardCoordinator will return the shard coordinator +func (p *processComponentsHolder) ShardCoordinator() sharding.Coordinator { + return p.shardCoordinator +} + +// InterceptorsContainer will return the interceptors container +func (p *processComponentsHolder) InterceptorsContainer() process.InterceptorsContainer { + return p.interceptorsContainer +} + +// FullArchiveInterceptorsContainer will return the full archive interceptor container +func (p *processComponentsHolder) FullArchiveInterceptorsContainer() process.InterceptorsContainer { + return p.fullArchiveInterceptorsContainer +} + +// ResolversContainer will return the resolvers container +func (p *processComponentsHolder) ResolversContainer() dataRetriever.ResolversContainer { + 
return p.resolversContainer +} + +// RequestersFinder will return the requesters finder +func (p *processComponentsHolder) RequestersFinder() dataRetriever.RequestersFinder { + return p.requestersFinder +} + +// RoundHandler will return the round handler +func (p *processComponentsHolder) RoundHandler() consensus.RoundHandler { + return p.roundHandler +} + +// EpochStartTrigger will return the epoch start trigger +func (p *processComponentsHolder) EpochStartTrigger() epochStart.TriggerHandler { + return p.epochStartTrigger +} + +// EpochStartNotifier will return the epoch start notifier +func (p *processComponentsHolder) EpochStartNotifier() factory.EpochStartNotifier { + return p.epochStartNotifier +} + +// ForkDetector will return the fork detector +func (p *processComponentsHolder) ForkDetector() process.ForkDetector { + return p.forkDetector +} + +// BlockProcessor will return the block processor +func (p *processComponentsHolder) BlockProcessor() process.BlockProcessor { + return p.blockProcessor +} + +// BlackListHandler will return the black list handler +func (p *processComponentsHolder) BlackListHandler() process.TimeCacher { + return p.blackListHandler +} + +// BootStorer will return the boot storer +func (p *processComponentsHolder) BootStorer() process.BootStorer { + return p.bootStorer +} + +// HeaderSigVerifier will return the header sign verifier +func (p *processComponentsHolder) HeaderSigVerifier() process.InterceptedHeaderSigVerifier { + return p.headerSigVerifier +} + +// HeaderIntegrityVerifier will return the header integrity verifier +func (p *processComponentsHolder) HeaderIntegrityVerifier() process.HeaderIntegrityVerifier { + return p.headerIntegrityVerifier +} + +// ValidatorsStatistics will return the validators statistics +func (p *processComponentsHolder) ValidatorsStatistics() process.ValidatorStatisticsProcessor { + return p.validatorsStatistics +} + +// ValidatorsProvider will return the validators provider +func (p *processComponentsHolder) ValidatorsProvider() process.ValidatorsProvider { + return p.validatorsProvider +} + +// BlockTracker will return the block tracker +func (p *processComponentsHolder) BlockTracker() process.BlockTracker { + return p.blockTracker +} + +// PendingMiniBlocksHandler will return the pending miniblocks handler +func (p *processComponentsHolder) PendingMiniBlocksHandler() process.PendingMiniBlocksHandler { + return p.pendingMiniBlocksHandler +} + +// RequestHandler will return the request handler +func (p *processComponentsHolder) RequestHandler() process.RequestHandler { + return p.requestHandler +} + +// TxLogsProcessor will return the transaction log processor +func (p *processComponentsHolder) TxLogsProcessor() process.TransactionLogProcessorDatabase { + return p.txLogsProcessor +} + +// HeaderConstructionValidator will return the header construction validator +func (p *processComponentsHolder) HeaderConstructionValidator() process.HeaderConstructionValidator { + return p.headerConstructionValidator +} + +// PeerShardMapper will return the peer shard mapper +func (p *processComponentsHolder) PeerShardMapper() process.NetworkShardingCollector { + return p.peerShardMapper +} + +// FullArchivePeerShardMapper will return the full archive peer shard mapper +func (p *processComponentsHolder) FullArchivePeerShardMapper() process.NetworkShardingCollector { + return p.fullArchivePeerShardMapper +} + +// FallbackHeaderValidator will return the fallback header validator +func (p *processComponentsHolder) FallbackHeaderValidator() 
process.FallbackHeaderValidator { + return p.fallbackHeaderValidator +} + +// APITransactionEvaluator will return the api transaction evaluator +func (p *processComponentsHolder) APITransactionEvaluator() factory.TransactionEvaluator { + return p.apiTransactionEvaluator +} + +// WhiteListHandler will return the white list handler +func (p *processComponentsHolder) WhiteListHandler() process.WhiteListHandler { + return p.whiteListHandler +} + +// WhiteListerVerifiedTxs will return the white lister verifier +func (p *processComponentsHolder) WhiteListerVerifiedTxs() process.WhiteListHandler { + return p.whiteListerVerifiedTxs +} + +// HistoryRepository will return the history repository +func (p *processComponentsHolder) HistoryRepository() dblookupext.HistoryRepository { + return p.historyRepository +} + +// ImportStartHandler will return the import start handler +func (p *processComponentsHolder) ImportStartHandler() update.ImportStartHandler { + return p.importStartHandler +} + +// RequestedItemsHandler will return the requested item handler +func (p *processComponentsHolder) RequestedItemsHandler() dataRetriever.RequestedItemsHandler { + return p.requestedItemsHandler +} + +// NodeRedundancyHandler will return the node redundancy handler +func (p *processComponentsHolder) NodeRedundancyHandler() consensus.NodeRedundancyHandler { + return p.nodeRedundancyHandler +} + +// CurrentEpochProvider will return the current epoch provider +func (p *processComponentsHolder) CurrentEpochProvider() process.CurrentNetworkEpochProviderHandler { + return p.currentEpochProvider +} + +// ScheduledTxsExecutionHandler will return the scheduled transactions execution handler +func (p *processComponentsHolder) ScheduledTxsExecutionHandler() process.ScheduledTxsExecutionHandler { + return p.scheduledTxsExecutionHandler +} + +// TxsSenderHandler will return the transactions sender handler +func (p *processComponentsHolder) TxsSenderHandler() process.TxsSenderHandler { + return p.txsSenderHandler +} + +// HardforkTrigger will return the hardfork trigger +func (p *processComponentsHolder) HardforkTrigger() factory.HardforkTrigger { + return p.hardforkTrigger +} + +// ProcessedMiniBlocksTracker will return the processed miniblocks tracker +func (p *processComponentsHolder) ProcessedMiniBlocksTracker() process.ProcessedMiniBlocksTracker { + return p.processedMiniBlocksTracker +} + +// ESDTDataStorageHandlerForAPI will return the esdt data storage handler for api +func (p *processComponentsHolder) ESDTDataStorageHandlerForAPI() vmcommon.ESDTNFTStorageHandler { + return p.esdtDataStorageHandlerForAPI +} + +// AccountsParser will return the accounts parser +func (p *processComponentsHolder) AccountsParser() genesis.AccountsParser { + return p.accountsParser +} + +// ReceiptsRepository returns the receipts repository +func (p *processComponentsHolder) ReceiptsRepository() factory.ReceiptsRepository { + return p.receiptsRepository +} + +// Close will call the Close methods on all inner components +func (p *processComponentsHolder) Close() error { + return p.managedProcessComponentsCloser.Close() +} + +// IsInterfaceNil returns true if there is no value under the interface +func (p *processComponentsHolder) IsInterfaceNil() bool { + return p == nil +} + +// Create will do nothing +func (p *processComponentsHolder) Create() error { + return nil +} + +// CheckSubcomponents will do nothing +func (p *processComponentsHolder) CheckSubcomponents() error { + return nil +} + +// String will do nothing +func (p 
*processComponentsHolder) String() string { + return "" +} diff --git a/node/chainSimulator/components/processComponents_test.go b/node/chainSimulator/components/processComponents_test.go new file mode 100644 index 00000000000..89010da5fd5 --- /dev/null +++ b/node/chainSimulator/components/processComponents_test.go @@ -0,0 +1,425 @@ +package components + +import ( + "math/big" + "sync" + "testing" + + coreData "github.com/multiversx/mx-chain-core-go/data" + "github.com/multiversx/mx-chain-core-go/data/endProcess" + "github.com/multiversx/mx-chain-core-go/hashing/blake2b" + "github.com/multiversx/mx-chain-core-go/hashing/keccak" + "github.com/multiversx/mx-chain-core-go/marshal" + commonFactory "github.com/multiversx/mx-chain-go/common/factory" + disabledStatistics "github.com/multiversx/mx-chain-go/common/statistics/disabled" + "github.com/multiversx/mx-chain-go/config" + retriever "github.com/multiversx/mx-chain-go/dataRetriever" + mockFactory "github.com/multiversx/mx-chain-go/factory/mock" + "github.com/multiversx/mx-chain-go/integrationTests/mock" + "github.com/multiversx/mx-chain-go/sharding" + chainStorage "github.com/multiversx/mx-chain-go/storage" + "github.com/multiversx/mx-chain-go/testscommon" + "github.com/multiversx/mx-chain-go/testscommon/bootstrapMocks" + "github.com/multiversx/mx-chain-go/testscommon/components" + "github.com/multiversx/mx-chain-go/testscommon/cryptoMocks" + "github.com/multiversx/mx-chain-go/testscommon/dataRetriever" + "github.com/multiversx/mx-chain-go/testscommon/economicsmocks" + "github.com/multiversx/mx-chain-go/testscommon/enableEpochsHandlerMock" + "github.com/multiversx/mx-chain-go/testscommon/epochNotifier" + "github.com/multiversx/mx-chain-go/testscommon/factory" + "github.com/multiversx/mx-chain-go/testscommon/genericMocks" + "github.com/multiversx/mx-chain-go/testscommon/guardianMocks" + "github.com/multiversx/mx-chain-go/testscommon/mainFactoryMocks" + "github.com/multiversx/mx-chain-go/testscommon/nodeTypeProviderMock" + "github.com/multiversx/mx-chain-go/testscommon/outport" + "github.com/multiversx/mx-chain-go/testscommon/p2pmocks" + "github.com/multiversx/mx-chain-go/testscommon/shardingMocks" + "github.com/multiversx/mx-chain-go/testscommon/statusHandler" + "github.com/multiversx/mx-chain-go/testscommon/storage" + updateMocks "github.com/multiversx/mx-chain-go/update/mock" + "github.com/stretchr/testify/require" +) + +const testingProtocolSustainabilityAddress = "erd1932eft30w753xyvme8d49qejgkjc09n5e49w4mwdjtm0neld797su0dlxp" + +var ( + addrPubKeyConv, _ = commonFactory.NewPubkeyConverter(config.PubkeyConfig{ + Length: 32, + Type: "bech32", + SignatureLength: 0, + Hrp: "erd", + }) + valPubKeyConv, _ = commonFactory.NewPubkeyConverter(config.PubkeyConfig{ + Length: 96, + Type: "hex", + SignatureLength: 48, + }) +) + +func createArgsProcessComponentsHolder() ArgsProcessComponentsHolder { + nodesSetup, _ := sharding.NewNodesSetup("../../../integrationTests/factory/testdata/nodesSetup.json", addrPubKeyConv, valPubKeyConv, 3) + + args := ArgsProcessComponentsHolder{ + Config: testscommon.GetGeneralConfig(), + EpochConfig: config.EpochConfig{ + GasSchedule: config.GasScheduleConfig{ + GasScheduleByEpochs: []config.GasScheduleByEpochs{ + { + StartEpoch: 0, + FileName: "../../../cmd/node/config/gasSchedules/gasScheduleV7.toml", + }, + }, + }, + }, + RoundConfig: testscommon.GetDefaultRoundsConfig(), + PrefsConfig: config.Preferences{}, + ImportDBConfig: config.ImportDbConfig{}, + FlagsConfig: config.ContextFlagsConfig{ + Version: "v1.0.0", + }, 
+ NodesCoordinator: &shardingMocks.NodesCoordinatorStub{}, + SystemSCConfig: config.SystemSmartContractsConfig{ + ESDTSystemSCConfig: config.ESDTSystemSCConfig{ + BaseIssuingCost: "1000", + OwnerAddress: "erd1fpkcgel4gcmh8zqqdt043yfcn5tyx8373kg6q2qmkxzu4dqamc0swts65c", + }, + GovernanceSystemSCConfig: config.GovernanceSystemSCConfig{ + V1: config.GovernanceSystemSCConfigV1{ + ProposalCost: "500", + NumNodes: 100, + MinQuorum: 50, + MinPassThreshold: 50, + MinVetoThreshold: 50, + }, + Active: config.GovernanceSystemSCConfigActive{ + ProposalCost: "500", + MinQuorum: 0.5, + MinPassThreshold: 0.5, + MinVetoThreshold: 0.5, + }, + OwnerAddress: "erd1vxy22x0fj4zv6hktmydg8vpfh6euv02cz4yg0aaws6rrad5a5awqgqky80", + }, + StakingSystemSCConfig: config.StakingSystemSCConfig{ + GenesisNodePrice: "2500000000000000000000", + MinStakeValue: "1", + UnJailValue: "1", + MinStepValue: "1", + UnBondPeriod: 0, + NumRoundsWithoutBleed: 0, + MaximumPercentageToBleed: 0, + BleedPercentagePerRound: 0, + MaxNumberOfNodesForStake: 10, + ActivateBLSPubKeyMessageVerification: false, + MinUnstakeTokensValue: "1", + NodeLimitPercentage: 0.1, + StakeLimitPercentage: 1, + UnBondPeriodInEpochs: 10, + }, + DelegationManagerSystemSCConfig: config.DelegationManagerSystemSCConfig{ + MinCreationDeposit: "100", + MinStakeAmount: "100", + ConfigChangeAddress: "erd1vxy22x0fj4zv6hktmydg8vpfh6euv02cz4yg0aaws6rrad5a5awqgqky80", + }, + DelegationSystemSCConfig: config.DelegationSystemSCConfig{ + MinServiceFee: 0, + MaxServiceFee: 100, + }, + }, + DataComponents: &mock.DataComponentsStub{ + DataPool: dataRetriever.NewPoolsHolderMock(), + BlockChain: &testscommon.ChainHandlerStub{ + GetGenesisHeaderHashCalled: func() []byte { + return []byte("genesis hash") + }, + GetGenesisHeaderCalled: func() coreData.HeaderHandler { + return &testscommon.HeaderHandlerStub{} + }, + }, + MbProvider: &mock.MiniBlocksProviderStub{}, + Store: genericMocks.NewChainStorerMock(0), + }, + CoreComponents: &mockFactory.CoreComponentsMock{ + IntMarsh: &marshal.GogoProtoMarshalizer{}, + TxMarsh: &marshal.JsonMarshalizer{}, + UInt64ByteSliceConv: &mock.Uint64ByteSliceConverterMock{}, + AddrPubKeyConv: addrPubKeyConv, + ValPubKeyConv: valPubKeyConv, + NodesConfig: nodesSetup, + EpochChangeNotifier: &epochNotifier.EpochNotifierStub{}, + EconomicsHandler: &economicsmocks.EconomicsHandlerStub{ + ProtocolSustainabilityAddressCalled: func() string { + return testingProtocolSustainabilityAddress + }, + GenesisTotalSupplyCalled: func() *big.Int { + return big.NewInt(0).Mul(big.NewInt(1000000000000000000), big.NewInt(20000000)) + }, + }, + Hash: blake2b.NewBlake2b(), + TxVersionCheckHandler: &testscommon.TxVersionCheckerStub{}, + RatingHandler: &testscommon.RaterMock{}, + EnableEpochsHandlerField: &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, + EnableRoundsHandlerField: &testscommon.EnableRoundsHandlerStub{}, + EpochNotifierWithConfirm: &updateMocks.EpochStartNotifierStub{}, + RoundHandlerField: &testscommon.RoundHandlerMock{}, + RoundChangeNotifier: &epochNotifier.RoundNotifierStub{}, + ChanStopProcess: make(chan endProcess.ArgEndProcess, 1), + TxSignHasherField: keccak.NewKeccak(), + HardforkTriggerPubKeyField: []byte("hardfork pub key"), + WasmVMChangeLockerInternal: &sync.RWMutex{}, + NodeTypeProviderField: &nodeTypeProviderMock.NodeTypeProviderStub{}, + RatingsConfig: &testscommon.RatingsInfoMock{}, + PathHdl: &testscommon.PathManagerStub{}, + ProcessStatusHandlerInternal: &testscommon.ProcessStatusHandlerStub{}, + }, + CryptoComponents: 
&mock.CryptoComponentsStub{ + BlKeyGen: &cryptoMocks.KeyGenStub{}, + BlockSig: &cryptoMocks.SingleSignerStub{}, + MultiSigContainer: &cryptoMocks.MultiSignerContainerMock{ + MultiSigner: &cryptoMocks.MultisignerMock{}, + }, + PrivKey: &cryptoMocks.PrivateKeyStub{}, + PubKey: &cryptoMocks.PublicKeyStub{}, + PubKeyString: "pub key string", + PubKeyBytes: []byte("pub key bytes"), + TxKeyGen: &cryptoMocks.KeyGenStub{}, + TxSig: &cryptoMocks.SingleSignerStub{}, + PeerSignHandler: &cryptoMocks.PeerSignatureHandlerStub{}, + MsgSigVerifier: &testscommon.MessageSignVerifierMock{}, + ManagedPeersHolderField: &testscommon.ManagedPeersHolderStub{}, + KeysHandlerField: &testscommon.KeysHandlerStub{}, + }, + NetworkComponents: &mock.NetworkComponentsStub{ + Messenger: &p2pmocks.MessengerStub{}, + FullArchiveNetworkMessengerField: &p2pmocks.MessengerStub{}, + InputAntiFlood: &mock.P2PAntifloodHandlerStub{}, + OutputAntiFlood: &mock.P2PAntifloodHandlerStub{}, + PreferredPeersHolder: &p2pmocks.PeersHolderStub{}, + PeersRatingHandlerField: &p2pmocks.PeersRatingHandlerStub{}, + FullArchivePreferredPeersHolder: &p2pmocks.PeersHolderStub{}, + }, + BootstrapComponents: &mainFactoryMocks.BootstrapComponentsStub{ + ShCoordinator: mock.NewMultiShardsCoordinatorMock(2), + BootstrapParams: &bootstrapMocks.BootstrapParamsHandlerMock{}, + HdrIntegrityVerifier: &mock.HeaderIntegrityVerifierStub{}, + GuardedAccountHandlerField: &guardianMocks.GuardedAccountHandlerStub{}, + VersionedHdrFactory: &testscommon.VersionedHeaderFactoryStub{}, + }, + StatusComponents: &mock.StatusComponentsStub{ + Outport: &outport.OutportStub{}, + }, + StatusCoreComponents: &factory.StatusCoreComponentsStub{ + AppStatusHandlerField: &statusHandler.AppStatusHandlerStub{}, + StateStatsHandlerField: disabledStatistics.NewStateStatistics(), + }, + EconomicsConfig: config.EconomicsConfig{ + GlobalSettings: config.GlobalSettings{ + GenesisTotalSupply: "20000000000000000000000000", + MinimumInflation: 0, + GenesisMintingSenderAddress: "erd17rc0pu8s7rc0pu8s7rc0pu8s7rc0pu8s7rc0pu8s7rc0pu8s7rcqqkhty3", + YearSettings: []*config.YearSetting{ + { + Year: 0, + MaximumInflation: 0.01, + }, + }, + }, + }, + ConfigurationPathsHolder: config.ConfigurationPathsHolder{ + Genesis: "../../../integrationTests/factory/testdata/genesis.json", + SmartContracts: "../../../integrationTests/factory/testdata/genesisSmartContracts.json", + Nodes: "../../../integrationTests/factory/testdata/genesis.json", + }, + } + + args.StateComponents = components.GetStateComponents(args.CoreComponents, args.StatusCoreComponents) + return args +} + +func TestCreateProcessComponents(t *testing.T) { + t.Parallel() + + t.Run("should work", func(t *testing.T) { + // TODO reinstate test after Wasm VM pointer fix + if testing.Short() { + t.Skip("cannot run with -race -short; requires Wasm VM fix") + } + + t.Parallel() + + comp, err := CreateProcessComponents(createArgsProcessComponentsHolder()) + require.NoError(t, err) + require.NotNil(t, comp) + + require.Nil(t, comp.Create()) + require.Nil(t, comp.Close()) + }) + t.Run("NewImportStartHandler failure should error", func(t *testing.T) { + t.Parallel() + + args := createArgsProcessComponentsHolder() + args.FlagsConfig.Version = "" + comp, err := CreateProcessComponents(args) + require.Error(t, err) + require.Nil(t, comp) + }) + t.Run("total supply conversion failure should error", func(t *testing.T) { + t.Parallel() + + args := createArgsProcessComponentsHolder() + args.EconomicsConfig.GlobalSettings.GenesisTotalSupply = "invalid number" + 
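+ // "invalid number" cannot be parsed by big.Int.SetString, so CreateProcessComponents is expected to fail on the total supply conversion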
comp, err := CreateProcessComponents(args) + require.Error(t, err) + require.Nil(t, comp) + }) + t.Run("NewAccountsParser failure should error", func(t *testing.T) { + t.Parallel() + + args := createArgsProcessComponentsHolder() + args.ConfigurationPathsHolder.Genesis = "" + comp, err := CreateProcessComponents(args) + require.Error(t, err) + require.Nil(t, comp) + }) + t.Run("NewSmartContractsParser failure should error", func(t *testing.T) { + t.Parallel() + + args := createArgsProcessComponentsHolder() + args.ConfigurationPathsHolder.SmartContracts = "" + comp, err := CreateProcessComponents(args) + require.Error(t, err) + require.Nil(t, comp) + }) + t.Run("NewHistoryRepositoryFactory failure should error", func(t *testing.T) { + t.Parallel() + + args := createArgsProcessComponentsHolder() + dataMock, ok := args.DataComponents.(*mock.DataComponentsStub) + require.True(t, ok) + dataMock.Store = nil + comp, err := CreateProcessComponents(args) + require.Error(t, err) + require.Nil(t, comp) + }) + t.Run("historyRepositoryFactory.Create failure should error", func(t *testing.T) { + t.Parallel() + + args := createArgsProcessComponentsHolder() + args.Config.DbLookupExtensions.Enabled = true + dataMock, ok := args.DataComponents.(*mock.DataComponentsStub) + require.True(t, ok) + dataMock.Store = &storage.ChainStorerStub{ + GetStorerCalled: func(unitType retriever.UnitType) (chainStorage.Storer, error) { + if unitType == retriever.ESDTSuppliesUnit { + return nil, expectedErr + } + return &storage.StorerStub{}, nil + }, + } + comp, err := CreateProcessComponents(args) + require.Error(t, err) + require.Nil(t, comp) + }) + t.Run("NewGasScheduleNotifier failure should error", func(t *testing.T) { + t.Parallel() + + args := createArgsProcessComponentsHolder() + args.EpochConfig.GasSchedule = config.GasScheduleConfig{} + comp, err := CreateProcessComponents(args) + require.Error(t, err) + require.Nil(t, comp) + }) + t.Run("NewProcessComponentsFactory failure should error", func(t *testing.T) { + t.Parallel() + + args := createArgsProcessComponentsHolder() + dataMock, ok := args.DataComponents.(*mock.DataComponentsStub) + require.True(t, ok) + dataMock.BlockChain = nil + comp, err := CreateProcessComponents(args) + require.Error(t, err) + require.Nil(t, comp) + }) + t.Run("managedProcessComponents.Create failure should error", func(t *testing.T) { + t.Parallel() + + args := createArgsProcessComponentsHolder() + args.NodesCoordinator = nil + comp, err := CreateProcessComponents(args) + require.Error(t, err) + require.Nil(t, comp) + }) +} + +func TestProcessComponentsHolder_IsInterfaceNil(t *testing.T) { + // TODO reinstate test after Wasm VM pointer fix + if testing.Short() { + t.Skip("cannot run with -race -short; requires Wasm VM fix") + } + + t.Parallel() + + var comp *processComponentsHolder + require.True(t, comp.IsInterfaceNil()) + + comp, _ = CreateProcessComponents(createArgsProcessComponentsHolder()) + require.False(t, comp.IsInterfaceNil()) + require.Nil(t, comp.Close()) +} + +func TestProcessComponentsHolder_Getters(t *testing.T) { + // TODO reinstate test after Wasm VM pointer fix + if testing.Short() { + t.Skip("cannot run with -race -short; requires Wasm VM fix") + } + + t.Parallel() + + comp, err := CreateProcessComponents(createArgsProcessComponentsHolder()) + require.NoError(t, err) + + require.NotNil(t, comp.SentSignaturesTracker()) + require.NotNil(t, comp.NodesCoordinator()) + require.NotNil(t, comp.ShardCoordinator()) + require.NotNil(t, comp.InterceptorsContainer()) + 
require.NotNil(t, comp.FullArchiveInterceptorsContainer()) + require.NotNil(t, comp.ResolversContainer()) + require.NotNil(t, comp.RequestersFinder()) + require.NotNil(t, comp.RoundHandler()) + require.NotNil(t, comp.EpochStartTrigger()) + require.NotNil(t, comp.EpochStartNotifier()) + require.NotNil(t, comp.ForkDetector()) + require.NotNil(t, comp.BlockProcessor()) + require.NotNil(t, comp.BlackListHandler()) + require.NotNil(t, comp.BootStorer()) + require.NotNil(t, comp.HeaderSigVerifier()) + require.NotNil(t, comp.HeaderIntegrityVerifier()) + require.NotNil(t, comp.ValidatorsStatistics()) + require.NotNil(t, comp.ValidatorsProvider()) + require.NotNil(t, comp.BlockTracker()) + require.NotNil(t, comp.PendingMiniBlocksHandler()) + require.NotNil(t, comp.RequestHandler()) + require.NotNil(t, comp.TxLogsProcessor()) + require.NotNil(t, comp.HeaderConstructionValidator()) + require.NotNil(t, comp.PeerShardMapper()) + require.NotNil(t, comp.FullArchivePeerShardMapper()) + require.NotNil(t, comp.FallbackHeaderValidator()) + require.NotNil(t, comp.APITransactionEvaluator()) + require.NotNil(t, comp.WhiteListHandler()) + require.NotNil(t, comp.WhiteListerVerifiedTxs()) + require.NotNil(t, comp.HistoryRepository()) + require.NotNil(t, comp.ImportStartHandler()) + require.NotNil(t, comp.RequestedItemsHandler()) + require.NotNil(t, comp.NodeRedundancyHandler()) + require.NotNil(t, comp.CurrentEpochProvider()) + require.NotNil(t, comp.ScheduledTxsExecutionHandler()) + require.NotNil(t, comp.TxsSenderHandler()) + require.NotNil(t, comp.HardforkTrigger()) + require.NotNil(t, comp.ProcessedMiniBlocksTracker()) + require.NotNil(t, comp.ESDTDataStorageHandlerForAPI()) + require.NotNil(t, comp.AccountsParser()) + require.NotNil(t, comp.ReceiptsRepository()) + require.Nil(t, comp.CheckSubcomponents()) + require.Empty(t, comp.String()) + + require.Nil(t, comp.Close()) +} diff --git a/node/chainSimulator/components/stateComponents.go b/node/chainSimulator/components/stateComponents.go new file mode 100644 index 00000000000..b3fddf55f40 --- /dev/null +++ b/node/chainSimulator/components/stateComponents.go @@ -0,0 +1,135 @@ +package components + +import ( + "io" + + chainData "github.com/multiversx/mx-chain-core-go/data" + "github.com/multiversx/mx-chain-go/common" + "github.com/multiversx/mx-chain-go/config" + "github.com/multiversx/mx-chain-go/dataRetriever" + "github.com/multiversx/mx-chain-go/factory" + factoryState "github.com/multiversx/mx-chain-go/factory/state" + "github.com/multiversx/mx-chain-go/state" +) + +// ArgsStateComponents will hold the components needed for state components +type ArgsStateComponents struct { + Config config.Config + CoreComponents factory.CoreComponentsHolder + StatusCore factory.StatusCoreComponentsHolder + StoreService dataRetriever.StorageService + ChainHandler chainData.ChainHandler +} + +type stateComponentsHolder struct { + peerAccount state.AccountsAdapter + accountsAdapter state.AccountsAdapter + accountsAdapterAPI state.AccountsAdapter + accountsRepository state.AccountsRepository + triesContainer common.TriesHolder + triesStorageManager map[string]common.StorageManager + missingTrieNodesNotifier common.MissingTrieNodesNotifier + stateComponentsCloser io.Closer +} + +// CreateStateComponents will create the state components holder +func CreateStateComponents(args ArgsStateComponents) (*stateComponentsHolder, error) { + stateComponentsFactory, err := factoryState.NewStateComponentsFactory(factoryState.StateComponentsFactoryArgs{ + Config: args.Config, + Core: 
args.CoreComponents, + StatusCore: args.StatusCore, + StorageService: args.StoreService, + ProcessingMode: common.Normal, + ShouldSerializeSnapshots: false, + ChainHandler: args.ChainHandler, + }) + if err != nil { + return nil, err + } + + stateComp, err := factoryState.NewManagedStateComponents(stateComponentsFactory) + if err != nil { + return nil, err + } + + err = stateComp.Create() + if err != nil { + return nil, err + } + + err = stateComp.CheckSubcomponents() + if err != nil { + return nil, err + } + + return &stateComponentsHolder{ + peerAccount: stateComp.PeerAccounts(), + accountsAdapter: stateComp.AccountsAdapter(), + accountsAdapterAPI: stateComp.AccountsAdapterAPI(), + accountsRepository: stateComp.AccountsRepository(), + triesContainer: stateComp.TriesContainer(), + triesStorageManager: stateComp.TrieStorageManagers(), + missingTrieNodesNotifier: stateComp.MissingTrieNodesNotifier(), + stateComponentsCloser: stateComp, + }, nil +} + +// PeerAccounts will return peer accounts +func (s *stateComponentsHolder) PeerAccounts() state.AccountsAdapter { + return s.peerAccount +} + +// AccountsAdapter will return accounts adapter +func (s *stateComponentsHolder) AccountsAdapter() state.AccountsAdapter { + return s.accountsAdapter +} + +// AccountsAdapterAPI will return accounts adapter api +func (s *stateComponentsHolder) AccountsAdapterAPI() state.AccountsAdapter { + return s.accountsAdapterAPI +} + +// AccountsRepository will return accounts repository +func (s *stateComponentsHolder) AccountsRepository() state.AccountsRepository { + return s.accountsRepository +} + +// TriesContainer will return tries container +func (s *stateComponentsHolder) TriesContainer() common.TriesHolder { + return s.triesContainer +} + +// TrieStorageManagers will return trie storage managers +func (s *stateComponentsHolder) TrieStorageManagers() map[string]common.StorageManager { + return s.triesStorageManager +} + +// MissingTrieNodesNotifier will return missing trie nodes notifier +func (s *stateComponentsHolder) MissingTrieNodesNotifier() common.MissingTrieNodesNotifier { + return s.missingTrieNodesNotifier +} + +// Close will close the state components +func (s *stateComponentsHolder) Close() error { + return s.stateComponentsCloser.Close() +} + +// IsInterfaceNil returns true if there is no value under the interface +func (s *stateComponentsHolder) IsInterfaceNil() bool { + return s == nil +} + +// Create will do nothing +func (s *stateComponentsHolder) Create() error { + return nil +} + +// CheckSubcomponents will do nothing +func (s *stateComponentsHolder) CheckSubcomponents() error { + return nil +} + +// String will do nothing +func (s *stateComponentsHolder) String() string { + return "" +} diff --git a/node/chainSimulator/components/stateComponents_test.go b/node/chainSimulator/components/stateComponents_test.go new file mode 100644 index 00000000000..5422d2ea352 --- /dev/null +++ b/node/chainSimulator/components/stateComponents_test.go @@ -0,0 +1,99 @@ +package components + +import ( + "testing" + + disabledStatistics "github.com/multiversx/mx-chain-go/common/statistics/disabled" + mockFactory "github.com/multiversx/mx-chain-go/factory/mock" + "github.com/multiversx/mx-chain-go/testscommon" + "github.com/multiversx/mx-chain-go/testscommon/enableEpochsHandlerMock" + "github.com/multiversx/mx-chain-go/testscommon/factory" + "github.com/multiversx/mx-chain-go/testscommon/genericMocks" + "github.com/multiversx/mx-chain-go/testscommon/statusHandler" + "github.com/stretchr/testify/require" +) + 
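+// createArgsStateComponents returns ArgsStateComponents built from lightweight stubs and an in-memory chain storer, so the state components can be created in unit tests without real storage.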
+func createArgsStateComponents() ArgsStateComponents { + return ArgsStateComponents{ + Config: testscommon.GetGeneralConfig(), + CoreComponents: &mockFactory.CoreComponentsMock{ + IntMarsh: &testscommon.MarshallerStub{}, + Hash: &testscommon.HasherStub{}, + PathHdl: &testscommon.PathManagerStub{}, + ProcessStatusHandlerInternal: &testscommon.ProcessStatusHandlerStub{}, + EnableEpochsHandlerField: &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, + AddrPubKeyConv: &testscommon.PubkeyConverterStub{}, + }, + StatusCore: &factory.StatusCoreComponentsStub{ + AppStatusHandlerField: &statusHandler.AppStatusHandlerStub{}, + StateStatsHandlerField: disabledStatistics.NewStateStatistics(), + }, + StoreService: genericMocks.NewChainStorerMock(0), + ChainHandler: &testscommon.ChainHandlerStub{}, + } +} + +func TestCreateStateComponents(t *testing.T) { + t.Parallel() + + t.Run("should work", func(t *testing.T) { + t.Parallel() + + comp, err := CreateStateComponents(createArgsStateComponents()) + require.NoError(t, err) + require.NotNil(t, comp) + + require.Nil(t, comp.Create()) + require.Nil(t, comp.Close()) + }) + t.Run("NewStateComponentsFactory failure should error", func(t *testing.T) { + t.Parallel() + + args := createArgsStateComponents() + args.CoreComponents = nil + comp, err := CreateStateComponents(args) + require.Error(t, err) + require.Nil(t, comp) + }) + t.Run("stateComp.Create failure should error", func(t *testing.T) { + t.Parallel() + + args := createArgsStateComponents() + coreMock, ok := args.CoreComponents.(*mockFactory.CoreComponentsMock) + require.True(t, ok) + coreMock.EnableEpochsHandlerField = nil + comp, err := CreateStateComponents(args) + require.Error(t, err) + require.Nil(t, comp) + }) +} + +func TestStateComponentsHolder_IsInterfaceNil(t *testing.T) { + t.Parallel() + + var comp *stateComponentsHolder + require.True(t, comp.IsInterfaceNil()) + + comp, _ = CreateStateComponents(createArgsStateComponents()) + require.False(t, comp.IsInterfaceNil()) + require.Nil(t, comp.Close()) +} + +func TestStateComponentsHolder_Getters(t *testing.T) { + t.Parallel() + + comp, err := CreateStateComponents(createArgsStateComponents()) + require.NoError(t, err) + + require.NotNil(t, comp.PeerAccounts()) + require.NotNil(t, comp.AccountsAdapter()) + require.NotNil(t, comp.AccountsAdapterAPI()) + require.NotNil(t, comp.AccountsRepository()) + require.NotNil(t, comp.TriesContainer()) + require.NotNil(t, comp.TrieStorageManagers()) + require.NotNil(t, comp.MissingTrieNodesNotifier()) + require.Nil(t, comp.CheckSubcomponents()) + require.Empty(t, comp.String()) + + require.Nil(t, comp.Close()) +} diff --git a/node/chainSimulator/components/statusComponents.go b/node/chainSimulator/components/statusComponents.go new file mode 100644 index 00000000000..65f9dbb7667 --- /dev/null +++ b/node/chainSimulator/components/statusComponents.go @@ -0,0 +1,158 @@ +package components + +import ( + "context" + "fmt" + "sync" + "time" + + "github.com/multiversx/mx-chain-core-go/core" + "github.com/multiversx/mx-chain-core-go/core/appStatusPolling" + "github.com/multiversx/mx-chain-core-go/core/check" + outportCfg "github.com/multiversx/mx-chain-core-go/data/outport" + "github.com/multiversx/mx-chain-go/common" + "github.com/multiversx/mx-chain-go/common/statistics" + "github.com/multiversx/mx-chain-go/errors" + "github.com/multiversx/mx-chain-go/integrationTests/mock" + "github.com/multiversx/mx-chain-go/outport" + "github.com/multiversx/mx-chain-go/process" + "github.com/multiversx/mx-chain-go/testscommon" 
+) + +type statusComponentsHolder struct { + closeHandler *closeHandler + outportHandler outport.OutportHandler + softwareVersionChecker statistics.SoftwareVersionChecker + managedPeerMonitor common.ManagedPeersMonitor + appStatusHandler core.AppStatusHandler + forkDetector process.ForkDetector + statusPollingIntervalSec int + cancelFunc func() + mutex sync.RWMutex +} + +// CreateStatusComponents will create a new instance of status components holder +func CreateStatusComponents(shardID uint32, appStatusHandler core.AppStatusHandler, statusPollingIntervalSec int) (*statusComponentsHolder, error) { + if check.IfNil(appStatusHandler) { + return nil, core.ErrNilAppStatusHandler + } + + var err error + instance := &statusComponentsHolder{ + closeHandler: NewCloseHandler(), + appStatusHandler: appStatusHandler, + statusPollingIntervalSec: statusPollingIntervalSec, + } + + // TODO add drivers to index data + instance.outportHandler, err = outport.NewOutport(100*time.Millisecond, outportCfg.OutportConfig{ + ShardID: shardID, + }) + if err != nil { + return nil, err + } + instance.softwareVersionChecker = &mock.SoftwareVersionCheckerMock{} + instance.managedPeerMonitor = &testscommon.ManagedPeersMonitorStub{} + + instance.collectClosableComponents() + + return instance, nil +} + +// OutportHandler will return the outport handler +func (s *statusComponentsHolder) OutportHandler() outport.OutportHandler { + return s.outportHandler +} + +// SoftwareVersionChecker will return the software version checker +func (s *statusComponentsHolder) SoftwareVersionChecker() statistics.SoftwareVersionChecker { + return s.softwareVersionChecker +} + +// ManagedPeersMonitor will return the managed peers monitor +func (s *statusComponentsHolder) ManagedPeersMonitor() common.ManagedPeersMonitor { + return s.managedPeerMonitor +} + +func (s *statusComponentsHolder) collectClosableComponents() { + s.closeHandler.AddComponent(s.outportHandler) + s.closeHandler.AddComponent(s.softwareVersionChecker) +} + +// Close will call the Close methods on all inner components +func (s *statusComponentsHolder) Close() error { + if s.cancelFunc != nil { + s.cancelFunc() + } + + return s.closeHandler.Close() +} + +// IsInterfaceNil returns true if there is no value under the interface +func (s *statusComponentsHolder) IsInterfaceNil() bool { + return s == nil +} + +// Create will do nothing +func (s *statusComponentsHolder) Create() error { + return nil +} + +// CheckSubcomponents will do nothing +func (s *statusComponentsHolder) CheckSubcomponents() error { + return nil +} + +// String will do nothing +func (s *statusComponentsHolder) String() string { + return "" +} + +// SetForkDetector will set the fork detector +func (s *statusComponentsHolder) SetForkDetector(forkDetector process.ForkDetector) error { + if check.IfNil(forkDetector) { + return process.ErrNilForkDetector + } + + s.mutex.Lock() + s.forkDetector = forkDetector + s.mutex.Unlock() + + return nil +} + +// StartPolling starts polling for the updated status +func (s *statusComponentsHolder) StartPolling() error { + if check.IfNil(s.forkDetector) { + return process.ErrNilForkDetector + } + + var ctx context.Context + ctx, s.cancelFunc = context.WithCancel(context.Background()) + + appStatusPollingHandler, err := appStatusPolling.NewAppStatusPolling( + s.appStatusHandler, + time.Duration(s.statusPollingIntervalSec)*time.Second, + log, + ) + if err != nil { + return errors.ErrStatusPollingInit + } + + err = 
appStatusPollingHandler.RegisterPollingFunc(s.probableHighestNonceHandler) + if err != nil { + return fmt.Errorf("%w, cannot register handler func for forkdetector's probable higher nonce", err) + } + + appStatusPollingHandler.Poll(ctx) + + return nil +} + +func (s *statusComponentsHolder) probableHighestNonceHandler(appStatusHandler core.AppStatusHandler) { + s.mutex.RLock() + probableHigherNonce := s.forkDetector.ProbableHighestNonce() + s.mutex.RUnlock() + + appStatusHandler.SetUInt64Value(common.MetricProbableHighestNonce, probableHigherNonce) +} diff --git a/node/chainSimulator/components/statusComponents_test.go b/node/chainSimulator/components/statusComponents_test.go new file mode 100644 index 00000000000..0e83e435003 --- /dev/null +++ b/node/chainSimulator/components/statusComponents_test.go @@ -0,0 +1,135 @@ +package components + +import ( + "testing" + "time" + + "github.com/multiversx/mx-chain-core-go/core" + "github.com/multiversx/mx-chain-core-go/core/atomic" + "github.com/multiversx/mx-chain-go/common" + mxErrors "github.com/multiversx/mx-chain-go/errors" + "github.com/multiversx/mx-chain-go/integrationTests/mock" + "github.com/multiversx/mx-chain-go/process" + "github.com/multiversx/mx-chain-go/testscommon/statusHandler" + "github.com/stretchr/testify/require" +) + +func TestCreateStatusComponents(t *testing.T) { + t.Parallel() + + t.Run("should work", func(t *testing.T) { + t.Parallel() + + comp, err := CreateStatusComponents(0, &statusHandler.AppStatusHandlerStub{}, 5) + require.NoError(t, err) + require.NotNil(t, comp) + + require.Nil(t, comp.Create()) + require.Nil(t, comp.Close()) + }) + t.Run("nil app status handler should error", func(t *testing.T) { + t.Parallel() + + comp, err := CreateStatusComponents(0, nil, 5) + require.Equal(t, core.ErrNilAppStatusHandler, err) + require.Nil(t, comp) + }) +} + +func TestStatusComponentsHolder_IsInterfaceNil(t *testing.T) { + t.Parallel() + + var comp *statusComponentsHolder + require.True(t, comp.IsInterfaceNil()) + + comp, _ = CreateStatusComponents(0, &statusHandler.AppStatusHandlerStub{}, 5) + require.False(t, comp.IsInterfaceNil()) + require.Nil(t, comp.Close()) +} + +func TestStatusComponentsHolder_Getters(t *testing.T) { + t.Parallel() + + comp, err := CreateStatusComponents(0, &statusHandler.AppStatusHandlerStub{}, 5) + require.NoError(t, err) + + require.NotNil(t, comp.OutportHandler()) + require.NotNil(t, comp.SoftwareVersionChecker()) + require.NotNil(t, comp.ManagedPeersMonitor()) + require.Nil(t, comp.CheckSubcomponents()) + require.Empty(t, comp.String()) + + require.Nil(t, comp.Close()) +} +func TestStatusComponentsHolder_SetForkDetector(t *testing.T) { + t.Parallel() + + comp, err := CreateStatusComponents(0, &statusHandler.AppStatusHandlerStub{}, 5) + require.NoError(t, err) + + err = comp.SetForkDetector(nil) + require.Equal(t, process.ErrNilForkDetector, err) + + err = comp.SetForkDetector(&mock.ForkDetectorStub{}) + require.NoError(t, err) + + require.Nil(t, comp.Close()) +} + +func TestStatusComponentsHolder_StartPolling(t *testing.T) { + t.Parallel() + + t.Run("nil fork detector should error", func(t *testing.T) { + t.Parallel() + + comp, err := CreateStatusComponents(0, &statusHandler.AppStatusHandlerStub{}, 5) + require.NoError(t, err) + + err = comp.StartPolling() + require.Equal(t, process.ErrNilForkDetector, err) + }) + t.Run("NewAppStatusPolling failure should error", func(t *testing.T) { + t.Parallel() + + comp, err := CreateStatusComponents(0, &statusHandler.AppStatusHandlerStub{}, 0) + 
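+ // the 0-second polling interval makes NewAppStatusPolling fail inside StartPolling, which should surface as ErrStatusPollingInit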
require.NoError(t, err) + + err = comp.SetForkDetector(&mock.ForkDetectorStub{}) + require.NoError(t, err) + + err = comp.StartPolling() + require.Equal(t, mxErrors.ErrStatusPollingInit, err) + }) + t.Run("should work", func(t *testing.T) { + t.Parallel() + + providedHighestNonce := uint64(123) + providedStatusPollingIntervalSec := 1 + wasSetUInt64ValueCalled := atomic.Flag{} + appStatusHandler := &statusHandler.AppStatusHandlerStub{ + SetUInt64ValueHandler: func(key string, value uint64) { + require.Equal(t, common.MetricProbableHighestNonce, key) + require.Equal(t, providedHighestNonce, value) + wasSetUInt64ValueCalled.SetValue(true) + }, + } + comp, err := CreateStatusComponents(0, appStatusHandler, providedStatusPollingIntervalSec) + require.NoError(t, err) + + forkDetector := &mock.ForkDetectorStub{ + ProbableHighestNonceCalled: func() uint64 { + return providedHighestNonce + }, + } + err = comp.SetForkDetector(forkDetector) + require.NoError(t, err) + + err = comp.StartPolling() + require.NoError(t, err) + + time.Sleep(time.Duration(providedStatusPollingIntervalSec+1) * time.Second) + require.True(t, wasSetUInt64ValueCalled.IsSet()) + + require.Nil(t, comp.Close()) + }) +} diff --git a/node/chainSimulator/components/statusCoreComponents.go b/node/chainSimulator/components/statusCoreComponents.go new file mode 100644 index 00000000000..7ac3b9045fa --- /dev/null +++ b/node/chainSimulator/components/statusCoreComponents.go @@ -0,0 +1,126 @@ +package components + +import ( + "io" + + "github.com/multiversx/mx-chain-core-go/core" + "github.com/multiversx/mx-chain-go/common" + "github.com/multiversx/mx-chain-go/config" + "github.com/multiversx/mx-chain-go/factory" + "github.com/multiversx/mx-chain-go/factory/statusCore" + "github.com/multiversx/mx-chain-go/node/external" +) + +type statusCoreComponentsHolder struct { + resourceMonitor factory.ResourceMonitor + networkStatisticsProvider factory.NetworkStatisticsProvider + trieSyncStatisticsProvider factory.TrieSyncStatisticsProvider + statusHandler core.AppStatusHandler + statusMetrics external.StatusMetricsHandler + persistentStatusHandler factory.PersistentStatusHandler + stateStatisticsHandler common.StateStatisticsHandler + managedStatusCoreComponentsCloser io.Closer +} + +// CreateStatusCoreComponents will create a new instance of factory.StatusCoreComponentsHandler +func CreateStatusCoreComponents(configs config.Configs, coreComponents factory.CoreComponentsHolder) (*statusCoreComponentsHolder, error) { + var err error + + statusCoreComponentsFactory, err := statusCore.NewStatusCoreComponentsFactory(statusCore.StatusCoreComponentsFactoryArgs{ + Config: *configs.GeneralConfig, + EpochConfig: *configs.EpochConfig, + RoundConfig: *configs.RoundConfig, + RatingsConfig: *configs.RatingsConfig, + EconomicsConfig: *configs.EconomicsConfig, + CoreComp: coreComponents, + }) + if err != nil { + return nil, err + } + + managedStatusCoreComponents, err := statusCore.NewManagedStatusCoreComponents(statusCoreComponentsFactory) + if err != nil { + return nil, err + } + + err = managedStatusCoreComponents.Create() + if err != nil { + return nil, err + } + + // stop resource monitor + _ = managedStatusCoreComponents.ResourceMonitor().Close() + + instance := &statusCoreComponentsHolder{ + resourceMonitor: managedStatusCoreComponents.ResourceMonitor(), + networkStatisticsProvider: managedStatusCoreComponents.NetworkStatistics(), + trieSyncStatisticsProvider: managedStatusCoreComponents.TrieSyncStatistics(), + statusHandler: 
managedStatusCoreComponents.AppStatusHandler(), + statusMetrics: managedStatusCoreComponents.StatusMetrics(), + persistentStatusHandler: managedStatusCoreComponents.PersistentStatusHandler(), + stateStatisticsHandler: managedStatusCoreComponents.StateStatsHandler(), + managedStatusCoreComponentsCloser: managedStatusCoreComponents, + } + + return instance, nil +} + +// StateStatsHandler will return the state statistics handler +func (s *statusCoreComponentsHolder) StateStatsHandler() common.StateStatisticsHandler { + return s.stateStatisticsHandler +} + +// ResourceMonitor will return the resource monitor +func (s *statusCoreComponentsHolder) ResourceMonitor() factory.ResourceMonitor { + return s.resourceMonitor +} + +// NetworkStatistics will return the network statistics provider +func (s *statusCoreComponentsHolder) NetworkStatistics() factory.NetworkStatisticsProvider { + return s.networkStatisticsProvider +} + +// TrieSyncStatistics will return trie sync statistics provider +func (s *statusCoreComponentsHolder) TrieSyncStatistics() factory.TrieSyncStatisticsProvider { + return s.trieSyncStatisticsProvider +} + +// AppStatusHandler will return the status handler +func (s *statusCoreComponentsHolder) AppStatusHandler() core.AppStatusHandler { + return s.statusHandler +} + +// StatusMetrics will return the status metrics handler +func (s *statusCoreComponentsHolder) StatusMetrics() external.StatusMetricsHandler { + return s.statusMetrics +} + +// PersistentStatusHandler will return the persistent status handler +func (s *statusCoreComponentsHolder) PersistentStatusHandler() factory.PersistentStatusHandler { + return s.persistentStatusHandler +} + +// Close will call the Close methods on all inner components +func (s *statusCoreComponentsHolder) Close() error { + return s.managedStatusCoreComponentsCloser.Close() +} + +// IsInterfaceNil returns true if there is no value under the interface +func (s *statusCoreComponentsHolder) IsInterfaceNil() bool { + return s == nil +} + +// Create will do nothing +func (s *statusCoreComponentsHolder) Create() error { + return nil +} + +// CheckSubcomponents will do nothing +func (s *statusCoreComponentsHolder) CheckSubcomponents() error { + return nil +} + +// String will do nothing +func (s *statusCoreComponentsHolder) String() string { + return "" +} diff --git a/node/chainSimulator/components/statusCoreComponents_test.go b/node/chainSimulator/components/statusCoreComponents_test.go new file mode 100644 index 00000000000..a616890644f --- /dev/null +++ b/node/chainSimulator/components/statusCoreComponents_test.go @@ -0,0 +1,113 @@ +package components + +import ( + "testing" + + "github.com/multiversx/mx-chain-go/config" + "github.com/multiversx/mx-chain-go/factory" + "github.com/multiversx/mx-chain-go/factory/mock" + mockTests "github.com/multiversx/mx-chain-go/integrationTests/mock" + "github.com/multiversx/mx-chain-go/testscommon" + "github.com/multiversx/mx-chain-go/testscommon/components" + "github.com/multiversx/mx-chain-go/testscommon/economicsmocks" + "github.com/multiversx/mx-chain-go/testscommon/genesisMocks" + "github.com/stretchr/testify/require" +) + +func createArgs() (config.Configs, factory.CoreComponentsHolder) { + generalCfg := testscommon.GetGeneralConfig() + ratingsCfg := components.CreateDummyRatingsConfig() + economicsCfg := components.CreateDummyEconomicsConfig() + cfg := config.Configs{ + GeneralConfig: &generalCfg, + EpochConfig: &config.EpochConfig{ + GasSchedule: config.GasScheduleConfig{ + GasScheduleByEpochs: 
[]config.GasScheduleByEpochs{ + { + StartEpoch: 0, + FileName: "gasScheduleV1.toml", + }, + }, + }, + }, + RoundConfig: &config.RoundConfig{ + RoundActivations: map[string]config.ActivationRoundByName{ + "Example": { + Round: "18446744073709551615", + }, + }, + }, + RatingsConfig: &ratingsCfg, + EconomicsConfig: &economicsCfg, + } + + return cfg, &mock.CoreComponentsMock{ + EconomicsHandler: &economicsmocks.EconomicsHandlerStub{}, + IntMarsh: &testscommon.MarshallerStub{}, + UInt64ByteSliceConv: &mockTests.Uint64ByteSliceConverterMock{}, + NodesConfig: &genesisMocks.NodesSetupStub{}, + } +} + +func TestCreateStatusCoreComponents(t *testing.T) { + t.Parallel() + + t.Run("should work", func(t *testing.T) { + t.Parallel() + + cfg, coreComp := createArgs() + comp, err := CreateStatusCoreComponents(cfg, coreComp) + require.NoError(t, err) + require.NotNil(t, comp) + + require.Nil(t, comp.Create()) + require.Nil(t, comp.Close()) + }) + t.Run("NewStatusCoreComponentsFactory failure should error", func(t *testing.T) { + t.Parallel() + + cfg, _ := createArgs() + comp, err := CreateStatusCoreComponents(cfg, nil) + require.Error(t, err) + require.Nil(t, comp) + }) + t.Run("managedStatusCoreComponents.Create failure should error", func(t *testing.T) { + t.Parallel() + + cfg, coreComp := createArgs() + cfg.GeneralConfig.ResourceStats.RefreshIntervalInSec = 0 + comp, err := CreateStatusCoreComponents(cfg, coreComp) + require.Error(t, err) + require.Nil(t, comp) + }) +} + +func TestStatusCoreComponentsHolder_IsInterfaceNil(t *testing.T) { + t.Parallel() + + var comp *statusCoreComponentsHolder + require.True(t, comp.IsInterfaceNil()) + + cfg, coreComp := createArgs() + comp, _ = CreateStatusCoreComponents(cfg, coreComp) + require.False(t, comp.IsInterfaceNil()) + require.Nil(t, comp.Close()) +} + +func TestStatusCoreComponentsHolder_Getters(t *testing.T) { + t.Parallel() + + cfg, coreComp := createArgs() + comp, err := CreateStatusCoreComponents(cfg, coreComp) + require.NoError(t, err) + + require.NotNil(t, comp.ResourceMonitor()) + require.NotNil(t, comp.NetworkStatistics()) + require.NotNil(t, comp.TrieSyncStatistics()) + require.NotNil(t, comp.AppStatusHandler()) + require.NotNil(t, comp.StatusMetrics()) + require.NotNil(t, comp.PersistentStatusHandler()) + require.NotNil(t, comp.StateStatsHandler()) + require.Nil(t, comp.CheckSubcomponents()) + require.Empty(t, comp.String()) +} diff --git a/node/chainSimulator/components/storageService.go b/node/chainSimulator/components/storageService.go new file mode 100644 index 00000000000..9a2a7c4860f --- /dev/null +++ b/node/chainSimulator/components/storageService.go @@ -0,0 +1,39 @@ +package components + +import ( + "github.com/multiversx/mx-chain-go/dataRetriever" +) + +// CreateStore creates a storage service for shard nodes +func CreateStore(numOfShards uint32) dataRetriever.StorageService { + store := dataRetriever.NewChainStorer() + store.AddStorer(dataRetriever.TransactionUnit, CreateMemUnit()) + store.AddStorer(dataRetriever.MiniBlockUnit, CreateMemUnit()) + store.AddStorer(dataRetriever.MetaBlockUnit, CreateMemUnit()) + store.AddStorer(dataRetriever.PeerChangesUnit, CreateMemUnit()) + store.AddStorer(dataRetriever.BlockHeaderUnit, CreateMemUnit()) + store.AddStorer(dataRetriever.UnsignedTransactionUnit, CreateMemUnit()) + store.AddStorer(dataRetriever.RewardTransactionUnit, CreateMemUnit()) + store.AddStorer(dataRetriever.MetaHdrNonceHashDataUnit, CreateMemUnit()) + store.AddStorer(dataRetriever.BootstrapUnit, CreateMemUnit()) + 
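// the remaining units below are also backed by in-memory storers; the accounts trie units use a dedicated in-memory unit suited for tries +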
store.AddStorer(dataRetriever.StatusMetricsUnit, CreateMemUnit()) + store.AddStorer(dataRetriever.ReceiptsUnit, CreateMemUnit()) + store.AddStorer(dataRetriever.ScheduledSCRsUnit, CreateMemUnit()) + store.AddStorer(dataRetriever.TxLogsUnit, CreateMemUnit()) + store.AddStorer(dataRetriever.UserAccountsUnit, CreateMemUnitForTries()) + store.AddStorer(dataRetriever.PeerAccountsUnit, CreateMemUnitForTries()) + store.AddStorer(dataRetriever.ESDTSuppliesUnit, CreateMemUnit()) + store.AddStorer(dataRetriever.RoundHdrHashDataUnit, CreateMemUnit()) + store.AddStorer(dataRetriever.MiniblocksMetadataUnit, CreateMemUnit()) + store.AddStorer(dataRetriever.MiniblockHashByTxHashUnit, CreateMemUnit()) + store.AddStorer(dataRetriever.EpochByHashUnit, CreateMemUnit()) + store.AddStorer(dataRetriever.ResultsHashesByTxHashUnit, CreateMemUnit()) + store.AddStorer(dataRetriever.TrieEpochRootHashUnit, CreateMemUnit()) + + for i := uint32(0); i < numOfShards; i++ { + hdrNonceHashDataUnit := dataRetriever.ShardHdrNonceHashDataUnit + dataRetriever.UnitType(i) + store.AddStorer(hdrNonceHashDataUnit, CreateMemUnit()) + } + + return store +} diff --git a/node/chainSimulator/components/storageService_test.go b/node/chainSimulator/components/storageService_test.go new file mode 100644 index 00000000000..3be398b53e6 --- /dev/null +++ b/node/chainSimulator/components/storageService_test.go @@ -0,0 +1,51 @@ +package components + +import ( + "testing" + + "github.com/multiversx/mx-chain-go/dataRetriever" + "github.com/stretchr/testify/require" +) + +func TestCreateStore(t *testing.T) { + t.Parallel() + + store := CreateStore(2) + require.NotNil(t, store) + + expectedUnits := []dataRetriever.UnitType{ + dataRetriever.TransactionUnit, + dataRetriever.MiniBlockUnit, + dataRetriever.MetaBlockUnit, + dataRetriever.PeerChangesUnit, + dataRetriever.BlockHeaderUnit, + dataRetriever.UnsignedTransactionUnit, + dataRetriever.RewardTransactionUnit, + dataRetriever.MetaHdrNonceHashDataUnit, + dataRetriever.BootstrapUnit, + dataRetriever.StatusMetricsUnit, + dataRetriever.ReceiptsUnit, + dataRetriever.ScheduledSCRsUnit, + dataRetriever.TxLogsUnit, + dataRetriever.UserAccountsUnit, + dataRetriever.PeerAccountsUnit, + dataRetriever.ESDTSuppliesUnit, + dataRetriever.RoundHdrHashDataUnit, + dataRetriever.MiniblocksMetadataUnit, + dataRetriever.MiniblockHashByTxHashUnit, + dataRetriever.EpochByHashUnit, + dataRetriever.ResultsHashesByTxHashUnit, + dataRetriever.TrieEpochRootHashUnit, + dataRetriever.ShardHdrNonceHashDataUnit, + dataRetriever.UnitType(101), // shard 2 + } + + all := store.GetAllStorers() + require.Equal(t, len(expectedUnits), len(all)) + + for i := 0; i < len(expectedUnits); i++ { + unit, err := store.GetStorer(expectedUnits[i]) + require.NoError(t, err) + require.NotNil(t, unit) + } +} diff --git a/node/chainSimulator/components/syncedBroadcastNetwork.go b/node/chainSimulator/components/syncedBroadcastNetwork.go new file mode 100644 index 00000000000..99e8168c45e --- /dev/null +++ b/node/chainSimulator/components/syncedBroadcastNetwork.go @@ -0,0 +1,135 @@ +package components + +import ( + "errors" + "fmt" + "sync" + + "github.com/multiversx/mx-chain-communication-go/p2p" + p2pMessage "github.com/multiversx/mx-chain-communication-go/p2p/message" + "github.com/multiversx/mx-chain-core-go/core" +) + +var ( + errNilHandler = errors.New("nil handler") + errHandlerAlreadyExists = errors.New("handler already exists") + errUnknownPeer = errors.New("unknown peer") +) + +type messageReceiver interface { + receive(fromConnectedPeer 
core.PeerID, message p2p.MessageP2P) + HasTopic(name string) bool +} + +type syncedBroadcastNetwork struct { + mutOperation sync.RWMutex + peers map[core.PeerID]messageReceiver +} + +// NewSyncedBroadcastNetwork creates a new synced broadcast network +func NewSyncedBroadcastNetwork() *syncedBroadcastNetwork { + return &syncedBroadcastNetwork{ + peers: make(map[core.PeerID]messageReceiver), + } +} + +// RegisterMessageReceiver registers the message receiver +func (network *syncedBroadcastNetwork) RegisterMessageReceiver(handler messageReceiver, pid core.PeerID) { + if handler == nil { + log.Error("programming error in syncedBroadcastNetwork.RegisterMessageReceiver: %w", errNilHandler) + return + } + + network.mutOperation.Lock() + defer network.mutOperation.Unlock() + + _, found := network.peers[pid] + if found { + log.Error("programming error in syncedBroadcastNetwork.RegisterMessageReceiver", "pid", pid.Pretty(), "error", errHandlerAlreadyExists) + return + } + + network.peers[pid] = handler +} + +// Broadcast will iterate through peers and send the message +func (network *syncedBroadcastNetwork) Broadcast(pid core.PeerID, topic string, buff []byte) { + _, handlers := network.getPeersAndHandlers() + + for _, handler := range handlers { + message := &p2pMessage.Message{ + FromField: pid.Bytes(), + DataField: buff, + TopicField: topic, + BroadcastMethodField: p2p.Broadcast, + PeerField: pid, + } + + handler.receive(pid, message) + } +} + +// SendDirectly will try to send directly to the provided peer +func (network *syncedBroadcastNetwork) SendDirectly(from core.PeerID, topic string, buff []byte, to core.PeerID) error { + network.mutOperation.RLock() + handler, found := network.peers[to] + if !found { + network.mutOperation.RUnlock() + + return fmt.Errorf("syncedBroadcastNetwork.SendDirectly: %w, pid %s", errUnknownPeer, to.Pretty()) + } + network.mutOperation.RUnlock() + + message := &p2pMessage.Message{ + FromField: from.Bytes(), + DataField: buff, + TopicField: topic, + BroadcastMethodField: p2p.Direct, + PeerField: from, + } + + handler.receive(from, message) + + return nil +} + +// GetConnectedPeers returns all connected peers +func (network *syncedBroadcastNetwork) GetConnectedPeers() []core.PeerID { + peers, _ := network.getPeersAndHandlers() + + return peers +} + +func (network *syncedBroadcastNetwork) getPeersAndHandlers() ([]core.PeerID, []messageReceiver) { + network.mutOperation.RLock() + defer network.mutOperation.RUnlock() + + peers := make([]core.PeerID, 0, len(network.peers)) + handlers := make([]messageReceiver, 0, len(network.peers)) + + for p, handler := range network.peers { + peers = append(peers, p) + handlers = append(handlers, handler) + } + + return peers, handlers +} + +// GetConnectedPeersOnTopic will find suitable peers connected on the provided topic +func (network *syncedBroadcastNetwork) GetConnectedPeersOnTopic(topic string) []core.PeerID { + peers, handlers := network.getPeersAndHandlers() + + peersOnTopic := make([]core.PeerID, 0, len(peers)) + for idx, p := range peers { + if handlers[idx].HasTopic(topic) { + peersOnTopic = append(peersOnTopic, p) + } + } + + return peersOnTopic +} + +// IsInterfaceNil returns true if there is no value under the interface +func (network *syncedBroadcastNetwork) IsInterfaceNil() bool { + return network == nil +} diff --git a/node/chainSimulator/components/syncedBroadcastNetwork_test.go b/node/chainSimulator/components/syncedBroadcastNetwork_test.go new file mode 100644 index 00000000000..74e061a819a --- /dev/null +++ 
b/node/chainSimulator/components/syncedBroadcastNetwork_test.go @@ -0,0 +1,303 @@ +package components + +import ( + "fmt" + "testing" + + "github.com/multiversx/mx-chain-communication-go/p2p" + "github.com/multiversx/mx-chain-core-go/core" + "github.com/multiversx/mx-chain-go/testscommon/p2pmocks" + "github.com/stretchr/testify/assert" +) + +func TestSyncedBroadcastNetwork_BroadcastShouldWorkOn3Peers(t *testing.T) { + t.Parallel() + + network := NewSyncedBroadcastNetwork() + messages := make(map[core.PeerID]map[string][]byte) + + globalTopic := "global" + oneTwoTopic := "topic_1_2" + oneThreeTopic := "topic_1_3" + twoThreeTopic := "topic_2_3" + + peer1, err := NewSyncedMessenger(network) + assert.Nil(t, err) + processor1 := createMessageProcessor(t, messages, peer1.ID()) + _ = peer1.CreateTopic(globalTopic, true) + _ = peer1.RegisterMessageProcessor(globalTopic, "", processor1) + _ = peer1.CreateTopic(oneTwoTopic, true) + _ = peer1.RegisterMessageProcessor(oneTwoTopic, "", processor1) + _ = peer1.CreateTopic(oneThreeTopic, true) + _ = peer1.RegisterMessageProcessor(oneThreeTopic, "", processor1) + + peer2, err := NewSyncedMessenger(network) + assert.Nil(t, err) + processor2 := createMessageProcessor(t, messages, peer2.ID()) + _ = peer2.CreateTopic(globalTopic, true) + _ = peer2.RegisterMessageProcessor(globalTopic, "", processor2) + _ = peer2.CreateTopic(oneTwoTopic, true) + _ = peer2.RegisterMessageProcessor(oneTwoTopic, "", processor2) + _ = peer2.CreateTopic(twoThreeTopic, true) + _ = peer2.RegisterMessageProcessor(twoThreeTopic, "", processor2) + + peer3, err := NewSyncedMessenger(network) + assert.Nil(t, err) + processor3 := createMessageProcessor(t, messages, peer3.ID()) + _ = peer3.CreateTopic(globalTopic, true) + _ = peer3.RegisterMessageProcessor(globalTopic, "", processor3) + _ = peer3.CreateTopic(oneThreeTopic, true) + _ = peer3.RegisterMessageProcessor(oneThreeTopic, "", processor3) + _ = peer3.CreateTopic(twoThreeTopic, true) + _ = peer3.RegisterMessageProcessor(twoThreeTopic, "", processor3) + + globalMessage := []byte("global message") + oneTwoMessage := []byte("1-2 message") + oneThreeMessage := []byte("1-3 message") + twoThreeMessage := []byte("2-3 message") + + peer1.Broadcast(globalTopic, globalMessage) + assert.Equal(t, globalMessage, messages[peer1.ID()][globalTopic]) + assert.Equal(t, globalMessage, messages[peer2.ID()][globalTopic]) + assert.Equal(t, globalMessage, messages[peer3.ID()][globalTopic]) + + peer1.Broadcast(oneTwoTopic, oneTwoMessage) + assert.Equal(t, oneTwoMessage, messages[peer1.ID()][oneTwoTopic]) + assert.Equal(t, oneTwoMessage, messages[peer2.ID()][oneTwoTopic]) + assert.Nil(t, messages[peer3.ID()][oneTwoTopic]) + + peer1.Broadcast(oneThreeTopic, oneThreeMessage) + assert.Equal(t, oneThreeMessage, messages[peer1.ID()][oneThreeTopic]) + assert.Nil(t, messages[peer2.ID()][oneThreeTopic]) + assert.Equal(t, oneThreeMessage, messages[peer3.ID()][oneThreeTopic]) + + peer2.Broadcast(twoThreeTopic, twoThreeMessage) + assert.Nil(t, messages[peer1.ID()][twoThreeTopic]) + assert.Equal(t, twoThreeMessage, messages[peer2.ID()][twoThreeTopic]) + assert.Equal(t, twoThreeMessage, messages[peer3.ID()][twoThreeTopic]) +} + +func TestSyncedBroadcastNetwork_BroadcastOnAnUnjoinedTopicShouldDiscardMessage(t *testing.T) { + t.Parallel() + + network := NewSyncedBroadcastNetwork() + messages := make(map[core.PeerID]map[string][]byte) + + globalTopic := "global" + twoThreeTopic := "topic_2_3" + + peer1, err := NewSyncedMessenger(network) + assert.Nil(t, err) + processor1 := 
createMessageProcessor(t, messages, peer1.ID()) + _ = peer1.CreateTopic(globalTopic, true) + _ = peer1.RegisterMessageProcessor(globalTopic, "", processor1) + + peer2, err := NewSyncedMessenger(network) + assert.Nil(t, err) + processor2 := createMessageProcessor(t, messages, peer2.ID()) + _ = peer2.CreateTopic(globalTopic, true) + _ = peer2.RegisterMessageProcessor(globalTopic, "", processor2) + _ = peer2.CreateTopic(twoThreeTopic, true) + _ = peer2.RegisterMessageProcessor(twoThreeTopic, "", processor2) + + peer3, err := NewSyncedMessenger(network) + assert.Nil(t, err) + processor3 := createMessageProcessor(t, messages, peer3.ID()) + _ = peer3.CreateTopic(globalTopic, true) + _ = peer3.RegisterMessageProcessor(globalTopic, "", processor3) + _ = peer3.CreateTopic(twoThreeTopic, true) + _ = peer3.RegisterMessageProcessor(twoThreeTopic, "", processor3) + + testMessage := []byte("test message") + + peer1.Broadcast(twoThreeTopic, testMessage) + + assert.Nil(t, messages[peer1.ID()][twoThreeTopic]) + assert.Nil(t, messages[peer2.ID()][twoThreeTopic]) + assert.Nil(t, messages[peer3.ID()][twoThreeTopic]) +} + +func TestSyncedBroadcastNetwork_SendDirectlyShouldWorkBetween2peers(t *testing.T) { + t.Parallel() + + network := NewSyncedBroadcastNetwork() + messages := make(map[core.PeerID]map[string][]byte) + + topic := "topic" + testMessage := []byte("test message") + + peer1, err := NewSyncedMessenger(network) + assert.Nil(t, err) + processor1 := createMessageProcessor(t, messages, peer1.ID()) + _ = peer1.CreateTopic(topic, true) + _ = peer1.RegisterMessageProcessor(topic, "", processor1) + + peer2, err := NewSyncedMessenger(network) + assert.Nil(t, err) + processor2 := createMessageProcessor(t, messages, peer2.ID()) + _ = peer2.CreateTopic(topic, true) + _ = peer2.RegisterMessageProcessor(topic, "", processor2) + + err = peer1.SendToConnectedPeer(topic, testMessage, peer2.ID()) + assert.Nil(t, err) + + assert.Nil(t, messages[peer1.ID()][topic]) + assert.Equal(t, testMessage, messages[peer2.ID()][topic]) +} + +func TestSyncedBroadcastNetwork_SendDirectlyToSelfShouldWork(t *testing.T) { + t.Parallel() + + network := NewSyncedBroadcastNetwork() + messages := make(map[core.PeerID]map[string][]byte) + + topic := "topic" + testMessage := []byte("test message") + + peer1, err := NewSyncedMessenger(network) + assert.Nil(t, err) + processor1 := createMessageProcessor(t, messages, peer1.ID()) + _ = peer1.CreateTopic(topic, true) + _ = peer1.RegisterMessageProcessor(topic, "", processor1) + + peer2, err := NewSyncedMessenger(network) + assert.Nil(t, err) + processor2 := createMessageProcessor(t, messages, peer2.ID()) + _ = peer2.CreateTopic(topic, true) + _ = peer2.RegisterMessageProcessor(topic, "", processor2) + + err = peer1.SendToConnectedPeer(topic, testMessage, peer1.ID()) + assert.Nil(t, err) + + assert.Equal(t, testMessage, messages[peer1.ID()][topic]) + assert.Nil(t, messages[peer2.ID()][topic]) +} + +func TestSyncedBroadcastNetwork_SendDirectlyShouldNotDeadlock(t *testing.T) { + t.Parallel() + + network := NewSyncedBroadcastNetwork() + messages := make(map[core.PeerID]map[string][]byte) + + topic := "topic" + testMessage := []byte("test message") + + peer1, err := NewSyncedMessenger(network) + assert.Nil(t, err) + processor1 := createMessageProcessor(t, messages, peer1.ID()) + _ = peer1.CreateTopic(topic, true) + _ = peer1.RegisterMessageProcessor(topic, "", processor1) + + peer2, err := NewSyncedMessenger(network) + assert.Nil(t, err) + processor2 := &p2pmocks.MessageProcessorStub{ + 
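// processor2 replies to every received message by sending it straight back to the originator, so this test proves a direct send issued from within ProcessReceivedMessage does not deadlock +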
ProcessReceivedMessageCalled: func(message p2p.MessageP2P, fromConnectedPeer core.PeerID, source p2p.MessageHandler) error { + log.Debug("sending message back to", "pid", fromConnectedPeer.Pretty()) + return source.SendToConnectedPeer(message.Topic(), []byte("reply: "+string(message.Data())), fromConnectedPeer) + }, + } + _ = peer2.CreateTopic(topic, true) + _ = peer2.RegisterMessageProcessor(topic, "", processor2) + + err = peer1.SendToConnectedPeer(topic, testMessage, peer2.ID()) + assert.Nil(t, err) + + assert.Equal(t, "reply: "+string(testMessage), string(messages[peer1.ID()][topic])) + assert.Nil(t, messages[peer2.ID()][topic]) +} + +func TestSyncedBroadcastNetwork_ConnectedPeersAndAddresses(t *testing.T) { + t.Parallel() + + network := NewSyncedBroadcastNetwork() + + peer1, err := NewSyncedMessenger(network) + assert.Nil(t, err) + + peer2, err := NewSyncedMessenger(network) + assert.Nil(t, err) + + peers := peer1.ConnectedPeers() + assert.Equal(t, 2, len(peers)) + + assert.Contains(t, peers, peer1.ID()) + assert.Contains(t, peers, peer2.ID()) + + assert.True(t, peer1.IsConnected(peer2.ID())) + assert.True(t, peer2.IsConnected(peer1.ID())) + assert.False(t, peer1.IsConnected("no connection")) + + addresses := peer1.ConnectedAddresses() + assert.Equal(t, 2, len(addresses)) + assert.Contains(t, addresses, fmt.Sprintf(virtualAddressTemplate, peer1.ID().Pretty())) + assert.Contains(t, addresses, peer1.Addresses()[0]) + assert.Contains(t, addresses, fmt.Sprintf(virtualAddressTemplate, peer2.ID().Pretty())) + assert.Contains(t, addresses, peer2.Addresses()[0]) +} + +func TestSyncedBroadcastNetwork_GetConnectedPeersOnTopic(t *testing.T) { + t.Parallel() + + globalTopic := "global" + oneTwoTopic := "topic_1_2" + oneThreeTopic := "topic_1_3" + twoThreeTopic := "topic_2_3" + + network := NewSyncedBroadcastNetwork() + + peer1, err := NewSyncedMessenger(network) + assert.Nil(t, err) + _ = peer1.CreateTopic(globalTopic, false) + _ = peer1.CreateTopic(oneTwoTopic, false) + _ = peer1.CreateTopic(oneThreeTopic, false) + + peer2, err := NewSyncedMessenger(network) + assert.Nil(t, err) + _ = peer2.CreateTopic(globalTopic, false) + _ = peer2.CreateTopic(oneTwoTopic, false) + _ = peer2.CreateTopic(twoThreeTopic, false) + + peer3, err := NewSyncedMessenger(network) + assert.Nil(t, err) + _ = peer3.CreateTopic(globalTopic, false) + _ = peer3.CreateTopic(oneThreeTopic, false) + _ = peer3.CreateTopic(twoThreeTopic, false) + + peers := peer1.ConnectedPeersOnTopic(globalTopic) + assert.Equal(t, 3, len(peers)) + assert.Contains(t, peers, peer1.ID()) + assert.Contains(t, peers, peer2.ID()) + assert.Contains(t, peers, peer3.ID()) + + peers = peer1.ConnectedPeersOnTopic(oneTwoTopic) + assert.Equal(t, 2, len(peers)) + assert.Contains(t, peers, peer1.ID()) + assert.Contains(t, peers, peer2.ID()) + + peers = peer3.ConnectedPeersOnTopic(oneThreeTopic) + assert.Equal(t, 2, len(peers)) + assert.Contains(t, peers, peer1.ID()) + assert.Contains(t, peers, peer3.ID()) + + peersInfo := peer1.GetConnectedPeersInfo() + assert.Equal(t, 3, len(peersInfo.UnknownPeers)) +} + +func createMessageProcessor(t *testing.T, dataMap map[core.PeerID]map[string][]byte, pid core.PeerID) p2p.MessageProcessor { + return &p2pmocks.MessageProcessorStub{ + ProcessReceivedMessageCalled: func(message p2p.MessageP2P, fromConnectedPeer core.PeerID, source p2p.MessageHandler) error { + m, found := dataMap[pid] + if !found { + m = make(map[string][]byte) + dataMap[pid] = m + } + + // some interceptors/resolvers require that the peer field should be the 
same + assert.Equal(t, message.Peer().Bytes(), message.From()) + assert.Equal(t, message.Peer(), fromConnectedPeer) + m[message.Topic()] = message.Data() + + return nil + }, + } +} diff --git a/node/chainSimulator/components/syncedMessenger.go b/node/chainSimulator/components/syncedMessenger.go new file mode 100644 index 00000000000..d30ac85b409 --- /dev/null +++ b/node/chainSimulator/components/syncedMessenger.go @@ -0,0 +1,396 @@ +package components + +import ( + "bytes" + "errors" + "fmt" + "sync" + "time" + + "github.com/multiversx/mx-chain-communication-go/p2p" + "github.com/multiversx/mx-chain-communication-go/p2p/libp2p/crypto" + "github.com/multiversx/mx-chain-core-go/core" + "github.com/multiversx/mx-chain-core-go/core/check" + "github.com/multiversx/mx-chain-core-go/hashing/blake2b" + logger "github.com/multiversx/mx-chain-logger-go" +) + +const virtualAddressTemplate = "/virtual/p2p/%s" + +var ( + log = logger.GetOrCreate("node/chainSimulator") + p2pInstanceCreator, _ = crypto.NewIdentityGenerator(log) + hasher = blake2b.NewBlake2b() + errNilNetwork = errors.New("nil network") + errTopicAlreadyCreated = errors.New("topic already created") + errNilMessageProcessor = errors.New("nil message processor") + errTopicNotCreated = errors.New("topic not created") + errTopicHasProcessor = errors.New("there is already a message processor for provided topic and identifier") + errInvalidSignature = errors.New("invalid signature") + errMessengerIsClosed = errors.New("messenger is closed") +) + +type syncedMessenger struct { + mutIsClosed sync.RWMutex + isClosed bool + mutOperation sync.RWMutex + topics map[string]map[string]p2p.MessageProcessor + network SyncedBroadcastNetworkHandler + pid core.PeerID +} + +// NewSyncedMessenger creates a new synced network messenger +func NewSyncedMessenger(network SyncedBroadcastNetworkHandler) (*syncedMessenger, error) { + if check.IfNil(network) { + return nil, errNilNetwork + } + + _, pid, err := p2pInstanceCreator.CreateRandomP2PIdentity() + if err != nil { + return nil, err + } + + messenger := &syncedMessenger{ + network: network, + topics: make(map[string]map[string]p2p.MessageProcessor), + pid: pid, + } + + log.Debug("created syncedMessenger", "pid", pid.Pretty()) + + network.RegisterMessageReceiver(messenger, pid) + + return messenger, nil +} + +// HasCompatibleProtocolID returns true +func (messenger *syncedMessenger) HasCompatibleProtocolID(_ string) bool { + return true +} + +func (messenger *syncedMessenger) receive(fromConnectedPeer core.PeerID, message p2p.MessageP2P) { + if messenger.closed() { + return + } + if check.IfNil(message) { + return + } + + messenger.mutOperation.RLock() + handlers := messenger.topics[message.Topic()] + messenger.mutOperation.RUnlock() + + for _, handler := range handlers { + err := handler.ProcessReceivedMessage(message, fromConnectedPeer, messenger) + if err != nil { + log.Trace("received message syncedMessenger", + "error", err, "topic", message.Topic(), "from connected peer", fromConnectedPeer.Pretty()) + } + } +} + +// ProcessReceivedMessage does nothing and returns nil +func (messenger *syncedMessenger) ProcessReceivedMessage(_ p2p.MessageP2P, _ core.PeerID, _ p2p.MessageHandler) error { + return nil +} + +// CreateTopic will create a topic for receiving data +func (messenger *syncedMessenger) CreateTopic(name string, _ bool) error { + if messenger.closed() { + return errMessengerIsClosed + } + + messenger.mutOperation.Lock() + defer messenger.mutOperation.Unlock() + + _, found := messenger.topics[name] + if 
found { + return fmt.Errorf("programming error in syncedMessenger.CreateTopic, %w for topic %s", errTopicAlreadyCreated, name) + } + + messenger.topics[name] = make(map[string]p2p.MessageProcessor) + + return nil +} + +// HasTopic returns true if the topic was registered +func (messenger *syncedMessenger) HasTopic(name string) bool { + messenger.mutOperation.RLock() + defer messenger.mutOperation.RUnlock() + + _, found := messenger.topics[name] + + return found +} + +// RegisterMessageProcessor will try to register a message processor on the provided topic & identifier +func (messenger *syncedMessenger) RegisterMessageProcessor(topic string, identifier string, handler p2p.MessageProcessor) error { + if messenger.closed() { + return errMessengerIsClosed + } + if check.IfNil(handler) { + return fmt.Errorf("programming error in syncedMessenger.RegisterMessageProcessor, "+ + "%w for topic %s and identifier %s", errNilMessageProcessor, topic, identifier) + } + + messenger.mutOperation.Lock() + defer messenger.mutOperation.Unlock() + + handlers, found := messenger.topics[topic] + if !found { + handlers = make(map[string]p2p.MessageProcessor) + messenger.topics[topic] = handlers + } + + _, found = handlers[identifier] + if found { + return fmt.Errorf("programming error in syncedMessenger.RegisterMessageProcessor, %w, topic %s, identifier %s", + errTopicHasProcessor, topic, identifier) + } + + handlers[identifier] = handler + + return nil +} + +// UnregisterAllMessageProcessors will unregister all message processors +func (messenger *syncedMessenger) UnregisterAllMessageProcessors() error { + messenger.mutOperation.Lock() + defer messenger.mutOperation.Unlock() + + for topic := range messenger.topics { + messenger.topics[topic] = make(map[string]p2p.MessageProcessor) + } + + return nil +} + +// UnregisterMessageProcessor will unregister the message processor for the provided topic and identifier +func (messenger *syncedMessenger) UnregisterMessageProcessor(topic string, identifier string) error { + messenger.mutOperation.Lock() + defer messenger.mutOperation.Unlock() + + handlers, found := messenger.topics[topic] + if !found { + return fmt.Errorf("programming error in syncedMessenger.UnregisterMessageProcessor, %w for topic %s", + errTopicNotCreated, topic) + } + + delete(handlers, identifier) + + return nil +} + +// Broadcast will broadcast the provided buffer on the topic in a synchronous manner +func (messenger *syncedMessenger) Broadcast(topic string, buff []byte) { + if messenger.closed() { + return + } + if !messenger.HasTopic(topic) { + return + } + + messenger.network.Broadcast(messenger.pid, topic, buff) +} + +// BroadcastOnChannel calls the Broadcast method +func (messenger *syncedMessenger) BroadcastOnChannel(_ string, topic string, buff []byte) { + messenger.Broadcast(topic, buff) +} + +// BroadcastUsingPrivateKey calls the Broadcast method +func (messenger *syncedMessenger) BroadcastUsingPrivateKey(topic string, buff []byte, _ core.PeerID, _ []byte) { + messenger.Broadcast(topic, buff) +} + +// BroadcastOnChannelUsingPrivateKey calls the Broadcast method +func (messenger *syncedMessenger) BroadcastOnChannelUsingPrivateKey(_ string, topic string, buff []byte, _ core.PeerID, _ []byte) { + messenger.Broadcast(topic, buff) +} + +// SendToConnectedPeer will send the message to the peer +func (messenger *syncedMessenger) SendToConnectedPeer(topic string, buff []byte, peerID core.PeerID) error { + if messenger.closed() { + return errMessengerIsClosed + } + + if !messenger.HasTopic(topic) { + 
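// a send on a topic that was not joined by this messenger is silently dropped, not reported as an error +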
return nil + } + + log.Trace("syncedMessenger.SendToConnectedPeer", + "from", messenger.pid.Pretty(), + "to", peerID.Pretty(), + "data", buff) + + return messenger.network.SendDirectly(messenger.pid, topic, buff, peerID) +} + +// UnJoinAllTopics will unjoin all topics +func (messenger *syncedMessenger) UnJoinAllTopics() error { + messenger.mutOperation.Lock() + defer messenger.mutOperation.Unlock() + + messenger.topics = make(map[string]map[string]p2p.MessageProcessor) + + return nil +} + +// Bootstrap does nothing and returns nil +func (messenger *syncedMessenger) Bootstrap() error { + return nil +} + +// Peers returns the network's peer ID +func (messenger *syncedMessenger) Peers() []core.PeerID { + return messenger.network.GetConnectedPeers() +} + +// Addresses returns the addresses this messenger was bound to. It returns a virtual address +func (messenger *syncedMessenger) Addresses() []string { + return []string{fmt.Sprintf(virtualAddressTemplate, messenger.pid.Pretty())} +} + +// ConnectToPeer does nothing and returns nil +func (messenger *syncedMessenger) ConnectToPeer(_ string) error { + return nil +} + +// IsConnected returns true if the peer ID is found on the network +func (messenger *syncedMessenger) IsConnected(peerID core.PeerID) bool { + peers := messenger.network.GetConnectedPeers() + for _, peer := range peers { + if peer == peerID { + return true + } + } + + return false +} + +// ConnectedPeers returns the same list as the function Peers +func (messenger *syncedMessenger) ConnectedPeers() []core.PeerID { + return messenger.Peers() +} + +// ConnectedAddresses returns all connected addresses +func (messenger *syncedMessenger) ConnectedAddresses() []string { + peers := messenger.network.GetConnectedPeers() + addresses := make([]string, 0, len(peers)) + for _, peer := range peers { + addresses = append(addresses, fmt.Sprintf(virtualAddressTemplate, peer.Pretty())) + } + + return addresses +} + +// PeerAddresses returns the virtual peer address +func (messenger *syncedMessenger) PeerAddresses(pid core.PeerID) []string { + return []string{fmt.Sprintf(virtualAddressTemplate, pid.Pretty())} +} + +// ConnectedPeersOnTopic returns the connected peers on the provided topic +func (messenger *syncedMessenger) ConnectedPeersOnTopic(topic string) []core.PeerID { + return messenger.network.GetConnectedPeersOnTopic(topic) +} + +// SetPeerShardResolver does nothing and returns nil +func (messenger *syncedMessenger) SetPeerShardResolver(_ p2p.PeerShardResolver) error { + return nil +} + +// GetConnectedPeersInfo return current connected peers info +func (messenger *syncedMessenger) GetConnectedPeersInfo() *p2p.ConnectedPeersInfo { + peersInfo := &p2p.ConnectedPeersInfo{} + peers := messenger.network.GetConnectedPeers() + for _, peer := range peers { + peersInfo.UnknownPeers = append(peersInfo.UnknownPeers, peer.Pretty()) + } + + return peersInfo +} + +// WaitForConnections does nothing +func (messenger *syncedMessenger) WaitForConnections(_ time.Duration, _ uint32) { +} + +// IsConnectedToTheNetwork returns true +func (messenger *syncedMessenger) IsConnectedToTheNetwork() bool { + return true +} + +// ThresholdMinConnectedPeers returns 0 +func (messenger *syncedMessenger) ThresholdMinConnectedPeers() int { + return 0 +} + +// SetThresholdMinConnectedPeers does nothing and returns nil +func (messenger *syncedMessenger) SetThresholdMinConnectedPeers(_ int) error { + return nil +} + +// SetPeerDenialEvaluator does nothing and returns nil +func (messenger *syncedMessenger) 
SetPeerDenialEvaluator(_ p2p.PeerDenialEvaluator) error { + return nil +} + +// ID returns the peer ID +func (messenger *syncedMessenger) ID() core.PeerID { + return messenger.pid +} + +// Port returns 0 +func (messenger *syncedMessenger) Port() int { + return 0 +} + +// Sign will return the hash(messenger.ID + payload) +func (messenger *syncedMessenger) Sign(payload []byte) ([]byte, error) { + return hasher.Compute(messenger.pid.Pretty() + string(payload)), nil +} + +// Verify will check if the provided signature === hash(pid + payload) +func (messenger *syncedMessenger) Verify(payload []byte, pid core.PeerID, signature []byte) error { + sig := hasher.Compute(pid.Pretty() + string(payload)) + if bytes.Equal(sig, signature) { + return nil + } + + return errInvalidSignature +} + +// SignUsingPrivateKey will return an empty byte slice +func (messenger *syncedMessenger) SignUsingPrivateKey(_ []byte, _ []byte) ([]byte, error) { + return make([]byte, 0), nil +} + +// AddPeerTopicNotifier does nothing and returns nil +func (messenger *syncedMessenger) AddPeerTopicNotifier(_ p2p.PeerTopicNotifier) error { + return nil +} + +// SetDebugger will set the provided debugger +func (messenger *syncedMessenger) SetDebugger(_ p2p.Debugger) error { + return nil +} + +// Close does nothing and returns nil +func (messenger *syncedMessenger) Close() error { + messenger.mutIsClosed.Lock() + messenger.isClosed = true + messenger.mutIsClosed.Unlock() + + return nil +} + +func (messenger *syncedMessenger) closed() bool { + messenger.mutIsClosed.RLock() + defer messenger.mutIsClosed.RUnlock() + + return messenger.isClosed +} + +// IsInterfaceNil returns true if there is no value under the interface +func (messenger *syncedMessenger) IsInterfaceNil() bool { + return messenger == nil +} diff --git a/node/chainSimulator/components/syncedMessenger_test.go b/node/chainSimulator/components/syncedMessenger_test.go new file mode 100644 index 00000000000..c0efd6f2942 --- /dev/null +++ b/node/chainSimulator/components/syncedMessenger_test.go @@ -0,0 +1,261 @@ +package components + +import ( + "fmt" + "testing" + + "github.com/multiversx/mx-chain-go/testscommon/p2pmocks" + "github.com/stretchr/testify/assert" +) + +func TestNewSyncedMessenger(t *testing.T) { + t.Parallel() + + t.Run("nil network should error", func(t *testing.T) { + t.Parallel() + + messenger, err := NewSyncedMessenger(nil) + assert.Nil(t, messenger) + assert.Equal(t, errNilNetwork, err) + }) + t.Run("should work", func(t *testing.T) { + t.Parallel() + + messenger, err := NewSyncedMessenger(NewSyncedBroadcastNetwork()) + assert.NotNil(t, messenger) + assert.Nil(t, err) + }) +} + +func TestSyncedMessenger_IsInterfaceNil(t *testing.T) { + t.Parallel() + + var messenger *syncedMessenger + assert.True(t, messenger.IsInterfaceNil()) + + messenger, _ = NewSyncedMessenger(NewSyncedBroadcastNetwork()) + assert.False(t, messenger.IsInterfaceNil()) +} + +func TestSyncedMessenger_DisabledMethodsShouldNotPanic(t *testing.T) { + t.Parallel() + + defer func() { + r := recover() + if r != nil { + assert.Fail(t, fmt.Sprintf("should have not panicked: %v", r)) + } + }() + + messenger, _ := NewSyncedMessenger(NewSyncedBroadcastNetwork()) + + assert.Nil(t, messenger.Close()) + assert.Nil(t, messenger.AddPeerTopicNotifier(nil)) + assert.Zero(t, messenger.Port()) + assert.Nil(t, messenger.SetPeerDenialEvaluator(nil)) + assert.Nil(t, messenger.SetThresholdMinConnectedPeers(0)) + assert.Zero(t, messenger.ThresholdMinConnectedPeers()) + assert.True(t, 
messenger.IsConnectedToTheNetwork()) + assert.Nil(t, messenger.SetPeerShardResolver(nil)) + assert.Nil(t, messenger.ConnectToPeer("")) + assert.Nil(t, messenger.Bootstrap()) + assert.Nil(t, messenger.ProcessReceivedMessage(nil, "", nil)) + + messenger.WaitForConnections(0, 0) + + buff, err := messenger.SignUsingPrivateKey(nil, nil) + assert.Empty(t, buff) + assert.Nil(t, err) +} + +func TestSyncedMessenger_RegisterMessageProcessor(t *testing.T) { + t.Parallel() + + t.Run("nil message processor should error", func(t *testing.T) { + t.Parallel() + + messenger, _ := NewSyncedMessenger(NewSyncedBroadcastNetwork()) + + err := messenger.RegisterMessageProcessor("", "", nil) + assert.ErrorIs(t, err, errNilMessageProcessor) + }) + t.Run("processor exists, should error", func(t *testing.T) { + t.Parallel() + + messenger, _ := NewSyncedMessenger(NewSyncedBroadcastNetwork()) + + err := messenger.CreateTopic("t", false) + assert.Nil(t, err) + + processor1 := &p2pmocks.MessageProcessorStub{} + err = messenger.RegisterMessageProcessor("t", "i", processor1) + assert.Nil(t, err) + + processor2 := &p2pmocks.MessageProcessorStub{} + err = messenger.RegisterMessageProcessor("t", "i", processor2) + assert.ErrorIs(t, err, errTopicHasProcessor) + + messenger.mutOperation.RLock() + defer messenger.mutOperation.RUnlock() + + assert.True(t, messenger.topics["t"]["i"] == processor1) // pointer testing + }) + t.Run("should work", func(t *testing.T) { + t.Parallel() + + messenger, _ := NewSyncedMessenger(NewSyncedBroadcastNetwork()) + + err := messenger.CreateTopic("t", false) + assert.Nil(t, err) + + processor := &p2pmocks.MessageProcessorStub{} + err = messenger.RegisterMessageProcessor("t", "i", processor) + assert.Nil(t, err) + + messenger.mutOperation.RLock() + defer messenger.mutOperation.RUnlock() + + assert.True(t, messenger.topics["t"]["i"] == processor) // pointer testing + }) +} + +func TestSyncedMessenger_UnregisterAllMessageProcessors(t *testing.T) { + t.Parallel() + + t.Run("no topics should work", func(t *testing.T) { + t.Parallel() + + messenger, _ := NewSyncedMessenger(NewSyncedBroadcastNetwork()) + messenger.mutOperation.RLock() + assert.Empty(t, messenger.topics) + messenger.mutOperation.RUnlock() + + err := messenger.UnregisterAllMessageProcessors() + assert.Nil(t, err) + + messenger.mutOperation.RLock() + assert.Empty(t, messenger.topics) + messenger.mutOperation.RUnlock() + }) + t.Run("one topic but no processor should work", func(t *testing.T) { + t.Parallel() + + messenger, _ := NewSyncedMessenger(NewSyncedBroadcastNetwork()) + + topic := "topic" + _ = messenger.CreateTopic(topic, true) + + messenger.mutOperation.RLock() + assert.Empty(t, messenger.topics[topic]) + messenger.mutOperation.RUnlock() + + err := messenger.UnregisterAllMessageProcessors() + assert.Nil(t, err) + + messenger.mutOperation.RLock() + assert.Empty(t, messenger.topics[topic]) + messenger.mutOperation.RUnlock() + }) + t.Run("one topic with processor should work", func(t *testing.T) { + t.Parallel() + + messenger, _ := NewSyncedMessenger(NewSyncedBroadcastNetwork()) + + topic := "topic" + identifier := "identifier" + _ = messenger.CreateTopic(topic, true) + _ = messenger.RegisterMessageProcessor(topic, identifier, &p2pmocks.MessageProcessorStub{}) + + messenger.mutOperation.RLock() + assert.NotNil(t, messenger.topics[topic][identifier]) + messenger.mutOperation.RUnlock() + + err := messenger.UnregisterAllMessageProcessors() + assert.Nil(t, err) + + messenger.mutOperation.RLock() + assert.Empty(t, messenger.topics[topic]) + 
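// the topic itself stays joined; only its registered processors have been removed +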
messenger.mutOperation.RUnlock() + }) +} + +func TestSyncedMessenger_UnregisterMessageProcessor(t *testing.T) { + t.Parallel() + + t.Run("topic not found should error", func(t *testing.T) { + t.Parallel() + + messenger, _ := NewSyncedMessenger(NewSyncedBroadcastNetwork()) + + topic := "topic" + identifier := "identifier" + err := messenger.UnregisterMessageProcessor(topic, identifier) + assert.ErrorIs(t, err, errTopicNotCreated) + }) + t.Run("should work", func(t *testing.T) { + t.Parallel() + + messenger, _ := NewSyncedMessenger(NewSyncedBroadcastNetwork()) + + topic := "topic" + identifier1 := "identifier1" + identifier2 := "identifier2" + + _ = messenger.CreateTopic(topic, true) + _ = messenger.RegisterMessageProcessor(topic, identifier1, &p2pmocks.MessageProcessorStub{}) + _ = messenger.RegisterMessageProcessor(topic, identifier2, &p2pmocks.MessageProcessorStub{}) + + messenger.mutOperation.RLock() + assert.Equal(t, 2, len(messenger.topics[topic])) + assert.NotNil(t, messenger.topics[topic][identifier1]) + assert.NotNil(t, messenger.topics[topic][identifier2]) + messenger.mutOperation.RUnlock() + + err := messenger.UnregisterMessageProcessor(topic, identifier1) + assert.Nil(t, err) + + messenger.mutOperation.RLock() + assert.Equal(t, 1, len(messenger.topics[topic])) + assert.NotNil(t, messenger.topics[topic][identifier2]) + messenger.mutOperation.RUnlock() + }) +} + +func TestSyncedMessenger_UnJoinAllTopics(t *testing.T) { + t.Parallel() + + t.Run("no topics registered should work", func(t *testing.T) { + t.Parallel() + + messenger, _ := NewSyncedMessenger(NewSyncedBroadcastNetwork()) + + messenger.mutOperation.RLock() + assert.Empty(t, messenger.topics) + messenger.mutOperation.RUnlock() + + err := messenger.UnJoinAllTopics() + assert.Nil(t, err) + + messenger.mutOperation.RLock() + assert.Empty(t, messenger.topics) + messenger.mutOperation.RUnlock() + }) + t.Run("one registered topic should work", func(t *testing.T) { + t.Parallel() + + messenger, _ := NewSyncedMessenger(NewSyncedBroadcastNetwork()) + topic := "topic" + _ = messenger.CreateTopic(topic, true) + + messenger.mutOperation.RLock() + assert.Empty(t, messenger.topics[topic]) + messenger.mutOperation.RUnlock() + + err := messenger.UnJoinAllTopics() + assert.Nil(t, err) + + messenger.mutOperation.RLock() + assert.Empty(t, messenger.topics) + messenger.mutOperation.RUnlock() + }) +} diff --git a/node/chainSimulator/components/testOnlyProcessingNode.go b/node/chainSimulator/components/testOnlyProcessingNode.go new file mode 100644 index 00000000000..07c8561c73f --- /dev/null +++ b/node/chainSimulator/components/testOnlyProcessingNode.go @@ -0,0 +1,578 @@ +package components + +import ( + "encoding/base64" + "encoding/hex" + "errors" + "fmt" + "math/big" + + "github.com/multiversx/mx-chain-core-go/core" + chainData "github.com/multiversx/mx-chain-core-go/data" + "github.com/multiversx/mx-chain-core-go/data/endProcess" + "github.com/multiversx/mx-chain-go/api/shared" + "github.com/multiversx/mx-chain-go/config" + "github.com/multiversx/mx-chain-go/consensus" + "github.com/multiversx/mx-chain-go/consensus/spos/sposFactory" + "github.com/multiversx/mx-chain-go/dataRetriever" + "github.com/multiversx/mx-chain-go/dataRetriever/blockchain" + dataRetrieverFactory "github.com/multiversx/mx-chain-go/dataRetriever/factory" + "github.com/multiversx/mx-chain-go/facade" + "github.com/multiversx/mx-chain-go/factory" + bootstrapComp "github.com/multiversx/mx-chain-go/factory/bootstrap" + 
"github.com/multiversx/mx-chain-go/node/chainSimulator/dtos" + "github.com/multiversx/mx-chain-go/process" + "github.com/multiversx/mx-chain-go/process/block/postprocess" + "github.com/multiversx/mx-chain-go/process/smartContract" + "github.com/multiversx/mx-chain-go/sharding" + "github.com/multiversx/mx-chain-go/sharding/nodesCoordinator" + "github.com/multiversx/mx-chain-go/state" +) + +// ArgsTestOnlyProcessingNode represents the DTO struct for the NewTestOnlyProcessingNode constructor function +type ArgsTestOnlyProcessingNode struct { + Configs config.Configs + APIInterface APIConfigurator + + ChanStopNodeProcess chan endProcess.ArgEndProcess + SyncedBroadcastNetwork SyncedBroadcastNetworkHandler + + InitialRound int64 + InitialNonce uint64 + GasScheduleFilename string + NumShards uint32 + ShardIDStr string + BypassTxSignatureCheck bool + MinNodesPerShard uint32 + MinNodesMeta uint32 + RoundDurationInMillis uint64 +} + +type testOnlyProcessingNode struct { + closeHandler *closeHandler + CoreComponentsHolder factory.CoreComponentsHandler + StatusCoreComponents factory.StatusCoreComponentsHandler + StateComponentsHolder factory.StateComponentsHandler + StatusComponentsHolder factory.StatusComponentsHandler + CryptoComponentsHolder factory.CryptoComponentsHandler + NetworkComponentsHolder factory.NetworkComponentsHandler + BootstrapComponentsHolder factory.BootstrapComponentsHandler + ProcessComponentsHolder factory.ProcessComponentsHandler + DataComponentsHolder factory.DataComponentsHandler + + NodesCoordinator nodesCoordinator.NodesCoordinator + ChainHandler chainData.ChainHandler + ArgumentsParser process.ArgumentsParser + TransactionFeeHandler process.TransactionFeeHandler + StoreService dataRetriever.StorageService + DataPool dataRetriever.PoolsHolder + broadcastMessenger consensus.BroadcastMessenger + + httpServer shared.UpgradeableHttpServerHandler + facadeHandler shared.FacadeHandler +} + +// NewTestOnlyProcessingNode creates a new instance of a node that is able to only process transactions +func NewTestOnlyProcessingNode(args ArgsTestOnlyProcessingNode) (*testOnlyProcessingNode, error) { + instance := &testOnlyProcessingNode{ + ArgumentsParser: smartContract.NewArgumentParser(), + StoreService: CreateStore(args.NumShards), + closeHandler: NewCloseHandler(), + } + + var err error + instance.TransactionFeeHandler = postprocess.NewFeeAccumulator() + + instance.CoreComponentsHolder, err = CreateCoreComponents(ArgsCoreComponentsHolder{ + Config: *args.Configs.GeneralConfig, + EnableEpochsConfig: args.Configs.EpochConfig.EnableEpochs, + RoundsConfig: *args.Configs.RoundConfig, + EconomicsConfig: *args.Configs.EconomicsConfig, + ChanStopNodeProcess: args.ChanStopNodeProcess, + NumShards: args.NumShards, + WorkingDir: args.Configs.FlagsConfig.WorkingDir, + GasScheduleFilename: args.GasScheduleFilename, + NodesSetupPath: args.Configs.ConfigurationPathsHolder.Nodes, + InitialRound: args.InitialRound, + MinNodesPerShard: args.MinNodesPerShard, + MinNodesMeta: args.MinNodesMeta, + RoundDurationInMs: args.RoundDurationInMillis, + RatingConfig: *args.Configs.RatingsConfig, + }) + if err != nil { + return nil, err + } + + instance.StatusCoreComponents, err = CreateStatusCoreComponents(args.Configs, instance.CoreComponentsHolder) + if err != nil { + return nil, err + } + + instance.CryptoComponentsHolder, err = CreateCryptoComponents(ArgsCryptoComponentsHolder{ + Config: *args.Configs.GeneralConfig, + EnableEpochsConfig: args.Configs.EpochConfig.EnableEpochs, + Preferences: 
*args.Configs.PreferencesConfig, + CoreComponentsHolder: instance.CoreComponentsHolder, + BypassTxSignatureCheck: args.BypassTxSignatureCheck, + AllValidatorKeysPemFileName: args.Configs.ConfigurationPathsHolder.AllValidatorKeys, + }) + if err != nil { + return nil, err + } + + instance.NetworkComponentsHolder, err = CreateNetworkComponents(args.SyncedBroadcastNetwork) + if err != nil { + return nil, err + } + + instance.BootstrapComponentsHolder, err = CreateBootstrapComponents(ArgsBootstrapComponentsHolder{ + CoreComponents: instance.CoreComponentsHolder, + CryptoComponents: instance.CryptoComponentsHolder, + NetworkComponents: instance.NetworkComponentsHolder, + StatusCoreComponents: instance.StatusCoreComponents, + WorkingDir: args.Configs.FlagsConfig.WorkingDir, + FlagsConfig: *args.Configs.FlagsConfig, + ImportDBConfig: *args.Configs.ImportDbConfig, + PrefsConfig: *args.Configs.PreferencesConfig, + Config: *args.Configs.GeneralConfig, + ShardIDStr: args.ShardIDStr, + }) + if err != nil { + return nil, err + } + + selfShardID := instance.GetShardCoordinator().SelfId() + instance.StatusComponentsHolder, err = CreateStatusComponents( + selfShardID, + instance.StatusCoreComponents.AppStatusHandler(), + args.Configs.GeneralConfig.GeneralSettings.StatusPollingIntervalSec, + ) + if err != nil { + return nil, err + } + + err = instance.createBlockChain(selfShardID) + if err != nil { + return nil, err + } + + instance.StateComponentsHolder, err = CreateStateComponents(ArgsStateComponents{ + Config: *args.Configs.GeneralConfig, + CoreComponents: instance.CoreComponentsHolder, + StatusCore: instance.StatusCoreComponents, + StoreService: instance.StoreService, + ChainHandler: instance.ChainHandler, + }) + if err != nil { + return nil, err + } + + err = instance.createDataPool(args) + if err != nil { + return nil, err + } + err = instance.createNodesCoordinator(args.Configs.PreferencesConfig.Preferences, *args.Configs.GeneralConfig) + if err != nil { + return nil, err + } + + instance.DataComponentsHolder, err = CreateDataComponents(ArgsDataComponentsHolder{ + Chain: instance.ChainHandler, + StorageService: instance.StoreService, + DataPool: instance.DataPool, + InternalMarshaller: instance.CoreComponentsHolder.InternalMarshalizer(), + }) + if err != nil { + return nil, err + } + + instance.ProcessComponentsHolder, err = CreateProcessComponents(ArgsProcessComponentsHolder{ + CoreComponents: instance.CoreComponentsHolder, + CryptoComponents: instance.CryptoComponentsHolder, + NetworkComponents: instance.NetworkComponentsHolder, + BootstrapComponents: instance.BootstrapComponentsHolder, + StateComponents: instance.StateComponentsHolder, + StatusComponents: instance.StatusComponentsHolder, + StatusCoreComponents: instance.StatusCoreComponents, + FlagsConfig: *args.Configs.FlagsConfig, + ImportDBConfig: *args.Configs.ImportDbConfig, + PrefsConfig: *args.Configs.PreferencesConfig, + Config: *args.Configs.GeneralConfig, + EconomicsConfig: *args.Configs.EconomicsConfig, + SystemSCConfig: *args.Configs.SystemSCConfig, + EpochConfig: *args.Configs.EpochConfig, + RoundConfig: *args.Configs.RoundConfig, + ConfigurationPathsHolder: *args.Configs.ConfigurationPathsHolder, + NodesCoordinator: instance.NodesCoordinator, + DataComponents: instance.DataComponentsHolder, + GenesisNonce: args.InitialNonce, + GenesisRound: uint64(args.InitialRound), + }) + if err != nil { + return nil, err + } + + err = instance.StatusComponentsHolder.SetForkDetector(instance.ProcessComponentsHolder.ForkDetector()) + if err != nil { 
+ return nil, err + } + + err = instance.StatusComponentsHolder.StartPolling() + if err != nil { + return nil, err + } + + err = instance.createBroadcastMessenger() + if err != nil { + return nil, err + } + + err = instance.createFacade(args.Configs, args.APIInterface) + if err != nil { + return nil, err + } + + err = instance.createHttpServer(args.Configs) + if err != nil { + return nil, err + } + + instance.collectClosableComponents(args.APIInterface) + + return instance, nil +} + +func (node *testOnlyProcessingNode) createBlockChain(selfShardID uint32) error { + var err error + if selfShardID == core.MetachainShardId { + node.ChainHandler, err = blockchain.NewMetaChain(node.StatusCoreComponents.AppStatusHandler()) + } else { + node.ChainHandler, err = blockchain.NewBlockChain(node.StatusCoreComponents.AppStatusHandler()) + } + + return err +} + +func (node *testOnlyProcessingNode) createDataPool(args ArgsTestOnlyProcessingNode) error { + var err error + + argsDataPool := dataRetrieverFactory.ArgsDataPool{ + Config: args.Configs.GeneralConfig, + EconomicsData: node.CoreComponentsHolder.EconomicsData(), + ShardCoordinator: node.BootstrapComponentsHolder.ShardCoordinator(), + Marshalizer: node.CoreComponentsHolder.InternalMarshalizer(), + PathManager: node.CoreComponentsHolder.PathHandler(), + } + + node.DataPool, err = dataRetrieverFactory.NewDataPoolFromConfig(argsDataPool) + + return err +} + +func (node *testOnlyProcessingNode) createNodesCoordinator(pref config.PreferencesConfig, generalConfig config.Config) error { + nodesShufflerOut, err := bootstrapComp.CreateNodesShuffleOut( + node.CoreComponentsHolder.GenesisNodesSetup(), + generalConfig.EpochStartConfig, + node.CoreComponentsHolder.ChanStopNodeProcess(), + ) + if err != nil { + return err + } + + bootstrapStorer, err := node.StoreService.GetStorer(dataRetriever.BootstrapUnit) + if err != nil { + return err + } + + node.NodesCoordinator, err = bootstrapComp.CreateNodesCoordinator( + nodesShufflerOut, + node.CoreComponentsHolder.GenesisNodesSetup(), + pref, + node.CoreComponentsHolder.EpochStartNotifierWithConfirm(), + node.CryptoComponentsHolder.PublicKey(), + node.CoreComponentsHolder.InternalMarshalizer(), + node.CoreComponentsHolder.Hasher(), + node.CoreComponentsHolder.Rater(), + bootstrapStorer, + node.CoreComponentsHolder.NodesShuffler(), + node.BootstrapComponentsHolder.ShardCoordinator().SelfId(), + node.BootstrapComponentsHolder.EpochBootstrapParams(), + node.BootstrapComponentsHolder.EpochBootstrapParams().Epoch(), + node.CoreComponentsHolder.ChanStopNodeProcess(), + node.CoreComponentsHolder.NodeTypeProvider(), + node.CoreComponentsHolder.EnableEpochsHandler(), + node.DataPool.CurrentEpochValidatorInfo(), + node.BootstrapComponentsHolder.NodesCoordinatorRegistryFactory(), + ) + if err != nil { + return err + } + + return nil +} + +func (node *testOnlyProcessingNode) createBroadcastMessenger() error { + broadcastMessenger, err := sposFactory.GetBroadcastMessenger( + node.CoreComponentsHolder.InternalMarshalizer(), + node.CoreComponentsHolder.Hasher(), + node.NetworkComponentsHolder.NetworkMessenger(), + node.ProcessComponentsHolder.ShardCoordinator(), + node.CryptoComponentsHolder.PeerSignatureHandler(), + node.DataComponentsHolder.Datapool().Headers(), + node.ProcessComponentsHolder.InterceptorsContainer(), + node.CoreComponentsHolder.AlarmScheduler(), + node.CryptoComponentsHolder.KeysHandler(), + ) + if err != nil { + return err + } + + node.broadcastMessenger, err = NewInstantBroadcastMessenger(broadcastMessenger, 
node.BootstrapComponentsHolder.ShardCoordinator()) + return err +} + +// GetProcessComponents will return the process components +func (node *testOnlyProcessingNode) GetProcessComponents() factory.ProcessComponentsHolder { + return node.ProcessComponentsHolder +} + +// GetChainHandler will return the chain handler +func (node *testOnlyProcessingNode) GetChainHandler() chainData.ChainHandler { + return node.ChainHandler +} + +// GetBroadcastMessenger will return the broadcast messenger +func (node *testOnlyProcessingNode) GetBroadcastMessenger() consensus.BroadcastMessenger { + return node.broadcastMessenger +} + +// GetShardCoordinator will return the shard coordinator +func (node *testOnlyProcessingNode) GetShardCoordinator() sharding.Coordinator { + return node.BootstrapComponentsHolder.ShardCoordinator() +} + +// GetCryptoComponents will return the crypto components +func (node *testOnlyProcessingNode) GetCryptoComponents() factory.CryptoComponentsHolder { + return node.CryptoComponentsHolder +} + +// GetCoreComponents will return the core components +func (node *testOnlyProcessingNode) GetCoreComponents() factory.CoreComponentsHolder { + return node.CoreComponentsHolder +} + +// GetStateComponents will return the state components +func (node *testOnlyProcessingNode) GetStateComponents() factory.StateComponentsHolder { + return node.StateComponentsHolder +} + +// GetFacadeHandler will return the facade handler +func (node *testOnlyProcessingNode) GetFacadeHandler() shared.FacadeHandler { + return node.facadeHandler +} + +// GetStatusCoreComponents will return the status core components +func (node *testOnlyProcessingNode) GetStatusCoreComponents() factory.StatusCoreComponentsHolder { + return node.StatusCoreComponents +} + +func (node *testOnlyProcessingNode) collectClosableComponents(apiInterface APIConfigurator) { + node.closeHandler.AddComponent(node.ProcessComponentsHolder) + node.closeHandler.AddComponent(node.DataComponentsHolder) + node.closeHandler.AddComponent(node.StateComponentsHolder) + node.closeHandler.AddComponent(node.StatusComponentsHolder) + node.closeHandler.AddComponent(node.BootstrapComponentsHolder) + node.closeHandler.AddComponent(node.NetworkComponentsHolder) + node.closeHandler.AddComponent(node.StatusCoreComponents) + node.closeHandler.AddComponent(node.CoreComponentsHolder) + node.closeHandler.AddComponent(node.facadeHandler) + + // TODO remove this after http server fix + shardID := node.GetShardCoordinator().SelfId() + if facade.DefaultRestPortOff != apiInterface.RestApiInterface(shardID) { + node.closeHandler.AddComponent(node.httpServer) + } +} + +// SetKeyValueForAddress will set the provided state for the given address +func (node *testOnlyProcessingNode) SetKeyValueForAddress(address []byte, keyValueMap map[string]string) error { + userAccount, err := node.getUserAccount(address) + if err != nil { + return err + } + + err = setKeyValueMap(userAccount, keyValueMap) + if err != nil { + return err + } + + accountsAdapter := node.StateComponentsHolder.AccountsAdapter() + err = accountsAdapter.SaveAccount(userAccount) + if err != nil { + return err + } + + _, err = accountsAdapter.Commit() + + return err +} + +func setKeyValueMap(userAccount state.UserAccountHandler, keyValueMap map[string]string) error { + for keyHex, valueHex := range keyValueMap { + keyDecoded, err := hex.DecodeString(keyHex) + if err != nil { + return fmt.Errorf("cannot decode key, error: %w", err) + } + valueDecoded, err := hex.DecodeString(valueHex) + if err != nil { + return 
fmt.Errorf("cannot decode value, error: %w", err) + } + + err = userAccount.SaveKeyValue(keyDecoded, valueDecoded) + if err != nil { + return err + } + } + + return nil +} + +// SetStateForAddress will set the state for the give address +func (node *testOnlyProcessingNode) SetStateForAddress(address []byte, addressState *dtos.AddressState) error { + userAccount, err := node.getUserAccount(address) + if err != nil { + return err + } + + err = setNonceAndBalanceForAccount(userAccount, addressState.Nonce, addressState.Balance) + if err != nil { + return err + } + + err = setKeyValueMap(userAccount, addressState.Keys) + if err != nil { + return err + } + + err = node.setScDataIfNeeded(address, userAccount, addressState) + if err != nil { + return err + } + + rootHash, err := base64.StdEncoding.DecodeString(addressState.RootHash) + if err != nil { + return err + } + if len(rootHash) != 0 { + userAccount.SetRootHash(rootHash) + } + + accountsAdapter := node.StateComponentsHolder.AccountsAdapter() + err = accountsAdapter.SaveAccount(userAccount) + if err != nil { + return err + } + + _, err = accountsAdapter.Commit() + return err +} + +func setNonceAndBalanceForAccount(userAccount state.UserAccountHandler, nonce *uint64, balance string) error { + if nonce != nil { + // set nonce to zero + userAccount.IncreaseNonce(-userAccount.GetNonce()) + // set nonce with the provided value + userAccount.IncreaseNonce(*nonce) + } + + if balance == "" { + return nil + } + + providedBalance, ok := big.NewInt(0).SetString(balance, 10) + if !ok { + return errors.New("cannot convert string balance to *big.Int") + } + + // set balance to zero + userBalance := userAccount.GetBalance() + err := userAccount.AddToBalance(userBalance.Neg(userBalance)) + if err != nil { + return err + } + // set provided balance + return userAccount.AddToBalance(providedBalance) +} + +func (node *testOnlyProcessingNode) setScDataIfNeeded(address []byte, userAccount state.UserAccountHandler, addressState *dtos.AddressState) error { + if !core.IsSmartContractAddress(address) { + return nil + } + + if addressState.Code != "" { + decodedCode, err := hex.DecodeString(addressState.Code) + if err != nil { + return err + } + userAccount.SetCode(decodedCode) + } + + if addressState.CodeHash != "" { + codeHash, errD := base64.StdEncoding.DecodeString(addressState.CodeHash) + if errD != nil { + return errD + } + userAccount.SetCodeHash(codeHash) + } + + if addressState.CodeMetadata != "" { + decodedCodeMetadata, errD := base64.StdEncoding.DecodeString(addressState.CodeMetadata) + if errD != nil { + return errD + } + userAccount.SetCodeMetadata(decodedCodeMetadata) + } + + if addressState.Owner != "" { + ownerAddress, errD := node.CoreComponentsHolder.AddressPubKeyConverter().Decode(addressState.Owner) + if errD != nil { + return errD + } + userAccount.SetOwnerAddress(ownerAddress) + } + + if addressState.DeveloperRewards != "" { + developerRewards, ok := big.NewInt(0).SetString(addressState.DeveloperRewards, 10) + if !ok { + return errors.New("cannot convert string developer rewards to *big.Int") + } + userAccount.AddToDeveloperReward(developerRewards) + } + + return nil +} + +func (node *testOnlyProcessingNode) getUserAccount(address []byte) (state.UserAccountHandler, error) { + accountsAdapter := node.StateComponentsHolder.AccountsAdapter() + account, err := accountsAdapter.LoadAccount(address) + if err != nil { + return nil, err + } + + userAccount, ok := account.(state.UserAccountHandler) + if !ok { + return nil, errors.New("cannot cast 
AccountHandler to UserAccountHandler") + } + + return userAccount, nil +} + +// Close will call the Close methods on all inner components +func (node *testOnlyProcessingNode) Close() error { + return node.closeHandler.Close() +} + +// IsInterfaceNil returns true if there is no value under the interface +func (node *testOnlyProcessingNode) IsInterfaceNil() bool { + return node == nil +} diff --git a/node/chainSimulator/components/testOnlyProcessingNode_test.go b/node/chainSimulator/components/testOnlyProcessingNode_test.go new file mode 100644 index 00000000000..c48a8456086 --- /dev/null +++ b/node/chainSimulator/components/testOnlyProcessingNode_test.go @@ -0,0 +1,471 @@ +package components + +import ( + "errors" + "math/big" + "runtime" + "strings" + "testing" + "time" + + "github.com/multiversx/mx-chain-core-go/data/endProcess" + "github.com/multiversx/mx-chain-go/common" + "github.com/multiversx/mx-chain-go/node/chainSimulator/components/api" + "github.com/multiversx/mx-chain-go/node/chainSimulator/configs" + "github.com/multiversx/mx-chain-go/node/chainSimulator/dtos" + "github.com/multiversx/mx-chain-go/testscommon/factory" + "github.com/multiversx/mx-chain-go/testscommon/state" + vmcommon "github.com/multiversx/mx-chain-vm-common-go" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +var expectedErr = errors.New("expected error") + +func createMockArgsTestOnlyProcessingNode(t *testing.T) ArgsTestOnlyProcessingNode { + outputConfigs, err := configs.CreateChainSimulatorConfigs(configs.ArgsChainSimulatorConfigs{ + NumOfShards: 3, + OriginalConfigsPath: "../../../cmd/node/config/", + GenesisTimeStamp: 0, + RoundDurationInMillis: 6000, + TempDir: t.TempDir(), + MinNodesPerShard: 1, + MetaChainMinNodes: 1, + }) + require.Nil(t, err) + + return ArgsTestOnlyProcessingNode{ + Configs: outputConfigs.Configs, + GasScheduleFilename: outputConfigs.GasScheduleFilename, + NumShards: 3, + + SyncedBroadcastNetwork: NewSyncedBroadcastNetwork(), + ChanStopNodeProcess: make(chan endProcess.ArgEndProcess), + APIInterface: api.NewNoApiInterface(), + ShardIDStr: "0", + RoundDurationInMillis: 6000, + MinNodesMeta: 1, + MinNodesPerShard: 1, + } +} + +func TestNewTestOnlyProcessingNode(t *testing.T) { + if runtime.GOARCH == "arm64" { + t.Skip("skipping test on arm64") + } + + t.Run("should work", func(t *testing.T) { + args := createMockArgsTestOnlyProcessingNode(t) + node, err := NewTestOnlyProcessingNode(args) + assert.Nil(t, err) + assert.NotNil(t, node) + }) + t.Run("try commit a block", func(t *testing.T) { + args := createMockArgsTestOnlyProcessingNode(t) + node, err := NewTestOnlyProcessingNode(args) + assert.Nil(t, err) + assert.NotNil(t, node) + + newHeader, err := node.ProcessComponentsHolder.BlockProcessor().CreateNewHeader(1, 1) + assert.Nil(t, err) + + err = newHeader.SetPrevHash(node.ChainHandler.GetGenesisHeaderHash()) + assert.Nil(t, err) + + header, block, err := node.ProcessComponentsHolder.BlockProcessor().CreateBlock(newHeader, func() bool { + return true + }) + assert.Nil(t, err) + require.NotNil(t, header) + require.NotNil(t, block) + + err = node.ProcessComponentsHolder.BlockProcessor().ProcessBlock(header, block, func() time.Duration { + return 1000 + }) + assert.Nil(t, err) + + err = node.ProcessComponentsHolder.BlockProcessor().CommitBlock(header, block) + assert.Nil(t, err) + }) + t.Run("CreateCoreComponents failure should error", func(t *testing.T) { + args := createMockArgsTestOnlyProcessingNode(t) + args.Configs.GeneralConfig.Marshalizer.Type = 
"invalid type" + node, err := NewTestOnlyProcessingNode(args) + require.Error(t, err) + require.Nil(t, node) + }) + t.Run("CreateCryptoComponents failure should error", func(t *testing.T) { + args := createMockArgsTestOnlyProcessingNode(t) + args.Configs.GeneralConfig.PublicKeyPIDSignature.Type = "invalid type" + node, err := NewTestOnlyProcessingNode(args) + require.Error(t, err) + require.Nil(t, node) + }) + t.Run("CreateNetworkComponents failure should error", func(t *testing.T) { + args := createMockArgsTestOnlyProcessingNode(t) + args.SyncedBroadcastNetwork = nil + node, err := NewTestOnlyProcessingNode(args) + require.Error(t, err) + require.Nil(t, node) + }) + t.Run("CreateBootstrapComponents failure should error", func(t *testing.T) { + args := createMockArgsTestOnlyProcessingNode(t) + args.Configs.FlagsConfig.WorkingDir = "" + node, err := NewTestOnlyProcessingNode(args) + require.Error(t, err) + require.Nil(t, node) + }) + t.Run("CreateStateComponents failure should error", func(t *testing.T) { + args := createMockArgsTestOnlyProcessingNode(t) + args.ShardIDStr = common.MetachainShardName // coverage only + args.Configs.GeneralConfig.StateTriesConfig.MaxStateTrieLevelInMemory = 0 + node, err := NewTestOnlyProcessingNode(args) + require.Error(t, err) + require.Nil(t, node) + }) + t.Run("CreateProcessComponents failure should error", func(t *testing.T) { + args := createMockArgsTestOnlyProcessingNode(t) + args.Configs.FlagsConfig.Version = "" + node, err := NewTestOnlyProcessingNode(args) + require.Error(t, err) + require.Nil(t, node) + }) + t.Run("createFacade failure should error", func(t *testing.T) { + args := createMockArgsTestOnlyProcessingNode(t) + args.Configs.EpochConfig.GasSchedule.GasScheduleByEpochs = nil + node, err := NewTestOnlyProcessingNode(args) + require.Error(t, err) + require.Nil(t, node) + }) +} + +func TestTestOnlyProcessingNode_SetKeyValueForAddress(t *testing.T) { + // TODO reinstate test after Wasm VM pointer fix + if testing.Short() { + t.Skip("cannot run with -race -short; requires Wasm VM fix") + } + + goodKeyValueMap := map[string]string{ + "01": "02", + } + node, err := NewTestOnlyProcessingNode(createMockArgsTestOnlyProcessingNode(t)) + require.NoError(t, err) + + address := "erd1qtc600lryvytxuy4h7vn7xmsy5tw6vuw3tskr75cwnmv4mnyjgsq6e5zgj" + addressBytes, _ := node.CoreComponentsHolder.AddressPubKeyConverter().Decode(address) + + t.Run("should work", func(t *testing.T) { + _, err = node.StateComponentsHolder.AccountsAdapter().GetExistingAccount(addressBytes) + require.Error(t, err) + require.True(t, strings.Contains(err.Error(), "account was not found")) + + err = node.SetKeyValueForAddress(addressBytes, goodKeyValueMap) + require.NoError(t, err) + + _, err = node.StateComponentsHolder.AccountsAdapter().GetExistingAccount(addressBytes) + require.NoError(t, err) + }) + t.Run("decode key failure should error", func(t *testing.T) { + keyValueMap := map[string]string{ + "nonHex": "01", + } + err = node.SetKeyValueForAddress(addressBytes, keyValueMap) + require.NotNil(t, err) + require.True(t, strings.Contains(err.Error(), "cannot decode key")) + }) + t.Run("decode value failure should error", func(t *testing.T) { + keyValueMap := map[string]string{ + "01": "nonHex", + } + err = node.SetKeyValueForAddress(addressBytes, keyValueMap) + require.NotNil(t, err) + require.True(t, strings.Contains(err.Error(), "cannot decode value")) + }) + t.Run("LoadAccount failure should error", func(t *testing.T) { + argsLocal := createMockArgsTestOnlyProcessingNode(t) + 
nodeLocal, errLocal := NewTestOnlyProcessingNode(argsLocal) + require.NoError(t, errLocal) + + nodeLocal.StateComponentsHolder = &factory.StateComponentsMock{ + Accounts: &state.AccountsStub{ + LoadAccountCalled: func(container []byte) (vmcommon.AccountHandler, error) { + return nil, expectedErr + }, + }, + } + + errLocal = nodeLocal.SetKeyValueForAddress(addressBytes, nil) + require.Equal(t, expectedErr, errLocal) + }) + t.Run("account un-castable to UserAccountHandler should error", func(t *testing.T) { + argsLocal := createMockArgsTestOnlyProcessingNode(t) + nodeLocal, errLocal := NewTestOnlyProcessingNode(argsLocal) + require.NoError(t, errLocal) + + nodeLocal.StateComponentsHolder = &factory.StateComponentsMock{ + Accounts: &state.AccountsStub{ + LoadAccountCalled: func(container []byte) (vmcommon.AccountHandler, error) { + return &state.PeerAccountHandlerMock{}, nil + }, + }, + } + + errLocal = nodeLocal.SetKeyValueForAddress(addressBytes, nil) + require.Error(t, errLocal) + require.Equal(t, "cannot cast AccountHandler to UserAccountHandler", errLocal.Error()) + }) + t.Run("SaveKeyValue failure should error", func(t *testing.T) { + nodeLocal, errLocal := NewTestOnlyProcessingNode(createMockArgsTestOnlyProcessingNode(t)) + require.NoError(t, errLocal) + + nodeLocal.StateComponentsHolder = &factory.StateComponentsMock{ + Accounts: &state.AccountsStub{ + LoadAccountCalled: func(container []byte) (vmcommon.AccountHandler, error) { + return &state.UserAccountStub{ + SaveKeyValueCalled: func(key []byte, value []byte) error { + return expectedErr + }, + }, nil + }, + }, + } + + errLocal = nodeLocal.SetKeyValueForAddress(addressBytes, goodKeyValueMap) + require.Equal(t, expectedErr, errLocal) + }) + t.Run("SaveAccount failure should error", func(t *testing.T) { + argsLocal := createMockArgsTestOnlyProcessingNode(t) + nodeLocal, errLocal := NewTestOnlyProcessingNode(argsLocal) + require.NoError(t, errLocal) + + nodeLocal.StateComponentsHolder = &factory.StateComponentsMock{ + Accounts: &state.AccountsStub{ + SaveAccountCalled: func(account vmcommon.AccountHandler) error { + return expectedErr + }, + }, + } + + errLocal = nodeLocal.SetKeyValueForAddress(addressBytes, goodKeyValueMap) + require.Equal(t, expectedErr, errLocal) + }) +} + +func TestTestOnlyProcessingNode_SetStateForAddress(t *testing.T) { + // TODO reinstate test after Wasm VM pointer fix + if testing.Short() { + t.Skip("cannot run with -race -short; requires Wasm VM fix") + } + + node, err := NewTestOnlyProcessingNode(createMockArgsTestOnlyProcessingNode(t)) + require.NoError(t, err) + nonce := uint64(100) + + address := "erd1qtc600lryvytxuy4h7vn7xmsy5tw6vuw3tskr75cwnmv4mnyjgsq6e5zgj" + scAddress := "erd1qqqqqqqqqqqqqpgqrchxzx5uu8sv3ceg8nx8cxc0gesezure5awqn46gtd" + addressBytes, _ := node.CoreComponentsHolder.AddressPubKeyConverter().Decode(address) + scAddressBytes, _ := node.CoreComponentsHolder.AddressPubKeyConverter().Decode(scAddress) + addressState := &dtos.AddressState{ + Address: "erd1qtc600lryvytxuy4h7vn7xmsy5tw6vuw3tskr75cwnmv4mnyjgsq6e5zgj", + Nonce: &nonce, + Balance: "1000000000000000000", + Keys: map[string]string{ + "01": "02", + }, + } + + t.Run("should work", func(t *testing.T) { + _, err = node.StateComponentsHolder.AccountsAdapter().GetExistingAccount(addressBytes) + require.Error(t, err) + require.True(t, strings.Contains(err.Error(), "account was not found")) + + err = node.SetStateForAddress(addressBytes, addressState) + require.NoError(t, err) + + account, err := 
node.StateComponentsHolder.AccountsAdapter().GetExistingAccount(addressBytes) + require.NoError(t, err) + require.Equal(t, *addressState.Nonce, account.GetNonce()) + }) + t.Run("LoadAccount failure should error", func(t *testing.T) { + nodeLocal, errLocal := NewTestOnlyProcessingNode(createMockArgsTestOnlyProcessingNode(t)) + require.NoError(t, errLocal) + + nodeLocal.StateComponentsHolder = &factory.StateComponentsMock{ + Accounts: &state.AccountsStub{ + LoadAccountCalled: func(container []byte) (vmcommon.AccountHandler, error) { + return nil, expectedErr + }, + }, + } + + errLocal = nodeLocal.SetStateForAddress([]byte("address"), nil) + require.Equal(t, expectedErr, errLocal) + }) + t.Run("state balance invalid should error", func(t *testing.T) { + addressStateCopy := *addressState + addressStateCopy.Balance = "invalid balance" + err = node.SetStateForAddress(addressBytes, &addressStateCopy) + require.Error(t, err) + require.Equal(t, "cannot convert string balance to *big.Int", err.Error()) + }) + t.Run("AddToBalance failure should error", func(t *testing.T) { + nodeLocal, errLocal := NewTestOnlyProcessingNode(createMockArgsTestOnlyProcessingNode(t)) + require.NoError(t, errLocal) + + nodeLocal.StateComponentsHolder = &factory.StateComponentsMock{ + Accounts: &state.AccountsStub{ + LoadAccountCalled: func(container []byte) (vmcommon.AccountHandler, error) { + return &state.UserAccountStub{ + AddToBalanceCalled: func(value *big.Int) error { + return expectedErr + }, + Balance: big.NewInt(0), + }, nil + }, + }, + } + + errLocal = nodeLocal.SetStateForAddress([]byte("address"), addressState) + require.Equal(t, expectedErr, errLocal) + }) + t.Run("SaveKeyValue failure should error", func(t *testing.T) { + argsLocal := createMockArgsTestOnlyProcessingNode(t) + nodeLocal, errLocal := NewTestOnlyProcessingNode(argsLocal) + require.NoError(t, errLocal) + + nodeLocal.StateComponentsHolder = &factory.StateComponentsMock{ + Accounts: &state.AccountsStub{ + LoadAccountCalled: func(container []byte) (vmcommon.AccountHandler, error) { + return &state.UserAccountStub{ + SaveKeyValueCalled: func(key []byte, value []byte) error { + return expectedErr + }, + Balance: big.NewInt(0), + }, nil + }, + }, + } + + errLocal = nodeLocal.SetStateForAddress(addressBytes, addressState) + require.Equal(t, expectedErr, errLocal) + }) + t.Run("invalid sc code should error", func(t *testing.T) { + addressStateCopy := *addressState + addressStateCopy.Address = scAddress + addressStateCopy.Code = "invalid code" + + err = node.SetStateForAddress(scAddressBytes, &addressStateCopy) + require.Error(t, err) + }) + t.Run("invalid sc code hash should error", func(t *testing.T) { + addressStateCopy := *addressState + addressStateCopy.Address = scAddress + addressStateCopy.CodeHash = "invalid code hash" + + err = node.SetStateForAddress(scAddressBytes, &addressStateCopy) + require.Error(t, err) + }) + t.Run("invalid sc code metadata should error", func(t *testing.T) { + addressStateCopy := *addressState + addressStateCopy.Address = scAddress + addressStateCopy.CodeMetadata = "invalid code metadata" + + err = node.SetStateForAddress(scAddressBytes, &addressStateCopy) + require.Error(t, err) + }) + t.Run("invalid sc owner should error", func(t *testing.T) { + addressStateCopy := *addressState + addressStateCopy.Address = scAddress + addressStateCopy.Owner = "invalid owner" + + err = node.SetStateForAddress(scAddressBytes, &addressStateCopy) + require.Error(t, err) + }) + t.Run("invalid sc dev rewards should error", func(t 
*testing.T) { + addressStateCopy := *addressState + addressStateCopy.Owner = address + addressStateCopy.Address = scAddress + addressStateCopy.DeveloperRewards = "invalid dev rewards" + + err = node.SetStateForAddress(scAddressBytes, &addressStateCopy) + require.Error(t, err) + }) + t.Run("invalid root hash should error", func(t *testing.T) { + addressStateCopy := *addressState + addressStateCopy.Owner = address + addressStateCopy.Address = scAddress // coverage + addressStateCopy.DeveloperRewards = "1000000" + addressStateCopy.RootHash = "invalid root hash" + + err = node.SetStateForAddress(scAddressBytes, &addressStateCopy) + require.Error(t, err) + }) + t.Run("SaveAccount failure should error", func(t *testing.T) { + argsLocal := createMockArgsTestOnlyProcessingNode(t) + nodeLocal, errLocal := NewTestOnlyProcessingNode(argsLocal) + require.NoError(t, errLocal) + + nodeLocal.StateComponentsHolder = &factory.StateComponentsMock{ + Accounts: &state.AccountsStub{ + SaveAccountCalled: func(account vmcommon.AccountHandler) error { + return expectedErr + }, + }, + } + + errLocal = nodeLocal.SetStateForAddress(addressBytes, addressState) + require.Equal(t, expectedErr, errLocal) + }) +} + +func TestTestOnlyProcessingNode_IsInterfaceNil(t *testing.T) { + if runtime.GOARCH == "arm64" { + t.Skip("skipping test on arm64") + } + + var node *testOnlyProcessingNode + require.True(t, node.IsInterfaceNil()) + + node, _ = NewTestOnlyProcessingNode(createMockArgsTestOnlyProcessingNode(t)) + require.False(t, node.IsInterfaceNil()) +} + +func TestTestOnlyProcessingNode_Close(t *testing.T) { + if runtime.GOARCH == "arm64" { + t.Skip("skipping test on arm64") + } + + node, err := NewTestOnlyProcessingNode(createMockArgsTestOnlyProcessingNode(t)) + require.NoError(t, err) + + require.NoError(t, node.Close()) +} + +func TestTestOnlyProcessingNode_Getters(t *testing.T) { + if runtime.GOARCH == "arm64" { + t.Skip("skipping test on arm64") + } + + node := &testOnlyProcessingNode{} + require.Nil(t, node.GetProcessComponents()) + require.Nil(t, node.GetChainHandler()) + require.Nil(t, node.GetBroadcastMessenger()) + require.Nil(t, node.GetCryptoComponents()) + require.Nil(t, node.GetCoreComponents()) + require.Nil(t, node.GetStateComponents()) + require.Nil(t, node.GetFacadeHandler()) + require.Nil(t, node.GetStatusCoreComponents()) + + node, err := NewTestOnlyProcessingNode(createMockArgsTestOnlyProcessingNode(t)) + require.Nil(t, err) + + require.NotNil(t, node.GetProcessComponents()) + require.NotNil(t, node.GetChainHandler()) + require.NotNil(t, node.GetBroadcastMessenger()) + require.NotNil(t, node.GetShardCoordinator()) + require.NotNil(t, node.GetCryptoComponents()) + require.NotNil(t, node.GetCoreComponents()) + require.NotNil(t, node.GetStateComponents()) + require.NotNil(t, node.GetFacadeHandler()) + require.NotNil(t, node.GetStatusCoreComponents()) +} diff --git a/node/chainSimulator/configs/configs.go b/node/chainSimulator/configs/configs.go new file mode 100644 index 00000000000..d781a3f8a5d --- /dev/null +++ b/node/chainSimulator/configs/configs.go @@ -0,0 +1,451 @@ +package configs + +import ( + "bytes" + "encoding/hex" + "encoding/json" + "encoding/pem" + "math/big" + "os" + "path" + "strconv" + "strings" + + "github.com/multiversx/mx-chain-core-go/core" + "github.com/multiversx/mx-chain-core-go/core/pubkeyConverter" + shardingCore "github.com/multiversx/mx-chain-core-go/core/sharding" + crypto "github.com/multiversx/mx-chain-crypto-go" + "github.com/multiversx/mx-chain-crypto-go/signing" + 
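The error-path tests above pin down the encoding expected for each dtos.AddressState field: Code is hex, CodeHash, CodeMetadata and RootHash are base64, Owner is a bech32 address, while Balance and DeveloperRewards are base-10 big-integer strings. A sketch of a payload that would pass those decoders; the addresses are the ones already used in the tests and every value is illustrative (assumes "encoding/base64" is imported):

    nonce := uint64(7)
    scState := &dtos.AddressState{
        Address:          "erd1qqqqqqqqqqqqqpgqrchxzx5uu8sv3ceg8nx8cxc0gesezure5awqn46gtd",
        Nonce:            &nonce,
        Balance:          "1000000000000000000", // base-10 string
        Code:             "0061736d01000000",    // hex-encoded bytecode
        CodeHash:         base64.StdEncoding.EncodeToString([]byte("code hash")),
        CodeMetadata:     base64.StdEncoding.EncodeToString([]byte{5, 0}),
        Owner:            "erd1qtc600lryvytxuy4h7vn7xmsy5tw6vuw3tskr75cwnmv4mnyjgsq6e5zgj",
        DeveloperRewards: "1000000",
        Keys:             map[string]string{"01": "02"}, // hex key -> hex value
    }
    // err := node.SetStateForAddress(scAddressBytes, scState)
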
"github.com/multiversx/mx-chain-crypto-go/signing/ed25519" + "github.com/multiversx/mx-chain-crypto-go/signing/mcl" + "github.com/multiversx/mx-chain-go/common/factory" + "github.com/multiversx/mx-chain-go/config" + "github.com/multiversx/mx-chain-go/genesis/data" + "github.com/multiversx/mx-chain-go/node" + "github.com/multiversx/mx-chain-go/node/chainSimulator/dtos" + "github.com/multiversx/mx-chain-go/sharding" + "github.com/multiversx/mx-chain-go/storage/storageunit" + "github.com/multiversx/mx-chain-go/testscommon" +) + +var oneEgld = big.NewInt(1000000000000000000) +var initialStakedEgldPerNode = big.NewInt(0).Mul(oneEgld, big.NewInt(2500)) +var initialSupply = big.NewInt(0).Mul(oneEgld, big.NewInt(20000000)) // 20 million EGLD +const ( + // ChainID contains the chain id + ChainID = "chain" + + allValidatorsPemFileName = "allValidatorsKeys.pem" +) + +// ArgsChainSimulatorConfigs holds all the components needed to create the chain simulator configs +type ArgsChainSimulatorConfigs struct { + NumOfShards uint32 + OriginalConfigsPath string + GenesisTimeStamp int64 + RoundDurationInMillis uint64 + TempDir string + MinNodesPerShard uint32 + MetaChainMinNodes uint32 + InitialEpoch uint32 + RoundsPerEpoch core.OptionalUint64 + NumNodesWaitingListShard uint32 + NumNodesWaitingListMeta uint32 + AlterConfigsFunction func(cfg *config.Configs) +} + +// ArgsConfigsSimulator holds the configs for the chain simulator +type ArgsConfigsSimulator struct { + GasScheduleFilename string + Configs config.Configs + ValidatorsPrivateKeys []crypto.PrivateKey + InitialWallets *dtos.InitialWalletKeys +} + +// CreateChainSimulatorConfigs will create the chain simulator configs +func CreateChainSimulatorConfigs(args ArgsChainSimulatorConfigs) (*ArgsConfigsSimulator, error) { + configs, err := testscommon.CreateTestConfigs(args.TempDir, args.OriginalConfigsPath) + if err != nil { + return nil, err + } + + configs.GeneralConfig.GeneralSettings.ChainID = ChainID + + // empty genesis smart contracts file + err = os.WriteFile(configs.ConfigurationPathsHolder.SmartContracts, []byte("[]"), os.ModePerm) + if err != nil { + return nil, err + } + + // update genesis.json + initialWallets, err := generateGenesisFile(args, configs) + if err != nil { + return nil, err + } + + // generate validators key and nodesSetup.json + privateKeys, publicKeys, err := generateValidatorsKeyAndUpdateFiles( + configs, + initialWallets.StakeWallets, + args, + ) + if err != nil { + return nil, err + } + + configs.ConfigurationPathsHolder.AllValidatorKeys = path.Join(args.OriginalConfigsPath, allValidatorsPemFileName) + err = generateValidatorsPem(configs.ConfigurationPathsHolder.AllValidatorKeys, publicKeys, privateKeys) + if err != nil { + return nil, err + } + + configs.GeneralConfig.SmartContractsStorage.DB.Type = string(storageunit.MemoryDB) + configs.GeneralConfig.SmartContractsStorageForSCQuery.DB.Type = string(storageunit.MemoryDB) + configs.GeneralConfig.SmartContractsStorageSimulate.DB.Type = string(storageunit.MemoryDB) + + maxNumNodes := uint64((args.MinNodesPerShard+args.NumNodesWaitingListShard)*args.NumOfShards) + + uint64(args.MetaChainMinNodes+args.NumNodesWaitingListMeta) + + SetMaxNumberOfNodesInConfigs(configs, maxNumNodes, args.NumOfShards) + + // set compatible trie configs + configs.GeneralConfig.StateTriesConfig.SnapshotsEnabled = false + + // enable db lookup extension + configs.GeneralConfig.DbLookupExtensions.Enabled = true + + configs.GeneralConfig.EpochStartConfig.ExtraDelayForRequestBlockInfoInMilliseconds = 1 + 
configs.GeneralConfig.EpochStartConfig.GenesisEpoch = args.InitialEpoch + + if args.RoundsPerEpoch.HasValue { + configs.GeneralConfig.EpochStartConfig.RoundsPerEpoch = int64(args.RoundsPerEpoch.Value) + } + + gasScheduleName, err := GetLatestGasScheduleFilename(configs.ConfigurationPathsHolder.GasScheduleDirectoryName) + if err != nil { + return nil, err + } + + node.ApplyArchCustomConfigs(configs) + + if args.AlterConfigsFunction != nil { + args.AlterConfigsFunction(configs) + } + + return &ArgsConfigsSimulator{ + Configs: *configs, + ValidatorsPrivateKeys: privateKeys, + GasScheduleFilename: gasScheduleName, + InitialWallets: initialWallets, + }, nil +} + +// SetMaxNumberOfNodesInConfigs will correctly set the max number of nodes in configs +func SetMaxNumberOfNodesInConfigs(cfg *config.Configs, maxNumNodes uint64, numOfShards uint32) { + cfg.SystemSCConfig.StakingSystemSCConfig.MaxNumberOfNodesForStake = maxNumNodes + numMaxNumNodesEnableEpochs := len(cfg.EpochConfig.EnableEpochs.MaxNodesChangeEnableEpoch) + for idx := 0; idx < numMaxNumNodesEnableEpochs-1; idx++ { + cfg.EpochConfig.EnableEpochs.MaxNodesChangeEnableEpoch[idx].MaxNumNodes = uint32(maxNumNodes) + } + + cfg.EpochConfig.EnableEpochs.MaxNodesChangeEnableEpoch[numMaxNumNodesEnableEpochs-1].EpochEnable = cfg.EpochConfig.EnableEpochs.StakingV4Step3EnableEpoch + prevEntry := cfg.EpochConfig.EnableEpochs.MaxNodesChangeEnableEpoch[numMaxNumNodesEnableEpochs-2] + cfg.EpochConfig.EnableEpochs.MaxNodesChangeEnableEpoch[numMaxNumNodesEnableEpochs-1].NodesToShufflePerShard = prevEntry.NodesToShufflePerShard + cfg.EpochConfig.EnableEpochs.MaxNodesChangeEnableEpoch[numMaxNumNodesEnableEpochs-1].MaxNumNodes = prevEntry.MaxNumNodes - (numOfShards+1)*prevEntry.NodesToShufflePerShard +} + +// SetQuickJailRatingConfig will set the rating config in a way that leads to rapid jailing of a node +func SetQuickJailRatingConfig(cfg *config.Configs) { + cfg.RatingsConfig.ShardChain.RatingSteps.ConsecutiveMissedBlocksPenalty = 100 + cfg.RatingsConfig.ShardChain.RatingSteps.HoursToMaxRatingFromStartRating = 1 + cfg.RatingsConfig.MetaChain.RatingSteps.ConsecutiveMissedBlocksPenalty = 100 + cfg.RatingsConfig.MetaChain.RatingSteps.HoursToMaxRatingFromStartRating = 1 +} + +// SetStakingV4ActivationEpochs configures activation epochs for Staking V4. 
+// It takes an initial epoch and sets three consecutive steps for enabling Staking V4 features: +// - Step 1 activation epoch +// - Step 2 activation epoch +// - Step 3 activation epoch +func SetStakingV4ActivationEpochs(cfg *config.Configs, initialEpoch uint32) { + cfg.EpochConfig.EnableEpochs.StakingV4Step1EnableEpoch = initialEpoch + cfg.EpochConfig.EnableEpochs.StakingV4Step2EnableEpoch = initialEpoch + 1 + cfg.EpochConfig.EnableEpochs.StakingV4Step3EnableEpoch = initialEpoch + 2 + + // Set the MaxNodesChange enable epoch for index 2 + cfg.EpochConfig.EnableEpochs.MaxNodesChangeEnableEpoch[2].EpochEnable = initialEpoch + 2 +} + +func generateGenesisFile(args ArgsChainSimulatorConfigs, configs *config.Configs) (*dtos.InitialWalletKeys, error) { + addressConverter, err := factory.NewPubkeyConverter(configs.GeneralConfig.AddressPubkeyConverter) + if err != nil { + return nil, err + } + + initialWalletKeys := &dtos.InitialWalletKeys{ + BalanceWallets: make(map[uint32]*dtos.WalletKey), + StakeWallets: make([]*dtos.WalletKey, 0), + } + + addresses := make([]data.InitialAccount, 0) + numOfNodes := int((args.NumNodesWaitingListShard+args.MinNodesPerShard)*args.NumOfShards + args.NumNodesWaitingListMeta + args.MetaChainMinNodes) + for i := 0; i < numOfNodes; i++ { + wallet, errGenerate := generateWalletKey(addressConverter) + if errGenerate != nil { + return nil, errGenerate + } + + stakedValue := big.NewInt(0).Set(initialStakedEgldPerNode) + addresses = append(addresses, data.InitialAccount{ + Address: wallet.Address.Bech32, + StakingValue: stakedValue, + Supply: stakedValue, + }) + + initialWalletKeys.StakeWallets = append(initialWalletKeys.StakeWallets, wallet) + } + + // generate an address for every shard + initialBalance := big.NewInt(0).Set(initialSupply) + totalStakedValue := big.NewInt(int64(numOfNodes)) + totalStakedValue = totalStakedValue.Mul(totalStakedValue, big.NewInt(0).Set(initialStakedEgldPerNode)) + initialBalance = initialBalance.Sub(initialBalance, totalStakedValue) + + walletBalance := big.NewInt(0).Set(initialBalance) + walletBalance.Div(walletBalance, big.NewInt(int64(args.NumOfShards))) + + // remainder = balance % numTotalWalletKeys + remainder := big.NewInt(0).Set(initialBalance) + remainder.Mod(remainder, big.NewInt(int64(args.NumOfShards))) + + for shardID := uint32(0); shardID < args.NumOfShards; shardID++ { + walletKey, errG := generateWalletKeyForShard(shardID, args.NumOfShards, addressConverter) + if errG != nil { + return nil, errG + } + + addresses = append(addresses, data.InitialAccount{ + Address: walletKey.Address.Bech32, + Balance: big.NewInt(0).Set(walletBalance), + Supply: big.NewInt(0).Set(walletBalance), + }) + + initialWalletKeys.BalanceWallets[shardID] = walletKey + } + + addresses[len(addresses)-1].Balance.Add(walletBalance, remainder) + addresses[len(addresses)-1].Supply.Add(walletBalance, remainder) + + addressesBytes, errM := json.Marshal(addresses) + if errM != nil { + return nil, errM + } + + err = os.WriteFile(configs.ConfigurationPathsHolder.Genesis, addressesBytes, os.ModePerm) + if err != nil { + return nil, err + } + + return initialWalletKeys, nil +} + +func generateValidatorsKeyAndUpdateFiles( + configs *config.Configs, + stakeWallets []*dtos.WalletKey, + args ArgsChainSimulatorConfigs, +) ([]crypto.PrivateKey, []crypto.PublicKey, error) { + blockSigningGenerator := signing.NewKeyGenerator(mcl.NewSuiteBLS12()) + + nodesSetupFile := configs.ConfigurationPathsHolder.Nodes + nodes := &sharding.NodesSetup{} + err := core.LoadJsonFile(nodes, 
nodesSetupFile) + if err != nil { + return nil, nil, err + } + + nodes.RoundDuration = args.RoundDurationInMillis + nodes.StartTime = args.GenesisTimeStamp + + // TODO fix this to can be configurable + nodes.ConsensusGroupSize = 1 + nodes.MetaChainConsensusGroupSize = 1 + nodes.Hysteresis = 0 + + nodes.MinNodesPerShard = args.MinNodesPerShard + nodes.MetaChainMinNodes = args.MetaChainMinNodes + + nodes.InitialNodes = make([]*sharding.InitialNode, 0) + privateKeys := make([]crypto.PrivateKey, 0) + publicKeys := make([]crypto.PublicKey, 0) + walletIndex := 0 + // generate meta keys + for idx := uint32(0); idx < args.NumNodesWaitingListMeta+args.MetaChainMinNodes; idx++ { + sk, pk := blockSigningGenerator.GeneratePair() + privateKeys = append(privateKeys, sk) + publicKeys = append(publicKeys, pk) + + pkBytes, errB := pk.ToByteArray() + if errB != nil { + return nil, nil, errB + } + + nodes.InitialNodes = append(nodes.InitialNodes, &sharding.InitialNode{ + PubKey: hex.EncodeToString(pkBytes), + Address: stakeWallets[walletIndex].Address.Bech32, + }) + + walletIndex++ + } + + // generate shard keys + for idx1 := uint32(0); idx1 < args.NumOfShards; idx1++ { + for idx2 := uint32(0); idx2 < args.NumNodesWaitingListShard+args.MinNodesPerShard; idx2++ { + sk, pk := blockSigningGenerator.GeneratePair() + privateKeys = append(privateKeys, sk) + publicKeys = append(publicKeys, pk) + + pkBytes, errB := pk.ToByteArray() + if errB != nil { + return nil, nil, errB + } + + nodes.InitialNodes = append(nodes.InitialNodes, &sharding.InitialNode{ + PubKey: hex.EncodeToString(pkBytes), + Address: stakeWallets[walletIndex].Address.Bech32, + }) + walletIndex++ + } + } + + marshaledNodes, err := json.Marshal(nodes) + if err != nil { + return nil, nil, err + } + + err = os.WriteFile(nodesSetupFile, marshaledNodes, os.ModePerm) + if err != nil { + return nil, nil, err + } + + return privateKeys, publicKeys, nil +} + +func generateValidatorsPem(validatorsFile string, publicKeys []crypto.PublicKey, privateKey []crypto.PrivateKey) error { + validatorPubKeyConverter, err := pubkeyConverter.NewHexPubkeyConverter(96) + if err != nil { + return err + } + + buff := bytes.Buffer{} + for idx := 0; idx < len(publicKeys); idx++ { + publicKeyBytes, errA := publicKeys[idx].ToByteArray() + if errA != nil { + return errA + } + + pkString, errE := validatorPubKeyConverter.Encode(publicKeyBytes) + if errE != nil { + return errE + } + + privateKeyBytes, errP := privateKey[idx].ToByteArray() + if errP != nil { + return errP + } + + blk := pem.Block{ + Type: "PRIVATE KEY for " + pkString, + Bytes: []byte(hex.EncodeToString(privateKeyBytes)), + } + + err = pem.Encode(&buff, &blk) + if err != nil { + return err + } + } + + return os.WriteFile(validatorsFile, buff.Bytes(), 0644) +} + +// GetLatestGasScheduleFilename will parse the provided path and get the latest gas schedule filename +func GetLatestGasScheduleFilename(directory string) (string, error) { + entries, err := os.ReadDir(directory) + if err != nil { + return "", err + } + + extension := ".toml" + versionMarker := "V" + + highestVersion := 0 + filename := "" + for _, entry := range entries { + if entry.IsDir() { + continue + } + + name := entry.Name() + splt := strings.Split(name, versionMarker) + if len(splt) != 2 { + continue + } + + versionAsString := splt[1][:len(splt[1])-len(extension)] + number, errConversion := strconv.Atoi(versionAsString) + if errConversion != nil { + continue + } + + if number > highestVersion { + highestVersion = number + filename = name + } + } + + 
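GetLatestGasScheduleFilename keeps the entry with the highest numeric suffix after the "V" marker and skips directories and files it cannot parse. A small sketch of the expected behaviour, assuming the directory is laid out like the stock node configs (the concrete file names are illustrative):

    // Given ./gasSchedules containing gasScheduleV1.toml ... gasScheduleV7.toml,
    // the helper returns the path of the newest schedule:
    filename, err := configs.GetLatestGasScheduleFilename("./gasSchedules")
    if err != nil {
        return err
    }
    fmt.Println(filename) // gasSchedules/gasScheduleV7.toml
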
return path.Join(directory, filename), nil +} + +func generateWalletKeyForShard(shardID, numOfShards uint32, converter core.PubkeyConverter) (*dtos.WalletKey, error) { + for { + walletKey, err := generateWalletKey(converter) + if err != nil { + return nil, err + } + + addressShardID := shardingCore.ComputeShardID(walletKey.Address.Bytes, numOfShards) + if addressShardID != shardID { + continue + } + + return walletKey, nil + } +} + +func generateWalletKey(converter core.PubkeyConverter) (*dtos.WalletKey, error) { + walletSuite := ed25519.NewEd25519() + walletKeyGenerator := signing.NewKeyGenerator(walletSuite) + + sk, pk := walletKeyGenerator.GeneratePair() + pubKeyBytes, err := pk.ToByteArray() + if err != nil { + return nil, err + } + + privateKeyBytes, err := sk.ToByteArray() + if err != nil { + return nil, err + } + + bech32Address, err := converter.Encode(pubKeyBytes) + if err != nil { + return nil, err + } + + return &dtos.WalletKey{ + Address: dtos.WalletAddress{ + Bech32: bech32Address, + Bytes: pubKeyBytes, + }, + PrivateKeyHex: hex.EncodeToString(privateKeyBytes), + }, nil +} diff --git a/node/chainSimulator/configs/configs_test.go b/node/chainSimulator/configs/configs_test.go new file mode 100644 index 00000000000..52da48ecda0 --- /dev/null +++ b/node/chainSimulator/configs/configs_test.go @@ -0,0 +1,28 @@ +package configs + +import ( + "testing" + + "github.com/multiversx/mx-chain-go/integrationTests/realcomponents" + "github.com/stretchr/testify/require" +) + +func TestNewProcessorRunnerChainArguments(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + + outputConfig, err := CreateChainSimulatorConfigs(ArgsChainSimulatorConfigs{ + NumOfShards: 3, + OriginalConfigsPath: "../../../cmd/node/config", + RoundDurationInMillis: 6000, + GenesisTimeStamp: 0, + TempDir: t.TempDir(), + MetaChainMinNodes: 1, + MinNodesPerShard: 1, + }) + require.Nil(t, err) + + pr := realcomponents.NewProcessorRunner(t, outputConfig.Configs) + pr.Close(t) +} diff --git a/node/chainSimulator/disabled/antiflooder.go b/node/chainSimulator/disabled/antiflooder.go new file mode 100644 index 00000000000..0d4c45fd0e3 --- /dev/null +++ b/node/chainSimulator/disabled/antiflooder.go @@ -0,0 +1,72 @@ +package disabled + +import ( + "time" + + "github.com/multiversx/mx-chain-core-go/core" + "github.com/multiversx/mx-chain-go/p2p" + "github.com/multiversx/mx-chain-go/process" +) + +type antiFlooder struct { +} + +// NewAntiFlooder creates a new instance of disabled antiflooder +func NewAntiFlooder() *antiFlooder { + return &antiFlooder{} +} + +// CanProcessMessage returns nil +func (a *antiFlooder) CanProcessMessage(_ p2p.MessageP2P, _ core.PeerID) error { + return nil +} + +// IsOriginatorEligibleForTopic does nothing and returns nil +func (a *antiFlooder) IsOriginatorEligibleForTopic(_ core.PeerID, _ string) error { + return nil +} + +// CanProcessMessagesOnTopic does nothing and returns nil +func (a *antiFlooder) CanProcessMessagesOnTopic(_ core.PeerID, _ string, _ uint32, _ uint64, _ []byte) error { + return nil +} + +// ApplyConsensusSize does nothing +func (a *antiFlooder) ApplyConsensusSize(_ int) { +} + +// SetDebugger does nothing and returns nil +func (a *antiFlooder) SetDebugger(_ process.AntifloodDebugger) error { + return nil +} + +// BlacklistPeer does nothing +func (a *antiFlooder) BlacklistPeer(_ core.PeerID, _ string, _ time.Duration) { +} + +// ResetForTopic does nothing +func (a *antiFlooder) ResetForTopic(_ string) { +} + +// SetMaxMessagesForTopic does nothing +func 
(a *antiFlooder) SetMaxMessagesForTopic(_ string, _ uint32) { +} + +// SetPeerValidatorMapper does nothing and returns nil +func (a *antiFlooder) SetPeerValidatorMapper(_ process.PeerValidatorMapper) error { + return nil +} + +// SetTopicsForAll does nothing +func (a *antiFlooder) SetTopicsForAll(_ ...string) { +} + +// Close does nothing and returns nil +func (a *antiFlooder) Close() error { + return nil +} + +// IsInterfaceNil returns true if there is no value under the interface +func (a *antiFlooder) IsInterfaceNil() bool { + return a == nil +} diff --git a/node/chainSimulator/disabled/peerHonesty.go b/node/chainSimulator/disabled/peerHonesty.go new file mode 100644 index 00000000000..87552b29e43 --- /dev/null +++ b/node/chainSimulator/disabled/peerHonesty.go @@ -0,0 +1,23 @@ +package disabled + +type peerHonesty struct { +} + +// NewPeerHonesty creates a new instance of disabled peer honesty +func NewPeerHonesty() *peerHonesty { + return &peerHonesty{} +} + +// ChangeScore does nothing +func (p *peerHonesty) ChangeScore(_ string, _ string, _ int) { +} + +// Close does nothing and returns nil +func (p *peerHonesty) Close() error { + return nil +} + +// IsInterfaceNil returns true if there is no value under the interface +func (p *peerHonesty) IsInterfaceNil() bool { + return p == nil +} diff --git a/node/chainSimulator/disabled/peersRatingMonitor.go b/node/chainSimulator/disabled/peersRatingMonitor.go new file mode 100644 index 00000000000..425b63fdc8c --- /dev/null +++ b/node/chainSimulator/disabled/peersRatingMonitor.go @@ -0,0 +1,21 @@ +package disabled + +import "github.com/multiversx/mx-chain-go/p2p" + +type peersRatingMonitor struct { +} + +// NewPeersRatingMonitor will create a new disabled peersRatingMonitor instance +func NewPeersRatingMonitor() *peersRatingMonitor { + return &peersRatingMonitor{} +} + +// GetConnectedPeersRatings returns an empty string since it is a disabled component +func (monitor *peersRatingMonitor) GetConnectedPeersRatings(_ p2p.ConnectionsHandler) (string, error) { + return "", nil +} + +// IsInterfaceNil returns true if there is no value under the interface +func (monitor *peersRatingMonitor) IsInterfaceNil() bool { + return monitor == nil +} diff --git a/node/chainSimulator/dtos/keys.go b/node/chainSimulator/dtos/keys.go new file mode 100644 index 00000000000..7f4c0e613e9 --- /dev/null +++ b/node/chainSimulator/dtos/keys.go @@ -0,0 +1,25 @@ +package dtos + +// WalletKey holds the public and the private key of a wallet +type WalletKey struct { + Address WalletAddress `json:"address"` + PrivateKeyHex string `json:"privateKeyHex"` +} + +// InitialWalletKeys holds the initial wallet keys +type InitialWalletKeys struct { + StakeWallets []*WalletKey `json:"stakeWallets"` + BalanceWallets map[uint32]*WalletKey `json:"balanceWallets"` +} + +// WalletAddress holds the address in multiple formats +type WalletAddress struct { + Bech32 string `json:"bech32"` + Bytes []byte `json:"bytes"` +} + +// BLSKey holds the BLS key in multiple formats +type BLSKey struct { + Hex string + Bytes []byte +} diff --git a/node/chainSimulator/dtos/state.go b/node/chainSimulator/dtos/state.go new file mode 100644 index 00000000000..a8edb7e212d --- /dev/null +++ b/node/chainSimulator/dtos/state.go @@ -0,0 +1,15 @@ +package dtos + +// AddressState will hold the address state +type AddressState struct { + Address string `json:"address"` + Nonce *uint64 `json:"nonce,omitempty"` + Balance string `json:"balance,omitempty"` + Code string `json:"code,omitempty"` + RootHash string 
`json:"rootHash,omitempty"` + CodeMetadata string `json:"codeMetadata,omitempty"` + CodeHash string `json:"codeHash,omitempty"` + DeveloperRewards string `json:"developerReward,omitempty"` + Owner string `json:"ownerAddress,omitempty"` + Keys map[string]string `json:"keys,omitempty"` +} diff --git a/node/chainSimulator/errors.go b/node/chainSimulator/errors.go new file mode 100644 index 00000000000..5e2dec0c16a --- /dev/null +++ b/node/chainSimulator/errors.go @@ -0,0 +1,12 @@ +package chainSimulator + +import "errors" + +var ( + errNilChainSimulator = errors.New("nil chain simulator") + errNilMetachainNode = errors.New("nil metachain node") + errShardSetupError = errors.New("shard setup error") + errEmptySliceOfTxs = errors.New("empty slice of transactions to send") + errNilTransaction = errors.New("nil transaction") + errInvalidMaxNumOfBlocks = errors.New("invalid max number of blocks to generate") +) diff --git a/node/chainSimulator/facade.go b/node/chainSimulator/facade.go new file mode 100644 index 00000000000..8cf4d1f50b6 --- /dev/null +++ b/node/chainSimulator/facade.go @@ -0,0 +1,54 @@ +package chainSimulator + +import ( + "fmt" + + "github.com/multiversx/mx-chain-core-go/core/check" + "github.com/multiversx/mx-chain-go/common" + "github.com/multiversx/mx-chain-go/node/chainSimulator/process" + vmcommon "github.com/multiversx/mx-chain-vm-common-go" +) + +type chainSimulatorFacade struct { + chainSimulator ChainSimulator + metaNode process.NodeHandler +} + +// NewChainSimulatorFacade returns the chain simulator facade +func NewChainSimulatorFacade(chainSimulator ChainSimulator) (*chainSimulatorFacade, error) { + if check.IfNil(chainSimulator) { + return nil, errNilChainSimulator + } + + metaNode := chainSimulator.GetNodeHandler(common.MetachainShardId) + if check.IfNil(metaNode) { + return nil, errNilMetachainNode + } + + return &chainSimulatorFacade{ + chainSimulator: chainSimulator, + metaNode: metaNode, + }, nil +} + +// GetExistingAccountFromBech32AddressString will return the existing account for the provided address in bech32 format +func (f *chainSimulatorFacade) GetExistingAccountFromBech32AddressString(address string) (vmcommon.UserAccountHandler, error) { + addressBytes, err := f.metaNode.GetCoreComponents().AddressPubKeyConverter().Decode(address) + if err != nil { + return nil, err + } + + shardID := f.metaNode.GetShardCoordinator().ComputeId(addressBytes) + + shardNodeHandler := f.chainSimulator.GetNodeHandler(shardID) + if check.IfNil(shardNodeHandler) { + return nil, fmt.Errorf("%w missing node handler for shard %d", errShardSetupError, shardID) + } + + account, err := shardNodeHandler.GetStateComponents().AccountsAdapter().GetExistingAccount(addressBytes) + if err != nil { + return nil, err + } + + return account.(vmcommon.UserAccountHandler), nil +} diff --git a/node/chainSimulator/facade_test.go b/node/chainSimulator/facade_test.go new file mode 100644 index 00000000000..908704c05a0 --- /dev/null +++ b/node/chainSimulator/facade_test.go @@ -0,0 +1,193 @@ +package chainSimulator + +import ( + "errors" + "testing" + + "github.com/multiversx/mx-chain-go/common" + "github.com/multiversx/mx-chain-go/factory" + factoryMock "github.com/multiversx/mx-chain-go/factory/mock" + "github.com/multiversx/mx-chain-go/integrationTests/mock" + "github.com/multiversx/mx-chain-go/node/chainSimulator/process" + "github.com/multiversx/mx-chain-go/sharding" + "github.com/multiversx/mx-chain-go/state" + "github.com/multiversx/mx-chain-go/testscommon" + 
"github.com/multiversx/mx-chain-go/testscommon/chainSimulator" + stateMock "github.com/multiversx/mx-chain-go/testscommon/state" + "github.com/multiversx/mx-chain-go/testscommon/vmcommonMocks" + vmcommon "github.com/multiversx/mx-chain-vm-common-go" + "github.com/stretchr/testify/require" +) + +var expectedErr = errors.New("expected error") + +func TestNewChainSimulatorFacade(t *testing.T) { + t.Parallel() + + t.Run("should work", func(t *testing.T) { + t.Parallel() + + facade, err := NewChainSimulatorFacade(&chainSimulator.ChainSimulatorMock{ + GetNodeHandlerCalled: func(shardID uint32) process.NodeHandler { + return &chainSimulator.NodeHandlerMock{} + }, + }) + require.NoError(t, err) + require.NotNil(t, facade) + }) + t.Run("nil chain simulator should error", func(t *testing.T) { + t.Parallel() + + facade, err := NewChainSimulatorFacade(nil) + require.Equal(t, errNilChainSimulator, err) + require.Nil(t, facade) + }) + t.Run("nil node handler returned by chain simulator should error", func(t *testing.T) { + t.Parallel() + + facade, err := NewChainSimulatorFacade(&chainSimulator.ChainSimulatorMock{ + GetNodeHandlerCalled: func(shardID uint32) process.NodeHandler { + return nil + }, + }) + require.Equal(t, errNilMetachainNode, err) + require.Nil(t, facade) + }) +} + +func TestChainSimulatorFacade_GetExistingAccountFromBech32AddressString(t *testing.T) { + t.Parallel() + + t.Run("address decode failure should error", func(t *testing.T) { + t.Parallel() + + facade, err := NewChainSimulatorFacade(&chainSimulator.ChainSimulatorMock{ + GetNodeHandlerCalled: func(shardID uint32) process.NodeHandler { + return &chainSimulator.NodeHandlerMock{ + GetCoreComponentsCalled: func() factory.CoreComponentsHolder { + return &mock.CoreComponentsStub{ + AddressPubKeyConverterField: &testscommon.PubkeyConverterStub{ + DecodeCalled: func(humanReadable string) ([]byte, error) { + return nil, expectedErr + }, + }, + } + }, + } + }, + }) + require.NoError(t, err) + + handler, err := facade.GetExistingAccountFromBech32AddressString("address") + require.Equal(t, expectedErr, err) + require.Nil(t, handler) + }) + t.Run("nil shard node should error", func(t *testing.T) { + t.Parallel() + + facade, err := NewChainSimulatorFacade(&chainSimulator.ChainSimulatorMock{ + GetNodeHandlerCalled: func(shardID uint32) process.NodeHandler { + if shardID != common.MetachainShardId { + return nil + } + + return &chainSimulator.NodeHandlerMock{ + GetCoreComponentsCalled: func() factory.CoreComponentsHolder { + return &mock.CoreComponentsStub{ + AddressPubKeyConverterField: &testscommon.PubkeyConverterStub{}, + } + }, + GetShardCoordinatorCalled: func() sharding.Coordinator { + return &testscommon.ShardsCoordinatorMock{ + ComputeIdCalled: func(address []byte) uint32 { + return 0 + }, + } + }, + } + }, + }) + require.NoError(t, err) + + handler, err := facade.GetExistingAccountFromBech32AddressString("address") + require.True(t, errors.Is(err, errShardSetupError)) + require.Nil(t, handler) + }) + t.Run("shard node GetExistingAccount should error", func(t *testing.T) { + t.Parallel() + + facade, err := NewChainSimulatorFacade(&chainSimulator.ChainSimulatorMock{ + GetNodeHandlerCalled: func(shardID uint32) process.NodeHandler { + return &chainSimulator.NodeHandlerMock{ + GetCoreComponentsCalled: func() factory.CoreComponentsHolder { + return &mock.CoreComponentsStub{ + AddressPubKeyConverterField: &testscommon.PubkeyConverterStub{}, + } + }, + GetShardCoordinatorCalled: func() sharding.Coordinator { + return 
&testscommon.ShardsCoordinatorMock{ + ComputeIdCalled: func(address []byte) uint32 { + return 0 + }, + } + }, + GetStateComponentsCalled: func() factory.StateComponentsHolder { + return &factoryMock.StateComponentsHolderStub{ + AccountsAdapterCalled: func() state.AccountsAdapter { + return &stateMock.AccountsStub{ + GetExistingAccountCalled: func(addressContainer []byte) (vmcommon.AccountHandler, error) { + return nil, expectedErr + }, + } + }, + } + }, + } + }, + }) + require.NoError(t, err) + + handler, err := facade.GetExistingAccountFromBech32AddressString("address") + require.Equal(t, expectedErr, err) + require.Nil(t, handler) + }) + t.Run("should work", func(t *testing.T) { + t.Parallel() + + providedAccount := &vmcommonMocks.UserAccountStub{} + facade, err := NewChainSimulatorFacade(&chainSimulator.ChainSimulatorMock{ + GetNodeHandlerCalled: func(shardID uint32) process.NodeHandler { + return &chainSimulator.NodeHandlerMock{ + GetCoreComponentsCalled: func() factory.CoreComponentsHolder { + return &mock.CoreComponentsStub{ + AddressPubKeyConverterField: &testscommon.PubkeyConverterStub{}, + } + }, + GetShardCoordinatorCalled: func() sharding.Coordinator { + return &testscommon.ShardsCoordinatorMock{ + ComputeIdCalled: func(address []byte) uint32 { + return 0 + }, + } + }, + GetStateComponentsCalled: func() factory.StateComponentsHolder { + return &factoryMock.StateComponentsHolderStub{ + AccountsAdapterCalled: func() state.AccountsAdapter { + return &stateMock.AccountsStub{ + GetExistingAccountCalled: func(addressContainer []byte) (vmcommon.AccountHandler, error) { + return providedAccount, nil + }, + } + }, + } + }, + } + }, + }) + require.NoError(t, err) + + handler, err := facade.GetExistingAccountFromBech32AddressString("address") + require.NoError(t, err) + require.True(t, handler == providedAccount) // pointer testing + }) +} diff --git a/node/chainSimulator/interface.go b/node/chainSimulator/interface.go new file mode 100644 index 00000000000..0b2f51ca457 --- /dev/null +++ b/node/chainSimulator/interface.go @@ -0,0 +1,17 @@ +package chainSimulator + +import "github.com/multiversx/mx-chain-go/node/chainSimulator/process" + +// ChainHandler defines what a chain handler should be able to do +type ChainHandler interface { + IncrementRound() + CreateNewBlock() error + IsInterfaceNil() bool +} + +// ChainSimulator defines what a chain simulator should be able to do +type ChainSimulator interface { + GenerateBlocks(numOfBlocks int) error + GetNodeHandler(shardID uint32) process.NodeHandler + IsInterfaceNil() bool +} diff --git a/node/chainSimulator/process/errors.go b/node/chainSimulator/process/errors.go new file mode 100644 index 00000000000..eb1a69656e7 --- /dev/null +++ b/node/chainSimulator/process/errors.go @@ -0,0 +1,6 @@ +package process + +import "errors" + +// ErrNilNodeHandler signals that a nil node handler has been provided +var ErrNilNodeHandler = errors.New("nil node handler") diff --git a/node/chainSimulator/process/interface.go b/node/chainSimulator/process/interface.go new file mode 100644 index 00000000000..6dc0b84fa02 --- /dev/null +++ b/node/chainSimulator/process/interface.go @@ -0,0 +1,27 @@ +package process + +import ( + chainData "github.com/multiversx/mx-chain-core-go/data" + "github.com/multiversx/mx-chain-go/api/shared" + "github.com/multiversx/mx-chain-go/consensus" + "github.com/multiversx/mx-chain-go/factory" + "github.com/multiversx/mx-chain-go/node/chainSimulator/dtos" + "github.com/multiversx/mx-chain-go/sharding" +) + +// NodeHandler defines what 
a node handler should be able to do +type NodeHandler interface { + GetProcessComponents() factory.ProcessComponentsHolder + GetChainHandler() chainData.ChainHandler + GetBroadcastMessenger() consensus.BroadcastMessenger + GetShardCoordinator() sharding.Coordinator + GetCryptoComponents() factory.CryptoComponentsHolder + GetCoreComponents() factory.CoreComponentsHolder + GetStateComponents() factory.StateComponentsHolder + GetFacadeHandler() shared.FacadeHandler + GetStatusCoreComponents() factory.StatusCoreComponentsHolder + SetKeyValueForAddress(addressBytes []byte, state map[string]string) error + SetStateForAddress(address []byte, state *dtos.AddressState) error + Close() error + IsInterfaceNil() bool +} diff --git a/node/chainSimulator/process/processor.go b/node/chainSimulator/process/processor.go new file mode 100644 index 00000000000..d8f225bfde8 --- /dev/null +++ b/node/chainSimulator/process/processor.go @@ -0,0 +1,233 @@ +package process + +import ( + "github.com/multiversx/mx-chain-core-go/core/check" + "github.com/multiversx/mx-chain-core-go/data" + "github.com/multiversx/mx-chain-go/common" + "github.com/multiversx/mx-chain-go/consensus/spos" + "github.com/multiversx/mx-chain-go/node/chainSimulator/configs" + logger "github.com/multiversx/mx-chain-logger-go" +) + +var log = logger.GetOrCreate("process-block") + +type manualRoundHandler interface { + IncrementIndex() +} + +type blocksCreator struct { + nodeHandler NodeHandler +} + +// NewBlocksCreator will create a new instance of blocksCreator +func NewBlocksCreator(nodeHandler NodeHandler) (*blocksCreator, error) { + if check.IfNil(nodeHandler) { + return nil, ErrNilNodeHandler + } + + return &blocksCreator{ + nodeHandler: nodeHandler, + }, nil +} + +// IncrementRound will increment the current round +func (creator *blocksCreator) IncrementRound() { + roundHandler := creator.nodeHandler.GetCoreComponents().RoundHandler() + manual := roundHandler.(manualRoundHandler) + manual.IncrementIndex() + + creator.nodeHandler.GetStatusCoreComponents().AppStatusHandler().SetUInt64Value(common.MetricCurrentRound, uint64(roundHandler.Index())) +} + +// CreateNewBlock creates and process a new block +func (creator *blocksCreator) CreateNewBlock() error { + bp := creator.nodeHandler.GetProcessComponents().BlockProcessor() + + nonce, _, prevHash, prevRandSeed, epoch := creator.getPreviousHeaderData() + round := creator.nodeHandler.GetCoreComponents().RoundHandler().Index() + newHeader, err := bp.CreateNewHeader(uint64(round), nonce+1) + if err != nil { + return err + } + + shardID := creator.nodeHandler.GetShardCoordinator().SelfId() + err = newHeader.SetShardID(shardID) + if err != nil { + return err + } + + err = newHeader.SetPrevHash(prevHash) + if err != nil { + return err + } + + err = newHeader.SetPrevRandSeed(prevRandSeed) + if err != nil { + return err + } + + err = newHeader.SetPubKeysBitmap([]byte{1}) + if err != nil { + return err + } + + err = newHeader.SetChainID([]byte(configs.ChainID)) + if err != nil { + return err + } + + headerCreationTime := creator.nodeHandler.GetCoreComponents().RoundHandler().TimeStamp() + err = newHeader.SetTimeStamp(uint64(headerCreationTime.Unix())) + if err != nil { + return err + } + + validatorsGroup, err := creator.nodeHandler.GetProcessComponents().NodesCoordinator().ComputeConsensusGroup(prevRandSeed, newHeader.GetRound(), shardID, epoch) + if err != nil { + return err + } + blsKey := validatorsGroup[spos.IndexOfLeaderInConsensusGroup] + + isManaged := 
creator.nodeHandler.GetCryptoComponents().KeysHandler().IsKeyManagedByCurrentNode(blsKey.PubKey()) + if !isManaged { + log.Debug("cannot propose block - leader bls key is missing", + "leader key", blsKey.PubKey(), + "shard", creator.nodeHandler.GetShardCoordinator().SelfId()) + return nil + } + + signingHandler := creator.nodeHandler.GetCryptoComponents().ConsensusSigningHandler() + randSeed, err := signingHandler.CreateSignatureForPublicKey(newHeader.GetPrevRandSeed(), blsKey.PubKey()) + if err != nil { + return err + } + err = newHeader.SetRandSeed(randSeed) + if err != nil { + return err + } + + header, block, err := bp.CreateBlock(newHeader, func() bool { + return true + }) + if err != nil { + return err + } + + err = creator.setHeaderSignatures(header, blsKey.PubKey()) + if err != nil { + return err + } + + err = bp.CommitBlock(header, block) + if err != nil { + return err + } + + miniBlocks, transactions, err := bp.MarshalizedDataToBroadcast(header, block) + if err != nil { + return err + } + + err = creator.nodeHandler.GetBroadcastMessenger().BroadcastHeader(header, blsKey.PubKey()) + if err != nil { + return err + } + + err = creator.nodeHandler.GetBroadcastMessenger().BroadcastMiniBlocks(miniBlocks, blsKey.PubKey()) + if err != nil { + return err + } + + return creator.nodeHandler.GetBroadcastMessenger().BroadcastTransactions(transactions, blsKey.PubKey()) +} + +func (creator *blocksCreator) getPreviousHeaderData() (nonce, round uint64, prevHash, prevRandSeed []byte, epoch uint32) { + currentHeader := creator.nodeHandler.GetChainHandler().GetCurrentBlockHeader() + + if currentHeader != nil { + nonce, round = currentHeader.GetNonce(), currentHeader.GetRound() + prevHash = creator.nodeHandler.GetChainHandler().GetCurrentBlockHeaderHash() + prevRandSeed = currentHeader.GetRandSeed() + epoch = currentHeader.GetEpoch() + return + } + + prevHash = creator.nodeHandler.GetChainHandler().GetGenesisHeaderHash() + prevRandSeed = creator.nodeHandler.GetChainHandler().GetGenesisHeader().GetRandSeed() + round = uint64(creator.nodeHandler.GetCoreComponents().RoundHandler().Index()) - 1 + epoch = creator.nodeHandler.GetChainHandler().GetGenesisHeader().GetEpoch() + nonce = creator.nodeHandler.GetChainHandler().GetGenesisHeader().GetNonce() + + return +} + +func (creator *blocksCreator) setHeaderSignatures(header data.HeaderHandler, blsKeyBytes []byte) error { + signingHandler := creator.nodeHandler.GetCryptoComponents().ConsensusSigningHandler() + headerClone := header.ShallowClone() + _ = headerClone.SetPubKeysBitmap(nil) + + marshalizedHdr, err := creator.nodeHandler.GetCoreComponents().InternalMarshalizer().Marshal(headerClone) + if err != nil { + return err + } + + err = signingHandler.Reset([]string{string(blsKeyBytes)}) + if err != nil { + return err + } + + headerHash := creator.nodeHandler.GetCoreComponents().Hasher().Compute(string(marshalizedHdr)) + _, err = signingHandler.CreateSignatureShareForPublicKey( + headerHash, + uint16(0), + header.GetEpoch(), + blsKeyBytes, + ) + if err != nil { + return err + } + + sig, err := signingHandler.AggregateSigs(header.GetPubKeysBitmap(), header.GetEpoch()) + if err != nil { + return err + } + + err = header.SetSignature(sig) + if err != nil { + return err + } + + leaderSignature, err := creator.createLeaderSignature(header, blsKeyBytes) + if err != nil { + return err + } + + err = header.SetLeaderSignature(leaderSignature) + if err != nil { + return err + } + + return nil +} + +func (creator *blocksCreator) createLeaderSignature(header 
data.HeaderHandler, blsKeyBytes []byte) ([]byte, error) { + headerClone := header.ShallowClone() + err := headerClone.SetLeaderSignature(nil) + if err != nil { + return nil, err + } + + marshalizedHdr, err := creator.nodeHandler.GetCoreComponents().InternalMarshalizer().Marshal(headerClone) + if err != nil { + return nil, err + } + + signingHandler := creator.nodeHandler.GetCryptoComponents().ConsensusSigningHandler() + + return signingHandler.CreateSignatureForPublicKey(marshalizedHdr, blsKeyBytes) +} + +// IsInterfaceNil returns true if there is no value under the interface +func (creator *blocksCreator) IsInterfaceNil() bool { + return creator == nil +} diff --git a/node/chainSimulator/process/processor_test.go b/node/chainSimulator/process/processor_test.go new file mode 100644 index 00000000000..80ffd568134 --- /dev/null +++ b/node/chainSimulator/process/processor_test.go @@ -0,0 +1,631 @@ +package process_test + +import ( + "errors" + "testing" + "time" + + "github.com/multiversx/mx-chain-core-go/data" + "github.com/multiversx/mx-chain-core-go/data/block" + "github.com/multiversx/mx-chain-core-go/hashing" + "github.com/multiversx/mx-chain-core-go/marshal" + "github.com/multiversx/mx-chain-go/common" + "github.com/multiversx/mx-chain-go/consensus" + mockConsensus "github.com/multiversx/mx-chain-go/consensus/mock" + "github.com/multiversx/mx-chain-go/factory" + "github.com/multiversx/mx-chain-go/integrationTests/mock" + chainSimulatorProcess "github.com/multiversx/mx-chain-go/node/chainSimulator/process" + "github.com/multiversx/mx-chain-go/process" + "github.com/multiversx/mx-chain-go/sharding" + "github.com/multiversx/mx-chain-go/sharding/nodesCoordinator" + "github.com/multiversx/mx-chain-go/testscommon" + "github.com/multiversx/mx-chain-go/testscommon/chainSimulator" + testsConsensus "github.com/multiversx/mx-chain-go/testscommon/consensus" + testsFactory "github.com/multiversx/mx-chain-go/testscommon/factory" + "github.com/multiversx/mx-chain-go/testscommon/shardingMocks" + "github.com/multiversx/mx-chain-go/testscommon/statusHandler" + "github.com/stretchr/testify/require" +) + +var expectedErr = errors.New("expected error") + +func TestNewBlocksCreator(t *testing.T) { + t.Parallel() + + t.Run("nil node handler should error", func(t *testing.T) { + t.Parallel() + + creator, err := chainSimulatorProcess.NewBlocksCreator(nil) + require.Equal(t, chainSimulatorProcess.ErrNilNodeHandler, err) + require.Nil(t, creator) + }) + t.Run("should work", func(t *testing.T) { + t.Parallel() + + creator, err := chainSimulatorProcess.NewBlocksCreator(&chainSimulator.NodeHandlerMock{}) + require.NoError(t, err) + require.NotNil(t, creator) + }) +} + +func TestBlocksCreator_IsInterfaceNil(t *testing.T) { + t.Parallel() + + creator, _ := chainSimulatorProcess.NewBlocksCreator(nil) + require.True(t, creator.IsInterfaceNil()) + + creator, _ = chainSimulatorProcess.NewBlocksCreator(&chainSimulator.NodeHandlerMock{}) + require.False(t, creator.IsInterfaceNil()) +} + +func TestBlocksCreator_IncrementRound(t *testing.T) { + t.Parallel() + + wasIncrementIndexCalled := false + wasSetUInt64ValueCalled := false + nodeHandler := &chainSimulator.NodeHandlerMock{ + GetCoreComponentsCalled: func() factory.CoreComponentsHolder { + return &testsFactory.CoreComponentsHolderStub{ + RoundHandlerCalled: func() consensus.RoundHandler { + return &testscommon.RoundHandlerMock{ + IncrementIndexCalled: func() { + wasIncrementIndexCalled = true + }, + } + }, + } + }, + GetStatusCoreComponentsCalled: func() 
factory.StatusCoreComponentsHolder { + return &testsFactory.StatusCoreComponentsStub{ + AppStatusHandlerField: &statusHandler.AppStatusHandlerStub{ + SetUInt64ValueHandler: func(key string, value uint64) { + wasSetUInt64ValueCalled = true + require.Equal(t, common.MetricCurrentRound, key) + }, + }, + } + }, + } + creator, err := chainSimulatorProcess.NewBlocksCreator(nodeHandler) + require.NoError(t, err) + + creator.IncrementRound() + require.True(t, wasIncrementIndexCalled) + require.True(t, wasSetUInt64ValueCalled) +} + +func TestBlocksCreator_CreateNewBlock(t *testing.T) { + t.Parallel() + + t.Run("CreateNewHeader failure should error", func(t *testing.T) { + t.Parallel() + + blockProcess := &testscommon.BlockProcessorStub{ + CreateNewHeaderCalled: func(round uint64, nonce uint64) (data.HeaderHandler, error) { + return nil, expectedErr + }, + } + nodeHandler := getNodeHandler() + nodeHandler.GetProcessComponentsCalled = func() factory.ProcessComponentsHolder { + return &mock.ProcessComponentsStub{ + BlockProcess: blockProcess, + } + } + nodeHandler.GetChainHandlerCalled = func() data.ChainHandler { + return &testscommon.ChainHandlerStub{ + GetCurrentBlockHeaderCalled: func() data.HeaderHandler { + return &block.HeaderV2{} // coverage for getPreviousHeaderData + }, + } + } + + creator, err := chainSimulatorProcess.NewBlocksCreator(nodeHandler) + require.NoError(t, err) + + err = creator.CreateNewBlock() + require.Equal(t, expectedErr, err) + }) + t.Run("SetShardID failure should error", func(t *testing.T) { + t.Parallel() + + blockProcess := &testscommon.BlockProcessorStub{ + CreateNewHeaderCalled: func(round uint64, nonce uint64) (data.HeaderHandler, error) { + return &testscommon.HeaderHandlerStub{ + SetShardIDCalled: func(shardId uint32) error { + return expectedErr + }, + }, nil + }, + } + testCreateNewBlock(t, blockProcess, expectedErr) + }) + t.Run("SetPrevHash failure should error", func(t *testing.T) { + t.Parallel() + + blockProcess := &testscommon.BlockProcessorStub{ + CreateNewHeaderCalled: func(round uint64, nonce uint64) (data.HeaderHandler, error) { + return &testscommon.HeaderHandlerStub{ + SetPrevHashCalled: func(hash []byte) error { + return expectedErr + }, + }, nil + }, + } + testCreateNewBlock(t, blockProcess, expectedErr) + }) + t.Run("SetPrevRandSeed failure should error", func(t *testing.T) { + t.Parallel() + + blockProcess := &testscommon.BlockProcessorStub{ + CreateNewHeaderCalled: func(round uint64, nonce uint64) (data.HeaderHandler, error) { + return &testscommon.HeaderHandlerStub{ + SetPrevRandSeedCalled: func(seed []byte) error { + return expectedErr + }, + }, nil + }, + } + testCreateNewBlock(t, blockProcess, expectedErr) + }) + t.Run("SetPubKeysBitmap failure should error", func(t *testing.T) { + t.Parallel() + + blockProcess := &testscommon.BlockProcessorStub{ + CreateNewHeaderCalled: func(round uint64, nonce uint64) (data.HeaderHandler, error) { + return &testscommon.HeaderHandlerStub{ + SetPubKeysBitmapCalled: func(bitmap []byte) error { + return expectedErr + }, + }, nil + }, + } + testCreateNewBlock(t, blockProcess, expectedErr) + }) + t.Run("SetChainID failure should error", func(t *testing.T) { + t.Parallel() + + blockProcess := &testscommon.BlockProcessorStub{ + CreateNewHeaderCalled: func(round uint64, nonce uint64) (data.HeaderHandler, error) { + return &testscommon.HeaderHandlerStub{ + SetChainIDCalled: func(chainID []byte) error { + return expectedErr + }, + }, nil + }, + } + testCreateNewBlock(t, blockProcess, expectedErr) + }) + 
t.Run("SetTimeStamp failure should error", func(t *testing.T) { + t.Parallel() + + blockProcess := &testscommon.BlockProcessorStub{ + CreateNewHeaderCalled: func(round uint64, nonce uint64) (data.HeaderHandler, error) { + return &testscommon.HeaderHandlerStub{ + SetTimeStampCalled: func(timestamp uint64) error { + return expectedErr + }, + }, nil + }, + } + testCreateNewBlock(t, blockProcess, expectedErr) + }) + t.Run("ComputeConsensusGroup failure should error", func(t *testing.T) { + t.Parallel() + + nodeHandler := getNodeHandler() + nodeHandler.GetProcessComponentsCalled = func() factory.ProcessComponentsHolder { + return &mock.ProcessComponentsStub{ + BlockProcess: &testscommon.BlockProcessorStub{ + CreateNewHeaderCalled: func(round uint64, nonce uint64) (data.HeaderHandler, error) { + return &testscommon.HeaderHandlerStub{}, nil + }, + }, + NodesCoord: &shardingMocks.NodesCoordinatorStub{ + ComputeConsensusGroupCalled: func(randomness []byte, round uint64, shardId uint32, epoch uint32) (validatorsGroup []nodesCoordinator.Validator, err error) { + return nil, expectedErr + }, + }, + } + } + creator, err := chainSimulatorProcess.NewBlocksCreator(nodeHandler) + require.NoError(t, err) + + err = creator.CreateNewBlock() + require.Equal(t, expectedErr, err) + }) + t.Run("key not managed by the current node should return nil", func(t *testing.T) { + t.Parallel() + + nodeHandler := getNodeHandler() + nodeHandler.GetCryptoComponentsCalled = func() factory.CryptoComponentsHolder { + return &mock.CryptoComponentsStub{ + KeysHandlerField: &testscommon.KeysHandlerStub{ + IsKeyManagedByCurrentNodeCalled: func(pkBytes []byte) bool { + return false + }, + }, + } + } + creator, err := chainSimulatorProcess.NewBlocksCreator(nodeHandler) + require.NoError(t, err) + + err = creator.CreateNewBlock() + require.NoError(t, err) + }) + t.Run("CreateSignatureForPublicKey failure should error", func(t *testing.T) { + t.Parallel() + + nodeHandler := getNodeHandler() + kh := nodeHandler.GetCryptoComponents().KeysHandler() + nodeHandler.GetCryptoComponentsCalled = func() factory.CryptoComponentsHolder { + return &mock.CryptoComponentsStub{ + KeysHandlerField: kh, + SigHandler: &testsConsensus.SigningHandlerStub{ + CreateSignatureForPublicKeyCalled: func(message []byte, publicKeyBytes []byte) ([]byte, error) { + return nil, expectedErr + }, + }, + } + } + creator, err := chainSimulatorProcess.NewBlocksCreator(nodeHandler) + require.NoError(t, err) + + err = creator.CreateNewBlock() + require.Equal(t, expectedErr, err) + }) + t.Run("SetRandSeed failure should error", func(t *testing.T) { + t.Parallel() + + blockProcess := &testscommon.BlockProcessorStub{ + CreateNewHeaderCalled: func(round uint64, nonce uint64) (data.HeaderHandler, error) { + return &testscommon.HeaderHandlerStub{ + SetRandSeedCalled: func(seed []byte) error { + return expectedErr + }, + }, nil + }, + } + testCreateNewBlock(t, blockProcess, expectedErr) + }) + t.Run("CreateBlock failure should error", func(t *testing.T) { + t.Parallel() + + blockProcess := &testscommon.BlockProcessorStub{ + CreateNewHeaderCalled: func(round uint64, nonce uint64) (data.HeaderHandler, error) { + return &testscommon.HeaderHandlerStub{}, nil + }, + CreateBlockCalled: func(initialHdrData data.HeaderHandler, haveTime func() bool) (data.HeaderHandler, data.BodyHandler, error) { + return nil, nil, expectedErr + }, + } + testCreateNewBlock(t, blockProcess, expectedErr) + }) + t.Run("setHeaderSignatures.Marshal failure should error", func(t *testing.T) { + t.Parallel() + + 
nodeHandler := getNodeHandler() + rh := nodeHandler.GetCoreComponents().RoundHandler() + nodeHandler.GetCoreComponentsCalled = func() factory.CoreComponentsHolder { + return &testsFactory.CoreComponentsHolderStub{ + RoundHandlerCalled: func() consensus.RoundHandler { + return rh + }, + InternalMarshalizerCalled: func() marshal.Marshalizer { + return &testscommon.MarshallerStub{ + MarshalCalled: func(obj interface{}) ([]byte, error) { + return nil, expectedErr + }, + } + }, + } + } + creator, err := chainSimulatorProcess.NewBlocksCreator(nodeHandler) + require.NoError(t, err) + + err = creator.CreateNewBlock() + require.Equal(t, expectedErr, err) + }) + t.Run("setHeaderSignatures.Reset failure should error", func(t *testing.T) { + t.Parallel() + + nodeHandler := getNodeHandler() + kh := nodeHandler.GetCryptoComponents().KeysHandler() + nodeHandler.GetCryptoComponentsCalled = func() factory.CryptoComponentsHolder { + return &mock.CryptoComponentsStub{ + KeysHandlerField: kh, + SigHandler: &testsConsensus.SigningHandlerStub{ + ResetCalled: func(pubKeys []string) error { + return expectedErr + }, + }, + } + } + creator, err := chainSimulatorProcess.NewBlocksCreator(nodeHandler) + require.NoError(t, err) + + err = creator.CreateNewBlock() + require.Equal(t, expectedErr, err) + }) + t.Run("setHeaderSignatures.CreateSignatureShareForPublicKey failure should error", func(t *testing.T) { + t.Parallel() + + nodeHandler := getNodeHandler() + kh := nodeHandler.GetCryptoComponents().KeysHandler() + nodeHandler.GetCryptoComponentsCalled = func() factory.CryptoComponentsHolder { + return &mock.CryptoComponentsStub{ + KeysHandlerField: kh, + SigHandler: &testsConsensus.SigningHandlerStub{ + CreateSignatureShareForPublicKeyCalled: func(message []byte, index uint16, epoch uint32, publicKeyBytes []byte) ([]byte, error) { + return nil, expectedErr + }, + }, + } + } + creator, err := chainSimulatorProcess.NewBlocksCreator(nodeHandler) + require.NoError(t, err) + + err = creator.CreateNewBlock() + require.Equal(t, expectedErr, err) + }) + t.Run("setHeaderSignatures.AggregateSigs failure should error", func(t *testing.T) { + t.Parallel() + + nodeHandler := getNodeHandler() + kh := nodeHandler.GetCryptoComponents().KeysHandler() + nodeHandler.GetCryptoComponentsCalled = func() factory.CryptoComponentsHolder { + return &mock.CryptoComponentsStub{ + KeysHandlerField: kh, + SigHandler: &testsConsensus.SigningHandlerStub{ + AggregateSigsCalled: func(bitmap []byte, epoch uint32) ([]byte, error) { + return nil, expectedErr + }, + }, + } + } + creator, err := chainSimulatorProcess.NewBlocksCreator(nodeHandler) + require.NoError(t, err) + + err = creator.CreateNewBlock() + require.Equal(t, expectedErr, err) + }) + t.Run("setHeaderSignatures.SetSignature failure should error", func(t *testing.T) { + t.Parallel() + + blockProcess := &testscommon.BlockProcessorStub{ + CreateNewHeaderCalled: func(round uint64, nonce uint64) (data.HeaderHandler, error) { + return &testscommon.HeaderHandlerStub{}, nil + }, + CreateBlockCalled: func(initialHdrData data.HeaderHandler, haveTime func() bool) (data.HeaderHandler, data.BodyHandler, error) { + return &testscommon.HeaderHandlerStub{ + CloneCalled: func() data.HeaderHandler { + return &testscommon.HeaderHandlerStub{} + }, + SetSignatureCalled: func(signature []byte) error { + return expectedErr + }, + }, &block.Body{}, nil + }, + } + testCreateNewBlock(t, blockProcess, expectedErr) + }) + t.Run("createLeaderSignature.SetLeaderSignature failure should error", func(t *testing.T) { + 
t.Parallel() + + blockProcess := &testscommon.BlockProcessorStub{ + CreateNewHeaderCalled: func(round uint64, nonce uint64) (data.HeaderHandler, error) { + return &testscommon.HeaderHandlerStub{}, nil + }, + CreateBlockCalled: func(initialHdrData data.HeaderHandler, haveTime func() bool) (data.HeaderHandler, data.BodyHandler, error) { + return &testscommon.HeaderHandlerStub{ + CloneCalled: func() data.HeaderHandler { + return &testscommon.HeaderHandlerStub{ + SetLeaderSignatureCalled: func(signature []byte) error { + return expectedErr + }, + } + }, + }, &block.Body{}, nil + }, + } + testCreateNewBlock(t, blockProcess, expectedErr) + }) + t.Run("createLeaderSignature.SetLeaderSignature failure should error", func(t *testing.T) { + t.Parallel() + + blockProcess := &testscommon.BlockProcessorStub{ + CreateNewHeaderCalled: func(round uint64, nonce uint64) (data.HeaderHandler, error) { + return &testscommon.HeaderHandlerStub{}, nil + }, + CreateBlockCalled: func(initialHdrData data.HeaderHandler, haveTime func() bool) (data.HeaderHandler, data.BodyHandler, error) { + return &testscommon.HeaderHandlerStub{ + CloneCalled: func() data.HeaderHandler { + return &testscommon.HeaderHandlerStub{ + SetLeaderSignatureCalled: func(signature []byte) error { + return expectedErr + }, + } + }, + }, &block.Body{}, nil + }, + } + testCreateNewBlock(t, blockProcess, expectedErr) + }) + t.Run("setHeaderSignatures.SetLeaderSignature failure should error", func(t *testing.T) { + t.Parallel() + + blockProcess := &testscommon.BlockProcessorStub{ + CreateNewHeaderCalled: func(round uint64, nonce uint64) (data.HeaderHandler, error) { + return &testscommon.HeaderHandlerStub{}, nil + }, + CreateBlockCalled: func(initialHdrData data.HeaderHandler, haveTime func() bool) (data.HeaderHandler, data.BodyHandler, error) { + return &testscommon.HeaderHandlerStub{ + CloneCalled: func() data.HeaderHandler { + return &testscommon.HeaderHandlerStub{} + }, + SetLeaderSignatureCalled: func(signature []byte) error { + return expectedErr + }, + }, &block.Body{}, nil + }, + } + testCreateNewBlock(t, blockProcess, expectedErr) + }) + t.Run("CommitBlock failure should error", func(t *testing.T) { + t.Parallel() + + blockProcess := &testscommon.BlockProcessorStub{ + CreateNewHeaderCalled: func(round uint64, nonce uint64) (data.HeaderHandler, error) { + return &testscommon.HeaderHandlerStub{}, nil + }, + CreateBlockCalled: func(initialHdrData data.HeaderHandler, haveTime func() bool) (data.HeaderHandler, data.BodyHandler, error) { + return &testscommon.HeaderHandlerStub{ + CloneCalled: func() data.HeaderHandler { + return &testscommon.HeaderHandlerStub{} + }, + }, &block.Body{}, nil + }, + CommitBlockCalled: func(header data.HeaderHandler, body data.BodyHandler) error { + return expectedErr + }, + } + testCreateNewBlock(t, blockProcess, expectedErr) + }) + t.Run("MarshalizedDataToBroadcast failure should error", func(t *testing.T) { + t.Parallel() + + blockProcess := &testscommon.BlockProcessorStub{ + CreateNewHeaderCalled: func(round uint64, nonce uint64) (data.HeaderHandler, error) { + return &testscommon.HeaderHandlerStub{}, nil + }, + CreateBlockCalled: func(initialHdrData data.HeaderHandler, haveTime func() bool) (data.HeaderHandler, data.BodyHandler, error) { + return &testscommon.HeaderHandlerStub{ + CloneCalled: func() data.HeaderHandler { + return &testscommon.HeaderHandlerStub{} + }, + }, &block.Body{}, nil + }, + MarshalizedDataToBroadcastCalled: func(header data.HeaderHandler, body data.BodyHandler) (map[uint32][]byte, 
map[string][][]byte, error) { + return nil, nil, expectedErr + }, + } + testCreateNewBlock(t, blockProcess, expectedErr) + }) + t.Run("BroadcastHeader failure should error", func(t *testing.T) { + t.Parallel() + + nodeHandler := getNodeHandler() + nodeHandler.GetBroadcastMessengerCalled = func() consensus.BroadcastMessenger { + return &mockConsensus.BroadcastMessengerMock{ + BroadcastHeaderCalled: func(handler data.HeaderHandler, bytes []byte) error { + return expectedErr + }, + } + } + creator, err := chainSimulatorProcess.NewBlocksCreator(nodeHandler) + require.NoError(t, err) + + err = creator.CreateNewBlock() + require.Equal(t, expectedErr, err) + }) + t.Run("should work", func(t *testing.T) { + t.Parallel() + + creator, err := chainSimulatorProcess.NewBlocksCreator(getNodeHandler()) + require.NoError(t, err) + + err = creator.CreateNewBlock() + require.NoError(t, err) + }) +} + +func testCreateNewBlock(t *testing.T, blockProcess process.BlockProcessor, expectedErr error) { + nodeHandler := getNodeHandler() + nc := nodeHandler.GetProcessComponents().NodesCoordinator() + nodeHandler.GetProcessComponentsCalled = func() factory.ProcessComponentsHolder { + return &mock.ProcessComponentsStub{ + BlockProcess: blockProcess, + NodesCoord: nc, + } + } + creator, err := chainSimulatorProcess.NewBlocksCreator(nodeHandler) + require.NoError(t, err) + + err = creator.CreateNewBlock() + require.Equal(t, expectedErr, err) +} + +func getNodeHandler() *chainSimulator.NodeHandlerMock { + return &chainSimulator.NodeHandlerMock{ + GetCoreComponentsCalled: func() factory.CoreComponentsHolder { + return &testsFactory.CoreComponentsHolderStub{ + RoundHandlerCalled: func() consensus.RoundHandler { + return &testscommon.RoundHandlerMock{ + TimeStampCalled: func() time.Time { + return time.Now() + }, + } + }, + InternalMarshalizerCalled: func() marshal.Marshalizer { + return &testscommon.MarshallerStub{} + }, + HasherCalled: func() hashing.Hasher { + return &testscommon.HasherStub{ + ComputeCalled: func(s string) []byte { + return []byte("hash") + }, + } + }, + } + }, + GetProcessComponentsCalled: func() factory.ProcessComponentsHolder { + return &mock.ProcessComponentsStub{ + BlockProcess: &testscommon.BlockProcessorStub{ + CreateNewHeaderCalled: func(round uint64, nonce uint64) (data.HeaderHandler, error) { + return &testscommon.HeaderHandlerStub{}, nil + }, + CreateBlockCalled: func(initialHdrData data.HeaderHandler, haveTime func() bool) (data.HeaderHandler, data.BodyHandler, error) { + haveTime() // coverage only + return &testscommon.HeaderHandlerStub{ + CloneCalled: func() data.HeaderHandler { + return &testscommon.HeaderHandlerStub{} + }, + }, &block.Body{}, nil + }, + MarshalizedDataToBroadcastCalled: func(header data.HeaderHandler, body data.BodyHandler) (map[uint32][]byte, map[string][][]byte, error) { + return make(map[uint32][]byte), make(map[string][][]byte), nil + }, + }, + NodesCoord: &shardingMocks.NodesCoordinatorStub{ + ComputeConsensusGroupCalled: func(randomness []byte, round uint64, shardId uint32, epoch uint32) (validatorsGroup []nodesCoordinator.Validator, err error) { + return []nodesCoordinator.Validator{ + shardingMocks.NewValidatorMock([]byte("A"), 1, 1), + }, nil + }, + }, + } + }, + GetChainHandlerCalled: func() data.ChainHandler { + return &testscommon.ChainHandlerStub{ + GetGenesisHeaderCalled: func() data.HeaderHandler { + return &block.HeaderV2{} + }, + } + }, + GetShardCoordinatorCalled: func() sharding.Coordinator { + return &testscommon.ShardsCoordinatorMock{} + }, + 
GetCryptoComponentsCalled: func() factory.CryptoComponentsHolder { + return &mock.CryptoComponentsStub{ + KeysHandlerField: &testscommon.KeysHandlerStub{ + IsKeyManagedByCurrentNodeCalled: func(pkBytes []byte) bool { + return true + }, + }, + SigHandler: &testsConsensus.SigningHandlerStub{}, + } + }, + GetBroadcastMessengerCalled: func() consensus.BroadcastMessenger { + return &mockConsensus.BroadcastMessengerMock{} + }, + } +} diff --git a/node/customConfigsArm64.go b/node/customConfigsArm64.go new file mode 100644 index 00000000000..ce62a5fa604 --- /dev/null +++ b/node/customConfigsArm64.go @@ -0,0 +1,29 @@ +//go:build arm64 + +package node + +import ( + "runtime" + + "github.com/multiversx/mx-chain-go/config" +) + +// ApplyArchCustomConfigs will apply configuration tweaks based on the architecture the node is running on +func ApplyArchCustomConfigs(configs *config.Configs) { + log.Debug("ApplyArchCustomConfigs", "architecture", runtime.GOARCH) + + firstSupportedWasmer2VMVersion := "v1.5" + log.Debug("ApplyArchCustomConfigs - hardcoding the initial VM to " + firstSupportedWasmer2VMVersion) + configs.GeneralConfig.VirtualMachine.Execution.WasmVMVersions = []config.WasmVMVersionByEpoch{ + { + StartEpoch: 0, + Version: firstSupportedWasmer2VMVersion, + }, + } + configs.GeneralConfig.VirtualMachine.Querying.WasmVMVersions = []config.WasmVMVersionByEpoch{ + { + StartEpoch: 0, + Version: firstSupportedWasmer2VMVersion, + }, + } +} diff --git a/node/customConfigsArm64_test.go b/node/customConfigsArm64_test.go new file mode 100644 index 00000000000..925774a3318 --- /dev/null +++ b/node/customConfigsArm64_test.go @@ -0,0 +1,91 @@ +//go:build arm64 + +package node + +import ( + "testing" + + "github.com/multiversx/mx-chain-go/config" + "github.com/stretchr/testify/assert" +) + +func TestApplyArchCustomConfigs(t *testing.T) { + t.Parallel() + + executionVMConfig := config.VirtualMachineConfig{ + WasmVMVersions: []config.WasmVMVersionByEpoch{ + { + StartEpoch: 0, + Version: "v1.2", + }, + { + StartEpoch: 1, + Version: "v1.3", + }, + { + StartEpoch: 2, + Version: "v1.4", + }, + { + StartEpoch: 3, + Version: "v1.5", + }, + }, + TimeOutForSCExecutionInMilliseconds: 1, + WasmerSIGSEGVPassthrough: true, + } + + queryVMConfig := config.QueryVirtualMachineConfig{ + VirtualMachineConfig: executionVMConfig, + NumConcurrentVMs: 15, + } + + expectedVMWasmVersionsConfig := []config.WasmVMVersionByEpoch{ + { + StartEpoch: 0, + Version: "v1.5", + }, + } + + t.Run("providing a configuration should alter it", func(t *testing.T) { + t.Parallel() + + providedConfigs := &config.Configs{ + GeneralConfig: &config.Config{ + VirtualMachine: config.VirtualMachineServicesConfig{ + Execution: executionVMConfig, + Querying: queryVMConfig, + }, + }, + } + + expectedVMConfig := providedConfigs.GeneralConfig.VirtualMachine + expectedVMConfig.Execution.WasmVMVersions = expectedVMWasmVersionsConfig + expectedVMConfig.Querying.WasmVMVersions = expectedVMWasmVersionsConfig + + ApplyArchCustomConfigs(providedConfigs) + + assert.Equal(t, expectedVMConfig, providedConfigs.GeneralConfig.VirtualMachine) + }) + t.Run("empty config should return an altered config", func(t *testing.T) { + t.Parallel() + + providedConfigs := &config.Configs{ + GeneralConfig: &config.Config{}, + } + + expectedVMConfig := providedConfigs.GeneralConfig.VirtualMachine + expectedVMConfig.Execution.WasmVMVersions = expectedVMWasmVersionsConfig + expectedVMConfig.Querying.WasmVMVersions = expectedVMWasmVersionsConfig + + ApplyArchCustomConfigs(providedConfigs) + + 
expectedConfig := &config.Configs{ + GeneralConfig: &config.Config{ + VirtualMachine: expectedVMConfig, + }, + } + + assert.Equal(t, expectedConfig, providedConfigs) + }) +} diff --git a/node/customConfigsDefault.go b/node/customConfigsDefault.go new file mode 100644 index 00000000000..b762871db10 --- /dev/null +++ b/node/customConfigsDefault.go @@ -0,0 +1,14 @@ +//go:build !arm64 + +package node + +import ( + "runtime" + + "github.com/multiversx/mx-chain-go/config" +) + +// ApplyArchCustomConfigs will apply configuration tweaks based on the architecture the node is running on +func ApplyArchCustomConfigs(_ *config.Configs) { + log.Debug("ApplyArchCustomConfigs - nothing to do", "architecture", runtime.GOARCH) +} diff --git a/node/customConfigsDefault_test.go b/node/customConfigsDefault_test.go new file mode 100644 index 00000000000..8f9e8eb6521 --- /dev/null +++ b/node/customConfigsDefault_test.go @@ -0,0 +1,74 @@ +//go:build !arm64 + +package node + +import ( + "testing" + + "github.com/multiversx/mx-chain-go/config" + "github.com/stretchr/testify/assert" +) + +func TestApplyArchCustomConfigs(t *testing.T) { + t.Parallel() + + executionVMConfig := config.VirtualMachineConfig{ + WasmVMVersions: []config.WasmVMVersionByEpoch{ + { + StartEpoch: 0, + Version: "v1.2", + }, + { + StartEpoch: 1, + Version: "v1.3", + }, + { + StartEpoch: 2, + Version: "v1.4", + }, + { + StartEpoch: 3, + Version: "v1.5", + }, + }, + TimeOutForSCExecutionInMilliseconds: 1, + WasmerSIGSEGVPassthrough: true, + } + + queryVMConfig := config.QueryVirtualMachineConfig{ + VirtualMachineConfig: executionVMConfig, + NumConcurrentVMs: 15, + } + + t.Run("providing a configuration should not alter it", func(t *testing.T) { + t.Parallel() + + providedConfigs := &config.Configs{ + GeneralConfig: &config.Config{ + VirtualMachine: config.VirtualMachineServicesConfig{ + Execution: executionVMConfig, + Querying: queryVMConfig, + }, + }, + } + + ApplyArchCustomConfigs(providedConfigs) + + assert.Equal(t, executionVMConfig, providedConfigs.GeneralConfig.VirtualMachine.Execution) + assert.Equal(t, queryVMConfig, providedConfigs.GeneralConfig.VirtualMachine.Querying) + }) + t.Run("empty config should return an empty config", func(t *testing.T) { + t.Parallel() + + // this test will prevent adding new config changes without handling them in this test + providedConfigs := &config.Configs{ + GeneralConfig: &config.Config{}, + } + emptyConfigs := &config.Configs{ + GeneralConfig: &config.Config{}, + } + ApplyArchCustomConfigs(providedConfigs) + + assert.Equal(t, emptyConfigs, providedConfigs) + }) +} diff --git a/node/external/nodeApiResolver.go b/node/external/nodeApiResolver.go index d980e9ad91f..0ae0356f4f7 100644 --- a/node/external/nodeApiResolver.go +++ b/node/external/nodeApiResolver.go @@ -40,7 +40,9 @@ type ArgNodeApiResolver struct { AccountsParser genesis.AccountsParser GasScheduleNotifier common.GasScheduleNotifierAPI ManagedPeersMonitor common.ManagedPeersMonitor + PublicKey string NodesCoordinator nodesCoordinator.NodesCoordinator + StorageManagers []common.StorageManager } // nodeApiResolver can resolve API requests @@ -59,7 +61,9 @@ type nodeApiResolver struct { accountsParser genesis.AccountsParser gasScheduleNotifier common.GasScheduleNotifierAPI managedPeersMonitor common.ManagedPeersMonitor + publicKey string nodesCoordinator nodesCoordinator.NodesCoordinator + storageManagers []common.StorageManager } // NewNodeApiResolver creates a new nodeApiResolver instance @@ -125,7 +129,9 @@ func NewNodeApiResolver(arg 
ArgNodeApiResolver) (*nodeApiResolver, error) { accountsParser: arg.AccountsParser, gasScheduleNotifier: arg.GasScheduleNotifier, managedPeersMonitor: arg.ManagedPeersMonitor, + publicKey: arg.PublicKey, nodesCoordinator: arg.NodesCoordinator, + storageManagers: arg.StorageManagers, }, nil } @@ -151,6 +157,15 @@ func (nar *nodeApiResolver) SimulateTransactionExecution(tx *transaction.Transac // Close closes all underlying components func (nar *nodeApiResolver) Close() error { + for _, sm := range nar.storageManagers { + if check.IfNil(sm) { + continue + } + + err := sm.Close() + log.LogIfError(err) + } + return nar.scQueryService.Close() } @@ -345,12 +360,23 @@ func (nar *nodeApiResolver) GetManagedKeysCount() int { return nar.managedPeersMonitor.GetManagedKeysCount() } -// GetManagedKeys returns all keys managed by the current node when running in multikey mode +// GetManagedKeys returns all keys that should act as validator(main or backup that took over) and will be managed by this node func (nar *nodeApiResolver) GetManagedKeys() []string { managedKeys := nar.managedPeersMonitor.GetManagedKeys() return nar.parseKeys(managedKeys) } +// GetLoadedKeys returns all keys that were loaded by this node +func (nar *nodeApiResolver) GetLoadedKeys() []string { + loadedKeys := nar.managedPeersMonitor.GetLoadedKeys() + if len(loadedKeys) > 0 { + return nar.parseKeys(loadedKeys) + } + + // node is in single key mode, returning the main public key + return []string{nar.publicKey} +} + // GetEligibleManagedKeys returns the eligible managed keys when node is running in multikey mode func (nar *nodeApiResolver) GetEligibleManagedKeys() ([]string, error) { eligibleKeys, err := nar.managedPeersMonitor.GetEligibleManagedKeys() diff --git a/node/external/nodeApiResolver_test.go b/node/external/nodeApiResolver_test.go index 9e1d0ee516d..5a1cec19787 100644 --- a/node/external/nodeApiResolver_test.go +++ b/node/external/nodeApiResolver_test.go @@ -39,7 +39,7 @@ func createMockArgs() external.ArgNodeApiResolver { APIBlockHandler: &mock.BlockAPIHandlerStub{}, APITransactionHandler: &mock.TransactionAPIHandlerStub{}, APIInternalBlockHandler: &mock.InternalBlockApiHandlerStub{}, - GenesisNodesSetupHandler: &testscommon.NodesSetupStub{}, + GenesisNodesSetupHandler: &genesisMocks.NodesSetupStub{}, ValidatorPubKeyConverter: &testscommon.PubkeyConverterMock{}, AccountsParser: &genesisMocks.AccountsParserStub{}, GasScheduleNotifier: &testscommon.GasScheduleNotifierMock{}, @@ -594,7 +594,7 @@ func TestNodeApiResolver_GetGenesisNodesPubKeys(t *testing.T) { } arg := createMockArgs() - arg.GenesisNodesSetupHandler = &testscommon.NodesSetupStub{ + arg.GenesisNodesSetupHandler = &genesisMocks.NodesSetupStub{ InitialNodesInfoCalled: func() (map[uint32][]nodesCoordinator.GenesisNodeInfoHandler, map[uint32][]nodesCoordinator.GenesisNodeInfoHandler) { return eligible, waiting }, @@ -739,6 +739,59 @@ func TestNodeApiResolver_GetManagedKeys(t *testing.T) { require.Equal(t, expectedKeys, keys) } +func TestNodeApiResolver_GetLoadedKeys(t *testing.T) { + t.Parallel() + + t.Run("multikey should work", func(t *testing.T) { + t.Parallel() + + providedKeys := [][]byte{ + []byte("pk1"), + []byte("pk2"), + } + expectedKeys := []string{ + "pk1", + "pk2", + } + args := createMockArgs() + args.ManagedPeersMonitor = &testscommon.ManagedPeersMonitorStub{ + GetLoadedKeysCalled: func() [][]byte { + return providedKeys + }, + } + args.ValidatorPubKeyConverter = &testscommon.PubkeyConverterStub{ + SilentEncodeCalled: func(pkBytes []byte, log 
core.Logger) string { + return string(pkBytes) + }, + } + nar, err := external.NewNodeApiResolver(args) + require.NoError(t, err) + + keys := nar.GetLoadedKeys() + require.Equal(t, expectedKeys, keys) + }) + t.Run("single key should work", func(t *testing.T) { + t.Parallel() + + providedKey := "pk1" + expectedKeys := []string{ + "pk1", + } + args := createMockArgs() + args.PublicKey = providedKey + args.ManagedPeersMonitor = &testscommon.ManagedPeersMonitorStub{ + GetLoadedKeysCalled: func() [][]byte { + return [][]byte{} + }, + } + nar, err := external.NewNodeApiResolver(args) + require.NoError(t, err) + + keys := nar.GetLoadedKeys() + require.Equal(t, expectedKeys, keys) + }) +} + func TestNodeApiResolver_GetEligibleManagedKeys(t *testing.T) { t.Parallel() diff --git a/node/external/timemachine/fee/feeComputer_test.go b/node/external/timemachine/fee/feeComputer_test.go index faf1996940e..46e2904d6d2 100644 --- a/node/external/timemachine/fee/feeComputer_test.go +++ b/node/external/timemachine/fee/feeComputer_test.go @@ -21,8 +21,7 @@ import ( func createEconomicsData() process.EconomicsDataHandler { economicsConfig := testscommon.GetEconomicsConfig() economicsData, _ := economics.NewEconomicsData(economics.ArgsNewEconomicsData{ - BuiltInFunctionsCostHandler: &testscommon.BuiltInCostHandlerStub{}, - Economics: &economicsConfig, + Economics: &economicsConfig, EnableEpochsHandler: &enableEpochsHandlerMock.EnableEpochsHandlerStub{ IsFlagEnabledInEpochCalled: func(flag core.EnableEpochFlag, epoch uint32) bool { if flag == common.PenalizedTooMuchGasFlag { diff --git a/node/external/timemachine/fee/memoryFootprint/memory_test.go b/node/external/timemachine/fee/memoryFootprint/memory_test.go index 2f32427e4de..a854a286ddd 100644 --- a/node/external/timemachine/fee/memoryFootprint/memory_test.go +++ b/node/external/timemachine/fee/memoryFootprint/memory_test.go @@ -30,8 +30,7 @@ func TestFeeComputer_MemoryFootprint(t *testing.T) { economicsConfig := testscommon.GetEconomicsConfig() economicsData, _ := economics.NewEconomicsData(economics.ArgsNewEconomicsData{ - BuiltInFunctionsCostHandler: &testscommon.BuiltInCostHandlerStub{}, - Economics: &economicsConfig, + Economics: &economicsConfig, EnableEpochsHandler: &enableEpochsHandlerMock.EnableEpochsHandlerStub{ IsFlagEnabledInEpochCalled: func(flag core.EnableEpochFlag, epoch uint32) bool { if flag == common.PenalizedTooMuchGasFlag { diff --git a/node/external/transactionAPI/gasUsedAndFeeProcessor.go b/node/external/transactionAPI/gasUsedAndFeeProcessor.go index a22b689d6a4..f0036bc136b 100644 --- a/node/external/transactionAPI/gasUsedAndFeeProcessor.go +++ b/node/external/transactionAPI/gasUsedAndFeeProcessor.go @@ -5,6 +5,7 @@ import ( "github.com/multiversx/mx-chain-core-go/core" "github.com/multiversx/mx-chain-core-go/data/transaction" + datafield "github.com/multiversx/mx-chain-vm-common-go/parsers/dataField" ) type gasUsedAndFeeProcessor struct { @@ -52,7 +53,7 @@ func (gfp *gasUsedAndFeeProcessor) prepareTxWithResultsBasedOnLogs( tx *transaction.ApiTransactionResult, hasRefund bool, ) { - if tx.Logs == nil { + if tx.Logs == nil || (tx.Function == "" && tx.Operation == datafield.OperationTransfer) { return } diff --git a/node/external/transactionAPI/gasUsedAndFeeProcessor_test.go b/node/external/transactionAPI/gasUsedAndFeeProcessor_test.go index 5c0ba4d4c05..99541bfef5d 100644 --- a/node/external/transactionAPI/gasUsedAndFeeProcessor_test.go +++ b/node/external/transactionAPI/gasUsedAndFeeProcessor_test.go @@ -20,11 +20,10 @@ import ( func 
createEconomicsData(enableEpochsHandler common.EnableEpochsHandler) process.EconomicsDataHandler { economicsConfig := testscommon.GetEconomicsConfig() economicsData, _ := economics.NewEconomicsData(economics.ArgsNewEconomicsData{ - BuiltInFunctionsCostHandler: &testscommon.BuiltInCostHandlerStub{}, - Economics: &economicsConfig, - EnableEpochsHandler: enableEpochsHandler, - TxVersionChecker: &testscommon.TxVersionCheckerStub{}, - EpochNotifier: &epochNotifier.EpochNotifierStub{}, + Economics: &economicsConfig, + EnableEpochsHandler: enableEpochsHandler, + TxVersionChecker: &testscommon.TxVersionCheckerStub{}, + EpochNotifier: &epochNotifier.EpochNotifierStub{}, }) return economicsData diff --git a/node/metrics/metrics.go b/node/metrics/metrics.go index 3b1151f61af..ca2cd4e910a 100644 --- a/node/metrics/metrics.go +++ b/node/metrics/metrics.go @@ -53,6 +53,8 @@ func InitBaseMetrics(appStatusHandler core.AppStatusHandler) error { appStatusHandler.SetUInt64Value(common.MetricTrieSyncNumReceivedBytes, initUint) appStatusHandler.SetUInt64Value(common.MetricAccountsSnapshotInProgress, initUint) appStatusHandler.SetUInt64Value(common.MetricPeersSnapshotInProgress, initUint) + appStatusHandler.SetUInt64Value(common.MetricNonceAtEpochStart, initUint) + appStatusHandler.SetUInt64Value(common.MetricRoundAtEpochStart, initUint) appStatusHandler.SetInt64Value(common.MetricLastAccountsSnapshotDurationSec, initInt) appStatusHandler.SetInt64Value(common.MetricLastPeersSnapshotDurationSec, initInt) @@ -125,9 +127,7 @@ func InitConfigMetrics( appStatusHandler.SetUInt64Value(common.MetricESDTMultiTransferEnableEpoch, uint64(enableEpochs.ESDTMultiTransferEnableEpoch)) appStatusHandler.SetUInt64Value(common.MetricGlobalMintBurnDisableEpoch, uint64(enableEpochs.GlobalMintBurnDisableEpoch)) appStatusHandler.SetUInt64Value(common.MetricESDTTransferRoleEnableEpoch, uint64(enableEpochs.ESDTTransferRoleEnableEpoch)) - appStatusHandler.SetUInt64Value(common.MetricBuiltInFunctionOnMetaEnableEpoch, uint64(enableEpochs.BuiltInFunctionOnMetaEnableEpoch)) appStatusHandler.SetStringValue(common.MetricTotalSupply, economicsConfig.GlobalSettings.GenesisTotalSupply) - appStatusHandler.SetUInt64Value(common.MetricWaitingListFixEnableEpoch, uint64(enableEpochs.WaitingListFixEnableEpoch)) appStatusHandler.SetUInt64Value(common.MetricSetGuardianEnableEpoch, uint64(enableEpochs.SetGuardianEnableEpoch)) appStatusHandler.SetUInt64Value(common.MetricSetScToScLogEventEnableEpoch, uint64(enableEpochs.ScToScLogEventEnableEpoch)) diff --git a/node/metrics/metrics_test.go b/node/metrics/metrics_test.go index 0a0e3e57cc7..7da1a582626 100644 --- a/node/metrics/metrics_test.go +++ b/node/metrics/metrics_test.go @@ -10,6 +10,7 @@ import ( "github.com/multiversx/mx-chain-go/config" "github.com/multiversx/mx-chain-go/sharding/nodesCoordinator" "github.com/multiversx/mx-chain-go/testscommon" + "github.com/multiversx/mx-chain-go/testscommon/genesisMocks" "github.com/multiversx/mx-chain-go/testscommon/shardingMocks" "github.com/multiversx/mx-chain-go/testscommon/statusHandler" "github.com/stretchr/testify/assert" @@ -63,6 +64,8 @@ func TestInitBaseMetrics(t *testing.T) { common.MetricAccountsSnapshotNumNodes, common.MetricTrieSyncNumProcessedNodes, common.MetricTrieSyncNumReceivedBytes, + common.MetricRoundAtEpochStart, + common.MetricNonceAtEpochStart, } keys := make(map[string]struct{}) @@ -134,10 +137,8 @@ func TestInitConfigMetrics(t *testing.T) { ESDTMultiTransferEnableEpoch: 31, GlobalMintBurnDisableEpoch: 32, ESDTTransferRoleEnableEpoch: 
33, - BuiltInFunctionOnMetaEnableEpoch: 34, - WaitingListFixEnableEpoch: 35, - SetGuardianEnableEpoch: 36, - ScToScLogEventEnableEpoch: 37, + SetGuardianEnableEpoch: 34, + ScToScLogEventEnableEpoch: 35, MaxNodesChangeEnableEpoch: []config.MaxNodesChangeConfig{ { EpochEnable: 0, @@ -186,8 +187,6 @@ func TestInitConfigMetrics(t *testing.T) { "erd_esdt_multi_transfer_enable_epoch": uint32(31), "erd_global_mint_burn_disable_epoch": uint32(32), "erd_esdt_transfer_role_enable_epoch": uint32(33), - "erd_builtin_function_on_meta_enable_epoch": uint32(34), - "erd_waiting_list_fix_enable_epoch": uint32(35), "erd_max_nodes_change_enable_epoch": nil, "erd_total_supply": "12345", "erd_hysteresis": "0.100000", @@ -195,8 +194,8 @@ func TestInitConfigMetrics(t *testing.T) { "erd_max_nodes_change_enable_epoch0_epoch_enable": uint32(0), "erd_max_nodes_change_enable_epoch0_max_num_nodes": uint32(1), "erd_max_nodes_change_enable_epoch0_nodes_to_shuffle_per_shard": uint32(2), - "erd_set_guardian_feature_enable_epoch": uint32(36), - "erd_set_sc_to_sc_log_event_enable_epoch": uint32(37), + "erd_set_guardian_feature_enable_epoch": uint32(34), + "erd_set_sc_to_sc_log_event_enable_epoch": uint32(35), common.MetricGatewayMetricsEndpoint: "http://localhost:8080", } @@ -206,7 +205,7 @@ func TestInitConfigMetrics(t *testing.T) { }, } - genesisNodesConfig := &testscommon.NodesSetupStub{ + genesisNodesConfig := &genesisMocks.NodesSetupStub{ GetAdaptivityCalled: func() bool { return true }, @@ -237,7 +236,7 @@ func TestInitConfigMetrics(t *testing.T) { assert.Equal(t, v, keys[k]) } - genesisNodesConfig = &testscommon.NodesSetupStub{ + genesisNodesConfig = &genesisMocks.NodesSetupStub{ GetAdaptivityCalled: func() bool { return false }, @@ -363,7 +362,7 @@ func TestInitMetrics(t *testing.T) { return 0 }, } - nodesSetup := &testscommon.NodesSetupStub{ + nodesSetup := &genesisMocks.NodesSetupStub{ GetShardConsensusGroupSizeCalled: func() uint32 { return 63 }, diff --git a/node/mock/peerProcessorMock.go b/node/mock/peerProcessorMock.go deleted file mode 100644 index 7ae112df225..00000000000 --- a/node/mock/peerProcessorMock.go +++ /dev/null @@ -1,133 +0,0 @@ -package mock - -import ( - "github.com/multiversx/mx-chain-core-go/data" - "github.com/multiversx/mx-chain-go/state" -) - -// ValidatorStatisticsProcessorMock - -type ValidatorStatisticsProcessorMock struct { - UpdatePeerStateCalled func(header data.MetaHeaderHandler) ([]byte, error) - RevertPeerStateCalled func(header data.MetaHeaderHandler) error - IsInterfaceNilCalled func() bool - - GetPeerAccountCalled func(address []byte) (state.PeerAccountHandler, error) - RootHashCalled func() ([]byte, error) - ResetValidatorStatisticsAtNewEpochCalled func(vInfos map[uint32][]*state.ValidatorInfo) error - GetValidatorInfoForRootHashCalled func(rootHash []byte) (map[uint32][]*state.ValidatorInfo, error) - ProcessCalled func(validatorInfo data.ShardValidatorInfoHandler) error - CommitCalled func() ([]byte, error) - ProcessRatingsEndOfEpochCalled func(validatorInfos map[uint32][]*state.ValidatorInfo, epoch uint32) error - PeerAccountToValidatorInfoCalled func(peerAccount state.PeerAccountHandler) *state.ValidatorInfo - SaveNodesCoordinatorUpdatesCalled func(epoch uint32) (bool, error) -} - -// SaveNodesCoordinatorUpdates - -func (vsp *ValidatorStatisticsProcessorMock) SaveNodesCoordinatorUpdates(epoch uint32) (bool, error) { - if vsp.SaveNodesCoordinatorUpdatesCalled != nil { - return vsp.SaveNodesCoordinatorUpdatesCalled(epoch) - } - return false, nil -} - -// 
PeerAccountToValidatorInfo - -func (vsp *ValidatorStatisticsProcessorMock) PeerAccountToValidatorInfo(peerAccount state.PeerAccountHandler) *state.ValidatorInfo { - if vsp.PeerAccountToValidatorInfoCalled != nil { - return vsp.PeerAccountToValidatorInfoCalled(peerAccount) - } - return nil -} - -// UpdatePeerState - -func (vsp *ValidatorStatisticsProcessorMock) UpdatePeerState(header data.MetaHeaderHandler, _ map[string]data.HeaderHandler) ([]byte, error) { - if vsp.UpdatePeerStateCalled != nil { - return vsp.UpdatePeerStateCalled(header) - } - return nil, nil -} - -// Process - -func (vsp *ValidatorStatisticsProcessorMock) Process(validatorInfo data.ShardValidatorInfoHandler) error { - if vsp.ProcessCalled != nil { - return vsp.ProcessCalled(validatorInfo) - } - - return nil -} - -// Commit - -func (vsp *ValidatorStatisticsProcessorMock) Commit() ([]byte, error) { - if vsp.CommitCalled != nil { - return vsp.CommitCalled() - } - - return nil, nil -} - -// ProcessRatingsEndOfEpoch - -func (vsp *ValidatorStatisticsProcessorMock) ProcessRatingsEndOfEpoch(validatorInfos map[uint32][]*state.ValidatorInfo, epoch uint32) error { - if vsp.ProcessRatingsEndOfEpochCalled != nil { - return vsp.ProcessRatingsEndOfEpochCalled(validatorInfos, epoch) - } - - return nil -} - -// ResetValidatorStatisticsAtNewEpoch - -func (vsp *ValidatorStatisticsProcessorMock) ResetValidatorStatisticsAtNewEpoch(vInfos map[uint32][]*state.ValidatorInfo) error { - if vsp.ResetValidatorStatisticsAtNewEpochCalled != nil { - return vsp.ResetValidatorStatisticsAtNewEpochCalled(vInfos) - } - return nil -} - -// GetValidatorInfoForRootHash - -func (vsp *ValidatorStatisticsProcessorMock) GetValidatorInfoForRootHash(rootHash []byte) (map[uint32][]*state.ValidatorInfo, error) { - if vsp.GetValidatorInfoForRootHashCalled != nil { - return vsp.GetValidatorInfoForRootHashCalled(rootHash) - } - return nil, nil -} - -// RevertPeerState - -func (vsp *ValidatorStatisticsProcessorMock) RevertPeerState(header data.MetaHeaderHandler) error { - if vsp.RevertPeerStateCalled != nil { - return vsp.RevertPeerStateCalled(header) - } - return nil -} - -// RootHash - -func (vsp *ValidatorStatisticsProcessorMock) RootHash() ([]byte, error) { - if vsp.RootHashCalled != nil { - return vsp.RootHashCalled() - } - return nil, nil -} - -// GetExistingPeerAccount - -func (vsp *ValidatorStatisticsProcessorMock) GetExistingPeerAccount(address []byte) (state.PeerAccountHandler, error) { - if vsp.GetPeerAccountCalled != nil { - return vsp.GetPeerAccountCalled(address) - } - - return nil, nil -} - -// DisplayRatings - -func (vsp *ValidatorStatisticsProcessorMock) DisplayRatings(_ uint32) { -} - -// SetLastFinalizedRootHash - -func (vsp *ValidatorStatisticsProcessorMock) SetLastFinalizedRootHash(_ []byte) { -} - -// LastFinalizedRootHash - -func (vsp *ValidatorStatisticsProcessorMock) LastFinalizedRootHash() []byte { - return nil -} - -// IsInterfaceNil - -func (vsp *ValidatorStatisticsProcessorMock) IsInterfaceNil() bool { - return false -} diff --git a/node/mock/validatorStatisticsProcessorStub.go b/node/mock/validatorStatisticsProcessorStub.go deleted file mode 100644 index 2233bc84f03..00000000000 --- a/node/mock/validatorStatisticsProcessorStub.go +++ /dev/null @@ -1,130 +0,0 @@ -package mock - -import ( - "github.com/multiversx/mx-chain-core-go/data" - "github.com/multiversx/mx-chain-go/state" -) - -// ValidatorStatisticsProcessorStub - -type ValidatorStatisticsProcessorStub struct { - UpdatePeerStateCalled func(header data.MetaHeaderHandler) ([]byte, error) 
- RevertPeerStateCalled func(header data.MetaHeaderHandler) error - GetPeerAccountCalled func(address []byte) (state.PeerAccountHandler, error) - RootHashCalled func() ([]byte, error) - ResetValidatorStatisticsAtNewEpochCalled func(vInfos map[uint32][]*state.ValidatorInfo) error - GetValidatorInfoForRootHashCalled func(rootHash []byte) (map[uint32][]*state.ValidatorInfo, error) - ProcessRatingsEndOfEpochCalled func(validatorInfos map[uint32][]*state.ValidatorInfo, epoch uint32) error - ProcessCalled func(validatorInfo data.ShardValidatorInfoHandler) error - CommitCalled func() ([]byte, error) - PeerAccountToValidatorInfoCalled func(peerAccount state.PeerAccountHandler) *state.ValidatorInfo - SaveNodesCoordinatorUpdatesCalled func(epoch uint32) (bool, error) -} - -// SaveNodesCoordinatorUpdates - -func (vsp *ValidatorStatisticsProcessorStub) SaveNodesCoordinatorUpdates(epoch uint32) (bool, error) { - if vsp.SaveNodesCoordinatorUpdatesCalled != nil { - return vsp.SaveNodesCoordinatorUpdatesCalled(epoch) - } - return false, nil -} - -// PeerAccountToValidatorInfo - -func (vsp *ValidatorStatisticsProcessorStub) PeerAccountToValidatorInfo(peerAccount state.PeerAccountHandler) *state.ValidatorInfo { - if vsp.PeerAccountToValidatorInfoCalled != nil { - return vsp.PeerAccountToValidatorInfoCalled(peerAccount) - } - return nil -} - -// Process - -func (vsp *ValidatorStatisticsProcessorStub) Process(validatorInfo data.ShardValidatorInfoHandler) error { - if vsp.ProcessCalled != nil { - return vsp.ProcessCalled(validatorInfo) - } - - return nil -} - -// Commit - -func (vsp *ValidatorStatisticsProcessorStub) Commit() ([]byte, error) { - if vsp.CommitCalled != nil { - return vsp.CommitCalled() - } - - return nil, nil -} - -// ResetValidatorStatisticsAtNewEpoch - -func (vsp *ValidatorStatisticsProcessorStub) ResetValidatorStatisticsAtNewEpoch(vInfos map[uint32][]*state.ValidatorInfo) error { - if vsp.ResetValidatorStatisticsAtNewEpochCalled != nil { - return vsp.ResetValidatorStatisticsAtNewEpochCalled(vInfos) - } - return nil -} - -// GetValidatorInfoForRootHash - -func (vsp *ValidatorStatisticsProcessorStub) GetValidatorInfoForRootHash(rootHash []byte) (map[uint32][]*state.ValidatorInfo, error) { - if vsp.GetValidatorInfoForRootHashCalled != nil { - return vsp.GetValidatorInfoForRootHashCalled(rootHash) - } - return nil, nil -} - -// UpdatePeerState - -func (vsp *ValidatorStatisticsProcessorStub) UpdatePeerState(header data.MetaHeaderHandler, _ map[string]data.HeaderHandler) ([]byte, error) { - if vsp.UpdatePeerStateCalled != nil { - return vsp.UpdatePeerStateCalled(header) - } - return nil, nil -} - -// ProcessRatingsEndOfEpoch - -func (vsp *ValidatorStatisticsProcessorStub) ProcessRatingsEndOfEpoch(validatorInfos map[uint32][]*state.ValidatorInfo, epoch uint32) error { - if vsp.ProcessRatingsEndOfEpochCalled != nil { - return vsp.ProcessRatingsEndOfEpochCalled(validatorInfos, epoch) - } - return nil -} - -// RevertPeerState - -func (vsp *ValidatorStatisticsProcessorStub) RevertPeerState(header data.MetaHeaderHandler) error { - if vsp.RevertPeerStateCalled != nil { - return vsp.RevertPeerStateCalled(header) - } - return nil -} - -// RootHash - -func (vsp *ValidatorStatisticsProcessorStub) RootHash() ([]byte, error) { - if vsp.RootHashCalled != nil { - return vsp.RootHashCalled() - } - return nil, nil -} - -// SetLastFinalizedRootHash - -func (vsp *ValidatorStatisticsProcessorStub) SetLastFinalizedRootHash(_ []byte) { -} - -// LastFinalizedRootHash - -func (vsp *ValidatorStatisticsProcessorStub) 
LastFinalizedRootHash() []byte { - return nil -} - -// GetExistingPeerAccount - -func (vsp *ValidatorStatisticsProcessorStub) GetExistingPeerAccount(address []byte) (state.PeerAccountHandler, error) { - if vsp.GetPeerAccountCalled != nil { - return vsp.GetPeerAccountCalled(address) - } - - return nil, nil -} - -// DisplayRatings - -func (vsp *ValidatorStatisticsProcessorStub) DisplayRatings(_ uint32) { -} - -// IsInterfaceNil - -func (vsp *ValidatorStatisticsProcessorStub) IsInterfaceNil() bool { - return false -} diff --git a/node/node.go b/node/node.go index 001bbd23f30..978fd45dc99 100644 --- a/node/node.go +++ b/node/node.go @@ -1008,6 +1008,11 @@ func (n *Node) ValidatorStatisticsApi() (map[string]*validator.ValidatorStatisti return n.processComponents.ValidatorsProvider().GetLatestValidators(), nil } +// AuctionListApi will return the auction list config along with qualified nodes +func (n *Node) AuctionListApi() ([]*common.AuctionListValidatorAPIResponse, error) { + return n.processComponents.ValidatorsProvider().GetAuctionList() +} + // DirectTrigger will start the hardfork trigger func (n *Node) DirectTrigger(epoch uint32, withEarlyEndOfEpoch bool) error { return n.processComponents.HardforkTrigger().Trigger(epoch, withEarlyEndOfEpoch) diff --git a/node/nodeRunner.go b/node/nodeRunner.go index 71cdc1b1beb..54ffe84b4e3 100644 --- a/node/nodeRunner.go +++ b/node/nodeRunner.go @@ -167,12 +167,10 @@ func printEnableEpochs(configs *config.Configs) { log.Debug(readEpochFor("save jailed always"), "epoch", enableEpochs.SaveJailedAlwaysEnableEpoch) log.Debug(readEpochFor("validator to delegation"), "epoch", enableEpochs.ValidatorToDelegationEnableEpoch) log.Debug(readEpochFor("re-delegate below minimum check"), "epoch", enableEpochs.ReDelegateBelowMinCheckEnableEpoch) - log.Debug(readEpochFor("waiting waiting list"), "epoch", enableEpochs.WaitingListFixEnableEpoch) log.Debug(readEpochFor("increment SCR nonce in multi transfer"), "epoch", enableEpochs.IncrementSCRNonceInMultiTransferEnableEpoch) log.Debug(readEpochFor("esdt and NFT multi transfer"), "epoch", enableEpochs.ESDTMultiTransferEnableEpoch) log.Debug(readEpochFor("contract global mint and burn"), "epoch", enableEpochs.GlobalMintBurnDisableEpoch) log.Debug(readEpochFor("contract transfer role"), "epoch", enableEpochs.ESDTTransferRoleEnableEpoch) - log.Debug(readEpochFor("built in functions on metachain"), "epoch", enableEpochs.BuiltInFunctionOnMetaEnableEpoch) log.Debug(readEpochFor("compute rewards checkpoint on delegation"), "epoch", enableEpochs.ComputeRewardCheckpointEnableEpoch) log.Debug(readEpochFor("esdt NFT create on multiple shards"), "epoch", enableEpochs.ESDTNFTCreateOnMultiShardEnableEpoch) log.Debug(readEpochFor("SCR size invariant check"), "epoch", enableEpochs.SCRSizeInvariantCheckEnableEpoch) @@ -208,6 +206,11 @@ func printEnableEpochs(configs *config.Configs) { log.Debug(readEpochFor("refactor peers mini blocks"), "epoch", enableEpochs.RefactorPeersMiniBlocksEnableEpoch) log.Debug(readEpochFor("runtime memstore limit"), "epoch", enableEpochs.RuntimeMemStoreLimitEnableEpoch) log.Debug(readEpochFor("max blockchainhook counters"), "epoch", enableEpochs.MaxBlockchainHookCountersEnableEpoch) + log.Debug(readEpochFor("limit validators"), "epoch", enableEpochs.StakeLimitsEnableEpoch) + log.Debug(readEpochFor("staking v4 step 1"), "epoch", enableEpochs.StakingV4Step1EnableEpoch) + log.Debug(readEpochFor("staking v4 step 2"), "epoch", enableEpochs.StakingV4Step2EnableEpoch) + log.Debug(readEpochFor("staking v4 step 3"), 
"epoch", enableEpochs.StakingV4Step3EnableEpoch) + gasSchedule := configs.EpochConfig.GasSchedule log.Debug(readEpochFor("gas schedule directories paths"), "epoch", gasSchedule.GasScheduleByEpochs) @@ -269,6 +272,10 @@ func (nr *nodeRunner) executeOneComponentCreationCycle( chanStopNodeProcess chan endProcess.ArgEndProcess, ) (bool, error) { goRoutinesNumberStart := runtime.NumGoroutine() + + log.Debug("applying custom configs based on the current architecture") + ApplyArchCustomConfigs(nr.configs) + configs := nr.configs flagsConfig := configs.FlagsConfig configurationPaths := configs.ConfigurationPathsHolder @@ -284,6 +291,11 @@ func (nr *nodeRunner) executeOneComponentCreationCycle( return true, err } + err = config.SanityCheckNodesConfig(managedCoreComponents.GenesisNodesSetup(), configs.EpochConfig.EnableEpochs) + if err != nil { + return true, err + } + log.Debug("creating status core components") managedStatusCoreComponents, err := nr.CreateManagedStatusCoreComponents(managedCoreComponents) if err != nil { @@ -375,6 +387,7 @@ func (nr *nodeRunner) executeOneComponentCreationCycle( managedCoreComponents.NodeTypeProvider(), managedCoreComponents.EnableEpochsHandler(), managedDataComponents.Datapool().CurrentEpochValidatorInfo(), + managedBootstrapComponents.NodesCoordinatorRegistryFactory(), ) if err != nil { return true, err @@ -430,7 +443,6 @@ func (nr *nodeRunner) executeOneComponentCreationCycle( managedStateComponents, managedBootstrapComponents, managedProcessComponents, - managedStatusCoreComponents, ) if err != nil { return true, err @@ -559,7 +571,6 @@ func addSyncersToAccountsDB( stateComponents mainFactory.StateComponentsHolder, bootstrapComponents mainFactory.BootstrapComponentsHolder, processComponents mainFactory.ProcessComponentsHolder, - statusCoreComponents mainFactory.StatusCoreComponentsHolder, ) error { selfId := bootstrapComponents.ShardCoordinator().SelfId() if selfId == core.MetachainShardId { @@ -569,7 +580,6 @@ func addSyncersToAccountsDB( dataComponents, stateComponents, processComponents, - statusCoreComponents, ) if err != nil { return err @@ -593,7 +603,6 @@ func addSyncersToAccountsDB( stateComponents, bootstrapComponents, processComponents, - statusCoreComponents, ) if err != nil { return err @@ -613,7 +622,6 @@ func getUserAccountSyncer( stateComponents mainFactory.StateComponentsHolder, bootstrapComponents mainFactory.BootstrapComponentsHolder, processComponents mainFactory.ProcessComponentsHolder, - statusCoreComponents mainFactory.StatusCoreComponentsHolder, ) (process.AccountsDBSyncer, error) { maxTrieLevelInMemory := config.StateTriesConfig.MaxStateTrieLevelInMemory userTrie := stateComponents.TriesContainer().Get([]byte(dataRetriever.UserAccountsUnit.String())) @@ -631,7 +639,6 @@ func getUserAccountSyncer( dataComponents, processComponents, storageManager, - statusCoreComponents, maxTrieLevelInMemory, ), ShardId: bootstrapComponents.ShardCoordinator().SelfId(), @@ -648,7 +655,6 @@ func getValidatorAccountSyncer( dataComponents mainFactory.DataComponentsHolder, stateComponents mainFactory.StateComponentsHolder, processComponents mainFactory.ProcessComponentsHolder, - statusCoreComponents mainFactory.StatusCoreComponentsHolder, ) (process.AccountsDBSyncer, error) { maxTrieLevelInMemory := config.StateTriesConfig.MaxPeerTrieLevelInMemory peerTrie := stateComponents.TriesContainer().Get([]byte(dataRetriever.PeerAccountsUnit.String())) @@ -661,7 +667,6 @@ func getValidatorAccountSyncer( dataComponents, processComponents, storageManager, - 
statusCoreComponents, maxTrieLevelInMemory, ), } @@ -675,7 +680,6 @@ func getBaseAccountSyncerArgs( dataComponents mainFactory.DataComponentsHolder, processComponents mainFactory.ProcessComponentsHolder, storageManager common.StorageManager, - statusCoreComponents mainFactory.StatusCoreComponentsHolder, maxTrieLevelInMemory uint, ) syncer.ArgsNewBaseAccountsSyncer { return syncer.ArgsNewBaseAccountsSyncer{ @@ -825,6 +829,7 @@ func (nr *nodeRunner) createMetrics( metrics.SaveStringMetric(statusCoreComponents.AppStatusHandler(), common.MetricNodeDisplayName, nr.configs.PreferencesConfig.Preferences.NodeDisplayName) metrics.SaveStringMetric(statusCoreComponents.AppStatusHandler(), common.MetricRedundancyLevel, fmt.Sprintf("%d", nr.configs.PreferencesConfig.Preferences.RedundancyLevel)) metrics.SaveStringMetric(statusCoreComponents.AppStatusHandler(), common.MetricRedundancyIsMainActive, common.MetricValueNA) + metrics.SaveStringMetric(statusCoreComponents.AppStatusHandler(), common.MetricRedundancyStepInReason, "") metrics.SaveStringMetric(statusCoreComponents.AppStatusHandler(), common.MetricChainId, coreComponents.ChainID()) metrics.SaveUint64Metric(statusCoreComponents.AppStatusHandler(), common.MetricGasPerDataByte, coreComponents.EconomicsData().GasPerDataByte()) metrics.SaveUint64Metric(statusCoreComponents.AppStatusHandler(), common.MetricMinGasPrice, coreComponents.EconomicsData().MinGasPrice()) @@ -1025,7 +1030,7 @@ func (nr *nodeRunner) logInformation( log.Info("Bootstrap", "epoch", bootstrapComponents.EpochBootstrapParams().Epoch()) if bootstrapComponents.EpochBootstrapParams().NodesConfig() != nil { log.Info("the epoch from nodesConfig is", - "epoch", bootstrapComponents.EpochBootstrapParams().NodesConfig().CurrentEpoch) + "epoch", bootstrapComponents.EpochBootstrapParams().NodesConfig().GetCurrentEpoch()) } var shardIdString = core.GetShardIDString(bootstrapComponents.ShardCoordinator().SelfId()) @@ -1233,8 +1238,10 @@ func (nr *nodeRunner) CreateManagedProcessComponents( processArgs := processComp.ProcessComponentsFactoryArgs{ Config: *configs.GeneralConfig, EpochConfig: *configs.EpochConfig, + RoundConfig: *configs.RoundConfig, PrefConfigs: *configs.PreferencesConfig, ImportDBConfig: *configs.ImportDbConfig, + EconomicsConfig: *configs.EconomicsConfig, AccountsParser: accountsParser, SmartContractParser: smartContractParser, GasSchedule: gasScheduleNotifier, diff --git a/node/nodeRunner_test.go b/node/nodeRunner_test.go index 6e3c61a12cd..bb20b16fc47 100644 --- a/node/nodeRunner_test.go +++ b/node/nodeRunner_test.go @@ -35,7 +35,9 @@ func TestNewNodeRunner(t *testing.T) { t.Run("with valid configs should work", func(t *testing.T) { t.Parallel() - configs := testscommon.CreateTestConfigs(t, originalConfigsPath) + configs, err := testscommon.CreateTestConfigs(t.TempDir(), originalConfigsPath) + require.Nil(t, err) + runner, err := NewNodeRunner(configs) assert.NotNil(t, runner) assert.Nil(t, err) @@ -45,11 +47,13 @@ func TestNewNodeRunner(t *testing.T) { func TestNodeRunner_StartAndCloseNodeUsingSIGINT(t *testing.T) { t.Parallel() - configs := testscommon.CreateTestConfigs(t, originalConfigsPath) + configs, err := testscommon.CreateTestConfigs(t.TempDir(), originalConfigsPath) + require.Nil(t, err) + runner, _ := NewNodeRunner(configs) trigger := mock.NewApplicationRunningTrigger() - err := logger.AddLogObserver(trigger, &logger.PlainFormatter{}) + err = logger.AddLogObserver(trigger, &logger.PlainFormatter{}) require.Nil(t, err) // start a go routine that will send the SIGINT 
message after 1 second after the node has started diff --git a/node/nodeTesting.go b/node/nodeTesting.go index 29683432508..bcd15052e21 100644 --- a/node/nodeTesting.go +++ b/node/nodeTesting.go @@ -264,7 +264,7 @@ func (n *Node) generateAndSignTxBuffArray( return tx, signedMarshalizedTx, nil } -//GenerateTransaction generates a new transaction with sender, receiver, amount and code +// GenerateTransaction generates a new transaction with sender, receiver, amount and code func (n *Node) GenerateTransaction(senderHex string, receiverHex string, value *big.Int, transactionData string, privateKey crypto.PrivateKey, chainID []byte, minTxVersion uint32) (*transaction.Transaction, error) { if check.IfNil(n.coreComponents.AddressPubKeyConverter()) { return nil, ErrNilPubkeyConverter diff --git a/node/node_test.go b/node/node_test.go index 152cf98bdd7..2cde11d08a0 100644 --- a/node/node_test.go +++ b/node/node_test.go @@ -56,11 +56,13 @@ import ( "github.com/multiversx/mx-chain-go/testscommon/enableEpochsHandlerMock" "github.com/multiversx/mx-chain-go/testscommon/epochNotifier" factoryTests "github.com/multiversx/mx-chain-go/testscommon/factory" + "github.com/multiversx/mx-chain-go/testscommon/genesisMocks" "github.com/multiversx/mx-chain-go/testscommon/guardianMocks" "github.com/multiversx/mx-chain-go/testscommon/mainFactoryMocks" "github.com/multiversx/mx-chain-go/testscommon/marshallerMock" "github.com/multiversx/mx-chain-go/testscommon/p2pmocks" "github.com/multiversx/mx-chain-go/testscommon/shardingMocks" + "github.com/multiversx/mx-chain-go/testscommon/stakingcommon" stateMock "github.com/multiversx/mx-chain-go/testscommon/state" statusHandlerMock "github.com/multiversx/mx-chain-go/testscommon/statusHandler" mockStorage "github.com/multiversx/mx-chain-go/testscommon/storage" @@ -3203,12 +3205,11 @@ func TestNode_ValidatorStatisticsApi(t *testing.T) { initialPubKeys[1] = keys[1] initialPubKeys[2] = keys[2] - validatorsInfo := make(map[uint32][]*state.ValidatorInfo) + validatorsInfo := state.NewShardValidatorsInfoMap() for shardId, pubkeysPerShard := range initialPubKeys { - validatorsInfo[shardId] = make([]*state.ValidatorInfo, 0) for _, pubKey := range pubkeysPerShard { - validatorsInfo[shardId] = append(validatorsInfo[shardId], &state.ValidatorInfo{ + _ = validatorsInfo.Add(&state.ValidatorInfo{ PublicKey: []byte(pubKey), ShardId: shardId, List: "", @@ -3230,26 +3231,25 @@ func TestNode_ValidatorStatisticsApi(t *testing.T) { } } - vsp := &mock.ValidatorStatisticsProcessorStub{ + vsp := &testscommon.ValidatorStatisticsProcessorStub{ RootHashCalled: func() (i []byte, err error) { return []byte("hash"), nil }, - GetValidatorInfoForRootHashCalled: func(rootHash []byte) (m map[uint32][]*state.ValidatorInfo, err error) { + GetValidatorInfoForRootHashCalled: func(rootHash []byte) (m state.ShardValidatorsInfoMapHandler, err error) { return validatorsInfo, nil }, } - validatorProvider := &mock.ValidatorsProviderStub{GetLatestValidatorsCalled: func() map[string]*validator.ValidatorStatistics { - apiResponses := make(map[string]*validator.ValidatorStatistics) + validatorProvider := &stakingcommon.ValidatorsProviderStub{ + GetLatestValidatorsCalled: func() map[string]*validator.ValidatorStatistics { + apiResponses := make(map[string]*validator.ValidatorStatistics) - for _, vis := range validatorsInfo { - for _, vi := range vis { + for _, vi := range validatorsInfo.GetAllValidatorsInfo() { apiResponses[hex.EncodeToString(vi.GetPublicKey())] = &validator.ValidatorStatistics{} } - } - return apiResponses 
- }, + return apiResponses + }, } processComponents := getDefaultProcessComponents() @@ -5100,7 +5100,7 @@ func getDefaultCoreComponents() *nodeMockFactory.CoreComponentsMock { APIEconomicsHandler: &economicsmocks.EconomicsHandlerMock{}, RatingsConfig: &testscommon.RatingsInfoMock{}, RatingHandler: &testscommon.RaterMock{}, - NodesConfig: &testscommon.NodesSetupStub{}, + NodesConfig: &genesisMocks.NodesSetupStub{}, StartTime: time.Time{}, EpochChangeNotifier: &epochNotifier.EpochNotifierStub{}, TxVersionCheckHandler: versioning.NewTxVersionChecker(0), @@ -5125,8 +5125,8 @@ func getDefaultProcessComponents() *factoryMock.ProcessComponentsMock { BootSore: &mock.BootstrapStorerMock{}, HeaderSigVerif: &mock.HeaderSigVerifierStub{}, HeaderIntegrVerif: &mock.HeaderIntegrityVerifierStub{}, - ValidatorStatistics: &mock.ValidatorStatisticsProcessorMock{}, - ValidatorProvider: &mock.ValidatorsProviderStub{}, + ValidatorStatistics: &testscommon.ValidatorStatisticsProcessorStub{}, + ValidatorProvider: &stakingcommon.ValidatorsProviderStub{}, BlockTrack: &mock.BlockTrackerStub{}, PendingMiniBlocksHdl: &mock.PendingMiniBlocksHandlerStub{}, ReqHandler: &testscommon.RequestHandlerStub{}, diff --git a/outport/process/interface.go b/outport/process/interface.go index abcbbe10fec..5fcb19020f3 100644 --- a/outport/process/interface.go +++ b/outport/process/interface.go @@ -34,6 +34,7 @@ type GasConsumedProvider interface { type EconomicsDataHandler interface { ComputeGasUsedAndFeeBasedOnRefundValue(tx data.TransactionWithFeeHandler, refundValue *big.Int) (uint64, *big.Int) ComputeTxFeeBasedOnGasUsed(tx data.TransactionWithFeeHandler, gasUsed uint64) *big.Int + ComputeTxFee(tx data.TransactionWithFeeHandler) *big.Int ComputeGasLimit(tx data.TransactionWithFeeHandler) uint64 IsInterfaceNil() bool MaxGasLimitPerBlock(shardID uint32) uint64 diff --git a/outport/process/transactionsfee/interface.go b/outport/process/transactionsfee/interface.go index fa09f18076a..53042467442 100644 --- a/outport/process/transactionsfee/interface.go +++ b/outport/process/transactionsfee/interface.go @@ -12,6 +12,7 @@ import ( type FeesProcessorHandler interface { ComputeGasUsedAndFeeBasedOnRefundValue(tx data.TransactionWithFeeHandler, refundValue *big.Int) (uint64, *big.Int) ComputeTxFeeBasedOnGasUsed(tx data.TransactionWithFeeHandler, gasUsed uint64) *big.Int + ComputeTxFee(tx data.TransactionWithFeeHandler) *big.Int ComputeGasLimit(tx data.TransactionWithFeeHandler) uint64 IsInterfaceNil() bool } diff --git a/outport/process/transactionsfee/transactionsFeeProcessor.go b/outport/process/transactionsfee/transactionsFeeProcessor.go index 593a5d6b83b..c77956f5365 100644 --- a/outport/process/transactionsfee/transactionsFeeProcessor.go +++ b/outport/process/transactionsfee/transactionsFeeProcessor.go @@ -90,7 +90,7 @@ func (tep *transactionsFeeProcessor) PutFeeAndGasUsed(pool *outportcore.Transact func (tep *transactionsFeeProcessor) prepareInvalidTxs(pool *outportcore.TransactionPool) { for _, invalidTx := range pool.InvalidTxs { - fee := tep.txFeeCalculator.ComputeTxFeeBasedOnGasUsed(invalidTx.Transaction, invalidTx.Transaction.GasLimit) + fee := tep.txFeeCalculator.ComputeTxFee(invalidTx.Transaction) invalidTx.FeeInfo.SetGasUsed(invalidTx.Transaction.GetGasLimit()) invalidTx.FeeInfo.SetFee(fee) invalidTx.FeeInfo.SetInitialPaidFee(fee) @@ -103,7 +103,7 @@ func (tep *transactionsFeeProcessor) prepareNormalTxs(transactionsAndScrs *trans gasUsed := tep.txFeeCalculator.ComputeGasLimit(txHandler) fee := 
tep.txFeeCalculator.ComputeTxFeeBasedOnGasUsed(txHandler, gasUsed) - initialPaidFee := tep.txFeeCalculator.ComputeTxFeeBasedOnGasUsed(txHandler, txHandler.GetGasLimit()) + initialPaidFee := tep.txFeeCalculator.ComputeTxFee(txHandler) feeInfo := txWithResult.GetFeeInfo() feeInfo.SetGasUsed(gasUsed) @@ -137,15 +137,23 @@ func (tep *transactionsFeeProcessor) prepareTxWithResults(txHashHex string, txWi } } - tep.prepareTxWithResultsBasedOnLogs(txWithResults, hasRefund) + tep.prepareTxWithResultsBasedOnLogs(txHashHex, txWithResults, hasRefund) } func (tep *transactionsFeeProcessor) prepareTxWithResultsBasedOnLogs( + txHashHex string, txWithResults *transactionWithResults, hasRefund bool, ) { - if check.IfNilReflect(txWithResults.log) { + tx := txWithResults.GetTxHandler() + if check.IfNil(tx) { + tep.log.Warn("tep.prepareTxWithResultsBasedOnLogs nil transaction handler", "txHash", txHashHex) + return + } + + res := tep.dataFieldParser.Parse(tx.GetData(), tx.GetSndAddr(), tx.GetRcvAddr(), tep.shardCoordinator.NumberOfShards()) + if check.IfNilReflect(txWithResults.log) || (res.Function == "" && res.Operation == datafield.OperationTransfer) { return } diff --git a/outport/process/transactionsfee/transactionsFeeProcessor_test.go b/outport/process/transactionsfee/transactionsFeeProcessor_test.go index e0efbab8ada..8ff4cf14501 100644 --- a/outport/process/transactionsfee/transactionsFeeProcessor_test.go +++ b/outport/process/transactionsfee/transactionsFeeProcessor_test.go @@ -212,11 +212,15 @@ func TestPutFeeAndGasUsedInvalidTxs(t *testing.T) { func TestPutFeeAndGasUsedLogWithErrorAndInformative(t *testing.T) { t.Parallel() + receiver, _ := hex.DecodeString("00000000000000000500d3b28828d62052124f07dcd50ed31b0825f60eee1526") tx1Hash := "h1" tx1 := &outportcore.TxInfo{ Transaction: &transaction.Transaction{ GasLimit: 30000000, GasPrice: 1000000000, + SndAddr: []byte("erd1dglncxk6sl9a3xumj78n6z2xux4ghp5c92cstv5zsn56tjgtdwpsk46qrs"), + RcvAddr: receiver, + Data: []byte("here"), }, FeeInfo: &outportcore.FeeInfo{Fee: big.NewInt(0)}, } @@ -226,6 +230,9 @@ func TestPutFeeAndGasUsedLogWithErrorAndInformative(t *testing.T) { Transaction: &transaction.Transaction{ GasLimit: 50000000, GasPrice: 1000000000, + SndAddr: []byte("erd1dglncxk6sl9a3xumj78n6z2xux4ghp5c92cstv5zsn56tjgtdwpsk46qrs"), + RcvAddr: receiver, + Data: []byte("here"), }, FeeInfo: &outportcore.FeeInfo{Fee: big.NewInt(0)}, } @@ -520,3 +527,59 @@ func TestPutFeeAndGasUsedScrWithRefund(t *testing.T) { require.Equal(t, big.NewInt(552865000000000), initialTx.GetFeeInfo().GetFee()) require.Equal(t, uint64(50_336_500), initialTx.GetFeeInfo().GetGasUsed()) } + +func TestMoveBalanceWithSignalError(t *testing.T) { + txHash := []byte("e3cdb8b4936fdbee2d3b1244b4c49959df5f90ada683d650019d244e5a64afaf") + initialTx := &outportcore.TxInfo{Transaction: &transaction.Transaction{ + Nonce: 1004, + GasLimit: 12_175_500, + GasPrice: 1000000000, + SndAddr: []byte("erd1s8jr8e8hsvv7c9ehmshcjlpzf9ua5l50qeswa8feshrp6xlz9c7quacmtx"), + RcvAddr: []byte("erd1qqqqqqqqqqqqqqqpqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqzllls8a5w6u"), + Data: []byte("start@5465737420526166666c65203120f09f9a80@10000000000000000@0100000002@01000000006082a400@0100000001@01000000023232@"), + }, FeeInfo: &outportcore.FeeInfo{Fee: big.NewInt(0)}} + + scrHash := []byte("scrHash") + scr := &outportcore.SCRInfo{ + SmartContractResult: &smartContractResult.SmartContractResult{ + Nonce: 1005, + SndAddr: []byte("erd1qqqqqqqqqqqqqqqpqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqzllls8a5w6u"), + RcvAddr: 
[]byte("erd1s8jr8e8hsvv7c9ehmshcjlpzf9ua5l50qeswa8feshrp6xlz9c7quacmtx"), + PrevTxHash: txHash, + OriginalTxHash: txHash, + Value: big.NewInt(0), + Data: []byte("@sending value to non payable contract"), + }, + FeeInfo: &outportcore.FeeInfo{Fee: big.NewInt(0)}, + } + + pool := &outportcore.TransactionPool{ + SmartContractResults: map[string]*outportcore.SCRInfo{ + hex.EncodeToString(scrHash): scr, + }, + Transactions: map[string]*outportcore.TxInfo{ + hex.EncodeToString(txHash): initialTx, + }, + Logs: []*outportcore.LogData{ + { + Log: &transaction.Log{ + Events: []*transaction.Event{ + { + Identifier: []byte(core.SignalErrorOperation), + }, + }, + }, + TxHash: hex.EncodeToString(txHash), + }, + }, + } + + arg := prepareMockArg() + txsFeeProc, err := NewTransactionsFeeProcessor(arg) + require.NotNil(t, txsFeeProc) + require.Nil(t, err) + + err = txsFeeProc.PutFeeAndGasUsed(pool) + require.Nil(t, err) + require.Equal(t, uint64(225_500), initialTx.GetFeeInfo().GetGasUsed()) +} diff --git a/p2p/disabled/networkMessenger.go b/p2p/disabled/networkMessenger.go index 0216ccdd797..1eb767d26c8 100644 --- a/p2p/disabled/networkMessenger.go +++ b/p2p/disabled/networkMessenger.go @@ -190,6 +190,11 @@ func (netMes *networkMessenger) SetDebugger(_ p2p.Debugger) error { return nil } +// HasCompatibleProtocolID returns false as it is disabled +func (netMes *networkMessenger) HasCompatibleProtocolID(_ string) bool { + return false +} + // IsInterfaceNil returns true if there is no value under the interface func (netMes *networkMessenger) IsInterfaceNil() bool { return netMes == nil diff --git a/process/block/argProcessor.go b/process/block/argProcessor.go index 703d6326b40..df929214829 100644 --- a/process/block/argProcessor.go +++ b/process/block/argProcessor.go @@ -93,6 +93,7 @@ type ArgBaseProcessor struct { ReceiptsRepository receiptsRepository BlockProcessingCutoffHandler cutoff.BlockProcessingCutoffHandler ManagedPeersHolder common.ManagedPeersHolder + SentSignaturesTracker process.SentSignaturesTracker } // ArgShardProcessor holds all dependencies required by the process data factory in order to create diff --git a/process/block/baseProcess.go b/process/block/baseProcess.go index 5049cece729..b12aa6b2783 100644 --- a/process/block/baseProcess.go +++ b/process/block/baseProcess.go @@ -35,6 +35,7 @@ import ( "github.com/multiversx/mx-chain-go/process/block/bootstrapStorage" "github.com/multiversx/mx-chain-go/process/block/cutoff" "github.com/multiversx/mx-chain-go/process/block/processedMb" + "github.com/multiversx/mx-chain-go/process/headerCheck" "github.com/multiversx/mx-chain-go/sharding" "github.com/multiversx/mx-chain-go/sharding/nodesCoordinator" "github.com/multiversx/mx-chain-go/state" @@ -89,6 +90,7 @@ type baseProcessor struct { processDebugger process.Debugger processStatusHandler common.ProcessStatusHandler managedPeersHolder common.ManagedPeersHolder + sentSignaturesTracker process.SentSignaturesTracker versionedHeaderFactory nodeFactory.VersionedHeaderFactory headerIntegrityVerifier process.HeaderIntegrityVerifier @@ -119,6 +121,7 @@ type baseProcessor struct { mutNonceOfFirstCommittedBlock sync.RWMutex nonceOfFirstCommittedBlock core.OptionalUint64 + extraDelayRequestBlockInfo time.Duration } type bootStorerDataArgs struct { @@ -559,6 +562,9 @@ func checkProcessorParameters(arguments ArgBaseProcessor) error { if check.IfNil(arguments.ManagedPeersHolder) { return process.ErrNilManagedPeersHolder } + if check.IfNil(arguments.SentSignaturesTracker) { + return 
process.ErrNilSentSignatureTracker + } return nil } @@ -1680,7 +1686,7 @@ func (bp *baseProcessor) requestMiniBlocksIfNeeded(headerHandler data.HeaderHand return } - waitTime := common.ExtraDelayForRequestBlockInfo + waitTime := bp.extraDelayRequestBlockInfo roundDifferences := bp.roundHandler.Index() - int64(headerHandler.GetRound()) if roundDifferences > 1 { waitTime = 0 @@ -2110,3 +2116,23 @@ func (bp *baseProcessor) setNonceOfFirstCommittedBlock(nonce uint64) { bp.nonceOfFirstCommittedBlock.HasValue = true bp.nonceOfFirstCommittedBlock.Value = nonce } + +func (bp *baseProcessor) checkSentSignaturesAtCommitTime(header data.HeaderHandler) error { + validatorsGroup, err := headerCheck.ComputeConsensusGroup(header, bp.nodesCoordinator) + if err != nil { + return err + } + + consensusGroup := make([]string, 0, len(validatorsGroup)) + for _, validator := range validatorsGroup { + consensusGroup = append(consensusGroup, string(validator.PubKey())) + } + + signers := headerCheck.ComputeSignersPublicKeys(consensusGroup, header.GetPubKeysBitmap()) + + for _, signer := range signers { + bp.sentSignaturesTracker.ResetCountersForManagedBlockSigner([]byte(signer)) + } + + return nil +} diff --git a/process/block/baseProcess_test.go b/process/block/baseProcess_test.go index 2cf37208f6b..f24a580bbc3 100644 --- a/process/block/baseProcess_test.go +++ b/process/block/baseProcess_test.go @@ -34,6 +34,7 @@ import ( "github.com/multiversx/mx-chain-go/process/block/processedMb" "github.com/multiversx/mx-chain-go/process/coordinator" "github.com/multiversx/mx-chain-go/process/mock" + "github.com/multiversx/mx-chain-go/sharding/nodesCoordinator" "github.com/multiversx/mx-chain-go/state" "github.com/multiversx/mx-chain-go/storage" "github.com/multiversx/mx-chain-go/storage/database" @@ -73,7 +74,7 @@ func createArgBaseProcessor( bootstrapComponents *mock.BootstrapComponentsMock, statusComponents *mock.StatusComponentsMock, ) blproc.ArgBaseProcessor { - nodesCoordinator := shardingMocks.NewNodesCoordinatorMock() + nodesCoordinatorInstance := shardingMocks.NewNodesCoordinatorMock() argsHeaderValidator := blproc.ArgsHeaderValidator{ Hasher: &hashingMocks.HasherMock{}, Marshalizer: &mock.MarshalizerMock{}, @@ -102,7 +103,7 @@ func createArgBaseProcessor( Config: config.Config{}, AccountsDB: accountsDb, ForkDetector: &mock.ForkDetectorMock{}, - NodesCoordinator: nodesCoordinator, + NodesCoordinator: nodesCoordinatorInstance, FeeHandler: &mock.FeeAccumulatorStub{}, RequestHandler: &testscommon.RequestHandlerStub{}, BlockChainHook: &testscommon.BlockChainHookStub{}, @@ -126,6 +127,7 @@ func createArgBaseProcessor( ReceiptsRepository: &testscommon.ReceiptsRepositoryStub{}, BlockProcessingCutoffHandler: &testscommon.BlockProcessingCutoffStub{}, ManagedPeersHolder: &testscommon.ManagedPeersHolderStub{}, + SentSignaturesTracker: &testscommon.SentSignatureTrackerStub{}, } } @@ -3112,3 +3114,54 @@ func TestBaseProcessor_ConcurrentCallsNonceOfFirstCommittedBlock(t *testing.T) { assert.True(t, len(values) <= 1) // we can have the situation when all reads are done before the first set assert.Equal(t, numCalls/2, values[lastValRead]+noValues) } + +func TestBaseProcessor_CheckSentSignaturesAtCommitTime(t *testing.T) { + t.Parallel() + + expectedErr := errors.New("expected error") + t.Run("nodes coordinator errors, should return error", func(t *testing.T) { + nodesCoordinatorInstance := shardingMocks.NewNodesCoordinatorMock() + nodesCoordinatorInstance.ComputeValidatorsGroupCalled = func(randomness []byte, round uint64, shardId 
uint32, epoch uint32) (validatorsGroup []nodesCoordinator.Validator, err error) { + return nil, expectedErr + } + + arguments := CreateMockArguments(createComponentHolderMocks()) + arguments.SentSignaturesTracker = &testscommon.SentSignatureTrackerStub{ + ResetCountersForManagedBlockSignerCalled: func(signerPk []byte) { + assert.Fail(t, "should have not called ResetCountersManagedBlockSigners") + }, + } + arguments.NodesCoordinator = nodesCoordinatorInstance + bp, _ := blproc.NewShardProcessor(arguments) + + err := bp.CheckSentSignaturesAtCommitTime(&block.Header{}) + assert.Equal(t, expectedErr, err) + }) + t.Run("should work with bitmap", func(t *testing.T) { + validator0, _ := nodesCoordinator.NewValidator([]byte("pk0"), 0, 0) + validator1, _ := nodesCoordinator.NewValidator([]byte("pk1"), 1, 1) + validator2, _ := nodesCoordinator.NewValidator([]byte("pk2"), 2, 2) + + nodesCoordinatorInstance := shardingMocks.NewNodesCoordinatorMock() + nodesCoordinatorInstance.ComputeValidatorsGroupCalled = func(randomness []byte, round uint64, shardId uint32, epoch uint32) (validatorsGroup []nodesCoordinator.Validator, err error) { + return []nodesCoordinator.Validator{validator0, validator1, validator2}, nil + } + + resetCountersCalled := make([][]byte, 0) + arguments := CreateMockArguments(createComponentHolderMocks()) + arguments.SentSignaturesTracker = &testscommon.SentSignatureTrackerStub{ + ResetCountersForManagedBlockSignerCalled: func(signerPk []byte) { + resetCountersCalled = append(resetCountersCalled, signerPk) + }, + } + arguments.NodesCoordinator = nodesCoordinatorInstance + bp, _ := blproc.NewShardProcessor(arguments) + + err := bp.CheckSentSignaturesAtCommitTime(&block.Header{ + PubKeysBitmap: []byte{0b00000101}, + }) + assert.Nil(t, err) + + assert.Equal(t, [][]byte{validator0.PubKey(), validator2.PubKey()}, resetCountersCalled) + }) +} diff --git a/process/block/export_test.go b/process/block/export_test.go index c8da250cba6..2332115613c 100644 --- a/process/block/export_test.go +++ b/process/block/export_test.go @@ -11,6 +11,7 @@ import ( "github.com/multiversx/mx-chain-core-go/data/scheduled" "github.com/multiversx/mx-chain-core-go/hashing" "github.com/multiversx/mx-chain-core-go/marshal" + "github.com/multiversx/mx-chain-go/dataRetriever" "github.com/multiversx/mx-chain-go/process" "github.com/multiversx/mx-chain-go/process/block/bootstrapStorage" @@ -167,6 +168,7 @@ func NewShardProcessorEmptyWith3shards( ReceiptsRepository: &testscommon.ReceiptsRepositoryStub{}, BlockProcessingCutoffHandler: &testscommon.BlockProcessingCutoffStub{}, ManagedPeersHolder: &testscommon.ManagedPeersHolderStub{}, + SentSignaturesTracker: &testscommon.SentSignatureTrackerStub{}, }, } shardProc, err := NewShardProcessor(arguments) @@ -181,6 +183,10 @@ func (mp *metaProcessor) ReceivedShardHeader(header data.HeaderHandler, shardHea mp.receivedShardHeader(header, shardHeaderHash) } +func (mp *metaProcessor) GetDataPool() dataRetriever.PoolsHolder { + return mp.dataPool +} + func (mp *metaProcessor) AddHdrHashToRequestedList(hdr data.HeaderHandler, hdrHash []byte) { mp.hdrsForCurrBlock.mutHdrsForBlock.Lock() defer mp.hdrsForCurrBlock.mutHdrsForBlock.Unlock() @@ -559,3 +565,144 @@ func (mp *metaProcessor) GetAllMarshalledTxs(body *block.Body) map[string][][]by func (bp *baseProcessor) SetNonceOfFirstCommittedBlock(nonce uint64) { bp.setNonceOfFirstCommittedBlock(nonce) } + +// CheckSentSignaturesAtCommitTime - +func (bp *baseProcessor) CheckSentSignaturesAtCommitTime(header data.HeaderHandler) error { + 
return bp.checkSentSignaturesAtCommitTime(header) +} + +// GetHdrForBlock - +func (mp *metaProcessor) GetHdrForBlock() *hdrForBlock { + return mp.hdrsForCurrBlock +} + +// ChannelReceiveAllHeaders - +func (mp *metaProcessor) ChannelReceiveAllHeaders() chan bool { + return mp.chRcvAllHdrs +} + +// ComputeExistingAndRequestMissingShardHeaders - +func (mp *metaProcessor) ComputeExistingAndRequestMissingShardHeaders(metaBlock *block.MetaBlock) (uint32, uint32) { + return mp.computeExistingAndRequestMissingShardHeaders(metaBlock) +} + +// ComputeExistingAndRequestMissingMetaHeaders - +func (sp *shardProcessor) ComputeExistingAndRequestMissingMetaHeaders(header data.ShardHeaderHandler) (uint32, uint32) { + return sp.computeExistingAndRequestMissingMetaHeaders(header) +} + +// GetHdrForBlock - +func (sp *shardProcessor) GetHdrForBlock() *hdrForBlock { + return sp.hdrsForCurrBlock +} + +// ChannelReceiveAllHeaders - +func (sp *shardProcessor) ChannelReceiveAllHeaders() chan bool { + return sp.chRcvAllMetaHdrs +} + +// InitMaps - +func (hfb *hdrForBlock) InitMaps() { + hfb.initMaps() + hfb.resetMissingHdrs() +} + +// Clone - +func (hfb *hdrForBlock) Clone() *hdrForBlock { + return hfb +} + +// SetNumMissingHdrs - +func (hfb *hdrForBlock) SetNumMissingHdrs(num uint32) { + hfb.mutHdrsForBlock.Lock() + hfb.missingHdrs = num + hfb.mutHdrsForBlock.Unlock() +} + +// SetNumMissingFinalityAttestingHdrs - +func (hfb *hdrForBlock) SetNumMissingFinalityAttestingHdrs(num uint32) { + hfb.mutHdrsForBlock.Lock() + hfb.missingFinalityAttestingHdrs = num + hfb.mutHdrsForBlock.Unlock() +} + +// SetHighestHdrNonce - +func (hfb *hdrForBlock) SetHighestHdrNonce(shardId uint32, nonce uint64) { + hfb.mutHdrsForBlock.Lock() + hfb.highestHdrNonce[shardId] = nonce + hfb.mutHdrsForBlock.Unlock() +} + +// HdrInfo - +type HdrInfo struct { + UsedInBlock bool + Hdr data.HeaderHandler +} + +// SetHdrHashAndInfo - +func (hfb *hdrForBlock) SetHdrHashAndInfo(hash string, info *HdrInfo) { + hfb.mutHdrsForBlock.Lock() + hfb.hdrHashAndInfo[hash] = &hdrInfo{ + hdr: info.Hdr, + usedInBlock: info.UsedInBlock, + } + hfb.mutHdrsForBlock.Unlock() +} + +// GetHdrHashMap - +func (hfb *hdrForBlock) GetHdrHashMap() map[string]data.HeaderHandler { + m := make(map[string]data.HeaderHandler) + + hfb.mutHdrsForBlock.RLock() + for hash, hi := range hfb.hdrHashAndInfo { + m[hash] = hi.hdr + } + hfb.mutHdrsForBlock.RUnlock() + + return m +} + +// GetHighestHdrNonce - +func (hfb *hdrForBlock) GetHighestHdrNonce() map[uint32]uint64 { + m := make(map[uint32]uint64) + + hfb.mutHdrsForBlock.RLock() + for shardId, nonce := range hfb.highestHdrNonce { + m[shardId] = nonce + } + hfb.mutHdrsForBlock.RUnlock() + + return m +} + +// GetMissingHdrs - +func (hfb *hdrForBlock) GetMissingHdrs() uint32 { + hfb.mutHdrsForBlock.RLock() + defer hfb.mutHdrsForBlock.RUnlock() + + return hfb.missingHdrs +} + +// GetMissingFinalityAttestingHdrs - +func (hfb *hdrForBlock) GetMissingFinalityAttestingHdrs() uint32 { + hfb.mutHdrsForBlock.RLock() + defer hfb.mutHdrsForBlock.RUnlock() + + return hfb.missingFinalityAttestingHdrs +} + +// GetHdrHashAndInfo - +func (hfb *hdrForBlock) GetHdrHashAndInfo() map[string]*HdrInfo { + hfb.mutHdrsForBlock.RLock() + defer hfb.mutHdrsForBlock.RUnlock() + + m := make(map[string]*HdrInfo) + for hash, hi := range hfb.hdrHashAndInfo { + m[hash] = &HdrInfo{ + UsedInBlock: hi.usedInBlock, + Hdr: hi.hdr, + } + } + + return m +} diff --git a/process/block/metablock.go b/process/block/metablock.go index 49b2504b3ce..390e1cebf25 100644 --- 
a/process/block/metablock.go +++ b/process/block/metablock.go @@ -136,6 +136,8 @@ func NewMetaProcessor(arguments ArgMetaProcessor) (*metaProcessor, error) { processStatusHandler: arguments.CoreComponents.ProcessStatusHandler(), blockProcessingCutoffHandler: arguments.BlockProcessingCutoffHandler, managedPeersHolder: arguments.ManagedPeersHolder, + sentSignaturesTracker: arguments.SentSignaturesTracker, + extraDelayRequestBlockInfo: time.Duration(arguments.Config.EpochStartConfig.ExtraDelayForRequestBlockInfoInMilliseconds) * time.Millisecond, } mp := metaProcessor{ @@ -437,7 +439,7 @@ func (mp *metaProcessor) processEpochStartMetaBlock( } if mp.isRewardsV2Enabled(header) { - err = mp.epochSystemSCProcessor.ProcessSystemSmartContract(allValidatorsInfo, header.Nonce, header.Epoch) + err = mp.epochSystemSCProcessor.ProcessSystemSmartContract(allValidatorsInfo, header) if err != nil { return err } @@ -452,7 +454,7 @@ func (mp *metaProcessor) processEpochStartMetaBlock( return err } - err = mp.epochSystemSCProcessor.ProcessSystemSmartContract(allValidatorsInfo, header.Nonce, header.Epoch) + err = mp.epochSystemSCProcessor.ProcessSystemSmartContract(allValidatorsInfo, header) if err != nil { return err } @@ -869,7 +871,7 @@ func (mp *metaProcessor) createEpochStartBody(metaBlock *block.MetaBlock) (data. var rewardMiniBlocks block.MiniBlockSlice if mp.isRewardsV2Enabled(metaBlock) { - err = mp.epochSystemSCProcessor.ProcessSystemSmartContract(allValidatorsInfo, metaBlock.Nonce, metaBlock.Epoch) + err = mp.epochSystemSCProcessor.ProcessSystemSmartContract(allValidatorsInfo, metaBlock) if err != nil { return nil, err } @@ -884,7 +886,7 @@ func (mp *metaProcessor) createEpochStartBody(metaBlock *block.MetaBlock) (data. return nil, err } - err = mp.epochSystemSCProcessor.ProcessSystemSmartContract(allValidatorsInfo, metaBlock.Nonce, metaBlock.Epoch) + err = mp.epochSystemSCProcessor.ProcessSystemSmartContract(allValidatorsInfo, metaBlock) if err != nil { return nil, err } @@ -1238,6 +1240,11 @@ func (mp *metaProcessor) CommitBlock( mp.setNonceOfFirstCommittedBlock(headerHandler.GetNonce()) mp.updateLastCommittedInDebugger(headerHandler.GetRound()) + errNotCritical := mp.checkSentSignaturesAtCommitTime(headerHandler) + if errNotCritical != nil { + log.Debug("checkSentSignaturesBeforeCommitting", "error", errNotCritical.Error()) + } + notarizedHeadersHashes, errNotCritical := mp.updateCrossShardInfo(header) if errNotCritical != nil { log.Debug("updateCrossShardInfo", "error", errNotCritical.Error()) diff --git a/process/block/metablockRequest_test.go b/process/block/metablockRequest_test.go new file mode 100644 index 00000000000..0718830a43c --- /dev/null +++ b/process/block/metablockRequest_test.go @@ -0,0 +1,653 @@ +package block_test + +import ( + "bytes" + "errors" + "fmt" + "sync" + "sync/atomic" + "testing" + "time" + + "github.com/multiversx/mx-chain-core-go/data" + "github.com/multiversx/mx-chain-core-go/data/block" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/multiversx/mx-chain-go/dataRetriever" + blockProcess "github.com/multiversx/mx-chain-go/process/block" + "github.com/multiversx/mx-chain-go/process/mock" + "github.com/multiversx/mx-chain-go/state" + "github.com/multiversx/mx-chain-go/testscommon" + dataRetrieverMock "github.com/multiversx/mx-chain-go/testscommon/dataRetriever" + "github.com/multiversx/mx-chain-go/testscommon/hashingMocks" + "github.com/multiversx/mx-chain-go/testscommon/pool" + stateMock 
"github.com/multiversx/mx-chain-go/testscommon/state" +) + +func TestMetaProcessor_computeExistingAndRequestMissingShardHeaders(t *testing.T) { + t.Parallel() + + noOfShards := uint32(2) + td := createTestData() + + t.Run("all referenced shard headers missing", func(t *testing.T) { + t.Parallel() + referencedHeaders := []*shardHeaderData{td[0].referencedHeaderData, td[1].referencedHeaderData} + shardInfo := createShardInfo(referencedHeaders) + metaBlock := &block.MetaBlock{ + ShardInfo: shardInfo, + } + + numCallsMissingAttestation := atomic.Uint32{} + numCallsMissingHeaders := atomic.Uint32{} + arguments := createMetaProcessorArguments(t, noOfShards) + updateRequestsHandlerForCountingRequests(t, arguments, td, metaBlock, &numCallsMissingHeaders, &numCallsMissingAttestation) + + mp, err := blockProcess.NewMetaProcessor(*arguments) + require.Nil(t, err) + require.NotNil(t, mp) + + headersForBlock := mp.GetHdrForBlock() + numMissing, numAttestationMissing := mp.ComputeExistingAndRequestMissingShardHeaders(metaBlock) + time.Sleep(100 * time.Millisecond) + require.Equal(t, uint32(2), numMissing) + require.Equal(t, uint32(2), headersForBlock.GetMissingHdrs()) + // before receiving all missing headers referenced in metaBlock, the number of missing attestations is not updated + require.Equal(t, uint32(0), numAttestationMissing) + require.Equal(t, uint32(0), headersForBlock.GetMissingFinalityAttestingHdrs()) + require.Len(t, headersForBlock.GetHdrHashAndInfo(), 2) + require.Equal(t, uint32(0), numCallsMissingAttestation.Load()) + require.Equal(t, uint32(2), numCallsMissingHeaders.Load()) + }) + t.Run("one referenced shard header present and one missing", func(t *testing.T) { + t.Parallel() + referencedHeaders := []*shardHeaderData{td[0].referencedHeaderData, td[1].referencedHeaderData} + shardInfo := createShardInfo(referencedHeaders) + metaBlock := &block.MetaBlock{ + ShardInfo: shardInfo, + } + + numCallsMissingAttestation := atomic.Uint32{} + numCallsMissingHeaders := atomic.Uint32{} + arguments := createMetaProcessorArguments(t, noOfShards) + poolsHolder, ok := arguments.DataComponents.Datapool().(*dataRetrieverMock.PoolsHolderMock) + require.True(t, ok) + + headersPoolStub := createPoolsHolderForHeaderRequests() + poolsHolder.SetHeadersPool(headersPoolStub) + updateRequestsHandlerForCountingRequests(t, arguments, td, metaBlock, &numCallsMissingHeaders, &numCallsMissingAttestation) + + mp, err := blockProcess.NewMetaProcessor(*arguments) + require.Nil(t, err) + require.NotNil(t, mp) + + headersPool := mp.GetDataPool().Headers() + // adding the existing header + headersPool.AddHeader(td[0].referencedHeaderData.headerHash, td[0].referencedHeaderData.header) + numMissing, numAttestationMissing := mp.ComputeExistingAndRequestMissingShardHeaders(metaBlock) + time.Sleep(100 * time.Millisecond) + headersForBlock := mp.GetHdrForBlock() + require.Equal(t, uint32(1), numMissing) + require.Equal(t, uint32(1), headersForBlock.GetMissingHdrs()) + // before receiving all missing headers referenced in metaBlock, the number of missing attestations is not updated + require.Equal(t, uint32(0), numAttestationMissing) + require.Equal(t, uint32(0), headersForBlock.GetMissingFinalityAttestingHdrs()) + require.Len(t, headersForBlock.GetHdrHashAndInfo(), 2) + require.Equal(t, uint32(0), numCallsMissingAttestation.Load()) + require.Equal(t, uint32(1), numCallsMissingHeaders.Load()) + }) + t.Run("all referenced shard headers present, all attestation headers missing", func(t *testing.T) { + t.Parallel() + 
referencedHeaders := []*shardHeaderData{td[0].referencedHeaderData, td[1].referencedHeaderData} + shardInfo := createShardInfo(referencedHeaders) + metaBlock := &block.MetaBlock{ + ShardInfo: shardInfo, + } + + numCallsMissingAttestation := atomic.Uint32{} + numCallsMissingHeaders := atomic.Uint32{} + arguments := createMetaProcessorArguments(t, noOfShards) + poolsHolder, ok := arguments.DataComponents.Datapool().(*dataRetrieverMock.PoolsHolderMock) + require.True(t, ok) + + headersPoolStub := createPoolsHolderForHeaderRequests() + poolsHolder.SetHeadersPool(headersPoolStub) + updateRequestsHandlerForCountingRequests(t, arguments, td, metaBlock, &numCallsMissingHeaders, &numCallsMissingAttestation) + + mp, err := blockProcess.NewMetaProcessor(*arguments) + require.Nil(t, err) + require.NotNil(t, mp) + + headersPool := mp.GetDataPool().Headers() + // adding the existing headers + headersPool.AddHeader(td[0].referencedHeaderData.headerHash, td[0].referencedHeaderData.header) + headersPool.AddHeader(td[1].referencedHeaderData.headerHash, td[1].referencedHeaderData.header) + numMissing, numAttestationMissing := mp.ComputeExistingAndRequestMissingShardHeaders(metaBlock) + time.Sleep(100 * time.Millisecond) + headersForBlock := mp.GetHdrForBlock() + require.Equal(t, uint32(0), numMissing) + require.Equal(t, uint32(0), headersForBlock.GetMissingHdrs()) + require.Equal(t, uint32(2), numAttestationMissing) + require.Equal(t, uint32(2), headersForBlock.GetMissingFinalityAttestingHdrs()) + require.Len(t, headersForBlock.GetHdrHashAndInfo(), 2) + require.Equal(t, uint32(2), numCallsMissingAttestation.Load()) + require.Equal(t, uint32(0), numCallsMissingHeaders.Load()) + }) + t.Run("all referenced shard headers present, one attestation header missing", func(t *testing.T) { + t.Parallel() + referencedHeaders := []*shardHeaderData{td[0].referencedHeaderData, td[1].referencedHeaderData} + shardInfo := createShardInfo(referencedHeaders) + metaBlock := &block.MetaBlock{ + ShardInfo: shardInfo, + } + + numCallsMissingAttestation := atomic.Uint32{} + numCallsMissingHeaders := atomic.Uint32{} + arguments := createMetaProcessorArguments(t, noOfShards) + poolsHolder, ok := arguments.DataComponents.Datapool().(*dataRetrieverMock.PoolsHolderMock) + require.True(t, ok) + + headersPoolStub := createPoolsHolderForHeaderRequests() + poolsHolder.SetHeadersPool(headersPoolStub) + updateRequestsHandlerForCountingRequests(t, arguments, td, metaBlock, &numCallsMissingHeaders, &numCallsMissingAttestation) + + mp, err := blockProcess.NewMetaProcessor(*arguments) + require.Nil(t, err) + require.NotNil(t, mp) + + headersPool := mp.GetDataPool().Headers() + // adding the existing headers + headersPool.AddHeader(td[0].referencedHeaderData.headerHash, td[0].referencedHeaderData.header) + headersPool.AddHeader(td[1].referencedHeaderData.headerHash, td[1].referencedHeaderData.header) + headersPool.AddHeader(td[0].attestationHeaderData.headerHash, td[0].attestationHeaderData.header) + numMissing, numAttestationMissing := mp.ComputeExistingAndRequestMissingShardHeaders(metaBlock) + time.Sleep(100 * time.Millisecond) + headersForBlock := mp.GetHdrForBlock() + require.Equal(t, uint32(0), numMissing) + require.Equal(t, uint32(0), headersForBlock.GetMissingHdrs()) + require.Equal(t, uint32(1), numAttestationMissing) + require.Equal(t, uint32(1), headersForBlock.GetMissingFinalityAttestingHdrs()) + require.Len(t, headersForBlock.GetHdrHashAndInfo(), 3) + require.Equal(t, uint32(1), numCallsMissingAttestation.Load()) + require.Equal(t, 
uint32(0), numCallsMissingHeaders.Load()) + }) + t.Run("all referenced shard headers present, all attestation headers present", func(t *testing.T) { + t.Parallel() + referencedHeaders := []*shardHeaderData{td[0].referencedHeaderData, td[1].referencedHeaderData} + shardInfo := createShardInfo(referencedHeaders) + metaBlock := &block.MetaBlock{ + ShardInfo: shardInfo, + } + + numCallsMissingAttestation := atomic.Uint32{} + numCallsMissingHeaders := atomic.Uint32{} + arguments := createMetaProcessorArguments(t, noOfShards) + poolsHolder, ok := arguments.DataComponents.Datapool().(*dataRetrieverMock.PoolsHolderMock) + require.True(t, ok) + + headersPoolStub := createPoolsHolderForHeaderRequests() + poolsHolder.SetHeadersPool(headersPoolStub) + updateRequestsHandlerForCountingRequests(t, arguments, td, metaBlock, &numCallsMissingHeaders, &numCallsMissingAttestation) + + mp, err := blockProcess.NewMetaProcessor(*arguments) + require.Nil(t, err) + require.NotNil(t, mp) + + headersPool := mp.GetDataPool().Headers() + // adding the existing headers + headersPool.AddHeader(td[0].referencedHeaderData.headerHash, td[0].referencedHeaderData.header) + headersPool.AddHeader(td[1].referencedHeaderData.headerHash, td[1].referencedHeaderData.header) + headersPool.AddHeader(td[0].attestationHeaderData.headerHash, td[0].attestationHeaderData.header) + headersPool.AddHeader(td[1].attestationHeaderData.headerHash, td[1].attestationHeaderData.header) + numMissing, numAttestationMissing := mp.ComputeExistingAndRequestMissingShardHeaders(metaBlock) + time.Sleep(100 * time.Millisecond) + headersForBlock := mp.GetHdrForBlock() + require.Equal(t, uint32(0), numMissing) + require.Equal(t, uint32(0), headersForBlock.GetMissingHdrs()) + require.Equal(t, uint32(0), numAttestationMissing) + require.Equal(t, uint32(0), headersForBlock.GetMissingFinalityAttestingHdrs()) + require.Len(t, headersForBlock.GetHdrHashAndInfo(), 4) + require.Equal(t, uint32(0), numCallsMissingAttestation.Load()) + require.Equal(t, uint32(0), numCallsMissingHeaders.Load()) + }) +} + +func TestMetaProcessor_receivedShardHeader(t *testing.T) { + t.Parallel() + noOfShards := uint32(2) + td := createTestData() + + t.Run("receiving the last used in block shard header", func(t *testing.T) { + t.Parallel() + + numCalls := atomic.Uint32{} + arguments := createMetaProcessorArguments(t, noOfShards) + requestHandler, ok := arguments.ArgBaseProcessor.RequestHandler.(*testscommon.RequestHandlerStub) + require.True(t, ok) + + requestHandler.RequestShardHeaderByNonceCalled = func(shardID uint32, nonce uint64) { + attestationNonce := td[shardID].attestationHeaderData.header.GetNonce() + if nonce != attestationNonce { + require.Fail(t, fmt.Sprintf("nonce should have been %d", attestationNonce)) + } + numCalls.Add(1) + } + + mp, err := blockProcess.NewMetaProcessor(*arguments) + require.Nil(t, err) + require.NotNil(t, mp) + + hdrsForBlock := mp.GetHdrForBlock() + hdrsForBlock.SetNumMissingHdrs(1) + hdrsForBlock.SetNumMissingFinalityAttestingHdrs(0) + hdrsForBlock.SetHighestHdrNonce(0, td[0].referencedHeaderData.header.GetNonce()-1) + hdrsForBlock.SetHdrHashAndInfo(string(td[0].referencedHeaderData.headerHash), &blockProcess.HdrInfo{ + UsedInBlock: true, + Hdr: nil, + }) + + mp.ReceivedShardHeader(td[0].referencedHeaderData.header, td[0].referencedHeaderData.headerHash) + + time.Sleep(100 * time.Millisecond) + require.Nil(t, err) + require.NotNil(t, mp) + require.Equal(t, uint32(1), numCalls.Load()) + require.Equal(t, uint32(1), 
hdrsForBlock.GetMissingFinalityAttestingHdrs()) + }) + + t.Run("shard header used in block received, not latest", func(t *testing.T) { + t.Parallel() + + numCalls := atomic.Uint32{} + arguments := createMetaProcessorArguments(t, noOfShards) + requestHandler, ok := arguments.ArgBaseProcessor.RequestHandler.(*testscommon.RequestHandlerStub) + require.True(t, ok) + + // for requesting attestation header + requestHandler.RequestShardHeaderByNonceCalled = func(shardID uint32, nonce uint64) { + attestationNonce := td[shardID].attestationHeaderData.header.GetNonce() + require.Equal(t, nonce, attestationNonce, fmt.Sprintf("nonce should have been %d", attestationNonce)) + numCalls.Add(1) + } + + mp, err := blockProcess.NewMetaProcessor(*arguments) + require.Nil(t, err) + require.NotNil(t, mp) + + hdrsForBlock := mp.GetHdrForBlock() + hdrsForBlock.SetNumMissingHdrs(2) + hdrsForBlock.SetNumMissingFinalityAttestingHdrs(0) + referencedHeaderData := td[1].referencedHeaderData + hdrsForBlock.SetHighestHdrNonce(0, referencedHeaderData.header.GetNonce()-1) + hdrsForBlock.SetHdrHashAndInfo(string(referencedHeaderData.headerHash), &blockProcess.HdrInfo{ + UsedInBlock: true, + Hdr: nil, + }) + + mp.ReceivedShardHeader(referencedHeaderData.header, referencedHeaderData.headerHash) + + time.Sleep(100 * time.Millisecond) + require.Nil(t, err) + require.NotNil(t, mp) + // not yet requested attestation blocks as still missing one header + require.Equal(t, uint32(0), numCalls.Load()) + // not yet computed + require.Equal(t, uint32(0), hdrsForBlock.GetMissingFinalityAttestingHdrs()) + }) + t.Run("all needed shard attestation headers received", func(t *testing.T) { + t.Parallel() + + numCalls := atomic.Uint32{} + arguments := createMetaProcessorArguments(t, noOfShards) + + poolsHolder, ok := arguments.DataComponents.Datapool().(*dataRetrieverMock.PoolsHolderMock) + require.True(t, ok) + + headersPoolStub := createPoolsHolderForHeaderRequests() + poolsHolder.SetHeadersPool(headersPoolStub) + requestHandler, ok := arguments.ArgBaseProcessor.RequestHandler.(*testscommon.RequestHandlerStub) + require.True(t, ok) + + // for requesting attestation header + requestHandler.RequestShardHeaderByNonceCalled = func(shardID uint32, nonce uint64) { + attestationNonce := td[shardID].attestationHeaderData.header.GetNonce() + if nonce != attestationNonce { + require.Fail(t, "nonce should have been %d", attestationNonce) + } + numCalls.Add(1) + } + + mp, err := blockProcess.NewMetaProcessor(*arguments) + require.Nil(t, err) + require.NotNil(t, mp) + + hdrsForBlock := mp.GetHdrForBlock() + hdrsForBlock.SetNumMissingHdrs(1) + hdrsForBlock.SetNumMissingFinalityAttestingHdrs(0) + referencedHeaderData := td[0].referencedHeaderData + hdrsForBlock.SetHighestHdrNonce(0, referencedHeaderData.header.GetNonce()-1) + hdrsForBlock.SetHdrHashAndInfo(string(referencedHeaderData.headerHash), &blockProcess.HdrInfo{ + UsedInBlock: true, + Hdr: nil, + }) + + // receive the missing header + headersPool := mp.GetDataPool().Headers() + headersPool.AddHeader(referencedHeaderData.headerHash, referencedHeaderData.header) + mp.ReceivedShardHeader(td[0].referencedHeaderData.header, referencedHeaderData.headerHash) + + time.Sleep(100 * time.Millisecond) + require.Nil(t, err) + require.NotNil(t, mp) + require.Equal(t, uint32(1), numCalls.Load()) + require.Equal(t, uint32(1), hdrsForBlock.GetMissingFinalityAttestingHdrs()) + + // needs to be done before receiving the last header otherwise it will + // be blocked waiting on writing to the channel + wg := 
startWaitingForAllHeadersReceivedSignal(t, mp) + + // receive also the attestation header + attestationHeaderData := td[0].attestationHeaderData + headersPool.AddHeader(attestationHeaderData.headerHash, attestationHeaderData.header) + mp.ReceivedShardHeader(attestationHeaderData.header, attestationHeaderData.headerHash) + wg.Wait() + + require.Equal(t, uint32(1), numCalls.Load()) + require.Equal(t, uint32(0), hdrsForBlock.GetMissingFinalityAttestingHdrs()) + }) + t.Run("all needed shard attestation headers received, when multiple shards headers missing", func(t *testing.T) { + t.Parallel() + + numCalls := atomic.Uint32{} + arguments := createMetaProcessorArguments(t, noOfShards) + + poolsHolder, ok := arguments.DataComponents.Datapool().(*dataRetrieverMock.PoolsHolderMock) + require.True(t, ok) + + headersPoolStub := createPoolsHolderForHeaderRequests() + poolsHolder.SetHeadersPool(headersPoolStub) + requestHandler, ok := arguments.ArgBaseProcessor.RequestHandler.(*testscommon.RequestHandlerStub) + require.True(t, ok) + + // for requesting attestation header + requestHandler.RequestShardHeaderByNonceCalled = func(shardID uint32, nonce uint64) { + attestationNonce := td[shardID].attestationHeaderData.header.GetNonce() + if nonce != td[shardID].attestationHeaderData.header.GetNonce() { + require.Fail(t, fmt.Sprintf("requested nonce for shard %d should have been %d", shardID, attestationNonce)) + } + numCalls.Add(1) + } + + mp, err := blockProcess.NewMetaProcessor(*arguments) + require.Nil(t, err) + require.NotNil(t, mp) + + hdrsForBlock := mp.GetHdrForBlock() + hdrsForBlock.SetNumMissingHdrs(2) + hdrsForBlock.SetNumMissingFinalityAttestingHdrs(0) + hdrsForBlock.SetHighestHdrNonce(0, 99) + hdrsForBlock.SetHighestHdrNonce(1, 97) + hdrsForBlock.SetHdrHashAndInfo(string(td[0].referencedHeaderData.headerHash), &blockProcess.HdrInfo{ + UsedInBlock: true, + Hdr: nil, + }) + hdrsForBlock.SetHdrHashAndInfo(string(td[1].referencedHeaderData.headerHash), &blockProcess.HdrInfo{ + UsedInBlock: true, + Hdr: nil, + }) + + // receive the missing header for shard 0 + headersPool := mp.GetDataPool().Headers() + headersPool.AddHeader(td[0].referencedHeaderData.headerHash, td[0].referencedHeaderData.header) + mp.ReceivedShardHeader(td[0].referencedHeaderData.header, td[0].referencedHeaderData.headerHash) + + time.Sleep(100 * time.Millisecond) + require.Nil(t, err) + require.NotNil(t, mp) + // the attestation header for shard 0 is not requested as the attestation header for shard 1 is missing + // TODO: refactor request logic to request missing attestation headers as soon as possible + require.Equal(t, uint32(0), numCalls.Load()) + require.Equal(t, uint32(0), hdrsForBlock.GetMissingFinalityAttestingHdrs()) + + // receive the missing header for shard 1 + headersPool.AddHeader(td[1].referencedHeaderData.headerHash, td[1].referencedHeaderData.header) + mp.ReceivedShardHeader(td[1].referencedHeaderData.header, td[1].referencedHeaderData.headerHash) + + time.Sleep(100 * time.Millisecond) + require.Nil(t, err) + require.NotNil(t, mp) + require.Equal(t, uint32(2), numCalls.Load()) + require.Equal(t, uint32(2), hdrsForBlock.GetMissingFinalityAttestingHdrs()) + + // needs to be done before receiving the last header otherwise it will + // be blocked writing to a channel no one is reading from + wg := startWaitingForAllHeadersReceivedSignal(t, mp) + + // receive also the attestation header + headersPool.AddHeader(td[0].attestationHeaderData.headerHash, td[0].attestationHeaderData.header) + 
mp.ReceivedShardHeader(td[0].attestationHeaderData.header, td[0].attestationHeaderData.headerHash) + + headersPool.AddHeader(td[1].attestationHeaderData.headerHash, td[1].attestationHeaderData.header) + mp.ReceivedShardHeader(td[1].attestationHeaderData.header, td[1].attestationHeaderData.headerHash) + wg.Wait() + + time.Sleep(100 * time.Millisecond) + // the receive of an attestation header, if not the last one, will trigger a new request of missing attestation headers + // TODO: refactor request logic to not request recently already requested headers + require.Equal(t, uint32(3), numCalls.Load()) + require.Equal(t, uint32(0), hdrsForBlock.GetMissingFinalityAttestingHdrs()) + }) +} + +type receivedAllHeadersSignaler interface { + ChannelReceiveAllHeaders() chan bool +} + +func startWaitingForAllHeadersReceivedSignal(t *testing.T, mp receivedAllHeadersSignaler) *sync.WaitGroup { + wg := &sync.WaitGroup{} + wg.Add(1) + go func(w *sync.WaitGroup) { + receivedAllHeaders := checkReceivedAllHeaders(mp.ChannelReceiveAllHeaders()) + require.True(t, receivedAllHeaders) + wg.Done() + }(wg) + return wg +} + +func checkReceivedAllHeaders(channelReceiveAllHeaders chan bool) bool { + select { + case <-time.After(100 * time.Millisecond): + return false + case <-channelReceiveAllHeaders: + return true + } +} + +func createPoolsHolderForHeaderRequests() dataRetriever.HeadersPool { + headersInPool := make(map[string]data.HeaderHandler) + mutHeadersInPool := sync.RWMutex{} + errNotFound := errors.New("header not found") + + return &pool.HeadersPoolStub{ + AddCalled: func(headerHash []byte, header data.HeaderHandler) { + mutHeadersInPool.Lock() + headersInPool[string(headerHash)] = header + mutHeadersInPool.Unlock() + }, + GetHeaderByHashCalled: func(hash []byte) (data.HeaderHandler, error) { + mutHeadersInPool.RLock() + defer mutHeadersInPool.RUnlock() + if h, ok := headersInPool[string(hash)]; ok { + return h, nil + } + return nil, errNotFound + }, + GetHeaderByNonceAndShardIdCalled: func(hdrNonce uint64, shardId uint32) ([]data.HeaderHandler, [][]byte, error) { + mutHeadersInPool.RLock() + defer mutHeadersInPool.RUnlock() + for hash, h := range headersInPool { + if h.GetNonce() == hdrNonce && h.GetShardID() == shardId { + return []data.HeaderHandler{h}, [][]byte{[]byte(hash)}, nil + } + } + return nil, nil, errNotFound + }, + } +} + +func createMetaProcessorArguments(t *testing.T, noOfShards uint32) *blockProcess.ArgMetaProcessor { + poolMock := dataRetrieverMock.NewPoolsHolderMock() + poolMock.Headers() + coreComponents, dataComponents, bootstrapComponents, statusComponents := createMockComponentHolders() + coreComponents.Hash = &hashingMocks.HasherMock{} + dataComponents.DataPool = poolMock + dataComponents.Storage = initStore() + bootstrapComponents.Coordinator = mock.NewMultiShardsCoordinatorMock(noOfShards) + arguments := createMockMetaArguments(coreComponents, dataComponents, bootstrapComponents, statusComponents) + arguments.AccountsDB[state.UserAccountsState] = &stateMock.AccountsStub{ + RevertToSnapshotCalled: func(snapshot int) error { + assert.Fail(t, "revert should have not been called") + return nil + }, + JournalLenCalled: func() int { + return 0 + }, + } + + startHeaders := createGenesisBlocks(bootstrapComponents.ShardCoordinator()) + arguments.BlockTracker = mock.NewBlockTrackerMock(bootstrapComponents.ShardCoordinator(), startHeaders) + arguments.ArgBaseProcessor.RequestHandler = &testscommon.RequestHandlerStub{ + RequestShardHeaderByNonceCalled: func(shardID uint32, nonce uint64) { + 
require.Fail(t, "should not have been called") + }, + RequestMetaHeaderByNonceCalled: func(nonce uint64) { + require.Fail(t, "should not have been called") + }, + + RequestShardHeaderCalled: func(shardID uint32, hash []byte) { + require.Fail(t, "should not have been called") + }, + RequestMetaHeaderCalled: func(hash []byte) { + require.Fail(t, "should not have been called") + }, + } + + return &arguments +} + +type shardHeaderData struct { + header *block.HeaderV2 + headerHash []byte +} + +type shardTestData struct { + referencedHeaderData *shardHeaderData + attestationHeaderData *shardHeaderData +} + +func createTestData() map[uint32]*shardTestData { + shard0Header1Hash := []byte("sh0TestHash1") + shard0header2Hash := []byte("sh0TestHash2") + shard1Header1Hash := []byte("sh1TestHash1") + shard1header2Hash := []byte("sh1TestHash2") + shard0ReferencedNonce := uint64(100) + shard1ReferencedNonce := uint64(98) + shard0AttestationNonce := shard0ReferencedNonce + 1 + shard1AttestationNonce := shard1ReferencedNonce + 1 + + shardsTestData := map[uint32]*shardTestData{ + 0: { + referencedHeaderData: &shardHeaderData{ + header: &block.HeaderV2{ + Header: &block.Header{ + ShardID: 0, + Round: 100, + Nonce: shard0ReferencedNonce, + }, + }, + headerHash: shard0Header1Hash, + }, + attestationHeaderData: &shardHeaderData{ + header: &block.HeaderV2{ + Header: &block.Header{ + ShardID: 0, + Round: 101, + Nonce: shard0AttestationNonce, + PrevHash: shard0Header1Hash, + }, + }, + headerHash: shard0header2Hash, + }, + }, + 1: { + referencedHeaderData: &shardHeaderData{ + header: &block.HeaderV2{ + Header: &block.Header{ + ShardID: 1, + Round: 100, + Nonce: shard1ReferencedNonce, + }, + }, + headerHash: shard1Header1Hash, + }, + attestationHeaderData: &shardHeaderData{ + header: &block.HeaderV2{ + Header: &block.Header{ + ShardID: 1, + Round: 101, + Nonce: shard1AttestationNonce, + PrevHash: shard1Header1Hash, + }, + }, + headerHash: shard1header2Hash, + }, + }, + } + + return shardsTestData +} + +func createShardInfo(referencedHeaders []*shardHeaderData) []block.ShardData { + shardData := make([]block.ShardData, len(referencedHeaders)) + for i, h := range referencedHeaders { + shardData[i] = block.ShardData{ + HeaderHash: h.headerHash, + Round: h.header.GetRound(), + PrevHash: h.header.GetPrevHash(), + Nonce: h.header.GetNonce(), + ShardID: h.header.GetShardID(), + } + } + + return shardData +} + +func updateRequestsHandlerForCountingRequests( + t *testing.T, + arguments *blockProcess.ArgMetaProcessor, + td map[uint32]*shardTestData, + metaBlock *block.MetaBlock, + numCallsMissingHeaders, numCallsMissingAttestation *atomic.Uint32, +) { + requestHandler, ok := arguments.ArgBaseProcessor.RequestHandler.(*testscommon.RequestHandlerStub) + require.True(t, ok) + + requestHandler.RequestShardHeaderByNonceCalled = func(shardID uint32, nonce uint64) { + attestationNonce := td[shardID].attestationHeaderData.header.GetNonce() + if nonce != attestationNonce { + require.Fail(t, fmt.Sprintf("nonce should have been %d", attestationNonce)) + } + numCallsMissingAttestation.Add(1) + } + requestHandler.RequestShardHeaderCalled = func(shardID uint32, hash []byte) { + for _, sh := range metaBlock.ShardInfo { + if bytes.Equal(sh.HeaderHash, hash) && sh.ShardID == shardID { + numCallsMissingHeaders.Add(1) + return + } + } + + require.Fail(t, fmt.Sprintf("header hash %s not found in meta block", hash)) + } +} diff --git a/process/block/metablock_test.go b/process/block/metablock_test.go index 0777df9b803..173e14ffb90 100644 --- 
a/process/block/metablock_test.go +++ b/process/block/metablock_test.go @@ -150,15 +150,16 @@ func createMockMetaArguments( OutportDataProvider: &outport.OutportDataProviderStub{}, BlockProcessingCutoffHandler: &testscommon.BlockProcessingCutoffStub{}, ManagedPeersHolder: &testscommon.ManagedPeersHolderStub{}, + SentSignaturesTracker: &testscommon.SentSignatureTrackerStub{}, }, SCToProtocol: &mock.SCToProtocolStub{}, PendingMiniBlocksHandler: &mock.PendingMiniBlocksHandlerStub{}, EpochStartDataCreator: &mock.EpochStartDataCreatorStub{}, EpochEconomics: &mock.EpochEconomicsStub{}, - EpochRewardsCreator: &mock.EpochRewardsCreatorStub{}, - EpochValidatorInfoCreator: &mock.EpochValidatorInfoCreatorStub{}, - ValidatorStatisticsProcessor: &mock.ValidatorStatisticsProcessorStub{}, - EpochSystemSCProcessor: &mock.EpochStartSystemSCStub{}, + EpochRewardsCreator: &testscommon.RewardsCreatorStub{}, + EpochValidatorInfoCreator: &testscommon.EpochValidatorInfoCreatorStub{}, + ValidatorStatisticsProcessor: &testscommon.ValidatorStatisticsProcessorStub{}, + EpochSystemSCProcessor: &testscommon.EpochStartSystemSCStub{}, } return arguments } @@ -991,6 +992,7 @@ func TestMetaProcessor_CommitBlockOkValsShouldWork(t *testing.T) { mdp := initDataPool([]byte("tx_hash")) rootHash := []byte("rootHash") hdr := createMetaBlockHeader() + hdr.PubKeysBitmap = []byte{0b11111111} body := &block.Body{} accounts := &stateMock.AccountsStub{ CommitCalled: func() (i []byte, e error) { @@ -1042,6 +1044,12 @@ func TestMetaProcessor_CommitBlockOkValsShouldWork(t *testing.T) { return &block.Header{}, []byte("hash"), nil } arguments.BlockTracker = blockTrackerMock + resetCountersForManagedBlockSignerCalled := false + arguments.SentSignaturesTracker = &testscommon.SentSignatureTrackerStub{ + ResetCountersForManagedBlockSignerCalled: func(signerPk []byte) { + resetCountersForManagedBlockSignerCalled = true + }, + } mp, _ := blproc.NewMetaProcessor(arguments) @@ -1083,6 +1091,7 @@ func TestMetaProcessor_CommitBlockOkValsShouldWork(t *testing.T) { assert.Nil(t, err) assert.True(t, forkDetectorAddCalled) assert.True(t, debuggerMethodWasCalled) + assert.True(t, resetCountersForManagedBlockSignerCalled) // this should sleep as there is an async call to display current header and block in CommitBlock time.Sleep(time.Second) } @@ -1199,7 +1208,7 @@ func TestMetaProcessor_RevertStateRevertPeerStateFailsShouldErr(t *testing.T) { return nil }, } - arguments.ValidatorStatisticsProcessor = &mock.ValidatorStatisticsProcessorStub{ + arguments.ValidatorStatisticsProcessor = &testscommon.ValidatorStatisticsProcessorStub{ RevertPeerStateCalled: func(header data.MetaHeaderHandler) error { return expectedErr }, @@ -1228,7 +1237,7 @@ func TestMetaProcessor_RevertStateShouldWork(t *testing.T) { return nil }, } - arguments.ValidatorStatisticsProcessor = &mock.ValidatorStatisticsProcessorStub{ + arguments.ValidatorStatisticsProcessor = &testscommon.ValidatorStatisticsProcessorStub{ RevertPeerStateCalled: func(header data.MetaHeaderHandler) error { revertePeerStateWasCalled = true return nil @@ -3002,7 +3011,7 @@ func TestMetaProcessor_CreateAndProcessBlockCallsProcessAfterFirstEpoch(t *testi dataComponents.DataPool = dPool dataComponents.BlockChain = blkc calledSaveNodesCoordinator := false - arguments.ValidatorStatisticsProcessor = &mock.ValidatorStatisticsProcessorStub{ + arguments.ValidatorStatisticsProcessor = &testscommon.ValidatorStatisticsProcessorStub{ SaveNodesCoordinatorUpdatesCalled: func(epoch uint32) (bool, error) { 
calledSaveNodesCoordinator = true return true, nil @@ -3010,7 +3019,7 @@ func TestMetaProcessor_CreateAndProcessBlockCallsProcessAfterFirstEpoch(t *testi } toggleCalled := false - arguments.EpochSystemSCProcessor = &mock.EpochStartSystemSCStub{ + arguments.EpochSystemSCProcessor = &testscommon.EpochStartSystemSCStub{ ToggleUnStakeUnBondCalled: func(value bool) error { toggleCalled = true assert.Equal(t, value, true) @@ -3146,7 +3155,7 @@ func TestMetaProcessor_CreateNewHeaderValsOK(t *testing.T) { func TestMetaProcessor_ProcessEpochStartMetaBlock(t *testing.T) { t.Parallel() - header := &block.MetaBlock{ + headerMeta := &block.MetaBlock{ Nonce: 1, Round: 1, PrevHash: []byte("hash1"), @@ -3165,19 +3174,18 @@ func TestMetaProcessor_ProcessEpochStartMetaBlock(t *testing.T) { arguments := createMockMetaArguments(coreC, dataC, bootstrapC, statusC) wasCalled := false - arguments.EpochRewardsCreator = &mock.EpochRewardsCreatorStub{ + arguments.EpochRewardsCreator = &testscommon.RewardsCreatorStub{ VerifyRewardsMiniBlocksCalled: func( - metaBlock data.MetaHeaderHandler, validatorsInfo map[uint32][]*state.ValidatorInfo, computedEconomics *block.Economics, + metaBlock data.MetaHeaderHandler, validatorsInfo state.ShardValidatorsInfoMapHandler, computedEconomics *block.Economics, ) error { assert.True(t, wasCalled) return nil }, } - arguments.EpochSystemSCProcessor = &mock.EpochStartSystemSCStub{ - ProcessSystemSmartContractCalled: func(validatorInfos map[uint32][]*state.ValidatorInfo, nonce uint64, epoch uint32) error { - assert.Equal(t, header.GetEpoch(), epoch) - assert.Equal(t, header.GetNonce(), nonce) + arguments.EpochSystemSCProcessor = &testscommon.EpochStartSystemSCStub{ + ProcessSystemSmartContractCalled: func(validatorsInfo state.ShardValidatorsInfoMapHandler, header data.HeaderHandler) error { + assert.Equal(t, headerMeta, header) wasCalled = true return nil }, @@ -3185,7 +3193,7 @@ func TestMetaProcessor_ProcessEpochStartMetaBlock(t *testing.T) { mp, _ := blproc.NewMetaProcessor(arguments) - err := mp.ProcessEpochStartMetaBlock(header, &block.Body{}) + err := mp.ProcessEpochStartMetaBlock(headerMeta, &block.Body{}) assert.Nil(t, err) }) @@ -3202,23 +3210,21 @@ func TestMetaProcessor_ProcessEpochStartMetaBlock(t *testing.T) { }, } arguments := createMockMetaArguments(coreComponents, dataComponents, bootstrapComponents, statusComponents) - - arguments.ValidatorStatisticsProcessor = &mock.ValidatorStatisticsProcessorStub{} + arguments.ValidatorStatisticsProcessor = &testscommon.ValidatorStatisticsProcessorStub{} wasCalled := false - arguments.EpochRewardsCreator = &mock.EpochRewardsCreatorStub{ + arguments.EpochRewardsCreator = &testscommon.RewardsCreatorStub{ VerifyRewardsMiniBlocksCalled: func( - metaBlock data.MetaHeaderHandler, validatorsInfo map[uint32][]*state.ValidatorInfo, computedEconomics *block.Economics, + metaBlock data.MetaHeaderHandler, validatorsInfo state.ShardValidatorsInfoMapHandler, computedEconomics *block.Economics, ) error { wasCalled = true return nil }, } - arguments.EpochSystemSCProcessor = &mock.EpochStartSystemSCStub{ - ProcessSystemSmartContractCalled: func(validatorInfos map[uint32][]*state.ValidatorInfo, nonce uint64, epoch uint32) error { - assert.Equal(t, header.GetEpoch(), epoch) - assert.Equal(t, header.GetNonce(), nonce) + arguments.EpochSystemSCProcessor = &testscommon.EpochStartSystemSCStub{ + ProcessSystemSmartContractCalled: func(validatorInfos state.ShardValidatorsInfoMapHandler, header data.HeaderHandler) error { + assert.Equal(t, headerMeta, header) 
assert.True(t, wasCalled) return nil }, @@ -3226,7 +3232,7 @@ func TestMetaProcessor_ProcessEpochStartMetaBlock(t *testing.T) { mp, _ := blproc.NewMetaProcessor(arguments) - err := mp.ProcessEpochStartMetaBlock(header, &block.Body{}) + err := mp.ProcessEpochStartMetaBlock(headerMeta, &block.Body{}) assert.Nil(t, err) }) } @@ -3315,7 +3321,7 @@ func TestMetaProcessor_CreateEpochStartBodyShouldFail(t *testing.T) { arguments := createMockMetaArguments(coreComponents, dataComponents, bootstrapComponents, statusComponents) expectedErr := errors.New("expected error") - arguments.ValidatorStatisticsProcessor = &mock.ValidatorStatisticsProcessorStub{ + arguments.ValidatorStatisticsProcessor = &testscommon.ValidatorStatisticsProcessorStub{ RootHashCalled: func() ([]byte, error) { return nil, expectedErr }, @@ -3333,8 +3339,8 @@ func TestMetaProcessor_CreateEpochStartBodyShouldFail(t *testing.T) { arguments := createMockMetaArguments(coreComponents, dataComponents, bootstrapComponents, statusComponents) expectedErr := errors.New("expected error") - arguments.ValidatorStatisticsProcessor = &mock.ValidatorStatisticsProcessorStub{ - GetValidatorInfoForRootHashCalled: func(rootHash []byte) (map[uint32][]*state.ValidatorInfo, error) { + arguments.ValidatorStatisticsProcessor = &testscommon.ValidatorStatisticsProcessorStub{ + GetValidatorInfoForRootHashCalled: func(rootHash []byte) (state.ShardValidatorsInfoMapHandler, error) { return nil, expectedErr }, } @@ -3351,8 +3357,8 @@ func TestMetaProcessor_CreateEpochStartBodyShouldFail(t *testing.T) { arguments := createMockMetaArguments(coreComponents, dataComponents, bootstrapComponents, statusComponents) expectedErr := errors.New("expected error") - arguments.ValidatorStatisticsProcessor = &mock.ValidatorStatisticsProcessorStub{ - ProcessRatingsEndOfEpochCalled: func(validatorsInfo map[uint32][]*state.ValidatorInfo, epoch uint32) error { + arguments.ValidatorStatisticsProcessor = &testscommon.ValidatorStatisticsProcessorStub{ + ProcessRatingsEndOfEpochCalled: func(validatorsInfo state.ShardValidatorsInfoMapHandler, epoch uint32) error { return expectedErr }, } @@ -3368,15 +3374,13 @@ func TestMetaProcessor_CreateEpochStartBodyShouldFail(t *testing.T) { func TestMetaProcessor_CreateEpochStartBodyShouldWork(t *testing.T) { t.Parallel() - expectedValidatorsInfo := map[uint32][]*state.ValidatorInfo{ - 0: { - &state.ValidatorInfo{ - ShardId: 1, - RewardAddress: []byte("rewardAddr1"), - AccumulatedFees: big.NewInt(10), - }, - }, - } + expectedValidatorsInfo := state.NewShardValidatorsInfoMap() + _ = expectedValidatorsInfo.Add( + &state.ValidatorInfo{ + ShardId: 1, + RewardAddress: []byte("rewardAddr1"), + AccumulatedFees: big.NewInt(10), + }) rewardMiniBlocks := block.MiniBlockSlice{ &block.MiniBlock{ @@ -3417,11 +3421,11 @@ func TestMetaProcessor_CreateEpochStartBodyShouldWork(t *testing.T) { } expectedRootHash := []byte("root hash") - arguments.ValidatorStatisticsProcessor = &mock.ValidatorStatisticsProcessorStub{ + arguments.ValidatorStatisticsProcessor = &testscommon.ValidatorStatisticsProcessorStub{ RootHashCalled: func() ([]byte, error) { return expectedRootHash, nil }, - GetValidatorInfoForRootHashCalled: func(rootHash []byte) (map[uint32][]*state.ValidatorInfo, error) { + GetValidatorInfoForRootHashCalled: func(rootHash []byte) (state.ShardValidatorsInfoMapHandler, error) { assert.Equal(t, expectedRootHash, rootHash) return expectedValidatorsInfo, nil @@ -3429,32 +3433,31 @@ func TestMetaProcessor_CreateEpochStartBodyShouldWork(t *testing.T) { } wasCalled 
:= false - arguments.EpochSystemSCProcessor = &mock.EpochStartSystemSCStub{ - ProcessSystemSmartContractCalled: func(validatorsInfo map[uint32][]*state.ValidatorInfo, nonce uint64, epoch uint32) error { + arguments.EpochSystemSCProcessor = &testscommon.EpochStartSystemSCStub{ + ProcessSystemSmartContractCalled: func(validatorsInfo state.ShardValidatorsInfoMapHandler, header data.HeaderHandler) error { wasCalled = true - assert.Equal(t, mb.GetNonce(), nonce) - assert.Equal(t, mb.GetEpoch(), epoch) + assert.Equal(t, mb, header) return nil }, } expectedRewardsForProtocolSustain := big.NewInt(11) - arguments.EpochRewardsCreator = &mock.EpochRewardsCreatorStub{ + arguments.EpochRewardsCreator = &testscommon.RewardsCreatorStub{ CreateRewardsMiniBlocksCalled: func( - metaBlock data.MetaHeaderHandler, validatorsInfo map[uint32][]*state.ValidatorInfo, computedEconomics *block.Economics, + metaBlock data.MetaHeaderHandler, validatorsInfo state.ShardValidatorsInfoMapHandler, computedEconomics *block.Economics, ) (block.MiniBlockSlice, error) { assert.Equal(t, expectedValidatorsInfo, validatorsInfo) assert.Equal(t, mb, metaBlock) assert.True(t, wasCalled) return rewardMiniBlocks, nil }, - GetProtocolSustainCalled: func() *big.Int { + GetProtocolSustainabilityRewardsCalled: func() *big.Int { return expectedRewardsForProtocolSustain }, } - arguments.EpochValidatorInfoCreator = &mock.EpochValidatorInfoCreatorStub{ - CreateValidatorInfoMiniBlocksCalled: func(validatorsInfo map[uint32][]*state.ValidatorInfo) (block.MiniBlockSlice, error) { + arguments.EpochValidatorInfoCreator = &testscommon.EpochValidatorInfoCreatorStub{ + CreateValidatorInfoMiniBlocksCalled: func(validatorsInfo state.ShardValidatorsInfoMapHandler) (block.MiniBlockSlice, error) { assert.Equal(t, expectedValidatorsInfo, validatorsInfo) return validatorInfoMiniBlocks, nil }, @@ -3497,11 +3500,11 @@ func TestMetaProcessor_CreateEpochStartBodyShouldWork(t *testing.T) { } expectedRootHash := []byte("root hash") - arguments.ValidatorStatisticsProcessor = &mock.ValidatorStatisticsProcessorStub{ + arguments.ValidatorStatisticsProcessor = &testscommon.ValidatorStatisticsProcessorStub{ RootHashCalled: func() ([]byte, error) { return expectedRootHash, nil }, - GetValidatorInfoForRootHashCalled: func(rootHash []byte) (map[uint32][]*state.ValidatorInfo, error) { + GetValidatorInfoForRootHashCalled: func(rootHash []byte) (state.ShardValidatorsInfoMapHandler, error) { assert.Equal(t, expectedRootHash, rootHash) return expectedValidatorsInfo, nil }, @@ -3509,32 +3512,31 @@ func TestMetaProcessor_CreateEpochStartBodyShouldWork(t *testing.T) { wasCalled := false expectedRewardsForProtocolSustain := big.NewInt(11) - arguments.EpochRewardsCreator = &mock.EpochRewardsCreatorStub{ + arguments.EpochRewardsCreator = &testscommon.RewardsCreatorStub{ CreateRewardsMiniBlocksCalled: func( - metaBlock data.MetaHeaderHandler, validatorsInfo map[uint32][]*state.ValidatorInfo, computedEconomics *block.Economics, + metaBlock data.MetaHeaderHandler, validatorsInfo state.ShardValidatorsInfoMapHandler, computedEconomics *block.Economics, ) (block.MiniBlockSlice, error) { wasCalled = true assert.Equal(t, expectedValidatorsInfo, validatorsInfo) assert.Equal(t, mb, metaBlock) return rewardMiniBlocks, nil }, - GetProtocolSustainCalled: func() *big.Int { + GetProtocolSustainabilityRewardsCalled: func() *big.Int { return expectedRewardsForProtocolSustain }, } - arguments.EpochValidatorInfoCreator = &mock.EpochValidatorInfoCreatorStub{ - CreateValidatorInfoMiniBlocksCalled: 
func(validatorsInfo map[uint32][]*state.ValidatorInfo) (block.MiniBlockSlice, error) { + arguments.EpochValidatorInfoCreator = &testscommon.EpochValidatorInfoCreatorStub{ + CreateValidatorInfoMiniBlocksCalled: func(validatorsInfo state.ShardValidatorsInfoMapHandler) (block.MiniBlockSlice, error) { assert.Equal(t, expectedValidatorsInfo, validatorsInfo) return validatorInfoMiniBlocks, nil }, } - arguments.EpochSystemSCProcessor = &mock.EpochStartSystemSCStub{ - ProcessSystemSmartContractCalled: func(validatorsInfo map[uint32][]*state.ValidatorInfo, nonce uint64, epoch uint32) error { + arguments.EpochSystemSCProcessor = &testscommon.EpochStartSystemSCStub{ + ProcessSystemSmartContractCalled: func(validatorsInfo state.ShardValidatorsInfoMapHandler, header data.HeaderHandler) error { assert.True(t, wasCalled) - assert.Equal(t, mb.GetNonce(), nonce) - assert.Equal(t, mb.GetEpoch(), epoch) + assert.Equal(t, mb, header) return nil }, } @@ -3609,8 +3611,7 @@ func TestMetaProcessor_getAllMarshalledTxs(t *testing.T) { t.Parallel() arguments := createMockMetaArguments(createMockComponentHolders()) - - arguments.EpochRewardsCreator = &mock.EpochRewardsCreatorStub{ + arguments.EpochRewardsCreator = &testscommon.RewardsCreatorStub{ CreateMarshalledDataCalled: func(body *block.Body) map[string][][]byte { marshalledData := make(map[string][][]byte) for _, miniBlock := range body.MiniBlocks { @@ -3623,7 +3624,7 @@ func TestMetaProcessor_getAllMarshalledTxs(t *testing.T) { }, } - arguments.EpochValidatorInfoCreator = &mock.EpochValidatorInfoCreatorStub{ + arguments.EpochValidatorInfoCreator = &testscommon.EpochValidatorInfoCreatorStub{ CreateMarshalledDataCalled: func(body *block.Body) map[string][][]byte { marshalledData := make(map[string][][]byte) for _, miniBlock := range body.MiniBlocks { diff --git a/process/block/metrics.go b/process/block/metrics.go index f9c3e0075b3..ce29ddb23f8 100644 --- a/process/block/metrics.go +++ b/process/block/metrics.go @@ -225,12 +225,12 @@ func indexValidatorsRating( return } - for shardID, validatorInfosInShard := range validators { + for shardID, validatorInfosInShard := range validators.GetShardValidatorsInfoMap() { validatorsInfos := make([]*outportcore.ValidatorRatingInfo, 0) for _, validatorInfo := range validatorInfosInShard { validatorsInfos = append(validatorsInfos, &outportcore.ValidatorRatingInfo{ - PublicKey: hex.EncodeToString(validatorInfo.PublicKey), - Rating: float32(validatorInfo.Rating) * 100 / 10000000, + PublicKey: hex.EncodeToString(validatorInfo.GetPublicKey()), + Rating: float32(validatorInfo.GetRating()) * 100 / 10000000, }) } diff --git a/process/block/postprocess/feeHandler.go b/process/block/postprocess/feeHandler.go index d4248154ef9..5cfc7996ab6 100644 --- a/process/block/postprocess/feeHandler.go +++ b/process/block/postprocess/feeHandler.go @@ -26,13 +26,14 @@ type feeHandler struct { } // NewFeeAccumulator constructor for the fee accumulator -func NewFeeAccumulator() (*feeHandler, error) { - f := &feeHandler{} - f.accumulatedFees = big.NewInt(0) - f.developerFees = big.NewInt(0) - f.mapHashFee = make(map[string]*feeData) - f.mapDependentHashes = make(map[string][]byte) - return f, nil +func NewFeeAccumulator() *feeHandler { + return &feeHandler{ + mut: sync.RWMutex{}, + mapHashFee: make(map[string]*feeData), + accumulatedFees: big.NewInt(0), + developerFees: big.NewInt(0), + mapDependentHashes: make(map[string][]byte), + } } // CreateBlockStarted does the cleanup before creating a new block diff --git 
a/process/block/postprocess/feeHandler_test.go b/process/block/postprocess/feeHandler_test.go index b74dbab4e0e..060276ba2fb 100644 --- a/process/block/postprocess/feeHandler_test.go +++ b/process/block/postprocess/feeHandler_test.go @@ -13,15 +13,14 @@ import ( func TestNewFeeAccumulator(t *testing.T) { t.Parallel() - feeHandler, err := postprocess.NewFeeAccumulator() - require.Nil(t, err) + feeHandler := postprocess.NewFeeAccumulator() require.NotNil(t, feeHandler) } func TestFeeHandler_CreateBlockStarted(t *testing.T) { t.Parallel() - feeHandler, _ := postprocess.NewFeeAccumulator() + feeHandler := postprocess.NewFeeAccumulator() feeHandler.ProcessTransactionFee(big.NewInt(100), big.NewInt(50), []byte("txhash")) zeroGasAndFees := process.GetZeroGasAndFees() @@ -37,7 +36,7 @@ func TestFeeHandler_CreateBlockStarted(t *testing.T) { func TestFeeHandler_GetAccumulatedFees(t *testing.T) { t.Parallel() - feeHandler, _ := postprocess.NewFeeAccumulator() + feeHandler := postprocess.NewFeeAccumulator() feeHandler.ProcessTransactionFee(big.NewInt(100), big.NewInt(50), []byte("txhash")) accumulatedFees := feeHandler.GetAccumulatedFees() @@ -47,7 +46,7 @@ func TestFeeHandler_GetAccumulatedFees(t *testing.T) { func TestFeeHandler_GetDeveloperFees(t *testing.T) { t.Parallel() - feeHandler, _ := postprocess.NewFeeAccumulator() + feeHandler := postprocess.NewFeeAccumulator() feeHandler.ProcessTransactionFee(big.NewInt(100), big.NewInt(50), []byte("txhash")) devFees := feeHandler.GetDeveloperFees() @@ -57,7 +56,7 @@ func TestFeeHandler_GetDeveloperFees(t *testing.T) { func TestFeeHandler_ProcessTransactionFee(t *testing.T) { t.Parallel() - feeHandler, _ := postprocess.NewFeeAccumulator() + feeHandler := postprocess.NewFeeAccumulator() feeHandler.ProcessTransactionFee(big.NewInt(1000), big.NewInt(100), []byte("txhash1")) feeHandler.ProcessTransactionFee(big.NewInt(100), big.NewInt(10), []byte("txhash2")) @@ -72,7 +71,7 @@ func TestFeeHandler_ProcessTransactionFee(t *testing.T) { func TestFeeHandler_RevertFees(t *testing.T) { t.Parallel() - feeHandler, _ := postprocess.NewFeeAccumulator() + feeHandler := postprocess.NewFeeAccumulator() feeHandler.ProcessTransactionFee(big.NewInt(1000), big.NewInt(100), []byte("txhash1")) feeHandler.ProcessTransactionFee(big.NewInt(100), big.NewInt(10), []byte("txhash2")) @@ -89,7 +88,7 @@ func TestFeeHandler_RevertFees(t *testing.T) { func TestFeeHandler_CompleteRevertFeesUserTxs(t *testing.T) { t.Parallel() - feeHandler, _ := postprocess.NewFeeAccumulator() + feeHandler := postprocess.NewFeeAccumulator() userTxHashes := [][]byte{[]byte("txHash1"), []byte("txHash2"), []byte("txHash3")} originalTxHashes := [][]byte{[]byte("origTxHash1"), []byte("origTxHash2"), []byte("origTxHash3")} @@ -111,7 +110,7 @@ func TestFeeHandler_PartialRevertFeesUserTxs(t *testing.T) { originalTxHashes := [][]byte{[]byte("origTxHash1"), []byte("origTxHash2"), []byte("origTxHash3"), []byte("userTxHash4")} t.Run("revert partial originalTxs", func(t *testing.T) { - feeHandler, _ := postprocess.NewFeeAccumulator() + feeHandler := postprocess.NewFeeAccumulator() feeHandler.ProcessTransactionFeeRelayedUserTx(big.NewInt(1000), big.NewInt(100), userTxHashes[0], originalTxHashes[0]) feeHandler.ProcessTransactionFeeRelayedUserTx(big.NewInt(100), big.NewInt(10), userTxHashes[1], originalTxHashes[1]) feeHandler.ProcessTransactionFeeRelayedUserTx(big.NewInt(10), big.NewInt(1), userTxHashes[2], originalTxHashes[2]) @@ -125,7 +124,7 @@ func TestFeeHandler_PartialRevertFeesUserTxs(t *testing.T) { require.Equal(t, 
big.NewInt(200), devFees) }) t.Run("revert all userTxs", func(t *testing.T) { - feeHandler, _ := postprocess.NewFeeAccumulator() + feeHandler := postprocess.NewFeeAccumulator() feeHandler.ProcessTransactionFeeRelayedUserTx(big.NewInt(1000), big.NewInt(100), userTxHashes[0], originalTxHashes[0]) feeHandler.ProcessTransactionFeeRelayedUserTx(big.NewInt(100), big.NewInt(10), userTxHashes[1], originalTxHashes[1]) feeHandler.ProcessTransactionFeeRelayedUserTx(big.NewInt(10), big.NewInt(1), userTxHashes[2], originalTxHashes[2]) @@ -139,7 +138,7 @@ func TestFeeHandler_PartialRevertFeesUserTxs(t *testing.T) { require.Equal(t, big.NewInt(200), devFees) }) t.Run("revert partial userTxs", func(t *testing.T) { - feeHandler, _ := postprocess.NewFeeAccumulator() + feeHandler := postprocess.NewFeeAccumulator() feeHandler.ProcessTransactionFeeRelayedUserTx(big.NewInt(1000), big.NewInt(100), userTxHashes[0], originalTxHashes[0]) feeHandler.ProcessTransactionFeeRelayedUserTx(big.NewInt(100), big.NewInt(10), userTxHashes[1], originalTxHashes[1]) feeHandler.ProcessTransactionFeeRelayedUserTx(big.NewInt(10), big.NewInt(1), userTxHashes[2], originalTxHashes[2]) @@ -157,6 +156,6 @@ func TestFeeHandler_PartialRevertFeesUserTxs(t *testing.T) { func TestFeeHandler_IsInterfaceNil(t *testing.T) { t.Parallel() - fee, _ := postprocess.NewFeeAccumulator() + fee := postprocess.NewFeeAccumulator() require.False(t, check.IfNil(fee)) } diff --git a/process/block/postprocess/intermediateResults_test.go b/process/block/postprocess/intermediateResults_test.go index d659730575a..b9a0a8e8f83 100644 --- a/process/block/postprocess/intermediateResults_test.go +++ b/process/block/postprocess/intermediateResults_test.go @@ -35,15 +35,15 @@ func createMockPubkeyConverter() *testscommon.PubkeyConverterMock { func createMockArgsNewIntermediateResultsProcessor() ArgsNewIntermediateResultsProcessor { args := ArgsNewIntermediateResultsProcessor{ - Hasher: &hashingMocks.HasherMock{}, - Marshalizer: &mock.MarshalizerMock{}, - Coordinator: mock.NewMultiShardsCoordinatorMock(5), - PubkeyConv: createMockPubkeyConverter(), - Store: &storage.ChainStorerStub{}, - BlockType: block.SmartContractResultBlock, - CurrTxs: &mock.TxForCurrentBlockStub{}, - EconomicsFee: &economicsmocks.EconomicsHandlerStub{}, - EnableEpochsHandler: enableEpochsHandlerMock.NewEnableEpochsHandlerStub(common.KeepExecOrderOnCreatedSCRsFlag), + Hasher: &hashingMocks.HasherMock{}, + Marshalizer: &mock.MarshalizerMock{}, + Coordinator: mock.NewMultiShardsCoordinatorMock(5), + PubkeyConv: createMockPubkeyConverter(), + Store: &storage.ChainStorerStub{}, + BlockType: block.SmartContractResultBlock, + CurrTxs: &mock.TxForCurrentBlockStub{}, + EconomicsFee: &economicsmocks.EconomicsHandlerStub{}, + EnableEpochsHandler: enableEpochsHandlerMock.NewEnableEpochsHandlerStub(common.KeepExecOrderOnCreatedSCRsFlag), TxExecutionOrderHandler: &txExecOrderStub.TxExecutionOrderHandlerStub{}, } diff --git a/process/block/preprocess/transactionsV2_test.go b/process/block/preprocess/transactionsV2_test.go index 50203a1a5ae..9d4fb1cf686 100644 --- a/process/block/preprocess/transactionsV2_test.go +++ b/process/block/preprocess/transactionsV2_test.go @@ -15,9 +15,9 @@ import ( "github.com/multiversx/mx-chain-go/process/mock" "github.com/multiversx/mx-chain-go/storage/txcache" "github.com/multiversx/mx-chain-go/testscommon" + commonMocks "github.com/multiversx/mx-chain-go/testscommon/common" "github.com/multiversx/mx-chain-go/testscommon/economicsmocks" 
"github.com/multiversx/mx-chain-go/testscommon/enableEpochsHandlerMock" - commonMocks "github.com/multiversx/mx-chain-go/testscommon/common" "github.com/multiversx/mx-chain-go/testscommon/hashingMocks" stateMock "github.com/multiversx/mx-chain-go/testscommon/state" storageStubs "github.com/multiversx/mx-chain-go/testscommon/storage" diff --git a/process/block/shardblock.go b/process/block/shardblock.go index 91e2d79d29f..11e62f63ff9 100644 --- a/process/block/shardblock.go +++ b/process/block/shardblock.go @@ -121,6 +121,8 @@ func NewShardProcessor(arguments ArgShardProcessor) (*shardProcessor, error) { processStatusHandler: arguments.CoreComponents.ProcessStatusHandler(), blockProcessingCutoffHandler: arguments.BlockProcessingCutoffHandler, managedPeersHolder: arguments.ManagedPeersHolder, + sentSignaturesTracker: arguments.SentSignaturesTracker, + extraDelayRequestBlockInfo: time.Duration(arguments.Config.EpochStartConfig.ExtraDelayForRequestBlockInfoInMilliseconds) * time.Millisecond, } sp := shardProcessor{ @@ -988,7 +990,12 @@ func (sp *shardProcessor) CommitBlock( sp.updateLastCommittedInDebugger(headerHandler.GetRound()) - errNotCritical := sp.updateCrossShardInfo(processedMetaHdrs) + errNotCritical := sp.checkSentSignaturesAtCommitTime(headerHandler) + if errNotCritical != nil { + log.Debug("checkSentSignaturesBeforeCommitting", "error", errNotCritical.Error()) + } + + errNotCritical = sp.updateCrossShardInfo(processedMetaHdrs) if errNotCritical != nil { log.Debug("updateCrossShardInfo", "error", errNotCritical.Error()) } diff --git a/process/block/shardblockRequest_test.go b/process/block/shardblockRequest_test.go new file mode 100644 index 00000000000..2440c6ecba5 --- /dev/null +++ b/process/block/shardblockRequest_test.go @@ -0,0 +1,584 @@ +package block_test + +import ( + "bytes" + "fmt" + "sync/atomic" + "testing" + "time" + + "github.com/multiversx/mx-chain-core-go/core" + "github.com/multiversx/mx-chain-core-go/data" + "github.com/multiversx/mx-chain-core-go/data/block" + "github.com/stretchr/testify/require" + + blproc "github.com/multiversx/mx-chain-go/process/block" + "github.com/multiversx/mx-chain-go/testscommon" + dataRetrieverMock "github.com/multiversx/mx-chain-go/testscommon/dataRetriever" +) + +type headerData struct { + hash []byte + header data.HeaderHandler +} + +type shardBlockTestData struct { + headerData []*headerData +} + +func TestShardProcessor_RequestMissingFinalityAttestingHeaders(t *testing.T) { + t.Parallel() + + t.Run("missing attesting meta header", func(t *testing.T) { + t.Parallel() + + arguments, requestHandler := shardBlockRequestTestInit(t) + testData := createShardProcessorTestData() + metaChainData := testData[core.MetachainShardId] + numCalls := atomic.Uint32{} + requestHandler.RequestShardHeaderByNonceCalled = func(shardID uint32, nonce uint64) { + require.Fail(t, fmt.Sprintf("should not request shard header by nonce, shardID: %d, nonce: %d", shardID, nonce)) + } + requestHandler.RequestMetaHeaderByNonceCalled = func(nonce uint64) { + attestationNonce := metaChainData.headerData[1].header.GetNonce() + require.Equal(t, attestationNonce, nonce, fmt.Sprintf("nonce should have been %d", attestationNonce)) + numCalls.Add(1) + } + sp, _ := blproc.NewShardProcessor(arguments) + + metaBlockData := metaChainData.headerData[0] + // not adding the confirmation metaBlock to the headers pool means it will be missing and requested + sp.SetHighestHdrNonceForCurrentBlock(core.MetachainShardId, metaBlockData.header.GetNonce()) + res := 
sp.RequestMissingFinalityAttestingHeaders() + time.Sleep(100 * time.Millisecond) + + require.Equal(t, uint32(1), res) + require.Equal(t, uint32(1), numCalls.Load()) + }) + t.Run("no missing attesting meta header", func(t *testing.T) { + t.Parallel() + + arguments, requestHandler := shardBlockRequestTestInit(t) + testData := createShardProcessorTestData() + metaChainData := testData[core.MetachainShardId] + requestHandler.RequestShardHeaderByNonceCalled = func(shardID uint32, nonce uint64) { + require.Fail(t, fmt.Sprintf("should not request shard header by nonce, shardID: %d, nonce: %d", shardID, nonce)) + } + requestHandler.RequestMetaHeaderByNonceCalled = func(nonce uint64) { + require.Fail(t, "should not request meta header by nonce") + } + sp, _ := blproc.NewShardProcessor(arguments) + + headersDataPool := arguments.DataComponents.Datapool().Headers() + require.NotNil(t, headersDataPool) + metaBlockData := metaChainData.headerData[0] + confirmationMetaBlockData := metaChainData.headerData[1] + headersDataPool.AddHeader(confirmationMetaBlockData.hash, confirmationMetaBlockData.header) + sp.SetHighestHdrNonceForCurrentBlock(core.MetachainShardId, metaBlockData.header.GetNonce()) + res := sp.RequestMissingFinalityAttestingHeaders() + time.Sleep(100 * time.Millisecond) + + require.Equal(t, uint32(0), res) + }) +} + +func TestShardProcessor_computeExistingAndRequestMissingMetaHeaders(t *testing.T) { + t.Parallel() + + shard1ID := uint32(1) + t.Run("one referenced metaBlock missing will be requested", func(t *testing.T) { + t.Parallel() + + arguments, requestHandler := shardBlockRequestTestInit(t) + testData := createShardProcessorTestData() + metaChainData := testData[core.MetachainShardId] + shard1Data := testData[shard1ID] + numCalls := atomic.Uint32{} + requestHandler.RequestShardHeaderByNonceCalled = func(shardID uint32, nonce uint64) { + require.Fail(t, fmt.Sprintf("should not request shard header by nonce, shardID: %d, nonce: %d", shardID, nonce)) + } + requestHandler.RequestMetaHeaderByNonceCalled = func(nonce uint64) { + // should only be called when requesting attestation meta header block + require.Fail(t, "should not request meta header by nonce") + } + requestHandler.RequestMetaHeaderCalled = func(hash []byte) { + require.Equal(t, metaChainData.headerData[1].hash, hash) + numCalls.Add(1) + } + sp, _ := blproc.NewShardProcessor(arguments) + + metaBlockData := metaChainData.headerData[0] + sp.SetHighestHdrNonceForCurrentBlock(core.MetachainShardId, metaBlockData.header.GetNonce()) + // not adding the referenced metaBlock to the headers pool means it will be missing and requested + // first of the 2 referenced headers is added, the other will be missing + headersDataPool := arguments.DataComponents.Datapool().Headers() + headersDataPool.AddHeader(metaBlockData.hash, metaBlockData.header) + + blockBeingProcessed := shard1Data.headerData[1].header + shardBlockBeingProcessed := blockBeingProcessed.(*block.Header) + missingHeaders, missingFinalityAttestingHeaders := sp.ComputeExistingAndRequestMissingMetaHeaders(shardBlockBeingProcessed) + time.Sleep(100 * time.Millisecond) + + require.Equal(t, uint32(1), missingHeaders) + require.Equal(t, uint32(0), missingFinalityAttestingHeaders) + require.Equal(t, uint32(1), numCalls.Load()) + }) + t.Run("multiple referenced metaBlocks missing will be requested", func(t *testing.T) { + t.Parallel() + + arguments, requestHandler := shardBlockRequestTestInit(t) + testData := createShardProcessorTestData() + numCalls := atomic.Uint32{} + metaChainData 
:= testData[core.MetachainShardId] + shard1Data := testData[shard1ID] + requestHandler.RequestShardHeaderByNonceCalled = func(shardID uint32, nonce uint64) { + require.Fail(t, fmt.Sprintf("should not request shard header by nonce, shardID: %d, nonce: %d", shardID, nonce)) + } + requestHandler.RequestMetaHeaderByNonceCalled = func(nonce uint64) { + // not yet requesting the attestation metaBlock + require.Fail(t, "should not request meta header by nonce") + } + requestHandler.RequestMetaHeaderCalled = func(hash []byte) { + if !(bytes.Equal(hash, metaChainData.headerData[0].hash) || bytes.Equal(hash, metaChainData.headerData[1].hash)) { + require.Fail(t, "other requests than the expected 2 metaBlocks are not expected") + } + + numCalls.Add(1) + } + sp, _ := blproc.NewShardProcessor(arguments) + metaBlockData := testData[core.MetachainShardId].headerData[0] + // not adding the referenced metaBlocks to the headers pool means they will be missing and requested + sp.SetHighestHdrNonceForCurrentBlock(core.MetachainShardId, metaBlockData.header.GetNonce()) + + blockBeingProcessed := shard1Data.headerData[1].header + shardBlockBeingProcessed := blockBeingProcessed.(*block.Header) + missingHeaders, missingFinalityAttestingHeaders := sp.ComputeExistingAndRequestMissingMetaHeaders(shardBlockBeingProcessed) + time.Sleep(100 * time.Millisecond) + + require.Equal(t, uint32(2), missingHeaders) + require.Equal(t, uint32(0), missingFinalityAttestingHeaders) + require.Equal(t, uint32(2), numCalls.Load()) + }) + t.Run("all referenced metaBlocks existing with missing attestation, will request the attestation metaBlock", func(t *testing.T) { + t.Parallel() + + arguments, requestHandler := shardBlockRequestTestInit(t) + testData := createShardProcessorTestData() + numCallsMissing := atomic.Uint32{} + numCallsAttestation := atomic.Uint32{} + metaChainData := testData[core.MetachainShardId] + shard1Data := testData[shard1ID] + requestHandler.RequestShardHeaderByNonceCalled = func(shardID uint32, nonce uint64) { + require.Fail(t, fmt.Sprintf("should not request shard header by nonce, shardID: %d, nonce: %d", shardID, nonce)) + } + requestHandler.RequestMetaHeaderByNonceCalled = func(nonce uint64) { + // the attestation metaBlock is expected to be requested by nonce (last referenced nonce + 1) + require.Equal(t, metaChainData.headerData[1].header.GetNonce()+1, nonce) + numCallsAttestation.Add(1) + } + requestHandler.RequestMetaHeaderCalled = func(hash []byte) { + if !(bytes.Equal(hash, metaChainData.headerData[0].hash) || bytes.Equal(hash, metaChainData.headerData[1].hash)) { + require.Fail(t, "other requests than the expected 2 metaBlocks are not expected") + } + + numCallsMissing.Add(1) + } + sp, _ := blproc.NewShardProcessor(arguments) + // both referenced metaBlocks are added to the headers pool, so only the attestation metaBlock will be missing and requested + headersDataPool := arguments.DataComponents.Datapool().Headers() + headersDataPool.AddHeader(metaChainData.headerData[0].hash, metaChainData.headerData[0].header) + headersDataPool.AddHeader(metaChainData.headerData[1].hash, metaChainData.headerData[1].header) + + blockBeingProcessed := shard1Data.headerData[1].header + shardBlockBeingProcessed := blockBeingProcessed.(*block.Header) + missingHeaders, missingFinalityAttestingHeaders := sp.ComputeExistingAndRequestMissingMetaHeaders(shardBlockBeingProcessed) + time.Sleep(100 * time.Millisecond) + + require.Equal(t, uint32(0), missingHeaders) + require.Equal(t, uint32(1), missingFinalityAttestingHeaders) + require.Equal(t, uint32(0), numCallsMissing.Load()) + require.Equal(t,
uint32(1), numCallsAttestation.Load()) + }) + t.Run("all referenced metaBlocks existing and existing attestation metaBlock will not request", func(t *testing.T) { + t.Parallel() + + arguments, requestHandler := shardBlockRequestTestInit(t) + testData := createShardProcessorTestData() + numCallsMissing := atomic.Uint32{} + numCallsAttestation := atomic.Uint32{} + shard1Data := testData[shard1ID] + metaChainData := testData[core.MetachainShardId] + requestHandler.RequestShardHeaderByNonceCalled = func(shardID uint32, nonce uint64) { + require.Fail(t, fmt.Sprintf("should not request shard header by nonce, shardID: %d, nonce: %d", shardID, nonce)) + } + requestHandler.RequestMetaHeaderByNonceCalled = func(nonce uint64) { + numCallsAttestation.Add(1) + } + requestHandler.RequestMetaHeaderCalled = func(hash []byte) { + numCallsMissing.Add(1) + } + sp, _ := blproc.NewShardProcessor(arguments) + // all referenced metaBlocks and the attestation metaBlock are added to the headers pool, so no requests are expected + headersDataPool := arguments.DataComponents.Datapool().Headers() + headersDataPool.AddHeader(metaChainData.headerData[0].hash, metaChainData.headerData[0].header) + headersDataPool.AddHeader(metaChainData.headerData[1].hash, metaChainData.headerData[1].header) + attestationMetaBlock := &block.MetaBlock{ + Nonce: 102, + Round: 102, + PrevHash: metaChainData.headerData[1].hash, + ShardInfo: []block.ShardData{}, + } + attestationMetaBlockHash := []byte("attestationHash") + + headersDataPool.AddHeader(attestationMetaBlockHash, attestationMetaBlock) + + blockBeingProcessed := shard1Data.headerData[1].header + shardBlockBeingProcessed := blockBeingProcessed.(*block.Header) + missingHeaders, missingFinalityAttestingHeaders := sp.ComputeExistingAndRequestMissingMetaHeaders(shardBlockBeingProcessed) + time.Sleep(100 * time.Millisecond) + + require.Equal(t, uint32(0), missingHeaders) + require.Equal(t, uint32(0), missingFinalityAttestingHeaders) + require.Equal(t, uint32(0), numCallsMissing.Load()) + require.Equal(t, uint32(0), numCallsAttestation.Load()) + }) +} + +func TestShardProcessor_receivedMetaBlock(t *testing.T) { + t.Parallel() + + t.Run("received non referenced metaBlock, while still having missing referenced metaBlocks", func(t *testing.T) { + t.Parallel() + + arguments, requestHandler := shardBlockRequestTestInit(t) + testData := createShardProcessorTestData() + sp, _ := blproc.NewShardProcessor(arguments) + hdrsForBlock := sp.GetHdrForBlock() + + firstMissingMetaBlockData := testData[core.MetachainShardId].headerData[0] + secondMissingMetaBlockData := testData[core.MetachainShardId].headerData[1] + + requestHandler.RequestMetaHeaderCalled = func(hash []byte) { + require.Fail(t, "no requests expected") + } + requestHandler.RequestMetaHeaderByNonceCalled = func(nonce uint64) { + require.Fail(t, "no requests expected") + } + + highestHeaderNonce := firstMissingMetaBlockData.header.GetNonce() - 1 + hdrsForBlock.SetNumMissingHdrs(2) + hdrsForBlock.SetNumMissingFinalityAttestingHdrs(0) + hdrsForBlock.SetHighestHdrNonce(core.MetachainShardId, highestHeaderNonce) + hdrsForBlock.SetHdrHashAndInfo(string(firstMissingMetaBlockData.hash), + &blproc.HdrInfo{ + UsedInBlock: true, + Hdr: nil, + }) + hdrsForBlock.SetHdrHashAndInfo(string(secondMissingMetaBlockData.hash), + &blproc.HdrInfo{ + UsedInBlock: true, + Hdr: nil, + }) + otherMetaBlock := &block.MetaBlock{ + Nonce: 102, + Round: 102, + PrevHash: []byte("other meta block prev hash"), + } + + otherMetaBlockHash := []byte("other meta block hash") +
sp.ReceivedMetaBlock(otherMetaBlock, otherMetaBlockHash) + time.Sleep(100 * time.Millisecond) + + require.Equal(t, uint32(2), hdrsForBlock.GetMissingHdrs()) + require.Equal(t, uint32(0), hdrsForBlock.GetMissingFinalityAttestingHdrs()) + highestHeaderNonces := hdrsForBlock.GetHighestHdrNonce() + require.Equal(t, highestHeaderNonce, highestHeaderNonces[core.MetachainShardId]) + }) + t.Run("received missing referenced metaBlock, other referenced metaBlock still missing", func(t *testing.T) { + t.Parallel() + + arguments, requestHandler := shardBlockRequestTestInit(t) + testData := createShardProcessorTestData() + sp, _ := blproc.NewShardProcessor(arguments) + hdrsForBlock := sp.GetHdrForBlock() + + firstMissingMetaBlockData := testData[core.MetachainShardId].headerData[0] + secondMissingMetaBlockData := testData[core.MetachainShardId].headerData[1] + + requestHandler.RequestMetaHeaderCalled = func(hash []byte) { + require.Fail(t, "no requests expected") + } + requestHandler.RequestMetaHeaderByNonceCalled = func(nonce uint64) { + require.Fail(t, "no requests expected") + } + + highestHeaderNonce := firstMissingMetaBlockData.header.GetNonce() - 1 + hdrsForBlock.SetNumMissingHdrs(2) + hdrsForBlock.SetNumMissingFinalityAttestingHdrs(0) + hdrsForBlock.SetHighestHdrNonce(core.MetachainShardId, highestHeaderNonce) + hdrsForBlock.SetHdrHashAndInfo(string(firstMissingMetaBlockData.hash), + &blproc.HdrInfo{ + UsedInBlock: true, + Hdr: nil, + }) + hdrsForBlock.SetHdrHashAndInfo(string(secondMissingMetaBlockData.hash), + &blproc.HdrInfo{ + UsedInBlock: true, + Hdr: nil, + }) + + sp.ReceivedMetaBlock(firstMissingMetaBlockData.header, firstMissingMetaBlockData.hash) + time.Sleep(100 * time.Millisecond) + + require.Equal(t, uint32(1), hdrsForBlock.GetMissingHdrs()) + require.Equal(t, uint32(0), hdrsForBlock.GetMissingFinalityAttestingHdrs()) + highestHeaderNonces := hdrsForBlock.GetHighestHdrNonce() + require.Equal(t, firstMissingMetaBlockData.header.GetNonce(), highestHeaderNonces[core.MetachainShardId]) + }) + t.Run("received non missing referenced metaBlock", func(t *testing.T) { + t.Parallel() + + arguments, requestHandler := shardBlockRequestTestInit(t) + testData := createShardProcessorTestData() + sp, _ := blproc.NewShardProcessor(arguments) + hdrsForBlock := sp.GetHdrForBlock() + + notMissingReferencedMetaBlockData := testData[core.MetachainShardId].headerData[0] + missingMetaBlockData := testData[core.MetachainShardId].headerData[1] + + requestHandler.RequestMetaHeaderCalled = func(hash []byte) { + require.Fail(t, "no requests expected") + } + requestHandler.RequestMetaHeaderByNonceCalled = func(nonce uint64) { + require.Fail(t, "no requests expected") + } + + highestHeaderNonce := notMissingReferencedMetaBlockData.header.GetNonce() - 1 + hdrsForBlock.SetNumMissingHdrs(1) + hdrsForBlock.SetNumMissingFinalityAttestingHdrs(0) + hdrsForBlock.SetHighestHdrNonce(core.MetachainShardId, highestHeaderNonce) + hdrsForBlock.SetHdrHashAndInfo(string(notMissingReferencedMetaBlockData.hash), + &blproc.HdrInfo{ + UsedInBlock: true, + Hdr: notMissingReferencedMetaBlockData.header, + }) + hdrsForBlock.SetHdrHashAndInfo(string(missingMetaBlockData.hash), + &blproc.HdrInfo{ + UsedInBlock: true, + Hdr: nil, + }) + + headersDataPool := arguments.DataComponents.Datapool().Headers() + require.NotNil(t, headersDataPool) + headersDataPool.AddHeader(notMissingReferencedMetaBlockData.hash, notMissingReferencedMetaBlockData.header) + + sp.ReceivedMetaBlock(notMissingReferencedMetaBlockData.header, 
notMissingReferencedMetaBlockData.hash) + time.Sleep(100 * time.Millisecond) + + require.Equal(t, uint32(1), hdrsForBlock.GetMissingHdrs()) + require.Equal(t, uint32(0), hdrsForBlock.GetMissingFinalityAttestingHdrs()) + hdrsForBlockHighestNonces := hdrsForBlock.GetHighestHdrNonce() + require.Equal(t, highestHeaderNonce, hdrsForBlockHighestNonces[core.MetachainShardId]) + }) + t.Run("received missing attestation metaBlock", func(t *testing.T) { + t.Parallel() + + arguments, requestHandler := shardBlockRequestTestInit(t) + testData := createShardProcessorTestData() + sp, _ := blproc.NewShardProcessor(arguments) + hdrsForBlock := sp.GetHdrForBlock() + + referencedMetaBlock := testData[core.MetachainShardId].headerData[0] + lastReferencedMetaBlock := testData[core.MetachainShardId].headerData[1] + attestationMetaBlockHash := []byte("attestation meta block hash") + attestationMetaBlock := &block.MetaBlock{ + Nonce: lastReferencedMetaBlock.header.GetNonce() + 1, + Round: lastReferencedMetaBlock.header.GetRound() + 1, + PrevHash: lastReferencedMetaBlock.hash, + } + + requestHandler.RequestMetaHeaderCalled = func(hash []byte) { + require.Fail(t, "no requests expected") + } + requestHandler.RequestMetaHeaderByNonceCalled = func(nonce uint64) { + require.Fail(t, "no requests expected") + } + + hdrsForBlock.SetNumMissingHdrs(0) + hdrsForBlock.SetNumMissingFinalityAttestingHdrs(1) + hdrsForBlock.SetHighestHdrNonce(core.MetachainShardId, lastReferencedMetaBlock.header.GetNonce()) + hdrsForBlock.SetHdrHashAndInfo(string(referencedMetaBlock.hash), + &blproc.HdrInfo{ + UsedInBlock: true, + Hdr: referencedMetaBlock.header, + }) + hdrsForBlock.SetHdrHashAndInfo(string(lastReferencedMetaBlock.hash), + &blproc.HdrInfo{ + UsedInBlock: true, + Hdr: lastReferencedMetaBlock.header, + }) + + headersDataPool := arguments.DataComponents.Datapool().Headers() + require.NotNil(t, headersDataPool) + headersDataPool.AddHeader(referencedMetaBlock.hash, referencedMetaBlock.header) + headersDataPool.AddHeader(lastReferencedMetaBlock.hash, lastReferencedMetaBlock.header) + headersDataPool.AddHeader(attestationMetaBlockHash, attestationMetaBlock) + wg := startWaitingForAllHeadersReceivedSignal(t, sp) + + sp.ReceivedMetaBlock(attestationMetaBlock, attestationMetaBlockHash) + wg.Wait() + + require.Equal(t, uint32(0), hdrsForBlock.GetMissingHdrs()) + require.Equal(t, uint32(0), hdrsForBlock.GetMissingFinalityAttestingHdrs()) + hdrsForBlockHighestNonces := hdrsForBlock.GetHighestHdrNonce() + require.Equal(t, lastReferencedMetaBlock.header.GetNonce(), hdrsForBlockHighestNonces[core.MetachainShardId]) + }) +} + +func shardBlockRequestTestInit(t *testing.T) (blproc.ArgShardProcessor, *testscommon.RequestHandlerStub) { + coreComponents, dataComponents, bootstrapComponents, statusComponents := createComponentHolderMocks() + poolMock := dataRetrieverMock.NewPoolsHolderMock() + dataComponents.DataPool = poolMock + arguments := CreateMockArguments(coreComponents, dataComponents, bootstrapComponents, statusComponents) + poolsHolderAsInterface := arguments.DataComponents.Datapool() + poolsHolder, ok := poolsHolderAsInterface.(*dataRetrieverMock.PoolsHolderMock) + require.True(t, ok) + + headersPoolStub := createPoolsHolderForHeaderRequests() + poolsHolder.SetHeadersPool(headersPoolStub) + + requestHandler, ok := arguments.ArgBaseProcessor.RequestHandler.(*testscommon.RequestHandlerStub) + require.True(t, ok) + return arguments, requestHandler +} + +func createShardProcessorTestData() map[uint32]*shardBlockTestData { + // shard 0 miniblocks 
+ mbHash1 := []byte("mb hash 1") + mbHash2 := []byte("mb hash 2") + mbHash3 := []byte("mb hash 3") + + // shard 1 miniblocks + mbHash4 := []byte("mb hash 4") + mbHash5 := []byte("mb hash 5") + mbHash6 := []byte("mb hash 6") + + prevMetaBlockHash := []byte("prev meta block hash") + metaBlockHash := []byte("meta block hash") + metaConfirmationHash := []byte("confirmation meta block hash") + + shard0Block0Hash := []byte("shard 0 block 0 hash") + shard0Block1Hash := []byte("shard 0 block 1 hash") + shard0Block2Hash := []byte("shard 0 block 2 hash") + + shard1Block0Hash := []byte("shard 1 block 0 hash") + shard1Block1Hash := []byte("shard 1 block 1 hash") + shard1Block2Hash := []byte("shard 1 block 2 hash") + + metaBlock := &block.MetaBlock{ + Nonce: 100, + Round: 100, + PrevHash: prevMetaBlockHash, + ShardInfo: []block.ShardData{ + { + ShardID: 0, + HeaderHash: shard0Block1Hash, + PrevHash: shard0Block0Hash, + ShardMiniBlockHeaders: []block.MiniBlockHeader{ + {Hash: mbHash1, SenderShardID: 0, ReceiverShardID: 1}, + {Hash: mbHash2, SenderShardID: 0, ReceiverShardID: 1}, + {Hash: mbHash3, SenderShardID: 0, ReceiverShardID: 1}, + }, + }, + }, + } + metaConfirmationBlock := &block.MetaBlock{ + Nonce: 101, + Round: 101, + PrevHash: metaBlockHash, + ShardInfo: []block.ShardData{}, + } + + shard0Block1 := &block.Header{ + ShardID: 0, + PrevHash: shard0Block0Hash, + Nonce: 98, + Round: 98, + MiniBlockHeaders: []block.MiniBlockHeader{ + {Hash: mbHash1, SenderShardID: 0, ReceiverShardID: 1}, + {Hash: mbHash2, SenderShardID: 0, ReceiverShardID: 1}, + {Hash: mbHash3, SenderShardID: 0, ReceiverShardID: 1}, + }, + } + + shard0Block2 := &block.Header{ + ShardID: 0, + PrevHash: shard0Block1Hash, + Nonce: 99, + Round: 99, + MiniBlockHeaders: []block.MiniBlockHeader{}, + } + + shard1Block1 := &block.Header{ + ShardID: 1, + PrevHash: shard1Block0Hash, + MetaBlockHashes: [][]byte{prevMetaBlockHash}, + Nonce: 102, + Round: 102, + MiniBlockHeaders: []block.MiniBlockHeader{ + {Hash: mbHash4, SenderShardID: 0, ReceiverShardID: 1}, + {Hash: mbHash5, SenderShardID: 0, ReceiverShardID: 1}, + {Hash: mbHash6, SenderShardID: 0, ReceiverShardID: 1}, + }, + } + + shard1Block2 := &block.Header{ + ShardID: 1, + PrevHash: shard1Block1Hash, + MetaBlockHashes: [][]byte{metaBlockHash, metaConfirmationHash}, + Nonce: 103, + Round: 103, + MiniBlockHeaders: []block.MiniBlockHeader{}, + } + + sbd := map[uint32]*shardBlockTestData{ + 0: { + headerData: []*headerData{ + { + hash: shard0Block1Hash, + header: shard0Block1, + }, + { + hash: shard0Block2Hash, + header: shard0Block2, + }, + }, + }, + 1: { + headerData: []*headerData{ + { + hash: shard1Block1Hash, + header: shard1Block1, + }, + { + hash: shard1Block2Hash, + header: shard1Block2, + }, + }, + }, + core.MetachainShardId: { + headerData: []*headerData{ + { + hash: metaBlockHash, + header: metaBlock, + }, + { + hash: metaConfirmationHash, + header: metaConfirmationBlock, + }, + }, + }, + } + + return sbd +} diff --git a/process/block/shardblock_test.go b/process/block/shardblock_test.go index 4b9b95a8c56..39797f8db0c 100644 --- a/process/block/shardblock_test.go +++ b/process/block/shardblock_test.go @@ -22,6 +22,10 @@ import ( "github.com/multiversx/mx-chain-core-go/data/transaction" "github.com/multiversx/mx-chain-core-go/hashing" "github.com/multiversx/mx-chain-core-go/marshal" + vmcommon "github.com/multiversx/mx-chain-vm-common-go" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/multiversx/mx-chain-go/common" 
"github.com/multiversx/mx-chain-go/dataRetriever" "github.com/multiversx/mx-chain-go/dataRetriever/blockchain" @@ -45,9 +49,6 @@ import ( stateMock "github.com/multiversx/mx-chain-go/testscommon/state" statusHandlerMock "github.com/multiversx/mx-chain-go/testscommon/statusHandler" storageStubs "github.com/multiversx/mx-chain-go/testscommon/storage" - vmcommon "github.com/multiversx/mx-chain-vm-common-go" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" ) const MaxGasLimitPerBlock = uint64(100000) @@ -1677,21 +1678,6 @@ func TestShardProcessor_CheckAndRequestIfMetaHeadersMissingShouldErr(t *testing. assert.Equal(t, err, process.ErrTimeIsOut) } -// -------- requestMissingFinalityAttestingHeaders -func TestShardProcessor_RequestMissingFinalityAttestingHeaders(t *testing.T) { - t.Parallel() - - tdp := dataRetrieverMock.NewPoolsHolderMock() - coreComponents, dataComponents, bootstrapComponents, statusComponents := createComponentHolderMocks() - dataComponents.DataPool = tdp - arguments := CreateMockArguments(coreComponents, dataComponents, bootstrapComponents, statusComponents) - sp, _ := blproc.NewShardProcessor(arguments) - - sp.SetHighestHdrNonceForCurrentBlock(core.MetachainShardId, 1) - res := sp.RequestMissingFinalityAttestingHeaders() - assert.Equal(t, res > 0, true) -} - // --------- verifyIncludedMetaBlocksFinality func TestShardProcessor_CheckMetaHeadersValidityAndFinalityShouldPass(t *testing.T) { t.Parallel() @@ -2048,7 +2034,7 @@ func TestShardProcessor_CommitBlockOkValsShouldWork(t *testing.T) { hdr := &block.Header{ Nonce: 1, Round: 1, - PubKeysBitmap: rootHash, + PubKeysBitmap: []byte{0b11111111}, PrevHash: hdrHash, Signature: rootHash, RootHash: rootHash, @@ -2121,6 +2107,12 @@ func TestShardProcessor_CommitBlockOkValsShouldWork(t *testing.T) { return &block.MetaBlock{}, []byte("hash"), nil } arguments.BlockTracker = blockTrackerMock + resetCountersForManagedBlockSignerCalled := false + arguments.SentSignaturesTracker = &testscommon.SentSignatureTrackerStub{ + ResetCountersForManagedBlockSignerCalled: func(signerPk []byte) { + resetCountersForManagedBlockSignerCalled = true + }, + } sp, _ := blproc.NewShardProcessor(arguments) debuggerMethodWasCalled := false @@ -2144,6 +2136,7 @@ func TestShardProcessor_CommitBlockOkValsShouldWork(t *testing.T) { assert.True(t, forkDetectorAddCalled) assert.Equal(t, hdrHash, blkc.GetCurrentBlockHeaderHash()) assert.True(t, debuggerMethodWasCalled) + assert.True(t, resetCountersForManagedBlockSignerCalled) // this should sleep as there is an async call to display current hdr and block in CommitBlock time.Sleep(time.Second) } diff --git a/process/coordinator/process_test.go b/process/coordinator/process_test.go index 0508620283e..e23c8f8f1ec 100644 --- a/process/coordinator/process_test.go +++ b/process/coordinator/process_test.go @@ -566,14 +566,14 @@ func createPreProcessorContainer() process.PreProcessorsContainer { func createInterimProcessorContainer() process.IntermediateProcessorContainer { argsFactory := shard.ArgsNewIntermediateProcessorsContainerFactory{ - ShardCoordinator: mock.NewMultiShardsCoordinatorMock(5), - Marshalizer: &mock.MarshalizerMock{}, - Hasher: &hashingMocks.HasherMock{}, - PubkeyConverter: createMockPubkeyConverter(), - Store: initStore(), - PoolsHolder: initDataPool([]byte("test_hash1")), - EconomicsFee: &economicsmocks.EconomicsHandlerStub{}, - EnableEpochsHandler: enableEpochsHandlerMock.NewEnableEpochsHandlerStub(common.KeepExecOrderOnCreatedSCRsFlag), + ShardCoordinator: 
mock.NewMultiShardsCoordinatorMock(5), + Marshalizer: &mock.MarshalizerMock{}, + Hasher: &hashingMocks.HasherMock{}, + PubkeyConverter: createMockPubkeyConverter(), + Store: initStore(), + PoolsHolder: initDataPool([]byte("test_hash1")), + EconomicsFee: &economicsmocks.EconomicsHandlerStub{}, + EnableEpochsHandler: enableEpochsHandlerMock.NewEnableEpochsHandlerStub(common.KeepExecOrderOnCreatedSCRsFlag), TxExecutionOrderHandler: &commonMock.TxExecutionOrderHandlerStub{}, } preFactory, _ := shard.NewIntermediateProcessorsContainerFactory(argsFactory) @@ -2210,14 +2210,14 @@ func TestTransactionCoordinator_VerifyCreatedBlockTransactionsNilOrMiss(t *testi tdp := initDataPool(txHash) shardCoordinator := mock.NewMultiShardsCoordinatorMock(5) argsFactory := shard.ArgsNewIntermediateProcessorsContainerFactory{ - ShardCoordinator: shardCoordinator, - Marshalizer: &mock.MarshalizerMock{}, - Hasher: &hashingMocks.HasherMock{}, - PubkeyConverter: createMockPubkeyConverter(), - Store: &storageStubs.ChainStorerStub{}, - PoolsHolder: tdp, - EconomicsFee: &economicsmocks.EconomicsHandlerStub{}, - EnableEpochsHandler: enableEpochsHandlerMock.NewEnableEpochsHandlerStub(common.KeepExecOrderOnCreatedSCRsFlag), + ShardCoordinator: shardCoordinator, + Marshalizer: &mock.MarshalizerMock{}, + Hasher: &hashingMocks.HasherMock{}, + PubkeyConverter: createMockPubkeyConverter(), + Store: &storageStubs.ChainStorerStub{}, + PoolsHolder: tdp, + EconomicsFee: &economicsmocks.EconomicsHandlerStub{}, + EnableEpochsHandler: enableEpochsHandlerMock.NewEnableEpochsHandlerStub(common.KeepExecOrderOnCreatedSCRsFlag), TxExecutionOrderHandler: &commonMock.TxExecutionOrderHandlerStub{}, } preFactory, _ := shard.NewIntermediateProcessorsContainerFactory(argsFactory) @@ -2278,7 +2278,7 @@ func TestTransactionCoordinator_VerifyCreatedBlockTransactionsOk(t *testing.T) { return MaxGasLimitPerBlock }, }, - EnableEpochsHandler: enableEpochsHandlerMock.NewEnableEpochsHandlerStub(common.KeepExecOrderOnCreatedSCRsFlag), + EnableEpochsHandler: enableEpochsHandlerMock.NewEnableEpochsHandlerStub(common.KeepExecOrderOnCreatedSCRsFlag), TxExecutionOrderHandler: &commonMock.TxExecutionOrderHandlerStub{}, } interFactory, _ := shard.NewIntermediateProcessorsContainerFactory(argsFactory) diff --git a/process/economics/builtInFunctionsCost.go b/process/economics/builtInFunctionsCost.go deleted file mode 100644 index f784b5f2332..00000000000 --- a/process/economics/builtInFunctionsCost.go +++ /dev/null @@ -1,177 +0,0 @@ -package economics - -import ( - "github.com/mitchellh/mapstructure" - "github.com/multiversx/mx-chain-core-go/core" - "github.com/multiversx/mx-chain-core-go/core/check" - "github.com/multiversx/mx-chain-core-go/data" - "github.com/multiversx/mx-chain-go/common" - "github.com/multiversx/mx-chain-go/process" -) - -// ArgsBuiltInFunctionCost holds all components that are needed to create a new instance of builtInFunctionsCost -type ArgsBuiltInFunctionCost struct { - GasSchedule core.GasScheduleNotifier - ArgsParser process.ArgumentsParser -} - -type builtInFunctionsCost struct { - gasConfig *process.GasCost - specialBuiltInFunctions map[string]struct{} - argsParser process.ArgumentsParser -} - -// NewBuiltInFunctionsCost will create a new instance of builtInFunctionsCost -func NewBuiltInFunctionsCost(args *ArgsBuiltInFunctionCost) (*builtInFunctionsCost, error) { - if args == nil { - return nil, process.ErrNilArgsBuiltInFunctionsConstHandler - } - if check.IfNil(args.ArgsParser) { - return nil, process.ErrNilArgumentParser - } - if 
check.IfNil(args.GasSchedule) { - return nil, process.ErrNilGasSchedule - } - - bs := &builtInFunctionsCost{ - argsParser: args.ArgsParser, - } - - bs.initSpecialBuiltInFunctionCostMap() - - var err error - bs.gasConfig, err = createGasConfig(args.GasSchedule.LatestGasSchedule()) - if err != nil { - return nil, err - } - - args.GasSchedule.RegisterNotifyHandler(bs) - - return bs, nil -} - -func (bc *builtInFunctionsCost) initSpecialBuiltInFunctionCostMap() { - bc.specialBuiltInFunctions = map[string]struct{}{ - core.BuiltInFunctionClaimDeveloperRewards: {}, - core.BuiltInFunctionChangeOwnerAddress: {}, - core.BuiltInFunctionSetUserName: {}, - core.BuiltInFunctionSaveKeyValue: {}, - core.BuiltInFunctionESDTTransfer: {}, - core.BuiltInFunctionESDTBurn: {}, - core.BuiltInFunctionESDTLocalBurn: {}, - core.BuiltInFunctionESDTLocalMint: {}, - core.BuiltInFunctionESDTNFTAddQuantity: {}, - core.BuiltInFunctionESDTNFTBurn: {}, - core.BuiltInFunctionESDTNFTCreate: {}, - } -} - -// GasScheduleChange is called when gas schedule is changed, thus all contracts must be updated -func (bc *builtInFunctionsCost) GasScheduleChange(gasSchedule map[string]map[string]uint64) { - newGasConfig, err := createGasConfig(gasSchedule) - if err != nil { - return - } - - bc.gasConfig = newGasConfig -} - -// ComputeBuiltInCost will compute built-in function cost -func (bc *builtInFunctionsCost) ComputeBuiltInCost(tx data.TransactionWithFeeHandler) uint64 { - function, arguments, err := bc.argsParser.ParseCallData(string(tx.GetData())) - if err != nil { - return 0 - } - - switch function { - case core.BuiltInFunctionClaimDeveloperRewards: - return bc.gasConfig.BuiltInCost.ClaimDeveloperRewards - case core.BuiltInFunctionChangeOwnerAddress: - return bc.gasConfig.BuiltInCost.ChangeOwnerAddress - case core.BuiltInFunctionSetUserName: - return bc.gasConfig.BuiltInCost.SaveUserName - case core.BuiltInFunctionSaveKeyValue: - return bc.gasConfig.BuiltInCost.SaveKeyValue - case core.BuiltInFunctionESDTTransfer: - return bc.gasConfig.BuiltInCost.ESDTTransfer - case core.BuiltInFunctionESDTBurn: - return bc.gasConfig.BuiltInCost.ESDTBurn - case core.BuiltInFunctionESDTLocalBurn: - return bc.gasConfig.BuiltInCost.ESDTLocalBurn - case core.BuiltInFunctionESDTLocalMint: - return bc.gasConfig.BuiltInCost.ESDTLocalMint - case core.BuiltInFunctionESDTNFTAddQuantity: - return bc.gasConfig.BuiltInCost.ESDTNFTAddQuantity - case core.BuiltInFunctionESDTNFTBurn: - return bc.gasConfig.BuiltInCost.ESDTNFTBurn - case core.BuiltInFunctionESDTNFTCreate: - costStorage := calculateLenOfArguments(arguments) * bc.gasConfig.BaseOperationCost.StorePerByte - return bc.gasConfig.BuiltInCost.ESDTNFTCreate + costStorage - case core.BuiltInFunctionSetGuardian: - return bc.gasConfig.BuiltInCost.SetGuardian - case core.BuiltInFunctionGuardAccount: - return bc.gasConfig.BuiltInCost.GuardAccount - case core.BuiltInFunctionUnGuardAccount: - return bc.gasConfig.BuiltInCost.UnGuardAccount - default: - return 0 - } -} - -func calculateLenOfArguments(arguments [][]byte) uint64 { - totalLen := uint64(0) - for _, arg := range arguments { - totalLen += uint64(len(arg)) - } - - return totalLen -} - -// IsBuiltInFuncCall will check is the provided transaction is a build in function call -func (bc *builtInFunctionsCost) IsBuiltInFuncCall(tx data.TransactionWithFeeHandler) bool { - function, arguments, err := bc.argsParser.ParseCallData(string(tx.GetData())) - if err != nil { - return false - } - - _, isSpecialBuiltIn := bc.specialBuiltInFunctions[function] - isSCCallAfter 
:= core.IsSmartContractAddress(tx.GetRcvAddr()) && len(arguments) > core.MinLenArgumentsESDTTransfer - - return isSpecialBuiltIn && !isSCCallAfter -} - -// IsInterfaceNil returns true if underlying object is nil -func (bc *builtInFunctionsCost) IsInterfaceNil() bool { - return bc == nil -} - -func createGasConfig(gasMap map[string]map[string]uint64) (*process.GasCost, error) { - baseOps := &process.BaseOperationCost{} - err := mapstructure.Decode(gasMap[common.BaseOperationCost], baseOps) - if err != nil { - return nil, err - } - - err = check.ForZeroUintFields(*baseOps) - if err != nil { - return nil, err - } - - builtInOps := &process.BuiltInCost{} - err = mapstructure.Decode(gasMap[common.BuiltInCost], builtInOps) - if err != nil { - return nil, err - } - - err = check.ForZeroUintFields(*builtInOps) - if err != nil { - return nil, err - } - - gasCost := process.GasCost{ - BaseOperationCost: *baseOps, - BuiltInCost: *builtInOps, - } - - return &gasCost, nil -} diff --git a/process/economics/builtInFunctionsCost_test.go b/process/economics/builtInFunctionsCost_test.go deleted file mode 100644 index befcca25912..00000000000 --- a/process/economics/builtInFunctionsCost_test.go +++ /dev/null @@ -1,80 +0,0 @@ -package economics_test - -import ( - "testing" - - "github.com/multiversx/mx-chain-core-go/core/check" - "github.com/multiversx/mx-chain-go/process" - "github.com/multiversx/mx-chain-go/process/economics" - "github.com/multiversx/mx-chain-go/process/mock" - "github.com/multiversx/mx-chain-go/testscommon" - "github.com/multiversx/mx-chain-go/vm/systemSmartContracts/defaults" - "github.com/stretchr/testify/require" -) - -func TestNewBuiltInFunctionsCost(t *testing.T) { - t.Parallel() - - tests := []struct { - name string - args func() *economics.ArgsBuiltInFunctionCost - exErr error - }{ - { - name: "NilArguments", - args: func() *economics.ArgsBuiltInFunctionCost { - return nil - }, - exErr: process.ErrNilArgsBuiltInFunctionsConstHandler, - }, - { - name: "NilArgumentsParser", - args: func() *economics.ArgsBuiltInFunctionCost { - return &economics.ArgsBuiltInFunctionCost{ - ArgsParser: nil, - GasSchedule: testscommon.NewGasScheduleNotifierMock(nil), - } - }, - exErr: process.ErrNilArgumentParser, - }, - { - name: "NilGasScheduleHandler", - args: func() *economics.ArgsBuiltInFunctionCost { - return &economics.ArgsBuiltInFunctionCost{ - ArgsParser: &mock.ArgumentParserMock{}, - GasSchedule: nil, - } - }, - exErr: process.ErrNilGasSchedule, - }, - { - name: "ShouldWork", - args: func() *economics.ArgsBuiltInFunctionCost { - return &economics.ArgsBuiltInFunctionCost{ - ArgsParser: &mock.ArgumentParserMock{}, - GasSchedule: testscommon.NewGasScheduleNotifierMock(defaults.FillGasMapInternal(map[string]map[string]uint64{}, 1)), - } - }, - exErr: nil, - }, - } - - for _, test := range tests { - _, err := economics.NewBuiltInFunctionsCost(test.args()) - require.Equal(t, test.exErr, err) - } -} - -func TestNewBuiltInFunctionsCost_GasConfig(t *testing.T) { - t.Parallel() - - args := &economics.ArgsBuiltInFunctionCost{ - ArgsParser: &mock.ArgumentParserMock{}, - GasSchedule: testscommon.NewGasScheduleNotifierMock(defaults.FillGasMapInternal(map[string]map[string]uint64{}, 0)), - } - - builtInCostHandler, err := economics.NewBuiltInFunctionsCost(args) - require.NotNil(t, err) - require.Nil(t, builtInCostHandler) - require.True(t, check.IfNil(builtInCostHandler)) -} diff --git a/process/economics/economicsData.go b/process/economics/economicsData.go index 60658b19bf2..5b7ce045237 100644 --- 
a/process/economics/economicsData.go +++ b/process/economics/economicsData.go @@ -27,31 +27,26 @@ var log = logger.GetOrCreate("process/economics") type economicsData struct { *gasConfigHandler *rewardsConfigHandler - gasPriceModifier float64 - minInflation float64 - yearSettings map[uint32]*config.YearSetting - mutYearSettings sync.RWMutex - statusHandler core.AppStatusHandler - builtInFunctionsCostHandler BuiltInFunctionsCostHandler - enableEpochsHandler common.EnableEpochsHandler - txVersionHandler process.TxVersionCheckerHandler - mut sync.RWMutex + gasPriceModifier float64 + minInflation float64 + yearSettings map[uint32]*config.YearSetting + mutYearSettings sync.RWMutex + statusHandler core.AppStatusHandler + enableEpochsHandler common.EnableEpochsHandler + txVersionHandler process.TxVersionCheckerHandler + mut sync.RWMutex } // ArgsNewEconomicsData defines the arguments needed for new economics economicsData type ArgsNewEconomicsData struct { - TxVersionChecker process.TxVersionCheckerHandler - BuiltInFunctionsCostHandler BuiltInFunctionsCostHandler - Economics *config.EconomicsConfig - EpochNotifier process.EpochNotifier - EnableEpochsHandler common.EnableEpochsHandler + TxVersionChecker process.TxVersionCheckerHandler + Economics *config.EconomicsConfig + EpochNotifier process.EpochNotifier + EnableEpochsHandler common.EnableEpochsHandler } // NewEconomicsData will create an object with information about economics parameters func NewEconomicsData(args ArgsNewEconomicsData) (*economicsData, error) { - if check.IfNil(args.BuiltInFunctionsCostHandler) { - return nil, process.ErrNilBuiltInFunctionsCostHandler - } if check.IfNil(args.TxVersionChecker) { return nil, process.ErrNilTransactionVersionChecker } @@ -75,12 +70,11 @@ func NewEconomicsData(args ArgsNewEconomicsData) (*economicsData, error) { } ed := &economicsData{ - minInflation: args.Economics.GlobalSettings.MinimumInflation, - gasPriceModifier: args.Economics.FeeSettings.GasPriceModifier, - statusHandler: statusHandler.NewNilStatusHandler(), - builtInFunctionsCostHandler: args.BuiltInFunctionsCostHandler, - enableEpochsHandler: args.EnableEpochsHandler, - txVersionHandler: args.TxVersionChecker, + minInflation: args.Economics.GlobalSettings.MinimumInflation, + gasPriceModifier: args.Economics.FeeSettings.GasPriceModifier, + statusHandler: statusHandler.NewNilStatusHandler(), + enableEpochsHandler: args.EnableEpochsHandler, + txVersionHandler: args.TxVersionChecker, } ed.yearSettings = make(map[uint32]*config.YearSetting) @@ -517,23 +511,8 @@ func (ed *economicsData) ComputeGasUsedAndFeeBasedOnRefundValue(tx data.Transact // ComputeGasUsedAndFeeBasedOnRefundValueInEpoch will compute gas used value and transaction fee using refund value from a SCR in a specific epoch func (ed *economicsData) ComputeGasUsedAndFeeBasedOnRefundValueInEpoch(tx data.TransactionWithFeeHandler, refundValue *big.Int, epoch uint32) (uint64, *big.Int) { if refundValue.Cmp(big.NewInt(0)) == 0 { - if ed.builtInFunctionsCostHandler.IsBuiltInFuncCall(tx) { - builtInCost := ed.builtInFunctionsCostHandler.ComputeBuiltInCost(tx) - computedGasLimit := ed.ComputeGasLimitInEpoch(tx, epoch) - - gasLimitWithBuiltInCost := builtInCost + computedGasLimit - txFee := ed.ComputeTxFeeBasedOnGasUsedInEpoch(tx, gasLimitWithBuiltInCost, epoch) - - gasLimitWithoutMoveBalance := tx.GetGasLimit() - computedGasLimit - // transaction will consume all the gas if sender provided too much gas - if isTooMuchGasProvided(gasLimitWithoutMoveBalance, 
gasLimitWithoutMoveBalance-builtInCost) { - return tx.GetGasLimit(), ed.ComputeTxFeeInEpoch(tx, epoch) - } - - return gasLimitWithBuiltInCost, txFee - } - txFee := ed.ComputeTxFeeInEpoch(tx, epoch) + return tx.GetGasLimit(), txFee } @@ -560,15 +539,6 @@ func (ed *economicsData) ComputeGasUsedAndFeeBasedOnRefundValueInEpoch(tx data.T return gasUsed, txFee } -func isTooMuchGasProvided(gasProvided uint64, gasRemained uint64) bool { - if gasProvided <= gasRemained { - return false - } - - gasUsed := gasProvided - gasRemained - return gasProvided > gasUsed*process.MaxGasFeeHigherFactorAccepted -} - // ComputeTxFeeBasedOnGasUsed will compute transaction fee func (ed *economicsData) ComputeTxFeeBasedOnGasUsed(tx data.TransactionWithFeeHandler, gasUsed uint64) *big.Int { currenEpoch := ed.enableEpochsHandler.GetCurrentEpoch() diff --git a/process/economics/economicsData_test.go b/process/economics/economicsData_test.go index 417ef1b7826..1f2c913a826 100644 --- a/process/economics/economicsData_test.go +++ b/process/economics/economicsData_test.go @@ -16,13 +16,10 @@ import ( "github.com/multiversx/mx-chain-go/config" "github.com/multiversx/mx-chain-go/process" "github.com/multiversx/mx-chain-go/process/economics" - "github.com/multiversx/mx-chain-go/process/mock" - "github.com/multiversx/mx-chain-go/process/smartContract" "github.com/multiversx/mx-chain-go/testscommon" "github.com/multiversx/mx-chain-go/testscommon/enableEpochsHandlerMock" "github.com/multiversx/mx-chain-go/testscommon/epochNotifier" "github.com/multiversx/mx-chain-go/testscommon/statusHandler" - "github.com/multiversx/mx-chain-go/vm/systemSmartContracts/defaults" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) @@ -106,13 +103,12 @@ func createArgsForEconomicsData(gasModifier float64) economics.ArgsNewEconomicsD return flag == common.GasPriceModifierFlag }, }, - BuiltInFunctionsCostHandler: &mock.BuiltInCostHandlerStub{}, - TxVersionChecker: &testscommon.TxVersionCheckerStub{}, + TxVersionChecker: &testscommon.TxVersionCheckerStub{}, } return args } -func createArgsForEconomicsDataRealFees(handler economics.BuiltInFunctionsCostHandler) economics.ArgsNewEconomicsData { +func createArgsForEconomicsDataRealFees() economics.ArgsNewEconomicsData { feeSettings := feeSettingsReal() args := economics.ArgsNewEconomicsData{ Economics: createDummyEconomicsConfig(feeSettings), @@ -122,8 +118,7 @@ func createArgsForEconomicsDataRealFees(handler economics.BuiltInFunctionsCostHa return flag == common.GasPriceModifierFlag }, }, - BuiltInFunctionsCostHandler: handler, - TxVersionChecker: &testscommon.TxVersionCheckerStub{}, + TxVersionChecker: &testscommon.TxVersionCheckerStub{}, } return args } @@ -525,16 +520,6 @@ func TestNewEconomicsData_InvalidTopUpGradientPointShouldErr(t *testing.T) { assert.True(t, errors.Is(err, process.ErrInvalidRewardsTopUpGradientPoint)) } -func TestNewEconomicsData_NilBuiltInFunctionsCostHandlerShouldErr(t *testing.T) { - t.Parallel() - - args := createArgsForEconomicsData(1) - args.BuiltInFunctionsCostHandler = nil - - _, err := economics.NewEconomicsData(args) - assert.Equal(t, process.ErrNilBuiltInFunctionsCostHandler, err) -} - func TestNewEconomicsData_NilTxVersionCheckerShouldErr(t *testing.T) { t.Parallel() @@ -1141,7 +1126,7 @@ func TestEconomicsData_ComputeGasUsedAndFeeBasedOnRefundValueZero(t *testing.T) func TestEconomicsData_ComputeGasUsedAndFeeBasedOnRefundValueCheckGasUsedValue(t *testing.T) { t.Parallel() - economicData, _ := 
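The economicsData.go hunk above removes the built-in functions cost handler from the zero-refund path: when a transaction produced no refund SCR, ComputeGasUsedAndFeeBasedOnRefundValueInEpoch now simply reports the whole provided gas limit as used and charges the full fee for it, instead of re-estimating built-in call costs. A minimal sketch of that branch; the feeComputer type and its flat gasLimit * gasPrice fee are illustrative simplifications, since the real ComputeTxFeeInEpoch also applies the gas price modifier and per-epoch settings.

package main

import (
	"fmt"
	"math/big"
)

// feeComputer is an illustrative stand-in for the economicsData methods used above;
// the real ComputeTxFeeInEpoch also applies the gas price modifier and epoch-based settings.
type feeComputer struct {
	gasPrice uint64
}

// ComputeTxFeeInEpoch is simplified here to fee = gasLimit * gasPrice.
func (f *feeComputer) ComputeTxFeeInEpoch(gasLimit uint64) *big.Int {
	fee := new(big.Int).SetUint64(gasLimit)
	return fee.Mul(fee, new(big.Int).SetUint64(f.gasPrice))
}

// computeGasUsedAndFeeZeroRefund sketches the simplified zero-refund branch:
// without the built-in cost handler, no refund simply means the whole gas limit is charged.
func computeGasUsedAndFeeZeroRefund(f *feeComputer, gasLimit uint64, refundValue *big.Int) (uint64, *big.Int) {
	if refundValue.Cmp(big.NewInt(0)) == 0 {
		return gasLimit, f.ComputeTxFeeInEpoch(gasLimit)
	}
	// the refund-based path (gas used derived from the refund) is unchanged and out of scope here
	return 0, nil
}

func main() {
	f := &feeComputer{gasPrice: 1_000_000_000}
	gasUsed, fee := computeGasUsedAndFeeZeroRefund(f, 120_000, big.NewInt(0))
	fmt.Println(gasUsed, fee) // 120000 120000000000000
}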
economics.NewEconomicsData(createArgsForEconomicsDataRealFees(&mock.BuiltInCostHandlerStub{})) + economicData, _ := economics.NewEconomicsData(createArgsForEconomicsDataRealFees()) txData := []byte("0061736d0100000001150460037f7f7e017f60027f7f017e60017e0060000002420303656e7611696e74363473746f7261676553746f7265000003656e7610696e74363473746f726167654c6f6164000103656e760b696e74363466696e6973680002030504030303030405017001010105030100020608017f01419088040b072f05066d656d6f7279020004696e6974000309696e6372656d656e7400040964656372656d656e7400050367657400060a8a01041300418088808000410742011080808080001a0b2e01017e4180888080004107418088808000410710818080800042017c22001080808080001a20001082808080000b2e01017e41808880800041074180888080004107108180808000427f7c22001080808080001a20001082808080000b160041808880800041071081808080001082808080000b0b0f01004180080b08434f554e54455200@0500@0100") tx1 := &transaction.Transaction{ GasPrice: 1000000000, @@ -1194,7 +1179,7 @@ func TestEconomicsData_ComputeGasUsedAndFeeBasedOnRefundValueCheckGasUsedValue(t func TestEconomicsData_ComputeGasUsedAndFeeBasedOnRefundValueCheck(t *testing.T) { t.Parallel() - economicData, _ := economics.NewEconomicsData(createArgsForEconomicsDataRealFees(&mock.BuiltInCostHandlerStub{})) + economicData, _ := economics.NewEconomicsData(createArgsForEconomicsDataRealFees()) txData := []byte("0061736d0100000001150460037f7f7e017f60027f7f017e60017e0060000002420303656e7611696e74363473746f7261676553746f7265000003656e7610696e74363473746f726167654c6f6164000103656e760b696e74363466696e6973680002030504030303030405017001010105030100020608017f01419088040b072f05066d656d6f7279020004696e6974000309696e6372656d656e7400040964656372656d656e7400050367657400060a8a01041300418088808000410742011080808080001a0b2e01017e4180888080004107418088808000410710818080800042017c22001080808080001a20001082808080000b2e01017e41808880800041074180888080004107108180808000427f7c22001080808080001a20001082808080000b160041808880800041071081808080001082808080000b0b0f01004180080b08434f554e54455200@0500@0100") tx := &transaction.Transaction{ GasPrice: 1000000000, @@ -1214,11 +1199,7 @@ func TestEconomicsData_ComputeGasUsedAndFeeBasedOnRefundValueCheck(t *testing.T) func TestEconomicsData_ComputeGasUsedAndFeeBasedOnRefundValueSpecialBuiltIn_ToMuchGasProvided(t *testing.T) { t.Parallel() - builtInCostHandler, _ := economics.NewBuiltInFunctionsCost(&economics.ArgsBuiltInFunctionCost{ - GasSchedule: testscommon.NewGasScheduleNotifierMock(defaults.FillGasMapInternal(map[string]map[string]uint64{}, 1)), - ArgsParser: smartContract.NewArgumentParser(), - }) - economicData, _ := economics.NewEconomicsData(createArgsForEconomicsDataRealFees(builtInCostHandler)) + economicData, _ := economics.NewEconomicsData(createArgsForEconomicsDataRealFees()) tx := &transaction.Transaction{ GasPrice: 1000000000, @@ -1236,11 +1217,6 @@ func TestEconomicsData_ComputeGasUsedAndFeeBasedOnRefundValueSpecialBuiltIn_ToMu } func TestEconomicsData_ComputeGasUsedAndFeeBasedOnRefundValueStakeTx(t *testing.T) { - builtInCostHandler, _ := economics.NewBuiltInFunctionsCost(&economics.ArgsBuiltInFunctionCost{ - GasSchedule: testscommon.NewGasScheduleNotifierMock(defaults.FillGasMapInternal(map[string]map[string]uint64{}, 1)), - ArgsParser: smartContract.NewArgumentParser(), - }) - txStake := &transaction.Transaction{ GasPrice: 1000000000, GasLimit: 250000000, @@ -1250,7 +1226,7 @@ func TestEconomicsData_ComputeGasUsedAndFeeBasedOnRefundValueStakeTx(t *testing. 
expectedGasUsed := uint64(39378847) expectedFee, _ := big.NewInt(0).SetString("39378847000000000", 10) - args := createArgsForEconomicsDataRealFees(builtInCostHandler) + args := createArgsForEconomicsDataRealFees() args.EpochNotifier = forking.NewGenericEpochNotifier() args.EnableEpochsHandler, _ = enablers.NewEnableEpochsHandler(config.EnableEpochs{ PenalizedTooMuchGasEnableEpoch: 1000, @@ -1267,11 +1243,7 @@ func TestEconomicsData_ComputeGasUsedAndFeeBasedOnRefundValueStakeTx(t *testing. func TestEconomicsData_ComputeGasUsedAndFeeBasedOnRefundValueSpecialBuiltIn(t *testing.T) { t.Parallel() - builtInCostHandler, _ := economics.NewBuiltInFunctionsCost(&economics.ArgsBuiltInFunctionCost{ - GasSchedule: testscommon.NewGasScheduleNotifierMock(defaults.FillGasMapInternal(map[string]map[string]uint64{}, 1)), - ArgsParser: smartContract.NewArgumentParser(), - }) - economicData, _ := economics.NewEconomicsData(createArgsForEconomicsDataRealFees(builtInCostHandler)) + economicData, _ := economics.NewEconomicsData(createArgsForEconomicsDataRealFees()) tx := &transaction.Transaction{ GasPrice: 1000000000, @@ -1279,8 +1251,8 @@ func TestEconomicsData_ComputeGasUsedAndFeeBasedOnRefundValueSpecialBuiltIn(t *t Data: []byte("ESDTTransfer@54474e2d383862383366@0a"), } - expectedGasUsed := uint64(104001) - expectedFee, _ := big.NewInt(0).SetString("104000010000000", 10) + expectedGasUsed := uint64(104009) + expectedFee, _ := big.NewInt(0).SetString("104000090000000", 10) refundValue, _ := big.NewInt(0).SetString("0", 10) gasUsed, fee := economicData.ComputeGasUsedAndFeeBasedOnRefundValue(tx, refundValue) @@ -1291,11 +1263,7 @@ func TestEconomicsData_ComputeGasUsedAndFeeBasedOnRefundValueSpecialBuiltIn(t *t func TestEconomicsData_ComputeGasUsedAndFeeBasedOnRefundValueSpecialBuiltInTooMuchGas(t *testing.T) { t.Parallel() - builtInCostHandler, _ := economics.NewBuiltInFunctionsCost(&economics.ArgsBuiltInFunctionCost{ - GasSchedule: testscommon.NewGasScheduleNotifierMock(defaults.FillGasMapInternal(map[string]map[string]uint64{}, 1)), - ArgsParser: smartContract.NewArgumentParser(), - }) - economicData, _ := economics.NewEconomicsData(createArgsForEconomicsDataRealFees(builtInCostHandler)) + economicData, _ := economics.NewEconomicsData(createArgsForEconomicsDataRealFees()) tx := &transaction.Transaction{ GasPrice: 1000000000, @@ -1315,7 +1283,7 @@ func TestEconomicsData_ComputeGasUsedAndFeeBasedOnRefundValueSpecialBuiltInTooMu func TestEconomicsData_ComputeGasLimitBasedOnBalance(t *testing.T) { t.Parallel() - args := createArgsForEconomicsDataRealFees(&mock.BuiltInCostHandlerStub{}) + args := createArgsForEconomicsDataRealFees() args.EpochNotifier = forking.NewGenericEpochNotifier() args.EnableEpochsHandler, _ = enablers.NewEnableEpochsHandler(config.EnableEpochs{ GasPriceModifierEnableEpoch: 1, @@ -1353,7 +1321,7 @@ func TestEconomicsData_ComputeGasLimitBasedOnBalance(t *testing.T) { func TestEconomicsData_MaxGasPriceSetGuardian(t *testing.T) { t.Parallel() - args := createArgsForEconomicsDataRealFees(&mock.BuiltInCostHandlerStub{}) + args := createArgsForEconomicsDataRealFees() maxGasPriceSetGuardianString := "2000000" expectedMaxGasPriceSetGuardian, err := strconv.ParseUint(maxGasPriceSetGuardianString, 10, 64) require.Nil(t, err) @@ -1369,7 +1337,7 @@ func TestEconomicsData_SetStatusHandler(t *testing.T) { t.Run("nil status handler should error", func(t *testing.T) { t.Parallel() - args := createArgsForEconomicsDataRealFees(&mock.BuiltInCostHandlerStub{}) + args := createArgsForEconomicsDataRealFees() 
economicData, _ := economics.NewEconomicsData(args) err := economicData.SetStatusHandler(nil) @@ -1378,7 +1346,7 @@ func TestEconomicsData_SetStatusHandler(t *testing.T) { t.Run("should work", func(t *testing.T) { t.Parallel() - args := createArgsForEconomicsDataRealFees(&mock.BuiltInCostHandlerStub{}) + args := createArgsForEconomicsDataRealFees() economicData, _ := economics.NewEconomicsData(args) err := economicData.SetStatusHandler(&statusHandler.AppStatusHandlerStub{}) diff --git a/process/economics/interface.go b/process/economics/interface.go index 766ba7563e3..41332c30eef 100644 --- a/process/economics/interface.go +++ b/process/economics/interface.go @@ -1,17 +1,9 @@ package economics import ( - "github.com/multiversx/mx-chain-core-go/data" vmcommon "github.com/multiversx/mx-chain-vm-common-go" ) -// BuiltInFunctionsCostHandler is able to calculate the cost of a built-in function call -type BuiltInFunctionsCostHandler interface { - ComputeBuiltInCost(tx data.TransactionWithFeeHandler) uint64 - IsBuiltInFuncCall(tx data.TransactionWithFeeHandler) bool - IsInterfaceNil() bool -} - // EpochNotifier raises epoch change events type EpochNotifier interface { RegisterNotifyHandler(handler vmcommon.EpochSubscriberHandler) diff --git a/process/errors.go b/process/errors.go index 6ae40412109..207184f3cb7 100644 --- a/process/errors.go +++ b/process/errors.go @@ -194,6 +194,9 @@ var ErrNilShardCoordinator = errors.New("nil shard coordinator") // ErrNilNodesCoordinator signals that an operation has been attempted to or with a nil nodes coordinator var ErrNilNodesCoordinator = errors.New("nil nodes coordinator") +// ErrNilStakingDataProvider signals that a nil staking data provider was used +var ErrNilStakingDataProvider = errors.New("nil staking data provider") + // ErrNilKeyGen signals that an operation has been attempted to or with a nil single sign key generator var ErrNilKeyGen = errors.New("nil key generator") @@ -981,12 +984,6 @@ var ErrMaxAccumulatedFeesExceeded = errors.New("max accumulated fees has been ex // ErrMaxDeveloperFeesExceeded signals that max developer fees has been exceeded var ErrMaxDeveloperFeesExceeded = errors.New("max developer fees has been exceeded") -// ErrNilBuiltInFunctionsCostHandler signals that a nil built-in functions cost handler has been provided -var ErrNilBuiltInFunctionsCostHandler = errors.New("nil built in functions cost handler") - -// ErrNilArgsBuiltInFunctionsConstHandler signals that a nil arguments struct for built-in functions cost handler has been provided -var ErrNilArgsBuiltInFunctionsConstHandler = errors.New("nil arguments for built in functions cost handler") - // ErrInvalidEpochStartMetaBlockConsensusPercentage signals that a small epoch start meta block consensus percentage has been provided var ErrInvalidEpochStartMetaBlockConsensusPercentage = errors.New("invalid epoch start meta block consensus percentage") @@ -1226,3 +1223,6 @@ var ErrNilStorageService = errors.New("nil storage service") // ErrInvalidAsyncArguments signals that invalid arguments were given for async/callBack processing var ErrInvalidAsyncArguments = errors.New("invalid arguments to process async/callback function") + +// ErrNilSentSignatureTracker defines the error for setting a nil SentSignatureTracker +var ErrNilSentSignatureTracker = errors.New("nil sent signature tracker") diff --git a/process/factory/metachain/intermediateProcessorsContainerFactory_test.go b/process/factory/metachain/intermediateProcessorsContainerFactory_test.go index 79861ced4bd..f58b8e41f72 
100644 --- a/process/factory/metachain/intermediateProcessorsContainerFactory_test.go +++ b/process/factory/metachain/intermediateProcessorsContainerFactory_test.go @@ -23,14 +23,14 @@ func createMockPubkeyConverter() *testscommon.PubkeyConverterMock { func createMockArgsNewIntermediateProcessorsFactory() metachain.ArgsNewIntermediateProcessorsContainerFactory { args := metachain.ArgsNewIntermediateProcessorsContainerFactory{ - Hasher: &hashingMocks.HasherMock{}, - Marshalizer: &mock.MarshalizerMock{}, - ShardCoordinator: mock.NewMultiShardsCoordinatorMock(5), - PubkeyConverter: createMockPubkeyConverter(), - Store: &storageStubs.ChainStorerStub{}, - PoolsHolder: dataRetrieverMock.NewPoolsHolderMock(), - EconomicsFee: &economicsmocks.EconomicsHandlerStub{}, - EnableEpochsHandler: enableEpochsHandlerMock.NewEnableEpochsHandlerStub(common.KeepExecOrderOnCreatedSCRsFlag), + Hasher: &hashingMocks.HasherMock{}, + Marshalizer: &mock.MarshalizerMock{}, + ShardCoordinator: mock.NewMultiShardsCoordinatorMock(5), + PubkeyConverter: createMockPubkeyConverter(), + Store: &storageStubs.ChainStorerStub{}, + PoolsHolder: dataRetrieverMock.NewPoolsHolderMock(), + EconomicsFee: &economicsmocks.EconomicsHandlerStub{}, + EnableEpochsHandler: enableEpochsHandlerMock.NewEnableEpochsHandlerStub(common.KeepExecOrderOnCreatedSCRsFlag), TxExecutionOrderHandler: &txExecOrderStub.TxExecutionOrderHandlerStub{}, } return args diff --git a/process/factory/metachain/vmContainerFactory.go b/process/factory/metachain/vmContainerFactory.go index c3dbb17e4e6..8f8fd90bbc9 100644 --- a/process/factory/metachain/vmContainerFactory.go +++ b/process/factory/metachain/vmContainerFactory.go @@ -44,10 +44,12 @@ type vmContainerFactory struct { scFactory vm.SystemSCContainerFactory shardCoordinator sharding.Coordinator enableEpochsHandler common.EnableEpochsHandler + nodesCoordinator vm.NodesCoordinator } // ArgsNewVMContainerFactory defines the arguments needed to create a new VM container factory type ArgsNewVMContainerFactory struct { + ArgBlockChainHook hooks.ArgBlockChainHook Economics process.EconomicsDataHandler MessageSignVerifier vm.MessageSignVerifier GasSchedule core.GasScheduleNotifier @@ -62,6 +64,7 @@ type ArgsNewVMContainerFactory struct { PubkeyConv core.PubkeyConverter BlockChainHook process.BlockChainHookWithAccountsAdapter EnableEpochsHandler common.EnableEpochsHandler + NodesCoordinator vm.NodesCoordinator } // NewVMContainerFactory is responsible for creating a new virtual machine factory object @@ -108,6 +111,9 @@ func NewVMContainerFactory(args ArgsNewVMContainerFactory) (*vmContainerFactory, if check.IfNil(args.EnableEpochsHandler) { return nil, vm.ErrNilEnableEpochsHandler } + if check.IfNil(args.NodesCoordinator) { + return nil, fmt.Errorf("%w in NewVMContainerFactory", process.ErrNilNodesCoordinator) + } cryptoHook := hooks.NewVMCryptoHook() @@ -127,6 +133,7 @@ func NewVMContainerFactory(args ArgsNewVMContainerFactory) (*vmContainerFactory, addressPubKeyConverter: args.PubkeyConv, shardCoordinator: args.ShardCoordinator, enableEpochsHandler: args.EnableEpochsHandler, + nodesCoordinator: args.NodesCoordinator, }, nil } @@ -200,6 +207,7 @@ func (vmf *vmContainerFactory) createSystemVMFactoryAndEEI() (vm.SystemSCContain AddressPubKeyConverter: vmf.addressPubKeyConverter, ShardCoordinator: vmf.shardCoordinator, EnableEpochsHandler: vmf.enableEpochsHandler, + NodesCoordinator: vmf.nodesCoordinator, } scFactory, err := systemVMFactory.NewSystemSCFactory(argsNewSystemScFactory) if err != nil { diff --git 
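The vmContainerFactory.go hunk above introduces a NodesCoordinator dependency whose nil check returns process.ErrNilNodesCoordinator wrapped with fmt.Errorf("%w in NewVMContainerFactory"), which is why the new test can still match it with errors.Is. A self-contained sketch of that wrapping pattern; the reduced interface, struct and plain == nil check stand in for the real vm.NodesCoordinator and check.IfNil.

package main

import (
	"errors"
	"fmt"
)

// errNilNodesCoordinator mirrors process.ErrNilNodesCoordinator; the interface and
// constructor below are illustrative reductions of the ones in the patch.
var errNilNodesCoordinator = errors.New("nil nodes coordinator")

type nodesCoordinator interface {
	GetNumTotalEligible() uint64
}

type vmContainerFactory struct {
	nodesCoordinator nodesCoordinator
}

func newVMContainerFactory(nc nodesCoordinator) (*vmContainerFactory, error) {
	if nc == nil {
		// wrapping keeps the sentinel matchable with errors.Is while adding constructor context
		return nil, fmt.Errorf("%w in NewVMContainerFactory", errNilNodesCoordinator)
	}
	return &vmContainerFactory{nodesCoordinator: nc}, nil
}

func main() {
	_, err := newVMContainerFactory(nil)
	fmt.Println(errors.Is(err, errNilNodesCoordinator)) // true
	fmt.Println(err)                                     // nil nodes coordinator in NewVMContainerFactory
}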
a/process/factory/metachain/vmContainerFactory_test.go b/process/factory/metachain/vmContainerFactory_test.go index 41212156305..ff542213ef4 100644 --- a/process/factory/metachain/vmContainerFactory_test.go +++ b/process/factory/metachain/vmContainerFactory_test.go @@ -17,6 +17,7 @@ import ( "github.com/multiversx/mx-chain-go/testscommon/enableEpochsHandlerMock" "github.com/multiversx/mx-chain-go/testscommon/epochNotifier" "github.com/multiversx/mx-chain-go/testscommon/hashingMocks" + "github.com/multiversx/mx-chain-go/testscommon/shardingMocks" stateMock "github.com/multiversx/mx-chain-go/testscommon/state" "github.com/multiversx/mx-chain-go/vm" wasmConfig "github.com/multiversx/mx-chain-vm-go/config" @@ -62,6 +63,14 @@ func createVmContainerMockArgument(gasSchedule core.GasScheduleNotifier) ArgsNew BleedPercentagePerRound: 1, MaxNumberOfNodesForStake: 1, ActivateBLSPubKeyMessageVerification: false, + StakeLimitPercentage: 100.0, + NodeLimitPercentage: 100.0, + }, + SoftAuctionConfig: config.SoftAuctionConfig{ + TopUpStep: "10", + MinTopUp: "1", + MaxTopUp: "32000000", + MaxNumberOfIterations: 100000, }, }, ValidatorAccountsDB: &stateMock.AccountsStub{}, @@ -69,6 +78,9 @@ func createVmContainerMockArgument(gasSchedule core.GasScheduleNotifier) ArgsNew ChanceComputer: &mock.RaterMock{}, ShardCoordinator: &mock.ShardCoordinatorStub{}, EnableEpochsHandler: enableEpochsHandlerMock.NewEnableEpochsHandlerStub(common.StakeFlag), + NodesCoordinator: &shardingMocks.NodesCoordinatorMock{GetNumTotalEligibleCalled: func() uint64 { + return 1000 + }}, } } @@ -228,6 +240,18 @@ func TestNewVMContainerFactory_NilShardCoordinator(t *testing.T) { assert.True(t, errors.Is(err, vm.ErrNilShardCoordinator)) } +func TestNewVMContainerFactory_NilNodesCoordinatorFails(t *testing.T) { + t.Parallel() + + gasSchedule := makeGasSchedule() + argsNewVmContainerFactory := createVmContainerMockArgument(gasSchedule) + argsNewVmContainerFactory.NodesCoordinator = nil + vmf, err := NewVMContainerFactory(argsNewVmContainerFactory) + + assert.True(t, check.IfNil(vmf)) + assert.True(t, errors.Is(err, process.ErrNilNodesCoordinator)) +} + func TestNewVMContainerFactory_NilEnableEpochsHandler(t *testing.T) { t.Parallel() @@ -296,10 +320,9 @@ func TestVmContainerFactory_Create(t *testing.T) { MaxGasPriceSetGuardian: "100000", }, }, - EpochNotifier: &epochNotifier.EpochNotifierStub{}, - EnableEpochsHandler: enableEpochsHandlerMock.NewEnableEpochsHandlerStub(), - BuiltInFunctionsCostHandler: &mock.BuiltInCostHandlerStub{}, - TxVersionChecker: &testscommon.TxVersionCheckerStub{}, + EpochNotifier: &epochNotifier.EpochNotifierStub{}, + EnableEpochsHandler: enableEpochsHandlerMock.NewEnableEpochsHandlerStub(), + TxVersionChecker: &testscommon.TxVersionCheckerStub{}, } economicsData, _ := economics.NewEconomicsData(argsNewEconomicsData) @@ -342,6 +365,8 @@ func TestVmContainerFactory_Create(t *testing.T) { MaxNumberOfNodesForStake: 100, ActivateBLSPubKeyMessageVerification: false, MinUnstakeTokensValue: "1", + StakeLimitPercentage: 100.0, + NodeLimitPercentage: 100.0, }, DelegationManagerSystemSCConfig: config.DelegationManagerSystemSCConfig{ MinCreationDeposit: "100", @@ -352,12 +377,21 @@ func TestVmContainerFactory_Create(t *testing.T) { MinServiceFee: 0, MaxServiceFee: 100, }, + SoftAuctionConfig: config.SoftAuctionConfig{ + TopUpStep: "10", + MinTopUp: "1", + MaxTopUp: "32000000", + MaxNumberOfIterations: 100000, + }, }, ValidatorAccountsDB: &stateMock.AccountsStub{}, UserAccountsDB: &stateMock.AccountsStub{}, ChanceComputer: 
&mock.RaterMock{}, ShardCoordinator: mock.NewMultiShardsCoordinatorMock(1), EnableEpochsHandler: enableEpochsHandlerMock.NewEnableEpochsHandlerStub(), + NodesCoordinator: &shardingMocks.NodesCoordinatorMock{GetNumTotalEligibleCalled: func() uint64 { + return 1000 + }}, } vmf, err := NewVMContainerFactory(argsNewVMContainerFactory) assert.NotNil(t, vmf) diff --git a/process/factory/shard/intermediateProcessorsContainerFactory_test.go b/process/factory/shard/intermediateProcessorsContainerFactory_test.go index 2f2cc7a9c52..5835a7361ac 100644 --- a/process/factory/shard/intermediateProcessorsContainerFactory_test.go +++ b/process/factory/shard/intermediateProcessorsContainerFactory_test.go @@ -57,14 +57,14 @@ func createMockPubkeyConverter() *testscommon.PubkeyConverterMock { func createMockArgsNewIntermediateProcessorsFactory() shard.ArgsNewIntermediateProcessorsContainerFactory { args := shard.ArgsNewIntermediateProcessorsContainerFactory{ - Hasher: &hashingMocks.HasherMock{}, - Marshalizer: &mock.MarshalizerMock{}, - ShardCoordinator: mock.NewMultiShardsCoordinatorMock(5), - PubkeyConverter: createMockPubkeyConverter(), - Store: &storageStubs.ChainStorerStub{}, - PoolsHolder: createDataPools(), - EconomicsFee: &economicsmocks.EconomicsHandlerStub{}, - EnableEpochsHandler: enableEpochsHandlerMock.NewEnableEpochsHandlerStub(common.KeepExecOrderOnCreatedSCRsFlag), + Hasher: &hashingMocks.HasherMock{}, + Marshalizer: &mock.MarshalizerMock{}, + ShardCoordinator: mock.NewMultiShardsCoordinatorMock(5), + PubkeyConverter: createMockPubkeyConverter(), + Store: &storageStubs.ChainStorerStub{}, + PoolsHolder: createDataPools(), + EconomicsFee: &economicsmocks.EconomicsHandlerStub{}, + EnableEpochsHandler: enableEpochsHandlerMock.NewEnableEpochsHandlerStub(common.KeepExecOrderOnCreatedSCRsFlag), TxExecutionOrderHandler: &txExecOrderStub.TxExecutionOrderHandlerStub{}, } return args diff --git a/process/factory/shard/vmContainerFactory_test.go b/process/factory/shard/vmContainerFactory_test.go index df3ffab673e..ac0a2dd6608 100644 --- a/process/factory/shard/vmContainerFactory_test.go +++ b/process/factory/shard/vmContainerFactory_test.go @@ -1,6 +1,7 @@ package shard import ( + "runtime" "sync" "testing" @@ -150,6 +151,10 @@ func TestNewVMContainerFactory_OkValues(t *testing.T) { } func TestVmContainerFactory_Create(t *testing.T) { + if runtime.GOARCH == "arm64" { + t.Skip("skipping test on arm64") + } + t.Parallel() args := createMockVMAccountsArguments() @@ -175,6 +180,10 @@ func TestVmContainerFactory_Create(t *testing.T) { } func TestVmContainerFactory_ResolveWasmVMVersion(t *testing.T) { + if runtime.GOARCH == "arm64" { + t.Skip("skipping test on arm64") + } + epochNotifierInstance := forking.NewGenericEpochNotifier() numCalled := 0 diff --git a/process/headerCheck/common.go b/process/headerCheck/common.go new file mode 100644 index 00000000000..01946580d87 --- /dev/null +++ b/process/headerCheck/common.go @@ -0,0 +1,52 @@ +package headerCheck + +import ( + "github.com/multiversx/mx-chain-core-go/core/check" + "github.com/multiversx/mx-chain-core-go/data" + "github.com/multiversx/mx-chain-go/process" + "github.com/multiversx/mx-chain-go/sharding/nodesCoordinator" +) + +// ComputeConsensusGroup will compute the consensus group that assembled the provided block +func ComputeConsensusGroup(header data.HeaderHandler, nodesCoordinator nodesCoordinator.NodesCoordinator) (validatorsGroup []nodesCoordinator.Validator, err error) { + if check.IfNil(header) { + return nil, process.ErrNilHeaderHandler + } + 
if check.IfNil(nodesCoordinator) { + return nil, process.ErrNilNodesCoordinator + } + + prevRandSeed := header.GetPrevRandSeed() + + // TODO: change here with an activation flag if start of epoch block needs to be validated by the new epoch nodes + epoch := header.GetEpoch() + if header.IsStartOfEpochBlock() && epoch > 0 { + epoch = epoch - 1 + } + + return nodesCoordinator.ComputeConsensusGroup(prevRandSeed, header.GetRound(), header.GetShardID(), epoch) +} + +// ComputeSignersPublicKeys will extract from the provided consensus group slice only the strings that matched with the bitmap +func ComputeSignersPublicKeys(consensusGroup []string, bitmap []byte) []string { + nbBitsBitmap := len(bitmap) * 8 + consensusGroupSize := len(consensusGroup) + size := consensusGroupSize + if consensusGroupSize > nbBitsBitmap { + size = nbBitsBitmap + } + + result := make([]string, 0, len(consensusGroup)) + + for i := 0; i < size; i++ { + indexRequired := (bitmap[i/8] & (1 << uint16(i%8))) > 0 + if !indexRequired { + continue + } + + pubKey := consensusGroup[i] + result = append(result, pubKey) + } + + return result +} diff --git a/process/headerCheck/common_test.go b/process/headerCheck/common_test.go new file mode 100644 index 00000000000..0961b7f2a20 --- /dev/null +++ b/process/headerCheck/common_test.go @@ -0,0 +1,187 @@ +package headerCheck + +import ( + "fmt" + "testing" + + "github.com/multiversx/mx-chain-core-go/data/block" + "github.com/multiversx/mx-chain-go/process" + "github.com/multiversx/mx-chain-go/sharding/nodesCoordinator" + "github.com/multiversx/mx-chain-go/testscommon/shardingMocks" + "github.com/stretchr/testify/assert" +) + +func TestComputeConsensusGroup(t *testing.T) { + t.Parallel() + + t.Run("nil header should error", func(t *testing.T) { + nodesCoordinatorInstance := shardingMocks.NewNodesCoordinatorMock() + nodesCoordinatorInstance.ComputeValidatorsGroupCalled = func(randomness []byte, round uint64, shardId uint32, epoch uint32) (validatorsGroup []nodesCoordinator.Validator, err error) { + assert.Fail(t, "should have not called ComputeValidatorsGroupCalled") + return nil, nil + } + + vGroup, err := ComputeConsensusGroup(nil, nodesCoordinatorInstance) + assert.Equal(t, process.ErrNilHeaderHandler, err) + assert.Nil(t, vGroup) + }) + t.Run("nil nodes coordinator should error", func(t *testing.T) { + header := &block.Header{ + Epoch: 1123, + Round: 37373, + Nonce: 38383, + ShardID: 2, + PrevRandSeed: []byte("prev rand seed"), + } + + vGroup, err := ComputeConsensusGroup(header, nil) + assert.Equal(t, process.ErrNilNodesCoordinator, err) + assert.Nil(t, vGroup) + }) + t.Run("should work for a random block", func(t *testing.T) { + header := &block.Header{ + Epoch: 1123, + Round: 37373, + Nonce: 38383, + ShardID: 2, + PrevRandSeed: []byte("prev rand seed"), + } + + validator1, _ := nodesCoordinator.NewValidator([]byte("pk1"), 1, 1) + validator2, _ := nodesCoordinator.NewValidator([]byte("pk2"), 1, 2) + + validatorGroup := []nodesCoordinator.Validator{validator1, validator2} + nodesCoordinatorInstance := shardingMocks.NewNodesCoordinatorMock() + nodesCoordinatorInstance.ComputeValidatorsGroupCalled = func(randomness []byte, round uint64, shardId uint32, epoch uint32) (validatorsGroup []nodesCoordinator.Validator, err error) { + assert.Equal(t, header.PrevRandSeed, randomness) + assert.Equal(t, header.Round, round) + assert.Equal(t, header.ShardID, shardId) + assert.Equal(t, header.Epoch, epoch) + + return validatorGroup, nil + } + + vGroup, err := ComputeConsensusGroup(header, 
nodesCoordinatorInstance) + assert.Nil(t, err) + assert.Equal(t, validatorGroup, vGroup) + }) + t.Run("should work for a start of epoch block", func(t *testing.T) { + header := &block.Header{ + Epoch: 1123, + Round: 37373, + Nonce: 38383, + ShardID: 2, + PrevRandSeed: []byte("prev rand seed"), + EpochStartMetaHash: []byte("epoch start metahash"), + } + + validator1, _ := nodesCoordinator.NewValidator([]byte("pk1"), 1, 1) + validator2, _ := nodesCoordinator.NewValidator([]byte("pk2"), 1, 2) + + validatorGroup := []nodesCoordinator.Validator{validator1, validator2} + nodesCoordinatorInstance := shardingMocks.NewNodesCoordinatorMock() + nodesCoordinatorInstance.ComputeValidatorsGroupCalled = func(randomness []byte, round uint64, shardId uint32, epoch uint32) (validatorsGroup []nodesCoordinator.Validator, err error) { + assert.Equal(t, header.PrevRandSeed, randomness) + assert.Equal(t, header.Round, round) + assert.Equal(t, header.ShardID, shardId) + assert.Equal(t, header.Epoch-1, epoch) + + return validatorGroup, nil + } + + vGroup, err := ComputeConsensusGroup(header, nodesCoordinatorInstance) + assert.Nil(t, err) + assert.Equal(t, validatorGroup, vGroup) + }) +} + +func generatePubKeys(num int) []string { + consensusGroup := make([]string, 0, num) + for i := 0; i < num; i++ { + consensusGroup = append(consensusGroup, fmt.Sprintf("pub key %d", i)) + } + + return consensusGroup +} + +func TestComputeSignersPublicKeys(t *testing.T) { + t.Parallel() + + t.Run("should compute with 16 validators", func(t *testing.T) { + t.Parallel() + + consensusGroup := generatePubKeys(16) + mask0 := byte(0b00110101) + mask1 := byte(0b01001101) + + result := ComputeSignersPublicKeys(consensusGroup, []byte{mask0, mask1}) + expected := []string{ + "pub key 0", + "pub key 2", + "pub key 4", + "pub key 5", + + "pub key 8", + "pub key 10", + "pub key 11", + "pub key 14", + } + + assert.Equal(t, expected, result) + }) + t.Run("should compute with 14 validators", func(t *testing.T) { + t.Parallel() + + consensusGroup := generatePubKeys(14) + mask0 := byte(0b00110101) + mask1 := byte(0b00001101) + + result := ComputeSignersPublicKeys(consensusGroup, []byte{mask0, mask1}) + expected := []string{ + "pub key 0", + "pub key 2", + "pub key 4", + "pub key 5", + + "pub key 8", + "pub key 10", + "pub key 11", + } + + assert.Equal(t, expected, result) + }) + t.Run("should compute with 14 validators, mask is 0", func(t *testing.T) { + t.Parallel() + + consensusGroup := generatePubKeys(14) + mask0 := byte(0b00000000) + mask1 := byte(0b00000000) + + result := ComputeSignersPublicKeys(consensusGroup, []byte{mask0, mask1}) + expected := make([]string, 0) + + assert.Equal(t, expected, result) + }) + t.Run("should compute with 14 validators, mask contains all bits set", func(t *testing.T) { + t.Parallel() + + consensusGroup := generatePubKeys(14) + mask0 := byte(0b11111111) + mask1 := byte(0b00111111) + + result := ComputeSignersPublicKeys(consensusGroup, []byte{mask0, mask1}) + + assert.Equal(t, consensusGroup, result) + }) + t.Run("should compute with 17 validators, mask contains 2 bytes", func(t *testing.T) { + t.Parallel() + + consensusGroup := generatePubKeys(17) + mask0 := byte(0b11111111) + mask1 := byte(0b11111111) + + result := ComputeSignersPublicKeys(consensusGroup, []byte{mask0, mask1}) + expected := generatePubKeys(16) + assert.Equal(t, expected, result) + }) +} diff --git a/process/headerCheck/headerSignatureVerify.go b/process/headerCheck/headerSignatureVerify.go index 999bc82e881..308af919366 100644 --- 
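The two helpers added in process/headerCheck/common.go read the signers bitmap least-significant bit first within each byte: bit i (bitmap[i/8], position i%8) selects consensusGroup[i], and indices beyond the bitmap's bit length are ignored, which is what the 16-, 14- and 17-validator cases above exercise. A short usage sketch of ComputeSignersPublicKeys with made-up keys and bitmap values:

package main

import (
	"fmt"

	"github.com/multiversx/mx-chain-go/process/headerCheck"
)

func main() {
	consensusGroup := []string{"pk0", "pk1", "pk2", "pk3", "pk4", "pk5", "pk6", "pk7"}

	// 0b00100101 has bits 0, 2 and 5 set, so validators 0, 2 and 5 are reported as signers
	signers := headerCheck.ComputeSignersPublicKeys(consensusGroup, []byte{0b00100101})

	fmt.Println(signers) // [pk0 pk2 pk5]
}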
a/process/headerCheck/headerSignatureVerify.go +++ b/process/headerCheck/headerSignatureVerify.go @@ -30,7 +30,7 @@ type ArgsHeaderSigVerifier struct { FallbackHeaderValidator process.FallbackHeaderValidator } -//HeaderSigVerifier is component used to check if a header is valid +// HeaderSigVerifier is component used to check if a header is valid type HeaderSigVerifier struct { marshalizer marshal.Marshalizer hasher hashing.Hasher @@ -301,15 +301,7 @@ func (hsv *HeaderSigVerifier) verifyLeaderSignature(leaderPubKey crypto.PublicKe } func (hsv *HeaderSigVerifier) getLeader(header data.HeaderHandler) (crypto.PublicKey, error) { - prevRandSeed := header.GetPrevRandSeed() - - // TODO: remove if start of epoch block needs to be validated by the new epoch nodes - epoch := header.GetEpoch() - if header.IsStartOfEpochBlock() && epoch > 0 { - epoch = epoch - 1 - } - - headerConsensusGroup, err := hsv.nodesCoordinator.ComputeConsensusGroup(prevRandSeed, header.GetRound(), header.GetShardID(), epoch) + headerConsensusGroup, err := ComputeConsensusGroup(header, hsv.nodesCoordinator) if err != nil { return nil, err } diff --git a/process/interface.go b/process/interface.go index ee86ee3302c..69b1b139e89 100644 --- a/process/interface.go +++ b/process/interface.go @@ -287,9 +287,9 @@ type ValidatorStatisticsProcessor interface { Process(shardValidatorInfo data.ShardValidatorInfoHandler) error IsInterfaceNil() bool RootHash() ([]byte, error) - ResetValidatorStatisticsAtNewEpoch(vInfos map[uint32][]*state.ValidatorInfo) error - GetValidatorInfoForRootHash(rootHash []byte) (map[uint32][]*state.ValidatorInfo, error) - ProcessRatingsEndOfEpoch(validatorInfos map[uint32][]*state.ValidatorInfo, epoch uint32) error + ResetValidatorStatisticsAtNewEpoch(vInfos state.ShardValidatorsInfoMapHandler) error + GetValidatorInfoForRootHash(rootHash []byte) (state.ShardValidatorsInfoMapHandler, error) + ProcessRatingsEndOfEpoch(validatorInfos state.ShardValidatorsInfoMapHandler, epoch uint32) error Commit() ([]byte, error) DisplayRatings(epoch uint32) SetLastFinalizedRootHash([]byte) @@ -318,6 +318,8 @@ type TransactionLogProcessorDatabase interface { // ValidatorsProvider is the main interface for validators' provider type ValidatorsProvider interface { GetLatestValidators() map[string]*validator.ValidatorStatistics + GetAuctionList() ([]*common.AuctionListValidatorAPIResponse, error) + ForceUpdate() error IsInterfaceNil() bool Close() error } @@ -945,10 +947,10 @@ type EpochStartDataCreator interface { // RewardsCreator defines the functionality for the metachain to create rewards at end of epoch type RewardsCreator interface { CreateRewardsMiniBlocks( - metaBlock data.MetaHeaderHandler, validatorsInfo map[uint32][]*state.ValidatorInfo, computedEconomics *block.Economics, + metaBlock data.MetaHeaderHandler, validatorsInfo state.ShardValidatorsInfoMapHandler, computedEconomics *block.Economics, ) (block.MiniBlockSlice, error) VerifyRewardsMiniBlocks( - metaBlock data.MetaHeaderHandler, validatorsInfo map[uint32][]*state.ValidatorInfo, computedEconomics *block.Economics, + metaBlock data.MetaHeaderHandler, validatorsInfo state.ShardValidatorsInfoMapHandler, computedEconomics *block.Economics, ) error GetProtocolSustainabilityRewards() *big.Int GetLocalTxCache() epochStart.TransactionCacher @@ -962,8 +964,8 @@ type RewardsCreator interface { // EpochStartValidatorInfoCreator defines the functionality for the metachain to create validator statistics at end of epoch type EpochStartValidatorInfoCreator interface { - 
CreateValidatorInfoMiniBlocks(validatorInfo map[uint32][]*state.ValidatorInfo) (block.MiniBlockSlice, error) - VerifyValidatorInfoMiniBlocks(miniBlocks []*block.MiniBlock, validatorsInfo map[uint32][]*state.ValidatorInfo) error + CreateValidatorInfoMiniBlocks(validatorInfo state.ShardValidatorsInfoMapHandler) (block.MiniBlockSlice, error) + VerifyValidatorInfoMiniBlocks(miniBlocks []*block.MiniBlock, validatorsInfo state.ShardValidatorsInfoMapHandler) error GetLocalValidatorInfoCache() epochStart.ValidatorInfoCacher CreateMarshalledData(body *block.Body) map[string][][]byte GetValidatorInfoTxs(body *block.Body) map[string]*state.ShardValidatorInfo @@ -975,7 +977,10 @@ type EpochStartValidatorInfoCreator interface { // EpochStartSystemSCProcessor defines the functionality for the metachain to process system smart contract and end of epoch type EpochStartSystemSCProcessor interface { - ProcessSystemSmartContract(validatorInfos map[uint32][]*state.ValidatorInfo, nonce uint64, epoch uint32) error + ProcessSystemSmartContract( + validatorsInfoMap state.ShardValidatorsInfoMapHandler, + header data.HeaderHandler, + ) error ProcessDelegationRewards( miniBlocks block.MiniBlockSlice, rewardTxs epochStart.TransactionCacher, @@ -1345,3 +1350,11 @@ type Debugger interface { Close() error IsInterfaceNil() bool } + +// SentSignaturesTracker defines a component able to handle sent signature from self +type SentSignaturesTracker interface { + StartRound() + SignatureSent(pkBytes []byte) + ResetCountersForManagedBlockSigner(signerPk []byte) + IsInterfaceNil() bool +} diff --git a/process/mock/builtInCostHandlerStub.go b/process/mock/builtInCostHandlerStub.go deleted file mode 100644 index 4ee3b23b062..00000000000 --- a/process/mock/builtInCostHandlerStub.go +++ /dev/null @@ -1,24 +0,0 @@ -package mock - -import ( - "github.com/multiversx/mx-chain-core-go/data" -) - -// BuiltInCostHandlerStub - -type BuiltInCostHandlerStub struct { -} - -// ComputeBuiltInCost - -func (b *BuiltInCostHandlerStub) ComputeBuiltInCost(_ data.TransactionWithFeeHandler) uint64 { - return 1 -} - -// IsBuiltInFuncCall - -func (b *BuiltInCostHandlerStub) IsBuiltInFuncCall(_ data.TransactionWithFeeHandler) bool { - return false -} - -// IsInterfaceNil - -func (b *BuiltInCostHandlerStub) IsInterfaceNil() bool { - return b == nil -} diff --git a/process/mock/epochEconomicsStub.go b/process/mock/epochEconomicsStub.go index 99e8b0dd359..7a65f7c3fcf 100644 --- a/process/mock/epochEconomicsStub.go +++ b/process/mock/epochEconomicsStub.go @@ -19,7 +19,9 @@ func (e *EpochEconomicsStub) ComputeEndOfEpochEconomics(metaBlock *block.MetaBlo if e.ComputeEndOfEpochEconomicsCalled != nil { return e.ComputeEndOfEpochEconomicsCalled(metaBlock) } - return &block.Economics{}, nil + return &block.Economics{ + RewardsForProtocolSustainability: big.NewInt(0), + }, nil } // VerifyRewardsPerBlock - diff --git a/process/mock/epochRewardsCreatorStub.go b/process/mock/epochRewardsCreatorStub.go deleted file mode 100644 index ce17c1e636a..00000000000 --- a/process/mock/epochRewardsCreatorStub.go +++ /dev/null @@ -1,109 +0,0 @@ -package mock - -import ( - "math/big" - - "github.com/multiversx/mx-chain-core-go/data" - "github.com/multiversx/mx-chain-core-go/data/block" - "github.com/multiversx/mx-chain-go/epochStart" - "github.com/multiversx/mx-chain-go/state" -) - -// EpochRewardsCreatorStub - -type EpochRewardsCreatorStub struct { - CreateRewardsMiniBlocksCalled func( - metaBlock data.MetaHeaderHandler, validatorsInfo map[uint32][]*state.ValidatorInfo, 
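The SentSignaturesTracker interface added to process/interface.go above only declares the contract; its implementation is not part of this hunk. Below is a hypothetical in-memory stub that satisfies the four methods and could serve as a test double; every name in it is illustrative and not taken from the repository.

package mytests

import "sync"

// sentSignaturesTrackerStub is a hypothetical in-memory double for the
// SentSignaturesTracker interface declared above; the real component is not
// part of this hunk, so only the method set is mirrored here.
type sentSignaturesTrackerStub struct {
	mut  sync.Mutex
	sent map[string]int
}

func newSentSignaturesTrackerStub() *sentSignaturesTrackerStub {
	return &sentSignaturesTrackerStub{sent: make(map[string]int)}
}

// StartRound resets the per-round state.
func (s *sentSignaturesTrackerStub) StartRound() {
	s.mut.Lock()
	s.sent = make(map[string]int)
	s.mut.Unlock()
}

// SignatureSent records a signature sent with the given public key.
func (s *sentSignaturesTrackerStub) SignatureSent(pkBytes []byte) {
	s.mut.Lock()
	s.sent[string(pkBytes)]++
	s.mut.Unlock()
}

// ResetCountersForManagedBlockSigner drops the counters for one signer key.
func (s *sentSignaturesTrackerStub) ResetCountersForManagedBlockSigner(signerPk []byte) {
	s.mut.Lock()
	delete(s.sent, string(signerPk))
	s.mut.Unlock()
}

// IsInterfaceNil returns true if there is no value under the interface.
func (s *sentSignaturesTrackerStub) IsInterfaceNil() bool {
	return s == nil
}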
computedEconomics *block.Economics, - ) (block.MiniBlockSlice, error) - VerifyRewardsMiniBlocksCalled func( - metaBlock data.MetaHeaderHandler, validatorsInfo map[uint32][]*state.ValidatorInfo, computedEconomics *block.Economics, - ) error - CreateMarshalledDataCalled func(body *block.Body) map[string][][]byte - SaveBlockDataToStorageCalled func(metaBlock data.MetaHeaderHandler, body *block.Body) - DeleteBlockDataFromStorageCalled func(metaBlock data.MetaHeaderHandler, body *block.Body) - RemoveBlockDataFromPoolsCalled func(metaBlock data.MetaHeaderHandler, body *block.Body) - GetRewardsTxsCalled func(body *block.Body) map[string]data.TransactionHandler - GetProtocolSustainCalled func() *big.Int - GetLocalTxCacheCalled func() epochStart.TransactionCacher -} - -// GetProtocolSustainabilityRewards - -func (e *EpochRewardsCreatorStub) GetProtocolSustainabilityRewards() *big.Int { - if e.GetProtocolSustainCalled != nil { - return e.GetProtocolSustainCalled() - } - return big.NewInt(0) -} - -// GetLocalTxCache - -func (e *EpochRewardsCreatorStub) GetLocalTxCache() epochStart.TransactionCacher { - if e.GetLocalTxCacheCalled != nil { - return e.GetLocalTxCacheCalled() - } - return &TxForCurrentBlockStub{} -} - -// CreateRewardsMiniBlocks - -func (e *EpochRewardsCreatorStub) CreateRewardsMiniBlocks( - metaBlock data.MetaHeaderHandler, - validatorsInfo map[uint32][]*state.ValidatorInfo, - computedEconomics *block.Economics, -) (block.MiniBlockSlice, error) { - if e.CreateRewardsMiniBlocksCalled != nil { - return e.CreateRewardsMiniBlocksCalled(metaBlock, validatorsInfo, computedEconomics) - } - return nil, nil -} - -// VerifyRewardsMiniBlocks - -func (e *EpochRewardsCreatorStub) VerifyRewardsMiniBlocks( - metaBlock data.MetaHeaderHandler, - validatorsInfo map[uint32][]*state.ValidatorInfo, - computedEconomics *block.Economics, -) error { - if e.VerifyRewardsMiniBlocksCalled != nil { - return e.VerifyRewardsMiniBlocksCalled(metaBlock, validatorsInfo, computedEconomics) - } - return nil -} - -// CreateMarshalledData - -func (e *EpochRewardsCreatorStub) CreateMarshalledData(body *block.Body) map[string][][]byte { - if e.CreateMarshalledDataCalled != nil { - return e.CreateMarshalledDataCalled(body) - } - return nil -} - -// GetRewardsTxs - -func (e *EpochRewardsCreatorStub) GetRewardsTxs(body *block.Body) map[string]data.TransactionHandler { - if e.GetRewardsTxsCalled != nil { - return e.GetRewardsTxsCalled(body) - } - return nil -} - -// SaveBlockDataToStorage - -func (e *EpochRewardsCreatorStub) SaveBlockDataToStorage(metaBlock data.MetaHeaderHandler, body *block.Body) { - if e.SaveBlockDataToStorageCalled != nil { - e.SaveBlockDataToStorageCalled(metaBlock, body) - } -} - -// DeleteBlockDataFromStorage - -func (e *EpochRewardsCreatorStub) DeleteBlockDataFromStorage(metaBlock data.MetaHeaderHandler, body *block.Body) { - if e.DeleteBlockDataFromStorageCalled != nil { - e.DeleteBlockDataFromStorageCalled(metaBlock, body) - } -} - -// IsInterfaceNil - -func (e *EpochRewardsCreatorStub) IsInterfaceNil() bool { - return e == nil -} - -// RemoveBlockDataFromPools - -func (e *EpochRewardsCreatorStub) RemoveBlockDataFromPools(metaBlock data.MetaHeaderHandler, body *block.Body) { - if e.RemoveBlockDataFromPoolsCalled != nil { - e.RemoveBlockDataFromPoolsCalled(metaBlock, body) - } -} diff --git a/process/mock/epochStartDataCreatorStub.go b/process/mock/epochStartDataCreatorStub.go index 1cbfccaec5b..dd38c5a1198 100644 --- a/process/mock/epochStartDataCreatorStub.go +++ 
b/process/mock/epochStartDataCreatorStub.go @@ -1,6 +1,9 @@ package mock -import "github.com/multiversx/mx-chain-core-go/data/block" +import ( + "github.com/multiversx/mx-chain-core-go/data/block" + "math/big" +) // EpochStartDataCreatorStub - type EpochStartDataCreatorStub struct { @@ -13,7 +16,11 @@ func (e *EpochStartDataCreatorStub) CreateEpochStartData() (*block.EpochStart, e if e.CreateEpochStartDataCalled != nil { return e.CreateEpochStartDataCalled() } - return &block.EpochStart{}, nil + return &block.EpochStart{ + LastFinalizedHeaders: []block.EpochStartShardData{{}}, + Economics: block.Economics{ + RewardsForProtocolSustainability: big.NewInt(0)}, + }, nil } // VerifyEpochStartDataForMetablock - diff --git a/process/mock/epochStartSystemSCStub.go b/process/mock/epochStartSystemSCStub.go deleted file mode 100644 index fd2c92553cf..00000000000 --- a/process/mock/epochStartSystemSCStub.go +++ /dev/null @@ -1,46 +0,0 @@ -package mock - -import ( - "github.com/multiversx/mx-chain-core-go/data/block" - "github.com/multiversx/mx-chain-go/epochStart" - "github.com/multiversx/mx-chain-go/state" -) - -// EpochStartSystemSCStub - -type EpochStartSystemSCStub struct { - ProcessSystemSmartContractCalled func(validatorInfos map[uint32][]*state.ValidatorInfo, nonce uint64, epoch uint32) error - ProcessDelegationRewardsCalled func(miniBlocks block.MiniBlockSlice, txCache epochStart.TransactionCacher) error - ToggleUnStakeUnBondCalled func(value bool) error -} - -// ToggleUnStakeUnBond - -func (e *EpochStartSystemSCStub) ToggleUnStakeUnBond(value bool) error { - if e.ToggleUnStakeUnBondCalled != nil { - return e.ToggleUnStakeUnBondCalled(value) - } - return nil -} - -// ProcessSystemSmartContract - -func (e *EpochStartSystemSCStub) ProcessSystemSmartContract(validatorInfos map[uint32][]*state.ValidatorInfo, nonce uint64, epoch uint32) error { - if e.ProcessSystemSmartContractCalled != nil { - return e.ProcessSystemSmartContractCalled(validatorInfos, nonce, epoch) - } - return nil -} - -// ProcessDelegationRewards - -func (e *EpochStartSystemSCStub) ProcessDelegationRewards( - miniBlocks block.MiniBlockSlice, - txCache epochStart.TransactionCacher, -) error { - if e.ProcessDelegationRewardsCalled != nil { - return e.ProcessDelegationRewardsCalled(miniBlocks, txCache) - } - return nil -} - -// IsInterfaceNil - -func (e *EpochStartSystemSCStub) IsInterfaceNil() bool { - return e == nil -} diff --git a/process/mock/epochValidatorInfoCreatorStub.go b/process/mock/epochValidatorInfoCreatorStub.go deleted file mode 100644 index 445d305596e..00000000000 --- a/process/mock/epochValidatorInfoCreatorStub.go +++ /dev/null @@ -1,86 +0,0 @@ -package mock - -import ( - "github.com/multiversx/mx-chain-core-go/data" - "github.com/multiversx/mx-chain-core-go/data/block" - "github.com/multiversx/mx-chain-go/epochStart" - "github.com/multiversx/mx-chain-go/state" -) - -// EpochValidatorInfoCreatorStub - -type EpochValidatorInfoCreatorStub struct { - CreateValidatorInfoMiniBlocksCalled func(validatorsInfo map[uint32][]*state.ValidatorInfo) (block.MiniBlockSlice, error) - VerifyValidatorInfoMiniBlocksCalled func(miniblocks []*block.MiniBlock, validatorsInfo map[uint32][]*state.ValidatorInfo) error - GetLocalValidatorInfoCacheCalled func() epochStart.ValidatorInfoCacher - CreateMarshalledDataCalled func(body *block.Body) map[string][][]byte - GetValidatorInfoTxsCalled func(body *block.Body) map[string]*state.ShardValidatorInfo - SaveBlockDataToStorageCalled func(metaBlock data.HeaderHandler, body *block.Body) - 
DeleteBlockDataFromStorageCalled func(metaBlock data.HeaderHandler, body *block.Body) - RemoveBlockDataFromPoolsCalled func(metaBlock data.HeaderHandler, body *block.Body) -} - -// CreateValidatorInfoMiniBlocks - -func (e *EpochValidatorInfoCreatorStub) CreateValidatorInfoMiniBlocks(validatorInfo map[uint32][]*state.ValidatorInfo) (block.MiniBlockSlice, error) { - if e.CreateValidatorInfoMiniBlocksCalled != nil { - return e.CreateValidatorInfoMiniBlocksCalled(validatorInfo) - } - return make(block.MiniBlockSlice, 0), nil -} - -// VerifyValidatorInfoMiniBlocks - -func (e *EpochValidatorInfoCreatorStub) VerifyValidatorInfoMiniBlocks(miniBlocks []*block.MiniBlock, validatorsInfo map[uint32][]*state.ValidatorInfo) error { - if e.VerifyValidatorInfoMiniBlocksCalled != nil { - return e.VerifyValidatorInfoMiniBlocksCalled(miniBlocks, validatorsInfo) - } - return nil -} - -// GetLocalValidatorInfoCache - -func (e *EpochValidatorInfoCreatorStub) GetLocalValidatorInfoCache() epochStart.ValidatorInfoCacher { - if e.GetLocalValidatorInfoCacheCalled != nil { - return e.GetLocalValidatorInfoCacheCalled() - } - return nil -} - -// CreateMarshalledData - -func (e *EpochValidatorInfoCreatorStub) CreateMarshalledData(body *block.Body) map[string][][]byte { - if e.CreateMarshalledDataCalled != nil { - return e.CreateMarshalledDataCalled(body) - } - return nil -} - -// GetValidatorInfoTxs - -func (e *EpochValidatorInfoCreatorStub) GetValidatorInfoTxs(body *block.Body) map[string]*state.ShardValidatorInfo { - if e.GetValidatorInfoTxsCalled != nil { - return e.GetValidatorInfoTxsCalled(body) - } - return nil -} - -// SaveBlockDataToStorage - -func (e *EpochValidatorInfoCreatorStub) SaveBlockDataToStorage(metaBlock data.HeaderHandler, body *block.Body) { - if e.SaveBlockDataToStorageCalled != nil { - e.SaveBlockDataToStorageCalled(metaBlock, body) - } -} - -// DeleteBlockDataFromStorage - -func (e *EpochValidatorInfoCreatorStub) DeleteBlockDataFromStorage(metaBlock data.HeaderHandler, body *block.Body) { - if e.DeleteBlockDataFromStorageCalled != nil { - e.DeleteBlockDataFromStorageCalled(metaBlock, body) - } -} - -// IsInterfaceNil - -func (e *EpochValidatorInfoCreatorStub) IsInterfaceNil() bool { - return e == nil -} - -// RemoveBlockDataFromPools - -func (e *EpochValidatorInfoCreatorStub) RemoveBlockDataFromPools(metaBlock data.HeaderHandler, body *block.Body) { - if e.RemoveBlockDataFromPoolsCalled != nil { - e.RemoveBlockDataFromPoolsCalled(metaBlock, body) - } -} diff --git a/process/mock/nodesSetupStub.go b/process/mock/nodesSetupStub.go deleted file mode 100644 index b8e21ce5fcb..00000000000 --- a/process/mock/nodesSetupStub.go +++ /dev/null @@ -1,188 +0,0 @@ -package mock - -import "github.com/multiversx/mx-chain-go/sharding/nodesCoordinator" - -// NodesSetupStub - -type NodesSetupStub struct { - InitialNodesInfoForShardCalled func(shardId uint32) ([]nodesCoordinator.GenesisNodeInfoHandler, []nodesCoordinator.GenesisNodeInfoHandler, error) - InitialNodesInfoCalled func() (map[uint32][]nodesCoordinator.GenesisNodeInfoHandler, map[uint32][]nodesCoordinator.GenesisNodeInfoHandler) - GetStartTimeCalled func() int64 - GetRoundDurationCalled func() uint64 - GetShardConsensusGroupSizeCalled func() uint32 - GetMetaConsensusGroupSizeCalled func() uint32 - NumberOfShardsCalled func() uint32 - MinNumberOfNodesCalled func() uint32 - AllInitialNodesCalled func() []nodesCoordinator.GenesisNodeInfoHandler - GetAdaptivityCalled func() bool - GetHysteresisCalled func() float32 - GetShardIDForPubKeyCalled func(pubkey 
[]byte) (uint32, error) - InitialEligibleNodesPubKeysForShardCalled func(shardId uint32) ([]string, error) - InitialNodesPubKeysCalled func() map[uint32][]string - MinNumberOfMetaNodesCalled func() uint32 - MinNumberOfShardNodesCalled func() uint32 - MinNumberOfNodesWithHysteresisCalled func() uint32 - MinShardHysteresisNodesCalled func() uint32 - MinMetaHysteresisNodesCalled func() uint32 -} - -// MinNumberOfNodes - -func (n *NodesSetupStub) MinNumberOfNodes() uint32 { - if n.MinNumberOfNodesCalled != nil { - return n.MinNumberOfNodesCalled() - } - return 1 -} - -// GetStartTime - -func (n *NodesSetupStub) GetStartTime() int64 { - if n.GetStartTimeCalled != nil { - return n.GetStartTimeCalled() - } - return 0 -} - -// GetRoundDuration - -func (n *NodesSetupStub) GetRoundDuration() uint64 { - if n.GetRoundDurationCalled != nil { - return n.GetRoundDurationCalled() - } - return 0 -} - -// GetMetaConsensusGroupSize - -func (n *NodesSetupStub) GetMetaConsensusGroupSize() uint32 { - if n.GetMetaConsensusGroupSizeCalled != nil { - return n.GetMetaConsensusGroupSizeCalled() - } - return 0 -} - -func (n *NodesSetupStub) GetShardConsensusGroupSize() uint32 { - if n.GetMetaConsensusGroupSizeCalled != nil { - return n.GetShardConsensusGroupSizeCalled() - } - return 0 -} - -// NumberOfShards - -func (n *NodesSetupStub) NumberOfShards() uint32 { - if n.NumberOfShardsCalled != nil { - return n.NumberOfShardsCalled() - } - return 0 -} - -// InitialNodesInfoForShard - -func (n *NodesSetupStub) InitialNodesInfoForShard(shardId uint32) ([]nodesCoordinator.GenesisNodeInfoHandler, []nodesCoordinator.GenesisNodeInfoHandler, error) { - if n.InitialNodesInfoForShardCalled != nil { - return n.InitialNodesInfoForShardCalled(shardId) - } - return nil, nil, nil -} - -// InitialNodesInfo - -func (n *NodesSetupStub) InitialNodesInfo() (map[uint32][]nodesCoordinator.GenesisNodeInfoHandler, map[uint32][]nodesCoordinator.GenesisNodeInfoHandler) { - if n.InitialNodesInfoCalled != nil { - return n.InitialNodesInfoCalled() - } - return nil, nil -} - -// AllInitialNodes - -func (n *NodesSetupStub) AllInitialNodes() []nodesCoordinator.GenesisNodeInfoHandler { - if n.AllInitialNodesCalled != nil { - return n.AllInitialNodesCalled() - } - return nil -} - -// GetAdaptivity - -func (n *NodesSetupStub) GetAdaptivity() bool { - if n.GetAdaptivityCalled != nil { - return n.GetAdaptivityCalled() - } - - return false -} - -// GetHysteresis - -func (n *NodesSetupStub) GetHysteresis() float32 { - if n.GetHysteresisCalled != nil { - return n.GetHysteresisCalled() - } - - return 0 -} - -// GetShardIDForPubKey - -func (n *NodesSetupStub) GetShardIDForPubKey(pubkey []byte) (uint32, error) { - if n.GetShardIDForPubKeyCalled != nil { - return n.GetShardIDForPubKeyCalled(pubkey) - } - return 0, nil -} - -// InitialEligibleNodesPubKeysForShard - -func (n *NodesSetupStub) InitialEligibleNodesPubKeysForShard(shardId uint32) ([]string, error) { - if n.InitialEligibleNodesPubKeysForShardCalled != nil { - return n.InitialEligibleNodesPubKeysForShardCalled(shardId) - } - - return []string{"val1", "val2"}, nil -} - -// InitialNodesPubKeys - -func (n *NodesSetupStub) InitialNodesPubKeys() map[uint32][]string { - if n.InitialNodesPubKeysCalled != nil { - return n.InitialNodesPubKeysCalled() - } - - return map[uint32][]string{0: {"val1", "val2"}} -} - -// MinNumberOfMetaNodes - -func (n *NodesSetupStub) MinNumberOfMetaNodes() uint32 { - if n.MinNumberOfMetaNodesCalled != nil { - return n.MinNumberOfMetaNodesCalled() - } - - return 1 -} - -// 
MinNumberOfShardNodes - -func (n *NodesSetupStub) MinNumberOfShardNodes() uint32 { - if n.MinNumberOfShardNodesCalled != nil { - return n.MinNumberOfShardNodesCalled() - } - - return 1 -} - -// MinNumberOfNodesWithHysteresis - -func (n *NodesSetupStub) MinNumberOfNodesWithHysteresis() uint32 { - if n.MinNumberOfNodesWithHysteresisCalled != nil { - return n.MinNumberOfNodesWithHysteresisCalled() - } - return n.MinNumberOfNodes() -} - -// MinShardHysteresisNodes - -func (n *NodesSetupStub) MinShardHysteresisNodes() uint32 { - if n.MinShardHysteresisNodesCalled != nil { - return n.MinShardHysteresisNodesCalled() - } - return 1 -} - -// MinMetaHysteresisNodes - -func (n *NodesSetupStub) MinMetaHysteresisNodes() uint32 { - if n.MinMetaHysteresisNodesCalled != nil { - return n.MinMetaHysteresisNodesCalled() - } - return 1 -} - -// IsInterfaceNil - -func (n *NodesSetupStub) IsInterfaceNil() bool { - return n == nil -} diff --git a/process/mock/transactionSimulatorStub.go b/process/mock/transactionSimulatorStub.go index 70363230936..971cda66d04 100644 --- a/process/mock/transactionSimulatorStub.go +++ b/process/mock/transactionSimulatorStub.go @@ -1,19 +1,20 @@ package mock import ( + "github.com/multiversx/mx-chain-core-go/data" "github.com/multiversx/mx-chain-core-go/data/transaction" txSimData "github.com/multiversx/mx-chain-go/process/transactionEvaluator/data" ) // TransactionSimulatorStub - type TransactionSimulatorStub struct { - ProcessTxCalled func(tx *transaction.Transaction) (*txSimData.SimulationResultsWithVMOutput, error) + ProcessTxCalled func(tx *transaction.Transaction, currentHeader data.HeaderHandler) (*txSimData.SimulationResultsWithVMOutput, error) } // ProcessTx - -func (tss *TransactionSimulatorStub) ProcessTx(tx *transaction.Transaction) (*txSimData.SimulationResultsWithVMOutput, error) { +func (tss *TransactionSimulatorStub) ProcessTx(tx *transaction.Transaction, currentHeader data.HeaderHandler) (*txSimData.SimulationResultsWithVMOutput, error) { if tss.ProcessTxCalled != nil { - return tss.ProcessTxCalled(tx) + return tss.ProcessTxCalled(tx, currentHeader) } return nil, nil diff --git a/process/mock/validatorsProviderStub.go b/process/mock/validatorsProviderStub.go deleted file mode 100644 index 98ea652340b..00000000000 --- a/process/mock/validatorsProviderStub.go +++ /dev/null @@ -1,28 +0,0 @@ -package mock - -import ( - "github.com/multiversx/mx-chain-core-go/data/validator" -) - -// ValidatorsProviderStub - -type ValidatorsProviderStub struct { - GetLatestValidatorsCalled func() map[string]*validator.ValidatorStatistics -} - -// GetLatestValidators - -func (vp *ValidatorsProviderStub) GetLatestValidators() map[string]*validator.ValidatorStatistics { - if vp.GetLatestValidatorsCalled != nil { - return vp.GetLatestValidatorsCalled() - } - return nil -} - -// Close - -func (vp *ValidatorsProviderStub) Close() error { - return nil -} - -// IsInterfaceNil - -func (vp *ValidatorsProviderStub) IsInterfaceNil() bool { - return vp == nil -} diff --git a/process/peer/interface.go b/process/peer/interface.go index 94377bfdd53..2a8a447e694 100644 --- a/process/peer/interface.go +++ b/process/peer/interface.go @@ -2,6 +2,8 @@ package peer import ( "github.com/multiversx/mx-chain-go/dataRetriever" + "github.com/multiversx/mx-chain-go/epochStart" + "github.com/multiversx/mx-chain-go/state" ) // DataPool indicates the main functionality needed in order to fetch the required blocks from the pool @@ -9,3 +11,12 @@ type DataPool interface { Headers() dataRetriever.HeadersPool 
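ProcessTx on the transaction simulator now also receives the current header, so stubs configured in tests have to accept both arguments. A sketch of wiring the updated TransactionSimulatorStub in a test; the test name and asserted behaviour are illustrative, while the types and import paths are the ones appearing in this patch.

package mytests

import (
	"testing"

	"github.com/multiversx/mx-chain-core-go/data"
	"github.com/multiversx/mx-chain-core-go/data/block"
	"github.com/multiversx/mx-chain-core-go/data/transaction"
	"github.com/multiversx/mx-chain-go/process/mock"
	txSimData "github.com/multiversx/mx-chain-go/process/transactionEvaluator/data"
	"github.com/stretchr/testify/require"
)

func TestTransactionSimulatorStubReceivesHeader(t *testing.T) {
	stub := &mock.TransactionSimulatorStub{
		ProcessTxCalled: func(tx *transaction.Transaction, currentHeader data.HeaderHandler) (*txSimData.SimulationResultsWithVMOutput, error) {
			// the header is now available to the simulation, e.g. for round/epoch-dependent logic
			require.NotNil(t, currentHeader)
			return nil, nil
		},
	}

	_, err := stub.ProcessTx(&transaction.Transaction{}, &block.Header{Round: 37})
	require.Nil(t, err)
}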
IsInterfaceNil() bool } + +// StakingDataProviderAPI is able to provide staking data from the system smart contracts +type StakingDataProviderAPI interface { + ComputeUnQualifiedNodes(validatorInfos state.ShardValidatorsInfoMapHandler) ([][]byte, map[string][][]byte, error) + FillValidatorInfo(validator state.ValidatorInfoHandler) error + GetOwnersData() map[string]*epochStart.OwnerData + Clean() + IsInterfaceNil() bool +} diff --git a/process/peer/process.go b/process/peer/process.go index 5c3364fe5f7..4c04de6a25d 100644 --- a/process/peer/process.go +++ b/process/peer/process.go @@ -196,6 +196,18 @@ func (vs *validatorStatistics) saveNodesCoordinatorUpdates(epoch uint32) (bool, } nodeForcedToRemain = nodeForcedToRemain || tmpNodeForcedToRemain + if vs.enableEpochsHandler.IsFlagEnabled(common.StakingV4Step2Flag) { + nodesMap, err = vs.nodesCoordinator.GetAllShuffledOutValidatorsPublicKeys(epoch) + if err != nil { + return false, err + } + + _, err = vs.saveUpdatesForNodesMap(nodesMap, common.AuctionList) + if err != nil { + return false, err + } + } + return nodeForcedToRemain, nil } @@ -238,12 +250,16 @@ func (vs *validatorStatistics) saveUpdatesForList( isNodeLeaving := (peerType == common.WaitingList || peerType == common.EligibleList) && peerAcc.GetList() == string(common.LeavingList) isNodeWithLowRating := vs.isValidatorWithLowRating(peerAcc) isNodeJailed := vs.enableEpochsHandler.IsFlagEnabled(common.SwitchJailWaitingFlag) && peerType == common.InactiveList && isNodeWithLowRating + isStakingV4Started := vs.enableEpochsHandler.IsFlagEnabled(common.StakingV4StartedFlag) if isNodeJailed { - peerAcc.SetListAndIndex(shardID, string(common.JailedList), uint32(index)) + peerAcc.SetListAndIndex(shardID, string(common.JailedList), uint32(index), isStakingV4Started) } else if isNodeLeaving { - peerAcc.SetListAndIndex(shardID, string(common.LeavingList), uint32(index)) + peerAcc.SetListAndIndex(shardID, string(common.LeavingList), uint32(index), isStakingV4Started) + if isStakingV4Started { + peerAcc.SetPreviousList(string(peerType)) + } } else { - peerAcc.SetListAndIndex(shardID, string(peerType), uint32(index)) + peerAcc.SetListAndIndex(shardID, string(peerType), uint32(index), isStakingV4Started) } err = vs.peerAdapter.SaveAccount(peerAcc) @@ -444,23 +460,19 @@ func (vs *validatorStatistics) RootHash() ([]byte, error) { func (vs *validatorStatistics) getValidatorDataFromLeaves( leavesChannels *common.TrieIteratorChannels, -) (map[uint32][]*state.ValidatorInfo, error) { - - validators := make(map[uint32][]*state.ValidatorInfo, vs.shardCoordinator.NumberOfShards()+1) - for i := uint32(0); i < vs.shardCoordinator.NumberOfShards(); i++ { - validators[i] = make([]*state.ValidatorInfo, 0) - } - validators[core.MetachainShardId] = make([]*state.ValidatorInfo, 0) - +) (state.ShardValidatorsInfoMapHandler, error) { + validators := state.NewShardValidatorsInfoMap() for pa := range leavesChannels.LeavesChan { peerAccount, err := vs.unmarshalPeer(pa) if err != nil { return nil, err } - currentShardId := peerAccount.GetShardId() validatorInfoData := vs.PeerAccountToValidatorInfo(peerAccount) - validators[currentShardId] = append(validators[currentShardId], validatorInfoData) + err = validators.Add(validatorInfoData) + if err != nil { + return nil, err + } } err := leavesChannels.ErrChan.ReadFromChanNonBlocking() @@ -503,7 +515,9 @@ func (vs *validatorStatistics) PeerAccountToValidatorInfo(peerAccount state.Peer PublicKey: peerAccount.AddressBytes(), ShardId: peerAccount.GetShardId(), List: list, + 
PreviousList: peerAccount.GetPreviousList(), Index: peerAccount.GetIndexInList(), + PreviousIndex: peerAccount.GetPreviousIndexInList(), TempRating: peerAccount.GetTempRating(), Rating: peerAccount.GetRating(), RatingModifier: ratingModifier, @@ -555,7 +569,7 @@ func (vs *validatorStatistics) jailValidatorIfBadRatingAndInactive(validatorAcco return } - validatorAccount.SetListAndIndex(validatorAccount.GetShardId(), string(common.JailedList), validatorAccount.GetIndexInList()) + validatorAccount.SetListAndIndex(validatorAccount.GetShardId(), string(common.JailedList), validatorAccount.GetIndexInList(), vs.enableEpochsHandler.IsFlagEnabled(common.StakingV4StartedFlag)) } func (vs *validatorStatistics) unmarshalPeer(peerAccountData core.KeyValueHolder) (state.PeerAccountHandler, error) { @@ -571,7 +585,7 @@ func (vs *validatorStatistics) unmarshalPeer(peerAccountData core.KeyValueHolder } // GetValidatorInfoForRootHash returns all the peer accounts from the trie with the given rootHash -func (vs *validatorStatistics) GetValidatorInfoForRootHash(rootHash []byte) (map[uint32][]*state.ValidatorInfo, error) { +func (vs *validatorStatistics) GetValidatorInfoForRootHash(rootHash []byte) (state.ShardValidatorsInfoMapHandler, error) { sw := core.NewStopWatch() sw.Start("GetValidatorInfoForRootHash") defer func() { @@ -598,10 +612,10 @@ func (vs *validatorStatistics) GetValidatorInfoForRootHash(rootHash []byte) (map // ProcessRatingsEndOfEpoch makes end of epoch process on the rating func (vs *validatorStatistics) ProcessRatingsEndOfEpoch( - validatorInfos map[uint32][]*state.ValidatorInfo, + validatorInfos state.ShardValidatorsInfoMapHandler, epoch uint32, ) error { - if len(validatorInfos) == 0 { + if validatorInfos == nil || len(validatorInfos.GetAllValidatorsInfo()) == 0 { return process.ErrNilValidatorInfos } @@ -610,14 +624,14 @@ func (vs *validatorStatistics) ProcessRatingsEndOfEpoch( } signedThreshold := vs.rater.GetSignedBlocksThreshold() - for shardId, validators := range validatorInfos { + for shardId, validators := range validatorInfos.GetShardValidatorsInfoMap() { for _, validator := range validators { if !vs.enableEpochsHandler.IsFlagEnabled(common.StakingV2FlagAfterEpoch) { - if validator.List != string(common.EligibleList) { + if validator.GetList() != string(common.EligibleList) { continue } } else { - if validator.List != string(common.EligibleList) && !validatorInfo.WasLeavingEligibleInCurrentEpoch(validator) { + if validator.GetList() != string(common.EligibleList) && !validatorInfo.WasLeavingEligibleInCurrentEpoch(validator) { continue } } @@ -633,7 +647,7 @@ func (vs *validatorStatistics) ProcessRatingsEndOfEpoch( } func (vs *validatorStatistics) verifySignaturesBelowSignedThreshold( - validator *state.ValidatorInfo, + validator state.ValidatorInfoHandler, signedThreshold float32, shardId uint32, epoch uint32, @@ -642,19 +656,19 @@ func (vs *validatorStatistics) verifySignaturesBelowSignedThreshold( return nil } - validatorOccurrences := core.MaxUint32(1, validator.ValidatorSuccess+validator.ValidatorFailure+validator.ValidatorIgnoredSignatures) - computedThreshold := float32(validator.ValidatorSuccess) / float32(validatorOccurrences) + validatorOccurrences := core.MaxUint32(1, validator.GetValidatorSuccess()+validator.GetValidatorFailure()+validator.GetValidatorIgnoredSignatures()) + computedThreshold := float32(validator.GetValidatorSuccess()) / float32(validatorOccurrences) if computedThreshold <= signedThreshold { increasedRatingTimes := uint32(0) if 
!vs.enableEpochsHandler.IsFlagEnabled(common.BelowSignedThresholdFlag) { - increasedRatingTimes = validator.ValidatorFailure + increasedRatingTimes = validator.GetValidatorFailure() } else { - increasedRatingTimes = validator.ValidatorSuccess + validator.ValidatorIgnoredSignatures + increasedRatingTimes = validator.GetValidatorSuccess() + validator.GetValidatorIgnoredSignatures() } - newTempRating := vs.rater.RevertIncreaseValidator(shardId, validator.TempRating, increasedRatingTimes) - pa, err := vs.loadPeerAccount(validator.PublicKey) + newTempRating := vs.rater.RevertIncreaseValidator(shardId, validator.GetTempRating(), increasedRatingTimes) + pa, err := vs.loadPeerAccount(validator.GetPublicKey()) if err != nil { return err } @@ -667,23 +681,23 @@ func (vs *validatorStatistics) verifySignaturesBelowSignedThreshold( } log.Debug("below signed blocks threshold", - "pk", validator.PublicKey, + "pk", validator.GetPublicKey(), "signed %", computedThreshold, - "validatorSuccess", validator.ValidatorSuccess, - "validatorFailure", validator.ValidatorFailure, - "validatorIgnored", validator.ValidatorIgnoredSignatures, + "validatorSuccess", validator.GetValidatorSuccess(), + "validatorFailure", validator.GetValidatorFailure(), + "validatorIgnored", validator.GetValidatorIgnoredSignatures(), "new tempRating", newTempRating, - "old tempRating", validator.TempRating, + "old tempRating", validator.GetTempRating(), ) - validator.TempRating = newTempRating + validator.SetTempRating(newTempRating) } return nil } // ResetValidatorStatisticsAtNewEpoch resets the validator info at the start of a new epoch -func (vs *validatorStatistics) ResetValidatorStatisticsAtNewEpoch(vInfos map[uint32][]*state.ValidatorInfo) error { +func (vs *validatorStatistics) ResetValidatorStatisticsAtNewEpoch(vInfos state.ShardValidatorsInfoMapHandler) error { sw := core.NewStopWatch() sw.Start("ResetValidatorStatisticsAtNewEpoch") defer func() { @@ -691,24 +705,22 @@ func (vs *validatorStatistics) ResetValidatorStatisticsAtNewEpoch(vInfos map[uin log.Debug("ResetValidatorStatisticsAtNewEpoch", sw.GetMeasurements()...) 
}() - for _, validators := range vInfos { - for _, validator := range validators { - account, err := vs.peerAdapter.LoadAccount(validator.GetPublicKey()) - if err != nil { - return err - } + for _, validator := range vInfos.GetAllValidatorsInfo() { + account, err := vs.peerAdapter.LoadAccount(validator.GetPublicKey()) + if err != nil { + return err + } - peerAccount, ok := account.(state.PeerAccountHandler) - if !ok { - return process.ErrWrongTypeAssertion - } - peerAccount.ResetAtNewEpoch() - vs.setToJailedIfNeeded(peerAccount, validator) + peerAccount, ok := account.(state.PeerAccountHandler) + if !ok { + return process.ErrWrongTypeAssertion + } + peerAccount.ResetAtNewEpoch() + vs.setToJailedIfNeeded(peerAccount, validator) - err = vs.peerAdapter.SaveAccount(peerAccount) - if err != nil { - return err - } + err = vs.peerAdapter.SaveAccount(peerAccount) + if err != nil { + return err } } @@ -717,23 +729,23 @@ func (vs *validatorStatistics) ResetValidatorStatisticsAtNewEpoch(vInfos map[uin func (vs *validatorStatistics) setToJailedIfNeeded( peerAccount state.PeerAccountHandler, - validator *state.ValidatorInfo, + validator state.ValidatorInfoHandler, ) { if !vs.enableEpochsHandler.IsFlagEnabled(common.SwitchJailWaitingFlag) { return } - if validator.List == string(common.WaitingList) || validator.List == string(common.EligibleList) { + if validator.GetList() == string(common.WaitingList) || validator.GetList() == string(common.EligibleList) { return } - if validator.List == string(common.JailedList) && peerAccount.GetList() != string(common.JailedList) { - peerAccount.SetListAndIndex(validator.ShardId, string(common.JailedList), validator.Index) + if validator.GetList() == string(common.JailedList) && peerAccount.GetList() != string(common.JailedList) { + peerAccount.SetListAndIndex(validator.GetShardId(), string(common.JailedList), validator.GetIndex(), vs.enableEpochsHandler.IsFlagEnabled(common.StakingV4StartedFlag)) return } if vs.isValidatorWithLowRating(peerAccount) { - peerAccount.SetListAndIndex(validator.ShardId, string(common.JailedList), validator.Index) + peerAccount.SetListAndIndex(validator.GetShardId(), string(common.JailedList), validator.GetIndex(), vs.enableEpochsHandler.IsFlagEnabled(common.StakingV4StartedFlag)) } } @@ -994,7 +1006,7 @@ func (vs *validatorStatistics) savePeerAccountData( peerAccount.SetRating(startRating) peerAccount.SetTempRating(startRating) - peerAccount.SetListAndIndex(shardID, string(peerType), index) + peerAccount.SetListAndIndex(shardID, string(peerType), index, vs.enableEpochsHandler.IsFlagEnabled(common.StakingV4StartedFlag)) return vs.peerAdapter.SaveAccount(peerAccount) } diff --git a/process/peer/process_test.go b/process/peer/process_test.go index daa885cff3a..69adb3e936a 100644 --- a/process/peer/process_test.go +++ b/process/peer/process_test.go @@ -10,6 +10,7 @@ import ( "testing" "github.com/multiversx/mx-chain-core-go/core" + "github.com/multiversx/mx-chain-core-go/core/atomic" "github.com/multiversx/mx-chain-core-go/core/keyValStorage" "github.com/multiversx/mx-chain-core-go/data" "github.com/multiversx/mx-chain-core-go/data/block" @@ -28,6 +29,7 @@ import ( dataRetrieverMock "github.com/multiversx/mx-chain-go/testscommon/dataRetriever" "github.com/multiversx/mx-chain-go/testscommon/enableEpochsHandlerMock" "github.com/multiversx/mx-chain-go/testscommon/epochNotifier" + "github.com/multiversx/mx-chain-go/testscommon/genesisMocks" "github.com/multiversx/mx-chain-go/testscommon/shardingMocks" stateMock 
"github.com/multiversx/mx-chain-go/testscommon/state" storageStubs "github.com/multiversx/mx-chain-go/testscommon/storage" @@ -99,10 +101,9 @@ func createMockArguments() peer.ArgValidatorStatisticsProcessor { MaxGasPriceSetGuardian: "100000", }, }, - EpochNotifier: &epochNotifier.EpochNotifierStub{}, - EnableEpochsHandler: enableEpochsHandlerMock.NewEnableEpochsHandlerStub(), - BuiltInFunctionsCostHandler: &mock.BuiltInCostHandlerStub{}, - TxVersionChecker: &testscommon.TxVersionCheckerStub{}, + EpochNotifier: &epochNotifier.EpochNotifierStub{}, + EnableEpochsHandler: enableEpochsHandlerMock.NewEnableEpochsHandlerStub(), + TxVersionChecker: &testscommon.TxVersionCheckerStub{}, } economicsData, _ := economics.NewEconomicsData(argsNewEconomicsData) @@ -122,7 +123,7 @@ func createMockArguments() peer.ArgValidatorStatisticsProcessor { RewardsHandler: economicsData, MaxComputableRounds: 1000, MaxConsecutiveRoundsOfRatingDecrease: 2000, - NodesSetup: &mock.NodesSetupStub{}, + NodesSetup: &genesisMocks.NodesSetupStub{}, EnableEpochsHandler: enableEpochsHandlerMock.NewEnableEpochsHandlerStub(common.SwitchJailWaitingFlag, common.BelowSignedThresholdFlag), } return arguments @@ -312,7 +313,7 @@ func TestValidatorStatisticsProcessor_SaveInitialStateErrOnGetAccountFail(t *tes arguments := createMockArguments() arguments.PeerAdapter = peerAdapters - arguments.NodesSetup = &mock.NodesSetupStub{InitialNodesInfoCalled: func() (m map[uint32][]nodesCoordinator.GenesisNodeInfoHandler, m2 map[uint32][]nodesCoordinator.GenesisNodeInfoHandler) { + arguments.NodesSetup = &genesisMocks.NodesSetupStub{InitialNodesInfoCalled: func() (m map[uint32][]nodesCoordinator.GenesisNodeInfoHandler, m2 map[uint32][]nodesCoordinator.GenesisNodeInfoHandler) { oneMap := make(map[uint32][]nodesCoordinator.GenesisNodeInfoHandler) oneMap[0] = append(oneMap[0], mock.NewNodeInfo([]byte("aaaa"), []byte("aaaa"), 0, 50)) return oneMap, oneMap @@ -334,7 +335,7 @@ func TestValidatorStatisticsProcessor_SaveInitialStateGetAccountReturnsInvalid(t arguments := createMockArguments() arguments.PeerAdapter = peerAdapter - arguments.NodesSetup = &mock.NodesSetupStub{InitialNodesInfoCalled: func() (m map[uint32][]nodesCoordinator.GenesisNodeInfoHandler, m2 map[uint32][]nodesCoordinator.GenesisNodeInfoHandler) { + arguments.NodesSetup = &genesisMocks.NodesSetupStub{InitialNodesInfoCalled: func() (m map[uint32][]nodesCoordinator.GenesisNodeInfoHandler, m2 map[uint32][]nodesCoordinator.GenesisNodeInfoHandler) { oneMap := make(map[uint32][]nodesCoordinator.GenesisNodeInfoHandler) oneMap[0] = append(oneMap[0], mock.NewNodeInfo([]byte("aaaa"), []byte("aaaa"), 0, 50)) return oneMap, oneMap @@ -359,7 +360,7 @@ func TestValidatorStatisticsProcessor_SaveInitialStateSetAddressErrors(t *testin } arguments := createMockArguments() - arguments.NodesSetup = &mock.NodesSetupStub{InitialNodesInfoCalled: func() (m map[uint32][]nodesCoordinator.GenesisNodeInfoHandler, m2 map[uint32][]nodesCoordinator.GenesisNodeInfoHandler) { + arguments.NodesSetup = &genesisMocks.NodesSetupStub{InitialNodesInfoCalled: func() (m map[uint32][]nodesCoordinator.GenesisNodeInfoHandler, m2 map[uint32][]nodesCoordinator.GenesisNodeInfoHandler) { oneMap := make(map[uint32][]nodesCoordinator.GenesisNodeInfoHandler) oneMap[0] = append(oneMap[0], mock.NewNodeInfo([]byte("aaaa"), []byte("aaaa"), 0, 50)) return oneMap, oneMap @@ -2073,9 +2074,9 @@ func TestValidatorStatistics_Process(t *testing.T) { validatorStatistics, _ := peer.NewValidatorStatisticsProcessor(arguments) validatorInfos, _ := 
validatorStatistics.GetValidatorInfoForRootHash(hash) - vi0 := validatorInfos[0][0] + vi0 := validatorInfos.GetShardValidatorsInfoMap()[0][0] newTempRating := uint32(25) - vi0.TempRating = newTempRating + vi0.SetTempRating(newTempRating) assert.NotEqual(t, newTempRating, pa0.GetRating()) @@ -2145,10 +2146,10 @@ func TestValidatorStatistics_GetValidatorInfoForRootHash(t *testing.T) { validatorInfos, err := validatorStatistics.GetValidatorInfoForRootHash(hash) assert.NotNil(t, validatorInfos) assert.Nil(t, err) - assert.Equal(t, uint32(0), validatorInfos[0][0].ShardId) - compare(t, pa0, validatorInfos[0][0]) - assert.Equal(t, core.MetachainShardId, validatorInfos[core.MetachainShardId][0].ShardId) - compare(t, paMeta, validatorInfos[core.MetachainShardId][0]) + assert.Equal(t, uint32(0), validatorInfos.GetShardValidatorsInfoMap()[0][0].GetShardId()) + compare(t, pa0, validatorInfos.GetShardValidatorsInfoMap()[0][0]) + assert.Equal(t, core.MetachainShardId, validatorInfos.GetShardValidatorsInfoMap()[core.MetachainShardId][0].GetShardId()) + compare(t, paMeta, validatorInfos.GetShardValidatorsInfoMap()[core.MetachainShardId][0]) }) } @@ -2161,7 +2162,7 @@ func TestValidatorStatistics_ProcessValidatorInfosEndOfEpochWithNilMapShouldErr( err := validatorStatistics.ProcessRatingsEndOfEpoch(nil, 1) assert.Equal(t, process.ErrNilValidatorInfos, err) - vi := make(map[uint32][]*state.ValidatorInfo) + vi := state.NewShardValidatorsInfoMap() err = validatorStatistics.ProcessRatingsEndOfEpoch(vi, 1) assert.Equal(t, process.ErrNilValidatorInfos, err) } @@ -2179,9 +2180,8 @@ func TestValidatorStatistics_ProcessValidatorInfosEndOfEpochWithNoValidatorFailu tempRating1 := uint32(75) tempRating2 := uint32(80) - vi := make(map[uint32][]*state.ValidatorInfo) - vi[core.MetachainShardId] = make([]*state.ValidatorInfo, 1) - vi[core.MetachainShardId][0] = &state.ValidatorInfo{ + vi := state.NewShardValidatorsInfoMap() + _ = vi.Add(&state.ValidatorInfo{ PublicKey: nil, ShardId: core.MetachainShardId, List: "", @@ -2195,12 +2195,10 @@ func TestValidatorStatistics_ProcessValidatorInfosEndOfEpochWithNoValidatorFailu ValidatorFailure: 0, NumSelectedInSuccessBlocks: 20, AccumulatedFees: nil, - } - - vi[0] = make([]*state.ValidatorInfo, 1) - vi[0][0] = &state.ValidatorInfo{ + }) + _ = vi.Add(&state.ValidatorInfo{ PublicKey: nil, - ShardId: core.MetachainShardId, + ShardId: 0, List: "", Index: 0, TempRating: tempRating2, @@ -2212,12 +2210,12 @@ func TestValidatorStatistics_ProcessValidatorInfosEndOfEpochWithNoValidatorFailu ValidatorFailure: 0, NumSelectedInSuccessBlocks: 20, AccumulatedFees: nil, - } + }) err := validatorStatistics.ProcessRatingsEndOfEpoch(vi, 1) assert.Nil(t, err) - assert.Equal(t, tempRating1, vi[core.MetachainShardId][0].TempRating) - assert.Equal(t, tempRating2, vi[0][0].TempRating) + assert.Equal(t, tempRating1, vi.GetShardValidatorsInfoMap()[core.MetachainShardId][0].GetTempRating()) + assert.Equal(t, tempRating2, vi.GetShardValidatorsInfoMap()[0][0].GetTempRating()) } func TestValidatorStatistics_ProcessValidatorInfosEndOfEpochWithSmallValidatorFailureShouldWork(t *testing.T) { @@ -2246,18 +2244,16 @@ func TestValidatorStatistics_ProcessValidatorInfosEndOfEpochWithSmallValidatorFa validatorIgnored2 := uint32(90) validatorFailure2 := uint32(9) - vi := make(map[uint32][]*state.ValidatorInfo) - vi[core.MetachainShardId] = make([]*state.ValidatorInfo, 1) - vi[core.MetachainShardId][0] = createMockValidatorInfo(core.MetachainShardId, tempRating1, validatorSuccess1, validatorIgnored1, validatorFailure1) - 
vi[0] = make([]*state.ValidatorInfo, 1) - vi[0][0] = createMockValidatorInfo(0, tempRating2, validatorSuccess2, validatorIgnored2, validatorFailure2) + vi := state.NewShardValidatorsInfoMap() + _ = vi.Add(createMockValidatorInfo(core.MetachainShardId, tempRating1, validatorSuccess1, validatorIgnored1, validatorFailure1)) + _ = vi.Add(createMockValidatorInfo(0, tempRating2, validatorSuccess2, validatorIgnored2, validatorFailure2)) err := validatorStatistics.ProcessRatingsEndOfEpoch(vi, 1) assert.Nil(t, err) expectedTempRating1 := tempRating1 - uint32(rater.MetaIncreaseValidator)*(validatorSuccess1+validatorIgnored1) - assert.Equal(t, expectedTempRating1, vi[core.MetachainShardId][0].TempRating) + assert.Equal(t, expectedTempRating1, vi.GetShardValidatorsInfoMap()[core.MetachainShardId][0].GetTempRating()) expectedTempRating2 := tempRating2 - uint32(rater.IncreaseValidator)*(validatorSuccess2+validatorIgnored2) - assert.Equal(t, expectedTempRating2, vi[0][0].TempRating) + assert.Equal(t, expectedTempRating2, vi.GetShardValidatorsInfoMap()[0][0].GetTempRating()) } func TestValidatorStatistics_ProcessValidatorInfosEndOfEpochComputesJustEligible(t *testing.T) { @@ -2287,20 +2283,19 @@ func TestValidatorStatistics_ProcessValidatorInfosEndOfEpochComputesJustEligible validatorIgnored2 := uint32(90) validatorFailure2 := uint32(9) - vi := make(map[uint32][]*state.ValidatorInfo) - vi[core.MetachainShardId] = make([]*state.ValidatorInfo, 1) - vi[core.MetachainShardId][0] = createMockValidatorInfo(core.MetachainShardId, tempRating1, validatorSuccess1, validatorIgnored1, validatorFailure1) + vi := state.NewShardValidatorsInfoMap() + _ = vi.Add(createMockValidatorInfo(core.MetachainShardId, tempRating1, validatorSuccess1, validatorIgnored1, validatorFailure1)) - vi[0] = make([]*state.ValidatorInfo, 1) - vi[0][0] = createMockValidatorInfo(0, tempRating2, validatorSuccess2, validatorIgnored2, validatorFailure2) - vi[0][0].List = string(common.WaitingList) + validatorWaiting := createMockValidatorInfo(0, tempRating2, validatorSuccess2, validatorIgnored2, validatorFailure2) + validatorWaiting.SetList(string(common.WaitingList)) + _ = vi.Add(validatorWaiting) err := validatorStatistics.ProcessRatingsEndOfEpoch(vi, 1) assert.Nil(t, err) expectedTempRating1 := tempRating1 - uint32(rater.MetaIncreaseValidator)*(validatorSuccess1+validatorIgnored1) - assert.Equal(t, expectedTempRating1, vi[core.MetachainShardId][0].TempRating) + assert.Equal(t, expectedTempRating1, vi.GetShardValidatorsInfoMap()[core.MetachainShardId][0].GetTempRating()) - assert.Equal(t, tempRating2, vi[0][0].TempRating) + assert.Equal(t, tempRating2, vi.GetShardValidatorsInfoMap()[0][0].GetTempRating()) } func TestValidatorStatistics_ProcessValidatorInfosEndOfEpochV2ComputesEligibleLeaving(t *testing.T) { @@ -2332,21 +2327,21 @@ func TestValidatorStatistics_ProcessValidatorInfosEndOfEpochV2ComputesEligibleLe validatorIgnored2 := uint32(90) validatorFailure2 := uint32(9) - vi := make(map[uint32][]*state.ValidatorInfo) - vi[core.MetachainShardId] = make([]*state.ValidatorInfo, 1) - vi[core.MetachainShardId][0] = createMockValidatorInfo(core.MetachainShardId, tempRating1, validatorSuccess1, validatorIgnored1, validatorFailure1) - vi[core.MetachainShardId][0].List = string(common.LeavingList) + vi := state.NewShardValidatorsInfoMap() + validatorLeaving := createMockValidatorInfo(core.MetachainShardId, tempRating1, validatorSuccess1, validatorIgnored1, validatorFailure1) + validatorLeaving.SetList(string(common.LeavingList)) + _ = 
vi.Add(validatorLeaving) - vi[0] = make([]*state.ValidatorInfo, 1) - vi[0][0] = createMockValidatorInfo(0, tempRating2, validatorSuccess2, validatorIgnored2, validatorFailure2) - vi[0][0].List = string(common.WaitingList) + validatorWaiting := createMockValidatorInfo(0, tempRating2, validatorSuccess2, validatorIgnored2, validatorFailure2) + validatorWaiting.SetList(string(common.WaitingList)) + _ = vi.Add(validatorWaiting) err := validatorStatistics.ProcessRatingsEndOfEpoch(vi, 1) assert.Nil(t, err) expectedTempRating1 := tempRating1 - uint32(rater.MetaIncreaseValidator)*(validatorSuccess1+validatorIgnored1) - assert.Equal(t, expectedTempRating1, vi[core.MetachainShardId][0].TempRating) + assert.Equal(t, expectedTempRating1, vi.GetShardValidatorsInfoMap()[core.MetachainShardId][0].GetTempRating()) - assert.Equal(t, tempRating2, vi[0][0].TempRating) + assert.Equal(t, tempRating2, vi.GetShardValidatorsInfoMap()[0][0].GetTempRating()) } func TestValidatorStatistics_ProcessValidatorInfosEndOfEpochWithLargeValidatorFailureBelowMinRatingShouldWork(t *testing.T) { @@ -2374,18 +2369,16 @@ func TestValidatorStatistics_ProcessValidatorInfosEndOfEpochWithLargeValidatorFa validatorIgnored2 := uint32(90) validatorFailure2 := uint32(9) - vi := make(map[uint32][]*state.ValidatorInfo) - vi[core.MetachainShardId] = make([]*state.ValidatorInfo, 1) - vi[core.MetachainShardId][0] = createMockValidatorInfo(core.MetachainShardId, tempRating1, validatorSuccess1, validatorIgnored1, validatorFailure1) - vi[0] = make([]*state.ValidatorInfo, 1) - vi[0][0] = createMockValidatorInfo(0, tempRating2, validatorSuccess2, validatorIgnored2, validatorFailure2) + vi := state.NewShardValidatorsInfoMap() + _ = vi.Add(createMockValidatorInfo(core.MetachainShardId, tempRating1, validatorSuccess1, validatorIgnored1, validatorFailure1)) + _ = vi.Add(createMockValidatorInfo(0, tempRating2, validatorSuccess2, validatorIgnored2, validatorFailure2)) validatorStatistics, _ := peer.NewValidatorStatisticsProcessor(arguments) err := validatorStatistics.ProcessRatingsEndOfEpoch(vi, 1) assert.Nil(t, err) - assert.Equal(t, rater.MinRating, vi[core.MetachainShardId][0].TempRating) - assert.Equal(t, rater.MinRating, vi[0][0].TempRating) + assert.Equal(t, rater.MinRating, vi.GetShardValidatorsInfoMap()[core.MetachainShardId][0].GetTempRating()) + assert.Equal(t, rater.MinRating, vi.GetShardValidatorsInfoMap()[0][0].GetTempRating()) } func TestValidatorsProvider_PeerAccoutToValidatorInfo(t *testing.T) { @@ -2485,26 +2478,26 @@ func createMockValidatorInfo(shardId uint32, tempRating uint32, validatorSuccess } } -func compare(t *testing.T, peerAccount state.PeerAccountHandler, validatorInfo *state.ValidatorInfo) { - assert.Equal(t, peerAccount.GetShardId(), validatorInfo.ShardId) - assert.Equal(t, peerAccount.GetRating(), validatorInfo.Rating) - assert.Equal(t, peerAccount.GetTempRating(), validatorInfo.TempRating) - assert.Equal(t, peerAccount.AddressBytes(), validatorInfo.PublicKey) - assert.Equal(t, peerAccount.GetValidatorSuccessRate().GetNumFailure(), validatorInfo.ValidatorFailure) - assert.Equal(t, peerAccount.GetValidatorSuccessRate().GetNumSuccess(), validatorInfo.ValidatorSuccess) - assert.Equal(t, peerAccount.GetValidatorIgnoredSignaturesRate(), validatorInfo.ValidatorIgnoredSignatures) - assert.Equal(t, peerAccount.GetLeaderSuccessRate().GetNumFailure(), validatorInfo.LeaderFailure) - assert.Equal(t, peerAccount.GetLeaderSuccessRate().GetNumSuccess(), validatorInfo.LeaderSuccess) - assert.Equal(t, 
peerAccount.GetTotalValidatorSuccessRate().GetNumFailure(), validatorInfo.TotalValidatorFailure) - assert.Equal(t, peerAccount.GetTotalValidatorSuccessRate().GetNumSuccess(), validatorInfo.TotalValidatorSuccess) - assert.Equal(t, peerAccount.GetTotalValidatorIgnoredSignaturesRate(), validatorInfo.TotalValidatorIgnoredSignatures) - assert.Equal(t, peerAccount.GetTotalLeaderSuccessRate().GetNumFailure(), validatorInfo.TotalLeaderFailure) - assert.Equal(t, peerAccount.GetTotalLeaderSuccessRate().GetNumSuccess(), validatorInfo.TotalLeaderSuccess) - assert.Equal(t, peerAccount.GetList(), validatorInfo.List) - assert.Equal(t, peerAccount.GetIndexInList(), validatorInfo.Index) - assert.Equal(t, peerAccount.GetRewardAddress(), validatorInfo.RewardAddress) - assert.Equal(t, peerAccount.GetAccumulatedFees(), validatorInfo.AccumulatedFees) - assert.Equal(t, peerAccount.GetNumSelectedInSuccessBlocks(), validatorInfo.NumSelectedInSuccessBlocks) +func compare(t *testing.T, peerAccount state.PeerAccountHandler, validatorInfo state.ValidatorInfoHandler) { + assert.Equal(t, peerAccount.GetShardId(), validatorInfo.GetShardId()) + assert.Equal(t, peerAccount.GetRating(), validatorInfo.GetRating()) + assert.Equal(t, peerAccount.GetTempRating(), validatorInfo.GetTempRating()) + assert.Equal(t, peerAccount.AddressBytes(), validatorInfo.GetPublicKey()) + assert.Equal(t, peerAccount.GetValidatorSuccessRate().GetNumFailure(), validatorInfo.GetValidatorFailure()) + assert.Equal(t, peerAccount.GetValidatorSuccessRate().GetNumSuccess(), validatorInfo.GetValidatorSuccess()) + assert.Equal(t, peerAccount.GetValidatorIgnoredSignaturesRate(), validatorInfo.GetValidatorIgnoredSignatures()) + assert.Equal(t, peerAccount.GetLeaderSuccessRate().GetNumFailure(), validatorInfo.GetLeaderFailure()) + assert.Equal(t, peerAccount.GetLeaderSuccessRate().GetNumSuccess(), validatorInfo.GetLeaderSuccess()) + assert.Equal(t, peerAccount.GetTotalValidatorSuccessRate().GetNumFailure(), validatorInfo.GetTotalValidatorFailure()) + assert.Equal(t, peerAccount.GetTotalValidatorSuccessRate().GetNumSuccess(), validatorInfo.GetTotalValidatorSuccess()) + assert.Equal(t, peerAccount.GetTotalValidatorIgnoredSignaturesRate(), validatorInfo.GetTotalValidatorIgnoredSignatures()) + assert.Equal(t, peerAccount.GetTotalLeaderSuccessRate().GetNumFailure(), validatorInfo.GetTotalLeaderFailure()) + assert.Equal(t, peerAccount.GetTotalLeaderSuccessRate().GetNumSuccess(), validatorInfo.GetTotalLeaderSuccess()) + assert.Equal(t, peerAccount.GetList(), validatorInfo.GetList()) + assert.Equal(t, peerAccount.GetIndexInList(), validatorInfo.GetIndex()) + assert.Equal(t, peerAccount.GetRewardAddress(), validatorInfo.GetRewardAddress()) + assert.Equal(t, peerAccount.GetAccumulatedFees(), validatorInfo.GetAccumulatedFees()) + assert.Equal(t, peerAccount.GetNumSelectedInSuccessBlocks(), validatorInfo.GetNumSelectedInSuccessBlocks()) } func createPeerAccounts(addrBytes0 []byte, addrBytesMeta []byte) (state.PeerAccountHandler, state.PeerAccountHandler) { @@ -2655,6 +2648,114 @@ func TestValidatorStatisticsProcessor_SaveNodesCoordinatorUpdates(t *testing.T) assert.False(t, nodeForcedToRemain) } +func TestValidatorStatisticsProcessor_SaveNodesCoordinatorUpdatesWithStakingV4(t *testing.T) { + t.Parallel() + + peerAdapter := getAccountsMock() + arguments := createMockArguments() + arguments.PeerAdapter = peerAdapter + + pk0 := []byte("pk0") + pk1 := []byte("pk1") + pk2 := []byte("pk2") + + account0, _ := accounts.NewPeerAccount(pk0) + account1, _ := 
accounts.NewPeerAccount(pk1) + account2, _ := accounts.NewPeerAccount(pk2) + + ctLoadAccount := &atomic.Counter{} + ctSaveAccount := &atomic.Counter{} + + peerAdapter.LoadAccountCalled = func(address []byte) (vmcommon.AccountHandler, error) { + ctLoadAccount.Increment() + + switch string(address) { + case string(pk0): + return account0, nil + case string(pk1): + return account1, nil + case string(pk2): + return account2, nil + default: + require.Fail(t, "should not have called this for other address") + return nil, nil + } + } + peerAdapter.SaveAccountCalled = func(account vmcommon.AccountHandler) error { + ctSaveAccount.Increment() + peerAccount := account.(state.PeerAccountHandler) + require.Equal(t, uint32(0), peerAccount.GetIndexInList()) + + switch string(account.AddressBytes()) { + case string(pk0): + require.Equal(t, string(common.EligibleList), peerAccount.GetList()) + require.Equal(t, uint32(0), peerAccount.GetShardId()) + return nil + case string(pk1): + require.Equal(t, string(common.AuctionList), peerAccount.GetList()) + require.Equal(t, uint32(0), peerAccount.GetShardId()) + return nil + case string(pk2): + require.Equal(t, string(common.AuctionList), peerAccount.GetList()) + require.Equal(t, core.MetachainShardId, peerAccount.GetShardId()) + return nil + default: + require.Fail(t, "should not have called this for other account") + return nil + } + } + + arguments.NodesCoordinator = &shardingMocks.NodesCoordinatorMock{ + GetAllEligibleValidatorsPublicKeysCalled: func(epoch uint32) (map[uint32][][]byte, error) { + mapNodes := map[uint32][][]byte{ + 0: {pk0}, + } + return mapNodes, nil + }, + GetAllShuffledOutValidatorsPublicKeysCalled: func(epoch uint32) (map[uint32][][]byte, error) { + mapNodes := map[uint32][][]byte{ + 0: {pk1}, + core.MetachainShardId: {pk2}, + } + return mapNodes, nil + }, + } + stakingV4Step2EnableEpochCalledCt := 0 + arguments.EnableEpochsHandler = &enableEpochsHandlerMock.EnableEpochsHandlerStub{ + IsFlagEnabledCalled: func(flag core.EnableEpochFlag) bool { + if flag == common.StakingV4Step2Flag { + stakingV4Step2EnableEpochCalledCt++ + switch stakingV4Step2EnableEpochCalledCt { + case 1: + return false + case 2: + return true + default: + require.Fail(t, "should only call this twice") + } + } + + return false + }, + } + + validatorStatistics, _ := peer.NewValidatorStatisticsProcessor(arguments) + nodeForcedToRemain, err := validatorStatistics.SaveNodesCoordinatorUpdates(0) + require.Nil(t, err) + require.False(t, nodeForcedToRemain) + require.Equal(t, int64(1), ctSaveAccount.Get()) + require.Equal(t, int64(1), ctLoadAccount.Get()) + + ctSaveAccount.Reset() + ctLoadAccount.Reset() + + nodeForcedToRemain, err = validatorStatistics.SaveNodesCoordinatorUpdates(0) + require.Nil(t, err) + require.False(t, nodeForcedToRemain) + require.Equal(t, int64(3), ctSaveAccount.Get()) + require.Equal(t, int64(3), ctLoadAccount.Get()) +} + func TestValidatorStatisticsProcessor_getActualList(t *testing.T) { t.Parallel() diff --git a/process/peer/ratingReader.go b/process/peer/ratingReader.go index 4a8c8f1c5be..83f236b3869 100644 --- a/process/peer/ratingReader.go +++ b/process/peer/ratingReader.go @@ -5,13 +5,13 @@ type RatingReader struct { getRating func(string) uint32 } -//GetRating returns the Rating for the specified public key +// GetRating returns the Rating for the specified public key func (bsr *RatingReader) GetRating(pk string) uint32 { rating := bsr.getRating(pk) return rating } -//IsInterfaceNil checks if the underlying object is nil +// IsInterfaceNil checks 
if the underlying object is nil func (bsr *RatingReader) IsInterfaceNil() bool { return bsr == nil } diff --git a/process/peer/validatorsProvider.go b/process/peer/validatorsProvider.go index 6ab8d0ac49b..7c3b8505310 100644 --- a/process/peer/validatorsProvider.go +++ b/process/peer/validatorsProvider.go @@ -11,6 +11,7 @@ import ( "github.com/multiversx/mx-chain-core-go/data" "github.com/multiversx/mx-chain-core-go/data/validator" "github.com/multiversx/mx-chain-go/common" + "github.com/multiversx/mx-chain-go/epochStart" "github.com/multiversx/mx-chain-go/epochStart/notifier" "github.com/multiversx/mx-chain-go/process" "github.com/multiversx/mx-chain-go/sharding/nodesCoordinator" @@ -24,14 +25,22 @@ type validatorsProvider struct { nodesCoordinator process.NodesCoordinator validatorStatistics process.ValidatorStatisticsProcessor cache map[string]*validator.ValidatorStatistics + cachedAuctionValidators []*common.AuctionListValidatorAPIResponse + cachedRandomness []byte cacheRefreshIntervalDuration time.Duration refreshCache chan uint32 lastCacheUpdate time.Time + lastAuctionCacheUpdate time.Time lock sync.RWMutex + auctionMutex sync.RWMutex cancelFunc func() - pubkeyConverter core.PubkeyConverter - maxRating uint32 - currentEpoch uint32 + validatorPubKeyConverter core.PubkeyConverter + addressPubKeyConverter core.PubkeyConverter + stakingDataProvider StakingDataProviderAPI + auctionListSelector epochStart.AuctionListSelector + + maxRating uint32 + currentEpoch uint32 } // ArgValidatorsProvider contains all parameters needed for creating a validatorsProvider @@ -40,7 +49,10 @@ type ArgValidatorsProvider struct { EpochStartEventNotifier process.EpochStartEventNotifier CacheRefreshIntervalDurationInSec time.Duration ValidatorStatistics process.ValidatorStatisticsProcessor - PubKeyConverter core.PubkeyConverter + ValidatorPubKeyConverter core.PubkeyConverter + AddressPubKeyConverter core.PubkeyConverter + StakingDataProvider StakingDataProviderAPI + AuctionListSelector epochStart.AuctionListSelector StartEpoch uint32 MaxRating uint32 } @@ -53,8 +65,11 @@ func NewValidatorsProvider( if check.IfNil(args.ValidatorStatistics) { return nil, process.ErrNilValidatorStatistics } - if check.IfNil(args.PubKeyConverter) { - return nil, process.ErrNilPubkeyConverter + if check.IfNil(args.ValidatorPubKeyConverter) { + return nil, fmt.Errorf("%w for validators", process.ErrNilPubkeyConverter) + } + if check.IfNil(args.AddressPubKeyConverter) { + return nil, fmt.Errorf("%w for addresses", process.ErrNilPubkeyConverter) } if check.IfNil(args.NodesCoordinator) { return nil, process.ErrNilNodesCoordinator @@ -62,6 +77,12 @@ func NewValidatorsProvider( if check.IfNil(args.EpochStartEventNotifier) { return nil, process.ErrNilEpochStartNotifier } + if check.IfNil(args.StakingDataProvider) { + return nil, process.ErrNilStakingDataProvider + } + if check.IfNil(args.AuctionListSelector) { + return nil, epochStart.ErrNilAuctionListSelector + } if args.MaxRating == 0 { return nil, process.ErrMaxRatingZero } @@ -74,14 +95,20 @@ func NewValidatorsProvider( valProvider := &validatorsProvider{ nodesCoordinator: args.NodesCoordinator, validatorStatistics: args.ValidatorStatistics, + stakingDataProvider: args.StakingDataProvider, cache: make(map[string]*validator.ValidatorStatistics), + cachedAuctionValidators: make([]*common.AuctionListValidatorAPIResponse, 0), + cachedRandomness: make([]byte, 0), cacheRefreshIntervalDuration: args.CacheRefreshIntervalDurationInSec, refreshCache: make(chan uint32), lock: sync.RWMutex{}, + 
auctionMutex: sync.RWMutex{}, cancelFunc: cancelfunc, maxRating: args.MaxRating, - pubkeyConverter: args.PubKeyConverter, + validatorPubKeyConverter: args.ValidatorPubKeyConverter, + addressPubKeyConverter: args.AddressPubKeyConverter, currentEpoch: args.StartEpoch, + auctionListSelector: args.AuctionListSelector, } go valProvider.startRefreshProcess(currentContext) @@ -92,19 +119,23 @@ func NewValidatorsProvider( // GetLatestValidators gets the latest configuration of validators from the peerAccountsTrie func (vp *validatorsProvider) GetLatestValidators() map[string]*validator.ValidatorStatistics { + vp.updateCacheIfNeeded() + vp.lock.RLock() - shouldUpdate := time.Since(vp.lastCacheUpdate) > vp.cacheRefreshIntervalDuration + clonedMap := cloneMap(vp.cache) vp.lock.RUnlock() - if shouldUpdate { - vp.updateCache() - } + return clonedMap +} +func (vp *validatorsProvider) updateCacheIfNeeded() { vp.lock.RLock() - clonedMap := cloneMap(vp.cache) + shouldUpdate := time.Since(vp.lastCacheUpdate) > vp.cacheRefreshIntervalDuration vp.lock.RUnlock() - return clonedMap + if shouldUpdate { + vp.updateCache() + } } func cloneMap(cache map[string]*validator.ValidatorStatistics) map[string]*validator.ValidatorStatistics { @@ -182,6 +213,7 @@ func (vp *validatorsProvider) updateCache() { } allNodes, err := vp.validatorStatistics.GetValidatorInfoForRootHash(lastFinalizedRootHash) if err != nil { + allNodes = state.NewShardValidatorsInfoMap() log.Trace("validatorsProvider - GetLatestValidatorInfos failed", "error", err) } @@ -199,48 +231,46 @@ func (vp *validatorsProvider) updateCache() { func (vp *validatorsProvider) createNewCache( epoch uint32, - allNodes map[uint32][]*state.ValidatorInfo, + allNodes state.ShardValidatorsInfoMapHandler, ) map[string]*validator.ValidatorStatistics { newCache := vp.createValidatorApiResponseMapFromValidatorInfoMap(allNodes) nodesMapEligible, err := vp.nodesCoordinator.GetAllEligibleValidatorsPublicKeys(epoch) if err != nil { - log.Debug("validatorsProvider - GetAllEligibleValidatorsPublicKeys failed", "epoch", epoch) + log.Debug("validatorsProvider - GetAllEligibleValidatorsPublicKeys failed", "epoch", epoch, "error", err) } vp.aggregateLists(newCache, nodesMapEligible, common.EligibleList) nodesMapWaiting, err := vp.nodesCoordinator.GetAllWaitingValidatorsPublicKeys(epoch) if err != nil { - log.Debug("validatorsProvider - GetAllWaitingValidatorsPublicKeys failed", "epoch", epoch) + log.Debug("validatorsProvider - GetAllWaitingValidatorsPublicKeys failed", "epoch", epoch, "error", err) } vp.aggregateLists(newCache, nodesMapWaiting, common.WaitingList) return newCache } -func (vp *validatorsProvider) createValidatorApiResponseMapFromValidatorInfoMap(allNodes map[uint32][]*state.ValidatorInfo) map[string]*validator.ValidatorStatistics { +func (vp *validatorsProvider) createValidatorApiResponseMapFromValidatorInfoMap(allNodes state.ShardValidatorsInfoMapHandler) map[string]*validator.ValidatorStatistics { newCache := make(map[string]*validator.ValidatorStatistics) - for _, validatorInfosInShard := range allNodes { - for _, validatorInfo := range validatorInfosInShard { - strKey := vp.pubkeyConverter.SilentEncode(validatorInfo.PublicKey, log) - - newCache[strKey] = &validator.ValidatorStatistics{ - NumLeaderSuccess: validatorInfo.LeaderSuccess, - NumLeaderFailure: validatorInfo.LeaderFailure, - NumValidatorSuccess: validatorInfo.ValidatorSuccess, - NumValidatorFailure: validatorInfo.ValidatorFailure, - NumValidatorIgnoredSignatures: validatorInfo.ValidatorIgnoredSignatures, 
- TotalNumLeaderSuccess: validatorInfo.TotalLeaderSuccess, - TotalNumLeaderFailure: validatorInfo.TotalLeaderFailure, - TotalNumValidatorSuccess: validatorInfo.TotalValidatorSuccess, - TotalNumValidatorFailure: validatorInfo.TotalValidatorFailure, - TotalNumValidatorIgnoredSignatures: validatorInfo.TotalValidatorIgnoredSignatures, - RatingModifier: validatorInfo.RatingModifier, - Rating: float32(validatorInfo.Rating) * 100 / float32(vp.maxRating), - TempRating: float32(validatorInfo.TempRating) * 100 / float32(vp.maxRating), - ShardId: validatorInfo.ShardId, - ValidatorStatus: validatorInfo.List, - } + + for _, validatorInfo := range allNodes.GetAllValidatorsInfo() { + strKey := vp.validatorPubKeyConverter.SilentEncode(validatorInfo.GetPublicKey(), log) + newCache[strKey] = &validator.ValidatorStatistics{ + NumLeaderSuccess: validatorInfo.GetLeaderSuccess(), + NumLeaderFailure: validatorInfo.GetLeaderFailure(), + NumValidatorSuccess: validatorInfo.GetValidatorSuccess(), + NumValidatorFailure: validatorInfo.GetValidatorFailure(), + NumValidatorIgnoredSignatures: validatorInfo.GetValidatorIgnoredSignatures(), + TotalNumLeaderSuccess: validatorInfo.GetTotalLeaderSuccess(), + TotalNumLeaderFailure: validatorInfo.GetTotalLeaderFailure(), + TotalNumValidatorSuccess: validatorInfo.GetTotalValidatorSuccess(), + TotalNumValidatorFailure: validatorInfo.GetTotalValidatorFailure(), + TotalNumValidatorIgnoredSignatures: validatorInfo.GetTotalValidatorIgnoredSignatures(), + RatingModifier: validatorInfo.GetRatingModifier(), + Rating: float32(validatorInfo.GetRating()) * 100 / float32(vp.maxRating), + TempRating: float32(validatorInfo.GetTempRating()) * 100 / float32(vp.maxRating), + ShardId: validatorInfo.GetShardId(), + ValidatorStatus: validatorInfo.GetList(), } } @@ -254,8 +284,7 @@ func (vp *validatorsProvider) aggregateLists( ) { for shardID, shardValidators := range validatorsMap { for _, val := range shardValidators { - encodedKey := vp.pubkeyConverter.SilentEncode(val, log) - + encodedKey := vp.validatorPubKeyConverter.SilentEncode(val, log) foundInTrieValidator, ok := newCache[encodedKey] peerType := string(currentList) @@ -288,6 +317,12 @@ func shouldCombine(triePeerType common.PeerType, currentPeerType common.PeerType return isLeaving && isEligibleOrWaiting } +// ForceUpdate will trigger the update process of all caches +func (vp *validatorsProvider) ForceUpdate() error { + vp.updateCache() + return vp.updateAuctionListCache() +} + // IsInterfaceNil returns true if there is no value under the interface func (vp *validatorsProvider) IsInterfaceNil() bool { return vp == nil diff --git a/process/peer/validatorsProviderAuction.go b/process/peer/validatorsProviderAuction.go new file mode 100644 index 00000000000..144ace850fb --- /dev/null +++ b/process/peer/validatorsProviderAuction.go @@ -0,0 +1,220 @@ +package peer + +import ( + "bytes" + "math/big" + "sort" + "time" + + "github.com/multiversx/mx-chain-go/common" + "github.com/multiversx/mx-chain-go/epochStart" + "github.com/multiversx/mx-chain-go/state" +) + +// GetAuctionList returns an array containing the validators that are currently in the auction list +func (vp *validatorsProvider) GetAuctionList() ([]*common.AuctionListValidatorAPIResponse, error) { + err := vp.updateAuctionListCacheIfNeeded() + if err != nil { + return nil, err + } + + vp.auctionMutex.RLock() + ret := make([]*common.AuctionListValidatorAPIResponse, len(vp.cachedAuctionValidators)) + copy(ret, vp.cachedAuctionValidators) + vp.auctionMutex.RUnlock() + + return ret, nil +} 
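Note (editorial, not part of the patch): GetAuctionList above follows the same refresh-on-read pattern the provider already uses for the validators cache: check staleness under a read lock, recompute only when the refresh interval has elapsed, then hand callers a copy of the cached slice so they never hold a reference into the guarded state. The standalone Go sketch below (hypothetical names, standard library only) illustrates that pattern in isolation, under the assumption that recomputation is expensive (e.g. a trie iteration) and may safely run outside the write lock.

package main

import (
	"fmt"
	"sync"
	"time"
)

// timedCache is a minimal sketch of a refresh-on-read cache: values are
// recomputed at most once per refresh interval, and readers always receive
// a copy of the cached slice rather than the slice itself.
type timedCache struct {
	mu              sync.RWMutex
	lastUpdate      time.Time
	refreshInterval time.Duration
	values          []string
	compute         func() []string // expensive recomputation, assumed side-effect free
}

// Get refreshes the cache if it is stale, then returns a copy of the values.
func (c *timedCache) Get() []string {
	c.updateIfNeeded()

	c.mu.RLock()
	defer c.mu.RUnlock()
	out := make([]string, len(c.values))
	copy(out, c.values)
	return out
}

// updateIfNeeded checks staleness under a read lock and recomputes outside
// the write lock, so concurrent readers are blocked only for the final swap.
func (c *timedCache) updateIfNeeded() {
	c.mu.RLock()
	stale := time.Since(c.lastUpdate) > c.refreshInterval
	c.mu.RUnlock()
	if !stale {
		return
	}

	newValues := c.compute()

	c.mu.Lock()
	c.values = newValues
	c.lastUpdate = time.Now()
	c.mu.Unlock()
}

func main() {
	cache := &timedCache{
		refreshInterval: time.Second,
		compute:         func() []string { return []string{"node-a", "node-b"} },
	}
	// The first call triggers a refresh; subsequent calls within one second
	// reuse the cached result.
	fmt.Println(cache.Get())
}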
+ +func (vp *validatorsProvider) updateAuctionListCacheIfNeeded() error { + vp.auctionMutex.RLock() + shouldUpdate := time.Since(vp.lastAuctionCacheUpdate) > vp.cacheRefreshIntervalDuration + vp.auctionMutex.RUnlock() + + if shouldUpdate { + return vp.updateAuctionListCache() + } + + return nil +} + +func (vp *validatorsProvider) updateAuctionListCache() error { + rootHash := vp.validatorStatistics.LastFinalizedRootHash() + if len(rootHash) == 0 { + return state.ErrNilRootHash + } + + validatorsMap, err := vp.validatorStatistics.GetValidatorInfoForRootHash(rootHash) + if err != nil { + return err + } + + vp.auctionMutex.Lock() + vp.cachedRandomness = rootHash + vp.auctionMutex.Unlock() + + newCache, err := vp.createValidatorsAuctionCache(validatorsMap) + if err != nil { + return err + } + + vp.auctionMutex.Lock() + vp.lastAuctionCacheUpdate = time.Now() + vp.cachedAuctionValidators = newCache + vp.auctionMutex.Unlock() + + return nil +} + +func (vp *validatorsProvider) createValidatorsAuctionCache(validatorsMap state.ShardValidatorsInfoMapHandler) ([]*common.AuctionListValidatorAPIResponse, error) { + defer vp.stakingDataProvider.Clean() + + err := vp.fillAllValidatorsInfo(validatorsMap) + if err != nil { + return nil, err + } + + selectedNodes, err := vp.getSelectedNodesFromAuction(validatorsMap) + if err != nil { + return nil, err + } + + auctionListValidators, qualifiedOwners := vp.getAuctionListValidatorsAPIResponse(selectedNodes) + sortList(auctionListValidators, qualifiedOwners) + return auctionListValidators, nil +} + +func (vp *validatorsProvider) fillAllValidatorsInfo(validatorsMap state.ShardValidatorsInfoMapHandler) error { + for _, validator := range validatorsMap.GetAllValidatorsInfo() { + err := vp.stakingDataProvider.FillValidatorInfo(validator) + if err != nil { + return err + } + } + + _, _, err := vp.stakingDataProvider.ComputeUnQualifiedNodes(validatorsMap) + return err +} + +func (vp *validatorsProvider) getSelectedNodesFromAuction(validatorsMap state.ShardValidatorsInfoMapHandler) ([]state.ValidatorInfoHandler, error) { + vp.auctionMutex.RLock() + randomness := vp.cachedRandomness + vp.auctionMutex.RUnlock() + + err := vp.auctionListSelector.SelectNodesFromAuctionList(validatorsMap, randomness) + if err != nil { + return nil, err + } + + selectedNodes := make([]state.ValidatorInfoHandler, 0) + for _, validator := range validatorsMap.GetAllValidatorsInfo() { + if validator.GetList() == string(common.SelectedFromAuctionList) { + selectedNodes = append(selectedNodes, validator.ShallowClone()) + } + } + + return selectedNodes, nil +} + +func sortList(list []*common.AuctionListValidatorAPIResponse, qualifiedOwners map[string]bool) { + sort.SliceStable(list, func(i, j int) bool { + qualifiedTopUpValidator1, _ := big.NewInt(0).SetString(list[i].QualifiedTopUp, 10) + qualifiedTopUpValidator2, _ := big.NewInt(0).SetString(list[j].QualifiedTopUp, 10) + if qualifiedTopUpValidator1.Cmp(qualifiedTopUpValidator2) == 0 { + return compareByNumQualified(list[i], list[j], qualifiedOwners) + } + + return qualifiedTopUpValidator1.Cmp(qualifiedTopUpValidator2) > 0 + }) +} + +func compareByNumQualified(owner1Nodes, owner2Nodes *common.AuctionListValidatorAPIResponse, qualifiedOwners map[string]bool) bool { + owner1Qualified := qualifiedOwners[owner1Nodes.Owner] + owner2Qualified := qualifiedOwners[owner2Nodes.Owner] + + bothQualified := owner1Qualified && owner2Qualified + if !bothQualified { + return owner1Qualified + } + + owner1NumQualified := getNumQualified(owner1Nodes.Nodes) + 
owner2NumQualified := getNumQualified(owner2Nodes.Nodes) + + return owner1NumQualified > owner2NumQualified +} + +func getNumQualified(nodes []*common.AuctionNode) uint32 { + numQualified := uint32(0) + for _, node := range nodes { + if node.Qualified { + numQualified++ + } + } + + return numQualified +} + +func (vp *validatorsProvider) getAuctionListValidatorsAPIResponse( + selectedNodes []state.ValidatorInfoHandler, +) ([]*common.AuctionListValidatorAPIResponse, map[string]bool) { + auctionListValidators := make([]*common.AuctionListValidatorAPIResponse, 0) + qualifiedOwners := make(map[string]bool) + + for ownerPubKey, ownerData := range vp.stakingDataProvider.GetOwnersData() { + numAuctionNodes := len(ownerData.AuctionList) + if numAuctionNodes > 0 { + ownerEncodedPubKey := vp.addressPubKeyConverter.SilentEncode([]byte(ownerPubKey), log) + auctionValidator := &common.AuctionListValidatorAPIResponse{ + Owner: ownerEncodedPubKey, + NumStakedNodes: ownerData.NumStakedNodes, + TotalTopUp: ownerData.TotalTopUp.String(), + TopUpPerNode: ownerData.TopUpPerNode.String(), + QualifiedTopUp: ownerData.TopUpPerNode.String(), + Nodes: make([]*common.AuctionNode, 0, numAuctionNodes), + } + vp.fillAuctionQualifiedValidatorAPIData(selectedNodes, ownerData, auctionValidator) + auctionListValidators = append(auctionListValidators, auctionValidator) + + qualifiedOwners[ownerEncodedPubKey] = ownerData.Qualified + } + } + + return auctionListValidators, qualifiedOwners +} + +func (vp *validatorsProvider) fillAuctionQualifiedValidatorAPIData( + selectedNodes []state.ValidatorInfoHandler, + ownerData *epochStart.OwnerData, + auctionValidatorAPI *common.AuctionListValidatorAPIResponse, +) { + auctionValidatorAPI.Nodes = make([]*common.AuctionNode, 0, len(ownerData.AuctionList)) + numOwnerQualifiedNodes := int64(0) + for _, nodeInAuction := range ownerData.AuctionList { + auctionNode := &common.AuctionNode{ + BlsKey: vp.validatorPubKeyConverter.SilentEncode(nodeInAuction.GetPublicKey(), log), + Qualified: false, + } + if ownerData.Qualified && contains(selectedNodes, nodeInAuction) { + auctionNode.Qualified = true + numOwnerQualifiedNodes++ + } + + auctionValidatorAPI.Nodes = append(auctionValidatorAPI.Nodes, auctionNode) + } + + if numOwnerQualifiedNodes > 0 { + activeNodes := big.NewInt(ownerData.NumActiveNodes) + qualifiedNodes := big.NewInt(numOwnerQualifiedNodes) + ownerRemainingNodes := big.NewInt(0).Add(activeNodes, qualifiedNodes) + auctionValidatorAPI.QualifiedTopUp = big.NewInt(0).Div(ownerData.TotalTopUp, ownerRemainingNodes).String() + } +} + +func contains(list []state.ValidatorInfoHandler, validator state.ValidatorInfoHandler) bool { + for _, val := range list { + if bytes.Equal(val.GetPublicKey(), validator.GetPublicKey()) { + return true + } + } + return false +} diff --git a/process/peer/validatorsProvider_test.go b/process/peer/validatorsProvider_test.go index cd718e0c78b..931567a2435 100644 --- a/process/peer/validatorsProvider_test.go +++ b/process/peer/validatorsProvider_test.go @@ -6,12 +6,14 @@ import ( "encoding/hex" "fmt" "math/big" + "strings" "sync" "sync/atomic" "testing" "time" "github.com/multiversx/mx-chain-core-go/core" + coreAtomic "github.com/multiversx/mx-chain-core-go/core/atomic" "github.com/multiversx/mx-chain-core-go/core/check" "github.com/multiversx/mx-chain-core-go/data/block" "github.com/multiversx/mx-chain-core-go/data/validator" @@ -22,8 +24,10 @@ import ( "github.com/multiversx/mx-chain-go/state" "github.com/multiversx/mx-chain-go/testscommon" 
"github.com/multiversx/mx-chain-go/testscommon/shardingMocks" + "github.com/multiversx/mx-chain-go/testscommon/stakingcommon" "github.com/pkg/errors" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) func TestNewValidatorsProvider_WithNilValidatorStatisticsShouldErr(t *testing.T) { @@ -42,16 +46,36 @@ func TestNewValidatorsProvider_WithMaxRatingZeroShouldErr(t *testing.T) { assert.Nil(t, vp) } -func TestNewValidatorsProvider_WithNilValidatorPubkeyConverterShouldErr(t *testing.T) { +func TestNewValidatorsProvider_WithNilValidatorPubKeyConverterShouldErr(t *testing.T) { arg := createDefaultValidatorsProviderArg() - arg.PubKeyConverter = nil + arg.ValidatorPubKeyConverter = nil vp, err := NewValidatorsProvider(arg) - assert.Equal(t, process.ErrNilPubkeyConverter, err) + assert.True(t, errors.Is(err, process.ErrNilPubkeyConverter)) + assert.True(t, strings.Contains(err.Error(), "validator")) assert.True(t, check.IfNil(vp)) } -func TestNewValidatorsProvider_WithNilNodesCoordinatorrShouldErr(t *testing.T) { +func TestNewValidatorsProvider_WithNilAddressPubkeyConverterShouldErr(t *testing.T) { + arg := createDefaultValidatorsProviderArg() + arg.AddressPubKeyConverter = nil + vp, err := NewValidatorsProvider(arg) + + assert.True(t, errors.Is(err, process.ErrNilPubkeyConverter)) + assert.True(t, strings.Contains(err.Error(), "address")) + assert.True(t, check.IfNil(vp)) +} + +func TestNewValidatorsProvider_WithNilStakingDataProviderShouldErr(t *testing.T) { + arg := createDefaultValidatorsProviderArg() + arg.StakingDataProvider = nil + vp, err := NewValidatorsProvider(arg) + + assert.Equal(t, process.ErrNilStakingDataProvider, err) + assert.True(t, check.IfNil(vp)) +} + +func TestNewValidatorsProvider_WithNilNodesCoordinatorShouldErr(t *testing.T) { arg := createDefaultValidatorsProviderArg() arg.NodesCoordinator = nil vp, err := NewValidatorsProvider(arg) @@ -69,7 +93,7 @@ func TestNewValidatorsProvider_WithNilStartOfEpochTriggerShouldErr(t *testing.T) assert.True(t, check.IfNil(vp)) } -func TestNewValidatorsProvider_WithNilRefresCacheIntervalInSecShouldErr(t *testing.T) { +func TestNewValidatorsProvider_WithZeroRefreshCacheIntervalInSecShouldErr(t *testing.T) { arg := createDefaultValidatorsProviderArg() arg.CacheRefreshIntervalDurationInSec = 0 vp, err := NewValidatorsProvider(arg) @@ -78,25 +102,33 @@ func TestNewValidatorsProvider_WithNilRefresCacheIntervalInSecShouldErr(t *testi assert.True(t, check.IfNil(vp)) } +func TestNewValidatorsProvider_WithNilAuctionListSelectorShouldErr(t *testing.T) { + arg := createDefaultValidatorsProviderArg() + arg.AuctionListSelector = nil + vp, err := NewValidatorsProvider(arg) + + require.Nil(t, vp) + require.Equal(t, epochStart.ErrNilAuctionListSelector, err) +} + func TestValidatorsProvider_GetLatestValidatorsSecondHashDoesNotExist(t *testing.T) { mut := sync.Mutex{} root := []byte("rootHash") e := errors.Errorf("not ok") initialInfo := createMockValidatorInfo() - validatorInfos := map[uint32][]*state.ValidatorInfo{ - 0: {initialInfo}, - } + validatorInfos := state.NewShardValidatorsInfoMap() + _ = validatorInfos.Add(initialInfo) gotOk := false gotNil := false - vs := &mock.ValidatorStatisticsProcessorStub{ + vs := &testscommon.ValidatorStatisticsProcessorStub{ LastFinalizedRootHashCalled: func() (bytes []byte) { mut.Lock() defer mut.Unlock() return root }, - GetValidatorInfoForRootHashCalled: func(rootHash []byte) (m map[uint32][]*state.ValidatorInfo, err error) { + GetValidatorInfoForRootHashCalled: func(rootHash []byte) (m 
state.ShardValidatorsInfoMapHandler, err error) { mut.Lock() defer mut.Unlock() if bytes.Equal([]byte("rootHash"), rootHash) { @@ -167,10 +199,10 @@ func TestValidatorsProvider_CallsPopulateAndRegister(t *testing.T) { }, } - arg.ValidatorStatistics = &mock.ValidatorStatisticsProcessorStub{ - GetValidatorInfoForRootHashCalled: func(rootHash []byte) (map[uint32][]*state.ValidatorInfo, error) { + arg.ValidatorStatistics = &testscommon.ValidatorStatisticsProcessorStub{ + GetValidatorInfoForRootHashCalled: func(rootHash []byte) (state.ShardValidatorsInfoMapHandler, error) { atomic.AddInt32(&numPopulateCacheCalled, 1) - return nil, nil + return state.NewShardValidatorsInfoMap(), nil }, LastFinalizedRootHashCalled: func() []byte { return []byte("rootHash") @@ -189,12 +221,12 @@ func TestValidatorsProvider_UpdateCache_WithError(t *testing.T) { expectedErr := errors.New("expectedError") arg := createDefaultValidatorsProviderArg() - validatorProc := &mock.ValidatorStatisticsProcessorStub{ + validatorProc := &testscommon.ValidatorStatisticsProcessorStub{ LastFinalizedRootHashCalled: func() []byte { return []byte("rootHash") }, } - validatorProc.GetValidatorInfoForRootHashCalled = func(rootHash []byte) (map[uint32][]*state.ValidatorInfo, error) { + validatorProc.GetValidatorInfoForRootHashCalled = func(rootHash []byte) (state.ShardValidatorsInfoMapHandler, error) { return nil, expectedErr } @@ -213,7 +245,7 @@ func TestValidatorsProvider_UpdateCache_WithError(t *testing.T) { cacheRefreshIntervalDuration: arg.CacheRefreshIntervalDurationInSec, refreshCache: nil, lock: sync.RWMutex{}, - pubkeyConverter: testscommon.NewPubkeyConverterMock(32), + validatorPubKeyConverter: testscommon.NewPubkeyConverterMock(32), } vsp.updateCache() @@ -233,6 +265,8 @@ func TestValidatorsProvider_Cancel_startRefreshProcess(t *testing.T) { cacheRefreshIntervalDuration: arg.CacheRefreshIntervalDurationInSec, refreshCache: make(chan uint32), lock: sync.RWMutex{}, + stakingDataProvider: &stakingcommon.StakingDataProviderStub{}, + auctionListSelector: &stakingcommon.AuctionListSelectorStub{}, } ctx, cancelFunc := context.WithCancel(context.Background()) @@ -264,21 +298,20 @@ func TestValidatorsProvider_UpdateCache(t *testing.T) { pk := []byte("pk1") initialShardId := uint32(1) initialList := string(common.EligibleList) - validatorsMap := make(map[uint32][]*state.ValidatorInfo) - validatorsMap[initialShardId] = []*state.ValidatorInfo{ - { - PublicKey: pk, - List: initialList, - ShardId: initialShardId, - }, - } + validatorsMap := state.NewShardValidatorsInfoMap() + _ = validatorsMap.Add(&state.ValidatorInfo{ + PublicKey: pk, + List: initialList, + ShardId: initialShardId, + }) + arg := createDefaultValidatorsProviderArg() - validatorProc := &mock.ValidatorStatisticsProcessorStub{ + validatorProc := &testscommon.ValidatorStatisticsProcessorStub{ LastFinalizedRootHashCalled: func() []byte { return []byte("rootHash") }, } - validatorProc.GetValidatorInfoForRootHashCalled = func(rootHash []byte) (map[uint32][]*state.ValidatorInfo, error) { + validatorProc.GetValidatorInfoForRootHashCalled = func(rootHash []byte) (state.ShardValidatorsInfoMapHandler, error) { return validatorsMap, nil } @@ -288,16 +321,15 @@ func TestValidatorsProvider_UpdateCache(t *testing.T) { cache: nil, cacheRefreshIntervalDuration: arg.CacheRefreshIntervalDurationInSec, refreshCache: nil, - pubkeyConverter: testscommon.NewPubkeyConverterMock(32), + validatorPubKeyConverter: testscommon.NewPubkeyConverterMock(32), lock: sync.RWMutex{}, } vsp.updateCache() 
assert.NotNil(t, vsp.cache) - assert.Equal(t, len(validatorsMap[initialShardId]), len(vsp.cache)) - encodedKey, err := arg.PubKeyConverter.Encode(pk) - assert.Nil(t, err) + assert.Equal(t, len(validatorsMap.GetShardValidatorsInfoMap()[initialShardId]), len(vsp.cache)) + encodedKey, _ := arg.ValidatorPubKeyConverter.Encode(pk) assert.NotNil(t, vsp.cache[encodedKey]) assert.Equal(t, initialList, vsp.cache[encodedKey].ValidatorStatus) assert.Equal(t, initialShardId, vsp.cache[encodedKey].ShardId) @@ -315,12 +347,9 @@ func TestValidatorsProvider_aggregatePType_equal(t *testing.T) { trieLeavingShardId := uint32(2) leavingList := string(common.LeavingList) - encodedEligible, err := pubKeyConverter.Encode(pkEligible) - assert.Nil(t, err) - encondedInactive, err := pubKeyConverter.Encode(pkInactive) - assert.Nil(t, err) - encodedLeaving, err := pubKeyConverter.Encode(pkLeaving) - assert.Nil(t, err) + encodedEligible, _ := pubKeyConverter.Encode(pkEligible) + encondedInactive, _ := pubKeyConverter.Encode(pkInactive) + encodedLeaving, _ := pubKeyConverter.Encode(pkLeaving) cache := make(map[string]*validator.ValidatorStatistics) cache[encondedInactive] = &validator.ValidatorStatistics{ValidatorStatus: inactiveList, ShardId: trieInctiveShardId} cache[encodedEligible] = &validator.ValidatorStatistics{ValidatorStatus: eligibleList, ShardId: trieEligibleShardId} @@ -335,7 +364,7 @@ func TestValidatorsProvider_aggregatePType_equal(t *testing.T) { } vp := validatorsProvider{ - pubkeyConverter: pubKeyConverter, + validatorPubKeyConverter: pubKeyConverter, } vp.aggregateLists(cache, validatorsMap, common.EligibleList) @@ -363,47 +392,41 @@ func TestValidatorsProvider_createCache(t *testing.T) { pkNew := []byte("pk5") newList := string(common.NewList) - validatorsMap := make(map[uint32][]*state.ValidatorInfo) + validatorsMap := state.NewShardValidatorsInfoMap() eligibleShardId := uint32(0) waitingShardId := uint32(1) leavingShardId := uint32(2) inactiveShardId := uint32(3) newShardId := core.MetachainShardId - validatorsMap[eligibleShardId] = []*state.ValidatorInfo{ - { - PublicKey: pkEligible, - ShardId: eligibleShardId, - List: eligibleList, - }, - } - validatorsMap[waitingShardId] = []*state.ValidatorInfo{ - { - PublicKey: pkWaiting, - ShardId: waitingShardId, - List: waitingList, - }, - } - validatorsMap[leavingShardId] = []*state.ValidatorInfo{ - { - PublicKey: pkLeaving, - ShardId: leavingShardId, - List: leavingList, - }, - } - validatorsMap[inactiveShardId] = []*state.ValidatorInfo{ - { - PublicKey: pkInactive, - ShardId: inactiveShardId, - List: inactiveList, - }, - } - validatorsMap[newShardId] = []*state.ValidatorInfo{ - { - PublicKey: pkNew, - ShardId: newShardId, - List: newList, - }, - } + _ = validatorsMap.Add(&state.ValidatorInfo{ + PublicKey: pkEligible, + ShardId: eligibleShardId, + List: eligibleList, + }) + _ = validatorsMap.Add(&state.ValidatorInfo{ + + PublicKey: pkWaiting, + ShardId: waitingShardId, + List: waitingList, + }) + _ = validatorsMap.Add(&state.ValidatorInfo{ + + PublicKey: pkLeaving, + ShardId: leavingShardId, + List: leavingList, + }) + _ = validatorsMap.Add(&state.ValidatorInfo{ + + PublicKey: pkInactive, + ShardId: inactiveShardId, + List: inactiveList, + }) + _ = validatorsMap.Add(&state.ValidatorInfo{ + + PublicKey: pkNew, + ShardId: newShardId, + List: newList, + }) arg := createDefaultValidatorsProviderArg() pubKeyConverter := testscommon.NewPubkeyConverterMock(32) vsp := validatorsProvider{ @@ -411,7 +434,7 @@ func TestValidatorsProvider_createCache(t *testing.T) { 
validatorStatistics: arg.ValidatorStatistics, cache: nil, cacheRefreshIntervalDuration: arg.CacheRefreshIntervalDurationInSec, - pubkeyConverter: pubKeyConverter, + validatorPubKeyConverter: pubKeyConverter, lock: sync.RWMutex{}, } @@ -419,26 +442,22 @@ func TestValidatorsProvider_createCache(t *testing.T) { assert.NotNil(t, cache) - encodedPkEligible, err := pubKeyConverter.Encode(pkEligible) - assert.Nil(t, err) + encodedPkEligible, _ := pubKeyConverter.Encode(pkEligible) assert.NotNil(t, cache[encodedPkEligible]) assert.Equal(t, eligibleList, cache[encodedPkEligible].ValidatorStatus) assert.Equal(t, eligibleShardId, cache[encodedPkEligible].ShardId) - encodedPkWaiting, err := pubKeyConverter.Encode(pkWaiting) - assert.Nil(t, err) + encodedPkWaiting, _ := pubKeyConverter.Encode(pkWaiting) assert.NotNil(t, cache[encodedPkWaiting]) assert.Equal(t, waitingList, cache[encodedPkWaiting].ValidatorStatus) assert.Equal(t, waitingShardId, cache[encodedPkWaiting].ShardId) - encodedPkLeaving, err := pubKeyConverter.Encode(pkLeaving) - assert.Nil(t, err) + encodedPkLeaving, _ := pubKeyConverter.Encode(pkLeaving) assert.NotNil(t, cache[encodedPkLeaving]) assert.Equal(t, leavingList, cache[encodedPkLeaving].ValidatorStatus) assert.Equal(t, leavingShardId, cache[encodedPkLeaving].ShardId) - encodedPkNew, err := pubKeyConverter.Encode(pkNew) - assert.Nil(t, err) + encodedPkNew, _ := pubKeyConverter.Encode(pkNew) assert.NotNil(t, cache[encodedPkNew]) assert.Equal(t, newList, cache[encodedPkNew].ValidatorStatus) assert.Equal(t, newShardId, cache[encodedPkNew].ShardId) @@ -452,31 +471,25 @@ func TestValidatorsProvider_createCache_combined(t *testing.T) { pkLeavingInTrie := []byte("pk3") leavingList := string(common.LeavingList) - validatorsMap := make(map[uint32][]*state.ValidatorInfo) + validatorsMap := state.NewShardValidatorsInfoMap() eligibleShardId := uint32(0) inactiveShardId := uint32(1) leavingShardId := uint32(2) - validatorsMap[eligibleShardId] = []*state.ValidatorInfo{ - { - PublicKey: pkEligibleInTrie, - ShardId: eligibleShardId, - List: eligibleList, - }, - } - validatorsMap[inactiveShardId] = []*state.ValidatorInfo{ - { - PublicKey: pkInactive, - ShardId: inactiveShardId, - List: inactiveList, - }, - } - validatorsMap[leavingShardId] = []*state.ValidatorInfo{ - { - PublicKey: pkLeavingInTrie, - ShardId: leavingShardId, - List: leavingList, - }, - } + _ = validatorsMap.Add(&state.ValidatorInfo{ + PublicKey: pkEligibleInTrie, + ShardId: eligibleShardId, + List: eligibleList, + }) + _ = validatorsMap.Add(&state.ValidatorInfo{ + PublicKey: pkInactive, + ShardId: inactiveShardId, + List: inactiveList, + }) + _ = validatorsMap.Add(&state.ValidatorInfo{ + PublicKey: pkLeavingInTrie, + ShardId: leavingShardId, + List: leavingList, + }) arg := createDefaultValidatorsProviderArg() nodesCoordinator := shardingMocks.NewNodesCoordinatorMock() nodesCoordinatorEligibleShardId := uint32(5) @@ -491,7 +504,7 @@ func TestValidatorsProvider_createCache_combined(t *testing.T) { vsp := validatorsProvider{ nodesCoordinator: nodesCoordinator, validatorStatistics: arg.ValidatorStatistics, - pubkeyConverter: arg.PubKeyConverter, + validatorPubKeyConverter: arg.ValidatorPubKeyConverter, cache: nil, cacheRefreshIntervalDuration: arg.CacheRefreshIntervalDurationInSec, lock: sync.RWMutex{}, @@ -499,14 +512,12 @@ func TestValidatorsProvider_createCache_combined(t *testing.T) { cache := vsp.createNewCache(0, validatorsMap) - encodedPkEligible, err := arg.PubKeyConverter.Encode(pkEligibleInTrie) - assert.Nil(t, err) + 
encodedPkEligible, _ := arg.ValidatorPubKeyConverter.Encode(pkEligibleInTrie) assert.NotNil(t, cache[encodedPkEligible]) assert.Equal(t, eligibleList, cache[encodedPkEligible].ValidatorStatus) assert.Equal(t, nodesCoordinatorEligibleShardId, cache[encodedPkEligible].ShardId) - encodedPkLeavingInTrie, err := arg.PubKeyConverter.Encode(pkLeavingInTrie) - assert.Nil(t, err) + encodedPkLeavingInTrie, _ := arg.ValidatorPubKeyConverter.Encode(pkLeavingInTrie) computedPeerType := fmt.Sprintf(common.CombinedPeerType, common.EligibleList, common.LeavingList) assert.NotNil(t, cache[encodedPkLeavingInTrie]) assert.Equal(t, computedPeerType, cache[encodedPkLeavingInTrie].ValidatorStatus) @@ -519,14 +530,14 @@ func TestValidatorsProvider_CallsPopulateOnlyAfterTimeout(t *testing.T) { arg := createDefaultValidatorsProviderArg() arg.CacheRefreshIntervalDurationInSec = time.Millisecond * 10 - validatorStatisticsProcessor := &mock.ValidatorStatisticsProcessorStub{ + validatorStatisticsProcessor := &testscommon.ValidatorStatisticsProcessorStub{ LastFinalizedRootHashCalled: func() []byte { return []byte("rootHash") }, } - validatorStatisticsProcessor.GetValidatorInfoForRootHashCalled = func(rootHash []byte) (map[uint32][]*state.ValidatorInfo, error) { + validatorStatisticsProcessor.GetValidatorInfoForRootHashCalled = func(rootHash []byte) (state.ShardValidatorsInfoMapHandler, error) { atomic.AddInt32(populateCacheCalled, 1) - return nil, nil + return state.NewShardValidatorsInfoMap(), nil } arg.ValidatorStatistics = validatorStatisticsProcessor @@ -560,31 +571,29 @@ func TestValidatorsProvider_CallsUpdateCacheOnEpochChange(t *testing.T) { arg.CacheRefreshIntervalDurationInSec = 5 * time.Millisecond pkEligibleInTrie := []byte("pk1") - validatorStatisticsProcessor := &mock.ValidatorStatisticsProcessorStub{ + validatorStatisticsProcessor := &testscommon.ValidatorStatisticsProcessorStub{ LastFinalizedRootHashCalled: func() []byte { return []byte("rootHash") }, } - validatorStatisticsProcessor.GetValidatorInfoForRootHashCalled = func(rootHash []byte) (map[uint32][]*state.ValidatorInfo, error) { + validatorStatisticsProcessor.GetValidatorInfoForRootHashCalled = func(rootHash []byte) (state.ShardValidatorsInfoMapHandler, error) { callNumber++ // first call comes from the constructor if callNumber == 1 { - return nil, nil + return state.NewShardValidatorsInfoMap(), nil } - return map[uint32][]*state.ValidatorInfo{ - 0: { - { - PublicKey: pkEligibleInTrie, - List: string(common.EligibleList), - }, - }, - }, nil + validatorsMap := state.NewShardValidatorsInfoMap() + _ = validatorsMap.Add(&state.ValidatorInfo{ + ShardId: 0, + PublicKey: pkEligibleInTrie, + List: string(common.EligibleList), + }) + return validatorsMap, nil } arg.ValidatorStatistics = validatorStatisticsProcessor vsp, _ := NewValidatorsProvider(arg) - encodedEligible, err := arg.PubKeyConverter.Encode(pkEligibleInTrie) - assert.Nil(t, err) + encodedEligible, _ := arg.ValidatorPubKeyConverter.Encode(pkEligibleInTrie) assert.Equal(t, 0, len(vsp.GetCache())) // nothing in cache epochStartNotifier.NotifyAll(&block.Header{Nonce: 1, ShardID: 2, Round: 3}) time.Sleep(arg.CacheRefreshIntervalDurationInSec) @@ -600,31 +609,29 @@ func TestValidatorsProvider_DoesntCallUpdateUpdateCacheWithoutRequests(t *testin arg.CacheRefreshIntervalDurationInSec = 5 * time.Millisecond pkEligibleInTrie := []byte("pk1") - validatorStatisticsProcessor := &mock.ValidatorStatisticsProcessorStub{ + validatorStatisticsProcessor := &testscommon.ValidatorStatisticsProcessorStub{ 
LastFinalizedRootHashCalled: func() []byte { return []byte("rootHash") }, } - validatorStatisticsProcessor.GetValidatorInfoForRootHashCalled = func(rootHash []byte) (map[uint32][]*state.ValidatorInfo, error) { + validatorStatisticsProcessor.GetValidatorInfoForRootHashCalled = func(rootHash []byte) (state.ShardValidatorsInfoMapHandler, error) { callNumber++ // first call comes from the constructor if callNumber == 1 { - return nil, nil + return state.NewShardValidatorsInfoMap(), nil } - return map[uint32][]*state.ValidatorInfo{ - 0: { - { - PublicKey: pkEligibleInTrie, - List: string(common.EligibleList), - }, - }, - }, nil + validatorsMap := state.NewShardValidatorsInfoMap() + _ = validatorsMap.Add(&state.ValidatorInfo{ + ShardId: 0, + PublicKey: pkEligibleInTrie, + List: string(common.EligibleList), + }) + return validatorsMap, nil } arg.ValidatorStatistics = validatorStatisticsProcessor vsp, _ := NewValidatorsProvider(arg) - encodedEligible, err := arg.PubKeyConverter.Encode(pkEligibleInTrie) - assert.Nil(t, err) + encodedEligible, _ := arg.ValidatorPubKeyConverter.Encode(pkEligibleInTrie) assert.Equal(t, 0, len(vsp.GetCache())) // nothing in cache time.Sleep(arg.CacheRefreshIntervalDurationInSec) assert.Equal(t, 0, len(vsp.GetCache())) // nothing in cache @@ -636,6 +643,409 @@ func TestValidatorsProvider_DoesntCallUpdateUpdateCacheWithoutRequests(t *testin assert.Equal(t, 1, len(resp)) assert.NotNil(t, vsp.GetCache()[encodedEligible]) } + +func TestValidatorsProvider_GetAuctionList(t *testing.T) { + t.Parallel() + + t.Run("error getting root hash", func(t *testing.T) { + t.Parallel() + args := createDefaultValidatorsProviderArg() + args.ValidatorStatistics = &testscommon.ValidatorStatisticsProcessorStub{ + LastFinalizedRootHashCalled: func() []byte { + return nil + }, + } + vp, _ := NewValidatorsProvider(args) + time.Sleep(args.CacheRefreshIntervalDurationInSec) + + list, err := vp.GetAuctionList() + require.Nil(t, list) + require.Equal(t, state.ErrNilRootHash, err) + }) + + t.Run("error getting validators info for root hash", func(t *testing.T) { + t.Parallel() + args := createDefaultValidatorsProviderArg() + expectedErr := errors.New("local error") + expectedRootHash := []byte("root hash") + args.ValidatorStatistics = &testscommon.ValidatorStatisticsProcessorStub{ + LastFinalizedRootHashCalled: func() []byte { + return expectedRootHash + }, + GetValidatorInfoForRootHashCalled: func(rootHash []byte) (state.ShardValidatorsInfoMapHandler, error) { + require.Equal(t, expectedRootHash, rootHash) + return nil, expectedErr + }, + } + vp, _ := NewValidatorsProvider(args) + time.Sleep(args.CacheRefreshIntervalDurationInSec) + + list, err := vp.GetAuctionList() + require.Nil(t, list) + require.Equal(t, expectedErr, err) + }) + + t.Run("error filling validator info, staking data provider cache should be cleaned", func(t *testing.T) { + t.Parallel() + args := createDefaultValidatorsProviderArg() + + cleanCalled := &coreAtomic.Flag{} + expectedValidator := &state.ValidatorInfo{PublicKey: []byte("pubKey"), List: string(common.AuctionList)} + expectedErr := errors.New("local error") + expectedRootHash := []byte("root hash") + args.ValidatorStatistics = &testscommon.ValidatorStatisticsProcessorStub{ + LastFinalizedRootHashCalled: func() []byte { + return expectedRootHash + }, + GetValidatorInfoForRootHashCalled: func(rootHash []byte) (state.ShardValidatorsInfoMapHandler, error) { + require.Equal(t, expectedRootHash, rootHash) + validatorsMap := state.NewShardValidatorsInfoMap() + _ = 
validatorsMap.Add(expectedValidator) + return validatorsMap, nil + }, + } + args.StakingDataProvider = &stakingcommon.StakingDataProviderStub{ + FillValidatorInfoCalled: func(validator state.ValidatorInfoHandler) error { + require.Equal(t, expectedValidator, validator) + return expectedErr + }, + CleanCalled: func() { + cleanCalled.SetValue(true) + }, + } + vp, _ := NewValidatorsProvider(args) + time.Sleep(args.CacheRefreshIntervalDurationInSec) + + list, err := vp.GetAuctionList() + require.Nil(t, list) + require.Equal(t, expectedErr, err) + require.True(t, cleanCalled.IsSet()) + }) + + t.Run("error selecting nodes from auction, staking data provider cache should be cleaned", func(t *testing.T) { + t.Parallel() + args := createDefaultValidatorsProviderArg() + + cleanCalled := &coreAtomic.Flag{} + expectedErr := errors.New("local error") + args.AuctionListSelector = &stakingcommon.AuctionListSelectorStub{ + SelectNodesFromAuctionListCalled: func(validatorsInfoMap state.ShardValidatorsInfoMapHandler, randomness []byte) error { + return expectedErr + }, + } + args.StakingDataProvider = &stakingcommon.StakingDataProviderStub{ + CleanCalled: func() { + cleanCalled.SetValue(true) + }, + } + vp, _ := NewValidatorsProvider(args) + time.Sleep(args.CacheRefreshIntervalDurationInSec) + + list, err := vp.GetAuctionList() + require.Nil(t, list) + require.Equal(t, expectedErr, err) + require.True(t, cleanCalled.IsSet()) + }) + + t.Run("empty list, check normal flow is executed", func(t *testing.T) { + t.Parallel() + args := createDefaultValidatorsProviderArg() + + expectedRootHash := []byte("root hash") + ctRootHashCalled := uint32(0) + ctGetValidatorsInfoForRootHash := uint32(0) + ctSelectNodesFromAuctionList := uint32(0) + ctFillValidatorInfoCalled := uint32(0) + ctGetOwnersDataCalled := uint32(0) + ctComputeUnqualifiedNodes := uint32(0) + + args.ValidatorStatistics = &testscommon.ValidatorStatisticsProcessorStub{ + LastFinalizedRootHashCalled: func() []byte { + atomic.AddUint32(&ctRootHashCalled, 1) + return expectedRootHash + }, + GetValidatorInfoForRootHashCalled: func(rootHash []byte) (state.ShardValidatorsInfoMapHandler, error) { + atomic.AddUint32(&ctGetValidatorsInfoForRootHash, 1) + require.Equal(t, expectedRootHash, rootHash) + return state.NewShardValidatorsInfoMap(), nil + }, + } + args.AuctionListSelector = &stakingcommon.AuctionListSelectorStub{ + SelectNodesFromAuctionListCalled: func(validatorsInfoMap state.ShardValidatorsInfoMapHandler, randomness []byte) error { + atomic.AddUint32(&ctSelectNodesFromAuctionList, 1) + require.Equal(t, expectedRootHash, randomness) + return nil + }, + } + args.StakingDataProvider = &stakingcommon.StakingDataProviderStub{ + FillValidatorInfoCalled: func(validator state.ValidatorInfoHandler) error { + atomic.AddUint32(&ctFillValidatorInfoCalled, 1) + return nil + }, + GetOwnersDataCalled: func() map[string]*epochStart.OwnerData { + atomic.AddUint32(&ctGetOwnersDataCalled, 1) + return nil + }, + ComputeUnQualifiedNodesCalled: func(validatorInfos state.ShardValidatorsInfoMapHandler) ([][]byte, map[string][][]byte, error) { + atomic.AddUint32(&ctComputeUnqualifiedNodes, 1) + return nil, nil, nil + }, + } + vp, _ := NewValidatorsProvider(args) + time.Sleep(args.CacheRefreshIntervalDurationInSec) + + list, err := vp.GetAuctionList() + require.Nil(t, err) + require.Empty(t, list) + require.Equal(t, ctRootHashCalled, uint32(2)) // another call is from constructor in startRefreshProcess.updateCache + require.Equal(t, ctGetValidatorsInfoForRootHash, uint32(2)) // 
another call is from constructor in startRefreshProcess.updateCache + require.Equal(t, ctFillValidatorInfoCalled, uint32(0)) + require.Equal(t, ctGetOwnersDataCalled, uint32(1)) + require.Equal(t, ctComputeUnqualifiedNodes, uint32(1)) + require.Equal(t, expectedRootHash, vp.cachedRandomness) + }) + + t.Run("normal flow, check data is correctly computed", func(t *testing.T) { + t.Parallel() + args := createDefaultValidatorsProviderArg() + + v1 := &state.ValidatorInfo{PublicKey: []byte("pk1"), List: string(common.AuctionList)} + v2 := &state.ValidatorInfo{PublicKey: []byte("pk2"), List: string(common.AuctionList)} + v3 := &state.ValidatorInfo{PublicKey: []byte("pk3"), List: string(common.AuctionList)} + v4 := &state.ValidatorInfo{PublicKey: []byte("pk4"), List: string(common.AuctionList)} + v5 := &state.ValidatorInfo{PublicKey: []byte("pk5"), List: string(common.AuctionList)} + v6 := &state.ValidatorInfo{PublicKey: []byte("pk6"), List: string(common.AuctionList)} + v7 := &state.ValidatorInfo{PublicKey: []byte("pk7"), List: string(common.EligibleList)} + v8 := &state.ValidatorInfo{PublicKey: []byte("pk8"), List: string(common.WaitingList)} + v9 := &state.ValidatorInfo{PublicKey: []byte("pk9"), List: string(common.LeavingList)} + v10 := &state.ValidatorInfo{PublicKey: []byte("pk10"), List: string(common.JailedList)} + v11 := &state.ValidatorInfo{PublicKey: []byte("pk11"), List: string(common.AuctionList)} + v12 := &state.ValidatorInfo{PublicKey: []byte("pk12"), List: string(common.AuctionList)} + + owner1 := "owner1" + owner2 := "owner2" + owner3 := "owner3" + owner4 := "owner4" + owner5 := "owner5" + owner6 := "owner6" + owner7 := "owner7" + ownersData := map[string]*epochStart.OwnerData{ + owner1: { + NumStakedNodes: 3, + NumActiveNodes: 1, + TotalTopUp: big.NewInt(7500), + TopUpPerNode: big.NewInt(2500), + AuctionList: []state.ValidatorInfoHandler{v1, v2}, // owner1 will have v1 & v2 selected + Qualified: true, // with qualifiedTopUp = 2500 + }, + owner2: { + NumStakedNodes: 3, + NumActiveNodes: 1, + TotalTopUp: big.NewInt(3000), + TopUpPerNode: big.NewInt(1000), + AuctionList: []state.ValidatorInfoHandler{v3, v4}, // owner2 will have v3 selected + Qualified: true, // with qualifiedTopUp = 1500 + }, + owner3: { + NumStakedNodes: 2, + NumActiveNodes: 0, + TotalTopUp: big.NewInt(4000), + TopUpPerNode: big.NewInt(2000), + AuctionList: []state.ValidatorInfoHandler{v5, v6}, // owner3 will have v5 selected + Qualified: true, // with qualifiedTopUp = 4000 + }, + owner4: { + NumStakedNodes: 3, + NumActiveNodes: 2, + TotalTopUp: big.NewInt(0), + TopUpPerNode: big.NewInt(0), + AuctionList: []state.ValidatorInfoHandler{v7}, // owner4 has one node in auction, but is not qualified + Qualified: false, // should be sent at the bottom of the list + }, + owner5: { + NumStakedNodes: 5, + NumActiveNodes: 5, + TotalTopUp: big.NewInt(5000), + TopUpPerNode: big.NewInt(1000), + AuctionList: []state.ValidatorInfoHandler{}, // owner5 has no nodes in auction, will not appear in API list + Qualified: true, + }, + // owner6 has same stats as owner7. 
After selection, owner7 will have its node selected => should be listed above owner 6 + owner6: { + NumStakedNodes: 1, + NumActiveNodes: 0, + TotalTopUp: big.NewInt(0), + TopUpPerNode: big.NewInt(0), + AuctionList: []state.ValidatorInfoHandler{v11}, + Qualified: true, // should be added + }, + owner7: { + NumStakedNodes: 1, + NumActiveNodes: 0, + TotalTopUp: big.NewInt(0), + TopUpPerNode: big.NewInt(0), + AuctionList: []state.ValidatorInfoHandler{v12}, + Qualified: true, + }, + } + + validatorsMap := state.NewShardValidatorsInfoMap() + _ = validatorsMap.Add(v1) + _ = validatorsMap.Add(v2) + _ = validatorsMap.Add(v3) + _ = validatorsMap.Add(v4) + _ = validatorsMap.Add(v5) + _ = validatorsMap.Add(v6) + _ = validatorsMap.Add(v7) + _ = validatorsMap.Add(v8) + _ = validatorsMap.Add(v9) + _ = validatorsMap.Add(v10) + _ = validatorsMap.Add(v11) + _ = validatorsMap.Add(v12) + + rootHash := []byte("root hash") + args.ValidatorStatistics = &testscommon.ValidatorStatisticsProcessorStub{ + LastFinalizedRootHashCalled: func() []byte { + return rootHash + }, + GetValidatorInfoForRootHashCalled: func(rootHash []byte) (state.ShardValidatorsInfoMapHandler, error) { + return validatorsMap, nil + }, + } + args.AuctionListSelector = &stakingcommon.AuctionListSelectorStub{ + SelectNodesFromAuctionListCalled: func(validatorsInfoMap state.ShardValidatorsInfoMapHandler, randomness []byte) error { + selectedV1 := v1.ShallowClone() + selectedV1.SetList(string(common.SelectedFromAuctionList)) + _ = validatorsInfoMap.Replace(v1, selectedV1) + + selectedV2 := v2.ShallowClone() + selectedV2.SetList(string(common.SelectedFromAuctionList)) + _ = validatorsInfoMap.Replace(v2, selectedV2) + + selectedV3 := v3.ShallowClone() + selectedV3.SetList(string(common.SelectedFromAuctionList)) + _ = validatorsInfoMap.Replace(v3, selectedV3) + + selectedV5 := v5.ShallowClone() + selectedV5.SetList(string(common.SelectedFromAuctionList)) + _ = validatorsInfoMap.Replace(v5, selectedV5) + + selectedV12 := v12.ShallowClone() + selectedV12.SetList(string(common.SelectedFromAuctionList)) + _ = validatorsInfoMap.Replace(v12, selectedV12) + + return nil + }, + } + args.StakingDataProvider = &stakingcommon.StakingDataProviderStub{ + GetOwnersDataCalled: func() map[string]*epochStart.OwnerData { + return ownersData + }, + } + + vp, _ := NewValidatorsProvider(args) + time.Sleep(args.CacheRefreshIntervalDurationInSec) + + expectedList := []*common.AuctionListValidatorAPIResponse{ + { + Owner: args.AddressPubKeyConverter.SilentEncode([]byte(owner3), log), + NumStakedNodes: 2, + TotalTopUp: "4000", + TopUpPerNode: "2000", + QualifiedTopUp: "4000", + Nodes: []*common.AuctionNode{ + { + BlsKey: args.ValidatorPubKeyConverter.SilentEncode(v5.PublicKey, log), + Qualified: true, + }, + { + BlsKey: args.ValidatorPubKeyConverter.SilentEncode(v6.PublicKey, log), + Qualified: false, + }, + }, + }, + { + Owner: args.AddressPubKeyConverter.SilentEncode([]byte(owner1), log), + NumStakedNodes: 3, + TotalTopUp: "7500", + TopUpPerNode: "2500", + QualifiedTopUp: "2500", + Nodes: []*common.AuctionNode{ + { + BlsKey: args.ValidatorPubKeyConverter.SilentEncode(v1.PublicKey, log), + Qualified: true, + }, + { + BlsKey: args.ValidatorPubKeyConverter.SilentEncode(v2.PublicKey, log), + Qualified: true, + }, + }, + }, + { + Owner: args.AddressPubKeyConverter.SilentEncode([]byte(owner2), log), + NumStakedNodes: 3, + TotalTopUp: "3000", + TopUpPerNode: "1000", + QualifiedTopUp: "1500", + Nodes: []*common.AuctionNode{ + { + BlsKey: 
args.ValidatorPubKeyConverter.SilentEncode(v3.PublicKey, log), + Qualified: true, + }, + { + BlsKey: args.ValidatorPubKeyConverter.SilentEncode(v4.PublicKey, log), + Qualified: false, + }, + }, + }, + { + Owner: args.AddressPubKeyConverter.SilentEncode([]byte(owner7), log), + NumStakedNodes: 1, + TotalTopUp: "0", + TopUpPerNode: "0", + QualifiedTopUp: "0", + Nodes: []*common.AuctionNode{ + { + BlsKey: args.ValidatorPubKeyConverter.SilentEncode(v12.PublicKey, log), + Qualified: true, + }, + }, + }, + { + Owner: args.AddressPubKeyConverter.SilentEncode([]byte(owner6), log), + NumStakedNodes: 1, + TotalTopUp: "0", + TopUpPerNode: "0", + QualifiedTopUp: "0", + Nodes: []*common.AuctionNode{ + { + BlsKey: args.ValidatorPubKeyConverter.SilentEncode(v11.PublicKey, log), + Qualified: false, + }, + }, + }, + { + Owner: args.AddressPubKeyConverter.SilentEncode([]byte(owner4), log), + NumStakedNodes: 3, + TotalTopUp: "0", + TopUpPerNode: "0", + QualifiedTopUp: "0", + Nodes: []*common.AuctionNode{ + { + BlsKey: args.ValidatorPubKeyConverter.SilentEncode(v7.PublicKey, log), + Qualified: false, + }, + }, + }, + } + + list, err := vp.GetAuctionList() + require.Nil(t, err) + require.Equal(t, expectedList, list) + }) + +} + func createMockValidatorInfo() *state.ValidatorInfo { initialInfo := &state.ValidatorInfo{ PublicKey: []byte("aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"), @@ -675,13 +1085,16 @@ func createDefaultValidatorsProviderArg() ArgValidatorsProvider { NodesCoordinator: &shardingMocks.NodesCoordinatorMock{}, StartEpoch: 1, EpochStartEventNotifier: &mock.EpochStartNotifierStub{}, + StakingDataProvider: &stakingcommon.StakingDataProviderStub{}, CacheRefreshIntervalDurationInSec: 1 * time.Millisecond, - ValidatorStatistics: &mock.ValidatorStatisticsProcessorStub{ + ValidatorStatistics: &testscommon.ValidatorStatisticsProcessorStub{ LastFinalizedRootHashCalled: func() []byte { return []byte("rootHash") }, }, - MaxRating: 100, - PubKeyConverter: testscommon.NewPubkeyConverterMock(32), + MaxRating: 100, + ValidatorPubKeyConverter: testscommon.NewPubkeyConverterMock(32), + AddressPubKeyConverter: testscommon.NewPubkeyConverterMock(32), + AuctionListSelector: &stakingcommon.AuctionListSelectorStub{}, } } diff --git a/process/rating/chance.go b/process/rating/chance.go index 8ad3c092cec..71233ba3d3e 100644 --- a/process/rating/chance.go +++ b/process/rating/chance.go @@ -9,17 +9,17 @@ type selectionChance struct { chancePercentage uint32 } -//GetMaxThreshold returns the maxThreshold until this ChancePercentage holds +// GetMaxThreshold returns the maxThreshold until this ChancePercentage holds func (bsr *selectionChance) GetMaxThreshold() uint32 { return bsr.maxThreshold } -//GetChancePercentage returns the percentage for the RatingChance +// GetChancePercentage returns the percentage for the RatingChance func (bsr *selectionChance) GetChancePercentage() uint32 { return bsr.chancePercentage } -//IsInterfaceNil verifies if the interface is nil +// IsInterfaceNil verifies if the interface is nil func (bsr *selectionChance) IsInterfaceNil() bool { return bsr == nil } diff --git a/process/rating/disabledRatingReader.go b/process/rating/disabledRatingReader.go index 8b7ac6662c1..b57f06b2dca 100644 --- a/process/rating/disabledRatingReader.go +++ b/process/rating/disabledRatingReader.go @@ -10,17 +10,17 @@ func NewDisabledRatingReader(startRating uint32) *disabledRatingReader { return &disabledRatingReader{startRating: startRating} } -//GetRating gets the 
rating for the public key +// GetRating gets the rating for the public key func (rr *disabledRatingReader) GetRating(string) uint32 { return rr.startRating } -//UpdateRatingFromTempRating sets the new rating to the value of the tempRating +// UpdateRatingFromTempRating sets the new rating to the value of the tempRating func (rr *disabledRatingReader) UpdateRatingFromTempRating([]string) error { return nil } -//IsInterfaceNil verifies if the interface is nil +// IsInterfaceNil verifies if the interface is nil func (rr *disabledRatingReader) IsInterfaceNil() bool { return rr == nil } diff --git a/process/scToProtocol/stakingToPeer.go b/process/scToProtocol/stakingToPeer.go index af8bc00d688..e9b166b52ea 100644 --- a/process/scToProtocol/stakingToPeer.go +++ b/process/scToProtocol/stakingToPeer.go @@ -230,16 +230,17 @@ func (stp *stakingToPeer) updatePeerStateV1( isValidator := account.GetList() == string(common.EligibleList) || account.GetList() == string(common.WaitingList) isJailed := stakingData.JailedNonce >= stakingData.UnJailedNonce && stakingData.JailedNonce > 0 + isStakingV4Started := stp.enableEpochsHandler.IsFlagEnabled(common.StakingV4StartedFlag) if !isJailed { if stakingData.StakedNonce == nonce && !isValidator { - account.SetListAndIndex(account.GetShardId(), string(common.NewList), uint32(stakingData.RegisterNonce)) + account.SetListAndIndex(account.GetShardId(), string(common.NewList), uint32(stakingData.RegisterNonce), isStakingV4Started) account.SetTempRating(stp.startRating) account.SetUnStakedEpoch(common.DefaultUnstakedEpoch) } if stakingData.UnStakedNonce == nonce && account.GetList() != string(common.InactiveList) { - account.SetListAndIndex(account.GetShardId(), string(common.LeavingList), uint32(stakingData.UnStakedNonce)) + account.SetListAndIndex(account.GetShardId(), string(common.LeavingList), uint32(stakingData.UnStakedNonce), isStakingV4Started) account.SetUnStakedEpoch(stakingData.UnStakedEpoch) } } @@ -250,7 +251,7 @@ func (stp *stakingToPeer) updatePeerStateV1( } if !isValidator && account.GetUnStakedEpoch() == common.DefaultUnstakedEpoch { - account.SetListAndIndex(account.GetShardId(), string(common.NewList), uint32(stakingData.UnJailedNonce)) + account.SetListAndIndex(account.GetShardId(), string(common.NewList), uint32(stakingData.UnJailedNonce), isStakingV4Started) } } @@ -276,11 +277,13 @@ func (stp *stakingToPeer) updatePeerState( return err } + isStakingV4Started := stp.enableEpochsHandler.IsFlagEnabled(common.StakingV4StartedFlag) + isUnJailForInactive := !isNew && !stakingData.Staked && stakingData.UnJailedNonce == nonce && account.GetList() == string(common.JailedList) if isUnJailForInactive { log.Debug("unJail for inactive node changed status to inactive list", "blsKey", account.AddressBytes(), "unStakedEpoch", stakingData.UnStakedEpoch) - account.SetListAndIndex(account.GetShardId(), string(common.InactiveList), uint32(stakingData.UnJailedNonce)) + account.SetListAndIndex(account.GetShardId(), string(common.InactiveList), uint32(stakingData.UnJailedNonce), isStakingV4Started) if account.GetTempRating() < stp.unJailRating { account.SetTempRating(stp.unJailRating) } @@ -313,18 +316,23 @@ func (stp *stakingToPeer) updatePeerState( log.Debug("new node", "blsKey", blsPubKey) } + newNodesList := common.NewList + if isStakingV4Started { + newNodesList = common.AuctionList + } + isValidator := account.GetList() == string(common.EligibleList) || account.GetList() == string(common.WaitingList) if !stakingData.Jailed { if stakingData.StakedNonce == nonce 
&& !isValidator { - log.Debug("node is staked, changed status to new", "blsKey", blsPubKey) - account.SetListAndIndex(account.GetShardId(), string(common.NewList), uint32(stakingData.StakedNonce)) + log.Debug("node is staked, changed status to", "list", newNodesList, "blsKey", blsPubKey) + account.SetListAndIndex(account.GetShardId(), string(newNodesList), uint32(stakingData.StakedNonce), isStakingV4Started) account.SetTempRating(stp.startRating) account.SetUnStakedEpoch(common.DefaultUnstakedEpoch) } if stakingData.UnStakedNonce == nonce && account.GetList() != string(common.InactiveList) { log.Debug("node is unStaked, changed status to leaving list", "blsKey", blsPubKey) - account.SetListAndIndex(account.GetShardId(), string(common.LeavingList), uint32(stakingData.UnStakedNonce)) + account.SetListAndIndex(account.GetShardId(), string(common.LeavingList), uint32(stakingData.UnStakedNonce), isStakingV4Started) account.SetUnStakedEpoch(stakingData.UnStakedEpoch) } } @@ -337,20 +345,20 @@ func (stp *stakingToPeer) updatePeerState( isNewValidator := !isValidator && stakingData.Staked if isNewValidator { - log.Debug("node is unJailed and staked, changing status to new list", "blsKey", blsPubKey) - account.SetListAndIndex(account.GetShardId(), string(common.NewList), uint32(stakingData.UnJailedNonce)) + log.Debug("node is unJailed and staked, changing status to", "list", newNodesList, "blsKey", blsPubKey) + account.SetListAndIndex(account.GetShardId(), string(newNodesList), uint32(stakingData.UnJailedNonce), isStakingV4Started) } if account.GetList() == string(common.JailedList) { log.Debug("node is unJailed and not staked, changing status to inactive list", "blsKey", blsPubKey) - account.SetListAndIndex(account.GetShardId(), string(common.InactiveList), uint32(stakingData.UnJailedNonce)) + account.SetListAndIndex(account.GetShardId(), string(common.InactiveList), uint32(stakingData.UnJailedNonce), isStakingV4Started) account.SetUnStakedEpoch(stakingData.UnStakedEpoch) } } if stakingData.JailedNonce == nonce && account.GetList() != string(common.InactiveList) { log.Debug("node is jailed, setting status to leaving", "blsKey", blsPubKey) - account.SetListAndIndex(account.GetShardId(), string(common.LeavingList), uint32(stakingData.JailedNonce)) + account.SetListAndIndex(account.GetShardId(), string(common.LeavingList), uint32(stakingData.JailedNonce), isStakingV4Started) account.SetTempRating(stp.jailRating) } diff --git a/process/scToProtocol/stakingToPeer_test.go b/process/scToProtocol/stakingToPeer_test.go index 4ac4a2fa081..f53495e92c9 100644 --- a/process/scToProtocol/stakingToPeer_test.go +++ b/process/scToProtocol/stakingToPeer_test.go @@ -673,8 +673,10 @@ func TestStakingToPeer_UpdatePeerState(t *testing.T) { }, } + enableEpochsHandler := enableEpochsHandlerMock.NewEnableEpochsHandlerStub(common.StakeFlag, common.ValidatorToDelegationFlag) arguments := createMockArgumentsNewStakingToPeer() arguments.PeerState = peerAccountsDB + arguments.EnableEpochsHandler = enableEpochsHandler stp, _ := NewStakingToPeer(arguments) stakingData := systemSmartContracts.StakedDataV2_0{ @@ -703,11 +705,19 @@ func TestStakingToPeer_UpdatePeerState(t *testing.T) { assert.True(t, bytes.Equal(stakingData.RewardAddress, peerAccount.GetRewardAddress())) assert.Equal(t, string(common.NewList), peerAccount.GetList()) + enableEpochsHandler.AddActiveFlags(common.StakingV4StartedFlag) + err = stp.updatePeerState(stakingData, blsPubKey, nonce) + assert.NoError(t, err) + assert.True(t, bytes.Equal(blsPubKey, 
peerAccount.GetBLSPublicKey())) + assert.True(t, bytes.Equal(stakingData.RewardAddress, peerAccount.GetRewardAddress())) + assert.Equal(t, string(common.AuctionList), peerAccount.GetList()) + enableEpochsHandler.RemoveActiveFlags(common.StakingV4StartedFlag) + stakingData.UnStakedNonce = 11 _ = stp.updatePeerState(stakingData, blsPubKey, stakingData.UnStakedNonce) assert.Equal(t, string(common.LeavingList), peerAccount.GetList()) - peerAccount.SetListAndIndex(0, string(common.EligibleList), 5) + peerAccount.SetListAndIndex(0, string(common.EligibleList), 5, false) stakingData.JailedNonce = 12 _ = stp.updatePeerState(stakingData, blsPubKey, stakingData.JailedNonce) assert.Equal(t, string(common.LeavingList), peerAccount.GetList()) @@ -721,6 +731,12 @@ func TestStakingToPeer_UpdatePeerState(t *testing.T) { _ = stp.updatePeerState(stakingData, blsPubKey, stakingData.UnJailedNonce) assert.Equal(t, string(common.NewList), peerAccount.GetList()) + enableEpochsHandler.AddActiveFlags(common.StakingV4StartedFlag) + err = stp.updatePeerState(stakingData, blsPubKey, stakingData.UnJailedNonce) + assert.NoError(t, err) + assert.Equal(t, string(common.AuctionList), peerAccount.GetList()) + enableEpochsHandler.RemoveActiveFlags(common.StakingV4StartedFlag) + stakingData.UnStakedNonce = 15 _ = stp.updatePeerState(stakingData, blsPubKey, stakingData.UnStakedNonce) assert.Equal(t, string(common.LeavingList), peerAccount.GetList()) @@ -769,7 +785,7 @@ func TestStakingToPeer_UnJailFromInactive(t *testing.T) { _ = stp.updatePeerState(stakingData, blsPubKey, stakingData.UnStakedNonce) assert.Equal(t, string(common.LeavingList), peerAccount.GetList()) - peerAccount.SetListAndIndex(0, string(common.JailedList), 5) + peerAccount.SetListAndIndex(0, string(common.JailedList), 5, false) stakingData.UnJailedNonce = 14 _ = stp.updatePeerState(stakingData, blsPubKey, stakingData.UnJailedNonce) assert.Equal(t, string(common.InactiveList), peerAccount.GetList()) diff --git a/process/smartContract/process.go b/process/smartContract/process.go index 9d1c8bcd4f3..7bd0c9a2f52 100644 --- a/process/smartContract/process.go +++ b/process/smartContract/process.go @@ -180,7 +180,6 @@ func NewSmartContractProcessor(args scrCommon.ArgsNewSmartContractProcessor) (*s common.OptimizeGasUsedInCrossMiniBlocksFlag, common.OptimizeNFTStoreFlag, common.RemoveNonUpdatedStorageFlag, - common.BuiltInFunctionOnMetaFlag, common.BackwardCompSaveKeyValueFlag, common.ReturnDataToLastTransferFlagAfterEpoch, common.FixAsyncCallBackArgsListFlag, @@ -2823,7 +2822,7 @@ func (sc *scProcessor) ProcessSmartContractResult(scr *smartContractResult.Smart returnCode, err = sc.ExecuteSmartContractTransaction(scr, sndAcc, dstAcc) return returnCode, err case process.BuiltInFunctionCall: - if sc.shardCoordinator.SelfId() == core.MetachainShardId && !sc.enableEpochsHandler.IsFlagEnabled(common.BuiltInFunctionOnMetaFlag) { + if sc.shardCoordinator.SelfId() == core.MetachainShardId { returnCode, err = sc.ExecuteSmartContractTransaction(scr, sndAcc, dstAcc) return returnCode, err } @@ -2861,7 +2860,8 @@ func (sc *scProcessor) processSimpleSCR( if err != nil { return err } - if !isPayable && !bytes.Equal(scResult.RcvAddr, scResult.OriginalSender) { + isSenderMeta := sc.shardCoordinator.ComputeId(scResult.SndAddr) == core.MetachainShardId + if !isPayable && !bytes.Equal(scResult.RcvAddr, scResult.OriginalSender) && !isSenderMeta { return process.ErrAccountNotPayable } diff --git a/process/smartContract/processProxy/processProxy.go 
b/process/smartContract/processProxy/processProxy.go index d2408c36dfa..c64db4791a4 100644 --- a/process/smartContract/processProxy/processProxy.go +++ b/process/smartContract/processProxy/processProxy.go @@ -169,11 +169,11 @@ func (proxy *scProcessorProxy) IsInterfaceNil() bool { } // EpochConfirmed is called whenever a new epoch is confirmed -func (proxy *scProcessorProxy) EpochConfirmed(_ uint32, _ uint64) { +func (proxy *scProcessorProxy) EpochConfirmed(epoch uint32, _ uint64) { proxy.mutRc.Lock() defer proxy.mutRc.Unlock() - if proxy.args.EnableEpochsHandler.IsFlagEnabled(common.SCProcessorV2Flag) { + if proxy.args.EnableEpochsHandler.IsFlagEnabledInEpoch(common.SCProcessorV2Flag, epoch) { proxy.setActiveProcessorV2() return } diff --git a/process/smartContract/processProxy/processProxy_test.go b/process/smartContract/processProxy/processProxy_test.go index ba0a9c1c0b8..0b5695386a8 100644 --- a/process/smartContract/processProxy/processProxy_test.go +++ b/process/smartContract/processProxy/processProxy_test.go @@ -129,7 +129,11 @@ func TestNewSmartContractProcessorProxy(t *testing.T) { t.Parallel() args := createMockSmartContractProcessorArguments() - args.EnableEpochsHandler = enableEpochsHandlerMock.NewEnableEpochsHandlerStub(common.SCProcessorV2Flag) + args.EnableEpochsHandler = &enableEpochsHandlerMock.EnableEpochsHandlerStub{ + IsFlagEnabledInEpochCalled: func(flag core.EnableEpochFlag, epoch uint32) bool { + return flag == common.SCProcessorV2Flag + }, + } proxy, err := NewSmartContractProcessorProxy(args, &epochNotifierMock.EpochNotifierStub{}) assert.False(t, check.IfNil(proxy)) diff --git a/process/smartContract/processProxy/testProcessProxy.go b/process/smartContract/processProxy/testProcessProxy.go index 31c6514814b..5d5d96ee0d2 100644 --- a/process/smartContract/processProxy/testProcessProxy.go +++ b/process/smartContract/processProxy/testProcessProxy.go @@ -145,11 +145,11 @@ func (proxy *scProcessorTestProxy) IsInterfaceNil() bool { } // EpochConfirmed is called whenever a new epoch is confirmed -func (proxy *scProcessorTestProxy) EpochConfirmed(_ uint32, _ uint64) { +func (proxy *scProcessorTestProxy) EpochConfirmed(epoch uint32, _ uint64) { proxy.mutRc.Lock() defer proxy.mutRc.Unlock() - if proxy.args.EnableEpochsHandler.IsFlagEnabled(common.SCProcessorV2Flag) { + if proxy.args.EnableEpochsHandler.IsFlagEnabledInEpoch(common.SCProcessorV2Flag, epoch) { proxy.setActiveProcessorV2() return } diff --git a/process/smartContract/process_test.go b/process/smartContract/process_test.go index 014a1751495..c53c7ef83c9 100644 --- a/process/smartContract/process_test.go +++ b/process/smartContract/process_test.go @@ -3339,12 +3339,6 @@ func TestScProcessor_ProcessSmartContractResultExecuteSCIfMetaAndBuiltIn(t *test _, err = sc.ProcessSmartContractResult(&scr) require.Nil(t, err) require.True(t, executeCalled) - - executeCalled = false - enableEpochsHandlerStub.AddActiveFlags(common.BuiltInFunctionsFlag, common.BuiltInFunctionOnMetaFlag) - _, err = sc.ProcessSmartContractResult(&scr) - require.Nil(t, err) - require.False(t, executeCalled) } func TestScProcessor_ProcessRelayedSCRValueBackToRelayer(t *testing.T) { @@ -3749,7 +3743,7 @@ func TestSmartContractProcessor_computeTotalConsumedFeeAndDevRwdWithDifferentSCC feeHandler, err := economics.NewEconomicsData(*args) require.Nil(t, err) require.NotNil(t, feeHandler) - arguments.TxFeeHandler, _ = postprocess.NewFeeAccumulator() + arguments.TxFeeHandler = postprocess.NewFeeAccumulator() arguments.EconomicsFee = feeHandler 
arguments.ShardCoordinator = shardCoordinator @@ -3836,9 +3830,7 @@ func TestSmartContractProcessor_finishSCExecutionV2(t *testing.T) { arguments.EconomicsFee, err = economics.NewEconomicsData(*args) require.Nil(t, err) - arguments.TxFeeHandler, err = postprocess.NewFeeAccumulator() - require.Nil(t, err) - + arguments.TxFeeHandler = postprocess.NewFeeAccumulator() arguments.ShardCoordinator = shardCoordinator arguments.AccountsDB = &stateMock.AccountsStub{ RevertToSnapshotCalled: func(snapshot int) error { @@ -4253,8 +4245,7 @@ func createRealEconomicsDataArgs() *economics.ArgsNewEconomicsData { return flag == common.GasPriceModifierFlag }, }, - BuiltInFunctionsCostHandler: &mock.BuiltInCostHandlerStub{}, - TxVersionChecker: &testscommon.TxVersionCheckerStub{}, + TxVersionChecker: &testscommon.TxVersionCheckerStub{}, } } diff --git a/process/smartContract/processorV2/processV2.go b/process/smartContract/processorV2/processV2.go index 938bfe725c3..126433c6dee 100644 --- a/process/smartContract/processorV2/processV2.go +++ b/process/smartContract/processorV2/processV2.go @@ -163,9 +163,7 @@ func NewSmartContractProcessorV2(args scrCommon.ArgsNewSmartContractProcessor) ( if check.IfNil(args.EnableEpochsHandler) { return nil, process.ErrNilEnableEpochsHandler } - err := core.CheckHandlerCompatibility(args.EnableEpochsHandler, []core.EnableEpochFlag{ - common.BuiltInFunctionOnMetaFlag, - }) + err := core.CheckHandlerCompatibility(args.EnableEpochsHandler, []core.EnableEpochFlag{}) if err != nil { return nil, err } @@ -2735,7 +2733,7 @@ func (sc *scProcessor) ProcessSmartContractResult(scr *smartContractResult.Smart returnCode, err = sc.ExecuteSmartContractTransaction(scr, sndAcc, dstAcc) return returnCode, err case process.BuiltInFunctionCall: - if sc.shardCoordinator.SelfId() == core.MetachainShardId && !sc.enableEpochsHandler.IsFlagEnabled(common.BuiltInFunctionOnMetaFlag) { + if sc.shardCoordinator.SelfId() == core.MetachainShardId { returnCode, err = sc.ExecuteSmartContractTransaction(scr, sndAcc, dstAcc) return returnCode, err } diff --git a/process/smartContract/processorV2/process_test.go b/process/smartContract/processorV2/process_test.go index 01a623cbe26..eedea17f1ad 100644 --- a/process/smartContract/processorV2/process_test.go +++ b/process/smartContract/processorV2/process_test.go @@ -371,7 +371,6 @@ func TestNewSmartContractProcessorVerifyAllMembers(t *testing.T) { t.Parallel() arguments := createMockSmartContractProcessorArguments() - arguments.EnableEpochs.BuiltInFunctionOnMetaEnableEpoch = 10 sc, _ := NewSmartContractProcessorV2(arguments) assert.Equal(t, arguments.VmContainer, sc.vmContainer) @@ -3273,12 +3272,6 @@ func TestScProcessor_ProcessSmartContractResultExecuteSCIfMetaAndBuiltIn(t *test _, err = sc.ProcessSmartContractResult(&scr) require.Nil(t, err) require.True(t, executeCalled) - - executeCalled = false - enableEpochsHandlerStub.AddActiveFlags(common.BuiltInFunctionOnMetaFlag) - _, err = sc.ProcessSmartContractResult(&scr) - require.Nil(t, err) - require.False(t, executeCalled) } func TestScProcessor_ProcessRelayedSCRValueBackToRelayer(t *testing.T) { @@ -3704,7 +3697,7 @@ func TestSmartContractProcessor_computeTotalConsumedFeeAndDevRwdWithDifferentSCC feeHandler, err := economics.NewEconomicsData(*args) require.Nil(t, err) require.NotNil(t, feeHandler) - arguments.TxFeeHandler, _ = postprocess.NewFeeAccumulator() + arguments.TxFeeHandler = postprocess.NewFeeAccumulator() arguments.EconomicsFee = feeHandler arguments.ShardCoordinator = shardCoordinator @@ -3790,9 
+3783,7 @@ func TestSmartContractProcessor_finishSCExecutionV2(t *testing.T) { arguments.EconomicsFee, err = economics.NewEconomicsData(*args) require.Nil(t, err) - arguments.TxFeeHandler, err = postprocess.NewFeeAccumulator() - require.Nil(t, err) - + arguments.TxFeeHandler = postprocess.NewFeeAccumulator() arguments.ShardCoordinator = shardCoordinator arguments.AccountsDB = &stateMock.AccountsStub{ RevertToSnapshotCalled: func(snapshot int) error { @@ -4191,8 +4182,7 @@ func createRealEconomicsDataArgs() *economics.ArgsNewEconomicsData { return flag == common.GasPriceModifierFlag }, }, - BuiltInFunctionsCostHandler: &mock.BuiltInCostHandlerStub{}, - TxVersionChecker: &testscommon.TxVersionCheckerStub{}, + TxVersionChecker: &testscommon.TxVersionCheckerStub{}, } } diff --git a/process/smartContract/scQueryService.go b/process/smartContract/scQueryService.go index eb3d9b95e4e..ec6ad67e87c 100644 --- a/process/smartContract/scQueryService.go +++ b/process/smartContract/scQueryService.go @@ -22,53 +22,57 @@ import ( "github.com/multiversx/mx-chain-go/process" "github.com/multiversx/mx-chain-go/process/smartContract/scrCommon" "github.com/multiversx/mx-chain-go/sharding" + logger "github.com/multiversx/mx-chain-logger-go" vmcommon "github.com/multiversx/mx-chain-vm-common-go" "github.com/multiversx/mx-chain-vm-common-go/parsers" ) var _ process.SCQueryService = (*SCQueryService)(nil) +var logQueryService = logger.GetOrCreate("process/smartcontract.queryService") + // MaxGasLimitPerQuery - each unit is the equivalent of 1 nanosecond processing time const MaxGasLimitPerQuery = 300_000_000_000 // SCQueryService can execute Get functions over SC to fetch stored values type SCQueryService struct { - vmContainer process.VirtualMachinesContainer - economicsFee process.FeeHandler - mutRunSc sync.Mutex - blockChainHook process.BlockChainHookWithAccountsAdapter - mainBlockChain data.ChainHandler - apiBlockChain data.ChainHandler - numQueries int - gasForQuery uint64 - wasmVMChangeLocker common.Locker - bootstrapper process.Bootstrapper - allowExternalQueriesChan chan struct{} - historyRepository dblookupext.HistoryRepository - shardCoordinator sharding.Coordinator - storageService dataRetriever.StorageService - marshaller marshal.Marshalizer - hasher hashing.Hasher - uint64ByteSliceConverter typeConverters.Uint64ByteSliceConverter + vmContainer process.VirtualMachinesContainer + economicsFee process.FeeHandler + mutRunSc sync.Mutex + blockChainHook process.BlockChainHookWithAccountsAdapter + mainBlockChain data.ChainHandler + apiBlockChain data.ChainHandler + gasForQuery uint64 + wasmVMChangeLocker common.Locker + bootstrapper process.Bootstrapper + allowExternalQueriesChan chan struct{} + historyRepository dblookupext.HistoryRepository + shardCoordinator sharding.Coordinator + storageService dataRetriever.StorageService + marshaller marshal.Marshalizer + hasher hashing.Hasher + uint64ByteSliceConverter typeConverters.Uint64ByteSliceConverter + isInHistoricalBalancesMode bool } // ArgsNewSCQueryService defines the arguments needed for the sc query service type ArgsNewSCQueryService struct { - VmContainer process.VirtualMachinesContainer - EconomicsFee process.FeeHandler - BlockChainHook process.BlockChainHookWithAccountsAdapter - MainBlockChain data.ChainHandler - APIBlockChain data.ChainHandler - WasmVMChangeLocker common.Locker - Bootstrapper process.Bootstrapper - AllowExternalQueriesChan chan struct{} - MaxGasLimitPerQuery uint64 - HistoryRepository dblookupext.HistoryRepository - ShardCoordinator 
sharding.Coordinator - StorageService dataRetriever.StorageService - Marshaller marshal.Marshalizer - Hasher hashing.Hasher - Uint64ByteSliceConverter typeConverters.Uint64ByteSliceConverter + VmContainer process.VirtualMachinesContainer + EconomicsFee process.FeeHandler + BlockChainHook process.BlockChainHookWithAccountsAdapter + MainBlockChain data.ChainHandler + APIBlockChain data.ChainHandler + WasmVMChangeLocker common.Locker + Bootstrapper process.Bootstrapper + AllowExternalQueriesChan chan struct{} + MaxGasLimitPerQuery uint64 + HistoryRepository dblookupext.HistoryRepository + ShardCoordinator sharding.Coordinator + StorageService dataRetriever.StorageService + Marshaller marshal.Marshalizer + Hasher hashing.Hasher + Uint64ByteSliceConverter typeConverters.Uint64ByteSliceConverter + IsInHistoricalBalancesMode bool } // NewSCQueryService returns a new instance of SCQueryService @@ -85,21 +89,22 @@ func NewSCQueryService( gasForQuery = args.MaxGasLimitPerQuery } return &SCQueryService{ - vmContainer: args.VmContainer, - economicsFee: args.EconomicsFee, - mainBlockChain: args.MainBlockChain, - apiBlockChain: args.APIBlockChain, - blockChainHook: args.BlockChainHook, - wasmVMChangeLocker: args.WasmVMChangeLocker, - bootstrapper: args.Bootstrapper, - gasForQuery: gasForQuery, - allowExternalQueriesChan: args.AllowExternalQueriesChan, - historyRepository: args.HistoryRepository, - shardCoordinator: args.ShardCoordinator, - storageService: args.StorageService, - marshaller: args.Marshaller, - hasher: args.Hasher, - uint64ByteSliceConverter: args.Uint64ByteSliceConverter, + vmContainer: args.VmContainer, + economicsFee: args.EconomicsFee, + mainBlockChain: args.MainBlockChain, + apiBlockChain: args.APIBlockChain, + blockChainHook: args.BlockChainHook, + wasmVMChangeLocker: args.WasmVMChangeLocker, + bootstrapper: args.Bootstrapper, + gasForQuery: gasForQuery, + allowExternalQueriesChan: args.AllowExternalQueriesChan, + historyRepository: args.HistoryRepository, + shardCoordinator: args.ShardCoordinator, + storageService: args.StorageService, + marshaller: args.Marshaller, + hasher: args.Hasher, + uint64ByteSliceConverter: args.Uint64ByteSliceConverter, + isInHistoricalBalancesMode: args.IsInHistoricalBalancesMode, }, nil } @@ -179,8 +184,7 @@ func (service *SCQueryService) shouldAllowQueriesExecution() bool { } func (service *SCQueryService) executeScCall(query *process.SCQuery, gasPrice uint64) (*vmcommon.VMOutput, common.BlockInfo, error) { - log.Trace("executeScCall", "function", query.FuncName, "numQueries", service.numQueries) - service.numQueries++ + logQueryService.Trace("executeScCall", "address", query.ScAddress, "function", query.FuncName, "blockNonce", query.BlockNonce.Value, "blockHash", query.BlockHash) shouldEarlyExitBecauseOfSyncState := query.ShouldBeSynced && service.bootstrapper.GetNodeState() == common.NsNotSynchronized if shouldEarlyExitBecauseOfSyncState { @@ -198,11 +202,11 @@ func (service *SCQueryService) executeScCall(query *process.SCQuery, gasPrice ui return nil, nil, err } - accountsAdapter := service.blockChainHook.GetAccountsAdapter() - err = accountsAdapter.RecreateTrie(blockRootHash) + err = service.recreateTrie(blockRootHash, blockHeader) if err != nil { return nil, nil, err } + service.blockChainHook.SetCurrentHeader(blockHeader) } shouldCheckRootHashChanges := query.SameScState @@ -212,8 +216,6 @@ func (service *SCQueryService) executeScCall(query *process.SCQuery, gasPrice ui rootHashBeforeExecution = service.apiBlockChain.GetCurrentBlockRootHash() } - 
service.blockChainHook.SetCurrentHeader(service.mainBlockChain.GetCurrentBlockHeader()) - service.wasmVMChangeLocker.RLock() vm, _, err := scrCommon.FindVMByScAddress(service.vmContainer, query.ScAddress) if err != nil { @@ -229,15 +231,6 @@ func (service *SCQueryService) executeScCall(query *process.SCQuery, gasPrice ui return nil, nil, err } - if service.hasRetriableExecutionError(vmOutput) { - log.Error("Retriable execution error detected. Will retry (once) executeScCall()", "returnCode", vmOutput.ReturnCode, "returnMessage", vmOutput.ReturnMessage) - - vmOutput, err = vm.RunSmartContractCall(vmInput) - if err != nil { - return nil, nil, err - } - } - if query.SameScState { err = service.checkForRootHashChanges(rootHashBeforeExecution) if err != nil { @@ -258,9 +251,26 @@ func (service *SCQueryService) executeScCall(query *process.SCQuery, gasPrice ui return vmOutput, blockInfo, nil } +func (service *SCQueryService) recreateTrie(blockRootHash []byte, blockHeader data.HeaderHandler) error { + if check.IfNil(blockHeader) { + return process.ErrNilBlockHeader + } + + accountsAdapter := service.blockChainHook.GetAccountsAdapter() + + if service.isInHistoricalBalancesMode { + logQueryService.Trace("calling RecreateTrieFromEpoch", "block", blockHeader.GetNonce(), "rootHash", blockRootHash) + holder := holders.NewRootHashHolder(blockRootHash, core.OptionalUint32{Value: blockHeader.GetEpoch(), HasValue: true}) + + return accountsAdapter.RecreateTrieFromEpoch(holder) + } + + logQueryService.Trace("calling RecreateTrie", "block", blockHeader.GetNonce(), "rootHash", blockRootHash) + return accountsAdapter.RecreateTrie(blockRootHash) +} + // TODO: extract duplicated code with nodeBlocks.go func (service *SCQueryService) extractBlockHeaderAndRootHash(query *process.SCQuery) (data.HeaderHandler, []byte, error) { - if len(query.BlockHash) > 0 { currentHeader, err := service.getBlockHeaderByHash(query.BlockHash) if err != nil { @@ -417,10 +427,6 @@ func (service *SCQueryService) createVMCallInput(query *process.SCQuery, gasPric return vmContractCallInput } -func (service *SCQueryService) hasRetriableExecutionError(vmOutput *vmcommon.VMOutput) bool { - return vmOutput.ReturnMessage == "allocation error" -} - // ComputeScCallGasLimit will estimate how many gas a transaction will consume func (service *SCQueryService) ComputeScCallGasLimit(tx *transaction.Transaction) (uint64, error) { argParser := parsers.NewCallArgsParser() diff --git a/process/smartContract/scQueryServiceDispatcher.go b/process/smartContract/scQueryServiceDispatcher.go index 2c51b47d55d..981f71f3dd9 100644 --- a/process/smartContract/scQueryServiceDispatcher.go +++ b/process/smartContract/scQueryServiceDispatcher.go @@ -78,7 +78,7 @@ func (sqsd *scQueryServiceDispatcher) Close() error { for _, scQueryService := range sqsd.list { err := scQueryService.Close() if err != nil { - log.Error("error while closing inner SC query service in scQueryServiceDispatcher.Close", "error", err) + logQueryService.Error("error while closing inner SC query service in scQueryServiceDispatcher.Close", "error", err) errFound = err } } diff --git a/process/smartContract/scQueryService_test.go b/process/smartContract/scQueryService_test.go index 0b76f3a739e..d71542a8aaa 100644 --- a/process/smartContract/scQueryService_test.go +++ b/process/smartContract/scQueryService_test.go @@ -11,7 +11,6 @@ import ( "testing" "time" - "github.com/multiversx/mx-chain-core-go/core" "github.com/multiversx/mx-chain-core-go/data" 
"github.com/multiversx/mx-chain-core-go/data/block" "github.com/multiversx/mx-chain-core-go/data/transaction" @@ -41,7 +40,7 @@ func createMockArgumentsForSCQuery() ArgsNewSCQueryService { BlockChainHook: &testscommon.BlockChainHookStub{ GetAccountsAdapterCalled: func() state.AccountsAdapter { return &stateMocks.AccountsStub{ - RecreateTrieCalled: func(rootHash []byte) error { + RecreateTrieFromEpochCalled: func(options common.RootHashHolder) error { return nil }, } @@ -59,9 +58,10 @@ func createMockArgumentsForSCQuery() ArgsNewSCQueryService { return &storageStubs.StorerStub{}, nil }, }, - Marshaller: &marshallerMock.MarshalizerStub{}, - Hasher: &testscommon.HasherStub{}, - Uint64ByteSliceConverter: &mock.Uint64ByteSliceConverterMock{}, + Marshaller: &marshallerMock.MarshalizerStub{}, + Hasher: &testscommon.HasherStub{}, + Uint64ByteSliceConverter: &mock.Uint64ByteSliceConverterMock{}, + IsInHistoricalBalancesMode: false, } } @@ -367,10 +367,11 @@ func TestExecuteQuery_ShouldReceiveQueryCorrectly(t *testing.T) { _, _, _ = target.ExecuteQuery(&query) assert.True(t, runWasCalled) }) - t.Run("block hash should work", func(t *testing.T) { + t.Run("block hash should work - in deep history mode", func(t *testing.T) { t.Parallel() runWasCalled := false + epoch := uint32(37) mockVM := &mock.VMExecutionHandlerStub{ RunSmartContractCallCalled: func(input *vmcommon.ContractCallInput) (output *vmcommon.VMOutput, e error) { @@ -396,6 +397,13 @@ func TestExecuteQuery_ShouldReceiveQueryCorrectly(t *testing.T) { return uint64(math.MaxUint64) }, } + argsNewSCQuery.MainBlockChain = &testscommon.ChainHandlerStub{ + GetCurrentBlockHeaderCalled: func() data.HeaderHandler { + return &block.Header{ + Epoch: epoch, + } + }, + } providedHash := []byte("provided hash") providedRootHash := []byte("provided root hash") argsNewSCQuery.Marshaller = &marshallerMock.MarshalizerMock{} @@ -422,14 +430,21 @@ func TestExecuteQuery_ShouldReceiveQueryCorrectly(t *testing.T) { return true }, GetEpochByHashCalled: func(hash []byte) (uint32, error) { - return 12, nil + return epoch, nil }, } - wasRecreateTrieCalled := false + + recreateTrieWasCalled := false + recreateTrieFromEpochWasCalled := false + providedAccountsAdapter := &stateMocks.AccountsStub{ RecreateTrieCalled: func(rootHash []byte) error { - wasRecreateTrieCalled = true - assert.Equal(t, providedRootHash, rootHash) + recreateTrieWasCalled = true + return nil + }, + RecreateTrieFromEpochCalled: func(options common.RootHashHolder) error { + recreateTrieFromEpochWasCalled = true + assert.Equal(t, providedRootHash, options.GetRootHash()) return nil }, } @@ -438,6 +453,7 @@ func TestExecuteQuery_ShouldReceiveQueryCorrectly(t *testing.T) { return providedAccountsAdapter }, } + argsNewSCQuery.IsInHistoricalBalancesMode = true target, _ := NewSCQueryService(argsNewSCQuery) @@ -452,13 +468,16 @@ func TestExecuteQuery_ShouldReceiveQueryCorrectly(t *testing.T) { BlockHash: providedHash, } - _, _, _ = target.ExecuteQuery(&query) + _, _, err := target.ExecuteQuery(&query) assert.True(t, runWasCalled) - assert.True(t, wasRecreateTrieCalled) + assert.True(t, recreateTrieFromEpochWasCalled) + assert.False(t, recreateTrieWasCalled) + assert.Nil(t, err) }) - t.Run("block nonce should work", func(t *testing.T) { + t.Run("block hash should work - in normal mode", func(t *testing.T) { t.Parallel() + epoch := uint32(12) runWasCalled := false mockVM := &mock.VMExecutionHandlerStub{ @@ -487,22 +506,14 @@ func TestExecuteQuery_ShouldReceiveQueryCorrectly(t *testing.T) { } providedHash := 
[]byte("provided hash") providedRootHash := []byte("provided root hash") - providedNonce := uint64(123) argsNewSCQuery.Marshaller = &marshallerMock.MarshalizerMock{} - counter := 0 argsNewSCQuery.StorageService = &storageStubs.ChainStorerStub{ GetStorerCalled: func(unitType dataRetriever.UnitType) (storage.Storer, error) { return &storageStubs.StorerStub{ - GetCalled: func(key []byte) ([]byte, error) { - return providedHash, nil - }, GetFromEpochCalled: func(key []byte, epoch uint32) ([]byte, error) { - counter++ - if counter > 2 { - return nil, fmt.Errorf("no scheduled") - } hdr := &block.Header{ RootHash: providedRootHash, + Epoch: epoch, } buff, _ := argsNewSCQuery.Marshaller.Marshal(hdr) return buff, nil @@ -515,23 +526,30 @@ func TestExecuteQuery_ShouldReceiveQueryCorrectly(t *testing.T) { return true }, GetEpochByHashCalled: func(hash []byte) (uint32, error) { - require.Equal(t, providedHash, hash) - return 12, nil + return epoch, nil }, } - wasRecreateTrieCalled := false + + recreateTrieWasCalled := false + recreateTrieFromEpochWasCalled := false + providedAccountsAdapter := &stateMocks.AccountsStub{ RecreateTrieCalled: func(rootHash []byte) error { - wasRecreateTrieCalled = true + recreateTrieWasCalled = true assert.Equal(t, providedRootHash, rootHash) return nil }, + RecreateTrieFromEpochCalled: func(options common.RootHashHolder) error { + recreateTrieFromEpochWasCalled = true + return nil + }, } argsNewSCQuery.BlockChainHook = &testscommon.BlockChainHookStub{ GetAccountsAdapterCalled: func() state.AccountsAdapter { return providedAccountsAdapter }, } + argsNewSCQuery.IsInHistoricalBalancesMode = false target, _ := NewSCQueryService(argsNewSCQuery) @@ -543,15 +561,123 @@ func TestExecuteQuery_ShouldReceiveQueryCorrectly(t *testing.T) { ScAddress: scAddress, FuncName: funcName, Arguments: dataArgs, - BlockNonce: core.OptionalUint64{ - Value: providedNonce, - HasValue: true, - }, + BlockHash: providedHash, } - _, _, _ = target.ExecuteQuery(&query) + _, _, err := target.ExecuteQuery(&query) assert.True(t, runWasCalled) - assert.True(t, wasRecreateTrieCalled) + assert.True(t, recreateTrieWasCalled) + assert.False(t, recreateTrieFromEpochWasCalled) + assert.Nil(t, err) + }) +} + +func TestSCQueryService_RecreateTrie(t *testing.T) { + t.Parallel() + + testRootHash := []byte("test root hash") + t.Run("should not call RecreateTrie if block header is nil", func(t *testing.T) { + t.Parallel() + + argsNewSCQuery := createMockArgumentsForSCQuery() + argsNewSCQuery.BlockChainHook = &testscommon.BlockChainHookStub{ + GetAccountsAdapterCalled: func() state.AccountsAdapter { + return &stateMocks.AccountsStub{ + RecreateTrieCalled: func(rootHash []byte) error { + require.Fail(t, "should not be called") + return nil + }, + } + }, + } + + service, _ := NewSCQueryService(argsNewSCQuery) + err := service.recreateTrie(testRootHash, nil) + assert.ErrorIs(t, err, process.ErrNilBlockHeader) + }) + t.Run("should call RecreateTrieFromEpoch if in deep history mode", func(t *testing.T) { + t.Parallel() + + recreateTrieWasCalled := false + recreateTrieFromEpochWasCalled := false + + argsNewSCQuery := createMockArgumentsForSCQuery() + argsNewSCQuery.IsInHistoricalBalancesMode = true + argsNewSCQuery.MainBlockChain = &testscommon.ChainHandlerStub{ + GetCurrentBlockHeaderCalled: func() data.HeaderHandler { + return nil // after the genesis we do not have a header as current block + }, + } + argsNewSCQuery.BlockChainHook = &testscommon.BlockChainHookStub{ + GetAccountsAdapterCalled: func() state.AccountsAdapter { 
+ return &stateMocks.AccountsStub{ + RecreateTrieCalled: func(rootHash []byte) error { + recreateTrieWasCalled = true + recreateTrieFromEpochWasCalled = false + + assert.Equal(t, testRootHash, rootHash) + return nil + }, + RecreateTrieFromEpochCalled: func(options common.RootHashHolder) error { + recreateTrieWasCalled = false + recreateTrieFromEpochWasCalled = true + + assert.Equal(t, testRootHash, options.GetRootHash()) + return nil + }, + } + }, + } + + service, _ := NewSCQueryService(argsNewSCQuery) + + // For genesis block, RecreateTrieFromEpoch should be called + err := service.recreateTrie(testRootHash, &block.Header{}) + assert.Nil(t, err) + assert.True(t, recreateTrieFromEpochWasCalled) + assert.False(t, recreateTrieWasCalled) + }) + t.Run("should call RecreateTrie if not in deep history mode", func(t *testing.T) { + t.Parallel() + + recreateTrieWasCalled := false + recreateTrieFromEpochWasCalled := false + + argsNewSCQuery := createMockArgumentsForSCQuery() + argsNewSCQuery.IsInHistoricalBalancesMode = false + argsNewSCQuery.MainBlockChain = &testscommon.ChainHandlerStub{ + GetCurrentBlockHeaderCalled: func() data.HeaderHandler { + return nil // after the genesis we do not have a header as current block + }, + } + argsNewSCQuery.BlockChainHook = &testscommon.BlockChainHookStub{ + GetAccountsAdapterCalled: func() state.AccountsAdapter { + return &stateMocks.AccountsStub{ + RecreateTrieCalled: func(rootHash []byte) error { + recreateTrieWasCalled = true + recreateTrieFromEpochWasCalled = false + + assert.Equal(t, testRootHash, rootHash) + return nil + }, + RecreateTrieFromEpochCalled: func(options common.RootHashHolder) error { + recreateTrieWasCalled = false + recreateTrieFromEpochWasCalled = true + + assert.Equal(t, testRootHash, options.GetRootHash()) + return nil + }, + } + }, + } + + service, _ := NewSCQueryService(argsNewSCQuery) + + // For genesis block, RecreateTrieFromEpoch should be called + err := service.recreateTrie(testRootHash, &block.Header{}) + assert.Nil(t, err) + assert.False(t, recreateTrieFromEpochWasCalled) + assert.True(t, recreateTrieWasCalled) }) } @@ -896,16 +1022,6 @@ func TestSCQueryService_ShouldFailIfStateChanged(t *testing.T) { t.Parallel() args := createMockArgumentsForSCQuery() - args.BlockChainHook = &testscommon.BlockChainHookStub{ - GetAccountsAdapterCalled: func() state.AccountsAdapter { - return &stateMocks.AccountsStub{ - RecreateTrieCalled: func(rootHash []byte) error { - return nil - }, - } - }, - } - rootHashCalledCounter := 0 args.APIBlockChain = &testscommon.ChainHandlerStub{ GetCurrentBlockRootHashCalled: func() []byte { @@ -927,7 +1043,7 @@ func TestSCQueryService_ShouldFailIfStateChanged(t *testing.T) { FuncName: "function", }) require.Nil(t, res) - require.True(t, errors.Is(err, process.ErrStateChangedWhileExecutingVmQuery)) + require.ErrorIs(t, err, process.ErrStateChangedWhileExecutingVmQuery) } func TestSCQueryService_ShouldWorkIfStateDidntChange(t *testing.T) { diff --git a/process/track/errors.go b/process/track/errors.go index 2a0c2e57672..2c9a3a5c297 100644 --- a/process/track/errors.go +++ b/process/track/errors.go @@ -30,3 +30,6 @@ var ErrNotarizedHeaderOffsetIsOutOfBound = errors.New("requested offset of the n // ErrNilRoundHandler signals that a nil roundHandler has been provided var ErrNilRoundHandler = errors.New("nil roundHandler") + +// ErrNilKeysHandler signals that a nil keys handler was provided +var ErrNilKeysHandler = errors.New("nil keys handler") diff --git a/process/track/interface.go b/process/track/interface.go 
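The recreateTrie subtests above pin down a simple dispatch: a nil header must fail with process.ErrNilBlockHeader; in historical balances (deep history) mode the accounts adapter must rebuild the trie for the block's epoch via RecreateTrieFromEpoch; otherwise a plain RecreateTrie on the root hash is enough. A self-contained sketch of that branching, using a flattened (rootHash, epoch) signature for readability; the production method takes a common.RootHashHolder instead, and the type names here are illustrative stand-ins:

package sketch

import "errors"

var errNilBlockHeader = errors.New("nil block header")

// trieRecreator mirrors the two accounts-adapter methods stubbed in the tests above.
type trieRecreator interface {
	RecreateTrie(rootHash []byte) error
	RecreateTrieFromEpoch(rootHash []byte, epoch uint32) error // simplified signature
}

type headerInfo struct{ epoch uint32 }

func recreateTrie(deepHistoryMode bool, adapter trieRecreator, rootHash []byte, header *headerInfo) error {
	if header == nil {
		return errNilBlockHeader
	}
	if deepHistoryMode {
		// historical balances mode: the trie must be rebuilt for the block's epoch
		return adapter.RecreateTrieFromEpoch(rootHash, header.epoch)
	}
	return adapter.RecreateTrie(rootHash)
}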
index 7d7966060da..1dbfa2caa2c 100644 --- a/process/track/interface.go +++ b/process/track/interface.go @@ -1,6 +1,7 @@ package track import ( + "github.com/multiversx/mx-chain-core-go/core" "github.com/multiversx/mx-chain-core-go/data" ) @@ -47,3 +48,10 @@ type blockBalancerHandler interface { SetLastShardProcessedMetaNonce(shardID uint32, nonce uint64) IsInterfaceNil() bool } + +// KeysHandler defines the operations implemented by a component that will manage all keys, +// including the single signer keys or the set of multi-keys +type KeysHandler interface { + ResetRoundsWithoutReceivedMessages(pkBytes []byte, pid core.PeerID) + IsInterfaceNil() bool +} diff --git a/consensus/spos/sentSignaturesTracker.go b/process/track/sentSignaturesTracker.go similarity index 62% rename from consensus/spos/sentSignaturesTracker.go rename to process/track/sentSignaturesTracker.go index de7ecd69543..515f56a61f6 100644 --- a/consensus/spos/sentSignaturesTracker.go +++ b/process/track/sentSignaturesTracker.go @@ -1,11 +1,10 @@ -package spos +package track import ( "sync" "github.com/multiversx/mx-chain-core-go/core" "github.com/multiversx/mx-chain-core-go/core/check" - "github.com/multiversx/mx-chain-go/consensus" ) // externalPeerID is just a marker so the ResetRoundsWithoutReceivedMessages will know it is not an owned peer ID @@ -15,11 +14,11 @@ const externalPeerID = core.PeerID("external peer id") type sentSignaturesTracker struct { mut sync.RWMutex sentFromSelf map[string]struct{} - keysHandler consensus.KeysHandler + keysHandler KeysHandler } // NewSentSignaturesTracker will create a new instance of a tracker able to record if a signature was sent from self -func NewSentSignaturesTracker(keysHandler consensus.KeysHandler) (*sentSignaturesTracker, error) { +func NewSentSignaturesTracker(keysHandler KeysHandler) (*sentSignaturesTracker, error) { if check.IfNil(keysHandler) { return nil, ErrNilKeysHandler } @@ -44,21 +43,18 @@ func (tracker *sentSignaturesTracker) SignatureSent(pkBytes []byte) { tracker.mut.Unlock() } -// ReceivedActualSigners is called whenever a final info is received. If a signer public key did not send a signature -// from the current host, it will call the reset rounds without received message. 
This is the case when another instance of a -// multikey node (possibly running as main) broadcast only the final info as it contained the leader + a few signers -func (tracker *sentSignaturesTracker) ReceivedActualSigners(signersPks []string) { +// ResetCountersForManagedBlockSigner is called at commit time and will call the reset rounds without received messages +// for the provided key that actually signed a block +func (tracker *sentSignaturesTracker) ResetCountersForManagedBlockSigner(signerPk []byte) { tracker.mut.RLock() defer tracker.mut.RUnlock() - for _, signerPk := range signersPks { - _, isSentFromSelf := tracker.sentFromSelf[signerPk] - if isSentFromSelf { - continue - } - - tracker.keysHandler.ResetRoundsWithoutReceivedMessages([]byte(signerPk), externalPeerID) + _, isSentFromSelf := tracker.sentFromSelf[string(signerPk)] + if isSentFromSelf { + return } + + tracker.keysHandler.ResetRoundsWithoutReceivedMessages(signerPk, externalPeerID) } // IsInterfaceNil returns true if there is no value under the interface diff --git a/consensus/spos/sentSignaturesTracker_test.go b/process/track/sentSignaturesTracker_test.go similarity index 69% rename from consensus/spos/sentSignaturesTracker_test.go rename to process/track/sentSignaturesTracker_test.go index a0ecc275e68..8a60dba37dd 100644 --- a/consensus/spos/sentSignaturesTracker_test.go +++ b/process/track/sentSignaturesTracker_test.go @@ -1,4 +1,4 @@ -package spos +package track import ( "testing" @@ -37,13 +37,11 @@ func TestSentSignaturesTracker_IsInterfaceNil(t *testing.T) { assert.False(t, tracker.IsInterfaceNil()) } -func TestSentSignaturesTracker_ReceivedActualSigners(t *testing.T) { +func TestSentSignaturesTracker_ResetCountersForManagedBlockSigner(t *testing.T) { t.Parallel() - pk1 := "pk1" - pk2 := "pk2" - pk3 := "pk3" - pk4 := "pk4" + pk1 := []byte("pk1") + pk2 := []byte("pk2") t.Run("empty map should call remove", func(t *testing.T) { t.Parallel() @@ -56,13 +54,12 @@ func TestSentSignaturesTracker_ReceivedActualSigners(t *testing.T) { }, } - signers := []string{pk1, pk2} tracker, _ := NewSentSignaturesTracker(keysHandler) - tracker.ReceivedActualSigners(signers) + tracker.ResetCountersForManagedBlockSigner(pk1) - assert.Equal(t, [][]byte{[]byte(pk1), []byte(pk2)}, pkBytesSlice) + assert.Equal(t, [][]byte{pk1}, pkBytesSlice) }) - t.Run("should call remove only for the public keys that did not sent signatures from self", func(t *testing.T) { + t.Run("should call remove only for the public key that did not sent signatures from self", func(t *testing.T) { t.Parallel() pkBytesSlice := make([][]byte, 0) @@ -73,21 +70,21 @@ func TestSentSignaturesTracker_ReceivedActualSigners(t *testing.T) { }, } - signers := []string{pk1, pk2, pk3, pk4} tracker, _ := NewSentSignaturesTracker(keysHandler) - tracker.SignatureSent([]byte(pk1)) - tracker.SignatureSent([]byte(pk3)) + tracker.SignatureSent(pk1) - tracker.ReceivedActualSigners(signers) - assert.Equal(t, [][]byte{[]byte("pk2"), []byte("pk4")}, pkBytesSlice) + tracker.ResetCountersForManagedBlockSigner(pk1) + tracker.ResetCountersForManagedBlockSigner(pk2) + assert.Equal(t, [][]byte{pk2}, pkBytesSlice) t.Run("after reset, all should be called", func(t *testing.T) { tracker.StartRound() - tracker.ReceivedActualSigners(signers) + tracker.ResetCountersForManagedBlockSigner(pk1) + tracker.ResetCountersForManagedBlockSigner(pk2) assert.Equal(t, [][]byte{ - []byte("pk2"), []byte("pk4"), // from the previous test - []byte("pk1"), []byte("pk2"), []byte("pk3"), []byte("pk4"), // from this call + 
pk2, // from the previous test + pk1, pk2, // from this call }, pkBytesSlice) }) }) diff --git a/process/transaction/metaProcess.go b/process/transaction/metaProcess.go index f4a10a0fb6c..c9a2b868dab 100644 --- a/process/transaction/metaProcess.go +++ b/process/transaction/metaProcess.go @@ -65,7 +65,6 @@ func NewMetaTxProcessor(args ArgsNewMetaTxProcessor) (*metaTxProcessor, error) { } err := core.CheckHandlerCompatibility(args.EnableEpochsHandler, []core.EnableEpochFlag{ common.PenalizedTooMuchGasFlag, - common.BuiltInFunctionOnMetaFlag, common.ESDTFlag, }) if err != nil { @@ -146,10 +145,6 @@ func (txProc *metaTxProcessor) ProcessTransaction(tx *transaction.Transaction) ( case process.SCInvoking: return txProc.processSCInvoking(tx, tx.SndAddr, tx.RcvAddr) case process.BuiltInFunctionCall: - if txProc.enableEpochsHandler.IsFlagEnabled(common.BuiltInFunctionOnMetaFlag) { - return txProc.processBuiltInFunctionCall(tx, tx.SndAddr, tx.RcvAddr) - } - if txProc.enableEpochsHandler.IsFlagEnabled(common.ESDTFlag) { return txProc.processSCInvoking(tx, tx.SndAddr, tx.RcvAddr) } @@ -192,18 +187,6 @@ func (txProc *metaTxProcessor) processSCInvoking( return txProc.scProcessor.ExecuteSmartContractTransaction(tx, acntSrc, acntDst) } -func (txProc *metaTxProcessor) processBuiltInFunctionCall( - tx *transaction.Transaction, - adrSrc, adrDst []byte, -) (vmcommon.ReturnCode, error) { - acntSrc, acntDst, err := txProc.getAccounts(adrSrc, adrDst) - if err != nil { - return 0, err - } - - return txProc.scProcessor.ExecuteBuiltInFunction(tx, acntSrc, acntDst) -} - // IsInterfaceNil returns true if there is no value under the interface func (txProc *metaTxProcessor) IsInterfaceNil() bool { return txProc == nil diff --git a/process/transaction/metaProcess_test.go b/process/transaction/metaProcess_test.go index ac536af4e30..eaaa1382d2e 100644 --- a/process/transaction/metaProcess_test.go +++ b/process/transaction/metaProcess_test.go @@ -451,19 +451,6 @@ func TestMetaTxProcessor_ProcessTransactionBuiltInCallTxShouldWork(t *testing.T) assert.Nil(t, err) assert.True(t, wasCalled) assert.Equal(t, 0, saveAccountCalled) - - builtInCalled := false - scProcessorMock.ExecuteBuiltInFunctionCalled = func(tx data.TransactionHandler, acntSrc, acntDst state.UserAccountHandler) (vmcommon.ReturnCode, error) { - builtInCalled = true - return 0, nil - } - - enableEpochsHandlerStub.AddActiveFlags(common.BuiltInFunctionOnMetaFlag) - - _, err = txProc.ProcessTransaction(&tx) - assert.Nil(t, err) - assert.True(t, builtInCalled) - assert.Equal(t, 0, saveAccountCalled) } func TestMetaTxProcessor_ProcessTransactionWithInvalidUsernameShouldNotError(t *testing.T) { diff --git a/process/transactionEvaluator/transactionEvaluator.go b/process/transactionEvaluator/transactionEvaluator.go index b9184ae3fad..9e61d138419 100644 --- a/process/transactionEvaluator/transactionEvaluator.go +++ b/process/transactionEvaluator/transactionEvaluator.go @@ -9,6 +9,7 @@ import ( "github.com/multiversx/mx-chain-core-go/core" "github.com/multiversx/mx-chain-core-go/core/check" + "github.com/multiversx/mx-chain-core-go/data" "github.com/multiversx/mx-chain-core-go/data/transaction" "github.com/multiversx/mx-chain-go/common" "github.com/multiversx/mx-chain-go/facade" @@ -32,6 +33,7 @@ type ArgsApiTransactionEvaluator struct { Accounts state.AccountsAdapterWithClean ShardCoordinator sharding.Coordinator EnableEpochsHandler common.EnableEpochsHandler + BlockChain data.ChainHandler } type apiTransactionEvaluator struct { @@ -41,6 +43,7 @@ type 
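For context on the sentSignaturesTracker changes above: the tracker now resets the "rounds without received messages" counter one managed signer at a time, at commit time, instead of receiving the full signer list, and a key whose signature this node broadcast itself is skipped. A self-contained behavioural sketch; the stub and the string peer id are stand-ins for the real KeysHandler and core.PeerID:

package main

import "fmt"

// keysHandlerStub stands in for the real keys handler; it only records reset calls.
type keysHandlerStub struct{ resets [][]byte }

func (k *keysHandlerStub) ResetRoundsWithoutReceivedMessages(pk []byte, _ string) {
	k.resets = append(k.resets, pk)
}

type trackerSketch struct {
	sentFromSelf map[string]struct{}
	keys         *keysHandlerStub
}

// SignatureSent records that this node broadcast the signature for pk in the current round.
func (t *trackerSketch) SignatureSent(pk []byte) { t.sentFromSelf[string(pk)] = struct{}{} }

// ResetCountersForManagedBlockSigner resets the liveness counter only for keys
// whose signature was broadcast by another instance (multikey scenario).
func (t *trackerSketch) ResetCountersForManagedBlockSigner(pk []byte) {
	if _, sentFromSelf := t.sentFromSelf[string(pk)]; sentFromSelf {
		return
	}
	t.keys.ResetRoundsWithoutReceivedMessages(pk, "external peer id")
}

func main() {
	tracker := &trackerSketch{sentFromSelf: map[string]struct{}{}, keys: &keysHandlerStub{}}
	tracker.SignatureSent([]byte("pk1"))
	tracker.ResetCountersForManagedBlockSigner([]byte("pk1")) // skipped: sent from self
	tracker.ResetCountersForManagedBlockSigner([]byte("pk2")) // counter reset
	fmt.Printf("%q\n", tracker.keys.resets) // ["pk2"]
}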
apiTransactionEvaluator struct { feeHandler process.FeeHandler txSimulator facade.TransactionSimulatorProcessor enableEpochsHandler common.EnableEpochsHandler + blockChain data.ChainHandler mutExecution sync.RWMutex } @@ -64,6 +67,9 @@ func NewAPITransactionEvaluator(args ArgsApiTransactionEvaluator) (*apiTransacti if check.IfNil(args.EnableEpochsHandler) { return nil, process.ErrNilEnableEpochsHandler } + if check.IfNil(args.BlockChain) { + return nil, process.ErrNilBlockChain + } err := core.CheckHandlerCompatibility(args.EnableEpochsHandler, []core.EnableEpochFlag{ common.CleanUpInformativeSCRsFlag, }) @@ -78,6 +84,7 @@ func NewAPITransactionEvaluator(args ArgsApiTransactionEvaluator) (*apiTransacti accounts: args.Accounts, shardCoordinator: args.ShardCoordinator, enableEpochsHandler: args.EnableEpochsHandler, + blockChain: args.BlockChain, } return tce, nil @@ -91,7 +98,9 @@ func (ate *apiTransactionEvaluator) SimulateTransactionExecution(tx *transaction ate.mutExecution.Unlock() }() - return ate.txSimulator.ProcessTx(tx) + currentHeader := ate.getCurrentBlockHeader() + + return ate.txSimulator.ProcessTx(tx, currentHeader) } // ComputeTransactionGasLimit will calculate how many gas units a transaction will consume @@ -140,8 +149,8 @@ func (ate *apiTransactionEvaluator) simulateTransactionCost(tx *transaction.Tran } costResponse := &transaction.CostResponse{} - - res, err := ate.txSimulator.ProcessTx(tx) + currentHeader := ate.getCurrentBlockHeader() + res, err := ate.txSimulator.ProcessTx(tx, currentHeader) if err != nil { costResponse.ReturnMessage = err.Error() return costResponse, nil @@ -228,6 +237,15 @@ func (ate *apiTransactionEvaluator) addMissingFieldsIfNeeded(tx *transaction.Tra return nil } +func (ate *apiTransactionEvaluator) getCurrentBlockHeader() data.HeaderHandler { + currentHeader := ate.blockChain.GetCurrentBlockHeader() + if check.IfNil(currentHeader) { + return ate.blockChain.GetGenesisHeader() + } + + return currentHeader +} + func (ate *apiTransactionEvaluator) getTxGasLimit(tx *transaction.Transaction) (uint64, error) { selfShardID := ate.shardCoordinator.SelfId() maxGasLimitPerBlock := ate.feeHandler.MaxGasLimitPerBlock(selfShardID) - 1 diff --git a/process/transactionEvaluator/transactionEvaluator_test.go b/process/transactionEvaluator/transactionEvaluator_test.go index 586072856ac..f36a5388777 100644 --- a/process/transactionEvaluator/transactionEvaluator_test.go +++ b/process/transactionEvaluator/transactionEvaluator_test.go @@ -10,6 +10,7 @@ import ( "github.com/multiversx/mx-chain-core-go/core" "github.com/multiversx/mx-chain-core-go/core/check" "github.com/multiversx/mx-chain-core-go/data" + "github.com/multiversx/mx-chain-core-go/data/block" "github.com/multiversx/mx-chain-core-go/data/transaction" "github.com/multiversx/mx-chain-go/process" "github.com/multiversx/mx-chain-go/process/mock" @@ -30,6 +31,7 @@ func createArgs() ArgsApiTransactionEvaluator { Accounts: &stateMock.AccountsStub{}, ShardCoordinator: &mock.ShardCoordinatorStub{}, EnableEpochsHandler: &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, + BlockChain: &testscommon.ChainHandlerMock{}, } } @@ -43,6 +45,16 @@ func TestTransactionEvaluator_NilTxTypeHandler(t *testing.T) { require.Equal(t, process.ErrNilTxTypeHandler, err) } +func TestTransactionEvaluator_NilBlockChain(t *testing.T) { + t.Parallel() + args := createArgs() + args.BlockChain = nil + tce, err := NewAPITransactionEvaluator(args) + + require.Nil(t, tce) + require.Equal(t, process.ErrNilBlockChain, err) +} + func 
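The evaluator changes above, together with the transactionSimulator changes further below, thread the chain's current header into every simulation: getCurrentBlockHeader falls back to the genesis header until the first block is committed, and the simulator hands the header to the blockchain hook (SetCurrentHeader) before processing, so the VM sees a realistic nonce/round/epoch. A minimal sketch of the fallback, with local stand-in types instead of data.ChainHandler; in production the nil check uses check.IfNil on the returned data.HeaderHandler:

package sketch

type header struct{ nonce uint64 }

type chainStub struct {
	current *header
	genesis *header
}

// currentOrGenesis mirrors apiTransactionEvaluator.getCurrentBlockHeader:
// simulations always get a non-nil header, even right after node start.
func currentOrGenesis(chain *chainStub) *header {
	if chain.current == nil {
		return chain.genesis
	}
	return chain.current
}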
TestTransactionEvaluator_NilFeeHandlerShouldErr(t *testing.T) { t.Parallel() @@ -115,7 +127,7 @@ func TestComputeTransactionGasLimit_MoveBalance(t *testing.T) { }, } args.TxSimulator = &mock.TransactionSimulatorStub{ - ProcessTxCalled: func(tx *transaction.Transaction) (*txSimData.SimulationResultsWithVMOutput, error) { + ProcessTxCalled: func(tx *transaction.Transaction, currentHeader data.HeaderHandler) (*txSimData.SimulationResultsWithVMOutput, error) { return &txSimData.SimulationResultsWithVMOutput{}, nil }, } @@ -154,7 +166,7 @@ func TestComputeTransactionGasLimit_MoveBalanceInvalidNonceShouldStillComputeCos }, } args.TxSimulator = &mock.TransactionSimulatorStub{ - ProcessTxCalled: func(tx *transaction.Transaction) (*txSimData.SimulationResultsWithVMOutput, error) { + ProcessTxCalled: func(tx *transaction.Transaction, currentHeader data.HeaderHandler) (*txSimData.SimulationResultsWithVMOutput, error) { return nil, simulationErr }, } @@ -185,7 +197,7 @@ func TestComputeTransactionGasLimit_BuiltInFunction(t *testing.T) { }, } args.TxSimulator = &mock.TransactionSimulatorStub{ - ProcessTxCalled: func(tx *transaction.Transaction) (*txSimData.SimulationResultsWithVMOutput, error) { + ProcessTxCalled: func(tx *transaction.Transaction, currentHeader data.HeaderHandler) (*txSimData.SimulationResultsWithVMOutput, error) { return &txSimData.SimulationResultsWithVMOutput{ VMOutput: &vmcommon.VMOutput{ ReturnCode: vmcommon.Ok, @@ -221,7 +233,7 @@ func TestComputeTransactionGasLimit_BuiltInFunctionShouldErr(t *testing.T) { }, } args.TxSimulator = &mock.TransactionSimulatorStub{ - ProcessTxCalled: func(tx *transaction.Transaction) (*txSimData.SimulationResultsWithVMOutput, error) { + ProcessTxCalled: func(tx *transaction.Transaction, currentHeader data.HeaderHandler) (*txSimData.SimulationResultsWithVMOutput, error) { return nil, localErr }, } @@ -251,7 +263,7 @@ func TestComputeTransactionGasLimit_NilVMOutput(t *testing.T) { }, } args.TxSimulator = &mock.TransactionSimulatorStub{ - ProcessTxCalled: func(tx *transaction.Transaction) (*txSimData.SimulationResultsWithVMOutput, error) { + ProcessTxCalled: func(tx *transaction.Transaction, currentHeader data.HeaderHandler) (*txSimData.SimulationResultsWithVMOutput, error) { return &txSimData.SimulationResultsWithVMOutput{}, nil }, } @@ -260,7 +272,8 @@ func TestComputeTransactionGasLimit_NilVMOutput(t *testing.T) { return &stateMock.UserAccountStub{Balance: big.NewInt(100000)}, nil }, } - tce, _ := NewAPITransactionEvaluator(args) + tce, err := NewAPITransactionEvaluator(args) + require.Nil(t, err) tx := &transaction.Transaction{} cost, err := tce.ComputeTransactionGasLimit(tx) @@ -281,7 +294,7 @@ func TestComputeTransactionGasLimit_RetCodeNotOk(t *testing.T) { }, } args.TxSimulator = &mock.TransactionSimulatorStub{ - ProcessTxCalled: func(tx *transaction.Transaction) (*txSimData.SimulationResultsWithVMOutput, error) { + ProcessTxCalled: func(tx *transaction.Transaction, _ data.HeaderHandler) (*txSimData.SimulationResultsWithVMOutput, error) { return &txSimData.SimulationResultsWithVMOutput{ VMOutput: &vmcommon.VMOutput{ ReturnCode: vmcommon.UserError, @@ -335,3 +348,82 @@ func TestExtractGasUsedFromMessage(t *testing.T) { require.Equal(t, uint64(0), extractGasRemainedFromMessage("", gasRemainedSplitString)) require.Equal(t, uint64(0), extractGasRemainedFromMessage("too much gas provided, gas needed = 10000, gas used = wrong", gasUsedSlitString)) } + +func TestApiTransactionEvaluator_SimulateTransactionExecution(t *testing.T) { + t.Parallel() + + 
called := false + expectedNonce := uint64(1000) + args := createArgs() + args.BlockChain = &testscommon.ChainHandlerMock{} + _ = args.BlockChain.SetCurrentBlockHeaderAndRootHash(&block.Header{Nonce: expectedNonce}, []byte("test")) + + args.TxSimulator = &mock.TransactionSimulatorStub{ + ProcessTxCalled: func(_ *transaction.Transaction, currentHeader data.HeaderHandler) (*txSimData.SimulationResultsWithVMOutput, error) { + called = true + require.Equal(t, expectedNonce, currentHeader.GetNonce()) + return nil, nil + }, + } + + tce, err := NewAPITransactionEvaluator(args) + require.Nil(t, err) + + tx := &transaction.Transaction{} + + _, err = tce.SimulateTransactionExecution(tx) + require.Nil(t, err) + require.True(t, called) +} + +func TestApiTransactionEvaluator_ComputeTransactionGasLimit(t *testing.T) { + t.Parallel() + + called := false + expectedNonce := uint64(1000) + args := createArgs() + args.BlockChain = &testscommon.ChainHandlerMock{} + _ = args.BlockChain.SetCurrentBlockHeaderAndRootHash(&block.Header{Nonce: expectedNonce}, []byte("test")) + + args.TxTypeHandler = &testscommon.TxTypeHandlerMock{ + ComputeTransactionTypeCalled: func(tx data.TransactionHandler) (process.TransactionType, process.TransactionType) { + return process.SCInvoking, process.SCInvoking + }, + } + args.TxSimulator = &mock.TransactionSimulatorStub{ + ProcessTxCalled: func(_ *transaction.Transaction, currentHeader data.HeaderHandler) (*txSimData.SimulationResultsWithVMOutput, error) { + called = true + require.Equal(t, expectedNonce, currentHeader.GetNonce()) + return &txSimData.SimulationResultsWithVMOutput{}, nil + }, + } + + tce, err := NewAPITransactionEvaluator(args) + require.Nil(t, err) + + tx := &transaction.Transaction{} + + _, err = tce.ComputeTransactionGasLimit(tx) + require.Nil(t, err) + require.True(t, called) +} + +func TestApiTransactionEvaluator_GetCurrentHeader(t *testing.T) { + t.Parallel() + + args := createArgs() + args.BlockChain = &testscommon.ChainHandlerMock{} + _ = args.BlockChain.SetGenesisHeader(&block.Header{Nonce: 0}) + + tce, err := NewAPITransactionEvaluator(args) + require.Nil(t, err) + + currentHeader := tce.getCurrentBlockHeader() + require.Equal(t, uint64(0), currentHeader.GetNonce()) + + expectedNonce := uint64(100) + _ = args.BlockChain.SetCurrentBlockHeaderAndRootHash(&block.Header{Nonce: expectedNonce}, []byte("root")) + + currentHeader = tce.getCurrentBlockHeader() + require.Equal(t, expectedNonce, currentHeader.GetNonce()) +} diff --git a/process/transactionEvaluator/transactionSimulator.go b/process/transactionEvaluator/transactionSimulator.go index 8d1a405643d..c87e79b0472 100644 --- a/process/transactionEvaluator/transactionSimulator.go +++ b/process/transactionEvaluator/transactionSimulator.go @@ -6,6 +6,7 @@ import ( "github.com/multiversx/mx-chain-core-go/core" "github.com/multiversx/mx-chain-core-go/core/check" + "github.com/multiversx/mx-chain-core-go/data" "github.com/multiversx/mx-chain-core-go/data/block" "github.com/multiversx/mx-chain-core-go/data/receipt" "github.com/multiversx/mx-chain-core-go/data/smartContractResult" @@ -33,6 +34,7 @@ type ArgsTxSimulator struct { Hasher hashing.Hasher Marshalizer marshal.Marshalizer DataFieldParser DataFieldParser + BlockChainHook process.BlockChainHookHandler } type refundHandler interface { @@ -50,6 +52,7 @@ type transactionSimulator struct { marshalizer marshal.Marshalizer refundDetector refundHandler dataFieldParser DataFieldParser + blockChainHook process.BlockChainHookHandler } // NewTransactionSimulator returns 
a new instance of a transactionSimulator @@ -78,6 +81,9 @@ func NewTransactionSimulator(args ArgsTxSimulator) (*transactionSimulator, error if check.IfNilReflect(args.DataFieldParser) { return nil, ErrNilDataFieldParser } + if check.IfNil(args.BlockChainHook) { + return nil, process.ErrNilBlockChainHook + } return &transactionSimulator{ txProcessor: args.TransactionProcessor, @@ -89,17 +95,20 @@ func NewTransactionSimulator(args ArgsTxSimulator) (*transactionSimulator, error hasher: args.Hasher, refundDetector: transactionAPI.NewRefundDetector(), dataFieldParser: args.DataFieldParser, + blockChainHook: args.BlockChainHook, }, nil } // ProcessTx will process the transaction in a special environment, where state-writing is not allowed -func (ts *transactionSimulator) ProcessTx(tx *transaction.Transaction) (*txSimData.SimulationResultsWithVMOutput, error) { +func (ts *transactionSimulator) ProcessTx(tx *transaction.Transaction, currentHeader data.HeaderHandler) (*txSimData.SimulationResultsWithVMOutput, error) { ts.mutOperation.Lock() defer ts.mutOperation.Unlock() txStatus := transaction.TxStatusPending failReason := "" + ts.blockChainHook.SetCurrentHeader(currentHeader) + retCode, err := ts.txProcessor.ProcessTransaction(tx) if err != nil { failReason = err.Error() diff --git a/process/transactionEvaluator/transactionSimulator_test.go b/process/transactionEvaluator/transactionSimulator_test.go index 727f158c7eb..94da76f4254 100644 --- a/process/transactionEvaluator/transactionSimulator_test.go +++ b/process/transactionEvaluator/transactionSimulator_test.go @@ -76,6 +76,15 @@ func TestNewTransactionSimulator(t *testing.T) { }, exError: ErrNilHasher, }, + { + name: "NilBlockChainHook", + argsFunc: func() ArgsTxSimulator { + args := getTxSimulatorArgs() + args.BlockChainHook = nil + return args + }, + exError: process.ErrNilBlockChainHook, + }, { name: "NilMarshalizer", argsFunc: func() ArgsTxSimulator { @@ -125,7 +134,7 @@ func TestTransactionSimulator_ProcessTxProcessingErrShouldSignal(t *testing.T) { } ts, _ := NewTransactionSimulator(args) - results, err := ts.ProcessTx(&transaction.Transaction{Nonce: 37}) + results, err := ts.ProcessTx(&transaction.Transaction{Nonce: 37}, &block.Header{}) require.NoError(t, err) require.Equal(t, expErr.Error(), results.FailReason) } @@ -207,7 +216,7 @@ func TestTransactionSimulator_ProcessTxShouldIncludeScrsAndReceipts(t *testing.T txHash, _ := core.CalculateHash(args.Marshalizer, args.Hasher, tx) args.VMOutputCacher.Put(txHash, &vmcommon.VMOutput{}, 0) - results, err := ts.ProcessTx(tx) + results, err := ts.ProcessTx(tx, &block.Header{}) require.NoError(t, err) require.Equal( t, @@ -236,6 +245,7 @@ func getTxSimulatorArgs() ArgsTxSimulator { Marshalizer: &mock.MarshalizerMock{}, Hasher: &hashingMocks.HasherMock{}, DataFieldParser: dataFieldParser, + BlockChainHook: &testscommon.BlockChainHookStub{}, } } @@ -261,7 +271,7 @@ func TestTransactionSimulator_ProcessTxConcurrentCalls(t *testing.T) { for i := 0; i < numCalls; i++ { go func(idx int) { time.Sleep(time.Millisecond * 10) - _, _ = txSimulator.ProcessTx(tx) + _, _ = txSimulator.ProcessTx(tx, &block.Header{}) wg.Done() }(i) } diff --git a/scripts/testnet/include/config.sh b/scripts/testnet/include/config.sh index 56d792dc7ed..8fa1d11b3db 100644 --- a/scripts/testnet/include/config.sh +++ b/scripts/testnet/include/config.sh @@ -3,6 +3,7 @@ generateConfig() { TMP_SHARD_OBSERVERCOUNT=$SHARD_OBSERVERCOUNT TMP_META_OBSERVERCOUNT=$META_OBSERVERCOUNT + # set num of observers to 0, they will start with 
generated keys if [[ $MULTI_KEY_NODES -eq 1 ]]; then TMP_SHARD_OBSERVERCOUNT=0 TMP_META_OBSERVERCOUNT=0 @@ -131,10 +132,53 @@ updateNodeConfig() { sed -i '/\[Antiflood\]/,/\[Logger\]/ s/true/false/' config_observer.toml fi + updateConfigsForStakingV4 + echo "Updated configuration for Nodes." popd } +updateConfigsForStakingV4() { + config=$(cat enableEpochs.toml) + + echo "Updating staking v4 configs" + + # Get the StakingV4Step3EnableEpoch value + staking_enable_epoch=$(echo "$config" | awk -F '=' '/ StakingV4Step3EnableEpoch/{gsub(/^[ \t]+|[ \t]+$/,"", $2); print $2; exit}') + # Count the number of entries in MaxNodesChangeEnableEpoch + entry_count=$(echo "$config" | awk '/MaxNodesChangeEnableEpoch/,/\]/{if ($0 ~ /\{/) {count++}} END {print count}') + + # Check if entry_count is less than 2 + if [ "$entry_count" -lt 2 ]; then + echo "Not enough entries found to update" + else + # Find all entries in MaxNodesChangeEnableEpoch + all_entries=$(awk '/MaxNodesChangeEnableEpoch/,/\]/{if ($0 ~ /^[[:space:]]*\{/) {p=1}; if (p) print; if ($0 ~ /\]/) p=0}' enableEpochs.toml | grep -vE '^\s*#' | sed '/^\s*$/d') + + # Get the index of the entry with EpochEnable equal to StakingV4Step3EnableEpoch + index=$(echo "$all_entries" | grep -n "EpochEnable = $staking_enable_epoch" | cut -d: -f1) + + if [[ -z "${index// }" ]]; then + echo -e "\033[1;33mWarning: MaxNodesChangeEnableEpoch does not contain an entry enable epoch for StakingV4Step3EnableEpoch, nodes might fail to start...\033[0m" + else + prev_entry=$(echo "$all_entries" | sed -n "$((index-1))p") + curr_entry=$(echo "$all_entries" | sed -n "$((index))p") + + # Extract the value of MaxNumNodes & NodesToShufflePerShard from prev_entry + max_nodes_from_prev_epoch=$(echo "$prev_entry" | awk -F 'MaxNumNodes = ' '{print $2}' | cut -d ',' -f1) + nodes_to_shuffle_per_shard=$(echo "$prev_entry" | awk -F 'NodesToShufflePerShard = ' '{gsub(/[^0-9]+/, "", $2); print $2}') + + # Calculate the new MaxNumNodes value based on the formula + new_max_nodes=$((max_nodes_from_prev_epoch - (SHARDCOUNT + 1) * nodes_to_shuffle_per_shard)) + curr_entry_updated=$(echo "$curr_entry" | awk -v new_max_nodes="$new_max_nodes" '{gsub(/MaxNumNodes = [0-9]+,/, "MaxNumNodes = " new_max_nodes ",")}1') + + echo "Updating entry in MaxNodesChangeEnableEpoch from $curr_entry to $curr_entry_updated" + + sed -i "/$staking_enable_epoch/,/$staking_enable_epoch/ s|.*$curr_entry.*|$curr_entry_updated|" enableEpochs.toml + fi + fi +} + copyProxyConfig() { pushd $TESTNETDIR diff --git a/scripts/testnet/include/observers.sh b/scripts/testnet/include/observers.sh index 6ba9ff9293a..50e7f5ade03 100644 --- a/scripts/testnet/include/observers.sh +++ b/scripts/testnet/include/observers.sh @@ -82,10 +82,18 @@ assembleCommand_startObserverNode() { let "KEY_INDEX=$TOTAL_NODECOUNT - $OBSERVER_INDEX - 1" WORKING_DIR=$TESTNETDIR/node_working_dirs/observer$OBSERVER_INDEX + KEYS_FLAGS="-validator-key-pem-file ./config/validatorKey.pem -sk-index $KEY_INDEX" + # if node is running in multi key mode, in order to avoid loading the common allValidatorKeys.pem file + # and force generating a new key for observers, simply provide an invalid path + if [[ $MULTI_KEY_NODES -eq 1 ]]; then + TMP_MISSING_PEM="missing-file.pem" + KEYS_FLAGS="-all-validator-keys-pem-file $TMP_MISSING_PEM -validator-key-pem-file $TMP_MISSING_PEM" + fi + local nodeCommand="./node \ -port $PORT --profile-mode -log-save -log-level $LOGLEVEL --log-logger-name --log-correlation --use-health-service -rest-api-interface localhost:$RESTAPIPORT \ 
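The updateConfigsForStakingV4 helper above derives the MaxNumNodes value that must apply once StakingV4Step3EnableEpoch kicks in: the previous cap minus one shuffled-out batch per shard plus the metachain, i.e. new_max_nodes = max_nodes_from_prev_epoch - (SHARDCOUNT + 1) * nodes_to_shuffle_per_shard. As an illustrative worked example (numbers chosen for the sketch, not read from a specific config): with SHARDCOUNT=3, a previous entry of MaxNumNodes = 3200 and NodesToShufflePerShard = 80, the entry activated at the step 3 epoch is rewritten to MaxNumNodes = 3200 - 4*80 = 2880, leaving room for the shuffled-out nodes that now pass through the auction list instead of going straight back to waiting.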
-destination-shard-as-observer $SHARD \ - -sk-index $KEY_INDEX \ + $KEYS_FLAGS \ -working-directory $WORKING_DIR -config ./config/config_observer.toml $EXTRA_OBSERVERS_FLAGS" if [ -n "$NODE_NICENESS" ] diff --git a/scripts/testnet/variables.sh b/scripts/testnet/variables.sh index 1dc3c7cc65c..f3fb44c5866 100644 --- a/scripts/testnet/variables.sh +++ b/scripts/testnet/variables.sh @@ -170,10 +170,6 @@ export TOTAL_OBSERVERCOUNT=$total_observer_count # to enable the full archive feature on the observers, please use the --full-archive flag export EXTRA_OBSERVERS_FLAGS="-operation-mode db-lookup-extension" -if [[ $MULTI_KEY_NODES -eq 1 ]]; then - EXTRA_OBSERVERS_FLAGS="--no-key" -fi - # Leave unchanged. let "total_node_count = $SHARD_VALIDATORCOUNT * $SHARDCOUNT + $META_VALIDATORCOUNT + $TOTAL_OBSERVERCOUNT" export TOTAL_NODECOUNT=$total_node_count diff --git a/sharding/mock/enableEpochsHandlerMock.go b/sharding/mock/enableEpochsHandlerMock.go index e275c4ea165..32c6b4fa14c 100644 --- a/sharding/mock/enableEpochsHandlerMock.go +++ b/sharding/mock/enableEpochsHandlerMock.go @@ -7,7 +7,6 @@ import ( // EnableEpochsHandlerMock - type EnableEpochsHandlerMock struct { - WaitingListFixEnableEpochField uint32 RefactorPeersMiniBlocksEnableEpochField uint32 IsRefactorPeersMiniBlocksFlagEnabledField bool CurrentEpoch uint32 @@ -18,8 +17,6 @@ func (mock *EnableEpochsHandlerMock) GetActivationEpoch(flag core.EnableEpochFla switch flag { case common.RefactorPeersMiniBlocksFlag: return mock.RefactorPeersMiniBlocksEnableEpochField - case common.WaitingListFixFlag: - return mock.WaitingListFixEnableEpochField default: return 0 diff --git a/sharding/nodesCoordinator/common.go b/sharding/nodesCoordinator/common.go index c771e711740..1e376cd6b65 100644 --- a/sharding/nodesCoordinator/common.go +++ b/sharding/nodesCoordinator/common.go @@ -52,6 +52,7 @@ func displayNodesConfiguration( waiting map[uint32][]Validator, leaving map[uint32][]Validator, actualRemaining map[uint32][]Validator, + shuffledOut map[uint32][]Validator, nbShards uint32, ) { for shard := uint32(0); shard <= nbShards; shard++ { @@ -75,6 +76,10 @@ func displayNodesConfiguration( pk := v.PubKey() log.Debug("actually remaining", "pk", pk, "shardID", shardID) } + for _, v := range shuffledOut[shardID] { + pk := v.PubKey() + log.Debug("shuffled out", "pk", pk, "shardID", shardID) + } } } diff --git a/sharding/nodesCoordinator/consensusGroupProviderBench_test.go b/sharding/nodesCoordinator/consensusGroupProviderBench_test.go index c24f6f9549f..49731812213 100644 --- a/sharding/nodesCoordinator/consensusGroupProviderBench_test.go +++ b/sharding/nodesCoordinator/consensusGroupProviderBench_test.go @@ -1,11 +1,9 @@ package nodesCoordinator import ( - "math/rand" "testing" ) -const randSeed = 75 const numValidators = 63 const numValidatorsInEligibleList = 400 @@ -20,7 +18,6 @@ func getRandomness() []byte { func BenchmarkReslicingBasedProvider_Get(b *testing.B) { numVals := numValidators - rand.Seed(randSeed) expElList := getExpandedEligibleList(numValidatorsInEligibleList) randomness := getRandomness() @@ -32,7 +29,6 @@ func BenchmarkReslicingBasedProvider_Get(b *testing.B) { func BenchmarkSelectionBasedProvider_Get(b *testing.B) { numVals := numValidators - rand.Seed(randSeed) expElList := getExpandedEligibleList(numValidatorsInEligibleList) randomness := getRandomness() diff --git a/sharding/nodesCoordinator/dtos.go b/sharding/nodesCoordinator/dtos.go index 854dd931d8d..ab54bdeb4fa 100644 --- a/sharding/nodesCoordinator/dtos.go +++ 
b/sharding/nodesCoordinator/dtos.go @@ -7,6 +7,7 @@ type ArgsUpdateNodes struct { NewNodes []Validator UnStakeLeaving []Validator AdditionalLeaving []Validator + Auction []Validator Rand []byte NbShards uint32 Epoch uint32 @@ -16,6 +17,7 @@ type ArgsUpdateNodes struct { type ResUpdateNodes struct { Eligible map[uint32][]Validator Waiting map[uint32][]Validator + ShuffledOut map[uint32][]Validator Leaving []Validator StillRemaining []Validator } diff --git a/sharding/nodesCoordinator/errors.go b/sharding/nodesCoordinator/errors.go index def3944cc0d..3d063f4605e 100644 --- a/sharding/nodesCoordinator/errors.go +++ b/sharding/nodesCoordinator/errors.go @@ -114,3 +114,12 @@ var ErrNilGenesisNodesSetupHandler = errors.New("nil genesis nodes setup handler // ErrKeyNotFoundInWaitingList signals that the provided key has not been found in waiting list var ErrKeyNotFoundInWaitingList = errors.New("key not found in waiting list") + +// ErrNilNodesCoordinatorRegistryFactory signals that a nil nodes coordinator registry factory has been given +var ErrNilNodesCoordinatorRegistryFactory = errors.New("nil nodes coordinator registry factory has been given") + +// ErrReceivedAuctionValidatorsBeforeStakingV4 signals that auction nodes have been received from peer mini blocks before enabling staking v4 +var ErrReceivedAuctionValidatorsBeforeStakingV4 = errors.New("should not have received selected nodes from auction in peer mini blocks, since staking v4 is not enabled yet") + +// ErrNilEpochNotifier signals that a nil EpochNotifier has been provided +var ErrNilEpochNotifier = errors.New("nil epoch notifier provided") diff --git a/sharding/nodesCoordinator/hashValidatorShuffler.go b/sharding/nodesCoordinator/hashValidatorShuffler.go index a4a7e178ee1..ceecc9ca352 100644 --- a/sharding/nodesCoordinator/hashValidatorShuffler.go +++ b/sharding/nodesCoordinator/hashValidatorShuffler.go @@ -7,10 +7,12 @@ import ( "sync" "github.com/multiversx/mx-chain-core-go/core" + "github.com/multiversx/mx-chain-core-go/core/atomic" "github.com/multiversx/mx-chain-core-go/core/check" "github.com/multiversx/mx-chain-core-go/hashing/sha256" "github.com/multiversx/mx-chain-go/common" "github.com/multiversx/mx-chain-go/config" + "github.com/multiversx/mx-chain-go/epochStart" ) var _ NodesShuffler = (*randHashShuffler)(nil) @@ -24,6 +26,7 @@ type NodesShufflerArgs struct { ShuffleBetweenShards bool MaxNodesEnableConfig []config.MaxNodesChangeConfig EnableEpochsHandler common.EnableEpochsHandler + EnableEpochs config.EnableEpochs } type shuffleNodesArg struct { @@ -32,14 +35,26 @@ type shuffleNodesArg struct { unstakeLeaving []Validator additionalLeaving []Validator newNodes []Validator + auction []Validator randomness []byte distributor ValidatorsDistributor nodesMeta uint32 nodesPerShard uint32 nbShards uint32 maxNodesToSwapPerShard uint32 + maxNumNodes uint32 flagBalanceWaitingLists bool - flagWaitingListFix bool + flagStakingV4Step2 bool + flagStakingV4Step3 bool +} + +type shuffledNodesConfig struct { + numShuffled uint32 + numNewEligible uint32 + numNewWaiting uint32 + numSelectedAuction uint32 + maxNumNodes uint32 + flagStakingV4Step2 bool } // TODO: Decide if transaction load statistics will be used for limiting the number of shards @@ -48,16 +63,20 @@ type randHashShuffler struct { // when reinitialization of node in new shard is implemented shuffleBetweenShards bool - adaptivity bool - nodesShard uint32 - nodesMeta uint32 - shardHysteresis uint32 - metaHysteresis uint32 - activeNodesConfig config.MaxNodesChangeConfig - 
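With the dtos.go changes above, the shuffler contract grows on both sides: callers pass the validators selected from the auction list in ArgsUpdateNodes.Auction and get the shuffled-out validators back per shard in ResUpdateNodes.ShuffledOut. A hedged caller-side sketch, assuming the NodesShuffler interface exposes UpdateNodeLists as implemented by randHashShuffler; the leaving lists are omitted for brevity:

package sketch

import "github.com/multiversx/mx-chain-go/sharding/nodesCoordinator"

// updateNodesSketch shows only the staking v4 relevant fields of the shuffler call.
func updateNodesSketch(
	shuffler nodesCoordinator.NodesShuffler,
	eligible, waiting map[uint32][]nodesCoordinator.Validator,
	auction []nodesCoordinator.Validator,
	randomness []byte,
	nbShards, epoch uint32,
) (map[uint32][]nodesCoordinator.Validator, error) {
	res, err := shuffler.UpdateNodeLists(nodesCoordinator.ArgsUpdateNodes{
		Eligible: eligible,
		Waiting:  waiting,
		Auction:  auction, // new in staking v4: nodes already selected from the auction list
		Rand:     randomness,
		NbShards: nbShards,
		Epoch:    epoch,
	})
	if err != nil {
		return nil, err
	}
	// ShuffledOut reports, per shard, the validators moved out of eligible this epoch;
	// under staking v4 step 2 these are the candidates sent to the auction list.
	return res.ShuffledOut, nil
}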
availableNodesConfigs []config.MaxNodesChangeConfig - mutShufflerParams sync.RWMutex - validatorDistributor ValidatorsDistributor - enableEpochsHandler common.EnableEpochsHandler + adaptivity bool + nodesShard uint32 + nodesMeta uint32 + shardHysteresis uint32 + metaHysteresis uint32 + activeNodesConfig config.MaxNodesChangeConfig + availableNodesConfigs []config.MaxNodesChangeConfig + mutShufflerParams sync.RWMutex + validatorDistributor ValidatorsDistributor + enableEpochsHandler common.EnableEpochsHandler + stakingV4Step2EnableEpoch uint32 + flagStakingV4Step2 atomic.Flag + stakingV4Step3EnableEpoch uint32 + flagStakingV4Step3 atomic.Flag } // NewHashValidatorsShuffler creates a validator shuffler that uses a hash between validator key and a given @@ -71,7 +90,6 @@ func NewHashValidatorsShuffler(args *NodesShufflerArgs) (*randHashShuffler, erro } err := core.CheckHandlerCompatibility(args.EnableEpochsHandler, []core.EnableEpochFlag{ common.BalanceWaitingListsFlag, - common.WaitingListFixFlag, }) if err != nil { return nil, err @@ -80,6 +98,9 @@ func NewHashValidatorsShuffler(args *NodesShufflerArgs) (*randHashShuffler, erro var configs []config.MaxNodesChangeConfig log.Debug("hashValidatorShuffler: enable epoch for max nodes change", "epoch", args.MaxNodesEnableConfig) + log.Debug("hashValidatorShuffler: enable epoch for staking v4 step 2", "epoch", args.EnableEpochs.StakingV4Step2EnableEpoch) + log.Debug("hashValidatorShuffler: enable epoch for staking v4 step 3", "epoch", args.EnableEpochs.StakingV4Step3EnableEpoch) + if args.MaxNodesEnableConfig != nil { configs = make([]config.MaxNodesChangeConfig, len(args.MaxNodesEnableConfig)) copy(configs, args.MaxNodesEnableConfig) @@ -87,9 +108,11 @@ func NewHashValidatorsShuffler(args *NodesShufflerArgs) (*randHashShuffler, erro log.Debug("Shuffler created", "shuffleBetweenShards", args.ShuffleBetweenShards) rxs := &randHashShuffler{ - shuffleBetweenShards: args.ShuffleBetweenShards, - availableNodesConfigs: configs, - enableEpochsHandler: args.EnableEpochsHandler, + shuffleBetweenShards: args.ShuffleBetweenShards, + availableNodesConfigs: configs, + enableEpochsHandler: args.EnableEpochsHandler, + stakingV4Step2EnableEpoch: args.EnableEpochs.StakingV4Step2EnableEpoch, + stakingV4Step3EnableEpoch: args.EnableEpochs.StakingV4Step3EnableEpoch, } rxs.UpdateParams(args.NodesShard, args.NodesMeta, args.Hysteresis, args.Adaptivity) @@ -178,6 +201,7 @@ func (rhs *randHashShuffler) UpdateNodeLists(args ArgsUpdateNodes) (*ResUpdateNo unstakeLeaving: args.UnStakeLeaving, additionalLeaving: args.AdditionalLeaving, newNodes: args.NewNodes, + auction: args.Auction, randomness: args.Rand, nodesMeta: nodesMeta, nodesPerShard: nodesPerShard, @@ -185,7 +209,9 @@ func (rhs *randHashShuffler) UpdateNodeLists(args ArgsUpdateNodes) (*ResUpdateNo distributor: rhs.validatorDistributor, maxNodesToSwapPerShard: rhs.activeNodesConfig.NodesToShufflePerShard, flagBalanceWaitingLists: rhs.enableEpochsHandler.IsFlagEnabledInEpoch(common.BalanceWaitingListsFlag, args.Epoch), - flagWaitingListFix: rhs.enableEpochsHandler.IsFlagEnabledInEpoch(common.WaitingListFixFlag, args.Epoch), + flagStakingV4Step2: rhs.flagStakingV4Step2.IsSet(), + flagStakingV4Step3: rhs.flagStakingV4Step3.IsSet(), + maxNumNodes: rhs.activeNodesConfig.MaxNumNodes, }) } @@ -263,18 +289,12 @@ func shuffleNodes(arg shuffleNodesArg) (*ResUpdateNodes, error) { eligibleCopy, waitingCopy, numToRemove, - remainingUnstakeLeaving, - int(arg.nodesMeta), - int(arg.nodesPerShard), - arg.flagWaitingListFix) + 
remainingUnstakeLeaving) newEligible, newWaiting, stillRemainingAdditionalLeaving := removeLeavingNodesFromValidatorMaps( newEligible, newWaiting, numToRemove, - remainingAdditionalLeaving, - int(arg.nodesMeta), - int(arg.nodesPerShard), - arg.flagWaitingListFix) + remainingAdditionalLeaving) stillRemainingInLeaving := append(stillRemainingUnstakeLeaving, stillRemainingAdditionalLeaving...) @@ -282,17 +302,44 @@ func shuffleNodes(arg shuffleNodesArg) (*ResUpdateNodes, error) { err = moveMaxNumNodesToMap(newEligible, newWaiting, arg.nodesMeta, arg.nodesPerShard) if err != nil { - log.Warn("moveNodesToMap failed", "error", err) + return nil, fmt.Errorf("moveNodesToMap failed, error: %w", err) } - err = distributeValidators(newWaiting, arg.newNodes, arg.randomness, false) + err = checkAndDistributeNewNodes(newWaiting, arg.newNodes, arg.randomness, arg.flagStakingV4Step3) if err != nil { - log.Warn("distributeValidators newNodes failed", "error", err) + return nil, fmt.Errorf("distributeValidators newNodes failed, error: %w", err) } - err = arg.distributor.DistributeValidators(newWaiting, shuffledOutMap, arg.randomness, arg.flagBalanceWaitingLists) - if err != nil { - log.Warn("distributeValidators shuffledOut failed", "error", err) + shuffledNodesCfg := &shuffledNodesConfig{ + numShuffled: getNumPubKeys(shuffledOutMap), + numNewEligible: getNumPubKeys(newEligible), + numNewWaiting: getNumPubKeys(newWaiting), + numSelectedAuction: uint32(len(arg.auction)), + maxNumNodes: arg.maxNumNodes, + flagStakingV4Step2: arg.flagStakingV4Step2, + } + + lowWaitingList := shouldDistributeShuffledToWaitingInStakingV4(shuffledNodesCfg) + if arg.flagStakingV4Step3 || lowWaitingList { + log.Debug("distributing selected nodes from auction to waiting", + "num auction nodes", len(arg.auction), "num waiting nodes", shuffledNodesCfg.numNewWaiting) + + // Distribute selected validators from AUCTION -> WAITING + err = distributeValidators(newWaiting, arg.auction, arg.randomness, arg.flagBalanceWaitingLists) + if err != nil { + return nil, fmt.Errorf("distributeValidators auction list failed, error: %w", err) + } + } + + if !arg.flagStakingV4Step2 || lowWaitingList { + log.Debug("distributing shuffled out nodes to waiting", + "num shuffled nodes", shuffledNodesCfg.numShuffled, "num waiting nodes", shuffledNodesCfg.numNewWaiting) + + // Distribute validators from SHUFFLED OUT -> WAITING + err = arg.distributor.DistributeValidators(newWaiting, shuffledOutMap, arg.randomness, arg.flagBalanceWaitingLists) + if err != nil { + return nil, fmt.Errorf("distributeValidators shuffled out failed, error: %w", err) + } } actualLeaving, _ := removeValidatorsFromList(allLeaving, stillRemainingInLeaving, len(stillRemainingInLeaving)) @@ -300,6 +347,7 @@ func shuffleNodes(arg shuffleNodesArg) (*ResUpdateNodes, error) { return &ResUpdateNodes{ Eligible: newEligible, Waiting: newWaiting, + ShuffledOut: shuffledOutMap, Leaving: actualLeaving, StillRemaining: stillRemainingInLeaving, }, nil @@ -381,62 +429,16 @@ func removeLeavingNodesFromValidatorMaps( waiting map[uint32][]Validator, numToRemove map[uint32]int, leaving []Validator, - minNodesMeta int, - minNodesPerShard int, - waitingFixEnabled bool, ) (map[uint32][]Validator, map[uint32][]Validator, []Validator) { stillRemainingInLeaving := make([]Validator, len(leaving)) copy(stillRemainingInLeaving, leaving) - if !waitingFixEnabled { - newWaiting, stillRemainingInLeaving := removeNodesFromMap(waiting, stillRemainingInLeaving, numToRemove) - newEligible, stillRemainingInLeaving := 
removeNodesFromMap(eligible, stillRemainingInLeaving, numToRemove) - return newEligible, newWaiting, stillRemainingInLeaving - } - - return removeLeavingNodes(eligible, waiting, numToRemove, stillRemainingInLeaving, minNodesMeta, minNodesPerShard) -} - -func removeLeavingNodes( - eligible map[uint32][]Validator, - waiting map[uint32][]Validator, - numToRemove map[uint32]int, - stillRemainingInLeaving []Validator, - minNodesMeta int, - minNodesPerShard int, -) (map[uint32][]Validator, map[uint32][]Validator, []Validator) { - maxNumToRemoveFromWaiting := make(map[uint32]int) - for shardId := range eligible { - computedMinNumberOfNodes := computeMinNumberOfNodes(eligible, waiting, shardId, minNodesMeta, minNodesPerShard) - maxNumToRemoveFromWaiting[shardId] = computedMinNumberOfNodes - } - - newWaiting, stillRemainingInLeaving := removeNodesFromMap(waiting, stillRemainingInLeaving, maxNumToRemoveFromWaiting) - - for shardId, toRemove := range numToRemove { - computedMinNumberOfNodes := computeMinNumberOfNodes(eligible, waiting, shardId, minNodesMeta, minNodesPerShard) - if toRemove > computedMinNumberOfNodes { - numToRemove[shardId] = computedMinNumberOfNodes - } - } - + newWaiting, stillRemainingInLeaving := removeNodesFromMap(waiting, stillRemainingInLeaving, numToRemove) newEligible, stillRemainingInLeaving := removeNodesFromMap(eligible, stillRemainingInLeaving, numToRemove) return newEligible, newWaiting, stillRemainingInLeaving } -func computeMinNumberOfNodes(eligible map[uint32][]Validator, waiting map[uint32][]Validator, shardId uint32, minNodesMeta int, minNodesPerShard int) int { - minimumNumberOfNodes := minNodesPerShard - if shardId == core.MetachainShardId { - minimumNumberOfNodes = minNodesMeta - } - computedMinNumberOfNodes := len(eligible[shardId]) + len(waiting[shardId]) - minimumNumberOfNodes - if computedMinNumberOfNodes < 0 { - computedMinNumberOfNodes = 0 - } - return computedMinNumberOfNodes -} - // computeNewShards determines the new number of shards based on the number of nodes in the network func (rhs *randHashShuffler) computeNewShards( eligible map[uint32][]Validator, @@ -586,6 +588,51 @@ func removeValidatorFromList(validatorList []Validator, index int) []Validator { return validatorList[:len(validatorList)-1] } +func checkAndDistributeNewNodes( + waiting map[uint32][]Validator, + newNodes []Validator, + randomness []byte, + flagStakingV4Step3 bool, +) error { + if !flagStakingV4Step3 { + return distributeValidators(waiting, newNodes, randomness, false) + } + + if len(newNodes) > 0 { + return epochStart.ErrReceivedNewListNodeInStakingV4 + } + + return nil +} + +func shouldDistributeShuffledToWaitingInStakingV4(shuffledNodesCfg *shuffledNodesConfig) bool { + if !shuffledNodesCfg.flagStakingV4Step2 { + return false + } + + totalNewWaiting := shuffledNodesCfg.numNewWaiting + shuffledNodesCfg.numSelectedAuction + totalNodes := totalNewWaiting + shuffledNodesCfg.numNewEligible + shuffledNodesCfg.numShuffled + + log.Debug("checking if should distribute shuffled out nodes to waiting in staking v4", + "numShuffled", shuffledNodesCfg.numShuffled, + "numNewEligible", shuffledNodesCfg.numNewEligible, + "numSelectedAuction", shuffledNodesCfg.numSelectedAuction, + "totalNewWaiting", totalNewWaiting, + "totalNodes", totalNodes, + "maxNumNodes", shuffledNodesCfg.maxNumNodes, + ) + + distributeShuffledToWaitingInStakingV4 := false + if totalNodes <= shuffledNodesCfg.maxNumNodes { + log.Debug("num of total nodes in waiting is too low after shuffling; will distribute " + + "shuffled 
out nodes directly to waiting and skip sending them to auction") + + distributeShuffledToWaitingInStakingV4 = true + } + + return distributeShuffledToWaitingInStakingV4 +} + func removeValidatorFromListKeepOrder(validatorList []Validator, index int) []Validator { indexNotOK := index > len(validatorList)-1 || index < 0 if indexNotOK { @@ -646,6 +693,16 @@ func moveNodesToMap(destination map[uint32][]Validator, source map[uint32][]Vali return nil } +func getNumPubKeys(shardValidatorsMap map[uint32][]Validator) uint32 { + numPubKeys := uint32(0) + + for _, validatorsInShard := range shardValidatorsMap { + numPubKeys += uint32(len(validatorsInShard)) + } + + return numPubKeys +} + // moveMaxNumNodesToMap moves the validators in the source list to the corresponding destination list // but adding just enough nodes so that at most the number of nodes is kept in the destination list // The parameter maxNodesToMove is a limiting factor and should limit the number of nodes @@ -778,6 +835,12 @@ func (rhs *randHashShuffler) updateShufflerConfig(epoch uint32) { "epochEnable", rhs.activeNodesConfig.EpochEnable, "maxNodesToShufflePerShard", rhs.activeNodesConfig.NodesToShufflePerShard, ) + + rhs.flagStakingV4Step3.SetValue(epoch >= rhs.stakingV4Step3EnableEpoch) + log.Debug("staking v4 step3", "enabled", rhs.flagStakingV4Step3.IsSet()) + + rhs.flagStakingV4Step2.SetValue(epoch >= rhs.stakingV4Step2EnableEpoch) + log.Debug("staking v4 step2", "enabled", rhs.flagStakingV4Step2.IsSet()) } func (rhs *randHashShuffler) sortConfigs() { diff --git a/sharding/nodesCoordinator/hashValidatorShuffler_test.go b/sharding/nodesCoordinator/hashValidatorShuffler_test.go index 79a8ed1e7f8..788ec3f9b59 100644 --- a/sharding/nodesCoordinator/hashValidatorShuffler_test.go +++ b/sharding/nodesCoordinator/hashValidatorShuffler_test.go @@ -13,10 +13,9 @@ import ( "testing" "github.com/multiversx/mx-chain-core-go/core" - "github.com/multiversx/mx-chain-go/common" "github.com/multiversx/mx-chain-go/config" + "github.com/multiversx/mx-chain-go/epochStart" "github.com/multiversx/mx-chain-go/sharding/mock" - "github.com/multiversx/mx-chain-go/testscommon/enableEpochsHandlerMock" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) @@ -194,8 +193,11 @@ func createHashShufflerInter() (*randHashShuffler, error) { Hysteresis: hysteresis, Adaptivity: adaptivity, ShuffleBetweenShards: true, - MaxNodesEnableConfig: nil, EnableEpochsHandler: &mock.EnableEpochsHandlerMock{}, + EnableEpochs: config.EnableEpochs{ + StakingV4Step2EnableEpoch: 443, + StakingV4Step3EnableEpoch: 444, + }, } shuffler, err := NewHashValidatorsShuffler(shufflerArgs) @@ -210,8 +212,11 @@ func createHashShufflerIntraShards() (*randHashShuffler, error) { Hysteresis: hysteresis, Adaptivity: adaptivity, ShuffleBetweenShards: shuffleBetweenShards, - MaxNodesEnableConfig: nil, - EnableEpochsHandler: &mock.EnableEpochsHandlerMock{}, + EnableEpochs: config.EnableEpochs{ + StakingV4Step2EnableEpoch: 443, + StakingV4Step3EnableEpoch: 444, + }, + EnableEpochsHandler: &mock.EnableEpochsHandlerMock{}, } shuffler, err := NewHashValidatorsShuffler(shufflerArgs) @@ -989,10 +994,7 @@ func Test_shuffleOutNodesWithLeaving(t *testing.T) { copyEligibleMap, copyWaitingMap, numToRemove, - leaving, - eligibleNodesPerShard, - eligibleNodesPerShard, - true) + leaving) shuffledOut, newEligible := shuffleOutNodes(newEligible, numToRemove, randomness) shuffleOutList := make([]Validator, 0) for _, shuffledOutPerShard := range shuffledOut { @@ -1027,10 +1029,7 @@ func 
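To summarize the distribution flow introduced above: once staking v4 step 3 is active, new nodes may no longer arrive through the NewNodes list (checkAndDistributeNewNodes returns epochStart.ErrReceivedNewListNodeInStakingV4) and the auction selection is distributed to waiting instead; the shuffled-out nodes keep going to waiting only while step 2 is not yet active, or as a fallback when the network is under-populated. That fallback in shouldDistributeShuffledToWaitingInStakingV4 is purely arithmetic. An illustrative worked example (numbers invented for the sketch): with maxNumNodes = 48, numNewEligible = 32, numNewWaiting = 8, numSelectedAuction = 2 and numShuffled = 4, totalNodes = 8 + 2 + 32 + 4 = 46 <= 48, so the shuffled-out nodes are put straight back into waiting; had numNewWaiting been 12, totalNodes = 50 > 48 and the shuffled-out nodes would stay in ShuffledOut to be handled through the auction.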
Test_shuffleOutNodesWithLeavingMoreThanWaiting(t *testing.T) { copyEligibleMap, copyWaitingMap, numToRemove, - leaving, - eligibleNodesPerShard, - eligibleNodesPerShard, - true) + leaving) shuffledOut, newEligible := shuffleOutNodes(newEligible, numToRemove, randomness) shuffleOutList := make([]Validator, 0) @@ -1048,52 +1047,30 @@ func Test_removeLeavingNodesFromValidatorMaps(t *testing.T) { waitingNodesPerShard := 40 nbShards := uint32(2) - tests := []struct { - waitingFixEnabled bool - remainingToRemove int - }{ - { - waitingFixEnabled: false, - remainingToRemove: 18, - }, - { - waitingFixEnabled: true, - remainingToRemove: 20, - }, + leaving := make([]Validator, 0) + + eligibleMap := generateValidatorMap(eligibleNodesPerShard, nbShards) + waitingMap := generateValidatorMap(waitingNodesPerShard, nbShards) + for _, waitingValidators := range waitingMap { + leaving = append(leaving, waitingValidators[:2]...) } - for _, tt := range tests { - t.Run("", func(t *testing.T) { - leaving := make([]Validator, 0) + numToRemove := make(map[uint32]int) - eligibleMap := generateValidatorMap(eligibleNodesPerShard, nbShards) - waitingMap := generateValidatorMap(waitingNodesPerShard, nbShards) - for _, waitingValidators := range waitingMap { - leaving = append(leaving, waitingValidators[:2]...) - } + for shardId := range waitingMap { + numToRemove[shardId] = maxShuffleOutNumber + } + copyEligibleMap := copyValidatorMap(eligibleMap) + copyWaitingMap := copyValidatorMap(waitingMap) - numToRemove := make(map[uint32]int) + _, _, _ = removeLeavingNodesFromValidatorMaps( + copyEligibleMap, + copyWaitingMap, + numToRemove, + leaving) - for shardId := range waitingMap { - numToRemove[shardId] = maxShuffleOutNumber - } - copyEligibleMap := copyValidatorMap(eligibleMap) - copyWaitingMap := copyValidatorMap(waitingMap) - - _, _, _ = removeLeavingNodesFromValidatorMaps( - copyEligibleMap, - copyWaitingMap, - numToRemove, - leaving, - eligibleNodesPerShard, - eligibleNodesPerShard, - tt.waitingFixEnabled, - ) - - for _, remainingToRemove := range numToRemove { - require.Equal(t, tt.remainingToRemove, remainingToRemove) - } - }) + for _, remainingToRemove := range numToRemove { + require.Equal(t, 18, remainingToRemove) } } @@ -1188,15 +1165,17 @@ func TestRandHashShuffler_UpdateParams(t *testing.T) { require.Nil(t, err) shuffler2 := &randHashShuffler{ - nodesShard: 200, - nodesMeta: 200, - shardHysteresis: 0, - metaHysteresis: 0, - adaptivity: true, - shuffleBetweenShards: true, - validatorDistributor: &CrossShardValidatorDistributor{}, - availableNodesConfigs: nil, - enableEpochsHandler: &mock.EnableEpochsHandlerMock{}, + nodesShard: 200, + nodesMeta: 200, + shardHysteresis: 0, + metaHysteresis: 0, + adaptivity: true, + shuffleBetweenShards: true, + validatorDistributor: &CrossShardValidatorDistributor{}, + availableNodesConfigs: nil, + stakingV4Step2EnableEpoch: 443, + stakingV4Step3EnableEpoch: 444, + enableEpochsHandler: &mock.EnableEpochsHandlerMock{}, } shuffler.UpdateParams( @@ -1300,12 +1279,6 @@ func TestRandHashShuffler_UpdateNodeListsWaitingListFixDisabled(t *testing.T) { testUpdateNodesAndCheckNumLeaving(t, true) } -func TestRandHashShuffler_UpdateNodeListsWithWaitingListFixEnabled(t *testing.T) { - t.Parallel() - - testUpdateNodesAndCheckNumLeaving(t, false) -} - func testUpdateNodesAndCheckNumLeaving(t *testing.T, beforeFix bool) { eligiblePerShard := 400 eligibleMeta := 10 @@ -1317,11 +1290,6 @@ func testUpdateNodesAndCheckNumLeaving(t *testing.T, beforeFix bool) { numNodesToShuffle := 80 - 
waitingListFixEnableEpoch := 0 - if beforeFix { - waitingListFixEnableEpoch = 9999 - } - shufflerArgs := &NodesShufflerArgs{ NodesShard: uint32(eligiblePerShard), NodesMeta: uint32(eligibleMeta), @@ -1335,14 +1303,7 @@ func testUpdateNodesAndCheckNumLeaving(t *testing.T, beforeFix bool) { NodesToShufflePerShard: uint32(numNodesToShuffle), }, }, - EnableEpochsHandler: &enableEpochsHandlerMock.EnableEpochsHandlerStub{ - IsFlagEnabledInEpochCalled: func(flag core.EnableEpochFlag, epoch uint32) bool { - if flag == common.WaitingListFixFlag { - return epoch >= uint32(waitingListFixEnableEpoch) - } - return false - }, - }, + EnableEpochsHandler: &mock.EnableEpochsHandlerMock{}, } shuffler, err := NewHashValidatorsShuffler(shufflerArgs) @@ -1371,34 +1332,15 @@ func testUpdateNodesAndCheckNumLeaving(t *testing.T, beforeFix bool) { } } -func TestRandHashShuffler_UpdateNodeListsWaitingListWithFixCheckWaitingDisabled(t *testing.T) { - t.Parallel() - - testUpdateNodeListsAndCheckWaitingList(t, true) -} - -func TestRandHashShuffler_UpdateNodeListsWaitingListWithFixCheckWaitingEnabled(t *testing.T) { - t.Parallel() - - testUpdateNodeListsAndCheckWaitingList(t, false) -} - -func testUpdateNodeListsAndCheckWaitingList(t *testing.T, beforeFix bool) { +func TestRandHashShuffler_UpdateNodeListsAndCheckWaitingList(t *testing.T) { eligiblePerShard := 400 eligibleMeta := 10 waitingPerShard := 400 nbShards := 1 - numLeaving := 2 - numNodesToShuffle := 80 - waitingListFixEnableEpoch := 0 - if beforeFix { - waitingListFixEnableEpoch = 9999 - } - shufflerArgs := &NodesShufflerArgs{ NodesShard: uint32(eligiblePerShard), NodesMeta: uint32(eligibleMeta), @@ -1412,14 +1354,7 @@ func testUpdateNodeListsAndCheckWaitingList(t *testing.T, beforeFix bool) { NodesToShufflePerShard: uint32(numNodesToShuffle), }, }, - EnableEpochsHandler: &enableEpochsHandlerMock.EnableEpochsHandlerStub{ - IsFlagEnabledInEpochCalled: func(flag core.EnableEpochFlag, epoch uint32) bool { - if flag == common.WaitingListFixFlag { - return epoch >= uint32(waitingListFixEnableEpoch) - } - return false - }, - }, + EnableEpochsHandler: &mock.EnableEpochsHandlerMock{}, } shuffler, err := NewHashValidatorsShuffler(shufflerArgs) @@ -1453,9 +1388,7 @@ func testUpdateNodeListsAndCheckWaitingList(t *testing.T, beforeFix bool) { } expectedNumWaitingMovedToEligible := numNodesToShuffle - if beforeFix { - expectedNumWaitingMovedToEligible -= numLeaving - } + expectedNumWaitingMovedToEligible -= numLeaving assert.Equal(t, expectedNumWaitingMovedToEligible, numWaitingListToEligible) } @@ -1763,10 +1696,7 @@ func TestRandHashShuffler_RemoveLeavingNodesFromValidatorMaps_FromEligible(t *te eligibleCopy, waitingCopy, numToRemove, - leavingValidators, - eligiblePerShard, - eligiblePerShard, - true) + leavingValidators) assert.Equal(t, eligiblePerShard-1, len(newEligible[core.MetachainShardId])) assert.Equal(t, waitingPerShard, len(newWaiting[core.MetachainShardId])) @@ -1804,10 +1734,7 @@ func TestRandHashShuffler_RemoveLeavingNodesFromValidatorMaps_FromWaiting(t *tes eligibleCopy, waitingCopy, numToRemove, - leavingValidators, - eligiblePerShard, - eligiblePerShard, - true) + leavingValidators) assert.Equal(t, eligiblePerShard, len(newEligible[core.MetachainShardId])) assert.Equal(t, waitingPerShard-1, len(newWaiting[core.MetachainShardId])) @@ -1843,10 +1770,7 @@ func TestRandHashShuffler_RemoveLeavingNodesFromValidatorMaps_NonExisting(t *tes eligibleCopy, waitingCopy, numToRemove, - leavingValidators, - eligiblePerShard, - eligiblePerShard, - true) + 
leavingValidators) assert.Equal(t, eligiblePerShard, len(newEligible[core.MetachainShardId])) assert.Equal(t, waitingPerShard, len(newWaiting[core.MetachainShardId])) @@ -1889,10 +1813,7 @@ func TestRandHashShuffler_RemoveLeavingNodesFromValidatorMaps_2Eligible2Waiting2 eligibleCopy, waitingCopy, numToRemove, - leavingValidators, - eligiblePerShard, - eligiblePerShard, - true) + leavingValidators) remainingInEligible := eligiblePerShard - 2 remainingInWaiting := waitingPerShard - 2 @@ -1949,10 +1870,7 @@ func TestRandHashShuffler_RemoveLeavingNodesFromValidatorMaps_2FromEligible2From eligibleCopy, waitingCopy, numToRemove, - leavingValidators, - eligiblePerShard, - eligiblePerShard, - true) + leavingValidators) // removed first 2 from waiting and just one from eligible remainingInEligible := eligiblePerShard - 1 @@ -2403,8 +2321,11 @@ func TestRandHashShuffler_UpdateNodeLists_All(t *testing.T) { Hysteresis: hysteresis, Adaptivity: adaptivity, ShuffleBetweenShards: shuffleBetweenShards, - MaxNodesEnableConfig: nil, - EnableEpochsHandler: &mock.EnableEpochsHandlerMock{}, + EnableEpochs: config.EnableEpochs{ + StakingV4Step2EnableEpoch: 443, + StakingV4Step3EnableEpoch: 444, + }, + EnableEpochsHandler: &mock.EnableEpochsHandlerMock{}, } shuffler, err := NewHashValidatorsShuffler(shufflerArgs) require.Nil(t, err) @@ -2509,6 +2430,7 @@ func TestRandHashShuffler_UpdateNodeLists_WithNewNodes_NoWaiting(t *testing.T) { ShuffleBetweenShards: shuffleBetweenShards, MaxNodesEnableConfig: nil, EnableEpochsHandler: &mock.EnableEpochsHandlerMock{}, + EnableEpochs: config.EnableEpochs{StakingV4Step3EnableEpoch: stakingV4Epoch}, } shuffler, err := NewHashValidatorsShuffler(shufflerArgs) @@ -2570,6 +2492,7 @@ func TestRandHashShuffler_UpdateNodeLists_WithNewNodes_NilOrEmptyWaiting(t *test ShuffleBetweenShards: shuffleBetweenShards, MaxNodesEnableConfig: nil, EnableEpochsHandler: &mock.EnableEpochsHandlerMock{}, + EnableEpochs: config.EnableEpochs{StakingV4Step3EnableEpoch: stakingV4Epoch}, } shuffler, err := NewHashValidatorsShuffler(shufflerArgs) require.Nil(t, err) @@ -2642,6 +2565,57 @@ func TestRandHashShuffler_UpdateNodeLists_WithNewNodes_WithWaiting(t *testing.T) assert.Equal(t, previousNumberOfNodes, currentNumberOfNodes) } +func TestRandHashShuffler_UpdateNodeLists_WithStakingV4(t *testing.T) { + t.Parallel() + + numEligiblePerShard := 100 + numWaitingPerShard := 30 + numAuction := 40 + nbShards := uint32(2) + + eligibleMap := generateValidatorMap(numEligiblePerShard, nbShards) + waitingMap := generateValidatorMap(numWaitingPerShard, nbShards) + auctionList := generateValidatorList(numAuction) + + args := ArgsUpdateNodes{ + Eligible: eligibleMap, + Waiting: waitingMap, + UnStakeLeaving: make([]Validator, 0), + AdditionalLeaving: make([]Validator, 0), + Rand: generateRandomByteArray(32), + Auction: auctionList, + NbShards: nbShards, + Epoch: stakingV4Epoch, + } + + shuffler, _ := createHashShufflerIntraShards() + resUpdateNodeList, err := shuffler.UpdateNodeLists(args) + require.Nil(t, err) + + for _, auctionNode := range args.Auction { + found, _ := searchInMap(resUpdateNodeList.Waiting, auctionNode.PubKey()) + assert.True(t, found) + } + + allShuffledOut := getValidatorsInMap(resUpdateNodeList.ShuffledOut) + for _, shuffledOut := range allShuffledOut { + found, _ := searchInMap(args.Eligible, shuffledOut.PubKey()) + assert.True(t, found) + } + + allNewEligible := getValidatorsInMap(resUpdateNodeList.Eligible) + allNewWaiting := getValidatorsInMap(resUpdateNodeList.Waiting) + + previousNumberOfNodes 
:= (numEligiblePerShard+numWaitingPerShard)*(int(nbShards)+1) + numAuction + currentNumberOfNodes := len(allNewEligible) + len(allNewWaiting) + len(allShuffledOut) + assert.Equal(t, previousNumberOfNodes, currentNumberOfNodes) + + args.NewNodes = generateValidatorList(100 * (int(nbShards) + 1)) + resUpdateNodeList, err = shuffler.UpdateNodeLists(args) + require.ErrorIs(t, err, epochStart.ErrReceivedNewListNodeInStakingV4) + require.Nil(t, resUpdateNodeList) +} + func TestRandHashShuffler_UpdateNodeLists_WithNewNodes_WithWaiting_WithLeaving(t *testing.T) { t.Parallel() @@ -2699,8 +2673,11 @@ func TestRandHashShuffler_UpdateNodeLists_WithNewNodes_WithWaiting_WithLeaving(t Hysteresis: hysteresis, Adaptivity: adaptivity, ShuffleBetweenShards: shuffleBetweenShards, - MaxNodesEnableConfig: nil, - EnableEpochsHandler: &mock.EnableEpochsHandlerMock{}, + EnableEpochs: config.EnableEpochs{ + StakingV4Step2EnableEpoch: 443, + StakingV4Step3EnableEpoch: 444, + }, + EnableEpochsHandler: &mock.EnableEpochsHandlerMock{}, } shuffler, err := NewHashValidatorsShuffler(shufflerArgs) require.Nil(t, err) diff --git a/sharding/nodesCoordinator/indexHashedNodesCoordinator.go b/sharding/nodesCoordinator/indexHashedNodesCoordinator.go index b591e94e3e2..f70bce06b04 100644 --- a/sharding/nodesCoordinator/indexHashedNodesCoordinator.go +++ b/sharding/nodesCoordinator/indexHashedNodesCoordinator.go @@ -58,45 +58,49 @@ func (v validatorList) Less(i, j int) bool { // TODO: add a parameter for shardID when acting as observer type epochNodesConfig struct { - nbShards uint32 - shardID uint32 - eligibleMap map[uint32][]Validator - waitingMap map[uint32][]Validator - selectors map[uint32]RandomSelector - leavingMap map[uint32][]Validator - newList []Validator - mutNodesMaps sync.RWMutex + nbShards uint32 + shardID uint32 + eligibleMap map[uint32][]Validator + waitingMap map[uint32][]Validator + selectors map[uint32]RandomSelector + leavingMap map[uint32][]Validator + shuffledOutMap map[uint32][]Validator + newList []Validator + auctionList []Validator + mutNodesMaps sync.RWMutex } type indexHashedNodesCoordinator struct { - shardIDAsObserver uint32 - currentEpoch uint32 - shardConsensusGroupSize int - metaConsensusGroupSize int - numTotalEligible uint64 - selfPubKey []byte - savedStateKey []byte - marshalizer marshal.Marshalizer - hasher hashing.Hasher - shuffler NodesShuffler - epochStartRegistrationHandler EpochStartEventNotifier - bootStorer storage.Storer - nodesConfig map[uint32]*epochNodesConfig - mutNodesConfig sync.RWMutex - mutSavedStateKey sync.RWMutex - nodesCoordinatorHelper NodesCoordinatorHelper - consensusGroupCacher Cacher - loadingFromDisk atomic.Value - shuffledOutHandler ShuffledOutHandler - startEpoch uint32 - publicKeyToValidatorMap map[string]*validatorWithShardID - isFullArchive bool - chanStopNode chan endProcess.ArgEndProcess - flagWaitingListFix atomicFlags.Flag - nodeTypeProvider NodeTypeProviderHandler - enableEpochsHandler common.EnableEpochsHandler - validatorInfoCacher epochStart.ValidatorInfoCacher - genesisNodesSetupHandler GenesisNodesSetupHandler + shardIDAsObserver uint32 + currentEpoch uint32 + shardConsensusGroupSize int + metaConsensusGroupSize int + numTotalEligible uint64 + selfPubKey []byte + savedStateKey []byte + marshalizer marshal.Marshalizer + hasher hashing.Hasher + shuffler NodesShuffler + epochStartRegistrationHandler EpochStartEventNotifier + bootStorer storage.Storer + nodesConfig map[uint32]*epochNodesConfig + mutNodesConfig sync.RWMutex + mutSavedStateKey sync.RWMutex + 
nodesCoordinatorHelper NodesCoordinatorHelper + consensusGroupCacher Cacher + loadingFromDisk atomic.Value + shuffledOutHandler ShuffledOutHandler + startEpoch uint32 + publicKeyToValidatorMap map[string]*validatorWithShardID + isFullArchive bool + chanStopNode chan endProcess.ArgEndProcess + nodeTypeProvider NodeTypeProviderHandler + enableEpochsHandler common.EnableEpochsHandler + validatorInfoCacher epochStart.ValidatorInfoCacher + genesisNodesSetupHandler GenesisNodesSetupHandler + flagStakingV4Step2 atomicFlags.Flag + nodesCoordinatorRegistryFactory NodesCoordinatorRegistryFactory + flagStakingV4Started atomicFlags.Flag } // NewIndexHashedNodesCoordinator creates a new index hashed group selector @@ -109,52 +113,56 @@ func NewIndexHashedNodesCoordinator(arguments ArgNodesCoordinator) (*indexHashed nodesConfig := make(map[uint32]*epochNodesConfig, nodesCoordinatorStoredEpochs) nodesConfig[arguments.Epoch] = &epochNodesConfig{ - nbShards: arguments.NbShards, - shardID: arguments.ShardIDAsObserver, - eligibleMap: make(map[uint32][]Validator), - waitingMap: make(map[uint32][]Validator), - selectors: make(map[uint32]RandomSelector), - leavingMap: make(map[uint32][]Validator), - newList: make([]Validator, 0), - } - + nbShards: arguments.NbShards, + shardID: arguments.ShardIDAsObserver, + eligibleMap: make(map[uint32][]Validator), + waitingMap: make(map[uint32][]Validator), + selectors: make(map[uint32]RandomSelector), + leavingMap: make(map[uint32][]Validator), + shuffledOutMap: make(map[uint32][]Validator), + newList: make([]Validator, 0), + auctionList: make([]Validator, 0), + } + + // todo: if not genesis, use previous randomness from start of epoch meta block savedKey := arguments.Hasher.Compute(string(arguments.SelfPublicKey)) ihnc := &indexHashedNodesCoordinator{ - marshalizer: arguments.Marshalizer, - hasher: arguments.Hasher, - shuffler: arguments.Shuffler, - epochStartRegistrationHandler: arguments.EpochStartNotifier, - bootStorer: arguments.BootStorer, - selfPubKey: arguments.SelfPublicKey, - nodesConfig: nodesConfig, - currentEpoch: arguments.Epoch, - savedStateKey: savedKey, - shardConsensusGroupSize: arguments.ShardConsensusGroupSize, - metaConsensusGroupSize: arguments.MetaConsensusGroupSize, - consensusGroupCacher: arguments.ConsensusGroupCache, - shardIDAsObserver: arguments.ShardIDAsObserver, - shuffledOutHandler: arguments.ShuffledOutHandler, - startEpoch: arguments.StartEpoch, - publicKeyToValidatorMap: make(map[string]*validatorWithShardID), - chanStopNode: arguments.ChanStopNode, - nodeTypeProvider: arguments.NodeTypeProvider, - isFullArchive: arguments.IsFullArchive, - enableEpochsHandler: arguments.EnableEpochsHandler, - validatorInfoCacher: arguments.ValidatorInfoCacher, - genesisNodesSetupHandler: arguments.GenesisNodesSetupHandler, + marshalizer: arguments.Marshalizer, + hasher: arguments.Hasher, + shuffler: arguments.Shuffler, + epochStartRegistrationHandler: arguments.EpochStartNotifier, + bootStorer: arguments.BootStorer, + selfPubKey: arguments.SelfPublicKey, + nodesConfig: nodesConfig, + currentEpoch: arguments.Epoch, + savedStateKey: savedKey, + shardConsensusGroupSize: arguments.ShardConsensusGroupSize, + metaConsensusGroupSize: arguments.MetaConsensusGroupSize, + consensusGroupCacher: arguments.ConsensusGroupCache, + shardIDAsObserver: arguments.ShardIDAsObserver, + shuffledOutHandler: arguments.ShuffledOutHandler, + startEpoch: arguments.StartEpoch, + publicKeyToValidatorMap: make(map[string]*validatorWithShardID), + chanStopNode: arguments.ChanStopNode, + 
nodeTypeProvider: arguments.NodeTypeProvider, + isFullArchive: arguments.IsFullArchive, + enableEpochsHandler: arguments.EnableEpochsHandler, + validatorInfoCacher: arguments.ValidatorInfoCacher, + genesisNodesSetupHandler: arguments.GenesisNodesSetupHandler, + nodesCoordinatorRegistryFactory: arguments.NodesCoordinatorRegistryFactory, } ihnc.loadingFromDisk.Store(false) ihnc.nodesCoordinatorHelper = ihnc - err = ihnc.setNodesPerShards(arguments.EligibleNodes, arguments.WaitingNodes, nil, arguments.Epoch) + err = ihnc.setNodesPerShards(arguments.EligibleNodes, arguments.WaitingNodes, nil, nil, arguments.Epoch) if err != nil { return nil, err } ihnc.fillPublicKeyToValidatorMap() - err = ihnc.saveState(ihnc.savedStateKey) + err = ihnc.saveState(ihnc.savedStateKey, arguments.Epoch) if err != nil { log.Error("saving initial nodes coordinator config failed", "error", err.Error()) @@ -175,6 +183,7 @@ func NewIndexHashedNodesCoordinator(arguments ArgNodesCoordinator) (*indexHashed currentConfig.waitingMap, currentConfig.leavingMap, make(map[uint32][]Validator), + currentConfig.shuffledOutMap, currentConfig.nbShards) ihnc.epochStartRegistrationHandler.RegisterHandler(ihnc) @@ -216,6 +225,9 @@ func checkArguments(arguments ArgNodesCoordinator) error { if check.IfNil(arguments.NodeTypeProvider) { return ErrNilNodeTypeProvider } + if check.IfNil(arguments.NodesCoordinatorRegistryFactory) { + return ErrNilNodesCoordinatorRegistryFactory + } if nil == arguments.ChanStopNode { return ErrNilNodeStopChannel } @@ -224,7 +236,6 @@ func checkArguments(arguments ArgNodesCoordinator) error { } err := core.CheckHandlerCompatibility(arguments.EnableEpochsHandler, []core.EnableEpochFlag{ common.RefactorPeersMiniBlocksFlag, - common.WaitingListFixFlag, }) if err != nil { return err @@ -244,6 +255,7 @@ func (ihnc *indexHashedNodesCoordinator) setNodesPerShards( eligible map[uint32][]Validator, waiting map[uint32][]Validator, leaving map[uint32][]Validator, + shuffledOut map[uint32][]Validator, epoch uint32, ) error { ihnc.mutNodesConfig.Lock() @@ -283,6 +295,7 @@ func (ihnc *indexHashedNodesCoordinator) setNodesPerShards( nodesConfig.eligibleMap = eligible nodesConfig.waitingMap = waiting nodesConfig.leavingMap = leaving + nodesConfig.shuffledOutMap = shuffledOut nodesConfig.shardID, isCurrentNodeValidator = ihnc.computeShardForSelfPublicKey(nodesConfig) nodesConfig.selectors, err = ihnc.createSelectors(nodesConfig) if err != nil { @@ -509,6 +522,30 @@ func (ihnc *indexHashedNodesCoordinator) GetAllLeavingValidatorsPublicKeys(epoch return validatorsPubKeys, nil } +// GetAllShuffledOutValidatorsPublicKeys will return all shuffled out validator public keys from all shards +func (ihnc *indexHashedNodesCoordinator) GetAllShuffledOutValidatorsPublicKeys(epoch uint32) (map[uint32][][]byte, error) { + validatorsPubKeys := make(map[uint32][][]byte) + + ihnc.mutNodesConfig.RLock() + nodesConfig, ok := ihnc.nodesConfig[epoch] + ihnc.mutNodesConfig.RUnlock() + + if !ok { + return nil, fmt.Errorf("%w epoch=%v", ErrEpochNodesConfigDoesNotExist, epoch) + } + + nodesConfig.mutNodesMaps.RLock() + defer nodesConfig.mutNodesMaps.RUnlock() + + for shardID, shuffledOutList := range nodesConfig.shuffledOutMap { + for _, shuffledOutValidator := range shuffledOutList { + validatorsPubKeys[shardID] = append(validatorsPubKeys[shardID], shuffledOutValidator.PubKey()) + } + } + + return validatorsPubKeys, nil +} + // GetValidatorsIndexes will return validators indexes for a block func (ihnc *indexHashedNodesCoordinator) GetValidatorsIndexes( 
publicKeys []string, @@ -563,7 +600,8 @@ func (ihnc *indexHashedNodesCoordinator) EpochStartPrepare(metaHdr data.HeaderHa return } - if _, ok := metaHdr.(*block.MetaBlock); !ok { + _, castOk := metaHdr.(*block.MetaBlock) + if !castOk { log.Error("could not process EpochStartPrepare on nodesCoordinator - not metaBlock") return } @@ -584,37 +622,13 @@ func (ihnc *indexHashedNodesCoordinator) EpochStartPrepare(metaHdr data.HeaderHa return } - ihnc.mutNodesConfig.RLock() - previousConfig := ihnc.nodesConfig[ihnc.currentEpoch] - if previousConfig == nil { - log.Error("previous nodes config is nil") - ihnc.mutNodesConfig.RUnlock() - return - } - - // TODO: remove the copy if no changes are done to the maps - copiedPrevious := &epochNodesConfig{} - copiedPrevious.eligibleMap = copyValidatorMap(previousConfig.eligibleMap) - copiedPrevious.waitingMap = copyValidatorMap(previousConfig.waitingMap) - copiedPrevious.nbShards = previousConfig.nbShards - - ihnc.mutNodesConfig.RUnlock() - // TODO: compare with previous nodesConfig if exists - newNodesConfig, err := ihnc.computeNodesConfigFromList(copiedPrevious, allValidatorInfo) + newNodesConfig, err := ihnc.computeNodesConfigFromList(allValidatorInfo) if err != nil { log.Error("could not compute nodes config from list - do nothing on nodesCoordinator epochStartPrepare") return } - if copiedPrevious.nbShards != newNodesConfig.nbShards { - log.Warn("number of shards does not match", - "previous epoch", ihnc.currentEpoch, - "previous number of shards", copiedPrevious.nbShards, - "new epoch", newEpoch, - "new number of shards", newNodesConfig.nbShards) - } - additionalLeavingMap, err := ihnc.nodesCoordinatorHelper.ComputeAdditionalLeaving(allValidatorInfo) if err != nil { log.Error("could not compute additionalLeaving Nodes - do nothing on nodesCoordinator epochStartPrepare") @@ -628,6 +642,7 @@ func (ihnc *indexHashedNodesCoordinator) EpochStartPrepare(metaHdr data.HeaderHa Eligible: newNodesConfig.eligibleMap, Waiting: newNodesConfig.waitingMap, NewNodes: newNodesConfig.newList, + Auction: newNodesConfig.auctionList, UnStakeLeaving: unStakeLeavingList, AdditionalLeaving: additionalLeavingList, Rand: randomness, @@ -647,13 +662,13 @@ func (ihnc *indexHashedNodesCoordinator) EpochStartPrepare(metaHdr data.HeaderHa resUpdateNodes.Leaving, ) - err = ihnc.setNodesPerShards(resUpdateNodes.Eligible, resUpdateNodes.Waiting, leavingNodesMap, newEpoch) + err = ihnc.setNodesPerShards(resUpdateNodes.Eligible, resUpdateNodes.Waiting, leavingNodesMap, resUpdateNodes.ShuffledOut, newEpoch) if err != nil { log.Error("set nodes per shard failed", "error", err.Error()) } ihnc.fillPublicKeyToValidatorMap() - err = ihnc.saveState(randomness) + err = ihnc.saveState(randomness, newEpoch) ihnc.handleErrorLog(err, "saving nodes coordinator config failed") displayNodesConfiguration( @@ -661,6 +676,7 @@ func (ihnc *indexHashedNodesCoordinator) EpochStartPrepare(metaHdr data.HeaderHa resUpdateNodes.Waiting, leavingNodesMap, stillRemainingNodesMap, + resUpdateNodes.ShuffledOut, newNodesConfig.nbShards) ihnc.mutSavedStateKey.Lock() @@ -714,18 +730,13 @@ func (ihnc *indexHashedNodesCoordinator) GetChance(_ uint32) uint32 { } func (ihnc *indexHashedNodesCoordinator) computeNodesConfigFromList( - previousEpochConfig *epochNodesConfig, validatorInfos []*state.ShardValidatorInfo, ) (*epochNodesConfig, error) { eligibleMap := make(map[uint32][]Validator) waitingMap := make(map[uint32][]Validator) leavingMap := make(map[uint32][]Validator) newNodesList := make([]Validator, 0) - - if 
ihnc.flagWaitingListFix.IsSet() && previousEpochConfig == nil { - return nil, ErrNilPreviousEpochConfig - } - + auctionList := make([]Validator, 0) if len(validatorInfos) == 0 { log.Warn("computeNodesConfigFromList - validatorInfos len is 0") } @@ -743,25 +754,41 @@ func (ihnc *indexHashedNodesCoordinator) computeNodesConfigFromList( case string(common.EligibleList): eligibleMap[validatorInfo.ShardId] = append(eligibleMap[validatorInfo.ShardId], currentValidator) case string(common.LeavingList): - log.Debug("leaving node validatorInfo", "pk", validatorInfo.PublicKey) + log.Debug("leaving node validatorInfo", + "pk", validatorInfo.PublicKey, + "previous list", validatorInfo.PreviousList, + "current index", validatorInfo.Index, + "previous index", validatorInfo.PreviousIndex, + "shardId", validatorInfo.ShardId) leavingMap[validatorInfo.ShardId] = append(leavingMap[validatorInfo.ShardId], currentValidator) ihnc.addValidatorToPreviousMap( - previousEpochConfig, eligibleMap, waitingMap, currentValidator, - validatorInfo.ShardId) + validatorInfo, + ) case string(common.NewList): + if ihnc.flagStakingV4Step2.IsSet() { + return nil, epochStart.ErrReceivedNewListNodeInStakingV4 + } log.Debug("new node registered", "pk", validatorInfo.PublicKey) newNodesList = append(newNodesList, currentValidator) case string(common.InactiveList): log.Debug("inactive validator", "pk", validatorInfo.PublicKey) case string(common.JailedList): log.Debug("jailed validator", "pk", validatorInfo.PublicKey) + case string(common.SelectedFromAuctionList): + log.Debug("selected node from auction", "pk", validatorInfo.PublicKey) + if ihnc.flagStakingV4Step2.IsSet() { + auctionList = append(auctionList, currentValidator) + } else { + return nil, ErrReceivedAuctionValidatorsBeforeStakingV4 + } } } sort.Sort(validatorList(newNodesList)) + sort.Sort(validatorList(auctionList)) for _, eligibleList := range eligibleMap { sort.Sort(validatorList(eligibleList)) } @@ -783,6 +810,7 @@ func (ihnc *indexHashedNodesCoordinator) computeNodesConfigFromList( waitingMap: waitingMap, leavingMap: leavingMap, newList: newNodesList, + auctionList: auctionList, nbShards: uint32(nbShards), } @@ -790,30 +818,49 @@ func (ihnc *indexHashedNodesCoordinator) computeNodesConfigFromList( } func (ihnc *indexHashedNodesCoordinator) addValidatorToPreviousMap( - previousEpochConfig *epochNodesConfig, eligibleMap map[uint32][]Validator, waitingMap map[uint32][]Validator, currentValidator *validator, - currentValidatorShardId uint32) { - - if !ihnc.flagWaitingListFix.IsSet() { - eligibleMap[currentValidatorShardId] = append(eligibleMap[currentValidatorShardId], currentValidator) + validatorInfo *state.ShardValidatorInfo, +) { + shardId := validatorInfo.ShardId + previousList := validatorInfo.PreviousList + + log.Debug("checking leaving node", + "current list", validatorInfo.List, + "previous list", previousList, + "current index", validatorInfo.Index, + "previous index", validatorInfo.PreviousIndex, + "pk", currentValidator.PubKey(), + "shardId", shardId) + + if !ihnc.flagStakingV4Started.IsSet() || len(previousList) == 0 { + log.Debug("leaving node before staking v4 or with not previous list set node found in", + "list", "eligible", "shardId", shardId, "previous list", previousList) + eligibleMap[shardId] = append(eligibleMap[shardId], currentValidator) return } - found, shardId := searchInMap(previousEpochConfig.eligibleMap, currentValidator.PubKey()) - if found { + if previousList == string(common.EligibleList) { log.Debug("leaving node found in", "list", 
"eligible", "shardId", shardId) - eligibleMap[shardId] = append(eligibleMap[currentValidatorShardId], currentValidator) + currentValidator.index = validatorInfo.PreviousIndex + eligibleMap[shardId] = append(eligibleMap[shardId], currentValidator) return } - found, shardId = searchInMap(previousEpochConfig.waitingMap, currentValidator.PubKey()) - if found { + if previousList == string(common.WaitingList) { log.Debug("leaving node found in", "list", "waiting", "shardId", shardId) - waitingMap[shardId] = append(waitingMap[currentValidatorShardId], currentValidator) + currentValidator.index = validatorInfo.PreviousIndex + waitingMap[shardId] = append(waitingMap[shardId], currentValidator) return } + + log.Debug("leaving node not found in eligible or waiting", + "previous list", previousList, + "current index", validatorInfo.Index, + "previous index", validatorInfo.PreviousIndex, + "pk", currentValidator.PubKey(), + "shardId", shardId) } func (ihnc *indexHashedNodesCoordinator) handleErrorLog(err error, message string) { @@ -837,7 +884,7 @@ func (ihnc *indexHashedNodesCoordinator) EpochStartAction(hdr data.HeaderHandler needToRemove := epochToRemove >= 0 ihnc.currentEpoch = newEpoch - err := ihnc.saveState(ihnc.savedStateKey) + err := ihnc.saveState(ihnc.savedStateKey, newEpoch) ihnc.handleErrorLog(err, "saving nodes coordinator config failed") ihnc.mutNodesConfig.Lock() @@ -1044,6 +1091,18 @@ func (ihnc *indexHashedNodesCoordinator) computeShardForSelfPublicKey(nodesConfi return shardId, true } + if ihnc.flagStakingV4Step2.IsSet() { + found, shardId = searchInMap(nodesConfig.shuffledOutMap, pubKey) + if found { + log.Trace("computeShardForSelfPublicKey found validator in shuffled out", + "epoch", ihnc.currentEpoch, + "shard", shardId, + "validator PK", pubKey, + ) + return shardId, true + } + } + log.Trace("computeShardForSelfPublicKey returned default", "shard", selfShard, ) @@ -1241,8 +1300,11 @@ func (ihnc *indexHashedNodesCoordinator) getShardValidatorInfoData(txHash []byte } func (ihnc *indexHashedNodesCoordinator) updateEpochFlags(epoch uint32) { - ihnc.flagWaitingListFix.SetValue(epoch >= ihnc.enableEpochsHandler.GetActivationEpoch(common.WaitingListFixFlag)) - log.Debug("indexHashedNodesCoordinator: waiting list fix", "enabled", ihnc.flagWaitingListFix.IsSet()) + ihnc.flagStakingV4Started.SetValue(epoch >= ihnc.enableEpochsHandler.GetActivationEpoch(common.StakingV4Step1Flag)) + log.Debug("indexHashedNodesCoordinator: flagStakingV4Started", "enabled", ihnc.flagStakingV4Started.IsSet()) + + ihnc.flagStakingV4Step2.SetValue(epoch >= ihnc.enableEpochsHandler.GetActivationEpoch(common.StakingV4Step2Flag)) + log.Debug("indexHashedNodesCoordinator: flagStakingV4Step2", "enabled", ihnc.flagStakingV4Step2.IsSet()) } // GetWaitingEpochsLeftForPublicKey returns the number of epochs left for the public key until it becomes eligible diff --git a/sharding/nodesCoordinator/indexHashedNodesCoordinatorLite.go b/sharding/nodesCoordinator/indexHashedNodesCoordinatorLite.go index bb96c6ec15a..3b80e8bdd23 100644 --- a/sharding/nodesCoordinator/indexHashedNodesCoordinatorLite.go +++ b/sharding/nodesCoordinator/indexHashedNodesCoordinatorLite.go @@ -6,7 +6,7 @@ import ( // SetNodesConfigFromValidatorsInfo sets epoch config based on validators list configuration func (ihnc *indexHashedNodesCoordinator) SetNodesConfigFromValidatorsInfo(epoch uint32, randomness []byte, validatorsInfo []*state.ShardValidatorInfo) error { - newNodesConfig, err := ihnc.computeNodesConfigFromList(&epochNodesConfig{}, 
validatorsInfo) + newNodesConfig, err := ihnc.computeNodesConfigFromList(validatorsInfo) if err != nil { return err } @@ -41,7 +41,7 @@ func (ihnc *indexHashedNodesCoordinator) SetNodesConfigFromValidatorsInfo(epoch resUpdateNodes.Leaving, ) - err = ihnc.setNodesPerShards(resUpdateNodes.Eligible, resUpdateNodes.Waiting, leavingNodesMap, epoch) + err = ihnc.setNodesPerShards(resUpdateNodes.Eligible, resUpdateNodes.Waiting, leavingNodesMap, resUpdateNodes.ShuffledOut, epoch) if err != nil { return err } diff --git a/sharding/nodesCoordinator/indexHashedNodesCoordinatorRegistry.go b/sharding/nodesCoordinator/indexHashedNodesCoordinatorRegistry.go index 40f9995febe..813929bac90 100644 --- a/sharding/nodesCoordinator/indexHashedNodesCoordinatorRegistry.go +++ b/sharding/nodesCoordinator/indexHashedNodesCoordinatorRegistry.go @@ -1,35 +1,12 @@ package nodesCoordinator import ( - "encoding/json" "fmt" "strconv" "github.com/multiversx/mx-chain-go/common" ) -// SerializableValidator holds the minimal data required for marshalling and un-marshalling a validator -type SerializableValidator struct { - PubKey []byte `json:"pubKey"` - Chances uint32 `json:"chances"` - Index uint32 `json:"index"` -} - -// EpochValidators holds one epoch configuration for a nodes coordinator -type EpochValidators struct { - EligibleValidators map[string][]*SerializableValidator `json:"eligibleValidators"` - WaitingValidators map[string][]*SerializableValidator `json:"waitingValidators"` - LeavingValidators map[string][]*SerializableValidator `json:"leavingValidators"` -} - -// NodesCoordinatorRegistry holds the data that can be used to initialize a nodes coordinator -type NodesCoordinatorRegistry struct { - EpochsConfig map[string]*EpochValidators `json:"epochConfigs"` - CurrentEpoch uint32 `json:"currentEpoch"` -} - -// TODO: add proto marshalizer for these package - replace all json marshalizers - // LoadState loads the nodes coordinator state from the used boot storage func (ihnc *indexHashedNodesCoordinator) LoadState(key []byte) error { return ihnc.baseLoadState(key) @@ -48,8 +25,7 @@ func (ihnc *indexHashedNodesCoordinator) baseLoadState(key []byte) error { return err } - config := &NodesCoordinatorRegistry{} - err = json.Unmarshal(data, config) + config, err := ihnc.nodesCoordinatorRegistryFactory.CreateNodesCoordinatorRegistry(data) if err != nil { return err } @@ -58,8 +34,8 @@ func (ihnc *indexHashedNodesCoordinator) baseLoadState(key []byte) error { ihnc.savedStateKey = key ihnc.mutSavedStateKey.Unlock() - ihnc.currentEpoch = config.CurrentEpoch - log.Debug("loaded nodes config", "current epoch", config.CurrentEpoch) + ihnc.currentEpoch = config.GetCurrentEpoch() + log.Debug("loaded nodes config", "current epoch", config.GetCurrentEpoch()) nodesConfig, err := ihnc.registryToNodesCoordinator(config) if err != nil { @@ -83,22 +59,31 @@ func displayNodesConfigInfo(config map[uint32]*epochNodesConfig) { } } -func (ihnc *indexHashedNodesCoordinator) saveState(key []byte) error { - registry := ihnc.NodesCoordinatorToRegistry() - data, err := json.Marshal(registry) +func (ihnc *indexHashedNodesCoordinator) saveState(key []byte, epoch uint32) error { + registry := ihnc.NodesCoordinatorToRegistry(epoch) + data, err := ihnc.nodesCoordinatorRegistryFactory.GetRegistryData(registry, ihnc.currentEpoch) if err != nil { return err } - ncInternalkey := append([]byte(common.NodesCoordinatorRegistryKeyPrefix), key...) 
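// Note — illustrative sketch only, not part of the patch. The hunks above replace the
// hand-rolled JSON (un)marshalling of the nodes coordinator registry with the injected
// nodesCoordinatorRegistryFactory (CreateNodesCoordinatorRegistry on load, GetRegistryData on
// save), and saveState now also receives the epoch. A minimal save/load round trip, mirroring
// the registry tests later in this patch (the key name and error handling are hypothetical):
//
//	key := []byte("config")
//	if err := ihnc.saveState(key, stakingV4Epoch); err != nil { // serialized via GetRegistryData
//		return err
//	}
//	delete(ihnc.nodesConfig, stakingV4Epoch)                    // drop the in-memory config
//	if err := ihnc.LoadState(key); err != nil {                 // decoded via CreateNodesCoordinatorRegistry
//		return err
//	}
//	// ihnc.nodesConfig[stakingV4Epoch] is restored, including the new shuffledOutMap.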
- - log.Debug("saving nodes coordinator config", "key", ncInternalkey) + ncInternalKey := append([]byte(common.NodesCoordinatorRegistryKeyPrefix), key...) + log.Debug("saving nodes coordinator config", "key", ncInternalKey, "epoch", epoch) - return ihnc.bootStorer.Put(ncInternalkey, data) + return ihnc.bootStorer.Put(ncInternalKey, data) } // NodesCoordinatorToRegistry will export the nodesCoordinator data to the registry -func (ihnc *indexHashedNodesCoordinator) NodesCoordinatorToRegistry() *NodesCoordinatorRegistry { +func (ihnc *indexHashedNodesCoordinator) NodesCoordinatorToRegistry(epoch uint32) NodesCoordinatorRegistryHandler { + if epoch >= ihnc.enableEpochsHandler.GetActivationEpoch(common.StakingV4Step2Flag) { + log.Debug("indexHashedNodesCoordinator.NodesCoordinatorToRegistry called with auction registry", "epoch", epoch) + return ihnc.nodesCoordinatorToRegistryWithAuction() + } + + log.Debug("indexHashedNodesCoordinator.NodesCoordinatorToRegistry called with old registry", "epoch", epoch) + return ihnc.nodesCoordinatorToOldRegistry() +} + +func (ihnc *indexHashedNodesCoordinator) nodesCoordinatorToOldRegistry() NodesCoordinatorRegistryHandler { ihnc.mutNodesConfig.RLock() defer ihnc.mutNodesConfig.RUnlock() @@ -107,13 +92,8 @@ func (ihnc *indexHashedNodesCoordinator) NodesCoordinatorToRegistry() *NodesCoor EpochsConfig: make(map[string]*EpochValidators), } - minEpoch := 0 - lastEpoch := ihnc.getLastEpochConfig() - if lastEpoch >= nodesCoordinatorStoredEpochs { - minEpoch = int(lastEpoch) - nodesCoordinatorStoredEpochs + 1 - } - - for epoch := uint32(minEpoch); epoch <= lastEpoch; epoch++ { + minEpoch, lastEpoch := ihnc.getMinAndLastEpoch() + for epoch := minEpoch; epoch <= lastEpoch; epoch++ { epochNodesData, ok := ihnc.nodesConfig[epoch] if !ok { continue @@ -125,6 +105,16 @@ func (ihnc *indexHashedNodesCoordinator) NodesCoordinatorToRegistry() *NodesCoor return registry } +func (ihnc *indexHashedNodesCoordinator) getMinAndLastEpoch() (uint32, uint32) { + minEpoch := 0 + lastEpoch := ihnc.getLastEpochConfig() + if lastEpoch >= nodesCoordinatorStoredEpochs { + minEpoch = int(lastEpoch) - nodesCoordinatorStoredEpochs + 1 + } + + return uint32(minEpoch), lastEpoch +} + func (ihnc *indexHashedNodesCoordinator) getLastEpochConfig() uint32 { lastEpoch := uint32(0) for epoch := range ihnc.nodesConfig { @@ -137,13 +127,13 @@ func (ihnc *indexHashedNodesCoordinator) getLastEpochConfig() uint32 { } func (ihnc *indexHashedNodesCoordinator) registryToNodesCoordinator( - config *NodesCoordinatorRegistry, + config NodesCoordinatorRegistryHandler, ) (map[uint32]*epochNodesConfig, error) { var err error var epoch int64 result := make(map[uint32]*epochNodesConfig) - for epochStr, epochValidators := range config.EpochsConfig { + for epochStr, epochValidators := range config.GetEpochsConfig() { epoch, err = strconv.ParseInt(epochStr, 10, 64) if err != nil { return nil, err @@ -197,25 +187,33 @@ func epochNodesConfigToEpochValidators(config *epochNodesConfig) *EpochValidator return result } -func epochValidatorsToEpochNodesConfig(config *EpochValidators) (*epochNodesConfig, error) { +func epochValidatorsToEpochNodesConfig(config EpochValidatorsHandler) (*epochNodesConfig, error) { result := &epochNodesConfig{} var err error - result.eligibleMap, err = serializableValidatorsMapToValidatorsMap(config.EligibleValidators) + result.eligibleMap, err = serializableValidatorsMapToValidatorsMap(config.GetEligibleValidators()) if err != nil { return nil, err } - result.waitingMap, err = 
serializableValidatorsMapToValidatorsMap(config.WaitingValidators) + result.waitingMap, err = serializableValidatorsMapToValidatorsMap(config.GetWaitingValidators()) if err != nil { return nil, err } - result.leavingMap, err = serializableValidatorsMapToValidatorsMap(config.LeavingValidators) + result.leavingMap, err = serializableValidatorsMapToValidatorsMap(config.GetLeavingValidators()) if err != nil { return nil, err } + configWithAuction, castOk := config.(EpochValidatorsHandlerWithAuction) + if castOk { + result.shuffledOutMap, err = serializableValidatorsMapToValidatorsMap(configWithAuction.GetShuffledOutValidators()) + if err != nil { + return nil, err + } + } + return result, nil } diff --git a/sharding/nodesCoordinator/indexHashedNodesCoordinatorRegistryWithAuction.go b/sharding/nodesCoordinator/indexHashedNodesCoordinatorRegistryWithAuction.go new file mode 100644 index 00000000000..261aa60aefc --- /dev/null +++ b/sharding/nodesCoordinator/indexHashedNodesCoordinatorRegistryWithAuction.go @@ -0,0 +1,55 @@ +package nodesCoordinator + +import ( + "fmt" +) + +// nodesCoordinatorToRegistryWithAuction will export the nodesCoordinator data to the registry which contains auction list +func (ihnc *indexHashedNodesCoordinator) nodesCoordinatorToRegistryWithAuction() *NodesCoordinatorRegistryWithAuction { + ihnc.mutNodesConfig.RLock() + defer ihnc.mutNodesConfig.RUnlock() + + registry := &NodesCoordinatorRegistryWithAuction{ + CurrentEpoch: ihnc.currentEpoch, + EpochsConfigWithAuction: make(map[string]*EpochValidatorsWithAuction), + } + + minEpoch, lastEpoch := ihnc.getMinAndLastEpoch() + for epoch := minEpoch; epoch <= lastEpoch; epoch++ { + epochNodesData, ok := ihnc.nodesConfig[epoch] + if !ok { + continue + } + + registry.EpochsConfigWithAuction[fmt.Sprint(epoch)] = epochNodesConfigToEpochValidatorsWithAuction(epochNodesData) + } + + return registry +} + +func epochNodesConfigToEpochValidatorsWithAuction(config *epochNodesConfig) *EpochValidatorsWithAuction { + result := &EpochValidatorsWithAuction{ + Eligible: make(map[string]Validators, len(config.eligibleMap)), + Waiting: make(map[string]Validators, len(config.waitingMap)), + Leaving: make(map[string]Validators, len(config.leavingMap)), + ShuffledOut: make(map[string]Validators, len(config.shuffledOutMap)), + } + + for k, v := range config.eligibleMap { + result.Eligible[fmt.Sprint(k)] = Validators{Data: ValidatorArrayToSerializableValidatorArray(v)} + } + + for k, v := range config.waitingMap { + result.Waiting[fmt.Sprint(k)] = Validators{Data: ValidatorArrayToSerializableValidatorArray(v)} + } + + for k, v := range config.leavingMap { + result.Leaving[fmt.Sprint(k)] = Validators{Data: ValidatorArrayToSerializableValidatorArray(v)} + } + + for k, v := range config.shuffledOutMap { + result.ShuffledOut[fmt.Sprint(k)] = Validators{Data: ValidatorArrayToSerializableValidatorArray(v)} + } + + return result +} diff --git a/sharding/nodesCoordinator/indexHashedNodesCoordinatorRegistry_test.go b/sharding/nodesCoordinator/indexHashedNodesCoordinatorRegistry_test.go index 348c7a74280..b2b99e6e87b 100644 --- a/sharding/nodesCoordinator/indexHashedNodesCoordinatorRegistry_test.go +++ b/sharding/nodesCoordinator/indexHashedNodesCoordinatorRegistry_test.go @@ -6,6 +6,9 @@ import ( "strconv" "testing" + "github.com/multiversx/mx-chain-core-go/core" + "github.com/multiversx/mx-chain-go/common" + "github.com/multiversx/mx-chain-go/testscommon/enableEpochsHandlerMock" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) @@ 
-73,13 +76,23 @@ func validatorsEqualSerializableValidators(validators []Validator, sValidators [ } func TestIndexHashedNodesCoordinator_LoadStateAfterSave(t *testing.T) { + t.Parallel() + args := createArguments() + args.EnableEpochsHandler = &enableEpochsHandlerMock.EnableEpochsHandlerStub{ + GetActivationEpochCalled: func(flag core.EnableEpochFlag) uint32 { + if flag == common.StakingV4Step2Flag { + return stakingV4Epoch + } + return 0 + }, + } nodesCoordinator, _ := NewIndexHashedNodesCoordinator(args) expectedConfig := nodesCoordinator.nodesConfig[0] key := []byte("config") - err := nodesCoordinator.saveState(key) + err := nodesCoordinator.saveState(key, 0) assert.Nil(t, err) delete(nodesCoordinator.nodesConfig, 0) @@ -94,26 +107,77 @@ func TestIndexHashedNodesCoordinator_LoadStateAfterSave(t *testing.T) { assert.True(t, sameValidatorsMaps(expectedConfig.waitingMap, actualConfig.waitingMap)) } -func TestIndexHashedNodesCooridinator_nodesCoordinatorToRegistry(t *testing.T) { +func TestIndexHashedNodesCoordinator_LoadStateAfterSaveWithStakingV4(t *testing.T) { + t.Parallel() + + args := createArguments() + args.Epoch = stakingV4Epoch + nodesCoordinator, _ := NewIndexHashedNodesCoordinator(args) + + nodesCoordinator.nodesConfig[stakingV4Epoch].leavingMap = createDummyNodesMap(3, 0, string(common.LeavingList)) + nodesCoordinator.nodesConfig[stakingV4Epoch].shuffledOutMap = createDummyNodesMap(3, 0, string(common.SelectedFromAuctionList)) + expectedConfig := nodesCoordinator.nodesConfig[stakingV4Epoch] + + key := []byte("config") + err := nodesCoordinator.saveState(key, stakingV4Epoch) + assert.Nil(t, err) + + delete(nodesCoordinator.nodesConfig, 0) + err = nodesCoordinator.LoadState(key) + assert.Nil(t, err) + + actualConfig := nodesCoordinator.nodesConfig[stakingV4Epoch] + assert.Equal(t, expectedConfig.shardID, actualConfig.shardID) + assert.Equal(t, expectedConfig.nbShards, actualConfig.nbShards) + assert.True(t, sameValidatorsMaps(expectedConfig.eligibleMap, actualConfig.eligibleMap)) + assert.True(t, sameValidatorsMaps(expectedConfig.waitingMap, actualConfig.waitingMap)) + assert.True(t, sameValidatorsMaps(expectedConfig.shuffledOutMap, actualConfig.shuffledOutMap)) + assert.True(t, sameValidatorsMaps(expectedConfig.leavingMap, actualConfig.leavingMap)) +} + +func TestIndexHashedNodesCoordinator_nodesCoordinatorToRegistryWithStakingV4(t *testing.T) { + args := createArguments() + args.Epoch = stakingV4Epoch + nodesCoordinator, _ := NewIndexHashedNodesCoordinator(args) + + nodesCoordinator.nodesConfig[stakingV4Epoch].leavingMap = createDummyNodesMap(3, 0, string(common.LeavingList)) + nodesCoordinator.nodesConfig[stakingV4Epoch].shuffledOutMap = createDummyNodesMap(3, 0, string(common.SelectedFromAuctionList)) + + ncr := nodesCoordinator.NodesCoordinatorToRegistry(stakingV4Epoch) + nc := nodesCoordinator.nodesConfig + + assert.Equal(t, nodesCoordinator.currentEpoch, ncr.GetCurrentEpoch()) + assert.Equal(t, len(nodesCoordinator.nodesConfig), len(ncr.GetEpochsConfig())) + + for epoch, config := range nc { + ncrWithAuction := ncr.GetEpochsConfig()[fmt.Sprint(epoch)].(EpochValidatorsHandlerWithAuction) + assert.True(t, sameValidatorsDifferentMapTypes(config.waitingMap, ncrWithAuction.GetWaitingValidators())) + assert.True(t, sameValidatorsDifferentMapTypes(config.leavingMap, ncrWithAuction.GetLeavingValidators())) + assert.True(t, sameValidatorsDifferentMapTypes(config.eligibleMap, ncrWithAuction.GetEligibleValidators())) + assert.True(t, 
sameValidatorsDifferentMapTypes(config.shuffledOutMap, ncrWithAuction.GetShuffledOutValidators())) + } +} + +func TestIndexHashedNodesCoordinator_nodesCoordinatorToRegistry(t *testing.T) { args := createArguments() nodesCoordinator, _ := NewIndexHashedNodesCoordinator(args) - ncr := nodesCoordinator.NodesCoordinatorToRegistry() + ncr := nodesCoordinator.NodesCoordinatorToRegistry(args.Epoch) nc := nodesCoordinator.nodesConfig - assert.Equal(t, nodesCoordinator.currentEpoch, ncr.CurrentEpoch) - assert.Equal(t, len(nodesCoordinator.nodesConfig), len(ncr.EpochsConfig)) + assert.Equal(t, nodesCoordinator.currentEpoch, ncr.GetCurrentEpoch()) + assert.Equal(t, len(nodesCoordinator.nodesConfig), len(ncr.GetEpochsConfig())) for epoch, config := range nc { - assert.True(t, sameValidatorsDifferentMapTypes(config.eligibleMap, ncr.EpochsConfig[fmt.Sprint(epoch)].EligibleValidators)) - assert.True(t, sameValidatorsDifferentMapTypes(config.waitingMap, ncr.EpochsConfig[fmt.Sprint(epoch)].WaitingValidators)) + assert.True(t, sameValidatorsDifferentMapTypes(config.eligibleMap, ncr.GetEpochsConfig()[fmt.Sprint(epoch)].GetEligibleValidators())) + assert.True(t, sameValidatorsDifferentMapTypes(config.waitingMap, ncr.GetEpochsConfig()[fmt.Sprint(epoch)].GetWaitingValidators())) } } func TestIndexHashedNodesCoordinator_registryToNodesCoordinator(t *testing.T) { args := createArguments() nodesCoordinator1, _ := NewIndexHashedNodesCoordinator(args) - ncr := nodesCoordinator1.NodesCoordinatorToRegistry() + ncr := nodesCoordinator1.NodesCoordinatorToRegistry(args.Epoch) args = createArguments() nodesCoordinator2, _ := NewIndexHashedNodesCoordinator(args) @@ -147,17 +211,17 @@ func TestIndexHashedNodesCooridinator_nodesCoordinatorToRegistryLimitNumEpochsIn } } - ncr := nodesCoordinator.NodesCoordinatorToRegistry() + ncr := nodesCoordinator.NodesCoordinatorToRegistry(args.Epoch) nc := nodesCoordinator.nodesConfig - require.Equal(t, nodesCoordinator.currentEpoch, ncr.CurrentEpoch) - require.Equal(t, nodesCoordinatorStoredEpochs, len(ncr.EpochsConfig)) + require.Equal(t, nodesCoordinator.currentEpoch, ncr.GetCurrentEpoch()) + require.Equal(t, nodesCoordinatorStoredEpochs, len(ncr.GetEpochsConfig())) - for epochStr := range ncr.EpochsConfig { + for epochStr := range ncr.GetEpochsConfig() { epoch, err := strconv.Atoi(epochStr) require.Nil(t, err) - require.True(t, sameValidatorsDifferentMapTypes(nc[uint32(epoch)].eligibleMap, ncr.EpochsConfig[epochStr].EligibleValidators)) - require.True(t, sameValidatorsDifferentMapTypes(nc[uint32(epoch)].waitingMap, ncr.EpochsConfig[epochStr].WaitingValidators)) + require.True(t, sameValidatorsDifferentMapTypes(nc[uint32(epoch)].eligibleMap, ncr.GetEpochsConfig()[epochStr].GetEligibleValidators())) + require.True(t, sameValidatorsDifferentMapTypes(nc[uint32(epoch)].waitingMap, ncr.GetEpochsConfig()[epochStr].GetWaitingValidators())) } } diff --git a/sharding/nodesCoordinator/indexHashedNodesCoordinatorWithRater.go b/sharding/nodesCoordinator/indexHashedNodesCoordinatorWithRater.go index c9e4779e73f..689fe95d341 100644 --- a/sharding/nodesCoordinator/indexHashedNodesCoordinatorWithRater.go +++ b/sharding/nodesCoordinator/indexHashedNodesCoordinatorWithRater.go @@ -76,7 +76,7 @@ func (ihnc *indexHashedNodesCoordinatorWithRater) ComputeAdditionalLeaving(allVa return extraLeavingNodesMap, nil } -//IsInterfaceNil verifies that the underlying value is nil +// IsInterfaceNil verifies that the underlying value is nil func (ihnc *indexHashedNodesCoordinatorWithRater) IsInterfaceNil() bool { 
return ihnc == nil } diff --git a/sharding/nodesCoordinator/indexHashedNodesCoordinatorWithRater_test.go b/sharding/nodesCoordinator/indexHashedNodesCoordinatorWithRater_test.go index dfd1bbbe2ad..40286a0c135 100644 --- a/sharding/nodesCoordinator/indexHashedNodesCoordinatorWithRater_test.go +++ b/sharding/nodesCoordinator/indexHashedNodesCoordinatorWithRater_test.go @@ -55,7 +55,7 @@ func TestIndexHashedGroupSelectorWithRater_SetNilEligibleMapShouldErr(t *testing waiting := createDummyNodesMap(2, 1, "waiting") nc, _ := NewIndexHashedNodesCoordinator(createArguments()) ihnc, _ := NewIndexHashedNodesCoordinatorWithRater(nc, &mock.RaterMock{}) - assert.Equal(t, ErrNilInputNodesMap, ihnc.setNodesPerShards(nil, waiting, nil, 0)) + assert.Equal(t, ErrNilInputNodesMap, ihnc.setNodesPerShards(nil, waiting, nil, nil, 0)) } func TestIndexHashedGroupSelectorWithRater_OkValShouldWork(t *testing.T) { @@ -79,25 +79,26 @@ func TestIndexHashedGroupSelectorWithRater_OkValShouldWork(t *testing.T) { bootStorer := genericMocks.NewStorerMock() arguments := ArgNodesCoordinator{ - ShardConsensusGroupSize: 2, - MetaConsensusGroupSize: 1, - Marshalizer: &mock.MarshalizerMock{}, - Hasher: &hashingMocks.HasherMock{}, - Shuffler: nodeShuffler, - EpochStartNotifier: epochStartSubscriber, - BootStorer: bootStorer, - NbShards: 1, - EligibleNodes: eligibleMap, - WaitingNodes: waitingMap, - SelfPublicKey: []byte("test"), - ConsensusGroupCache: &mock.NodesCoordinatorCacheMock{}, - ShuffledOutHandler: &mock.ShuffledOutHandlerStub{}, - ChanStopNode: make(chan endProcess.ArgEndProcess), - NodeTypeProvider: &nodeTypeProviderMock.NodeTypeProviderStub{}, - IsFullArchive: false, - EnableEpochsHandler: &mock.EnableEpochsHandlerMock{}, - ValidatorInfoCacher: &vic.ValidatorInfoCacherStub{}, - GenesisNodesSetupHandler: &mock.NodesSetupMock{}, + ShardConsensusGroupSize: 2, + MetaConsensusGroupSize: 1, + Marshalizer: &mock.MarshalizerMock{}, + Hasher: &hashingMocks.HasherMock{}, + Shuffler: nodeShuffler, + EpochStartNotifier: epochStartSubscriber, + BootStorer: bootStorer, + NbShards: 1, + EligibleNodes: eligibleMap, + WaitingNodes: waitingMap, + SelfPublicKey: []byte("test"), + ConsensusGroupCache: &mock.NodesCoordinatorCacheMock{}, + ShuffledOutHandler: &mock.ShuffledOutHandlerStub{}, + ChanStopNode: make(chan endProcess.ArgEndProcess), + NodeTypeProvider: &nodeTypeProviderMock.NodeTypeProviderStub{}, + IsFullArchive: false, + EnableEpochsHandler: &mock.EnableEpochsHandlerMock{}, + ValidatorInfoCacher: &vic.ValidatorInfoCacherStub{}, + GenesisNodesSetupHandler: &mock.NodesSetupMock{}, + NodesCoordinatorRegistryFactory: createNodesCoordinatorRegistryFactory(), } nc, err := NewIndexHashedNodesCoordinator(arguments) assert.Nil(t, err) @@ -328,25 +329,26 @@ func TestIndexHashedGroupSelectorWithRater_GetValidatorWithPublicKeyShouldReturn bootStorer := genericMocks.NewStorerMock() arguments := ArgNodesCoordinator{ - ShardConsensusGroupSize: 1, - MetaConsensusGroupSize: 1, - Marshalizer: &mock.MarshalizerMock{}, - Hasher: &hashingMocks.HasherMock{}, - Shuffler: nodeShuffler, - EpochStartNotifier: epochStartSubscriber, - BootStorer: bootStorer, - NbShards: 1, - EligibleNodes: eligibleMap, - WaitingNodes: waitingMap, - SelfPublicKey: []byte("key"), - ConsensusGroupCache: &mock.NodesCoordinatorCacheMock{}, - ShuffledOutHandler: &mock.ShuffledOutHandlerStub{}, - ChanStopNode: make(chan endProcess.ArgEndProcess), - NodeTypeProvider: &nodeTypeProviderMock.NodeTypeProviderStub{}, - IsFullArchive: false, - EnableEpochsHandler: 
&mock.EnableEpochsHandlerMock{}, - ValidatorInfoCacher: &vic.ValidatorInfoCacherStub{}, - GenesisNodesSetupHandler: &mock.NodesSetupMock{}, + ShardConsensusGroupSize: 1, + MetaConsensusGroupSize: 1, + Marshalizer: &mock.MarshalizerMock{}, + Hasher: &hashingMocks.HasherMock{}, + Shuffler: nodeShuffler, + EpochStartNotifier: epochStartSubscriber, + BootStorer: bootStorer, + NbShards: 1, + EligibleNodes: eligibleMap, + WaitingNodes: waitingMap, + SelfPublicKey: []byte("key"), + ConsensusGroupCache: &mock.NodesCoordinatorCacheMock{}, + ShuffledOutHandler: &mock.ShuffledOutHandlerStub{}, + ChanStopNode: make(chan endProcess.ArgEndProcess), + NodeTypeProvider: &nodeTypeProviderMock.NodeTypeProviderStub{}, + IsFullArchive: false, + EnableEpochsHandler: &mock.EnableEpochsHandlerMock{}, + ValidatorInfoCacher: &vic.ValidatorInfoCacherStub{}, + GenesisNodesSetupHandler: &mock.NodesSetupMock{}, + NodesCoordinatorRegistryFactory: createNodesCoordinatorRegistryFactory(), } nc, _ := NewIndexHashedNodesCoordinator(arguments) ihnc, _ := NewIndexHashedNodesCoordinatorWithRater(nc, &mock.RaterMock{}) @@ -383,25 +385,26 @@ func TestIndexHashedGroupSelectorWithRater_GetValidatorWithPublicKeyShouldReturn bootStorer := genericMocks.NewStorerMock() arguments := ArgNodesCoordinator{ - ShardConsensusGroupSize: 1, - MetaConsensusGroupSize: 1, - Marshalizer: &mock.MarshalizerMock{}, - Hasher: &hashingMocks.HasherMock{}, - Shuffler: nodeShuffler, - EpochStartNotifier: epochStartSubscriber, - BootStorer: bootStorer, - NbShards: 1, - EligibleNodes: eligibleMap, - WaitingNodes: waitingMap, - SelfPublicKey: []byte("key"), - ConsensusGroupCache: &mock.NodesCoordinatorCacheMock{}, - ShuffledOutHandler: &mock.ShuffledOutHandlerStub{}, - ChanStopNode: make(chan endProcess.ArgEndProcess), - NodeTypeProvider: &nodeTypeProviderMock.NodeTypeProviderStub{}, - IsFullArchive: false, - EnableEpochsHandler: &mock.EnableEpochsHandlerMock{}, - ValidatorInfoCacher: &vic.ValidatorInfoCacherStub{}, - GenesisNodesSetupHandler: &mock.NodesSetupMock{}, + ShardConsensusGroupSize: 1, + MetaConsensusGroupSize: 1, + Marshalizer: &mock.MarshalizerMock{}, + Hasher: &hashingMocks.HasherMock{}, + Shuffler: nodeShuffler, + EpochStartNotifier: epochStartSubscriber, + BootStorer: bootStorer, + NbShards: 1, + EligibleNodes: eligibleMap, + WaitingNodes: waitingMap, + SelfPublicKey: []byte("key"), + ConsensusGroupCache: &mock.NodesCoordinatorCacheMock{}, + ShuffledOutHandler: &mock.ShuffledOutHandlerStub{}, + ChanStopNode: make(chan endProcess.ArgEndProcess), + NodeTypeProvider: &nodeTypeProviderMock.NodeTypeProviderStub{}, + IsFullArchive: false, + EnableEpochsHandler: &mock.EnableEpochsHandlerMock{}, + ValidatorInfoCacher: &vic.ValidatorInfoCacherStub{}, + GenesisNodesSetupHandler: &mock.NodesSetupMock{}, + NodesCoordinatorRegistryFactory: createNodesCoordinatorRegistryFactory(), } nc, _ := NewIndexHashedNodesCoordinator(arguments) ihnc, _ := NewIndexHashedNodesCoordinatorWithRater(nc, &mock.RaterMock{}) @@ -452,25 +455,26 @@ func TestIndexHashedGroupSelectorWithRater_GetValidatorWithPublicKeyShouldWork(t eligibleMap[1] = listShard1 arguments := ArgNodesCoordinator{ - ShardConsensusGroupSize: 1, - MetaConsensusGroupSize: 1, - Marshalizer: &mock.MarshalizerMock{}, - Hasher: &hashingMocks.HasherMock{}, - Shuffler: nodeShuffler, - EpochStartNotifier: epochStartSubscriber, - BootStorer: bootStorer, - NbShards: 2, - EligibleNodes: eligibleMap, - WaitingNodes: waitingMap, - SelfPublicKey: []byte("key"), - ConsensusGroupCache: &mock.NodesCoordinatorCacheMock{}, 
- ShuffledOutHandler: &mock.ShuffledOutHandlerStub{}, - ChanStopNode: make(chan endProcess.ArgEndProcess), - NodeTypeProvider: &nodeTypeProviderMock.NodeTypeProviderStub{}, - IsFullArchive: false, - EnableEpochsHandler: &mock.EnableEpochsHandlerMock{}, - ValidatorInfoCacher: &vic.ValidatorInfoCacherStub{}, - GenesisNodesSetupHandler: &mock.NodesSetupMock{}, + ShardConsensusGroupSize: 1, + MetaConsensusGroupSize: 1, + Marshalizer: &mock.MarshalizerMock{}, + Hasher: &hashingMocks.HasherMock{}, + Shuffler: nodeShuffler, + EpochStartNotifier: epochStartSubscriber, + BootStorer: bootStorer, + NbShards: 2, + EligibleNodes: eligibleMap, + WaitingNodes: waitingMap, + SelfPublicKey: []byte("key"), + ConsensusGroupCache: &mock.NodesCoordinatorCacheMock{}, + ShuffledOutHandler: &mock.ShuffledOutHandlerStub{}, + ChanStopNode: make(chan endProcess.ArgEndProcess), + NodeTypeProvider: &nodeTypeProviderMock.NodeTypeProviderStub{}, + IsFullArchive: false, + EnableEpochsHandler: &mock.EnableEpochsHandlerMock{}, + ValidatorInfoCacher: &vic.ValidatorInfoCacherStub{}, + GenesisNodesSetupHandler: &mock.NodesSetupMock{}, + NodesCoordinatorRegistryFactory: createNodesCoordinatorRegistryFactory(), } nc, _ := NewIndexHashedNodesCoordinator(arguments) ihnc, _ := NewIndexHashedNodesCoordinatorWithRater(nc, &mock.RaterMock{}) @@ -537,26 +541,27 @@ func TestIndexHashedGroupSelectorWithRater_GetAllEligibleValidatorsPublicKeys(t eligibleMap[shardOneId] = listShard1 arguments := ArgNodesCoordinator{ - ShardConsensusGroupSize: 1, - MetaConsensusGroupSize: 1, - Marshalizer: &mock.MarshalizerMock{}, - Hasher: &hashingMocks.HasherMock{}, - Shuffler: nodeShuffler, - EpochStartNotifier: epochStartSubscriber, - BootStorer: bootStorer, - ShardIDAsObserver: shardZeroId, - NbShards: 2, - EligibleNodes: eligibleMap, - WaitingNodes: waitingMap, - SelfPublicKey: []byte("key"), - ConsensusGroupCache: &mock.NodesCoordinatorCacheMock{}, - ShuffledOutHandler: &mock.ShuffledOutHandlerStub{}, - ChanStopNode: make(chan endProcess.ArgEndProcess), - NodeTypeProvider: &nodeTypeProviderMock.NodeTypeProviderStub{}, - IsFullArchive: false, - EnableEpochsHandler: &mock.EnableEpochsHandlerMock{}, - ValidatorInfoCacher: &vic.ValidatorInfoCacherStub{}, - GenesisNodesSetupHandler: &mock.NodesSetupMock{}, + ShardConsensusGroupSize: 1, + MetaConsensusGroupSize: 1, + Marshalizer: &mock.MarshalizerMock{}, + Hasher: &hashingMocks.HasherMock{}, + Shuffler: nodeShuffler, + EpochStartNotifier: epochStartSubscriber, + BootStorer: bootStorer, + ShardIDAsObserver: shardZeroId, + NbShards: 2, + EligibleNodes: eligibleMap, + WaitingNodes: waitingMap, + SelfPublicKey: []byte("key"), + ConsensusGroupCache: &mock.NodesCoordinatorCacheMock{}, + ShuffledOutHandler: &mock.ShuffledOutHandlerStub{}, + ChanStopNode: make(chan endProcess.ArgEndProcess), + NodeTypeProvider: &nodeTypeProviderMock.NodeTypeProviderStub{}, + IsFullArchive: false, + EnableEpochsHandler: &mock.EnableEpochsHandlerMock{}, + ValidatorInfoCacher: &vic.ValidatorInfoCacherStub{}, + GenesisNodesSetupHandler: &mock.NodesSetupMock{}, + NodesCoordinatorRegistryFactory: createNodesCoordinatorRegistryFactory(), } nc, _ := NewIndexHashedNodesCoordinator(arguments) @@ -781,8 +786,8 @@ func BenchmarkIndexHashedGroupSelectorWithRater_TestExpandList(b *testing.B) { } //a := []int{1, 2, 3, 4, 5, 6, 7, 8} - rand.Seed(time.Now().UnixNano()) - rand.Shuffle(len(array), func(i, j int) { array[i], array[j] = array[j], array[i] }) + rnd := rand.New(rand.NewSource(time.Now().UnixNano())) + rnd.Shuffle(len(array), func(i, j 
int) { array[i], array[j] = array[j], array[i] }) m2 := runtime.MemStats{} runtime.ReadMemStats(&m2) diff --git a/sharding/nodesCoordinator/indexHashedNodesCoordinator_test.go b/sharding/nodesCoordinator/indexHashedNodesCoordinator_test.go index c1c01a67680..5db65609f59 100644 --- a/sharding/nodesCoordinator/indexHashedNodesCoordinator_test.go +++ b/sharding/nodesCoordinator/indexHashedNodesCoordinator_test.go @@ -35,6 +35,8 @@ import ( "github.com/stretchr/testify/require" ) +const stakingV4Epoch = 444 + func createDummyNodesList(nbNodes uint32, suffix string) []Validator { list := make([]Validator, 0) hasher := sha256.NewSha256() @@ -82,6 +84,14 @@ func isStringSubgroup(a []string, b []string) bool { return found } +func createNodesCoordinatorRegistryFactory() NodesCoordinatorRegistryFactory { + ncf, _ := NewNodesCoordinatorRegistryFactory( + &marshal.GogoProtoMarshalizer{}, + stakingV4Epoch, + ) + return ncf +} + func createArguments() ArgNodesCoordinator { nbShards := uint32(1) eligibleMap := createDummyNodesMap(10, nbShards, "eligible") @@ -92,7 +102,6 @@ func createArguments() ArgNodesCoordinator { Hysteresis: hysteresis, Adaptivity: adaptivity, ShuffleBetweenShards: shuffleBetweenShards, - MaxNodesEnableConfig: nil, EnableEpochsHandler: &mock.EnableEpochsHandlerMock{}, } nodeShuffler, _ := NewHashValidatorsShuffler(shufflerArgs) @@ -120,8 +129,9 @@ func createArguments() ArgNodesCoordinator { EnableEpochsHandler: &mock.EnableEpochsHandlerMock{ IsRefactorPeersMiniBlocksFlagEnabledField: true, }, - ValidatorInfoCacher: &vic.ValidatorInfoCacherStub{}, - GenesisNodesSetupHandler: &mock.NodesSetupMock{}, + GenesisNodesSetupHandler: &mock.NodesSetupMock{}, + ValidatorInfoCacher: &vic.ValidatorInfoCacherStub{}, + NodesCoordinatorRegistryFactory: createNodesCoordinatorRegistryFactory(), } return arguments } @@ -246,7 +256,7 @@ func TestIndexHashedNodesCoordinator_SetNilEligibleMapShouldErr(t *testing.T) { arguments := createArguments() ihnc, _ := NewIndexHashedNodesCoordinator(arguments) - require.Equal(t, ErrNilInputNodesMap, ihnc.setNodesPerShards(nil, waitingMap, nil, 0)) + require.Equal(t, ErrNilInputNodesMap, ihnc.setNodesPerShards(nil, waitingMap, nil, nil, 0)) } func TestIndexHashedNodesCoordinator_SetNilWaitingMapShouldErr(t *testing.T) { @@ -256,7 +266,7 @@ func TestIndexHashedNodesCoordinator_SetNilWaitingMapShouldErr(t *testing.T) { arguments := createArguments() ihnc, _ := NewIndexHashedNodesCoordinator(arguments) - require.Equal(t, ErrNilInputNodesMap, ihnc.setNodesPerShards(eligibleMap, nil, nil, 0)) + require.Equal(t, ErrNilInputNodesMap, ihnc.setNodesPerShards(eligibleMap, nil, nil, nil, 0)) } func TestIndexHashedNodesCoordinator_OkValShouldWork(t *testing.T) { @@ -281,24 +291,25 @@ func TestIndexHashedNodesCoordinator_OkValShouldWork(t *testing.T) { bootStorer := genericMocks.NewStorerMock() arguments := ArgNodesCoordinator{ - ShardConsensusGroupSize: 2, - MetaConsensusGroupSize: 1, - Marshalizer: &mock.MarshalizerMock{}, - Hasher: &hashingMocks.HasherMock{}, - Shuffler: nodeShuffler, - EpochStartNotifier: epochStartSubscriber, - BootStorer: bootStorer, - NbShards: 1, - EligibleNodes: eligibleMap, - WaitingNodes: waitingMap, - SelfPublicKey: []byte("key"), - ConsensusGroupCache: &mock.NodesCoordinatorCacheMock{}, - ShuffledOutHandler: &mock.ShuffledOutHandlerStub{}, - ChanStopNode: make(chan endProcess.ArgEndProcess), - NodeTypeProvider: &nodeTypeProviderMock.NodeTypeProviderStub{}, - EnableEpochsHandler: &mock.EnableEpochsHandlerMock{}, - ValidatorInfoCacher: 
&vic.ValidatorInfoCacherStub{}, - GenesisNodesSetupHandler: &mock.NodesSetupMock{}, + ShardConsensusGroupSize: 2, + MetaConsensusGroupSize: 1, + Marshalizer: &mock.MarshalizerMock{}, + Hasher: &hashingMocks.HasherMock{}, + Shuffler: nodeShuffler, + EpochStartNotifier: epochStartSubscriber, + BootStorer: bootStorer, + NbShards: 1, + EligibleNodes: eligibleMap, + WaitingNodes: waitingMap, + SelfPublicKey: []byte("key"), + ConsensusGroupCache: &mock.NodesCoordinatorCacheMock{}, + ShuffledOutHandler: &mock.ShuffledOutHandlerStub{}, + ChanStopNode: make(chan endProcess.ArgEndProcess), + NodeTypeProvider: &nodeTypeProviderMock.NodeTypeProviderStub{}, + EnableEpochsHandler: &mock.EnableEpochsHandlerMock{}, + ValidatorInfoCacher: &vic.ValidatorInfoCacherStub{}, + GenesisNodesSetupHandler: &mock.NodesSetupMock{}, + NodesCoordinatorRegistryFactory: createNodesCoordinatorRegistryFactory(), } ihnc, err := NewIndexHashedNodesCoordinator(arguments) @@ -342,24 +353,25 @@ func TestIndexHashedNodesCoordinator_NewCoordinatorTooFewNodesShouldErr(t *testi bootStorer := genericMocks.NewStorerMock() arguments := ArgNodesCoordinator{ - ShardConsensusGroupSize: 10, - MetaConsensusGroupSize: 1, - Marshalizer: &mock.MarshalizerMock{}, - Hasher: &hashingMocks.HasherMock{}, - Shuffler: nodeShuffler, - EpochStartNotifier: epochStartSubscriber, - BootStorer: bootStorer, - NbShards: 1, - EligibleNodes: eligibleMap, - WaitingNodes: waitingMap, - SelfPublicKey: []byte("key"), - ConsensusGroupCache: &mock.NodesCoordinatorCacheMock{}, - ShuffledOutHandler: &mock.ShuffledOutHandlerStub{}, - ChanStopNode: make(chan endProcess.ArgEndProcess), - NodeTypeProvider: &nodeTypeProviderMock.NodeTypeProviderStub{}, - EnableEpochsHandler: &mock.EnableEpochsHandlerMock{}, - ValidatorInfoCacher: &vic.ValidatorInfoCacherStub{}, - GenesisNodesSetupHandler: &mock.NodesSetupMock{}, + ShardConsensusGroupSize: 10, + MetaConsensusGroupSize: 1, + Marshalizer: &mock.MarshalizerMock{}, + Hasher: &hashingMocks.HasherMock{}, + Shuffler: nodeShuffler, + EpochStartNotifier: epochStartSubscriber, + BootStorer: bootStorer, + NbShards: 1, + EligibleNodes: eligibleMap, + WaitingNodes: waitingMap, + SelfPublicKey: []byte("key"), + ConsensusGroupCache: &mock.NodesCoordinatorCacheMock{}, + ShuffledOutHandler: &mock.ShuffledOutHandlerStub{}, + ChanStopNode: make(chan endProcess.ArgEndProcess), + NodeTypeProvider: &nodeTypeProviderMock.NodeTypeProviderStub{}, + EnableEpochsHandler: &mock.EnableEpochsHandlerMock{}, + ValidatorInfoCacher: &vic.ValidatorInfoCacherStub{}, + GenesisNodesSetupHandler: &mock.NodesSetupMock{}, + NodesCoordinatorRegistryFactory: createNodesCoordinatorRegistryFactory(), } ihnc, err := NewIndexHashedNodesCoordinator(arguments) @@ -417,24 +429,25 @@ func TestIndexHashedNodesCoordinator_ComputeValidatorsGroup1ValidatorShouldRetur bootStorer := genericMocks.NewStorerMock() arguments := ArgNodesCoordinator{ - ShardConsensusGroupSize: 1, - MetaConsensusGroupSize: 1, - Marshalizer: &mock.MarshalizerMock{}, - Hasher: &hashingMocks.HasherMock{}, - Shuffler: nodeShuffler, - EpochStartNotifier: epochStartSubscriber, - BootStorer: bootStorer, - NbShards: 1, - EligibleNodes: nodesMap, - WaitingNodes: make(map[uint32][]Validator), - SelfPublicKey: []byte("key"), - ConsensusGroupCache: &mock.NodesCoordinatorCacheMock{}, - ShuffledOutHandler: &mock.ShuffledOutHandlerStub{}, - ChanStopNode: make(chan endProcess.ArgEndProcess), - NodeTypeProvider: &nodeTypeProviderMock.NodeTypeProviderStub{}, - EnableEpochsHandler: &mock.EnableEpochsHandlerMock{}, - 
ValidatorInfoCacher: &vic.ValidatorInfoCacherStub{}, - GenesisNodesSetupHandler: &mock.NodesSetupMock{}, + ShardConsensusGroupSize: 1, + MetaConsensusGroupSize: 1, + Marshalizer: &mock.MarshalizerMock{}, + Hasher: &hashingMocks.HasherMock{}, + Shuffler: nodeShuffler, + EpochStartNotifier: epochStartSubscriber, + BootStorer: bootStorer, + NbShards: 1, + EligibleNodes: nodesMap, + WaitingNodes: make(map[uint32][]Validator), + SelfPublicKey: []byte("key"), + ConsensusGroupCache: &mock.NodesCoordinatorCacheMock{}, + ShuffledOutHandler: &mock.ShuffledOutHandlerStub{}, + ChanStopNode: make(chan endProcess.ArgEndProcess), + NodeTypeProvider: &nodeTypeProviderMock.NodeTypeProviderStub{}, + EnableEpochsHandler: &mock.EnableEpochsHandlerMock{}, + ValidatorInfoCacher: &vic.ValidatorInfoCacherStub{}, + GenesisNodesSetupHandler: &mock.NodesSetupMock{}, + NodesCoordinatorRegistryFactory: createNodesCoordinatorRegistryFactory(), } ihnc, _ := NewIndexHashedNodesCoordinator(arguments) list2, err := ihnc.ComputeConsensusGroup([]byte("randomness"), 0, 0, 0) @@ -478,24 +491,25 @@ func TestIndexHashedNodesCoordinator_ComputeValidatorsGroup400of400For10locksNoM } arguments := ArgNodesCoordinator{ - ShardConsensusGroupSize: consensusGroupSize, - MetaConsensusGroupSize: 1, - Marshalizer: &mock.MarshalizerMock{}, - Hasher: &hashingMocks.HasherMock{}, - Shuffler: nodeShuffler, - EpochStartNotifier: epochStartSubscriber, - BootStorer: bootStorer, - NbShards: 1, - EligibleNodes: eligibleMap, - WaitingNodes: waitingMap, - SelfPublicKey: []byte("key"), - ConsensusGroupCache: lruCache, - ShuffledOutHandler: &mock.ShuffledOutHandlerStub{}, - ChanStopNode: make(chan endProcess.ArgEndProcess), - NodeTypeProvider: &nodeTypeProviderMock.NodeTypeProviderStub{}, - EnableEpochsHandler: &mock.EnableEpochsHandlerMock{}, - ValidatorInfoCacher: &vic.ValidatorInfoCacherStub{}, - GenesisNodesSetupHandler: &mock.NodesSetupMock{}, + ShardConsensusGroupSize: consensusGroupSize, + MetaConsensusGroupSize: 1, + Marshalizer: &mock.MarshalizerMock{}, + Hasher: &hashingMocks.HasherMock{}, + Shuffler: nodeShuffler, + EpochStartNotifier: epochStartSubscriber, + BootStorer: bootStorer, + NbShards: 1, + EligibleNodes: eligibleMap, + WaitingNodes: waitingMap, + SelfPublicKey: []byte("key"), + ConsensusGroupCache: lruCache, + ShuffledOutHandler: &mock.ShuffledOutHandlerStub{}, + ChanStopNode: make(chan endProcess.ArgEndProcess), + NodeTypeProvider: &nodeTypeProviderMock.NodeTypeProviderStub{}, + EnableEpochsHandler: &mock.EnableEpochsHandlerMock{}, + ValidatorInfoCacher: &vic.ValidatorInfoCacherStub{}, + GenesisNodesSetupHandler: &mock.NodesSetupMock{}, + NodesCoordinatorRegistryFactory: createNodesCoordinatorRegistryFactory(), } ihnc, err := NewIndexHashedNodesCoordinator(arguments) @@ -567,24 +581,25 @@ func TestIndexHashedNodesCoordinator_ComputeValidatorsGroup400of400For10BlocksMe } arguments := ArgNodesCoordinator{ - ShardConsensusGroupSize: consensusGroupSize, - MetaConsensusGroupSize: 1, - Marshalizer: &mock.MarshalizerMock{}, - Hasher: &hashingMocks.HasherMock{}, - Shuffler: nodeShuffler, - EpochStartNotifier: epochStartSubscriber, - BootStorer: bootStorer, - NbShards: 1, - EligibleNodes: eligibleMap, - WaitingNodes: waitingMap, - SelfPublicKey: []byte("key"), - ConsensusGroupCache: lruCache, - ShuffledOutHandler: &mock.ShuffledOutHandlerStub{}, - ChanStopNode: make(chan endProcess.ArgEndProcess), - NodeTypeProvider: &nodeTypeProviderMock.NodeTypeProviderStub{}, - EnableEpochsHandler: &mock.EnableEpochsHandlerMock{}, - ValidatorInfoCacher: 
&vic.ValidatorInfoCacherStub{}, - GenesisNodesSetupHandler: &mock.NodesSetupMock{}, + ShardConsensusGroupSize: consensusGroupSize, + MetaConsensusGroupSize: 1, + Marshalizer: &mock.MarshalizerMock{}, + Hasher: &hashingMocks.HasherMock{}, + Shuffler: nodeShuffler, + EpochStartNotifier: epochStartSubscriber, + BootStorer: bootStorer, + NbShards: 1, + EligibleNodes: eligibleMap, + WaitingNodes: waitingMap, + SelfPublicKey: []byte("key"), + ConsensusGroupCache: lruCache, + ShuffledOutHandler: &mock.ShuffledOutHandlerStub{}, + ChanStopNode: make(chan endProcess.ArgEndProcess), + NodeTypeProvider: &nodeTypeProviderMock.NodeTypeProviderStub{}, + EnableEpochsHandler: &mock.EnableEpochsHandlerMock{}, + ValidatorInfoCacher: &vic.ValidatorInfoCacherStub{}, + GenesisNodesSetupHandler: &mock.NodesSetupMock{}, + NodesCoordinatorRegistryFactory: createNodesCoordinatorRegistryFactory(), } ihnc, err := NewIndexHashedNodesCoordinator(arguments) @@ -970,24 +985,25 @@ func TestIndexHashedNodesCoordinator_GetValidatorWithPublicKeyShouldWork(t *test bootStorer := genericMocks.NewStorerMock() arguments := ArgNodesCoordinator{ - ShardConsensusGroupSize: 1, - MetaConsensusGroupSize: 1, - Marshalizer: &mock.MarshalizerMock{}, - Hasher: &hashingMocks.HasherMock{}, - Shuffler: nodeShuffler, - EpochStartNotifier: epochStartSubscriber, - BootStorer: bootStorer, - NbShards: 2, - EligibleNodes: eligibleMap, - WaitingNodes: make(map[uint32][]Validator), - SelfPublicKey: []byte("key"), - ConsensusGroupCache: &mock.NodesCoordinatorCacheMock{}, - ShuffledOutHandler: &mock.ShuffledOutHandlerStub{}, - ChanStopNode: make(chan endProcess.ArgEndProcess), - NodeTypeProvider: &nodeTypeProviderMock.NodeTypeProviderStub{}, - EnableEpochsHandler: &mock.EnableEpochsHandlerMock{}, - ValidatorInfoCacher: &vic.ValidatorInfoCacherStub{}, - GenesisNodesSetupHandler: &mock.NodesSetupMock{}, + ShardConsensusGroupSize: 1, + MetaConsensusGroupSize: 1, + Marshalizer: &mock.MarshalizerMock{}, + Hasher: &hashingMocks.HasherMock{}, + Shuffler: nodeShuffler, + EpochStartNotifier: epochStartSubscriber, + BootStorer: bootStorer, + NbShards: 2, + EligibleNodes: eligibleMap, + WaitingNodes: make(map[uint32][]Validator), + SelfPublicKey: []byte("key"), + ConsensusGroupCache: &mock.NodesCoordinatorCacheMock{}, + ShuffledOutHandler: &mock.ShuffledOutHandlerStub{}, + ChanStopNode: make(chan endProcess.ArgEndProcess), + NodeTypeProvider: &nodeTypeProviderMock.NodeTypeProviderStub{}, + EnableEpochsHandler: &mock.EnableEpochsHandlerMock{}, + ValidatorInfoCacher: &vic.ValidatorInfoCacherStub{}, + GenesisNodesSetupHandler: &mock.NodesSetupMock{}, + NodesCoordinatorRegistryFactory: createNodesCoordinatorRegistryFactory(), } ihnc, _ := NewIndexHashedNodesCoordinator(arguments) @@ -1054,25 +1070,26 @@ func TestIndexHashedGroupSelector_GetAllEligibleValidatorsPublicKeys(t *testing. 
bootStorer := genericMocks.NewStorerMock() arguments := ArgNodesCoordinator{ - ShardConsensusGroupSize: 1, - MetaConsensusGroupSize: 1, - Marshalizer: &mock.MarshalizerMock{}, - Hasher: &hashingMocks.HasherMock{}, - Shuffler: nodeShuffler, - EpochStartNotifier: epochStartSubscriber, - BootStorer: bootStorer, - ShardIDAsObserver: shardZeroId, - NbShards: 2, - EligibleNodes: eligibleMap, - WaitingNodes: make(map[uint32][]Validator), - SelfPublicKey: []byte("key"), - ConsensusGroupCache: &mock.NodesCoordinatorCacheMock{}, - ShuffledOutHandler: &mock.ShuffledOutHandlerStub{}, - ChanStopNode: make(chan endProcess.ArgEndProcess), - NodeTypeProvider: &nodeTypeProviderMock.NodeTypeProviderStub{}, - EnableEpochsHandler: &mock.EnableEpochsHandlerMock{}, - ValidatorInfoCacher: &vic.ValidatorInfoCacherStub{}, - GenesisNodesSetupHandler: &mock.NodesSetupMock{}, + ShardConsensusGroupSize: 1, + MetaConsensusGroupSize: 1, + Marshalizer: &mock.MarshalizerMock{}, + Hasher: &hashingMocks.HasherMock{}, + Shuffler: nodeShuffler, + EpochStartNotifier: epochStartSubscriber, + BootStorer: bootStorer, + ShardIDAsObserver: shardZeroId, + NbShards: 2, + EligibleNodes: eligibleMap, + WaitingNodes: make(map[uint32][]Validator), + SelfPublicKey: []byte("key"), + ConsensusGroupCache: &mock.NodesCoordinatorCacheMock{}, + ShuffledOutHandler: &mock.ShuffledOutHandlerStub{}, + ChanStopNode: make(chan endProcess.ArgEndProcess), + NodeTypeProvider: &nodeTypeProviderMock.NodeTypeProviderStub{}, + EnableEpochsHandler: &mock.EnableEpochsHandlerMock{}, + ValidatorInfoCacher: &vic.ValidatorInfoCacherStub{}, + GenesisNodesSetupHandler: &mock.NodesSetupMock{}, + NodesCoordinatorRegistryFactory: createNodesCoordinatorRegistryFactory(), } ihnc, _ := NewIndexHashedNodesCoordinator(arguments) @@ -1134,25 +1151,26 @@ func TestIndexHashedGroupSelector_GetAllWaitingValidatorsPublicKeys(t *testing.T eligibleMap[shardZeroId] = []Validator{&validator{}} arguments := ArgNodesCoordinator{ - ShardConsensusGroupSize: 1, - MetaConsensusGroupSize: 1, - Marshalizer: &mock.MarshalizerMock{}, - Hasher: &hashingMocks.HasherMock{}, - Shuffler: nodeShuffler, - EpochStartNotifier: epochStartSubscriber, - BootStorer: bootStorer, - ShardIDAsObserver: shardZeroId, - NbShards: 2, - EligibleNodes: eligibleMap, - WaitingNodes: waitingMap, - SelfPublicKey: []byte("key"), - ConsensusGroupCache: &mock.NodesCoordinatorCacheMock{}, - ShuffledOutHandler: &mock.ShuffledOutHandlerStub{}, - ChanStopNode: make(chan endProcess.ArgEndProcess), - NodeTypeProvider: &nodeTypeProviderMock.NodeTypeProviderStub{}, - EnableEpochsHandler: &mock.EnableEpochsHandlerMock{}, - ValidatorInfoCacher: &vic.ValidatorInfoCacherStub{}, - GenesisNodesSetupHandler: &mock.NodesSetupMock{}, + ShardConsensusGroupSize: 1, + MetaConsensusGroupSize: 1, + Marshalizer: &mock.MarshalizerMock{}, + Hasher: &hashingMocks.HasherMock{}, + Shuffler: nodeShuffler, + EpochStartNotifier: epochStartSubscriber, + BootStorer: bootStorer, + ShardIDAsObserver: shardZeroId, + NbShards: 2, + EligibleNodes: eligibleMap, + WaitingNodes: waitingMap, + SelfPublicKey: []byte("key"), + ConsensusGroupCache: &mock.NodesCoordinatorCacheMock{}, + ShuffledOutHandler: &mock.ShuffledOutHandlerStub{}, + ChanStopNode: make(chan endProcess.ArgEndProcess), + NodeTypeProvider: &nodeTypeProviderMock.NodeTypeProviderStub{}, + EnableEpochsHandler: &mock.EnableEpochsHandlerMock{}, + ValidatorInfoCacher: &vic.ValidatorInfoCacherStub{}, + GenesisNodesSetupHandler: &mock.NodesSetupMock{}, + NodesCoordinatorRegistryFactory: 
createNodesCoordinatorRegistryFactory(), } ihnc, _ := NewIndexHashedNodesCoordinator(arguments) @@ -1257,7 +1275,7 @@ func TestIndexHashedNodesCoordinator_setNodesPerShardsShouldTriggerWrongConfigur }, } - err = ihnc.setNodesPerShards(eligibleMap, map[uint32][]Validator{}, map[uint32][]Validator{}, 2) + err = ihnc.setNodesPerShards(eligibleMap, map[uint32][]Validator{}, map[uint32][]Validator{}, map[uint32][]Validator{}, 2) require.NoError(t, err) value := <-chanStopNode @@ -1283,7 +1301,7 @@ func TestIndexHashedNodesCoordinator_setNodesPerShardsShouldNotTriggerWrongConfi }, } - err = ihnc.setNodesPerShards(eligibleMap, map[uint32][]Validator{}, map[uint32][]Validator{}, 2) + err = ihnc.setNodesPerShards(eligibleMap, map[uint32][]Validator{}, map[uint32][]Validator{}, map[uint32][]Validator{}, 2) require.NoError(t, err) require.Empty(t, chanStopNode) @@ -1315,7 +1333,7 @@ func TestIndexHashedNodesCoordinator_setNodesPerShardsShouldSetNodeTypeValidator }, } - err = ihnc.setNodesPerShards(eligibleMap, map[uint32][]Validator{}, map[uint32][]Validator{}, 2) + err = ihnc.setNodesPerShards(eligibleMap, map[uint32][]Validator{}, map[uint32][]Validator{}, map[uint32][]Validator{}, 2) require.NoError(t, err) require.True(t, setTypeWasCalled) require.Equal(t, core.NodeTypeValidator, nodeTypeResult) @@ -1347,7 +1365,7 @@ func TestIndexHashedNodesCoordinator_setNodesPerShardsShouldSetNodeTypeObserver( }, } - err = ihnc.setNodesPerShards(eligibleMap, map[uint32][]Validator{}, map[uint32][]Validator{}, 2) + err = ihnc.setNodesPerShards(eligibleMap, map[uint32][]Validator{}, map[uint32][]Validator{}, map[uint32][]Validator{}, 2) require.NoError(t, err) require.True(t, setTypeWasCalled) require.Equal(t, core.NodeTypeObserver, nodeTypeResult) @@ -1389,6 +1407,36 @@ func TestIndexHashedNodesCoordinator_EpochStartInEligible(t *testing.T) { require.True(t, isValidator) } +func TestIndexHashedNodesCoordinator_computeShardForSelfPublicKeyWithStakingV4(t *testing.T) { + t.Parallel() + + arguments := createArguments() + pk := []byte("pk") + arguments.SelfPublicKey = pk + nc, _ := NewIndexHashedNodesCoordinator(arguments) + epoch := uint32(2) + + metaShard := core.MetachainShardId + nc.nodesConfig = map[uint32]*epochNodesConfig{ + epoch: { + shardID: metaShard, + shuffledOutMap: map[uint32][]Validator{ + metaShard: {newValidatorMock(pk, 1, 1)}, + }, + }, + } + + computedShardId, isValidator := nc.computeShardForSelfPublicKey(nc.nodesConfig[epoch]) + require.Equal(t, nc.shardIDAsObserver, computedShardId) + require.False(t, isValidator) + + nc.flagStakingV4Step2.SetValue(true) + + computedShardId, isValidator = nc.computeShardForSelfPublicKey(nc.nodesConfig[epoch]) + require.Equal(t, metaShard, computedShardId) + require.True(t, isValidator) +} + func TestIndexHashedNodesCoordinator_EpochStartInWaiting(t *testing.T) { t.Parallel() @@ -1513,8 +1561,9 @@ func TestIndexHashedNodesCoordinator_EpochStart_EligibleSortedAscendingByIndex(t EnableEpochsHandler: &mock.EnableEpochsHandlerMock{ IsRefactorPeersMiniBlocksFlagEnabledField: true, }, - ValidatorInfoCacher: dataPool.NewCurrentEpochValidatorInfoPool(), - GenesisNodesSetupHandler: &mock.NodesSetupMock{}, + ValidatorInfoCacher: dataPool.NewCurrentEpochValidatorInfoPool(), + GenesisNodesSetupHandler: &mock.NodesSetupMock{}, + NodesCoordinatorRegistryFactory: createNodesCoordinatorRegistryFactory(), } ihnc, err := NewIndexHashedNodesCoordinator(arguments) @@ -2021,38 +2070,6 @@ func TestIndexHashedNodesCoordinator_ShuffleOutNilConfig(t *testing.T) { require.Equal(t, 
expectedShardForNotFound, newShard) } -func TestIndexHashedNodesCoordinator_computeNodesConfigFromListNilPreviousNodesConfig(t *testing.T) { - t.Parallel() - - arguments := createArguments() - pk := []byte("pk") - arguments.SelfPublicKey = pk - ihnc, _ := NewIndexHashedNodesCoordinator(arguments) - - ihnc.flagWaitingListFix.Reset() - validatorInfos := make([]*state.ShardValidatorInfo, 0) - newNodesConfig, err := ihnc.computeNodesConfigFromList(nil, validatorInfos) - - assert.Nil(t, newNodesConfig) - assert.False(t, errors.Is(err, ErrNilPreviousEpochConfig)) - - newNodesConfig, err = ihnc.computeNodesConfigFromList(nil, nil) - - assert.Nil(t, newNodesConfig) - assert.False(t, errors.Is(err, ErrNilPreviousEpochConfig)) - - _ = ihnc.flagWaitingListFix.SetReturningPrevious() - newNodesConfig, err = ihnc.computeNodesConfigFromList(nil, validatorInfos) - - assert.Nil(t, newNodesConfig) - assert.True(t, errors.Is(err, ErrNilPreviousEpochConfig)) - - newNodesConfig, err = ihnc.computeNodesConfigFromList(nil, nil) - - assert.Nil(t, newNodesConfig) - assert.True(t, errors.Is(err, ErrNilPreviousEpochConfig)) -} - func TestIndexHashedNodesCoordinator_computeNodesConfigFromListNoValidators(t *testing.T) { t.Parallel() @@ -2062,12 +2079,12 @@ func TestIndexHashedNodesCoordinator_computeNodesConfigFromListNoValidators(t *t ihnc, _ := NewIndexHashedNodesCoordinator(arguments) validatorInfos := make([]*state.ShardValidatorInfo, 0) - newNodesConfig, err := ihnc.computeNodesConfigFromList(&epochNodesConfig{}, validatorInfos) + newNodesConfig, err := ihnc.computeNodesConfigFromList(validatorInfos) assert.Nil(t, newNodesConfig) assert.True(t, errors.Is(err, ErrMapSizeZero)) - newNodesConfig, err = ihnc.computeNodesConfigFromList(&epochNodesConfig{}, nil) + newNodesConfig, err = ihnc.computeNodesConfigFromList(nil) assert.Nil(t, newNodesConfig) assert.True(t, errors.Is(err, ErrMapSizeZero)) @@ -2099,13 +2116,62 @@ func TestIndexHashedNodesCoordinator_computeNodesConfigFromListNilPk(t *testing. 
}, } - newNodesConfig, err := ihnc.computeNodesConfigFromList(&epochNodesConfig{}, validatorInfos) + newNodesConfig, err := ihnc.computeNodesConfigFromList(validatorInfos) assert.Nil(t, newNodesConfig) assert.NotNil(t, err) assert.Equal(t, ErrNilPubKey, err) } +func TestIndexHashedNodesCoordinator_computeNodesConfigFromListWithStakingV4(t *testing.T) { + t.Parallel() + arguments := createArguments() + nc, _ := NewIndexHashedNodesCoordinator(arguments) + + shard0Eligible := &state.ShardValidatorInfo{ + PublicKey: []byte("pk0"), + List: string(common.EligibleList), + Index: 1, + TempRating: 2, + ShardId: 0, + } + shard0Auction := &state.ShardValidatorInfo{ + PublicKey: []byte("pk1"), + List: string(common.SelectedFromAuctionList), + Index: 3, + TempRating: 2, + ShardId: 0, + } + shard1Auction := &state.ShardValidatorInfo{ + PublicKey: []byte("pk2"), + List: string(common.SelectedFromAuctionList), + Index: 2, + TempRating: 2, + ShardId: 1, + } + validatorInfos := []*state.ShardValidatorInfo{shard0Eligible, shard0Auction, shard1Auction} + + newNodesConfig, err := nc.computeNodesConfigFromList(validatorInfos) + require.Equal(t, ErrReceivedAuctionValidatorsBeforeStakingV4, err) + require.Nil(t, newNodesConfig) + + nc.updateEpochFlags(stakingV4Epoch) + + newNodesConfig, err = nc.computeNodesConfigFromList(validatorInfos) + require.Nil(t, err) + v1, _ := NewValidator([]byte("pk2"), 1, 2) + v2, _ := NewValidator([]byte("pk1"), 1, 3) + require.Equal(t, []Validator{v1, v2}, newNodesConfig.auctionList) + + validatorInfos = append(validatorInfos, &state.ShardValidatorInfo{ + PublicKey: []byte("pk3"), + List: string(common.NewList), + }) + newNodesConfig, err = nc.computeNodesConfigFromList(validatorInfos) + require.Equal(t, epochStart.ErrReceivedNewListNodeInStakingV4, err) + require.Nil(t, newNodesConfig) +} + func TestIndexHashedNodesCoordinator_computeNodesConfigFromListValidatorsWithFix(t *testing.T) { t.Parallel() @@ -2113,7 +2179,7 @@ func TestIndexHashedNodesCoordinator_computeNodesConfigFromListValidatorsWithFix pk := []byte("pk") arguments.SelfPublicKey = pk ihnc, _ := NewIndexHashedNodesCoordinator(arguments) - _ = ihnc.flagWaitingListFix.SetReturningPrevious() + _ = ihnc.flagStakingV4Started.SetReturningPrevious() shard0Eligible0 := &state.ShardValidatorInfo{ PublicKey: []byte("pk0"), @@ -2154,15 +2220,18 @@ func TestIndexHashedNodesCoordinator_computeNodesConfigFromListValidatorsWithFix ShardId: 0, } shard0Leaving0 := &state.ShardValidatorInfo{ - PublicKey: []byte("pk6"), - List: string(common.LeavingList), - ShardId: 0, + PublicKey: []byte("pk6"), + List: string(common.LeavingList), + PreviousList: string(common.EligibleList), + ShardId: 0, } shardMetaLeaving1 := &state.ShardValidatorInfo{ - PublicKey: []byte("pk7"), - List: string(common.LeavingList), - Index: 1, - ShardId: core.MetachainShardId, + PublicKey: []byte("pk7"), + List: string(common.LeavingList), + PreviousList: string(common.WaitingList), + Index: 1, + PreviousIndex: 1, + ShardId: core.MetachainShardId, } validatorInfos := @@ -2177,29 +2246,7 @@ func TestIndexHashedNodesCoordinator_computeNodesConfigFromListValidatorsWithFix shardMetaLeaving1, } - previousConfig := &epochNodesConfig{ - eligibleMap: map[uint32][]Validator{ - 0: { - newValidatorMock(shard0Eligible0.PublicKey, 0, 0), - newValidatorMock(shard0Eligible1.PublicKey, 0, 0), - newValidatorMock(shard0Leaving0.PublicKey, 0, 0), - }, - core.MetachainShardId: { - newValidatorMock(shardmetaEligible0.PublicKey, 0, 0), - }, - }, - waitingMap: map[uint32][]Validator{ - 0: { 
- newValidatorMock(shard0Waiting0.PublicKey, 0, 0), - }, - core.MetachainShardId: { - newValidatorMock(shardmetaWaiting0.PublicKey, 0, 0), - newValidatorMock(shardMetaLeaving1.PublicKey, 0, 0), - }, - }, - } - - newNodesConfig, err := ihnc.computeNodesConfigFromList(previousConfig, validatorInfos) + newNodesConfig, err := ihnc.computeNodesConfigFromList(validatorInfos) assert.Nil(t, err) assert.Equal(t, uint32(1), newNodesConfig.nbShards) @@ -2293,10 +2340,6 @@ func TestIndexHashedNodesCoordinator_computeNodesConfigFromListValidatorsNoFix(t ShardId: core.MetachainShardId, } - previousConfig := &epochNodesConfig{ - eligibleMap: map[uint32][]Validator{}, - } - validatorInfos := []*state.ShardValidatorInfo{ shard0Eligible0, @@ -2309,8 +2352,8 @@ func TestIndexHashedNodesCoordinator_computeNodesConfigFromListValidatorsNoFix(t shardMetaLeaving1, } - ihnc.flagWaitingListFix.Reset() - newNodesConfig, err := ihnc.computeNodesConfigFromList(previousConfig, validatorInfos) + ihnc.flagStakingV4Started.Reset() + newNodesConfig, err := ihnc.computeNodesConfigFromList(validatorInfos) assert.Nil(t, err) assert.Equal(t, uint32(1), newNodesConfig.nbShards) @@ -2509,8 +2552,9 @@ func TestIndexHashedGroupSelector_GetWaitingEpochsLeftForPublicKey(t *testing.T) EnableEpochsHandler: &mock.EnableEpochsHandlerMock{ CurrentEpoch: 1, }, - ValidatorInfoCacher: &vic.ValidatorInfoCacherStub{}, - GenesisNodesSetupHandler: &mock.NodesSetupMock{}, + ValidatorInfoCacher: &vic.ValidatorInfoCacherStub{}, + GenesisNodesSetupHandler: &mock.NodesSetupMock{}, + NodesCoordinatorRegistryFactory: createNodesCoordinatorRegistryFactory(), } ihnc, _ := NewIndexHashedNodesCoordinator(arguments) @@ -2585,6 +2629,7 @@ func TestIndexHashedGroupSelector_GetWaitingEpochsLeftForPublicKey(t *testing.T) return 0 }, }, + NodesCoordinatorRegistryFactory: createNodesCoordinatorRegistryFactory(), } ihnc, _ := NewIndexHashedNodesCoordinator(arguments) @@ -2669,6 +2714,7 @@ func TestIndexHashedGroupSelector_GetWaitingEpochsLeftForPublicKey(t *testing.T) return 2 }, }, + NodesCoordinatorRegistryFactory: createNodesCoordinatorRegistryFactory(), } ihnc, _ := NewIndexHashedNodesCoordinator(arguments) diff --git a/sharding/nodesCoordinator/interface.go b/sharding/nodesCoordinator/interface.go index 0c16a505364..68dfa9bbb15 100644 --- a/sharding/nodesCoordinator/interface.go +++ b/sharding/nodesCoordinator/interface.go @@ -5,6 +5,7 @@ import ( "github.com/multiversx/mx-chain-core-go/data" "github.com/multiversx/mx-chain-go/epochStart" "github.com/multiversx/mx-chain-go/state" + vmcommon "github.com/multiversx/mx-chain-vm-common-go" ) // Validator defines a node that can be allocated to a shard for participation in a consensus group as validator @@ -46,6 +47,7 @@ type PublicKeysSelector interface { GetAllEligibleValidatorsPublicKeys(epoch uint32) (map[uint32][][]byte, error) GetAllWaitingValidatorsPublicKeys(epoch uint32) (map[uint32][][]byte, error) GetAllLeavingValidatorsPublicKeys(epoch uint32) (map[uint32][][]byte, error) + GetAllShuffledOutValidatorsPublicKeys(epoch uint32) (map[uint32][][]byte, error) GetConsensusValidatorsPublicKeys(randomness []byte, round uint64, shardId uint32, epoch uint32) ([]string, error) GetOwnPublicKey() []byte } @@ -138,3 +140,38 @@ type GenesisNodesSetupHandler interface { MinMetaHysteresisNodes() uint32 IsInterfaceNil() bool } + +// EpochValidatorsHandler defines what one epoch configuration for a nodes coordinator should hold +type EpochValidatorsHandler interface { + GetEligibleValidators() 
map[string][]*SerializableValidator + GetWaitingValidators() map[string][]*SerializableValidator + GetLeavingValidators() map[string][]*SerializableValidator +} + +// EpochValidatorsHandlerWithAuction defines what one epoch configuration for a nodes coordinator should hold + shuffled out validators +type EpochValidatorsHandlerWithAuction interface { + EpochValidatorsHandler + GetShuffledOutValidators() map[string][]*SerializableValidator +} + +// NodesCoordinatorRegistryHandler defines what is used to initialize nodes coordinator +type NodesCoordinatorRegistryHandler interface { + GetEpochsConfig() map[string]EpochValidatorsHandler + GetCurrentEpoch() uint32 + SetCurrentEpoch(epoch uint32) +} + +// NodesCoordinatorRegistryFactory handles NodesCoordinatorRegistryHandler marshall/unmarshall +type NodesCoordinatorRegistryFactory interface { + CreateNodesCoordinatorRegistry(buff []byte) (NodesCoordinatorRegistryHandler, error) + GetRegistryData(registry NodesCoordinatorRegistryHandler, epoch uint32) ([]byte, error) + IsInterfaceNil() bool +} + +// EpochNotifier can notify upon an epoch change and provide the current epoch +type EpochNotifier interface { + RegisterNotifyHandler(handler vmcommon.EpochSubscriberHandler) + CurrentEpoch() uint32 + CheckEpoch(header data.HeaderHandler) + IsInterfaceNil() bool +} diff --git a/sharding/nodesCoordinator/nodesCoordinatorRegistry.go b/sharding/nodesCoordinator/nodesCoordinatorRegistry.go new file mode 100644 index 00000000000..fbf84919d7a --- /dev/null +++ b/sharding/nodesCoordinator/nodesCoordinatorRegistry.go @@ -0,0 +1,49 @@ +package nodesCoordinator + +// EpochValidators holds one epoch configuration for a nodes coordinator +type EpochValidators struct { + EligibleValidators map[string][]*SerializableValidator `json:"eligibleValidators"` + WaitingValidators map[string][]*SerializableValidator `json:"waitingValidators"` + LeavingValidators map[string][]*SerializableValidator `json:"leavingValidators"` +} + +// GetEligibleValidators returns all eligible validators from all shards +func (ev *EpochValidators) GetEligibleValidators() map[string][]*SerializableValidator { + return ev.EligibleValidators +} + +// GetWaitingValidators returns all waiting validators from all shards +func (ev *EpochValidators) GetWaitingValidators() map[string][]*SerializableValidator { + return ev.WaitingValidators +} + +// GetLeavingValidators returns all leaving validators from all shards +func (ev *EpochValidators) GetLeavingValidators() map[string][]*SerializableValidator { + return ev.LeavingValidators +} + +// NodesCoordinatorRegistry holds the data that can be used to initialize a nodes coordinator +type NodesCoordinatorRegistry struct { + EpochsConfig map[string]*EpochValidators `json:"epochConfigs"` + CurrentEpoch uint32 `json:"currentEpoch"` +} + +// GetCurrentEpoch returns the current epoch +func (ncr *NodesCoordinatorRegistry) GetCurrentEpoch() uint32 { + return ncr.CurrentEpoch +} + +// GetEpochsConfig returns epoch-validators configuration +func (ncr *NodesCoordinatorRegistry) GetEpochsConfig() map[string]EpochValidatorsHandler { + ret := make(map[string]EpochValidatorsHandler) + for epoch, config := range ncr.EpochsConfig { + ret[epoch] = config + } + + return ret +} + +// SetCurrentEpoch sets internally the current epoch +func (ncr *NodesCoordinatorRegistry) SetCurrentEpoch(epoch uint32) { + ncr.CurrentEpoch = epoch +} diff --git a/sharding/nodesCoordinator/nodesCoordinatorRegistryFactory.go b/sharding/nodesCoordinator/nodesCoordinatorRegistryFactory.go new 
file mode 100644 index 00000000000..0ef508fbf89 --- /dev/null +++ b/sharding/nodesCoordinator/nodesCoordinatorRegistryFactory.go @@ -0,0 +1,80 @@ +package nodesCoordinator + +import ( + "encoding/json" + + "github.com/multiversx/mx-chain-core-go/core/check" + "github.com/multiversx/mx-chain-core-go/marshal" +) + +type nodesCoordinatorRegistryFactory struct { + marshaller marshal.Marshalizer + stakingV4Step2EnableEpoch uint32 +} + +// NewNodesCoordinatorRegistryFactory creates a nodes coordinator registry factory which will create a +// NodesCoordinatorRegistryHandler from a buffer depending on the epoch +func NewNodesCoordinatorRegistryFactory( + marshaller marshal.Marshalizer, + stakingV4Step2EnableEpoch uint32, +) (*nodesCoordinatorRegistryFactory, error) { + if check.IfNil(marshaller) { + return nil, ErrNilMarshalizer + } + + return &nodesCoordinatorRegistryFactory{ + marshaller: marshaller, + stakingV4Step2EnableEpoch: stakingV4Step2EnableEpoch, + }, nil +} + +// CreateNodesCoordinatorRegistry creates a NodesCoordinatorRegistryHandler depending on the buffer. Old version uses +// NodesCoordinatorRegistry with a json marshaller; while the new version(from staking v4) uses NodesCoordinatorRegistryWithAuction +// with proto marshaller +func (ncf *nodesCoordinatorRegistryFactory) CreateNodesCoordinatorRegistry(buff []byte) (NodesCoordinatorRegistryHandler, error) { + registry, err := ncf.createRegistryWithAuction(buff) + if err == nil { + log.Debug("nodesCoordinatorRegistryFactory.CreateNodesCoordinatorRegistry created registry with auction", + "epoch", registry.CurrentEpoch) + return registry, nil + } + log.Debug("nodesCoordinatorRegistryFactory.CreateNodesCoordinatorRegistry creating old registry") + return createOldRegistry(buff) +} + +func (ncf *nodesCoordinatorRegistryFactory) createRegistryWithAuction(buff []byte) (*NodesCoordinatorRegistryWithAuction, error) { + registry := &NodesCoordinatorRegistryWithAuction{} + err := ncf.marshaller.Unmarshal(registry, buff) + if err != nil { + return nil, err + } + + log.Debug("nodesCoordinatorRegistryFactory.CreateNodesCoordinatorRegistry created old registry", + "epoch", registry.CurrentEpoch) + return registry, nil +} + +func createOldRegistry(buff []byte) (*NodesCoordinatorRegistry, error) { + registry := &NodesCoordinatorRegistry{} + err := json.Unmarshal(buff, registry) + if err != nil { + return nil, err + } + + return registry, nil +} + +// GetRegistryData returns the registry data as buffer. Old version uses json marshaller, while new version uses proto marshaller +func (ncf *nodesCoordinatorRegistryFactory) GetRegistryData(registry NodesCoordinatorRegistryHandler, epoch uint32) ([]byte, error) { + if epoch >= ncf.stakingV4Step2EnableEpoch { + log.Debug("nodesCoordinatorRegistryFactory.GetRegistryData called with auction after staking v4", "epoch", epoch) + return ncf.marshaller.Marshal(registry) + } + log.Debug("nodesCoordinatorRegistryFactory.GetRegistryData called with old json before staking v4", "epoch", epoch) + return json.Marshal(registry) +} + +// IsInterfaceNil checks if the underlying pointer is nil +func (ncf *nodesCoordinatorRegistryFactory) IsInterfaceNil() bool { + return ncf == nil +} diff --git a/sharding/nodesCoordinator/nodesCoordinatorRegistryWithAuction.go b/sharding/nodesCoordinator/nodesCoordinatorRegistryWithAuction.go new file mode 100644 index 00000000000..d9bea843a16 --- /dev/null +++ b/sharding/nodesCoordinator/nodesCoordinatorRegistryWithAuction.go @@ -0,0 +1,47 @@ +//go:generate protoc -I=. 
-I=$GOPATH/src -I=$GOPATH/src/github.com/multiversx/protobuf/protobuf --gogoslick_out=. nodesCoordinatorRegistryWithAuction.proto +package nodesCoordinator + +func protoValidatorsMapToSliceMap(validators map[string]Validators) map[string][]*SerializableValidator { + ret := make(map[string][]*SerializableValidator) + + for shardID, val := range validators { + ret[shardID] = val.GetData() + } + + return ret +} + +// GetEligibleValidators returns all eligible validators from all shards +func (m *EpochValidatorsWithAuction) GetEligibleValidators() map[string][]*SerializableValidator { + return protoValidatorsMapToSliceMap(m.GetEligible()) +} + +// GetWaitingValidators returns all waiting validators from all shards +func (m *EpochValidatorsWithAuction) GetWaitingValidators() map[string][]*SerializableValidator { + return protoValidatorsMapToSliceMap(m.GetWaiting()) +} + +// GetLeavingValidators returns all leaving validators from all shards +func (m *EpochValidatorsWithAuction) GetLeavingValidators() map[string][]*SerializableValidator { + return protoValidatorsMapToSliceMap(m.GetLeaving()) +} + +// GetShuffledOutValidators returns all shuffled out validators from all shards +func (m *EpochValidatorsWithAuction) GetShuffledOutValidators() map[string][]*SerializableValidator { + return protoValidatorsMapToSliceMap(m.GetShuffledOut()) +} + +// GetEpochsConfig returns epoch-validators configuration +func (m *NodesCoordinatorRegistryWithAuction) GetEpochsConfig() map[string]EpochValidatorsHandler { + ret := make(map[string]EpochValidatorsHandler) + for epoch, config := range m.GetEpochsConfigWithAuction() { + ret[epoch] = config + } + + return ret +} + +// SetCurrentEpoch sets internally the current epoch +func (m *NodesCoordinatorRegistryWithAuction) SetCurrentEpoch(epoch uint32) { + m.CurrentEpoch = epoch +} diff --git a/sharding/nodesCoordinator/nodesCoordinatorRegistryWithAuction.pb.go b/sharding/nodesCoordinator/nodesCoordinatorRegistryWithAuction.pb.go new file mode 100644 index 00000000000..3c69dc78080 --- /dev/null +++ b/sharding/nodesCoordinator/nodesCoordinatorRegistryWithAuction.pb.go @@ -0,0 +1,2128 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: nodesCoordinatorRegistryWithAuction.proto + +package nodesCoordinator + +import ( + bytes "bytes" + fmt "fmt" + _ "github.com/gogo/protobuf/gogoproto" + proto "github.com/gogo/protobuf/proto" + github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" + io "io" + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
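Note on the registry factory added above: GetRegistryData writes protobuf from stakingV4Step2EnableEpoch onwards and JSON before that epoch, while CreateNodesCoordinatorRegistry probes the with-auction protobuf format first and falls back to the legacy JSON registry. A minimal usage sketch follows; it is illustrative and not part of the patch — the epoch value 444 mirrors the stakingV4Epoch test constant earlier in this diff, and wrapping it in a standalone main package is an assumption.

package main

import (
	"fmt"

	"github.com/multiversx/mx-chain-core-go/marshal"
	"github.com/multiversx/mx-chain-go/sharding/nodesCoordinator"
)

func main() {
	// stakingV4Step2EnableEpoch is assumed to be 444 here, mirroring the stakingV4Epoch constant from the tests.
	factory, err := nodesCoordinator.NewNodesCoordinatorRegistryFactory(&marshal.GogoProtoMarshalizer{}, 444)
	if err != nil {
		panic(err)
	}

	// A legacy (pre staking v4) registry, as defined in nodesCoordinatorRegistry.go above.
	legacy := &nodesCoordinator.NodesCoordinatorRegistry{
		EpochsConfig: map[string]*nodesCoordinator.EpochValidators{},
		CurrentEpoch: 443,
	}

	// Epoch 443 is below the step 2 enable epoch, so GetRegistryData serializes the registry as JSON.
	buff, err := factory.GetRegistryData(legacy, 443)
	if err != nil {
		panic(err)
	}

	// CreateNodesCoordinatorRegistry tries the with-auction protobuf format first
	// and falls back to the legacy JSON registry when that attempt fails.
	restored, err := factory.CreateNodesCoordinatorRegistry(buff)
	if err != nil {
		panic(err)
	}
	fmt.Println(restored.GetCurrentEpoch())
}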
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +type SerializableValidator struct { + PubKey []byte `protobuf:"bytes,1,opt,name=PubKey,proto3" json:"pubKey"` + Chances uint32 `protobuf:"varint,2,opt,name=Chances,proto3" json:"chances"` + Index uint32 `protobuf:"varint,3,opt,name=Index,proto3" json:"index"` +} + +func (m *SerializableValidator) Reset() { *m = SerializableValidator{} } +func (*SerializableValidator) ProtoMessage() {} +func (*SerializableValidator) Descriptor() ([]byte, []int) { + return fileDescriptor_f04461c784f438d5, []int{0} +} +func (m *SerializableValidator) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SerializableValidator) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *SerializableValidator) XXX_Merge(src proto.Message) { + xxx_messageInfo_SerializableValidator.Merge(m, src) +} +func (m *SerializableValidator) XXX_Size() int { + return m.Size() +} +func (m *SerializableValidator) XXX_DiscardUnknown() { + xxx_messageInfo_SerializableValidator.DiscardUnknown(m) +} + +var xxx_messageInfo_SerializableValidator proto.InternalMessageInfo + +func (m *SerializableValidator) GetPubKey() []byte { + if m != nil { + return m.PubKey + } + return nil +} + +func (m *SerializableValidator) GetChances() uint32 { + if m != nil { + return m.Chances + } + return 0 +} + +func (m *SerializableValidator) GetIndex() uint32 { + if m != nil { + return m.Index + } + return 0 +} + +type Validators struct { + Data []*SerializableValidator `protobuf:"bytes,1,rep,name=Data,proto3" json:"Data,omitempty"` +} + +func (m *Validators) Reset() { *m = Validators{} } +func (*Validators) ProtoMessage() {} +func (*Validators) Descriptor() ([]byte, []int) { + return fileDescriptor_f04461c784f438d5, []int{1} +} +func (m *Validators) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Validators) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Validators) XXX_Merge(src proto.Message) { + xxx_messageInfo_Validators.Merge(m, src) +} +func (m *Validators) XXX_Size() int { + return m.Size() +} +func (m *Validators) XXX_DiscardUnknown() { + xxx_messageInfo_Validators.DiscardUnknown(m) +} + +var xxx_messageInfo_Validators proto.InternalMessageInfo + +func (m *Validators) GetData() []*SerializableValidator { + if m != nil { + return m.Data + } + return nil +} + +type EpochValidatorsWithAuction struct { + Eligible map[string]Validators `protobuf:"bytes,1,rep,name=Eligible,proto3" json:"Eligible" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + Waiting map[string]Validators `protobuf:"bytes,2,rep,name=Waiting,proto3" json:"Waiting" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + Leaving map[string]Validators `protobuf:"bytes,3,rep,name=Leaving,proto3" json:"Leaving" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + ShuffledOut map[string]Validators `protobuf:"bytes,4,rep,name=ShuffledOut,proto3" json:"ShuffledOut" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` +} + +func (m *EpochValidatorsWithAuction) Reset() { *m = EpochValidatorsWithAuction{} } +func (*EpochValidatorsWithAuction) ProtoMessage() {} 
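The EpochValidatorsWithAuction message generated here implements EpochValidatorsHandlerWithAuction, whereas the legacy EpochValidators only implements EpochValidatorsHandler, so callers of GetEpochsConfig can type-assert to reach the shuffled-out lists. A short hypothetical helper sketching that pattern (the helper name shuffledOutOf and the surrounding main package are assumptions, not code from this patch):

package main

import (
	"fmt"

	"github.com/multiversx/mx-chain-go/sharding/nodesCoordinator"
)

// shuffledOutOf returns the per-shard shuffled-out validators when the epoch
// config carries them (with-auction format) and nil for the legacy format.
func shuffledOutOf(config nodesCoordinator.EpochValidatorsHandler) map[string][]*nodesCoordinator.SerializableValidator {
	withAuction, ok := config.(nodesCoordinator.EpochValidatorsHandlerWithAuction)
	if !ok {
		// Legacy EpochValidators (JSON registry) has no shuffled-out section.
		return nil
	}
	return withAuction.GetShuffledOutValidators()
}

func main() {
	registry := &nodesCoordinator.NodesCoordinatorRegistryWithAuction{}
	for epoch, config := range registry.GetEpochsConfig() {
		fmt.Println(epoch, len(shuffledOutOf(config)))
	}
}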
+func (*EpochValidatorsWithAuction) Descriptor() ([]byte, []int) { + return fileDescriptor_f04461c784f438d5, []int{2} +} +func (m *EpochValidatorsWithAuction) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *EpochValidatorsWithAuction) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *EpochValidatorsWithAuction) XXX_Merge(src proto.Message) { + xxx_messageInfo_EpochValidatorsWithAuction.Merge(m, src) +} +func (m *EpochValidatorsWithAuction) XXX_Size() int { + return m.Size() +} +func (m *EpochValidatorsWithAuction) XXX_DiscardUnknown() { + xxx_messageInfo_EpochValidatorsWithAuction.DiscardUnknown(m) +} + +var xxx_messageInfo_EpochValidatorsWithAuction proto.InternalMessageInfo + +func (m *EpochValidatorsWithAuction) GetEligible() map[string]Validators { + if m != nil { + return m.Eligible + } + return nil +} + +func (m *EpochValidatorsWithAuction) GetWaiting() map[string]Validators { + if m != nil { + return m.Waiting + } + return nil +} + +func (m *EpochValidatorsWithAuction) GetLeaving() map[string]Validators { + if m != nil { + return m.Leaving + } + return nil +} + +func (m *EpochValidatorsWithAuction) GetShuffledOut() map[string]Validators { + if m != nil { + return m.ShuffledOut + } + return nil +} + +type NodesCoordinatorRegistryWithAuction struct { + CurrentEpoch uint32 `protobuf:"varint,1,opt,name=CurrentEpoch,proto3" json:"CurrentEpoch,omitempty"` + EpochsConfigWithAuction map[string]*EpochValidatorsWithAuction `protobuf:"bytes,2,rep,name=EpochsConfigWithAuction,proto3" json:"EpochsConfigWithAuction,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` +} + +func (m *NodesCoordinatorRegistryWithAuction) Reset() { *m = NodesCoordinatorRegistryWithAuction{} } +func (*NodesCoordinatorRegistryWithAuction) ProtoMessage() {} +func (*NodesCoordinatorRegistryWithAuction) Descriptor() ([]byte, []int) { + return fileDescriptor_f04461c784f438d5, []int{3} +} +func (m *NodesCoordinatorRegistryWithAuction) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *NodesCoordinatorRegistryWithAuction) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *NodesCoordinatorRegistryWithAuction) XXX_Merge(src proto.Message) { + xxx_messageInfo_NodesCoordinatorRegistryWithAuction.Merge(m, src) +} +func (m *NodesCoordinatorRegistryWithAuction) XXX_Size() int { + return m.Size() +} +func (m *NodesCoordinatorRegistryWithAuction) XXX_DiscardUnknown() { + xxx_messageInfo_NodesCoordinatorRegistryWithAuction.DiscardUnknown(m) +} + +var xxx_messageInfo_NodesCoordinatorRegistryWithAuction proto.InternalMessageInfo + +func (m *NodesCoordinatorRegistryWithAuction) GetCurrentEpoch() uint32 { + if m != nil { + return m.CurrentEpoch + } + return 0 +} + +func (m *NodesCoordinatorRegistryWithAuction) GetEpochsConfigWithAuction() map[string]*EpochValidatorsWithAuction { + if m != nil { + return m.EpochsConfigWithAuction + } + return nil +} + +func init() { + proto.RegisterType((*SerializableValidator)(nil), "proto.SerializableValidator") + proto.RegisterType((*Validators)(nil), "proto.Validators") + proto.RegisterType((*EpochValidatorsWithAuction)(nil), "proto.EpochValidatorsWithAuction") + proto.RegisterMapType((map[string]Validators)(nil), 
"proto.EpochValidatorsWithAuction.EligibleEntry") + proto.RegisterMapType((map[string]Validators)(nil), "proto.EpochValidatorsWithAuction.LeavingEntry") + proto.RegisterMapType((map[string]Validators)(nil), "proto.EpochValidatorsWithAuction.ShuffledOutEntry") + proto.RegisterMapType((map[string]Validators)(nil), "proto.EpochValidatorsWithAuction.WaitingEntry") + proto.RegisterType((*NodesCoordinatorRegistryWithAuction)(nil), "proto.NodesCoordinatorRegistryWithAuction") + proto.RegisterMapType((map[string]*EpochValidatorsWithAuction)(nil), "proto.NodesCoordinatorRegistryWithAuction.EpochsConfigWithAuctionEntry") +} + +func init() { + proto.RegisterFile("nodesCoordinatorRegistryWithAuction.proto", fileDescriptor_f04461c784f438d5) +} + +var fileDescriptor_f04461c784f438d5 = []byte{ + // 561 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x94, 0x4f, 0x8f, 0xd2, 0x40, + 0x18, 0xc6, 0x3b, 0xb0, 0x80, 0xfb, 0x02, 0x09, 0x4e, 0x62, 0x6c, 0xc8, 0x66, 0xc0, 0x1a, 0x23, + 0x1e, 0x2c, 0x06, 0x0f, 0x1a, 0x0f, 0x26, 0x82, 0xc4, 0xf8, 0x0f, 0xdd, 0x6e, 0xe2, 0x26, 0x7b, + 0x6b, 0x61, 0x28, 0x13, 0xbb, 0x1d, 0x52, 0xa6, 0x1b, 0xf1, 0xa4, 0xf1, 0x0b, 0xf8, 0x31, 0x3c, + 0xf8, 0x11, 0xfc, 0x00, 0x7b, 0xe4, 0xc8, 0x89, 0x48, 0xb9, 0x18, 0x4e, 0xfb, 0x11, 0x0c, 0xd3, + 0xb2, 0x5b, 0x36, 0x8b, 0x6c, 0xb2, 0x9e, 0x98, 0x3e, 0x33, 0xcf, 0xef, 0x19, 0x1e, 0x5e, 0x0a, + 0xf7, 0x5c, 0xde, 0xa1, 0x83, 0x06, 0xe7, 0x5e, 0x87, 0xb9, 0xa6, 0xe0, 0x9e, 0x41, 0x6d, 0x36, + 0x10, 0xde, 0x70, 0x9f, 0x89, 0xde, 0x33, 0xbf, 0x2d, 0x18, 0x77, 0xf5, 0xbe, 0xc7, 0x05, 0xc7, + 0x29, 0xf9, 0x51, 0xbc, 0x6f, 0x33, 0xd1, 0xf3, 0x2d, 0xbd, 0xcd, 0x0f, 0xab, 0x36, 0xb7, 0x79, + 0x55, 0xca, 0x96, 0xdf, 0x95, 0x4f, 0xf2, 0x41, 0xae, 0x42, 0x97, 0xf6, 0x0d, 0xc1, 0x8d, 0x3d, + 0xea, 0x31, 0xd3, 0x61, 0x9f, 0x4d, 0xcb, 0xa1, 0x1f, 0x4c, 0x87, 0x75, 0x16, 0x41, 0x58, 0x83, + 0xf4, 0x7b, 0xdf, 0x7a, 0x4d, 0x87, 0x2a, 0x2a, 0xa3, 0x4a, 0xae, 0x0e, 0xf3, 0x49, 0x29, 0xdd, + 0x97, 0x8a, 0x11, 0xed, 0xe0, 0x3b, 0x90, 0x69, 0xf4, 0x4c, 0xb7, 0x4d, 0x07, 0x6a, 0xa2, 0x8c, + 0x2a, 0xf9, 0x7a, 0x76, 0x3e, 0x29, 0x65, 0xda, 0xa1, 0x64, 0x2c, 0xf7, 0x70, 0x09, 0x52, 0x2f, + 0xdd, 0x0e, 0xfd, 0xa4, 0x26, 0xe5, 0xa1, 0xed, 0xf9, 0xa4, 0x94, 0x62, 0x0b, 0xc1, 0x08, 0x75, + 0xed, 0x29, 0xc0, 0x69, 0xf0, 0x00, 0x3f, 0x80, 0xad, 0xe7, 0xa6, 0x30, 0x55, 0x54, 0x4e, 0x56, + 0xb2, 0xb5, 0x9d, 0xf0, 0xa6, 0xfa, 0x85, 0xb7, 0x34, 0xe4, 0x49, 0xed, 0x67, 0x0a, 0x8a, 0xcd, + 0x3e, 0x6f, 0xf7, 0xce, 0x28, 0xb1, 0x82, 0xf0, 0x2e, 0x5c, 0x6b, 0x3a, 0xcc, 0x66, 0x96, 0x43, + 0x23, 0x68, 0x35, 0x82, 0xae, 0x37, 0xe9, 0x4b, 0x47, 0xd3, 0x15, 0xde, 0xb0, 0xbe, 0x75, 0x3c, + 0x29, 0x29, 0xc6, 0x29, 0x06, 0xb7, 0x20, 0xb3, 0x6f, 0x32, 0xc1, 0x5c, 0x5b, 0x4d, 0x48, 0xa2, + 0xbe, 0x99, 0x18, 0x19, 0xe2, 0xc0, 0x25, 0x64, 0xc1, 0x7b, 0x43, 0xcd, 0xa3, 0x05, 0x2f, 0x79, + 0x59, 0x5e, 0x64, 0x58, 0xe1, 0x45, 0x1a, 0x3e, 0x80, 0xec, 0x5e, 0xcf, 0xef, 0x76, 0x1d, 0xda, + 0x79, 0xe7, 0x0b, 0x75, 0x4b, 0x32, 0x6b, 0x9b, 0x99, 0x31, 0x53, 0x9c, 0x1b, 0x87, 0x15, 0x5b, + 0x90, 0x5f, 0x29, 0x07, 0x17, 0x20, 0xf9, 0x31, 0x9a, 0x93, 0x6d, 0x63, 0xb1, 0xc4, 0x77, 0x21, + 0x75, 0x64, 0x3a, 0x3e, 0x95, 0x63, 0x91, 0xad, 0x5d, 0x8f, 0x82, 0xcf, 0x32, 0x8d, 0x70, 0xff, + 0x49, 0xe2, 0x31, 0x2a, 0xbe, 0x85, 0x5c, 0xbc, 0x9a, 0xff, 0x80, 0x8b, 0x37, 0x73, 0x55, 0xdc, + 0x2e, 0x14, 0xce, 0x97, 0x72, 0x45, 0xa4, 0xf6, 0x2b, 0x01, 0xb7, 0x5b, 0x9b, 0xff, 0xd8, 0x58, + 0x83, 0x5c, 0xc3, 0xf7, 0x3c, 0xea, 0x0a, 0xf9, 0x8b, 0xc9, 0xbc, 
0xbc, 0xb1, 0xa2, 0xe1, 0xaf, + 0x08, 0x6e, 0xca, 0xd5, 0xa0, 0xc1, 0xdd, 0x2e, 0xb3, 0x63, 0xfe, 0x68, 0x32, 0x5f, 0x44, 0x77, + 0xb9, 0x44, 0xa2, 0xbe, 0x86, 0x24, 0xbf, 0xb5, 0xb1, 0x2e, 0xa7, 0x78, 0x08, 0x3b, 0xff, 0x32, + 0x5e, 0x50, 0xd7, 0xa3, 0xd5, 0xba, 0x6e, 0x6d, 0x1c, 0xcc, 0x58, 0x7d, 0xf5, 0x57, 0xa3, 0x29, + 0x51, 0xc6, 0x53, 0xa2, 0x9c, 0x4c, 0x09, 0xfa, 0x12, 0x10, 0xf4, 0x23, 0x20, 0xe8, 0x38, 0x20, + 0x68, 0x14, 0x10, 0x34, 0x0e, 0x08, 0xfa, 0x1d, 0x10, 0xf4, 0x27, 0x20, 0xca, 0x49, 0x40, 0xd0, + 0xf7, 0x19, 0x51, 0x46, 0x33, 0xa2, 0x8c, 0x67, 0x44, 0x39, 0x28, 0x9c, 0x7f, 0x9d, 0x5a, 0x69, + 0x19, 0xfc, 0xf0, 0x6f, 0x00, 0x00, 0x00, 0xff, 0xff, 0x45, 0x19, 0xc5, 0xc4, 0x69, 0x05, 0x00, + 0x00, +} + +func (this *SerializableValidator) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*SerializableValidator) + if !ok { + that2, ok := that.(SerializableValidator) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if !bytes.Equal(this.PubKey, that1.PubKey) { + return false + } + if this.Chances != that1.Chances { + return false + } + if this.Index != that1.Index { + return false + } + return true +} +func (this *Validators) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*Validators) + if !ok { + that2, ok := that.(Validators) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if len(this.Data) != len(that1.Data) { + return false + } + for i := range this.Data { + if !this.Data[i].Equal(that1.Data[i]) { + return false + } + } + return true +} +func (this *EpochValidatorsWithAuction) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*EpochValidatorsWithAuction) + if !ok { + that2, ok := that.(EpochValidatorsWithAuction) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if len(this.Eligible) != len(that1.Eligible) { + return false + } + for i := range this.Eligible { + a := this.Eligible[i] + b := that1.Eligible[i] + if !(&a).Equal(&b) { + return false + } + } + if len(this.Waiting) != len(that1.Waiting) { + return false + } + for i := range this.Waiting { + a := this.Waiting[i] + b := that1.Waiting[i] + if !(&a).Equal(&b) { + return false + } + } + if len(this.Leaving) != len(that1.Leaving) { + return false + } + for i := range this.Leaving { + a := this.Leaving[i] + b := that1.Leaving[i] + if !(&a).Equal(&b) { + return false + } + } + if len(this.ShuffledOut) != len(that1.ShuffledOut) { + return false + } + for i := range this.ShuffledOut { + a := this.ShuffledOut[i] + b := that1.ShuffledOut[i] + if !(&a).Equal(&b) { + return false + } + } + return true +} +func (this *NodesCoordinatorRegistryWithAuction) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*NodesCoordinatorRegistryWithAuction) + if !ok { + that2, ok := that.(NodesCoordinatorRegistryWithAuction) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.CurrentEpoch != that1.CurrentEpoch { + return false + } + if len(this.EpochsConfigWithAuction) != len(that1.EpochsConfigWithAuction) { + return false + } + for i := range 
this.EpochsConfigWithAuction { + if !this.EpochsConfigWithAuction[i].Equal(that1.EpochsConfigWithAuction[i]) { + return false + } + } + return true +} +func (this *SerializableValidator) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 7) + s = append(s, "&nodesCoordinator.SerializableValidator{") + s = append(s, "PubKey: "+fmt.Sprintf("%#v", this.PubKey)+",\n") + s = append(s, "Chances: "+fmt.Sprintf("%#v", this.Chances)+",\n") + s = append(s, "Index: "+fmt.Sprintf("%#v", this.Index)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func (this *Validators) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&nodesCoordinator.Validators{") + if this.Data != nil { + s = append(s, "Data: "+fmt.Sprintf("%#v", this.Data)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *EpochValidatorsWithAuction) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 8) + s = append(s, "&nodesCoordinator.EpochValidatorsWithAuction{") + keysForEligible := make([]string, 0, len(this.Eligible)) + for k, _ := range this.Eligible { + keysForEligible = append(keysForEligible, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForEligible) + mapStringForEligible := "map[string]Validators{" + for _, k := range keysForEligible { + mapStringForEligible += fmt.Sprintf("%#v: %#v,", k, this.Eligible[k]) + } + mapStringForEligible += "}" + if this.Eligible != nil { + s = append(s, "Eligible: "+mapStringForEligible+",\n") + } + keysForWaiting := make([]string, 0, len(this.Waiting)) + for k, _ := range this.Waiting { + keysForWaiting = append(keysForWaiting, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForWaiting) + mapStringForWaiting := "map[string]Validators{" + for _, k := range keysForWaiting { + mapStringForWaiting += fmt.Sprintf("%#v: %#v,", k, this.Waiting[k]) + } + mapStringForWaiting += "}" + if this.Waiting != nil { + s = append(s, "Waiting: "+mapStringForWaiting+",\n") + } + keysForLeaving := make([]string, 0, len(this.Leaving)) + for k, _ := range this.Leaving { + keysForLeaving = append(keysForLeaving, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForLeaving) + mapStringForLeaving := "map[string]Validators{" + for _, k := range keysForLeaving { + mapStringForLeaving += fmt.Sprintf("%#v: %#v,", k, this.Leaving[k]) + } + mapStringForLeaving += "}" + if this.Leaving != nil { + s = append(s, "Leaving: "+mapStringForLeaving+",\n") + } + keysForShuffledOut := make([]string, 0, len(this.ShuffledOut)) + for k, _ := range this.ShuffledOut { + keysForShuffledOut = append(keysForShuffledOut, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForShuffledOut) + mapStringForShuffledOut := "map[string]Validators{" + for _, k := range keysForShuffledOut { + mapStringForShuffledOut += fmt.Sprintf("%#v: %#v,", k, this.ShuffledOut[k]) + } + mapStringForShuffledOut += "}" + if this.ShuffledOut != nil { + s = append(s, "ShuffledOut: "+mapStringForShuffledOut+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *NodesCoordinatorRegistryWithAuction) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&nodesCoordinator.NodesCoordinatorRegistryWithAuction{") + s = append(s, "CurrentEpoch: "+fmt.Sprintf("%#v", this.CurrentEpoch)+",\n") + keysForEpochsConfigWithAuction := make([]string, 0, len(this.EpochsConfigWithAuction)) + for k, _ := range this.EpochsConfigWithAuction { + 
keysForEpochsConfigWithAuction = append(keysForEpochsConfigWithAuction, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForEpochsConfigWithAuction) + mapStringForEpochsConfigWithAuction := "map[string]*EpochValidatorsWithAuction{" + for _, k := range keysForEpochsConfigWithAuction { + mapStringForEpochsConfigWithAuction += fmt.Sprintf("%#v: %#v,", k, this.EpochsConfigWithAuction[k]) + } + mapStringForEpochsConfigWithAuction += "}" + if this.EpochsConfigWithAuction != nil { + s = append(s, "EpochsConfigWithAuction: "+mapStringForEpochsConfigWithAuction+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func valueToGoStringNodesCoordinatorRegistryWithAuction(v interface{}, typ string) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv) +} +func (m *SerializableValidator) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SerializableValidator) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SerializableValidator) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Index != 0 { + i = encodeVarintNodesCoordinatorRegistryWithAuction(dAtA, i, uint64(m.Index)) + i-- + dAtA[i] = 0x18 + } + if m.Chances != 0 { + i = encodeVarintNodesCoordinatorRegistryWithAuction(dAtA, i, uint64(m.Chances)) + i-- + dAtA[i] = 0x10 + } + if len(m.PubKey) > 0 { + i -= len(m.PubKey) + copy(dAtA[i:], m.PubKey) + i = encodeVarintNodesCoordinatorRegistryWithAuction(dAtA, i, uint64(len(m.PubKey))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *Validators) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Validators) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Validators) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Data) > 0 { + for iNdEx := len(m.Data) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Data[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintNodesCoordinatorRegistryWithAuction(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *EpochValidatorsWithAuction) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *EpochValidatorsWithAuction) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *EpochValidatorsWithAuction) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.ShuffledOut) > 0 { + keysForShuffledOut := make([]string, 0, len(m.ShuffledOut)) + for k := range m.ShuffledOut { + keysForShuffledOut = append(keysForShuffledOut, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForShuffledOut) + for iNdEx := len(keysForShuffledOut) - 1; iNdEx >= 0; iNdEx-- { + v := 
m.ShuffledOut[string(keysForShuffledOut[iNdEx])] + baseI := i + { + size, err := (&v).MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintNodesCoordinatorRegistryWithAuction(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i -= len(keysForShuffledOut[iNdEx]) + copy(dAtA[i:], keysForShuffledOut[iNdEx]) + i = encodeVarintNodesCoordinatorRegistryWithAuction(dAtA, i, uint64(len(keysForShuffledOut[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintNodesCoordinatorRegistryWithAuction(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x22 + } + } + if len(m.Leaving) > 0 { + keysForLeaving := make([]string, 0, len(m.Leaving)) + for k := range m.Leaving { + keysForLeaving = append(keysForLeaving, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForLeaving) + for iNdEx := len(keysForLeaving) - 1; iNdEx >= 0; iNdEx-- { + v := m.Leaving[string(keysForLeaving[iNdEx])] + baseI := i + { + size, err := (&v).MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintNodesCoordinatorRegistryWithAuction(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i -= len(keysForLeaving[iNdEx]) + copy(dAtA[i:], keysForLeaving[iNdEx]) + i = encodeVarintNodesCoordinatorRegistryWithAuction(dAtA, i, uint64(len(keysForLeaving[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintNodesCoordinatorRegistryWithAuction(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x1a + } + } + if len(m.Waiting) > 0 { + keysForWaiting := make([]string, 0, len(m.Waiting)) + for k := range m.Waiting { + keysForWaiting = append(keysForWaiting, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForWaiting) + for iNdEx := len(keysForWaiting) - 1; iNdEx >= 0; iNdEx-- { + v := m.Waiting[string(keysForWaiting[iNdEx])] + baseI := i + { + size, err := (&v).MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintNodesCoordinatorRegistryWithAuction(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i -= len(keysForWaiting[iNdEx]) + copy(dAtA[i:], keysForWaiting[iNdEx]) + i = encodeVarintNodesCoordinatorRegistryWithAuction(dAtA, i, uint64(len(keysForWaiting[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintNodesCoordinatorRegistryWithAuction(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x12 + } + } + if len(m.Eligible) > 0 { + keysForEligible := make([]string, 0, len(m.Eligible)) + for k := range m.Eligible { + keysForEligible = append(keysForEligible, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForEligible) + for iNdEx := len(keysForEligible) - 1; iNdEx >= 0; iNdEx-- { + v := m.Eligible[string(keysForEligible[iNdEx])] + baseI := i + { + size, err := (&v).MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintNodesCoordinatorRegistryWithAuction(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i -= len(keysForEligible[iNdEx]) + copy(dAtA[i:], keysForEligible[iNdEx]) + i = encodeVarintNodesCoordinatorRegistryWithAuction(dAtA, i, uint64(len(keysForEligible[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintNodesCoordinatorRegistryWithAuction(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *NodesCoordinatorRegistryWithAuction) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *NodesCoordinatorRegistryWithAuction) MarshalTo(dAtA []byte) (int, 
error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *NodesCoordinatorRegistryWithAuction) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.EpochsConfigWithAuction) > 0 { + keysForEpochsConfigWithAuction := make([]string, 0, len(m.EpochsConfigWithAuction)) + for k := range m.EpochsConfigWithAuction { + keysForEpochsConfigWithAuction = append(keysForEpochsConfigWithAuction, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForEpochsConfigWithAuction) + for iNdEx := len(keysForEpochsConfigWithAuction) - 1; iNdEx >= 0; iNdEx-- { + v := m.EpochsConfigWithAuction[string(keysForEpochsConfigWithAuction[iNdEx])] + baseI := i + if v != nil { + { + size, err := v.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintNodesCoordinatorRegistryWithAuction(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + i -= len(keysForEpochsConfigWithAuction[iNdEx]) + copy(dAtA[i:], keysForEpochsConfigWithAuction[iNdEx]) + i = encodeVarintNodesCoordinatorRegistryWithAuction(dAtA, i, uint64(len(keysForEpochsConfigWithAuction[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintNodesCoordinatorRegistryWithAuction(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x12 + } + } + if m.CurrentEpoch != 0 { + i = encodeVarintNodesCoordinatorRegistryWithAuction(dAtA, i, uint64(m.CurrentEpoch)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func encodeVarintNodesCoordinatorRegistryWithAuction(dAtA []byte, offset int, v uint64) int { + offset -= sovNodesCoordinatorRegistryWithAuction(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *SerializableValidator) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.PubKey) + if l > 0 { + n += 1 + l + sovNodesCoordinatorRegistryWithAuction(uint64(l)) + } + if m.Chances != 0 { + n += 1 + sovNodesCoordinatorRegistryWithAuction(uint64(m.Chances)) + } + if m.Index != 0 { + n += 1 + sovNodesCoordinatorRegistryWithAuction(uint64(m.Index)) + } + return n +} + +func (m *Validators) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Data) > 0 { + for _, e := range m.Data { + l = e.Size() + n += 1 + l + sovNodesCoordinatorRegistryWithAuction(uint64(l)) + } + } + return n +} + +func (m *EpochValidatorsWithAuction) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Eligible) > 0 { + for k, v := range m.Eligible { + _ = k + _ = v + l = v.Size() + mapEntrySize := 1 + len(k) + sovNodesCoordinatorRegistryWithAuction(uint64(len(k))) + 1 + l + sovNodesCoordinatorRegistryWithAuction(uint64(l)) + n += mapEntrySize + 1 + sovNodesCoordinatorRegistryWithAuction(uint64(mapEntrySize)) + } + } + if len(m.Waiting) > 0 { + for k, v := range m.Waiting { + _ = k + _ = v + l = v.Size() + mapEntrySize := 1 + len(k) + sovNodesCoordinatorRegistryWithAuction(uint64(len(k))) + 1 + l + sovNodesCoordinatorRegistryWithAuction(uint64(l)) + n += mapEntrySize + 1 + sovNodesCoordinatorRegistryWithAuction(uint64(mapEntrySize)) + } + } + if len(m.Leaving) > 0 { + for k, v := range m.Leaving { + _ = k + _ = v + l = v.Size() + mapEntrySize := 1 + len(k) + sovNodesCoordinatorRegistryWithAuction(uint64(len(k))) + 1 + l + sovNodesCoordinatorRegistryWithAuction(uint64(l)) + n += mapEntrySize + 1 + sovNodesCoordinatorRegistryWithAuction(uint64(mapEntrySize)) + } + } + if len(m.ShuffledOut) > 0 { + 
for k, v := range m.ShuffledOut { + _ = k + _ = v + l = v.Size() + mapEntrySize := 1 + len(k) + sovNodesCoordinatorRegistryWithAuction(uint64(len(k))) + 1 + l + sovNodesCoordinatorRegistryWithAuction(uint64(l)) + n += mapEntrySize + 1 + sovNodesCoordinatorRegistryWithAuction(uint64(mapEntrySize)) + } + } + return n +} + +func (m *NodesCoordinatorRegistryWithAuction) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.CurrentEpoch != 0 { + n += 1 + sovNodesCoordinatorRegistryWithAuction(uint64(m.CurrentEpoch)) + } + if len(m.EpochsConfigWithAuction) > 0 { + for k, v := range m.EpochsConfigWithAuction { + _ = k + _ = v + l = 0 + if v != nil { + l = v.Size() + l += 1 + sovNodesCoordinatorRegistryWithAuction(uint64(l)) + } + mapEntrySize := 1 + len(k) + sovNodesCoordinatorRegistryWithAuction(uint64(len(k))) + l + n += mapEntrySize + 1 + sovNodesCoordinatorRegistryWithAuction(uint64(mapEntrySize)) + } + } + return n +} + +func sovNodesCoordinatorRegistryWithAuction(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozNodesCoordinatorRegistryWithAuction(x uint64) (n int) { + return sovNodesCoordinatorRegistryWithAuction(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *SerializableValidator) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SerializableValidator{`, + `PubKey:` + fmt.Sprintf("%v", this.PubKey) + `,`, + `Chances:` + fmt.Sprintf("%v", this.Chances) + `,`, + `Index:` + fmt.Sprintf("%v", this.Index) + `,`, + `}`, + }, "") + return s +} +func (this *Validators) String() string { + if this == nil { + return "nil" + } + repeatedStringForData := "[]*SerializableValidator{" + for _, f := range this.Data { + repeatedStringForData += strings.Replace(f.String(), "SerializableValidator", "SerializableValidator", 1) + "," + } + repeatedStringForData += "}" + s := strings.Join([]string{`&Validators{`, + `Data:` + repeatedStringForData + `,`, + `}`, + }, "") + return s +} +func (this *EpochValidatorsWithAuction) String() string { + if this == nil { + return "nil" + } + keysForEligible := make([]string, 0, len(this.Eligible)) + for k, _ := range this.Eligible { + keysForEligible = append(keysForEligible, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForEligible) + mapStringForEligible := "map[string]Validators{" + for _, k := range keysForEligible { + mapStringForEligible += fmt.Sprintf("%v: %v,", k, this.Eligible[k]) + } + mapStringForEligible += "}" + keysForWaiting := make([]string, 0, len(this.Waiting)) + for k, _ := range this.Waiting { + keysForWaiting = append(keysForWaiting, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForWaiting) + mapStringForWaiting := "map[string]Validators{" + for _, k := range keysForWaiting { + mapStringForWaiting += fmt.Sprintf("%v: %v,", k, this.Waiting[k]) + } + mapStringForWaiting += "}" + keysForLeaving := make([]string, 0, len(this.Leaving)) + for k, _ := range this.Leaving { + keysForLeaving = append(keysForLeaving, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForLeaving) + mapStringForLeaving := "map[string]Validators{" + for _, k := range keysForLeaving { + mapStringForLeaving += fmt.Sprintf("%v: %v,", k, this.Leaving[k]) + } + mapStringForLeaving += "}" + keysForShuffledOut := make([]string, 0, len(this.ShuffledOut)) + for k, _ := range this.ShuffledOut { + keysForShuffledOut = append(keysForShuffledOut, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForShuffledOut) + mapStringForShuffledOut := "map[string]Validators{" + for _, k 
:= range keysForShuffledOut { + mapStringForShuffledOut += fmt.Sprintf("%v: %v,", k, this.ShuffledOut[k]) + } + mapStringForShuffledOut += "}" + s := strings.Join([]string{`&EpochValidatorsWithAuction{`, + `Eligible:` + mapStringForEligible + `,`, + `Waiting:` + mapStringForWaiting + `,`, + `Leaving:` + mapStringForLeaving + `,`, + `ShuffledOut:` + mapStringForShuffledOut + `,`, + `}`, + }, "") + return s +} +func (this *NodesCoordinatorRegistryWithAuction) String() string { + if this == nil { + return "nil" + } + keysForEpochsConfigWithAuction := make([]string, 0, len(this.EpochsConfigWithAuction)) + for k, _ := range this.EpochsConfigWithAuction { + keysForEpochsConfigWithAuction = append(keysForEpochsConfigWithAuction, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForEpochsConfigWithAuction) + mapStringForEpochsConfigWithAuction := "map[string]*EpochValidatorsWithAuction{" + for _, k := range keysForEpochsConfigWithAuction { + mapStringForEpochsConfigWithAuction += fmt.Sprintf("%v: %v,", k, this.EpochsConfigWithAuction[k]) + } + mapStringForEpochsConfigWithAuction += "}" + s := strings.Join([]string{`&NodesCoordinatorRegistryWithAuction{`, + `CurrentEpoch:` + fmt.Sprintf("%v", this.CurrentEpoch) + `,`, + `EpochsConfigWithAuction:` + mapStringForEpochsConfigWithAuction + `,`, + `}`, + }, "") + return s +} +func valueToStringNodesCoordinatorRegistryWithAuction(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *SerializableValidator) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowNodesCoordinatorRegistryWithAuction + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SerializableValidator: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SerializableValidator: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PubKey", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowNodesCoordinatorRegistryWithAuction + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthNodesCoordinatorRegistryWithAuction + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthNodesCoordinatorRegistryWithAuction + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PubKey = append(m.PubKey[:0], dAtA[iNdEx:postIndex]...) 
+ if m.PubKey == nil { + m.PubKey = []byte{} + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Chances", wireType) + } + m.Chances = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowNodesCoordinatorRegistryWithAuction + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Chances |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Index", wireType) + } + m.Index = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowNodesCoordinatorRegistryWithAuction + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Index |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipNodesCoordinatorRegistryWithAuction(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthNodesCoordinatorRegistryWithAuction + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthNodesCoordinatorRegistryWithAuction + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Validators) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowNodesCoordinatorRegistryWithAuction + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Validators: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Validators: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowNodesCoordinatorRegistryWithAuction + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthNodesCoordinatorRegistryWithAuction + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthNodesCoordinatorRegistryWithAuction + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Data = append(m.Data, &SerializableValidator{}) + if err := m.Data[len(m.Data)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipNodesCoordinatorRegistryWithAuction(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthNodesCoordinatorRegistryWithAuction + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthNodesCoordinatorRegistryWithAuction + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *EpochValidatorsWithAuction) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflowNodesCoordinatorRegistryWithAuction + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: EpochValidatorsWithAuction: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: EpochValidatorsWithAuction: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Eligible", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowNodesCoordinatorRegistryWithAuction + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthNodesCoordinatorRegistryWithAuction + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthNodesCoordinatorRegistryWithAuction + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Eligible == nil { + m.Eligible = make(map[string]Validators) + } + var mapkey string + mapvalue := &Validators{} + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowNodesCoordinatorRegistryWithAuction + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowNodesCoordinatorRegistryWithAuction + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthNodesCoordinatorRegistryWithAuction + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthNodesCoordinatorRegistryWithAuction + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowNodesCoordinatorRegistryWithAuction + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthNodesCoordinatorRegistryWithAuction + } + postmsgIndex := iNdEx + mapmsglen + if postmsgIndex < 0 { + return ErrInvalidLengthNodesCoordinatorRegistryWithAuction + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &Validators{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipNodesCoordinatorRegistryWithAuction(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthNodesCoordinatorRegistryWithAuction + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Eligible[mapkey] = *mapvalue + iNdEx = postIndex + case 2: + if wireType 
!= 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Waiting", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowNodesCoordinatorRegistryWithAuction + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthNodesCoordinatorRegistryWithAuction + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthNodesCoordinatorRegistryWithAuction + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Waiting == nil { + m.Waiting = make(map[string]Validators) + } + var mapkey string + mapvalue := &Validators{} + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowNodesCoordinatorRegistryWithAuction + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowNodesCoordinatorRegistryWithAuction + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthNodesCoordinatorRegistryWithAuction + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthNodesCoordinatorRegistryWithAuction + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowNodesCoordinatorRegistryWithAuction + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthNodesCoordinatorRegistryWithAuction + } + postmsgIndex := iNdEx + mapmsglen + if postmsgIndex < 0 { + return ErrInvalidLengthNodesCoordinatorRegistryWithAuction + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &Validators{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipNodesCoordinatorRegistryWithAuction(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthNodesCoordinatorRegistryWithAuction + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Waiting[mapkey] = *mapvalue + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Leaving", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowNodesCoordinatorRegistryWithAuction + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthNodesCoordinatorRegistryWithAuction + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return 
ErrInvalidLengthNodesCoordinatorRegistryWithAuction + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Leaving == nil { + m.Leaving = make(map[string]Validators) + } + var mapkey string + mapvalue := &Validators{} + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowNodesCoordinatorRegistryWithAuction + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowNodesCoordinatorRegistryWithAuction + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthNodesCoordinatorRegistryWithAuction + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthNodesCoordinatorRegistryWithAuction + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowNodesCoordinatorRegistryWithAuction + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthNodesCoordinatorRegistryWithAuction + } + postmsgIndex := iNdEx + mapmsglen + if postmsgIndex < 0 { + return ErrInvalidLengthNodesCoordinatorRegistryWithAuction + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &Validators{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipNodesCoordinatorRegistryWithAuction(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthNodesCoordinatorRegistryWithAuction + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Leaving[mapkey] = *mapvalue + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ShuffledOut", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowNodesCoordinatorRegistryWithAuction + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthNodesCoordinatorRegistryWithAuction + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthNodesCoordinatorRegistryWithAuction + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ShuffledOut == nil { + m.ShuffledOut = make(map[string]Validators) + } + var mapkey string + mapvalue := &Validators{} + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowNodesCoordinatorRegistryWithAuction + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 
0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowNodesCoordinatorRegistryWithAuction + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthNodesCoordinatorRegistryWithAuction + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthNodesCoordinatorRegistryWithAuction + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowNodesCoordinatorRegistryWithAuction + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthNodesCoordinatorRegistryWithAuction + } + postmsgIndex := iNdEx + mapmsglen + if postmsgIndex < 0 { + return ErrInvalidLengthNodesCoordinatorRegistryWithAuction + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &Validators{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipNodesCoordinatorRegistryWithAuction(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthNodesCoordinatorRegistryWithAuction + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.ShuffledOut[mapkey] = *mapvalue + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipNodesCoordinatorRegistryWithAuction(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthNodesCoordinatorRegistryWithAuction + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthNodesCoordinatorRegistryWithAuction + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *NodesCoordinatorRegistryWithAuction) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowNodesCoordinatorRegistryWithAuction + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NodesCoordinatorRegistryWithAuction: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NodesCoordinatorRegistryWithAuction: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field CurrentEpoch", wireType) + } + m.CurrentEpoch = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowNodesCoordinatorRegistryWithAuction + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.CurrentEpoch |= uint32(b&0x7F) << shift + if 
b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field EpochsConfigWithAuction", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowNodesCoordinatorRegistryWithAuction + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthNodesCoordinatorRegistryWithAuction + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthNodesCoordinatorRegistryWithAuction + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.EpochsConfigWithAuction == nil { + m.EpochsConfigWithAuction = make(map[string]*EpochValidatorsWithAuction) + } + var mapkey string + var mapvalue *EpochValidatorsWithAuction + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowNodesCoordinatorRegistryWithAuction + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowNodesCoordinatorRegistryWithAuction + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthNodesCoordinatorRegistryWithAuction + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthNodesCoordinatorRegistryWithAuction + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowNodesCoordinatorRegistryWithAuction + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthNodesCoordinatorRegistryWithAuction + } + postmsgIndex := iNdEx + mapmsglen + if postmsgIndex < 0 { + return ErrInvalidLengthNodesCoordinatorRegistryWithAuction + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &EpochValidatorsWithAuction{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipNodesCoordinatorRegistryWithAuction(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthNodesCoordinatorRegistryWithAuction + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.EpochsConfigWithAuction[mapkey] = mapvalue + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipNodesCoordinatorRegistryWithAuction(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthNodesCoordinatorRegistryWithAuction + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthNodesCoordinatorRegistryWithAuction + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { 
+ return io.ErrUnexpectedEOF + } + return nil +} +func skipNodesCoordinatorRegistryWithAuction(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowNodesCoordinatorRegistryWithAuction + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowNodesCoordinatorRegistryWithAuction + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowNodesCoordinatorRegistryWithAuction + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthNodesCoordinatorRegistryWithAuction + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupNodesCoordinatorRegistryWithAuction + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthNodesCoordinatorRegistryWithAuction + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthNodesCoordinatorRegistryWithAuction = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowNodesCoordinatorRegistryWithAuction = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupNodesCoordinatorRegistryWithAuction = fmt.Errorf("proto: unexpected end of group") +) diff --git a/sharding/nodesCoordinator/nodesCoordinatorRegistryWithAuction.proto b/sharding/nodesCoordinator/nodesCoordinatorRegistryWithAuction.proto new file mode 100644 index 00000000000..3ff1c90acb1 --- /dev/null +++ b/sharding/nodesCoordinator/nodesCoordinatorRegistryWithAuction.proto @@ -0,0 +1,30 @@ +syntax = "proto3"; + +package proto; + +option go_package = "nodesCoordinator"; +option (gogoproto.stable_marshaler_all) = true; + +import "github.com/gogo/protobuf/gogoproto/gogo.proto"; + +message SerializableValidator { + bytes PubKey = 1 [(gogoproto.jsontag) = "pubKey"]; + uint32 Chances = 2 [(gogoproto.jsontag) = "chances"]; + uint32 Index = 3 [(gogoproto.jsontag) = "index"]; +} + +message Validators { + repeated SerializableValidator Data = 1; +} + +message EpochValidatorsWithAuction { + map<string, Validators> Eligible = 1 [(gogoproto.nullable) = false]; + map<string, Validators> Waiting = 2 [(gogoproto.nullable) = false]; + map<string, Validators> Leaving = 3 [(gogoproto.nullable) = false]; + map<string, Validators> ShuffledOut = 4 [(gogoproto.nullable) = false]; +} + +message NodesCoordinatorRegistryWithAuction { + uint32 CurrentEpoch = 1; + map<string, EpochValidatorsWithAuction> EpochsConfigWithAuction = 2; +} diff --git a/sharding/nodesCoordinator/shardingArgs.go b/sharding/nodesCoordinator/shardingArgs.go index 71a6b2684c3..67c542952d7 100644 --- a/sharding/nodesCoordinator/shardingArgs.go +++ b/sharding/nodesCoordinator/shardingArgs.go @@ -11,26 +11,27 @@ import ( // ArgNodesCoordinator holds all dependencies required by the nodes coordinator in order to create new instances type ArgNodesCoordinator struct { - ShardConsensusGroupSize int - MetaConsensusGroupSize int - Marshalizer 
marshal.Marshalizer - Hasher hashing.Hasher - Shuffler NodesShuffler - EpochStartNotifier EpochStartEventNotifier - BootStorer storage.Storer - ShardIDAsObserver uint32 - NbShards uint32 - EligibleNodes map[uint32][]Validator - WaitingNodes map[uint32][]Validator - SelfPublicKey []byte - Epoch uint32 - StartEpoch uint32 - ConsensusGroupCache Cacher - ShuffledOutHandler ShuffledOutHandler - ChanStopNode chan endProcess.ArgEndProcess - NodeTypeProvider NodeTypeProviderHandler - IsFullArchive bool - EnableEpochsHandler common.EnableEpochsHandler - ValidatorInfoCacher epochStart.ValidatorInfoCacher - GenesisNodesSetupHandler GenesisNodesSetupHandler + ShardConsensusGroupSize int + MetaConsensusGroupSize int + Marshalizer marshal.Marshalizer + Hasher hashing.Hasher + Shuffler NodesShuffler + EpochStartNotifier EpochStartEventNotifier + BootStorer storage.Storer + ShardIDAsObserver uint32 + NbShards uint32 + EligibleNodes map[uint32][]Validator + WaitingNodes map[uint32][]Validator + SelfPublicKey []byte + Epoch uint32 + StartEpoch uint32 + ConsensusGroupCache Cacher + ShuffledOutHandler ShuffledOutHandler + ChanStopNode chan endProcess.ArgEndProcess + NodeTypeProvider NodeTypeProviderHandler + IsFullArchive bool + EnableEpochsHandler common.EnableEpochsHandler + ValidatorInfoCacher epochStart.ValidatorInfoCacher + GenesisNodesSetupHandler GenesisNodesSetupHandler + NodesCoordinatorRegistryFactory NodesCoordinatorRegistryFactory } diff --git a/state/accounts/peerAccount.go b/state/accounts/peerAccount.go index a63b71ff040..8900edc6f1b 100644 --- a/state/accounts/peerAccount.go +++ b/state/accounts/peerAccount.go @@ -100,7 +100,12 @@ func (pa *peerAccount) SetTempRating(rating uint32) { } // SetListAndIndex will update the peer's list (eligible, waiting) and the index inside it with journal -func (pa *peerAccount) SetListAndIndex(shardID uint32, list string, index uint32) { +func (pa *peerAccount) SetListAndIndex(shardID uint32, list string, index uint32, updatePreviousValues bool) { + if updatePreviousValues { + pa.PreviousList = pa.List + pa.PreviousIndexInList = pa.IndexInList + } + pa.ShardId = shardID pa.List = list pa.IndexInList = index @@ -158,6 +163,11 @@ func (pa *peerAccount) GetTotalValidatorSuccessRate() state.SignRate { return &pa.TotalValidatorSuccessRate } +// SetPreviousList sets validator's previous list +func (pa *peerAccount) SetPreviousList(list string) { + pa.PreviousList = list +} + // IsInterfaceNil return if there is no value under the interface func (pa *peerAccount) IsInterfaceNil() bool { return pa == nil diff --git a/state/accounts/peerAccountData.pb.go b/state/accounts/peerAccountData.pb.go index 42b84c24dda..eb0a6ef69d9 100644 --- a/state/accounts/peerAccountData.pb.go +++ b/state/accounts/peerAccountData.pb.go @@ -96,6 +96,8 @@ type PeerAccountData struct { TotalValidatorIgnoredSignaturesRate uint32 `protobuf:"varint,16,opt,name=TotalValidatorIgnoredSignaturesRate,proto3" json:"totalValidatorIgnoredSignaturesRate"` Nonce uint64 `protobuf:"varint,17,opt,name=Nonce,proto3" json:"nonce"` UnStakedEpoch uint32 `protobuf:"varint,18,opt,name=UnStakedEpoch,proto3" json:"unStakedEpoch"` + PreviousList string `protobuf:"bytes,19,opt,name=PreviousList,proto3" json:"previousList,omitempty"` + PreviousIndexInList uint32 `protobuf:"varint,20,opt,name=PreviousIndexInList,proto3" json:"previousIndexInList,omitempty"` } func (m *PeerAccountData) Reset() { *m = PeerAccountData{} } @@ -252,6 +254,20 @@ func (m *PeerAccountData) GetUnStakedEpoch() uint32 { return 0 } +func (m 
*PeerAccountData) GetPreviousList() string { + if m != nil { + return m.PreviousList + } + return "" +} + +func (m *PeerAccountData) GetPreviousIndexInList() uint32 { + if m != nil { + return m.PreviousIndexInList + } + return 0 +} + func init() { proto.RegisterType((*SignRate)(nil), "proto.SignRate") proto.RegisterType((*PeerAccountData)(nil), "proto.PeerAccountData") @@ -260,56 +276,59 @@ func init() { func init() { proto.RegisterFile("peerAccountData.proto", fileDescriptor_26bd0314afcce126) } var fileDescriptor_26bd0314afcce126 = []byte{ - // 774 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x54, 0xcf, 0x8f, 0xdb, 0x44, - 0x18, 0x8d, 0xcb, 0x26, 0xbb, 0x3b, 0x49, 0x36, 0xcd, 0xa8, 0x14, 0x67, 0x81, 0x99, 0x90, 0x0a, - 0xc8, 0x81, 0x24, 0xe2, 0x87, 0xc4, 0x81, 0x53, 0x5c, 0x5a, 0x29, 0xb0, 0x54, 0xab, 0x49, 0x41, - 0x08, 0x24, 0xa4, 0xc9, 0x78, 0xea, 0x98, 0xda, 0x9e, 0x68, 0x66, 0xbc, 0xec, 0xde, 0xb8, 0x72, - 0xeb, 0x9f, 0x81, 0xf8, 0x4b, 0x7a, 0xdc, 0xe3, 0x9e, 0x0c, 0xeb, 0xbd, 0x20, 0x9f, 0xfa, 0x27, - 0x20, 0x4f, 0xdc, 0x34, 0x69, 0x9d, 0x5d, 0x4e, 0x89, 0xdf, 0x7b, 0xdf, 0xfb, 0xbe, 0x79, 0xfe, - 0xc6, 0xe0, 0xed, 0x05, 0xe7, 0x72, 0xcc, 0x98, 0x88, 0x23, 0xfd, 0x35, 0xd5, 0x74, 0xb8, 0x90, - 0x42, 0x0b, 0x58, 0x35, 0x3f, 0x87, 0x03, 0xcf, 0xd7, 0xf3, 0x78, 0x36, 0x64, 0x22, 0x1c, 0x79, - 0xc2, 0x13, 0x23, 0x03, 0xcf, 0xe2, 0x27, 0xe6, 0xc9, 0x3c, 0x98, 0x7f, 0xcb, 0xaa, 0xde, 0x37, - 0x60, 0x6f, 0xea, 0x7b, 0x11, 0xa1, 0x9a, 0x43, 0x04, 0xc0, 0xa3, 0x38, 0x9c, 0xc6, 0x8c, 0x71, - 0xa5, 0x6c, 0xab, 0x6b, 0xf5, 0x9b, 0x64, 0x0d, 0x29, 0xf8, 0x87, 0xd4, 0x0f, 0x62, 0xc9, 0xed, - 0x5b, 0x2b, 0xbe, 0x40, 0x7a, 0x7f, 0xd4, 0x41, 0xeb, 0x78, 0x73, 0x36, 0xf8, 0x05, 0x68, 0x38, - 0x47, 0xd3, 0xe3, 0x78, 0x16, 0xf8, 0xec, 0x5b, 0x7e, 0x66, 0x5c, 0x1b, 0xce, 0xed, 0x2c, 0xc1, - 0x8d, 0x59, 0xa0, 0x56, 0x38, 0xd9, 0x50, 0xc1, 0x31, 0x68, 0x12, 0xfe, 0x1b, 0x95, 0xee, 0xd8, - 0x75, 0x65, 0x3e, 0xcc, 0x2d, 0x53, 0xf6, 0x6e, 0x96, 0xe0, 0x77, 0xe4, 0x3a, 0xf1, 0x89, 0x08, - 0x7d, 0xcd, 0xc3, 0x85, 0x3e, 0x23, 0x9b, 0x15, 0xf0, 0x43, 0xb0, 0x3b, 0x9d, 0x53, 0xe9, 0x4e, - 0x5c, 0xfb, 0xad, 0x7c, 0x52, 0xa7, 0x9e, 0x25, 0x78, 0x57, 0x2d, 0x21, 0xf2, 0x92, 0x83, 0x14, - 0xdc, 0xf9, 0x81, 0x06, 0xbe, 0x4b, 0xb5, 0x90, 0xc5, 0x39, 0xf3, 0x2c, 0xec, 0x9d, 0xae, 0xd5, - 0xaf, 0x7f, 0xd6, 0x5a, 0xa6, 0x34, 0x7c, 0x19, 0x91, 0xf3, 0xde, 0xf3, 0x04, 0x57, 0xb2, 0x04, - 0xdf, 0x39, 0x29, 0x29, 0x22, 0xa5, 0x56, 0xf0, 0x47, 0xd0, 0x3e, 0xe2, 0xd4, 0xe5, 0x1b, 0xfe, - 0xd5, 0x72, 0xff, 0x4e, 0xe1, 0xdf, 0x0e, 0x5e, 0xaf, 0x20, 0x6f, 0x9a, 0xc0, 0x5f, 0x01, 0x5a, - 0x75, 0x9c, 0x78, 0x91, 0x90, 0xdc, 0xcd, 0x9d, 0xa8, 0x8e, 0x25, 0x5f, 0xb6, 0xa9, 0x99, 0xa3, - 0xf7, 0xb2, 0x04, 0xa3, 0x93, 0x6b, 0x95, 0xe4, 0x06, 0x27, 0xd8, 0x03, 0x35, 0x42, 0xb5, 0x1f, - 0x79, 0xf6, 0xae, 0xf1, 0x04, 0x59, 0x82, 0x6b, 0xd2, 0x20, 0xa4, 0x60, 0xe0, 0x10, 0x80, 0xc7, - 0x3c, 0x5c, 0x14, 0xba, 0x3d, 0xa3, 0x3b, 0xc8, 0x12, 0x0c, 0xf4, 0x0a, 0x25, 0x6b, 0x0a, 0xf8, - 0xcc, 0x02, 0xad, 0x31, 0x63, 0x71, 0x18, 0x07, 0x54, 0x73, 0xf7, 0x21, 0xe7, 0xca, 0xde, 0x37, - 0x6f, 0xfa, 0x49, 0x96, 0xe0, 0x0e, 0xdd, 0xa4, 0x5e, 0xbd, 0xeb, 0xbf, 0xfe, 0xc6, 0x0f, 0x42, - 0xaa, 0xe7, 0xa3, 0x99, 0xef, 0x0d, 0x27, 0x91, 0xfe, 0x6a, 0x6d, 0xe7, 0xc3, 0x38, 0xd0, 0xfe, - 0x09, 0x97, 0xea, 0x74, 0x14, 0x9e, 0x0e, 0xd8, 0x9c, 0xfa, 0xd1, 0x80, 0x09, 0xc9, 0x07, 0x9e, - 0x18, 0xb9, 0xf9, 0x6d, 0x71, 0x7c, 0x6f, 0x12, 0xe9, 0xfb, 0x54, 0x69, 0x2e, 0xc9, 0xeb, 0xed, - 0xe1, 0x2f, 0xe0, 
0x30, 0xdf, 0x78, 0x1e, 0x70, 0xa6, 0xb9, 0x3b, 0x89, 0x8a, 0xb8, 0x9d, 0x40, - 0xb0, 0xa7, 0xca, 0x06, 0xe6, 0x48, 0x28, 0x4b, 0xf0, 0x61, 0xb4, 0x55, 0x45, 0xae, 0x71, 0x80, - 0x9f, 0x82, 0xfa, 0x24, 0x72, 0xf9, 0xe9, 0x24, 0x3a, 0xf2, 0x95, 0xb6, 0xeb, 0xc6, 0xb0, 0x95, - 0x25, 0xb8, 0xee, 0xbf, 0x82, 0xc9, 0xba, 0x06, 0x7e, 0x04, 0x76, 0x8c, 0xb6, 0xd1, 0xb5, 0xfa, - 0xfb, 0x0e, 0xcc, 0x12, 0x7c, 0x10, 0xf8, 0x4a, 0xaf, 0xad, 0xbe, 0xe1, 0xe1, 0xcf, 0xa0, 0x73, - 0x5f, 0x44, 0x8a, 0xb3, 0x38, 0x0f, 0xe0, 0x58, 0x8a, 0x85, 0x50, 0x5c, 0x7e, 0xe7, 0x2b, 0xc5, - 0x95, 0xdd, 0x34, 0x8d, 0xde, 0xcf, 0x63, 0x65, 0xdb, 0x44, 0x64, 0x7b, 0x3d, 0x5c, 0x80, 0xce, - 0x63, 0xa1, 0x69, 0x50, 0x7a, 0x59, 0x0e, 0xca, 0x97, 0xf9, 0x83, 0x62, 0x99, 0x3b, 0x7a, 0x5b, - 0x25, 0xd9, 0x6e, 0x0a, 0x3d, 0x70, 0xd7, 0x90, 0x6f, 0xde, 0x9d, 0x56, 0x79, 0x3b, 0x54, 0xb4, - 0xbb, 0xab, 0x4b, 0xcb, 0xc8, 0x16, 0x3b, 0x78, 0x06, 0xee, 0x6d, 0x4e, 0x51, 0x7e, 0x95, 0x6e, - 0x9b, 0x04, 0x3f, 0xce, 0x12, 0x7c, 0x4f, 0xdf, 0x2c, 0x27, 0xff, 0xc7, 0x13, 0x62, 0x50, 0x7d, - 0x24, 0x22, 0xc6, 0xed, 0x76, 0xd7, 0xea, 0xef, 0x38, 0xfb, 0x59, 0x82, 0xab, 0x51, 0x0e, 0x90, - 0x25, 0x0e, 0xbf, 0x04, 0xcd, 0xef, 0xa3, 0xa9, 0xa6, 0x4f, 0xb9, 0xfb, 0x60, 0x21, 0xd8, 0xdc, - 0x86, 0x66, 0x8a, 0x76, 0x96, 0xe0, 0x66, 0xbc, 0x4e, 0x90, 0x4d, 0x9d, 0xe3, 0x9c, 0x5f, 0xa2, - 0xca, 0xc5, 0x25, 0xaa, 0xbc, 0xb8, 0x44, 0xd6, 0xef, 0x29, 0xb2, 0xfe, 0x4c, 0x91, 0xf5, 0x3c, - 0x45, 0xd6, 0x79, 0x8a, 0xac, 0x8b, 0x14, 0x59, 0xff, 0xa4, 0xc8, 0xfa, 0x37, 0x45, 0x95, 0x17, - 0x29, 0xb2, 0x9e, 0x5d, 0xa1, 0xca, 0xf9, 0x15, 0xaa, 0x5c, 0x5c, 0xa1, 0xca, 0x4f, 0x7b, 0x74, - 0xf9, 0xf9, 0x56, 0xb3, 0x9a, 0x09, 0xf8, 0xf3, 0xff, 0x02, 0x00, 0x00, 0xff, 0xff, 0x7c, 0xff, - 0x1c, 0x23, 0x71, 0x06, 0x00, 0x00, + // 822 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x55, 0x4f, 0x6f, 0xdb, 0x36, + 0x1c, 0xb5, 0xba, 0xfc, 0xa5, 0xed, 0xb8, 0x61, 0xb3, 0x4e, 0xce, 0x56, 0x32, 0x4d, 0xb1, 0x2d, + 0x87, 0xc5, 0xc6, 0xfe, 0x00, 0x3b, 0x0c, 0x18, 0x10, 0x75, 0x2d, 0xe0, 0x2d, 0x2b, 0x02, 0xba, + 0x1b, 0x86, 0x0d, 0x18, 0x40, 0x4b, 0xac, 0xcc, 0x55, 0x12, 0x05, 0x92, 0xca, 0x92, 0xdb, 0x3e, + 0x42, 0x3f, 0xc1, 0xce, 0xc3, 0x3e, 0x49, 0x8f, 0x39, 0xe6, 0xc4, 0x2d, 0xce, 0x65, 0xd0, 0xa9, + 0x1f, 0x61, 0x10, 0xad, 0xb8, 0x72, 0x23, 0xb7, 0x3d, 0xd9, 0x7c, 0xef, 0xfd, 0xde, 0x8f, 0xfc, + 0xf1, 0x11, 0x02, 0xef, 0xa6, 0x8c, 0xc9, 0x03, 0xdf, 0x17, 0x59, 0xa2, 0xbf, 0xa1, 0x9a, 0xf6, + 0x52, 0x29, 0xb4, 0x80, 0xcb, 0xf6, 0x67, 0x7b, 0x3f, 0xe4, 0x7a, 0x9c, 0x8d, 0x7a, 0xbe, 0x88, + 0xfb, 0xa1, 0x08, 0x45, 0xdf, 0xc2, 0xa3, 0xec, 0x89, 0x5d, 0xd9, 0x85, 0xfd, 0x37, 0xad, 0xda, + 0xfd, 0x16, 0xac, 0x0d, 0x79, 0x98, 0x10, 0xaa, 0x19, 0x44, 0x00, 0x3c, 0xca, 0xe2, 0x61, 0xe6, + 0xfb, 0x4c, 0x29, 0xd7, 0xd9, 0x71, 0xf6, 0xda, 0xa4, 0x82, 0x94, 0xfc, 0x43, 0xca, 0xa3, 0x4c, + 0x32, 0xf7, 0xc6, 0x8c, 0x2f, 0x91, 0xdd, 0x3f, 0x5b, 0xa0, 0x73, 0x34, 0xbf, 0x37, 0xf8, 0x05, + 0x68, 0x79, 0x87, 0xc3, 0xa3, 0x6c, 0x14, 0x71, 0xff, 0x3b, 0x76, 0x6a, 0x5d, 0x5b, 0xde, 0xcd, + 0xdc, 0xe0, 0xd6, 0x28, 0x52, 0x33, 0x9c, 0xcc, 0xa9, 0xe0, 0x01, 0x68, 0x13, 0xf6, 0x3b, 0x95, + 0xc1, 0x41, 0x10, 0xc8, 0x62, 0x33, 0x37, 0x6c, 0xd9, 0xfb, 0xb9, 0xc1, 0xef, 0xc9, 0x2a, 0xf1, + 0x89, 0x88, 0xb9, 0x66, 0x71, 0xaa, 0x4f, 0xc9, 0x7c, 0x05, 0xfc, 0x10, 0xac, 0x0e, 0xc7, 0x54, + 0x06, 0x83, 0xc0, 0x7d, 0xa7, 0xd8, 0xa9, 0xd7, 0xcc, 0x0d, 0x5e, 0x55, 0x53, 0x88, 0x5c, 0x71, + 0x90, 0x82, 0xad, 0x1f, 0x69, 0xc4, 0x03, 0xaa, 0x85, 
0x2c, 0xcf, 0x59, 0xcc, 0xc2, 0x5d, 0xda, + 0x71, 0xf6, 0x9a, 0x9f, 0x75, 0xa6, 0x53, 0xea, 0x5d, 0x8d, 0xc8, 0xfb, 0xe0, 0xb9, 0xc1, 0x8d, + 0xdc, 0xe0, 0xad, 0xe3, 0x9a, 0x22, 0x52, 0x6b, 0x05, 0x7f, 0x02, 0x9b, 0x87, 0x8c, 0x06, 0x6c, + 0xce, 0x7f, 0xb9, 0xde, 0xbf, 0x5b, 0xfa, 0x6f, 0x46, 0xaf, 0x56, 0x90, 0xeb, 0x26, 0xf0, 0x37, + 0x80, 0x66, 0x1d, 0x07, 0x61, 0x22, 0x24, 0x0b, 0x0a, 0x27, 0xaa, 0x33, 0xc9, 0xa6, 0x6d, 0x56, + 0xec, 0xd1, 0x77, 0x73, 0x83, 0xd1, 0xf1, 0x6b, 0x95, 0xe4, 0x0d, 0x4e, 0x70, 0x17, 0xac, 0x10, + 0xaa, 0x79, 0x12, 0xba, 0xab, 0xd6, 0x13, 0xe4, 0x06, 0xaf, 0x48, 0x8b, 0x90, 0x92, 0x81, 0x3d, + 0x00, 0x1e, 0xb3, 0x38, 0x2d, 0x75, 0x6b, 0x56, 0xb7, 0x91, 0x1b, 0x0c, 0xf4, 0x0c, 0x25, 0x15, + 0x05, 0x7c, 0xe6, 0x80, 0xce, 0x81, 0xef, 0x67, 0x71, 0x16, 0x51, 0xcd, 0x82, 0x87, 0x8c, 0x29, + 0x77, 0xdd, 0xde, 0xf4, 0x93, 0xdc, 0xe0, 0x2e, 0x9d, 0xa7, 0x5e, 0xde, 0xf5, 0xdf, 0xff, 0xe0, + 0x07, 0x31, 0xd5, 0xe3, 0xfe, 0x88, 0x87, 0xbd, 0x41, 0xa2, 0xbf, 0xaa, 0x64, 0x3e, 0xce, 0x22, + 0xcd, 0x8f, 0x99, 0x54, 0x27, 0xfd, 0xf8, 0x64, 0xdf, 0x1f, 0x53, 0x9e, 0xec, 0xfb, 0x42, 0xb2, + 0xfd, 0x50, 0xf4, 0x83, 0xe2, 0xb5, 0x78, 0x3c, 0x1c, 0x24, 0xfa, 0x3e, 0x55, 0x9a, 0x49, 0xf2, + 0x6a, 0x7b, 0xf8, 0x2b, 0xd8, 0x2e, 0x12, 0xcf, 0x22, 0xe6, 0x6b, 0x16, 0x0c, 0x92, 0x72, 0xdc, + 0x5e, 0x24, 0xfc, 0xa7, 0xca, 0x05, 0xf6, 0x48, 0x28, 0x37, 0x78, 0x3b, 0x59, 0xa8, 0x22, 0xaf, + 0x71, 0x80, 0x9f, 0x82, 0xe6, 0x20, 0x09, 0xd8, 0xc9, 0x20, 0x39, 0xe4, 0x4a, 0xbb, 0x4d, 0x6b, + 0xd8, 0xc9, 0x0d, 0x6e, 0xf2, 0x97, 0x30, 0xa9, 0x6a, 0xe0, 0x47, 0x60, 0xc9, 0x6a, 0x5b, 0x3b, + 0xce, 0xde, 0xba, 0x07, 0x73, 0x83, 0x37, 0x22, 0xae, 0x74, 0x25, 0xfa, 0x96, 0x87, 0xbf, 0x80, + 0xee, 0x7d, 0x91, 0x28, 0xe6, 0x67, 0xc5, 0x00, 0x8e, 0xa4, 0x48, 0x85, 0x62, 0xf2, 0x7b, 0xae, + 0x14, 0x53, 0x6e, 0xdb, 0x36, 0xba, 0x53, 0x8c, 0xd5, 0x5f, 0x24, 0x22, 0x8b, 0xeb, 0x61, 0x0a, + 0xba, 0x8f, 0x85, 0xa6, 0x51, 0xed, 0x63, 0xd9, 0xa8, 0x0f, 0xf3, 0xdd, 0x32, 0xcc, 0x5d, 0xbd, + 0xa8, 0x92, 0x2c, 0x36, 0x85, 0x21, 0xb8, 0x6d, 0xc9, 0xeb, 0x6f, 0xa7, 0x53, 0xdf, 0x0e, 0x95, + 0xed, 0x6e, 0xeb, 0xda, 0x32, 0xb2, 0xc0, 0x0e, 0x9e, 0x82, 0x7b, 0xf3, 0xbb, 0xa8, 0x7f, 0x4a, + 0x37, 0xed, 0x04, 0x3f, 0xce, 0x0d, 0xbe, 0xa7, 0xdf, 0x2c, 0x27, 0x6f, 0xe3, 0x09, 0x31, 0x58, + 0x7e, 0x24, 0x12, 0x9f, 0xb9, 0x9b, 0x3b, 0xce, 0xde, 0x92, 0xb7, 0x9e, 0x1b, 0xbc, 0x9c, 0x14, + 0x00, 0x99, 0xe2, 0xf0, 0x4b, 0xd0, 0xfe, 0x21, 0x19, 0x6a, 0xfa, 0x94, 0x05, 0x0f, 0x52, 0xe1, + 0x8f, 0x5d, 0x68, 0x77, 0xb1, 0x99, 0x1b, 0xdc, 0xce, 0xaa, 0x04, 0x99, 0xd7, 0xc1, 0xaf, 0x41, + 0xeb, 0x48, 0xb2, 0x63, 0x2e, 0x32, 0x65, 0xc3, 0x73, 0xcb, 0x86, 0x67, 0xbb, 0x18, 0x4f, 0x5a, + 0xc1, 0x2b, 0x21, 0x9a, 0xd3, 0xc3, 0x21, 0xb8, 0x75, 0xb5, 0xae, 0xe6, 0x75, 0xcb, 0xb6, 0xbf, + 0x9b, 0x1b, 0x7c, 0x27, 0xbd, 0x4e, 0x57, 0xdc, 0xea, 0xaa, 0x3d, 0xef, 0xec, 0x02, 0x35, 0xce, + 0x2f, 0x50, 0xe3, 0xc5, 0x05, 0x72, 0xfe, 0x98, 0x20, 0xe7, 0xaf, 0x09, 0x72, 0x9e, 0x4f, 0x90, + 0x73, 0x36, 0x41, 0xce, 0xf9, 0x04, 0x39, 0xff, 0x4e, 0x90, 0xf3, 0xdf, 0x04, 0x35, 0x5e, 0x4c, + 0x90, 0xf3, 0xec, 0x12, 0x35, 0xce, 0x2e, 0x51, 0xe3, 0xfc, 0x12, 0x35, 0x7e, 0x5e, 0xa3, 0xd3, + 0x6f, 0x8a, 0x1a, 0xad, 0xd8, 0x5b, 0xff, 0xfc, 0xff, 0x00, 0x00, 0x00, 0xff, 0xff, 0x70, 0x40, + 0xd1, 0x9b, 0x06, 0x07, 0x00, 0x00, } func (this *SignRate) Equal(that interface{}) bool { @@ -415,6 +434,12 @@ func (this *PeerAccountData) Equal(that interface{}) bool { if this.UnStakedEpoch != that1.UnStakedEpoch { return false } + if this.PreviousList != 
that1.PreviousList { + return false + } + if this.PreviousIndexInList != that1.PreviousIndexInList { + return false + } return true } func (this *SignRate) GoString() string { @@ -432,7 +457,7 @@ func (this *PeerAccountData) GoString() string { if this == nil { return "nil" } - s := make([]string, 0, 22) + s := make([]string, 0, 24) s = append(s, "&accounts.PeerAccountData{") s = append(s, "BLSPublicKey: "+fmt.Sprintf("%#v", this.BLSPublicKey)+",\n") s = append(s, "RewardAddress: "+fmt.Sprintf("%#v", this.RewardAddress)+",\n") @@ -452,6 +477,8 @@ func (this *PeerAccountData) GoString() string { s = append(s, "TotalValidatorIgnoredSignaturesRate: "+fmt.Sprintf("%#v", this.TotalValidatorIgnoredSignaturesRate)+",\n") s = append(s, "Nonce: "+fmt.Sprintf("%#v", this.Nonce)+",\n") s = append(s, "UnStakedEpoch: "+fmt.Sprintf("%#v", this.UnStakedEpoch)+",\n") + s = append(s, "PreviousList: "+fmt.Sprintf("%#v", this.PreviousList)+",\n") + s = append(s, "PreviousIndexInList: "+fmt.Sprintf("%#v", this.PreviousIndexInList)+",\n") s = append(s, "}") return strings.Join(s, "") } @@ -516,6 +543,22 @@ func (m *PeerAccountData) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if m.PreviousIndexInList != 0 { + i = encodeVarintPeerAccountData(dAtA, i, uint64(m.PreviousIndexInList)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xa0 + } + if len(m.PreviousList) > 0 { + i -= len(m.PreviousList) + copy(dAtA[i:], m.PreviousList) + i = encodeVarintPeerAccountData(dAtA, i, uint64(len(m.PreviousList))) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x9a + } if m.UnStakedEpoch != 0 { i = encodeVarintPeerAccountData(dAtA, i, uint64(m.UnStakedEpoch)) i-- @@ -734,6 +777,13 @@ func (m *PeerAccountData) Size() (n int) { if m.UnStakedEpoch != 0 { n += 2 + sovPeerAccountData(uint64(m.UnStakedEpoch)) } + l = len(m.PreviousList) + if l > 0 { + n += 2 + l + sovPeerAccountData(uint64(l)) + } + if m.PreviousIndexInList != 0 { + n += 2 + sovPeerAccountData(uint64(m.PreviousIndexInList)) + } return n } @@ -777,6 +827,8 @@ func (this *PeerAccountData) String() string { `TotalValidatorIgnoredSignaturesRate:` + fmt.Sprintf("%v", this.TotalValidatorIgnoredSignaturesRate) + `,`, `Nonce:` + fmt.Sprintf("%v", this.Nonce) + `,`, `UnStakedEpoch:` + fmt.Sprintf("%v", this.UnStakedEpoch) + `,`, + `PreviousList:` + fmt.Sprintf("%v", this.PreviousList) + `,`, + `PreviousIndexInList:` + fmt.Sprintf("%v", this.PreviousIndexInList) + `,`, `}`, }, "") return s @@ -1369,6 +1421,57 @@ func (m *PeerAccountData) Unmarshal(dAtA []byte) error { break } } + case 19: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PreviousList", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPeerAccountData + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthPeerAccountData + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthPeerAccountData + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PreviousList = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 20: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field PreviousIndexInList", wireType) + } + m.PreviousIndexInList = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPeerAccountData + } + if iNdEx >= l { + return 
io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.PreviousIndexInList |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } default: iNdEx = preIndex skippy, err := skipPeerAccountData(dAtA[iNdEx:]) diff --git a/state/accounts/peerAccountData.proto b/state/accounts/peerAccountData.proto index d4cc3292c38..1a3e99a295f 100644 --- a/state/accounts/peerAccountData.proto +++ b/state/accounts/peerAccountData.proto @@ -33,4 +33,6 @@ message PeerAccountData { uint32 TotalValidatorIgnoredSignaturesRate = 16 [(gogoproto.jsontag) = "totalValidatorIgnoredSignaturesRate"]; uint64 Nonce = 17 [(gogoproto.jsontag) = "nonce"]; uint32 UnStakedEpoch = 18 [(gogoproto.jsontag) = "unStakedEpoch"]; + string PreviousList = 19 [(gogoproto.jsontag) = "previousList,omitempty"]; + uint32 PreviousIndexInList = 20 [(gogoproto.jsontag) = "previousIndexInList,omitempty"]; } diff --git a/state/accountsDB.go b/state/accountsDB.go index db1396686d4..ce450bdcb95 100644 --- a/state/accountsDB.go +++ b/state/accountsDB.go @@ -831,6 +831,7 @@ func (adb *AccountsDB) CommitInEpoch(currentEpoch uint32, epochToCommit uint32) adb.mutOp.Lock() defer func() { adb.mainTrie.GetStorageManager().SetEpochForPutOperation(currentEpoch) + adb.mainTrie.GetStorageManager().GetStateStatsHandler().Reset() adb.mutOp.Unlock() adb.loadCodeMeasurements.resetAndPrint() }() diff --git a/state/accountsDBApi.go b/state/accountsDBApi.go index 0c283d4daec..2d07d99e818 100644 --- a/state/accountsDBApi.go +++ b/state/accountsDBApi.go @@ -171,8 +171,28 @@ func (accountsDB *accountsDBApi) RecreateTrie(rootHash []byte) error { } // RecreateTrieFromEpoch is a not permitted operation in this implementation and thus, will return an error -func (accountsDB *accountsDBApi) RecreateTrieFromEpoch(_ common.RootHashHolder) error { - return ErrOperationNotPermitted +func (accountsDB *accountsDBApi) RecreateTrieFromEpoch(options common.RootHashHolder) error { + accountsDB.mutRecreatedTrieBlockInfo.Lock() + defer accountsDB.mutRecreatedTrieBlockInfo.Unlock() + + if check.IfNil(options) { + return ErrNilRootHashHolder + } + + newBlockInfo := holders.NewBlockInfo([]byte{}, 0, options.GetRootHash()) + if newBlockInfo.Equal(accountsDB.blockInfo) { + return nil + } + + err := accountsDB.innerAccountsAdapter.RecreateTrieFromEpoch(options) + if err != nil { + accountsDB.blockInfo = nil + return err + } + + accountsDB.blockInfo = newBlockInfo + + return nil } // PruneTrie is a not permitted operation in this implementation and thus, does nothing diff --git a/state/accountsDBApi_test.go b/state/accountsDBApi_test.go index aee169c4f64..0d9aea1c098 100644 --- a/state/accountsDBApi_test.go +++ b/state/accountsDBApi_test.go @@ -16,7 +16,8 @@ import ( "github.com/multiversx/mx-chain-go/state/parsers" "github.com/multiversx/mx-chain-go/testscommon" mockState "github.com/multiversx/mx-chain-go/testscommon/state" - "github.com/multiversx/mx-chain-go/testscommon/trie" + testTrie "github.com/multiversx/mx-chain-go/testscommon/trie" + "github.com/multiversx/mx-chain-go/trie" vmcommon "github.com/multiversx/mx-chain-vm-common-go" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -195,7 +196,6 @@ func TestAccountsDBApi_NotPermittedOperations(t *testing.T) { assert.Equal(t, state.ErrOperationNotPermitted, accountsApi.SaveAccount(nil)) assert.Equal(t, state.ErrOperationNotPermitted, accountsApi.RemoveAccount(nil)) assert.Equal(t, state.ErrOperationNotPermitted, accountsApi.RevertToSnapshot(0)) - assert.Equal(t, state.ErrOperationNotPermitted, 
accountsApi.RecreateTrieFromEpoch(nil)) buff, err := accountsApi.CommitInEpoch(0, 0) assert.Nil(t, buff) @@ -226,6 +226,41 @@ func TestAccountsDBApi_RecreateTrie(t *testing.T) { assert.True(t, wasCalled) } +func TestAccountsDBApi_RecreateTrieFromEpoch(t *testing.T) { + t.Parallel() + + t.Run("should error if the roothash holder is nil", func(t *testing.T) { + accountsApi, _ := state.NewAccountsDBApi(&mockState.AccountsStub{ + RecreateTrieFromEpochCalled: func(options common.RootHashHolder) error { + assert.Fail(t, "should have not called accountsApi.RecreateTrieFromEpochCalled") + + return nil + }, + }, createBlockInfoProviderStub(dummyRootHash)) + + err := accountsApi.RecreateTrieFromEpoch(nil) + assert.Equal(t, trie.ErrNilRootHashHolder, err) + }) + t.Run("should work", func(t *testing.T) { + wasCalled := false + rootHash := []byte("root hash") + epoch := core.OptionalUint32{Value: 37, HasValue: true} + accountsApi, _ := state.NewAccountsDBApi(&mockState.AccountsStub{ + RecreateTrieFromEpochCalled: func(options common.RootHashHolder) error { + wasCalled = true + assert.Equal(t, rootHash, options.GetRootHash()) + assert.Equal(t, epoch, options.GetEpoch()) + return nil + }, + }, createBlockInfoProviderStub(dummyRootHash)) + + holder := holders.NewRootHashHolder(rootHash, epoch) + err := accountsApi.RecreateTrieFromEpoch(holder) + assert.NoError(t, err) + assert.True(t, wasCalled) + }) +} + func TestAccountsDBApi_EmptyMethodsShouldNotPanic(t *testing.T) { t.Parallel() @@ -272,7 +307,7 @@ func TestAccountsDBApi_SimpleProxyMethodsShouldWork(t *testing.T) { }, GetTrieCalled: func(i []byte) (common.Trie, error) { getTrieCalled = true - return &trie.TrieStub{}, nil + return &testTrie.TrieStub{}, nil }, } diff --git a/state/accountsDB_test.go b/state/accountsDB_test.go index 7f2658058fd..d3886f2d944 100644 --- a/state/accountsDB_test.go +++ b/state/accountsDB_test.go @@ -43,6 +43,7 @@ import ( trieMock "github.com/multiversx/mx-chain-go/testscommon/trie" "github.com/multiversx/mx-chain-go/trie" vmcommon "github.com/multiversx/mx-chain-vm-common-go" + "github.com/multiversx/mx-chain-vm-common-go/dataTrieMigrator" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) @@ -127,7 +128,7 @@ func getDefaultStateComponentsWithCustomEnableEpochs(enableEpochs common.EnableE func getDefaultStateComponents( db common.BaseStorer, - enableEpochs common.EnableEpochsHandler, + enableEpochsHandler common.EnableEpochsHandler, ) (*state.AccountsDB, common.Trie, common.StorageManager) { generalCfg := config.TrieStorageManagerConfig{ PruningBufferLen: 1000, @@ -140,7 +141,7 @@ func getDefaultStateComponents( args := storage.GetStorageManagerArgs() args.MainStorer = db trieStorage, _ := trie.NewTrieStorageManager(args) - tr, _ := trie.NewTrie(trieStorage, marshaller, hasher, enableEpochs, 5) + tr, _ := trie.NewTrie(trieStorage, marshaller, hasher, enableEpochsHandler, 5) ewlArgs := evictionWaitingList.MemoryEvictionWaitingListArgs{ RootHashesSize: 100, HashesSize: 10000, @@ -150,7 +151,7 @@ func getDefaultStateComponents( argsAccCreator := factory.ArgsAccountCreator{ Hasher: hasher, Marshaller: marshaller, - EnableEpochsHandler: enableEpochs, + EnableEpochsHandler: enableEpochsHandler, } accCreator, _ := factory.NewAccountCreator(argsAccCreator) @@ -3114,6 +3115,52 @@ func testAccountMethodsConcurrency( wg.Wait() } +func TestAccountsDB_MigrateDataTrieWithFunc(t *testing.T) { + t.Parallel() + + enableEpochsHandler := enableEpochsHandlerMock.NewEnableEpochsHandlerStub() + adb, _, _ := 
getDefaultStateComponents(testscommon.NewSnapshotPruningStorerMock(), enableEpochsHandler) + + addr := []byte("addr") + acc, _ := adb.LoadAccount(addr) + value := []byte("value") + _ = acc.(state.UserAccountHandler).SaveKeyValue([]byte("key"), value) + _ = acc.(state.UserAccountHandler).SaveKeyValue([]byte("key2"), value) + _ = adb.SaveAccount(acc) + + enableEpochsHandler.AddActiveFlags(common.AutoBalanceDataTriesFlag) + acc, _ = adb.LoadAccount(addr) + + isMigrated, err := acc.(state.AccountHandlerWithDataTrieMigrationStatus).IsDataTrieMigrated() + assert.Nil(t, err) + assert.False(t, isMigrated) + + accWithMigrate := acc.(vmcommon.UserAccountHandler).AccountDataHandler() + dataTrieMig := dataTrieMigrator.NewDataTrieMigrator(dataTrieMigrator.ArgsNewDataTrieMigrator{ + GasProvided: 100000000, + DataTrieGasCost: dataTrieMigrator.DataTrieGasCost{ + TrieLoadPerNode: 1, + TrieStorePerNode: 1, + }, + }) + err = accWithMigrate.MigrateDataTrieLeaves(vmcommon.ArgsMigrateDataTrieLeaves{ + OldVersion: core.NotSpecified, + NewVersion: core.AutoBalanceEnabled, + TrieMigrator: dataTrieMig, + }) + assert.Nil(t, err) + _ = adb.SaveAccount(acc) + + acc, _ = adb.LoadAccount(addr) + retrievedVal, _, err := acc.(state.UserAccountHandler).RetrieveValue([]byte("key")) + assert.Equal(t, value, retrievedVal) + assert.Nil(t, err) + + isMigrated, err = acc.(state.AccountHandlerWithDataTrieMigrationStatus).IsDataTrieMigrated() + assert.Nil(t, err) + assert.True(t, isMigrated) +} + func BenchmarkAccountsDB_GetMethodsInParallel(b *testing.B) { _, adb := getDefaultTrieAndAccountsDb() diff --git a/state/errors.go b/state/errors.go index 9d93a4a513e..8da20ed523d 100644 --- a/state/errors.go +++ b/state/errors.go @@ -145,6 +145,9 @@ var ErrNilStateMetrics = errors.New("nil sstate metrics") // ErrNilChannelsProvider signals that a nil channels provider has been given var ErrNilChannelsProvider = errors.New("nil channels provider") +// ErrNilRootHashHolder signals that a nil root hash holder was provided +var ErrNilRootHashHolder = errors.New("nil root hash holder provided") + // ErrNilStatsHandler signals that a nil stats handler provider has been given var ErrNilStatsHandler = errors.New("nil stats handler") @@ -154,5 +157,14 @@ var ErrNilLastSnapshotMarker = errors.New("nil last snapshot marker") // ErrNilSnapshotsManager signals that a nil snapshots manager has been given var ErrNilSnapshotsManager = errors.New("nil snapshots manager") +// ErrNilValidatorInfo signals that a nil value for the validator info has been provided +var ErrNilValidatorInfo = errors.New("validator info is nil") + +// ErrValidatorsDifferentShards signals that validators are not in the same shard +var ErrValidatorsDifferentShards = errors.New("validators are not in the same shard") + +// ErrValidatorNotFound signals that a validator was not found +var ErrValidatorNotFound = errors.New("validator not found") + // ErrNilStateChangesCollector signals that a nil state changes collector has been given var ErrNilStateChangesCollector = errors.New("nil state changes collector") diff --git a/state/export_test.go b/state/export_test.go index 0045adc880c..4398d616dd3 100644 --- a/state/export_test.go +++ b/state/export_test.go @@ -90,3 +90,9 @@ func (sm *snapshotsManager) GetLastSnapshotInfo() ([]byte, uint32) { func NewNilSnapshotsManager() *snapshotsManager { return nil } + +// AccountHandlerWithDataTrieMigrationStatus - +type AccountHandlerWithDataTrieMigrationStatus interface { + vmcommon.AccountHandler + IsDataTrieMigrated() (bool, error) +} diff 
--git a/state/interface.go b/state/interface.go index ad415547983..018590feb42 100644 --- a/state/interface.go +++ b/state/interface.go @@ -23,6 +23,47 @@ type Updater interface { IsInterfaceNil() bool } +// PeerAccountHandler models a peer state account, which can journalize a normal account's data +// with some extra features like signing statistics or rating information +type PeerAccountHandler interface { + GetBLSPublicKey() []byte + SetBLSPublicKey([]byte) error + GetRewardAddress() []byte + SetRewardAddress([]byte) error + GetAccumulatedFees() *big.Int + AddToAccumulatedFees(*big.Int) + GetList() string + GetPreviousList() string + GetIndexInList() uint32 + GetPreviousIndexInList() uint32 + GetShardId() uint32 + SetUnStakedEpoch(epoch uint32) + GetUnStakedEpoch() uint32 + IncreaseLeaderSuccessRate(uint32) + DecreaseLeaderSuccessRate(uint32) + IncreaseValidatorSuccessRate(uint32) + DecreaseValidatorSuccessRate(uint32) + IncreaseValidatorIgnoredSignaturesRate(uint32) + GetNumSelectedInSuccessBlocks() uint32 + IncreaseNumSelectedInSuccessBlocks() + GetLeaderSuccessRate() SignRate + GetValidatorSuccessRate() SignRate + GetValidatorIgnoredSignaturesRate() uint32 + GetTotalLeaderSuccessRate() SignRate + GetTotalValidatorSuccessRate() SignRate + GetTotalValidatorIgnoredSignaturesRate() uint32 + SetListAndIndex(shardID uint32, list string, index uint32, updatePreviousValues bool) + GetRating() uint32 + SetRating(uint32) + GetTempRating() uint32 + SetTempRating(uint32) + GetConsecutiveProposerMisses() uint32 + SetConsecutiveProposerMisses(uint322 uint32) + ResetAtNewEpoch() + SetPreviousList(list string) + vmcommon.AccountHandler +} + // AccountsAdapter is used for the structure that manages the accounts on top of a trie.PatriciaMerkleTrie // implementation type AccountsAdapter interface { @@ -182,43 +223,6 @@ type DataTrie interface { CollectLeavesForMigration(args vmcommon.ArgsMigrateDataTrieLeaves) error } -// PeerAccountHandler models a peer state account, which can journalize a normal account's data -// with some extra features like signing statistics or rating information -type PeerAccountHandler interface { - SetBLSPublicKey([]byte) error - GetRewardAddress() []byte - SetRewardAddress([]byte) error - GetAccumulatedFees() *big.Int - AddToAccumulatedFees(*big.Int) - GetList() string - GetIndexInList() uint32 - GetShardId() uint32 - SetUnStakedEpoch(epoch uint32) - GetUnStakedEpoch() uint32 - IncreaseLeaderSuccessRate(uint32) - DecreaseLeaderSuccessRate(uint32) - IncreaseValidatorSuccessRate(uint32) - DecreaseValidatorSuccessRate(uint32) - IncreaseValidatorIgnoredSignaturesRate(uint32) - GetNumSelectedInSuccessBlocks() uint32 - IncreaseNumSelectedInSuccessBlocks() - GetLeaderSuccessRate() SignRate - GetValidatorSuccessRate() SignRate - GetValidatorIgnoredSignaturesRate() uint32 - GetTotalLeaderSuccessRate() SignRate - GetTotalValidatorSuccessRate() SignRate - GetTotalValidatorIgnoredSignaturesRate() uint32 - SetListAndIndex(shardID uint32, list string, index uint32) - GetRating() uint32 - SetRating(uint32) - GetTempRating() uint32 - SetTempRating(uint32) - GetConsecutiveProposerMisses() uint32 - SetConsecutiveProposerMisses(uint322 uint32) - ResetAtNewEpoch() - vmcommon.AccountHandler -} - // UserAccountHandler models a user account, which can journalize account's data with some extra features // like balance, developer rewards, owner type UserAccountHandler interface { @@ -281,6 +285,74 @@ type LastSnapshotMarker interface { IsInterfaceNil() bool } +// ShardValidatorsInfoMapHandler shall 
be used to manage operations inside +// a map in a concurrent-safe way. +type ShardValidatorsInfoMapHandler interface { + GetShardValidatorsInfoMap() map[uint32][]ValidatorInfoHandler + GetAllValidatorsInfo() []ValidatorInfoHandler + GetValidator(blsKey []byte) ValidatorInfoHandler + + Add(validator ValidatorInfoHandler) error + Delete(validator ValidatorInfoHandler) error + Replace(old ValidatorInfoHandler, new ValidatorInfoHandler) error + SetValidatorsInShard(shardID uint32, validators []ValidatorInfoHandler) error +} + +// ValidatorInfoHandler defines which data shall a validator info hold. +type ValidatorInfoHandler interface { + IsInterfaceNil() bool + + GetPublicKey() []byte + GetShardId() uint32 + GetList() string + GetIndex() uint32 + GetPreviousIndex() uint32 + GetTempRating() uint32 + GetRating() uint32 + GetRatingModifier() float32 + GetRewardAddress() []byte + GetLeaderSuccess() uint32 + GetLeaderFailure() uint32 + GetValidatorSuccess() uint32 + GetValidatorFailure() uint32 + GetValidatorIgnoredSignatures() uint32 + GetNumSelectedInSuccessBlocks() uint32 + GetAccumulatedFees() *big.Int + GetTotalLeaderSuccess() uint32 + GetTotalLeaderFailure() uint32 + GetTotalValidatorSuccess() uint32 + GetTotalValidatorFailure() uint32 + GetTotalValidatorIgnoredSignatures() uint32 + GetPreviousList() string + + SetPublicKey(publicKey []byte) + SetShardId(shardID uint32) + SetPreviousList(list string) + SetList(list string) + SetIndex(index uint32) + SetListAndIndex(list string, index uint32, updatePreviousValues bool) + SetTempRating(tempRating uint32) + SetRating(rating uint32) + SetRatingModifier(ratingModifier float32) + SetRewardAddress(rewardAddress []byte) + SetLeaderSuccess(leaderSuccess uint32) + SetLeaderFailure(leaderFailure uint32) + SetValidatorSuccess(validatorSuccess uint32) + SetValidatorFailure(validatorFailure uint32) + SetValidatorIgnoredSignatures(validatorIgnoredSignatures uint32) + SetNumSelectedInSuccessBlocks(numSelectedInSuccessBlock uint32) + SetAccumulatedFees(accumulatedFees *big.Int) + SetTotalLeaderSuccess(totalLeaderSuccess uint32) + SetTotalLeaderFailure(totalLeaderFailure uint32) + SetTotalValidatorSuccess(totalValidatorSuccess uint32) + SetTotalValidatorFailure(totalValidatorFailure uint32) + SetTotalValidatorIgnoredSignatures(totalValidatorIgnoredSignatures uint32) + + ShallowClone() ValidatorInfoHandler + String() string + GoString() string +} + // StateChangesCollector defines the methods needed for an StateChangesCollector implementation type StateChangesCollector interface { AddStateChange(stateChange StateChangeDTO) diff --git a/state/trackableDataTrie/trackableDataTrie.go b/state/trackableDataTrie/trackableDataTrie.go index d962373bc85..4226974189a 100644 --- a/state/trackableDataTrie/trackableDataTrie.go +++ b/state/trackableDataTrie/trackableDataTrie.go @@ -137,6 +137,11 @@ func (tdt *trackableDataTrie) MigrateDataTrieLeaves(args vmcommon.ArgsMigrateDat dataToBeMigrated := args.TrieMigrator.GetLeavesToBeMigrated() log.Debug("num leaves to be migrated", "num", len(dataToBeMigrated), "account", tdt.identifier) for _, leafData := range dataToBeMigrated { + val, err := tdt.getValueWithoutMetadata(leafData.Key, leafData) + if err != nil { + return err + } + originalKey, err := tdt.getOriginalKeyFromTrieData(leafData) if err != nil { return err @@ -144,7 +149,7 @@ func (tdt *trackableDataTrie) MigrateDataTrieLeaves(args vmcommon.ArgsMigrateDat dataEntry := dirtyData{ index: tdt.getIndexForKey(originalKey), - value: leafData.Value, + value: val, newVersion: 
args.NewVersion,
 		}
diff --git a/state/trackableDataTrie/trackableDataTrie_test.go b/state/trackableDataTrie/trackableDataTrie_test.go
index de995a7082a..db2d7f83b7a 100644
--- a/state/trackableDataTrie/trackableDataTrie_test.go
+++ b/state/trackableDataTrie/trackableDataTrie_test.go
@@ -952,20 +952,22 @@ func TestTrackableDataTrie_MigrateDataTrieLeaves(t *testing.T) {
 	t.Run("leaves that need to be migrated are added to dirty data", func(t *testing.T) {
 		t.Parallel()
 
+		expectedValues := [][]byte{[]byte("value1"), []byte("value2"), []byte("value3")}
+		address := []byte("identifier")
 		leavesToBeMigrated := []core.TrieData{
 			{
 				Key:     []byte("key1"),
-				Value:   []byte("value1"),
+				Value:   append([]byte("value1key1"), address...),
 				Version: core.NotSpecified,
 			},
 			{
 				Key:     []byte("key2"),
-				Value:   []byte("value2"),
+				Value:   append([]byte("value2key2"), address...),
 				Version: core.NotSpecified,
 			},
 			{
 				Key:     []byte("key3"),
-				Value:   []byte("value3"),
+				Value:   append([]byte("value3key3"), address...),
 				Version: core.NotSpecified,
 			},
 		}
@@ -985,7 +987,7 @@ func TestTrackableDataTrie_MigrateDataTrieLeaves(t *testing.T) {
 			},
 		}
 
-		tdt, _ := trackableDataTrie.NewTrackableDataTrie([]byte("identifier"), &hashingMocks.HasherMock{}, &marshallerMock.MarshalizerMock{}, enableEpchs)
+		tdt, _ := trackableDataTrie.NewTrackableDataTrie(address, &hashingMocks.HasherMock{}, &marshallerMock.MarshalizerMock{}, enableEpchs)
 		tdt.SetDataTrie(tr)
 		args := vmcommon.ArgsMigrateDataTrieLeaves{
 			OldVersion: core.NotSpecified,
@@ -999,7 +1001,7 @@ func TestTrackableDataTrie_MigrateDataTrieLeaves(t *testing.T) {
 		assert.Equal(t, len(leavesToBeMigrated), len(dirtyData))
 		for i := range leavesToBeMigrated {
 			d := dirtyData[string(leavesToBeMigrated[i].Key)]
-			assert.Equal(t, leavesToBeMigrated[i].Value, d.Value)
+			assert.Equal(t, expectedValues[i], d.Value)
 			assert.Equal(t, core.TrieNodeVersion(100), d.NewVersion)
 		}
 	})
diff --git a/state/validatorInfo.go b/state/validatorInfo.go
index 2ca0cf416e0..c6ea6d06001 100644
--- a/state/validatorInfo.go
+++ b/state/validatorInfo.go
@@ -2,11 +2,139 @@ package state
 
+import mathbig "math/big"
+
 // IsInterfaceNil returns true if there is no value under the interface
 func (vi *ValidatorInfo) IsInterfaceNil() bool {
 	return vi == nil
 }
 
+// SetPublicKey sets validator's public key
+func (vi *ValidatorInfo) SetPublicKey(publicKey []byte) {
+	vi.PublicKey = publicKey
+}
+
+// SetList sets validator's list
+func (vi *ValidatorInfo) SetList(list string) {
+	vi.List = list
+}
+
+// SetPreviousList sets validator's previous list
+func (vi *ValidatorInfo) SetPreviousList(list string) {
+	vi.PreviousList = list
+}
+
+// SetListAndIndex sets validator's list and index, saving the current values as previous ones when updatePreviousValues is set
+func (vi *ValidatorInfo) SetListAndIndex(list string, index uint32, updatePreviousValues bool) {
+	if updatePreviousValues {
+		vi.PreviousIndex = vi.Index
+		vi.PreviousList = vi.List
+	}
+
+	vi.List = list
+	vi.Index = index
+}
+
+// SetShardId sets validator's shard id
+func (vi *ValidatorInfo) SetShardId(shardID uint32) {
+	vi.ShardId = shardID
+}
+
+// SetIndex sets validator's index
+func (vi *ValidatorInfo) SetIndex(index uint32) {
+	vi.Index = index
+}
+
+// SetTempRating sets validator's temp rating
+func (vi *ValidatorInfo) SetTempRating(tempRating uint32) {
+	vi.TempRating = tempRating
+}
+
+// SetRating sets validator's rating
+func (vi *ValidatorInfo) SetRating(rating uint32) {
+	vi.Rating = rating
+}
+
+// SetRatingModifier sets validator's rating modifier
+func (vi *ValidatorInfo) SetRatingModifier(ratingModifier float32) {
+	vi.RatingModifier = ratingModifier
+}
+
+// SetRewardAddress sets 
validator's reward address +func (vi *ValidatorInfo) SetRewardAddress(rewardAddress []byte) { + vi.RewardAddress = rewardAddress +} + +// SetLeaderSuccess sets leader success +func (vi *ValidatorInfo) SetLeaderSuccess(leaderSuccess uint32) { + vi.LeaderSuccess = leaderSuccess +} + +// SetLeaderFailure sets validator's leader failure +func (vi *ValidatorInfo) SetLeaderFailure(leaderFailure uint32) { + vi.LeaderFailure = leaderFailure +} + +// SetValidatorSuccess sets validator's success +func (vi *ValidatorInfo) SetValidatorSuccess(validatorSuccess uint32) { + vi.ValidatorSuccess = validatorSuccess +} + +// SetValidatorFailure sets validator's failure +func (vi *ValidatorInfo) SetValidatorFailure(validatorFailure uint32) { + vi.ValidatorFailure = validatorFailure +} + +// SetValidatorIgnoredSignatures sets validator's ignored signatures +func (vi *ValidatorInfo) SetValidatorIgnoredSignatures(validatorIgnoredSignatures uint32) { + vi.ValidatorIgnoredSignatures = validatorIgnoredSignatures +} + +// SetNumSelectedInSuccessBlocks sets validator's num of selected in success block +func (vi *ValidatorInfo) SetNumSelectedInSuccessBlocks(numSelectedInSuccessBlock uint32) { + vi.NumSelectedInSuccessBlocks = numSelectedInSuccessBlock +} + +// SetAccumulatedFees sets validator's accumulated fees +func (vi *ValidatorInfo) SetAccumulatedFees(accumulatedFees *mathbig.Int) { + vi.AccumulatedFees = mathbig.NewInt(0).Set(accumulatedFees) +} + +// SetTotalLeaderSuccess sets validator's total leader success +func (vi *ValidatorInfo) SetTotalLeaderSuccess(totalLeaderSuccess uint32) { + vi.TotalLeaderSuccess = totalLeaderSuccess +} + +// SetTotalLeaderFailure sets validator's total leader failure +func (vi *ValidatorInfo) SetTotalLeaderFailure(totalLeaderFailure uint32) { + vi.TotalLeaderFailure = totalLeaderFailure +} + +// SetTotalValidatorSuccess sets validator's total success +func (vi *ValidatorInfo) SetTotalValidatorSuccess(totalValidatorSuccess uint32) { + vi.TotalValidatorSuccess = totalValidatorSuccess +} + +// SetTotalValidatorFailure sets validator's total failure +func (vi *ValidatorInfo) SetTotalValidatorFailure(totalValidatorFailure uint32) { + vi.TotalValidatorFailure = totalValidatorFailure +} + +// SetTotalValidatorIgnoredSignatures sets validator's total ignored signatures +func (vi *ValidatorInfo) SetTotalValidatorIgnoredSignatures(totalValidatorIgnoredSignatures uint32) { + vi.TotalValidatorIgnoredSignatures = totalValidatorIgnoredSignatures +} + +// ShallowClone returns a clone of the object +func (vi *ValidatorInfo) ShallowClone() ValidatorInfoHandler { + if vi == nil { + return nil + } + + validatorCopy := *vi + return &validatorCopy +} + // IsInterfaceNil returns true if there is no value under the interface func (svi *ShardValidatorInfo) IsInterfaceNil() bool { return svi == nil diff --git a/state/validatorInfo.pb.go b/state/validatorInfo.pb.go index 19907c86869..3261e3da880 100644 --- a/state/validatorInfo.pb.go +++ b/state/validatorInfo.pb.go @@ -51,6 +51,8 @@ type ValidatorInfo struct { TotalValidatorSuccess uint32 `protobuf:"varint,18,opt,name=TotalValidatorSuccess,proto3" json:"totalValidatorSuccess"` TotalValidatorFailure uint32 `protobuf:"varint,19,opt,name=TotalValidatorFailure,proto3" json:"totalValidatorFailure"` TotalValidatorIgnoredSignatures uint32 `protobuf:"varint,20,opt,name=TotalValidatorIgnoredSignatures,proto3" json:"totalValidatorIgnoredSignatures"` + PreviousList string `protobuf:"bytes,21,opt,name=PreviousList,proto3" json:"previousList,omitempty"` + PreviousIndex 
uint32 `protobuf:"varint,22,opt,name=PreviousIndex,proto3" json:"previousIndex,omitempty"` } func (m *ValidatorInfo) Reset() { *m = ValidatorInfo{} } @@ -221,13 +223,29 @@ func (m *ValidatorInfo) GetTotalValidatorIgnoredSignatures() uint32 { return 0 } +func (m *ValidatorInfo) GetPreviousList() string { + if m != nil { + return m.PreviousList + } + return "" +} + +func (m *ValidatorInfo) GetPreviousIndex() uint32 { + if m != nil { + return m.PreviousIndex + } + return 0 +} + // ShardValidatorInfo represents the data regarding a validator that is stored in the PeerMiniblocks type ShardValidatorInfo struct { - PublicKey []byte `protobuf:"bytes,1,opt,name=PublicKey,proto3" json:"publicKey"` - ShardId uint32 `protobuf:"varint,2,opt,name=ShardId,proto3" json:"shardId"` - List string `protobuf:"bytes,3,opt,name=List,proto3" json:"list,omitempty"` - Index uint32 `protobuf:"varint,4,opt,name=Index,proto3" json:"index"` - TempRating uint32 `protobuf:"varint,5,opt,name=TempRating,proto3" json:"tempRating"` + PublicKey []byte `protobuf:"bytes,1,opt,name=PublicKey,proto3" json:"publicKey"` + ShardId uint32 `protobuf:"varint,2,opt,name=ShardId,proto3" json:"shardId"` + List string `protobuf:"bytes,3,opt,name=List,proto3" json:"list,omitempty"` + Index uint32 `protobuf:"varint,4,opt,name=Index,proto3" json:"index"` + TempRating uint32 `protobuf:"varint,5,opt,name=TempRating,proto3" json:"tempRating"` + PreviousList string `protobuf:"bytes,6,opt,name=PreviousList,proto3" json:"previousList,omitempty"` + PreviousIndex uint32 `protobuf:"varint,7,opt,name=PreviousIndex,proto3" json:"previousIndex,omitempty"` } func (m *ShardValidatorInfo) Reset() { *m = ShardValidatorInfo{} } @@ -293,6 +311,20 @@ func (m *ShardValidatorInfo) GetTempRating() uint32 { return 0 } +func (m *ShardValidatorInfo) GetPreviousList() string { + if m != nil { + return m.PreviousList + } + return "" +} + +func (m *ShardValidatorInfo) GetPreviousIndex() uint32 { + if m != nil { + return m.PreviousIndex + } + return 0 +} + func init() { proto.RegisterType((*ValidatorInfo)(nil), "proto.ValidatorInfo") proto.RegisterType((*ShardValidatorInfo)(nil), "proto.ShardValidatorInfo") @@ -301,52 +333,56 @@ func init() { func init() { proto.RegisterFile("validatorInfo.proto", fileDescriptor_bf9cdc082f0b2ec2) } var fileDescriptor_bf9cdc082f0b2ec2 = []byte{ - // 714 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x95, 0x4f, 0x4f, 0x13, 0x41, - 0x18, 0xc6, 0xbb, 0x48, 0x0b, 0x1d, 0x68, 0x81, 0x01, 0x74, 0x41, 0xb3, 0xd3, 0x60, 0x34, 0x4d, - 0xb4, 0xed, 0xc1, 0x83, 0x89, 0x1e, 0x94, 0x1a, 0x49, 0x1a, 0xf1, 0x4f, 0xa6, 0xc4, 0x83, 0x07, - 0x93, 0xe9, 0xee, 0x74, 0x3b, 0x71, 0xff, 0x90, 0xd9, 0xd9, 0x0a, 0x37, 0x3f, 0x02, 0x1f, 0xc3, - 0xf8, 0x49, 0x3c, 0x72, 0xe4, 0xb4, 0xd8, 0xe5, 0x62, 0xe6, 0xc4, 0x47, 0x30, 0x9d, 0x76, 0x69, - 0xb7, 0x2d, 0x78, 0xe2, 0xc4, 0xee, 0xfb, 0x3c, 0xcf, 0x6f, 0x5e, 0xfa, 0x4e, 0xdf, 0x82, 0xf5, - 0x2e, 0x71, 0x98, 0x45, 0x84, 0xcf, 0x1b, 0x5e, 0xdb, 0xaf, 0x1e, 0x72, 0x5f, 0xf8, 0x30, 0xab, - 0xfe, 0x6c, 0x57, 0x6c, 0x26, 0x3a, 0x61, 0xab, 0x6a, 0xfa, 0x6e, 0xcd, 0xf6, 0x6d, 0xbf, 0xa6, - 0xca, 0xad, 0xb0, 0xad, 0xde, 0xd4, 0x8b, 0x7a, 0x1a, 0xa4, 0x76, 0xce, 0x01, 0x28, 0x7c, 0x1e, - 0xa7, 0xc1, 0x27, 0x20, 0xff, 0x29, 0x6c, 0x39, 0xcc, 0x7c, 0x47, 0x8f, 0x75, 0xad, 0xa4, 0x95, - 0x97, 0xeb, 0x05, 0x19, 0xa1, 0xfc, 0x61, 0x52, 0xc4, 0x23, 0x1d, 0x3e, 0x02, 0x0b, 0xcd, 0x0e, - 0xe1, 0x56, 0xc3, 0xd2, 0xe7, 0x4a, 0x5a, 0xb9, 0x50, 0x5f, 0x92, 0x11, 0x5a, 0x08, 0x06, 
0x25, - 0x9c, 0x68, 0xf0, 0x01, 0x98, 0xdf, 0x67, 0x81, 0xd0, 0xef, 0x94, 0xb4, 0x72, 0xbe, 0xbe, 0x28, - 0x23, 0x34, 0xef, 0xb0, 0x40, 0x60, 0x55, 0x85, 0x08, 0x64, 0x1b, 0x9e, 0x45, 0x8f, 0xf4, 0x79, - 0x85, 0xc8, 0xcb, 0x08, 0x65, 0x59, 0xbf, 0x80, 0x07, 0x75, 0x58, 0x05, 0xe0, 0x80, 0xba, 0x87, - 0x98, 0x08, 0xe6, 0xd9, 0x7a, 0x56, 0xb9, 0x8a, 0x32, 0x42, 0x40, 0x5c, 0x55, 0xf1, 0x98, 0x03, - 0xee, 0x80, 0xdc, 0xd0, 0x9b, 0x53, 0x5e, 0x20, 0x23, 0x94, 0xe3, 0x03, 0xdf, 0x50, 0x81, 0x2f, - 0x40, 0x71, 0xf0, 0xf4, 0xde, 0xb7, 0x58, 0x9b, 0x51, 0xae, 0x2f, 0x94, 0xb4, 0xf2, 0x5c, 0x1d, - 0xca, 0x08, 0x15, 0x79, 0x4a, 0xc1, 0x13, 0x4e, 0xb8, 0x0b, 0x0a, 0x98, 0x7e, 0x27, 0xdc, 0xda, - 0xb5, 0x2c, 0x4e, 0x83, 0x40, 0x5f, 0x54, 0x1f, 0xd3, 0x7d, 0x19, 0xa1, 0x7b, 0x7c, 0x5c, 0x78, - 0xea, 0xbb, 0xac, 0xdf, 0xa3, 0x38, 0xc6, 0xe9, 0x04, 0x7c, 0x0e, 0x0a, 0xfb, 0x94, 0x58, 0x94, - 0x37, 0x43, 0xd3, 0xec, 0x23, 0xf2, 0xaa, 0xd3, 0x35, 0x19, 0xa1, 0x82, 0x33, 0x2e, 0xe0, 0xb4, - 0x6f, 0x14, 0xdc, 0x23, 0xcc, 0x09, 0x39, 0xd5, 0xc1, 0x64, 0x70, 0x28, 0xe0, 0xb4, 0x0f, 0xbe, - 0x06, 0xab, 0x57, 0x83, 0x4e, 0x0e, 0x5d, 0x52, 0xd9, 0x0d, 0x19, 0xa1, 0xd5, 0xee, 0x84, 0x86, - 0xa7, 0xdc, 0x29, 0x42, 0x72, 0xfa, 0xf2, 0x0c, 0x42, 0xd2, 0xc0, 0x94, 0x1b, 0x7e, 0x05, 0xdb, - 0xa3, 0xcb, 0x66, 0x7b, 0x3e, 0xa7, 0x56, 0x93, 0xd9, 0x1e, 0x11, 0x21, 0xa7, 0x81, 0x5e, 0x50, - 0x2c, 0x43, 0x46, 0x68, 0xbb, 0x7b, 0xad, 0x0b, 0xdf, 0x40, 0xe8, 0xf3, 0x3f, 0x84, 0x6e, 0x93, - 0x3a, 0xd4, 0x14, 0xd4, 0x6a, 0x78, 0xc3, 0xce, 0xeb, 0x8e, 0x6f, 0x7e, 0x0b, 0xf4, 0xe2, 0x88, - 0xef, 0x5d, 0xeb, 0xc2, 0x37, 0x10, 0xe0, 0x89, 0x06, 0x56, 0x76, 0x4d, 0x33, 0x74, 0x43, 0x87, - 0x08, 0x6a, 0xed, 0x51, 0x1a, 0xe8, 0x2b, 0x6a, 0xf6, 0x6d, 0x19, 0xa1, 0x2d, 0x92, 0x96, 0x46, - 0xd3, 0xff, 0x75, 0x8e, 0xde, 0xba, 0x44, 0x74, 0x6a, 0x2d, 0x66, 0x57, 0x1b, 0x9e, 0x78, 0x39, - 0xf6, 0x25, 0x75, 0x43, 0x47, 0xb0, 0x2e, 0xe5, 0xc1, 0x51, 0xcd, 0x3d, 0xaa, 0x98, 0x1d, 0xc2, - 0xbc, 0x8a, 0xe9, 0x73, 0x5a, 0xb1, 0xfd, 0x9a, 0x45, 0x04, 0xa9, 0xd6, 0x99, 0xdd, 0xf0, 0xc4, - 0x1b, 0x12, 0x08, 0xca, 0xf1, 0xe4, 0xf1, 0x70, 0x0f, 0xc0, 0x03, 0x5f, 0x10, 0x27, 0x7d, 0x9b, - 0x56, 0xd5, 0xbf, 0x7a, 0x57, 0x46, 0x08, 0x8a, 0x29, 0x15, 0xcf, 0x48, 0x4c, 0x70, 0x92, 0xf1, - 0xae, 0xcd, 0xe4, 0x24, 0x03, 0x9e, 0x91, 0x80, 0x1f, 0xc1, 0xa6, 0xaa, 0x4e, 0xdd, 0x35, 0xa8, - 0x50, 0x5b, 0x32, 0x42, 0x9b, 0x62, 0x96, 0x01, 0xcf, 0xce, 0x4d, 0x03, 0x93, 0xde, 0xd6, 0xaf, - 0x03, 0x26, 0xed, 0xcd, 0xce, 0x41, 0x17, 0xa0, 0xb4, 0x30, 0x7d, 0x13, 0x37, 0x14, 0xfa, 0xa1, - 0x8c, 0x10, 0x12, 0x37, 0x5b, 0xf1, 0xff, 0x58, 0x3b, 0x3d, 0x0d, 0x40, 0xb5, 0x07, 0x6f, 0x7f, - 0xcd, 0x3e, 0x4e, 0xad, 0x59, 0xb5, 0xc9, 0xfa, 0x6b, 0x76, 0x6c, 0x0b, 0xdd, 0xce, 0xc2, 0xad, - 0xbf, 0x3a, 0xed, 0x19, 0x99, 0xb3, 0x9e, 0x91, 0xb9, 0xec, 0x19, 0xda, 0x8f, 0xd8, 0xd0, 0x7e, - 0xc6, 0x86, 0xf6, 0x3b, 0x36, 0xb4, 0xd3, 0xd8, 0xd0, 0xce, 0x62, 0x43, 0xfb, 0x13, 0x1b, 0xda, - 0xdf, 0xd8, 0xc8, 0x5c, 0xc6, 0x86, 0x76, 0x72, 0x61, 0x64, 0x4e, 0x2f, 0x8c, 0xcc, 0xd9, 0x85, - 0x91, 0xf9, 0x92, 0x0d, 0x04, 0x11, 0xb4, 0x95, 0x53, 0xbf, 0x46, 0xcf, 0xfe, 0x05, 0x00, 0x00, - 0xff, 0xff, 0x5e, 0xa1, 0xc3, 0x5e, 0xda, 0x06, 0x00, 0x00, + // 770 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x55, 0x4d, 0x6f, 0xf3, 0x34, + 0x1c, 0x6f, 0xc6, 0xda, 0x3e, 0xf5, 0xd6, 0x3e, 0x9b, 0xf7, 0x42, 0x56, 0x50, 0x5c, 0x0d, 0x81, + 0x2a, 0x41, 0xdb, 0x03, 0x07, 0x24, 0x90, 0x80, 0x15, 0x31, 0xa9, 0x62, 0xc0, 0xe4, 0x4e, 0x1c, + 0x38, 
0x20, 0xb9, 0x89, 0x9b, 0x5a, 0xe4, 0xa5, 0x72, 0x9c, 0xb2, 0xdd, 0xf8, 0x08, 0xfb, 0x18, + 0x88, 0x4f, 0xc2, 0x71, 0xc7, 0x9d, 0x0c, 0xcb, 0x38, 0x20, 0x9f, 0xf6, 0x11, 0x50, 0xdd, 0x66, + 0x4d, 0xda, 0x6e, 0x08, 0x3d, 0xda, 0xa9, 0xf1, 0xff, 0xf7, 0xe2, 0x7f, 0xfc, 0x77, 0x7f, 0x01, + 0x7b, 0x13, 0xe2, 0x31, 0x87, 0x88, 0x90, 0xf7, 0x82, 0x61, 0xd8, 0x1e, 0xf3, 0x50, 0x84, 0xb0, + 0xa8, 0x7f, 0xea, 0x2d, 0x97, 0x89, 0x51, 0x3c, 0x68, 0xdb, 0xa1, 0xdf, 0x71, 0x43, 0x37, 0xec, + 0xe8, 0xf2, 0x20, 0x1e, 0xea, 0x95, 0x5e, 0xe8, 0xa7, 0x99, 0xea, 0x38, 0xd9, 0x02, 0xd5, 0x1f, + 0xb2, 0x6e, 0xf0, 0x43, 0x50, 0x39, 0x8f, 0x07, 0x1e, 0xb3, 0xbf, 0xa1, 0x57, 0xa6, 0xd1, 0x30, + 0x9a, 0xdb, 0xdd, 0xaa, 0x92, 0xa8, 0x32, 0x4e, 0x8b, 0x78, 0x81, 0xc3, 0xf7, 0x41, 0xb9, 0x3f, + 0x22, 0xdc, 0xe9, 0x39, 0xe6, 0x46, 0xc3, 0x68, 0x56, 0xbb, 0x5b, 0x4a, 0xa2, 0x72, 0x34, 0x2b, + 0xe1, 0x14, 0x83, 0xef, 0x82, 0xcd, 0x33, 0x16, 0x09, 0xf3, 0xad, 0x86, 0xd1, 0xac, 0x74, 0x5f, + 0x29, 0x89, 0x36, 0x3d, 0x16, 0x09, 0xac, 0xab, 0x10, 0x81, 0x62, 0x2f, 0x70, 0xe8, 0xa5, 0xb9, + 0xa9, 0x2d, 0x2a, 0x4a, 0xa2, 0x22, 0x9b, 0x16, 0xf0, 0xac, 0x0e, 0xdb, 0x00, 0x5c, 0x50, 0x7f, + 0x8c, 0x89, 0x60, 0x81, 0x6b, 0x16, 0x35, 0xab, 0xa6, 0x24, 0x02, 0xe2, 0xb1, 0x8a, 0x33, 0x0c, + 0x78, 0x0c, 0x4a, 0x73, 0x6e, 0x49, 0x73, 0x81, 0x92, 0xa8, 0xc4, 0x67, 0xbc, 0x39, 0x02, 0x3f, + 0x05, 0xb5, 0xd9, 0xd3, 0xb7, 0xa1, 0xc3, 0x86, 0x8c, 0x72, 0xb3, 0xdc, 0x30, 0x9a, 0x1b, 0x5d, + 0xa8, 0x24, 0xaa, 0xf1, 0x1c, 0x82, 0x97, 0x98, 0xf0, 0x04, 0x54, 0x31, 0xfd, 0x85, 0x70, 0xe7, + 0xc4, 0x71, 0x38, 0x8d, 0x22, 0xf3, 0x95, 0x3e, 0xa6, 0x77, 0x94, 0x44, 0x6f, 0xf3, 0x2c, 0xf0, + 0x51, 0xe8, 0xb3, 0x69, 0x8f, 0xe2, 0x0a, 0xe7, 0x15, 0xf0, 0x13, 0x50, 0x3d, 0xa3, 0xc4, 0xa1, + 0xbc, 0x1f, 0xdb, 0xf6, 0xd4, 0xa2, 0xa2, 0x3b, 0xdd, 0x55, 0x12, 0x55, 0xbd, 0x2c, 0x80, 0xf3, + 0xbc, 0x85, 0xf0, 0x94, 0x30, 0x2f, 0xe6, 0xd4, 0x04, 0xcb, 0xc2, 0x39, 0x80, 0xf3, 0x3c, 0xf8, + 0x25, 0xd8, 0x79, 0x1c, 0x74, 0xba, 0xe9, 0x96, 0xd6, 0xee, 0x2b, 0x89, 0x76, 0x26, 0x4b, 0x18, + 0x5e, 0x61, 0xe7, 0x1c, 0xd2, 0xdd, 0xb7, 0xd7, 0x38, 0xa4, 0x0d, 0xac, 0xb0, 0xe1, 0x4f, 0xa0, + 0xbe, 0xb8, 0x6c, 0x6e, 0x10, 0x72, 0xea, 0xf4, 0x99, 0x1b, 0x10, 0x11, 0x73, 0x1a, 0x99, 0x55, + 0xed, 0x65, 0x29, 0x89, 0xea, 0x93, 0x27, 0x59, 0xf8, 0x19, 0x87, 0xa9, 0xff, 0x77, 0xb1, 0xdf, + 0xa7, 0x1e, 0xb5, 0x05, 0x75, 0x7a, 0xc1, 0xbc, 0xf3, 0xae, 0x17, 0xda, 0x3f, 0x47, 0x66, 0x6d, + 0xe1, 0x1f, 0x3c, 0xc9, 0xc2, 0xcf, 0x38, 0xc0, 0x6b, 0x03, 0xbc, 0x3e, 0xb1, 0xed, 0xd8, 0x8f, + 0x3d, 0x22, 0xa8, 0x73, 0x4a, 0x69, 0x64, 0xbe, 0xd6, 0xb3, 0x1f, 0x2a, 0x89, 0x8e, 0x48, 0x1e, + 0x5a, 0x4c, 0xff, 0xf7, 0x3f, 0xd1, 0xd7, 0x3e, 0x11, 0xa3, 0xce, 0x80, 0xb9, 0xed, 0x5e, 0x20, + 0x3e, 0xcb, 0xfc, 0x49, 0xfd, 0xd8, 0x13, 0x6c, 0x42, 0x79, 0x74, 0xd9, 0xf1, 0x2f, 0x5b, 0xf6, + 0x88, 0xb0, 0xa0, 0x65, 0x87, 0x9c, 0xb6, 0xdc, 0xb0, 0xe3, 0x10, 0x41, 0xda, 0x5d, 0xe6, 0xf6, + 0x02, 0xf1, 0x15, 0x89, 0x04, 0xe5, 0x78, 0x79, 0x7b, 0x78, 0x0a, 0xe0, 0x45, 0x28, 0x88, 0x97, + 0xbf, 0x4d, 0x3b, 0xfa, 0x55, 0x0f, 0x95, 0x44, 0x50, 0xac, 0xa0, 0x78, 0x8d, 0x62, 0xc9, 0x27, + 0x1d, 0xef, 0xee, 0x5a, 0x9f, 0x74, 0xc0, 0x6b, 0x14, 0xf0, 0x7b, 0x70, 0xa0, 0xab, 0x2b, 0x77, + 0x0d, 0x6a, 0xab, 0x23, 0x25, 0xd1, 0x81, 0x58, 0x47, 0xc0, 0xeb, 0x75, 0xab, 0x86, 0x69, 0x6f, + 0x7b, 0x4f, 0x19, 0xa6, 0xed, 0xad, 0xd7, 0x41, 0x1f, 0xa0, 0x3c, 0xb0, 0x7a, 0x13, 0xf7, 0xb5, + 0xf5, 0x7b, 0x4a, 0x22, 0x24, 0x9e, 0xa7, 0xe2, 0xff, 0xf2, 0x82, 0x9f, 0x83, 0xed, 0x73, 0x4e, + 0x27, 0x2c, 0x8c, 0x23, 0x9d, 
0x81, 0x07, 0x3a, 0x03, 0xeb, 0x4a, 0xa2, 0xc3, 0x71, 0xa6, 0x9e, + 0x89, 0x8a, 0x1c, 0x7f, 0x1a, 0x36, 0xe9, 0x7a, 0x96, 0x92, 0x87, 0xba, 0x39, 0x1d, 0x36, 0xe3, + 0x2c, 0x90, 0x0d, 0x9b, 0x9c, 0xe2, 0xf8, 0xef, 0x0d, 0x00, 0x75, 0x14, 0xbf, 0x7c, 0xd2, 0x7f, + 0x90, 0x4b, 0x7a, 0x1d, 0xa6, 0x5e, 0xfe, 0xed, 0x5e, 0x28, 0xf3, 0x97, 0x8f, 0xb9, 0xf4, 0xa6, + 0xc7, 0x5c, 0xfe, 0xbf, 0xc7, 0xdc, 0xfd, 0xe2, 0xe6, 0xce, 0x2a, 0xdc, 0xde, 0x59, 0x85, 0x87, + 0x3b, 0xcb, 0xf8, 0x35, 0xb1, 0x8c, 0xdf, 0x12, 0xcb, 0xf8, 0x23, 0xb1, 0x8c, 0x9b, 0xc4, 0x32, + 0x6e, 0x13, 0xcb, 0xf8, 0x2b, 0xb1, 0x8c, 0x7f, 0x12, 0xab, 0xf0, 0x90, 0x58, 0xc6, 0xf5, 0xbd, + 0x55, 0xb8, 0xb9, 0xb7, 0x0a, 0xb7, 0xf7, 0x56, 0xe1, 0xc7, 0x62, 0x24, 0x88, 0xa0, 0x83, 0x92, + 0xfe, 0x26, 0x7f, 0xfc, 0x6f, 0x00, 0x00, 0x00, 0xff, 0xff, 0x5d, 0x69, 0x2e, 0x1c, 0xe0, 0x07, + 0x00, 0x00, } func (this *ValidatorInfo) Equal(that interface{}) bool { @@ -431,6 +467,12 @@ func (this *ValidatorInfo) Equal(that interface{}) bool { if this.TotalValidatorIgnoredSignatures != that1.TotalValidatorIgnoredSignatures { return false } + if this.PreviousList != that1.PreviousList { + return false + } + if this.PreviousIndex != that1.PreviousIndex { + return false + } return true } func (this *ShardValidatorInfo) Equal(that interface{}) bool { @@ -467,13 +509,19 @@ func (this *ShardValidatorInfo) Equal(that interface{}) bool { if this.TempRating != that1.TempRating { return false } + if this.PreviousList != that1.PreviousList { + return false + } + if this.PreviousIndex != that1.PreviousIndex { + return false + } return true } func (this *ValidatorInfo) GoString() string { if this == nil { return "nil" } - s := make([]string, 0, 24) + s := make([]string, 0, 26) s = append(s, "&state.ValidatorInfo{") s = append(s, "PublicKey: "+fmt.Sprintf("%#v", this.PublicKey)+",\n") s = append(s, "ShardId: "+fmt.Sprintf("%#v", this.ShardId)+",\n") @@ -495,6 +543,8 @@ func (this *ValidatorInfo) GoString() string { s = append(s, "TotalValidatorSuccess: "+fmt.Sprintf("%#v", this.TotalValidatorSuccess)+",\n") s = append(s, "TotalValidatorFailure: "+fmt.Sprintf("%#v", this.TotalValidatorFailure)+",\n") s = append(s, "TotalValidatorIgnoredSignatures: "+fmt.Sprintf("%#v", this.TotalValidatorIgnoredSignatures)+",\n") + s = append(s, "PreviousList: "+fmt.Sprintf("%#v", this.PreviousList)+",\n") + s = append(s, "PreviousIndex: "+fmt.Sprintf("%#v", this.PreviousIndex)+",\n") s = append(s, "}") return strings.Join(s, "") } @@ -502,13 +552,15 @@ func (this *ShardValidatorInfo) GoString() string { if this == nil { return "nil" } - s := make([]string, 0, 9) + s := make([]string, 0, 11) s = append(s, "&state.ShardValidatorInfo{") s = append(s, "PublicKey: "+fmt.Sprintf("%#v", this.PublicKey)+",\n") s = append(s, "ShardId: "+fmt.Sprintf("%#v", this.ShardId)+",\n") s = append(s, "List: "+fmt.Sprintf("%#v", this.List)+",\n") s = append(s, "Index: "+fmt.Sprintf("%#v", this.Index)+",\n") s = append(s, "TempRating: "+fmt.Sprintf("%#v", this.TempRating)+",\n") + s = append(s, "PreviousList: "+fmt.Sprintf("%#v", this.PreviousList)+",\n") + s = append(s, "PreviousIndex: "+fmt.Sprintf("%#v", this.PreviousIndex)+",\n") s = append(s, "}") return strings.Join(s, "") } @@ -540,6 +592,22 @@ func (m *ValidatorInfo) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if m.PreviousIndex != 0 { + i = encodeVarintValidatorInfo(dAtA, i, uint64(m.PreviousIndex)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xb0 + } + if len(m.PreviousList) > 0 { + i -= len(m.PreviousList) + 
copy(dAtA[i:], m.PreviousList) + i = encodeVarintValidatorInfo(dAtA, i, uint64(len(m.PreviousList))) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xaa + } if m.TotalValidatorIgnoredSignatures != 0 { i = encodeVarintValidatorInfo(dAtA, i, uint64(m.TotalValidatorIgnoredSignatures)) i-- @@ -686,6 +754,18 @@ func (m *ShardValidatorInfo) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if m.PreviousIndex != 0 { + i = encodeVarintValidatorInfo(dAtA, i, uint64(m.PreviousIndex)) + i-- + dAtA[i] = 0x38 + } + if len(m.PreviousList) > 0 { + i -= len(m.PreviousList) + copy(dAtA[i:], m.PreviousList) + i = encodeVarintValidatorInfo(dAtA, i, uint64(len(m.PreviousList))) + i-- + dAtA[i] = 0x32 + } if m.TempRating != 0 { i = encodeVarintValidatorInfo(dAtA, i, uint64(m.TempRating)) i-- @@ -800,6 +880,13 @@ func (m *ValidatorInfo) Size() (n int) { if m.TotalValidatorIgnoredSignatures != 0 { n += 2 + sovValidatorInfo(uint64(m.TotalValidatorIgnoredSignatures)) } + l = len(m.PreviousList) + if l > 0 { + n += 2 + l + sovValidatorInfo(uint64(l)) + } + if m.PreviousIndex != 0 { + n += 2 + sovValidatorInfo(uint64(m.PreviousIndex)) + } return n } @@ -826,6 +913,13 @@ func (m *ShardValidatorInfo) Size() (n int) { if m.TempRating != 0 { n += 1 + sovValidatorInfo(uint64(m.TempRating)) } + l = len(m.PreviousList) + if l > 0 { + n += 1 + l + sovValidatorInfo(uint64(l)) + } + if m.PreviousIndex != 0 { + n += 1 + sovValidatorInfo(uint64(m.PreviousIndex)) + } return n } @@ -860,6 +954,8 @@ func (this *ValidatorInfo) String() string { `TotalValidatorSuccess:` + fmt.Sprintf("%v", this.TotalValidatorSuccess) + `,`, `TotalValidatorFailure:` + fmt.Sprintf("%v", this.TotalValidatorFailure) + `,`, `TotalValidatorIgnoredSignatures:` + fmt.Sprintf("%v", this.TotalValidatorIgnoredSignatures) + `,`, + `PreviousList:` + fmt.Sprintf("%v", this.PreviousList) + `,`, + `PreviousIndex:` + fmt.Sprintf("%v", this.PreviousIndex) + `,`, `}`, }, "") return s @@ -874,6 +970,8 @@ func (this *ShardValidatorInfo) String() string { `List:` + fmt.Sprintf("%v", this.List) + `,`, `Index:` + fmt.Sprintf("%v", this.Index) + `,`, `TempRating:` + fmt.Sprintf("%v", this.TempRating) + `,`, + `PreviousList:` + fmt.Sprintf("%v", this.PreviousList) + `,`, + `PreviousIndex:` + fmt.Sprintf("%v", this.PreviousIndex) + `,`, `}`, }, "") return s @@ -1349,6 +1447,57 @@ func (m *ValidatorInfo) Unmarshal(dAtA []byte) error { break } } + case 21: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PreviousList", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowValidatorInfo + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthValidatorInfo + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthValidatorInfo + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PreviousList = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 22: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field PreviousIndex", wireType) + } + m.PreviousIndex = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowValidatorInfo + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.PreviousIndex |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } default: iNdEx = preIndex 
skippy, err := skipValidatorInfo(dAtA[iNdEx:]) @@ -1525,6 +1674,57 @@ func (m *ShardValidatorInfo) Unmarshal(dAtA []byte) error { break } } + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PreviousList", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowValidatorInfo + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthValidatorInfo + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthValidatorInfo + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PreviousList = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field PreviousIndex", wireType) + } + m.PreviousIndex = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowValidatorInfo + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.PreviousIndex |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } default: iNdEx = preIndex skippy, err := skipValidatorInfo(dAtA[iNdEx:]) diff --git a/state/validatorInfo.proto b/state/validatorInfo.proto index c6256810091..2df2149d8f5 100644 --- a/state/validatorInfo.proto +++ b/state/validatorInfo.proto @@ -29,13 +29,17 @@ message ValidatorInfo { uint32 TotalValidatorSuccess = 18 [(gogoproto.jsontag) = "totalValidatorSuccess"]; uint32 TotalValidatorFailure = 19 [(gogoproto.jsontag) = "totalValidatorFailure"]; uint32 TotalValidatorIgnoredSignatures = 20 [(gogoproto.jsontag) = "totalValidatorIgnoredSignatures"]; + string PreviousList = 21 [(gogoproto.jsontag) = "previousList,omitempty"]; + uint32 PreviousIndex = 22 [(gogoproto.jsontag) = "previousIndex,omitempty"]; } // ShardValidatorInfo represents the data regarding a validator that is stored in the PeerMiniblocks message ShardValidatorInfo { - bytes PublicKey = 1 [(gogoproto.jsontag) = "publicKey"]; - uint32 ShardId = 2 [(gogoproto.jsontag) = "shardId"]; - string List = 3 [(gogoproto.jsontag) = "list,omitempty"]; - uint32 Index = 4 [(gogoproto.jsontag) = "index"]; - uint32 TempRating = 5 [(gogoproto.jsontag) = "tempRating"]; + bytes PublicKey = 1 [(gogoproto.jsontag) = "publicKey"]; + uint32 ShardId = 2 [(gogoproto.jsontag) = "shardId"]; + string List = 3 [(gogoproto.jsontag) = "list,omitempty"]; + uint32 Index = 4 [(gogoproto.jsontag) = "index"]; + uint32 TempRating = 5 [(gogoproto.jsontag) = "tempRating"]; + string PreviousList = 6 [(gogoproto.jsontag) = "previousList,omitempty"]; + uint32 PreviousIndex = 7 [(gogoproto.jsontag) = "previousIndex,omitempty"]; } diff --git a/state/validatorInfo_test.go b/state/validatorInfo_test.go deleted file mode 100644 index 6a6ca0be930..00000000000 --- a/state/validatorInfo_test.go +++ /dev/null @@ -1,15 +0,0 @@ -package state - -import ( - "testing" - - "github.com/multiversx/mx-chain-core-go/core/check" - "github.com/stretchr/testify/assert" -) - -func TestValidatorInfo_IsInterfaceNile(t *testing.T) { - t.Parallel() - - vi := &ValidatorInfo{} - assert.False(t, check.IfNil(vi)) -} diff --git a/state/validatorsInfoMap.go b/state/validatorsInfoMap.go new file mode 100644 index 00000000000..e6c492d9d39 --- /dev/null +++ b/state/validatorsInfoMap.go @@ -0,0 +1,177 @@ +package state + +import ( + "bytes" + "encoding/hex" + "fmt" + "sync" + 
+ "github.com/multiversx/mx-chain-core-go/core/check" +) + +type shardValidatorsInfoMap struct { + mutex sync.RWMutex + valInfoMap map[uint32][]ValidatorInfoHandler +} + +// NewShardValidatorsInfoMap creates an instance of shardValidatorsInfoMap which manages a +// map internally +func NewShardValidatorsInfoMap() *shardValidatorsInfoMap { + return &shardValidatorsInfoMap{ + mutex: sync.RWMutex{}, + valInfoMap: make(map[uint32][]ValidatorInfoHandler), + } +} + +// GetAllValidatorsInfo returns a []ValidatorInfoHandler copy with validators from all shards. +func (vi *shardValidatorsInfoMap) GetAllValidatorsInfo() []ValidatorInfoHandler { + ret := make([]ValidatorInfoHandler, 0) + + vi.mutex.RLock() + defer vi.mutex.RUnlock() + + for _, validatorsInShard := range vi.valInfoMap { + validatorsCopy := make([]ValidatorInfoHandler, len(validatorsInShard)) + copy(validatorsCopy, validatorsInShard) + ret = append(ret, validatorsCopy...) + } + + return ret +} + +// GetShardValidatorsInfoMap returns a map copy of internally stored data +func (vi *shardValidatorsInfoMap) GetShardValidatorsInfoMap() map[uint32][]ValidatorInfoHandler { + ret := make(map[uint32][]ValidatorInfoHandler, len(vi.valInfoMap)) + + vi.mutex.RLock() + defer vi.mutex.RUnlock() + + for shardID, validatorsInShard := range vi.valInfoMap { + validatorsCopy := make([]ValidatorInfoHandler, len(validatorsInShard)) + copy(validatorsCopy, validatorsInShard) + ret[shardID] = validatorsCopy + } + + return ret +} + +// Add adds a ValidatorInfoHandler in its corresponding shardID +func (vi *shardValidatorsInfoMap) Add(validator ValidatorInfoHandler) error { + if check.IfNil(validator) { + return ErrNilValidatorInfo + } + + shardID := validator.GetShardId() + vi.mutex.Lock() + vi.valInfoMap[shardID] = append(vi.valInfoMap[shardID], validator) + vi.mutex.Unlock() + + return nil +} + +// GetValidator returns a ValidatorInfoHandler copy with the provided blsKey, +// if it is present in the map, otherwise returns nil +func (vi *shardValidatorsInfoMap) GetValidator(blsKey []byte) ValidatorInfoHandler { + for _, validator := range vi.GetAllValidatorsInfo() { + if bytes.Equal(validator.GetPublicKey(), blsKey) { + return validator.ShallowClone() + } + } + + return nil +} + +// Replace will replace an existing ValidatorInfoHandler with a new one. The old and new validator +// shall be in the same shard. 
If the old validator is not found in the map, an error is returned
+func (vi *shardValidatorsInfoMap) Replace(old ValidatorInfoHandler, new ValidatorInfoHandler) error {
+	if check.IfNil(old) {
+		return fmt.Errorf("%w for old validator in shardValidatorsInfoMap.Replace", ErrNilValidatorInfo)
+	}
+	if check.IfNil(new) {
+		return fmt.Errorf("%w for new validator in shardValidatorsInfoMap.Replace", ErrNilValidatorInfo)
+	}
+	if old.GetShardId() != new.GetShardId() {
+		return fmt.Errorf("%w when trying to replace %s from shard %v with %s from shard %v",
+			ErrValidatorsDifferentShards,
+			hex.EncodeToString(old.GetPublicKey()),
+			old.GetShardId(),
+			hex.EncodeToString(new.GetPublicKey()),
+			new.GetShardId(),
+		)
+	}
+
+	shardID := old.GetShardId()
+	log.Debug("shardValidatorsInfoMap.Replace",
+		"old validator", hex.EncodeToString(old.GetPublicKey()), "shard", old.GetShardId(), "list", old.GetList(),
+		"with new validator", hex.EncodeToString(new.GetPublicKey()), "shard", new.GetShardId(), "list", new.GetList(),
+	)
+
+	vi.mutex.Lock()
+	defer vi.mutex.Unlock()
+
+	for idx, validator := range vi.valInfoMap[shardID] {
+		if bytes.Equal(validator.GetPublicKey(), old.GetPublicKey()) {
+			vi.valInfoMap[shardID][idx] = new
+			return nil
+		}
+	}
+
+	return fmt.Errorf("old %w: %s when trying to replace it with %s",
+		ErrValidatorNotFound,
+		hex.EncodeToString(old.GetPublicKey()),
+		hex.EncodeToString(new.GetPublicKey()),
+	)
+}
+
+// SetValidatorsInShard resets all validators saved in a specific shard with the provided []ValidatorInfoHandler.
+// Before setting them, it checks that provided validators have the same shardID as the one provided.
+func (vi *shardValidatorsInfoMap) SetValidatorsInShard(shardID uint32, validators []ValidatorInfoHandler) error {
+	sameShardValidators := make([]ValidatorInfoHandler, 0, len(validators))
+	for idx, validator := range validators {
+		if check.IfNil(validator) {
+			return fmt.Errorf("%w in shardValidatorsInfoMap.SetValidatorsInShard at index %d",
+				ErrNilValidatorInfo,
+				idx,
+			)
+		}
+		if validator.GetShardId() != shardID {
+			return fmt.Errorf("%w, %s is in shard %d, but should be set in shard %d in shardValidatorsInfoMap.SetValidatorsInShard",
+				ErrValidatorsDifferentShards,
+				hex.EncodeToString(validator.GetPublicKey()),
+				validator.GetShardId(),
+				shardID,
+			)
+		}
+		sameShardValidators = append(sameShardValidators, validator)
+	}
+
+	vi.mutex.Lock()
+	vi.valInfoMap[shardID] = sameShardValidators
+	vi.mutex.Unlock()
+
+	return nil
+}
+
+// Delete will delete the provided validator from the internally stored map, if found. 
+// The validators slice at the corresponding shardID key will be re-sliced, without reordering +func (vi *shardValidatorsInfoMap) Delete(validator ValidatorInfoHandler) error { + if check.IfNil(validator) { + return ErrNilValidatorInfo + } + + shardID := validator.GetShardId() + vi.mutex.Lock() + defer vi.mutex.Unlock() + + for index, validatorInfo := range vi.valInfoMap[shardID] { + if bytes.Equal(validatorInfo.GetPublicKey(), validator.GetPublicKey()) { + length := len(vi.valInfoMap[shardID]) + vi.valInfoMap[shardID][index] = vi.valInfoMap[shardID][length-1] + vi.valInfoMap[shardID][length-1] = nil + vi.valInfoMap[shardID] = vi.valInfoMap[shardID][:length-1] + break + } + } + + return nil +} diff --git a/state/validatorsInfoMap_test.go b/state/validatorsInfoMap_test.go new file mode 100644 index 00000000000..e90c01993cd --- /dev/null +++ b/state/validatorsInfoMap_test.go @@ -0,0 +1,345 @@ +package state + +import ( + "encoding/hex" + "strconv" + "strings" + "sync" + "testing" + + "github.com/multiversx/mx-chain-core-go/core" + "github.com/stretchr/testify/require" +) + +func TestShardValidatorsInfoMap_OperationsWithNilValidators(t *testing.T) { + t.Parallel() + + vi := NewShardValidatorsInfoMap() + + t.Run("add nil validator", func(t *testing.T) { + t.Parallel() + + err := vi.Add(nil) + require.Equal(t, ErrNilValidatorInfo, err) + }) + + t.Run("delete nil validator", func(t *testing.T) { + t.Parallel() + + err := vi.Delete(nil) + require.Equal(t, ErrNilValidatorInfo, err) + }) + + t.Run("replace nil validator", func(t *testing.T) { + t.Parallel() + + err := vi.Replace(nil, &ValidatorInfo{}) + require.Error(t, err) + require.True(t, strings.Contains(err.Error(), ErrNilValidatorInfo.Error())) + require.True(t, strings.Contains(err.Error(), "old")) + + err = vi.Replace(&ValidatorInfo{}, nil) + require.Error(t, err) + require.True(t, strings.Contains(err.Error(), ErrNilValidatorInfo.Error())) + require.True(t, strings.Contains(err.Error(), "new")) + }) + + t.Run("set nil validators in shard", func(t *testing.T) { + t.Parallel() + + v := &ValidatorInfo{ShardId: 3, PublicKey: []byte("pk")} + err := vi.SetValidatorsInShard(3, []ValidatorInfoHandler{v, nil}) + require.Error(t, err) + require.True(t, strings.Contains(err.Error(), ErrNilValidatorInfo.Error())) + require.True(t, strings.Contains(err.Error(), "index 1")) + }) +} + +func TestShardValidatorsInfoMap_Add_GetShardValidatorsInfoMap_GetAllValidatorsInfo_GetValInfoPointerMap(t *testing.T) { + t.Parallel() + + vi := NewShardValidatorsInfoMap() + + v0 := &ValidatorInfo{ShardId: 0, PublicKey: []byte("pk0")} + v1 := &ValidatorInfo{ShardId: 0, PublicKey: []byte("pk1")} + v2 := &ValidatorInfo{ShardId: 1, PublicKey: []byte("pk2")} + v3 := &ValidatorInfo{ShardId: core.MetachainShardId, PublicKey: []byte("pk3")} + + _ = vi.Add(v0) + _ = vi.Add(v1) + _ = vi.Add(v2) + _ = vi.Add(v3) + + allValidators := vi.GetAllValidatorsInfo() + require.Len(t, allValidators, 4) + require.Contains(t, allValidators, v0) + require.Contains(t, allValidators, v1) + require.Contains(t, allValidators, v2) + require.Contains(t, allValidators, v3) + + validatorsMap := vi.GetShardValidatorsInfoMap() + expectedValidatorsMap := map[uint32][]ValidatorInfoHandler{ + 0: {v0, v1}, + 1: {v2}, + core.MetachainShardId: {v3}, + } + require.Equal(t, validatorsMap, expectedValidatorsMap) +} + +func TestShardValidatorsInfoMap_GetValidator(t *testing.T) { + t.Parallel() + + vi := NewShardValidatorsInfoMap() + + pubKey0 := []byte("pk0") + pubKey1 := []byte("pk1") + v0 := 
&ValidatorInfo{ShardId: 0, PublicKey: pubKey0} + v1 := &ValidatorInfo{ShardId: 1, PublicKey: pubKey1} + + _ = vi.Add(v0) + _ = vi.Add(v1) + + require.Equal(t, v0, vi.GetValidator(pubKey0)) + require.Equal(t, v1, vi.GetValidator(pubKey1)) + require.Nil(t, vi.GetValidator([]byte("pk2"))) +} + +func TestShardValidatorsInfoMap_Delete(t *testing.T) { + t.Parallel() + + vi := NewShardValidatorsInfoMap() + + v0 := &ValidatorInfo{ShardId: 0, PublicKey: []byte("pk0")} + v1 := &ValidatorInfo{ShardId: 0, PublicKey: []byte("pk1")} + v2 := &ValidatorInfo{ShardId: 0, PublicKey: []byte("pk2")} + v3 := &ValidatorInfo{ShardId: 1, PublicKey: []byte("pk3")} + + _ = vi.Add(v0) + _ = vi.Add(v1) + _ = vi.Add(v2) + _ = vi.Add(v3) + + _ = vi.Delete(&ValidatorInfo{ShardId: 0, PublicKey: []byte("pk3")}) + _ = vi.Delete(&ValidatorInfo{ShardId: 1, PublicKey: []byte("pk0")}) + require.Len(t, vi.GetAllValidatorsInfo(), 4) + + _ = vi.Delete(v1) + require.Len(t, vi.GetAllValidatorsInfo(), 3) + require.Equal(t, []ValidatorInfoHandler{v0, v2}, vi.GetShardValidatorsInfoMap()[0]) + require.Equal(t, []ValidatorInfoHandler{v3}, vi.GetShardValidatorsInfoMap()[1]) + + _ = vi.Delete(v3) + require.Len(t, vi.GetAllValidatorsInfo(), 2) + require.Equal(t, []ValidatorInfoHandler{v0, v2}, vi.GetShardValidatorsInfoMap()[0]) +} + +func TestShardValidatorsInfoMap_Replace(t *testing.T) { + t.Parallel() + + vi := NewShardValidatorsInfoMap() + + v0 := &ValidatorInfo{ShardId: 0, PublicKey: []byte("pk0")} + v1 := &ValidatorInfo{ShardId: 0, PublicKey: []byte("pk1")} + + _ = vi.Add(v0) + _ = vi.Add(v1) + + err := vi.Replace(v0, &ValidatorInfo{ShardId: 1, PublicKey: []byte("pk2")}) + require.Error(t, err) + require.True(t, strings.Contains(err.Error(), ErrValidatorsDifferentShards.Error())) + require.Equal(t, []ValidatorInfoHandler{v0, v1}, vi.GetShardValidatorsInfoMap()[0]) + + v2 := &ValidatorInfo{ShardId: 0, PublicKey: []byte("pk2")} + err = vi.Replace(v0, v2) + require.Nil(t, err) + require.Equal(t, []ValidatorInfoHandler{v2, v1}, vi.GetShardValidatorsInfoMap()[0]) + + v3 := &ValidatorInfo{ShardId: 0, PublicKey: []byte("pk3")} + v4 := &ValidatorInfo{ShardId: 0, PublicKey: []byte("pk4")} + err = vi.Replace(v3, v4) + require.Error(t, err) + require.True(t, strings.Contains(err.Error(), ErrValidatorNotFound.Error())) + require.True(t, strings.Contains(err.Error(), hex.EncodeToString(v3.PublicKey))) + require.Equal(t, []ValidatorInfoHandler{v2, v1}, vi.GetShardValidatorsInfoMap()[0]) +} + +func TestShardValidatorsInfoMap_SetValidatorsInShard(t *testing.T) { + t.Parallel() + + vi := NewShardValidatorsInfoMap() + + v0 := &ValidatorInfo{ShardId: 0, PublicKey: []byte("pk0")} + _ = vi.Add(v0) + + v1 := &ValidatorInfo{ShardId: 0, PublicKey: []byte("pk1")} + v2 := &ValidatorInfo{ShardId: 0, PublicKey: []byte("pk2")} + v3 := &ValidatorInfo{ShardId: 1, PublicKey: []byte("pk3")} + shard0Validators := []ValidatorInfoHandler{v1, v2} + shard1Validators := []ValidatorInfoHandler{v3} + + err := vi.SetValidatorsInShard(1, shard0Validators) + require.Error(t, err) + require.True(t, strings.Contains(err.Error(), ErrValidatorsDifferentShards.Error())) + require.True(t, strings.Contains(err.Error(), hex.EncodeToString(v1.PublicKey))) + require.Equal(t, []ValidatorInfoHandler{v0}, vi.GetShardValidatorsInfoMap()[0]) + require.Empty(t, vi.GetShardValidatorsInfoMap()[1]) + + err = vi.SetValidatorsInShard(0, []ValidatorInfoHandler{v1, v2, v3}) + require.Error(t, err) + require.True(t, strings.Contains(err.Error(), ErrValidatorsDifferentShards.Error())) + require.True(t, 
strings.Contains(err.Error(), hex.EncodeToString(v3.PublicKey))) + require.Equal(t, []ValidatorInfoHandler{v0}, vi.GetShardValidatorsInfoMap()[0]) + require.Empty(t, vi.GetShardValidatorsInfoMap()[1]) + + err = vi.SetValidatorsInShard(0, shard0Validators) + require.Nil(t, err) + require.Equal(t, shard0Validators, vi.GetShardValidatorsInfoMap()[0]) + + err = vi.SetValidatorsInShard(1, shard1Validators) + require.Nil(t, err) + require.Equal(t, shard1Validators, vi.GetShardValidatorsInfoMap()[1]) +} + +func TestShardValidatorsInfoMap_GettersShouldReturnCopiesOfInternalData(t *testing.T) { + t.Parallel() + + vi := NewShardValidatorsInfoMap() + + v0 := &ValidatorInfo{ShardId: 0, PublicKey: []byte("pk0")} + v1 := &ValidatorInfo{ShardId: 1, PublicKey: []byte("pk1")} + + _ = vi.Add(v0) + _ = vi.Add(v1) + + validatorsMap := vi.GetShardValidatorsInfoMap() + delete(validatorsMap, 0) + validatorsMap[1][0].SetPublicKey([]byte("rnd")) + + validators := vi.GetAllValidatorsInfo() + validators = append(validators, &ValidatorInfo{ShardId: 1, PublicKey: []byte("pk3")}) + + validator := vi.GetValidator([]byte("pk0")) + require.False(t, validator == v0) // require not same pointer + validator.SetShardId(2) + + require.Len(t, vi.GetAllValidatorsInfo(), 2) + require.True(t, vi.GetShardValidatorsInfoMap()[0][0] == v0) // check by pointer + require.True(t, vi.GetShardValidatorsInfoMap()[1][0] == v1) // check by pointer + require.NotEqual(t, vi.GetAllValidatorsInfo(), validators) +} + +func TestShardValidatorsInfoMap_Concurrency(t *testing.T) { + t.Parallel() + + vi := NewShardValidatorsInfoMap() + + numValidatorsShard0 := 100 + numValidatorsShard1 := 50 + numValidators := numValidatorsShard0 + numValidatorsShard1 + + shard0Validators := createValidatorsInfo(0, numValidatorsShard0) + shard1Validators := createValidatorsInfo(1, numValidatorsShard1) + + firstHalfShard0 := shard0Validators[:numValidatorsShard0/2] + secondHalfShard0 := shard0Validators[numValidatorsShard0/2:] + + firstHalfShard1 := shard1Validators[:numValidatorsShard1/2] + secondHalfShard1 := shard1Validators[numValidatorsShard1/2:] + + wg := &sync.WaitGroup{} + + wg.Add(numValidators) + go addValidatorsInShardConcurrently(vi, shard0Validators, wg) + go addValidatorsInShardConcurrently(vi, shard1Validators, wg) + wg.Wait() + requireSameValidatorsDifferentOrder(t, vi.GetShardValidatorsInfoMap()[0], shard0Validators) + requireSameValidatorsDifferentOrder(t, vi.GetShardValidatorsInfoMap()[1], shard1Validators) + + wg.Add(numValidators / 2) + go deleteValidatorsConcurrently(vi, firstHalfShard0, wg) + go deleteValidatorsConcurrently(vi, firstHalfShard1, wg) + wg.Wait() + requireSameValidatorsDifferentOrder(t, vi.GetShardValidatorsInfoMap()[0], secondHalfShard0) + requireSameValidatorsDifferentOrder(t, vi.GetShardValidatorsInfoMap()[1], secondHalfShard1) + + wg.Add(numValidators / 2) + go replaceValidatorsConcurrently(vi, vi.GetShardValidatorsInfoMap()[0], firstHalfShard0, wg) + go replaceValidatorsConcurrently(vi, vi.GetShardValidatorsInfoMap()[1], firstHalfShard1, wg) + wg.Wait() + requireSameValidatorsDifferentOrder(t, vi.GetShardValidatorsInfoMap()[0], firstHalfShard0) + requireSameValidatorsDifferentOrder(t, vi.GetShardValidatorsInfoMap()[1], firstHalfShard1) + + wg.Add(2) + go func() { + _ = vi.SetValidatorsInShard(0, shard0Validators) + wg.Done() + }() + go func() { + _ = vi.SetValidatorsInShard(1, shard1Validators) + wg.Done() + }() + wg.Wait() + requireSameValidatorsDifferentOrder(t, vi.GetShardValidatorsInfoMap()[0], shard0Validators) + 
requireSameValidatorsDifferentOrder(t, vi.GetShardValidatorsInfoMap()[1], shard1Validators) +} + +func requireSameValidatorsDifferentOrder(t *testing.T, dest []ValidatorInfoHandler, src []ValidatorInfoHandler) { + require.Equal(t, len(dest), len(src)) + + for _, v := range src { + require.Contains(t, dest, v) + } +} + +func createValidatorsInfo(shardID uint32, numOfValidators int) []ValidatorInfoHandler { + ret := make([]ValidatorInfoHandler, 0, numOfValidators) + + for i := 0; i < numOfValidators; i++ { + ret = append(ret, &ValidatorInfo{ + ShardId: shardID, + PublicKey: []byte(strconv.Itoa(int(shardID)) + "pubKey" + strconv.Itoa(i)), + }) + } + + return ret +} + +func addValidatorsInShardConcurrently( + vi ShardValidatorsInfoMapHandler, + validators []ValidatorInfoHandler, + wg *sync.WaitGroup, +) { + for _, validator := range validators { + go func(val ValidatorInfoHandler) { + _ = vi.Add(val) + wg.Done() + }(validator) + } +} + +func deleteValidatorsConcurrently( + vi ShardValidatorsInfoMapHandler, + validators []ValidatorInfoHandler, + wg *sync.WaitGroup, +) { + for _, validator := range validators { + go func(val ValidatorInfoHandler) { + _ = vi.Delete(val) + wg.Done() + }(validator) + } +} + +func replaceValidatorsConcurrently( + vi ShardValidatorsInfoMapHandler, + oldValidators []ValidatorInfoHandler, + newValidators []ValidatorInfoHandler, + wg *sync.WaitGroup, +) { + for idx := range oldValidators { + go func(old ValidatorInfoHandler, new ValidatorInfoHandler) { + _ = vi.Replace(old, new) + wg.Done() + }(oldValidators[idx], newValidators[idx]) + } +} diff --git a/statusHandler/statusMetricsProvider.go b/statusHandler/statusMetricsProvider.go index c4211e889a2..99f15ad1bf6 100644 --- a/statusHandler/statusMetricsProvider.go +++ b/statusHandler/statusMetricsProvider.go @@ -295,7 +295,6 @@ func (sm *statusMetrics) EnableEpochsMetrics() (map[string]interface{}, error) { enableEpochsMetrics[common.MetricDelegationSmartContractEnableEpoch] = sm.uint64Metrics[common.MetricDelegationSmartContractEnableEpoch] enableEpochsMetrics[common.MetricIncrementSCRNonceInMultiTransferEnableEpoch] = sm.uint64Metrics[common.MetricIncrementSCRNonceInMultiTransferEnableEpoch] enableEpochsMetrics[common.MetricBalanceWaitingListsEnableEpoch] = sm.uint64Metrics[common.MetricBalanceWaitingListsEnableEpoch] - enableEpochsMetrics[common.MetricWaitingListFixEnableEpoch] = sm.uint64Metrics[common.MetricWaitingListFixEnableEpoch] enableEpochsMetrics[common.MetricSetGuardianEnableEpoch] = sm.uint64Metrics[common.MetricSetGuardianEnableEpoch] numNodesChangeConfig := sm.uint64Metrics[common.MetricMaxNodesChangeEnableEpoch+"_count"] diff --git a/statusHandler/statusMetricsProvider_test.go b/statusHandler/statusMetricsProvider_test.go index 12831f384c6..5572b1754f8 100644 --- a/statusHandler/statusMetricsProvider_test.go +++ b/statusHandler/statusMetricsProvider_test.go @@ -315,7 +315,6 @@ func TestStatusMetrics_EnableEpochMetrics(t *testing.T) { sm.SetUInt64Value(common.MetricDelegationSmartContractEnableEpoch, 2) sm.SetUInt64Value(common.MetricIncrementSCRNonceInMultiTransferEnableEpoch, 3) sm.SetUInt64Value(common.MetricBalanceWaitingListsEnableEpoch, 4) - sm.SetUInt64Value(common.MetricWaitingListFixEnableEpoch, 1) sm.SetUInt64Value(common.MetricSetGuardianEnableEpoch, 3) maxNodesChangeConfig := []map[string]uint64{ @@ -365,7 +364,6 @@ func TestStatusMetrics_EnableEpochMetrics(t *testing.T) { common.MetricDelegationSmartContractEnableEpoch: uint64(2), common.MetricIncrementSCRNonceInMultiTransferEnableEpoch: 
uint64(3), common.MetricBalanceWaitingListsEnableEpoch: uint64(4), - common.MetricWaitingListFixEnableEpoch: uint64(1), common.MetricSetGuardianEnableEpoch: uint64(3), common.MetricMaxNodesChangeEnableEpoch: []map[string]interface{}{ diff --git a/storage/factory/dbConfigHandler.go b/storage/factory/dbConfigHandler.go index 28ba8b5dcdb..2e5a611f293 100644 --- a/storage/factory/dbConfigHandler.go +++ b/storage/factory/dbConfigHandler.go @@ -1,6 +1,7 @@ package factory import ( + "errors" "fmt" "os" "path/filepath" @@ -14,6 +15,10 @@ const ( defaultType = "LvlDBSerial" ) +var ( + errInvalidConfiguration = errors.New("invalid configuration") +) + type dbConfigHandler struct { dbType string batchDelaySeconds int @@ -38,7 +43,7 @@ func NewDBConfigHandler(config config.DBConfig) *dbConfigHandler { // GetDBConfig will get the db config based on path func (dh *dbConfigHandler) GetDBConfig(path string) (*config.DBConfig, error) { dbConfigFromFile := &config.DBConfig{} - err := core.LoadTomlFile(dbConfigFromFile, getPersisterConfigFilePath(path)) + err := readCorrectConfigurationFromToml(dbConfigFromFile, getPersisterConfigFilePath(path)) if err == nil { log.Debug("GetDBConfig: loaded db config from toml config file", "config path", path, @@ -79,6 +84,20 @@ func (dh *dbConfigHandler) GetDBConfig(path string) (*config.DBConfig, error) { return dbConfig, nil } +func readCorrectConfigurationFromToml(dbConfig *config.DBConfig, filePath string) error { + err := core.LoadTomlFile(dbConfig, filePath) + if err != nil { + return err + } + + isInvalidConfig := len(dbConfig.Type) == 0 || dbConfig.MaxBatchSize <= 0 || dbConfig.BatchDelaySeconds <= 0 || dbConfig.MaxOpenFiles <= 0 + if isInvalidConfig { + return errInvalidConfiguration + } + + return nil +} + // SaveDBConfigToFilePath will save the provided db config to specified path func (dh *dbConfigHandler) SaveDBConfigToFilePath(path string, dbConfig *config.DBConfig) error { pathExists, err := checkIfDirExists(path) @@ -92,13 +111,6 @@ func (dh *dbConfigHandler) SaveDBConfigToFilePath(path string, dbConfig *config. 
configFilePath := getPersisterConfigFilePath(path) - loadedDBConfig := &config.DBConfig{} - err = core.LoadTomlFile(loadedDBConfig, configFilePath) - if err == nil { - // config file already exists, no need to save config - return nil - } - err = core.SaveTomlFile(dbConfig, configFilePath) if err != nil { return err diff --git a/storage/factory/dbConfigHandler_test.go b/storage/factory/dbConfigHandler_test.go index 73fbfa55b81..910683d732d 100644 --- a/storage/factory/dbConfigHandler_test.go +++ b/storage/factory/dbConfigHandler_test.go @@ -2,11 +2,13 @@ package factory_test import ( "os" + "path" "testing" "github.com/multiversx/mx-chain-core-go/core" "github.com/multiversx/mx-chain-go/config" "github.com/multiversx/mx-chain-go/storage/factory" + "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) @@ -88,6 +90,37 @@ func TestDBConfigHandler_GetDBConfig(t *testing.T) { require.Nil(t, err) require.Equal(t, expectedDBConfig, conf) }) + t.Run("empty config.toml file, load default db config", func(t *testing.T) { + t.Parallel() + + testConfig := createDefaultDBConfig() + testConfig.BatchDelaySeconds = 37 + testConfig.MaxBatchSize = 38 + testConfig.MaxOpenFiles = 39 + testConfig.ShardIDProviderType = "BinarySplit" + testConfig.NumShards = 4 + pf := factory.NewDBConfigHandler(testConfig) + + dirPath := t.TempDir() + + f, _ := os.Create(path.Join(dirPath, factory.DBConfigFileName)) + _ = f.Close() + + expectedDBConfig := &config.DBConfig{ + FilePath: "", + Type: factory.DefaultType, + BatchDelaySeconds: testConfig.BatchDelaySeconds, + MaxBatchSize: testConfig.MaxBatchSize, + MaxOpenFiles: testConfig.MaxOpenFiles, + UseTmpAsFilePath: false, + ShardIDProviderType: "", + NumShards: 0, + } + + conf, err := pf.GetDBConfig(dirPath) + require.Nil(t, err) + require.Equal(t, expectedDBConfig, conf) + }) t.Run("empty dir, load db config from main config", func(t *testing.T) { t.Parallel() @@ -146,22 +179,33 @@ func TestDBConfigHandler_SaveDBConfigToFilePath(t *testing.T) { err := pf.SaveDBConfigToFilePath("no/valid/path", &dbConfig) require.Nil(t, err) }) - - t.Run("config file already present, should not fail", func(t *testing.T) { + t.Run("config file already present, should not fail and should rewrite", func(t *testing.T) { t.Parallel() - dbConfig := createDefaultDBConfig() + dbConfig1 := createDefaultDBConfig() + dbConfig1.MaxOpenFiles = 37 + dbConfig1.Type = "dbconfig1" dirPath := t.TempDir() configPath := factory.GetPersisterConfigFilePath(dirPath) - err := core.SaveTomlFile(dbConfig, configPath) + err := core.SaveTomlFile(dbConfig1, configPath) require.Nil(t, err) - pf := factory.NewDBConfigHandler(dbConfig) - err = pf.SaveDBConfigToFilePath(dirPath, &dbConfig) + pf := factory.NewDBConfigHandler(dbConfig1) + + dbConfig2 := createDefaultDBConfig() + dbConfig2.MaxOpenFiles = 38 + dbConfig2.Type = "dbconfig2" + + err = pf.SaveDBConfigToFilePath(dirPath, &dbConfig2) + require.Nil(t, err) + + loadedDBConfig := &config.DBConfig{} + err = core.LoadTomlFile(loadedDBConfig, path.Join(dirPath, "config.toml")) require.Nil(t, err) - }) + assert.Equal(t, dbConfig2, *loadedDBConfig) + }) t.Run("should work", func(t *testing.T) { t.Parallel() diff --git a/storage/factory/export_test.go b/storage/factory/export_test.go index 23317b7d4cf..177bc97358c 100644 --- a/storage/factory/export_test.go +++ b/storage/factory/export_test.go @@ -8,6 +8,9 @@ import ( // DefaultType exports the defaultType const to be used in tests const DefaultType = defaultType +// DBConfigFileName exports the 
dbConfigFileName const to be used in tests +const DBConfigFileName = dbConfigFileName + // GetPersisterConfigFilePath - func GetPersisterConfigFilePath(path string) string { return getPersisterConfigFilePath(path) diff --git a/storage/interface.go b/storage/interface.go index 328eb86c4ed..c3e5aa3826d 100644 --- a/storage/interface.go +++ b/storage/interface.go @@ -216,8 +216,8 @@ type PersisterFactoryHandler interface { // StateStatsHandler defines the behaviour needed to handler storage statistics type StateStatsHandler interface { - IncrCache() - IncrSnapshotCache() - IncrPersister(epoch uint32) - IncrSnapshotPersister(epoch uint32) + IncrementCache() + IncrementSnapshotCache() + IncrementPersister(epoch uint32) + IncrementSnapshotPersister(epoch uint32) } diff --git a/storage/pruning/pruningStorer.go b/storage/pruning/pruningStorer.go index f90f1c75aaa..2007454a7c8 100644 --- a/storage/pruning/pruningStorer.go +++ b/storage/pruning/pruningStorer.go @@ -434,7 +434,7 @@ func (ps *PruningStorer) createAndInitPersister(pd *persisterData) (storage.Pers func (ps *PruningStorer) Get(key []byte) ([]byte, error) { v, ok := ps.cacher.Get(key) if ok { - ps.stateStatsHandler.IncrCache() + ps.stateStatsHandler.IncrementCache() return v.([]byte), nil } @@ -457,7 +457,7 @@ func (ps *PruningStorer) Get(key []byte) ([]byte, error) { // if found in persistence unit, add it to cache and return _ = ps.cacher.Put(key, val, len(val)) - ps.stateStatsHandler.IncrPersister(ps.activePersisters[idx].epoch) + ps.stateStatsHandler.IncrementPersister(ps.activePersisters[idx].epoch) return val, nil } diff --git a/storage/pruning/triePruningStorer.go b/storage/pruning/triePruningStorer.go index 1eb290023c6..e013820db65 100644 --- a/storage/pruning/triePruningStorer.go +++ b/storage/pruning/triePruningStorer.go @@ -95,7 +95,7 @@ func (ps *triePruningStorer) PutInEpochWithoutCache(key []byte, data []byte, epo func (ps *triePruningStorer) GetFromOldEpochsWithoutAddingToCache(key []byte) ([]byte, core.OptionalUint32, error) { v, ok := ps.cacher.Get(key) if ok && !bytes.Equal([]byte(common.ActiveDBKey), key) { - ps.stateStatsHandler.IncrSnapshotCache() + ps.stateStatsHandler.IncrementSnapshotCache() return v.([]byte), core.OptionalUint32{}, nil } @@ -118,7 +118,7 @@ func (ps *triePruningStorer) GetFromOldEpochsWithoutAddingToCache(key []byte) ([ HasValue: true, } - ps.stateStatsHandler.IncrSnapshotPersister(epoch.Value) + ps.stateStatsHandler.IncrementSnapshotPersister(epoch.Value) return val, epoch, nil } diff --git a/storage/pruning/triePruningStorer_test.go b/storage/pruning/triePruningStorer_test.go index 4d9a7c83227..28dc5c93f8e 100644 --- a/storage/pruning/triePruningStorer_test.go +++ b/storage/pruning/triePruningStorer_test.go @@ -76,6 +76,31 @@ func TestTriePruningStorer_GetFromOldEpochsWithoutCacheSearchesOnlyOldEpochsAndR assert.True(t, strings.Contains(err.Error(), "not found")) } +func TestTriePruningStorer_GetFromOldEpochsWithCache(t *testing.T) { + t.Parallel() + + args := getDefaultArgs() + ps, _ := pruning.NewTriePruningStorer(args) + cacher := testscommon.NewCacherMock() + ps.SetCacher(cacher) + + testKey1 := []byte("key1") + testVal1 := []byte("value1") + + err := ps.PutInEpoch(testKey1, testVal1, 0) + assert.Nil(t, err) + + err = ps.ChangeEpochSimple(1) + assert.Nil(t, err) + ps.SetEpochForPutOperation(1) + + res, epoch, err := ps.GetFromOldEpochsWithoutAddingToCache(testKey1) + assert.Equal(t, testVal1, res) + assert.Nil(t, err) + assert.False(t, epoch.HasValue) + assert.Equal(t, uint32(0), 
epoch.Value) +} + func TestTriePruningStorer_GetFromOldEpochsWithoutCacheLessActivePersisters(t *testing.T) { t.Parallel() diff --git a/testscommon/bootstrapMocks/bootstrapParamsStub.go b/testscommon/bootstrapMocks/bootstrapParamsStub.go index d62f2d72b61..56d0b6219bd 100644 --- a/testscommon/bootstrapMocks/bootstrapParamsStub.go +++ b/testscommon/bootstrapMocks/bootstrapParamsStub.go @@ -7,7 +7,7 @@ type BootstrapParamsHandlerMock struct { EpochCalled func() uint32 SelfShardIDCalled func() uint32 NumOfShardsCalled func() uint32 - NodesConfigCalled func() *nodesCoordinator.NodesCoordinatorRegistry + NodesConfigCalled func() nodesCoordinator.NodesCoordinatorRegistryHandler } // Epoch - @@ -36,7 +36,7 @@ func (bphm *BootstrapParamsHandlerMock) NumOfShards() uint32 { } // NodesConfig - -func (bphm *BootstrapParamsHandlerMock) NodesConfig() *nodesCoordinator.NodesCoordinatorRegistry { +func (bphm *BootstrapParamsHandlerMock) NodesConfig() nodesCoordinator.NodesCoordinatorRegistryHandler { if bphm.NodesConfigCalled != nil { return bphm.NodesConfigCalled() } diff --git a/testscommon/builtInCostHandlerStub.go b/testscommon/builtInCostHandlerStub.go deleted file mode 100644 index 046cc45ac2b..00000000000 --- a/testscommon/builtInCostHandlerStub.go +++ /dev/null @@ -1,34 +0,0 @@ -package testscommon - -import ( - "github.com/multiversx/mx-chain-core-go/data" -) - -// BuiltInCostHandlerStub - -type BuiltInCostHandlerStub struct { - ComputeBuiltInCostCalled func(tx data.TransactionWithFeeHandler) uint64 - IsBuiltInFuncCallCalled func(tx data.TransactionWithFeeHandler) bool -} - -// ComputeBuiltInCost - -func (stub *BuiltInCostHandlerStub) ComputeBuiltInCost(tx data.TransactionWithFeeHandler) uint64 { - if stub.ComputeBuiltInCostCalled != nil { - return stub.ComputeBuiltInCostCalled(tx) - } - - return 1 -} - -// IsBuiltInFuncCall - -func (stub *BuiltInCostHandlerStub) IsBuiltInFuncCall(tx data.TransactionWithFeeHandler) bool { - if stub.IsBuiltInFuncCallCalled != nil { - return stub.IsBuiltInFuncCallCalled(tx) - } - - return false -} - -// IsInterfaceNil returns true if underlying object is nil -func (stub *BuiltInCostHandlerStub) IsInterfaceNil() bool { - return stub == nil -} diff --git a/testscommon/chainSimulator/chainSimulatorMock.go b/testscommon/chainSimulator/chainSimulatorMock.go new file mode 100644 index 00000000000..07db474a07e --- /dev/null +++ b/testscommon/chainSimulator/chainSimulatorMock.go @@ -0,0 +1,31 @@ +package chainSimulator + +import "github.com/multiversx/mx-chain-go/node/chainSimulator/process" + +// ChainSimulatorMock - +type ChainSimulatorMock struct { + GenerateBlocksCalled func(numOfBlocks int) error + GetNodeHandlerCalled func(shardID uint32) process.NodeHandler +} + +// GenerateBlocks - +func (mock *ChainSimulatorMock) GenerateBlocks(numOfBlocks int) error { + if mock.GenerateBlocksCalled != nil { + return mock.GenerateBlocksCalled(numOfBlocks) + } + + return nil +} + +// GetNodeHandler - +func (mock *ChainSimulatorMock) GetNodeHandler(shardID uint32) process.NodeHandler { + if mock.GetNodeHandlerCalled != nil { + return mock.GetNodeHandlerCalled(shardID) + } + return nil +} + +// IsInterfaceNil - +func (mock *ChainSimulatorMock) IsInterfaceNil() bool { + return mock == nil +} diff --git a/testscommon/chainSimulator/nodeHandlerMock.go b/testscommon/chainSimulator/nodeHandlerMock.go new file mode 100644 index 00000000000..23941f914eb --- /dev/null +++ b/testscommon/chainSimulator/nodeHandlerMock.go @@ -0,0 +1,127 @@ +package chainSimulator + +import ( + chainData 
"github.com/multiversx/mx-chain-core-go/data" + "github.com/multiversx/mx-chain-go/api/shared" + "github.com/multiversx/mx-chain-go/consensus" + "github.com/multiversx/mx-chain-go/factory" + "github.com/multiversx/mx-chain-go/node/chainSimulator/dtos" + "github.com/multiversx/mx-chain-go/sharding" +) + +// NodeHandlerMock - +type NodeHandlerMock struct { + GetProcessComponentsCalled func() factory.ProcessComponentsHolder + GetChainHandlerCalled func() chainData.ChainHandler + GetBroadcastMessengerCalled func() consensus.BroadcastMessenger + GetShardCoordinatorCalled func() sharding.Coordinator + GetCryptoComponentsCalled func() factory.CryptoComponentsHolder + GetCoreComponentsCalled func() factory.CoreComponentsHolder + GetStateComponentsCalled func() factory.StateComponentsHolder + GetFacadeHandlerCalled func() shared.FacadeHandler + GetStatusCoreComponentsCalled func() factory.StatusCoreComponentsHolder + SetKeyValueForAddressCalled func(addressBytes []byte, state map[string]string) error + SetStateForAddressCalled func(address []byte, state *dtos.AddressState) error + CloseCalled func() error +} + +// GetProcessComponents - +func (mock *NodeHandlerMock) GetProcessComponents() factory.ProcessComponentsHolder { + if mock.GetProcessComponentsCalled != nil { + return mock.GetProcessComponentsCalled() + } + return nil +} + +// GetChainHandler - +func (mock *NodeHandlerMock) GetChainHandler() chainData.ChainHandler { + if mock.GetChainHandlerCalled != nil { + return mock.GetChainHandlerCalled() + } + return nil +} + +// GetBroadcastMessenger - +func (mock *NodeHandlerMock) GetBroadcastMessenger() consensus.BroadcastMessenger { + if mock.GetBroadcastMessengerCalled != nil { + return mock.GetBroadcastMessengerCalled() + } + return nil +} + +// GetShardCoordinator - +func (mock *NodeHandlerMock) GetShardCoordinator() sharding.Coordinator { + if mock.GetShardCoordinatorCalled != nil { + return mock.GetShardCoordinatorCalled() + } + return nil +} + +// GetCryptoComponents - +func (mock *NodeHandlerMock) GetCryptoComponents() factory.CryptoComponentsHolder { + if mock.GetCryptoComponentsCalled != nil { + return mock.GetCryptoComponentsCalled() + } + return nil +} + +// GetCoreComponents - +func (mock *NodeHandlerMock) GetCoreComponents() factory.CoreComponentsHolder { + if mock.GetCoreComponentsCalled != nil { + return mock.GetCoreComponentsCalled() + } + return nil +} + +// GetStateComponents - +func (mock *NodeHandlerMock) GetStateComponents() factory.StateComponentsHolder { + if mock.GetStateComponentsCalled != nil { + return mock.GetStateComponentsCalled() + } + return nil +} + +// GetFacadeHandler - +func (mock *NodeHandlerMock) GetFacadeHandler() shared.FacadeHandler { + if mock.GetFacadeHandlerCalled != nil { + return mock.GetFacadeHandlerCalled() + } + return nil +} + +// GetStatusCoreComponents - +func (mock *NodeHandlerMock) GetStatusCoreComponents() factory.StatusCoreComponentsHolder { + if mock.GetStatusCoreComponentsCalled != nil { + return mock.GetStatusCoreComponentsCalled() + } + return nil +} + +// SetKeyValueForAddress - +func (mock *NodeHandlerMock) SetKeyValueForAddress(addressBytes []byte, state map[string]string) error { + if mock.SetKeyValueForAddressCalled != nil { + return mock.SetKeyValueForAddressCalled(addressBytes, state) + } + return nil +} + +// SetStateForAddress - +func (mock *NodeHandlerMock) SetStateForAddress(address []byte, state *dtos.AddressState) error { + if mock.SetStateForAddressCalled != nil { + return mock.SetStateForAddressCalled(address, state) + } + 
return nil +} + +// Close - +func (mock *NodeHandlerMock) Close() error { + if mock.CloseCalled != nil { + return mock.CloseCalled() + } + return nil +} + +// IsInterfaceNil - +func (mock *NodeHandlerMock) IsInterfaceNil() bool { + return mock == nil +} diff --git a/testscommon/components/components.go b/testscommon/components/components.go index cc4ec1b03ab..1dcaeff3b14 100644 --- a/testscommon/components/components.go +++ b/testscommon/components/components.go @@ -134,7 +134,7 @@ func GetConsensusArgs(shardCoordinator sharding.Coordinator) consensusComp.Conse coreComponents := GetCoreComponents() cryptoComponents := GetCryptoComponents(coreComponents) networkComponents := GetNetworkComponents(cryptoComponents) - stateComponents := GetStateComponents(coreComponents) + stateComponents := GetStateComponents(coreComponents, GetStatusCoreComponents()) dataComponents := GetDataComponents(coreComponents, shardCoordinator) processComponents := GetProcessComponents( shardCoordinator, @@ -199,6 +199,13 @@ func GetCryptoArgs(coreComponents factory.CoreComponentsHolder) cryptoComp.Crypt }, EnableEpochs: config.EnableEpochs{ BLSMultiSignerEnableEpoch: []config.MultiSignerConfig{{EnableEpoch: 0, Type: "no-KOSK"}}, + MaxNodesChangeEnableEpoch: []config.MaxNodesChangeConfig{ + { + EpochEnable: 0, + MaxNumNodes: 100, + NodesToShufflePerShard: 2, + }, + }, }, } @@ -325,7 +332,7 @@ func GetNetworkFactoryArgs() networkComp.NetworkComponentsFactoryArgs { } // GetStateFactoryArgs - -func GetStateFactoryArgs(coreComponents factory.CoreComponentsHolder) stateComp.StateComponentsFactoryArgs { +func GetStateFactoryArgs(coreComponents factory.CoreComponentsHolder, statusCoreComp factory.StatusCoreComponentsHolder) stateComp.StateComponentsFactoryArgs { tsm, _ := trie.NewTrieStorageManager(storage.GetStorageManagerArgs()) storageManagerUser, _ := trie.NewTrieStorageManagerWithoutPruning(tsm) tsm, _ = trie.NewTrieStorageManager(storage.GetStorageManagerArgs()) @@ -344,7 +351,7 @@ func GetStateFactoryArgs(coreComponents factory.CoreComponentsHolder) stateComp. 
stateComponentsFactoryArgs := stateComp.StateComponentsFactoryArgs{ Config: GetGeneralConfig(), Core: coreComponents, - StatusCore: GetStatusCoreComponents(), + StatusCore: statusCoreComp, StorageService: disabled.NewChainStorer(), ProcessingMode: common.Normal, ChainHandler: &testscommon.ChainHandlerStub{}, @@ -359,7 +366,7 @@ func GetProcessComponentsFactoryArgs(shardCoordinator sharding.Coordinator) proc cryptoComponents := GetCryptoComponents(coreComponents) networkComponents := GetNetworkComponents(cryptoComponents) dataComponents := GetDataComponents(coreComponents, shardCoordinator) - stateComponents := GetStateComponents(coreComponents) + stateComponents := GetStateComponents(coreComponents, GetStatusCoreComponents()) processArgs := GetProcessArgs( shardCoordinator, coreComponents, @@ -548,6 +555,8 @@ func GetProcessArgs( MaxNumberOfNodesForStake: 10, ActivateBLSPubKeyMessageVerification: false, MinUnstakeTokensValue: "1", + StakeLimitPercentage: 100, + NodeLimitPercentage: 100, }, DelegationManagerSystemSCConfig: config.DelegationManagerSystemSCConfig{ MinCreationDeposit: "100", @@ -558,12 +567,30 @@ func GetProcessArgs( MinServiceFee: 0, MaxServiceFee: 100, }, + SoftAuctionConfig: config.SoftAuctionConfig{ + TopUpStep: "10", + MinTopUp: "1", + MaxTopUp: "32000000", + MaxNumberOfIterations: 100000, + }, }, HistoryRepo: &dblookupext.HistoryRepositoryStub{}, FlagsConfig: config.ContextFlagsConfig{ Version: "v1.0.0", }, + RoundConfig: testscommon.GetDefaultRoundsConfig(), TxExecutionOrderHandler: &commonMocks.TxExecutionOrderHandlerStub{}, + EpochConfig: config.EpochConfig{ + EnableEpochs: config.EnableEpochs{ + MaxNodesChangeEnableEpoch: []config.MaxNodesChangeConfig{ + { + EpochEnable: 0, + MaxNumNodes: 100, + NodesToShufflePerShard: 2, + }, + }, + }, + }, } } @@ -626,7 +653,7 @@ func GetStatusComponentsFactoryArgsAndProcessComponents(shardCoordinator shardin cryptoComponents := GetCryptoComponents(coreComponents) networkComponents := GetNetworkComponents(cryptoComponents) dataComponents := GetDataComponents(coreComponents, shardCoordinator) - stateComponents := GetStateComponents(coreComponents) + stateComponents := GetStateComponents(coreComponents, GetStatusCoreComponents()) processComponents := GetProcessComponents( shardCoordinator, coreComponents, @@ -718,22 +745,22 @@ func GetCryptoComponents(coreComponents factory.CoreComponentsHolder) factory.Cr } // GetStateComponents - -func GetStateComponents(coreComponents factory.CoreComponentsHolder) factory.StateComponentsHolder { - stateArgs := GetStateFactoryArgs(coreComponents) +func GetStateComponents(coreComponents factory.CoreComponentsHolder, statusCoreComponents factory.StatusCoreComponentsHolder) factory.StateComponentsHolder { + stateArgs := GetStateFactoryArgs(coreComponents, statusCoreComponents) stateComponentsFactory, err := stateComp.NewStateComponentsFactory(stateArgs) if err != nil { - log.Error("getStateComponents NewStateComponentsFactory", "error", err.Error()) + log.Error("GetStateComponents NewStateComponentsFactory", "error", err.Error()) return nil } stateComponents, err := stateComp.NewManagedStateComponents(stateComponentsFactory) if err != nil { - log.Error("getStateComponents NewManagedStateComponents", "error", err.Error()) + log.Error("GetStateComponents NewManagedStateComponents", "error", err.Error()) return nil } err = stateComponents.Create() if err != nil { - log.Error("getStateComponents Create", "error", err.Error()) + log.Error("GetStateComponents Create", "error", err.Error()) return nil } 
return stateComponents @@ -756,7 +783,7 @@ func GetStatusCoreComponents() factory.StatusCoreComponentsHolder { err = statusCoreComponents.Create() if err != nil { - log.Error("statusCoreComponents Create", "error", err.Error()) + log.Error("GetStatusCoreComponents Create", "error", err.Error()) return nil } diff --git a/testscommon/components/default.go b/testscommon/components/default.go index c39baf24385..514b8355407 100644 --- a/testscommon/components/default.go +++ b/testscommon/components/default.go @@ -13,12 +13,15 @@ import ( "github.com/multiversx/mx-chain-go/testscommon/cryptoMocks" dataRetrieverTests "github.com/multiversx/mx-chain-go/testscommon/dataRetriever" "github.com/multiversx/mx-chain-go/testscommon/economicsmocks" + "github.com/multiversx/mx-chain-go/testscommon/enableEpochsHandlerMock" epochNotifierMock "github.com/multiversx/mx-chain-go/testscommon/epochNotifier" "github.com/multiversx/mx-chain-go/testscommon/factory" + "github.com/multiversx/mx-chain-go/testscommon/genesisMocks" "github.com/multiversx/mx-chain-go/testscommon/marshallerMock" "github.com/multiversx/mx-chain-go/testscommon/nodeTypeProviderMock" "github.com/multiversx/mx-chain-go/testscommon/p2pmocks" "github.com/multiversx/mx-chain-go/testscommon/shardingMocks" + "github.com/multiversx/mx-chain-go/testscommon/stakingcommon" stateMock "github.com/multiversx/mx-chain-go/testscommon/state" "github.com/multiversx/mx-chain-go/testscommon/storage" "github.com/multiversx/mx-chain-go/testscommon/storageManager" @@ -42,17 +45,18 @@ func GetDefaultCoreComponents() *mock.CoreComponentsMock { MinTransactionVersionCalled: func() uint32 { return 1 }, - WatchdogTimer: &testscommon.WatchdogMock{}, - AlarmSch: &testscommon.AlarmSchedulerStub{}, - NtpSyncTimer: &testscommon.SyncTimerStub{}, - RoundHandlerField: &testscommon.RoundHandlerMock{}, - EconomicsHandler: &economicsmocks.EconomicsHandlerStub{}, - RatingsConfig: &testscommon.RatingsInfoMock{}, - RatingHandler: &testscommon.RaterMock{}, - NodesConfig: &testscommon.NodesSetupStub{}, - StartTime: time.Time{}, - NodeTypeProviderField: &nodeTypeProviderMock.NodeTypeProviderStub{}, - EpochChangeNotifier: &epochNotifierMock.EpochNotifierStub{}, + WatchdogTimer: &testscommon.WatchdogMock{}, + AlarmSch: &testscommon.AlarmSchedulerStub{}, + NtpSyncTimer: &testscommon.SyncTimerStub{}, + RoundHandlerField: &testscommon.RoundHandlerMock{}, + EconomicsHandler: &economicsmocks.EconomicsHandlerStub{}, + RatingsConfig: &testscommon.RatingsInfoMock{}, + RatingHandler: &testscommon.RaterMock{}, + NodesConfig: &genesisMocks.NodesSetupStub{}, + StartTime: time.Time{}, + NodeTypeProviderField: &nodeTypeProviderMock.NodeTypeProviderStub{}, + EpochChangeNotifier: &epochNotifierMock.EpochNotifierStub{}, + EnableEpochsHandlerField: &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, } } @@ -131,8 +135,8 @@ func GetDefaultProcessComponents(shardCoordinator sharding.Coordinator) *mock.Pr BootSore: &mock.BootstrapStorerMock{}, HeaderSigVerif: &mock.HeaderSigVerifierStub{}, HeaderIntegrVerif: &mock.HeaderIntegrityVerifierStub{}, - ValidatorStatistics: &mock.ValidatorStatisticsProcessorStub{}, - ValidatorProvider: &mock.ValidatorsProviderStub{}, + ValidatorStatistics: &testscommon.ValidatorStatisticsProcessorStub{}, + ValidatorProvider: &stakingcommon.ValidatorsProviderStub{}, BlockTrack: &mock.BlockTrackerStub{}, PendingMiniBlocksHdl: &mock.PendingMiniBlocksHandlerStub{}, ReqHandler: &testscommon.RequestHandlerStub{}, diff --git a/testscommon/dataRetriever/poolsHolderMock.go 
b/testscommon/dataRetriever/poolsHolderMock.go index 5c711addbb0..d3d30562954 100644 --- a/testscommon/dataRetriever/poolsHolderMock.go +++ b/testscommon/dataRetriever/poolsHolderMock.go @@ -4,6 +4,7 @@ import ( "time" "github.com/multiversx/mx-chain-core-go/core/check" + "github.com/multiversx/mx-chain-go/config" "github.com/multiversx/mx-chain-go/dataRetriever" "github.com/multiversx/mx-chain-go/dataRetriever/dataPool" @@ -142,6 +143,11 @@ func (holder *PoolsHolderMock) Headers() dataRetriever.HeadersPool { return holder.headers } +// SetHeadersPool - +func (holder *PoolsHolderMock) SetHeadersPool(headersPool dataRetriever.HeadersPool) { + holder.headers = headersPool +} + // MiniBlocks - func (holder *PoolsHolderMock) MiniBlocks() storage.Cacher { return holder.miniBlocks diff --git a/testscommon/enableEpochsHandlerMock/enableEpochsHandlerStub.go b/testscommon/enableEpochsHandlerMock/enableEpochsHandlerStub.go index 16fc9019390..bf633508147 100644 --- a/testscommon/enableEpochsHandlerMock/enableEpochsHandlerStub.go +++ b/testscommon/enableEpochsHandlerMock/enableEpochsHandlerStub.go @@ -44,6 +44,10 @@ func (stub *EnableEpochsHandlerStub) AddActiveFlags(flags ...core.EnableEpochFla stub.Lock() defer stub.Unlock() + if len(stub.activeFlags) == 0 { + stub.activeFlags = make(map[core.EnableEpochFlag]struct{}) + } + for _, flag := range flags { stub.activeFlags[flag] = struct{}{} } diff --git a/integrationTests/mock/epochStartSystemSCStub.go b/testscommon/epochStartSystemSCStub.go similarity index 72% rename from integrationTests/mock/epochStartSystemSCStub.go rename to testscommon/epochStartSystemSCStub.go index fd2c92553cf..ff4e4addbf4 100644 --- a/integrationTests/mock/epochStartSystemSCStub.go +++ b/testscommon/epochStartSystemSCStub.go @@ -1,6 +1,7 @@ -package mock +package testscommon import ( + "github.com/multiversx/mx-chain-core-go/data" "github.com/multiversx/mx-chain-core-go/data/block" "github.com/multiversx/mx-chain-go/epochStart" "github.com/multiversx/mx-chain-go/state" @@ -8,7 +9,7 @@ import ( // EpochStartSystemSCStub - type EpochStartSystemSCStub struct { - ProcessSystemSmartContractCalled func(validatorInfos map[uint32][]*state.ValidatorInfo, nonce uint64, epoch uint32) error + ProcessSystemSmartContractCalled func(validatorsInfo state.ShardValidatorsInfoMapHandler, header data.HeaderHandler) error ProcessDelegationRewardsCalled func(miniBlocks block.MiniBlockSlice, txCache epochStart.TransactionCacher) error ToggleUnStakeUnBondCalled func(value bool) error } @@ -22,9 +23,12 @@ func (e *EpochStartSystemSCStub) ToggleUnStakeUnBond(value bool) error { } // ProcessSystemSmartContract - -func (e *EpochStartSystemSCStub) ProcessSystemSmartContract(validatorInfos map[uint32][]*state.ValidatorInfo, nonce uint64, epoch uint32) error { +func (e *EpochStartSystemSCStub) ProcessSystemSmartContract( + validatorsInfo state.ShardValidatorsInfoMapHandler, + header data.HeaderHandler, +) error { if e.ProcessSystemSmartContractCalled != nil { - return e.ProcessSystemSmartContractCalled(validatorInfos, nonce, epoch) + return e.ProcessSystemSmartContractCalled(validatorsInfo, header) } return nil } diff --git a/integrationTests/mock/epochValidatorInfoCreatorStub.go b/testscommon/epochValidatorInfoCreatorStub.go similarity index 88% rename from integrationTests/mock/epochValidatorInfoCreatorStub.go rename to testscommon/epochValidatorInfoCreatorStub.go index 445d305596e..31c07037f1e 100644 --- a/integrationTests/mock/epochValidatorInfoCreatorStub.go +++ 
b/testscommon/epochValidatorInfoCreatorStub.go @@ -1,4 +1,4 @@ -package mock +package testscommon import ( "github.com/multiversx/mx-chain-core-go/data" @@ -9,8 +9,8 @@ import ( // EpochValidatorInfoCreatorStub - type EpochValidatorInfoCreatorStub struct { - CreateValidatorInfoMiniBlocksCalled func(validatorsInfo map[uint32][]*state.ValidatorInfo) (block.MiniBlockSlice, error) - VerifyValidatorInfoMiniBlocksCalled func(miniblocks []*block.MiniBlock, validatorsInfo map[uint32][]*state.ValidatorInfo) error + CreateValidatorInfoMiniBlocksCalled func(validatorsInfo state.ShardValidatorsInfoMapHandler) (block.MiniBlockSlice, error) + VerifyValidatorInfoMiniBlocksCalled func(miniblocks []*block.MiniBlock, validatorsInfo state.ShardValidatorsInfoMapHandler) error GetLocalValidatorInfoCacheCalled func() epochStart.ValidatorInfoCacher CreateMarshalledDataCalled func(body *block.Body) map[string][][]byte GetValidatorInfoTxsCalled func(body *block.Body) map[string]*state.ShardValidatorInfo @@ -20,7 +20,7 @@ type EpochValidatorInfoCreatorStub struct { } // CreateValidatorInfoMiniBlocks - -func (e *EpochValidatorInfoCreatorStub) CreateValidatorInfoMiniBlocks(validatorInfo map[uint32][]*state.ValidatorInfo) (block.MiniBlockSlice, error) { +func (e *EpochValidatorInfoCreatorStub) CreateValidatorInfoMiniBlocks(validatorInfo state.ShardValidatorsInfoMapHandler) (block.MiniBlockSlice, error) { if e.CreateValidatorInfoMiniBlocksCalled != nil { return e.CreateValidatorInfoMiniBlocksCalled(validatorInfo) } @@ -28,7 +28,7 @@ func (e *EpochValidatorInfoCreatorStub) CreateValidatorInfoMiniBlocks(validatorI } // VerifyValidatorInfoMiniBlocks - -func (e *EpochValidatorInfoCreatorStub) VerifyValidatorInfoMiniBlocks(miniBlocks []*block.MiniBlock, validatorsInfo map[uint32][]*state.ValidatorInfo) error { +func (e *EpochValidatorInfoCreatorStub) VerifyValidatorInfoMiniBlocks(miniBlocks []*block.MiniBlock, validatorsInfo state.ShardValidatorsInfoMapHandler) error { if e.VerifyValidatorInfoMiniBlocksCalled != nil { return e.VerifyValidatorInfoMiniBlocksCalled(miniBlocks, validatorsInfo) } diff --git a/testscommon/generalConfig.go b/testscommon/generalConfig.go index 0cf69ff24ed..06814edb1f5 100644 --- a/testscommon/generalConfig.go +++ b/testscommon/generalConfig.go @@ -363,7 +363,8 @@ func GetGeneralConfig() config.Config { CheckNodesOnDisk: false, }, Antiflood: config.AntifloodConfig{ - NumConcurrentResolverJobs: 2, + NumConcurrentResolverJobs: 2, + NumConcurrentResolvingTrieNodesJobs: 1, TxAccumulator: config.TxAccumulatorConfig{ MaxAllowedTimeInMilliseconds: 10, MaxDeviationTimeInMilliseconds: 1, @@ -415,6 +416,9 @@ func GetGeneralConfig() config.Config { "erd1najnxxweyw6plhg8efql330nttrj6l5cf87wqsuym85s9ha0hmdqnqgenp", //shard 2 }, }, + ResourceStats: config.ResourceStatsConfig{ + RefreshIntervalInSec: 1, + }, } } diff --git a/integrationTests/mock/nodesSetupStub.go b/testscommon/genesisMocks/nodesSetupStub.go similarity index 95% rename from integrationTests/mock/nodesSetupStub.go rename to testscommon/genesisMocks/nodesSetupStub.go index affb71e3530..ebe1cfe778a 100644 --- a/integrationTests/mock/nodesSetupStub.go +++ b/testscommon/genesisMocks/nodesSetupStub.go @@ -1,82 +1,82 @@ -package mock +package genesisMocks -import "github.com/multiversx/mx-chain-go/sharding/nodesCoordinator" +import ( + "github.com/multiversx/mx-chain-go/sharding/nodesCoordinator" +) // NodesSetupStub - type NodesSetupStub struct { - InitialNodesInfoForShardCalled func(shardId uint32) ([]nodesCoordinator.GenesisNodeInfoHandler, 
[]nodesCoordinator.GenesisNodeInfoHandler, error) - InitialNodesInfoCalled func() (map[uint32][]nodesCoordinator.GenesisNodeInfoHandler, map[uint32][]nodesCoordinator.GenesisNodeInfoHandler) - GetStartTimeCalled func() int64 - GetRoundDurationCalled func() uint64 - GetChainIdCalled func() string - GetMinTransactionVersionCalled func() uint32 + InitialNodesPubKeysCalled func() map[uint32][]string + InitialEligibleNodesPubKeysForShardCalled func(shardId uint32) ([]string, error) + GetShardIDForPubKeyCalled func(pubKey []byte) (uint32, error) + NumberOfShardsCalled func() uint32 GetShardConsensusGroupSizeCalled func() uint32 GetMetaConsensusGroupSizeCalled func() uint32 - NumberOfShardsCalled func() uint32 - MinNumberOfNodesCalled func() uint32 - MinNumberOfShardNodesCalled func() uint32 + GetRoundDurationCalled func() uint64 MinNumberOfMetaNodesCalled func() uint32 + MinNumberOfShardNodesCalled func() uint32 GetHysteresisCalled func() float32 GetAdaptivityCalled func() bool + InitialNodesInfoForShardCalled func(shardId uint32) ([]nodesCoordinator.GenesisNodeInfoHandler, []nodesCoordinator.GenesisNodeInfoHandler, error) + InitialNodesInfoCalled func() (map[uint32][]nodesCoordinator.GenesisNodeInfoHandler, map[uint32][]nodesCoordinator.GenesisNodeInfoHandler) + GetStartTimeCalled func() int64 + MinNumberOfNodesCalled func() uint32 AllInitialNodesCalled func() []nodesCoordinator.GenesisNodeInfoHandler - GetShardIDForPubKeyCalled func(pubkey []byte) (uint32, error) - InitialEligibleNodesPubKeysForShardCalled func(shardId uint32) ([]string, error) - InitialNodesPubKeysCalled func() map[uint32][]string MinNumberOfNodesWithHysteresisCalled func() uint32 MinShardHysteresisNodesCalled func() uint32 MinMetaHysteresisNodesCalled func() uint32 + GetChainIdCalled func() string + GetMinTransactionVersionCalled func() uint32 } -// MinNumberOfShardNodes - -func (n *NodesSetupStub) MinNumberOfShardNodes() uint32 { - if n.MinNumberOfShardNodesCalled != nil { - return n.MinNumberOfShardNodesCalled() +// InitialNodesPubKeys - +func (n *NodesSetupStub) InitialNodesPubKeys() map[uint32][]string { + if n.InitialNodesPubKeysCalled != nil { + return n.InitialNodesPubKeysCalled() } - return 1 + return map[uint32][]string{0: {"val1", "val2"}} } -// MinNumberOfMetaNodes - -func (n *NodesSetupStub) MinNumberOfMetaNodes() uint32 { - if n.MinNumberOfMetaNodesCalled != nil { - return n.MinNumberOfMetaNodesCalled() +// InitialEligibleNodesPubKeysForShard - +func (n *NodesSetupStub) InitialEligibleNodesPubKeysForShard(shardId uint32) ([]string, error) { + if n.InitialEligibleNodesPubKeysForShardCalled != nil { + return n.InitialEligibleNodesPubKeysForShardCalled(shardId) } - return 1 + return []string{"val1", "val2"}, nil } -// GetHysteresis - -func (n *NodesSetupStub) GetHysteresis() float32 { - if n.GetHysteresisCalled != nil { - return n.GetHysteresisCalled() +// NumberOfShards - +func (n *NodesSetupStub) NumberOfShards() uint32 { + if n.NumberOfShardsCalled != nil { + return n.NumberOfShardsCalled() } - - return 0 + return 1 } -// GetAdaptivity - -func (n *NodesSetupStub) GetAdaptivity() bool { - if n.GetAdaptivityCalled != nil { - return n.GetAdaptivityCalled() +// GetShardIDForPubKey - +func (n *NodesSetupStub) GetShardIDForPubKey(pubkey []byte) (uint32, error) { + if n.GetShardIDForPubKeyCalled != nil { + return n.GetShardIDForPubKeyCalled(pubkey) } - - return false + return 0, nil } -// MinNumberOfNodes - -func (n *NodesSetupStub) MinNumberOfNodes() uint32 { - if n.MinNumberOfNodesCalled != nil { - return 
n.MinNumberOfNodesCalled() +// GetShardConsensusGroupSize - +func (n *NodesSetupStub) GetShardConsensusGroupSize() uint32 { + if n.GetShardConsensusGroupSizeCalled != nil { + return n.GetShardConsensusGroupSizeCalled() } - return 0 + return 1 } -// GetStartTime - -func (n *NodesSetupStub) GetStartTime() int64 { - if n.GetStartTimeCalled != nil { - return n.GetStartTimeCalled() +// GetMetaConsensusGroupSize - +func (n *NodesSetupStub) GetMetaConsensusGroupSize() uint32 { + if n.GetMetaConsensusGroupSizeCalled != nil { + return n.GetMetaConsensusGroupSizeCalled() } - return 0 + return 1 } // GetRoundDuration - @@ -84,54 +84,49 @@ func (n *NodesSetupStub) GetRoundDuration() uint64 { if n.GetRoundDurationCalled != nil { return n.GetRoundDurationCalled() } - return 0 + return 4000 } -// GetChainId - -func (n *NodesSetupStub) GetChainId() string { - if n.GetChainIdCalled != nil { - return n.GetChainIdCalled() - } - return "chainID" -} - -// GetMinTransactionVersion - -func (n *NodesSetupStub) GetMinTransactionVersion() uint32 { - if n.GetMinTransactionVersionCalled != nil { - return n.GetMinTransactionVersionCalled() +// MinNumberOfMetaNodes - +func (n *NodesSetupStub) MinNumberOfMetaNodes() uint32 { + if n.MinNumberOfMetaNodesCalled != nil { + return n.MinNumberOfMetaNodesCalled() } return 1 } -// GetShardConsensusGroupSize - -func (n *NodesSetupStub) GetShardConsensusGroupSize() uint32 { - if n.GetShardConsensusGroupSizeCalled != nil { - return n.GetShardConsensusGroupSizeCalled() +// MinNumberOfShardNodes - +func (n *NodesSetupStub) MinNumberOfShardNodes() uint32 { + if n.MinNumberOfShardNodesCalled != nil { + return n.MinNumberOfShardNodesCalled() } - return 0 + return 1 } -// GetMetaConsensusGroupSize - -func (n *NodesSetupStub) GetMetaConsensusGroupSize() uint32 { - if n.GetMetaConsensusGroupSizeCalled != nil { - return n.GetMetaConsensusGroupSizeCalled() +// GetHysteresis - +func (n *NodesSetupStub) GetHysteresis() float32 { + if n.GetHysteresisCalled != nil { + return n.GetHysteresisCalled() } return 0 } -// NumberOfShards - -func (n *NodesSetupStub) NumberOfShards() uint32 { - if n.NumberOfShardsCalled != nil { - return n.NumberOfShardsCalled() +// GetAdaptivity - +func (n *NodesSetupStub) GetAdaptivity() bool { + if n.GetAdaptivityCalled != nil { + return n.GetAdaptivityCalled() } - return 0 + return false } // InitialNodesInfoForShard - -func (n *NodesSetupStub) InitialNodesInfoForShard(shardId uint32) ([]nodesCoordinator.GenesisNodeInfoHandler, []nodesCoordinator.GenesisNodeInfoHandler, error) { +func (n *NodesSetupStub) InitialNodesInfoForShard( + shardId uint32, +) ([]nodesCoordinator.GenesisNodeInfoHandler, []nodesCoordinator.GenesisNodeInfoHandler, error) { if n.InitialNodesInfoForShardCalled != nil { return n.InitialNodesInfoForShardCalled(shardId) } + return nil, nil, nil } @@ -140,49 +135,56 @@ func (n *NodesSetupStub) InitialNodesInfo() (map[uint32][]nodesCoordinator.Genes if n.InitialNodesInfoCalled != nil { return n.InitialNodesInfoCalled() } + return nil, nil } -// AllInitialNodes - -func (n *NodesSetupStub) AllInitialNodes() []nodesCoordinator.GenesisNodeInfoHandler { - if n.AllInitialNodesCalled != nil { - return n.AllInitialNodesCalled() +// GetStartTime - +func (n *NodesSetupStub) GetStartTime() int64 { + if n.GetStartTimeCalled != nil { + return n.GetStartTimeCalled() } - return nil + return 0 } -// GetShardIDForPubKey - -func (n *NodesSetupStub) GetShardIDForPubKey(pubkey []byte) (uint32, error) { - if n.GetShardIDForPubKeyCalled != nil { - return 
n.GetShardIDForPubKeyCalled(pubkey) +// MinNumberOfNodes - +func (n *NodesSetupStub) MinNumberOfNodes() uint32 { + if n.MinNumberOfNodesCalled != nil { + return n.MinNumberOfNodesCalled() } - return 0, nil + return 1 } -// InitialEligibleNodesPubKeysForShard - -func (n *NodesSetupStub) InitialEligibleNodesPubKeysForShard(shardId uint32) ([]string, error) { - if n.InitialEligibleNodesPubKeysForShardCalled != nil { - return n.InitialEligibleNodesPubKeysForShardCalled(shardId) +// MinNumberOfNodesWithHysteresis - +func (n *NodesSetupStub) MinNumberOfNodesWithHysteresis() uint32 { + if n.MinNumberOfNodesWithHysteresisCalled != nil { + return n.MinNumberOfNodesWithHysteresisCalled() } - - return []string{"val1", "val2"}, nil + return n.MinNumberOfNodes() } -// InitialNodesPubKeys - -func (n *NodesSetupStub) InitialNodesPubKeys() map[uint32][]string { - if n.InitialNodesPubKeysCalled != nil { - return n.InitialNodesPubKeysCalled() +// AllInitialNodes - +func (n *NodesSetupStub) AllInitialNodes() []nodesCoordinator.GenesisNodeInfoHandler { + if n.AllInitialNodesCalled != nil { + return n.AllInitialNodesCalled() } + return nil +} - return map[uint32][]string{0: {"val1", "val2"}} +// GetChainId - +func (n *NodesSetupStub) GetChainId() string { + if n.GetChainIdCalled != nil { + return n.GetChainIdCalled() + } + return "chainID" } -// MinNumberOfNodesWithHysteresis - -func (n *NodesSetupStub) MinNumberOfNodesWithHysteresis() uint32 { - if n.MinNumberOfNodesWithHysteresisCalled != nil { - return n.MinNumberOfNodesWithHysteresisCalled() +// GetMinTransactionVersion - +func (n *NodesSetupStub) GetMinTransactionVersion() uint32 { + if n.GetMinTransactionVersionCalled != nil { + return n.GetMinTransactionVersionCalled() } - return n.MinNumberOfNodes() + return 1 } // MinShardHysteresisNodes - diff --git a/testscommon/headerHandlerStub.go b/testscommon/headerHandlerStub.go index 7bbd8d2883e..ab1d354ec60 100644 --- a/testscommon/headerHandlerStub.go +++ b/testscommon/headerHandlerStub.go @@ -12,6 +12,7 @@ type HeaderHandlerStub struct { EpochField uint32 RoundField uint64 TimestampField uint64 + BlockBodyTypeInt32Field int32 GetMiniBlockHeadersWithDstCalled func(destId uint32) map[string]uint32 GetOrderedCrossMiniblocksWithDstCalled func(destId uint32) []*data.MiniBlockInfo GetPubKeysBitmapCalled func() []byte @@ -28,6 +29,15 @@ type HeaderHandlerStub struct { HasScheduledMiniBlocksCalled func() bool GetNonceCalled func() uint64 CheckFieldsForNilCalled func() error + SetShardIDCalled func(shardID uint32) error + SetPrevHashCalled func(hash []byte) error + SetPrevRandSeedCalled func(seed []byte) error + SetPubKeysBitmapCalled func(bitmap []byte) error + SetChainIDCalled func(chainID []byte) error + SetTimeStampCalled func(timestamp uint64) error + SetRandSeedCalled func(seed []byte) error + SetSignatureCalled func(signature []byte) error + SetLeaderSignatureCalled func(signature []byte) error } // GetAccumulatedFees - @@ -56,7 +66,10 @@ func (hhs *HeaderHandlerStub) GetReceiptsHash() []byte { } // SetShardID - -func (hhs *HeaderHandlerStub) SetShardID(_ uint32) error { +func (hhs *HeaderHandlerStub) SetShardID(shardID uint32) error { + if hhs.SetShardIDCalled != nil { + return hhs.SetShardIDCalled(shardID) + } return nil } @@ -114,7 +127,10 @@ func (hhs *HeaderHandlerStub) GetPrevHash() []byte { // GetPrevRandSeed - func (hhs *HeaderHandlerStub) GetPrevRandSeed() []byte { - return hhs.GetPrevRandSeedCalled() + if hhs.GetPrevRandSeedCalled != nil { + return hhs.GetPrevRandSeedCalled() + } + return 
make([]byte, 0) } // GetRandSeed - @@ -124,7 +140,10 @@ func (hhs *HeaderHandlerStub) GetRandSeed() []byte { // GetPubKeysBitmap - func (hhs *HeaderHandlerStub) GetPubKeysBitmap() []byte { - return hhs.GetPubKeysBitmapCalled() + if hhs.GetPubKeysBitmapCalled != nil { + return hhs.GetPubKeysBitmapCalled() + } + return make([]byte, 0) } // GetSignature - @@ -172,8 +191,11 @@ func (hhs *HeaderHandlerStub) SetRound(_ uint64) error { } // SetTimeStamp - -func (hhs *HeaderHandlerStub) SetTimeStamp(_ uint64) error { - panic("implement me") +func (hhs *HeaderHandlerStub) SetTimeStamp(timestamp uint64) error { + if hhs.SetTimeStampCalled != nil { + return hhs.SetTimeStampCalled(timestamp) + } + return nil } // SetRootHash - @@ -182,38 +204,59 @@ func (hhs *HeaderHandlerStub) SetRootHash(_ []byte) error { } // SetPrevHash - -func (hhs *HeaderHandlerStub) SetPrevHash(_ []byte) error { - panic("implement me") +func (hhs *HeaderHandlerStub) SetPrevHash(hash []byte) error { + if hhs.SetPrevHashCalled != nil { + return hhs.SetPrevHashCalled(hash) + } + return nil } // SetPrevRandSeed - -func (hhs *HeaderHandlerStub) SetPrevRandSeed(_ []byte) error { - panic("implement me") +func (hhs *HeaderHandlerStub) SetPrevRandSeed(seed []byte) error { + if hhs.SetPrevRandSeedCalled != nil { + return hhs.SetPrevRandSeedCalled(seed) + } + return nil } // SetRandSeed - -func (hhs *HeaderHandlerStub) SetRandSeed(_ []byte) error { - panic("implement me") +func (hhs *HeaderHandlerStub) SetRandSeed(seed []byte) error { + if hhs.SetRandSeedCalled != nil { + return hhs.SetRandSeedCalled(seed) + } + return nil } // SetPubKeysBitmap - -func (hhs *HeaderHandlerStub) SetPubKeysBitmap(_ []byte) error { - panic("implement me") +func (hhs *HeaderHandlerStub) SetPubKeysBitmap(bitmap []byte) error { + if hhs.SetPubKeysBitmapCalled != nil { + return hhs.SetPubKeysBitmapCalled(bitmap) + } + return nil } // SetSignature - -func (hhs *HeaderHandlerStub) SetSignature(_ []byte) error { - panic("implement me") +func (hhs *HeaderHandlerStub) SetSignature(signature []byte) error { + if hhs.SetSignatureCalled != nil { + return hhs.SetSignatureCalled(signature) + } + return nil } // SetLeaderSignature - -func (hhs *HeaderHandlerStub) SetLeaderSignature(_ []byte) error { - panic("implement me") +func (hhs *HeaderHandlerStub) SetLeaderSignature(signature []byte) error { + if hhs.SetLeaderSignatureCalled != nil { + return hhs.SetLeaderSignatureCalled(signature) + } + return nil } // SetChainID - -func (hhs *HeaderHandlerStub) SetChainID(_ []byte) error { - panic("implement me") +func (hhs *HeaderHandlerStub) SetChainID(chainID []byte) error { + if hhs.SetChainIDCalled != nil { + return hhs.SetChainIDCalled(chainID) + } + return nil } // SetTxCount - @@ -248,7 +291,7 @@ func (hhs *HeaderHandlerStub) GetMetaBlockHashes() [][]byte { // GetBlockBodyTypeInt32 - func (hhs *HeaderHandlerStub) GetBlockBodyTypeInt32() int32 { - panic("implement me") + return hhs.BlockBodyTypeInt32Field } // GetValidatorStatsRootHash - @@ -377,3 +420,10 @@ func (hhs *HeaderHandlerStub) HasScheduledMiniBlocks() bool { } return false } + +// SetBlockBodyTypeInt32 - +func (hhs *HeaderHandlerStub) SetBlockBodyTypeInt32(blockBodyType int32) error { + hhs.BlockBodyTypeInt32Field = blockBodyType + + return nil +} diff --git a/testscommon/keysHandlerSingleSignerMock.go b/testscommon/keysHandlerSingleSignerMock.go index 9235a5a2abe..afc38cbfab5 100644 --- a/testscommon/keysHandlerSingleSignerMock.go +++ b/testscommon/keysHandlerSingleSignerMock.go @@ -67,6 +67,11 @@ func (mock 
*keysHandlerSingleSignerMock) IsOriginalPublicKeyOfTheNode(pkBytes [] func (mock *keysHandlerSingleSignerMock) ResetRoundsWithoutReceivedMessages(_ []byte, _ core.PeerID) { } +// GetRedundancyStepInReason - +func (mock *keysHandlerSingleSignerMock) GetRedundancyStepInReason() string { + return "" +} + // IsInterfaceNil - func (mock *keysHandlerSingleSignerMock) IsInterfaceNil() bool { return mock == nil diff --git a/testscommon/keysHandlerStub.go b/testscommon/keysHandlerStub.go index 8549de432f3..5821f305654 100644 --- a/testscommon/keysHandlerStub.go +++ b/testscommon/keysHandlerStub.go @@ -15,6 +15,7 @@ type KeysHandlerStub struct { GetAssociatedPidCalled func(pkBytes []byte) core.PeerID IsOriginalPublicKeyOfTheNodeCalled func(pkBytes []byte) bool ResetRoundsWithoutReceivedMessagesCalled func(pkBytes []byte, pid core.PeerID) + GetRedundancyStepInReasonCalled func() string } // GetHandledPrivateKey - @@ -76,6 +77,15 @@ func (stub *KeysHandlerStub) ResetRoundsWithoutReceivedMessages(pkBytes []byte, } } +// GetRedundancyStepInReason - +func (stub *KeysHandlerStub) GetRedundancyStepInReason() string { + if stub.GetRedundancyStepInReasonCalled != nil { + return stub.GetRedundancyStepInReasonCalled() + } + + return "" +} + // IsInterfaceNil - func (stub *KeysHandlerStub) IsInterfaceNil() bool { return stub == nil diff --git a/testscommon/mainFactoryMocks/bootstrapComponentsStub.go b/testscommon/mainFactoryMocks/bootstrapComponentsStub.go index 8c9d56dca7b..62d7232eaf4 100644 --- a/testscommon/mainFactoryMocks/bootstrapComponentsStub.go +++ b/testscommon/mainFactoryMocks/bootstrapComponentsStub.go @@ -6,19 +6,21 @@ import ( "github.com/multiversx/mx-chain-go/factory" "github.com/multiversx/mx-chain-go/process" "github.com/multiversx/mx-chain-go/sharding" + "github.com/multiversx/mx-chain-go/sharding/nodesCoordinator" ) // BootstrapComponentsStub - type BootstrapComponentsStub struct { - Bootstrapper factory.EpochStartBootstrapper - BootstrapParams factory.BootstrapParamsHolder - NodeRole core.NodeType - ShCoordinator sharding.Coordinator - ShardCoordinatorCalled func() sharding.Coordinator - HdrVersionHandler nodeFactory.HeaderVersionHandler - VersionedHdrFactory nodeFactory.VersionedHeaderFactory - HdrIntegrityVerifier nodeFactory.HeaderIntegrityVerifierHandler - GuardedAccountHandlerField process.GuardedAccountHandler + Bootstrapper factory.EpochStartBootstrapper + BootstrapParams factory.BootstrapParamsHolder + NodeRole core.NodeType + ShCoordinator sharding.Coordinator + ShardCoordinatorCalled func() sharding.Coordinator + HdrVersionHandler nodeFactory.HeaderVersionHandler + VersionedHdrFactory nodeFactory.VersionedHeaderFactory + HdrIntegrityVerifier nodeFactory.HeaderIntegrityVerifierHandler + GuardedAccountHandlerField process.GuardedAccountHandler + NodesCoordinatorRegistryFactoryField nodesCoordinator.NodesCoordinatorRegistryFactory } // Create - @@ -85,6 +87,11 @@ func (bcs *BootstrapComponentsStub) GuardedAccountHandler() process.GuardedAccou return bcs.GuardedAccountHandlerField } +// NodesCoordinatorRegistryFactory - +func (bcs *BootstrapComponentsStub) NodesCoordinatorRegistryFactory() nodesCoordinator.NodesCoordinatorRegistryFactory { + return bcs.NodesCoordinatorRegistryFactoryField +} + // String - func (bcs *BootstrapComponentsStub) String() string { return "BootstrapComponentsStub" diff --git a/testscommon/mainFactoryMocks/dataComponentsStub.go b/testscommon/mainFactoryMocks/dataComponentsStub.go new file mode 100644 index 00000000000..3de2c0b33e6 --- /dev/null +++ 
b/testscommon/mainFactoryMocks/dataComponentsStub.go @@ -0,0 +1,69 @@ +package mainFactoryMocks + +import ( + "github.com/multiversx/mx-chain-core-go/data" + "github.com/multiversx/mx-chain-go/dataRetriever" + "github.com/multiversx/mx-chain-go/factory" +) + +// DataComponentsHolderStub - +type DataComponentsHolderStub struct { + BlockchainCalled func() data.ChainHandler + SetBlockchainCalled func(chain data.ChainHandler) + StorageServiceCalled func() dataRetriever.StorageService + DatapoolCalled func() dataRetriever.PoolsHolder + MiniBlocksProviderCalled func() factory.MiniBlockProvider + CloneCalled func() interface{} +} + +// Blockchain - +func (dchs *DataComponentsHolderStub) Blockchain() data.ChainHandler { + if dchs.BlockchainCalled != nil { + return dchs.BlockchainCalled() + } + return nil +} + +// SetBlockchain - +func (dchs *DataComponentsHolderStub) SetBlockchain(chain data.ChainHandler) { + if dchs.SetBlockchainCalled != nil { + dchs.SetBlockchainCalled(chain) + } +} + +// StorageService - +func (dchs *DataComponentsHolderStub) StorageService() dataRetriever.StorageService { + if dchs.StorageServiceCalled != nil { + return dchs.StorageServiceCalled() + } + return nil +} + +// Datapool - +func (dchs *DataComponentsHolderStub) Datapool() dataRetriever.PoolsHolder { + if dchs.DatapoolCalled != nil { + return dchs.DatapoolCalled() + } + return nil +} + +// MiniBlocksProvider - +func (dchs *DataComponentsHolderStub) MiniBlocksProvider() factory.MiniBlockProvider { + if dchs.MiniBlocksProviderCalled != nil { + return dchs.MiniBlocksProviderCalled() + } + return nil +} + +// Clone - +func (dchs *DataComponentsHolderStub) Clone() interface{} { + if dchs.CloneCalled != nil { + return dchs.CloneCalled() + } + return nil +} + +// IsInterfaceNil - +func (dchs *DataComponentsHolderStub) IsInterfaceNil() bool { + return dchs == nil +} diff --git a/testscommon/managedPeersHolderStub.go b/testscommon/managedPeersHolderStub.go index 1cbd397debc..ef9a550fe2b 100644 --- a/testscommon/managedPeersHolderStub.go +++ b/testscommon/managedPeersHolderStub.go @@ -17,6 +17,7 @@ type ManagedPeersHolderStub struct { IncrementRoundsWithoutReceivedMessagesCalled func(pkBytes []byte) ResetRoundsWithoutReceivedMessagesCalled func(pkBytes []byte, pid core.PeerID) GetManagedKeysByCurrentNodeCalled func() map[string]crypto.PrivateKey + GetLoadedKeysByCurrentNodeCalled func() [][]byte IsKeyManagedByCurrentNodeCalled func(pkBytes []byte) bool IsKeyRegisteredCalled func(pkBytes []byte) bool IsPidManagedByCurrentNodeCalled func(pid core.PeerID) bool @@ -25,6 +26,7 @@ type ManagedPeersHolderStub struct { GetNextPeerAuthenticationTimeCalled func(pkBytes []byte) (time.Time, error) SetNextPeerAuthenticationTimeCalled func(pkBytes []byte, nextTime time.Time) IsMultiKeyModeCalled func() bool + GetRedundancyStepInReasonCalled func() string } // AddManagedPeer - @@ -89,6 +91,14 @@ func (stub *ManagedPeersHolderStub) GetManagedKeysByCurrentNode() map[string]cry return nil } +// GetLoadedKeysByCurrentNode - +func (stub *ManagedPeersHolderStub) GetLoadedKeysByCurrentNode() [][]byte { + if stub.GetLoadedKeysByCurrentNodeCalled != nil { + return stub.GetLoadedKeysByCurrentNodeCalled() + } + return make([][]byte, 0) +} + // IsKeyManagedByCurrentNode - func (stub *ManagedPeersHolderStub) IsKeyManagedByCurrentNode(pkBytes []byte) bool { if stub.IsKeyManagedByCurrentNodeCalled != nil { @@ -151,6 +161,15 @@ func (stub *ManagedPeersHolderStub) IsMultiKeyMode() bool { return false } +// GetRedundancyStepInReason - +func (stub 
*ManagedPeersHolderStub) GetRedundancyStepInReason() string { + if stub.GetRedundancyStepInReasonCalled != nil { + return stub.GetRedundancyStepInReasonCalled() + } + + return "" +} + // IsInterfaceNil - func (stub *ManagedPeersHolderStub) IsInterfaceNil() bool { return stub == nil diff --git a/testscommon/managedPeersMonitorStub.go b/testscommon/managedPeersMonitorStub.go index 2ae60ccc55e..43aea679c14 100644 --- a/testscommon/managedPeersMonitorStub.go +++ b/testscommon/managedPeersMonitorStub.go @@ -6,6 +6,7 @@ type ManagedPeersMonitorStub struct { GetEligibleManagedKeysCalled func() ([][]byte, error) GetWaitingManagedKeysCalled func() ([][]byte, error) GetManagedKeysCalled func() [][]byte + GetLoadedKeysCalled func() [][]byte } // GetManagedKeys - @@ -16,6 +17,14 @@ func (stub *ManagedPeersMonitorStub) GetManagedKeys() [][]byte { return make([][]byte, 0) } +// GetLoadedKeys - +func (stub *ManagedPeersMonitorStub) GetLoadedKeys() [][]byte { + if stub.GetLoadedKeysCalled != nil { + return stub.GetLoadedKeysCalled() + } + return make([][]byte, 0) +} + // GetManagedKeysCount - func (stub *ManagedPeersMonitorStub) GetManagedKeysCount() int { if stub.GetManagedKeysCountCalled != nil { diff --git a/testscommon/maxNodesChangeConfigProviderStub.go b/testscommon/maxNodesChangeConfigProviderStub.go new file mode 100644 index 00000000000..1d7195e84f7 --- /dev/null +++ b/testscommon/maxNodesChangeConfigProviderStub.go @@ -0,0 +1,40 @@ +package testscommon + +import "github.com/multiversx/mx-chain-go/config" + +// MaxNodesChangeConfigProviderStub - +type MaxNodesChangeConfigProviderStub struct { + GetAllNodesConfigCalled func() []config.MaxNodesChangeConfig + GetCurrentNodesConfigCalled func() config.MaxNodesChangeConfig + EpochConfirmedCalled func(epoch uint32, round uint64) +} + +// GetAllNodesConfig - +func (stub *MaxNodesChangeConfigProviderStub) GetAllNodesConfig() []config.MaxNodesChangeConfig { + if stub.GetAllNodesConfigCalled != nil { + return stub.GetAllNodesConfigCalled() + } + + return nil +} + +// GetCurrentNodesConfig - +func (stub *MaxNodesChangeConfigProviderStub) GetCurrentNodesConfig() config.MaxNodesChangeConfig { + if stub.GetCurrentNodesConfigCalled != nil { + return stub.GetCurrentNodesConfigCalled() + } + + return config.MaxNodesChangeConfig{} +} + +// EpochConfirmed - +func (stub *MaxNodesChangeConfigProviderStub) EpochConfirmed(epoch uint32, round uint64) { + if stub.EpochConfirmedCalled != nil { + stub.EpochConfirmedCalled(epoch, round) + } +} + +// IsInterfaceNil - +func (stub *MaxNodesChangeConfigProviderStub) IsInterfaceNil() bool { + return stub == nil +} diff --git a/testscommon/nodesSetupMock.go b/testscommon/nodesSetupMock.go deleted file mode 100644 index 070e0ecb6a2..00000000000 --- a/testscommon/nodesSetupMock.go +++ /dev/null @@ -1,191 +0,0 @@ -package testscommon - -import ( - "github.com/multiversx/mx-chain-go/sharding/nodesCoordinator" -) - -// NodesSetupStub - -type NodesSetupStub struct { - InitialNodesPubKeysCalled func() map[uint32][]string - InitialEligibleNodesPubKeysForShardCalled func(shardId uint32) ([]string, error) - GetShardIDForPubKeyCalled func(pubKey []byte) (uint32, error) - NumberOfShardsCalled func() uint32 - GetShardConsensusGroupSizeCalled func() uint32 - GetMetaConsensusGroupSizeCalled func() uint32 - GetRoundDurationCalled func() uint64 - MinNumberOfMetaNodesCalled func() uint32 - MinNumberOfShardNodesCalled func() uint32 - GetHysteresisCalled func() float32 - GetAdaptivityCalled func() bool - InitialNodesInfoForShardCalled func(shardId 
uint32) ([]nodesCoordinator.GenesisNodeInfoHandler, []nodesCoordinator.GenesisNodeInfoHandler, error) - InitialNodesInfoCalled func() (map[uint32][]nodesCoordinator.GenesisNodeInfoHandler, map[uint32][]nodesCoordinator.GenesisNodeInfoHandler) - GetStartTimeCalled func() int64 - MinNumberOfNodesCalled func() uint32 - AllInitialNodesCalled func() []nodesCoordinator.GenesisNodeInfoHandler - MinNumberOfNodesWithHysteresisCalled func() uint32 - MinShardHysteresisNodesCalled func() uint32 - MinMetaHysteresisNodesCalled func() uint32 -} - -// InitialNodesPubKeys - -func (n *NodesSetupStub) InitialNodesPubKeys() map[uint32][]string { - if n.InitialNodesPubKeysCalled != nil { - return n.InitialNodesPubKeysCalled() - } - - return map[uint32][]string{0: {"val1", "val2"}} -} - -// InitialEligibleNodesPubKeysForShard - -func (n *NodesSetupStub) InitialEligibleNodesPubKeysForShard(shardId uint32) ([]string, error) { - if n.InitialEligibleNodesPubKeysForShardCalled != nil { - return n.InitialEligibleNodesPubKeysForShardCalled(shardId) - } - - return []string{"val1", "val2"}, nil -} - -// NumberOfShards - -func (n *NodesSetupStub) NumberOfShards() uint32 { - if n.NumberOfShardsCalled != nil { - return n.NumberOfShardsCalled() - } - return 1 -} - -// GetShardIDForPubKey - -func (n *NodesSetupStub) GetShardIDForPubKey(pubkey []byte) (uint32, error) { - if n.GetShardIDForPubKeyCalled != nil { - return n.GetShardIDForPubKeyCalled(pubkey) - } - return 0, nil -} - -// GetShardConsensusGroupSize - -func (n *NodesSetupStub) GetShardConsensusGroupSize() uint32 { - if n.GetShardConsensusGroupSizeCalled != nil { - return n.GetShardConsensusGroupSizeCalled() - } - return 1 -} - -// GetMetaConsensusGroupSize - -func (n *NodesSetupStub) GetMetaConsensusGroupSize() uint32 { - if n.GetMetaConsensusGroupSizeCalled != nil { - return n.GetMetaConsensusGroupSizeCalled() - } - return 1 -} - -// GetRoundDuration - -func (n *NodesSetupStub) GetRoundDuration() uint64 { - if n.GetRoundDurationCalled != nil { - return n.GetRoundDurationCalled() - } - return 4000 -} - -// MinNumberOfMetaNodes - -func (n *NodesSetupStub) MinNumberOfMetaNodes() uint32 { - if n.MinNumberOfMetaNodesCalled != nil { - return n.MinNumberOfMetaNodesCalled() - } - return 1 -} - -// MinNumberOfShardNodes - -func (n *NodesSetupStub) MinNumberOfShardNodes() uint32 { - if n.MinNumberOfShardNodesCalled != nil { - return n.MinNumberOfShardNodesCalled() - } - return 1 -} - -// GetHysteresis - -func (n *NodesSetupStub) GetHysteresis() float32 { - if n.GetHysteresisCalled != nil { - return n.GetHysteresisCalled() - } - return 0 -} - -// GetAdaptivity - -func (n *NodesSetupStub) GetAdaptivity() bool { - if n.GetAdaptivityCalled != nil { - return n.GetAdaptivityCalled() - } - return false -} - -// InitialNodesInfoForShard - -func (n *NodesSetupStub) InitialNodesInfoForShard( - shardId uint32, -) ([]nodesCoordinator.GenesisNodeInfoHandler, []nodesCoordinator.GenesisNodeInfoHandler, error) { - if n.InitialNodesInfoForShardCalled != nil { - return n.InitialNodesInfoForShardCalled(shardId) - } - - return nil, nil, nil -} - -// InitialNodesInfo - -func (n *NodesSetupStub) InitialNodesInfo() (map[uint32][]nodesCoordinator.GenesisNodeInfoHandler, map[uint32][]nodesCoordinator.GenesisNodeInfoHandler) { - if n.InitialNodesInfoCalled != nil { - return n.InitialNodesInfoCalled() - } - - return nil, nil -} - -// GetStartTime - -func (n *NodesSetupStub) GetStartTime() int64 { - if n.GetStartTimeCalled != nil { - return n.GetStartTimeCalled() - } - return 0 -} - -// 
MinNumberOfNodes - -func (n *NodesSetupStub) MinNumberOfNodes() uint32 { - if n.MinNumberOfNodesCalled != nil { - return n.MinNumberOfNodesCalled() - } - return 1 -} - -// MinNumberOfNodesWithHysteresis - -func (n *NodesSetupStub) MinNumberOfNodesWithHysteresis() uint32 { - if n.MinNumberOfNodesWithHysteresisCalled != nil { - return n.MinNumberOfNodesWithHysteresisCalled() - } - return n.MinNumberOfNodes() -} - -// AllInitialNodes - -func (n *NodesSetupStub) AllInitialNodes() []nodesCoordinator.GenesisNodeInfoHandler { - if n.AllInitialNodesCalled != nil { - return n.AllInitialNodesCalled() - } - return nil -} - -// MinShardHysteresisNodes - -func (n *NodesSetupStub) MinShardHysteresisNodes() uint32 { - if n.MinShardHysteresisNodesCalled != nil { - return n.MinShardHysteresisNodesCalled() - } - return 1 -} - -// MinMetaHysteresisNodes - -func (n *NodesSetupStub) MinMetaHysteresisNodes() uint32 { - if n.MinMetaHysteresisNodesCalled != nil { - return n.MinMetaHysteresisNodesCalled() - } - return 1 -} - -// IsInterfaceNil - -func (n *NodesSetupStub) IsInterfaceNil() bool { - return n == nil -} diff --git a/testscommon/nodesSetupMock/nodesSetupMock.go b/testscommon/nodesSetupMock/nodesSetupMock.go new file mode 100644 index 00000000000..392cb038719 --- /dev/null +++ b/testscommon/nodesSetupMock/nodesSetupMock.go @@ -0,0 +1,47 @@ +package nodesSetupMock + +// NodesSetupMock - +type NodesSetupMock struct { + NumberOfShardsField uint32 + HysteresisField float32 + MinNumberOfMetaNodesField uint32 + MinNumberOfShardNodesField uint32 +} + +// NumberOfShards - +func (n *NodesSetupMock) NumberOfShards() uint32 { + return n.NumberOfShardsField +} + +// GetHysteresis - +func (n *NodesSetupMock) GetHysteresis() float32 { + return n.HysteresisField +} + +// MinNumberOfMetaNodes - +func (n *NodesSetupMock) MinNumberOfMetaNodes() uint32 { + return n.MinNumberOfMetaNodesField +} + +// MinNumberOfShardNodes - +func (n *NodesSetupMock) MinNumberOfShardNodes() uint32 { + return n.MinNumberOfShardNodesField +} + +// MinNumberOfNodes - +func (n *NodesSetupMock) MinNumberOfNodes() uint32 { + return n.NumberOfShardsField*n.MinNumberOfShardNodesField + n.MinNumberOfMetaNodesField +} + +// MinNumberOfNodesWithHysteresis - +func (n *NodesSetupMock) MinNumberOfNodesWithHysteresis() uint32 { + hystNodesMeta := getHysteresisNodes(n.MinNumberOfMetaNodesField, n.HysteresisField) + hystNodesShard := getHysteresisNodes(n.MinNumberOfShardNodesField, n.HysteresisField) + minNumberOfNodes := n.MinNumberOfNodes() + + return minNumberOfNodes + hystNodesMeta + n.NumberOfShardsField*hystNodesShard +} + +func getHysteresisNodes(minNumNodes uint32, hysteresis float32) uint32 { + return uint32(float32(minNumNodes) * hysteresis) +} diff --git a/testscommon/p2pmocks/messageProcessorStub.go b/testscommon/p2pmocks/messageProcessorStub.go new file mode 100644 index 00000000000..5802dcc6785 --- /dev/null +++ b/testscommon/p2pmocks/messageProcessorStub.go @@ -0,0 +1,25 @@ +package p2pmocks + +import ( + "github.com/multiversx/mx-chain-communication-go/p2p" + "github.com/multiversx/mx-chain-core-go/core" +) + +// MessageProcessorStub - +type MessageProcessorStub struct { + ProcessReceivedMessageCalled func(message p2p.MessageP2P, fromConnectedPeer core.PeerID, source p2p.MessageHandler) error +} + +// ProcessReceivedMessage - +func (stub *MessageProcessorStub) ProcessReceivedMessage(message p2p.MessageP2P, fromConnectedPeer core.PeerID, source p2p.MessageHandler) error { + if stub.ProcessReceivedMessageCalled != nil { + return 
stub.ProcessReceivedMessageCalled(message, fromConnectedPeer, source) + } + + return nil +} + +// IsInterfaceNil - +func (stub *MessageProcessorStub) IsInterfaceNil() bool { + return stub == nil +} diff --git a/testscommon/p2pmocks/messengerStub.go b/testscommon/p2pmocks/messengerStub.go index 368b8bdadd5..77d058c71a1 100644 --- a/testscommon/p2pmocks/messengerStub.go +++ b/testscommon/p2pmocks/messengerStub.go @@ -46,6 +46,7 @@ type MessengerStub struct { SignUsingPrivateKeyCalled func(skBytes []byte, payload []byte) ([]byte, error) ProcessReceivedMessageCalled func(message p2p.MessageP2P, fromConnectedPeer core.PeerID, source p2p.MessageHandler) error SetDebuggerCalled func(debugger p2p.Debugger) error + HasCompatibleProtocolIDCalled func(address string) bool } // ID - @@ -369,6 +370,15 @@ func (ms *MessengerStub) SetDebugger(debugger p2p.Debugger) error { return nil } +// HasCompatibleProtocolID - +func (ms *MessengerStub) HasCompatibleProtocolID(address string) bool { + if ms.HasCompatibleProtocolIDCalled != nil { + return ms.HasCompatibleProtocolIDCalled(address) + } + + return false +} + // IsInterfaceNil returns true if there is no value under the interface func (ms *MessengerStub) IsInterfaceNil() bool { return ms == nil diff --git a/testscommon/pool/headersPoolStub.go b/testscommon/pool/headersPoolStub.go new file mode 100644 index 00000000000..66c01d91c68 --- /dev/null +++ b/testscommon/pool/headersPoolStub.go @@ -0,0 +1,105 @@ +package pool + +import ( + "errors" + + "github.com/multiversx/mx-chain-core-go/data" +) + +// HeadersPoolStub - +type HeadersPoolStub struct { + AddCalled func(headerHash []byte, header data.HeaderHandler) + RemoveHeaderByHashCalled func(headerHash []byte) + RemoveHeaderByNonceAndShardIdCalled func(hdrNonce uint64, shardId uint32) + GetHeaderByNonceAndShardIdCalled func(hdrNonce uint64, shardId uint32) ([]data.HeaderHandler, [][]byte, error) + GetHeaderByHashCalled func(hash []byte) (data.HeaderHandler, error) + ClearCalled func() + RegisterHandlerCalled func(handler func(header data.HeaderHandler, shardHeaderHash []byte)) + NoncesCalled func(shardId uint32) []uint64 + LenCalled func() int + MaxSizeCalled func() int + GetNumHeadersCalled func(shardId uint32) int +} + +// AddHeader - +func (hps *HeadersPoolStub) AddHeader(headerHash []byte, header data.HeaderHandler) { + if hps.AddCalled != nil { + hps.AddCalled(headerHash, header) + } +} + +// RemoveHeaderByHash - +func (hps *HeadersPoolStub) RemoveHeaderByHash(headerHash []byte) { + if hps.RemoveHeaderByHashCalled != nil { + hps.RemoveHeaderByHashCalled(headerHash) + } +} + +// RemoveHeaderByNonceAndShardId - +func (hps *HeadersPoolStub) RemoveHeaderByNonceAndShardId(hdrNonce uint64, shardId uint32) { + if hps.RemoveHeaderByNonceAndShardIdCalled != nil { + hps.RemoveHeaderByNonceAndShardIdCalled(hdrNonce, shardId) + } +} + +// GetHeadersByNonceAndShardId - +func (hps *HeadersPoolStub) GetHeadersByNonceAndShardId(hdrNonce uint64, shardId uint32) ([]data.HeaderHandler, [][]byte, error) { + if hps.GetHeaderByNonceAndShardIdCalled != nil { + return hps.GetHeaderByNonceAndShardIdCalled(hdrNonce, shardId) + } + return nil, nil, errors.New("err") +} + +// GetHeaderByHash - +func (hps *HeadersPoolStub) GetHeaderByHash(hash []byte) (data.HeaderHandler, error) { + if hps.GetHeaderByHashCalled != nil { + return hps.GetHeaderByHashCalled(hash) + } + return nil, nil +} + +// Clear - +func (hps *HeadersPoolStub) Clear() { + if hps.ClearCalled != nil { + hps.ClearCalled() + } +} + +// RegisterHandler - +func (hps 
*HeadersPoolStub) RegisterHandler(handler func(header data.HeaderHandler, shardHeaderHash []byte)) { + if hps.RegisterHandlerCalled != nil { + hps.RegisterHandlerCalled(handler) + } +} + +// Nonces - +func (hps *HeadersPoolStub) Nonces(shardId uint32) []uint64 { + if hps.NoncesCalled != nil { + return hps.NoncesCalled(shardId) + } + return nil +} + +// Len - +func (hps *HeadersPoolStub) Len() int { + return 0 +} + +// MaxSize - +func (hps *HeadersPoolStub) MaxSize() int { + return 100 +} + +// IsInterfaceNil - +func (hps *HeadersPoolStub) IsInterfaceNil() bool { + return hps == nil +} + +// GetNumHeaders - +func (hps *HeadersPoolStub) GetNumHeaders(shardId uint32) int { + if hps.GetNumHeadersCalled != nil { + return hps.GetNumHeadersCalled(shardId) + } + + return 0 +} diff --git a/factory/mock/forkDetectorStub.go b/testscommon/processMocks/forkDetectorStub.go similarity index 94% rename from factory/mock/forkDetectorStub.go rename to testscommon/processMocks/forkDetectorStub.go index 640c7e3899f..80ddc4d2ebf 100644 --- a/factory/mock/forkDetectorStub.go +++ b/testscommon/processMocks/forkDetectorStub.go @@ -1,4 +1,4 @@ -package mock +package processMocks import ( "github.com/multiversx/mx-chain-core-go/data" @@ -28,7 +28,10 @@ func (fdm *ForkDetectorStub) RestoreToGenesis() { // AddHeader - func (fdm *ForkDetectorStub) AddHeader(header data.HeaderHandler, hash []byte, state process.BlockHeaderState, selfNotarizedHeaders []data.HeaderHandler, selfNotarizedHeadersHashes [][]byte) error { - return fdm.AddHeaderCalled(header, hash, state, selfNotarizedHeaders, selfNotarizedHeadersHashes) + if fdm.AddHeaderCalled != nil { + return fdm.AddHeaderCalled(header, hash, state, selfNotarizedHeaders, selfNotarizedHeadersHashes) + } + return nil } // RemoveHeader - diff --git a/testscommon/realConfigsHandling.go b/testscommon/realConfigsHandling.go index 024fe336b9f..e58b36923f8 100644 --- a/testscommon/realConfigsHandling.go +++ b/testscommon/realConfigsHandling.go @@ -5,60 +5,83 @@ import ( "os/exec" "path" "strings" - "testing" "github.com/multiversx/mx-chain-go/common" "github.com/multiversx/mx-chain-go/config" - "github.com/stretchr/testify/require" ) // CreateTestConfigs will try to copy the whole configs directory to a temp directory and return the configs after load // The copying of the configs is required because minor adjustments of their contents is required for the tests to pass -func CreateTestConfigs(tb testing.TB, originalConfigsPath string) *config.Configs { - tempDir := tb.TempDir() - +func CreateTestConfigs(tempDir string, originalConfigsPath string) (*config.Configs, error) { newConfigsPath := path.Join(tempDir, "config") // TODO refactor this cp to work on all OSes cmd := exec.Command("cp", "-r", originalConfigsPath, newConfigsPath) err := cmd.Run() - require.Nil(tb, err) + if err != nil { + return nil, err + } newGenesisSmartContractsFilename := path.Join(newConfigsPath, "genesisSmartContracts.json") - correctTestPathInGenesisSmartContracts(tb, tempDir, newGenesisSmartContractsFilename) + err = correctTestPathInGenesisSmartContracts(tempDir, newGenesisSmartContractsFilename) + if err != nil { + return nil, err + } apiConfig, err := common.LoadApiConfig(path.Join(newConfigsPath, "api.toml")) - require.Nil(tb, err) + if err != nil { + return nil, err + } generalConfig, err := common.LoadMainConfig(path.Join(newConfigsPath, "config.toml")) - require.Nil(tb, err) + if err != nil { + return nil, err + } ratingsConfig, err := common.LoadRatingsConfig(path.Join(newConfigsPath, 
"ratings.toml")) - require.Nil(tb, err) + if err != nil { + return nil, err + } economicsConfig, err := common.LoadEconomicsConfig(path.Join(newConfigsPath, "economics.toml")) - require.Nil(tb, err) + if err != nil { + return nil, err + } prefsConfig, err := common.LoadPreferencesConfig(path.Join(newConfigsPath, "prefs.toml")) - require.Nil(tb, err) + if err != nil { + return nil, err + } mainP2PConfig, err := common.LoadP2PConfig(path.Join(newConfigsPath, "p2p.toml")) - require.Nil(tb, err) + if err != nil { + return nil, err + } fullArchiveP2PConfig, err := common.LoadP2PConfig(path.Join(newConfigsPath, "fullArchiveP2P.toml")) - require.Nil(tb, err) + if err != nil { + return nil, err + } externalConfig, err := common.LoadExternalConfig(path.Join(newConfigsPath, "external.toml")) - require.Nil(tb, err) + if err != nil { + return nil, err + } systemSCConfig, err := common.LoadSystemSmartContractsConfig(path.Join(newConfigsPath, "systemSmartContractsConfig.toml")) - require.Nil(tb, err) + if err != nil { + return nil, err + } epochConfig, err := common.LoadEpochConfig(path.Join(newConfigsPath, "enableEpochs.toml")) - require.Nil(tb, err) + if err != nil { + return nil, err + } roundConfig, err := common.LoadRoundConfig(path.Join(newConfigsPath, "enableRounds.toml")) - require.Nil(tb, err) + if err != nil { + return nil, err + } // make the node pass the network wait constraints mainP2PConfig.Node.MinNumPeersToWaitForOnBootstrap = 0 @@ -91,12 +114,14 @@ func CreateTestConfigs(tb testing.TB, originalConfigsPath string) *config.Config }, EpochConfig: epochConfig, RoundConfig: roundConfig, - } + }, nil } -func correctTestPathInGenesisSmartContracts(tb testing.TB, tempDir string, newGenesisSmartContractsFilename string) { +func correctTestPathInGenesisSmartContracts(tempDir string, newGenesisSmartContractsFilename string) error { input, err := os.ReadFile(newGenesisSmartContractsFilename) - require.Nil(tb, err) + if err != nil { + return err + } lines := strings.Split(string(input), "\n") for i, line := range lines { @@ -105,6 +130,5 @@ func correctTestPathInGenesisSmartContracts(tb testing.TB, tempDir string, newGe } } output := strings.Join(lines, "\n") - err = os.WriteFile(newGenesisSmartContractsFilename, []byte(output), 0644) - require.Nil(tb, err) + return os.WriteFile(newGenesisSmartContractsFilename, []byte(output), 0644) } diff --git a/epochStart/mock/rewardsCreatorStub.go b/testscommon/rewardsCreatorStub.go similarity index 90% rename from epochStart/mock/rewardsCreatorStub.go rename to testscommon/rewardsCreatorStub.go index 9073048cca7..b9b0b2b0492 100644 --- a/epochStart/mock/rewardsCreatorStub.go +++ b/testscommon/rewardsCreatorStub.go @@ -1,4 +1,4 @@ -package mock +package testscommon import ( "math/big" @@ -12,10 +12,10 @@ import ( // RewardsCreatorStub - type RewardsCreatorStub struct { CreateRewardsMiniBlocksCalled func( - metaBlock data.MetaHeaderHandler, validatorsInfo map[uint32][]*state.ValidatorInfo, computedEconomics *block.Economics, + metaBlock data.MetaHeaderHandler, validatorsInfo state.ShardValidatorsInfoMapHandler, computedEconomics *block.Economics, ) (block.MiniBlockSlice, error) VerifyRewardsMiniBlocksCalled func( - metaBlock data.MetaHeaderHandler, validatorsInfo map[uint32][]*state.ValidatorInfo, computedEconomics *block.Economics, + metaBlock data.MetaHeaderHandler, validatorsInfo state.ShardValidatorsInfoMapHandler, computedEconomics *block.Economics, ) error GetProtocolSustainabilityRewardsCalled func() *big.Int GetLocalTxCacheCalled func() 
epochStart.TransactionCacher @@ -29,7 +29,7 @@ type RewardsCreatorStub struct { // CreateRewardsMiniBlocks - func (rcs *RewardsCreatorStub) CreateRewardsMiniBlocks( metaBlock data.MetaHeaderHandler, - validatorsInfo map[uint32][]*state.ValidatorInfo, + validatorsInfo state.ShardValidatorsInfoMapHandler, computedEconomics *block.Economics, ) (block.MiniBlockSlice, error) { if rcs.CreateRewardsMiniBlocksCalled != nil { @@ -42,7 +42,7 @@ func (rcs *RewardsCreatorStub) CreateRewardsMiniBlocks( // VerifyRewardsMiniBlocks - func (rcs *RewardsCreatorStub) VerifyRewardsMiniBlocks( metaBlock data.MetaHeaderHandler, - validatorsInfo map[uint32][]*state.ValidatorInfo, + validatorsInfo state.ShardValidatorsInfoMapHandler, computedEconomics *block.Economics, ) error { if rcs.VerifyRewardsMiniBlocksCalled != nil { diff --git a/testscommon/roundHandlerMock.go b/testscommon/roundHandlerMock.go index 976e8a55181..6c5d45cc7bc 100644 --- a/testscommon/roundHandlerMock.go +++ b/testscommon/roundHandlerMock.go @@ -10,12 +10,13 @@ type RoundHandlerMock struct { indexMut sync.RWMutex index int64 - IndexCalled func() int64 - TimeDurationCalled func() time.Duration - TimeStampCalled func() time.Time - UpdateRoundCalled func(time.Time, time.Time) - RemainingTimeCalled func(startTime time.Time, maxTime time.Duration) time.Duration - BeforeGenesisCalled func() bool + IndexCalled func() int64 + TimeDurationCalled func() time.Duration + TimeStampCalled func() time.Time + UpdateRoundCalled func(time.Time, time.Time) + RemainingTimeCalled func(startTime time.Time, maxTime time.Duration) time.Duration + BeforeGenesisCalled func() bool + IncrementIndexCalled func() } // BeforeGenesis - @@ -77,6 +78,13 @@ func (rndm *RoundHandlerMock) RemainingTime(startTime time.Time, maxTime time.Du return 4000 * time.Millisecond } +// IncrementIndex - +func (rndm *RoundHandlerMock) IncrementIndex() { + if rndm.IncrementIndexCalled != nil { + rndm.IncrementIndexCalled() + } +} + // IsInterfaceNil returns true if there is no value under the interface func (rndm *RoundHandlerMock) IsInterfaceNil() bool { return rndm == nil diff --git a/consensus/mock/sentSignatureTrackerStub.go b/testscommon/sentSignatureTrackerStub.go similarity index 52% rename from consensus/mock/sentSignatureTrackerStub.go rename to testscommon/sentSignatureTrackerStub.go index f61bcf2e778..c051d0c60a7 100644 --- a/consensus/mock/sentSignatureTrackerStub.go +++ b/testscommon/sentSignatureTrackerStub.go @@ -1,10 +1,10 @@ -package mock +package testscommon // SentSignatureTrackerStub - type SentSignatureTrackerStub struct { - StartRoundCalled func() - SignatureSentCalled func(pkBytes []byte) - ReceivedActualSignersCalled func(signersPks []string) + StartRoundCalled func() + SignatureSentCalled func(pkBytes []byte) + ResetCountersForManagedBlockSignerCalled func(signerPk []byte) } // StartRound - @@ -21,10 +21,10 @@ func (stub *SentSignatureTrackerStub) SignatureSent(pkBytes []byte) { } } -// ReceivedActualSigners - -func (stub *SentSignatureTrackerStub) ReceivedActualSigners(signersPks []string) { - if stub.ReceivedActualSignersCalled != nil { - stub.ReceivedActualSignersCalled(signersPks) +// ResetCountersForManagedBlockSigner - +func (stub *SentSignatureTrackerStub) ResetCountersForManagedBlockSigner(signerPk []byte) { + if stub.ResetCountersForManagedBlockSignerCalled != nil { + stub.ResetCountersForManagedBlockSignerCalled(signerPk) } } diff --git a/testscommon/shardingMocks/nodesCoordRegistryFactoryMock.go b/testscommon/shardingMocks/nodesCoordRegistryFactoryMock.go 
new file mode 100644 index 00000000000..2ed51dc9188 --- /dev/null +++ b/testscommon/shardingMocks/nodesCoordRegistryFactoryMock.go @@ -0,0 +1,32 @@ +package shardingMocks + +import ( + "encoding/json" + + "github.com/multiversx/mx-chain-go/sharding/nodesCoordinator" +) + +// NodesCoordinatorRegistryFactoryMock - +type NodesCoordinatorRegistryFactoryMock struct { +} + +// CreateNodesCoordinatorRegistry - +func (ncr *NodesCoordinatorRegistryFactoryMock) CreateNodesCoordinatorRegistry(buff []byte) (nodesCoordinator.NodesCoordinatorRegistryHandler, error) { + registry := &nodesCoordinator.NodesCoordinatorRegistry{} + err := json.Unmarshal(buff, registry) + if err != nil { + return nil, err + } + + return registry, nil +} + +// GetRegistryData - +func (ncr *NodesCoordinatorRegistryFactoryMock) GetRegistryData(registry nodesCoordinator.NodesCoordinatorRegistryHandler, _ uint32) ([]byte, error) { + return json.Marshal(registry) +} + +// IsInterfaceNil - +func (ncr *NodesCoordinatorRegistryFactoryMock) IsInterfaceNil() bool { + return ncr == nil +} diff --git a/testscommon/shardingMocks/nodesCoordinatorMock.go b/testscommon/shardingMocks/nodesCoordinatorMock.go index 8ea7177705b..3ee80f88d3d 100644 --- a/testscommon/shardingMocks/nodesCoordinatorMock.go +++ b/testscommon/shardingMocks/nodesCoordinatorMock.go @@ -11,21 +11,23 @@ import ( // NodesCoordinatorMock defines the behaviour of a struct able to do validator group selection type NodesCoordinatorMock struct { - Validators map[uint32][]nodesCoordinator.Validator - ShardConsensusSize uint32 - MetaConsensusSize uint32 - ShardId uint32 - NbShards uint32 - GetSelectedPublicKeysCalled func(selection []byte, shardId uint32, epoch uint32) (publicKeys []string, err error) - GetValidatorsPublicKeysCalled func(randomness []byte, round uint64, shardId uint32, epoch uint32) ([]string, error) - GetValidatorsRewardsAddressesCalled func(randomness []byte, round uint64, shardId uint32, epoch uint32) ([]string, error) - SetNodesPerShardsCalled func(nodes map[uint32][]nodesCoordinator.Validator, epoch uint32) error - ComputeValidatorsGroupCalled func(randomness []byte, round uint64, shardId uint32, epoch uint32) (validatorsGroup []nodesCoordinator.Validator, err error) - GetValidatorWithPublicKeyCalled func(publicKey []byte) (validator nodesCoordinator.Validator, shardId uint32, err error) - GetAllEligibleValidatorsPublicKeysCalled func(epoch uint32) (map[uint32][][]byte, error) - GetAllWaitingValidatorsPublicKeysCalled func() (map[uint32][][]byte, error) - ConsensusGroupSizeCalled func(uint32) int - GetValidatorsIndexesCalled func(publicKeys []string, epoch uint32) ([]uint64, error) + Validators map[uint32][]nodesCoordinator.Validator + ShardConsensusSize uint32 + MetaConsensusSize uint32 + ShardId uint32 + NbShards uint32 + GetSelectedPublicKeysCalled func(selection []byte, shardId uint32, epoch uint32) (publicKeys []string, err error) + GetValidatorsPublicKeysCalled func(randomness []byte, round uint64, shardId uint32, epoch uint32) ([]string, error) + GetValidatorsRewardsAddressesCalled func(randomness []byte, round uint64, shardId uint32, epoch uint32) ([]string, error) + SetNodesPerShardsCalled func(nodes map[uint32][]nodesCoordinator.Validator, epoch uint32) error + ComputeValidatorsGroupCalled func(randomness []byte, round uint64, shardId uint32, epoch uint32) (validatorsGroup []nodesCoordinator.Validator, err error) + GetValidatorWithPublicKeyCalled func(publicKey []byte) (validator nodesCoordinator.Validator, shardId uint32, err error) + 
GetAllEligibleValidatorsPublicKeysCalled func(epoch uint32) (map[uint32][][]byte, error) + GetAllWaitingValidatorsPublicKeysCalled func() (map[uint32][][]byte, error) + ConsensusGroupSizeCalled func(uint32) int + GetValidatorsIndexesCalled func(publicKeys []string, epoch uint32) ([]uint64, error) + GetAllShuffledOutValidatorsPublicKeysCalled func(epoch uint32) (map[uint32][][]byte, error) + GetNumTotalEligibleCalled func() uint64 } // NewNodesCoordinatorMock - @@ -78,6 +80,9 @@ func (ncm *NodesCoordinatorMock) GetAllLeavingValidatorsPublicKeys(_ uint32) (ma // GetNumTotalEligible - func (ncm *NodesCoordinatorMock) GetNumTotalEligible() uint64 { + if ncm.GetNumTotalEligibleCalled != nil { + return ncm.GetNumTotalEligibleCalled() + } return 1 } @@ -97,6 +102,14 @@ func (ncm *NodesCoordinatorMock) GetAllWaitingValidatorsPublicKeys(_ uint32) (ma return nil, nil } +// GetAllShuffledOutValidatorsPublicKeys - +func (ncm *NodesCoordinatorMock) GetAllShuffledOutValidatorsPublicKeys(epoch uint32) (map[uint32][][]byte, error) { + if ncm.GetAllShuffledOutValidatorsPublicKeysCalled != nil { + return ncm.GetAllShuffledOutValidatorsPublicKeysCalled(epoch) + } + return nil, nil +} + // GetValidatorsIndexes - func (ncm *NodesCoordinatorMock) GetValidatorsIndexes(publicKeys []string, epoch uint32) ([]uint64, error) { if ncm.GetValidatorsIndexesCalled != nil { diff --git a/testscommon/shardingMocks/nodesCoordinatorStub.go b/testscommon/shardingMocks/nodesCoordinatorStub.go index a9d3aecf380..9f82a5256e5 100644 --- a/testscommon/shardingMocks/nodesCoordinatorStub.go +++ b/testscommon/shardingMocks/nodesCoordinatorStub.go @@ -8,7 +8,6 @@ import ( // NodesCoordinatorStub - type NodesCoordinatorStub struct { - ComputeValidatorsGroupCalled func(randomness []byte, round uint64, shardId uint32, epoch uint32) ([]nodesCoordinator.Validator, error) GetValidatorsPublicKeysCalled func(randomness []byte, round uint64, shardId uint32, epoch uint32) ([]string, error) GetValidatorsRewardsAddressesCalled func(randomness []byte, round uint64, shardId uint32, epoch uint32) ([]string, error) GetValidatorWithPublicKeyCalled func(publicKey []byte) (validator nodesCoordinator.Validator, shardId uint32, err error) @@ -21,10 +20,11 @@ type NodesCoordinatorStub struct { GetConsensusWhitelistedNodesCalled func(epoch uint32) (map[string]struct{}, error) GetOwnPublicKeyCalled func() []byte GetWaitingEpochsLeftForPublicKeyCalled func(publicKey []byte) (uint32, error) + GetNumTotalEligibleCalled func() uint64 } // NodesCoordinatorToRegistry - -func (ncm *NodesCoordinatorStub) NodesCoordinatorToRegistry() *nodesCoordinator.NodesCoordinatorRegistry { +func (ncm *NodesCoordinatorStub) NodesCoordinatorToRegistry(uint32) nodesCoordinator.NodesCoordinatorRegistryHandler { return nil } @@ -51,7 +51,7 @@ func (ncm *NodesCoordinatorStub) GetAllLeavingValidatorsPublicKeys(_ uint32) (ma } // SetConfig - -func (ncm *NodesCoordinatorStub) SetConfig(_ *nodesCoordinator.NodesCoordinatorRegistry) error { +func (ncm *NodesCoordinatorStub) SetConfig(_ nodesCoordinator.NodesCoordinatorRegistryHandler) error { return nil } @@ -77,8 +77,16 @@ func (ncm *NodesCoordinatorStub) GetAllWaitingValidatorsPublicKeys(epoch uint32) return nil, nil } +// GetAllShuffledOutValidatorsPublicKeys - +func (ncm *NodesCoordinatorStub) GetAllShuffledOutValidatorsPublicKeys(_ uint32) (map[uint32][][]byte, error) { + return nil, nil +} + // GetNumTotalEligible - func (ncm *NodesCoordinatorStub) GetNumTotalEligible() uint64 { + if ncm.GetNumTotalEligibleCalled != nil { + return 
ncm.GetNumTotalEligibleCalled() + } return 1 } @@ -103,8 +111,8 @@ func (ncm *NodesCoordinatorStub) ComputeConsensusGroup( shardId uint32, epoch uint32, ) (validatorsGroup []nodesCoordinator.Validator, err error) { - if ncm.ComputeValidatorsGroupCalled != nil { - return ncm.ComputeValidatorsGroupCalled(randomness, round, shardId, epoch) + if ncm.ComputeConsensusGroupCalled != nil { + return ncm.ComputeConsensusGroupCalled(randomness, round, shardId, epoch) } var list []nodesCoordinator.Validator diff --git a/testscommon/stakingcommon/auctionListSelectorStub.go b/testscommon/stakingcommon/auctionListSelectorStub.go new file mode 100644 index 00000000000..8cc24960c82 --- /dev/null +++ b/testscommon/stakingcommon/auctionListSelectorStub.go @@ -0,0 +1,25 @@ +package stakingcommon + +import "github.com/multiversx/mx-chain-go/state" + +// AuctionListSelectorStub - +type AuctionListSelectorStub struct { + SelectNodesFromAuctionListCalled func(validatorsInfoMap state.ShardValidatorsInfoMapHandler, randomness []byte) error +} + +// SelectNodesFromAuctionList - +func (als *AuctionListSelectorStub) SelectNodesFromAuctionList( + validatorsInfoMap state.ShardValidatorsInfoMapHandler, + randomness []byte, +) error { + if als.SelectNodesFromAuctionListCalled != nil { + return als.SelectNodesFromAuctionListCalled(validatorsInfoMap, randomness) + } + + return nil +} + +// IsInterfaceNil - +func (als *AuctionListSelectorStub) IsInterfaceNil() bool { + return als == nil +} diff --git a/testscommon/stakingcommon/stakingCommon.go b/testscommon/stakingcommon/stakingCommon.go new file mode 100644 index 00000000000..1af9b441b9c --- /dev/null +++ b/testscommon/stakingcommon/stakingCommon.go @@ -0,0 +1,333 @@ +package stakingcommon + +import ( + "math/big" + "strconv" + + "github.com/multiversx/mx-chain-core-go/marshal" + "github.com/multiversx/mx-chain-go/config" + "github.com/multiversx/mx-chain-go/genesis/process/disabled" + "github.com/multiversx/mx-chain-go/process" + economicsHandler "github.com/multiversx/mx-chain-go/process/economics" + "github.com/multiversx/mx-chain-go/state" + "github.com/multiversx/mx-chain-go/testscommon/enableEpochsHandlerMock" + "github.com/multiversx/mx-chain-go/testscommon/epochNotifier" + "github.com/multiversx/mx-chain-go/vm" + "github.com/multiversx/mx-chain-go/vm/systemSmartContracts" + logger "github.com/multiversx/mx-chain-logger-go" +) + +var log = logger.GetOrCreate("testscommon/stakingCommon") + +// RegisterValidatorKeys will register validator's staked key in the provided accounts db +func RegisterValidatorKeys( + accountsDB state.AccountsAdapter, + ownerAddress []byte, + rewardAddress []byte, + stakedKeys [][]byte, + totalStake *big.Int, + marshaller marshal.Marshalizer, +) { + AddValidatorData(accountsDB, ownerAddress, stakedKeys, totalStake, marshaller) + AddStakingData(accountsDB, ownerAddress, rewardAddress, stakedKeys, marshaller) + _, err := accountsDB.Commit() + log.LogIfError(err) +} + +// AddValidatorData will add the validator's registered keys in the provided accounts db +func AddValidatorData( + accountsDB state.AccountsAdapter, + ownerKey []byte, + registeredKeys [][]byte, + totalStake *big.Int, + marshaller marshal.Marshalizer, +) { + validatorSC := LoadUserAccount(accountsDB, vm.ValidatorSCAddress) + ownerStoredData, _, _ := validatorSC.RetrieveValue(ownerKey) + validatorData := &systemSmartContracts.ValidatorDataV2{} + if len(ownerStoredData) != 0 { + _ = marshaller.Unmarshal(validatorData, ownerStoredData) + validatorData.BlsPubKeys = 
append(validatorData.BlsPubKeys, registeredKeys...) + validatorData.TotalStakeValue = totalStake + } else { + validatorData = &systemSmartContracts.ValidatorDataV2{ + RegisterNonce: 0, + Epoch: 0, + RewardAddress: ownerKey, + TotalStakeValue: totalStake, + LockedStake: big.NewInt(0), + TotalUnstaked: big.NewInt(0), + BlsPubKeys: registeredKeys, + NumRegistered: uint32(len(registeredKeys)), + } + } + + marshaledData, _ := marshaller.Marshal(validatorData) + _ = validatorSC.SaveKeyValue(ownerKey, marshaledData) + + _ = accountsDB.SaveAccount(validatorSC) +} + +// AddStakingData will add the owner's staked keys in the provided accounts db +func AddStakingData( + accountsDB state.AccountsAdapter, + ownerAddress []byte, + rewardAddress []byte, + stakedKeys [][]byte, + marshaller marshal.Marshalizer, +) { + stakedData := &systemSmartContracts.StakedDataV2_0{ + Staked: true, + RewardAddress: rewardAddress, + OwnerAddress: ownerAddress, + StakeValue: big.NewInt(100), + } + marshaledData, _ := marshaller.Marshal(stakedData) + + stakingSCAcc := LoadUserAccount(accountsDB, vm.StakingSCAddress) + for _, key := range stakedKeys { + _ = stakingSCAcc.SaveKeyValue(key, marshaledData) + } + + _ = accountsDB.SaveAccount(stakingSCAcc) +} + +// AddKeysToWaitingList will add the owner's provided bls keys in the staking queue list +func AddKeysToWaitingList( + accountsDB state.AccountsAdapter, + waitingKeys [][]byte, + marshaller marshal.Marshalizer, + rewardAddress []byte, + ownerAddress []byte, +) { + if len(waitingKeys) == 0 { + return + } + + stakingSCAcc := LoadUserAccount(accountsDB, vm.StakingSCAddress) + waitingList := getWaitingList(stakingSCAcc, marshaller) + + waitingListAlreadyHasElements := waitingList.Length > 0 + waitingListLastKeyBeforeAddingNewKeys := waitingList.LastKey + previousKey := waitingList.LastKey + if !waitingListAlreadyHasElements { + waitingList.FirstKey = getPrefixedWaitingKey(waitingKeys[0]) + previousKey = waitingList.FirstKey + } + + numWaitingKeys := len(waitingKeys) + waitingList.LastKey = getPrefixedWaitingKey(waitingKeys[numWaitingKeys-1]) + waitingList.Length += uint32(numWaitingKeys) + saveWaitingList(stakingSCAcc, marshaller, waitingList) + + for i, waitingKey := range waitingKeys { + waitingListElement := &systemSmartContracts.ElementInList{ + BLSPublicKey: waitingKey, + PreviousKey: previousKey, + NextKey: make([]byte, 0), + } + + if i < numWaitingKeys-1 { + nextKey := getPrefixedWaitingKey(waitingKeys[i+1]) + waitingListElement.NextKey = nextKey + } + + prefixedWaitingKey := getPrefixedWaitingKey(waitingKey) + saveStakedWaitingKey(stakingSCAcc, marshaller, rewardAddress, ownerAddress, waitingKey) + saveElemInList(stakingSCAcc, marshaller, waitingListElement, prefixedWaitingKey) + + previousKey = prefixedWaitingKey + } + + if waitingListAlreadyHasElements { + lastElem, _ := GetWaitingListElement(stakingSCAcc, marshaller, waitingListLastKeyBeforeAddingNewKeys) + lastElem.NextKey = getPrefixedWaitingKey(waitingKeys[0]) + saveElemInList(stakingSCAcc, marshaller, lastElem, waitingListLastKeyBeforeAddingNewKeys) + } + + _ = accountsDB.SaveAccount(stakingSCAcc) +} + +func getWaitingList( + stakingSCAcc state.UserAccountHandler, + marshaller marshal.Marshalizer, +) *systemSmartContracts.WaitingList { + marshaledData, _, _ := stakingSCAcc.RetrieveValue([]byte("waitingList")) + waitingList := &systemSmartContracts.WaitingList{} + _ = marshaller.Unmarshal(waitingList, marshaledData) + + return waitingList +} + +func saveWaitingList( + stakingSCAcc state.UserAccountHandler, + 
marshaller marshal.Marshalizer, + waitingList *systemSmartContracts.WaitingList, +) { + marshaledData, _ := marshaller.Marshal(waitingList) + _ = stakingSCAcc.SaveKeyValue([]byte("waitingList"), marshaledData) +} + +func getPrefixedWaitingKey(key []byte) []byte { + return []byte("w_" + string(key)) +} + +func saveStakedWaitingKey( + stakingSCAcc state.UserAccountHandler, + marshaller marshal.Marshalizer, + rewardAddress []byte, + ownerAddress []byte, + key []byte, +) { + stakedData := &systemSmartContracts.StakedDataV2_0{ + Waiting: true, + RewardAddress: rewardAddress, + OwnerAddress: ownerAddress, + StakeValue: big.NewInt(100), + } + + marshaledData, _ := marshaller.Marshal(stakedData) + _ = stakingSCAcc.SaveKeyValue(key, marshaledData) +} + +func saveElemInList( + stakingSCAcc state.UserAccountHandler, + marshaller marshal.Marshalizer, + elem *systemSmartContracts.ElementInList, + key []byte, +) { + marshaledData, _ := marshaller.Marshal(elem) + _ = stakingSCAcc.SaveKeyValue(key, marshaledData) +} + +// GetWaitingListElement returns the element in waiting list saved at the provided key +func GetWaitingListElement( + stakingSCAcc state.UserAccountHandler, + marshaller marshal.Marshalizer, + key []byte, +) (*systemSmartContracts.ElementInList, error) { + marshaledData, _, _ := stakingSCAcc.RetrieveValue(key) + if len(marshaledData) == 0 { + return nil, vm.ErrElementNotFound + } + + element := &systemSmartContracts.ElementInList{} + err := marshaller.Unmarshal(element, marshaledData) + if err != nil { + return nil, err + } + + return element, nil +} + +// LoadUserAccount returns address's state.UserAccountHandler from the provided db +func LoadUserAccount(accountsDB state.AccountsAdapter, address []byte) state.UserAccountHandler { + acc, _ := accountsDB.LoadAccount(address) + return acc.(state.UserAccountHandler) +} + +// CreateEconomicsData returns an initialized process.EconomicsDataHandler +func CreateEconomicsData() process.EconomicsDataHandler { + maxGasLimitPerBlock := strconv.FormatUint(1500000000, 10) + minGasPrice := strconv.FormatUint(10, 10) + minGasLimit := strconv.FormatUint(10, 10) + + argsNewEconomicsData := economicsHandler.ArgsNewEconomicsData{ + Economics: &config.EconomicsConfig{ + GlobalSettings: config.GlobalSettings{ + GenesisTotalSupply: "2000000000000000000000", + MinimumInflation: 0, + YearSettings: []*config.YearSetting{ + { + Year: 0, + MaximumInflation: 0.01, + }, + }, + }, + RewardsSettings: config.RewardsSettings{ + RewardsConfigByEpoch: []config.EpochRewardSettings{ + { + LeaderPercentage: 0.1, + DeveloperPercentage: 0.1, + ProtocolSustainabilityPercentage: 0.1, + ProtocolSustainabilityAddress: "protocol", + TopUpGradientPoint: "300000000000000000000", + TopUpFactor: 0.25, + }, + }, + }, + FeeSettings: config.FeeSettings{ + GasLimitSettings: []config.GasLimitSetting{ + { + MaxGasLimitPerBlock: maxGasLimitPerBlock, + MaxGasLimitPerMiniBlock: maxGasLimitPerBlock, + MaxGasLimitPerMetaBlock: maxGasLimitPerBlock, + MaxGasLimitPerMetaMiniBlock: maxGasLimitPerBlock, + MaxGasLimitPerTx: maxGasLimitPerBlock, + MinGasLimit: minGasLimit, + ExtraGasLimitGuardedTx: maxGasLimitPerBlock, + }, + }, + MinGasPrice: minGasPrice, + GasPerDataByte: "1", + GasPriceModifier: 1.0, + MaxGasPriceSetGuardian: minGasPrice, + }, + }, + EpochNotifier: &epochNotifier.EpochNotifierStub{}, + EnableEpochsHandler: &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, + TxVersionChecker: &disabled.TxVersionChecker{}, + } + economicsData, _ := 
economicsHandler.NewEconomicsData(argsNewEconomicsData) + return economicsData +} + +// SaveNodesConfig saves the nodes config in accounts db under "nodesConfig" key with provided params +func SaveNodesConfig( + accountsDB state.AccountsAdapter, + marshaller marshal.Marshalizer, + stakedNodes, + minNumNodes, + maxNumNodes int64, +) { + nodesConfigData := &systemSmartContracts.StakingNodesConfig{ + StakedNodes: stakedNodes, + MinNumNodes: minNumNodes, + MaxNumNodes: maxNumNodes, + } + nodesDataBytes, err := marshaller.Marshal(nodesConfigData) + log.LogIfError(err) + + account, err := accountsDB.LoadAccount(vm.StakingSCAddress) + log.LogIfError(err) + + userAccount, _ := account.(state.UserAccountHandler) + err = userAccount.SaveKeyValue([]byte("nodesConfig"), nodesDataBytes) + log.LogIfError(err) + err = accountsDB.SaveAccount(account) + log.LogIfError(err) + _, err = accountsDB.Commit() + log.LogIfError(err) +} + +// SaveDelegationManagerConfig will save a mock configuration for the delegation manager SC +func SaveDelegationManagerConfig(accountsDB state.AccountsAdapter, marshaller marshal.Marshalizer) { + managementData := &systemSmartContracts.DelegationManagement{ + MinDeposit: big.NewInt(100), + LastAddress: vm.FirstDelegationSCAddress, + MinDelegationAmount: big.NewInt(1), + } + marshaledData, err := marshaller.Marshal(managementData) + log.LogIfError(err) + + acc, err := accountsDB.LoadAccount(vm.DelegationManagerSCAddress) + log.LogIfError(err) + delegationAcc, _ := acc.(state.UserAccountHandler) + + err = delegationAcc.SaveKeyValue([]byte("delegationManagement"), marshaledData) + log.LogIfError(err) + err = accountsDB.SaveAccount(delegationAcc) + log.LogIfError(err) + _, err = accountsDB.Commit() + log.LogIfError(err) +} diff --git a/epochStart/mock/stakingDataProviderStub.go b/testscommon/stakingcommon/stakingDataProviderStub.go similarity index 51% rename from epochStart/mock/stakingDataProviderStub.go rename to testscommon/stakingcommon/stakingDataProviderStub.go index a2cab61586b..dc2b990c20c 100644 --- a/epochStart/mock/stakingDataProviderStub.go +++ b/testscommon/stakingcommon/stakingDataProviderStub.go @@ -1,32 +1,35 @@ -package mock +package stakingcommon import ( "math/big" + "github.com/multiversx/mx-chain-go/epochStart" "github.com/multiversx/mx-chain-go/state" ) // StakingDataProviderStub - type StakingDataProviderStub struct { CleanCalled func() - PrepareStakingDataCalled func(keys map[uint32][][]byte) error + PrepareStakingDataCalled func(validatorsMap state.ShardValidatorsInfoMapHandler) error GetTotalStakeEligibleNodesCalled func() *big.Int GetTotalTopUpStakeEligibleNodesCalled func() *big.Int GetNodeStakedTopUpCalled func(blsKey []byte) (*big.Int, error) - FillValidatorInfoCalled func(blsKey []byte) error - ComputeUnQualifiedNodesCalled func(validatorInfos map[uint32][]*state.ValidatorInfo) ([][]byte, map[string][][]byte, error) + FillValidatorInfoCalled func(validator state.ValidatorInfoHandler) error + ComputeUnQualifiedNodesCalled func(validatorInfos state.ShardValidatorsInfoMapHandler) ([][]byte, map[string][][]byte, error) + GetBlsKeyOwnerCalled func(blsKey []byte) (string, error) + GetOwnersDataCalled func() map[string]*epochStart.OwnerData } // FillValidatorInfo - -func (sdps *StakingDataProviderStub) FillValidatorInfo(blsKey []byte) error { +func (sdps *StakingDataProviderStub) FillValidatorInfo(validator state.ValidatorInfoHandler) error { if sdps.FillValidatorInfoCalled != nil { - return sdps.FillValidatorInfoCalled(blsKey) + return 
sdps.FillValidatorInfoCalled(validator) } return nil } // ComputeUnQualifiedNodes - -func (sdps *StakingDataProviderStub) ComputeUnQualifiedNodes(validatorInfos map[uint32][]*state.ValidatorInfo) ([][]byte, map[string][][]byte, error) { +func (sdps *StakingDataProviderStub) ComputeUnQualifiedNodes(validatorInfos state.ShardValidatorsInfoMapHandler) ([][]byte, map[string][][]byte, error) { if sdps.ComputeUnQualifiedNodesCalled != nil { return sdps.ComputeUnQualifiedNodesCalled(validatorInfos) } @@ -57,10 +60,10 @@ func (sdps *StakingDataProviderStub) GetNodeStakedTopUp(blsKey []byte) (*big.Int return big.NewInt(0), nil } -// PrepareStakingDataForRewards - -func (sdps *StakingDataProviderStub) PrepareStakingDataForRewards(keys map[uint32][][]byte) error { +// PrepareStakingData - +func (sdps *StakingDataProviderStub) PrepareStakingData(validatorsMap state.ShardValidatorsInfoMapHandler) error { if sdps.PrepareStakingDataCalled != nil { - return sdps.PrepareStakingDataCalled(keys) + return sdps.PrepareStakingDataCalled(validatorsMap) } return nil } @@ -72,6 +75,31 @@ func (sdps *StakingDataProviderStub) Clean() { } } +// GetBlsKeyOwner - +func (sdps *StakingDataProviderStub) GetBlsKeyOwner(blsKey []byte) (string, error) { + if sdps.GetBlsKeyOwnerCalled != nil { + return sdps.GetBlsKeyOwnerCalled(blsKey) + } + return "", nil +} + +// GetNumOfValidatorsInCurrentEpoch - +func (sdps *StakingDataProviderStub) GetNumOfValidatorsInCurrentEpoch() uint32 { + return 0 +} + +// GetOwnersData - +func (sdps *StakingDataProviderStub) GetOwnersData() map[string]*epochStart.OwnerData { + if sdps.GetOwnersDataCalled != nil { + return sdps.GetOwnersDataCalled() + } + return nil +} + +// EpochConfirmed - +func (sdps *StakingDataProviderStub) EpochConfirmed(uint32, uint64) { +} + // IsInterfaceNil - func (sdps *StakingDataProviderStub) IsInterfaceNil() bool { return sdps == nil diff --git a/node/mock/validatorsProviderStub.go b/testscommon/stakingcommon/validatorsProviderStub.go similarity index 51% rename from node/mock/validatorsProviderStub.go rename to testscommon/stakingcommon/validatorsProviderStub.go index 98ea652340b..0db49b4fde8 100644 --- a/node/mock/validatorsProviderStub.go +++ b/testscommon/stakingcommon/validatorsProviderStub.go @@ -1,12 +1,15 @@ -package mock +package stakingcommon import ( "github.com/multiversx/mx-chain-core-go/data/validator" + "github.com/multiversx/mx-chain-go/common" ) // ValidatorsProviderStub - type ValidatorsProviderStub struct { GetLatestValidatorsCalled func() map[string]*validator.ValidatorStatistics + GetAuctionListCalled func() ([]*common.AuctionListValidatorAPIResponse, error) + ForceUpdateCalled func() error } // GetLatestValidators - @@ -14,6 +17,25 @@ func (vp *ValidatorsProviderStub) GetLatestValidators() map[string]*validator.Va if vp.GetLatestValidatorsCalled != nil { return vp.GetLatestValidatorsCalled() } + + return nil +} + +// GetAuctionList - +func (vp *ValidatorsProviderStub) GetAuctionList() ([]*common.AuctionListValidatorAPIResponse, error) { + if vp.GetAuctionListCalled != nil { + return vp.GetAuctionListCalled() + } + + return nil, nil +} + +// ForceUpdate - +func (vp *ValidatorsProviderStub) ForceUpdate() error { + if vp.ForceUpdateCalled != nil { + return vp.ForceUpdateCalled() + } + return nil } diff --git a/testscommon/state/accountAdapterStub.go b/testscommon/state/accountAdapterStub.go index 433722f7e21..fa9305f8222 100644 --- a/testscommon/state/accountAdapterStub.go +++ b/testscommon/state/accountAdapterStub.go @@ -177,14 +177,14 @@ func (aas 
*StateUserAccountHandlerStub) ClaimDeveloperRewards(senderAddr []byte) return nil, nil } -//AddToDeveloperReward - +// AddToDeveloperReward - func (aas *StateUserAccountHandlerStub) AddToDeveloperReward(val *big.Int) { if aas.AddToDeveloperRewardCalled != nil { aas.AddToDeveloperRewardCalled(val) } } -//GetDeveloperReward - +// GetDeveloperReward - func (aas *StateUserAccountHandlerStub) GetDeveloperReward() *big.Int { if aas.GetDeveloperRewardCalled != nil { return aas.GetDeveloperRewardCalled() @@ -230,7 +230,7 @@ func (aas *StateUserAccountHandlerStub) GetUserName() []byte { return nil } -//IsGuarded - +// IsGuarded - func (aas *StateUserAccountHandlerStub) IsGuarded() bool { if aas.IsGuardedCalled != nil { return aas.IsGuardedCalled() diff --git a/testscommon/state/peerAccountHandlerMock.go b/testscommon/state/peerAccountHandlerMock.go index b3283be1280..870836cc00d 100644 --- a/testscommon/state/peerAccountHandlerMock.go +++ b/testscommon/state/peerAccountHandlerMock.go @@ -14,6 +14,7 @@ type PeerAccountHandlerMock struct { IncreaseValidatorSuccessRateValue uint32 DecreaseValidatorSuccessRateValue uint32 IncreaseValidatorIgnoredSignaturesValue uint32 + PreviousList string IncreaseLeaderSuccessRateCalled func(uint32) DecreaseLeaderSuccessRateCalled func(uint32) @@ -52,11 +53,26 @@ func (p *PeerAccountHandlerMock) GetList() string { return "" } +// GetPreviousList - +func (p *PeerAccountHandlerMock) GetPreviousList() string { + return "" +} + // GetIndexInList - func (p *PeerAccountHandlerMock) GetIndexInList() uint32 { return 0 } +// GetPreviousIndexInList - +func (p *PeerAccountHandlerMock) GetPreviousIndexInList() uint32 { + return 0 +} + +// GetBLSPublicKey - +func (p *PeerAccountHandlerMock) GetBLSPublicKey() []byte { + return nil +} + // SetBLSPublicKey - func (p *PeerAccountHandlerMock) SetBLSPublicKey([]byte) error { return nil @@ -290,13 +306,18 @@ func (p *PeerAccountHandlerMock) SetConsecutiveProposerMisses(consecutiveMisses } // SetListAndIndex - -func (p *PeerAccountHandlerMock) SetListAndIndex(shardID uint32, list string, index uint32) { +func (p *PeerAccountHandlerMock) SetListAndIndex(shardID uint32, list string, index uint32, _ bool) { if p.SetListAndIndexCalled != nil { p.SetListAndIndexCalled(shardID, list, index) } } +// SetPreviousList - +func (p *PeerAccountHandlerMock) SetPreviousList(list string) { + p.PreviousList = list +} + // IsInterfaceNil - func (p *PeerAccountHandlerMock) IsInterfaceNil() bool { - return false + return p == nil } diff --git a/testscommon/state/userAccountStub.go b/testscommon/state/userAccountStub.go index 90fc1f88ab3..ec386a68dbe 100644 --- a/testscommon/state/userAccountStub.go +++ b/testscommon/state/userAccountStub.go @@ -30,6 +30,7 @@ type UserAccountStub struct { RetrieveValueCalled func(_ []byte) ([]byte, uint32, error) SetDataTrieCalled func(dataTrie common.Trie) GetRootHashCalled func() []byte + SaveKeyValueCalled func(key []byte, value []byte) error } // HasNewCode - @@ -172,7 +173,10 @@ func (u *UserAccountStub) RetrieveValue(key []byte) ([]byte, uint32, error) { } // SaveKeyValue - -func (u *UserAccountStub) SaveKeyValue(_ []byte, _ []byte) error { +func (u *UserAccountStub) SaveKeyValue(key []byte, value []byte) error { + if u.SaveKeyValueCalled != nil { + return u.SaveKeyValueCalled(key, value) + } return nil } diff --git a/testscommon/stateStatisticsHandlerStub.go b/testscommon/stateStatisticsHandlerStub.go new file mode 100644 index 00000000000..bc13bea90d4 --- /dev/null +++ b/testscommon/stateStatisticsHandlerStub.go @@ 
-0,0 +1,136 @@ +package testscommon + +// StateStatisticsHandlerStub - +type StateStatisticsHandlerStub struct { + ResetCalled func() + ResetSnapshotCalled func() + IncrementCacheCalled func() + CacheCalled func() uint64 + IncrementSnapshotCacheCalled func() + SnapshotCacheCalled func() uint64 + IncrementPersisterCalled func(epoch uint32) + PersisterCalled func(epoch uint32) uint64 + IncrementSnapshotPersisterCalled func(epoch uint32) + SnapshotPersisterCalled func(epoch uint32) uint64 + IncrementTrieCalled func() + TrieCalled func() uint64 + ProcessingStatsCalled func() []string + SnapshotStatsCalled func() []string +} + +// Reset - +func (stub *StateStatisticsHandlerStub) Reset() { + if stub.ResetCalled != nil { + stub.ResetCalled() + } +} + +// ResetSnapshot - +func (stub *StateStatisticsHandlerStub) ResetSnapshot() { + if stub.ResetSnapshotCalled != nil { + stub.ResetSnapshotCalled() + } +} + +// IncrementCache - +func (stub *StateStatisticsHandlerStub) IncrementCache() { + if stub.IncrementCacheCalled != nil { + stub.IncrementCacheCalled() + } +} + +// Cache - +func (stub *StateStatisticsHandlerStub) Cache() uint64 { + if stub.CacheCalled != nil { + return stub.CacheCalled() + } + + return 0 +} + +// IncrementSnapshotCache - +func (stub *StateStatisticsHandlerStub) IncrementSnapshotCache() { + if stub.IncrementSnapshotCacheCalled != nil { + stub.IncrementSnapshotCacheCalled() + } +} + +// SnapshotCache - +func (stub *StateStatisticsHandlerStub) SnapshotCache() uint64 { + if stub.SnapshotCacheCalled != nil { + return stub.SnapshotCacheCalled() + } + + return 0 +} + +// IncrementPersister - +func (stub *StateStatisticsHandlerStub) IncrementPersister(epoch uint32) { + if stub.IncrementPersisterCalled != nil { + stub.IncrementPersisterCalled(epoch) + } +} + +// Persister - +func (stub *StateStatisticsHandlerStub) Persister(epoch uint32) uint64 { + if stub.PersisterCalled != nil { + return stub.PersisterCalled(epoch) + } + + return 0 +} + +// IncrementSnapshotPersister - +func (stub *StateStatisticsHandlerStub) IncrementSnapshotPersister(epoch uint32) { + if stub.IncrementSnapshotPersisterCalled != nil { + stub.IncrementSnapshotPersisterCalled(epoch) + } +} + +// SnapshotPersister - +func (stub *StateStatisticsHandlerStub) SnapshotPersister(epoch uint32) uint64 { + if stub.SnapshotPersisterCalled != nil { + return stub.SnapshotPersisterCalled(epoch) + } + + return 0 +} + +// IncrementTrie - +func (stub *StateStatisticsHandlerStub) IncrementTrie() { + if stub.IncrementTrieCalled != nil { + stub.IncrementTrieCalled() + } +} + +// Trie - +func (stub *StateStatisticsHandlerStub) Trie() uint64 { + if stub.TrieCalled != nil { + return stub.TrieCalled() + } + + return 0 +} + +// ProcessingStats - +func (stub *StateStatisticsHandlerStub) ProcessingStats() []string { + if stub.ProcessingStatsCalled != nil { + return stub.ProcessingStatsCalled() + } + + return make([]string, 0) +} + +// SnapshotStats - +func (stub *StateStatisticsHandlerStub) SnapshotStats() []string { + if stub.SnapshotStatsCalled != nil { + return stub.SnapshotStatsCalled() + } + + return make([]string, 0) +} + +// IsInterfaceNil - +func (stub *StateStatisticsHandlerStub) IsInterfaceNil() bool { + return stub == nil +} diff --git a/testscommon/storageManager/storageManagerStub.go b/testscommon/storageManager/storageManagerStub.go index b14d6c460a6..60e10541da6 100644 --- a/testscommon/storageManager/storageManagerStub.go +++ b/testscommon/storageManager/storageManagerStub.go @@ -7,30 +7,30 @@ import ( // StorageManagerStub - type 
StorageManagerStub struct { - PutCalled func([]byte, []byte) error - PutInEpochCalled func([]byte, []byte, uint32) error - PutInEpochWithoutCacheCalled func([]byte, []byte, uint32) error - GetCalled func([]byte) ([]byte, error) - GetFromCurrentEpochCalled func([]byte) ([]byte, error) - TakeSnapshotCalled func(string, []byte, []byte, *common.TrieIteratorChannels, chan []byte, common.SnapshotStatisticsHandler, uint32) - GetDbThatContainsHashCalled func([]byte) common.BaseStorer - IsPruningEnabledCalled func() bool - IsPruningBlockedCalled func() bool - EnterPruningBufferingModeCalled func() - ExitPruningBufferingModeCalled func() - RemoveFromCurrentEpochCalled func([]byte) error - RemoveCalled func([]byte) error - IsInterfaceNilCalled func() bool - SetEpochForPutOperationCalled func(uint32) - ShouldTakeSnapshotCalled func() bool - GetLatestStorageEpochCalled func() (uint32, error) - IsClosedCalled func() bool - GetBaseTrieStorageManagerCalled func() common.StorageManager - GetIdentifierCalled func() string - CloseCalled func() error - RemoveFromAllActiveEpochsCalled func(hash []byte) error - IsSnapshotSupportedCalled func() bool - GetStateStatsHandlerCalled func() common.StateStatisticsHandler + PutCalled func([]byte, []byte) error + PutInEpochCalled func([]byte, []byte, uint32) error + PutInEpochWithoutCacheCalled func([]byte, []byte, uint32) error + GetCalled func([]byte) ([]byte, error) + GetFromCurrentEpochCalled func([]byte) ([]byte, error) + TakeSnapshotCalled func(string, []byte, []byte, *common.TrieIteratorChannels, chan []byte, common.SnapshotStatisticsHandler, uint32) + GetDbThatContainsHashCalled func([]byte) common.BaseStorer + IsPruningEnabledCalled func() bool + IsPruningBlockedCalled func() bool + EnterPruningBufferingModeCalled func() + ExitPruningBufferingModeCalled func() + RemoveFromCurrentEpochCalled func([]byte) error + RemoveCalled func([]byte) error + IsInterfaceNilCalled func() bool + SetEpochForPutOperationCalled func(uint32) + ShouldTakeSnapshotCalled func() bool + GetLatestStorageEpochCalled func() (uint32, error) + IsClosedCalled func() bool + GetBaseTrieStorageManagerCalled func() common.StorageManager + GetIdentifierCalled func() string + CloseCalled func() error + RemoveFromAllActiveEpochsCalled func(hash []byte) error + IsSnapshotSupportedCalled func() bool + GetStateStatsHandlerCalled func() common.StateStatisticsHandler } // Put - diff --git a/testscommon/tableDisplayerMock.go b/testscommon/tableDisplayerMock.go new file mode 100644 index 00000000000..813c3e11fc5 --- /dev/null +++ b/testscommon/tableDisplayerMock.go @@ -0,0 +1,19 @@ +package testscommon + +import "github.com/multiversx/mx-chain-core-go/display" + +// TableDisplayerMock - +type TableDisplayerMock struct { + DisplayTableCalled func(tableHeader []string, lines []*display.LineData, message string) +} + +// DisplayTable - +func (mock *TableDisplayerMock) DisplayTable(tableHeader []string, lines []*display.LineData, message string) { + if mock.DisplayTableCalled != nil { + mock.DisplayTableCalled(tableHeader, lines, message) + } +} + +func (mock *TableDisplayerMock) IsInterfaceNil() bool { + return mock == nil +} diff --git a/testscommon/testConfigs.go b/testscommon/testConfigs.go new file mode 100644 index 00000000000..fc0840e5237 --- /dev/null +++ b/testscommon/testConfigs.go @@ -0,0 +1,36 @@ +package testscommon + +import "github.com/multiversx/mx-chain-go/config" + +// GetDefaultRoundsConfig - +func GetDefaultRoundsConfig() config.RoundConfig { + return config.RoundConfig{ + RoundActivations: 
map[string]config.ActivationRoundByName{ + "DisableAsyncCallV1": { + Round: "18446744073709551615", + }, + }, + } +} + +// GetDefaultHeaderVersionConfig - +func GetDefaultHeaderVersionConfig() config.VersionsConfig { + return config.VersionsConfig{ + DefaultVersion: "default", + VersionsByEpochs: []config.VersionByEpochs{ + { + StartEpoch: 0, + Version: "*", + }, + { + StartEpoch: 1, + Version: "2", + }, + }, + Cache: config.CacheConfig{ + Name: "VersionsCache", + Type: "LRU", + Capacity: 100, + }, + } +} diff --git a/testscommon/transactionCoordinatorMock.go b/testscommon/transactionCoordinatorMock.go index e5a52257c67..cd25a769912 100644 --- a/testscommon/transactionCoordinatorMock.go +++ b/testscommon/transactionCoordinatorMock.go @@ -33,6 +33,8 @@ type TransactionCoordinatorMock struct { GetAllIntermediateTxsCalled func() map[block.Type]map[string]data.TransactionHandler AddTxsFromMiniBlocksCalled func(miniBlocks block.MiniBlockSlice) AddTransactionsCalled func(txHandlers []data.TransactionHandler, blockType block.Type) + + miniBlocks []*block.MiniBlock } // GetAllCurrentLogs - @@ -45,7 +47,7 @@ func (tcm *TransactionCoordinatorMock) CreatePostProcessMiniBlocks() block.MiniB if tcm.CreatePostProcessMiniBlocksCalled != nil { return tcm.CreatePostProcessMiniBlocksCalled() } - return nil + return tcm.miniBlocks } // CreateReceiptsHash - @@ -233,6 +235,7 @@ func (tcm *TransactionCoordinatorMock) GetAllIntermediateTxs() map[block.Type]ma // AddTxsFromMiniBlocks - func (tcm *TransactionCoordinatorMock) AddTxsFromMiniBlocks(miniBlocks block.MiniBlockSlice) { if tcm.AddTxsFromMiniBlocksCalled == nil { + tcm.miniBlocks = append(tcm.miniBlocks, miniBlocks...) return } @@ -248,6 +251,11 @@ func (tcm *TransactionCoordinatorMock) AddTransactions(txHandlers []data.Transac tcm.AddTransactionsCalled(txHandlers, blockType) } +// ClearStoredMbs - +func (tcm *TransactionCoordinatorMock) ClearStoredMbs() { + tcm.miniBlocks = make([]*block.MiniBlock, 0) +} + // IsInterfaceNil returns true if there is no value under the interface func (tcm *TransactionCoordinatorMock) IsInterfaceNil() bool { return tcm == nil diff --git a/testscommon/txDataBuilder/builder.go b/testscommon/txDataBuilder/builder.go index 49916cd5a1c..3198792ac57 100644 --- a/testscommon/txDataBuilder/builder.go +++ b/testscommon/txDataBuilder/builder.go @@ -5,6 +5,7 @@ import ( "math/big" "github.com/multiversx/mx-chain-core-go/core" + vmcommon "github.com/multiversx/mx-chain-vm-common-go" ) // TxDataBuilder constructs a string to be used for transaction arguments @@ -176,11 +177,20 @@ func (builder *TxDataBuilder) TransferESDT(token string, value int64) *TxDataBui return builder.Func(core.BuiltInFunctionESDTTransfer).Str(token).Int64(value) } -//TransferESDTNFT appends to the data string all the elements required to request an ESDT NFT transfer. +// TransferESDTNFT appends to the data string all the elements required to request an ESDT NFT transfer. func (builder *TxDataBuilder) TransferESDTNFT(token string, nonce int, value int64) *TxDataBuilder { return builder.Func(core.BuiltInFunctionESDTNFTTransfer).Str(token).Int(nonce).Int64(value) } +// MultiTransferESDTNFT appends to the data string all the elements required to request a multi ESDT NFT transfer.
+func (builder *TxDataBuilder) MultiTransferESDTNFT(destinationAddress []byte, transfers []*vmcommon.ESDTTransfer) *TxDataBuilder { + txBuilder := builder.Func(core.BuiltInFunctionMultiESDTNFTTransfer).Bytes(destinationAddress).Int(len(transfers)) + for _, transfer := range transfers { + txBuilder.Bytes(transfer.ESDTTokenName).Int(int(transfer.ESDTTokenNonce)).BigInt(transfer.ESDTValue) + } + return txBuilder +} + // BurnESDT appends to the data string all the elements required to burn ESDT tokens. func (builder *TxDataBuilder) BurnESDT(token string, value int64) *TxDataBuilder { return builder.Func(core.BuiltInFunctionESDTBurn).Str(token).Int64(value) diff --git a/process/mock/validatorStatisticsProcessorStub.go b/testscommon/validatorStatisticsProcessorStub.go similarity index 84% rename from process/mock/validatorStatisticsProcessorStub.go rename to testscommon/validatorStatisticsProcessorStub.go index b3e4f947da0..4d588610d31 100644 --- a/process/mock/validatorStatisticsProcessorStub.go +++ b/testscommon/validatorStatisticsProcessorStub.go @@ -1,4 +1,4 @@ -package mock +package testscommon import ( "github.com/multiversx/mx-chain-core-go/data" @@ -12,23 +12,15 @@ type ValidatorStatisticsProcessorStub struct { GetPeerAccountCalled func(address []byte) (state.PeerAccountHandler, error) RootHashCalled func() ([]byte, error) LastFinalizedRootHashCalled func() []byte - ResetValidatorStatisticsAtNewEpochCalled func(vInfos map[uint32][]*state.ValidatorInfo) error - GetValidatorInfoForRootHashCalled func(rootHash []byte) (map[uint32][]*state.ValidatorInfo, error) - ProcessRatingsEndOfEpochCalled func(validatorInfos map[uint32][]*state.ValidatorInfo, epoch uint32) error + ResetValidatorStatisticsAtNewEpochCalled func(vInfos state.ShardValidatorsInfoMapHandler) error + GetValidatorInfoForRootHashCalled func(rootHash []byte) (state.ShardValidatorsInfoMapHandler, error) + ProcessRatingsEndOfEpochCalled func(validatorInfos state.ShardValidatorsInfoMapHandler, epoch uint32) error ProcessCalled func(validatorInfo data.ShardValidatorInfoHandler) error CommitCalled func() ([]byte, error) PeerAccountToValidatorInfoCalled func(peerAccount state.PeerAccountHandler) *state.ValidatorInfo SaveNodesCoordinatorUpdatesCalled func(epoch uint32) (bool, error) } -// SaveNodesCoordinatorUpdates - -func (vsp *ValidatorStatisticsProcessorStub) SaveNodesCoordinatorUpdates(epoch uint32) (bool, error) { - if vsp.SaveNodesCoordinatorUpdatesCalled != nil { - return vsp.SaveNodesCoordinatorUpdatesCalled(epoch) - } - return false, nil -} - // PeerAccountToValidatorInfo - func (vsp *ValidatorStatisticsProcessorStub) PeerAccountToValidatorInfo(peerAccount state.PeerAccountHandler) *state.ValidatorInfo { if vsp.PeerAccountToValidatorInfoCalled != nil { @@ -56,7 +48,7 @@ func (vsp *ValidatorStatisticsProcessorStub) Commit() ([]byte, error) { } // ResetValidatorStatisticsAtNewEpoch - -func (vsp *ValidatorStatisticsProcessorStub) ResetValidatorStatisticsAtNewEpoch(vInfos map[uint32][]*state.ValidatorInfo) error { +func (vsp *ValidatorStatisticsProcessorStub) ResetValidatorStatisticsAtNewEpoch(vInfos state.ShardValidatorsInfoMapHandler) error { if vsp.ResetValidatorStatisticsAtNewEpochCalled != nil { return vsp.ResetValidatorStatisticsAtNewEpochCalled(vInfos) } @@ -64,19 +56,11 @@ func (vsp *ValidatorStatisticsProcessorStub) ResetValidatorStatisticsAtNewEpoch( } // GetValidatorInfoForRootHash - -func (vsp *ValidatorStatisticsProcessorStub) GetValidatorInfoForRootHash(rootHash []byte) (map[uint32][]*state.ValidatorInfo, error) { 
+func (vsp *ValidatorStatisticsProcessorStub) GetValidatorInfoForRootHash(rootHash []byte) (state.ShardValidatorsInfoMapHandler, error) { if vsp.GetValidatorInfoForRootHashCalled != nil { return vsp.GetValidatorInfoForRootHashCalled(rootHash) } - return nil, nil -} - -// ProcessRatingsEndOfEpoch - -func (vsp *ValidatorStatisticsProcessorStub) ProcessRatingsEndOfEpoch(validatorInfos map[uint32][]*state.ValidatorInfo, epoch uint32) error { - if vsp.ProcessRatingsEndOfEpochCalled != nil { - return vsp.ProcessRatingsEndOfEpochCalled(validatorInfos, epoch) - } - return nil + return state.NewShardValidatorsInfoMap(), nil } // UpdatePeerState - @@ -87,6 +71,14 @@ func (vsp *ValidatorStatisticsProcessorStub) UpdatePeerState(header data.MetaHea return nil, nil } +// ProcessRatingsEndOfEpoch - +func (vsp *ValidatorStatisticsProcessorStub) ProcessRatingsEndOfEpoch(validatorInfos state.ShardValidatorsInfoMapHandler, epoch uint32) error { + if vsp.ProcessRatingsEndOfEpochCalled != nil { + return vsp.ProcessRatingsEndOfEpochCalled(validatorInfos, epoch) + } + return nil +} + // RevertPeerState - func (vsp *ValidatorStatisticsProcessorStub) RevertPeerState(header data.MetaHeaderHandler) error { if vsp.RevertPeerStateCalled != nil { @@ -103,8 +95,20 @@ func (vsp *ValidatorStatisticsProcessorStub) RootHash() ([]byte, error) { return nil, nil } -// GetExistingPeerAccount - -func (vsp *ValidatorStatisticsProcessorStub) GetExistingPeerAccount(address []byte) (state.PeerAccountHandler, error) { +// SetLastFinalizedRootHash - +func (vsp *ValidatorStatisticsProcessorStub) SetLastFinalizedRootHash(_ []byte) { +} + +// LastFinalizedRootHash - +func (vsp *ValidatorStatisticsProcessorStub) LastFinalizedRootHash() []byte { + if vsp.LastFinalizedRootHashCalled != nil { + return vsp.LastFinalizedRootHashCalled() + } + return nil +} + +// GetPeerAccount - +func (vsp *ValidatorStatisticsProcessorStub) GetPeerAccount(address []byte) (state.PeerAccountHandler, error) { if vsp.GetPeerAccountCalled != nil { return vsp.GetPeerAccountCalled(address) } @@ -116,19 +120,15 @@ func (vsp *ValidatorStatisticsProcessorStub) GetExistingPeerAccount(address []by func (vsp *ValidatorStatisticsProcessorStub) DisplayRatings(_ uint32) { } -// SetLastFinalizedRootHash - -func (vsp *ValidatorStatisticsProcessorStub) SetLastFinalizedRootHash(_ []byte) { -} - -// LastFinalizedRootHash - -func (vsp *ValidatorStatisticsProcessorStub) LastFinalizedRootHash() []byte { - if vsp.LastFinalizedRootHashCalled != nil { - return vsp.LastFinalizedRootHashCalled() +// SaveNodesCoordinatorUpdates - +func (vsp *ValidatorStatisticsProcessorStub) SaveNodesCoordinatorUpdates(epoch uint32) (bool, error) { + if vsp.SaveNodesCoordinatorUpdatesCalled != nil { + return vsp.SaveNodesCoordinatorUpdatesCalled(epoch) } - return nil + return false, nil } // IsInterfaceNil - func (vsp *ValidatorStatisticsProcessorStub) IsInterfaceNil() bool { - return false + return vsp == nil } diff --git a/testscommon/vmcommonMocks/userAccountStub.go b/testscommon/vmcommonMocks/userAccountStub.go index 6fb0b1f4d85..8f1eabf8a7f 100644 --- a/testscommon/vmcommonMocks/userAccountStub.go +++ b/testscommon/vmcommonMocks/userAccountStub.go @@ -159,7 +159,7 @@ func (uas *UserAccountStub) GetNonce() uint64 { return 0 } -//IsInterfaceNil - +// IsInterfaceNil - func (uas *UserAccountStub) IsInterfaceNil() bool { return uas == nil } diff --git a/trie/node.go b/trie/node.go index 6d82a238e95..754b3b3548d 100644 --- a/trie/node.go +++ b/trie/node.go @@ -152,7 +152,7 @@ func resolveIfCollapsed(n node, 
pos byte, db common.TrieStorageInteractor) error func handleStorageInteractorStats(db common.TrieStorageInteractor) { if db != nil { - db.GetStateStatsHandler().IncrTrie() + db.GetStateStatsHandler().IncrementTrie() } } diff --git a/trie/node_extension.go b/trie/node_extension.go index 4e7b38a6a7d..ffbdab699ad 100644 --- a/trie/node_extension.go +++ b/trie/node_extension.go @@ -26,8 +26,8 @@ func shouldTestNode(n node, key []byte) bool { } func snapshotGetTestPoint(key []byte, faultyChance int) error { - rand.Seed(time.Now().UnixNano()) - checkVal := rand.Intn(math.MaxInt) + rnd := rand.New(rand.NewSource(time.Now().UnixNano())) + checkVal := rnd.Intn(math.MaxInt) if checkVal%faultyChance == 0 { log.Debug("deliberately not returning hash", "hash", key) return fmt.Errorf("snapshot get error") diff --git a/trie/patriciaMerkleTrie.go b/trie/patriciaMerkleTrie.go index 485b01bf199..0f875999bd1 100644 --- a/trie/patriciaMerkleTrie.go +++ b/trie/patriciaMerkleTrie.go @@ -399,6 +399,12 @@ func (tr *patriciaMerkleTrie) recreateFromDb(rootHash []byte, tsm common.Storage // GetSerializedNode returns the serialized node (if existing) provided the node's hash func (tr *patriciaMerkleTrie) GetSerializedNode(hash []byte) ([]byte, error) { + // TODO: investigate if we can move the critical section behavior in the trie node resolver as this call will compete with a normal trie.Get operation + // which might occur during processing. + // warning: A critical section here or on the trie node resolver must be kept as to not overwhelm the node with requests that affects the block processing flow + tr.mutOperation.Lock() + defer tr.mutOperation.Unlock() + log.Trace("GetSerializedNode", "hash", hash) return tr.trieStorage.Get(hash) @@ -406,6 +412,12 @@ func (tr *patriciaMerkleTrie) GetSerializedNode(hash []byte) ([]byte, error) { // GetSerializedNodes returns a batch of serialized nodes from the trie, starting from the given hash func (tr *patriciaMerkleTrie) GetSerializedNodes(rootHash []byte, maxBuffToSend uint64) ([][]byte, uint64, error) { + // TODO: investigate if we can move the critical section behavior in the trie node resolver as this call will compete with a normal trie.Get operation + // which might occur during processing. 
+ // warning: A critical section here or on the trie node resolver must be kept as to not overwhelm the node with requests that affects the block processing flow + tr.mutOperation.Lock() + defer tr.mutOperation.Unlock() + log.Trace("GetSerializedNodes", "rootHash", rootHash) size := uint64(0) diff --git a/trie/patriciaMerkleTrie_test.go b/trie/patriciaMerkleTrie_test.go index 3443858e7e7..63278d43a1f 100644 --- a/trie/patriciaMerkleTrie_test.go +++ b/trie/patriciaMerkleTrie_test.go @@ -9,6 +9,7 @@ import ( "strconv" "strings" "sync" + "sync/atomic" "testing" "time" @@ -22,7 +23,7 @@ import ( errorsCommon "github.com/multiversx/mx-chain-go/errors" "github.com/multiversx/mx-chain-go/state/parsers" "github.com/multiversx/mx-chain-go/testscommon/enableEpochsHandlerMock" - "github.com/multiversx/mx-chain-go/testscommon/storage" + "github.com/multiversx/mx-chain-go/testscommon/storageManager" trieMock "github.com/multiversx/mx-chain-go/testscommon/trie" "github.com/multiversx/mx-chain-go/trie" "github.com/multiversx/mx-chain-go/trie/keyBuilder" @@ -1058,64 +1059,56 @@ func TestPatriciaMerkleTrie_ConcurrentOperations(t *testing.T) { wg.Wait() } -func TestPatriciaMerkleTrie_GetSerializedNodesClose(t *testing.T) { +func TestPatriciaMerkleTrie_GetSerializedNodesShouldSerializeTheCalls(t *testing.T) { t.Parallel() args := trie.GetDefaultTrieStorageManagerParameters() - args.MainStorer = &storage.StorerStub{ - GetCalled: func(key []byte) ([]byte, error) { - // gets take a long time + numConcurrentCalls := int32(0) + testTrieStorageManager := &storageManager.StorageManagerStub{ + GetCalled: func(bytes []byte) ([]byte, error) { + newValue := atomic.AddInt32(&numConcurrentCalls, 1) + defer atomic.AddInt32(&numConcurrentCalls, -1) + + assert.Equal(t, int32(1), newValue) + + // get takes a long time time.Sleep(time.Millisecond * 10) - return key, nil + + return bytes, nil }, } - trieStorageManager, _ := trie.NewTrieStorageManager(args) - tr, _ := trie.NewTrie(trieStorageManager, args.Marshalizer, args.Hasher, &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, 5) - numGoRoutines := 1000 - wgStart := sync.WaitGroup{} - wgStart.Add(numGoRoutines) - wgEnd := sync.WaitGroup{} - wgEnd.Add(numGoRoutines) + tr, _ := trie.NewTrie(testTrieStorageManager, args.Marshalizer, args.Hasher, &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, 5) + numGoRoutines := 100 + wg := sync.WaitGroup{} + wg.Add(numGoRoutines) for i := 0; i < numGoRoutines; i++ { if i%2 == 0 { go func() { time.Sleep(time.Millisecond * 100) - wgStart.Done() - _, _, _ = tr.GetSerializedNodes([]byte("dog"), 1024) - wgEnd.Done() + wg.Done() }() } else { go func() { time.Sleep(time.Millisecond * 100) - wgStart.Done() - _, _ = tr.GetSerializedNode([]byte("dog")) - wgEnd.Done() + wg.Done() }() } } - wgStart.Wait() + wg.Wait() chanClosed := make(chan struct{}) go func() { _ = tr.Close() close(chanClosed) }() - chanGetsEnded := make(chan struct{}) - go func() { - wgEnd.Wait() - close(chanGetsEnded) - }() - timeout := time.Second * 10 select { case <-chanClosed: // ok - case <-chanGetsEnded: - assert.Fail(t, "trie should have been closed before all gets ended") case <-time.After(timeout): assert.Fail(t, "timeout waiting for trie to be closed") } diff --git a/update/genesis/common.go b/update/genesis/common.go index 2ce58de50af..d8d3b11ca0e 100644 --- a/update/genesis/common.go +++ b/update/genesis/common.go @@ -6,7 +6,7 @@ import ( "github.com/multiversx/mx-chain-core-go/core" "github.com/multiversx/mx-chain-core-go/marshal" 
"github.com/multiversx/mx-chain-go/common" - "github.com/multiversx/mx-chain-go/sharding" + "github.com/multiversx/mx-chain-go/state" "github.com/multiversx/mx-chain-go/state/accounts" ) @@ -14,25 +14,20 @@ import ( // TODO: create a structure or use this function also in process/peer/process.go func getValidatorDataFromLeaves( leavesChannels *common.TrieIteratorChannels, - shardCoordinator sharding.Coordinator, marshalizer marshal.Marshalizer, -) (map[uint32][]*state.ValidatorInfo, error) { - - validators := make(map[uint32][]*state.ValidatorInfo, shardCoordinator.NumberOfShards()+1) - for i := uint32(0); i < shardCoordinator.NumberOfShards(); i++ { - validators[i] = make([]*state.ValidatorInfo, 0) - } - validators[core.MetachainShardId] = make([]*state.ValidatorInfo, 0) - +) (state.ShardValidatorsInfoMapHandler, error) { + validators := state.NewShardValidatorsInfoMap() for pa := range leavesChannels.LeavesChan { peerAccount, err := unmarshalPeer(pa, marshalizer) if err != nil { return nil, err } - currentShardId := peerAccount.GetShardId() validatorInfoData := peerAccountToValidatorInfo(peerAccount) - validators[currentShardId] = append(validators[currentShardId], validatorInfoData) + err = validators.Add(validatorInfoData) + if err != nil { + return nil, err + } } err := leavesChannels.ErrChan.ReadFromChanNonBlocking() @@ -60,7 +55,9 @@ func peerAccountToValidatorInfo(peerAccount state.PeerAccountHandler) *state.Val PublicKey: peerAccount.AddressBytes(), ShardId: peerAccount.GetShardId(), List: getActualList(peerAccount), + PreviousList: peerAccount.GetPreviousList(), Index: peerAccount.GetIndexInList(), + PreviousIndex: peerAccount.GetPreviousIndexInList(), TempRating: peerAccount.GetTempRating(), Rating: peerAccount.GetRating(), RewardAddress: peerAccount.GetRewardAddress(), @@ -92,7 +89,7 @@ func getActualList(peerAccount state.PeerAccountHandler) string { return string(common.LeavingList) } -func shouldExportValidator(validator *state.ValidatorInfo, allowedLists []common.PeerType) bool { +func shouldExportValidator(validator state.ValidatorInfoHandler, allowedLists []common.PeerType) bool { validatorList := validator.GetList() for _, list := range allowedLists { diff --git a/update/genesis/export.go b/update/genesis/export.go index 0f5c469afc9..ba4e678a0f8 100644 --- a/update/genesis/export.go +++ b/update/genesis/export.go @@ -311,8 +311,7 @@ func (se *stateExport) exportTrie(key string, trie common.Trie) error { } if accType == ValidatorAccount { - var validatorData map[uint32][]*state.ValidatorInfo - validatorData, err = getValidatorDataFromLeaves(leavesChannels, se.shardCoordinator, se.marshalizer) + validatorData, err := getValidatorDataFromLeaves(leavesChannels, se.marshalizer) if err != nil { return err } @@ -443,30 +442,28 @@ func (se *stateExport) exportValidatorInfo(key string, validatorInfo *state.Shar return nil } -func (se *stateExport) exportNodesSetupJson(validators map[uint32][]*state.ValidatorInfo) error { +func (se *stateExport) exportNodesSetupJson(validators state.ShardValidatorsInfoMapHandler) error { acceptedListsForExport := []common.PeerType{common.EligibleList, common.WaitingList, common.JailedList} initialNodes := make([]*sharding.InitialNode, 0) - for _, validatorsInShard := range validators { - for _, validator := range validatorsInShard { - if shouldExportValidator(validator, acceptedListsForExport) { - - pubKey, err := se.validatorPubKeyConverter.Encode(validator.GetPublicKey()) - if err != nil { - return nil - } - - rewardAddress, err := 
se.addressPubKeyConverter.Encode(validator.GetRewardAddress()) - if err != nil { - return nil - } - - initialNodes = append(initialNodes, &sharding.InitialNode{ - PubKey: pubKey, - Address: rewardAddress, - InitialRating: validator.GetRating(), - }) + for _, validator := range validators.GetAllValidatorsInfo() { + if shouldExportValidator(validator, acceptedListsForExport) { + + pubKey, err := se.validatorPubKeyConverter.Encode(validator.GetPublicKey()) + if err != nil { + return nil + } + + rewardAddress, err := se.addressPubKeyConverter.Encode(validator.GetRewardAddress()) + if err != nil { + return nil } + + initialNodes = append(initialNodes, &sharding.InitialNode{ + PubKey: pubKey, + Address: rewardAddress, + InitialRating: validator.GetRating(), + }) } } diff --git a/update/genesis/export_test.go b/update/genesis/export_test.go index f1fca206504..bad77b07959 100644 --- a/update/genesis/export_test.go +++ b/update/genesis/export_test.go @@ -389,16 +389,17 @@ func TestStateExport_ExportNodesSetupJsonShouldExportKeysInAlphabeticalOrder(t * require.False(t, check.IfNil(stateExporter)) - vals := make(map[uint32][]*state.ValidatorInfo) - val50 := &state.ValidatorInfo{ShardId: 5, PublicKey: []byte("aaa"), List: string(common.EligibleList)} - val51 := &state.ValidatorInfo{ShardId: 5, PublicKey: []byte("bbb"), List: string(common.EligibleList)} - val10 := &state.ValidatorInfo{ShardId: 5, PublicKey: []byte("ccc"), List: string(common.EligibleList)} - val11 := &state.ValidatorInfo{ShardId: 5, PublicKey: []byte("ddd"), List: string(common.EligibleList)} - val00 := &state.ValidatorInfo{ShardId: 5, PublicKey: []byte("aaaaaa"), List: string(common.EligibleList)} - val01 := &state.ValidatorInfo{ShardId: 5, PublicKey: []byte("bbbbbb"), List: string(common.EligibleList)} - vals[1] = []*state.ValidatorInfo{val50, val51} - vals[0] = []*state.ValidatorInfo{val00, val01} - vals[2] = []*state.ValidatorInfo{val10, val11} + vals := state.NewShardValidatorsInfoMap() + val50 := &state.ValidatorInfo{ShardId: 0, PublicKey: []byte("aaa"), List: string(common.EligibleList)} + val51 := &state.ValidatorInfo{ShardId: 0, PublicKey: []byte("bbb"), List: string(common.EligibleList)} + val10 := &state.ValidatorInfo{ShardId: 1, PublicKey: []byte("ccc"), List: string(common.EligibleList)} + val11 := &state.ValidatorInfo{ShardId: 1, PublicKey: []byte("ddd"), List: string(common.EligibleList)} + val00 := &state.ValidatorInfo{ShardId: 2, PublicKey: []byte("aaaaaa"), List: string(common.EligibleList)} + val01 := &state.ValidatorInfo{ShardId: 2, PublicKey: []byte("bbbbbb"), List: string(common.EligibleList)} + _ = vals.SetValidatorsInShard(0, []state.ValidatorInfoHandler{val50, val51}) + _ = vals.SetValidatorsInShard(1, []state.ValidatorInfoHandler{val10, val11}) + _ = vals.SetValidatorsInShard(2, []state.ValidatorInfoHandler{val00, val01}) + err = stateExporter.exportNodesSetupJson(vals) require.Nil(t, err) diff --git a/vm/errors.go b/vm/errors.go index 341c26e49ad..0e3ea608ed2 100644 --- a/vm/errors.go +++ b/vm/errors.go @@ -267,3 +267,15 @@ var ErrWrongNewOwnerAddress = errors.New("wrong new owner address") // ErrInternalErrorWhileSettingNewOwner signals that an error occurred when setting the new contract owner var ErrInternalErrorWhileSettingNewOwner = errors.New("internal error when setting new contract owner") + +// ErrInvalidStakeLimitPercentage signals the invalid stake limit percentage was provided +var ErrInvalidStakeLimitPercentage = errors.New("invalid stake limit percentage") + +// ErrInvalidNodeLimitPercentage 
signals the invalid node limit percentage was provided +var ErrInvalidNodeLimitPercentage = errors.New("invalid node limit percentage") + +// ErrNilNodesCoordinator signals that nil nodes coordinator was provided +var ErrNilNodesCoordinator = errors.New("nil nodes coordinator") + +// ErrWaitingListDisabled signals that waiting list has been disabled, since staking v4 is active +var ErrWaitingListDisabled = errors.New("waiting list is disabled since staking v4 activation") diff --git a/vm/factory/systemSCFactory.go b/vm/factory/systemSCFactory.go index 0cccff2ce4b..5a6defa2d3c 100644 --- a/vm/factory/systemSCFactory.go +++ b/vm/factory/systemSCFactory.go @@ -31,6 +31,7 @@ type systemSCFactory struct { addressPubKeyConverter core.PubkeyConverter shardCoordinator sharding.Coordinator enableEpochsHandler common.EnableEpochsHandler + nodesCoordinator vm.NodesCoordinator } // ArgsNewSystemSCFactory defines the arguments struct needed to create the system SCs @@ -46,6 +47,7 @@ type ArgsNewSystemSCFactory struct { AddressPubKeyConverter core.PubkeyConverter ShardCoordinator sharding.Coordinator EnableEpochsHandler common.EnableEpochsHandler + NodesCoordinator vm.NodesCoordinator } // NewSystemSCFactory creates a factory which will instantiate the system smart contracts @@ -80,6 +82,9 @@ func NewSystemSCFactory(args ArgsNewSystemSCFactory) (*systemSCFactory, error) { if check.IfNil(args.EnableEpochsHandler) { return nil, fmt.Errorf("%w in NewSystemSCFactory", vm.ErrNilEnableEpochsHandler) } + if check.IfNil(args.NodesCoordinator) { + return nil, fmt.Errorf("%w in NewSystemSCFactory", vm.ErrNilNodesCoordinator) + } scf := &systemSCFactory{ systemEI: args.SystemEI, @@ -92,6 +97,7 @@ func NewSystemSCFactory(args ArgsNewSystemSCFactory) (*systemSCFactory, error) { addressPubKeyConverter: args.AddressPubKeyConverter, shardCoordinator: args.ShardCoordinator, enableEpochsHandler: args.EnableEpochsHandler, + nodesCoordinator: args.NodesCoordinator, } err := scf.createGasConfig(args.GasSchedule.LatestGasSchedule()) @@ -197,6 +203,7 @@ func (scf *systemSCFactory) createValidatorContract() (vm.SystemSmartContract, e GovernanceSCAddress: vm.GovernanceSCAddress, ShardCoordinator: scf.shardCoordinator, EnableEpochsHandler: scf.enableEpochsHandler, + NodesCoordinator: scf.nodesCoordinator, } validatorSC, err := systemSmartContracts.NewValidatorSmartContract(args) return validatorSC, err diff --git a/vm/factory/systemSCFactory_test.go b/vm/factory/systemSCFactory_test.go index 8f16f1a46b1..76c46685cb1 100644 --- a/vm/factory/systemSCFactory_test.go +++ b/vm/factory/systemSCFactory_test.go @@ -65,6 +65,8 @@ func createMockNewSystemScFactoryArgs() ArgsNewSystemSCFactory { MaxNumberOfNodesForStake: 100, ActivateBLSPubKeyMessageVerification: false, MinUnstakeTokensValue: "1", + StakeLimitPercentage: 100.0, + NodeLimitPercentage: 100.0, }, DelegationSystemSCConfig: config.DelegationSystemSCConfig{ MinServiceFee: 0, @@ -75,10 +77,17 @@ func createMockNewSystemScFactoryArgs() ArgsNewSystemSCFactory { MinStakeAmount: "10", ConfigChangeAddress: "3132333435363738393031323334353637383930313233343536373839303234", }, + SoftAuctionConfig: config.SoftAuctionConfig{ + TopUpStep: "10", + MinTopUp: "1", + MaxTopUp: "32000000", + MaxNumberOfIterations: 100000, + }, }, AddressPubKeyConverter: &testscommon.PubkeyConverterMock{}, ShardCoordinator: &mock.ShardCoordinatorStub{}, EnableEpochsHandler: &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, + NodesCoordinator: &mock.NodesCoordinatorStub{}, } } @@ -93,6 +102,17 @@ func 
TestNewSystemSCFactory_NilSystemEI(t *testing.T) { assert.True(t, errors.Is(err, vm.ErrNilSystemEnvironmentInterface)) } +func TestNewSystemSCFactory_NilNodesCoordinator(t *testing.T) { + t.Parallel() + + arguments := createMockNewSystemScFactoryArgs() + arguments.NodesCoordinator = nil + scFactory, err := NewSystemSCFactory(arguments) + + assert.Nil(t, scFactory) + assert.True(t, errors.Is(err, vm.ErrNilNodesCoordinator)) +} + func TestNewSystemSCFactory_NilSigVerifier(t *testing.T) { t.Parallel() diff --git a/vm/interface.go b/vm/interface.go index 02d78643821..ca8332c742f 100644 --- a/vm/interface.go +++ b/vm/interface.go @@ -37,7 +37,7 @@ type SystemSCContainer interface { type SystemEI interface { ExecuteOnDestContext(destination []byte, sender []byte, value *big.Int, input []byte) (*vmcommon.VMOutput, error) DeploySystemSC(baseContract []byte, newAddress []byte, ownerAddress []byte, initFunction string, value *big.Int, input [][]byte) (vmcommon.ReturnCode, error) - Transfer(destination []byte, sender []byte, value *big.Int, input []byte, gasLimit uint64) error + Transfer(destination []byte, sender []byte, value *big.Int, input []byte, gasLimit uint64) SendGlobalSettingToAll(sender []byte, input []byte) GetBalance(addr []byte) *big.Int SetStorage(key []byte, value []byte) @@ -60,6 +60,7 @@ type SystemEI interface { GetLogs() []*vmcommon.LogEntry SetOwnerOperatingOnAccount(newOwner []byte) error UpdateCodeDeployerAddress(scAddress string, newOwner []byte) error + ProcessBuiltInFunction(sender, destination []byte, function string, arguments [][]byte) (*vmcommon.VMOutput, error) IsInterfaceNil() bool } @@ -70,6 +71,12 @@ type EconomicsHandler interface { IsInterfaceNil() bool } +// NodesCoordinator defines the methods needed about nodes in system SCs from nodes coordinator +type NodesCoordinator interface { + GetNumTotalEligible() uint64 + IsInterfaceNil() bool +} + // ContextHandler defines the methods needed to execute system smart contracts type ContextHandler interface { SystemEI @@ -129,4 +136,5 @@ type BlockchainHook interface { GetSnapshot() int RevertToSnapshot(snapshot int) error IsBuiltinFunctionName(functionName string) bool + ProcessBuiltInFunction(input *vmcommon.ContractCallInput) (*vmcommon.VMOutput, error) } diff --git a/vm/mock/nodesCoordinatorStub.go b/vm/mock/nodesCoordinatorStub.go new file mode 100644 index 00000000000..de4a99e28e7 --- /dev/null +++ b/vm/mock/nodesCoordinatorStub.go @@ -0,0 +1,19 @@ +package mock + +// NodesCoordinatorStub - +type NodesCoordinatorStub struct { + GetNumTotalEligibleCalled func() uint64 +} + +// GetNumTotalEligible - +func (n *NodesCoordinatorStub) GetNumTotalEligible() uint64 { + if n.GetNumTotalEligibleCalled != nil { + return n.GetNumTotalEligibleCalled() + } + return 1000 +} + +// IsInterfaceNil - +func (n *NodesCoordinatorStub) IsInterfaceNil() bool { + return n == nil +} diff --git a/vm/mock/systemEIStub.go b/vm/mock/systemEIStub.go index 4162a34ab24..0c300010316 100644 --- a/vm/mock/systemEIStub.go +++ b/vm/mock/systemEIStub.go @@ -10,7 +10,7 @@ import ( // SystemEIStub - type SystemEIStub struct { - TransferCalled func(destination []byte, sender []byte, value *big.Int, input []byte) error + TransferCalled func(destination []byte, sender []byte, value *big.Int, input []byte, gasLimit uint64) GetBalanceCalled func(addr []byte) *big.Int SetStorageCalled func(key []byte, value []byte) AddReturnMessageCalled func(msg string) @@ -37,6 +37,7 @@ type SystemEIStub struct { GasLeftCalled func() uint64 CleanStorageUpdatesCalled func() 
ReturnMessage string + ProcessBuiltInFunctionCalled func(sender, destination []byte, function string, arguments [][]byte) (*vmcommon.VMOutput, error) AddLogEntryCalled func(entry *vmcommon.LogEntry) SetOwnerOperatingOnAccountCalled func(newOwner []byte) error UpdateCodeDeployerAddressCalled func(scAddress string, newOwner []byte) error @@ -203,11 +204,10 @@ func (s *SystemEIStub) SendGlobalSettingToAll(sender []byte, input []byte) { } // Transfer - -func (s *SystemEIStub) Transfer(destination []byte, sender []byte, value *big.Int, input []byte, _ uint64) error { +func (s *SystemEIStub) Transfer(destination []byte, sender []byte, value *big.Int, input []byte, gasLimit uint64) { if s.TransferCalled != nil { - return s.TransferCalled(destination, sender, value, input) + s.TransferCalled(destination, sender, value, input, gasLimit) } - return nil } // GetBalance - @@ -310,6 +310,14 @@ func (s *SystemEIStub) UpdateCodeDeployerAddress(scAddress string, newOwner []by return nil } +// ProcessBuiltInFunction - +func (s *SystemEIStub) ProcessBuiltInFunction(sender, destination []byte, function string, arguments [][]byte) (*vmcommon.VMOutput, error) { + if s.ProcessBuiltInFunctionCalled != nil { + return s.ProcessBuiltInFunctionCalled(sender, destination, function, arguments) + } + return &vmcommon.VMOutput{}, nil +} + // IsInterfaceNil - func (s *SystemEIStub) IsInterfaceNil() bool { return s == nil diff --git a/vm/systemSmartContracts/delegation.go b/vm/systemSmartContracts/delegation.go index d71afabb6e2..ac33ba81da2 100644 --- a/vm/systemSmartContracts/delegation.go +++ b/vm/systemSmartContracts/delegation.go @@ -1215,6 +1215,13 @@ func (d *delegation) stakeNodes(args *vmcommon.ContractCallInput) vmcommon.Retur return vmOutput.ReturnCode } + allLogs := d.eei.GetLogs() + tooManyNodesErrMsg := getTooManyNodesErrMsg(allLogs) + if len(tooManyNodesErrMsg) != 0 { + d.eei.AddReturnMessage(tooManyNodesErrMsg) + return vmcommon.UserError + } + err = d.updateDelegationStatusAfterStake(status, vmOutput.ReturnData, args.Arguments) if err != nil { d.eei.AddReturnMessage(err.Error()) @@ -1226,6 +1233,27 @@ func (d *delegation) stakeNodes(args *vmcommon.ContractCallInput) vmcommon.Retur return vmcommon.Ok } +func getTooManyNodesErrMsg(logEntries []*vmcommon.LogEntry) string { + for _, logEntry := range logEntries { + topics := logEntry.Topics + if len(topics) != 3 { + continue + } + if bytes.Equal(topics[0], []byte(numberOfNodesTooHigh)) { + return formatTooManyNodesMsg(topics) + } + } + + return "" +} + +func formatTooManyNodesMsg(topics [][]byte) string { + numRegisteredBlsKeys := big.NewInt(0).SetBytes(topics[1]).Int64() + nodeLimit := big.NewInt(0).SetBytes(topics[2]).Int64() + return fmt.Sprintf("%s, num registered bls keys: %d, node limit: %d", + numberOfNodesTooHigh, numRegisteredBlsKeys, nodeLimit) +} + func (d *delegation) updateDelegationStatusAfterStake( status *DelegationContractStatus, returnData [][]byte, @@ -1430,11 +1458,7 @@ func (d *delegation) unJailNodes(args *vmcommon.ContractCallInput) vmcommon.Retu sendBackValue := getTransferBackFromVMOutput(vmOutput) if sendBackValue.Cmp(zero) > 0 { - err = d.eei.Transfer(args.CallerAddr, args.RecipientAddr, sendBackValue, nil, 0) - if err != nil { - d.eei.AddReturnMessage(err.Error()) - return vmcommon.UserError - } + d.eei.Transfer(args.CallerAddr, args.RecipientAddr, sendBackValue, nil, 0) } d.createAndAddLogEntry(args, args.Arguments...) 
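For clarity, a minimal sketch (hypothetical values; it reuses the numberOfNodesTooHigh constant, formatTooManyNodesMsg and the vmcommon/big.Int types referenced in the hunk above) of the log entry shape that getTooManyNodesErrMsg matches and the message built from it:

    entry := &vmcommon.LogEntry{
        Topics: [][]byte{
            []byte(numberOfNodesTooHigh), // topic 0: event identifier emitted on the failed stake
            big.NewInt(3).Bytes(),        // topic 1: number of registered BLS keys (example value)
            big.NewInt(3).Bytes(),        // topic 2: node limit (example value)
        },
    }
    msg := formatTooManyNodesMsg(entry.Topics)
    // msg reads "<numberOfNodesTooHigh>, num registered bls keys: 3, node limit: 3"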
@@ -1532,70 +1556,54 @@ func (d *delegation) finishDelegateUser( return vmcommon.UserError } - var err error - if len(delegator.ActiveFund) == 0 { - var fundKey []byte - fundKey, err = d.createAndSaveNextKeyFund(callerAddr, delegateValue, active) - if err != nil { - d.eei.AddReturnMessage(err.Error()) - return vmcommon.UserError - } - - delegator.ActiveFund = fundKey - if isNew { - dStatus.NumUsers++ - } - } else { - err = d.addValueToFund(delegator.ActiveFund, delegateValue) - if err != nil { - d.eei.AddReturnMessage(err.Error()) - return vmcommon.UserError - } - } - - err = d.checkActiveFund(delegator) + err := d.addToActiveFund(callerAddr, delegator, delegateValue, dStatus, isNew) if err != nil { d.eei.AddReturnMessage(err.Error()) return vmcommon.UserError } - stakeArgs := d.makeStakeArgsIfAutomaticActivation(dConfig, dStatus, globalFund) - vmOutput, err := d.executeOnValidatorSC(scAddress, "stake", stakeArgs, callValue) + err = d.checkActiveFund(delegator) if err != nil { d.eei.AddReturnMessage(err.Error()) return vmcommon.UserError } - if vmOutput.ReturnCode != vmcommon.Ok { - return vmOutput.ReturnCode - } - if len(stakeArgs) > 0 { - err = d.updateDelegationStatusAfterStake(dStatus, vmOutput.ReturnData, stakeArgs) - if err != nil { - d.eei.AddReturnMessage(err.Error()) - return vmcommon.UserError - } + returnCode := d.executeStakeAndUpdateStatus(dConfig, dStatus, globalFund, callValue, scAddress) + if returnCode != vmcommon.Ok { + return returnCode } - err = d.saveDelegationStatus(dStatus) + err = d.saveDelegatorData(callerAddr, delegator) if err != nil { d.eei.AddReturnMessage(err.Error()) return vmcommon.UserError } - err = d.saveGlobalFundData(globalFund) - if err != nil { - d.eei.AddReturnMessage(err.Error()) - return vmcommon.UserError + return vmcommon.Ok +} + +func (d *delegation) addToActiveFund( + callerAddr []byte, + delegator *DelegatorData, + delegateValue *big.Int, + dStatus *DelegationContractStatus, + isNew bool, +) error { + if len(delegator.ActiveFund) > 0 { + return d.addValueToFund(delegator.ActiveFund, delegateValue) } - err = d.saveDelegatorData(callerAddr, delegator) + fundKey, err := d.createAndSaveNextKeyFund(callerAddr, delegateValue, active) if err != nil { - d.eei.AddReturnMessage(err.Error()) - return vmcommon.UserError + return err } - return vmcommon.Ok + delegator.ActiveFund = fundKey + if isNew { + dStatus.NumUsers++ + } + + return nil } func (d *delegation) checkActiveFund(delegator *DelegatorData) error { @@ -1730,7 +1738,16 @@ func (d *delegation) unDelegate(args *vmcommon.ContractCallInput) vmcommon.Retur return vmcommon.UserError } - isNew, delegator, err := d.getOrCreateDelegatorData(args.CallerAddr) + return d.unDelegateValueFromAddress(args, valueToUnDelegate, args.CallerAddr, args.RecipientAddr) +} + +func (d *delegation) unDelegateValueFromAddress( + args *vmcommon.ContractCallInput, + valueToUnDelegate *big.Int, + delegatorAddress []byte, + contractAddress []byte, +) vmcommon.ReturnCode { + isNew, delegator, err := d.getOrCreateDelegatorData(delegatorAddress) if err != nil { d.eei.AddReturnMessage(err.Error()) return vmcommon.UserError @@ -1763,12 +1780,13 @@ func (d *delegation) unDelegate(args *vmcommon.ContractCallInput) vmcommon.Retur d.eei.AddReturnMessage("invalid value to undelegate - need to undelegate all - do not leave dust behind") return vmcommon.UserError } - err = d.checkOwnerCanUnDelegate(args.CallerAddr, activeFund, valueToUnDelegate) + + err = d.checkOwnerCanUnDelegate(delegatorAddress, activeFund, valueToUnDelegate) if err 
!= nil { d.eei.AddReturnMessage(err.Error()) return vmcommon.UserError } - err = d.computeAndUpdateRewards(args.CallerAddr, delegator) + err = d.computeAndUpdateRewards(delegatorAddress, delegator) if err != nil { d.eei.AddReturnMessage(err.Error()) return vmcommon.UserError @@ -1780,7 +1798,7 @@ func (d *delegation) unDelegate(args *vmcommon.ContractCallInput) vmcommon.Retur return vmcommon.UserError } - returnData, returnCode := d.executeOnValidatorSCWithValueInArgs(args.RecipientAddr, "unStakeTokens", valueToUnDelegate) + returnData, returnCode := d.executeOnValidatorSCWithValueInArgs(contractAddress, "unStakeTokens", valueToUnDelegate) if returnCode != vmcommon.Ok { return returnCode } @@ -1798,7 +1816,7 @@ func (d *delegation) unDelegate(args *vmcommon.ContractCallInput) vmcommon.Retur return vmcommon.UserError } - err = d.addNewUnStakedFund(args.CallerAddr, delegator, actualUserUnStake) + err = d.addNewUnStakedFund(delegatorAddress, delegator, actualUserUnStake) if err != nil { d.eei.AddReturnMessage(err.Error()) return vmcommon.UserError @@ -1822,7 +1840,7 @@ func (d *delegation) unDelegate(args *vmcommon.ContractCallInput) vmcommon.Retur return vmcommon.UserError } - err = d.saveDelegatorData(args.CallerAddr, delegator) + err = d.saveDelegatorData(delegatorAddress, delegator) if err != nil { d.eei.AddReturnMessage(err.Error()) return vmcommon.UserError @@ -1971,11 +1989,31 @@ func (d *delegation) computeAndUpdateRewards(callerAddress []byte, delegator *De isOwner := d.isOwner(callerAddress) + totalRewards, err := d.computeRewards(delegator.RewardsCheckpoint, isOwner, activeFund.Value) + if err != nil { + return err + } + delegator.UnClaimedRewards.Add(delegator.UnClaimedRewards, totalRewards) + delegator.RewardsCheckpoint = currentEpoch + 1 + + return nil +} + +func (d *delegation) computeRewards( + rewardsCheckpoint uint32, + isOwner bool, + activeValue *big.Int, +) (*big.Int, error) { totalRewards := big.NewInt(0) - for i := delegator.RewardsCheckpoint; i <= currentEpoch; i++ { + if activeValue.Cmp(zero) <= 0 { + return totalRewards, nil + } + + currentEpoch := d.eei.BlockChainHook().CurrentEpoch() + for i := rewardsCheckpoint; i <= currentEpoch; i++ { found, rewardData, errGet := d.getRewardComputationData(i) if errGet != nil { - return errGet + return nil, errGet } if !found { continue @@ -1999,7 +2037,7 @@ func (d *delegation) computeAndUpdateRewards(callerAddress []byte, delegator *De rewardForDelegator := big.NewInt(0).Sub(rewardData.RewardsToDistribute, rewardsForOwner) // delegator reward is: rewardForDelegator * user stake / total active - rewardForDelegator.Mul(rewardForDelegator, activeFund.Value) + rewardForDelegator.Mul(rewardForDelegator, activeValue) rewardForDelegator.Div(rewardForDelegator, rewardData.TotalActive) if isOwner { @@ -2008,10 +2046,7 @@ func (d *delegation) computeAndUpdateRewards(callerAddress []byte, delegator *De totalRewards.Add(totalRewards, rewardForDelegator) } - delegator.UnClaimedRewards.Add(delegator.UnClaimedRewards, totalRewards) - delegator.RewardsCheckpoint = currentEpoch + 1 - - return nil + return totalRewards, nil } func (d *delegation) claimRewards(args *vmcommon.ContractCallInput) vmcommon.ReturnCode { @@ -2041,11 +2076,7 @@ func (d *delegation) claimRewards(args *vmcommon.ContractCallInput) vmcommon.Ret return vmcommon.UserError } - err = d.eei.Transfer(args.CallerAddr, args.RecipientAddr, delegator.UnClaimedRewards, nil, 0) - if err != nil { - d.eei.AddReturnMessage(err.Error()) - return vmcommon.UserError - } + 
d.eei.Transfer(args.CallerAddr, args.RecipientAddr, delegator.UnClaimedRewards, nil, 0) unclaimedRewardsBytes := delegator.UnClaimedRewards.Bytes() delegator.TotalCumulatedRewards.Add(delegator.TotalCumulatedRewards, delegator.UnClaimedRewards) @@ -2112,6 +2143,7 @@ func (d *delegation) withdraw(args *vmcommon.ContractCallInput) vmcommon.ReturnC d.eei.AddReturnMessage(vm.ErrCallValueMustBeZero.Error()) return vmcommon.UserError } + err := d.eei.UseGas(d.gasCost.MetaChainSystemSCsCost.DelegationOps) if err != nil { d.eei.AddReturnMessage(err.Error()) @@ -2212,11 +2244,7 @@ func (d *delegation) withdraw(args *vmcommon.ContractCallInput) vmcommon.ReturnC return vmcommon.UserError } - err = d.eei.Transfer(args.CallerAddr, args.RecipientAddr, actualUserUnBond, nil, 0) - if err != nil { - d.eei.AddReturnMessage(err.Error()) - return vmcommon.UserError - } + d.eei.Transfer(args.CallerAddr, args.RecipientAddr, actualUserUnBond, nil, 0) var wasDeleted bool wasDeleted, err = d.deleteDelegatorOnWithdrawIfNeeded(args.CallerAddr, delegator) @@ -2900,6 +2928,45 @@ func (d *delegation) addTokens(args *vmcommon.ContractCallInput) vmcommon.Return return vmcommon.Ok } +func (d *delegation) executeStakeAndUpdateStatus( + dConfig *DelegationConfig, + dStatus *DelegationContractStatus, + globalFund *GlobalFundData, + valueToStake *big.Int, + scAddress []byte, +) vmcommon.ReturnCode { + stakeArgs := d.makeStakeArgsIfAutomaticActivation(dConfig, dStatus, globalFund) + vmOutput, err := d.executeOnValidatorSC(scAddress, "stake", stakeArgs, valueToStake) + if err != nil { + d.eei.AddReturnMessage(err.Error()) + return vmcommon.UserError + } + if vmOutput.ReturnCode != vmcommon.Ok { + return vmOutput.ReturnCode + } + + if len(stakeArgs) > 0 { + err = d.updateDelegationStatusAfterStake(dStatus, vmOutput.ReturnData, stakeArgs) + if err != nil { + d.eei.AddReturnMessage(err.Error()) + return vmcommon.UserError + } + } + + err = d.saveDelegationStatus(dStatus) + if err != nil { + d.eei.AddReturnMessage(err.Error()) + return vmcommon.UserError + } + err = d.saveGlobalFundData(globalFund) + if err != nil { + d.eei.AddReturnMessage(err.Error()) + return vmcommon.UserError + } + + return vmcommon.Ok +} + func (d *delegation) executeOnValidatorSC(address []byte, function string, args [][]byte, value *big.Int) (*vmcommon.VMOutput, error) { validatorCall := function for _, key := range args { @@ -2912,7 +2979,6 @@ func (d *delegation) executeOnValidatorSC(address []byte, function string, args } return vmOutput, nil - } func (d *delegation) getDelegationContractConfig() (*DelegationConfig, error) { diff --git a/vm/systemSmartContracts/delegationManager_test.go b/vm/systemSmartContracts/delegationManager_test.go index b683ac4331c..e2b4de77d8f 100644 --- a/vm/systemSmartContracts/delegationManager_test.go +++ b/vm/systemSmartContracts/delegationManager_test.go @@ -1171,7 +1171,7 @@ func TestDelegationManagerSystemSC_ClaimMultipleDelegationDuplicatedInput(t *tes GetCalled: func(key []byte) (vm.SystemSmartContract, error) { return &mock.SystemSCStub{ ExecuteCalled: func(args *vmcommon.ContractCallInput) vmcommon.ReturnCode { - _ = d.eei.Transfer(args.RecipientAddr, args.CallerAddr, big.NewInt(10), nil, 0) + d.eei.Transfer(args.RecipientAddr, args.CallerAddr, big.NewInt(10), nil, 0) return vmcommon.Ok }, }, nil @@ -1197,7 +1197,7 @@ func TestDelegationManagerSystemSC_ClaimMultipleDelegation(t *testing.T) { GetCalled: func(key []byte) (vm.SystemSmartContract, error) { return &mock.SystemSCStub{ ExecuteCalled: func(args 
*vmcommon.ContractCallInput) vmcommon.ReturnCode { - _ = d.eei.Transfer(args.CallerAddr, args.RecipientAddr, big.NewInt(10), nil, 0) + d.eei.Transfer(args.CallerAddr, args.RecipientAddr, big.NewInt(10), nil, 0) return vmcommon.Ok }, }, nil diff --git a/vm/systemSmartContracts/delegation_test.go b/vm/systemSmartContracts/delegation_test.go index 73cbab30716..fe93b1c8368 100644 --- a/vm/systemSmartContracts/delegation_test.go +++ b/vm/systemSmartContracts/delegation_test.go @@ -16,6 +16,7 @@ import ( "github.com/multiversx/mx-chain-go/config" "github.com/multiversx/mx-chain-go/process/smartContract/hooks" "github.com/multiversx/mx-chain-go/testscommon/enableEpochsHandlerMock" + "github.com/multiversx/mx-chain-go/testscommon/shardingMocks" stateMock "github.com/multiversx/mx-chain-go/testscommon/state" "github.com/multiversx/mx-chain-go/vm" "github.com/multiversx/mx-chain-go/vm/mock" @@ -158,6 +159,14 @@ func createDelegationContractAndEEI() (*delegation, *vmContext) { args.DelegationSCConfig.MaxServiceFee = 10000 args.DelegationSCConfig.MinServiceFee = 0 d, _ := NewDelegationSystemSC(args) + + managementData := &DelegationManagement{ + MinDeposit: big.NewInt(10), + MinDelegationAmount: big.NewInt(10), + } + marshaledData, _ := d.marshalizer.Marshal(managementData) + eei.SetStorageForAddress(d.delegationMgrSCAddress, []byte(delegationManagementKey), marshaledData) + return d, eei } @@ -1615,9 +1624,16 @@ func TestDelegationSystemSC_ExecuteUnDelegateUserErrorsWhenGettingMinimumDelegat }) d.eei.SetStorage([]byte(lastFundKey), fundKey) + managementData := &DelegationManagement{ + MinDeposit: big.NewInt(50), + MinDelegationAmount: big.NewInt(50), + } + marshaledData, _ := d.marshalizer.Marshal(managementData) + eei.SetStorageForAddress(d.delegationMgrSCAddress, []byte(delegationManagementKey), marshaledData) + output := d.Execute(vmInput) assert.Equal(t, vmcommon.UserError, output) - assert.True(t, strings.Contains(eei.returnMessage, "error getting minimum delegation amount")) + assert.True(t, strings.Contains(eei.returnMessage, "invalid value to undelegate - need to undelegate all - do not leave dust behind")) } func TestDelegationSystemSC_ExecuteUnDelegateUserNotDelegatorOrNoActiveFundShouldErr(t *testing.T) { @@ -5028,3 +5044,139 @@ func TestDelegationSystemSC_SynchronizeOwner(t *testing.T) { eei.ResetReturnMessage() }) } + +func TestDelegationSystemSC_ExecuteAddNodesStakeNodesWithNodesLimit(t *testing.T) { + t.Parallel() + + sig := []byte("sig1") + args := createMockArgumentsForDelegation() + args.EnableEpochsHandler = enableEpochsHandlerMock.NewEnableEpochsHandlerStub( + common.StakingV4Step1Flag, + common.StakingV4Step2Flag, + common.StakingV4Step3Flag, + common.StakeLimitsFlag, + + common.DelegationSmartContractFlag, + common.StakingV2FlagAfterEpoch, + common.AddTokensToDelegationFlag, + common.DeleteDelegatorAfterClaimRewardsFlag, + common.ComputeRewardCheckpointFlag, + common.ValidatorToDelegationFlag, + common.ReDelegateBelowMinCheckFlag, + common.MultiClaimOnDelegationFlag, + ) + eei := createDefaultEei() + delegationsMap := map[string][]byte{} + delegationsMap[ownerKey] = []byte("owner") + eei.storageUpdate[string(eei.scAddress)] = delegationsMap + args.Eei = eei + + d, _ := NewDelegationSystemSC(args) + + blsKey1 := []byte("blsKey1") + blsKey2 := []byte("blsKey2") + key1 := &NodesData{ + BLSKey: blsKey1, + } + key2 := &NodesData{ + BLSKey: blsKey2, + } + dStatus := &DelegationContractStatus{ + StakedKeys: []*NodesData{key1, key2}, + } + _ = d.saveDelegationStatus(dStatus) + + 
globalFund := &GlobalFundData{ + TotalActive: big.NewInt(400), + } + _ = d.saveGlobalFundData(globalFund) + + addValidatorAndStakingScToVmContextWithBlsKeys(eei, [][]byte{blsKey1, blsKey2}) + + dStatus, _ = d.getDelegationStatus() + require.Equal(t, 2, len(dStatus.StakedKeys)) + require.Equal(t, 0, len(dStatus.UnStakedKeys)) + require.Equal(t, 0, len(dStatus.NotStakedKeys)) + + newBlsKey1 := []byte("newBlsKey1") + vmInput := getDefaultVmInputForFunc("addNodes", [][]byte{newBlsKey1, sig}) + output := d.Execute(vmInput) + require.Equal(t, vmcommon.Ok, output) + + vmInput = getDefaultVmInputForFunc("stakeNodes", [][]byte{newBlsKey1}) + output = d.Execute(vmInput) + require.Equal(t, vmcommon.Ok, output) + + dStatus, _ = d.getDelegationStatus() + require.Equal(t, 3, len(dStatus.StakedKeys)) + require.Equal(t, 0, len(dStatus.UnStakedKeys)) + require.Equal(t, 0, len(dStatus.NotStakedKeys)) + + addValidatorAndStakingScToVmContextWithBlsKeys(eei, [][]byte{blsKey1, blsKey2, newBlsKey1}) + + newBlsKey2 := []byte("newBlsKey2") + vmInput = getDefaultVmInputForFunc("addNodes", [][]byte{newBlsKey2, sig}) + output = d.Execute(vmInput) + require.Equal(t, vmcommon.Ok, output) + + vmInput = getDefaultVmInputForFunc("stakeNodes", [][]byte{newBlsKey2}) + output = d.Execute(vmInput) + require.Equal(t, vmcommon.UserError, output) + require.True(t, strings.Contains(eei.returnMessage, numberOfNodesTooHigh)) + require.True(t, strings.Contains(eei.returnMessage, "num registered bls keys: 3")) + require.True(t, strings.Contains(eei.returnMessage, "node limit: 3")) + + dStatus, _ = d.getDelegationStatus() + require.Equal(t, 3, len(dStatus.StakedKeys)) + require.Equal(t, 0, len(dStatus.UnStakedKeys)) + require.Equal(t, 1, len(dStatus.NotStakedKeys)) +} + +func addValidatorAndStakingScToVmContextWithBlsKeys(eei *vmContext, blsKeys [][]byte) { + validatorArgs := createMockArgumentsForValidatorSC() + validatorArgs.StakingSCConfig.NodeLimitPercentage = 1 + validatorArgs.Eei = eei + validatorArgs.StakingSCConfig.GenesisNodePrice = "100" + validatorArgs.StakingSCAddress = vm.StakingSCAddress + validatorArgs.NodesCoordinator = &shardingMocks.NodesCoordinatorStub{ + GetNumTotalEligibleCalled: func() uint64 { + return 3 + }, + } + validatorSc, _ := NewValidatorSmartContract(validatorArgs) + + stakingArgs := createMockStakingScArguments() + stakingArgs.Eei = eei + stakingSc, _ := NewStakingSmartContract(stakingArgs) + + _ = eei.SetSystemSCContainer(&mock.SystemSCContainerStub{GetCalled: func(key []byte) (contract vm.SystemSmartContract, err error) { + if bytes.Equal(key, vm.StakingSCAddress) { + return stakingSc, nil + } + + if bytes.Equal(key, vm.ValidatorSCAddress) { + _ = validatorSc.saveRegistrationData([]byte("addr"), &ValidatorDataV2{ + RewardAddress: []byte("rewardAddr"), + TotalStakeValue: big.NewInt(1000), + LockedStake: big.NewInt(500), + BlsPubKeys: blsKeys, + TotalUnstaked: big.NewInt(150), + UnstakedInfo: []*UnstakedValue{ + { + UnstakedEpoch: 10, + UnstakedValue: big.NewInt(60), + }, + { + UnstakedEpoch: 50, + UnstakedValue: big.NewInt(80), + }, + }, + NumRegistered: uint32(len(blsKeys)), + }) + validatorSc.unBondPeriod = 50 + return validatorSc, nil + } + + return nil, nil + }}) +} diff --git a/vm/systemSmartContracts/eei.go b/vm/systemSmartContracts/eei.go index a2743693694..55f554d11b0 100644 --- a/vm/systemSmartContracts/eei.go +++ b/vm/systemSmartContracts/eei.go @@ -1,6 +1,7 @@ package systemSmartContracts import ( + "errors" "fmt" "math/big" @@ -75,6 +76,7 @@ func NewVMContext(args VMContextArgs) 
(*vmContext, error) { err := core.CheckHandlerCompatibility(args.EnableEpochsHandler, []core.EnableEpochFlag{ common.MultiClaimOnDelegationFlag, common.SetSenderInEeiOutputTransferFlag, + common.AlwaysMergeContextsInEEIFlag, }) if err != nil { return nil, err @@ -217,6 +219,17 @@ func (host *vmContext) SendGlobalSettingToAll(_ []byte, input []byte) { } } +func (host *vmContext) transferValueOnly( + destination []byte, + sender []byte, + value *big.Int, +) { + senderAcc, destAcc := host.getSenderDestination(sender, destination) + + _ = senderAcc.BalanceDelta.Sub(senderAcc.BalanceDelta, value) + _ = destAcc.BalanceDelta.Add(destAcc.BalanceDelta, value) +} + func (host *vmContext) getSenderDestination(sender, destination []byte) (*vmcommon.OutputAccount, *vmcommon.OutputAccount) { senderAcc, exists := host.outputAccounts[string(sender)] if !exists { @@ -241,17 +254,6 @@ func (host *vmContext) getSenderDestination(sender, destination []byte) (*vmcomm return senderAcc, destAcc } -func (host *vmContext) transferValueOnly( - destination []byte, - sender []byte, - value *big.Int, -) { - senderAcc, destAcc := host.getSenderDestination(sender, destination) - - _ = senderAcc.BalanceDelta.Sub(senderAcc.BalanceDelta, value) - _ = destAcc.BalanceDelta.Add(destAcc.BalanceDelta, value) -} - // Transfer handles any necessary value transfer required and takes // the necessary steps to create accounts func (host *vmContext) Transfer( @@ -260,7 +262,7 @@ func (host *vmContext) Transfer( value *big.Int, input []byte, gasLimit uint64, -) error { +) { host.transferValueOnly(destination, sender, value) senderAcc, destAcc := host.getSenderDestination(sender, destination) outputTransfer := vmcommon.OutputTransfer{ @@ -275,8 +277,6 @@ func (host *vmContext) Transfer( outputTransfer.SenderAddress = senderAcc.Address } destAcc.OutputTransfers = append(destAcc.OutputTransfers, outputTransfer) - - return nil } // GetLogs returns the logs @@ -340,8 +340,11 @@ func (host *vmContext) properMergeContexts(parentContext *vmContext, returnCode host.scAddress = parentContext.scAddress host.AddReturnMessage(parentContext.returnMessage) - if returnCode != vmcommon.Ok { - // no need to merge - revert was done - transaction will fail + + // merge contexts if the return code is OK or the fix flag is activated because it was wrong not to merge them if the call failed + shouldMergeContexts := returnCode == vmcommon.Ok || host.enableEpochsHandler.IsFlagEnabled(common.AlwaysMergeContextsInEEIFlag) + if !shouldMergeContexts { + // backwards compatibility return } @@ -432,7 +435,8 @@ func createDirectCallInput( func (host *vmContext) transferBeforeInternalExec(callInput *vmcommon.ContractCallInput, sender []byte, callType string) error { if !host.enableEpochsHandler.IsFlagEnabled(common.MultiClaimOnDelegationFlag) { - return host.Transfer(callInput.RecipientAddr, sender, callInput.CallValue, nil, 0) + host.Transfer(callInput.RecipientAddr, sender, callInput.CallValue, nil, 0) + return nil } host.transferValueOnly(callInput.RecipientAddr, sender, callInput.CallValue) @@ -530,6 +534,8 @@ func (host *vmContext) ExecuteOnDestContext(destination []byte, sender []byte, v vmOutput := &vmcommon.VMOutput{ReturnCode: vmcommon.UserError} currContext := host.copyToNewContext() defer func() { + // we need to reset here the output since it was already transferred in the vmOutput (host.CreateVMOutput() function) + // and we do not want to duplicate them host.output = make([][]byte, 0) host.properMergeContexts(currContext, vmOutput.ReturnCode) }() @@ 
-593,6 +599,42 @@ func (host *vmContext) AddLogEntry(entry *vmcommon.LogEntry) { host.logs = append(host.logs, entry) } +// ProcessBuiltInFunction will process the given built in function and will merge the generated output accounts and logs +func (host *vmContext) ProcessBuiltInFunction( + sender, destination []byte, + function string, + arguments [][]byte, +) (*vmcommon.VMOutput, error) { + vmInput := createDirectCallInput(destination, sender, big.NewInt(0), function, arguments) + vmInput.GasProvided = host.GasLeft() + vmOutput, err := host.blockChainHook.ProcessBuiltInFunction(vmInput) + if err != nil { + return nil, err + } + if vmOutput.ReturnCode != vmcommon.Ok { + return nil, errors.New(vmOutput.ReturnMessage) + } + + for address, outAcc := range vmOutput.OutputAccounts { + if len(outAcc.OutputTransfers) > 0 { + leftAccount, exist := host.outputAccounts[address] + if !exist { + leftAccount = &vmcommon.OutputAccount{ + Address: []byte(address), + } + host.outputAccounts[address] = leftAccount + } + leftAccount.OutputTransfers = append(leftAccount.OutputTransfers, outAcc.OutputTransfers...) + } + } + + for _, logEntry := range vmOutput.Logs { + host.AddLogEntry(logEntry) + } + + return vmOutput, nil +} + // BlockChainHook returns the blockchain hook func (host *vmContext) BlockChainHook() vm.BlockchainHook { return host.blockChainHook diff --git a/vm/systemSmartContracts/eei_test.go b/vm/systemSmartContracts/eei_test.go index 0d5df038a98..aa1120e452d 100644 --- a/vm/systemSmartContracts/eei_test.go +++ b/vm/systemSmartContracts/eei_test.go @@ -200,9 +200,7 @@ func TestVmContext_Transfer(t *testing.T) { value := big.NewInt(999) input := []byte("input") - err := vmCtx.Transfer(destination, sender, value, input, 0) - assert.Nil(t, err) - + vmCtx.Transfer(destination, sender, value, input, 0) balance := vmCtx.GetBalance(destination) assert.Equal(t, value.Uint64(), balance.Uint64()) diff --git a/vm/systemSmartContracts/esdt.go b/vm/systemSmartContracts/esdt.go index b353a052b42..1a6d0cabbbe 100644 --- a/vm/systemSmartContracts/esdt.go +++ b/vm/systemSmartContracts/esdt.go @@ -16,6 +16,7 @@ import ( "github.com/multiversx/mx-chain-go/common" "github.com/multiversx/mx-chain-go/config" "github.com/multiversx/mx-chain-go/vm" + logger "github.com/multiversx/mx-chain-logger-go" vmcommon "github.com/multiversx/mx-chain-vm-common-go" ) @@ -50,7 +51,7 @@ type esdt struct { gasCost vm.GasCost baseIssuingCost *big.Int ownerAddress []byte // do not use this in functions. Should use e.getEsdtOwner() - eSDTSCAddress []byte + esdtSCAddress []byte endOfEpochSCAddress []byte marshalizer marshal.Marshalizer hasher hashing.Hasher @@ -110,7 +111,6 @@ func NewESDTSmartContract(args ArgsNewESDTSmartContract) (*esdt, error) { if len(args.EndOfEpochSCAddress) == 0 { return nil, vm.ErrNilEndOfEpochSmartContractAddress } - baseIssuingCost, okConvert := big.NewInt(0).SetString(args.ESDTSCConfig.BaseIssuingCost, conversionBase) if !okConvert || baseIssuingCost.Cmp(big.NewInt(0)) < 0 { return nil, vm.ErrInvalidBaseIssuingCost @@ -123,7 +123,7 @@ func NewESDTSmartContract(args ArgsNewESDTSmartContract) (*esdt, error) { // we should have called pubkeyConverter.Decode here instead of a byte slice cast. 
Since that change would break // backwards compatibility, the fix was carried in the epochStart/metachain/systemSCs.go ownerAddress: []byte(args.ESDTSCConfig.OwnerAddress), - eSDTSCAddress: args.ESDTSCAddress, + esdtSCAddress: args.ESDTSCAddress, hasher: args.Hasher, marshalizer: args.Marshalizer, endOfEpochSCAddress: args.EndOfEpochSCAddress, @@ -317,11 +317,7 @@ func (e *esdt) issue(args *vmcommon.ContractCallInput) vmcommon.ReturnCode { if initialSupply.Cmp(zero) > 0 { esdtTransferData := core.BuiltInFunctionESDTTransfer + "@" + hex.EncodeToString(tokenIdentifier) + "@" + hex.EncodeToString(initialSupply.Bytes()) - err = e.eei.Transfer(args.CallerAddr, e.eSDTSCAddress, big.NewInt(0), []byte(esdtTransferData), 0) - if err != nil { - e.eei.AddReturnMessage(err.Error()) - return vmcommon.UserError - } + e.eei.Transfer(args.CallerAddr, e.esdtSCAddress, big.NewInt(0), []byte(esdtTransferData), 0) } else { e.eei.Finish(tokenIdentifier) } @@ -841,12 +837,7 @@ func (e *esdt) burn(args *vmcommon.ContractCallInput) vmcommon.ReturnCode { } if !token.Burnable { esdtTransferData := core.BuiltInFunctionESDTTransfer + "@" + hex.EncodeToString(args.Arguments[0]) + "@" + hex.EncodeToString(args.Arguments[1]) - err = e.eei.Transfer(args.CallerAddr, e.eSDTSCAddress, big.NewInt(0), []byte(esdtTransferData), 0) - if err != nil { - e.eei.AddReturnMessage(err.Error()) - return vmcommon.UserError - } - + e.eei.Transfer(args.CallerAddr, e.esdtSCAddress, big.NewInt(0), []byte(esdtTransferData), 0) e.eei.AddReturnMessage("token is not burnable") if e.enableEpochsHandler.IsFlagEnabled(common.MultiClaimOnDelegationFlag) { return vmcommon.UserError @@ -918,11 +909,7 @@ func (e *esdt) mint(args *vmcommon.ContractCallInput) vmcommon.ReturnCode { } esdtTransferData := core.BuiltInFunctionESDTTransfer + "@" + hex.EncodeToString(args.Arguments[0]) + "@" + hex.EncodeToString(mintValue.Bytes()) - err = e.eei.Transfer(destination, e.eSDTSCAddress, big.NewInt(0), []byte(esdtTransferData), 0) - if err != nil { - e.eei.AddReturnMessage(err.Error()) - return vmcommon.UserError - } + e.eei.Transfer(destination, e.esdtSCAddress, big.NewInt(0), []byte(esdtTransferData), 0) return vmcommon.Ok } @@ -947,11 +934,7 @@ func (e *esdt) toggleFreeze(args *vmcommon.ContractCallInput, builtInFunc string } esdtTransferData := builtInFunc + "@" + hex.EncodeToString(args.Arguments[0]) - err := e.eei.Transfer(args.Arguments[1], e.eSDTSCAddress, big.NewInt(0), []byte(esdtTransferData), 0) - if err != nil { - e.eei.AddReturnMessage(err.Error()) - return vmcommon.UserError - } + e.eei.Transfer(args.Arguments[1], e.esdtSCAddress, big.NewInt(0), []byte(esdtTransferData), 0) return vmcommon.Ok } @@ -997,11 +980,7 @@ func (e *esdt) toggleFreezeSingleNFT(args *vmcommon.ContractCallInput, builtInFu composedArg := append(args.Arguments[0], args.Arguments[1]...) 
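// Note: composedArg is the token identifier (args.Arguments[0]) followed by the NFT nonce bytes (args.Arguments[1]),
// so the built-in call data below becomes <builtInFunc>@hex(tokenID||nonce), transferred with zero value to the address in args.Arguments[2].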
esdtTransferData := builtInFunc + "@" + hex.EncodeToString(composedArg) - err := e.eei.Transfer(args.Arguments[2], e.eSDTSCAddress, big.NewInt(0), []byte(esdtTransferData), 0) - if err != nil { - e.eei.AddReturnMessage(err.Error()) - return vmcommon.UserError - } + e.eei.Transfer(args.Arguments[2], e.esdtSCAddress, big.NewInt(0), []byte(esdtTransferData), 0) return vmcommon.Ok } @@ -1027,14 +1006,10 @@ func (e *esdt) wipeTokenFromAddress( } esdtTransferData := core.BuiltInFunctionESDTWipe + "@" + hex.EncodeToString(wipeArgument) - err := e.eei.Transfer(address, e.eSDTSCAddress, big.NewInt(0), []byte(esdtTransferData), 0) - if err != nil { - e.eei.AddReturnMessage(err.Error()) - return vmcommon.UserError - } + e.eei.Transfer(address, e.esdtSCAddress, big.NewInt(0), []byte(esdtTransferData), 0) token.NumWiped++ - err = e.saveToken(tokenID, token) + err := e.saveToken(tokenID, token) if err != nil { e.eei.AddReturnMessage(err.Error()) return vmcommon.UserError @@ -1096,7 +1071,7 @@ func (e *esdt) togglePause(args *vmcommon.ContractCallInput, builtInFunc string) } esdtTransferData := builtInFunc + "@" + hex.EncodeToString(args.Arguments[0]) - e.eei.SendGlobalSettingToAll(e.eSDTSCAddress, []byte(esdtTransferData)) + e.eei.SendGlobalSettingToAll(e.esdtSCAddress, []byte(esdtTransferData)) logEntry := &vmcommon.LogEntry{ Identifier: []byte(builtInFunc), @@ -1130,7 +1105,7 @@ func (e *esdt) saveTokenAndSendForAll(token *ESDTDataV2, tokenID []byte, builtIn } esdtTransferData := builtInCall + "@" + hex.EncodeToString(tokenID) - e.eei.SendGlobalSettingToAll(e.eSDTSCAddress, []byte(esdtTransferData)) + e.eei.SendGlobalSettingToAll(e.esdtSCAddress, []byte(esdtTransferData)) return vmcommon.Ok } @@ -1199,7 +1174,7 @@ func (e *esdt) addBurnRoleAndSendToAllShards(token *ESDTDataV2, tokenID []byte) e.eei.AddLogEntry(logEntry) esdtTransferData := vmcommon.BuiltInFunctionESDTSetBurnRoleForAll + "@" + hex.EncodeToString(tokenID) - e.eei.SendGlobalSettingToAll(e.eSDTSCAddress, []byte(esdtTransferData)) + e.eei.SendGlobalSettingToAll(e.esdtSCAddress, []byte(esdtTransferData)) } func (e *esdt) configChange(args *vmcommon.ContractCallInput) vmcommon.ReturnCode { @@ -1283,11 +1258,7 @@ func (e *esdt) claim(args *vmcommon.ContractCallInput) vmcommon.ReturnCode { } scBalance := e.eei.GetBalance(args.RecipientAddr) - err = e.eei.Transfer(args.CallerAddr, args.RecipientAddr, scBalance, nil, 0) - if err != nil { - e.eei.AddReturnMessage(err.Error()) - return vmcommon.UserError - } + e.eei.Transfer(args.CallerAddr, args.RecipientAddr, scBalance, nil, 0) return vmcommon.Ok } @@ -1363,9 +1334,11 @@ func (e *esdt) getSpecialRoles(args *vmcommon.ContractCallInput) vmcommon.Return rolesAsString = append(rolesAsString, string(role)) } - specialRoleAddress := e.addressPubKeyConverter.SilentEncode(specialRole.Address, log) - roles := strings.Join(rolesAsString, ",") + + specialRoleAddress, errEncode := e.addressPubKeyConverter.Encode(specialRole.Address) + e.treatEncodeErrorForGetSpecialRoles(errEncode, rolesAsString, specialRole.Address) + message := fmt.Sprintf("%s:%s", specialRoleAddress, roles) e.eei.Finish([]byte(message)) } @@ -1373,6 +1346,25 @@ func (e *esdt) getSpecialRoles(args *vmcommon.ContractCallInput) vmcommon.Return return vmcommon.Ok } +func (e *esdt) treatEncodeErrorForGetSpecialRoles(err error, roles []string, address []byte) { + if err == nil { + return + } + + logLevel := logger.LogTrace + for _, role := range roles { + if role != vmcommon.ESDTRoleBurnForAll { + logLevel = logger.LogWarning + break + } + } + 
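+	// At this point encoding the special-role address has failed; when every role of that address is
+	// ESDTRoleBurnForAll (e.g. an empty, non-encodable address) the failure is expected and logged at
+	// trace level, otherwise it is reported as a warning.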
+ log.Log(logLevel, "esdt.treatEncodeErrorForGetSpecialRoles", + "hex specialRole.Address", hex.EncodeToString(address), + "roles", strings.Join(roles, ", "), + "error", err) +} + func (e *esdt) basicOwnershipChecks(args *vmcommon.ContractCallInput) (*ESDTDataV2, vmcommon.ReturnCode) { if args.CallValue.Cmp(zero) != 0 { e.eei.AddReturnMessage("callValue must be 0") @@ -1641,11 +1633,7 @@ func (e *esdt) changeToMultiShardCreate(args *vmcommon.ContractCallInput) vmcomm isAddressLastByteZero := addressWithCreateRole[len(addressWithCreateRole)-1] == 0 if !isAddressLastByteZero { multiCreateRoleOnly := [][]byte{[]byte(core.ESDTRoleNFTCreateMultiShard)} - err = e.sendRoleChangeData(args.Arguments[0], addressWithCreateRole, multiCreateRoleOnly, core.BuiltInFunctionSetESDTRole) - if err != nil { - e.eei.AddReturnMessage(err.Error()) - return vmcommon.UserError - } + e.sendRoleChangeData(args.Arguments[0], addressWithCreateRole, multiCreateRoleOnly, core.BuiltInFunctionSetESDTRole) } err = e.saveToken(args.Arguments[0], token) @@ -1728,16 +1716,13 @@ func (e *esdt) prepareAndSendRoleChangeData( if properties.isMultiShardNFTCreateSet { allRoles = append(allRoles, []byte(core.ESDTRoleNFTCreateMultiShard)) } - err := e.sendRoleChangeData(tokenID, address, allRoles, core.BuiltInFunctionSetESDTRole) - if err != nil { - e.eei.AddReturnMessage(err.Error()) - return vmcommon.UserError - } + e.sendRoleChangeData(tokenID, address, allRoles, core.BuiltInFunctionSetESDTRole) + isTransferRoleDefinedInArgs := isDefinedRoleInArgs(roles, []byte(core.ESDTRoleTransfer)) - firstTransferRoleSet := !properties.transferRoleExists && isTransferRoleDefinedInArgs + firstTransferRoleSet := !properties.transferRoleExists && isDefinedRoleInArgs(roles, []byte(core.ESDTRoleTransfer)) if firstTransferRoleSet { esdtTransferData := core.BuiltInFunctionESDTSetLimitedTransfer + "@" + hex.EncodeToString(tokenID) - e.eei.SendGlobalSettingToAll(e.eSDTSCAddress, []byte(esdtTransferData)) + e.eei.SendGlobalSettingToAll(e.esdtSCAddress, []byte(esdtTransferData)) } if isTransferRoleDefinedInArgs { @@ -1840,12 +1825,7 @@ func (e *esdt) unSetSpecialRole(args *vmcommon.ContractCallInput) vmcommon.Retur esdtRole.Roles = esdtRole.Roles[:len(esdtRole.Roles)-1] } - err := e.sendRoleChangeData(args.Arguments[0], args.Arguments[1], args.Arguments[2:], core.BuiltInFunctionUnSetESDTRole) - if err != nil { - e.eei.AddReturnMessage(err.Error()) - return vmcommon.UserError - } - + e.sendRoleChangeData(args.Arguments[0], args.Arguments[1], args.Arguments[2:], core.BuiltInFunctionUnSetESDTRole) if len(esdtRole.Roles) == 0 { for i, roles := range token.SpecialRoles { if bytes.Equal(roles.Address, address) { @@ -1863,14 +1843,14 @@ func (e *esdt) unSetSpecialRole(args *vmcommon.ContractCallInput) vmcommon.Retur lastTransferRoleWasDeleted := isTransferRoleInArgs && !transferRoleExists if lastTransferRoleWasDeleted { esdtTransferData := core.BuiltInFunctionESDTUnSetLimitedTransfer + "@" + hex.EncodeToString(args.Arguments[0]) - e.eei.SendGlobalSettingToAll(e.eSDTSCAddress, []byte(esdtTransferData)) + e.eei.SendGlobalSettingToAll(e.esdtSCAddress, []byte(esdtTransferData)) } if isTransferRoleInArgs { e.deleteTransferRoleAddressFromSystemAccount(args.Arguments[0], address) } - err = e.saveToken(args.Arguments[0], token) + err := e.saveToken(args.Arguments[0], token) if err != nil { e.eei.AddReturnMessage(err.Error()) return vmcommon.UserError @@ -1886,7 +1866,7 @@ func (e *esdt) sendNewTransferRoleAddressToSystemAccount(token []byte, address [ } esdtTransferData 
:= vmcommon.BuiltInFunctionESDTTransferRoleAddAddress + "@" + hex.EncodeToString(token) + "@" + hex.EncodeToString(address) - e.eei.SendGlobalSettingToAll(e.eSDTSCAddress, []byte(esdtTransferData)) + e.eei.SendGlobalSettingToAll(e.esdtSCAddress, []byte(esdtTransferData)) } func (e *esdt) deleteTransferRoleAddressFromSystemAccount(token []byte, address []byte) { @@ -1896,7 +1876,7 @@ func (e *esdt) deleteTransferRoleAddressFromSystemAccount(token []byte, address } esdtTransferData := vmcommon.BuiltInFunctionESDTTransferRoleDeleteAddress + "@" + hex.EncodeToString(token) + "@" + hex.EncodeToString(address) - e.eei.SendGlobalSettingToAll(e.eSDTSCAddress, []byte(esdtTransferData)) + e.eei.SendGlobalSettingToAll(e.esdtSCAddress, []byte(esdtTransferData)) } func (e *esdt) sendAllTransferRoleAddresses(args *vmcommon.ContractCallInput) vmcommon.ReturnCode { @@ -1932,7 +1912,7 @@ func (e *esdt) sendAllTransferRoleAddresses(args *vmcommon.ContractCallInput) vm return vmcommon.UserError } - e.eei.SendGlobalSettingToAll(e.eSDTSCAddress, []byte(esdtTransferData)) + e.eei.SendGlobalSettingToAll(e.esdtSCAddress, []byte(esdtTransferData)) return vmcommon.Ok } @@ -2022,11 +2002,7 @@ func (e *esdt) transferNFTCreateRole(args *vmcommon.ContractCallInput) vmcommon. esdtTransferNFTCreateData := core.BuiltInFunctionESDTNFTCreateRoleTransfer + "@" + hex.EncodeToString(args.Arguments[0]) + "@" + hex.EncodeToString(args.Arguments[2]) - err = e.eei.Transfer(args.Arguments[1], e.eSDTSCAddress, big.NewInt(0), []byte(esdtTransferNFTCreateData), 0) - if err != nil { - e.eei.AddReturnMessage(err.Error()) - return vmcommon.UserError - } + e.eei.Transfer(args.Arguments[1], e.esdtSCAddress, big.NewInt(0), []byte(esdtTransferNFTCreateData), 0) return vmcommon.Ok } @@ -2064,24 +2040,19 @@ func (e *esdt) stopNFTCreateForever(args *vmcommon.ContractCallInput) vmcommon.R } for _, currentOwner := range currentOwners { - err = e.sendRoleChangeData(args.Arguments[0], currentOwner, [][]byte{[]byte(core.ESDTRoleNFTCreate)}, core.BuiltInFunctionUnSetESDTRole) - if err != nil { - e.eei.AddReturnMessage(err.Error()) - return vmcommon.UserError - } + e.sendRoleChangeData(args.Arguments[0], currentOwner, [][]byte{[]byte(core.ESDTRoleNFTCreate)}, core.BuiltInFunctionUnSetESDTRole) } return vmcommon.Ok } -func (e *esdt) sendRoleChangeData(tokenID []byte, destination []byte, roles [][]byte, builtInFunc string) error { +func (e *esdt) sendRoleChangeData(tokenID []byte, destination []byte, roles [][]byte, builtInFunc string) { esdtSetRoleData := builtInFunc + "@" + hex.EncodeToString(tokenID) for _, arg := range roles { esdtSetRoleData += "@" + hex.EncodeToString(arg) } - err := e.eei.Transfer(destination, e.eSDTSCAddress, big.NewInt(0), []byte(esdtSetRoleData), 0) - return err + e.eei.Transfer(destination, e.esdtSCAddress, big.NewInt(0), []byte(esdtSetRoleData), 0) } func (e *esdt) getAllAddressesAndRoles(args *vmcommon.ContractCallInput) vmcommon.ReturnCode { diff --git a/vm/systemSmartContracts/esdt_test.go b/vm/systemSmartContracts/esdt_test.go index 050d5cc452d..0504527efb6 100644 --- a/vm/systemSmartContracts/esdt_test.go +++ b/vm/systemSmartContracts/esdt_test.go @@ -794,7 +794,7 @@ func TestEsdt_ExecuteMintInvalidDestinationAddressShouldFail(t *testing.T) { assert.True(t, strings.Contains(eei.returnMessage, "destination address of invalid length")) } -func TestEsdt_ExecuteMintTransferFailsShouldErr(t *testing.T) { +func TestEsdt_ExecuteMintTransferNoErr(t *testing.T) { t.Parallel() err := errors.New("transfer error") @@ -807,9 +807,6 
@@ func TestEsdt_ExecuteMintTransferFailsShouldErr(t *testing.T) { }) return marshalizedData } - args.Eei.(*mock.SystemEIStub).TransferCalled = func(destination []byte, sender []byte, value *big.Int, input []byte) error { - return err - } args.Eei.(*mock.SystemEIStub).AddReturnMessageCalled = func(msg string) { assert.Equal(t, err.Error(), msg) } @@ -818,7 +815,7 @@ func TestEsdt_ExecuteMintTransferFailsShouldErr(t *testing.T) { vmInput := getDefaultVmInputForFunc("mint", [][]byte{[]byte("esdtToken"), {200}}) output := e.Execute(vmInput) - assert.Equal(t, vmcommon.UserError, output) + assert.Equal(t, vmcommon.Ok, output) } func TestEsdt_ExecuteMintWithTwoArgsShouldSetOwnerAsDestination(t *testing.T) { @@ -1080,7 +1077,7 @@ func TestEsdt_ExecuteToggleFreezeNonFreezableTokenShouldFail(t *testing.T) { assert.True(t, strings.Contains(eei.returnMessage, "cannot freeze")) } -func TestEsdt_ExecuteToggleFreezeTransferFailsShouldErr(t *testing.T) { +func TestEsdt_ExecuteToggleFreezeTransferNoErr(t *testing.T) { t.Parallel() err := errors.New("transfer error") @@ -1092,9 +1089,6 @@ func TestEsdt_ExecuteToggleFreezeTransferFailsShouldErr(t *testing.T) { }) return marshalizedData } - args.Eei.(*mock.SystemEIStub).TransferCalled = func(destination []byte, sender []byte, value *big.Int, input []byte) error { - return err - } args.Eei.(*mock.SystemEIStub).AddReturnMessageCalled = func(msg string) { assert.Equal(t, err.Error(), msg) } @@ -1103,10 +1097,10 @@ func TestEsdt_ExecuteToggleFreezeTransferFailsShouldErr(t *testing.T) { vmInput := getDefaultVmInputForFunc("freeze", [][]byte{[]byte("esdtToken"), getAddress()}) output := e.Execute(vmInput) - assert.Equal(t, vmcommon.UserError, output) + assert.Equal(t, vmcommon.Ok, output) } -func TestEsdt_ExecuteToggleFreezeSingleNFTTransferFailsShouldErr(t *testing.T) { +func TestEsdt_ExecuteToggleFreezeSingleNFTTransferNoErr(t *testing.T) { t.Parallel() err := errors.New("transfer error") @@ -1119,9 +1113,6 @@ func TestEsdt_ExecuteToggleFreezeSingleNFTTransferFailsShouldErr(t *testing.T) { }) return marshalizedData } - args.Eei.(*mock.SystemEIStub).TransferCalled = func(destination []byte, sender []byte, value *big.Int, input []byte) error { - return err - } args.Eei.(*mock.SystemEIStub).AddReturnMessageCalled = func(msg string) { assert.Equal(t, err.Error(), msg) } @@ -1130,7 +1121,7 @@ func TestEsdt_ExecuteToggleFreezeSingleNFTTransferFailsShouldErr(t *testing.T) { vmInput := getDefaultVmInputForFunc("freezeSingleNFT", [][]byte{[]byte("esdtToken"), big.NewInt(10).Bytes(), getAddress()}) output := e.Execute(vmInput) - assert.Equal(t, vmcommon.UserError, output) + assert.Equal(t, vmcommon.Ok, output) } func TestEsdt_ExecuteToggleFreezeShouldWorkWithRealBech32Address(t *testing.T) { @@ -1566,7 +1557,7 @@ func TestEsdt_ExecuteWipeInvalidDestShouldFail(t *testing.T) { assert.True(t, strings.Contains(eei.returnMessage, "invalid")) } -func TestEsdt_ExecuteWipeTransferFailsShouldErr(t *testing.T) { +func TestEsdt_ExecuteWipeTransferFailsNoErr(t *testing.T) { t.Parallel() err := errors.New("transfer error") @@ -1579,9 +1570,6 @@ func TestEsdt_ExecuteWipeTransferFailsShouldErr(t *testing.T) { }) return marshalizedData } - args.Eei.(*mock.SystemEIStub).TransferCalled = func(destination []byte, sender []byte, value *big.Int, input []byte) error { - return err - } args.Eei.(*mock.SystemEIStub).AddReturnMessageCalled = func(msg string) { assert.Equal(t, err.Error(), msg) } @@ -1590,10 +1578,10 @@ func TestEsdt_ExecuteWipeTransferFailsShouldErr(t *testing.T) { vmInput := 
getDefaultVmInputForFunc("wipe", [][]byte{[]byte("esdtToken"), getAddress()}) output := e.Execute(vmInput) - assert.Equal(t, vmcommon.UserError, output) + assert.Equal(t, vmcommon.Ok, output) } -func TestEsdt_ExecuteWipeSingleNFTTransferFailsShouldErr(t *testing.T) { +func TestEsdt_ExecuteWipeSingleNFTTransferNoErr(t *testing.T) { t.Parallel() err := errors.New("transfer error") @@ -1606,9 +1594,6 @@ func TestEsdt_ExecuteWipeSingleNFTTransferFailsShouldErr(t *testing.T) { }) return marshalizedData } - args.Eei.(*mock.SystemEIStub).TransferCalled = func(destination []byte, sender []byte, value *big.Int, input []byte) error { - return err - } args.Eei.(*mock.SystemEIStub).AddReturnMessageCalled = func(msg string) { assert.Equal(t, err.Error(), msg) } @@ -1617,7 +1602,7 @@ func TestEsdt_ExecuteWipeSingleNFTTransferFailsShouldErr(t *testing.T) { vmInput := getDefaultVmInputForFunc("wipeSingleNFT", [][]byte{[]byte("esdtToken"), big.NewInt(10).Bytes(), getAddress()}) output := e.Execute(vmInput) - assert.Equal(t, vmcommon.UserError, output) + assert.Equal(t, vmcommon.Ok, output) } func TestEsdt_ExecuteWipeShouldWork(t *testing.T) { @@ -2559,6 +2544,63 @@ func TestEsdt_GetSpecialRolesShouldWork(t *testing.T) { assert.Equal(t, []byte("erd1e7n8rzxdtl2n2fl6mrsg4l7stp2elxhfy6l9p7eeafspjhhrjq7qk05usw:ESDTRoleNFTAddQuantity,ESDTRoleNFTCreate,ESDTRoleNFTBurn"), eei.output[1]) } +func TestEsdt_GetSpecialRolesWithEmptyAddressShouldWork(t *testing.T) { + t.Parallel() + + tokenName := []byte("esdtToken") + args := createMockArgumentsForESDT() + eei := createDefaultEei() + args.Eei = eei + + addr := "" + addrBytes, _ := testscommon.RealWorldBech32PubkeyConverter.Decode(addr) + + specialRoles := []*ESDTRoles{ + { + Address: addrBytes, + Roles: [][]byte{ + []byte(core.ESDTRoleLocalMint), + []byte(core.ESDTRoleLocalBurn), + }, + }, + { + Address: addrBytes, + Roles: [][]byte{ + []byte(core.ESDTRoleNFTAddQuantity), + []byte(core.ESDTRoleNFTCreate), + []byte(core.ESDTRoleNFTBurn), + }, + }, + { + Address: addrBytes, + Roles: [][]byte{ + []byte(vmcommon.ESDTRoleBurnForAll), + }, + }, + } + tokensMap := map[string][]byte{} + marshalizedData, _ := args.Marshalizer.Marshal(ESDTDataV2{ + SpecialRoles: specialRoles, + }) + tokensMap[string(tokenName)] = marshalizedData + eei.storageUpdate[string(eei.scAddress)] = tokensMap + args.Eei = eei + + args.AddressPubKeyConverter = testscommon.RealWorldBech32PubkeyConverter + + e, _ := NewESDTSmartContract(args) + + eei.output = make([][]byte, 0) + vmInput := getDefaultVmInputForFunc("getSpecialRoles", [][]byte{[]byte("esdtToken")}) + output := e.Execute(vmInput) + assert.Equal(t, vmcommon.Ok, output) + + assert.Equal(t, 3, len(eei.output)) + assert.Equal(t, []byte(":ESDTRoleLocalMint,ESDTRoleLocalBurn"), eei.output[0]) + assert.Equal(t, []byte(":ESDTRoleNFTAddQuantity,ESDTRoleNFTCreate,ESDTRoleNFTBurn"), eei.output[1]) + assert.Equal(t, []byte(":ESDTRoleBurnForAll"), eei.output[2]) +} + func TestEsdt_UnsetSpecialRoleWithRemoveEntryFromSpecialRoles(t *testing.T) { t.Parallel() @@ -2760,7 +2802,6 @@ func TestEsdt_SetSpecialRoleCheckBasicOwnershipErr(t *testing.T) { func TestEsdt_SetSpecialRoleNewSendRoleChangeDataErr(t *testing.T) { t.Parallel() - localErr := errors.New("local err") args := createMockArgumentsForESDT() eei := &mock.SystemEIStub{ GetStorageCalled: func(key []byte) []byte { @@ -2770,9 +2811,8 @@ func TestEsdt_SetSpecialRoleNewSendRoleChangeDataErr(t *testing.T) { tokenBytes, _ := args.Marshalizer.Marshal(token) return tokenBytes }, - TransferCalled: 
func(destination []byte, sender []byte, value *big.Int, input []byte) error { + TransferCalled: func(destination []byte, sender []byte, value *big.Int, input []byte, _ uint64) { require.Equal(t, []byte("ESDTSetRole@6d79546f6b656e@45534454526f6c654c6f63616c4275726e"), input) - return localErr }, } args.Eei = eei @@ -2807,9 +2847,8 @@ func TestEsdt_SetSpecialRoleAlreadyExists(t *testing.T) { tokenBytes, _ := args.Marshalizer.Marshal(token) return tokenBytes }, - TransferCalled: func(destination []byte, sender []byte, value *big.Int, input []byte) error { + TransferCalled: func(destination []byte, sender []byte, value *big.Int, input []byte, _ uint64) { require.Equal(t, []byte("ESDTSetRole@6d79546f6b656e@45534454526f6c654c6f63616c4275726e"), input) - return nil }, } args.Eei = eei @@ -2846,11 +2885,10 @@ func TestEsdt_SetSpecialRoleCannotSaveToken(t *testing.T) { tokenBytes, _ := args.Marshalizer.Marshal(token) return tokenBytes }, - TransferCalled: func(destination []byte, sender []byte, value *big.Int, input []byte) error { + TransferCalled: func(destination []byte, sender []byte, value *big.Int, input []byte, _ uint64) { require.Equal(t, []byte("ESDTSetRole@6d79546f6b656e@45534454526f6c654c6f63616c4275726e"), input) castedMarshalizer := args.Marshalizer.(*mock.MarshalizerMock) castedMarshalizer.Fail = true - return nil }, } args.Eei = eei @@ -2887,9 +2925,8 @@ func TestEsdt_SetSpecialRoleShouldWork(t *testing.T) { tokenBytes, _ := args.Marshalizer.Marshal(token) return tokenBytes }, - TransferCalled: func(destination []byte, sender []byte, value *big.Int, input []byte) error { + TransferCalled: func(destination []byte, sender []byte, value *big.Int, input []byte, _ uint64) { require.Equal(t, []byte("ESDTSetRole@6d79546f6b656e@45534454526f6c654c6f63616c4275726e"), input) - return nil }, SetStorageCalled: func(key []byte, value []byte) { token := &ESDTDataV2{} @@ -2931,9 +2968,8 @@ func TestEsdt_SetSpecialRoleNFTShouldErr(t *testing.T) { tokenBytes, _ := args.Marshalizer.Marshal(token) return tokenBytes }, - TransferCalled: func(destination []byte, sender []byte, value *big.Int, input []byte) error { + TransferCalled: func(destination []byte, sender []byte, value *big.Int, input []byte, _ uint64) { require.Equal(t, []byte("ESDTSetRole@6d79546f6b656e@45534454526f6c654e4654437265617465"), input) - return nil }, SetStorageCalled: func(key []byte, value []byte) { token := &ESDTDataV2{} @@ -3238,9 +3274,8 @@ func TestEsdt_SetSpecialRoleSFTShouldErr(t *testing.T) { tokenBytes, _ := args.Marshalizer.Marshal(token) return tokenBytes }, - TransferCalled: func(destination []byte, sender []byte, value *big.Int, input []byte) error { + TransferCalled: func(destination []byte, sender []byte, value *big.Int, input []byte, _ uint64) { require.Equal(t, []byte("ESDTSetRole@6d79546f6b656e@45534454526f6c654e46544164645175616e74697479"), input) - return nil }, SetStorageCalled: func(key []byte, value []byte) { token := &ESDTDataV2{} @@ -3509,10 +3544,9 @@ func TestEsdt_UnsetSpecialRoleCannotRemoveRoleNotExistsShouldErr(t *testing.T) { require.Equal(t, vmcommon.UserError, retCode) } -func TestEsdt_UnsetSpecialRoleRemoveRoleTransferErr(t *testing.T) { +func TestEsdt_UnsetSpecialRoleRemoveRoleTransfer(t *testing.T) { t.Parallel() - localErr := errors.New("local err") args := createMockArgumentsForESDT() eei := &mock.SystemEIStub{ GetStorageCalled: func(key []byte) []byte { @@ -3528,9 +3562,8 @@ func TestEsdt_UnsetSpecialRoleRemoveRoleTransferErr(t *testing.T) { tokenBytes, _ := args.Marshalizer.Marshal(token) 
return tokenBytes }, - TransferCalled: func(destination []byte, sender []byte, value *big.Int, input []byte) error { + TransferCalled: func(destination []byte, sender []byte, value *big.Int, input []byte, _ uint64) { require.Equal(t, []byte("ESDTUnSetRole@6d79546f6b656e@45534454526f6c654c6f63616c4d696e74"), input) - return localErr }, } args.Eei = eei @@ -3544,7 +3577,7 @@ func TestEsdt_UnsetSpecialRoleRemoveRoleTransferErr(t *testing.T) { vmInput.GasProvided = 50000000 retCode := e.Execute(vmInput) - require.Equal(t, vmcommon.UserError, retCode) + require.Equal(t, vmcommon.Ok, retCode) } func TestEsdt_UnsetSpecialRoleRemoveRoleSaveTokenErr(t *testing.T) { @@ -3565,11 +3598,10 @@ func TestEsdt_UnsetSpecialRoleRemoveRoleSaveTokenErr(t *testing.T) { tokenBytes, _ := args.Marshalizer.Marshal(token) return tokenBytes }, - TransferCalled: func(destination []byte, sender []byte, value *big.Int, input []byte) error { + TransferCalled: func(destination []byte, sender []byte, value *big.Int, input []byte, _ uint64) { require.Equal(t, []byte("ESDTUnSetRole@6d79546f6b656e@45534454526f6c654c6f63616c4d696e74"), input) castedMarshalizer := args.Marshalizer.(*mock.MarshalizerMock) castedMarshalizer.Fail = true - return nil }, } args.Eei = eei @@ -3604,9 +3636,8 @@ func TestEsdt_UnsetSpecialRoleRemoveRoleShouldWork(t *testing.T) { tokenBytes, _ := args.Marshalizer.Marshal(token) return tokenBytes }, - TransferCalled: func(destination []byte, sender []byte, value *big.Int, input []byte) error { + TransferCalled: func(destination []byte, sender []byte, value *big.Int, input []byte, _ uint64) { require.Equal(t, []byte("ESDTUnSetRole@6d79546f6b656e@45534454526f6c654c6f63616c4d696e74"), input) - return nil }, SetStorageCalled: func(key []byte, value []byte) { token := &ESDTDataV2{} @@ -3713,9 +3744,8 @@ func TestEsdt_StopNFTCreateForeverCallShouldWork(t *testing.T) { tokenBytes, _ := args.Marshalizer.Marshal(token) return tokenBytes }, - TransferCalled: func(destination []byte, sender []byte, value *big.Int, input []byte) error { + TransferCalled: func(destination []byte, sender []byte, value *big.Int, input []byte, _ uint64) { require.Equal(t, []byte("ESDTUnSetRole@746f6b656e4944@45534454526f6c654e4654437265617465"), input) - return nil }, } args.Eei = eei @@ -3825,10 +3855,9 @@ func TestEsdt_TransferNFTCreateCallShouldWork(t *testing.T) { tokenBytes, _ := args.Marshalizer.Marshal(token) return tokenBytes }, - TransferCalled: func(destination []byte, sender []byte, value *big.Int, input []byte) error { + TransferCalled: func(destination []byte, sender []byte, value *big.Int, input []byte, _ uint64) { require.Equal(t, []byte("ESDTNFTCreateRoleTransfer@746f6b656e4944@63616c6c657232"), input) require.Equal(t, destination, []byte("caller3")) - return nil }, } args.Eei = eei @@ -3863,11 +3892,6 @@ func TestEsdt_TransferNFTCreateCallMultiShardShouldWork(t *testing.T) { tokenBytes, _ := args.Marshalizer.Marshal(token) return tokenBytes }, - TransferCalled: func(destination []byte, sender []byte, value *big.Int, input []byte) error { - require.Equal(t, []byte("ESDTNFTCreateRoleTransfer@746f6b656e4944@3263616c6c6572"), input) - require.Equal(t, destination, []byte("3caller")) - return nil - }, } args.Eei = eei diff --git a/vm/systemSmartContracts/governance.go b/vm/systemSmartContracts/governance.go index 042df1bc204..ae3f080c636 100644 --- a/vm/systemSmartContracts/governance.go +++ b/vm/systemSmartContracts/governance.go @@ -648,11 +648,7 @@ func (g *governanceContract) closeProposal(args 
*vmcommon.ContractCallInput) vmc g.addToAccumulatedFees(baseConfig.LostProposalFee) } - err = g.eei.Transfer(args.CallerAddr, args.RecipientAddr, tokensToReturn, nil, 0) - if err != nil { - g.eei.AddReturnMessage(err.Error()) - return vmcommon.UserError - } + g.eei.Transfer(args.CallerAddr, args.RecipientAddr, tokensToReturn, nil, 0) logEntry := &vmcommon.LogEntry{ Identifier: []byte(args.Function), @@ -701,12 +697,7 @@ func (g *governanceContract) claimAccumulatedFees(args *vmcommon.ContractCallInp accumulatedFees := g.getAccumulatedFees() g.setAccumulatedFees(big.NewInt(0)) - err = g.eei.Transfer(args.CallerAddr, args.RecipientAddr, accumulatedFees, nil, 0) - if err != nil { - g.eei.AddReturnMessage(err.Error()) - return vmcommon.UserError - } - + g.eei.Transfer(args.CallerAddr, args.RecipientAddr, accumulatedFees, nil, 0) return vmcommon.Ok } diff --git a/vm/systemSmartContracts/governance_test.go b/vm/systemSmartContracts/governance_test.go index e451d090f70..387e16b33fb 100644 --- a/vm/systemSmartContracts/governance_test.go +++ b/vm/systemSmartContracts/governance_test.go @@ -366,6 +366,101 @@ func TestGovernanceContract_ChangeConfig(t *testing.T) { require.Equal(t, vmcommon.Ok, retCode) } +func TestGovernanceContract_ValidatorVoteInvalidDelegated(t *testing.T) { + t.Parallel() + + returnMessage := "" + errInvalidVoteSubstr := "invalid delegator address" + callerAddress := vm.FirstDelegationSCAddress + proposalIdentifier := bytes.Repeat([]byte("a"), commitHashLength) + + args := createMockGovernanceArgs() + + generalProposal := &GeneralProposal{ + CommitHash: proposalIdentifier, + StartVoteEpoch: 10, + EndVoteEpoch: 15, + } + args.Eei = &mock.SystemEIStub{ + GetStorageCalled: func(key []byte) []byte { + if bytes.Equal(key, append([]byte(proposalPrefix), proposalIdentifier...)) { + proposalBytes, _ := args.Marshalizer.Marshal(generalProposal) + return proposalBytes + } + + return nil + }, + BlockChainHookCalled: func() vm.BlockchainHook { + return &mock.BlockChainHookStub{ + CurrentNonceCalled: func() uint64 { + return 14 + }, + } + }, + AddReturnMessageCalled: func(msg string) { + returnMessage = msg + }, + } + voteArgs := [][]byte{ + proposalIdentifier, + []byte("yes"), + []byte("delegatedToWrongAddress"), + big.NewInt(1000).Bytes(), + } + + gsc, _ := NewGovernanceContract(args) + callInput := createVMInput(big.NewInt(0), "delegateVote", callerAddress, vm.GovernanceSCAddress, voteArgs) + retCode := gsc.Execute(callInput) + require.Equal(t, vmcommon.UserError, retCode) + require.Contains(t, returnMessage, errInvalidVoteSubstr) +} + +func TestGovernanceContract_DelegateVoteUserErrors(t *testing.T) { + t.Parallel() + + gsc, blockchainHook, eei := createGovernanceBlockChainHookStubContextHandler() + blockchainHook.CurrentNonceCalled = func() uint64 { + return 12 + } + + callerAddress := bytes.Repeat([]byte{2}, 32) + proposalIdentifier := []byte("aaaaaaaaa") + generalProposal := &GeneralProposal{ + CommitHash: proposalIdentifier, + StartVoteEpoch: 10, + EndVoteEpoch: 15, + Yes: big.NewInt(0), + No: big.NewInt(0), + Veto: big.NewInt(0), + Abstain: big.NewInt(0), + } + + voteArgs := [][]byte{ + []byte("1"), + []byte("yes"), + } + gsc.eei.SetStorage(voteArgs[0], proposalIdentifier) + _ = gsc.saveGeneralProposal(proposalIdentifier, generalProposal) + + callInput := createVMInput(big.NewInt(0), "delegateVote", callerAddress, vm.GovernanceSCAddress, voteArgs) + retCode := gsc.Execute(callInput) + require.Equal(t, vmcommon.UserError, retCode) + require.Equal(t, eei.GetReturnMessage(), "invalid 
number of arguments") + + callInput.Arguments = append(callInput.Arguments, []byte{1}, []byte{2}) + callInput.CallValue = big.NewInt(10) + retCode = gsc.Execute(callInput) + require.Equal(t, vmcommon.UserError, retCode) + require.True(t, strings.Contains(eei.GetReturnMessage(), "function is not payable")) + + callInput.CallValue = big.NewInt(0) + callInput.GasProvided = 0 + gsc.gasCost.MetaChainSystemSCsCost.DelegateVote = 10 + retCode = gsc.Execute(callInput) + require.Equal(t, vmcommon.OutOfGas, retCode) + require.True(t, strings.Contains(eei.GetReturnMessage(), "not enough gas")) +} + func TestGovernanceContract_ChangeConfigWrongCaller(t *testing.T) { t.Parallel() @@ -827,52 +922,6 @@ func TestGovernanceContract_VoteTwice(t *testing.T) { require.Equal(t, eei.GetReturnMessage(), "double vote is not allowed") } -func TestGovernanceContract_DelegateVoteUserErrors(t *testing.T) { - t.Parallel() - - gsc, blockchainHook, eei := createGovernanceBlockChainHookStubContextHandler() - blockchainHook.CurrentNonceCalled = func() uint64 { - return 12 - } - - callerAddress := bytes.Repeat([]byte{2}, 32) - proposalIdentifier := []byte("aaaaaaaaa") - generalProposal := &GeneralProposal{ - CommitHash: proposalIdentifier, - StartVoteEpoch: 10, - EndVoteEpoch: 15, - Yes: big.NewInt(0), - No: big.NewInt(0), - Veto: big.NewInt(0), - Abstain: big.NewInt(0), - } - - voteArgs := [][]byte{ - []byte("1"), - []byte("yes"), - } - gsc.eei.SetStorage(voteArgs[0], proposalIdentifier) - _ = gsc.saveGeneralProposal(proposalIdentifier, generalProposal) - - callInput := createVMInput(big.NewInt(0), "delegateVote", callerAddress, vm.GovernanceSCAddress, voteArgs) - retCode := gsc.Execute(callInput) - require.Equal(t, vmcommon.UserError, retCode) - require.Equal(t, eei.GetReturnMessage(), "invalid number of arguments") - - callInput.Arguments = append(callInput.Arguments, []byte{1}, []byte{2}) - callInput.CallValue = big.NewInt(10) - retCode = gsc.Execute(callInput) - require.Equal(t, vmcommon.UserError, retCode) - require.True(t, strings.Contains(eei.GetReturnMessage(), "function is not payable")) - - callInput.CallValue = big.NewInt(0) - callInput.GasProvided = 0 - gsc.gasCost.MetaChainSystemSCsCost.DelegateVote = 10 - retCode = gsc.Execute(callInput) - require.Equal(t, vmcommon.OutOfGas, retCode) - require.True(t, strings.Contains(eei.GetReturnMessage(), "not enough gas")) -} - func TestGovernanceContract_DelegateVoteMoreErrors(t *testing.T) { t.Parallel() diff --git a/vm/systemSmartContracts/staking.go b/vm/systemSmartContracts/staking.go index 004254ce87b..d450ef73f75 100644 --- a/vm/systemSmartContracts/staking.go +++ b/vm/systemSmartContracts/staking.go @@ -24,8 +24,6 @@ var log = logger.GetOrCreate("vm/systemsmartcontracts") const ownerKey = "owner" const nodesConfigKey = "nodesConfig" -const waitingListHeadKey = "waitingList" -const waitingElementPrefix = "w_" type stakingSC struct { eei vm.SystemEI @@ -60,13 +58,6 @@ type ArgsNewStakingSmartContract struct { EnableEpochsHandler common.EnableEpochsHandler } -type waitingListReturnData struct { - blsKeys [][]byte - stakedDataList []*StakedDataV2_0 - lastKey []byte - afterLastjailed bool -} - // NewStakingSmartContract creates a staking smart contract func NewStakingSmartContract( args ArgsNewStakingSmartContract, @@ -243,6 +234,10 @@ func (s *stakingSC) numSpareNodes() int64 { } func (s *stakingSC) canStake() bool { + if s.enableEpochsHandler.IsFlagEnabled(common.StakingV4StartedFlag) { + return true + } + stakeConfig := s.getConfig() return stakeConfig.StakedNodes 
< stakeConfig.MaxNumNodes } @@ -503,44 +498,6 @@ func (s *stakingSC) stake(args *vmcommon.ContractCallInput, onlyRegister bool) v return vmcommon.Ok } -func (s *stakingSC) processStake(blsKey []byte, registrationData *StakedDataV2_0, addFirst bool) error { - if registrationData.Staked { - return nil - } - - registrationData.RegisterNonce = s.eei.BlockChainHook().CurrentNonce() - if !s.canStake() { - s.eei.AddReturnMessage(fmt.Sprintf("staking is full key put into waiting list %s", hex.EncodeToString(blsKey))) - err := s.addToWaitingList(blsKey, addFirst) - if err != nil { - s.eei.AddReturnMessage("error while adding to waiting") - return err - } - registrationData.Waiting = true - s.eei.Finish([]byte{waiting}) - return nil - } - - err := s.removeFromWaitingList(blsKey) - if err != nil { - s.eei.AddReturnMessage("error while removing from waiting") - return err - } - s.addToStakedNodes(1) - s.activeStakingFor(registrationData) - - return nil -} - -func (s *stakingSC) activeStakingFor(stakingData *StakedDataV2_0) { - stakingData.RegisterNonce = s.eei.BlockChainHook().CurrentNonce() - stakingData.Staked = true - stakingData.StakedNonce = s.eei.BlockChainHook().CurrentNonce() - stakingData.UnStakedEpoch = common.DefaultUnstakedEpoch - stakingData.UnStakedNonce = 0 - stakingData.Waiting = false -} - func (s *stakingSC) unStakeAtEndOfEpoch(args *vmcommon.ContractCallInput) vmcommon.ReturnCode { if !bytes.Equal(args.CallerAddr, s.endOfEpochAccessAddr) { // backward compatibility - no need for return message @@ -573,6 +530,7 @@ func (s *stakingSC) unStakeAtEndOfEpoch(args *vmcommon.ContractCallInput) vmcomm if registrationData.Staked { s.removeFromStakedNodes() } + if registrationData.Waiting { err = s.removeFromWaitingList(args.Arguments[0]) if err != nil { @@ -595,64 +553,94 @@ func (s *stakingSC) unStakeAtEndOfEpoch(args *vmcommon.ContractCallInput) vmcomm return vmcommon.Ok } +func (s *stakingSC) activeStakingFor(stakingData *StakedDataV2_0) { + stakingData.RegisterNonce = s.eei.BlockChainHook().CurrentNonce() + stakingData.Staked = true + stakingData.StakedNonce = s.eei.BlockChainHook().CurrentNonce() + stakingData.UnStakedEpoch = common.DefaultUnstakedEpoch + stakingData.UnStakedNonce = 0 + stakingData.Waiting = false +} + +func (s *stakingSC) processStake(blsKey []byte, registrationData *StakedDataV2_0, addFirst bool) error { + if s.enableEpochsHandler.IsFlagEnabled(common.StakingV4StartedFlag) { + return s.processStakeV2(registrationData) + } + + return s.processStakeV1(blsKey, registrationData, addFirst) +} + +func (s *stakingSC) processStakeV2(registrationData *StakedDataV2_0) error { + if registrationData.Staked { + return nil + } + + registrationData.RegisterNonce = s.eei.BlockChainHook().CurrentNonce() + s.addToStakedNodes(1) + s.activeStakingFor(registrationData) + + return nil +} + func (s *stakingSC) unStake(args *vmcommon.ContractCallInput) vmcommon.ReturnCode { + if s.enableEpochsHandler.IsFlagEnabled(common.StakingV4StartedFlag) { + return s.unStakeV2(args) + } + + return s.unStakeV1(args) +} + +func (s *stakingSC) unStakeV2(args *vmcommon.ContractCallInput) vmcommon.ReturnCode { + registrationData, retCode := s.checkUnStakeArgs(args) + if retCode != vmcommon.Ok { + return retCode + } + + if !registrationData.Staked { + s.eei.AddReturnMessage(vm.ErrWaitingListDisabled.Error()) + return vmcommon.ExecutionFailed + } + + return s.tryUnStake(args.Arguments[0], registrationData) +} + +func (s *stakingSC) checkUnStakeArgs(args *vmcommon.ContractCallInput) (*StakedDataV2_0, 
vmcommon.ReturnCode) { if !bytes.Equal(args.CallerAddr, s.stakeAccessAddr) { s.eei.AddReturnMessage("unStake function not allowed to be called by address " + string(args.CallerAddr)) - return vmcommon.UserError + return nil, vmcommon.UserError } if len(args.Arguments) < 2 { s.eei.AddReturnMessage("not enough arguments, needed BLS key and reward address") - return vmcommon.UserError + return nil, vmcommon.UserError } registrationData, err := s.getOrCreateRegisteredData(args.Arguments[0]) if err != nil { s.eei.AddReturnMessage("cannot get or create registered data: error " + err.Error()) - return vmcommon.UserError + return nil, vmcommon.UserError } if len(registrationData.RewardAddress) == 0 { s.eei.AddReturnMessage("cannot unStake a key that is not registered") - return vmcommon.UserError + return nil, vmcommon.UserError } if !bytes.Equal(args.Arguments[1], registrationData.RewardAddress) { s.eei.AddReturnMessage("unStake possible only from staker caller") - return vmcommon.UserError + return nil, vmcommon.UserError } if s.isNodeJailedOrWithBadRating(registrationData, args.Arguments[0]) { s.eei.AddReturnMessage("cannot unStake node which is jailed or with bad rating") - return vmcommon.UserError + return nil, vmcommon.UserError } if !registrationData.Staked && !registrationData.Waiting { s.eei.AddReturnMessage("cannot unStake node which was already unStaked") - return vmcommon.UserError - } - - if !registrationData.Staked { - registrationData.Waiting = false - err = s.removeFromWaitingList(args.Arguments[0]) - if err != nil { - s.eei.AddReturnMessage(err.Error()) - return vmcommon.UserError - } - err = s.saveStakingData(args.Arguments[0], registrationData) - if err != nil { - s.eei.AddReturnMessage("cannot save staking data: error " + err.Error()) - return vmcommon.UserError - } - - return vmcommon.Ok + return nil, vmcommon.UserError } - addOneFromQueue := !s.enableEpochsHandler.IsFlagEnabled(common.CorrectLastUnJailedFlag) || s.canStakeIfOneRemoved() - if addOneFromQueue { - _, err = s.moveFirstFromWaitingToStaked() - if err != nil { - s.eei.AddReturnMessage(err.Error()) - return vmcommon.UserError - } - } + return registrationData, vmcommon.Ok +} +func (s *stakingSC) tryUnStake(key []byte, registrationData *StakedDataV2_0) vmcommon.ReturnCode { if !s.canUnStake() { s.eei.AddReturnMessage("unStake is not possible as too many left") return vmcommon.UserError @@ -664,7 +652,7 @@ func (s *stakingSC) unStake(args *vmcommon.ContractCallInput) vmcommon.ReturnCod registrationData.UnStakedNonce = s.eei.BlockChainHook().CurrentNonce() registrationData.Waiting = false - err = s.saveStakingData(args.Arguments[0], registrationData) + err := s.saveStakingData(key, registrationData) if err != nil { s.eei.AddReturnMessage("cannot save staking data: error " + err.Error()) return vmcommon.UserError @@ -673,53 +661,6 @@ func (s *stakingSC) unStake(args *vmcommon.ContractCallInput) vmcommon.ReturnCod return vmcommon.Ok } -func (s *stakingSC) moveFirstFromWaitingToStakedIfNeeded(blsKey []byte) (bool, error) { - waitingElementKey := createWaitingListKey(blsKey) - _, err := s.getWaitingListElement(waitingElementKey) - if err == nil { - // node in waiting - remove from it - and that's it - return false, s.removeFromWaitingList(blsKey) - } - - return s.moveFirstFromWaitingToStaked() -} - -func (s *stakingSC) moveFirstFromWaitingToStaked() (bool, error) { - waitingList, err := s.getWaitingListHead() - if err != nil { - return false, err - } - if waitingList.Length == 0 { - return false, nil - } - elementInList, 
err := s.getWaitingListElement(waitingList.FirstKey) - if err != nil { - return false, err - } - err = s.removeFromWaitingList(elementInList.BLSPublicKey) - if err != nil { - return false, err - } - - nodeData, err := s.getOrCreateRegisteredData(elementInList.BLSPublicKey) - if err != nil { - return false, err - } - if len(nodeData.RewardAddress) == 0 || nodeData.Staked { - return false, vm.ErrInvalidWaitingList - } - - nodeData.Waiting = false - nodeData.Staked = true - nodeData.RegisterNonce = s.eei.BlockChainHook().CurrentNonce() - nodeData.StakedNonce = s.eei.BlockChainHook().CurrentNonce() - nodeData.UnStakedNonce = 0 - nodeData.UnStakedEpoch = common.DefaultUnstakedEpoch - - s.addToStakedNodes(1) - return true, s.saveStakingData(elementInList.BLSPublicKey, nodeData) -} - func (s *stakingSC) unBond(args *vmcommon.ContractCallInput) vmcommon.ReturnCode { if !bytes.Equal(args.CallerAddr, s.stakeAccessAddr) { s.eei.AddReturnMessage("unBond function not allowed to be called by address " + string(args.CallerAddr)) @@ -809,751 +750,159 @@ func (s *stakingSC) isStaked(args *vmcommon.ContractCallInput) vmcommon.ReturnCo return vmcommon.UserError } -func (s *stakingSC) addToWaitingList(blsKey []byte, addJailed bool) error { - inWaitingListKey := createWaitingListKey(blsKey) - marshaledData := s.eei.GetStorage(inWaitingListKey) - if len(marshaledData) != 0 { - return nil - } - - waitingList, err := s.getWaitingListHead() - if err != nil { - return err +func (s *stakingSC) tryRemoveJailedNodeFromStaked(registrationData *StakedDataV2_0) { + if !s.enableEpochsHandler.IsFlagEnabled(common.CorrectJailedNotUnStakedEmptyQueueFlag) { + s.removeAndSetUnstaked(registrationData) + return } - waitingList.Length += 1 - if waitingList.Length == 1 { - return s.startWaitingList(waitingList, addJailed, blsKey) + if s.canUnStake() { + s.removeAndSetUnstaked(registrationData) + return } - if addJailed { - return s.insertAfterLastJailed(waitingList, blsKey) - } + s.eei.AddReturnMessage("did not switch as not enough validators remaining") +} - return s.addToEndOfTheList(waitingList, blsKey) +func (s *stakingSC) removeAndSetUnstaked(registrationData *StakedDataV2_0) { + s.removeFromStakedNodes() + registrationData.Staked = false + registrationData.UnStakedEpoch = s.eei.BlockChainHook().CurrentEpoch() + registrationData.UnStakedNonce = s.eei.BlockChainHook().CurrentNonce() + registrationData.StakedNonce = math.MaxUint64 } -func (s *stakingSC) startWaitingList( - waitingList *WaitingList, - addJailed bool, - blsKey []byte, -) error { - inWaitingListKey := createWaitingListKey(blsKey) - waitingList.FirstKey = inWaitingListKey - waitingList.LastKey = inWaitingListKey - if addJailed { - waitingList.LastJailedKey = inWaitingListKey +func (s *stakingSC) updateConfigMinNodes(args *vmcommon.ContractCallInput) vmcommon.ReturnCode { + if !bytes.Equal(args.CallerAddr, s.endOfEpochAccessAddr) { + s.eei.AddReturnMessage("updateConfigMinNodes function not allowed to be called by address " + string(args.CallerAddr)) + return vmcommon.UserError } - elementInWaiting := &ElementInList{ - BLSPublicKey: blsKey, - PreviousKey: waitingList.LastKey, - NextKey: make([]byte, 0), + stakeConfig := s.getConfig() + if len(args.Arguments) != 1 { + s.eei.AddReturnMessage("number of arguments must be 1") + return vmcommon.UserError } - return s.saveElementAndList(inWaitingListKey, elementInWaiting, waitingList) -} - -func (s *stakingSC) addToEndOfTheList(waitingList *WaitingList, blsKey []byte) error { - inWaitingListKey := 
createWaitingListKey(blsKey) - oldLastKey := make([]byte, len(waitingList.LastKey)) - copy(oldLastKey, waitingList.LastKey) - lastElement, err := s.getWaitingListElement(waitingList.LastKey) - if err != nil { - return err - } - lastElement.NextKey = inWaitingListKey - elementInWaiting := &ElementInList{ - BLSPublicKey: blsKey, - PreviousKey: oldLastKey, - NextKey: make([]byte, 0), + newMinNodes := big.NewInt(0).SetBytes(args.Arguments[0]).Int64() + if newMinNodes <= 0 { + s.eei.AddReturnMessage("new minimum number of nodes zero or negative") + return vmcommon.UserError } - err = s.saveWaitingListElement(oldLastKey, lastElement) - if err != nil { - return err + if newMinNodes > int64(s.maxNumNodes) { + s.eei.AddReturnMessage("new minimum number of nodes greater than maximum number of nodes") + return vmcommon.UserError } - waitingList.LastKey = inWaitingListKey - return s.saveElementAndList(inWaitingListKey, elementInWaiting, waitingList) -} + stakeConfig.MinNumNodes = newMinNodes + s.setConfig(stakeConfig) -func (s *stakingSC) insertAfterLastJailed( - waitingList *WaitingList, - blsKey []byte, -) error { - inWaitingListKey := createWaitingListKey(blsKey) - if len(waitingList.LastJailedKey) == 0 { - previousFirstKey := make([]byte, len(waitingList.FirstKey)) - copy(previousFirstKey, waitingList.FirstKey) - waitingList.FirstKey = inWaitingListKey - waitingList.LastJailedKey = inWaitingListKey - elementInWaiting := &ElementInList{ - BLSPublicKey: blsKey, - PreviousKey: inWaitingListKey, - NextKey: previousFirstKey, - } + return vmcommon.Ok +} - if s.enableEpochsHandler.IsFlagEnabled(common.CorrectFirstQueuedFlag) && len(previousFirstKey) > 0 { - previousFirstElement, err := s.getWaitingListElement(previousFirstKey) - if err != nil { - return err - } - previousFirstElement.PreviousKey = inWaitingListKey - err = s.saveWaitingListElement(previousFirstKey, previousFirstElement) - if err != nil { - return err - } - } +func (s *stakingSC) updateConfigMaxNodes(args *vmcommon.ContractCallInput) vmcommon.ReturnCode { + if !s.enableEpochsHandler.IsFlagEnabled(common.StakingV2Flag) { + s.eei.AddReturnMessage("invalid method to call") + return vmcommon.UserError + } + if !bytes.Equal(args.CallerAddr, s.endOfEpochAccessAddr) { + s.eei.AddReturnMessage("updateConfigMaxNodes function not allowed to be called by address " + string(args.CallerAddr)) + return vmcommon.UserError + } - return s.saveElementAndList(inWaitingListKey, elementInWaiting, waitingList) + stakeConfig := s.getConfig() + if len(args.Arguments) != 1 { + s.eei.AddReturnMessage("number of arguments must be 1") + return vmcommon.UserError } - lastJailedElement, err := s.getWaitingListElement(waitingList.LastJailedKey) - if err != nil { - return err + newMaxNodes := big.NewInt(0).SetBytes(args.Arguments[0]).Int64() + if newMaxNodes <= 0 { + s.eei.AddReturnMessage("new max number of nodes zero or negative") + return vmcommon.UserError } - if bytes.Equal(waitingList.LastKey, waitingList.LastJailedKey) { - waitingList.LastJailedKey = inWaitingListKey - return s.addToEndOfTheList(waitingList, blsKey) + if newMaxNodes < int64(s.minNumNodes) { + s.eei.AddReturnMessage("new max number of nodes less than min number of nodes") + return vmcommon.UserError } - firstNonJailedElement, err := s.getWaitingListElement(lastJailedElement.NextKey) - if err != nil { - return err + prevMaxNumNodes := big.NewInt(stakeConfig.MaxNumNodes) + s.eei.Finish(prevMaxNumNodes.Bytes()) + stakeConfig.MaxNumNodes = newMaxNodes + s.setConfig(stakeConfig) + + return vmcommon.Ok 
+} + +func (s *stakingSC) isNodeJailedOrWithBadRating(registrationData *StakedDataV2_0, blsKey []byte) bool { + return registrationData.Jailed || s.eei.CanUnJail(blsKey) || s.eei.IsBadRating(blsKey) +} + +func (s *stakingSC) getRewardAddress(args *vmcommon.ContractCallInput) vmcommon.ReturnCode { + if args.CallValue.Cmp(zero) != 0 { + s.eei.AddReturnMessage(vm.TransactionValueMustBeZero) + return vmcommon.UserError } - elementInWaiting := &ElementInList{ - BLSPublicKey: blsKey, - PreviousKey: make([]byte, len(inWaitingListKey)), - NextKey: make([]byte, len(inWaitingListKey)), + stakedData, returnCode := s.getStakedDataIfExists(args) + if returnCode != vmcommon.Ok { + return returnCode } - copy(elementInWaiting.PreviousKey, waitingList.LastJailedKey) - copy(elementInWaiting.NextKey, lastJailedElement.NextKey) - lastJailedElement.NextKey = inWaitingListKey - firstNonJailedElement.PreviousKey = inWaitingListKey - waitingList.LastJailedKey = inWaitingListKey + s.eei.Finish([]byte(hex.EncodeToString(stakedData.RewardAddress))) + return vmcommon.Ok +} - err = s.saveWaitingListElement(elementInWaiting.PreviousKey, lastJailedElement) +func (s *stakingSC) getStakedDataIfExists(args *vmcommon.ContractCallInput) (*StakedDataV2_0, vmcommon.ReturnCode) { + err := s.eei.UseGas(s.gasCost.MetaChainSystemSCsCost.Get) if err != nil { - return err + s.eei.AddReturnMessage("insufficient gas") + return nil, vmcommon.OutOfGas } - err = s.saveWaitingListElement(elementInWaiting.NextKey, firstNonJailedElement) - if err != nil { - return err + if len(args.Arguments) != 1 { + s.eei.AddReturnMessage("number of arguments must be equal to 1") + return nil, vmcommon.UserError } - err = s.saveWaitingListElement(inWaitingListKey, elementInWaiting) + stakedData, err := s.getOrCreateRegisteredData(args.Arguments[0]) if err != nil { - return err + s.eei.AddReturnMessage(err.Error()) + return nil, vmcommon.UserError } - return s.saveWaitingListHead(waitingList) -} - -func (s *stakingSC) saveElementAndList(key []byte, element *ElementInList, waitingList *WaitingList) error { - err := s.saveWaitingListElement(key, element) - if err != nil { - return err + if len(stakedData.RewardAddress) == 0 { + s.eei.AddReturnMessage("blsKey not registered in staking sc") + return nil, vmcommon.UserError } - return s.saveWaitingListHead(waitingList) + return stakedData, vmcommon.Ok } -func (s *stakingSC) removeFromWaitingList(blsKey []byte) error { - inWaitingListKey := createWaitingListKey(blsKey) - marshaledData := s.eei.GetStorage(inWaitingListKey) - if len(marshaledData) == 0 { - return nil +func (s *stakingSC) getBLSKeyStatus(args *vmcommon.ContractCallInput) vmcommon.ReturnCode { + if args.CallValue.Cmp(zero) != 0 { + s.eei.AddReturnMessage(vm.TransactionValueMustBeZero) + return vmcommon.UserError } - s.eei.SetStorage(inWaitingListKey, nil) - elementToRemove := &ElementInList{} - err := s.marshalizer.Unmarshal(elementToRemove, marshaledData) - if err != nil { - return err + stakedData, returnCode := s.getStakedDataIfExists(args) + if returnCode != vmcommon.Ok { + return returnCode } - waitingList, err := s.getWaitingListHead() - if err != nil { - return err + if stakedData.Jailed || s.eei.CanUnJail(args.Arguments[0]) { + s.eei.Finish([]byte("jailed")) + return vmcommon.Ok } - if waitingList.Length == 0 { - return vm.ErrInvalidWaitingList + if stakedData.Waiting { + s.eei.Finish([]byte("queued")) + return vmcommon.Ok } - waitingList.Length -= 1 - if waitingList.Length == 0 { - s.eei.SetStorage([]byte(waitingListHeadKey), nil) - return 
nil + if stakedData.Staked { + s.eei.Finish([]byte("staked")) + return vmcommon.Ok } - // remove the first element - isCorrectFirstQueueFlagEnabled := s.enableEpochsHandler.IsFlagEnabled(common.CorrectFirstQueuedFlag) - isFirstElementBeforeFix := !isCorrectFirstQueueFlagEnabled && bytes.Equal(elementToRemove.PreviousKey, inWaitingListKey) - isFirstElementAfterFix := isCorrectFirstQueueFlagEnabled && bytes.Equal(waitingList.FirstKey, inWaitingListKey) - if isFirstElementBeforeFix || isFirstElementAfterFix { - if bytes.Equal(inWaitingListKey, waitingList.LastJailedKey) { - waitingList.LastJailedKey = make([]byte, 0) - } - - nextElement, errGet := s.getWaitingListElement(elementToRemove.NextKey) - if errGet != nil { - return errGet - } - - nextElement.PreviousKey = elementToRemove.NextKey - waitingList.FirstKey = elementToRemove.NextKey - return s.saveElementAndList(elementToRemove.NextKey, nextElement, waitingList) - } - - if !s.enableEpochsHandler.IsFlagEnabled(common.CorrectLastUnJailedFlag) || bytes.Equal(inWaitingListKey, waitingList.LastJailedKey) { - waitingList.LastJailedKey = make([]byte, len(elementToRemove.PreviousKey)) - copy(waitingList.LastJailedKey, elementToRemove.PreviousKey) - } - - previousElement, _ := s.getWaitingListElement(elementToRemove.PreviousKey) - // search the other way around for the element in front - if s.enableEpochsHandler.IsFlagEnabled(common.CorrectFirstQueuedFlag) && previousElement == nil { - previousElement, err = s.searchPreviousFromHead(waitingList, inWaitingListKey, elementToRemove) - if err != nil { - return err - } - } - if previousElement == nil { - previousElement, err = s.getWaitingListElement(elementToRemove.PreviousKey) - if err != nil { - return err - } - } - if len(elementToRemove.NextKey) == 0 { - waitingList.LastKey = elementToRemove.PreviousKey - previousElement.NextKey = make([]byte, 0) - return s.saveElementAndList(elementToRemove.PreviousKey, previousElement, waitingList) - } - - nextElement, err := s.getWaitingListElement(elementToRemove.NextKey) - if err != nil { - return err - } - - nextElement.PreviousKey = elementToRemove.PreviousKey - previousElement.NextKey = elementToRemove.NextKey - - err = s.saveWaitingListElement(elementToRemove.NextKey, nextElement) - if err != nil { - return err - } - return s.saveElementAndList(elementToRemove.PreviousKey, previousElement, waitingList) -} - -func (s *stakingSC) searchPreviousFromHead(waitingList *WaitingList, inWaitingListKey []byte, elementToRemove *ElementInList) (*ElementInList, error) { - var previousElement *ElementInList - index := uint32(1) - nextKey := make([]byte, len(waitingList.FirstKey)) - copy(nextKey, waitingList.FirstKey) - for len(nextKey) != 0 && index <= waitingList.Length { - element, errGet := s.getWaitingListElement(nextKey) - if errGet != nil { - return nil, errGet - } - - if bytes.Equal(inWaitingListKey, element.NextKey) { - previousElement = element - elementToRemove.PreviousKey = createWaitingListKey(previousElement.BLSPublicKey) - return previousElement, nil - } - - nextKey = make([]byte, len(element.NextKey)) - if len(element.NextKey) == 0 { - break - } - index++ - copy(nextKey, element.NextKey) - } - return nil, vm.ErrElementNotFound -} - -func (s *stakingSC) getWaitingListElement(key []byte) (*ElementInList, error) { - marshaledData := s.eei.GetStorage(key) - if len(marshaledData) == 0 { - return nil, vm.ErrElementNotFound - } - - element := &ElementInList{} - err := s.marshalizer.Unmarshal(element, marshaledData) - if err != nil { - return nil, err - } - - 
return element, nil -} - -func (s *stakingSC) saveWaitingListElement(key []byte, element *ElementInList) error { - marshaledData, err := s.marshalizer.Marshal(element) - if err != nil { - return err - } - - s.eei.SetStorage(key, marshaledData) - return nil -} - -func (s *stakingSC) getWaitingListHead() (*WaitingList, error) { - waitingList := &WaitingList{ - FirstKey: make([]byte, 0), - LastKey: make([]byte, 0), - Length: 0, - LastJailedKey: make([]byte, 0), - } - marshaledData := s.eei.GetStorage([]byte(waitingListHeadKey)) - if len(marshaledData) == 0 { - return waitingList, nil - } - - err := s.marshalizer.Unmarshal(waitingList, marshaledData) - if err != nil { - return nil, err - } - - return waitingList, nil -} - -func (s *stakingSC) saveWaitingListHead(waitingList *WaitingList) error { - marshaledData, err := s.marshalizer.Marshal(waitingList) - if err != nil { - return err - } - - s.eei.SetStorage([]byte(waitingListHeadKey), marshaledData) - return nil -} - -func createWaitingListKey(blsKey []byte) []byte { - return []byte(waitingElementPrefix + string(blsKey)) -} - -func (s *stakingSC) switchJailedWithWaiting(args *vmcommon.ContractCallInput) vmcommon.ReturnCode { - if !bytes.Equal(args.CallerAddr, s.endOfEpochAccessAddr) { - s.eei.AddReturnMessage("switchJailedWithWaiting function not allowed to be called by address " + string(args.CallerAddr)) - return vmcommon.UserError - } - if len(args.Arguments) != 1 { - return vmcommon.UserError - } - - blsKey := args.Arguments[0] - registrationData, err := s.getOrCreateRegisteredData(blsKey) - if err != nil { - s.eei.AddReturnMessage(err.Error()) - return vmcommon.UserError - } - if len(registrationData.RewardAddress) == 0 { - s.eei.AddReturnMessage("no need to jail as not a validator") - return vmcommon.UserError - } - if !registrationData.Staked { - s.eei.AddReturnMessage("no need to jail as not a validator") - return vmcommon.UserError - } - if registrationData.Jailed { - s.eei.AddReturnMessage(vm.ErrBLSPublicKeyAlreadyJailed.Error()) - return vmcommon.UserError - } - switched, err := s.moveFirstFromWaitingToStakedIfNeeded(blsKey) - if err != nil { - s.eei.AddReturnMessage(err.Error()) - return vmcommon.UserError - } - - registrationData.NumJailed++ - registrationData.Jailed = true - registrationData.JailedNonce = s.eei.BlockChainHook().CurrentNonce() - - if !switched && !s.enableEpochsHandler.IsFlagEnabled(common.CorrectJailedNotUnStakedEmptyQueueFlag) { - s.eei.AddReturnMessage("did not switch as nobody in waiting, but jailed") - } else { - s.tryRemoveJailedNodeFromStaked(registrationData) - } - - err = s.saveStakingData(blsKey, registrationData) - if err != nil { - s.eei.AddReturnMessage("cannot save staking data: error " + err.Error()) - return vmcommon.UserError - } - - return vmcommon.Ok -} - -func (s *stakingSC) tryRemoveJailedNodeFromStaked(registrationData *StakedDataV2_0) { - if !s.enableEpochsHandler.IsFlagEnabled(common.CorrectJailedNotUnStakedEmptyQueueFlag) { - s.removeAndSetUnstaked(registrationData) - return - } - - if s.canUnStake() { - s.removeAndSetUnstaked(registrationData) - return - } - - s.eei.AddReturnMessage("did not switch as not enough validators remaining") -} - -func (s *stakingSC) removeAndSetUnstaked(registrationData *StakedDataV2_0) { - s.removeFromStakedNodes() - registrationData.Staked = false - registrationData.UnStakedEpoch = s.eei.BlockChainHook().CurrentEpoch() - registrationData.UnStakedNonce = s.eei.BlockChainHook().CurrentNonce() - registrationData.StakedNonce = math.MaxUint64 -} - -func (s 
*stakingSC) updateConfigMinNodes(args *vmcommon.ContractCallInput) vmcommon.ReturnCode { - if !bytes.Equal(args.CallerAddr, s.endOfEpochAccessAddr) { - s.eei.AddReturnMessage("updateConfigMinNodes function not allowed to be called by address " + string(args.CallerAddr)) - return vmcommon.UserError - } - - stakeConfig := s.getConfig() - if len(args.Arguments) != 1 { - s.eei.AddReturnMessage("number of arguments must be 1") - return vmcommon.UserError - } - - newMinNodes := big.NewInt(0).SetBytes(args.Arguments[0]).Int64() - if newMinNodes <= 0 { - s.eei.AddReturnMessage("new minimum number of nodes zero or negative") - return vmcommon.UserError - } - - if newMinNodes > int64(s.maxNumNodes) { - s.eei.AddReturnMessage("new minimum number of nodes greater than maximum number of nodes") - return vmcommon.UserError - } - - stakeConfig.MinNumNodes = newMinNodes - s.setConfig(stakeConfig) - - return vmcommon.Ok -} - -func (s *stakingSC) updateConfigMaxNodes(args *vmcommon.ContractCallInput) vmcommon.ReturnCode { - if !s.enableEpochsHandler.IsFlagEnabled(common.StakingV2Flag) { - s.eei.AddReturnMessage("invalid method to call") - return vmcommon.UserError - } - if !bytes.Equal(args.CallerAddr, s.endOfEpochAccessAddr) { - s.eei.AddReturnMessage("updateConfigMaxNodes function not allowed to be called by address " + string(args.CallerAddr)) - return vmcommon.UserError - } - - stakeConfig := s.getConfig() - if len(args.Arguments) != 1 { - s.eei.AddReturnMessage("number of arguments must be 1") - return vmcommon.UserError - } - - newMaxNodes := big.NewInt(0).SetBytes(args.Arguments[0]).Int64() - if newMaxNodes <= 0 { - s.eei.AddReturnMessage("new max number of nodes zero or negative") - return vmcommon.UserError - } - - if newMaxNodes < int64(s.minNumNodes) { - s.eei.AddReturnMessage("new max number of nodes less than min number of nodes") - return vmcommon.UserError - } - - prevMaxNumNodes := big.NewInt(stakeConfig.MaxNumNodes) - s.eei.Finish(prevMaxNumNodes.Bytes()) - stakeConfig.MaxNumNodes = newMaxNodes - s.setConfig(stakeConfig) - - return vmcommon.Ok -} - -func (s *stakingSC) isNodeJailedOrWithBadRating(registrationData *StakedDataV2_0, blsKey []byte) bool { - return registrationData.Jailed || s.eei.CanUnJail(blsKey) || s.eei.IsBadRating(blsKey) -} - -func (s *stakingSC) getWaitingListIndex(args *vmcommon.ContractCallInput) vmcommon.ReturnCode { - if !bytes.Equal(args.CallerAddr, s.stakeAccessAddr) { - s.eei.AddReturnMessage("this is only a view function") - return vmcommon.UserError - } - if len(args.Arguments) != 1 { - s.eei.AddReturnMessage("number of arguments must be equal to 1") - return vmcommon.UserError - } - - waitingElementKey := createWaitingListKey(args.Arguments[0]) - _, err := s.getWaitingListElement(waitingElementKey) - if err != nil { - s.eei.AddReturnMessage(err.Error()) - return vmcommon.UserError - } - - waitingListHead, err := s.getWaitingListHead() - if err != nil { - s.eei.AddReturnMessage(err.Error()) - return vmcommon.UserError - } - - if bytes.Equal(waitingElementKey, waitingListHead.FirstKey) { - s.eei.Finish([]byte(strconv.Itoa(1))) - return vmcommon.Ok - } - if bytes.Equal(waitingElementKey, waitingListHead.LastKey) { - s.eei.Finish([]byte(strconv.Itoa(int(waitingListHead.Length)))) - return vmcommon.Ok - } - - prevElement, err := s.getWaitingListElement(waitingListHead.FirstKey) - if err != nil { - s.eei.AddReturnMessage(err.Error()) - return vmcommon.UserError - } - - index := uint32(2) - nextKey := make([]byte, len(waitingElementKey)) - copy(nextKey, 
prevElement.NextKey) - for len(nextKey) != 0 && index <= waitingListHead.Length { - if bytes.Equal(nextKey, waitingElementKey) { - s.eei.Finish([]byte(strconv.Itoa(int(index)))) - return vmcommon.Ok - } - - prevElement, err = s.getWaitingListElement(nextKey) - if err != nil { - s.eei.AddReturnMessage(err.Error()) - return vmcommon.UserError - } - - if len(prevElement.NextKey) == 0 { - break - } - index++ - copy(nextKey, prevElement.NextKey) - } - - s.eei.AddReturnMessage("element in waiting list not found") - return vmcommon.UserError -} - -func (s *stakingSC) getWaitingListSize(args *vmcommon.ContractCallInput) vmcommon.ReturnCode { - if args.CallValue.Cmp(zero) != 0 { - s.eei.AddReturnMessage(vm.TransactionValueMustBeZero) - return vmcommon.UserError - } - - err := s.eei.UseGas(s.gasCost.MetaChainSystemSCsCost.Get) - if err != nil { - s.eei.AddReturnMessage("insufficient gas") - return vmcommon.OutOfGas - } - - waitingListHead, err := s.getWaitingListHead() - if err != nil { - s.eei.AddReturnMessage(err.Error()) - return vmcommon.UserError - } - - s.eei.Finish([]byte(strconv.Itoa(int(waitingListHead.Length)))) - return vmcommon.Ok -} - -func (s *stakingSC) getRewardAddress(args *vmcommon.ContractCallInput) vmcommon.ReturnCode { - if args.CallValue.Cmp(zero) != 0 { - s.eei.AddReturnMessage(vm.TransactionValueMustBeZero) - return vmcommon.UserError - } - - stakedData, returnCode := s.getStakedDataIfExists(args) - if returnCode != vmcommon.Ok { - return returnCode - } - - s.eei.Finish([]byte(hex.EncodeToString(stakedData.RewardAddress))) - return vmcommon.Ok -} - -func (s *stakingSC) getStakedDataIfExists(args *vmcommon.ContractCallInput) (*StakedDataV2_0, vmcommon.ReturnCode) { - err := s.eei.UseGas(s.gasCost.MetaChainSystemSCsCost.Get) - if err != nil { - s.eei.AddReturnMessage("insufficient gas") - return nil, vmcommon.OutOfGas - } - if len(args.Arguments) != 1 { - s.eei.AddReturnMessage("number of arguments must be equal to 1") - return nil, vmcommon.UserError - } - stakedData, err := s.getOrCreateRegisteredData(args.Arguments[0]) - if err != nil { - s.eei.AddReturnMessage(err.Error()) - return nil, vmcommon.UserError - } - if len(stakedData.RewardAddress) == 0 { - s.eei.AddReturnMessage("blsKey not registered in staking sc") - return nil, vmcommon.UserError - } - - return stakedData, vmcommon.Ok -} - -func (s *stakingSC) getBLSKeyStatus(args *vmcommon.ContractCallInput) vmcommon.ReturnCode { - if args.CallValue.Cmp(zero) != 0 { - s.eei.AddReturnMessage(vm.TransactionValueMustBeZero) - return vmcommon.UserError - } - - stakedData, returnCode := s.getStakedDataIfExists(args) - if returnCode != vmcommon.Ok { - return returnCode - } - - if stakedData.Jailed || s.eei.CanUnJail(args.Arguments[0]) { - s.eei.Finish([]byte("jailed")) - return vmcommon.Ok - } - if stakedData.Waiting { - s.eei.Finish([]byte("queued")) - return vmcommon.Ok - } - if stakedData.Staked { - s.eei.Finish([]byte("staked")) - return vmcommon.Ok - } - - s.eei.Finish([]byte("unStaked")) - return vmcommon.Ok -} - -func (s *stakingSC) getRemainingUnbondPeriod(args *vmcommon.ContractCallInput) vmcommon.ReturnCode { - if args.CallValue.Cmp(zero) != 0 { - s.eei.AddReturnMessage(vm.TransactionValueMustBeZero) - return vmcommon.UserError - } - - stakedData, returnCode := s.getStakedDataIfExists(args) - if returnCode != vmcommon.Ok { - return returnCode - } - if stakedData.UnStakedNonce == 0 { - s.eei.AddReturnMessage("not in unbond period") - return vmcommon.UserError - } - - currentNonce := s.eei.BlockChainHook().CurrentNonce() 
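For reference, the nonce arithmetic in getRemainingUnbondPeriod above reduces to the following standalone sketch; the helper name and the example values are hypothetical and only illustrate the computation, they are not part of the contract:

func remainingUnbondPeriod(currentNonce, unStakedNonce, unBondPeriod uint64) uint64 {
	// blocks passed since unStake; once the unbond period has elapsed, nothing remains
	passed := currentNonce - unStakedNonce
	if passed >= unBondPeriod {
		return 0
	}
	return unBondPeriod - passed
}

// e.g. currentNonce=1050, unStakedNonce=1000, unBondPeriod=250 -> 200 blocks remaining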
- passedNonce := currentNonce - stakedData.UnStakedNonce - if passedNonce >= s.unBondPeriod { - if s.enableEpochsHandler.IsFlagEnabled(common.StakingV2Flag) { - s.eei.Finish(zero.Bytes()) - } else { - s.eei.Finish([]byte("0")) - } - } else { - remaining := s.unBondPeriod - passedNonce - if s.enableEpochsHandler.IsFlagEnabled(common.StakingV2Flag) { - s.eei.Finish(big.NewInt(0).SetUint64(remaining).Bytes()) - } else { - s.eei.Finish([]byte(strconv.Itoa(int(remaining)))) - } - } - - return vmcommon.Ok -} - -func (s *stakingSC) getWaitingListRegisterNonceAndRewardAddress(args *vmcommon.ContractCallInput) vmcommon.ReturnCode { - if !bytes.Equal(args.CallerAddr, s.stakeAccessAddr) { - s.eei.AddReturnMessage("this is only a view function") - return vmcommon.UserError - } - if len(args.Arguments) != 0 { - s.eei.AddReturnMessage("number of arguments must be equal to 0") - return vmcommon.UserError - } - - waitingListData, err := s.getFirstElementsFromWaitingList(math.MaxUint32) - if err != nil { - s.eei.AddReturnMessage(err.Error()) - return vmcommon.UserError - } - if len(waitingListData.stakedDataList) == 0 { - s.eei.AddReturnMessage("no one in waitingList") - return vmcommon.UserError - } - - for index, stakedData := range waitingListData.stakedDataList { - s.eei.Finish(waitingListData.blsKeys[index]) - s.eei.Finish(stakedData.RewardAddress) - s.eei.Finish(big.NewInt(int64(stakedData.RegisterNonce)).Bytes()) - } - - return vmcommon.Ok -} - -func (s *stakingSC) setOwnersOnAddresses(args *vmcommon.ContractCallInput) vmcommon.ReturnCode { - if !s.enableEpochsHandler.IsFlagEnabled(common.StakingV2Flag) { - s.eei.AddReturnMessage("invalid method to call") - return vmcommon.UserError - } - if !bytes.Equal(args.CallerAddr, s.endOfEpochAccessAddr) { - s.eei.AddReturnMessage("setOwnersOnAddresses function not allowed to be called by address " + string(args.CallerAddr)) - return vmcommon.UserError - } - if len(args.Arguments)%2 != 0 { - s.eei.AddReturnMessage("invalid number of arguments: expected an even number of arguments") - return vmcommon.UserError - } - for i := 0; i < len(args.Arguments); i += 2 { - stakedData, err := s.getOrCreateRegisteredData(args.Arguments[i]) - if err != nil { - s.eei.AddReturnMessage(err.Error()) - s.eei.AddReturnMessage(fmt.Sprintf("process stopped at index %d, bls key %s", i, hex.EncodeToString(args.Arguments[i]))) - return vmcommon.UserError - } - if len(stakedData.RewardAddress) == 0 { - log.Error("staking data does not exists", - "bls key", hex.EncodeToString(args.Arguments[i]), - "owner as hex", hex.EncodeToString(args.Arguments[i+1])) - continue - } - - stakedData.OwnerAddress = args.Arguments[i+1] - err = s.saveStakingData(args.Arguments[i], stakedData) - if err != nil { - s.eei.AddReturnMessage(err.Error()) - s.eei.AddReturnMessage(fmt.Sprintf("process stopped at index %d, bls key %s", i, hex.EncodeToString(args.Arguments[i]))) - return vmcommon.UserError - } - } - - return vmcommon.Ok -} - -func (s *stakingSC) getOwner(args *vmcommon.ContractCallInput) vmcommon.ReturnCode { - if !s.enableEpochsHandler.IsFlagEnabled(common.StakingV2Flag) { - s.eei.AddReturnMessage("invalid method to call") - return vmcommon.UserError - } - if !bytes.Equal(args.CallerAddr, s.stakeAccessAddr) { - s.eei.AddReturnMessage("this is only a view function") - return vmcommon.UserError - } - if len(args.Arguments) < 1 { - s.eei.AddReturnMessage(fmt.Sprintf("invalid number of arguments: expected min %d, got %d", 1, len(args.Arguments))) - return vmcommon.UserError - } - - stakedData, errGet 
:= s.getOrCreateRegisteredData(args.Arguments[0]) - if errGet != nil { - s.eei.AddReturnMessage(errGet.Error()) - return vmcommon.UserError - } - if len(stakedData.OwnerAddress) == 0 { - s.eei.AddReturnMessage("owner address is nil") - return vmcommon.UserError - } - - s.eei.Finish(stakedData.OwnerAddress) + s.eei.Finish([]byte("unStaked")) return vmcommon.Ok } @@ -1567,212 +916,117 @@ func (s *stakingSC) getTotalNumberOfRegisteredNodes(args *vmcommon.ContractCallI return vmcommon.UserError } - stakeConfig := s.getConfig() waitingListHead, err := s.getWaitingListHead() if err != nil { s.eei.AddReturnMessage(err.Error()) return vmcommon.UserError } + stakeConfig := s.getConfig() totalRegistered := stakeConfig.StakedNodes + stakeConfig.JailedNodes + int64(waitingListHead.Length) s.eei.Finish(big.NewInt(totalRegistered).Bytes()) return vmcommon.Ok } -func (s *stakingSC) resetLastUnJailedFromQueue(args *vmcommon.ContractCallInput) vmcommon.ReturnCode { - if !s.enableEpochsHandler.IsFlagEnabled(common.CorrectLastUnJailedFlag) { - // backward compatibility - return vmcommon.UserError - } - if !bytes.Equal(args.CallerAddr, s.endOfEpochAccessAddr) { - s.eei.AddReturnMessage("stake nodes from waiting list can be called by endOfEpochAccess address only") - return vmcommon.UserError - } - if len(args.Arguments) != 0 { - s.eei.AddReturnMessage("number of arguments must be equal to 0") - return vmcommon.UserError - } - - waitingList, err := s.getWaitingListHead() - if err != nil { - s.eei.AddReturnMessage(err.Error()) +func (s *stakingSC) getRemainingUnbondPeriod(args *vmcommon.ContractCallInput) vmcommon.ReturnCode { + if args.CallValue.Cmp(zero) != 0 { + s.eei.AddReturnMessage(vm.TransactionValueMustBeZero) return vmcommon.UserError } - if len(waitingList.LastJailedKey) == 0 { - return vmcommon.Ok + stakedData, returnCode := s.getStakedDataIfExists(args) + if returnCode != vmcommon.Ok { + return returnCode } - - waitingList.LastJailedKey = make([]byte, 0) - err = s.saveWaitingListHead(waitingList) - if err != nil { - s.eei.AddReturnMessage(err.Error()) + if stakedData.UnStakedNonce == 0 { + s.eei.AddReturnMessage("not in unbond period") return vmcommon.UserError } - return vmcommon.Ok -} - -func (s *stakingSC) cleanAdditionalQueueNotEnoughFunds( - waitingListData *waitingListReturnData, -) ([]string, map[string][][]byte, error) { - - listOfOwners := make([]string, 0) - mapOwnersUnStakedNodes := make(map[string][][]byte) - mapCheckedOwners := make(map[string]*validatorFundInfo) - for i := len(waitingListData.blsKeys) - 1; i >= 0; i-- { - stakedData := waitingListData.stakedDataList[i] - validatorInfo, err := s.checkValidatorFunds(mapCheckedOwners, stakedData.OwnerAddress, s.stakeValue) - if err != nil { - return nil, nil, err - } - if validatorInfo.numNodesToUnstake == 0 { - continue - } - - validatorInfo.numNodesToUnstake-- - blsKey := waitingListData.blsKeys[i] - err = s.removeFromWaitingList(blsKey) - if err != nil { - return nil, nil, err - } - - registrationData, err := s.getOrCreateRegisteredData(blsKey) - if err != nil { - return nil, nil, err - } - - registrationData.Staked = false - registrationData.UnStakedEpoch = s.eei.BlockChainHook().CurrentEpoch() - registrationData.UnStakedNonce = s.eei.BlockChainHook().CurrentNonce() - registrationData.Waiting = false - - err = s.saveStakingData(blsKey, registrationData) - if err != nil { - return nil, nil, err + currentNonce := s.eei.BlockChainHook().CurrentNonce() + passedNonce := currentNonce - stakedData.UnStakedNonce + if passedNonce >= 
s.unBondPeriod { + if s.enableEpochsHandler.IsFlagEnabled(common.StakingV2Flag) { + s.eei.Finish(zero.Bytes()) + } else { + s.eei.Finish([]byte("0")) } - - _, alreadyAdded := mapOwnersUnStakedNodes[string(stakedData.OwnerAddress)] - if !alreadyAdded { - listOfOwners = append(listOfOwners, string(stakedData.OwnerAddress)) + } else { + remaining := s.unBondPeriod - passedNonce + if s.enableEpochsHandler.IsFlagEnabled(common.StakingV2Flag) { + s.eei.Finish(big.NewInt(0).SetUint64(remaining).Bytes()) + } else { + s.eei.Finish([]byte(strconv.Itoa(int(remaining)))) } - - mapOwnersUnStakedNodes[string(stakedData.OwnerAddress)] = append(mapOwnersUnStakedNodes[string(stakedData.OwnerAddress)], blsKey) } - return listOfOwners, mapOwnersUnStakedNodes, nil + return vmcommon.Ok } -func (s *stakingSC) stakeNodesFromQueue(args *vmcommon.ContractCallInput) vmcommon.ReturnCode { +func (s *stakingSC) setOwnersOnAddresses(args *vmcommon.ContractCallInput) vmcommon.ReturnCode { if !s.enableEpochsHandler.IsFlagEnabled(common.StakingV2Flag) { s.eei.AddReturnMessage("invalid method to call") return vmcommon.UserError } if !bytes.Equal(args.CallerAddr, s.endOfEpochAccessAddr) { - s.eei.AddReturnMessage("stake nodes from waiting list can be called by endOfEpochAccess address only") - return vmcommon.UserError - } - if len(args.Arguments) != 1 { - s.eei.AddReturnMessage("number of arguments must be equal to 1") + s.eei.AddReturnMessage("setOwnersOnAddresses function not allowed to be called by address " + string(args.CallerAddr)) return vmcommon.UserError } - - numNodesToStake := big.NewInt(0).SetBytes(args.Arguments[0]).Uint64() - waitingListData, err := s.getFirstElementsFromWaitingList(math.MaxUint32) - if err != nil { - s.eei.AddReturnMessage(err.Error()) + if len(args.Arguments)%2 != 0 { + s.eei.AddReturnMessage("invalid number of arguments: expected an even number of arguments") return vmcommon.UserError } - if len(waitingListData.blsKeys) == 0 { - s.eei.AddReturnMessage("no nodes in queue") - return vmcommon.Ok - } - - nodePriceToUse := big.NewInt(0).Set(s.minNodePrice) - if s.enableEpochsHandler.IsFlagEnabled(common.CorrectLastUnJailedFlag) { - nodePriceToUse.Set(s.stakeValue) - } - - stakedNodes := uint64(0) - mapCheckedOwners := make(map[string]*validatorFundInfo) - for i, blsKey := range waitingListData.blsKeys { - stakedData := waitingListData.stakedDataList[i] - if stakedNodes >= numNodesToStake { - break - } - - validatorInfo, errCheck := s.checkValidatorFunds(mapCheckedOwners, stakedData.OwnerAddress, nodePriceToUse) - if errCheck != nil { - s.eei.AddReturnMessage(errCheck.Error()) - return vmcommon.UserError - } - if validatorInfo.numNodesToUnstake > 0 { - continue - } - - s.activeStakingFor(stakedData) - err = s.saveStakingData(blsKey, stakedData) + for i := 0; i < len(args.Arguments); i += 2 { + stakedData, err := s.getOrCreateRegisteredData(args.Arguments[i]) if err != nil { s.eei.AddReturnMessage(err.Error()) + s.eei.AddReturnMessage(fmt.Sprintf("process stopped at index %d, bls key %s", i, hex.EncodeToString(args.Arguments[i]))) return vmcommon.UserError } + if len(stakedData.RewardAddress) == 0 { + log.Error("staking data does not exists", + "bls key", hex.EncodeToString(args.Arguments[i]), + "owner as hex", hex.EncodeToString(args.Arguments[i+1])) + continue + } - // remove from waiting list - err = s.removeFromWaitingList(blsKey) + stakedData.OwnerAddress = args.Arguments[i+1] + err = s.saveStakingData(args.Arguments[i], stakedData) if err != nil { s.eei.AddReturnMessage(err.Error()) + 
s.eei.AddReturnMessage(fmt.Sprintf("process stopped at index %d, bls key %s", i, hex.EncodeToString(args.Arguments[i]))) return vmcommon.UserError } - - stakedNodes++ - // return the change key - s.eei.Finish(blsKey) - s.eei.Finish(stakedData.RewardAddress) } - s.addToStakedNodes(int64(stakedNodes)) - return vmcommon.Ok } -func (s *stakingSC) cleanAdditionalQueue(args *vmcommon.ContractCallInput) vmcommon.ReturnCode { - if !s.enableEpochsHandler.IsFlagEnabled(common.CorrectLastUnJailedFlag) { +func (s *stakingSC) getOwner(args *vmcommon.ContractCallInput) vmcommon.ReturnCode { + if !s.enableEpochsHandler.IsFlagEnabled(common.StakingV2Flag) { s.eei.AddReturnMessage("invalid method to call") return vmcommon.UserError } - if !bytes.Equal(args.CallerAddr, s.endOfEpochAccessAddr) { - s.eei.AddReturnMessage("stake nodes from waiting list can be called by endOfEpochAccess address only") + if !bytes.Equal(args.CallerAddr, s.stakeAccessAddr) { + s.eei.AddReturnMessage("this is only a view function") return vmcommon.UserError } - if len(args.Arguments) != 0 { - s.eei.AddReturnMessage("number of arguments must be 0") + if len(args.Arguments) < 1 { + s.eei.AddReturnMessage(fmt.Sprintf("invalid number of arguments: expected min %d, got %d", 1, len(args.Arguments))) return vmcommon.UserError } - waitingListData, err := s.getFirstElementsFromWaitingList(math.MaxUint32) - if err != nil { - s.eei.AddReturnMessage(err.Error()) + stakedData, errGet := s.getOrCreateRegisteredData(args.Arguments[0]) + if errGet != nil { + s.eei.AddReturnMessage(errGet.Error()) return vmcommon.UserError } - if len(waitingListData.blsKeys) == 0 { - s.eei.AddReturnMessage("no nodes in queue") - return vmcommon.Ok - } - - listOfOwners, mapOwnersAndBLSKeys, err := s.cleanAdditionalQueueNotEnoughFunds(waitingListData) - if err != nil { - s.eei.AddReturnMessage(err.Error()) + if len(stakedData.OwnerAddress) == 0 { + s.eei.AddReturnMessage("owner address is nil") return vmcommon.UserError } - for _, owner := range listOfOwners { - s.eei.Finish([]byte(owner)) - blsKeys := mapOwnersAndBLSKeys[owner] - for _, blsKey := range blsKeys { - s.eei.Finish(blsKey) - } - } - + s.eei.Finish(stakedData.OwnerAddress) return vmcommon.Ok } @@ -1894,193 +1148,6 @@ func (s *stakingSC) checkValidatorFunds( return validatorInfo, nil } -func (s *stakingSC) getFirstElementsFromWaitingList(numNodes uint32) (*waitingListReturnData, error) { - waitingListData := &waitingListReturnData{} - - waitingListHead, err := s.getWaitingListHead() - if err != nil { - return nil, err - } - if waitingListHead.Length == 0 { - return waitingListData, nil - } - - blsKeysToStake := make([][]byte, 0) - stakedDataList := make([]*StakedDataV2_0, 0) - index := uint32(1) - nextKey := make([]byte, len(waitingListHead.FirstKey)) - copy(nextKey, waitingListHead.FirstKey) - for len(nextKey) != 0 && index <= waitingListHead.Length && index <= numNodes { - element, errGet := s.getWaitingListElement(nextKey) - if errGet != nil { - return nil, errGet - } - - if bytes.Equal(nextKey, waitingListHead.LastJailedKey) { - waitingListData.afterLastjailed = true - } - - stakedData, errGet := s.getOrCreateRegisteredData(element.BLSPublicKey) - if errGet != nil { - return nil, errGet - } - - blsKeysToStake = append(blsKeysToStake, element.BLSPublicKey) - stakedDataList = append(stakedDataList, stakedData) - - if len(element.NextKey) == 0 { - break - } - index++ - copy(nextKey, element.NextKey) - } - - if numNodes >= waitingListHead.Length && len(blsKeysToStake) != int(waitingListHead.Length) { - 
log.Warn("mismatch length on waiting list elements in stakingSC.getFirstElementsFromWaitingList") - } - - waitingListData.blsKeys = blsKeysToStake - waitingListData.stakedDataList = stakedDataList - waitingListData.lastKey = nextKey - return waitingListData, nil -} - -func (s *stakingSC) fixWaitingListQueueSize(args *vmcommon.ContractCallInput) vmcommon.ReturnCode { - if !s.enableEpochsHandler.IsFlagEnabled(common.CorrectFirstQueuedFlag) { - s.eei.AddReturnMessage("invalid method to call") - return vmcommon.UserError - } - - if args.CallValue.Cmp(zero) != 0 { - s.eei.AddReturnMessage(vm.TransactionValueMustBeZero) - return vmcommon.UserError - } - - err := s.eei.UseGas(s.gasCost.MetaChainSystemSCsCost.FixWaitingListSize) - if err != nil { - s.eei.AddReturnMessage("insufficient gas") - return vmcommon.OutOfGas - } - - waitingListHead, err := s.getWaitingListHead() - if err != nil { - s.eei.AddReturnMessage(err.Error()) - return vmcommon.UserError - } - - if waitingListHead.Length <= 1 { - return vmcommon.Ok - } - - foundLastJailedKey := len(waitingListHead.LastJailedKey) == 0 - - index := uint32(1) - nextKey := make([]byte, len(waitingListHead.FirstKey)) - copy(nextKey, waitingListHead.FirstKey) - for len(nextKey) != 0 && index <= waitingListHead.Length { - element, errGet := s.getWaitingListElement(nextKey) - if errGet != nil { - s.eei.AddReturnMessage(errGet.Error()) - return vmcommon.UserError - } - - if bytes.Equal(waitingListHead.LastJailedKey, nextKey) { - foundLastJailedKey = true - } - - _, errGet = s.getOrCreateRegisteredData(element.BLSPublicKey) - if errGet != nil { - s.eei.AddReturnMessage(errGet.Error()) - return vmcommon.UserError - } - - if len(element.NextKey) == 0 { - break - } - index++ - copy(nextKey, element.NextKey) - } - - waitingListHead.Length = index - waitingListHead.LastKey = nextKey - if !foundLastJailedKey { - waitingListHead.LastJailedKey = make([]byte, 0) - } - - err = s.saveWaitingListHead(waitingListHead) - if err != nil { - s.eei.AddReturnMessage(err.Error()) - return vmcommon.UserError - } - - return vmcommon.Ok -} - -func (s *stakingSC) addMissingNodeToQueue(args *vmcommon.ContractCallInput) vmcommon.ReturnCode { - if !s.enableEpochsHandler.IsFlagEnabled(common.CorrectFirstQueuedFlag) { - s.eei.AddReturnMessage("invalid method to call") - return vmcommon.UserError - } - if args.CallValue.Cmp(zero) != 0 { - s.eei.AddReturnMessage(vm.TransactionValueMustBeZero) - return vmcommon.UserError - } - err := s.eei.UseGas(s.gasCost.MetaChainSystemSCsCost.FixWaitingListSize) - if err != nil { - s.eei.AddReturnMessage("insufficient gas") - return vmcommon.OutOfGas - } - if len(args.Arguments) != 1 { - s.eei.AddReturnMessage("invalid number of arguments") - return vmcommon.UserError - } - - blsKey := args.Arguments[0] - _, err = s.getWaitingListElement(createWaitingListKey(blsKey)) - if err != nil { - s.eei.AddReturnMessage(err.Error()) - return vmcommon.UserError - } - - waitingListData, err := s.getFirstElementsFromWaitingList(math.MaxUint32) - if err != nil { - s.eei.AddReturnMessage(err.Error()) - return vmcommon.UserError - } - - for _, keyInList := range waitingListData.blsKeys { - if bytes.Equal(keyInList, blsKey) { - s.eei.AddReturnMessage("key is in queue, not missing") - return vmcommon.UserError - } - } - - waitingList, err := s.getWaitingListHead() - if err != nil { - s.eei.AddReturnMessage(err.Error()) - return vmcommon.UserError - } - - waitingList.Length += 1 - if waitingList.Length == 1 { - err = s.startWaitingList(waitingList, false, blsKey) - if err 
!= nil { - s.eei.AddReturnMessage(err.Error()) - return vmcommon.UserError - } - - return vmcommon.Ok - } - - err = s.addToEndOfTheList(waitingList, blsKey) - if err != nil { - s.eei.AddReturnMessage(err.Error()) - return vmcommon.UserError - } - - return vmcommon.Ok -} - // CanUseContract returns true if contract can be used func (s *stakingSC) CanUseContract() bool { return true diff --git a/vm/systemSmartContracts/stakingWaitingList.go b/vm/systemSmartContracts/stakingWaitingList.go new file mode 100644 index 00000000000..16d979a6a86 --- /dev/null +++ b/vm/systemSmartContracts/stakingWaitingList.go @@ -0,0 +1,1042 @@ +package systemSmartContracts + +import ( + "bytes" + "encoding/hex" + "fmt" + "math" + "math/big" + "strconv" + + "github.com/multiversx/mx-chain-go/common" + "github.com/multiversx/mx-chain-go/vm" + vmcommon "github.com/multiversx/mx-chain-vm-common-go" +) + +const waitingListHeadKey = "waitingList" +const waitingElementPrefix = "w_" + +type waitingListReturnData struct { + blsKeys [][]byte + stakedDataList []*StakedDataV2_0 + lastKey []byte + afterLastJailed bool +} + +func (s *stakingSC) processStakeV1(blsKey []byte, registrationData *StakedDataV2_0, addFirst bool) error { + if registrationData.Staked { + return nil + } + + registrationData.RegisterNonce = s.eei.BlockChainHook().CurrentNonce() + if !s.canStake() { + s.eei.AddReturnMessage(fmt.Sprintf("staking is full key put into waiting list %s", hex.EncodeToString(blsKey))) + err := s.addToWaitingList(blsKey, addFirst) + if err != nil { + s.eei.AddReturnMessage("error while adding to waiting") + return err + } + registrationData.Waiting = true + s.eei.Finish([]byte{waiting}) + return nil + } + + err := s.removeFromWaitingList(blsKey) + if err != nil { + s.eei.AddReturnMessage("error while removing from waiting") + return err + } + + s.addToStakedNodes(1) + s.activeStakingFor(registrationData) + + return nil +} + +func (s *stakingSC) unStakeV1(args *vmcommon.ContractCallInput) vmcommon.ReturnCode { + registrationData, retCode := s.checkUnStakeArgs(args) + if retCode != vmcommon.Ok { + return retCode + } + + var err error + if !registrationData.Staked { + registrationData.Waiting = false + err = s.removeFromWaitingList(args.Arguments[0]) + if err != nil { + s.eei.AddReturnMessage(err.Error()) + return vmcommon.UserError + } + err = s.saveStakingData(args.Arguments[0], registrationData) + if err != nil { + s.eei.AddReturnMessage("cannot save staking data: error " + err.Error()) + return vmcommon.UserError + } + + return vmcommon.Ok + } + + addOneFromQueue := !s.enableEpochsHandler.IsFlagEnabled(common.CorrectLastUnJailedFlag) || s.canStakeIfOneRemoved() + if addOneFromQueue { + _, err = s.moveFirstFromWaitingToStaked() + if err != nil { + s.eei.AddReturnMessage(err.Error()) + return vmcommon.UserError + } + } + + return s.tryUnStake(args.Arguments[0], registrationData) +} + +func (s *stakingSC) moveFirstFromWaitingToStakedIfNeeded(blsKey []byte) (bool, error) { + waitingElementKey := createWaitingListKey(blsKey) + _, err := s.getWaitingListElement(waitingElementKey) + if err == nil { + // node in waiting - remove from it - and that's it + return false, s.removeFromWaitingList(blsKey) + } + + return s.moveFirstFromWaitingToStaked() +} + +func (s *stakingSC) moveFirstFromWaitingToStaked() (bool, error) { + waitingList, err := s.getWaitingListHead() + if err != nil { + return false, err + } + if waitingList.Length == 0 { + return false, nil + } + elementInList, err := s.getWaitingListElement(waitingList.FirstKey) + if err 
!= nil { + return false, err + } + err = s.removeFromWaitingList(elementInList.BLSPublicKey) + if err != nil { + return false, err + } + + nodeData, err := s.getOrCreateRegisteredData(elementInList.BLSPublicKey) + if err != nil { + return false, err + } + if len(nodeData.RewardAddress) == 0 || nodeData.Staked { + return false, vm.ErrInvalidWaitingList + } + + nodeData.Waiting = false + nodeData.Staked = true + nodeData.RegisterNonce = s.eei.BlockChainHook().CurrentNonce() + nodeData.StakedNonce = s.eei.BlockChainHook().CurrentNonce() + nodeData.UnStakedNonce = 0 + nodeData.UnStakedEpoch = common.DefaultUnstakedEpoch + + s.addToStakedNodes(1) + return true, s.saveStakingData(elementInList.BLSPublicKey, nodeData) +} + +func (s *stakingSC) addToWaitingList(blsKey []byte, addJailed bool) error { + inWaitingListKey := createWaitingListKey(blsKey) + marshaledData := s.eei.GetStorage(inWaitingListKey) + if len(marshaledData) != 0 { + return nil + } + + waitingList, err := s.getWaitingListHead() + if err != nil { + return err + } + + waitingList.Length += 1 + if waitingList.Length == 1 { + return s.startWaitingList(waitingList, addJailed, blsKey) + } + + if addJailed { + return s.insertAfterLastJailed(waitingList, blsKey) + } + + return s.addToEndOfTheList(waitingList, blsKey) +} + +func (s *stakingSC) startWaitingList( + waitingList *WaitingList, + addJailed bool, + blsKey []byte, +) error { + inWaitingListKey := createWaitingListKey(blsKey) + waitingList.FirstKey = inWaitingListKey + waitingList.LastKey = inWaitingListKey + if addJailed { + waitingList.LastJailedKey = inWaitingListKey + } + + elementInWaiting := &ElementInList{ + BLSPublicKey: blsKey, + PreviousKey: waitingList.LastKey, + NextKey: make([]byte, 0), + } + return s.saveElementAndList(inWaitingListKey, elementInWaiting, waitingList) +} + +func (s *stakingSC) addToEndOfTheList(waitingList *WaitingList, blsKey []byte) error { + inWaitingListKey := createWaitingListKey(blsKey) + oldLastKey := make([]byte, len(waitingList.LastKey)) + copy(oldLastKey, waitingList.LastKey) + + lastElement, err := s.getWaitingListElement(waitingList.LastKey) + if err != nil { + return err + } + lastElement.NextKey = inWaitingListKey + elementInWaiting := &ElementInList{ + BLSPublicKey: blsKey, + PreviousKey: oldLastKey, + NextKey: make([]byte, 0), + } + + err = s.saveWaitingListElement(oldLastKey, lastElement) + if err != nil { + return err + } + + waitingList.LastKey = inWaitingListKey + return s.saveElementAndList(inWaitingListKey, elementInWaiting, waitingList) +} + +func (s *stakingSC) insertAfterLastJailed( + waitingList *WaitingList, + blsKey []byte, +) error { + inWaitingListKey := createWaitingListKey(blsKey) + if len(waitingList.LastJailedKey) == 0 { + previousFirstKey := make([]byte, len(waitingList.FirstKey)) + copy(previousFirstKey, waitingList.FirstKey) + waitingList.FirstKey = inWaitingListKey + waitingList.LastJailedKey = inWaitingListKey + elementInWaiting := &ElementInList{ + BLSPublicKey: blsKey, + PreviousKey: inWaitingListKey, + NextKey: previousFirstKey, + } + + if s.enableEpochsHandler.IsFlagEnabled(common.CorrectFirstQueuedFlag) && len(previousFirstKey) > 0 { + previousFirstElement, err := s.getWaitingListElement(previousFirstKey) + if err != nil { + return err + } + previousFirstElement.PreviousKey = inWaitingListKey + err = s.saveWaitingListElement(previousFirstKey, previousFirstElement) + if err != nil { + return err + } + } + + return s.saveElementAndList(inWaitingListKey, elementInWaiting, waitingList) + } + + lastJailedElement, 
err := s.getWaitingListElement(waitingList.LastJailedKey) + if err != nil { + return err + } + + if bytes.Equal(waitingList.LastKey, waitingList.LastJailedKey) { + waitingList.LastJailedKey = inWaitingListKey + return s.addToEndOfTheList(waitingList, blsKey) + } + + firstNonJailedElement, err := s.getWaitingListElement(lastJailedElement.NextKey) + if err != nil { + return err + } + + elementInWaiting := &ElementInList{ + BLSPublicKey: blsKey, + PreviousKey: make([]byte, len(inWaitingListKey)), + NextKey: make([]byte, len(inWaitingListKey)), + } + copy(elementInWaiting.PreviousKey, waitingList.LastJailedKey) + copy(elementInWaiting.NextKey, lastJailedElement.NextKey) + + lastJailedElement.NextKey = inWaitingListKey + firstNonJailedElement.PreviousKey = inWaitingListKey + waitingList.LastJailedKey = inWaitingListKey + + err = s.saveWaitingListElement(elementInWaiting.PreviousKey, lastJailedElement) + if err != nil { + return err + } + err = s.saveWaitingListElement(elementInWaiting.NextKey, firstNonJailedElement) + if err != nil { + return err + } + err = s.saveWaitingListElement(inWaitingListKey, elementInWaiting) + if err != nil { + return err + } + return s.saveWaitingListHead(waitingList) +} + +func (s *stakingSC) saveElementAndList(key []byte, element *ElementInList, waitingList *WaitingList) error { + err := s.saveWaitingListElement(key, element) + if err != nil { + return err + } + + return s.saveWaitingListHead(waitingList) +} + +func (s *stakingSC) removeFromWaitingList(blsKey []byte) error { + inWaitingListKey := createWaitingListKey(blsKey) + marshaledData := s.eei.GetStorage(inWaitingListKey) + if len(marshaledData) == 0 { + return nil + } + s.eei.SetStorage(inWaitingListKey, nil) + + elementToRemove := &ElementInList{} + err := s.marshalizer.Unmarshal(elementToRemove, marshaledData) + if err != nil { + return err + } + + waitingList, err := s.getWaitingListHead() + if err != nil { + return err + } + if waitingList.Length == 0 { + return vm.ErrInvalidWaitingList + } + waitingList.Length -= 1 + if waitingList.Length == 0 { + s.eei.SetStorage([]byte(waitingListHeadKey), nil) + return nil + } + + // remove the first element + isFirstElementBeforeFix := !s.enableEpochsHandler.IsFlagEnabled(common.CorrectFirstQueuedFlag) && bytes.Equal(elementToRemove.PreviousKey, inWaitingListKey) + isFirstElementAfterFix := s.enableEpochsHandler.IsFlagEnabled(common.CorrectFirstQueuedFlag) && bytes.Equal(waitingList.FirstKey, inWaitingListKey) + if isFirstElementBeforeFix || isFirstElementAfterFix { + if bytes.Equal(inWaitingListKey, waitingList.LastJailedKey) { + waitingList.LastJailedKey = make([]byte, 0) + } + + nextElement, errGet := s.getWaitingListElement(elementToRemove.NextKey) + if errGet != nil { + return errGet + } + + nextElement.PreviousKey = elementToRemove.NextKey + waitingList.FirstKey = elementToRemove.NextKey + return s.saveElementAndList(elementToRemove.NextKey, nextElement, waitingList) + } + + if !s.enableEpochsHandler.IsFlagEnabled(common.CorrectLastUnJailedFlag) || bytes.Equal(inWaitingListKey, waitingList.LastJailedKey) { + waitingList.LastJailedKey = make([]byte, len(elementToRemove.PreviousKey)) + copy(waitingList.LastJailedKey, elementToRemove.PreviousKey) + } + + previousElement, _ := s.getWaitingListElement(elementToRemove.PreviousKey) + // search the other way around for the element in front + if s.enableEpochsHandler.IsFlagEnabled(common.CorrectFirstQueuedFlag) && previousElement == nil { + previousElement, err = s.searchPreviousFromHead(waitingList, inWaitingListKey, 
elementToRemove) + if err != nil { + return err + } + } + if previousElement == nil { + previousElement, err = s.getWaitingListElement(elementToRemove.PreviousKey) + if err != nil { + return err + } + } + if len(elementToRemove.NextKey) == 0 { + waitingList.LastKey = elementToRemove.PreviousKey + previousElement.NextKey = make([]byte, 0) + return s.saveElementAndList(elementToRemove.PreviousKey, previousElement, waitingList) + } + + nextElement, err := s.getWaitingListElement(elementToRemove.NextKey) + if err != nil { + return err + } + + nextElement.PreviousKey = elementToRemove.PreviousKey + previousElement.NextKey = elementToRemove.NextKey + + err = s.saveWaitingListElement(elementToRemove.NextKey, nextElement) + if err != nil { + return err + } + return s.saveElementAndList(elementToRemove.PreviousKey, previousElement, waitingList) +} + +func (s *stakingSC) searchPreviousFromHead(waitingList *WaitingList, inWaitingListKey []byte, elementToRemove *ElementInList) (*ElementInList, error) { + var previousElement *ElementInList + index := uint32(1) + nextKey := make([]byte, len(waitingList.FirstKey)) + copy(nextKey, waitingList.FirstKey) + for len(nextKey) != 0 && index <= waitingList.Length { + element, errGet := s.getWaitingListElement(nextKey) + if errGet != nil { + return nil, errGet + } + + if bytes.Equal(inWaitingListKey, element.NextKey) { + previousElement = element + elementToRemove.PreviousKey = createWaitingListKey(previousElement.BLSPublicKey) + return previousElement, nil + } + + nextKey = make([]byte, len(element.NextKey)) + if len(element.NextKey) == 0 { + break + } + index++ + copy(nextKey, element.NextKey) + } + return nil, vm.ErrElementNotFound +} + +func (s *stakingSC) getWaitingListElement(key []byte) (*ElementInList, error) { + marshaledData := s.eei.GetStorage(key) + if len(marshaledData) == 0 { + return nil, vm.ErrElementNotFound + } + + element := &ElementInList{} + err := s.marshalizer.Unmarshal(element, marshaledData) + if err != nil { + return nil, err + } + + return element, nil +} + +func (s *stakingSC) saveWaitingListElement(key []byte, element *ElementInList) error { + marshaledData, err := s.marshalizer.Marshal(element) + if err != nil { + return err + } + + s.eei.SetStorage(key, marshaledData) + return nil +} + +func (s *stakingSC) getWaitingListHead() (*WaitingList, error) { + waitingList := &WaitingList{ + FirstKey: make([]byte, 0), + LastKey: make([]byte, 0), + Length: 0, + LastJailedKey: make([]byte, 0), + } + marshaledData := s.eei.GetStorage([]byte(waitingListHeadKey)) + if len(marshaledData) == 0 { + return waitingList, nil + } + + err := s.marshalizer.Unmarshal(waitingList, marshaledData) + if err != nil { + return nil, err + } + + return waitingList, nil +} + +func (s *stakingSC) saveWaitingListHead(waitingList *WaitingList) error { + marshaledData, err := s.marshalizer.Marshal(waitingList) + if err != nil { + return err + } + + s.eei.SetStorage([]byte(waitingListHeadKey), marshaledData) + return nil +} + +func createWaitingListKey(blsKey []byte) []byte { + return []byte(waitingElementPrefix + string(blsKey)) +} + +func (s *stakingSC) switchJailedWithWaiting(args *vmcommon.ContractCallInput) vmcommon.ReturnCode { + if s.enableEpochsHandler.IsFlagEnabled(common.StakingV4StartedFlag) && !s.enableEpochsHandler.IsFlagEnabled(common.StakingV4Step1Flag) { + s.eei.AddReturnMessage(vm.ErrWaitingListDisabled.Error()) + return vmcommon.UserError + } + if !bytes.Equal(args.CallerAddr, s.endOfEpochAccessAddr) { + s.eei.AddReturnMessage("switchJailedWithWaiting 
function not allowed to be called by address " + string(args.CallerAddr)) + return vmcommon.UserError + } + if len(args.Arguments) != 1 { + return vmcommon.UserError + } + + blsKey := args.Arguments[0] + registrationData, err := s.getOrCreateRegisteredData(blsKey) + if err != nil { + s.eei.AddReturnMessage(err.Error()) + return vmcommon.UserError + } + if len(registrationData.RewardAddress) == 0 { + s.eei.AddReturnMessage("no need to jail as not a validator") + return vmcommon.UserError + } + if !registrationData.Staked { + s.eei.AddReturnMessage("no need to jail as not a validator") + return vmcommon.UserError + } + if registrationData.Jailed { + s.eei.AddReturnMessage(vm.ErrBLSPublicKeyAlreadyJailed.Error()) + return vmcommon.UserError + } + switched, err := s.moveFirstFromWaitingToStakedIfNeeded(blsKey) + if err != nil { + s.eei.AddReturnMessage(err.Error()) + return vmcommon.UserError + } + + registrationData.NumJailed++ + registrationData.Jailed = true + registrationData.JailedNonce = s.eei.BlockChainHook().CurrentNonce() + + if !switched && !s.enableEpochsHandler.IsFlagEnabled(common.CorrectJailedNotUnStakedEmptyQueueFlag) { + s.eei.AddReturnMessage("did not switch as nobody in waiting, but jailed") + } else { + s.tryRemoveJailedNodeFromStaked(registrationData) + } + + err = s.saveStakingData(blsKey, registrationData) + if err != nil { + s.eei.AddReturnMessage("cannot save staking data: error " + err.Error()) + return vmcommon.UserError + } + + return vmcommon.Ok +} + +func (s *stakingSC) getWaitingListIndex(args *vmcommon.ContractCallInput) vmcommon.ReturnCode { + if s.enableEpochsHandler.IsFlagEnabled(common.StakingV4StartedFlag) { + s.eei.AddReturnMessage(vm.ErrWaitingListDisabled.Error()) + return vmcommon.UserError + } + if !bytes.Equal(args.CallerAddr, s.stakeAccessAddr) { + s.eei.AddReturnMessage("this is only a view function") + return vmcommon.UserError + } + if len(args.Arguments) != 1 { + s.eei.AddReturnMessage("number of arguments must be equal to 1") + return vmcommon.UserError + } + + waitingElementKey := createWaitingListKey(args.Arguments[0]) + _, err := s.getWaitingListElement(waitingElementKey) + if err != nil { + s.eei.AddReturnMessage(err.Error()) + return vmcommon.UserError + } + + waitingListHead, err := s.getWaitingListHead() + if err != nil { + s.eei.AddReturnMessage(err.Error()) + return vmcommon.UserError + } + + if bytes.Equal(waitingElementKey, waitingListHead.FirstKey) { + s.eei.Finish([]byte(strconv.Itoa(1))) + return vmcommon.Ok + } + if bytes.Equal(waitingElementKey, waitingListHead.LastKey) { + s.eei.Finish([]byte(strconv.Itoa(int(waitingListHead.Length)))) + return vmcommon.Ok + } + + prevElement, err := s.getWaitingListElement(waitingListHead.FirstKey) + if err != nil { + s.eei.AddReturnMessage(err.Error()) + return vmcommon.UserError + } + + index := uint32(2) + nextKey := make([]byte, len(waitingElementKey)) + copy(nextKey, prevElement.NextKey) + for len(nextKey) != 0 && index <= waitingListHead.Length { + if bytes.Equal(nextKey, waitingElementKey) { + s.eei.Finish([]byte(strconv.Itoa(int(index)))) + return vmcommon.Ok + } + + prevElement, err = s.getWaitingListElement(nextKey) + if err != nil { + s.eei.AddReturnMessage(err.Error()) + return vmcommon.UserError + } + + if len(prevElement.NextKey) == 0 { + break + } + index++ + copy(nextKey, prevElement.NextKey) + } + + s.eei.AddReturnMessage("element in waiting list not found") + return vmcommon.UserError +} + +func (s *stakingSC) getWaitingListSize(args *vmcommon.ContractCallInput) 
vmcommon.ReturnCode { + if s.enableEpochsHandler.IsFlagEnabled(common.StakingV4StartedFlag) { + s.eei.AddReturnMessage(vm.ErrWaitingListDisabled.Error()) + return vmcommon.UserError + } + + if args.CallValue.Cmp(zero) != 0 { + s.eei.AddReturnMessage(vm.TransactionValueMustBeZero) + return vmcommon.UserError + } + + err := s.eei.UseGas(s.gasCost.MetaChainSystemSCsCost.Get) + if err != nil { + s.eei.AddReturnMessage("insufficient gas") + return vmcommon.OutOfGas + } + + waitingListHead, err := s.getWaitingListHead() + if err != nil { + s.eei.AddReturnMessage(err.Error()) + return vmcommon.UserError + } + + s.eei.Finish([]byte(strconv.Itoa(int(waitingListHead.Length)))) + return vmcommon.Ok +} + +func (s *stakingSC) getWaitingListRegisterNonceAndRewardAddress(args *vmcommon.ContractCallInput) vmcommon.ReturnCode { + if !bytes.Equal(args.CallerAddr, s.stakeAccessAddr) { + s.eei.AddReturnMessage("this is only a view function") + return vmcommon.UserError + } + if len(args.Arguments) != 0 { + s.eei.AddReturnMessage("number of arguments must be equal to 0") + return vmcommon.UserError + } + + waitingListData, err := s.getFirstElementsFromWaitingList(math.MaxUint32) + if err != nil { + s.eei.AddReturnMessage(err.Error()) + return vmcommon.UserError + } + if len(waitingListData.stakedDataList) == 0 { + s.eei.AddReturnMessage("no one in waitingList") + return vmcommon.UserError + } + + for index, stakedData := range waitingListData.stakedDataList { + s.eei.Finish(waitingListData.blsKeys[index]) + s.eei.Finish(stakedData.RewardAddress) + s.eei.Finish(big.NewInt(int64(stakedData.RegisterNonce)).Bytes()) + } + + return vmcommon.Ok +} + +func (s *stakingSC) resetLastUnJailedFromQueue(args *vmcommon.ContractCallInput) vmcommon.ReturnCode { + if !s.enableEpochsHandler.IsFlagEnabled(common.CorrectLastUnJailedFlag) { + // backward compatibility + return vmcommon.UserError + } + if s.enableEpochsHandler.IsFlagEnabled(common.StakingV4StartedFlag) && !s.enableEpochsHandler.IsFlagEnabled(common.StakingV4Step1Flag) { + s.eei.AddReturnMessage(vm.ErrWaitingListDisabled.Error()) + return vmcommon.UserError + } + if !bytes.Equal(args.CallerAddr, s.endOfEpochAccessAddr) { + s.eei.AddReturnMessage("stake nodes from waiting list can be called by endOfEpochAccess address only") + return vmcommon.UserError + } + if len(args.Arguments) != 0 { + s.eei.AddReturnMessage("number of arguments must be equal to 0") + return vmcommon.UserError + } + + waitingList, err := s.getWaitingListHead() + if err != nil { + s.eei.AddReturnMessage(err.Error()) + return vmcommon.UserError + } + + if len(waitingList.LastJailedKey) == 0 { + return vmcommon.Ok + } + + waitingList.LastJailedKey = make([]byte, 0) + err = s.saveWaitingListHead(waitingList) + if err != nil { + s.eei.AddReturnMessage(err.Error()) + return vmcommon.UserError + } + + return vmcommon.Ok +} + +func (s *stakingSC) cleanAdditionalQueueNotEnoughFunds( + waitingListData *waitingListReturnData, +) ([]string, map[string][][]byte, error) { + + listOfOwners := make([]string, 0) + mapOwnersUnStakedNodes := make(map[string][][]byte) + mapCheckedOwners := make(map[string]*validatorFundInfo) + for i := len(waitingListData.blsKeys) - 1; i >= 0; i-- { + stakedData := waitingListData.stakedDataList[i] + validatorInfo, err := s.checkValidatorFunds(mapCheckedOwners, stakedData.OwnerAddress, s.stakeValue) + if err != nil { + return nil, nil, err + } + if validatorInfo.numNodesToUnstake == 0 { + continue + } + + validatorInfo.numNodesToUnstake-- + blsKey := waitingListData.blsKeys[i] + err 
= s.removeFromWaitingList(blsKey) + if err != nil { + return nil, nil, err + } + + registrationData, err := s.getOrCreateRegisteredData(blsKey) + if err != nil { + return nil, nil, err + } + + registrationData.Staked = false + registrationData.UnStakedEpoch = s.eei.BlockChainHook().CurrentEpoch() + registrationData.UnStakedNonce = s.eei.BlockChainHook().CurrentNonce() + registrationData.Waiting = false + + err = s.saveStakingData(blsKey, registrationData) + if err != nil { + return nil, nil, err + } + + _, alreadyAdded := mapOwnersUnStakedNodes[string(stakedData.OwnerAddress)] + if !alreadyAdded { + listOfOwners = append(listOfOwners, string(stakedData.OwnerAddress)) + } + + mapOwnersUnStakedNodes[string(stakedData.OwnerAddress)] = append(mapOwnersUnStakedNodes[string(stakedData.OwnerAddress)], blsKey) + } + + return listOfOwners, mapOwnersUnStakedNodes, nil +} + +func (s *stakingSC) stakeNodesFromQueue(args *vmcommon.ContractCallInput) vmcommon.ReturnCode { + if !s.enableEpochsHandler.IsFlagEnabled(common.StakingV2Flag) { + s.eei.AddReturnMessage("invalid method to call") + return vmcommon.UserError + } + if s.enableEpochsHandler.IsFlagEnabled(common.StakingV4StartedFlag) && !s.enableEpochsHandler.IsFlagEnabled(common.StakingV4Step1Flag) { + s.eei.AddReturnMessage(vm.ErrWaitingListDisabled.Error()) + return vmcommon.UserError + } + if !bytes.Equal(args.CallerAddr, s.endOfEpochAccessAddr) { + s.eei.AddReturnMessage("stake nodes from waiting list can be called by endOfEpochAccess address only") + return vmcommon.UserError + } + if len(args.Arguments) != 1 { + s.eei.AddReturnMessage("number of arguments must be equal to 1") + return vmcommon.UserError + } + + numNodesToStake := big.NewInt(0).SetBytes(args.Arguments[0]).Uint64() + waitingListData, err := s.getFirstElementsFromWaitingList(math.MaxUint32) + if err != nil { + s.eei.AddReturnMessage(err.Error()) + return vmcommon.UserError + } + if len(waitingListData.blsKeys) == 0 { + s.eei.AddReturnMessage("no nodes in queue") + return vmcommon.Ok + } + + nodePriceToUse := big.NewInt(0).Set(s.minNodePrice) + if s.enableEpochsHandler.IsFlagEnabled(common.CorrectLastUnJailedFlag) { + nodePriceToUse.Set(s.stakeValue) + } + + stakedNodes := uint64(0) + mapCheckedOwners := make(map[string]*validatorFundInfo) + for i, blsKey := range waitingListData.blsKeys { + stakedData := waitingListData.stakedDataList[i] + if stakedNodes >= numNodesToStake { + break + } + + validatorInfo, errCheck := s.checkValidatorFunds(mapCheckedOwners, stakedData.OwnerAddress, nodePriceToUse) + if errCheck != nil { + s.eei.AddReturnMessage(errCheck.Error()) + return vmcommon.UserError + } + if validatorInfo.numNodesToUnstake > 0 { + continue + } + + s.activeStakingFor(stakedData) + err = s.saveStakingData(blsKey, stakedData) + if err != nil { + s.eei.AddReturnMessage(err.Error()) + return vmcommon.UserError + } + + // remove from waiting list + err = s.removeFromWaitingList(blsKey) + if err != nil { + s.eei.AddReturnMessage(err.Error()) + return vmcommon.UserError + } + + stakedNodes++ + // return the change key + s.eei.Finish(blsKey) + s.eei.Finish(stakedData.RewardAddress) + } + + s.addToStakedNodes(int64(stakedNodes)) + + return vmcommon.Ok +} + +func (s *stakingSC) cleanAdditionalQueue(args *vmcommon.ContractCallInput) vmcommon.ReturnCode { + if !s.enableEpochsHandler.IsFlagEnabled(common.CorrectLastUnJailedFlag) { + s.eei.AddReturnMessage("invalid method to call") + return vmcommon.UserError + } + if s.enableEpochsHandler.IsFlagEnabled(common.StakingV4StartedFlag) && 
!s.enableEpochsHandler.IsFlagEnabled(common.StakingV4Step1Flag) { + s.eei.AddReturnMessage(vm.ErrWaitingListDisabled.Error()) + return vmcommon.UserError + } + if !bytes.Equal(args.CallerAddr, s.endOfEpochAccessAddr) { + s.eei.AddReturnMessage("stake nodes from waiting list can be called by endOfEpochAccess address only") + return vmcommon.UserError + } + if len(args.Arguments) != 0 { + s.eei.AddReturnMessage("number of arguments must be 0") + return vmcommon.UserError + } + + waitingListData, err := s.getFirstElementsFromWaitingList(math.MaxUint32) + if err != nil { + s.eei.AddReturnMessage(err.Error()) + return vmcommon.UserError + } + if len(waitingListData.blsKeys) == 0 { + s.eei.AddReturnMessage("no nodes in queue") + return vmcommon.Ok + } + + listOfOwners, mapOwnersAndBLSKeys, err := s.cleanAdditionalQueueNotEnoughFunds(waitingListData) + if err != nil { + s.eei.AddReturnMessage(err.Error()) + return vmcommon.UserError + } + + for _, owner := range listOfOwners { + s.eei.Finish([]byte(owner)) + blsKeys := mapOwnersAndBLSKeys[owner] + for _, blsKey := range blsKeys { + s.eei.Finish(blsKey) + } + } + + return vmcommon.Ok +} + +func (s *stakingSC) getFirstElementsFromWaitingList(numNodes uint32) (*waitingListReturnData, error) { + waitingListData := &waitingListReturnData{} + + waitingListHead, err := s.getWaitingListHead() + if err != nil { + return nil, err + } + if waitingListHead.Length == 0 { + return waitingListData, nil + } + + blsKeysToStake := make([][]byte, 0) + stakedDataList := make([]*StakedDataV2_0, 0) + index := uint32(1) + nextKey := make([]byte, len(waitingListHead.FirstKey)) + copy(nextKey, waitingListHead.FirstKey) + for len(nextKey) != 0 && index <= waitingListHead.Length && index <= numNodes { + element, errGet := s.getWaitingListElement(nextKey) + if errGet != nil { + return nil, errGet + } + + if bytes.Equal(nextKey, waitingListHead.LastJailedKey) { + waitingListData.afterLastJailed = true + } + + stakedData, errGet := s.getOrCreateRegisteredData(element.BLSPublicKey) + if errGet != nil { + return nil, errGet + } + + blsKeysToStake = append(blsKeysToStake, element.BLSPublicKey) + stakedDataList = append(stakedDataList, stakedData) + + if len(element.NextKey) == 0 { + break + } + index++ + copy(nextKey, element.NextKey) + } + + if numNodes >= waitingListHead.Length && len(blsKeysToStake) != int(waitingListHead.Length) { + log.Warn("mismatch length on waiting list elements in stakingSC.getFirstElementsFromWaitingList") + } + + waitingListData.blsKeys = blsKeysToStake + waitingListData.stakedDataList = stakedDataList + waitingListData.lastKey = nextKey + return waitingListData, nil +} + +func (s *stakingSC) fixWaitingListQueueSize(args *vmcommon.ContractCallInput) vmcommon.ReturnCode { + if !s.enableEpochsHandler.IsFlagEnabled(common.CorrectFirstQueuedFlag) { + s.eei.AddReturnMessage("invalid method to call") + return vmcommon.UserError + } + if s.enableEpochsHandler.IsFlagEnabled(common.StakingV4StartedFlag) { + s.eei.AddReturnMessage(vm.ErrWaitingListDisabled.Error()) + return vmcommon.UserError + } + + if args.CallValue.Cmp(zero) != 0 { + s.eei.AddReturnMessage(vm.TransactionValueMustBeZero) + return vmcommon.UserError + } + + err := s.eei.UseGas(s.gasCost.MetaChainSystemSCsCost.FixWaitingListSize) + if err != nil { + s.eei.AddReturnMessage("insufficient gas") + return vmcommon.OutOfGas + } + + waitingListHead, err := s.getWaitingListHead() + if err != nil { + s.eei.AddReturnMessage(err.Error()) + return vmcommon.UserError + } + + if waitingListHead.Length <= 1 { 
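As an aside, the queue that fixWaitingListQueueSize re-counts here is a linked list kept in contract storage: the head sits under the "waitingList" key and every element under "w_"+<blsKey>, chained through NextKey. A minimal traversal sketch over those structures follows; the loader callback and the helper name are assumptions made only for illustration:

// walkWaitingList visits at most head.Length elements starting from FirstKey,
// mirroring the loop pattern used by fixWaitingListQueueSize and getFirstElementsFromWaitingList.
func walkWaitingList(head *WaitingList, load func(key []byte) (*ElementInList, error)) ([][]byte, error) {
	visited := make([][]byte, 0, head.Length)
	nextKey := head.FirstKey
	for index := uint32(1); len(nextKey) != 0 && index <= head.Length; index++ {
		element, err := load(nextKey)
		if err != nil {
			return nil, err
		}
		visited = append(visited, element.BLSPublicKey)
		if len(element.NextKey) == 0 {
			break
		}
		nextKey = element.NextKey
	}
	return visited, nil
}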
+ return vmcommon.Ok + } + + foundLastJailedKey := len(waitingListHead.LastJailedKey) == 0 + + index := uint32(1) + nextKey := make([]byte, len(waitingListHead.FirstKey)) + copy(nextKey, waitingListHead.FirstKey) + for len(nextKey) != 0 && index <= waitingListHead.Length { + element, errGet := s.getWaitingListElement(nextKey) + if errGet != nil { + s.eei.AddReturnMessage(errGet.Error()) + return vmcommon.UserError + } + + if bytes.Equal(waitingListHead.LastJailedKey, nextKey) { + foundLastJailedKey = true + } + + _, errGet = s.getOrCreateRegisteredData(element.BLSPublicKey) + if errGet != nil { + s.eei.AddReturnMessage(errGet.Error()) + return vmcommon.UserError + } + + if len(element.NextKey) == 0 { + break + } + index++ + copy(nextKey, element.NextKey) + } + + waitingListHead.Length = index + waitingListHead.LastKey = nextKey + if !foundLastJailedKey { + waitingListHead.LastJailedKey = make([]byte, 0) + } + + err = s.saveWaitingListHead(waitingListHead) + if err != nil { + s.eei.AddReturnMessage(err.Error()) + return vmcommon.UserError + } + + return vmcommon.Ok +} + +func (s *stakingSC) addMissingNodeToQueue(args *vmcommon.ContractCallInput) vmcommon.ReturnCode { + if !s.enableEpochsHandler.IsFlagEnabled(common.CorrectFirstQueuedFlag) { + s.eei.AddReturnMessage("invalid method to call") + return vmcommon.UserError + } + if s.enableEpochsHandler.IsFlagEnabled(common.StakingV4StartedFlag) { + s.eei.AddReturnMessage(vm.ErrWaitingListDisabled.Error()) + return vmcommon.UserError + } + if args.CallValue.Cmp(zero) != 0 { + s.eei.AddReturnMessage(vm.TransactionValueMustBeZero) + return vmcommon.UserError + } + err := s.eei.UseGas(s.gasCost.MetaChainSystemSCsCost.FixWaitingListSize) + if err != nil { + s.eei.AddReturnMessage("insufficient gas") + return vmcommon.OutOfGas + } + if len(args.Arguments) != 1 { + s.eei.AddReturnMessage("invalid number of arguments") + return vmcommon.UserError + } + + blsKey := args.Arguments[0] + _, err = s.getWaitingListElement(createWaitingListKey(blsKey)) + if err != nil { + s.eei.AddReturnMessage(err.Error()) + return vmcommon.UserError + } + + waitingListData, err := s.getFirstElementsFromWaitingList(math.MaxUint32) + if err != nil { + s.eei.AddReturnMessage(err.Error()) + return vmcommon.UserError + } + + for _, keyInList := range waitingListData.blsKeys { + if bytes.Equal(keyInList, blsKey) { + s.eei.AddReturnMessage("key is in queue, not missing") + return vmcommon.UserError + } + } + + waitingList, err := s.getWaitingListHead() + if err != nil { + s.eei.AddReturnMessage(err.Error()) + return vmcommon.UserError + } + + waitingList.Length += 1 + if waitingList.Length == 1 { + err = s.startWaitingList(waitingList, false, blsKey) + if err != nil { + s.eei.AddReturnMessage(err.Error()) + return vmcommon.UserError + } + + return vmcommon.Ok + } + + err = s.addToEndOfTheList(waitingList, blsKey) + if err != nil { + s.eei.AddReturnMessage(err.Error()) + return vmcommon.UserError + } + + return vmcommon.Ok +} diff --git a/vm/systemSmartContracts/staking_test.go b/vm/systemSmartContracts/staking_test.go index 7f46a417db5..c5419dddd20 100644 --- a/vm/systemSmartContracts/staking_test.go +++ b/vm/systemSmartContracts/staking_test.go @@ -18,6 +18,7 @@ import ( "github.com/multiversx/mx-chain-core-go/marshal" "github.com/multiversx/mx-chain-go/common" "github.com/multiversx/mx-chain-go/config" + "github.com/multiversx/mx-chain-go/process/smartContract/hooks" "github.com/multiversx/mx-chain-go/state" "github.com/multiversx/mx-chain-go/state/accounts" 
"github.com/multiversx/mx-chain-go/testscommon/enableEpochsHandlerMock" @@ -53,6 +54,8 @@ func createMockStakingScArgumentsWithSystemScAddresses( MaxNumberOfNodesForStake: 10, ActivateBLSPubKeyMessageVerification: false, MinUnstakeTokensValue: "1", + StakeLimitPercentage: 1.0, + NodeLimitPercentage: 1.0, }, EnableEpochsHandler: enableEpochsHandlerMock.NewEnableEpochsHandlerStub( common.StakeFlag, @@ -95,6 +98,18 @@ func CreateVmContractCallInput() *vmcommon.ContractCallInput { } } +func createArgsVMContext() VMContextArgs { + return VMContextArgs{ + BlockChainHook: &mock.BlockChainHookStub{}, + CryptoHook: hooks.NewVMCryptoHook(), + InputParser: &mock.ArgumentParserMock{}, + ValidatorAccountsDB: &stateMock.AccountsStub{}, + ChanceComputer: &mock.RaterMock{}, + EnableEpochsHandler: &enableEpochsHandlerMock.EnableEpochsHandlerStub{}, + UserAccountsDB: &stateMock.AccountsStub{}, + } +} + func TestNewStakingSmartContract_NilSystemEIShouldErr(t *testing.T) { t.Parallel() @@ -998,6 +1013,93 @@ func TestStakingSc_ExecuteIsStaked(t *testing.T) { checkIsStaked(t, stakingSmartContract, callerAddress, stakerPubKey, vmcommon.UserError) } +func TestStakingSc_StakeWithStakingV4(t *testing.T) { + t.Parallel() + + enableEpochsHandler := &enableEpochsHandlerMock.EnableEpochsHandlerStub{} + enableEpochsHandler.AddActiveFlags(common.StakingV2Flag) + + args := createMockStakingScArguments() + stakingAccessAddress := []byte("stakingAccessAddress") + args.StakingAccessAddr = stakingAccessAddress + args.StakingSCConfig.MaxNumberOfNodesForStake = 4 + args.EnableEpochsHandler = enableEpochsHandler + + argsVMContext := createArgsVMContext() + argsVMContext.EnableEpochsHandler = enableEpochsHandler + eei, _ := NewVMContext(argsVMContext) + args.Eei = eei + + stakingSmartContract, _ := NewStakingSmartContract(args) + + for i := 0; i < 10; i++ { + idxStr := strconv.Itoa(i) + addr := []byte("addr" + idxStr) + doStake(t, stakingSmartContract, stakingAccessAddress, addr, addr) + + if uint64(i) < stakingSmartContract.maxNumNodes { + checkIsStaked(t, stakingSmartContract, addr, addr, vmcommon.Ok) + } else { + checkIsStaked(t, stakingSmartContract, addr, addr, vmcommon.UserError) + require.True(t, strings.Contains(eei.returnMessage, "staking is full")) + eei.returnMessage = "" + } + } + requireRegisteredNodes(t, stakingSmartContract, eei, 4, 6) + + doUnStake(t, stakingSmartContract, stakingAccessAddress, []byte("addr0"), []byte("addr0"), vmcommon.Ok) + requireRegisteredNodes(t, stakingSmartContract, eei, 4, 5) + + enableEpochsHandler.AddActiveFlags(common.StakingV4StartedFlag) + for i := 5; i < 10; i++ { + idxStr := strconv.Itoa(i) + addr := []byte("addr" + idxStr) + err := stakingSmartContract.removeFromWaitingList(addr) + require.Nil(t, err) + } + + for i := 10; i < 20; i++ { + idxStr := strconv.Itoa(i) + addr := []byte("addr" + idxStr) + doStake(t, stakingSmartContract, stakingAccessAddress, addr, addr) + checkIsStaked(t, stakingSmartContract, addr, addr, vmcommon.Ok) + } + requireRegisteredNodes(t, stakingSmartContract, eei, 14, 0) + + doUnStake(t, stakingSmartContract, stakingAccessAddress, []byte("addr10"), []byte("addr10"), vmcommon.Ok) + requireRegisteredNodes(t, stakingSmartContract, eei, 13, 0) +} + +func TestStakingSc_UnStakeNodeFromWaitingListAfterStakingV4ShouldError(t *testing.T) { + t.Parallel() + + enableEpochsHandler := &enableEpochsHandlerMock.EnableEpochsHandlerStub{} + enableEpochsHandler.AddActiveFlags(common.StakingV2Flag) + + args := createMockStakingScArguments() + stakingAccessAddress := 
[]byte("stakingAccessAddress") + args.StakingAccessAddr = stakingAccessAddress + args.StakingSCConfig.MaxNumberOfNodesForStake = 2 + args.EnableEpochsHandler = enableEpochsHandler + + argsVMContext := createArgsVMContext() + argsVMContext.EnableEpochsHandler = enableEpochsHandler + eei, _ := NewVMContext(argsVMContext) + args.Eei = eei + + stakingSmartContract, _ := NewStakingSmartContract(args) + + doStake(t, stakingSmartContract, stakingAccessAddress, []byte("address0"), []byte("address0")) + doStake(t, stakingSmartContract, stakingAccessAddress, []byte("address1"), []byte("address1")) + doStake(t, stakingSmartContract, stakingAccessAddress, []byte("address2"), []byte("address2")) + requireRegisteredNodes(t, stakingSmartContract, eei, 2, 1) + + enableEpochsHandler.AddActiveFlags(common.StakingV4StartedFlag) + eei.returnMessage = "" + doUnStake(t, stakingSmartContract, stakingAccessAddress, []byte("address2"), []byte("address2"), vmcommon.ExecutionFailed) + require.Equal(t, eei.returnMessage, vm.ErrWaitingListDisabled.Error()) +} + func TestStakingSc_StakeWithV1ShouldWork(t *testing.T) { t.Parallel() @@ -1161,14 +1263,7 @@ func TestStakingSc_ExecuteStakeStakeJailAndSwitch(t *testing.T) { _ = json.Unmarshal(marshaledData, stakedData) assert.True(t, stakedData.Jailed) assert.True(t, stakedData.Staked) - - arguments.Function = "getTotalNumberOfRegisteredNodes" - arguments.Arguments = [][]byte{} - retCode = stakingSmartContract.Execute(arguments) - assert.Equal(t, retCode, vmcommon.Ok) - - lastOutput := eei.output[len(eei.output)-1] - assert.Equal(t, lastOutput, []byte{2}) + requireTotalNumberOfRegisteredNodes(t, stakingSmartContract, eei, big.NewInt(2)) } func TestStakingSc_ExecuteStakeStakeJailAndSwitchWithBoundaries(t *testing.T) { @@ -1305,14 +1400,7 @@ func TestStakingSc_ExecuteStakeStakeJailAndSwitchWithBoundaries(t *testing.T) { _ = json.Unmarshal(marshaledData, stakedData) assert.Equal(t, tt.shouldBeJailed, stakedData.Jailed) assert.Equal(t, tt.shouldBeStaked, stakedData.Staked) - - arguments.Function = "getTotalNumberOfRegisteredNodes" - arguments.Arguments = [][]byte{} - retCode = stakingSmartContract.Execute(arguments) - assert.Equal(t, vmcommon.Ok, retCode) - - lastOutput := eei.output[len(eei.output)-1] - assert.Equal(t, []byte{byte(tt.remainingStakedNodesNumber)}, lastOutput) + requireTotalNumberOfRegisteredNodes(t, stakingSmartContract, eei, big.NewInt(int64(tt.remainingStakedNodesNumber))) }) } } @@ -1447,14 +1535,7 @@ func TestStakingSc_ExecuteStakeStakeStakeJailJailUnJailTwice(t *testing.T) { doGetWaitingListSize(t, stakingSmartContract, eei, 2) outPut = doGetWaitingListRegisterNonceAndRewardAddress(t, stakingSmartContract, eei) assert.Equal(t, 6, len(outPut)) - - arguments.Function = "getTotalNumberOfRegisteredNodes" - arguments.Arguments = [][]byte{} - retCode = stakingSmartContract.Execute(arguments) - assert.Equal(t, retCode, vmcommon.Ok) - - lastOutput := eei.output[len(eei.output)-1] - assert.Equal(t, lastOutput, []byte{4}) + requireTotalNumberOfRegisteredNodes(t, stakingSmartContract, eei, big.NewInt(4)) } func TestStakingSc_ExecuteStakeUnStakeJailCombinations(t *testing.T) { @@ -3337,6 +3418,150 @@ func TestStakingSc_fixMissingNodeAddOneNodeOnly(t *testing.T) { assert.Equal(t, waitingListData.blsKeys[0], blsKey) } +func TestStakingSC_StakingV4Flags(t *testing.T) { + t.Parallel() + + enableEpochsHandler := &enableEpochsHandlerMock.EnableEpochsHandlerStub{} + enableEpochsHandler.AddActiveFlags(common.StakeFlag) + 
enableEpochsHandler.AddActiveFlags(common.CorrectLastUnJailedFlag) + enableEpochsHandler.AddActiveFlags(common.CorrectFirstQueuedFlag) + enableEpochsHandler.AddActiveFlags(common.CorrectJailedNotUnStakedEmptyQueueFlag) + enableEpochsHandler.AddActiveFlags(common.ValidatorToDelegationFlag) + enableEpochsHandler.AddActiveFlags(common.StakingV4Step1Flag) + enableEpochsHandler.AddActiveFlags(common.StakingV2Flag) + enableEpochsHandler.AddActiveFlags(common.StakingV4StartedFlag) + + argsVMContext := createArgsVMContext() + argsVMContext.EnableEpochsHandler = enableEpochsHandler + eei, _ := NewVMContext(argsVMContext) + + args := createMockStakingScArguments() + args.Eei = eei + args.EnableEpochsHandler = enableEpochsHandler + stakingSmartContract, _ := NewStakingSmartContract(args) + + // Functions which are not allowed starting STAKING V4 INIT + arguments := CreateVmContractCallInput() + arguments.Function = "getQueueIndex" + retCode := stakingSmartContract.Execute(arguments) + require.Equal(t, vmcommon.UserError, retCode) + require.Equal(t, vm.ErrWaitingListDisabled.Error(), eei.returnMessage) + + eei.CleanCache() + arguments.Function = "getQueueSize" + retCode = stakingSmartContract.Execute(arguments) + require.Equal(t, vmcommon.UserError, retCode) + require.Equal(t, vm.ErrWaitingListDisabled.Error(), eei.returnMessage) + + eei.CleanCache() + arguments.Function = "fixWaitingListQueueSize" + retCode = stakingSmartContract.Execute(arguments) + require.Equal(t, vmcommon.UserError, retCode) + require.Equal(t, vm.ErrWaitingListDisabled.Error(), eei.returnMessage) + + eei.CleanCache() + arguments.Function = "addMissingNodeToQueue" + retCode = stakingSmartContract.Execute(arguments) + require.Equal(t, vmcommon.UserError, retCode) + require.Equal(t, vm.ErrWaitingListDisabled.Error(), eei.returnMessage) + + // Functions which are allowed to be called by systemSC at the end of the epoch in epoch = STAKING V4 INIT + eei.CleanCache() + arguments.Function = "switchJailedWithWaiting" + retCode = stakingSmartContract.Execute(arguments) + require.Equal(t, vmcommon.UserError, retCode) + require.True(t, strings.Contains(eei.returnMessage, "function not allowed to be called by address")) + + eei.CleanCache() + arguments.Function = "resetLastUnJailedFromQueue" + retCode = stakingSmartContract.Execute(arguments) + require.Equal(t, vmcommon.UserError, retCode) + require.True(t, strings.Contains(eei.returnMessage, "can be called by endOfEpochAccess address only")) + + eei.CleanCache() + arguments.Function = "stakeNodesFromQueue" + retCode = stakingSmartContract.Execute(arguments) + require.Equal(t, vmcommon.UserError, retCode) + require.True(t, strings.Contains(eei.returnMessage, "can be called by endOfEpochAccess address only")) + + eei.CleanCache() + arguments.Function = "cleanAdditionalQueue" + retCode = stakingSmartContract.Execute(arguments) + require.Equal(t, vmcommon.UserError, retCode) + require.True(t, strings.Contains(eei.returnMessage, "can be called by endOfEpochAccess address only")) + + enableEpochsHandler.RemoveActiveFlags(common.StakingV4Step1Flag) + // All functions from above are not allowed anymore starting STAKING V4 epoch + eei.CleanCache() + arguments.Function = "getQueueIndex" + retCode = stakingSmartContract.Execute(arguments) + require.Equal(t, vmcommon.UserError, retCode) + require.Equal(t, vm.ErrWaitingListDisabled.Error(), eei.returnMessage) + + eei.CleanCache() + arguments.Function = "getQueueSize" + retCode = stakingSmartContract.Execute(arguments) + require.Equal(t, 
vmcommon.UserError, retCode) + require.Equal(t, vm.ErrWaitingListDisabled.Error(), eei.returnMessage) + + eei.CleanCache() + arguments.Function = "fixWaitingListQueueSize" + retCode = stakingSmartContract.Execute(arguments) + require.Equal(t, vmcommon.UserError, retCode) + require.Equal(t, vm.ErrWaitingListDisabled.Error(), eei.returnMessage) + + eei.CleanCache() + arguments.Function = "addMissingNodeToQueue" + retCode = stakingSmartContract.Execute(arguments) + require.Equal(t, vmcommon.UserError, retCode) + require.Equal(t, vm.ErrWaitingListDisabled.Error(), eei.returnMessage) + + eei.CleanCache() + arguments.Function = "switchJailedWithWaiting" + retCode = stakingSmartContract.Execute(arguments) + require.Equal(t, vmcommon.UserError, retCode) + require.Equal(t, vm.ErrWaitingListDisabled.Error(), eei.returnMessage) + + eei.CleanCache() + arguments.Function = "resetLastUnJailedFromQueue" + retCode = stakingSmartContract.Execute(arguments) + require.Equal(t, vmcommon.UserError, retCode) + require.Equal(t, vm.ErrWaitingListDisabled.Error(), eei.returnMessage) + + eei.CleanCache() + arguments.Function = "stakeNodesFromQueue" + retCode = stakingSmartContract.Execute(arguments) + require.Equal(t, vmcommon.UserError, retCode) + require.Equal(t, vm.ErrWaitingListDisabled.Error(), eei.returnMessage) + + eei.CleanCache() + arguments.Function = "cleanAdditionalQueue" + retCode = stakingSmartContract.Execute(arguments) + require.Equal(t, vmcommon.UserError, retCode) + require.Equal(t, vm.ErrWaitingListDisabled.Error(), eei.returnMessage) +} + +func requireRegisteredNodes(t *testing.T, stakingSC *stakingSC, eei *vmContext, stakedNodes int64, waitingListNodes uint32) { + stakeConfig := stakingSC.getConfig() + waitingList, _ := stakingSC.getWaitingListHead() + require.Equal(t, stakedNodes, stakeConfig.StakedNodes) + require.Equal(t, waitingListNodes, waitingList.Length) + + requireTotalNumberOfRegisteredNodes(t, stakingSC, eei, big.NewInt(stakedNodes+int64(waitingListNodes))) +} + +func requireTotalNumberOfRegisteredNodes(t *testing.T, stakingSC *stakingSC, eei *vmContext, expectedRegisteredNodes *big.Int) { + arguments := CreateVmContractCallInput() + arguments.Function = "getTotalNumberOfRegisteredNodes" + arguments.Arguments = [][]byte{} + + retCode := stakingSC.Execute(arguments) + lastOutput := eei.output[len(eei.output)-1] + noOfRegisteredNodes := big.NewInt(0).SetBytes(lastOutput) + require.Equal(t, retCode, vmcommon.Ok) + require.Equal(t, expectedRegisteredNodes, noOfRegisteredNodes) +} + func TestStakingSc_fixMissingNodeAddAsLast(t *testing.T) { t.Parallel() diff --git a/vm/systemSmartContracts/validator.go b/vm/systemSmartContracts/validator.go index 86350b5ef34..37799ccc447 100644 --- a/vm/systemSmartContracts/validator.go +++ b/vm/systemSmartContracts/validator.go @@ -21,6 +21,8 @@ import ( const unJailedFunds = "unJailFunds" const unStakeUnBondPauseKey = "unStakeUnBondPause" +const minPercentage = 0.0001 +const numberOfNodesTooHigh = "number of nodes too high, no new nodes activated" var zero = big.NewInt(0) @@ -51,6 +53,9 @@ type validatorSC struct { governanceSCAddress []byte shardCoordinator sharding.Coordinator enableEpochsHandler common.EnableEpochsHandler + nodesCoordinator vm.NodesCoordinator + totalStakeLimit *big.Int + nodeLimitPercentage float64 } // ArgsValidatorSmartContract is the arguments structure to create a new ValidatorSmartContract @@ -69,6 +74,7 @@ type ArgsValidatorSmartContract struct { GovernanceSCAddress []byte ShardCoordinator sharding.Coordinator 
EnableEpochsHandler common.EnableEpochsHandler + NodesCoordinator vm.NodesCoordinator } // NewValidatorSmartContract creates an validator smart contract @@ -120,6 +126,15 @@ func NewValidatorSmartContract( if err != nil { return nil, err } + if check.IfNil(args.NodesCoordinator) { + return nil, fmt.Errorf("%w in validatorSC", vm.ErrNilNodesCoordinator) + } + if args.StakingSCConfig.NodeLimitPercentage < minPercentage { + return nil, fmt.Errorf("%w in validatorSC", vm.ErrInvalidNodeLimitPercentage) + } + if args.StakingSCConfig.StakeLimitPercentage < minPercentage { + return nil, fmt.Errorf("%w in validatorSC", vm.ErrInvalidStakeLimitPercentage) + } baseConfig := ValidatorConfig{ TotalSupply: big.NewInt(0).Set(args.GenesisTotalSupply), @@ -151,7 +166,7 @@ func NewValidatorSmartContract( return nil, vm.ErrInvalidMinCreationDeposit } - return &validatorSC{ + reg := &validatorSC{ eei: args.Eei, unBondPeriod: args.StakingSCConfig.UnBondPeriod, unBondPeriodInEpochs: args.StakingSCConfig.UnBondPeriodInEpochs, @@ -169,7 +184,16 @@ func NewValidatorSmartContract( governanceSCAddress: args.GovernanceSCAddress, shardCoordinator: args.ShardCoordinator, enableEpochsHandler: args.EnableEpochsHandler, - }, nil + nodeLimitPercentage: args.StakingSCConfig.NodeLimitPercentage, + nodesCoordinator: args.NodesCoordinator, + } + + reg.totalStakeLimit = core.GetIntTrimmedPercentageOfValue(args.GenesisTotalSupply, args.StakingSCConfig.StakeLimitPercentage) + if reg.totalStakeLimit.Cmp(baseConfig.NodePrice) < 0 { + return nil, fmt.Errorf("%w, value is %f", vm.ErrInvalidStakeLimitPercentage, args.StakingSCConfig.StakeLimitPercentage) + } + + return reg, nil } // Execute calls one of the functions from the validator smart contract and runs the code according to the input @@ -388,11 +412,7 @@ func (v *validatorSC) unJail(args *vmcommon.ContractCallInput) vmcommon.ReturnCo } if transferBack.Cmp(zero) > 0 { - err = v.eei.Transfer(args.CallerAddr, args.RecipientAddr, transferBack, nil, 0) - if err != nil { - v.eei.AddReturnMessage("transfer error on unJail function") - return vmcommon.UserError - } + v.eei.Transfer(args.CallerAddr, args.RecipientAddr, transferBack, nil, 0) } finalUnJailFunds := big.NewInt(0).Sub(args.CallValue, transferBack) @@ -628,7 +648,12 @@ func (v *validatorSC) registerBLSKeys( return nil, nil, err } + newlyAddedKeys := make([][]byte, 0) for _, blsKey := range newKeys { + if v.isNumberOfNodesTooHigh(len(registrationData.BlsPubKeys) + 1) { + break + } + vmOutput, errExec := v.executeOnStakingSC([]byte("register@" + hex.EncodeToString(blsKey) + "@" + hex.EncodeToString(registrationData.RewardAddress) + "@" + @@ -649,9 +674,10 @@ func (v *validatorSC) registerBLSKeys( } registrationData.BlsPubKeys = append(registrationData.BlsPubKeys, blsKey) + newlyAddedKeys = append(newlyAddedKeys, blsKey) } - return blsKeys, newKeys, nil + return blsKeys, newlyAddedKeys, nil } func (v *validatorSC) updateStakeValue(registrationData *ValidatorDataV2, caller []byte) vmcommon.ReturnCode { @@ -796,6 +822,11 @@ func (v *validatorSC) reStakeUnStakedNodes(args *vmcommon.ContractCallInput) vmc return vmcommon.UserError } + if v.isNumberOfNodesTooHigh(len(registrationData.BlsPubKeys)) { + v.eei.AddReturnMessage("number of nodes is too high") + return vmcommon.UserError + } + numQualified := big.NewInt(0).Div(registrationData.TotalStakeValue, validatorConfig.NodePrice) if uint64(len(args.Arguments)) > numQualified.Uint64() { v.eei.AddReturnMessage("insufficient funds") @@ -898,6 +929,27 @@ func (v *validatorSC) 
checkAllGivenKeysAreUnStaked(registrationData *ValidatorDa return mapBlsKeys, nil } +func (v *validatorSC) isStakeTooHigh(registrationData *ValidatorDataV2) bool { + if !v.enableEpochsHandler.IsFlagEnabled(common.StakeLimitsFlag) { + return false + } + + return registrationData.TotalStakeValue.Cmp(v.totalStakeLimit) > 0 +} + +func (v *validatorSC) isNumberOfNodesTooHigh(numNodes int) bool { + if !v.enableEpochsHandler.IsFlagEnabled(common.StakeLimitsFlag) { + return false + } + + return numNodes > v.computeNodeLimit() +} + +func (v *validatorSC) computeNodeLimit() int { + nodeLimit := float64(v.nodesCoordinator.GetNumTotalEligible()) * v.nodeLimitPercentage + return int(nodeLimit) +} + func (v *validatorSC) stake(args *vmcommon.ContractCallInput) vmcommon.ReturnCode { err := v.eei.UseGas(v.gasCost.MetaChainSystemSCsCost.Stake) if err != nil { @@ -931,6 +983,11 @@ func (v *validatorSC) stake(args *vmcommon.ContractCallInput) vmcommon.ReturnCod return vmcommon.UserError } + if v.isStakeTooHigh(registrationData) { + v.eei.AddReturnMessage("total stake limit reached") + return vmcommon.UserError + } + lenArgs := len(args.Arguments) if lenArgs == 0 { return v.updateStakeValue(registrationData, args.CallerAddr) @@ -1018,31 +1075,73 @@ func (v *validatorSC) stake(args *vmcommon.ContractCallInput) vmcommon.ReturnCod } } - v.activateStakingFor( + v.activateNewBLSKeys(registrationData, blsKeys, newKeys, &validatorConfig, args) + + err = v.saveRegistrationData(args.CallerAddr, registrationData) + if err != nil { + v.eei.AddReturnMessage("cannot save registration data: error " + err.Error()) + return vmcommon.UserError + } + + return vmcommon.Ok +} + +func (v *validatorSC) activateNewBLSKeys( + registrationData *ValidatorDataV2, + blsKeys [][]byte, + newKeys [][]byte, + validatorConfig *ValidatorConfig, + args *vmcommon.ContractCallInput, +) { + numRegisteredBlsKeys := len(registrationData.BlsPubKeys) + allNodesActivated := v.activateStakingFor( blsKeys, + newKeys, registrationData, validatorConfig.NodePrice, registrationData.RewardAddress, args.CallerAddr, ) - err = v.saveRegistrationData(args.CallerAddr, registrationData) - if err != nil { - v.eei.AddReturnMessage("cannot save registration data: error " + err.Error()) - return vmcommon.UserError + if !allNodesActivated && len(blsKeys) > 0 { + nodeLimit := int64(v.computeNodeLimit()) + entry := &vmcommon.LogEntry{ + Identifier: []byte(args.Function), + Address: args.RecipientAddr, + Topics: [][]byte{ + []byte(numberOfNodesTooHigh), + big.NewInt(int64(numRegisteredBlsKeys)).Bytes(), + big.NewInt(nodeLimit).Bytes(), + }, + } + v.eei.AddLogEntry(entry) } - return vmcommon.Ok } func (v *validatorSC) activateStakingFor( blsKeys [][]byte, + newKeys [][]byte, registrationData *ValidatorDataV2, fixedStakeValue *big.Int, rewardAddress []byte, ownerAddress []byte, -) { - numRegistered := uint64(registrationData.NumRegistered) +) bool { + numActivatedKey := uint64(registrationData.NumRegistered) + + numAllBLSKeys := len(registrationData.BlsPubKeys) + if v.isNumberOfNodesTooHigh(numAllBLSKeys) { + return false + } + + maxNumNodesToActivate := len(blsKeys) + if v.enableEpochsHandler.IsFlagEnabled(common.StakeLimitsFlag) { + maxNumNodesToActivate = v.computeNodeLimit() - numAllBLSKeys + len(newKeys) + } + nodesActivated := 0 + if nodesActivated >= maxNumNodesToActivate && len(blsKeys) >= maxNumNodesToActivate { + return false + } for i := uint64(0); i < uint64(len(blsKeys)); i++ { currentBLSKey := blsKeys[i] @@ -1061,12 +1160,19 @@ func (v *validatorSC) 
activateStakingFor( } if stakedData.UnStakedNonce == 0 { - numRegistered++ + numActivatedKey++ + } + + nodesActivated++ + if nodesActivated >= maxNumNodesToActivate { + break } } - registrationData.NumRegistered = uint32(numRegistered) - registrationData.LockedStake.Mul(fixedStakeValue, big.NewInt(0).SetUint64(numRegistered)) + registrationData.NumRegistered = uint32(numActivatedKey) + registrationData.LockedStake.Mul(fixedStakeValue, big.NewInt(0).SetUint64(numActivatedKey)) + + return nodesActivated < maxNumNodesToActivate || len(blsKeys) <= maxNumNodesToActivate } func (v *validatorSC) stakeOneNode( @@ -1370,11 +1476,7 @@ func (v *validatorSC) unBondV1(args *vmcommon.ContractCallInput) vmcommon.Return } } - err := v.eei.Transfer(args.CallerAddr, args.RecipientAddr, totalUnBond, nil, 0) - if err != nil { - v.eei.AddReturnMessage("transfer error on unBond function") - return vmcommon.UserError - } + v.eei.Transfer(args.CallerAddr, args.RecipientAddr, totalUnBond, nil, 0) return vmcommon.Ok } @@ -1409,11 +1511,7 @@ func (v *validatorSC) unBond(args *vmcommon.ContractCallInput) vmcommon.ReturnCo return returnCode } - err := v.eei.Transfer(args.CallerAddr, args.RecipientAddr, totalUnBond, nil, 0) - if err != nil { - v.eei.AddReturnMessage("transfer error on unBond function") - return vmcommon.UserError - } + v.eei.Transfer(args.CallerAddr, args.RecipientAddr, totalUnBond, nil, 0) return vmcommon.Ok } @@ -1500,11 +1598,7 @@ func (v *validatorSC) claim(args *vmcommon.ContractCallInput) vmcommon.ReturnCod return vmcommon.UserError } - err = v.eei.Transfer(args.CallerAddr, args.RecipientAddr, claimable, nil, 0) - if err != nil { - v.eei.AddReturnMessage("transfer error on finalizeUnStake function: error " + err.Error()) - return vmcommon.UserError - } + v.eei.Transfer(args.CallerAddr, args.RecipientAddr, claimable, nil, 0) return vmcommon.Ok } @@ -1703,12 +1797,7 @@ func (v *validatorSC) unBondTokens(args *vmcommon.ContractCallInput) vmcommon.Re return vmcommon.UserError } - err = v.eei.Transfer(args.CallerAddr, args.RecipientAddr, totalUnBond, nil, 0) - if err != nil { - v.eei.AddReturnMessage("transfer error on unBond function") - return vmcommon.UserError - } - + v.eei.Transfer(args.CallerAddr, args.RecipientAddr, totalUnBond, nil, 0) err = v.saveRegistrationData(args.CallerAddr, registrationData) if err != nil { v.eei.AddReturnMessage("cannot save registration data: error " + err.Error()) @@ -2027,6 +2116,16 @@ func (v *validatorSC) mergeValidatorData(args *vmcommon.ContractCallInput) vmcom validatorConfig := v.getConfig(v.eei.BlockChainHook().CurrentEpoch()) finalValidatorData.LockedStake.Mul(validatorConfig.NodePrice, big.NewInt(int64(finalValidatorData.NumRegistered))) + if v.isNumberOfNodesTooHigh(len(finalValidatorData.BlsPubKeys)) { + v.eei.AddReturnMessage("number of nodes is too high") + return vmcommon.UserError + } + + if v.isStakeTooHigh(finalValidatorData) { + v.eei.AddReturnMessage("total stake limit reached") + return vmcommon.UserError + } + v.eei.SetStorage(oldAddress, nil) err = v.saveRegistrationData(delegationAddr, finalValidatorData) if err != nil { diff --git a/vm/systemSmartContracts/validator_test.go b/vm/systemSmartContracts/validator_test.go index f4aefd377ec..758e0167a9d 100644 --- a/vm/systemSmartContracts/validator_test.go +++ b/vm/systemSmartContracts/validator_test.go @@ -51,6 +51,8 @@ func createMockArgumentsForValidatorSCWithSystemScAddresses( MaxNumberOfNodesForStake: 10, ActivateBLSPubKeyMessageVerification: false, MinUnstakeTokensValue: "1", + 
StakeLimitPercentage: 100.0, + NodeLimitPercentage: 100.0, }, Marshalizer: &mock.MarshalizerMock{}, GenesisTotalSupply: big.NewInt(100000000), @@ -64,7 +66,9 @@ func createMockArgumentsForValidatorSCWithSystemScAddresses( common.ValidatorToDelegationFlag, common.DoubleKeyProtectionFlag, common.MultiClaimOnDelegationFlag, + common.StakeLimitsFlag, ), + NodesCoordinator: &mock.NodesCoordinatorStub{}, } return args @@ -224,6 +228,39 @@ func TestNewStakingValidatorSmartContract_NilValidatorSmartContractAddress(t *te assert.True(t, errors.Is(err, vm.ErrNilValidatorSmartContractAddress)) } +func TestNewStakingValidatorSmartContract_NilNodesCoordinator(t *testing.T) { + t.Parallel() + + arguments := createMockArgumentsForValidatorSC() + arguments.NodesCoordinator = nil + + asc, err := NewValidatorSmartContract(arguments) + require.Nil(t, asc) + assert.True(t, errors.Is(err, vm.ErrNilNodesCoordinator)) +} + +func TestNewStakingValidatorSmartContract_ZeroStakeLimit(t *testing.T) { + t.Parallel() + + arguments := createMockArgumentsForValidatorSC() + arguments.StakingSCConfig.StakeLimitPercentage = 0.0 + + asc, err := NewValidatorSmartContract(arguments) + require.Nil(t, asc) + assert.True(t, errors.Is(err, vm.ErrInvalidStakeLimitPercentage)) +} + +func TestNewStakingValidatorSmartContract_ZeroNodeLimit(t *testing.T) { + t.Parallel() + + arguments := createMockArgumentsForValidatorSC() + arguments.StakingSCConfig.NodeLimitPercentage = 0.0 + + asc, err := NewValidatorSmartContract(arguments) + require.Nil(t, asc) + assert.True(t, errors.Is(err, vm.ErrInvalidNodeLimitPercentage)) +} + func TestNewStakingValidatorSmartContract_NilSigVerifier(t *testing.T) { t.Parallel() @@ -368,6 +405,138 @@ func TestStakingValidatorSC_ExecuteStakeWithoutArgumentsShouldWork(t *testing.T) assert.Equal(t, vmcommon.Ok, errCode) } +func TestStakingValidatorSC_ExecuteStakeTooMuchStake(t *testing.T) { + t.Parallel() + + arguments := CreateVmContractCallInput() + validatorData := createAValidatorData(25000000, 2, 12500000) + validatorDataBytes, _ := json.Marshal(&validatorData) + + eei := &mock.SystemEIStub{} + eei.GetStorageCalled = func(key []byte) []byte { + if bytes.Equal(key, arguments.CallerAddr) { + return validatorDataBytes + } + return nil + } + eei.AddReturnMessageCalled = func(msg string) { + assert.Equal(t, msg, "total stake limit reached") + } + + args := createMockArgumentsForValidatorSC() + args.Eei = eei + + stakingValidatorSc, _ := NewValidatorSmartContract(args) + + arguments.Function = "stake" + arguments.CallValue = big.NewInt(0).Set(stakingValidatorSc.totalStakeLimit) + + errCode := stakingValidatorSc.Execute(arguments) + assert.Equal(t, vmcommon.UserError, errCode) +} + +func TestStakingValidatorSC_ExecuteStakeTooManyNodes(t *testing.T) { + t.Parallel() + + arguments := CreateVmContractCallInput() + + eei := &mock.SystemEIStub{} + + args := createMockArgumentsForValidatorSC() + args.Eei = eei + + args.NodesCoordinator = &mock.NodesCoordinatorStub{GetNumTotalEligibleCalled: func() uint64 { + return 1000 + }} + args.StakingSCConfig.NodeLimitPercentage = 0.005 + stakingValidatorSc, _ := NewValidatorSmartContract(args) + + validatorData := createAValidatorData(75000000, 5, 12500000) + validatorDataBytes, _ := json.Marshal(&validatorData) + + eei.GetStorageCalled = func(key []byte) []byte { + if bytes.Equal(key, arguments.CallerAddr) { + return validatorDataBytes + } + return nil + } + called := false + eei.AddLogEntryCalled = func(entry *vmcommon.LogEntry) { + called = true + assert.Equal(t, entry.Topics[0], 
[]byte(numberOfNodesTooHigh)) + } + + eei.ExecuteOnDestContextCalled = func(destination, sender []byte, value *big.Int, input []byte) (*vmcommon.VMOutput, error) { + if strings.Contains(string(input), "stake") { + assert.Fail(t, "should not stake nodes") + } + return &vmcommon.VMOutput{}, nil + } + + key1 := []byte("Key1") + key2 := []byte("Key2") + key3 := []byte("Key3") + arguments.Function = "stake" + arguments.CallValue = big.NewInt(0).Mul(big.NewInt(3), big.NewInt(10000000)) + arguments.Arguments = [][]byte{big.NewInt(3).Bytes(), key1, []byte("msg1"), key2, []byte("msg2"), key3, []byte("msg3")} + + errCode := stakingValidatorSc.Execute(arguments) + assert.Equal(t, vmcommon.Ok, errCode) + assert.True(t, called) +} + +func TestStakingValidatorSC_ExecuteStakeTooManyNodesAddOnly2(t *testing.T) { + t.Parallel() + + arguments := CreateVmContractCallInput() + + eei := &mock.SystemEIStub{} + + args := createMockArgumentsForValidatorSC() + args.Eei = eei + + args.NodesCoordinator = &mock.NodesCoordinatorStub{GetNumTotalEligibleCalled: func() uint64 { + return 1000 + }} + args.StakingSCConfig.NodeLimitPercentage = 0.005 + stakingValidatorSc, _ := NewValidatorSmartContract(args) + + validatorData := createAValidatorData(75000000, 3, 12500000) + validatorDataBytes, _ := json.Marshal(&validatorData) + + eei.GetStorageCalled = func(key []byte) []byte { + if bytes.Equal(key, arguments.CallerAddr) { + return validatorDataBytes + } + return nil + } + called := false + eei.AddLogEntryCalled = func(entry *vmcommon.LogEntry) { + called = true + assert.Equal(t, entry.Topics[0], []byte(numberOfNodesTooHigh)) + } + + stakeCalledInStakingSC := 0 + eei.ExecuteOnDestContextCalled = func(destination, sender []byte, value *big.Int, input []byte) (*vmcommon.VMOutput, error) { + if strings.Contains(string(input), "stake") { + stakeCalledInStakingSC++ + } + return &vmcommon.VMOutput{}, nil + } + + key1 := []byte("Key1") + key2 := []byte("Key2") + key3 := []byte("Key3") + arguments.Function = "stake" + arguments.CallValue = big.NewInt(0).Mul(big.NewInt(3), big.NewInt(10000000)) + arguments.Arguments = [][]byte{big.NewInt(3).Bytes(), key1, []byte("msg1"), key2, []byte("msg2"), key3, []byte("msg3")} + + errCode := stakingValidatorSc.Execute(arguments) + assert.Equal(t, vmcommon.Ok, errCode) + assert.True(t, called) + assert.Equal(t, 2, stakeCalledInStakingSC) +} + func TestStakingValidatorSC_ExecuteStakeAddedNewPubKeysShouldWork(t *testing.T) { t.Parallel() @@ -1239,6 +1408,8 @@ func TestStakingValidatorSC_StakeUnStake3XRestake2(t *testing.T) { return stakingSc, nil }}) + nodesCoordinator := &mock.NodesCoordinatorStub{} + args.NodesCoordinator = nodesCoordinator args.StakingSCConfig = argsStaking.StakingSCConfig args.Eei = eei @@ -1282,9 +1453,21 @@ func TestStakingValidatorSC_StakeUnStake3XRestake2(t *testing.T) { retCode = sc.Execute(arguments) assert.Equal(t, vmcommon.Ok, retCode) + nodesCoordinator.GetNumTotalEligibleCalled = func() uint64 { + return 1 + } + arguments.Function = "reStakeUnStakedNodes" arguments.Arguments = [][]byte{stakerPubKey1, stakerPubKey2} arguments.CallValue = big.NewInt(0) + retCode = sc.Execute(arguments) + assert.Equal(t, vmcommon.UserError, retCode) + assert.Equal(t, eei.returnMessage, "number of nodes is too high") + + nodesCoordinator.GetNumTotalEligibleCalled = func() uint64 { + return 10 + } + retCode = sc.Execute(arguments) assert.Equal(t, vmcommon.Ok, retCode) } @@ -5104,6 +5287,101 @@ func TestStakingValidatorSC_MergeValidatorData(t *testing.T) { assert.Equal(t, 
stakedData.RewardAddress, vm.FirstDelegationSCAddress) } +func TestStakingValidatorSC_MergeValidatorDataTooMuchStake(t *testing.T) { + t.Parallel() + + enableEpochsHandler := &enableEpochsHandlerMock.EnableEpochsHandlerStub{} + enableEpochsHandler.AddActiveFlags(common.StakingV2Flag) + argsVMContext := createArgsVMContext() + argsVMContext.InputParser = parsers.NewCallArgsParser() + argsVMContext.EnableEpochsHandler = enableEpochsHandler + eei, _ := NewVMContext(argsVMContext) + + argsStaking := createMockStakingScArguments() + argsStaking.Eei = eei + stakingSc, _ := NewStakingSmartContract(argsStaking) + eei.SetSCAddress([]byte("addr")) + _ = eei.SetSystemSCContainer(&mock.SystemSCContainerStub{GetCalled: func(key []byte) (contract vm.SystemSmartContract, err error) { + return stakingSc, nil + }}) + + args := createMockArgumentsForValidatorSC() + args.StakingSCConfig = argsStaking.StakingSCConfig + args.Eei = eei + + sc, _ := NewValidatorSmartContract(args) + arguments := CreateVmContractCallInput() + arguments.CallerAddr = vm.ESDTSCAddress + arguments.Function = "mergeValidatorData" + arguments.Arguments = [][]byte{} + arguments.CallValue = big.NewInt(0) + arguments.CallerAddr = sc.delegationMgrSCAddress + randomAddress := bytes.Repeat([]byte{1}, len(arguments.CallerAddr)) + arguments.Arguments = [][]byte{randomAddress, vm.FirstDelegationSCAddress} + + limitPer4 := big.NewInt(0).Div(sc.totalStakeLimit, big.NewInt(4)) + + stake(t, sc, limitPer4, randomAddress, randomAddress, []byte("firsstKey"), big.NewInt(1).Bytes()) + stake(t, sc, limitPer4, randomAddress, randomAddress, []byte("secondKey"), big.NewInt(1).Bytes()) + stake(t, sc, limitPer4, randomAddress, randomAddress, []byte("thirddKey"), big.NewInt(1).Bytes()) + + stake(t, sc, limitPer4, vm.FirstDelegationSCAddress, vm.FirstDelegationSCAddress, []byte("fourthKey"), big.NewInt(1).Bytes()) + stake(t, sc, limitPer4, vm.FirstDelegationSCAddress, vm.FirstDelegationSCAddress, []byte("fifthhKey"), big.NewInt(1).Bytes()) + stake(t, sc, limitPer4, vm.FirstDelegationSCAddress, vm.FirstDelegationSCAddress, []byte("sixthhKey"), big.NewInt(1).Bytes()) + + retCode := sc.Execute(arguments) + assert.Equal(t, vmcommon.UserError, retCode) + assert.Equal(t, eei.returnMessage, "total stake limit reached") +} + +func TestStakingValidatorSC_MergeValidatorDataTooMuchNodes(t *testing.T) { + t.Parallel() + + enableEpochsHandler := &enableEpochsHandlerMock.EnableEpochsHandlerStub{} + enableEpochsHandler.AddActiveFlags(common.StakingV2Flag) + argsVMContext := createArgsVMContext() + argsVMContext.InputParser = parsers.NewCallArgsParser() + argsVMContext.EnableEpochsHandler = enableEpochsHandler + eei, _ := NewVMContext(argsVMContext) + + argsStaking := createMockStakingScArguments() + argsStaking.Eei = eei + stakingSc, _ := NewStakingSmartContract(argsStaking) + eei.SetSCAddress([]byte("addr")) + _ = eei.SetSystemSCContainer(&mock.SystemSCContainerStub{GetCalled: func(key []byte) (contract vm.SystemSmartContract, err error) { + return stakingSc, nil + }}) + + args := createMockArgumentsForValidatorSC() + args.NodesCoordinator = &mock.NodesCoordinatorStub{GetNumTotalEligibleCalled: func() uint64 { + return 5 + }} + args.StakingSCConfig = argsStaking.StakingSCConfig + args.Eei = eei + + sc, _ := NewValidatorSmartContract(args) + arguments := CreateVmContractCallInput() + arguments.CallerAddr = vm.ESDTSCAddress + arguments.Function = "mergeValidatorData" + arguments.Arguments = [][]byte{} + arguments.CallValue = big.NewInt(0) + arguments.CallerAddr = 
sc.delegationMgrSCAddress + randomAddress := bytes.Repeat([]byte{1}, len(arguments.CallerAddr)) + arguments.Arguments = [][]byte{randomAddress, vm.FirstDelegationSCAddress} + + stake(t, sc, stakingSc.stakeValue, randomAddress, randomAddress, []byte("firsstKey"), big.NewInt(1).Bytes()) + stake(t, sc, stakingSc.stakeValue, randomAddress, randomAddress, []byte("secondKey"), big.NewInt(1).Bytes()) + stake(t, sc, stakingSc.stakeValue, randomAddress, randomAddress, []byte("thirddKey"), big.NewInt(1).Bytes()) + + stake(t, sc, stakingSc.stakeValue, vm.FirstDelegationSCAddress, vm.FirstDelegationSCAddress, []byte("fourthKey"), big.NewInt(1).Bytes()) + stake(t, sc, stakingSc.stakeValue, vm.FirstDelegationSCAddress, vm.FirstDelegationSCAddress, []byte("fifthhKey"), big.NewInt(1).Bytes()) + stake(t, sc, stakingSc.stakeValue, vm.FirstDelegationSCAddress, vm.FirstDelegationSCAddress, []byte("sixthhKey"), big.NewInt(1).Bytes()) + + retCode := sc.Execute(arguments) + assert.Equal(t, vmcommon.UserError, retCode) + assert.Equal(t, eei.returnMessage, "number of nodes is too high") +} + func TestValidatorSC_getMinUnStakeTokensValueFromDelegationManagerMarshalizerFail(t *testing.T) { t.Parallel()