diff --git a/core/epochFlags.go b/core/epochFlags.go new file mode 100644 index 000000000..8a43202e4 --- /dev/null +++ b/core/epochFlags.go @@ -0,0 +1,25 @@ +package core + +import ( + "fmt" + + "github.com/multiversx/mx-chain-core-go/core/check" +) + +// EnableEpochFlag defines a flag specific to the enableEpochs.toml +type EnableEpochFlag string + +// CheckHandlerCompatibility checks if the provided handler is compatible with this mx-chain-core-go version +func CheckHandlerCompatibility(handler EnableEpochsHandler, requiredFlags []EnableEpochFlag) error { + if check.IfNil(handler) { + return ErrNilEnableEpochsHandler + } + + for _, flag := range requiredFlags { + if !handler.IsFlagDefined(flag) { + return fmt.Errorf("%w for flag %s", ErrInvalidEnableEpochsHandler, flag) + } + } + + return nil +} diff --git a/core/epochFlags_test.go b/core/epochFlags_test.go new file mode 100644 index 000000000..6a702fc97 --- /dev/null +++ b/core/epochFlags_test.go @@ -0,0 +1,45 @@ +package core_test + +import ( + "errors" + "strings" + "testing" + + "github.com/multiversx/mx-chain-core-go/core" + "github.com/multiversx/mx-chain-core-go/core/mock" + "github.com/stretchr/testify/require" +) + +func TestCheckHandlerCompatibility(t *testing.T) { + t.Parallel() + + err := core.CheckHandlerCompatibility(nil, []core.EnableEpochFlag{}) + require.Equal(t, core.ErrNilEnableEpochsHandler, err) + + testFlags := []core.EnableEpochFlag{"f0", "f1", "f2"} + allFlagsDefinedHandler := &mock.EnableEpochsHandlerStub{ + IsFlagDefinedCalled: func(flag core.EnableEpochFlag) bool { + return true + }, + } + err = core.CheckHandlerCompatibility(allFlagsDefinedHandler, testFlags) + require.Nil(t, err) + + allFlagsUndefinedHandler := &mock.EnableEpochsHandlerStub{ + IsFlagDefinedCalled: func(flag core.EnableEpochFlag) bool { + return false + }, + } + err = core.CheckHandlerCompatibility(allFlagsUndefinedHandler, testFlags) + require.True(t, errors.Is(err, core.ErrInvalidEnableEpochsHandler)) + + missingFlag := testFlags[1] + oneFlagUndefinedHandler := &mock.EnableEpochsHandlerStub{ + IsFlagDefinedCalled: func(flag core.EnableEpochFlag) bool { + return flag != missingFlag + }, + } + err = core.CheckHandlerCompatibility(oneFlagUndefinedHandler, testFlags) + require.True(t, errors.Is(err, core.ErrInvalidEnableEpochsHandler)) + require.True(t, strings.Contains(err.Error(), string(missingFlag))) +} diff --git a/core/errors.go b/core/errors.go index a6e64c498..5a784900e 100644 --- a/core/errors.go +++ b/core/errors.go @@ -19,7 +19,7 @@ var ErrInvalidValue = errors.New("invalid value provided") // ErrNilInputData signals that a nil data has been provided var ErrNilInputData = errors.New("nil input data") -//ErrNilUrl signals that the provided url is empty +// ErrNilUrl signals that the provided url is empty var ErrNilUrl = errors.New("url is empty") // ErrPemFileIsInvalid signals that a pem file is invalid @@ -118,3 +118,6 @@ var ErrDBIsClosed = errors.New("DB is closed") // ErrNilEnableEpochsHandler signals that a nil enable epochs handler has been provided var ErrNilEnableEpochsHandler = errors.New("nil enable epochs handler") + +// ErrInvalidEnableEpochsHandler signals that an invalid enable epochs handler has been provided +var ErrInvalidEnableEpochsHandler = errors.New("invalid enable epochs handler") diff --git a/core/export_test.go b/core/export_test.go index a4589a387..98fbbf93a 100644 --- a/core/export_test.go +++ b/core/export_test.go @@ -2,10 +2,42 @@ package core import "time" +// GetContainingDuration - func (sw *StopWatch) 
GetContainingDuration() (map[string]time.Duration, []string) { return sw.getContainingDuration() } +// GetIdentifiers - +func (sw *StopWatch) GetIdentifiers() []string { + return sw.identifiers +} + +// SetIdentifiers - +func (sw *StopWatch) SetIdentifiers(identifiers []string) { + sw.identifiers = identifiers +} + +// GetStarted - +func (sw *StopWatch) GetStarted(identifier string) (time.Time, bool) { + s, has := sw.started[identifier] + return s, has +} + +// GetElapsed - +func (sw *StopWatch) GetElapsed(identifier string) (time.Duration, bool) { + e, has := sw.elapsed[identifier] + return e, has +} + +// SetElapsed - +func (sw *StopWatch) SetElapsed(identifier string, duration time.Duration) { + sw.elapsed[identifier] = duration +} + +// SplitExponentFraction - func SplitExponentFraction(val string) (string, string) { return splitExponentFraction(val) } + +// TestAutoBalanceDataTriesFlag - +const TestAutoBalanceDataTriesFlag = autoBalanceDataTriesFlag diff --git a/core/file.go b/core/file.go index a7300a53b..bb1cb977a 100644 --- a/core/file.go +++ b/core/file.go @@ -5,7 +5,7 @@ import ( "encoding/json" "encoding/pem" "fmt" - "io/ioutil" + "io" "os" "path/filepath" "strings" @@ -152,7 +152,7 @@ func LoadSkPkFromPemFile(relativePath string, skIndex int) ([]byte, string, erro _ = file.Close() }() - buff, err := ioutil.ReadAll(file) + buff, err := io.ReadAll(file) if err != nil { return nil, "", fmt.Errorf("%w while reading %s file", err, relativePath) } @@ -200,7 +200,7 @@ func LoadAllKeysFromPemFile(relativePath string) ([][]byte, []string, error) { _ = file.Close() }() - buff, err := ioutil.ReadAll(file) + buff, err := io.ReadAll(file) if err != nil { return nil, nil, fmt.Errorf("%w while reading %s file", err, relativePath) } diff --git a/core/file_test.go b/core/file_test.go index edaf0ead3..bd2a84061 100644 --- a/core/file_test.go +++ b/core/file_test.go @@ -3,7 +3,6 @@ package core_test import ( "encoding/json" "errors" - "io/ioutil" "os" "path/filepath" "strings" @@ -146,7 +145,7 @@ func TestLoadJSonFile_FileExitsShouldPass(t *testing.T) { data, _ := json.MarshalIndent(TestStruct{A: 0, B: 0}, "", " ") - _ = ioutil.WriteFile(fileName, data, 0644) + _ = os.WriteFile(fileName, data, 0644) err = file.Close() assert.Nil(t, err) diff --git a/core/interface.go b/core/interface.go index 8bde1c046..27888e2b0 100644 --- a/core/interface.go +++ b/core/interface.go @@ -144,6 +144,9 @@ type TrieNodeVersionVerifier interface { // EnableEpochsHandler defines the behavior of a component that can return if a feature is enabled or not type EnableEpochsHandler interface { - IsAutoBalanceDataTriesEnabled() bool + IsFlagDefined(flag EnableEpochFlag) bool + IsFlagEnabled(flag EnableEpochFlag) bool + IsFlagEnabledInEpoch(flag EnableEpochFlag, epoch uint32) bool + GetActivationEpoch(flag EnableEpochFlag) uint32 IsInterfaceNil() bool } diff --git a/core/loggingFunctions_test.go b/core/loggingFunctions_test.go index 6aed20a01..30a746cd5 100644 --- a/core/loggingFunctions_test.go +++ b/core/loggingFunctions_test.go @@ -1,9 +1,10 @@ -package core +package core_test import ( "fmt" "testing" + "github.com/multiversx/mx-chain-core-go/core" "github.com/multiversx/mx-chain-core-go/core/mock" "github.com/stretchr/testify/require" ) @@ -18,12 +19,12 @@ func TestDumpGoRoutinesToLogShouldNotPanic(t *testing.T) { } }() - DumpGoRoutinesToLog(0, &mock.LoggerMock{}) + core.DumpGoRoutinesToLog(0, &mock.LoggerMock{}) } func TestGetRunningGoRoutines(t *testing.T) { t.Parallel() - res := 
GetRunningGoRoutines(&mock.LoggerMock{}) + res := core.GetRunningGoRoutines(&mock.LoggerMock{}) require.NotNil(t, res) } diff --git a/core/mock/enableEpochsHandlerMock.go b/core/mock/enableEpochsHandlerMock.go deleted file mode 100644 index 91abd5be5..000000000 --- a/core/mock/enableEpochsHandlerMock.go +++ /dev/null @@ -1,20 +0,0 @@ -package mock - -// EnableEpochsHandlerStub - -type EnableEpochsHandlerStub struct { - IsAutoBalanceDataTriesEnabledCalled func() bool -} - -// IsAutoBalanceDataTriesEnabled - -func (e *EnableEpochsHandlerStub) IsAutoBalanceDataTriesEnabled() bool { - if e.IsAutoBalanceDataTriesEnabledCalled != nil { - return e.IsAutoBalanceDataTriesEnabledCalled() - } - - return false -} - -// IsInterfaceNil returns true if there is no value under the interface -func (e *EnableEpochsHandlerStub) IsInterfaceNil() bool { - return e == nil -} diff --git a/core/mock/enableEpochsHandlerStub.go b/core/mock/enableEpochsHandlerStub.go new file mode 100644 index 000000000..fb037b375 --- /dev/null +++ b/core/mock/enableEpochsHandlerStub.go @@ -0,0 +1,48 @@ +package mock + +import "github.com/multiversx/mx-chain-core-go/core" + +// EnableEpochsHandlerStub - +type EnableEpochsHandlerStub struct { + IsFlagDefinedCalled func(flag core.EnableEpochFlag) bool + IsFlagEnabledCalled func(flag core.EnableEpochFlag) bool + IsFlagEnabledInEpochCalled func(flag core.EnableEpochFlag, epoch uint32) bool + GetActivationEpochCalled func(flag core.EnableEpochFlag) uint32 +} + +// IsFlagDefined - +func (stub *EnableEpochsHandlerStub) IsFlagDefined(flag core.EnableEpochFlag) bool { + if stub.IsFlagDefinedCalled != nil { + return stub.IsFlagDefinedCalled(flag) + } + return false +} + +// IsFlagEnabled - +func (stub *EnableEpochsHandlerStub) IsFlagEnabled(flag core.EnableEpochFlag) bool { + if stub.IsFlagEnabledCalled != nil { + return stub.IsFlagEnabledCalled(flag) + } + return false +} + +// IsFlagEnabledInEpoch - +func (stub *EnableEpochsHandlerStub) IsFlagEnabledInEpoch(flag core.EnableEpochFlag, epoch uint32) bool { + if stub.IsFlagEnabledInEpochCalled != nil { + return stub.IsFlagEnabledInEpochCalled(flag, epoch) + } + return false +} + +// GetActivationEpoch - +func (stub *EnableEpochsHandlerStub) GetActivationEpoch(flag core.EnableEpochFlag) uint32 { + if stub.GetActivationEpochCalled != nil { + return stub.GetActivationEpochCalled(flag) + } + return 0 +} + +// IsInterfaceNil - +func (stub *EnableEpochsHandlerStub) IsInterfaceNil() bool { + return stub == nil +} diff --git a/core/stopWatch_test.go b/core/stopWatch_test.go index 135def969..54bd1ad9b 100644 --- a/core/stopWatch_test.go +++ b/core/stopWatch_test.go @@ -1,9 +1,10 @@ -package core +package core_test import ( "testing" "time" + "github.com/multiversx/mx-chain-core-go/core" "github.com/multiversx/mx-chain-core-go/core/mock" "github.com/stretchr/testify/assert" ) @@ -15,20 +16,20 @@ var log = &mock.LoggerMock{} func TestStopWatch_Start(t *testing.T) { t.Parallel() - sw := NewStopWatch() + sw := core.NewStopWatch() sw.Start(identifier) - _, has := sw.started[identifier] + _, has := sw.GetStarted(identifier) assert.True(t, has) - assert.Equal(t, identifier, sw.identifiers[0]) + assert.Equal(t, identifier, sw.GetIdentifiers()[0]) } func TestStopWatch_DoubleStartShouldNotReAddInIdentifiers(t *testing.T) { t.Parallel() - sw := NewStopWatch() + sw := core.NewStopWatch() identifier1 := "identifier1" identifier2 := "identifier2" @@ -36,19 +37,19 @@ func TestStopWatch_DoubleStartShouldNotReAddInIdentifiers(t *testing.T) { sw.Start(identifier2) 
sw.Start(identifier1) - assert.Equal(t, identifier1, sw.identifiers[0]) - assert.Equal(t, identifier2, sw.identifiers[1]) - assert.Equal(t, 2, len(sw.identifiers)) + assert.Equal(t, identifier1, sw.GetIdentifiers()[0]) + assert.Equal(t, identifier2, sw.GetIdentifiers()[1]) + assert.Equal(t, 2, len(sw.GetIdentifiers())) } func TestStopWatch_StopNoStartShouldNotAddDuration(t *testing.T) { t.Parallel() - sw := NewStopWatch() + sw := core.NewStopWatch() sw.Stop(identifier) - _, has := sw.elapsed[identifier] + _, has := sw.GetElapsed(identifier) assert.False(t, has) } @@ -56,12 +57,12 @@ func TestStopWatch_StopNoStartShouldNotAddDuration(t *testing.T) { func TestStopWatch_StopWithStartShouldAddDuration(t *testing.T) { t.Parallel() - sw := NewStopWatch() + sw := core.NewStopWatch() sw.Start(identifier) sw.Stop(identifier) - _, has := sw.elapsed[identifier] + _, has := sw.GetElapsed(identifier) assert.True(t, has) } @@ -69,7 +70,7 @@ func TestStopWatch_StopWithStartShouldAddDuration(t *testing.T) { func TestStopWatch_GetMeasurementsNotFinishedShouldOmit(t *testing.T) { t.Parallel() - sw := NewStopWatch() + sw := core.NewStopWatch() sw.Start(identifier) @@ -82,7 +83,7 @@ func TestStopWatch_GetMeasurementsNotFinishedShouldOmit(t *testing.T) { func TestStopWatch_GetMeasurementsShouldWork(t *testing.T) { t.Parallel() - sw := NewStopWatch() + sw := core.NewStopWatch() sw.Start(identifier) sw.Stop(identifier) @@ -102,12 +103,12 @@ func TestStopWatch_AddShouldWork(t *testing.T) { identifier2 := "identifier2" duration2 := time.Duration(7) - swSrc := NewStopWatch() - swSrc.identifiers = []string{identifier1, identifier2} - swSrc.elapsed[identifier1] = duration1 - swSrc.elapsed[identifier2] = duration2 + swSrc := core.NewStopWatch() + swSrc.SetIdentifiers([]string{identifier1, identifier2}) + swSrc.SetElapsed(identifier1, duration1) + swSrc.SetElapsed(identifier2, duration2) - sw := NewStopWatch() + sw := core.NewStopWatch() sw.Add(swSrc) @@ -126,9 +127,9 @@ func TestStopWatch_GetMeasurement(t *testing.T) { t.Parallel() fooDuration := time.Duration(4243) * time.Millisecond - sw := NewStopWatch() - sw.identifiers = []string{"foo"} - sw.elapsed["foo"] = fooDuration + sw := core.NewStopWatch() + sw.SetIdentifiers([]string{"foo"}) + sw.SetElapsed("foo", fooDuration) assert.Equal(t, fooDuration, sw.GetMeasurement("foo")) assert.Equal(t, time.Duration(0), sw.GetMeasurement("bar")) diff --git a/core/trie.go b/core/trie.go index de4623765..84331e5c0 100644 --- a/core/trie.go +++ b/core/trie.go @@ -23,6 +23,8 @@ const ( // AutoBalanceEnabledString is the string representation of AutoBalanceEnabled trie node version AutoBalanceEnabledString = "auto balanced" + + autoBalanceDataTriesFlag = EnableEpochFlag("AutoBalanceDataTriesFlag") ) func (version TrieNodeVersion) String() string { @@ -40,10 +42,15 @@ type trieNodeVersionVerifier struct { enableEpochsHandler EnableEpochsHandler } +// NewTrieNodeVersionVerifier returns a new instance of trieNodeVersionVerifier func NewTrieNodeVersionVerifier(enableEpochsHandler EnableEpochsHandler) (*trieNodeVersionVerifier, error) { if check.IfNil(enableEpochsHandler) { return nil, ErrNilEnableEpochsHandler } + err := CheckHandlerCompatibility(enableEpochsHandler, []EnableEpochFlag{autoBalanceDataTriesFlag}) + if err != nil { + return nil, err + } return &trieNodeVersionVerifier{ enableEpochsHandler: enableEpochsHandler, @@ -52,7 +59,7 @@ func NewTrieNodeVersionVerifier(enableEpochsHandler EnableEpochsHandler) (*trieN // IsValidVersion returns true if the given trie node version is 
valid func (vv *trieNodeVersionVerifier) IsValidVersion(version TrieNodeVersion) bool { - if vv.enableEpochsHandler.IsAutoBalanceDataTriesEnabled() { + if vv.enableEpochsHandler.IsFlagEnabled(autoBalanceDataTriesFlag) { return version <= AutoBalanceEnabled } @@ -66,7 +73,7 @@ func (vv *trieNodeVersionVerifier) IsInterfaceNil() bool { // GetVersionForNewData returns the trie node version that should be used for new data func GetVersionForNewData(handler EnableEpochsHandler) TrieNodeVersion { - if handler.IsAutoBalanceDataTriesEnabled() { + if handler.IsFlagEnabled(autoBalanceDataTriesFlag) { return AutoBalanceEnabled } diff --git a/core/trie_test.go b/core/trie_test.go index d5332723a..49deec9f2 100644 --- a/core/trie_test.go +++ b/core/trie_test.go @@ -1,8 +1,11 @@ -package core +package core_test import ( + "errors" + "strings" "testing" + "github.com/multiversx/mx-chain-core-go/core" "github.com/multiversx/mx-chain-core-go/core/check" "github.com/multiversx/mx-chain-core-go/core/mock" "github.com/stretchr/testify/assert" @@ -14,14 +17,31 @@ func TestNewTrieNodeVersionVerifier(t *testing.T) { t.Run("nil enableEpochsHandler", func(t *testing.T) { t.Parallel() - vv, err := NewTrieNodeVersionVerifier(nil) + vv, err := core.NewTrieNodeVersionVerifier(nil) assert.Nil(t, vv) - assert.Equal(t, ErrNilEnableEpochsHandler, err) + assert.Equal(t, core.ErrNilEnableEpochsHandler, err) + }) + t.Run("incompatible enableEpochsHandler", func(t *testing.T) { + t.Parallel() + + vv, err := core.NewTrieNodeVersionVerifier(&mock.EnableEpochsHandlerStub{ + IsFlagDefinedCalled: func(flag core.EnableEpochFlag) bool { + assert.Equal(t, core.TestAutoBalanceDataTriesFlag, flag) + return false + }, + }) + assert.Nil(t, vv) + assert.True(t, errors.Is(err, core.ErrInvalidEnableEpochsHandler)) + assert.True(t, strings.Contains(err.Error(), string(core.TestAutoBalanceDataTriesFlag))) }) t.Run("new trieNodeVersionVerifier", func(t *testing.T) { t.Parallel() - vv, err := NewTrieNodeVersionVerifier(&mock.EnableEpochsHandlerStub{}) + vv, err := core.NewTrieNodeVersionVerifier(&mock.EnableEpochsHandlerStub{ + IsFlagDefinedCalled: func(flag core.EnableEpochFlag) bool { + return flag == core.TestAutoBalanceDataTriesFlag + }, + }) assert.Nil(t, err) assert.False(t, check.IfNil(vv)) }) @@ -33,40 +53,46 @@ func TestTrieNodeVersionVerifier_IsValidVersion(t *testing.T) { t.Run("auto balance enabled", func(t *testing.T) { t.Parallel() - vv, _ := NewTrieNodeVersionVerifier( + vv, _ := core.NewTrieNodeVersionVerifier( &mock.EnableEpochsHandlerStub{ - IsAutoBalanceDataTriesEnabledCalled: func() bool { - return true + IsFlagDefinedCalled: func(flag core.EnableEpochFlag) bool { + return flag == core.TestAutoBalanceDataTriesFlag + }, + IsFlagEnabledCalled: func(flag core.EnableEpochFlag) bool { + return flag == core.TestAutoBalanceDataTriesFlag }, }, ) - assert.True(t, vv.IsValidVersion(NotSpecified)) - assert.True(t, vv.IsValidVersion(AutoBalanceEnabled)) - assert.False(t, vv.IsValidVersion(AutoBalanceEnabled+1)) + assert.True(t, vv.IsValidVersion(core.NotSpecified)) + assert.True(t, vv.IsValidVersion(core.AutoBalanceEnabled)) + assert.False(t, vv.IsValidVersion(core.AutoBalanceEnabled+1)) }) t.Run("auto balance disabled", func(t *testing.T) { t.Parallel() - vv, _ := NewTrieNodeVersionVerifier( + vv, _ := core.NewTrieNodeVersionVerifier( &mock.EnableEpochsHandlerStub{ - IsAutoBalanceDataTriesEnabledCalled: func() bool { + IsFlagDefinedCalled: func(flag core.EnableEpochFlag) bool { + return flag == core.TestAutoBalanceDataTriesFlag + }, + 
IsFlagEnabledCalled: func(flag core.EnableEpochFlag) bool { return false }, }, ) - assert.True(t, vv.IsValidVersion(NotSpecified)) - assert.False(t, vv.IsValidVersion(AutoBalanceEnabled)) - assert.False(t, vv.IsValidVersion(AutoBalanceEnabled+1)) + assert.True(t, vv.IsValidVersion(core.NotSpecified)) + assert.False(t, vv.IsValidVersion(core.AutoBalanceEnabled)) + assert.False(t, vv.IsValidVersion(core.AutoBalanceEnabled+1)) }) } func TestTrieNodeVersion_String(t *testing.T) { t.Parallel() - assert.Equal(t, NotSpecifiedString, NotSpecified.String()) - assert.Equal(t, AutoBalanceEnabledString, AutoBalanceEnabled.String()) - assert.Equal(t, "unknown: 100", TrieNodeVersion(100).String()) + assert.Equal(t, core.NotSpecifiedString, core.NotSpecified.String()) + assert.Equal(t, core.AutoBalanceEnabledString, core.AutoBalanceEnabled.String()) + assert.Equal(t, "unknown: 100", core.TrieNodeVersion(100).String()) } func TestGetVersionForNewData(t *testing.T) { @@ -75,26 +101,32 @@ func TestGetVersionForNewData(t *testing.T) { t.Run("auto balance enabled", func(t *testing.T) { t.Parallel() - getVersionForNewData := GetVersionForNewData( + getVersionForNewData := core.GetVersionForNewData( &mock.EnableEpochsHandlerStub{ - IsAutoBalanceDataTriesEnabledCalled: func() bool { - return true + IsFlagDefinedCalled: func(flag core.EnableEpochFlag) bool { + return flag == core.TestAutoBalanceDataTriesFlag + }, + IsFlagEnabledCalled: func(flag core.EnableEpochFlag) bool { + return flag == core.TestAutoBalanceDataTriesFlag }, }, ) - assert.Equal(t, AutoBalanceEnabled, getVersionForNewData) + assert.Equal(t, core.AutoBalanceEnabled, getVersionForNewData) }) t.Run("auto balance disabled", func(t *testing.T) { t.Parallel() - getVersionForNewData := GetVersionForNewData( + getVersionForNewData := core.GetVersionForNewData( &mock.EnableEpochsHandlerStub{ - IsAutoBalanceDataTriesEnabledCalled: func() bool { + IsFlagDefinedCalled: func(flag core.EnableEpochFlag) bool { + return flag == core.TestAutoBalanceDataTriesFlag + }, + IsFlagEnabledCalled: func(flag core.EnableEpochFlag) bool { return false }, }, ) - assert.Equal(t, NotSpecified, getVersionForNewData) + assert.Equal(t, core.NotSpecified, getVersionForNewData) }) } diff --git a/data/api/apiAccountResponse.go b/data/api/apiAccountResponse.go index 4cb66e026..77662c865 100644 --- a/data/api/apiAccountResponse.go +++ b/data/api/apiAccountResponse.go @@ -2,14 +2,15 @@ package api // AccountResponse is the data transfer object to be returned on API when requesting an address data type AccountResponse struct { - Address string `json:"address"` - Nonce uint64 `json:"nonce"` - Balance string `json:"balance"` - Username string `json:"username"` - Code string `json:"code"` - CodeHash []byte `json:"codeHash"` - RootHash []byte `json:"rootHash"` - CodeMetadata []byte `json:"codeMetadata"` - DeveloperReward string `json:"developerReward"` - OwnerAddress string `json:"ownerAddress"` + Address string `json:"address"` + Nonce uint64 `json:"nonce"` + Balance string `json:"balance"` + Username string `json:"username"` + Code string `json:"code"` + CodeHash []byte `json:"codeHash"` + RootHash []byte `json:"rootHash"` + CodeMetadata []byte `json:"codeMetadata"` + DeveloperReward string `json:"developerReward"` + OwnerAddress string `json:"ownerAddress"` + Pairs map[string]string `json:"pairs,omitempty"` } diff --git a/data/api/options.go b/data/api/options.go index 66ce40dd4..9aaedb90a 100644 --- a/data/api/options.go +++ b/data/api/options.go @@ -10,6 +10,7 @@ type 
AccountQueryOptions struct { BlockHash []byte BlockRootHash []byte HintEpoch core.OptionalUint32 + WithKeys bool } // BlockQueryOptions holds options for block queries diff --git a/data/block/block.go b/data/block/block.go index 0ac8fd2aa..8a32462d5 100644 --- a/data/block/block.go +++ b/data/block/block.go @@ -14,7 +14,8 @@ var _ = data.HeaderHandler(&Header{}) var _ = data.ShardHeaderHandler(&Header{}) // MiniBlockSlice should be used when referring to subset of mini blocks that is not -// necessarily representing a full block body +// +// necessarily representing a full block body type MiniBlockSlice []*MiniBlock // MiniblockAndHash holds the info related to a miniblock and its hash @@ -278,6 +279,17 @@ func (h *Header) ShallowClone() data.HeaderHandler { return &headerCopy } +// SetBlockBodyTypeInt32 sets the blockBodyType in the header +func (h *Header) SetBlockBodyTypeInt32(blockBodyType int32) error { + if h == nil { + return data.ErrNilPointerReceiver + } + + h.BlockBodyType = Type(blockBodyType) + + return nil +} + // IsInterfaceNil returns true if there is no value under the interface func (h *Header) IsInterfaceNil() bool { return h == nil diff --git a/data/block/block.proto b/data/block/block.proto index 99d11d0bb..9551b0016 100644 --- a/data/block/block.proto +++ b/data/block/block.proto @@ -8,7 +8,7 @@ syntax = "proto3"; package proto; -option go_package = "github.com/multiversx/mx-chain-core-go/data/block;block"; +option go_package = "block"; option (gogoproto.stable_marshaler_all) = true; import "github.com/gogo/protobuf/gogoproto/gogo.proto"; diff --git a/data/block/blockV2.go b/data/block/blockV2.go index 0a690e2ee..4fb87a15c 100644 --- a/data/block/blockV2.go +++ b/data/block/blockV2.go @@ -401,6 +401,15 @@ func (hv2 *HeaderV2) ShallowClone() data.HeaderHandler { return &headerCopy } +// SetBlockBodyTypeInt32 sets the blockBodyType in the header +func (hv2 *HeaderV2) SetBlockBodyTypeInt32(blockBodyType int32) error { + if hv2 == nil { + return data.ErrNilPointerReceiver + } + + return hv2.Header.SetBlockBodyTypeInt32(blockBodyType) +} + // IsInterfaceNil returns true if there is no value under the interface func (hv2 *HeaderV2) IsInterfaceNil() bool { return hv2 == nil diff --git a/data/block/blockV2_test.go b/data/block/blockV2_test.go index a0e28f582..6cedd08a7 100644 --- a/data/block/blockV2_test.go +++ b/data/block/blockV2_test.go @@ -1217,3 +1217,29 @@ func TestHeaderV2_HasScheduledMiniBlocks(t *testing.T) { require.False(t, shardBlock.HasScheduledMiniBlocks()) } + +func TestHeaderV2_SetBlockBodyTypeInt32(t *testing.T) { + t.Parallel() + + t.Run("nil header should error", func(t *testing.T) { + t.Parallel() + + var header *block.HeaderV2 + err := header.SetBlockBodyTypeInt32(int32(block.ReceiptBlock)) + require.Equal(t, data.ErrNilPointerReceiver, err) + }) + t.Run("should work", func(t *testing.T) { + t.Parallel() + + header := &block.HeaderV2{ + Header: &block.Header{}, + } + err := header.SetBlockBodyTypeInt32(int32(block.ReceiptBlock)) + require.Nil(t, err) + require.Equal(t, int32(block.ReceiptBlock), header.GetBlockBodyTypeInt32()) + + err = header.SetBlockBodyTypeInt32(int32(block.TxBlock)) + require.Nil(t, err) + require.Equal(t, int32(block.TxBlock), header.GetBlockBodyTypeInt32()) + }) +} diff --git a/data/block/block_test.go b/data/block/block_test.go index 980543b24..c83d88e75 100644 --- a/data/block/block_test.go +++ b/data/block/block_test.go @@ -845,3 +845,27 @@ func TestMiniBlockHeader_GetMiniBlockHeaderReservedShouldErrWhenReservedFieldIsN 
assert.Nil(t, mbhr) assert.Equal(t, data.ErrNilReservedField, err) } + +func TestHeader_SetBlockBodyTypeInt32(t *testing.T) { + t.Parallel() + + t.Run("nil header should error", func(t *testing.T) { + t.Parallel() + + var header *block.Header + err := header.SetBlockBodyTypeInt32(int32(block.ReceiptBlock)) + require.Equal(t, data.ErrNilPointerReceiver, err) + }) + t.Run("should work", func(t *testing.T) { + t.Parallel() + + header := &block.Header{} + err := header.SetBlockBodyTypeInt32(int32(block.ReceiptBlock)) + require.Nil(t, err) + require.Equal(t, block.ReceiptBlock, header.BlockBodyType) + + err = header.SetBlockBodyTypeInt32(int32(block.TxBlock)) + require.Nil(t, err) + require.Equal(t, block.TxBlock, header.BlockBodyType) + }) +} diff --git a/data/interface.go b/data/interface.go index 80b039da2..97f5ecd76 100644 --- a/data/interface.go +++ b/data/interface.go @@ -95,6 +95,7 @@ type ShardHeaderHandler interface { GetBlockBodyTypeInt32() int32 SetMetaBlockHashes(hashes [][]byte) error MapMiniBlockHashesToShards() map[string]uint32 + SetBlockBodyTypeInt32(blockBodyType int32) error } // MetaHeaderHandler defines getters and setters for the meta block header diff --git a/data/mock/cacherMock.go b/data/mock/cacherMock.go deleted file mode 100644 index 20a5186c5..000000000 --- a/data/mock/cacherMock.go +++ /dev/null @@ -1,156 +0,0 @@ -package mock - -import ( - "sync" -) - -// CacherMock - -type CacherMock struct { - mut sync.RWMutex - dataMap map[string]interface{} - mutAddedDataHandlers sync.RWMutex - addedDataHandlers []func(key []byte, val interface{}) -} - -// NewCacherMock - -func NewCacherMock() *CacherMock { - return &CacherMock{ - dataMap: make(map[string]interface{}), - addedDataHandlers: make([]func(key []byte, val interface{}), 0), - } -} - -// Clear - -func (cacher *CacherMock) Clear() { - cacher.mut.Lock() - defer cacher.mut.Unlock() - - cacher.dataMap = make(map[string]interface{}) -} - -// Put - -func (cacher *CacherMock) Put(key []byte, value interface{}, _ int) (evicted bool) { - cacher.mut.Lock() - defer cacher.mut.Unlock() - - cacher.dataMap[string(key)] = value - cacher.callAddedDataHandlers(key, value) - - return false -} - -func (cacher *CacherMock) callAddedDataHandlers(key []byte, val interface{}) { - cacher.mutAddedDataHandlers.RLock() - for _, handler := range cacher.addedDataHandlers { - go handler(key, val) - } - cacher.mutAddedDataHandlers.RUnlock() -} - -// Get - -func (cacher *CacherMock) Get(key []byte) (value interface{}, ok bool) { - cacher.mut.RLock() - defer cacher.mut.RUnlock() - - val, ok := cacher.dataMap[string(key)] - - return val, ok -} - -// Has - -func (cacher *CacherMock) Has(key []byte) bool { - cacher.mut.RLock() - defer cacher.mut.RUnlock() - - _, ok := cacher.dataMap[string(key)] - - return ok -} - -// Peek - -func (cacher *CacherMock) Peek(key []byte) (value interface{}, ok bool) { - cacher.mut.RLock() - defer cacher.mut.RUnlock() - - val, ok := cacher.dataMap[string(key)] - - return val, ok -} - -// HasOrAdd - -func (cacher *CacherMock) HasOrAdd(key []byte, value interface{}, _ int) (has, added bool) { - cacher.mut.Lock() - defer cacher.mut.Unlock() - - _, has = cacher.dataMap[string(key)] - if has { - return true, false - } - - cacher.dataMap[string(key)] = value - cacher.callAddedDataHandlers(key, value) - return false, true -} - -// Remove - -func (cacher *CacherMock) Remove(key []byte) { - cacher.mut.Lock() - defer cacher.mut.Unlock() - - delete(cacher.dataMap, string(key)) -} - -// Keys - -func (cacher *CacherMock) Keys() 
[][]byte { - keys := make([][]byte, len(cacher.dataMap)) - idx := 0 - for k := range cacher.dataMap { - keys[idx] = []byte(k) - idx++ - } - - return keys -} - -// Len - -func (cacher *CacherMock) Len() int { - cacher.mut.RLock() - defer cacher.mut.RUnlock() - - return len(cacher.dataMap) -} - -// SizeInBytesContained - -func (cacher *CacherMock) SizeInBytesContained() uint64 { - return 0 -} - -// MaxSize - -func (cacher *CacherMock) MaxSize() int { - return 10000 -} - -// RegisterHandler - -func (cacher *CacherMock) RegisterHandler(handler func(key []byte, value interface{}), _ string) { - if handler == nil { - return - } - - cacher.mutAddedDataHandlers.Lock() - cacher.addedDataHandlers = append(cacher.addedDataHandlers, handler) - cacher.mutAddedDataHandlers.Unlock() -} - -// UnRegisterHandler - -func (cacher *CacherMock) UnRegisterHandler(string) { -} - -// IsInterfaceNil returns true if there is no value under the interface -func (cacher *CacherMock) IsInterfaceNil() bool { - return cacher == nil -} - -// Close - -func (cacher *CacherMock) Close() error { - return nil -} diff --git a/data/mock/memDbMock.go b/data/mock/memDbMock.go deleted file mode 100644 index 6e6de2c9e..000000000 --- a/data/mock/memDbMock.go +++ /dev/null @@ -1,124 +0,0 @@ -package mock - -import ( - "encoding/base64" - "errors" - "fmt" - "sync" -) - -// MemDbMock represents the memory database storage. It holds a map of key value pairs -// and a mutex to handle concurrent accesses to the map -type MemDbMock struct { - db map[string][]byte - mutx sync.RWMutex - PutCalled func(key, val []byte) error -} - -// NewMemDbMock creates a new memorydb object -func NewMemDbMock() *MemDbMock { - return &MemDbMock{ - db: make(map[string][]byte), - mutx: sync.RWMutex{}, - } -} - -// Put adds the value to the (key, val) storage medium -func (s *MemDbMock) Put(key, val []byte) error { - s.mutx.Lock() - defer s.mutx.Unlock() - - s.db[string(key)] = val - - if s.PutCalled != nil { - return s.PutCalled(key, val) - } - - return nil -} - -// Get gets the value associated to the key, or reports an error -func (s *MemDbMock) Get(key []byte) ([]byte, error) { - s.mutx.RLock() - defer s.mutx.RUnlock() - - val, ok := s.db[string(key)] - - if !ok { - return nil, fmt.Errorf("key: %s not found", base64.StdEncoding.EncodeToString(key)) - } - - return val, nil -} - -// Has returns true if the given key is present in the persistence medium, false otherwise -func (s *MemDbMock) Has(key []byte) error { - s.mutx.RLock() - defer s.mutx.RUnlock() - - _, ok := s.db[string(key)] - if !ok { - return errors.New("key not present") - } - - return nil -} - -// Init initializes the storage medium and prepares it for usage -func (s *MemDbMock) Init() error { - // no special initialization needed - return nil -} - -// Close closes the files/resources associated to the storage medium -func (s *MemDbMock) Close() error { - // nothing to do - return nil -} - -// Remove removes the data associated to the given key -func (s *MemDbMock) Remove(key []byte) error { - s.mutx.Lock() - defer s.mutx.Unlock() - - delete(s.db, string(key)) - - return nil -} - -// Destroy removes the storage medium stored data -func (s *MemDbMock) Destroy() error { - s.mutx.Lock() - defer s.mutx.Unlock() - - s.db = make(map[string][]byte) - - return nil -} - -// DestroyClosed removes the already closed storage medium stored data -func (s *MemDbMock) DestroyClosed() error { - return nil -} - -// RangeKeys will iterate over all contained (key, value) pairs calling the handler for each pair -func 
(s *MemDbMock) RangeKeys(handler func(key []byte, value []byte) bool) { - if handler == nil { - return - } - - s.mutx.RLock() - defer s.mutx.RUnlock() - - for k, v := range s.db { - shouldContinue := handler([]byte(k), v) - if !shouldContinue { - return - } - } -} - -// IsInterfaceNil returns true if there is no value under the interface -func (s *MemDbMock) IsInterfaceNil() bool { - return s == nil -} diff --git a/data/mock/multipleShardsCoordinatorMock.go b/data/mock/multipleShardsCoordinatorMock.go deleted file mode 100644 index c0c1f5aa8..000000000 --- a/data/mock/multipleShardsCoordinatorMock.go +++ /dev/null @@ -1,59 +0,0 @@ -package mock - -import ( - "fmt" -) - -// MultipleShardsCoordinatorMock - -type MultipleShardsCoordinatorMock struct { - ComputeIdCalled func(address []byte) uint32 - NoShards uint32 - CurrentShard uint32 -} - -// NewMultiShardsCoordinatorMock - -func NewMultiShardsCoordinatorMock(nrShard uint32) *MultipleShardsCoordinatorMock { - return &MultipleShardsCoordinatorMock{NoShards: nrShard} -} - -// NumberOfShards - -func (scm *MultipleShardsCoordinatorMock) NumberOfShards() uint32 { - return scm.NoShards -} - -// ComputeId - -func (scm *MultipleShardsCoordinatorMock) ComputeId(address []byte) uint32 { - if scm.ComputeIdCalled == nil { - return scm.SelfId() - } - return scm.ComputeIdCalled(address) -} - -// SelfId - -func (scm *MultipleShardsCoordinatorMock) SelfId() uint32 { - return scm.CurrentShard -} - -// SameShard - -func (scm *MultipleShardsCoordinatorMock) SameShard(_, _ []byte) bool { - return true -} - -// CommunicationIdentifier returns the identifier between current shard ID and destination shard ID -// identifier is generated such as the first shard from identifier is always smaller than the last -func (scm *MultipleShardsCoordinatorMock) CommunicationIdentifier(destShardID uint32) string { - if destShardID == scm.CurrentShard { - return fmt.Sprintf("_%d", scm.CurrentShard) - } - - if destShardID < scm.CurrentShard { - return fmt.Sprintf("_%d_%d", destShardID, scm.CurrentShard) - } - - return fmt.Sprintf("_%d_%d", scm.CurrentShard, destShardID) -} - -// IsInterfaceNil returns true if there is no value under the interface -func (scm *MultipleShardsCoordinatorMock) IsInterfaceNil() bool { - return scm == nil -} diff --git a/data/mock/requestHandlerStub.go b/data/mock/requestHandlerStub.go deleted file mode 100644 index 268ce86a1..000000000 --- a/data/mock/requestHandlerStub.go +++ /dev/null @@ -1,120 +0,0 @@ -package mock - -import "time" - -// RequestHandlerStub - -type RequestHandlerStub struct { - RequestShardHeaderCalled func(shardID uint32, hash []byte) - RequestMetaHeaderCalled func(hash []byte) - RequestMetaHeaderByNonceCalled func(nonce uint64) - RequestShardHeaderByNonceCalled func(shardID uint32, nonce uint64) - RequestTransactionHandlerCalled func(destShardID uint32, txHashes [][]byte) - RequestScrHandlerCalled func(destShardID uint32, txHashes [][]byte) - RequestRewardTxHandlerCalled func(destShardID uint32, txHashes [][]byte) - RequestMiniBlockHandlerCalled func(destShardID uint32, miniblockHash []byte) - RequestMiniBlocksHandlerCalled func(destShardID uint32, miniblocksHashes [][]byte) - RequestTrieNodesCalled func(destShardID uint32, hashes [][]byte, topic string) - RequestStartOfEpochMetaBlockCalled func(epoch uint32) -} - -// RequestInterval - -func (rhs *RequestHandlerStub) RequestInterval() time.Duration { - return time.Second -} - -// RequestStartOfEpochMetaBlock - -func (rhs *RequestHandlerStub) RequestStartOfEpochMetaBlock(epoch 
uint32) { - if rhs.RequestStartOfEpochMetaBlockCalled == nil { - return - } - rhs.RequestStartOfEpochMetaBlockCalled(epoch) -} - -// SetEpoch - -func (rhs *RequestHandlerStub) SetEpoch(_ uint32) { -} - -// RequestShardHeader - -func (rhs *RequestHandlerStub) RequestShardHeader(shardID uint32, hash []byte) { - if rhs.RequestShardHeaderCalled == nil { - return - } - rhs.RequestShardHeaderCalled(shardID, hash) -} - -// RequestMetaHeader - -func (rhs *RequestHandlerStub) RequestMetaHeader(hash []byte) { - if rhs.RequestMetaHeaderCalled == nil { - return - } - rhs.RequestMetaHeaderCalled(hash) -} - -// RequestMetaHeaderByNonce - -func (rhs *RequestHandlerStub) RequestMetaHeaderByNonce(nonce uint64) { - if rhs.RequestMetaHeaderByNonceCalled == nil { - return - } - rhs.RequestMetaHeaderByNonceCalled(nonce) -} - -// RequestShardHeaderByNonce - -func (rhs *RequestHandlerStub) RequestShardHeaderByNonce(shardID uint32, nonce uint64) { - if rhs.RequestShardHeaderByNonceCalled == nil { - return - } - rhs.RequestShardHeaderByNonceCalled(shardID, nonce) -} - -// RequestTransaction - -func (rhs *RequestHandlerStub) RequestTransaction(destShardID uint32, txHashes [][]byte) { - if rhs.RequestTransactionHandlerCalled == nil { - return - } - rhs.RequestTransactionHandlerCalled(destShardID, txHashes) -} - -// RequestUnsignedTransactions - -func (rhs *RequestHandlerStub) RequestUnsignedTransactions(destShardID uint32, txHashes [][]byte) { - if rhs.RequestScrHandlerCalled == nil { - return - } - rhs.RequestScrHandlerCalled(destShardID, txHashes) -} - -// RequestRewardTransactions - -func (rhs *RequestHandlerStub) RequestRewardTransactions(destShardID uint32, txHashes [][]byte) { - if rhs.RequestRewardTxHandlerCalled == nil { - return - } - rhs.RequestRewardTxHandlerCalled(destShardID, txHashes) -} - -// RequestMiniBlock - -func (rhs *RequestHandlerStub) RequestMiniBlock(destShardID uint32, miniblockHash []byte) { - if rhs.RequestMiniBlockHandlerCalled == nil { - return - } - rhs.RequestMiniBlockHandlerCalled(destShardID, miniblockHash) -} - -// RequestMiniBlocks - -func (rhs *RequestHandlerStub) RequestMiniBlocks(destShardID uint32, miniblocksHashes [][]byte) { - if rhs.RequestMiniBlocksHandlerCalled == nil { - return - } - rhs.RequestMiniBlocksHandlerCalled(destShardID, miniblocksHashes) -} - -// RequestTrieNodes - -func (rhs *RequestHandlerStub) RequestTrieNodes(destShardID uint32, hashes [][]byte, topic string) { - if rhs.RequestTrieNodesCalled == nil { - return - } - rhs.RequestTrieNodesCalled(destShardID, hashes, topic) -} - -// IsInterfaceNil returns true if there is no value under the interface -func (rhs *RequestHandlerStub) IsInterfaceNil() bool { - return rhs == nil -} diff --git a/data/mock/shardCoordinatorMock.go b/data/mock/shardCoordinatorMock.go deleted file mode 100644 index ed6bd5539..000000000 --- a/data/mock/shardCoordinatorMock.go +++ /dev/null @@ -1,50 +0,0 @@ -package mock - -import ( - "github.com/multiversx/mx-chain-core-go/core" -) - -// ShardCoordinatorMock - -type ShardCoordinatorMock struct { - SelfID uint32 - NumOfShards uint32 -} - -// NumberOfShards - -func (scm *ShardCoordinatorMock) NumberOfShards() uint32 { - return scm.NumOfShards -} - -// ComputeId - -func (scm *ShardCoordinatorMock) ComputeId(_ []byte) uint32 { - panic("implement me") -} - -// SetSelfId - -func (scm *ShardCoordinatorMock) SetSelfId(_ uint32) error { - panic("implement me") -} - -// SelfId - -func (scm *ShardCoordinatorMock) SelfId() uint32 { - return scm.SelfID -} - -// SameShard - -func (scm 
*ShardCoordinatorMock) SameShard(_, _ []byte) bool { - return true -} - -// CommunicationIdentifier - -func (scm *ShardCoordinatorMock) CommunicationIdentifier(destShardID uint32) string { - if destShardID == core.MetachainShardId { - return "_0_META" - } - - return "_0" -} - -// IsInterfaceNil returns true if there is no value under the interface -func (scm *ShardCoordinatorMock) IsInterfaceNil() bool { - return scm == nil -} diff --git a/data/mock/storerStub.go b/data/mock/storerStub.go deleted file mode 100644 index fed47ae00..000000000 --- a/data/mock/storerStub.go +++ /dev/null @@ -1,137 +0,0 @@ -package mock - -// StorerStub - -type StorerStub struct { - PutCalled func(key, data []byte) error - GetCalled func(key []byte) ([]byte, error) - GetFromEpochCalled func(key []byte, epoch uint32) ([]byte, error) - GetBulkFromEpochCalled func(keys [][]byte, epoch uint32) (map[string][]byte, error) - HasCalled func(key []byte) error - HasInEpochCalled func(key []byte, epoch uint32) error - SearchFirstCalled func(key []byte) ([]byte, error) - RemoveCalled func(key []byte) error - ClearCacheCalled func() - DestroyUnitCalled func() error - RangeKeysCalled func(handler func(key []byte, val []byte) bool) - PutInEpochCalled func(key, data []byte, epoch uint32) error - GetOldestEpochCalled func() (uint32, error) - CloseCalled func() error -} - -// PutInEpoch - -func (ss *StorerStub) PutInEpoch(key, data []byte, epoch uint32) error { - if ss.PutInEpochCalled != nil { - return ss.PutInEpochCalled(key, data, epoch) - } - - return nil -} - -// GetFromEpoch - -func (ss *StorerStub) GetFromEpoch(key []byte, epoch uint32) ([]byte, error) { - if ss.GetFromEpochCalled != nil { - return ss.GetFromEpochCalled(key, epoch) - } - - return nil, nil -} - -// GetBulkFromEpoch - -func (ss *StorerStub) GetBulkFromEpoch(keys [][]byte, epoch uint32) (map[string][]byte, error) { - if ss.GetBulkFromEpochCalled != nil { - return ss.GetBulkFromEpochCalled(keys, epoch) - } - - return nil, nil -} - -// SearchFirst - -func (ss *StorerStub) SearchFirst(key []byte) ([]byte, error) { - if ss.SearchFirstCalled != nil { - return ss.SearchFirstCalled(key) - } - - return nil, nil -} - -// Close - -func (ss *StorerStub) Close() error { - if ss.CloseCalled != nil { - return ss.CloseCalled() - } - - return nil -} - -// Put - -func (ss *StorerStub) Put(key, data []byte) error { - if ss.PutCalled != nil { - return ss.PutCalled(key, data) - } - - return nil -} - -// Get - -func (ss *StorerStub) Get(key []byte) ([]byte, error) { - if ss.GetCalled != nil { - return ss.GetCalled(key) - } - - return nil, nil -} - -// Has - -func (ss *StorerStub) Has(key []byte) error { - if ss.HasCalled != nil { - return ss.HasCalled(key) - } - - return nil -} - -// Remove - -func (ss *StorerStub) Remove(key []byte) error { - if ss.RemoveCalled != nil { - return ss.RemoveCalled(key) - } - - return nil -} - -// ClearCache - -func (ss *StorerStub) ClearCache() { - if ss.ClearCacheCalled != nil { - ss.ClearCacheCalled() - } -} - -// DestroyUnit - -func (ss *StorerStub) DestroyUnit() error { - if ss.DestroyUnitCalled != nil { - return ss.DestroyUnitCalled() - } - - return nil -} - -// RangeKeys - -func (ss *StorerStub) RangeKeys(handler func(key []byte, val []byte) bool) { - if ss.RangeKeysCalled != nil { - ss.RangeKeysCalled(handler) - } -} - -// GetOldestEpoch - -func (ss *StorerStub) GetOldestEpoch() (uint32, error) { - if ss.GetOldestEpochCalled != nil { - return ss.GetOldestEpochCalled() - } - - return 0, nil -} - -// IsInterfaceNil returns true if there is no 
value under the interface -func (ss *StorerStub) IsInterfaceNil() bool { - return ss == nil -} diff --git a/data/outport/common_test.go b/data/outport/common_test.go index 555657ca1..0fe18b65f 100644 --- a/data/outport/common_test.go +++ b/data/outport/common_test.go @@ -55,7 +55,7 @@ func TestGetBody(t *testing.T) { require.Nil(t, receivedBody) require.Equal(t, errNilBodyHandler, err) - var body data.BodyHandler = &block.Body{} + body := &block.Body{} receivedBody, err = GetBody(body) require.Nil(t, err) require.Equal(t, body, receivedBody) diff --git a/data/transaction/log.pb.go b/data/transaction/log.pb.go index 0f0b729e8..426d3b048 100644 --- a/data/transaction/log.pb.go +++ b/data/transaction/log.pb.go @@ -154,28 +154,30 @@ func init() { func init() { proto.RegisterFile("log.proto", fileDescriptor_a153da538f858886) } var fileDescriptor_a153da538f858886 = []byte{ - // 327 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x90, 0x31, 0x4f, 0xc2, 0x40, - 0x14, 0xc7, 0x7b, 0x14, 0x8a, 0x3e, 0x08, 0xc3, 0x4d, 0x8d, 0x31, 0xaf, 0x84, 0xc4, 0x84, 0xc5, - 0x62, 0x74, 0x73, 0x83, 0xc8, 0x60, 0xe2, 0xd4, 0x38, 0x39, 0x98, 0x1c, 0xb4, 0xd4, 0x4b, 0xb0, - 0x47, 0xda, 0xc3, 0xd9, 0x8f, 0xe0, 0xc7, 0xf0, 0xa3, 0x38, 0x32, 0x32, 0x98, 0x46, 0xae, 0x8b, - 0xe9, 0xc4, 0x47, 0x30, 0xbe, 0xa2, 0x51, 0x27, 0xa7, 0xf6, 0xfd, 0xfe, 0xbf, 0x7b, 0xf7, 0xee, - 0xc1, 0xfe, 0x5c, 0xc5, 0xfe, 0x22, 0x55, 0x5a, 0xf1, 0x06, 0x7d, 0x0e, 0x8e, 0x63, 0xa9, 0xef, - 0x96, 0x13, 0x7f, 0xaa, 0xee, 0x07, 0xb1, 0x8a, 0xd5, 0x80, 0xf0, 0x64, 0x39, 0xa3, 0x8a, 0x0a, - 0xfa, 0xab, 0x4e, 0xf5, 0x5e, 0x19, 0x34, 0xc6, 0x0f, 0x51, 0xa2, 0xf9, 0x11, 0x34, 0x87, 0x61, - 0x98, 0x46, 0x59, 0xe6, 0xb2, 0x2e, 0xeb, 0xb7, 0x47, 0xad, 0x32, 0xf7, 0x9a, 0xa2, 0x42, 0xc1, - 0x57, 0xc6, 0x7d, 0x80, 0xcb, 0x30, 0x4a, 0xb4, 0x9c, 0xc9, 0x28, 0x75, 0x6b, 0x64, 0x76, 0xca, - 0xdc, 0x03, 0xf9, 0x4d, 0x83, 0x1f, 0x06, 0xef, 0x81, 0x73, 0xad, 0x16, 0x72, 0x9a, 0xb9, 0x76, - 0xd7, 0xee, 0xb7, 0x47, 0x50, 0xe6, 0x9e, 0xa3, 0x89, 0x04, 0xbb, 0x84, 0x1f, 0x42, 0xfd, 0x42, - 0x68, 0xe1, 0xd6, 0xa9, 0xdb, 0x5e, 0x99, 0x7b, 0xf5, 0x50, 0x68, 0x11, 0x10, 0xe5, 0xe7, 0xd0, - 0x19, 0x86, 0xa1, 0xd4, 0x52, 0x25, 0x62, 0x4e, 0x5e, 0x83, 0x3a, 0xf1, 0x32, 0xf7, 0x3a, 0xe2, - 0x57, 0x12, 0xfc, 0x31, 0x7b, 0xb7, 0x60, 0x5f, 0xa9, 0xf8, 0xbf, 0x6f, 0x3b, 0x01, 0x87, 0x76, - 0x91, 0xb9, 0xb5, 0xae, 0xdd, 0x6f, 0x9d, 0xb6, 0xab, 0x25, 0xf9, 0x04, 0xab, 0xc9, 0x23, 0xca, - 0x83, 0x9d, 0x37, 0x1a, 0xaf, 0x36, 0x68, 0xad, 0x37, 0x68, 0x6d, 0x37, 0xc8, 0x1e, 0x0d, 0xb2, - 0x67, 0x83, 0xec, 0xc5, 0x20, 0x5b, 0x19, 0x64, 0x6b, 0x83, 0xec, 0xcd, 0x20, 0x7b, 0x37, 0x68, - 0x6d, 0x0d, 0xb2, 0xa7, 0x02, 0xad, 0x55, 0x81, 0xd6, 0xba, 0x40, 0xeb, 0xa6, 0xa5, 0x53, 0x91, - 0x64, 0x62, 0xfa, 0x39, 0xec, 0xc4, 0xa1, 0x7b, 0xce, 0x3e, 0x02, 0x00, 0x00, 0xff, 0xff, 0xff, - 0xf2, 0x5d, 0xce, 0xcf, 0x01, 0x00, 0x00, + // 364 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x91, 0xb1, 0x4e, 0xf2, 0x50, + 0x14, 0xc7, 0x7b, 0x29, 0x94, 0xef, 0xbb, 0x10, 0x86, 0x4e, 0xcd, 0x97, 0x2f, 0xa7, 0x84, 0xc4, + 0x84, 0xa5, 0xad, 0xd1, 0x4d, 0x27, 0x50, 0x07, 0x13, 0xa7, 0xc6, 0xc9, 0xc1, 0xe4, 0xd2, 0x96, + 0x72, 0x13, 0xe8, 0x25, 0xed, 0x85, 0xb0, 0x98, 0xf8, 0x08, 0x3e, 0x86, 0x8f, 0xe2, 0xc8, 0xc8, + 0x60, 0x1a, 0xb9, 0x2c, 0xa6, 0x13, 0x8f, 0x60, 0x38, 0x45, 0x83, 0x4e, 0x4e, 0x3d, 0xe7, 0xf7, + 0xff, 0xf7, 0xe4, 0x9c, 0xff, 0xa5, 0x7f, 0xc7, 0x22, 0x76, 0xa7, 0xa9, 0x90, 
0xc2, 0xac, 0xe1, + 0xe7, 0x9f, 0x13, 0x73, 0x39, 0x9a, 0x0d, 0xdc, 0x40, 0x4c, 0xbc, 0x58, 0xc4, 0xc2, 0x43, 0x3c, + 0x98, 0x0d, 0xb1, 0xc3, 0x06, 0xab, 0xf2, 0xaf, 0xce, 0x2b, 0xa1, 0xb5, 0xab, 0x79, 0x94, 0x48, + 0xf3, 0x88, 0xd6, 0x7b, 0x61, 0x98, 0x46, 0x59, 0x66, 0x91, 0x36, 0xe9, 0x36, 0xfb, 0x8d, 0x22, + 0xb7, 0xeb, 0xac, 0x44, 0xfe, 0xa7, 0x66, 0xba, 0x94, 0x5e, 0x87, 0x51, 0x22, 0xf9, 0x90, 0x47, + 0xa9, 0x55, 0x41, 0x67, 0xab, 0xc8, 0x6d, 0xca, 0xbf, 0xa8, 0x7f, 0xe0, 0x30, 0x3b, 0xd4, 0xb8, + 0x15, 0x53, 0x1e, 0x64, 0x96, 0xde, 0xd6, 0xbb, 0xcd, 0x3e, 0x2d, 0x72, 0xdb, 0x90, 0x48, 0xfc, + 0xbd, 0x62, 0xfe, 0xa7, 0xd5, 0x4b, 0x26, 0x99, 0x55, 0xc5, 0x69, 0x7f, 0x8a, 0xdc, 0xae, 0x86, + 0x4c, 0x32, 0x1f, 0xa9, 0x79, 0x46, 0x5b, 0xbd, 0x30, 0xe4, 0x92, 0x8b, 0x84, 0x8d, 0xd1, 0x57, + 0xc3, 0x49, 0x66, 0x91, 0xdb, 0x2d, 0xf6, 0x4d, 0xf1, 0x7f, 0x38, 0x3b, 0xf7, 0x54, 0xbf, 0x11, + 0xf1, 0x6f, 0x6f, 0x3b, 0xa6, 0x06, 0x66, 0x91, 0x59, 0x95, 0xb6, 0xde, 0x6d, 0x9c, 0x34, 0xcb, + 0x90, 0x5c, 0x84, 0xe5, 0xe6, 0x11, 0xea, 0xfe, 0xde, 0xd7, 0x7f, 0x58, 0xae, 0x41, 0x5b, 0xad, + 0x41, 0xdb, 0xae, 0x81, 0x3c, 0x2a, 0x20, 0xcf, 0x0a, 0xc8, 0x8b, 0x02, 0xb2, 0x54, 0x40, 0x56, + 0x0a, 0xc8, 0x9b, 0x02, 0xf2, 0xae, 0x40, 0xdb, 0x2a, 0x20, 0x4f, 0x1b, 0xd0, 0x96, 0x1b, 0xd0, + 0x56, 0x1b, 0xd0, 0xee, 0x2e, 0x0e, 0xde, 0x69, 0x32, 0x1b, 0x4b, 0x3e, 0x8f, 0xd2, 0x6c, 0xe1, + 0x4d, 0x16, 0x4e, 0x30, 0x62, 0x3c, 0x71, 0x02, 0x91, 0x46, 0x4e, 0x2c, 0xbc, 0x5d, 0x10, 0x9e, + 0x4c, 0x59, 0x92, 0xb1, 0x60, 0x77, 0xd8, 0xf9, 0x41, 0x3d, 0x30, 0x70, 0xbf, 0xd3, 0x8f, 0x00, + 0x00, 0x00, 0xff, 0xff, 0xbc, 0xc3, 0xac, 0x36, 0x07, 0x02, 0x00, 0x00, } func (this *Event) Equal(that interface{}) bool { diff --git a/data/types.go b/data/types.go index 52a6b0d7e..d6680afe3 100644 --- a/data/types.go +++ b/data/types.go @@ -5,3 +5,9 @@ type LogData struct { LogHandler TxHash string } + +// KeyValuePair is a tuple of (key, value) +type KeyValuePair struct { + Key []byte + Value []byte +} diff --git a/data/validator/validatorStatistics.go b/data/validator/validatorStatistics.go new file mode 100644 index 000000000..f905b5dfd --- /dev/null +++ b/data/validator/validatorStatistics.go @@ -0,0 +1,2 @@ +//go:generate protoc -I=. -I=$GOPATH/src -I=$GOPATH/src/github.com/multiversx/protobuf/protobuf --gogoslick_out=. validatorStatistics.proto +package validator diff --git a/data/validator/validatorStatistics.pb.go b/data/validator/validatorStatistics.pb.go new file mode 100644 index 000000000..fe84a0737 --- /dev/null +++ b/data/validator/validatorStatistics.pb.go @@ -0,0 +1,938 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: validatorStatistics.proto + +package validator + +import ( + encoding_binary "encoding/binary" + fmt "fmt" + _ "github.com/gogo/protobuf/gogoproto" + proto "github.com/gogo/protobuf/proto" + io "io" + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +// ValidatorStatistics holds information about a validator +type ValidatorStatistics struct { + TempRating float32 `protobuf:"fixed32,1,opt,name=TempRating,proto3" json:"tempRating"` + NumLeaderSuccess uint32 `protobuf:"varint,2,opt,name=NumLeaderSuccess,proto3" json:"numLeaderSuccess"` + NumLeaderFailure uint32 `protobuf:"varint,3,opt,name=NumLeaderFailure,proto3" json:"numLeaderFailure"` + NumValidatorSuccess uint32 `protobuf:"varint,4,opt,name=NumValidatorSuccess,proto3" json:"numValidatorSuccess"` + NumValidatorFailure uint32 `protobuf:"varint,5,opt,name=NumValidatorFailure,proto3" json:"numValidatorFailure"` + NumValidatorIgnoredSignatures uint32 `protobuf:"varint,6,opt,name=NumValidatorIgnoredSignatures,proto3" json:"numValidatorIgnoredSignatures"` + Rating float32 `protobuf:"fixed32,7,opt,name=Rating,proto3" json:"rating"` + RatingModifier float32 `protobuf:"fixed32,8,opt,name=RatingModifier,proto3" json:"ratingModifier"` + TotalNumLeaderSuccess uint32 `protobuf:"varint,9,opt,name=TotalNumLeaderSuccess,proto3" json:"totalNumLeaderSuccess"` + TotalNumLeaderFailure uint32 `protobuf:"varint,10,opt,name=TotalNumLeaderFailure,proto3" json:"totalNumLeaderFailure"` + TotalNumValidatorSuccess uint32 `protobuf:"varint,11,opt,name=TotalNumValidatorSuccess,proto3" json:"totalNumValidatorSuccess"` + TotalNumValidatorFailure uint32 `protobuf:"varint,12,opt,name=TotalNumValidatorFailure,proto3" json:"totalNumValidatorFailure"` + TotalNumValidatorIgnoredSignatures uint32 `protobuf:"varint,13,opt,name=TotalNumValidatorIgnoredSignatures,proto3" json:"totalNumValidatorIgnoredSignatures"` + ShardId uint32 `protobuf:"varint,14,opt,name=ShardId,proto3" json:"shardId"` + ValidatorStatus string `protobuf:"bytes,15,opt,name=ValidatorStatus,proto3" json:"validatorStatus"` +} + +func (m *ValidatorStatistics) Reset() { *m = ValidatorStatistics{} } +func (*ValidatorStatistics) ProtoMessage() {} +func (*ValidatorStatistics) Descriptor() ([]byte, []int) { + return fileDescriptor_5fa43c48ee2425ed, []int{0} +} +func (m *ValidatorStatistics) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ValidatorStatistics) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ValidatorStatistics) XXX_Merge(src proto.Message) { + xxx_messageInfo_ValidatorStatistics.Merge(m, src) +} +func (m *ValidatorStatistics) XXX_Size() int { + return m.Size() +} +func (m *ValidatorStatistics) XXX_DiscardUnknown() { + xxx_messageInfo_ValidatorStatistics.DiscardUnknown(m) +} + +var xxx_messageInfo_ValidatorStatistics proto.InternalMessageInfo + +func (m *ValidatorStatistics) GetTempRating() float32 { + if m != nil { + return m.TempRating + } + return 0 +} + +func (m *ValidatorStatistics) GetNumLeaderSuccess() uint32 { + if m != nil { + return m.NumLeaderSuccess + } + return 0 +} + +func (m *ValidatorStatistics) GetNumLeaderFailure() uint32 { + if m != nil { + return m.NumLeaderFailure + } + return 0 +} + +func (m *ValidatorStatistics) GetNumValidatorSuccess() uint32 { + if m != nil { + return m.NumValidatorSuccess + } + return 0 +} + +func (m *ValidatorStatistics) GetNumValidatorFailure() uint32 { + if m != nil { + return m.NumValidatorFailure + } + return 0 +} + +func (m *ValidatorStatistics) GetNumValidatorIgnoredSignatures() uint32 { + if m != nil { + return m.NumValidatorIgnoredSignatures + } + return 0 +} 
+ +func (m *ValidatorStatistics) GetRating() float32 { + if m != nil { + return m.Rating + } + return 0 +} + +func (m *ValidatorStatistics) GetRatingModifier() float32 { + if m != nil { + return m.RatingModifier + } + return 0 +} + +func (m *ValidatorStatistics) GetTotalNumLeaderSuccess() uint32 { + if m != nil { + return m.TotalNumLeaderSuccess + } + return 0 +} + +func (m *ValidatorStatistics) GetTotalNumLeaderFailure() uint32 { + if m != nil { + return m.TotalNumLeaderFailure + } + return 0 +} + +func (m *ValidatorStatistics) GetTotalNumValidatorSuccess() uint32 { + if m != nil { + return m.TotalNumValidatorSuccess + } + return 0 +} + +func (m *ValidatorStatistics) GetTotalNumValidatorFailure() uint32 { + if m != nil { + return m.TotalNumValidatorFailure + } + return 0 +} + +func (m *ValidatorStatistics) GetTotalNumValidatorIgnoredSignatures() uint32 { + if m != nil { + return m.TotalNumValidatorIgnoredSignatures + } + return 0 +} + +func (m *ValidatorStatistics) GetShardId() uint32 { + if m != nil { + return m.ShardId + } + return 0 +} + +func (m *ValidatorStatistics) GetValidatorStatus() string { + if m != nil { + return m.ValidatorStatus + } + return "" +} + +func init() { + proto.RegisterType((*ValidatorStatistics)(nil), "proto.ValidatorStatistics") +} + +func init() { proto.RegisterFile("validatorStatistics.proto", fileDescriptor_5fa43c48ee2425ed) } + +var fileDescriptor_5fa43c48ee2425ed = []byte{ + // 507 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x94, 0xc1, 0x8e, 0x12, 0x31, + 0x18, 0xc7, 0xa7, 0xab, 0x0b, 0x4b, 0xd7, 0x85, 0x4d, 0x71, 0xe3, 0xac, 0x71, 0x5b, 0x24, 0xd1, + 0x70, 0x91, 0x3d, 0x78, 0x33, 0x31, 0x31, 0x98, 0x98, 0x90, 0xe8, 0x9a, 0x94, 0x8d, 0x31, 0xde, + 0x0a, 0x33, 0x3b, 0xdb, 0x04, 0xa6, 0x9b, 0x4e, 0x87, 0xb3, 0x8f, 0xe0, 0x63, 0xf8, 0x28, 0x1e, + 0x39, 0x72, 0x30, 0x8d, 0x0c, 0x17, 0xd3, 0xd3, 0x3e, 0x82, 0x49, 0x61, 0x56, 0xa0, 0x03, 0x7a, + 0x62, 0xfa, 0xfd, 0xff, 0xff, 0x1f, 0x5f, 0xfb, 0x35, 0x85, 0xa7, 0x63, 0x36, 0xe4, 0x01, 0x53, + 0x42, 0xf6, 0x14, 0x53, 0x3c, 0x51, 0x7c, 0x90, 0xb4, 0x6f, 0xa4, 0x50, 0x02, 0xed, 0xdb, 0x9f, + 0xc7, 0x2f, 0x22, 0xae, 0xae, 0xd3, 0x7e, 0x7b, 0x20, 0x46, 0xe7, 0x91, 0x88, 0xc4, 0xb9, 0x2d, + 0xf7, 0xd3, 0x2b, 0xbb, 0xb2, 0x0b, 0xfb, 0xb5, 0x48, 0x35, 0x7f, 0x1e, 0xc0, 0xfa, 0x27, 0x97, + 0x89, 0xda, 0x10, 0x5e, 0x86, 0xa3, 0x1b, 0xca, 0x14, 0x8f, 0x23, 0x1f, 0x34, 0x40, 0x6b, 0xaf, + 0x53, 0x35, 0x9a, 0x40, 0x75, 0x57, 0xa5, 0x2b, 0x0e, 0xf4, 0x06, 0x1e, 0x5f, 0xa4, 0xa3, 0xf7, + 0x21, 0x0b, 0x42, 0xd9, 0x4b, 0x07, 0x83, 0x30, 0x49, 0xfc, 0xbd, 0x06, 0x68, 0x1d, 0x75, 0x1e, + 0x1a, 0x4d, 0x8e, 0xe3, 0x0d, 0x8d, 0x3a, 0xee, 0x35, 0xc2, 0x3b, 0xc6, 0x87, 0xa9, 0x0c, 0xfd, + 0x7b, 0x05, 0x84, 0xa5, 0x46, 0x1d, 0x37, 0xea, 0xc2, 0xfa, 0x45, 0x3a, 0xfa, 0xbb, 0x9b, 0x65, + 0x1b, 0xf7, 0x2d, 0xe4, 0x91, 0xd1, 0xa4, 0x1e, 0xbb, 0x32, 0x2d, 0xca, 0x6c, 0xa2, 0xf2, 0x7e, + 0xf6, 0x8b, 0x51, 0x79, 0x4b, 0x45, 0x19, 0x14, 0xc1, 0xb3, 0xd5, 0x72, 0x37, 0x8a, 0x85, 0x0c, + 0x83, 0x1e, 0x8f, 0x62, 0xa6, 0x52, 0x19, 0x26, 0x7e, 0xc9, 0x42, 0x9f, 0x1a, 0x4d, 0xce, 0xe2, + 0x5d, 0x46, 0xba, 0x9b, 0x83, 0x9a, 0xb0, 0xb4, 0x1c, 0x57, 0xd9, 0x8e, 0x0b, 0x1a, 0x4d, 0x4a, + 0x72, 0x31, 0xaa, 0xa5, 0x82, 0x5e, 0xc1, 0xea, 0xe2, 0xeb, 0x83, 0x08, 0xf8, 0x15, 0x0f, 0xa5, + 0x7f, 0x60, 0xbd, 0xc8, 0x68, 0x52, 0x95, 0x6b, 0x0a, 0xdd, 0x70, 0xa2, 0x8f, 0xf0, 0xe4, 0x52, + 0x28, 0x36, 0x74, 0xe6, 0x5c, 0xb1, 0x1b, 0x38, 0x35, 0x9a, 0x9c, 0xa8, 0x22, 0x03, 0x2d, 0xce, + 0xb9, 0xc0, 0xfc, 
0x98, 0xe1, 0x36, 0x60, 0x7e, 0xd0, 0xc5, 0x39, 0xf4, 0x19, 0xfa, 0xb9, 0xe0, + 0xdc, 0x82, 0x43, 0xcb, 0x7c, 0x62, 0x34, 0xf1, 0xd5, 0x16, 0x0f, 0xdd, 0x9a, 0x2e, 0x24, 0xe7, + 0xdd, 0x3e, 0xd8, 0x41, 0xce, 0x1b, 0xde, 0x9a, 0x46, 0x63, 0xd8, 0x74, 0x34, 0xf7, 0x8e, 0x1c, + 0xd9, 0xff, 0x78, 0x6e, 0x34, 0x69, 0xaa, 0x7f, 0xba, 0xe9, 0x7f, 0x10, 0xd1, 0x33, 0x58, 0xee, + 0x5d, 0x33, 0x19, 0x74, 0x03, 0xbf, 0x6a, 0xe1, 0x87, 0x46, 0x93, 0x72, 0xb2, 0x28, 0xd1, 0x5c, + 0x43, 0xaf, 0x61, 0x6d, 0xed, 0x79, 0x48, 0x13, 0xbf, 0xd6, 0x00, 0xad, 0x4a, 0xa7, 0x6e, 0x34, + 0xa9, 0x8d, 0xd7, 0x25, 0xba, 0xe9, 0xed, 0xbc, 0x9d, 0xcc, 0xb0, 0x37, 0x9d, 0x61, 0xef, 0x76, + 0x86, 0xc1, 0xd7, 0x0c, 0x83, 0xef, 0x19, 0x06, 0x3f, 0x32, 0x0c, 0x26, 0x19, 0x06, 0xd3, 0x0c, + 0x83, 0x5f, 0x19, 0x06, 0xbf, 0x33, 0xec, 0xdd, 0x66, 0x18, 0x7c, 0x9b, 0x63, 0x6f, 0x32, 0xc7, + 0xde, 0x74, 0x8e, 0xbd, 0x2f, 0x95, 0x3b, 0x76, 0xbf, 0x64, 0x9f, 0xaa, 0x97, 0x7f, 0x02, 0x00, + 0x00, 0xff, 0xff, 0xb2, 0xd8, 0xe5, 0xbc, 0xfd, 0x04, 0x00, 0x00, +} + +func (this *ValidatorStatistics) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*ValidatorStatistics) + if !ok { + that2, ok := that.(ValidatorStatistics) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.TempRating != that1.TempRating { + return false + } + if this.NumLeaderSuccess != that1.NumLeaderSuccess { + return false + } + if this.NumLeaderFailure != that1.NumLeaderFailure { + return false + } + if this.NumValidatorSuccess != that1.NumValidatorSuccess { + return false + } + if this.NumValidatorFailure != that1.NumValidatorFailure { + return false + } + if this.NumValidatorIgnoredSignatures != that1.NumValidatorIgnoredSignatures { + return false + } + if this.Rating != that1.Rating { + return false + } + if this.RatingModifier != that1.RatingModifier { + return false + } + if this.TotalNumLeaderSuccess != that1.TotalNumLeaderSuccess { + return false + } + if this.TotalNumLeaderFailure != that1.TotalNumLeaderFailure { + return false + } + if this.TotalNumValidatorSuccess != that1.TotalNumValidatorSuccess { + return false + } + if this.TotalNumValidatorFailure != that1.TotalNumValidatorFailure { + return false + } + if this.TotalNumValidatorIgnoredSignatures != that1.TotalNumValidatorIgnoredSignatures { + return false + } + if this.ShardId != that1.ShardId { + return false + } + if this.ValidatorStatus != that1.ValidatorStatus { + return false + } + return true +} +func (this *ValidatorStatistics) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 19) + s = append(s, "&validator.ValidatorStatistics{") + s = append(s, "TempRating: "+fmt.Sprintf("%#v", this.TempRating)+",\n") + s = append(s, "NumLeaderSuccess: "+fmt.Sprintf("%#v", this.NumLeaderSuccess)+",\n") + s = append(s, "NumLeaderFailure: "+fmt.Sprintf("%#v", this.NumLeaderFailure)+",\n") + s = append(s, "NumValidatorSuccess: "+fmt.Sprintf("%#v", this.NumValidatorSuccess)+",\n") + s = append(s, "NumValidatorFailure: "+fmt.Sprintf("%#v", this.NumValidatorFailure)+",\n") + s = append(s, "NumValidatorIgnoredSignatures: "+fmt.Sprintf("%#v", this.NumValidatorIgnoredSignatures)+",\n") + s = append(s, "Rating: "+fmt.Sprintf("%#v", this.Rating)+",\n") + s = append(s, "RatingModifier: "+fmt.Sprintf("%#v", this.RatingModifier)+",\n") + s = append(s, "TotalNumLeaderSuccess: "+fmt.Sprintf("%#v", this.TotalNumLeaderSuccess)+",\n") + s = 
append(s, "TotalNumLeaderFailure: "+fmt.Sprintf("%#v", this.TotalNumLeaderFailure)+",\n") + s = append(s, "TotalNumValidatorSuccess: "+fmt.Sprintf("%#v", this.TotalNumValidatorSuccess)+",\n") + s = append(s, "TotalNumValidatorFailure: "+fmt.Sprintf("%#v", this.TotalNumValidatorFailure)+",\n") + s = append(s, "TotalNumValidatorIgnoredSignatures: "+fmt.Sprintf("%#v", this.TotalNumValidatorIgnoredSignatures)+",\n") + s = append(s, "ShardId: "+fmt.Sprintf("%#v", this.ShardId)+",\n") + s = append(s, "ValidatorStatus: "+fmt.Sprintf("%#v", this.ValidatorStatus)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func valueToGoStringValidatorStatistics(v interface{}, typ string) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv) +} +func (m *ValidatorStatistics) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ValidatorStatistics) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ValidatorStatistics) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.ValidatorStatus) > 0 { + i -= len(m.ValidatorStatus) + copy(dAtA[i:], m.ValidatorStatus) + i = encodeVarintValidatorStatistics(dAtA, i, uint64(len(m.ValidatorStatus))) + i-- + dAtA[i] = 0x7a + } + if m.ShardId != 0 { + i = encodeVarintValidatorStatistics(dAtA, i, uint64(m.ShardId)) + i-- + dAtA[i] = 0x70 + } + if m.TotalNumValidatorIgnoredSignatures != 0 { + i = encodeVarintValidatorStatistics(dAtA, i, uint64(m.TotalNumValidatorIgnoredSignatures)) + i-- + dAtA[i] = 0x68 + } + if m.TotalNumValidatorFailure != 0 { + i = encodeVarintValidatorStatistics(dAtA, i, uint64(m.TotalNumValidatorFailure)) + i-- + dAtA[i] = 0x60 + } + if m.TotalNumValidatorSuccess != 0 { + i = encodeVarintValidatorStatistics(dAtA, i, uint64(m.TotalNumValidatorSuccess)) + i-- + dAtA[i] = 0x58 + } + if m.TotalNumLeaderFailure != 0 { + i = encodeVarintValidatorStatistics(dAtA, i, uint64(m.TotalNumLeaderFailure)) + i-- + dAtA[i] = 0x50 + } + if m.TotalNumLeaderSuccess != 0 { + i = encodeVarintValidatorStatistics(dAtA, i, uint64(m.TotalNumLeaderSuccess)) + i-- + dAtA[i] = 0x48 + } + if m.RatingModifier != 0 { + i -= 4 + encoding_binary.LittleEndian.PutUint32(dAtA[i:], uint32(math.Float32bits(float32(m.RatingModifier)))) + i-- + dAtA[i] = 0x45 + } + if m.Rating != 0 { + i -= 4 + encoding_binary.LittleEndian.PutUint32(dAtA[i:], uint32(math.Float32bits(float32(m.Rating)))) + i-- + dAtA[i] = 0x3d + } + if m.NumValidatorIgnoredSignatures != 0 { + i = encodeVarintValidatorStatistics(dAtA, i, uint64(m.NumValidatorIgnoredSignatures)) + i-- + dAtA[i] = 0x30 + } + if m.NumValidatorFailure != 0 { + i = encodeVarintValidatorStatistics(dAtA, i, uint64(m.NumValidatorFailure)) + i-- + dAtA[i] = 0x28 + } + if m.NumValidatorSuccess != 0 { + i = encodeVarintValidatorStatistics(dAtA, i, uint64(m.NumValidatorSuccess)) + i-- + dAtA[i] = 0x20 + } + if m.NumLeaderFailure != 0 { + i = encodeVarintValidatorStatistics(dAtA, i, uint64(m.NumLeaderFailure)) + i-- + dAtA[i] = 0x18 + } + if m.NumLeaderSuccess != 0 { + i = encodeVarintValidatorStatistics(dAtA, i, uint64(m.NumLeaderSuccess)) + i-- + dAtA[i] = 0x10 + } + if m.TempRating != 0 { + i -= 4 + 
encoding_binary.LittleEndian.PutUint32(dAtA[i:], uint32(math.Float32bits(float32(m.TempRating)))) + i-- + dAtA[i] = 0xd + } + return len(dAtA) - i, nil +} + +func encodeVarintValidatorStatistics(dAtA []byte, offset int, v uint64) int { + offset -= sovValidatorStatistics(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *ValidatorStatistics) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.TempRating != 0 { + n += 5 + } + if m.NumLeaderSuccess != 0 { + n += 1 + sovValidatorStatistics(uint64(m.NumLeaderSuccess)) + } + if m.NumLeaderFailure != 0 { + n += 1 + sovValidatorStatistics(uint64(m.NumLeaderFailure)) + } + if m.NumValidatorSuccess != 0 { + n += 1 + sovValidatorStatistics(uint64(m.NumValidatorSuccess)) + } + if m.NumValidatorFailure != 0 { + n += 1 + sovValidatorStatistics(uint64(m.NumValidatorFailure)) + } + if m.NumValidatorIgnoredSignatures != 0 { + n += 1 + sovValidatorStatistics(uint64(m.NumValidatorIgnoredSignatures)) + } + if m.Rating != 0 { + n += 5 + } + if m.RatingModifier != 0 { + n += 5 + } + if m.TotalNumLeaderSuccess != 0 { + n += 1 + sovValidatorStatistics(uint64(m.TotalNumLeaderSuccess)) + } + if m.TotalNumLeaderFailure != 0 { + n += 1 + sovValidatorStatistics(uint64(m.TotalNumLeaderFailure)) + } + if m.TotalNumValidatorSuccess != 0 { + n += 1 + sovValidatorStatistics(uint64(m.TotalNumValidatorSuccess)) + } + if m.TotalNumValidatorFailure != 0 { + n += 1 + sovValidatorStatistics(uint64(m.TotalNumValidatorFailure)) + } + if m.TotalNumValidatorIgnoredSignatures != 0 { + n += 1 + sovValidatorStatistics(uint64(m.TotalNumValidatorIgnoredSignatures)) + } + if m.ShardId != 0 { + n += 1 + sovValidatorStatistics(uint64(m.ShardId)) + } + l = len(m.ValidatorStatus) + if l > 0 { + n += 1 + l + sovValidatorStatistics(uint64(l)) + } + return n +} + +func sovValidatorStatistics(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozValidatorStatistics(x uint64) (n int) { + return sovValidatorStatistics(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *ValidatorStatistics) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ValidatorStatistics{`, + `TempRating:` + fmt.Sprintf("%v", this.TempRating) + `,`, + `NumLeaderSuccess:` + fmt.Sprintf("%v", this.NumLeaderSuccess) + `,`, + `NumLeaderFailure:` + fmt.Sprintf("%v", this.NumLeaderFailure) + `,`, + `NumValidatorSuccess:` + fmt.Sprintf("%v", this.NumValidatorSuccess) + `,`, + `NumValidatorFailure:` + fmt.Sprintf("%v", this.NumValidatorFailure) + `,`, + `NumValidatorIgnoredSignatures:` + fmt.Sprintf("%v", this.NumValidatorIgnoredSignatures) + `,`, + `Rating:` + fmt.Sprintf("%v", this.Rating) + `,`, + `RatingModifier:` + fmt.Sprintf("%v", this.RatingModifier) + `,`, + `TotalNumLeaderSuccess:` + fmt.Sprintf("%v", this.TotalNumLeaderSuccess) + `,`, + `TotalNumLeaderFailure:` + fmt.Sprintf("%v", this.TotalNumLeaderFailure) + `,`, + `TotalNumValidatorSuccess:` + fmt.Sprintf("%v", this.TotalNumValidatorSuccess) + `,`, + `TotalNumValidatorFailure:` + fmt.Sprintf("%v", this.TotalNumValidatorFailure) + `,`, + `TotalNumValidatorIgnoredSignatures:` + fmt.Sprintf("%v", this.TotalNumValidatorIgnoredSignatures) + `,`, + `ShardId:` + fmt.Sprintf("%v", this.ShardId) + `,`, + `ValidatorStatus:` + fmt.Sprintf("%v", this.ValidatorStatus) + `,`, + `}`, + }, "") + return s +} +func valueToStringValidatorStatistics(v interface{}) string { + rv := reflect.ValueOf(v) + if 
rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *ValidatorStatistics) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowValidatorStatistics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ValidatorStatistics: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ValidatorStatistics: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 5 { + return fmt.Errorf("proto: wrong wireType = %d for field TempRating", wireType) + } + var v uint32 + if (iNdEx + 4) > l { + return io.ErrUnexpectedEOF + } + v = uint32(encoding_binary.LittleEndian.Uint32(dAtA[iNdEx:])) + iNdEx += 4 + m.TempRating = float32(math.Float32frombits(v)) + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field NumLeaderSuccess", wireType) + } + m.NumLeaderSuccess = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowValidatorStatistics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.NumLeaderSuccess |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field NumLeaderFailure", wireType) + } + m.NumLeaderFailure = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowValidatorStatistics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.NumLeaderFailure |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field NumValidatorSuccess", wireType) + } + m.NumValidatorSuccess = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowValidatorStatistics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.NumValidatorSuccess |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field NumValidatorFailure", wireType) + } + m.NumValidatorFailure = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowValidatorStatistics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.NumValidatorFailure |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field NumValidatorIgnoredSignatures", wireType) + } + m.NumValidatorIgnoredSignatures = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowValidatorStatistics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.NumValidatorIgnoredSignatures |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 7: + if wireType != 5 { + return fmt.Errorf("proto: wrong wireType = %d for field Rating", wireType) + } + var v uint32 + if (iNdEx + 4) > l { + return io.ErrUnexpectedEOF + } + v = uint32(encoding_binary.LittleEndian.Uint32(dAtA[iNdEx:])) + iNdEx += 4 + m.Rating = 
float32(math.Float32frombits(v)) + case 8: + if wireType != 5 { + return fmt.Errorf("proto: wrong wireType = %d for field RatingModifier", wireType) + } + var v uint32 + if (iNdEx + 4) > l { + return io.ErrUnexpectedEOF + } + v = uint32(encoding_binary.LittleEndian.Uint32(dAtA[iNdEx:])) + iNdEx += 4 + m.RatingModifier = float32(math.Float32frombits(v)) + case 9: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TotalNumLeaderSuccess", wireType) + } + m.TotalNumLeaderSuccess = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowValidatorStatistics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.TotalNumLeaderSuccess |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 10: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TotalNumLeaderFailure", wireType) + } + m.TotalNumLeaderFailure = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowValidatorStatistics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.TotalNumLeaderFailure |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 11: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TotalNumValidatorSuccess", wireType) + } + m.TotalNumValidatorSuccess = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowValidatorStatistics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.TotalNumValidatorSuccess |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 12: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TotalNumValidatorFailure", wireType) + } + m.TotalNumValidatorFailure = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowValidatorStatistics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.TotalNumValidatorFailure |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 13: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TotalNumValidatorIgnoredSignatures", wireType) + } + m.TotalNumValidatorIgnoredSignatures = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowValidatorStatistics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.TotalNumValidatorIgnoredSignatures |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 14: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ShardId", wireType) + } + m.ShardId = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowValidatorStatistics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ShardId |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 15: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ValidatorStatus", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowValidatorStatistics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthValidatorStatistics + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthValidatorStatistics + } + if 
postIndex > l { + return io.ErrUnexpectedEOF + } + m.ValidatorStatus = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipValidatorStatistics(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthValidatorStatistics + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthValidatorStatistics + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipValidatorStatistics(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowValidatorStatistics + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowValidatorStatistics + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowValidatorStatistics + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthValidatorStatistics + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupValidatorStatistics + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthValidatorStatistics + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthValidatorStatistics = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowValidatorStatistics = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupValidatorStatistics = fmt.Errorf("proto: unexpected end of group") +) diff --git a/data/validator/validatorStatistics.proto b/data/validator/validatorStatistics.proto new file mode 100644 index 000000000..aeb23299c --- /dev/null +++ b/data/validator/validatorStatistics.proto @@ -0,0 +1,27 @@ +syntax = "proto3"; + +package proto; + +option go_package = "validator"; +option (gogoproto.stable_marshaler_all) = true; + +import "github.com/gogo/protobuf/gogoproto/gogo.proto"; + +// ValidatorStatistics holds information about a validator +message ValidatorStatistics { + float TempRating = 1 [(gogoproto.jsontag) = "tempRating"]; + uint32 NumLeaderSuccess = 2 [(gogoproto.jsontag) = "numLeaderSuccess"]; + uint32 NumLeaderFailure = 3 [(gogoproto.jsontag) = "numLeaderFailure"]; + uint32 NumValidatorSuccess = 4 [(gogoproto.jsontag) = "numValidatorSuccess"]; + uint32 NumValidatorFailure = 5 [(gogoproto.jsontag) = "numValidatorFailure"]; + uint32 NumValidatorIgnoredSignatures = 6 [(gogoproto.jsontag) = "numValidatorIgnoredSignatures"]; + float Rating = 7 [(gogoproto.jsontag) = "rating"]; + float RatingModifier = 8 [(gogoproto.jsontag) = "ratingModifier"]; + uint32 TotalNumLeaderSuccess = 9 [(gogoproto.jsontag) = "totalNumLeaderSuccess"]; + uint32 TotalNumLeaderFailure = 10 [(gogoproto.jsontag) = "totalNumLeaderFailure"]; + uint32 
TotalNumValidatorSuccess = 11 [(gogoproto.jsontag) = "totalNumValidatorSuccess"]; + uint32 TotalNumValidatorFailure = 12 [(gogoproto.jsontag) = "totalNumValidatorFailure"]; + uint32 TotalNumValidatorIgnoredSignatures = 13 [(gogoproto.jsontag) = "totalNumValidatorIgnoredSignatures"]; + uint32 ShardId = 14 [(gogoproto.jsontag) = "shardId"]; + string ValidatorStatus = 15 [(gogoproto.jsontag) = "validatorStatus"]; +} diff --git a/go.mod b/go.mod index 3dc52a614..26892ac42 100644 --- a/go.mod +++ b/go.mod @@ -7,7 +7,6 @@ require ( github.com/denisbrodbeck/machineid v1.0.1 github.com/gogo/protobuf v1.3.2 github.com/golang/protobuf v1.5.2 - github.com/hashicorp/golang-lru v0.5.4 github.com/mr-tron/base58 v1.2.0 github.com/pelletier/go-toml v1.9.3 github.com/pkg/errors v0.9.1 diff --git a/go.sum b/go.sum index c8ab99700..1b4ae8775 100644 --- a/go.sum +++ b/go.sum @@ -46,8 +46,6 @@ github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMyw github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.5 h1:Khx7svrCpmxxtHBq5j2mp/xVjsi8hQMfNLvJFAlrGgU= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/hashicorp/golang-lru v0.5.4 h1:YDjusn29QI/Das2iO9M0BHnIbxPeyuCHsjMW+lJfyTc= -github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= github.com/jessevdk/go-flags v0.0.0-20141203071132-1679536dcc89/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= diff --git a/storage/errors.go b/storage/errors.go deleted file mode 100644 index 7671c8818..000000000 --- a/storage/errors.go +++ /dev/null @@ -1,11 +0,0 @@ -package storage - -import ( - "errors" -) - -// ErrCacheSizeInvalid signals that size of cache is less than 1 -var ErrCacheSizeInvalid = errors.New("cache size is less than 1") - -// ErrCacheCapacityInvalid signals that capacity of cache is less than 1 -var ErrCacheCapacityInvalid = errors.New("cache capacity is less than 1") diff --git a/storage/interface.go b/storage/interface.go deleted file mode 100644 index 2cb05c422..000000000 --- a/storage/interface.go +++ /dev/null @@ -1,69 +0,0 @@ -package storage - -// Cacher provides caching services -type Cacher interface { - // Clear is used to completely clear the cache. - Clear() - // Put adds a value to the cache. Returns true if an eviction occurred. - Put(key []byte, value interface{}, sizeInBytes int) (evicted bool) - // Get looks up a key's value from the cache. - Get(key []byte) (value interface{}, ok bool) - // Has checks if a key is in the cache, without updating the - // recent-ness or deleting it for being stale. - Has(key []byte) bool - // Peek returns the key value (or undefined if not found) without updating - // the "recently used"-ness of the key. - Peek(key []byte) (value interface{}, ok bool) - // HasOrAdd checks if a key is in the cache without updating the - // recent-ness or deleting it for being stale, and if not adds the value. - HasOrAdd(key []byte, value interface{}, sizeInBytes int) (has, added bool) - // Remove removes the provided key from the cache. - Remove(key []byte) - // Keys returns a slice of the keys in the cache, from oldest to newest. - Keys() [][]byte - // Len returns the number of items in the cache. 
- Len() int - // SizeInBytesContained returns the size in bytes of all contained elements - SizeInBytesContained() uint64 - // MaxSize returns the maximum number of items which can be stored in the cache. - MaxSize() int - // RegisterHandler registers a new handler to be called when a new data is added - RegisterHandler(handler func(key []byte, value interface{}), id string) - // UnRegisterHandler deletes the handler from the list - UnRegisterHandler(id string) - // Close closes the underlying temporary db if the cacher implementation has one, - // otherwise it does nothing - Close() error - // IsInterfaceNil returns true if there is no value under the interface - IsInterfaceNil() bool -} - -// ForEachItem is an iterator callback -type ForEachItem func(key []byte, value interface{}) - -// LRUCacheHandler is the interface for LRU cache. -type LRUCacheHandler interface { - Add(key, value interface{}) bool - Get(key interface{}) (value interface{}, ok bool) - Contains(key interface{}) (ok bool) - ContainsOrAdd(key, value interface{}) (ok, evicted bool) - Peek(key interface{}) (value interface{}, ok bool) - Remove(key interface{}) bool - Keys() []interface{} - Len() int - Purge() -} - -// SizedLRUCacheHandler is the interface for size capable LRU cache. -type SizedLRUCacheHandler interface { - AddSized(key, value interface{}, sizeInBytes int64) bool - Get(key interface{}) (value interface{}, ok bool) - Contains(key interface{}) (ok bool) - AddSizedIfMissing(key, value interface{}, sizeInBytes int64) (ok, evicted bool) - Peek(key interface{}) (value interface{}, ok bool) - Remove(key interface{}) bool - Keys() []interface{} - Len() int - SizeInBytesContained() uint64 - Purge() -} diff --git a/storage/keyValuePair.go b/storage/keyValuePair.go deleted file mode 100644 index 06a5f11ed..000000000 --- a/storage/keyValuePair.go +++ /dev/null @@ -1,7 +0,0 @@ -package storage - -// KeyValuePair is a tuple of (key, value) -type KeyValuePair struct { - Key []byte - Value []byte -} diff --git a/storage/lrucache/capacity/capacityLRUCache.go b/storage/lrucache/capacity/capacityLRUCache.go deleted file mode 100644 index 275a440d6..000000000 --- a/storage/lrucache/capacity/capacityLRUCache.go +++ /dev/null @@ -1,284 +0,0 @@ -package capacity - -import ( - "container/list" - "sync" - - "github.com/multiversx/mx-chain-core-go/storage" -) - -// capacityLRU implements a non thread safe LRU Cache with a max capacity size -type capacityLRU struct { - lock sync.Mutex - size int - maxCapacityInBytes int64 - currentCapacityInBytes int64 - //TODO investigate if we can replace this list with a binary tree. Check also the other implementation lruCache - evictList *list.List - items map[interface{}]*list.Element -} - -// entry is used to hold a value in the evictList -type entry struct { - key interface{} - value interface{} - size int64 -} - -// NewCapacityLRU constructs an CapacityLRU of the given size with a byte size capacity -func NewCapacityLRU(size int, byteCapacity int64) (*capacityLRU, error) { - if size < 1 { - return nil, storage.ErrCacheSizeInvalid - } - if byteCapacity < 1 { - return nil, storage.ErrCacheCapacityInvalid - } - c := &capacityLRU{ - size: size, - maxCapacityInBytes: byteCapacity, - evictList: list.New(), - items: make(map[interface{}]*list.Element), - } - return c, nil -} - -// Purge is used to completely clear the cache. 
-func (c *capacityLRU) Purge() { - c.lock.Lock() - defer c.lock.Unlock() - - c.items = make(map[interface{}]*list.Element) - c.evictList.Init() - c.currentCapacityInBytes = 0 -} - -// AddSized adds a value to the cache. Returns true if an eviction occurred. -func (c *capacityLRU) AddSized(key, value interface{}, sizeInBytes int64) bool { - c.lock.Lock() - defer c.lock.Unlock() - - c.addSized(key, value, sizeInBytes) - - return c.evictIfNeeded() -} - -func (c *capacityLRU) addSized(key interface{}, value interface{}, sizeInBytes int64) { - if sizeInBytes < 0 { - return - } - - // Check for existing item - if ent, ok := c.items[key]; ok { - c.update(key, value, sizeInBytes, ent) - } else { - c.addNew(key, value, sizeInBytes) - } -} - -// AddSizedAndReturnEvicted adds the given key-value pair to the cache, and returns the evicted values -func (c *capacityLRU) AddSizedAndReturnEvicted(key, value interface{}, sizeInBytes int64) map[interface{}]interface{} { - c.lock.Lock() - defer c.lock.Unlock() - - c.addSized(key, value, sizeInBytes) - - evictedValues := make(map[interface{}]interface{}) - for c.shouldEvict() { - evicted := c.evictList.Back() - if evicted == nil { - continue - } - - c.removeElement(evicted) - evictedEntry, ok := evicted.Value.(*entry) - if !ok { - continue - } - - evictedValues[evictedEntry.key] = evictedEntry.value - } - - return evictedValues -} - -func (c *capacityLRU) addNew(key interface{}, value interface{}, sizeInBytes int64) { - ent := &entry{ - key: key, - value: value, - size: sizeInBytes, - } - e := c.evictList.PushFront(ent) - c.items[key] = e - c.currentCapacityInBytes += sizeInBytes -} - -func (c *capacityLRU) update(key interface{}, value interface{}, sizeInBytes int64, ent *list.Element) { - c.evictList.MoveToFront(ent) - - e := ent.Value.(*entry) - sizeDiff := sizeInBytes - e.size - e.value = value - e.size = sizeInBytes - c.currentCapacityInBytes += sizeDiff - - c.adjustSize(key, sizeInBytes) -} - -// Get looks up a key's value from the cache. -func (c *capacityLRU) Get(key interface{}) (interface{}, bool) { - c.lock.Lock() - defer c.lock.Unlock() - - if ent, ok := c.items[key]; ok { - c.evictList.MoveToFront(ent) - if ent.Value.(*entry) == nil { - return nil, false - } - - return ent.Value.(*entry).value, true - } - - return nil, false -} - -// Contains checks if a key is in the cache, without updating the recent-ness -// or deleting it for being stale. -func (c *capacityLRU) Contains(key interface{}) bool { - c.lock.Lock() - defer c.lock.Unlock() - - _, ok := c.items[key] - - return ok -} - -// AddSizedIfMissing checks if a key is in the cache without updating the -// recent-ness or deleting it for being stale, and if not, adds the value. -// Returns whether found and whether an eviction occurred. -func (c *capacityLRU) AddSizedIfMissing(key, value interface{}, sizeInBytes int64) (bool, bool) { - if sizeInBytes < 0 { - return false, false - } - - c.lock.Lock() - defer c.lock.Unlock() - - _, ok := c.items[key] - if ok { - return true, false - } - c.addNew(key, value, sizeInBytes) - evicted := c.evictIfNeeded() - - return false, evicted -} - -// Peek returns the key value (or undefined if not found) without updating -// the "recently used"-ness of the key. -func (c *capacityLRU) Peek(key interface{}) (interface{}, bool) { - c.lock.Lock() - defer c.lock.Unlock() - - ent, ok := c.items[key] - if ok { - return ent.Value.(*entry).value, true - } - return nil, ok -} - -// Remove removes the provided key from the cache, returning if the -// key was contained. 
-func (c *capacityLRU) Remove(key interface{}) bool { - c.lock.Lock() - defer c.lock.Unlock() - - if ent, ok := c.items[key]; ok { - c.removeElement(ent) - return true - } - return false -} - -// Keys returns a slice of the keys in the cache, from oldest to newest. -func (c *capacityLRU) Keys() []interface{} { - c.lock.Lock() - defer c.lock.Unlock() - - keys := make([]interface{}, len(c.items)) - i := 0 - for ent := c.evictList.Back(); ent != nil; ent = ent.Prev() { - keys[i] = ent.Value.(*entry).key - i++ - } - return keys -} - -// Len returns the number of items in the cache. -func (c *capacityLRU) Len() int { - c.lock.Lock() - defer c.lock.Unlock() - - return c.evictList.Len() -} - -// SizeInBytesContained returns the size in bytes of all contained elements -func (c *capacityLRU) SizeInBytesContained() uint64 { - c.lock.Lock() - defer c.lock.Unlock() - - return uint64(c.currentCapacityInBytes) -} - -// removeOldest removes the oldest item from the cache. -func (c *capacityLRU) removeOldest() { - ent := c.evictList.Back() - if ent != nil { - c.removeElement(ent) - } -} - -// removeElement is used to remove a given list element from the cache -func (c *capacityLRU) removeElement(e *list.Element) { - c.evictList.Remove(e) - kv := e.Value.(*entry) - delete(c.items, kv.key) - c.currentCapacityInBytes -= kv.size -} - -func (c *capacityLRU) adjustSize(key interface{}, sizeInBytes int64) { - element := c.items[key] - if element == nil || element.Value == nil || element.Value.(*entry) == nil { - return - } - - v := element.Value.(*entry) - c.currentCapacityInBytes -= v.size - v.size = sizeInBytes - element.Value = v - c.currentCapacityInBytes += sizeInBytes - c.evictIfNeeded() -} - -func (c *capacityLRU) shouldEvict() bool { - if c.evictList.Len() == 1 { - // keep at least one element, no matter how large it is - return false - } - - return c.evictList.Len() > c.size || c.currentCapacityInBytes > c.maxCapacityInBytes -} - -func (c *capacityLRU) evictIfNeeded() bool { - evicted := false - for c.shouldEvict() { - c.removeOldest() - evicted = true - } - - return evicted -} - -// IsInterfaceNil returns true if there is no value under the interface -func (c *capacityLRU) IsInterfaceNil() bool { - return c == nil -} diff --git a/storage/lrucache/capacity/capacityLRUCache_test.go b/storage/lrucache/capacity/capacityLRUCache_test.go deleted file mode 100644 index c59d4877e..000000000 --- a/storage/lrucache/capacity/capacityLRUCache_test.go +++ /dev/null @@ -1,499 +0,0 @@ -package capacity - -import ( - "testing" - - "github.com/multiversx/mx-chain-core-go/core/check" - "github.com/multiversx/mx-chain-core-go/storage" - "github.com/stretchr/testify/assert" -) - -func createDefaultCache() *capacityLRU { - cache, _ := NewCapacityLRU(100, 100) - return cache -} - -//------- NewCapacityLRU - -func TestNewCapacityLRU_WithInvalidSize(t *testing.T) { - t.Parallel() - - size := 0 - capacity := int64(1) - cache, err := NewCapacityLRU(size, capacity) - assert.True(t, check.IfNil(cache)) - assert.Equal(t, storage.ErrCacheSizeInvalid, err) -} - -func TestNewCapacityLRU_WithInvalidCapacity(t *testing.T) { - t.Parallel() - - size := 1 - capacity := int64(0) - cache, err := NewCapacityLRU(size, capacity) - assert.Nil(t, cache) - assert.Equal(t, storage.ErrCacheCapacityInvalid, err) -} - -func TestNewCapacityLRU(t *testing.T) { - t.Parallel() - - size := 1 - capacity := int64(5) - - cache, err := NewCapacityLRU(size, capacity) - assert.False(t, check.IfNil(cache)) - assert.Nil(t, err) - assert.Equal(t, size, cache.size) 
- assert.Equal(t, capacity, cache.maxCapacityInBytes) - assert.Equal(t, int64(0), cache.currentCapacityInBytes) - assert.NotNil(t, cache.evictList) - assert.NotNil(t, cache.items) -} - -//------- AddSized - -func TestCapacityLRUCache_AddSizedNegativeSizeInBytesShouldReturn(t *testing.T) { - t.Parallel() - - c := createDefaultCache() - data := []byte("test") - key := "key" - c.AddSized(key, data, -1) - - assert.Equal(t, 0, c.Len()) -} - -func TestCapacityLRUCache_AddSizedSimpleTestShouldWork(t *testing.T) { - t.Parallel() - - c := createDefaultCache() - data := []byte("test") - key := "key" - capacity := int64(5) - c.AddSized(key, data, capacity) - - v, ok := c.Get(key) - assert.True(t, ok) - assert.NotNil(t, v) - assert.Equal(t, data, v) - - keys := c.Keys() - assert.Equal(t, 1, len(keys)) - assert.Equal(t, key, keys[0]) -} - -func TestCapacityLRUCache_AddSizedEvictionByCacheSizeShouldWork(t *testing.T) { - t.Parallel() - - c, _ := NewCapacityLRU(3, 100000) - - keys := []string{"key1", "key2", "key3", "key4", "key5"} - - c.AddSized(keys[0], struct{}{}, 0) - assert.Equal(t, 1, c.Len()) - - c.AddSized(keys[1], struct{}{}, 0) - assert.Equal(t, 2, c.Len()) - - c.AddSized(keys[2], struct{}{}, 0) - assert.Equal(t, 3, c.Len()) - - c.AddSized(keys[3], struct{}{}, 0) - assert.Equal(t, 3, c.Len()) - assert.False(t, c.Contains(keys[0])) - assert.True(t, c.Contains(keys[3])) - - c.AddSized(keys[4], struct{}{}, 0) - assert.Equal(t, 3, c.Len()) - assert.False(t, c.Contains(keys[1])) - assert.True(t, c.Contains(keys[4])) -} - -func TestCapacityLRUCache_AddSizedEvictionBySizeInBytesShouldWork(t *testing.T) { - t.Parallel() - - c, _ := NewCapacityLRU(100000, 1000) - - keys := []string{"key1", "key2", "key3", "key4"} - - c.AddSized(keys[0], struct{}{}, 500) - assert.Equal(t, 1, c.Len()) - - c.AddSized(keys[1], struct{}{}, 500) - assert.Equal(t, 2, c.Len()) - - c.AddSized(keys[2], struct{}{}, 500) - assert.Equal(t, 2, c.Len()) - assert.False(t, c.Contains(keys[0])) - assert.True(t, c.Contains(keys[2])) - - c.AddSized(keys[3], struct{}{}, 500) - assert.Equal(t, 2, c.Len()) - assert.False(t, c.Contains(keys[1])) - assert.True(t, c.Contains(keys[3])) -} - -func TestCapacityLRUCache_AddSizedEvictionBySizeInBytesOneLargeElementShouldWork(t *testing.T) { - t.Parallel() - - c, _ := NewCapacityLRU(100000, 1000) - - keys := []string{"key1", "key2", "key3", "key4"} - - c.AddSized(keys[0], struct{}{}, 500) - assert.Equal(t, 1, c.Len()) - - c.AddSized(keys[1], struct{}{}, 500) - assert.Equal(t, 2, c.Len()) - - c.AddSized(keys[2], struct{}{}, 500) - assert.Equal(t, 2, c.Len()) - assert.False(t, c.Contains(keys[0])) - assert.True(t, c.Contains(keys[2])) - - c.AddSized(keys[3], struct{}{}, 500000) - assert.Equal(t, 1, c.Len()) - assert.False(t, c.Contains(keys[0])) - assert.False(t, c.Contains(keys[1])) - assert.False(t, c.Contains(keys[2])) - assert.True(t, c.Contains(keys[3])) -} - -func TestCapacityLRUCache_AddSizedEvictionBySizeInBytesOneLargeElementEvictedBySmallElementsShouldWork(t *testing.T) { - t.Parallel() - - c, _ := NewCapacityLRU(100000, 1000) - - keys := []string{"key1", "key2", "key3"} - - c.AddSized(keys[0], struct{}{}, 500000) - assert.Equal(t, 1, c.Len()) - - c.AddSized(keys[1], struct{}{}, 500) - assert.Equal(t, 1, c.Len()) - - c.AddSized(keys[2], struct{}{}, 500) - assert.Equal(t, 2, c.Len()) - assert.False(t, c.Contains(keys[0])) - assert.True(t, c.Contains(keys[1])) - assert.True(t, c.Contains(keys[2])) -} - -func TestCapacityLRUCache_AddSizedEvictionBySizeInBytesExistingOneLargeElementShouldWork(t 
*testing.T) { - t.Parallel() - - c, _ := NewCapacityLRU(100000, 1000) - - keys := []string{"key1", "key2"} - - c.AddSized(keys[0], struct{}{}, 500) - assert.Equal(t, 1, c.Len()) - - c.AddSized(keys[1], struct{}{}, 500) - assert.Equal(t, 2, c.Len()) - - c.AddSized(keys[0], struct{}{}, 500000) - assert.Equal(t, 1, c.Len()) - assert.True(t, c.Contains(keys[0])) - assert.False(t, c.Contains(keys[1])) -} - -//------- AddSizedIfMissing - -func TestCapacityLRUCache_AddSizedIfMissing(t *testing.T) { - t.Parallel() - - c := createDefaultCache() - data := []byte("data1") - key := "key" - - found, evicted := c.AddSizedIfMissing(key, data, 1) - assert.False(t, found) - assert.False(t, evicted) - - v, ok := c.Get(key) - assert.True(t, ok) - assert.NotNil(t, v) - assert.Equal(t, data, v) - - data2 := []byte("data2") - found, evicted = c.AddSizedIfMissing(key, data2, 1) - assert.True(t, found) - assert.False(t, evicted) - - v, ok = c.Get(key) - assert.True(t, ok) - assert.NotNil(t, v) - assert.Equal(t, data, v) -} - -func TestCapacityLRUCache_AddSizedIfMissingNegativeSizeInBytesShouldReturnFalse(t *testing.T) { - t.Parallel() - - c := createDefaultCache() - data := []byte("data1") - key := "key" - - has, evicted := c.AddSizedIfMissing(key, data, -1) - assert.False(t, has) - assert.False(t, evicted) - assert.Equal(t, 0, c.Len()) -} - -//------- Get - -func TestCapacityLRUCache_GetShouldWork(t *testing.T) { - t.Parallel() - - key := "key" - value := &struct{ A int }{A: 10} - - c := createDefaultCache() - c.AddSized(key, value, 0) - - recovered, exists := c.Get(key) - assert.True(t, value == recovered) //pointer testing - assert.True(t, exists) - - recovered, exists = c.Get("key not found") - assert.Nil(t, recovered) - assert.False(t, exists) -} - -//------- Purge - -func TestCapacityLRUCache_PurgeShouldWork(t *testing.T) { - t.Parallel() - - c, _ := NewCapacityLRU(100000, 1000) - - keys := []string{"key1", "key2"} - c.AddSized(keys[0], struct{}{}, 500) - c.AddSized(keys[1], struct{}{}, 500) - - c.Purge() - - assert.Equal(t, 0, c.Len()) - assert.Equal(t, int64(0), c.currentCapacityInBytes) -} - -//------- Peek - -func TestCapacityLRUCache_PeekNotFoundShouldWork(t *testing.T) { - t.Parallel() - - c, _ := NewCapacityLRU(100000, 1000) - val, found := c.Peek("key not found") - - assert.Nil(t, val) - assert.False(t, found) -} - -func TestCapacityLRUCache_PeekShouldWork(t *testing.T) { - t.Parallel() - - c, _ := NewCapacityLRU(100000, 1000) - key1 := "key1" - key2 := "key2" - val1 := &struct{}{} - - c.AddSized(key1, val1, 0) - c.AddSized(key2, struct{}{}, 0) - - //at this point key2 is more "recent" than key1 - assert.True(t, c.evictList.Front().Value.(*entry).key == key2) - - val, found := c.Peek(key1) - assert.True(t, val == val1) //pointer testing - assert.True(t, found) - - //recentness should not have been altered - assert.True(t, c.evictList.Front().Value.(*entry).key == key2) -} - -//------- Remove - -func TestCapacityLRUCache_RemoveNotFoundShouldWork(t *testing.T) { - t.Parallel() - - c, _ := NewCapacityLRU(100000, 1000) - removed := c.Remove("key not found") - - assert.False(t, removed) -} - -func TestCapacityLRUCache_RemovedShouldWork(t *testing.T) { - t.Parallel() - - c, _ := NewCapacityLRU(100000, 1000) - key1 := "key1" - key2 := "key2" - - c.AddSized(key1, struct{}{}, 0) - c.AddSized(key2, struct{}{}, 0) - - assert.Equal(t, 2, c.Len()) - - c.Remove(key1) - - assert.Equal(t, 1, c.Len()) - assert.True(t, c.Contains(key2)) -} - -// ---------- AddSizedAndReturnEvicted - -func 
TestCapacityLRUCache_AddSizedAndReturnEvictedNegativeSizeInBytesShouldReturn(t *testing.T) { - t.Parallel() - - c := createDefaultCache() - data := []byte("test") - key := "key" - c.AddSizedAndReturnEvicted(key, data, -1) - - assert.Equal(t, 0, c.Len()) -} - -func TestCapacityLRUCache_AddSizedAndReturnEvictedSimpleTestShouldWork(t *testing.T) { - t.Parallel() - - c := createDefaultCache() - data := []byte("test") - key := "key" - capacity := int64(5) - c.AddSizedAndReturnEvicted(key, data, capacity) - - v, ok := c.Get(key) - assert.True(t, ok) - assert.NotNil(t, v) - assert.Equal(t, data, v) - - keys := c.Keys() - assert.Equal(t, 1, len(keys)) - assert.Equal(t, key, keys[0]) -} - -func TestCapacityLRUCache_AddSizedAndReturnEvictedEvictionByCacheSizeShouldWork(t *testing.T) { - t.Parallel() - - c, _ := NewCapacityLRU(3, 100000) - - keys := []string{"key1", "key2", "key3", "key4", "key5"} - values := []string{"val1", "val2", "val3", "val4", "val5"} - - evicted := c.AddSizedAndReturnEvicted(keys[0], values[0], int64(len(values[0]))) - assert.Equal(t, 0, len(evicted)) - assert.Equal(t, 1, c.Len()) - - evicted = c.AddSizedAndReturnEvicted(keys[1], values[1], int64(len(values[1]))) - assert.Equal(t, 0, len(evicted)) - assert.Equal(t, 2, c.Len()) - - evicted = c.AddSizedAndReturnEvicted(keys[2], values[2], int64(len(values[2]))) - assert.Equal(t, 0, len(evicted)) - assert.Equal(t, 3, c.Len()) - - evicted = c.AddSizedAndReturnEvicted(keys[3], values[3], int64(len(values[3]))) - assert.Equal(t, 3, c.Len()) - assert.False(t, c.Contains(keys[0])) - assert.True(t, c.Contains(keys[3])) - assert.Equal(t, 1, len(evicted)) - assert.Equal(t, values[0], evicted[keys[0]]) - - evicted = c.AddSizedAndReturnEvicted(keys[4], values[4], int64(len(values[4]))) - assert.Equal(t, 3, c.Len()) - assert.False(t, c.Contains(keys[1])) - assert.True(t, c.Contains(keys[4])) - assert.Equal(t, 1, len(evicted)) - assert.Equal(t, values[1], evicted[keys[1]]) -} - -func TestCapacityLRUCache_AddSizedAndReturnEvictedEvictionBySizeInBytesShouldWork(t *testing.T) { - t.Parallel() - - c, _ := NewCapacityLRU(100000, 1000) - - keys := []string{"key1", "key2", "key3", "key4"} - values := []string{"val1", "val2", "val3", "val4"} - - evicted := c.AddSizedAndReturnEvicted(keys[0], values[0], 500) - assert.Equal(t, 0, len(evicted)) - assert.Equal(t, 1, c.Len()) - - evicted = c.AddSizedAndReturnEvicted(keys[1], values[1], 500) - assert.Equal(t, 0, len(evicted)) - assert.Equal(t, 2, c.Len()) - - evicted = c.AddSizedAndReturnEvicted(keys[2], values[2], 500) - assert.Equal(t, 2, c.Len()) - assert.False(t, c.Contains(keys[0])) - assert.True(t, c.Contains(keys[2])) - assert.Equal(t, 1, len(evicted)) - assert.Equal(t, values[0], evicted[keys[0]]) - - evicted = c.AddSizedAndReturnEvicted(keys[3], values[3], 500) - assert.Equal(t, 2, c.Len()) - assert.False(t, c.Contains(keys[1])) - assert.True(t, c.Contains(keys[3])) - assert.Equal(t, 1, len(evicted)) - assert.Equal(t, values[1], evicted[keys[1]]) -} - -func TestCapacityLRUCache_AddSizedAndReturnEvictedEvictionBySizeInBytesOneLargeElementShouldWork(t *testing.T) { - t.Parallel() - - c, _ := NewCapacityLRU(100000, 1000) - - keys := []string{"key1", "key2", "key3", "key4"} - values := []string{"val1", "val2", "val3", "val4"} - - evicted := c.AddSizedAndReturnEvicted(keys[0], values[0], 500) - assert.Equal(t, 0, len(evicted)) - assert.Equal(t, 1, c.Len()) - - evicted = c.AddSizedAndReturnEvicted(keys[1], values[1], 500) - assert.Equal(t, 0, len(evicted)) - assert.Equal(t, 2, c.Len()) - - evicted = 
c.AddSizedAndReturnEvicted(keys[2], values[2], 500) - assert.Equal(t, 2, c.Len()) - assert.False(t, c.Contains(keys[0])) - assert.True(t, c.Contains(keys[2])) - assert.Equal(t, 1, len(evicted)) - assert.Equal(t, values[0], evicted[keys[0]]) - - evicted = c.AddSizedAndReturnEvicted(keys[3], values[3], 500000) - assert.Equal(t, 1, c.Len()) - assert.False(t, c.Contains(keys[0])) - assert.False(t, c.Contains(keys[1])) - assert.False(t, c.Contains(keys[2])) - assert.True(t, c.Contains(keys[3])) - assert.Equal(t, 2, len(evicted)) - assert.Equal(t, values[1], evicted[keys[1]]) - assert.Equal(t, values[2], evicted[keys[2]]) -} - -func TestCapacityLRUCache_AddSizedAndReturnEvictedEvictionBySizeInBytesOneLargeElementEvictedBySmallElementsShouldWork(t *testing.T) { - t.Parallel() - - c, _ := NewCapacityLRU(100000, 1000) - - keys := []string{"key1", "key2", "key3"} - values := []string{"val1", "val2", "val3"} - - evicted := c.AddSizedAndReturnEvicted(keys[0], values[0], 500000) - assert.Equal(t, 0, len(evicted)) - assert.Equal(t, 1, c.Len()) - - evicted = c.AddSizedAndReturnEvicted(keys[1], values[1], 500) - assert.Equal(t, 1, c.Len()) - assert.Equal(t, 1, len(evicted)) - assert.Equal(t, values[0], evicted[keys[0]]) - - evicted = c.AddSizedAndReturnEvicted(keys[2], values[2], 500) - assert.Equal(t, 0, len(evicted)) - assert.Equal(t, 2, c.Len()) - assert.False(t, c.Contains(keys[0])) - assert.True(t, c.Contains(keys[1])) - assert.True(t, c.Contains(keys[2])) -} diff --git a/storage/lrucache/export_test.go b/storage/lrucache/export_test.go deleted file mode 100644 index 92889ed26..000000000 --- a/storage/lrucache/export_test.go +++ /dev/null @@ -1,5 +0,0 @@ -package lrucache - -func (c *lruCache) AddedDataHandlers() map[string]func(key []byte, value interface{}) { - return c.mapDataHandlers -} diff --git a/storage/lrucache/lrucache.go b/storage/lrucache/lrucache.go deleted file mode 100644 index 93a8d767c..000000000 --- a/storage/lrucache/lrucache.go +++ /dev/null @@ -1,191 +0,0 @@ -package lrucache - -import ( - "sync" - - lru "github.com/hashicorp/golang-lru" - "github.com/multiversx/mx-chain-core-go/storage" - "github.com/multiversx/mx-chain-core-go/storage/lrucache/capacity" -) - -var _ storage.Cacher = (*lruCache)(nil) - -// LRUCache implements a Least Recently Used eviction cache -type lruCache struct { - cache storage.SizedLRUCacheHandler - maxsize int - - mutAddedDataHandlers sync.RWMutex - mapDataHandlers map[string]func(key []byte, value interface{}) -} - -// NewCache creates a new LRU cache instance -func NewCache(size int) (*lruCache, error) { - cache, err := lru.New(size) - if err != nil { - return nil, err - } - - c := createLRUCache(size, cache) - - return c, nil -} - -// NewCacheWithEviction creates a new sized LRU cache instance with eviction function -func NewCacheWithEviction(size int, onEvicted func(key interface{}, value interface{})) (*lruCache, error) { - cache, err := lru.NewWithEvict(size, onEvicted) - if err != nil { - return nil, err - } - - c := createLRUCache(size, cache) - - return c, nil -} - -func createLRUCache(size int, cache *lru.Cache) *lruCache { - c := &lruCache{ - cache: &simpleLRUCacheAdapter{ - LRUCacheHandler: cache, - }, - maxsize: size, - mutAddedDataHandlers: sync.RWMutex{}, - mapDataHandlers: make(map[string]func(key []byte, value interface{})), - } - return c -} - -// NewCacheWithSizeInBytes creates a new sized LRU cache instance -func NewCacheWithSizeInBytes(size int, sizeInBytes int64) (*lruCache, error) { - cache, err := capacity.NewCapacityLRU(size, 
sizeInBytes) - if err != nil { - return nil, err - } - - c := &lruCache{ - cache: cache, - maxsize: size, - mutAddedDataHandlers: sync.RWMutex{}, - mapDataHandlers: make(map[string]func(key []byte, value interface{})), - } - - return c, nil -} - -// Clear is used to completely clear the cache. -func (c *lruCache) Clear() { - c.cache.Purge() -} - -// Put adds a value to the cache. Returns true if an eviction occurred. -func (c *lruCache) Put(key []byte, value interface{}, sizeInBytes int) (evicted bool) { - evicted = c.cache.AddSized(string(key), value, int64(sizeInBytes)) - - c.callAddedDataHandlers(key, value) - - return evicted -} - -// RegisterHandler registers a new handler to be called when a new data is added -func (c *lruCache) RegisterHandler(handler func(key []byte, value interface{}), id string) { - if handler == nil { - return - } - - c.mutAddedDataHandlers.Lock() - c.mapDataHandlers[id] = handler - c.mutAddedDataHandlers.Unlock() -} - -// UnRegisterHandler removes the handler from the list -func (c *lruCache) UnRegisterHandler(id string) { - c.mutAddedDataHandlers.Lock() - delete(c.mapDataHandlers, id) - c.mutAddedDataHandlers.Unlock() -} - -// Get looks up a key's value from the cache. -func (c *lruCache) Get(key []byte) (value interface{}, ok bool) { - return c.cache.Get(string(key)) -} - -// Has checks if a key is in the cache, without updating the -// recent-ness or deleting it for being stale. -func (c *lruCache) Has(key []byte) bool { - return c.cache.Contains(string(key)) -} - -// Peek returns the key value (or undefined if not found) without updating -// the "recently used"-ness of the key. -func (c *lruCache) Peek(key []byte) (value interface{}, ok bool) { - v, ok := c.cache.Peek(string(key)) - - if !ok { - return nil, ok - } - - return v, ok -} - -// HasOrAdd checks if a key is in the cache without updating the -// recent-ness or deleting it for being stale, and if not, adds the value. -// Returns whether found and whether an eviction occurred. -func (c *lruCache) HasOrAdd(key []byte, value interface{}, sizeInBytes int) (has, added bool) { - has, _ = c.cache.AddSizedIfMissing(string(key), value, int64(sizeInBytes)) - - if !has { - c.callAddedDataHandlers(key, value) - } - - return has, !has -} - -func (c *lruCache) callAddedDataHandlers(key []byte, value interface{}) { - c.mutAddedDataHandlers.RLock() - for _, handler := range c.mapDataHandlers { - go handler(key, value) - } - c.mutAddedDataHandlers.RUnlock() -} - -// Remove removes the provided key from the cache. -func (c *lruCache) Remove(key []byte) { - c.cache.Remove(string(key)) -} - -// Keys returns a slice of the keys in the cache, from oldest to newest. -func (c *lruCache) Keys() [][]byte { - res := c.cache.Keys() - r := make([][]byte, len(res)) - - for i := 0; i < len(res); i++ { - r[i] = []byte(res[i].(string)) - } - - return r -} - -// Len returns the number of items in the cache. -func (c *lruCache) Len() int { - return c.cache.Len() -} - -// SizeInBytesContained returns the size in bytes of all contained elements -func (c *lruCache) SizeInBytesContained() uint64 { - return c.cache.SizeInBytesContained() -} - -// MaxSize returns the maximum number of items which can be stored in cache. 
-func (c *lruCache) MaxSize() int { - return c.maxsize -} - -// Close does nothing for this cacher implementation -func (c *lruCache) Close() error { - return nil -} - -// IsInterfaceNil returns true if there is no value under the interface -func (c *lruCache) IsInterfaceNil() bool { - return c == nil -} diff --git a/storage/lrucache/lrucache_test.go b/storage/lrucache/lrucache_test.go deleted file mode 100644 index 8265b00cf..000000000 --- a/storage/lrucache/lrucache_test.go +++ /dev/null @@ -1,420 +0,0 @@ -package lrucache_test - -import ( - "bytes" - "fmt" - "sync" - "testing" - "time" - - "github.com/multiversx/mx-chain-core-go/core/check" - "github.com/multiversx/mx-chain-core-go/storage" - "github.com/multiversx/mx-chain-core-go/storage/lrucache" - "github.com/stretchr/testify/assert" -) - -var timeoutWaitForWaitGroups = time.Second * 2 - -//------- NewCache - -func TestNewCache_BadSizeShouldErr(t *testing.T) { - t.Parallel() - - c, err := lrucache.NewCache(0) - - assert.True(t, check.IfNil(c)) - assert.NotNil(t, err) -} - -func TestNewCache_ShouldWork(t *testing.T) { - t.Parallel() - - c, err := lrucache.NewCache(1) - - assert.False(t, check.IfNil(c)) - assert.Nil(t, err) -} - -//------- NewCacheWithSizeInBytes - -func TestNewCacheWithSizeInBytes_BadSizeShouldErr(t *testing.T) { - t.Parallel() - - c, err := lrucache.NewCacheWithSizeInBytes(0, 100000) - - assert.True(t, check.IfNil(c)) - assert.Equal(t, storage.ErrCacheSizeInvalid, err) -} - -func TestNewCacheWithSizeInBytes_BadSizeInBytesShouldErr(t *testing.T) { - t.Parallel() - - c, err := lrucache.NewCacheWithSizeInBytes(1, 0) - - assert.True(t, check.IfNil(c)) - assert.Equal(t, storage.ErrCacheCapacityInvalid, err) -} - -func TestNewCacheWithSizeInBytes_ShouldWork(t *testing.T) { - t.Parallel() - - c, err := lrucache.NewCacheWithSizeInBytes(1, 100000) - - assert.False(t, check.IfNil(c)) - assert.Nil(t, err) -} - -func TestLRUCache_PutNotPresent(t *testing.T) { - t.Parallel() - - key, val := []byte("key"), []byte("value") - c, _ := lrucache.NewCache(10) - - l := c.Len() - - assert.Zero(t, l, "cache expected to be empty") - - c.Put(key, val, 0) - l = c.Len() - - assert.Equal(t, l, 1, "cache size expected 1 but found %d", l) -} - -func TestLRUCache_PutPresent(t *testing.T) { - t.Parallel() - - key, val := []byte("key"), []byte("value") - c, _ := lrucache.NewCache(10) - - c.Put(key, val, 0) - c.Put(key, val, 0) - - l := c.Len() - assert.Equal(t, l, 1, "cache size expected 1 but found %d", l) -} - -func TestLRUCache_PutPresentRewrite(t *testing.T) { - t.Parallel() - - key := []byte("key") - val1 := []byte("value1") - val2 := []byte("value2") - c, _ := lrucache.NewCache(10) - - c.Put(key, val1, 0) - c.Put(key, val2, 0) - - l := c.Len() - assert.Equal(t, l, 1, "cache size expected 1 but found %d", l) - recoveredVal, has := c.Get(key) - assert.True(t, has) - assert.Equal(t, val2, recoveredVal) -} - -func TestLRUCache_GetNotPresent(t *testing.T) { - t.Parallel() - - key := []byte("key1") - c, _ := lrucache.NewCache(10) - - v, ok := c.Get(key) - - assert.False(t, ok, "value %s not expected to be found", v) -} - -func TestLRUCache_GetPresent(t *testing.T) { - t.Parallel() - - key, val := []byte("key2"), []byte("value2") - c, _ := lrucache.NewCache(10) - - c.Put(key, val, 0) - - v, ok := c.Get(key) - - assert.True(t, ok, "value expected but not found") - assert.Equal(t, val, v) -} - -func TestLRUCache_HasNotPresent(t *testing.T) { - t.Parallel() - - key := []byte("key3") - c, _ := lrucache.NewCache(10) - - found := c.Has(key) - - 
assert.False(t, found, "key %s not expected to be found", key) -} - -func TestLRUCache_HasPresent(t *testing.T) { - t.Parallel() - - key, val := []byte("key4"), []byte("value4") - c, _ := lrucache.NewCache(10) - - c.Put(key, val, 0) - - found := c.Has(key) - - assert.True(t, found, "value expected but not found") -} - -func TestLRUCache_PeekNotPresent(t *testing.T) { - t.Parallel() - - key := []byte("key5") - c, _ := lrucache.NewCache(10) - - _, ok := c.Peek(key) - - assert.False(t, ok, "not expected to find key %s", key) -} - -func TestLRUCache_PeekPresent(t *testing.T) { - t.Parallel() - - key, val := []byte("key6"), []byte("value6") - c, _ := lrucache.NewCache(10) - - c.Put(key, val, 0) - v, ok := c.Peek(key) - - assert.True(t, ok, "value expected but not found") - assert.Equal(t, val, v, "expected to find %s but found %s", val, v) -} - -func TestLRUCache_HasOrAddNotPresent(t *testing.T) { - t.Parallel() - - key, val := []byte("key7"), []byte("value7") - c, _ := lrucache.NewCache(10) - - _, ok := c.Peek(key) - assert.False(t, ok, "not expected to find key %s", key) - - c.HasOrAdd(key, val, 0) - v, ok := c.Peek(key) - assert.True(t, ok, "value expected but not found") - assert.Equal(t, val, v, "expected to find %s but found %s", val, v) -} - -func TestLRUCache_HasOrAddPresent(t *testing.T) { - t.Parallel() - - key, val := []byte("key8"), []byte("value8") - c, _ := lrucache.NewCache(10) - - _, ok := c.Peek(key) - - assert.False(t, ok, "not expected to find key %s", key) - - c.HasOrAdd(key, val, 0) - v, ok := c.Peek(key) - - assert.True(t, ok, "value expected but not found") - assert.Equal(t, val, v, "expected to find %s but found %s", val, v) -} - -func TestLRUCache_RemoveNotPresent(t *testing.T) { - t.Parallel() - - key := []byte("key9") - c, _ := lrucache.NewCache(10) - - found := c.Has(key) - - assert.False(t, found, "not expected to find key %s", key) - - c.Remove(key) - found = c.Has(key) - - assert.False(t, found, "not expected to find key %s", key) -} - -func TestLRUCache_RemovePresent(t *testing.T) { - t.Parallel() - - key, val := []byte("key10"), []byte("value10") - c, _ := lrucache.NewCache(10) - - c.Put(key, val, 0) - found := c.Has(key) - - assert.True(t, found, "expected to find key %s", key) - - c.Remove(key) - found = c.Has(key) - - assert.False(t, found, "not expected to find key %s", key) -} - -func TestLRUCache_Keys(t *testing.T) { - t.Parallel() - - c, _ := lrucache.NewCache(10) - - for i := 0; i < 20; i++ { - key, val := []byte(fmt.Sprintf("key%d", i)), []byte(fmt.Sprintf("value%d", i)) - c.Put(key, val, 0) - } - - keys := c.Keys() - - // check also that cache size does not grow over the capacity - assert.Equal(t, 10, len(keys), "expected cache size 10 but current size %d", len(keys)) -} - -func TestLRUCache_Len(t *testing.T) { - t.Parallel() - - c, _ := lrucache.NewCache(10) - - for i := 0; i < 20; i++ { - key, val := []byte(fmt.Sprintf("key%d", i)), []byte(fmt.Sprintf("value%d", i)) - c.Put(key, val, 0) - } - - l := c.Len() - - assert.Equal(t, 10, l, "expected cache size 10 but current size %d", l) -} - -func TestLRUCache_Clear(t *testing.T) { - t.Parallel() - - c, _ := lrucache.NewCache(10) - - for i := 0; i < 5; i++ { - key, val := []byte(fmt.Sprintf("key%d", i)), []byte(fmt.Sprintf("value%d", i)) - c.Put(key, val, 0) - } - - l := c.Len() - - assert.Equal(t, 5, l, "expected size 5, got %d", l) - - c.Clear() - l = c.Len() - - assert.Zero(t, l, "expected size 0, got %d", l) -} - -func TestLRUCache_CacherRegisterAddedDataHandlerNilHandlerShouldIgnore(t *testing.T) { - 
t.Parallel() - - c, _ := lrucache.NewCache(100) - c.RegisterHandler(nil, "") - - assert.Equal(t, 0, len(c.AddedDataHandlers())) -} - -func TestLRUCache_CacherRegisterPutAddedDataHandlerShouldWork(t *testing.T) { - t.Parallel() - - wg := sync.WaitGroup{} - wg.Add(1) - chDone := make(chan bool) - - f := func(key []byte, value interface{}) { - if !bytes.Equal([]byte("aaaa"), key) { - return - } - - wg.Done() - } - - go func() { - wg.Wait() - chDone <- true - }() - - c, _ := lrucache.NewCache(100) - c.RegisterHandler(f, "") - c.Put([]byte("aaaa"), "bbbb", 0) - - select { - case <-chDone: - case <-time.After(timeoutWaitForWaitGroups): - assert.Fail(t, "should have been called") - return - } - - assert.Equal(t, 1, len(c.AddedDataHandlers())) -} - -func TestLRUCache_CacherRegisterHasOrAddAddedDataHandlerShouldWork(t *testing.T) { - t.Parallel() - - wg := sync.WaitGroup{} - wg.Add(1) - chDone := make(chan bool) - - f := func(key []byte, value interface{}) { - if !bytes.Equal([]byte("aaaa"), key) { - return - } - - wg.Done() - } - - go func() { - wg.Wait() - chDone <- true - }() - - c, _ := lrucache.NewCache(100) - c.RegisterHandler(f, "") - c.HasOrAdd([]byte("aaaa"), "bbbb", 0) - - select { - case <-chDone: - case <-time.After(timeoutWaitForWaitGroups): - assert.Fail(t, "should have been called") - return - } - - assert.Equal(t, 1, len(c.AddedDataHandlers())) -} - -func TestLRUCache_CacherRegisterHasOrAddAddedDataHandlerNotAddedShouldNotCall(t *testing.T) { - t.Parallel() - - wg := sync.WaitGroup{} - wg.Add(1) - chDone := make(chan bool) - - f := func(key []byte, value interface{}) { - wg.Done() - } - - go func() { - wg.Wait() - chDone <- true - }() - - c, _ := lrucache.NewCache(100) - //first add, no call - c.HasOrAdd([]byte("aaaa"), "bbbb", 0) - c.RegisterHandler(f, "") - //second add, should not call as the data was found - c.HasOrAdd([]byte("aaaa"), "bbbb", 0) - - select { - case <-chDone: - assert.Fail(t, "should have not been called") - return - case <-time.After(timeoutWaitForWaitGroups): - } - - assert.Equal(t, 1, len(c.AddedDataHandlers())) -} - -func TestLRUCache_CloseShouldNotErr(t *testing.T) { - t.Parallel() - - c, _ := lrucache.NewCache(1) - - err := c.Close() - assert.Nil(t, err) -} diff --git a/storage/lrucache/simpleLRUCacheAdapter.go b/storage/lrucache/simpleLRUCacheAdapter.go deleted file mode 100644 index 82b481bd3..000000000 --- a/storage/lrucache/simpleLRUCacheAdapter.go +++ /dev/null @@ -1,23 +0,0 @@ -package lrucache - -import "github.com/multiversx/mx-chain-core-go/storage" - -// simpleLRUCacheAdapter provides an adapter between LRUCacheHandler and SizeLRUCacheHandler -type simpleLRUCacheAdapter struct { - storage.LRUCacheHandler -} - -// AddSized calls the Add method without the size in bytes parameter -func (slca *simpleLRUCacheAdapter) AddSized(key, value interface{}, _ int64) bool { - return slca.Add(key, value) -} - -// AddSizedIfMissing calls ContainsOrAdd without the size in bytes parameter -func (slca *simpleLRUCacheAdapter) AddSizedIfMissing(key, value interface{}, _ int64) (ok, evicted bool) { - return slca.ContainsOrAdd(key, value) -} - -// SizeInBytesContained returns 0 -func (slca *simpleLRUCacheAdapter) SizeInBytesContained() uint64 { - return 0 -}
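
As a usage reference for the generated ValidatorStatistics API added in this diff (Marshal, Unmarshal and the Get* accessors shown above), here is a minimal round-trip sketch. The import path and the sample field values are assumptions for illustration; the go_package option only fixes the package name to "validator".

package main

import (
	"fmt"

	// Assumed import path, derived from the data/validator location of the generated file.
	"github.com/multiversx/mx-chain-core-go/data/validator"
)

func main() {
	stats := &validator.ValidatorStatistics{
		TempRating:       95.5,
		NumLeaderSuccess: 10,
		ShardId:          1,
		ValidatorStatus:  "eligible",
	}

	// Marshal delegates to MarshalToSizedBuffer, which writes fields from the
	// highest tag (ValidatorStatus, 15) down to the lowest (TempRating, 1).
	buff, err := stats.Marshal()
	if err != nil {
		panic(err)
	}

	recovered := &validator.ValidatorStatistics{}
	if err = recovered.Unmarshal(buff); err != nil {
		panic(err)
	}

	fmt.Println(recovered.GetTempRating(), recovered.GetValidatorStatus())
}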
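
The gogoproto.jsontag options in validatorStatistics.proto are expected to surface as json struct tags on the generated type, so plain encoding/json should emit the camelCase keys declared there ("tempRating", "shardId", "validatorStatus", ...). A short sketch under that assumption, again with a hypothetical import path and sample values:

package main

import (
	"encoding/json"
	"fmt"

	// Assumed import path for the generated validator package.
	"github.com/multiversx/mx-chain-core-go/data/validator"
)

func main() {
	stats := &validator.ValidatorStatistics{
		Rating:          50.0,
		ShardId:         2,
		ValidatorStatus: "waiting",
	}

	// With the declared json tags (no omitempty), zero-valued fields are also emitted.
	out, err := json.Marshal(stats)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out))
}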