diff --git a/CHANGES.txt b/CHANGES.txt index 902732ff..f483093f 100644 --- a/CHANGES.txt +++ b/CHANGES.txt @@ -1,3 +1,9 @@ +5.5.0 (Dec 12, 2023) + - Added support for Flag Sets on Split Proxy and Synchronizer, which enables SDKs to interact with the flag sets feature (more details in our documentation): + - Updated the Proxy endpoints used by SDKs to fetch flags so that they handle SDKs downloading flags in certain flag sets. + - Updated the Synchronizer to properly handle flag sets in Redis. + - Added configuration options to specify which flag sets to use as a filter when downloading flag definitions. Please refer to our docs to learn more. + 5.4.2 (Nov 7, 2023) - Updated docker images for vulnerability fixes. - Updated dependencies for vulnerability fixes. diff --git a/Makefile b/Makefile index 3d2c3ffa..fefba63d 100644 --- a/Makefile +++ b/Makefile @@ -110,6 +110,8 @@ images_release: # entrypoints go.sum: go.mod $(GO) mod tidy +coverage.out: test_coverage + # because of windows .exe suffix, we need a macro on the right side, which needs to be executed # after the `%` evaluation, therefore, in a second expansion .SECONDEXPANSION: diff --git a/cmd/proxy/main.go b/cmd/proxy/main.go index 69235258..9920fa9b 100644 --- a/cmd/proxy/main.go +++ b/cmd/proxy/main.go @@ -34,7 +34,10 @@ func setupConfig(cliArgs *cconf.CliFlags) (*conf.Main, error) { } cconf.PopulateFromArguments(&proxyConf, cliArgs.RawConfig) - return &proxyConf, nil + + var err error + proxyConf.FlagSetsFilter, err = cconf.ValidateFlagsets(proxyConf.FlagSetsFilter) + return &proxyConf, err } func main() { @@ -57,8 +60,13 @@ func main() { cfg, err := setupConfig(cliArgs) if err != nil { - fmt.Println("error processing config: ", err) - os.Exit(exitCodeConfigError) + var fsErr cconf.FlagSetValidationError + if errors.As(err, &fsErr) { + fmt.Println("error processing flagsets: ", err.Error()) + } else { + fmt.Println("error processing config: ", err) + os.Exit(exitCodeConfigError) + } } logger := log.BuildFromConfig(&cfg.Logging, "Split-Proxy", &cfg.Integrations.Slack) diff --git a/cmd/synchronizer/main.go b/cmd/synchronizer/main.go index a19742ec..ac6f99ac 100644 --- a/cmd/synchronizer/main.go +++ b/cmd/synchronizer/main.go @@ -34,7 +34,10 @@ func setupConfig(cliArgs *cconf.CliFlags) (*conf.Main, error) { } cconf.PopulateFromArguments(&syncConf, cliArgs.RawConfig) - return &syncConf, nil + + var err error + syncConf.FlagSetsFilter, err = cconf.ValidateFlagsets(syncConf.FlagSetsFilter) + return &syncConf, err } func main() { @@ -57,8 +60,13 @@ func main() { cfg, err := setupConfig(cliArgs) if err != nil { - fmt.Println("error processing config: ", err) - os.Exit(exitCodeConfigError) + var fsErr cconf.FlagSetValidationError + if errors.As(err, &fsErr) { + fmt.Println("error processing flagset: ", err.Error()) + } else { + fmt.Println("error processing config: ", err) + os.Exit(exitCodeConfigError) + } } logger := log.BuildFromConfig(&cfg.Logging, "Split-Sync", &cfg.Integrations.Slack) diff --git a/go.mod b/go.mod index f8c79a4b..fd37ae1d 100644 --- a/go.mod +++ b/go.mod @@ -8,10 +8,11 @@ require ( github.com/gin-gonic/gin v1.9.1 github.com/google/uuid v1.3.0 github.com/splitio/gincache v1.0.1 - github.com/splitio/go-split-commons/v5 v5.0.0 - github.com/splitio/go-toolkit/v5 v5.3.1 - github.com/stretchr/testify v1.8.3 + github.com/splitio/go-split-commons/v5 v5.1.0 + github.com/splitio/go-toolkit/v5 v5.3.2 + github.com/stretchr/testify v1.8.4 go.etcd.io/bbolt v1.3.6 + golang.org/x/exp v0.0.0-20231006140011-7918f672742d ) require 
( @@ -28,7 +29,6 @@ require ( github.com/go-playground/universal-translator v0.18.1 // indirect github.com/go-playground/validator/v10 v10.14.0 // indirect github.com/goccy/go-json v0.10.2 // indirect - github.com/google/go-cmp v0.5.6 // indirect github.com/json-iterator/go v1.1.12 // indirect github.com/klauspost/cpuid/v2 v2.2.4 // indirect github.com/leodido/go-urn v1.2.4 // indirect diff --git a/go.sum b/go.sum index 7e41948d..5bf34df0 100644 --- a/go.sum +++ b/go.sum @@ -48,8 +48,8 @@ github.com/goccy/go-json v0.10.2 h1:CrxCmQqYDkv1z7lO7Wbh2HN93uovUHgrECaO5ZrCXAU= github.com/goccy/go-json v0.10.2/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I= github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.6 h1:BKbKCqvP6I+rmFHt06ZmyQtvB8xAkWdhFyr0ZUNZcxQ= -github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.8 h1:e6P7q2lk1O+qJJb4BtCQXlK8vWEO8V1ZeuEdJNOqZyg= +github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= @@ -90,10 +90,10 @@ github.com/rogpeppe/go-internal v1.8.0 h1:FCbCCtXNOY3UtUuHUYaghJg4y7Fd14rXifAYUA github.com/rogpeppe/go-internal v1.8.0/go.mod h1:WmiCO8CzOY8rg0OYDC4/i/2WRWAB6poM+XZ2dLUbcbE= github.com/splitio/gincache v1.0.1 h1:dLYdANY/BqH4KcUMCe/LluLyV5WtuE/LEdQWRE06IXU= github.com/splitio/gincache v1.0.1/go.mod h1:CcgJDSM9Af75kyBH0724v55URVwMBuSj5x1eCWIOECY= -github.com/splitio/go-split-commons/v5 v5.0.0 h1:bGRi0cf1JP5VNSi0a4BPQEWv/DACkeSKliazhPMVDPk= -github.com/splitio/go-split-commons/v5 v5.0.0/go.mod h1:lzoVmYJaCqB8UPSxWva0BZe7fF+bRJD+eP0rNi/lL7c= -github.com/splitio/go-toolkit/v5 v5.3.1 h1:9J/byd0fRxWj5/Zg0QZOnUxKBDIAMCGr7rySYzJKdJg= -github.com/splitio/go-toolkit/v5 v5.3.1/go.mod h1:xYhUvV1gga9/1029Wbp5pjnR6Cy8nvBpjw99wAbsMko= +github.com/splitio/go-split-commons/v5 v5.1.0 h1:mki1235gjXwuxcXdv/bKVduX1Lv09uXJogds+BspqSM= +github.com/splitio/go-split-commons/v5 v5.1.0/go.mod h1:9vAZrlhKvhensyRC11hyVFdgLIBrkX9D5vdYc9qB13w= +github.com/splitio/go-toolkit/v5 v5.3.2 h1:Yy9YBcHRmK5WVZjeA/klLGEdF38xpsL1ejnC3ro8a2M= +github.com/splitio/go-toolkit/v5 v5.3.2/go.mod h1:xYhUvV1gga9/1029Wbp5pjnR6Cy8nvBpjw99wAbsMko= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= github.com/stretchr/objx v0.5.0 h1:1zr/of2m5FGMsad5YfcqgdqdWrIhu+EBEJRhR1U7z/c= @@ -105,8 +105,9 @@ github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/ github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= -github.com/stretchr/testify v1.8.3 h1:RP3t2pwF7cMEbC1dqtB6poj3niw/9gnV4Cjg5oW5gtY= github.com/stretchr/testify v1.8.3/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= +github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= +github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= 
github.com/twitchyliquid64/golang-asm v0.15.1 h1:SU5vSMR7hnwNxj24w34ZyCi/FmDZTkS4MhqMhdFk5YI= github.com/twitchyliquid64/golang-asm v0.15.1/go.mod h1:a1lVb/DtPvCB8fslRZhAngC2+aY1QWCk3Cedj/Gdt08= github.com/twmb/murmur3 v1.1.6 h1:mqrRot1BRxm+Yct+vavLMou2/iJt0tNVTTC0QoIjaZg= @@ -123,6 +124,8 @@ golang.org/x/arch v0.3.0/go.mod h1:5om86z9Hs0C8fWVUuoMHwpExlXzs5Tkyp9hOrfG7pp8= golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.14.0 h1:wBqGXzWJW6m1XrIKlAH0Hs1JJ7+9KBwnIO8v66Q9cHc= golang.org/x/crypto v0.14.0/go.mod h1:MVFd36DqK4CsrnJYDkBA3VC4m2GkXAM0PvzMCn4JQf4= +golang.org/x/exp v0.0.0-20231006140011-7918f672742d h1:jtJma62tbqLibJ5sFQz8bKtEM8rJBtfilJ2qTU199MI= +golang.org/x/exp v0.0.0-20231006140011-7918f672742d/go.mod h1:ldy0pHrwJyGW56pPQzzkH36rKxoZW1tw7ZJpeKx+hdo= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.17.0 h1:pVaXccu2ozPjCXewfr1S7xza/zcXTity9cCdXQYSjIM= golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE= @@ -143,7 +146,6 @@ golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.13.0 h1:ablQoSUd0tRdKxZewP80B+BaqeKJuVhuRxj/dkrun3k= golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= diff --git a/splitio/commitversion.go b/splitio/commitversion.go index f56361af..bee01bac 100644 --- a/splitio/commitversion.go +++ b/splitio/commitversion.go @@ -5,4 +5,4 @@ This file is created automatically, please do not edit */ // CommitVersion is the version of the last commit previous to release -const CommitVersion = "2db6c6e" +const CommitVersion = "cf3da63" diff --git a/splitio/common/conf/validators.go b/splitio/common/conf/validators.go new file mode 100644 index 00000000..ff369328 --- /dev/null +++ b/splitio/common/conf/validators.go @@ -0,0 +1,28 @@ +package conf + +import ( + "strings" + + "github.com/splitio/go-split-commons/v5/flagsets" +) + +type FlagSetValidationError struct { + wrapped []error +} + +func (f FlagSetValidationError) Error() string { + var errors []string + for _, err := range f.wrapped { + errors = append(errors, err.Error()) + } + return strings.Join(errors, ".|| ") +} + +func ValidateFlagsets(sets []string) ([]string, error) { + var toRet error + sanitizedFlagSets, fsErr := flagsets.SanitizeMany(sets) + if fsErr != nil { + toRet = FlagSetValidationError{wrapped: fsErr} + } + return sanitizedFlagSets, toRet +} diff --git a/splitio/common/conf/validators_test.go b/splitio/common/conf/validators_test.go new file mode 100644 index 00000000..d7a60b52 --- /dev/null +++ b/splitio/common/conf/validators_test.go @@ -0,0 +1,24 @@ +package conf + +import ( + "testing" + + "github.com/splitio/go-split-commons/v5/dtos" + "github.com/stretchr/testify/assert" +) + +func TestFlagSetValidationError(t *testing.T) { + + sanitized, err := ValidateFlagsets([]string{"Flagset1", " flagset2 ", "123#@flagset"}) + assert.NotNil(t, err) + 
assert.Equal(t, []string{"flagset1", "flagset2"}, sanitized) + + asFVE := err.(FlagSetValidationError) + assert.Equal(t, 3, len(asFVE.wrapped)) + assert.ElementsMatch(t, []error{ + dtos.FlagSetValidatonError{Message: "Flag Set name Flagset1 should be all lowercase - converting string to lowercase"}, + dtos.FlagSetValidatonError{Message: "Flag Set name flagset2 has extra whitespace, trimming"}, + dtos.FlagSetValidatonError{Message: "you passed 123#@flagset, Flag Set must adhere to the regular expressions ^[a-z0-9][_a-z0-9]{0,49}$. This means a Flag Set must " + + "start with a letter or number, be in lowercase, alphanumeric and have a max length of 50 characters. 123#@flagset was discarded."}, + }, asFVE.wrapped) +} diff --git a/splitio/producer/conf/sections.go b/splitio/producer/conf/sections.go index 32808ab6..ba26d57e 100644 --- a/splitio/producer/conf/sections.go +++ b/splitio/producer/conf/sections.go @@ -9,6 +9,7 @@ import ( type Main struct { Apikey string `json:"apikey" s-cli:"apikey" s-def:"" s-desc:"Split server side SDK key"` IPAddressEnabled bool `json:"ipAddressEnabled" s-cli:"ip-address-enabled" s-def:"true" s-desc:"Bundle host's ip address when sending data to Split"` + FlagSetsFilter []string `json:"flagSetsFilter" s-cli:"flag-sets-filter" s-def:"" s-desc:"Flag Sets Filter provided"` Initialization Initialization `json:"initialization" s-nested:"true"` Storage Storage `json:"storage" s-nested:"true"` Sync Sync `json:"sync" s-nested:"true"` diff --git a/splitio/producer/initialization.go b/splitio/producer/initialization.go index 532953b7..48fe4b9c 100644 --- a/splitio/producer/initialization.go +++ b/splitio/producer/initialization.go @@ -7,6 +7,7 @@ import ( cconf "github.com/splitio/go-split-commons/v5/conf" "github.com/splitio/go-split-commons/v5/dtos" + "github.com/splitio/go-split-commons/v5/flagsets" "github.com/splitio/go-split-commons/v5/provisional/strategy" "github.com/splitio/go-split-commons/v5/service/api" "github.com/splitio/go-split-commons/v5/storage/filter" @@ -46,6 +47,7 @@ const ( func Start(logger logging.LoggerInterface, cfg *conf.Main) error { // Getting initial config data advanced := cfg.BuildAdvancedConfig() + advanced.FlagSetsFilter = cfg.FlagSetsFilter metadata := util.GetMetadata(false, cfg.IPAddressEnabled) clientKey, err := util.GetClientKey(cfg.Apikey) @@ -84,8 +86,11 @@ func Start(logger logging.LoggerInterface, cfg *conf.Main) error { syncTelemetryStorage, _ := inmemory.NewTelemetryStorage() sdkTelemetryStorage := storage.NewRedisTelemetryCosumerclient(redisClient, logger) + // FlagSetsFilter + flagSetsFilter := flagsets.NewFlagSetFilter(cfg.FlagSetsFilter) + // These storages are forwarded to the dashboard, the sdk-telemetry is irrelevant there - splitStorage, err := observability.NewObservableSplitStorage(redis.NewSplitStorage(redisClient, logger), logger) + splitStorage, err := observability.NewObservableSplitStorage(redis.NewSplitStorage(redisClient, logger, flagSetsFilter), logger) if err != nil { return fmt.Errorf("error instantiating observable feature flag storage: %w", err) } @@ -118,7 +123,7 @@ func Start(logger logging.LoggerInterface, cfg *conf.Main) error { eventEvictionMonitor := evcalc.New(1) workers := synchronizer.Workers{ - SplitUpdater: split.NewSplitUpdater(storages.SplitStorage, splitAPI.SplitFetcher, logger, syncTelemetryStorage, appMonitor), + SplitUpdater: split.NewSplitUpdater(storages.SplitStorage, splitAPI.SplitFetcher, logger, syncTelemetryStorage, appMonitor, flagSetsFilter), SegmentUpdater: 
segment.NewSegmentUpdater(storages.SplitStorage, storages.SegmentStorage, splitAPI.SegmentFetcher, logger, syncTelemetryStorage, appMonitor), ImpressionsCountRecorder: impressionscount.NewRecorderSingle(impressionsCounter, splitAPI.ImpressionRecorder, diff --git a/splitio/producer/initialization_test.go b/splitio/producer/initialization_test.go index d03c6e22..df7f58c3 100644 --- a/splitio/producer/initialization_test.go +++ b/splitio/producer/initialization_test.go @@ -6,6 +6,8 @@ import ( "net/http" "net/http/httptest" "os" + "strconv" + "strings" "testing" config "github.com/splitio/go-split-commons/v5/conf" @@ -198,6 +200,58 @@ func TestSanitizeRedisWithRedisDifferentApiKey(t *testing.T) { redisClient.Del("SPLITIO.test1") } +func TestSanitizeRedisWithForcedCleanupByFlagSets(t *testing.T) { + cfg := getDefaultConf() + cfg.Apikey = "983564etyrudhijfgknf9i08euh" + cfg.Initialization.ForceFreshStartup = true + cfg.FlagSetsFilter = []string{"flagset1", "flagset2"} + + hash := util.HashAPIKey(cfg.Apikey + strings.Join(cfg.FlagSetsFilter, "::")) + + logger := logging.NewLogger(nil) + + redisClient, err := predis.NewRedisClient(&config.RedisConfig{ + Host: "localhost", + Port: 6379, + Prefix: "some_prefix", + Database: 1, + }, logger) + if err != nil { + t.Error("It should be nil") + } + + err = redisClient.Set("SPLITIO.test1", "123", 0) + redisClient.Set("SPLITIO.hash", hash, 0) + if err != nil { + t.Error("It should be nil") + } + value, err := redisClient.Get("SPLITIO.test1") + if value != "123" { + t.Error("Value should have been set properly") + } + + cfg.FlagSetsFilter = []string{"flagset7"} + miscStorage := predis.NewMiscStorage(redisClient, logger) + value, err = redisClient.Get("SPLITIO.test1") + err = sanitizeRedis(cfg, miscStorage, logger) + if err != nil { + t.Error("It should be nil", err) + } + + value, _ = redisClient.Get("SPLITIO.test1") + if value != "" { + t.Error("Value should have been removed.") + } + + val, _ := redisClient.Get("SPLITIO.hash") + parsedHash, _ := strconv.ParseUint(val, 10, 64) + if uint32(parsedHash) == hash { + t.Error("ApiHash should have been updated.") + } + redisClient.Del("SPLITIO.hash") + redisClient.Del("SPLITIO.test1") +} + func getDefaultConf() *conf.Main { var c conf.Main cconf.PopulateDefaults(&c) diff --git a/splitio/producer/storage/telemetry.go b/splitio/producer/storage/telemetry.go index b3ba6494..27d9bbdb 100644 --- a/splitio/producer/storage/telemetry.go +++ b/splitio/producer/storage/telemetry.go @@ -177,11 +177,15 @@ func setLatency(result MultiMethodLatencies, metadata *dtos.Metadata, method str if _, ok := result[*metadata]; !ok { result[*metadata] = dtos.MethodLatencies{ - Treatment: make([]int64, telemetry.LatencyBucketCount), - Treatments: make([]int64, telemetry.LatencyBucketCount), - TreatmentWithConfig: make([]int64, telemetry.LatencyBucketCount), - TreatmentsWithConfig: make([]int64, telemetry.LatencyBucketCount), - Track: make([]int64, telemetry.LatencyBucketCount), + Treatment: make([]int64, telemetry.LatencyBucketCount), + Treatments: make([]int64, telemetry.LatencyBucketCount), + TreatmentWithConfig: make([]int64, telemetry.LatencyBucketCount), + TreatmentsWithConfig: make([]int64, telemetry.LatencyBucketCount), + TreatmentsByFlagSet: make([]int64, telemetry.LatencyBucketCount), + TreatmentsByFlagSets: make([]int64, telemetry.LatencyBucketCount), + TreatmentsWithConfigByFlagSet: make([]int64, telemetry.LatencyBucketCount), + TreatmentsWithConfigByFlagSets: make([]int64, telemetry.LatencyBucketCount), + Track: make([]int64, 
telemetry.LatencyBucketCount), } } @@ -194,6 +198,14 @@ func setLatency(result MultiMethodLatencies, metadata *dtos.Metadata, method str result[*metadata].TreatmentWithConfig[bucket] = count case telemetry.TreatmentsWithConfig: result[*metadata].TreatmentsWithConfig[bucket] = count + case telemetry.TreatmentsByFlagSet: + result[*metadata].TreatmentsByFlagSet[bucket] = count + case telemetry.TreatmentsByFlagSets: + result[*metadata].TreatmentsByFlagSets[bucket] = count + case telemetry.TreatmentsWithConfigByFlagSet: + result[*metadata].TreatmentsWithConfigByFlagSet[bucket] = count + case telemetry.TreatmentsWithConfigByFlagSets: + result[*metadata].TreatmentsWithConfigByFlagSets[bucket] = count case telemetry.Track: result[*metadata].Track[bucket] = count default: @@ -232,6 +244,14 @@ func setException(result MultiMethodExceptions, metadata *dtos.Metadata, method curr.TreatmentWithConfig = count case telemetry.TreatmentsWithConfig: curr.TreatmentsWithConfig = count + case telemetry.TreatmentsByFlagSet: + curr.TreatmentsByFlagSet = count + case telemetry.TreatmentsByFlagSets: + curr.TreatmentsByFlagSets = count + case telemetry.TreatmentsWithConfigByFlagSet: + curr.TreatmentsWithConfigByFlagSet = count + case telemetry.TreatmentsWithConfigByFlagSets: + curr.TreatmentsWithConfigByFlagSets = count case telemetry.Track: curr.Track = count default: diff --git a/splitio/producer/storage/telemetry_test.go b/splitio/producer/storage/telemetry_test.go index b93a12ab..c077384a 100644 --- a/splitio/producer/storage/telemetry_test.go +++ b/splitio/producer/storage/telemetry_test.go @@ -59,6 +59,20 @@ func TestRedisTelemetryExceptions(t *testing.T) { producer1.RecordException(telemetry.Treatment) producer1.RecordException(telemetry.Treatments) producer1.RecordException(telemetry.Treatments) + producer1.RecordException(telemetry.TreatmentsByFlagSet) + producer1.RecordException(telemetry.TreatmentsByFlagSet) + producer1.RecordException(telemetry.TreatmentsByFlagSet) + producer1.RecordException(telemetry.TreatmentsByFlagSet) + producer1.RecordException(telemetry.TreatmentsByFlagSets) + producer1.RecordException(telemetry.TreatmentsByFlagSets) + producer1.RecordException(telemetry.TreatmentsWithConfigByFlagSet) + producer1.RecordException(telemetry.TreatmentsWithConfigByFlagSet) + producer1.RecordException(telemetry.TreatmentsWithConfigByFlagSet) + producer1.RecordException(telemetry.TreatmentsWithConfigByFlagSets) + producer1.RecordException(telemetry.TreatmentsWithConfigByFlagSets) + producer1.RecordException(telemetry.TreatmentsWithConfigByFlagSets) + producer1.RecordException(telemetry.TreatmentsWithConfigByFlagSets) + producer1.RecordException(telemetry.TreatmentsWithConfigByFlagSets) producer1.RecordException(telemetry.TreatmentWithConfig) producer1.RecordException(telemetry.TreatmentWithConfig) producer1.RecordException(telemetry.TreatmentWithConfig) @@ -81,6 +95,16 @@ func TestRedisTelemetryExceptions(t *testing.T) { producer2.RecordException(telemetry.Treatments) producer2.RecordException(telemetry.Treatments) producer2.RecordException(telemetry.Treatments) + producer2.RecordException(telemetry.TreatmentsByFlagSet) + producer2.RecordException(telemetry.TreatmentsByFlagSet) + producer2.RecordException(telemetry.TreatmentsByFlagSet) + producer2.RecordException(telemetry.TreatmentsByFlagSets) + producer2.RecordException(telemetry.TreatmentsWithConfigByFlagSet) + producer2.RecordException(telemetry.TreatmentsWithConfigByFlagSet) + producer2.RecordException(telemetry.TreatmentsWithConfigByFlagSets) + 
producer2.RecordException(telemetry.TreatmentsWithConfigByFlagSets) + producer2.RecordException(telemetry.TreatmentsWithConfigByFlagSets) + producer2.RecordException(telemetry.TreatmentWithConfig) producer2.RecordException(telemetry.TreatmentWithConfig) producer2.RecordException(telemetry.TreatmentWithConfig) @@ -120,6 +144,22 @@ func TestRedisTelemetryExceptions(t *testing.T) { t.Error("exception count for track in metadata1 should be 5. Was: ", excsForM1.Track) } + if excsForM1.TreatmentsByFlagSet != 4 { + t.Error("exception count for treatmentsByFlagSet in metadata1 should be 4. Was: ", excsForM1.TreatmentsByFlagSet) + } + + if excsForM1.TreatmentsByFlagSets != 2 { + t.Error("exception count for treatmentsByFlagSets in metadata1 should be 2. Was: ", excsForM1.TreatmentsByFlagSets) + } + + if excsForM1.TreatmentsWithConfigByFlagSet != 3 { + t.Error("exception count for treatmentsWithConfigByFlagSet in metadata1 should be 3. Was: ", excsForM1.TreatmentsWithConfigByFlagSet) + } + + if excsForM1.TreatmentsWithConfigByFlagSets != 5 { + t.Error("exception count for treatmentsWithConfigByFlagSets in metadata1 should be 5. Was: ", excsForM1.TreatmentsWithConfigByFlagSets) + } + excsForM2, ok := exceptions[metadata2] if !ok { t.Error("exceptions for metadata2 should be present") @@ -145,6 +185,22 @@ func TestRedisTelemetryExceptions(t *testing.T) { t.Error("exception count for track in metadata1 should be 1. Was: ", excsForM2.Track) } + if excsForM2.TreatmentsByFlagSet != 3 { + t.Error("exception count for treatmentsByFlagSet in metadata2 should be 3. Was: ", excsForM2.TreatmentsByFlagSet) + } + + if excsForM2.TreatmentsByFlagSets != 1 { + t.Error("exception count for treatmentsByFlagSets in metadata2 should be 1. Was: ", excsForM2.TreatmentsByFlagSets) + } + + if excsForM2.TreatmentsWithConfigByFlagSet != 2 { + t.Error("exception count for treatmentsWithConfigByFlagSet in metadata2 should be 2. Was: ", excsForM2.TreatmentsWithConfigByFlagSet) + } + + if excsForM2.TreatmentsWithConfigByFlagSets != 3 { + t.Error("exception count for treatmentsWithConfigByFlagSets in metadata2 should be 3. Was: ", excsForM2.TreatmentsWithConfigByFlagSets) + } + exceptions = consumer.PopExceptions() if len(exceptions) > 0 { t.Error("no more exceptions should have been fetched from redis. 
Got:", exceptions) @@ -172,12 +228,24 @@ func TestRedisTelemetryLatencies(t *testing.T) { producer1.RecordLatency(telemetry.Treatments, 2*time.Second) producer1.RecordLatency(telemetry.TreatmentWithConfig, 3*time.Second) producer1.RecordLatency(telemetry.TreatmentsWithConfig, 4*time.Second) + producer1.RecordLatency(telemetry.TreatmentsByFlagSet, 2*time.Second) + producer1.RecordLatency(telemetry.TreatmentsByFlagSets, 4*time.Second) + producer1.RecordLatency(telemetry.TreatmentsWithConfigByFlagSet, 3*time.Second) + producer1.RecordLatency(telemetry.TreatmentsWithConfigByFlagSet, 6*time.Second) + producer1.RecordLatency(telemetry.TreatmentsWithConfigByFlagSets, 5*time.Second) producer1.RecordLatency(telemetry.Track, 5*time.Second) producer2.RecordLatency(telemetry.Treatment, 5*time.Second) producer2.RecordLatency(telemetry.Treatments, 4*time.Second) producer2.RecordLatency(telemetry.TreatmentWithConfig, 3*time.Second) producer2.RecordLatency(telemetry.TreatmentsWithConfig, 2*time.Second) + producer2.RecordLatency(telemetry.TreatmentsByFlagSet, 4*time.Second) + producer2.RecordLatency(telemetry.TreatmentsByFlagSet, 1*time.Second) + producer2.RecordLatency(telemetry.TreatmentsByFlagSets, 2*time.Second) + producer2.RecordLatency(telemetry.TreatmentsWithConfigByFlagSet, 5*time.Second) + producer2.RecordLatency(telemetry.TreatmentsWithConfigByFlagSets, 1*time.Second) + producer2.RecordLatency(telemetry.TreatmentsWithConfigByFlagSets, 2*time.Second) + producer2.RecordLatency(telemetry.TreatmentsWithConfigByFlagSets, 3*time.Second) producer2.RecordLatency(telemetry.Track, 1*time.Second) consumer := NewRedisTelemetryCosumerclient(client, logger) @@ -224,6 +292,38 @@ func TestRedisTelemetryLatencies(t *testing.T) { t.Error("latency count for .TreatmentsWithConfig should be 1. Is: ", l1TreatmentsWithConfig) } + l1TreatmentsByFlagSet := int64(0) + for _, count := range latsForM1.TreatmentsByFlagSet { + l1TreatmentsByFlagSet += count + } + if l1TreatmentsByFlagSet != int64(1) { + t.Error("latency count for .TreatmentsByFlagSet should be 1. Is: ", l1TreatmentsByFlagSet) + } + + l1TreatmentsByFlagSets := int64(0) + for _, count := range latsForM1.TreatmentsByFlagSets { + l1TreatmentsByFlagSets += count + } + if l1TreatmentsByFlagSets != int64(1) { + t.Error("latency count for .TreatmentsByFlagSet should be 1. Is: ", l1TreatmentsByFlagSets) + } + + l1TreatmentsWithConfigByFlagSet := int64(0) + for _, count := range latsForM1.TreatmentsWithConfigByFlagSet { + l1TreatmentsWithConfigByFlagSet += count + } + if l1TreatmentsWithConfigByFlagSet != int64(2) { + t.Error("latency count for .TreatmentsWithConfigByFlagSet should be 2. Is: ", l1TreatmentsWithConfigByFlagSet) + } + + l1TreatmentsWithConfigByFlagSets := int64(0) + for _, count := range latsForM1.TreatmentsWithConfigByFlagSets { + l1TreatmentsWithConfigByFlagSets += count + } + if l1TreatmentsWithConfigByFlagSets != int64(1) { + t.Error("latency count for .TreatmentsWithConfigByFlagSets should be 1. Is: ", l1TreatmentsWithConfigByFlagSets) + } + l1Track := int64(0) for _, count := range latsForM1.Track { l1Track += count @@ -269,6 +369,38 @@ func TestRedisTelemetryLatencies(t *testing.T) { t.Error("latency count for .TreatmentsWithConfig should be 1. Is: ", l2TreatmentsWithConfig) } + l2TreatmentsByFlagSet := int64(0) + for _, count := range latsForM2.TreatmentsByFlagSet { + l2TreatmentsByFlagSet += count + } + if l2TreatmentsByFlagSet != int64(2) { + t.Error("latency count for .TreatmentsByFlagSet should be 1. 
Is: ", l2TreatmentsByFlagSet) + } + + l2TreatmentsByFlagSets := int64(0) + for _, count := range latsForM2.TreatmentsByFlagSets { + l2TreatmentsByFlagSets += count + } + if l2TreatmentsByFlagSets != int64(1) { + t.Error("latency count for .TreatmentsByFlagSet should be 1. Is: ", l2TreatmentsByFlagSets) + } + + l2TreatmentsWithConfigByFlagSet := int64(0) + for _, count := range latsForM2.TreatmentsWithConfigByFlagSet { + l2TreatmentsWithConfigByFlagSet += count + } + if l2TreatmentsWithConfigByFlagSet != int64(1) { + t.Error("latency count for .TreatmentsWithConfigByFlagSet should be 1. Is: ", l2TreatmentsWithConfigByFlagSet) + } + + l2TreatmentsWithConfigByFlagSets := int64(0) + for _, count := range latsForM2.TreatmentsWithConfigByFlagSets { + l2TreatmentsWithConfigByFlagSets += count + } + if l2TreatmentsWithConfigByFlagSets != int64(3) { + t.Error("latency count for .TreatmentsWithConfigByFlagSets should be 3. Is: ", l2TreatmentsWithConfigByFlagSets) + } + l2Track := int64(0) for _, count := range latsForM2.Track { l2Track += count diff --git a/splitio/producer/task/pipelined.go b/splitio/producer/task/pipelined.go index 3a54f5ac..cdfc2fbd 100644 --- a/splitio/producer/task/pipelined.go +++ b/splitio/producer/task/pipelined.go @@ -255,23 +255,20 @@ func (p *PipelinedSyncTask) sinker() { defer asRecyblable.recycle() } - common.WithAttempts(3, func() error { + err := common.WithAttempts(3, func() error { p.logger.Debug(fmt.Sprintf("[pipelined/%s] - impressions post ready. making request", p.name)) req, err := p.worker.BuildRequest(bulk) if err != nil { - p.logger.Error(fmt.Sprintf("[pipelined/%s] error building request: %s", p.name, err)) - return err + return fmt.Errorf(fmt.Sprintf("[pipelined/%s] error building request: %s", p.name, err)) } resp, err := p.httpClient.Do(req) if err != nil { - p.logger.Error(fmt.Sprintf("[pipelined/%s] error posting: %s", p.name, err)) - return err + return fmt.Errorf(fmt.Sprintf("[pipelined/%s] error posting: %s", p.name, err)) } if resp.StatusCode < 200 || resp.StatusCode >= 300 { - p.logger.Error(fmt.Sprintf("[pipelined/%s] bad status code when sinking data: %d", p.name, resp.StatusCode)) - return errHTTP + return fmt.Errorf(fmt.Sprintf("[pipelined/%s] bad status code when sinking data: %d", p.name, resp.StatusCode)) } if resp.Body != nil { @@ -280,6 +277,9 @@ func (p *PipelinedSyncTask) sinker() { p.logger.Debug(fmt.Sprintf("[pipelined/%s] - impressions posted successfully", p.name)) return nil }) + if err != nil { + p.logger.Error(err) + } }() } } diff --git a/splitio/producer/util.go b/splitio/producer/util.go index c0f24f69..f7cda7f2 100644 --- a/splitio/producer/util.go +++ b/splitio/producer/util.go @@ -124,7 +124,7 @@ func sanitizeRedis(cfg *conf.Main, miscStorage *redis.MiscStorage, logger loggin if miscStorage == nil { return errors.New("Could not sanitize redis") } - currentHash := util.HashAPIKey(cfg.Apikey) + currentHash := util.HashAPIKey(cfg.Apikey + strings.Join(cfg.FlagSetsFilter, "::")) currentHashAsStr := strconv.Itoa(int(currentHash)) defer miscStorage.SetApikeyHash(currentHashAsStr) diff --git a/splitio/producer/worker/telemetry_test.go b/splitio/producer/worker/telemetry_test.go index 55d15ee4..72e4198d 100644 --- a/splitio/producer/worker/telemetry_test.go +++ b/splitio/producer/worker/telemetry_test.go @@ -27,14 +27,14 @@ func TestTelemetryMultiWorker(t *testing.T) { store := storageMocks.RedisTelemetryConsumerMultiMock{ PopLatenciesCall: func() storage.MultiMethodLatencies { return map[dtos.Metadata]dtos.MethodLatencies{ - 
metadata1: dtos.MethodLatencies{Treatment: makeBucket(1, 1)}, - metadata2: dtos.MethodLatencies{Treatment: makeBucket(2, 1)}, + metadata1: dtos.MethodLatencies{Treatment: makeBucket(1, 1), TreatmentsByFlagSet: makeBucket(1, 2), TreatmentsWithConfigByFlagSet: makeBucket(1, 3)}, + metadata2: dtos.MethodLatencies{Treatment: makeBucket(2, 1), TreatmentsByFlagSets: makeBucket(1, 3), TreatmentsWithConfigByFlagSets: makeBucket(1, 1)}, } }, PopExceptionsCall: func() storage.MultiMethodExceptions { return map[dtos.Metadata]dtos.MethodExceptions{ - metadata1: dtos.MethodExceptions{Treatment: 1}, - metadata2: dtos.MethodExceptions{Treatment: 2}, + metadata1: dtos.MethodExceptions{Treatment: 1, TreatmentsByFlagSet: 9, TreatmentsWithConfigByFlagSet: 12}, + metadata2: dtos.MethodExceptions{Treatment: 2, TreatmentsByFlagSets: 5, TreatmentsWithConfigByFlagSets: 13}, } }, PopConfigsCall: func() storage.MultiConfigs { @@ -64,16 +64,40 @@ func TestTelemetryMultiWorker(t *testing.T) { if l := stats.MethodLatencies.Treatment[1]; l != 1 { t.Error("invalid latency", l) } + if l := stats.MethodLatencies.TreatmentsByFlagSet[1]; l != 2 { + t.Error("invalid latency", l) + } + if l := stats.MethodLatencies.TreatmentsWithConfigByFlagSet[1]; l != 3 { + t.Error("invalid latency", l) + } if stats.MethodExceptions.Treatment != 1 { t.Error("invalid exception count") } + if stats.MethodExceptions.TreatmentsByFlagSet != 9 { + t.Error("invalid exception count") + } + if stats.MethodExceptions.TreatmentsWithConfigByFlagSet != 12 { + t.Error("invalid exception count") + } } else if metadata == metadata2 { if l := stats.MethodLatencies.Treatment[2]; l != 1 { t.Error("invalid latency", l) } + if l := stats.MethodLatencies.TreatmentsByFlagSets[1]; l != 3 { + t.Error("invalid latency", l) + } + if l := stats.MethodLatencies.TreatmentsWithConfigByFlagSets[1]; l != 1 { + t.Error("invalid latency", l) + } if stats.MethodExceptions.Treatment != 2 { t.Error("invalid exception count") } + if stats.MethodExceptions.TreatmentsByFlagSets != 5 { + t.Error("invalid exception count") + } + if stats.MethodExceptions.TreatmentsWithConfigByFlagSets != 13 { + t.Error("invalid exception count") + } } return nil }, diff --git a/splitio/proxy/caching/caching.go b/splitio/proxy/caching/caching.go index 29b65456..89d153ec 100644 --- a/splitio/proxy/caching/caching.go +++ b/splitio/proxy/caching/caching.go @@ -34,18 +34,6 @@ func MakeSurrogateForSegmentChanges(segmentName string) string { // MakeSurrogateForMySegments creates a list surrogate keys for all the segments involved func MakeSurrogateForMySegments(mysegments []dtos.MySegmentDTO) []string { - if len(mysegments) == 0 { - return nil - } - - /* - surrogates := make([]string, 0, len(mysegments)) - for idx := range mysegments { - surrogates = append(surrogates, segmentPrefix+mysegments[idx].Name) - } - return surrogates - */ - // Since we are now evicting individually for every updated key, we don't need surrogates for mySegments return nil } diff --git a/splitio/proxy/caching/caching_test.go b/splitio/proxy/caching/caching_test.go index 8d414cab..5d46574b 100644 --- a/splitio/proxy/caching/caching_test.go +++ b/splitio/proxy/caching/caching_test.go @@ -4,31 +4,20 @@ import ( "testing" "github.com/splitio/go-split-commons/v5/dtos" - "github.com/splitio/go-toolkit/v5/testhelpers" + "github.com/stretchr/testify/assert" ) -func TestSegment(t *testing.T) { - - if MakeSurrogateForSegmentChanges("segment1") != segmentPrefix+"segment1" { - t.Error("wrong segment changes surrogate.") - } +func 
TestSegmentSurrogates(t *testing.T) { + assert.Equal(t, segmentPrefix+"segment1", MakeSurrogateForSegmentChanges("segment1")) + assert.NotEqual(t, MakeSurrogateForSegmentChanges("segment1"), MakeSurrogateForSegmentChanges("segment2")) } func TestMySegmentKeyGeneration(t *testing.T) { entries := MakeMySegmentsEntries("k1") - if entries[0] != "/api/mySegments/k1" { - t.Error("invalid mySegments cache entry") - } - if entries[1] != "gzip::/api/mySegments/k1" { - t.Error("invalid mySegments cache entry") - } + assert.Equal(t, "/api/mySegments/k1", entries[0]) + assert.Equal(t, "gzip::/api/mySegments/k1", entries[1]) } -func TestMySegments(t *testing.T) { - testhelpers.AssertStringSliceEquals( - t, - MakeSurrogateForMySegments([]dtos.MySegmentDTO{{Name: "segment1"}, {Name: "segment2"}}), - []string{}, - "wrong my segments surrogate keys", - ) +func TestMySegmentsSurrogates(t *testing.T) { + assert.Equal(t, []string(nil), MakeSurrogateForMySegments([]dtos.MySegmentDTO{{Name: "segment1"}, {Name: "segment2"}})) } diff --git a/splitio/proxy/caching/workers.go b/splitio/proxy/caching/workers.go index cc4bff18..ea002998 100644 --- a/splitio/proxy/caching/workers.go +++ b/splitio/proxy/caching/workers.go @@ -2,6 +2,7 @@ package caching import ( "github.com/splitio/go-split-commons/v5/dtos" + "github.com/splitio/go-split-commons/v5/flagsets" "github.com/splitio/go-split-commons/v5/healthcheck/application" "github.com/splitio/go-split-commons/v5/service" "github.com/splitio/go-split-commons/v5/storage" @@ -27,9 +28,10 @@ func NewCacheAwareSplitSync( runtimeTelemetry storage.TelemetryRuntimeProducer, cacheFlusher gincache.CacheFlusher, appMonitor application.MonitorProducerInterface, + flagSetsFilter flagsets.FlagSetFilter, ) *CacheAwareSplitSynchronizer { return &CacheAwareSplitSynchronizer{ - wrapped: split.NewSplitUpdater(splitStorage, splitFetcher, logger, runtimeTelemetry, appMonitor), + wrapped: split.NewSplitUpdater(splitStorage, splitFetcher, logger, runtimeTelemetry, appMonitor, flagSetsFilter), splitStorage: splitStorage, cacheFlusher: cacheFlusher, } diff --git a/splitio/proxy/caching/workers_test.go b/splitio/proxy/caching/workers_test.go index cb47f3e7..e90ca744 100644 --- a/splitio/proxy/caching/workers_test.go +++ b/splitio/proxy/caching/workers_test.go @@ -3,287 +3,349 @@ package caching import ( "testing" + "github.com/splitio/gincache" "github.com/splitio/go-split-commons/v5/dtos" - storageMocks "github.com/splitio/go-split-commons/v5/storage/mocks" + "github.com/splitio/go-split-commons/v5/storage" "github.com/splitio/go-split-commons/v5/synchronizer/worker/segment" "github.com/splitio/go-split-commons/v5/synchronizer/worker/split" "github.com/splitio/go-toolkit/v5/datastructures/set" - - cacheMocks "github.com/splitio/gincache/mocks" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" ) -func TestCacheAwareSplitSync(t *testing.T) { - var cn int64 = -1 - - splitSyncMock := &splitUpdaterMock{ - SynchronizeFeatureFlagsCall: func(ffChange *dtos.SplitChangeUpdate) (*split.UpdateResult, error) { return nil, nil }, - SynchronizeSplitsCall: func(*int64) (*split.UpdateResult, error) { return nil, nil }, - LocalKillCall: func(string, string, int64) {}, - } - cacheFlusherMock := &cacheMocks.CacheFlusherMock{ - EvictBySurrogateCall: func(string) { t.Error("nothing should be evicted") }, - } +func TestCacheAwareSplitSyncNoChanges(t *testing.T) { + var splitSyncMock splitUpdaterMock + splitSyncMock.On("SynchronizeSplits", (*int64)(nil)).Return((*split.UpdateResult)(nil), 
error(nil)) + var cacheFlusherMock cacheFlusherMock + var storageMock splitStorageMock + storageMock.On("ChangeNumber").Return(int64(-1), error(nil)) css := CacheAwareSplitSynchronizer{ - splitStorage: &storageMocks.MockSplitStorage{ - ChangeNumberCall: func() (int64, error) { return cn, nil }, - }, - wrapped: splitSyncMock, - cacheFlusher: cacheFlusherMock, + splitStorage: &storageMock, + wrapped: &splitSyncMock, + cacheFlusher: &cacheFlusherMock, } - css.SynchronizeSplits(nil) + res, err := css.SynchronizeSplits(nil) + assert.Nil(t, err) + assert.Nil(t, res) - splitSyncMock.SynchronizeSplitsCall = func(*int64) (*split.UpdateResult, error) { - cn++ - return nil, nil - } + splitSyncMock.AssertExpectations(t) + cacheFlusherMock.AssertExpectations(t) + storageMock.AssertExpectations(t) +} - calls := 0 - cacheFlusherMock.EvictBySurrogateCall = func(key string) { - if key != SplitSurrogate { - t.Error("wrong surrogate") - } - calls++ - } +func TestCacheAwareSplitSyncChanges(t *testing.T) { + var splitSyncMock splitUpdaterMock + splitSyncMock.On("SynchronizeSplits", (*int64)(nil)).Return((*split.UpdateResult)(nil), error(nil)).Times(2) + + var cacheFlusherMock cacheFlusherMock + cacheFlusherMock.On("EvictBySurrogate", SplitSurrogate).Times(3) + + var storageMock splitStorageMock + storageMock.On("ChangeNumber").Return(int64(-1), error(nil)).Once() + storageMock.On("ChangeNumber").Return(int64(1), error(nil)).Once() - css.SynchronizeSplits(nil) - if calls != 1 { - t.Error("should have flushed splits once") + css := CacheAwareSplitSynchronizer{ + splitStorage: &storageMock, + wrapped: &splitSyncMock, + cacheFlusher: &cacheFlusherMock, } + res, err := css.SynchronizeSplits(nil) + assert.Nil(t, err) + assert.Nil(t, res) + + splitSyncMock.On("LocalKill", "someSplit", "off", int64(123)).Return(nil).Once() css.LocalKill("someSplit", "off", 123) - if calls != 2 { - t.Error("should have flushed again after a local kill") - } - // Test that going from cn > -1 to cn == -1 purges - cn = 123 - splitSyncMock.SynchronizeSplitsCall = func(*int64) (*split.UpdateResult, error) { - cn = -1 - return nil, nil - } - css.SynchronizeSplits(nil) - if calls != 3 { - t.Error("should have flushed splits once", calls) - } + // Test that going from cn > -1 to cn == -1 purges (can happen if the environment if wiped of splits) + storageMock.On("ChangeNumber").Return(int64(123), error(nil)).Once() + storageMock.On("ChangeNumber").Return(int64(-1), error(nil)).Once() + res, err = css.SynchronizeSplits(nil) + assert.Nil(t, err) + assert.Nil(t, res) + + splitSyncMock.AssertExpectations(t) + cacheFlusherMock.AssertExpectations(t) + storageMock.AssertExpectations(t) } -func TestCacheAwareSplitSyncFF(t *testing.T) { - var cn int64 = -1 +func TestCacheAwareSplitSyncChangesNewMethod(t *testing.T) { - splitSyncMock := &splitUpdaterMock{ - SynchronizeFeatureFlagsCall: func(ffChange *dtos.SplitChangeUpdate) (*split.UpdateResult, error) { return nil, nil }, - SynchronizeSplitsCall: func(*int64) (*split.UpdateResult, error) { return nil, nil }, - LocalKillCall: func(string, string, int64) {}, - } - cacheFlusherMock := &cacheMocks.CacheFlusherMock{ - EvictBySurrogateCall: func(string) { t.Error("nothing should be evicted") }, - } + // This test is used to test the new method. Eventually commons should be cleaned in order to have a single method for split-synchronization. 
+ // when that happens, either this or the previous test should be removed + var splitSyncMock splitUpdaterMock + splitSyncMock.On("SynchronizeFeatureFlags", (*dtos.SplitChangeUpdate)(nil)).Return((*split.UpdateResult)(nil), error(nil)).Times(2) + + var cacheFlusherMock cacheFlusherMock + cacheFlusherMock.On("EvictBySurrogate", SplitSurrogate).Times(2) + + var storageMock splitStorageMock + storageMock.On("ChangeNumber").Return(int64(-1), error(nil)).Once() + storageMock.On("ChangeNumber").Return(int64(1), error(nil)).Once() css := CacheAwareSplitSynchronizer{ - splitStorage: &storageMocks.MockSplitStorage{ - ChangeNumberCall: func() (int64, error) { return cn, nil }, - }, - wrapped: splitSyncMock, - cacheFlusher: cacheFlusherMock, + splitStorage: &storageMock, + wrapped: &splitSyncMock, + cacheFlusher: &cacheFlusherMock, } - css.SynchronizeFeatureFlags(nil) + res, err := css.SynchronizeFeatureFlags(nil) + assert.Nil(t, err) + assert.Nil(t, res) - splitSyncMock.SynchronizeFeatureFlagsCall = func(*dtos.SplitChangeUpdate) (*split.UpdateResult, error) { - cn++ - return nil, nil - } + // Test that going from cn > -1 to cn == -1 purges (can happen if the environment is wiped of splits) + storageMock.On("ChangeNumber").Return(int64(123), error(nil)).Once() + storageMock.On("ChangeNumber").Return(int64(-1), error(nil)).Once() + res, err = css.SynchronizeFeatureFlags(nil) + assert.Nil(t, err) + assert.Nil(t, res) - calls := 0 - cacheFlusherMock.EvictBySurrogateCall = func(key string) { - if key != SplitSurrogate { - t.Error("wrong surrogate") - } - calls++ - } + splitSyncMock.AssertExpectations(t) + cacheFlusherMock.AssertExpectations(t) + storageMock.AssertExpectations(t) +} - css.SynchronizeFeatureFlags(nil) - if calls != 1 { - t.Error("should have flushed splits once") - } +func TestCacheAwareSegmentSyncNoChanges(t *testing.T) { + var segmentUpdater segmentUpdaterMock + segmentUpdater.On("SynchronizeSegment", "segment1", (*int64)(nil)).Return(&segment.UpdateResult{}, nil).Once() - css.LocalKill("someSplit", "off", 123) - if calls != 2 { - t.Error("should have flushed again after a local kill") - } + var splitStorage splitStorageMock - // Test that going from cn > -1 to cn == -1 purges - cn = 123 - splitSyncMock.SynchronizeFeatureFlagsCall = func(*dtos.SplitChangeUpdate) (*split.UpdateResult, error) { - cn = -1 - return nil, nil - } - css.SynchronizeFeatureFlags(nil) - if calls != 3 { - t.Error("should have flushed splits once", calls) + var cacheFlusher cacheFlusherMock + + var segmentStorage segmentStorageMock + segmentStorage.On("ChangeNumber", "segment1").Return(int64(0), nil).Once() + + css := CacheAwareSegmentSynchronizer{ + splitStorage: &splitStorage, + segmentStorage: &segmentStorage, + wrapped: &segmentUpdater, + cacheFlusher: &cacheFlusher, } + + res, err := css.SynchronizeSegment("segment1", nil) + assert.Nil(t, err) + assert.Equal(t, &segment.UpdateResult{}, res) + + segmentUpdater.AssertExpectations(t) + segmentStorage.AssertExpectations(t) + splitStorage.AssertExpectations(t) + cacheFlusher.AssertExpectations(t) } -func TestCacheAwareSegmentSync(t *testing.T) { - cns := map[string]int64{"segment1": 0} +func TestCacheAwareSegmentSyncSingle(t *testing.T) { + var segmentUpdater segmentUpdaterMock + segmentUpdater.On("SynchronizeSegment", "segment1", (*int64)(nil)).Return(&segment.UpdateResult{ + UpdatedKeys: []string{"k1"}, + NewChangeNumber: 2, + }, nil).Once() - segmentSyncMock := &segmentUpdaterMock{ - SynchronizeSegmentCall: func(string, *int64) (*segment.UpdateResult, error) { 
return &segment.UpdateResult{}, nil }, - SynchronizeSegmentsCall: func() (map[string]segment.UpdateResult, error) { return nil, nil }, - } - cacheFlusherMock := &cacheMocks.CacheFlusherMock{ - EvictBySurrogateCall: func(string) { t.Error("nothing should be evicted") }, - EvictCall: func(string) { t.Errorf("nothing should be evicted") }, - } + var splitStorage splitStorageMock + + var cacheFlusher cacheFlusherMock + cacheFlusher.On("EvictBySurrogate", MakeSurrogateForSegmentChanges("segment1")).Times(2) + cacheFlusher.On("Evict", "/api/mySegments/k1").Times(2) + cacheFlusher.On("Evict", "gzip::/api/mySegments/k1").Times(2) + + var segmentStorage segmentStorageMock + segmentStorage.On("ChangeNumber", "segment1").Return(int64(0), nil).Once() css := CacheAwareSegmentSynchronizer{ - splitStorage: &storageMocks.MockSplitStorage{ - SegmentNamesCall: func() *set.ThreadUnsafeSet { - s := set.NewSet() - for k := range cns { - s.Add(k) - } - return s - }, - }, - segmentStorage: &storageMocks.MockSegmentStorage{ - ChangeNumberCall: func(s string) (int64, error) { - cn, _ := cns[s] - return cn, nil - }, - }, - wrapped: segmentSyncMock, - cacheFlusher: cacheFlusherMock, - } + splitStorage: &splitStorage, + segmentStorage: &segmentStorage, + wrapped: &segmentUpdater, + cacheFlusher: &cacheFlusher, + } + + res, err := css.SynchronizeSegment("segment1", nil) + assert.Nil(t, err) + assert.Equal(t, &segment.UpdateResult{UpdatedKeys: []string{"k1"}, NewChangeNumber: 2}, res) + + // // Test that going from cn > -1 to cn == -1 purges + segmentStorage.On("ChangeNumber", "segment1").Return(int64(123), nil).Once() + segmentUpdater.On("SynchronizeSegment", "segment1", (*int64)(nil)).Return(&segment.UpdateResult{ + UpdatedKeys: []string{"k1"}, + NewChangeNumber: -1, + }, nil).Once() + res, err = css.SynchronizeSegment("segment1", nil) + assert.Nil(t, err) + assert.Equal(t, &segment.UpdateResult{UpdatedKeys: []string{"k1"}, NewChangeNumber: -1}, res) + + segmentUpdater.AssertExpectations(t) + segmentStorage.AssertExpectations(t) + splitStorage.AssertExpectations(t) + cacheFlusher.AssertExpectations(t) +} - css.SynchronizeSegment("segment1", nil) +func TestCacheAwareSegmentSyncAllSegments(t *testing.T) { + var segmentUpdater segmentUpdaterMock + segmentUpdater.On("SynchronizeSegments").Return(map[string]segment.UpdateResult{"segment2": { + UpdatedKeys: []string{"k1"}, + NewChangeNumber: 1, + }}, nil).Once() - segmentSyncMock.SynchronizeSegmentCall = func(name string, c *int64) (*segment.UpdateResult, error) { - return &segment.UpdateResult{UpdatedKeys: []string{"k1"}, NewChangeNumber: 2}, nil - } + var splitStorage splitStorageMock + splitStorage.On("SegmentNames").Return(set.NewSet("segment2")).Once() - evictBySurrogateCalls := 0 - cacheFlusherMock.EvictBySurrogateCall = func(key string) { - if key != MakeSurrogateForSegmentChanges("segment1") { - t.Error("wrong surrogate") - } - evictBySurrogateCalls++ - } - cacheFlusherMock.EvictCall = func(key string) { - if key != "/api/mySegments/k1" && key != "gzip::/api/mySegments/k1" { - t.Error("incorrect mysegments entry purged: ", key) - } - } + var cacheFlusher cacheFlusherMock + cacheFlusher.On("EvictBySurrogate", MakeSurrogateForSegmentChanges("segment2")).Times(1) + cacheFlusher.On("Evict", "/api/mySegments/k1").Times(3) + cacheFlusher.On("Evict", "gzip::/api/mySegments/k1").Times(3) - // SynchronizeSegment + var segmentStorage segmentStorageMock + segmentStorage.On("ChangeNumber", "segment2").Return(int64(0), nil).Once() - css.SynchronizeSegment("segment1", nil) - if 
evictBySurrogateCalls != 1 { - t.Error("should have flushed splits once. Got", evictBySurrogateCalls) + css := CacheAwareSegmentSynchronizer{ + splitStorage: &splitStorage, + segmentStorage: &segmentStorage, + wrapped: &segmentUpdater, + cacheFlusher: &cacheFlusher, } - // Test that going from cn > -1 to cn == -1 purges - cns["segment1"] = 123 - segmentSyncMock.SynchronizeSegmentCall = func(name string, s *int64) (*segment.UpdateResult, error) { - return &segment.UpdateResult{UpdatedKeys: []string{"k1"}, NewChangeNumber: -1}, nil - } - css.SynchronizeSegment("segment1", nil) - if evictBySurrogateCalls != 2 { - t.Error("should have flushed splits once", evictBySurrogateCalls) - } + // Case 1: updated CN + res, err := css.SynchronizeSegments() + assert.Nil(t, err) + assert.Equal(t, map[string]segment.UpdateResult{"segment2": {UpdatedKeys: []string{"k1"}, NewChangeNumber: 1}}, res) - // SynchronizeSegments + // Case 2: added segment + segmentStorage.On("ChangeNumber", "segment3").Return(int64(2), nil).Times(2) // for next test as well + segmentUpdater.On("SynchronizeSegments").Return(map[string]segment.UpdateResult{"segment3": { + UpdatedKeys: []string{"k1"}, + NewChangeNumber: 3, + }}, nil).Once() + cacheFlusher.On("EvictBySurrogate", MakeSurrogateForSegmentChanges("segment3")).Times(2) // for next test as well + splitStorage.On("SegmentNames").Return(set.NewSet("segment3")).Times(2) // for next test as well + + res, err = css.SynchronizeSegments() + assert.Nil(t, err) + assert.Equal(t, map[string]segment.UpdateResult{"segment3": {UpdatedKeys: []string{"k1"}, NewChangeNumber: 3}}, res) + + // // Case 3: deleted segment + segmentUpdater.On("SynchronizeSegments").Return(map[string]segment.UpdateResult{"segment3": { + UpdatedKeys: []string{"k1"}, + NewChangeNumber: -1, + }}, nil).Once() + + res, err = css.SynchronizeSegments() + assert.Nil(t, err) + assert.Equal(t, map[string]segment.UpdateResult{"segment3": {UpdatedKeys: []string{"k1"}, NewChangeNumber: -1}}, res) + + segmentUpdater.AssertExpectations(t) + segmentStorage.AssertExpectations(t) + splitStorage.AssertExpectations(t) + cacheFlusher.AssertExpectations(t) +} - // Case 1: updated CN - cns["segment2"] = 0 - segmentSyncMock.SynchronizeSegmentsCall = func() (map[string]segment.UpdateResult, error) { - return map[string]segment.UpdateResult{"segment2": {UpdatedKeys: []string{"k1"}, NewChangeNumber: 1}}, nil - } +// Borrowed mocks: These sohuld be in go-split-commons. 
but we need to wait until testify is adopted there - cacheFlusherMock.EvictBySurrogateCall = func(key string) { - if key != MakeSurrogateForSegmentChanges("segment2") { - t.Error("wrong surrogate") - } - evictBySurrogateCalls++ - } +type splitUpdaterMock struct { + mock.Mock +} - css.SynchronizeSegments() - if evictBySurrogateCalls != 3 { - t.Error("should have flushed segments twice") - } +// LocalKill implements split.Updater +func (s *splitUpdaterMock) LocalKill(splitName string, defaultTreatment string, changeNumber int64) { + s.Called(splitName, defaultTreatment, changeNumber) +} - // Case 2: added segment - cns["segment3"] = 2 - segmentSyncMock.SynchronizeSegmentsCall = func() (map[string]segment.UpdateResult, error) { - return map[string]segment.UpdateResult{"segment3": {UpdatedKeys: []string{"k1"}, NewChangeNumber: 3}}, nil - } +// SynchronizeFeatureFlags implements split.Updater +func (s *splitUpdaterMock) SynchronizeFeatureFlags(ffChange *dtos.SplitChangeUpdate) (*split.UpdateResult, error) { + args := s.Called(ffChange) + return args.Get(0).(*split.UpdateResult), args.Error(1) +} - cacheFlusherMock.EvictBySurrogateCall = func(key string) { - if key != MakeSurrogateForSegmentChanges("segment3") { - t.Error("wrong surrogate") - } - evictBySurrogateCalls++ - } +// SynchronizeSplits implements split.Updater +func (s *splitUpdaterMock) SynchronizeSplits(till *int64) (*split.UpdateResult, error) { + args := s.Called(till) + return args.Get(0).(*split.UpdateResult), args.Error(1) +} - css.SynchronizeSegments() - if evictBySurrogateCalls != 4 { - t.Error("should have flushed segments twice") - } +// ---- - // Case 3: deleted segment - segmentSyncMock.SynchronizeSegmentsCall = func() (map[string]segment.UpdateResult, error) { - return map[string]segment.UpdateResult{"segment3": {UpdatedKeys: []string{"k1"}, NewChangeNumber: -1}}, nil - } +type cacheFlusherMock struct { + mock.Mock +} - cacheFlusherMock.EvictBySurrogateCall = func(key string) { - if key != MakeSurrogateForSegmentChanges("segment3") { - t.Error("wrong surrogate", key) - } - evictBySurrogateCalls++ - } +func (c *cacheFlusherMock) Evict(key string) { c.Called(key) } +func (c *cacheFlusherMock) EvictAll() { c.Called() } +func (c *cacheFlusherMock) EvictBySurrogate(surrogate string) { c.Called(surrogate) } - css.SynchronizeSegments() - if evictBySurrogateCalls != 5 { - t.Error("should have flushed segments 5 times: ", evictBySurrogateCalls) - } +// --- - // all keys deleted & segment till is now -1 - cacheFlusherMock.EvictBySurrogateCall = func(key string) { - if key != MakeSurrogateForSegmentChanges("segment2") { - t.Error("wrong surrogate", key) - } - evictBySurrogateCalls++ - } - cns["segment2"] = 123 - segmentSyncMock.SynchronizeSegmentsCall = func() (map[string]segment.UpdateResult, error) { - return map[string]segment.UpdateResult{"segment2": {UpdatedKeys: []string{"k1"}, NewChangeNumber: -1}}, nil - } - css.SynchronizeSegments() - if evictBySurrogateCalls != 6 { - t.Error("should have flushed segments twice") - } +type splitStorageMock struct { + mock.Mock } -type splitUpdaterMock struct { - SynchronizeFeatureFlagsCall func(ffChange *dtos.SplitChangeUpdate) (*split.UpdateResult, error) - SynchronizeSplitsCall func(till *int64) (*split.UpdateResult, error) - LocalKillCall func(splitName string, defaultTreatment string, changeNumber int64) +func (s *splitStorageMock) All() []dtos.SplitDTO { panic("unimplemented") } +func (s *splitStorageMock) ChangeNumber() (int64, error) { + args := s.Called() + return 
args.Get(0).(int64), args.Error(1) } -func (s *splitUpdaterMock) SynchronizeSplits(till *int64) (*split.UpdateResult, error) { - return s.SynchronizeSplitsCall(till) +func (*splitStorageMock) FetchMany(splitNames []string) map[string]*dtos.SplitDTO { + panic("unimplemented") +} +func (*splitStorageMock) GetNamesByFlagSets(sets []string) map[string][]string { + panic("unimplemented") +} +func (*splitStorageMock) KillLocally(splitName string, defaultTreatment string, changeNumber int64) { + panic("unimplemented") +} +func (s *splitStorageMock) SegmentNames() *set.ThreadUnsafeSet { + return s.Called().Get(0).(*set.ThreadUnsafeSet) +} +func (s *splitStorageMock) SetChangeNumber(changeNumber int64) error { + return s.Called(changeNumber).Error(0) +} +func (*splitStorageMock) Split(splitName string) *dtos.SplitDTO { panic("unimplemented") } +func (*splitStorageMock) SplitNames() []string { panic("unimplemented") } +func (*splitStorageMock) TrafficTypeExists(trafficType string) bool { panic("unimplemented") } +func (*splitStorageMock) Update(toAdd []dtos.SplitDTO, toRemove []dtos.SplitDTO, changeNumber int64) { + panic("unimplemented") } -func (s *splitUpdaterMock) LocalKill(splitName string, defaultTreatment string, changeNumber int64) { - s.LocalKillCall(splitName, defaultTreatment, changeNumber) +type segmentUpdaterMock struct { + mock.Mock } -func (s *splitUpdaterMock) SynchronizeFeatureFlags(ffChange *dtos.SplitChangeUpdate) (*split.UpdateResult, error) { - return s.SynchronizeFeatureFlagsCall(ffChange) +func (s *segmentUpdaterMock) IsSegmentCached(segmentName string) bool { panic("unimplemented") } +func (s *segmentUpdaterMock) SegmentNames() []interface{} { panic("unimplemented") } + +func (s *segmentUpdaterMock) SynchronizeSegment(name string, till *int64) (*segment.UpdateResult, error) { + args := s.Called(name, till) + return args.Get(0).(*segment.UpdateResult), args.Error(1) +} + +func (s *segmentUpdaterMock) SynchronizeSegments() (map[string]segment.UpdateResult, error) { + args := s.Called() + return args.Get(0).(map[string]segment.UpdateResult), args.Error(1) +} + +type segmentStorageMock struct { + mock.Mock +} + +func (*segmentStorageMock) SetChangeNumber(segmentName string, till int64) error { + panic("unimplemented") +} +func (s *segmentStorageMock) Update(name string, toAdd *set.ThreadUnsafeSet, toRemove *set.ThreadUnsafeSet, changeNumber int64) error { + return s.Called(name, toAdd, toRemove, changeNumber).Error(0) +} + +// ChangeNumber implements storage.SegmentStorage +func (s *segmentStorageMock) ChangeNumber(segmentName string) (int64, error) { + args := s.Called(segmentName) + return args.Get(0).(int64), args.Error(1) +} + +func (*segmentStorageMock) Keys(segmentName string) *set.ThreadUnsafeSet { panic("unimplemented") } +func (*segmentStorageMock) SegmentContainsKey(segmentName string, key string) (bool, error) { + panic("unimplemented") } +func (*segmentStorageMock) SegmentKeysCount() int64 { panic("unimplemented") } +/* type segmentUpdaterMock struct { SynchronizeSegmentCall func(name string, till *int64) (*segment.UpdateResult, error) SynchronizeSegmentsCall func() (map[string]segment.UpdateResult, error) @@ -306,3 +368,9 @@ func (s *segmentUpdaterMock) SegmentNames() []interface{} { func (s *segmentUpdaterMock) IsSegmentCached(segmentName string) bool { return s.IsSegmentCachedCall(segmentName) } +*/ +var _ split.Updater = (*splitUpdaterMock)(nil) +var _ storage.SplitStorage = (*splitStorageMock)(nil) +var _ gincache.CacheFlusher = (*cacheFlusherMock)(nil) +var _ 
segment.Updater = (*segmentUpdaterMock)(nil) +var _ storage.SegmentStorage = (*segmentStorageMock)(nil) diff --git a/splitio/proxy/conf/sections.go b/splitio/proxy/conf/sections.go index 204720f5..3f7d796a 100644 --- a/splitio/proxy/conf/sections.go +++ b/splitio/proxy/conf/sections.go @@ -7,17 +7,19 @@ import ( // Main configuration options type Main struct { - Apikey string `json:"apikey" s-cli:"apikey" s-def:"" s-desc:"Split server side SDK key"` - IPAddressEnabled bool `json:"ipAddressEnabled" s-cli:"ip-address-enabled" s-def:"true" s-desc:"Bundle host's ip address when sending data to Split"` - Initialization Initialization `json:"initialization" s-nested:"true"` - Server Server `json:"server" s-nested:"true"` - Admin conf.Admin `json:"admin" s-nested:"true"` - Storage Storage `json:"storage" s-nested:"true"` - Sync Sync `json:"sync" s-nested:"true"` - Integrations conf.Integrations `json:"integrations" s-nested:"true"` - Logging conf.Logging `json:"logging" s-nested:"true"` - Healthcheck Healthcheck `json:"healthcheck" s-nested:"true"` - Observability Observability `json:"observability" s-nested:"true"` + Apikey string `json:"apikey" s-cli:"apikey" s-def:"" s-desc:"Split server side SDK key"` + IPAddressEnabled bool `json:"ipAddressEnabled" s-cli:"ip-address-enabled" s-def:"true" s-desc:"Bundle host's ip address when sending data to Split"` + FlagSetsFilter []string `json:"flagSetsFilter" s-cli:"flag-sets-filter" s-def:"" s-desc:"Flag Sets Filter provided"` + FlagSetStrictMatching bool `json:"flagSetStrictMatching" s-cli:"flag-sets-strict-matching" s-def:"false" s-desc:"filter sets not present in cache when building splitChanges responses"` + Initialization Initialization `json:"initialization" s-nested:"true"` + Server Server `json:"server" s-nested:"true"` + Admin conf.Admin `json:"admin" s-nested:"true"` + Storage Storage `json:"storage" s-nested:"true"` + Sync Sync `json:"sync" s-nested:"true"` + Integrations conf.Integrations `json:"integrations" s-nested:"true"` + Logging conf.Logging `json:"logging" s-nested:"true"` + Healthcheck Healthcheck `json:"healthcheck" s-nested:"true"` + Observability Observability `json:"observability" s-nested:"true"` } // BuildAdvancedConfig generates a commons-compatible advancedconfig with default + overriden parameters diff --git a/splitio/proxy/controllers/sdk.go b/splitio/proxy/controllers/sdk.go index 3d68dbf2..7f05c2c6 100644 --- a/splitio/proxy/controllers/sdk.go +++ b/splitio/proxy/controllers/sdk.go @@ -5,13 +5,16 @@ import ( "fmt" "net/http" "strconv" + "strings" "github.com/gin-gonic/gin" "github.com/splitio/go-split-commons/v5/dtos" "github.com/splitio/go-split-commons/v5/service" "github.com/splitio/go-toolkit/v5/logging" + "golang.org/x/exp/slices" "github.com/splitio/split-synchronizer/v5/splitio/proxy/caching" + "github.com/splitio/split-synchronizer/v5/splitio/proxy/flagsets" "github.com/splitio/split-synchronizer/v5/splitio/proxy/storage" ) @@ -21,6 +24,7 @@ type SdkServerController struct { fetcher service.SplitFetcher proxySplitStorage storage.ProxySplitStorage proxySegmentStorage storage.ProxySegmentStorage + fsmatcher flagsets.FlagSetMatcher } // NewSdkServerController instantiates a new sdk server controller @@ -29,12 +33,15 @@ func NewSdkServerController( fetcher service.SplitFetcher, proxySplitStorage storage.ProxySplitStorage, proxySegmentStorage storage.ProxySegmentStorage, + fsmatcher flagsets.FlagSetMatcher, + ) *SdkServerController { return &SdkServerController{ logger: logger, fetcher: fetcher, proxySplitStorage: 
proxySplitStorage, proxySegmentStorage: proxySegmentStorage, + fsmatcher: fsmatcher, } } @@ -52,9 +59,19 @@ func (c *SdkServerController) SplitChanges(ctx *gin.Context) { if err != nil { since = -1 } + + var rawSets []string + if fq, ok := ctx.GetQuery("sets"); ok { + rawSets = strings.Split(fq, ",") + } + sets := c.fsmatcher.Sanitize(rawSets) + if !slices.Equal(sets, rawSets) { + c.logger.Warning(fmt.Sprintf("SDK [%s] is sending flagsets unordered or with duplicates.", ctx.Request.Header.Get("SplitSDKVersion"))) + } + c.logger.Debug(fmt.Sprintf("SDK Fetches Feature Flags Since: %d", since)) - splits, err := c.fetchSplitChangesSince(since) + splits, err := c.fetchSplitChangesSince(since, sets) if err != nil { c.logger.Error("error fetching splitChanges payload from storage: ", err) ctx.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()}) @@ -112,20 +129,18 @@ func (c *SdkServerController) MySegments(ctx *gin.Context) { ctx.Set(caching.SurrogateContextKey, caching.MakeSurrogateForMySegments(mySegments)) } -func (c *SdkServerController) fetchSplitChangesSince(since int64) (*dtos.SplitChangesDTO, error) { - splits, err := c.proxySplitStorage.ChangesSince(since) +func (c *SdkServerController) fetchSplitChangesSince(since int64, sets []string) (*dtos.SplitChangesDTO, error) { + splits, err := c.proxySplitStorage.ChangesSince(since, sets) if err == nil { return splits, nil } - if !errors.Is(err, storage.ErrSummaryNotCached) { + if !errors.Is(err, storage.ErrSinceParamTooOld) { return nil, fmt.Errorf("unexpected error fetching feature flag changes from storage: %w", err) } + // perform a fetch to the BE using the supplied `since`, have the storage process it's response &, retry + // TODO(mredolatti): implement basic collapsing here to avoid flooding the BE with requests fetchOptions := service.NewFetchOptions(true, nil) - splits, err = c.fetcher.Fetch(since, &fetchOptions) - if err == nil { - c.proxySplitStorage.RegisterOlderCn(splits) - return splits, nil - } - return nil, fmt.Errorf("unexpected error fetching feature flag changes from storage: %w", err) + fetchOptions.FlagSetsFilter = strings.Join(sets, ",") // at this point the sets have been sanitized & sorted + return c.fetcher.Fetch(since, &fetchOptions) } diff --git a/splitio/proxy/controllers/sdk_test.go b/splitio/proxy/controllers/sdk_test.go index e1eeb2c4..2ddc8bbb 100644 --- a/splitio/proxy/controllers/sdk_test.go +++ b/splitio/proxy/controllers/sdk_test.go @@ -11,49 +11,35 @@ import ( "github.com/gin-gonic/gin" "github.com/splitio/go-split-commons/v5/dtos" "github.com/splitio/go-split-commons/v5/service" - "github.com/splitio/go-split-commons/v5/service/mocks" "github.com/splitio/go-toolkit/v5/logging" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + "github.com/splitio/split-synchronizer/v5/splitio/proxy/flagsets" "github.com/splitio/split-synchronizer/v5/splitio/proxy/storage" psmocks "github.com/splitio/split-synchronizer/v5/splitio/proxy/storage/mocks" ) -func TestSplitChangesCachedRecipe(t *testing.T) { +func TestSplitChangesRecentSince(t *testing.T) { gin.SetMode(gin.TestMode) + + var splitStorage psmocks.ProxySplitStorageMock + splitStorage.On("ChangesSince", int64(-1), []string(nil)). + Return(&dtos.SplitChangesDTO{Since: -1, Till: 1, Splits: []dtos.SplitDTO{{Name: "s1", Status: "ACTIVE"}, {Name: "s2", Status: "ACTIVE"}}}, nil). 
+ Once() + + var splitFetcher splitFetcherMock + resp := httptest.NewRecorder() ctx, router := gin.CreateTestContext(resp) - logger := logging.NewLogger(nil) - group := router.Group("/api") controller := NewSdkServerController( logger, - &mocks.MockSplitFetcher{ - FetchCall: func(changeNumber int64, fetchOptions *service.FetchOptions) (*dtos.SplitChangesDTO, error) { - t.Error("should not be called") - return nil, nil - }, - }, - &psmocks.ProxySplitStorageMock{ - ChangesSinceCall: func(since int64) (*dtos.SplitChangesDTO, error) { - if since != -1 { - t.Error("since should be -1") - } - - return &dtos.SplitChangesDTO{ - Since: -1, - Till: 1, - Splits: []dtos.SplitDTO{ - {Name: "s1"}, - {Name: "s2"}, - }, - }, nil - }, - RegisterOlderCnCall: func(payload *dtos.SplitChangesDTO) { - t.Error("should not be called") - }, - }, + &splitFetcher, + &splitStorage, nil, + flagsets.NewMatcher(false, nil), ) controller.Register(group) @@ -64,20 +50,35 @@ func TestSplitChangesCachedRecipe(t *testing.T) { ctx.Request.Header.Set("SplitSDKMachineName", "ip-1-2-3-4") router.ServeHTTP(resp, ctx.Request) - if resp.Code != 200 { - t.Error("Status code should be 200 and is ", resp.Code) - } + assert.Equal(t, 200, resp.Code) + + body, err := ioutil.ReadAll(resp.Body) + assert.Nil(t, err) - body, _ := ioutil.ReadAll(resp.Body) var s dtos.SplitChangesDTO - json.Unmarshal(body, &s) - if len(s.Splits) != 2 || s.Since != -1 || s.Till != 1 { - t.Error("wrong payload returned") - } + err = json.Unmarshal(body, &s) + assert.Nil(t, err) + assert.Equal(t, 2, len(s.Splits)) + assert.Equal(t, int64(-1), s.Since) + assert.Equal(t, int64(1), s.Till) + + splitStorage.AssertExpectations(t) + splitFetcher.AssertExpectations(t) } -func TestSplitChangesNonCachedRecipe(t *testing.T) { +func TestSplitChangesOlderSince(t *testing.T) { gin.SetMode(gin.TestMode) + + var splitStorage psmocks.ProxySplitStorageMock + splitStorage.On("ChangesSince", int64(-1), []string(nil)). + Return((*dtos.SplitChangesDTO)(nil), storage.ErrSinceParamTooOld). + Once() + + var splitFetcher splitFetcherMock + splitFetcher.On("Fetch", int64(-1), ref(service.NewFetchOptions(true, nil))). + Return(&dtos.SplitChangesDTO{Since: -1, Till: 1, Splits: []dtos.SplitDTO{{Name: "s1", Status: "ACTIVE"}, {Name: "s2", Status: "ACTIVE"}}}, nil). 
+ Once() + resp := httptest.NewRecorder() ctx, router := gin.CreateTestContext(resp) @@ -86,36 +87,10 @@ func TestSplitChangesNonCachedRecipe(t *testing.T) { group := router.Group("/api") controller := NewSdkServerController( logger, - &mocks.MockSplitFetcher{ - FetchCall: func(changeNumber int64, fetchOptions *service.FetchOptions) (*dtos.SplitChangesDTO, error) { - if changeNumber != -1 { - t.Error("changeNumber should be -1") - } - - return &dtos.SplitChangesDTO{ - Since: -1, - Till: 1, - Splits: []dtos.SplitDTO{ - {Name: "s1"}, - {Name: "s2"}, - }, - }, nil - }, - }, - &psmocks.ProxySplitStorageMock{ - ChangesSinceCall: func(since int64) (*dtos.SplitChangesDTO, error) { - if since != -1 { - t.Error("since should be -1") - } - return nil, storage.ErrSummaryNotCached - }, - RegisterOlderCnCall: func(payload *dtos.SplitChangesDTO) { - if payload.Since != -1 || len(payload.Splits) != 2 { - t.Error("invalid payload passed") - } - }, - }, + &splitFetcher, + &splitStorage, nil, + flagsets.NewMatcher(false, nil), ) controller.Register(group) @@ -126,20 +101,35 @@ func TestSplitChangesNonCachedRecipe(t *testing.T) { ctx.Request.Header.Set("SplitSDKMachineName", "ip-1-2-3-4") router.ServeHTTP(resp, ctx.Request) - if resp.Code != 200 { - t.Error("Status code should be 200 and is ", resp.Code) - } + assert.Equal(t, 200, resp.Code) + + body, err := ioutil.ReadAll(resp.Body) + assert.Nil(t, err) - body, _ := ioutil.ReadAll(resp.Body) var s dtos.SplitChangesDTO - json.Unmarshal(body, &s) - if len(s.Splits) != 2 || s.Since != -1 || s.Till != 1 { - t.Error("wrong payload returned") - } + err = json.Unmarshal(body, &s) + assert.Nil(t, err) + assert.Equal(t, 2, len(s.Splits)) + assert.Equal(t, int64(-1), s.Since) + assert.Equal(t, int64(1), s.Till) + + splitStorage.AssertExpectations(t) + splitFetcher.AssertExpectations(t) } -func TestSplitChangesNonCachedRecipeAndFetchFails(t *testing.T) { +func TestSplitChangesOlderSinceFetchFails(t *testing.T) { gin.SetMode(gin.TestMode) + + var splitStorage psmocks.ProxySplitStorageMock + splitStorage.On("ChangesSince", int64(-1), []string(nil)). + Return((*dtos.SplitChangesDTO)(nil), storage.ErrSinceParamTooOld). + Once() + + var splitFetcher splitFetcherMock + splitFetcher.On("Fetch", int64(-1), ref(service.NewFetchOptions(true, nil))). + Return((*dtos.SplitChangesDTO)(nil), errors.New("something")). 
+ Once() + resp := httptest.NewRecorder() ctx, router := gin.CreateTestContext(resp) @@ -148,28 +138,10 @@ func TestSplitChangesNonCachedRecipeAndFetchFails(t *testing.T) { group := router.Group("/api") controller := NewSdkServerController( logger, - &mocks.MockSplitFetcher{ - FetchCall: func(changeNumber int64, fetchOptions *service.FetchOptions) (*dtos.SplitChangesDTO, error) { - if changeNumber != -1 { - t.Error("changeNumber should be -1") - } - return nil, errors.New("something") - }, - }, - &psmocks.ProxySplitStorageMock{ - ChangesSinceCall: func(since int64) (*dtos.SplitChangesDTO, error) { - if since != -1 { - t.Error("since should be -1") - } - return nil, storage.ErrSummaryNotCached - }, - RegisterOlderCnCall: func(payload *dtos.SplitChangesDTO) { - if payload.Since != -1 || len(payload.Splits) != 2 { - t.Error("invalid payload passed") - } - }, - }, + &splitFetcher, + &splitStorage, nil, + flagsets.NewMatcher(false, nil), ) controller.Register(group) @@ -180,13 +152,22 @@ func TestSplitChangesNonCachedRecipeAndFetchFails(t *testing.T) { ctx.Request.Header.Set("SplitSDKMachineName", "ip-1-2-3-4") router.ServeHTTP(resp, ctx.Request) - if resp.Code != 500 { - t.Error("Status code should be 500 and is ", resp.Code) - } + assert.Equal(t, 500, resp.Code) + + splitStorage.AssertExpectations(t) + splitFetcher.AssertExpectations(t) } -func TestSegmentChanges(t *testing.T) { +func TestSplitChangesWithFlagSets(t *testing.T) { gin.SetMode(gin.TestMode) + + var splitStorage psmocks.ProxySplitStorageMock + splitStorage.On("ChangesSince", int64(-1), []string{"a", "b", "c"}). + Return(&dtos.SplitChangesDTO{Since: -1, Till: 1, Splits: []dtos.SplitDTO{{Name: "s1", Status: "ACTIVE"}, {Name: "s2", Status: "ACTIVE"}}}, nil). + Once() + + var splitFetcher splitFetcherMock + resp := httptest.NewRecorder() ctx, router := gin.CreateTestContext(resp) @@ -195,46 +176,45 @@ func TestSegmentChanges(t *testing.T) { group := router.Group("/api") controller := NewSdkServerController( logger, - &mocks.MockSplitFetcher{}, - &psmocks.ProxySplitStorageMock{}, - &psmocks.ProxySegmentStorageMock{ - ChangesSinceCall: func(name string, since int64) (*dtos.SegmentChangesDTO, error) { - if name != "someSegment" || since != -1 { - t.Error("wrong params") - } - return &dtos.SegmentChangesDTO{ - Name: "someSegment", - Added: []string{"k1", "k2"}, - Removed: []string{}, - Since: -1, - Till: 1, - }, nil - }, - }, + &splitFetcher, + &splitStorage, + nil, + flagsets.NewMatcher(false, nil), ) controller.Register(group) - ctx.Request, _ = http.NewRequest(http.MethodGet, "/api/segmentChanges/someSegment?since=-1", nil) + ctx.Request, _ = http.NewRequest(http.MethodGet, "/api/splitChanges?since=-1&sets=c,b,b,a", nil) ctx.Request.Header.Set("Authorization", "Bearer someApiKey") ctx.Request.Header.Set("SplitSDKVersion", "go-1.1.1") ctx.Request.Header.Set("SplitSDKMachineIp", "1.2.3.4") ctx.Request.Header.Set("SplitSDKMachineName", "ip-1-2-3-4") router.ServeHTTP(resp, ctx.Request) - if resp.Code != 200 { - t.Error("Status code should be 200 and is ", resp.Code) - } + assert.Equal(t, 200, resp.Code) - body, _ := ioutil.ReadAll(resp.Body) - var s dtos.SegmentChangesDTO - json.Unmarshal(body, &s) - if s.Name != "someSegment" || len(s.Added) != 2 || len(s.Removed) != 0 || s.Since != -1 || s.Till != 1 { - t.Error("wrong payload returned") - } + body, err := ioutil.ReadAll(resp.Body) + assert.Nil(t, err) + + var s dtos.SplitChangesDTO + assert.Nil(t, json.Unmarshal(body, &s)) + assert.Equal(t, 2, len(s.Splits)) + assert.Equal(t, int64(-1), 
s.Since) + assert.Equal(t, int64(1), s.Till) + + splitStorage.AssertExpectations(t) + splitFetcher.AssertExpectations(t) } -func TestSegmentChangesNotFound(t *testing.T) { +func TestSplitChangesWithFlagSetsStrict(t *testing.T) { gin.SetMode(gin.TestMode) + + var splitStorage psmocks.ProxySplitStorageMock + splitStorage.On("ChangesSince", int64(-1), []string{"a", "c"}). + Return(&dtos.SplitChangesDTO{Since: -1, Till: 1, Splits: []dtos.SplitDTO{{Name: "s1", Status: "ACTIVE"}, {Name: "s2", Status: "ACTIVE"}}}, nil). + Once() + + var splitFetcher splitFetcherMock + resp := httptest.NewRecorder() ctx, router := gin.CreateTestContext(resp) @@ -243,53 +223,126 @@ func TestSegmentChangesNotFound(t *testing.T) { group := router.Group("/api") controller := NewSdkServerController( logger, - &mocks.MockSplitFetcher{}, - &psmocks.ProxySplitStorageMock{}, - &psmocks.ProxySegmentStorageMock{ - ChangesSinceCall: func(name string, since int64) (*dtos.SegmentChangesDTO, error) { - if name != "someSegment" || since != -1 { - t.Error("wrong params") - } - return nil, storage.ErrSegmentNotFound - }, - }, + &splitFetcher, + &splitStorage, + nil, + flagsets.NewMatcher(true, []string{"a", "c"}), ) controller.Register(group) + ctx.Request, _ = http.NewRequest(http.MethodGet, "/api/splitChanges?since=-1&sets=c,b,b,a", nil) + ctx.Request.Header.Set("Authorization", "Bearer someApiKey") + ctx.Request.Header.Set("SplitSDKVersion", "go-1.1.1") + ctx.Request.Header.Set("SplitSDKMachineIp", "1.2.3.4") + ctx.Request.Header.Set("SplitSDKMachineName", "ip-1-2-3-4") + router.ServeHTTP(resp, ctx.Request) + + assert.Equal(t, 200, resp.Code) + + body, err := ioutil.ReadAll(resp.Body) + assert.Nil(t, err) + + var s dtos.SplitChangesDTO + assert.Nil(t, json.Unmarshal(body, &s)) + assert.Equal(t, 2, len(s.Splits)) + assert.Equal(t, int64(-1), s.Since) + assert.Equal(t, int64(1), s.Till) + + splitStorage.AssertExpectations(t) + splitFetcher.AssertExpectations(t) +} + +func TestSegmentChanges(t *testing.T) { + gin.SetMode(gin.TestMode) + + var splitFetcher splitFetcherMock + var splitStorage psmocks.ProxySplitStorageMock + var segmentStorage psmocks.ProxySegmentStorageMock + segmentStorage.On("ChangesSince", "someSegment", int64(-1)). + Return(&dtos.SegmentChangesDTO{Name: "someSegment", Added: []string{"k1", "k2"}, Removed: []string{}, Since: -1, Till: 1}, nil). 
+ Once() + + resp := httptest.NewRecorder() + ctx, router := gin.CreateTestContext(resp) + + logger := logging.NewLogger(nil) + + group := router.Group("/api") + controller := NewSdkServerController(logger, &splitFetcher, &splitStorage, &segmentStorage, flagsets.NewMatcher(false, nil)) + controller.Register(group) + + ctx.Request, _ = http.NewRequest(http.MethodGet, "/api/segmentChanges/someSegment?since=-1", nil) + ctx.Request.Header.Set("Authorization", "Bearer someApiKey") + ctx.Request.Header.Set("SplitSDKVersion", "go-1.1.1") + ctx.Request.Header.Set("SplitSDKMachineIp", "1.2.3.4") + ctx.Request.Header.Set("SplitSDKMachineName", "ip-1-2-3-4") + router.ServeHTTP(resp, ctx.Request) + + assert.Equal(t, 200, resp.Code) + + body, err := ioutil.ReadAll(resp.Body) + assert.Nil(t, err) + + var s dtos.SegmentChangesDTO + err = json.Unmarshal(body, &s) + assert.Nil(t, err) + + assert.Equal(t, dtos.SegmentChangesDTO{Name: "someSegment", Added: []string{"k1", "k2"}, Removed: []string{}, Since: -1, Till: 1}, s) + + splitStorage.AssertExpectations(t) + splitFetcher.AssertExpectations(t) + segmentStorage.AssertExpectations(t) +} + +func TestSegmentChangesNotFound(t *testing.T) { + gin.SetMode(gin.TestMode) + + var splitFetcher splitFetcherMock + var splitStorage psmocks.ProxySplitStorageMock + var segmentStorage psmocks.ProxySegmentStorageMock + segmentStorage.On("ChangesSince", "someSegment", int64(-1)). + Return((*dtos.SegmentChangesDTO)(nil), storage.ErrSegmentNotFound). + Once() + + resp := httptest.NewRecorder() + ctx, router := gin.CreateTestContext(resp) + + logger := logging.NewLogger(nil) + + group := router.Group("/api") + controller := NewSdkServerController(logger, &splitFetcher, &splitStorage, &segmentStorage, flagsets.NewMatcher(false, nil)) + controller.Register(group) + ctx.Request, _ = http.NewRequest(http.MethodGet, "/api/segmentChanges/someSegment?since=-1", nil) ctx.Request.Header.Set("Authorization", "Bearer someApiKey") ctx.Request.Header.Set("SplitSDKVersion", "go-1.1.1") ctx.Request.Header.Set("SplitSDKMachineIp", "1.2.3.4") ctx.Request.Header.Set("SplitSDKMachineName", "ip-1-2-3-4") router.ServeHTTP(resp, ctx.Request) + assert.Equal(t, 404, resp.Code) - if resp.Code != 404 { - t.Error("Status code should be 404 and is ", resp.Code) - } + splitStorage.AssertExpectations(t) + splitFetcher.AssertExpectations(t) + segmentStorage.AssertExpectations(t) } func TestMySegments(t *testing.T) { gin.SetMode(gin.TestMode) + + var splitFetcher splitFetcherMock + var splitStorage psmocks.ProxySplitStorageMock + var segmentStorage psmocks.ProxySegmentStorageMock + segmentStorage.On("SegmentsFor", "someKey"). + Return([]string{"segment1", "segment2"}, nil). 
+ Once() + resp := httptest.NewRecorder() ctx, router := gin.CreateTestContext(resp) logger := logging.NewLogger(nil) group := router.Group("/api") - controller := NewSdkServerController( - logger, - &mocks.MockSplitFetcher{}, - &psmocks.ProxySplitStorageMock{}, - &psmocks.ProxySegmentStorageMock{ - SegmentsForCall: func(key string) ([]string, error) { - if key != "someKey" { - t.Error("wrong key") - } - - return []string{"segment1", "segment2"}, nil - }, - }, - ) + controller := NewSdkServerController(logger, &splitFetcher, &splitStorage, &segmentStorage, flagsets.NewMatcher(false, nil)) controller.Register(group) ctx.Request, _ = http.NewRequest(http.MethodGet, "/api/mySegments/someKey", nil) @@ -298,46 +351,39 @@ func TestMySegments(t *testing.T) { ctx.Request.Header.Set("SplitSDKMachineIp", "1.2.3.4") ctx.Request.Header.Set("SplitSDKMachineName", "ip-1-2-3-4") router.ServeHTTP(resp, ctx.Request) + assert.Equal(t, 200, resp.Code) - if resp.Code != 200 { - t.Error("Status code should be 200 and is ", resp.Code) - } - - type MSC struct { - MySegments []dtos.MySegmentDTO `json:"mySegments"` - } + body, err := ioutil.ReadAll(resp.Body) + assert.Nil(t, err) - body, _ := ioutil.ReadAll(resp.Body) var ms MSC - json.Unmarshal(body, &ms) - s := ms.MySegments - if len(s) != 2 || s[0].Name != "segment1" || s[1].Name != "segment2" { - t.Error("invalid payload", s) - } + err = json.Unmarshal(body, &ms) + assert.Nil(t, err) + + assert.Equal(t, MSC{MySegments: []dtos.MySegmentDTO{{Name: "segment1"}, {Name: "segment2"}}}, ms) + + splitStorage.AssertExpectations(t) + splitFetcher.AssertExpectations(t) + segmentStorage.AssertExpectations(t) } func TestMySegmentsError(t *testing.T) { gin.SetMode(gin.TestMode) + + var splitFetcher splitFetcherMock + var splitStorage psmocks.ProxySplitStorageMock + var segmentStorage psmocks.ProxySegmentStorageMock + segmentStorage.On("SegmentsFor", "someKey"). + Return([]string(nil), errors.New("something")). 
+ Once() + resp := httptest.NewRecorder() ctx, router := gin.CreateTestContext(resp) logger := logging.NewLogger(nil) group := router.Group("/api") - controller := NewSdkServerController( - logger, - &mocks.MockSplitFetcher{}, - &psmocks.ProxySplitStorageMock{}, - &psmocks.ProxySegmentStorageMock{ - SegmentsForCall: func(key string) ([]string, error) { - if key != "someKey" { - t.Error("wrong key") - } - - return nil, errors.New("something") - }, - }, - ) + controller := NewSdkServerController(logger, &splitFetcher, &splitStorage, &segmentStorage, flagsets.NewMatcher(false, nil)) controller.Register(group) ctx.Request, _ = http.NewRequest(http.MethodGet, "/api/mySegments/someKey", nil) @@ -346,8 +392,29 @@ func TestMySegmentsError(t *testing.T) { ctx.Request.Header.Set("SplitSDKMachineIp", "1.2.3.4") ctx.Request.Header.Set("SplitSDKMachineName", "ip-1-2-3-4") router.ServeHTTP(resp, ctx.Request) + assert.Equal(t, 500, resp.Code) + + splitStorage.AssertExpectations(t) + splitFetcher.AssertExpectations(t) + segmentStorage.AssertExpectations(t) +} + +type splitFetcherMock struct { + mock.Mock +} + +// Fetch implements service.SplitFetcher +func (s *splitFetcherMock) Fetch(changeNumber int64, fetchOptions *service.FetchOptions) (*dtos.SplitChangesDTO, error) { + args := s.Called(changeNumber, fetchOptions) + return args.Get(0).(*dtos.SplitChangesDTO), args.Error(1) +} - if resp.Code != 500 { - t.Error("Status code should be 500 and is ", resp.Code) - } +func ref[T any](t T) *T { + return &t } + +type MSC struct { + MySegments []dtos.MySegmentDTO `json:"mySegments"` +} + +var _ service.SplitFetcher = (*splitFetcherMock)(nil) diff --git a/splitio/proxy/flagsets/flagsets.go b/splitio/proxy/flagsets/flagsets.go new file mode 100644 index 00000000..52115660 --- /dev/null +++ b/splitio/proxy/flagsets/flagsets.go @@ -0,0 +1,48 @@ +package flagsets + +import "golang.org/x/exp/slices" + +type FlagSetMatcher struct { + strict bool + sets map[string]struct{} +} + +func NewMatcher(strict bool, fetched []string) FlagSetMatcher { + out := FlagSetMatcher{ + strict: strict, + sets: make(map[string]struct{}, len(fetched)), + } + + for idx := range fetched { + out.sets[fetched[idx]] = struct{}{} + } + + return out +} + +// Sort, Dedupe & Filter input flagsets. 
Returns the sanitized list.
+func (f *FlagSetMatcher) Sanitize(input []string) []string {
+	if len(input) == 0 {
+		return input
+	}
+
+	seen := map[string]struct{}{}
+	for idx := 0; idx < len(input); idx++ { // can't use range because we're shrinking the slice inside the loop
+		item := input[idx]
+		if (f.strict && !setContains(f.sets, item)) || setContains(seen, item) {
+			if idx+1 < len(input) {
+				input[idx] = input[len(input)-1]
+			}
+			input = input[:len(input)-1]
+		}
+		seen[item] = struct{}{}
+	}
+
+	slices.Sort(input)
+	return input
+}
+
+func setContains(set map[string]struct{}, item string) bool {
+	_, ok := set[item]
+	return ok
+}
diff --git a/splitio/proxy/flagsets/flagsets_test.go b/splitio/proxy/flagsets/flagsets_test.go
new file mode 100644
index 00000000..2059712f
--- /dev/null
+++ b/splitio/proxy/flagsets/flagsets_test.go
@@ -0,0 +1,21 @@
+package flagsets
+
+import (
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+)
+
+func TestFlagSetsMatcher(t *testing.T) {
+
+	m := NewMatcher(false, []string{"s1", "s2", "s3"})
+	assert.Equal(t, []string{"s1", "s2", "s3"}, m.Sanitize([]string{"s1", "s2", "s3"}))
+	assert.Equal(t, []string{"s1", "s2"}, m.Sanitize([]string{"s1", "s2"}))
+	assert.Equal(t, []string{"s4"}, m.Sanitize([]string{"s4"}))
+
+	m = NewMatcher(true, []string{"s1", "s2", "s3"})
+	assert.Equal(t, []string{"s1", "s2", "s3"}, m.Sanitize([]string{"s1", "s2", "s3"}))
+	assert.Equal(t, []string{"s1", "s2"}, m.Sanitize([]string{"s1", "s2"}))
+	assert.Equal(t, []string{"s1", "s2"}, m.Sanitize([]string{"s1", "s2", "s7"}))
+	assert.Equal(t, []string{}, m.Sanitize([]string{"s4"}))
+}
diff --git a/splitio/proxy/initialization.go b/splitio/proxy/initialization.go
index 8b4ec7f5..80254925 100644
--- a/splitio/proxy/initialization.go
+++ b/splitio/proxy/initialization.go
@@ -10,6 +10,7 @@ import (
 	"strings"
 
 	"github.com/splitio/go-split-commons/v5/conf"
+	"github.com/splitio/go-split-commons/v5/flagsets"
 	"github.com/splitio/go-split-commons/v5/service/api"
 	"github.com/splitio/go-split-commons/v5/synchronizer"
 	"github.com/splitio/go-split-commons/v5/tasks"
@@ -70,13 +71,17 @@ func Start(logger logging.LoggerInterface, cfg *pconf.Main) error {
 
 	// Getting initial config data
 	advanced := cfg.BuildAdvancedConfig()
+	advanced.FlagSetsFilter = cfg.FlagSetsFilter
 	metadata := util.GetMetadata(cfg.IPAddressEnabled, true)
 
+	// FlagSetsFilter
+	flagSetsFilter := flagsets.NewFlagSetFilter(cfg.FlagSetsFilter)
+
 	// Setup fetchers & recorders
 	splitAPI := api.NewSplitAPI(cfg.Apikey, *advanced, logger, metadata)
 
 	// Proxy storages already implement the observable interface, so no need to wrap them
-	splitStorage := storage.NewProxySplitStorage(dbInstance, logger, cfg.Initialization.Snapshot != "")
+	splitStorage := storage.NewProxySplitStorage(dbInstance, logger, flagsets.NewFlagSetFilter(cfg.FlagSetsFilter), cfg.Initialization.Snapshot != "")
 	segmentStorage := storage.NewProxySegmentStorage(dbInstance, logger, cfg.Initialization.Snapshot != "")
 
 	// Local telemetry
@@ -112,7 +117,7 @@ func Start(logger logging.LoggerInterface, cfg *pconf.Main) error {
 
 	// setup feature flags, segments & local telemetry API interactions
 	workers := synchronizer.Workers{
-		SplitUpdater: caching.NewCacheAwareSplitSync(splitStorage, splitAPI.SplitFetcher, logger, localTelemetryStorage, httpCache, appMonitor),
+		SplitUpdater: caching.NewCacheAwareSplitSync(splitStorage, splitAPI.SplitFetcher, logger, localTelemetryStorage, httpCache, appMonitor, flagSetsFilter),
+ SegmentUpdater: 
caching.NewCacheAwareSegmentSync(splitStorage, segmentStorage, splitAPI.SegmentFetcher, logger, localTelemetryStorage, httpCache, appMonitor), TelemetryRecorder: telemetry.NewTelemetrySynchronizer(localTelemetryStorage, telemetryRecorder, splitStorage, segmentStorage, logger, @@ -160,6 +165,7 @@ func Start(logger logging.LoggerInterface, cfg *pconf.Main) error { logger.Info("Synchronizer tasks started") appMonitor.Start() servicesMonitor.Start() + flagSetsAfterSanitize, _ := flagsets.SanitizeMany(cfg.FlagSetsFilter) workers.TelemetryRecorder.SynchronizeConfig( telemetry.InitConfig{ AdvancedConfig: *advanced, @@ -169,6 +175,8 @@ func Start(logger logging.LoggerInterface, cfg *pconf.Main) error { TelemetrySync: int(cfg.Sync.Advanced.InternalMetricsRateMs / 1000), }, ListenerEnabled: cfg.Integrations.ImpressionListener.Endpoint != "", + FlagSetsTotal: int64(len(cfg.FlagSetsFilter)), + FlagSetsInvalid: int64(len(cfg.FlagSetsFilter) - len(flagSetsAfterSanitize)), }, time.Since(before).Milliseconds(), map[string]int64{cfg.Apikey: 1}, @@ -245,6 +253,8 @@ func Start(logger logging.LoggerInterface, cfg *pconf.Main) error { Telemetry: localTelemetryStorage, Cache: httpCache, TLSConfig: tlsConfig, + FlagSets: cfg.FlagSetsFilter, + FlagSetsStrictMatching: cfg.FlagSetStrictMatching, } if ilcfg := cfg.Integrations.ImpressionListener; ilcfg.Endpoint != "" { diff --git a/splitio/proxy/proxy.go b/splitio/proxy/proxy.go index f35dac9e..4aab8ea5 100644 --- a/splitio/proxy/proxy.go +++ b/splitio/proxy/proxy.go @@ -11,6 +11,7 @@ import ( "github.com/splitio/split-synchronizer/v5/splitio/common/impressionlistener" "github.com/splitio/split-synchronizer/v5/splitio/proxy/controllers" "github.com/splitio/split-synchronizer/v5/splitio/proxy/controllers/middleware" + "github.com/splitio/split-synchronizer/v5/splitio/proxy/flagsets" "github.com/splitio/split-synchronizer/v5/splitio/proxy/storage" "github.com/splitio/split-synchronizer/v5/splitio/proxy/tasks" @@ -78,6 +79,10 @@ type Options struct { // Proxy TLS configuration TLSConfig *tls.Config + + FlagSets []string + + FlagSetsStrictMatching bool } // API bundles all components required to answer API calls from Split sdks @@ -154,6 +159,7 @@ func setupSdkController(options *Options) *controllers.SdkServerController { options.SplitFetcher, options.ProxySplitStorage, options.ProxySegmentStorage, + flagsets.NewMatcher(options.FlagSetsStrictMatching, options.FlagSets), ) } diff --git a/splitio/proxy/proxy_test.go b/splitio/proxy/proxy_test.go index 1cf9c841..5459081d 100644 --- a/splitio/proxy/proxy_test.go +++ b/splitio/proxy/proxy_test.go @@ -6,7 +6,6 @@ import ( "io/ioutil" "math/rand" "net/http" - "sync/atomic" "testing" "time" @@ -18,110 +17,142 @@ import ( "github.com/splitio/split-synchronizer/v5/splitio/proxy/storage" pstorageMocks "github.com/splitio/split-synchronizer/v5/splitio/proxy/storage/mocks" taskMocks "github.com/splitio/split-synchronizer/v5/splitio/proxy/tasks/mocks" + "github.com/stretchr/testify/assert" ) func TestSplitChangesEndpoints(t *testing.T) { opts := makeOpts() - var changesSinceCalls int64 = 0 - opts.ProxySplitStorage = &pstorageMocks.ProxySplitStorageMock{ - ChangesSinceCall: func(since int64) (*dtos.SplitChangesDTO, error) { - atomic.AddInt64(&changesSinceCalls, 1) - return &dtos.SplitChangesDTO{ - Since: since, - Till: changesSinceCalls, - Splits: []dtos.SplitDTO{{Name: fmt.Sprintf("split%d", changesSinceCalls)}}, - }, nil - }, - } + var splitStorage pstorageMocks.ProxySplitStorageMock + opts.ProxySplitStorage = &splitStorage proxy 
:= New(opts) go proxy.Start() time.Sleep(1 * time.Second) // Let the scheduler switch the current thread/gr and start the server // Test that a request without auth fails and is not cached status, _, _ := get("splitChanges?since=-1", opts.Port, nil) - if status != 401 { - t.Error("status should be 401. Is", status) - } + assert.Equal(t, 401, status) - if c := atomic.LoadInt64(&changesSinceCalls); c != 0 { - t.Error("auth middleware should have filtered this. expected 0 calls to handler. got: ", c) - } + splitStorage.On("ChangesSince", int64(-1), []string(nil)). + Return(&dtos.SplitChangesDTO{Since: -1, Till: 1, Splits: []dtos.SplitDTO{{Name: "split1"}}}, nil). + Once() // Make a proper request - _, body, headers := get("splitChanges?since=-1", opts.Port, map[string]string{"Authorization": "Bearer someApiKey"}) + status, body, headers := get("splitChanges?since=-1", opts.Port, map[string]string{"Authorization": "Bearer someApiKey"}) + assert.Equal(t, 200, status) + changes := toSplitChanges(body) - if changes.Till != 1 { - t.Error("wrong till: ", changes.Till) - } + assert.Equal(t, int64(-1), changes.Since) + assert.Equal(t, int64(1), changes.Till) + assert.Equal(t, "split1", changes.Splits[0].Name) + assert.Equal(t, "application/json; charset=utf-8", headers.Get("Content-Type")) - if changes.Splits[0].Name != "split1" { - t.Error("wrong split name") - } + // Make another request, check we get the same response and the call count isn't incremented (cache is working) + // Make a proper request + status, body, headers = get("splitChanges?since=-1", opts.Port, map[string]string{"Authorization": "Bearer someApiKey"}) + assert.Equal(t, 200, status) - if ce := headers.Get("Content-Type"); ce != "application/json; charset=utf-8" { - t.Error("wrong content type: ", ce) - } + changes = toSplitChanges(body) + assert.Equal(t, int64(-1), changes.Since) + assert.Equal(t, int64(1), changes.Till) + assert.Equal(t, "split1", changes.Splits[0].Name) + assert.Equal(t, "application/json; charset=utf-8", headers.Get("Content-Type")) - if c := atomic.LoadInt64(&changesSinceCalls); c != 1 { - t.Error("endpoint handler should have 1 call. has ", c) - } + // Lets evict the key (simulating a change in splits and re-check) + splitStorage.On("ChangesSince", int64(-1), []string(nil)). + Return(&dtos.SplitChangesDTO{Since: -1, Till: 2, Splits: []dtos.SplitDTO{{Name: "split2"}}}, nil). 
+ Once() + + opts.Cache.EvictBySurrogate(caching.SplitSurrogate) - // Make another request, check we get the same response and the call count isn't incremented (cache is working) _, body, headers = get("splitChanges?since=-1", opts.Port, map[string]string{"Authorization": "Bearer someApiKey"}) changes = toSplitChanges(body) - if changes.Till != 1 { - t.Error("wrong till: ", changes.Till) - } + assert.Equal(t, int64(-1), changes.Since) + assert.Equal(t, int64(2), changes.Till) + assert.Equal(t, "split2", changes.Splits[0].Name) + assert.Equal(t, "application/json; charset=utf-8", headers.Get("Content-Type")) - if changes.Splits[0].Name != "split1" { - t.Error("wrong split name") - } +} - if ce := headers.Get("Content-Type"); ce != "application/json; charset=utf-8" { - t.Error("wrong content type: ", ce) - } +func TestSplitChangesWithFlagsetsCaching(t *testing.T) { + opts := makeOpts() + var splitStorage pstorageMocks.ProxySplitStorageMock + opts.ProxySplitStorage = &splitStorage + proxy := New(opts) + go proxy.Start() + time.Sleep(1 * time.Second) // Let the scheduler switch the current thread/gr and start the server - if c := atomic.LoadInt64(&changesSinceCalls); c != 1 { - t.Error("endpoint handler should have 1 call. has ", c) - } + splitStorage.On("ChangesSince", int64(-1), []string{"set1", "set2"}). + Return(&dtos.SplitChangesDTO{Since: -1, Till: 1, Splits: []dtos.SplitDTO{{Name: "split1"}}}, nil). + Once() + + // Make a proper request + status, body, headers := get("splitChanges?since=-1&sets=set2,set1", opts.Port, map[string]string{"Authorization": "Bearer someApiKey"}) + assert.Equal(t, 200, status) + + changes := toSplitChanges(body) + assert.Equal(t, int64(-1), changes.Since) + assert.Equal(t, int64(1), changes.Till) + assert.Equal(t, "split1", changes.Splits[0].Name) + assert.Equal(t, "application/json; charset=utf-8", headers.Get("Content-Type")) + + // Make another request, check we get the same response and the call count isn't incremented (cache is working) + status, body, headers = get("splitChanges?since=-1&sets=set2,set1", opts.Port, map[string]string{"Authorization": "Bearer someApiKey"}) + assert.Equal(t, 200, status) - // Lets evict the key (simulating a change in splits and re-check) - opts.Cache.EvictBySurrogate(caching.SplitSurrogate) - _, body, headers = get("splitChanges?since=-1", opts.Port, map[string]string{"Authorization": "Bearer someApiKey"}) changes = toSplitChanges(body) - if changes.Till != 2 { - t.Error("wrong till: ", changes.Till) - } + assert.Equal(t, int64(-1), changes.Since) + assert.Equal(t, int64(1), changes.Till) + assert.Equal(t, "split1", changes.Splits[0].Name) + assert.Equal(t, "application/json; charset=utf-8", headers.Get("Content-Type")) - if changes.Splits[0].Name != "split2" { - t.Error("wrong split name") - } + // Make another request, with different flagsets. storage should be hit again + splitStorage.On("ChangesSince", int64(-1), []string{"set1", "set2", "set3"}). + Return(&dtos.SplitChangesDTO{Since: -1, Till: 1, Splits: []dtos.SplitDTO{{Name: "split1"}}}, nil). + Once() - if ce := headers.Get("Content-Type"); ce != "application/json; charset=utf-8" { - t.Error("wrong content type: ", ce) - } + status, body, headers = get("splitChanges?since=-1&sets=set2,set1,set3", opts.Port, map[string]string{"Authorization": "Bearer someApiKey"}) + assert.Equal(t, 200, status) - if c := atomic.LoadInt64(&changesSinceCalls); c != 2 { - t.Error("endpoint handler should have 2 call. 
has ", c) - } + changes = toSplitChanges(body) + assert.Equal(t, int64(-1), changes.Since) + assert.Equal(t, int64(1), changes.Till) + assert.Equal(t, "split1", changes.Splits[0].Name) + assert.Equal(t, "application/json; charset=utf-8", headers.Get("Content-Type")) + + // Flush the cache, reset expectations, and retry the requests to make sure mocks are called again + opts.Cache.EvictBySurrogate(caching.SplitSurrogate) + + splitStorage.On("ChangesSince", int64(-1), []string{"set1", "set2"}). + Return(&dtos.SplitChangesDTO{Since: -1, Till: 1, Splits: []dtos.SplitDTO{{Name: "split1"}}}, nil). + Once() + + splitStorage.On("ChangesSince", int64(-1), []string{"set1", "set2", "set3"}). + Return(&dtos.SplitChangesDTO{Since: -1, Till: 1, Splits: []dtos.SplitDTO{{Name: "split1"}}}, nil). + Once() + + status, body, headers = get("splitChanges?since=-1&sets=set2,set1", opts.Port, map[string]string{"Authorization": "Bearer someApiKey"}) + assert.Equal(t, 200, status) + changes = toSplitChanges(body) + assert.Equal(t, int64(-1), changes.Since) + assert.Equal(t, int64(1), changes.Till) + assert.Equal(t, "split1", changes.Splits[0].Name) + assert.Equal(t, "application/json; charset=utf-8", headers.Get("Content-Type")) + + status, body, headers = get("splitChanges?since=-1&sets=set2,set1,set3", opts.Port, map[string]string{"Authorization": "Bearer someApiKey"}) + assert.Equal(t, 200, status) + changes = toSplitChanges(body) + assert.Equal(t, int64(-1), changes.Since) + assert.Equal(t, int64(1), changes.Till) + assert.Equal(t, "split1", changes.Splits[0].Name) + assert.Equal(t, "application/json; charset=utf-8", headers.Get("Content-Type")) } func TestSegmentChangesAndMySegmentsEndpoints(t *testing.T) { + + var segmentStorage pstorageMocks.ProxySegmentStorageMock + opts := makeOpts() - var changesSinceCalls int64 = 0 - var mySegmentsCalls int64 = 0 - var changesToReturn atomic.Value - var segmentsForToReturn atomic.Value - opts.ProxySegmentStorage = &pstorageMocks.ProxySegmentStorageMock{ - ChangesSinceCall: func(name string, since int64) (*dtos.SegmentChangesDTO, error) { - atomic.AddInt64(&changesSinceCalls, 1) - return changesToReturn.Load().(*dtos.SegmentChangesDTO), nil - }, - SegmentsForCall: func(key string) ([]string, error) { - atomic.AddInt64(&mySegmentsCalls, 1) - return segmentsForToReturn.Load().([]string), nil - }, - } + opts.ProxySegmentStorage = &segmentStorage proxy := New(opts) go proxy.Start() time.Sleep(1 * time.Second) // Let the scheduler switch the current thread/gr and start the server @@ -132,129 +163,77 @@ func TestSegmentChangesAndMySegmentsEndpoints(t *testing.T) { t.Error("status should be 401. Is", status) } - if c := atomic.LoadInt64(&changesSinceCalls); c != 0 { - t.Error("auth middleware should have filtered this. expected 0 calls to handler. got: ", c) - } - // Same for mySegments status, _, _ = get("mySegments/k1", opts.Port, nil) if status != 401 { t.Error("status should be 401. Is", status) } - if c := atomic.LoadInt64(&mySegmentsCalls); c != 0 { - t.Error("auth middleware should have filtered this. expected 0 calls to handler. 
got: ", c) - } - // Set up a response and make a proper request for segmentChanges - changesToReturn.Store(&dtos.SegmentChangesDTO{Since: -1, Till: 1, Name: "segment1", Added: []string{"k1"}, Removed: nil}) - _, body, headers := get("segmentChanges/segment1?since=-1", opts.Port, map[string]string{"Authorization": "Bearer someApiKey"}) - changes := toSegmentChanges(body) - if changes.Till != 1 { - t.Error("wrong till: ", changes.Till) - } - - if changes.Name != "segment1" { - t.Error("wrong segment name") - } + segmentStorage.On("ChangesSince", "segment1", int64(-1)). + Return(&dtos.SegmentChangesDTO{Since: -1, Till: 1, Name: "segment1", Added: []string{"k1"}, Removed: nil}, nil). + Once() - if ce := headers.Get("Content-Type"); ce != "application/json; charset=utf-8" { - t.Error("wrong content type: ", ce) - } - - if c := atomic.LoadInt64(&changesSinceCalls); c != 1 { - t.Error("endpoint handler should have 1 call. has ", c) - } + status, body, headers := get("segmentChanges/segment1?since=-1", opts.Port, map[string]string{"Authorization": "Bearer someApiKey"}) + changes := toSegmentChanges(body) + assert.Equal(t, 200, status) + assert.Equal(t, int64(-1), changes.Since) + assert.Equal(t, int64(1), changes.Till) + assert.Equal(t, "segment1", changes.Name) + assert.Equal(t, []string{"k1"}, changes.Added) + assert.Equal(t, "application/json; charset=utf-8", headers.Get("Content-Type")) // Same for mysegments - segmentsForToReturn.Store([]string{"segment1"}) - _, body, headers = get("mySegments/k1", opts.Port, map[string]string{"Authorization": "Bearer someApiKey"}) + segmentStorage.On("SegmentsFor", "k1").Return([]string{"segment1"}, nil).Once() + status, body, headers = get("mySegments/k1", opts.Port, map[string]string{"Authorization": "Bearer someApiKey"}) segments := toMySegments(body) - if segments[0].Name != "segment1" { - t.Error("wrong segment: ", segments[0]) - } - - if ce := headers.Get("Content-Type"); ce != "application/json; charset=utf-8" { - t.Error("wrong content type: ", ce) - } - - if c := atomic.LoadInt64(&mySegmentsCalls); c != 1 { - t.Error("endpoint handler should have 1 call. has ", c) - } + assert.Equal(t, 200, status) + assert.Equal(t, []dtos.MySegmentDTO{{Name: "segment1"}}, segments) + assert.Equal(t, "application/json; charset=utf-8", headers.Get("Content-Type")) // Update the response, make another request and check we get the same response and the call count isn't incremented (cache is working) - changesToReturn.Store(&dtos.SegmentChangesDTO{Since: -1, Till: 2, Name: "segment1", Added: []string{"k2"}, Removed: nil}) - _, body, headers = get("segmentChanges/segment1?since=-1", opts.Port, map[string]string{"Authorization": "Bearer someApiKey"}) - changes = toSegmentChanges(body) - if changes.Till != 1 { - t.Error("wrong till: ", changes.Till) - } + segmentStorage.On("ChangesSince", "segment1", int64(-1)). + Return(&dtos.SegmentChangesDTO{Since: -1, Till: 2, Name: "segment1", Added: []string{"k2"}, Removed: nil}, nil). + Once() - if changes.Name != "segment1" { - t.Error("wrong segment name") - } - - if ce := headers.Get("Content-Type"); ce != "application/json; charset=utf-8" { - t.Error("wrong content type: ", ce) - } - - if c := atomic.LoadInt64(&changesSinceCalls); c != 1 { - t.Error("endpoint handler should have 1 call. 
has ", c) - } + status, body, headers = get("segmentChanges/segment1?since=-1", opts.Port, map[string]string{"Authorization": "Bearer someApiKey"}) + changes = toSegmentChanges(body) + assert.Equal(t, 200, status) + assert.Equal(t, int64(-1), changes.Since) + assert.Equal(t, int64(1), changes.Till) + assert.Equal(t, "segment1", changes.Name) + assert.Equal(t, []string{"k1"}, changes.Added) + assert.Equal(t, "application/json; charset=utf-8", headers.Get("Content-Type")) // Same for mysegments - segmentsForToReturn.Store([]string{}) - _, body, headers = get("mySegments/k1", opts.Port, map[string]string{"Authorization": "Bearer someApiKey"}) + segmentStorage.On("SegmentsFor", "k1").Return([]string{}, nil).Once() + status, body, headers = get("mySegments/k1", opts.Port, map[string]string{"Authorization": "Bearer someApiKey"}) segments = toMySegments(body) - if segments[0].Name != "segment1" { - t.Error("wrong segment: ", segments[0]) - } - - if ce := headers.Get("Content-Type"); ce != "application/json; charset=utf-8" { - t.Error("wrong content type: ", ce) - } - - if c := atomic.LoadInt64(&mySegmentsCalls); c != 1 { - t.Error("endpoint handler should have 1 call. has ", c) - } + assert.Equal(t, 200, status) + assert.Equal(t, []dtos.MySegmentDTO{{Name: "segment1"}}, segments) + assert.Equal(t, "application/json; charset=utf-8", headers.Get("Content-Type")) // Lets evict the key (simulating a change in segment1 and re-check) opts.Cache.EvictBySurrogate(caching.MakeSurrogateForSegmentChanges("segment1")) - _, body, headers = get("segmentChanges/segment1?since=-1", opts.Port, map[string]string{"Authorization": "Bearer someApiKey"}) + status, body, headers = get("segmentChanges/segment1?since=-1", opts.Port, map[string]string{"Authorization": "Bearer someApiKey"}) changes = toSegmentChanges(body) - if changes.Till != 2 { - t.Error("wrong till: ", changes.Till) - } - - if changes.Name != "segment1" { - t.Error("wrong segment name") - } - - if ce := headers.Get("Content-Type"); ce != "application/json; charset=utf-8" { - t.Error("wrong content type: ", ce) - } - - if c := atomic.LoadInt64(&changesSinceCalls); c != 2 { - t.Error("endpoint handler should have 2 call. has ", c) - } + assert.Equal(t, 200, status) + assert.Equal(t, int64(-1), changes.Since) + assert.Equal(t, int64(2), changes.Till) + assert.Equal(t, "segment1", changes.Name) + assert.Equal(t, []string{"k2"}, changes.Added) + assert.Equal(t, "application/json; charset=utf-8", headers.Get("Content-Type")) // Same for mysegments entries := caching.MakeMySegmentsEntries("k1") opts.Cache.Evict(entries[0]) opts.Cache.Evict(entries[1]) - _, body, headers = get("mySegments/k1", opts.Port, map[string]string{"Authorization": "Bearer someApiKey"}) + segmentStorage.On("SegmentsFor", "k1").Return([]string{}, nil).Once() + status, body, headers = get("mySegments/k1", opts.Port, map[string]string{"Authorization": "Bearer someApiKey"}) segments = toMySegments(body) - if len(segments) != 0 { - t.Error("wrong segment: ", segments) - } - - if ce := headers.Get("Content-Type"); ce != "application/json; charset=utf-8" { - t.Error("wrong content type: ", ce) - } - - if c := atomic.LoadInt64(&mySegmentsCalls); c != 2 { - t.Error("endpoint handler should have 2 call. 
has ", c) - } + assert.Equal(t, 200, status) + assert.Equal(t, []dtos.MySegmentDTO{}, segments) + assert.Equal(t, "application/json; charset=utf-8", headers.Get("Content-Type")) } func makeOpts() *Options { diff --git a/splitio/proxy/storage/mocks/mocks.go b/splitio/proxy/storage/mocks/mocks.go index 2134054c..1228cc35 100644 --- a/splitio/proxy/storage/mocks/mocks.go +++ b/splitio/proxy/storage/mocks/mocks.go @@ -2,35 +2,36 @@ package mocks import ( "github.com/splitio/go-split-commons/v5/dtos" + "github.com/stretchr/testify/mock" ) type ProxySplitStorageMock struct { - ChangesSinceCall func(since int64) (*dtos.SplitChangesDTO, error) - RegisterOlderCnCall func(payload *dtos.SplitChangesDTO) + mock.Mock } -func (p *ProxySplitStorageMock) ChangesSince(since int64) (*dtos.SplitChangesDTO, error) { - return p.ChangesSinceCall(since) +func (p *ProxySplitStorageMock) ChangesSince(since int64, sets []string) (*dtos.SplitChangesDTO, error) { + args := p.Called(since, sets) + return args.Get(0).(*dtos.SplitChangesDTO), args.Error(1) } func (p *ProxySplitStorageMock) RegisterOlderCn(payload *dtos.SplitChangesDTO) { - p.RegisterOlderCnCall(payload) + p.Called(payload) } type ProxySegmentStorageMock struct { - ChangesSinceCall func(name string, since int64) (*dtos.SegmentChangesDTO, error) - SegmentsForCall func(key string) ([]string, error) - CountRemovedKeysCall func(segmentName string) int + mock.Mock } func (p *ProxySegmentStorageMock) ChangesSince(name string, since int64) (*dtos.SegmentChangesDTO, error) { - return p.ChangesSinceCall(name, since) + args := p.Called(name, since) + return args.Get(0).(*dtos.SegmentChangesDTO), args.Error(1) } func (p *ProxySegmentStorageMock) SegmentsFor(key string) ([]string, error) { - return p.SegmentsForCall(key) + args := p.Called(key) + return args.Get(0).([]string), args.Error(1) } func (p *ProxySegmentStorageMock) CountRemovedKeys(segmentName string) int { - return p.CountRemovedKeysCall(segmentName) + return p.Called(segmentName).Int(0) } diff --git a/splitio/proxy/storage/optimized/changesummary.go b/splitio/proxy/storage/optimized/changesummary.go deleted file mode 100644 index d34cbd03..00000000 --- a/splitio/proxy/storage/optimized/changesummary.go +++ /dev/null @@ -1,229 +0,0 @@ -package optimized - -import ( - "errors" - "math" - "sync" - - "github.com/splitio/go-split-commons/v5/dtos" -) - -// ErrUnknownChangeNumber is returned when trying to fetch a recipe for a change number not present in cache -var ErrUnknownChangeNumber = errors.New("Unknown change number") - -// SplitMinimalView is a subset of feature flag properties needed by an sdk to remove a feature flag from it's local cache -type SplitMinimalView struct { - Name string - TrafficType string -} - -// ChangeSummary represents a set of changes from/in a specific point in time -type ChangeSummary struct { - Updated map[string]string // feature flag name -> trafficType - Removed map[string]string // feature flag name -> trafficType - Current *splitSet // list of splits originally available at this point in time -} - -func newEmptyChangeSummary(ss *splitSet) ChangeSummary { - if ss == nil { - ss = newSplitSet() - } - return ChangeSummary{Updated: map[string]string{}, Removed: map[string]string{}, Current: ss} -} - -func (c *ChangeSummary) applyChange(toAdd []SplitMinimalView, toRemove []SplitMinimalView) { - for _, split := range toAdd { - delete(c.Removed, split.Name) - c.Updated[split.Name] = split.TrafficType - } - - for _, split := range toRemove { - if _, ok := c.Updated[split.Name]; ok 
{ - delete(c.Updated, split.Name) - } - - if c.Current.contains(split.Name) { - c.Removed[split.Name] = split.TrafficType - } - } -} - -// SplitChangesSummaries keeps a set of recipes that allow an sdk to fetch from any known changeNumber -// up to the latest snapshot. -type SplitChangesSummaries struct { - maxRecipes int - currentCN int64 - changes map[int64]ChangeSummary - mutex sync.RWMutex -} - -// NewSplitChangesSummaries constructs a SplitChangesSummaries component -func NewSplitChangesSummaries(maxRecipes int) *SplitChangesSummaries { - return &SplitChangesSummaries{ - maxRecipes: maxRecipes + 1, // we keep an extra slot for -1 which is fixed - currentCN: -1, - changes: map[int64]ChangeSummary{-1: newEmptyChangeSummary(nil)}, - } -} - -// AddChanges registers a new set of changes and updates all the recipes accordingly -func (s *SplitChangesSummaries) AddChanges(added []dtos.SplitDTO, removed []dtos.SplitDTO, cn int64) { - s.mutex.Lock() - defer s.mutex.Unlock() - - addedViews := toSplitMinimalViews(added) - removedViews := toSplitMinimalViews(removed) - - if cn == -1 { - // During the first hit (cn=-1) we want to capture ALL split names, to form an initial snapshot of what the user will get - // and nothing else. - ss := newSplitSet() - ss.update(addedViews, nil) - cs := s.changes[0] - cs.Current = ss - s.changes[0] = cs - } - - if cn <= s.currentCN { - return - } - - if len(s.changes) >= s.maxRecipes { - s.removeOldestRecipe() - } - - var lastCheckpoint int64 = -1 - lastSplitSet := newSplitSet() - for key, summary := range s.changes { - if key > lastCheckpoint { - lastCheckpoint = key - lastSplitSet = summary.Current - } - - summary.applyChange(addedViews, removedViews) - s.changes[key] = summary - } - - s.currentCN = cn - - newSS := lastSplitSet.clone() - newSS.update(addedViews, removedViews) - s.changes[cn] = newEmptyChangeSummary(newSS) -} - -// AddOlderChange is used to add a change older than the oldest one currently stored (when the sync started) -// so that it can be used to serve SDKs stuck on an older CN -func (s *SplitChangesSummaries) AddOlderChange(added []dtos.SplitDTO, removed []dtos.SplitDTO, cn int64) { - s.mutex.Lock() - defer s.mutex.Unlock() - if cn >= s.currentCN { - // If the change number is equal or greater than our current CN, we're about to overwrite - // valid information, ignore it. - return - } - - if len(s.changes) >= s.maxRecipes { - s.removeOldestRecipe() - } - - summary := newEmptyChangeSummary(nil) // TODO(mredolatti): see if we can do better than this - for _, split := range added { - summary.Updated[split.Name] = split.TrafficTypeName - } - - for _, split := range removed { - summary.Removed[split.Name] = split.TrafficTypeName - } - - s.changes[cn] = summary -} - -// FetchSince returns a recipe explaining how to build a /splitChanges payload to serve an sdk which -// is currently on changeNumber `since`. 
It will contain the list of feature flags that need to be updated, and those that need -// to be deleted -func (s *SplitChangesSummaries) FetchSince(since int64) (*ChangeSummary, int64, error) { - s.mutex.RLock() - defer s.mutex.RUnlock() - view, ok := s.changes[since] - if !ok { - return nil, s.currentCN, ErrUnknownChangeNumber - } - return &view, s.currentCN, nil -} - -func (s *SplitChangesSummaries) removeOldestRecipe() { - // look for the oldest change and remove it - - if len(s.changes) == 0 { // just in case - return // nothing to do - } - oldest := int64(math.MaxInt64) - for cn := range s.changes { - if cn != -1 && cn < oldest { - oldest = cn - } - } - delete(s.changes, oldest) -} - -// BuildArchivedSplitsFor takes a mapping of feature flag name -> traffic type name, -// and build fake "ARCHIVED" feature flags to return to the sdk upon a splitChanges request -// now that we no longer store the full body of archived feature flags -func BuildArchivedSplitsFor(nameToTrafficType map[string]string) []dtos.SplitDTO { - archived := make([]dtos.SplitDTO, 0, len(nameToTrafficType)) - for name, tt := range nameToTrafficType { - archived = append(archived, dtos.SplitDTO{ - ChangeNumber: 1, - TrafficTypeName: tt, - Name: name, - TrafficAllocation: 100, - TrafficAllocationSeed: 0, - Seed: 0, - Status: "ARCHIVED", - Killed: false, - DefaultTreatment: "off", - Algo: 1, - Conditions: make([]dtos.ConditionDTO, 0), - }) - } - return archived -} - -func toSplitMinimalViews(items []dtos.SplitDTO) []SplitMinimalView { - views := make([]SplitMinimalView, 0, len(items)) - for _, dto := range items { - views = append(views, SplitMinimalView{Name: dto.Name, TrafficType: dto.TrafficTypeName}) - } - return views -} - -type splitSet struct { - data map[string]struct{} -} - -func newSplitSet() *splitSet { - return &splitSet{data: make(map[string]struct{})} -} - -func (s *splitSet) clone() *splitSet { - x := newSplitSet() - for key := range s.data { - x.data[key] = struct{}{} - } - return x -} - -func (s *splitSet) update(toAdd []SplitMinimalView, toRemove []SplitMinimalView) { - for idx := range toAdd { - s.data[toAdd[idx].Name] = struct{}{} - } - - for idx := range toRemove { - delete(s.data, toRemove[idx].Name) - } -} - -func (s *splitSet) contains(name string) bool { - _, ok := s.data[name] - return ok -} diff --git a/splitio/proxy/storage/optimized/changesummary_test.go b/splitio/proxy/storage/optimized/changesummary_test.go deleted file mode 100644 index 5c87f87f..00000000 --- a/splitio/proxy/storage/optimized/changesummary_test.go +++ /dev/null @@ -1,306 +0,0 @@ -package optimized - -import ( - "testing" - - "github.com/splitio/go-split-commons/v5/dtos" -) - -func stringSlicesEqual(a []string, b []string) bool { - if len(a) != len(b) { - return false - } - - for idx := range a { - if a[idx] != b[idx] { - return false - } - } - return true -} - -func validateChanges(t *testing.T, c *ChangeSummary, expectedAdded []string, expectedRemoved []string) { - t.Helper() - - if len(c.Updated) != len(expectedAdded) || len(c.Removed) != len(expectedRemoved) { - t.Error("incorrect changes lengths") - } - - for _, added := range expectedAdded { - if _, ok := c.Updated[added]; !ok { - t.Errorf("key %s should be in `updated` and isnt.", added) - } - } - - for _, removed := range expectedRemoved { - if _, ok := c.Removed[removed]; !ok { - t.Errorf("key %s should be in `removed` and isnt.", removed) - } - } -} - -func TestSplitChangesSummary(t *testing.T) { - summaries := NewSplitChangesSummaries(1000) - changesM1, cnM1, 
err := summaries.FetchSince(-1) - if err != nil { - t.Error(err) - } - if cnM1 != -1 { - t.Error("cn should be -1, is: ", cnM1) - } - validateChanges(t, changesM1, []string{}, []string{}) - - // MOVE TO CN=1 - summaries.AddChanges( - []dtos.SplitDTO{ - {Name: "s1", TrafficTypeName: "tt1"}, - {Name: "s2", TrafficTypeName: "tt1"}, - {Name: "s3", TrafficTypeName: "tt1"}, - }, - nil, - 1, - ) - - changesM1, cnM1, err = summaries.FetchSince(-1) - if err != nil { - t.Error(err) - } - if cnM1 != 1 { - t.Error("new CN should be 1") - } - validateChanges(t, changesM1, []string{"s1", "s2", "s3"}, []string{}) - - changes1, cn1, err := summaries.FetchSince(1) - if err != nil { - t.Error(err) - } - if cn1 != 1 { - t.Error("cn should be 1, is: ", cn1) - } - validateChanges(t, changes1, []string{}, []string{}) - - // MOVE TO CN=2 - summaries.AddChanges([]dtos.SplitDTO{{Name: "s2", TrafficTypeName: "tt2"}}, nil, 2) - changesM1, cnM1, err = summaries.FetchSince(-1) - if err != nil { - t.Error(err) - } - if cnM1 != 2 { - t.Error("cn should be 2, is: ", cn1) - } - validateChanges(t, changesM1, []string{"s1", "s2", "s3"}, []string{}) - - changes1, cn1, err = summaries.FetchSince(1) - if err != nil { - t.Error(err) - } - if cn1 != 2 { - t.Error("cn should be 2, is: ", cn1) - } - validateChanges(t, changes1, []string{"s2"}, []string{}) - - changes2, cn2, err := summaries.FetchSince(2) - if err != nil { - t.Error(err) - } - if cn2 != 2 { - t.Error("cn should be 2, is: ", cn1) - } - validateChanges(t, changes2, []string{}, []string{}) - - // MOVE TO CN=3 - summaries.AddChanges([]dtos.SplitDTO{{Name: "s3", TrafficTypeName: "tt3"}}, nil, 3) - changesM1, cnM1, err = summaries.FetchSince(-1) - if err != nil { - t.Error(err) - } - if cnM1 != 3 { - t.Error("cn should be 3, is: ", cnM1) - } - validateChanges(t, changesM1, []string{"s1", "s2", "s3"}, []string{}) - - changes1, cn1, err = summaries.FetchSince(1) - if err != nil { - t.Error(err) - } - if cn1 != 3 { - t.Error("cn should be 3, is: ", cn1) - } - validateChanges(t, changes1, []string{"s2", "s3"}, []string{}) - - changes2, cn2, err = summaries.FetchSince(2) - if err != nil { - t.Error(err) - } - if cn2 != 3 { - t.Error("cn should be 3, is: ", cn2) - } - validateChanges(t, changes2, []string{"s3"}, []string{}) - - changes3, cn3, err := summaries.FetchSince(3) - if err != nil { - t.Error(err) - } - if cn3 != 3 { - t.Error("cn should be 3, is: ", cn3) - } - validateChanges(t, changes3, []string{}, []string{}) - - // MOVE TO CN=4 - summaries.AddChanges( - []dtos.SplitDTO{{Name: "s4", TrafficTypeName: "tt3"}}, - []dtos.SplitDTO{{Name: "s1", TrafficTypeName: "tt1"}}, - 4) - changesM1, cnM1, err = summaries.FetchSince(-1) - if err != nil { - t.Error(err) - } - if cnM1 != 4 { - t.Error("cn should be 4, is: ", cnM1) - } - validateChanges(t, changesM1, []string{"s2", "s3", "s4"}, []string{}) - - changes1, cn1, err = summaries.FetchSince(1) - if err != nil { - t.Error(err) - } - if cn1 != 4 { - t.Error("cn should be 4, is: ", cn1) - } - validateChanges(t, changes1, []string{"s2", "s3", "s4"}, []string{"s1"}) - - changes2, cn2, err = summaries.FetchSince(2) - if err != nil { - t.Error(err) - } - if cn2 != 4 { - t.Error("cn should be 4, is: ", cn2) - } - validateChanges(t, changes2, []string{"s3", "s4"}, []string{"s1"}) - - changes3, cn3, err = summaries.FetchSince(3) - if err != nil { - t.Error(err) - } - if cn3 != 4 { - t.Error("cn should be 4, is: ", cn3) - } - validateChanges(t, changes3, []string{"s4"}, []string{"s1"}) - - changes4, cn4, err := summaries.FetchSince(4) - 
if err != nil { - t.Error(err) - } - if cn4 != 4 { - t.Error("cn should be 4, is: ", cn4) - } - validateChanges(t, changes4, []string{}, []string{}) - - // TODO: Continue test plan up to 6! -} - -func TestSizeBoundaries(t *testing.T) { - summaries := NewSplitChangesSummaries(5) - // validateChanges(t, changesM1, []string{}, []string{}) - - summaries.AddChanges([]dtos.SplitDTO{{Name: "s1", TrafficTypeName: "tt1"}}, nil, 1) - summaries.AddChanges([]dtos.SplitDTO{{Name: "s1", TrafficTypeName: "tt2"}}, nil, 2) - summaries.AddChanges([]dtos.SplitDTO{{Name: "s1", TrafficTypeName: "tt3"}}, nil, 3) - summaries.AddChanges([]dtos.SplitDTO{{Name: "s1", TrafficTypeName: "tt4"}}, nil, 4) - summaries.AddChanges([]dtos.SplitDTO{{Name: "s1", TrafficTypeName: "tt5"}}, nil, 5) - - changes, cn, err := summaries.FetchSince(1) - if err != nil { - t.Error("no error should be returned. Got: ", err) - } - - if tt := changes.Updated["s1"]; tt != "tt5" { - t.Error("invalid tt: ", tt) - } - - if cn != 5 { - t.Error("cn should be 5. Is:", cn) - } - - summaries.AddChanges([]dtos.SplitDTO{{Name: "s1", TrafficTypeName: "tt5"}}, nil, 6) - _, _, err = summaries.FetchSince(1) - if err != ErrUnknownChangeNumber { - t.Error("should have gotten unknown CN error. Got: ", err) - } -} - -func TestSplitSet(t *testing.T) { - ss := newSplitSet() - ss.update([]SplitMinimalView{{Name: "s1"}, {Name: "s2"}}, nil) - if !ss.contains("s1") || !ss.contains("s2") { - t.Error("splitSet should contain s1 & s2") - } - - clone := ss.clone() - if !clone.contains("s1") || !clone.contains("s2") { - t.Error("splitSet should contain s1 & s2") - } - - ss.update(nil, []SplitMinimalView{{Name: "s2"}}) - if !clone.contains("s1") { - t.Error("splitSet should contain s1") - } -} - -/* TEST PLAN! --1: null -1: - ops: - - add(s1) - - add(s2) - - add(s3) - returns: - -1: [+s1, +s2, +s3] - 1: [] -2: - ops: - - update(s2) - returns: - -1: [+s1, +s2, +s3] - 1: [+s2] - 2: [] -3: - ops: - - kill(s3) - returns: - -1: [+s1, +s2, +s3] - 1: [+s2, +s3] - 2: [+s3] - 3: [] -4: - ops: - - add(s4) - - del(s1) - returns: - -1: [+s2, +s3, +s4] - 1: [+s2, +s3, -s1, +s4] - 2: [+s3, -s1, +s4] - 3: [-s1, +s4] - 4: [] -5: - ops: - del(s4) - returns: - -1: [+s2, +s3] - 1: [+s2, +s3, -s1] - 2: [+s3, -s1] - 3: [-s1] - 4: [-s4] - 5: [] -6: - ops: - restore(s1) - returns: - -1: [+s2, +s3, +s1] - 1: [+s2, +s3, +s1] - 2: [+s3, +s1] - 3: [+s1] - 4: [-s4, +s1] - 5: [+s1] - 6: [] -*/ diff --git a/splitio/proxy/storage/optimized/historic.go b/splitio/proxy/storage/optimized/historic.go new file mode 100644 index 00000000..b4264d1d --- /dev/null +++ b/splitio/proxy/storage/optimized/historic.go @@ -0,0 +1,231 @@ +package optimized + +import ( + "slices" + "sort" + "strings" + "sync" + + "github.com/splitio/go-split-commons/v5/dtos" +) + +type HistoricChanges interface { + GetUpdatedSince(since int64, flagSets []string) []FeatureView + Update(toAdd []dtos.SplitDTO, toRemove []dtos.SplitDTO, newCN int64) +} + +type HistoricChangesImpl struct { + data []FeatureView + mutex sync.RWMutex +} + +func NewHistoricSplitChanges(capacity int) *HistoricChangesImpl { + return &HistoricChangesImpl{ + data: make([]FeatureView, 0, capacity), + } +} + +func (h *HistoricChangesImpl) GetUpdatedSince(since int64, flagSets []string) []FeatureView { + slices.Sort(flagSets) + h.mutex.RLock() + views := h.findNewerThan(since) + toRet := copyAndFilter(views, flagSets, since) + h.mutex.RUnlock() + return toRet +} + +func (h *HistoricChangesImpl) Update(toAdd []dtos.SplitDTO, toRemove []dtos.SplitDTO, newCN int64) { + 
h.mutex.Lock() + h.updateFrom(toAdd) + h.updateFrom(toRemove) + sort.Slice(h.data, func(i, j int) bool { return h.data[i].LastUpdated < h.data[j].LastUpdated }) + h.mutex.Unlock() +} + +// public interface ends here + +func (h *HistoricChangesImpl) updateFrom(source []dtos.SplitDTO) { + for idx := range source { + if current := h.findByName(source[idx].Name); current != nil { + current.updateFrom(&source[idx]) + } else { + var toAdd FeatureView + toAdd.updateFrom(&source[idx]) + h.data = append(h.data, toAdd) + } + } +} + +func (h *HistoricChangesImpl) findByName(name string) *FeatureView { + // yes, it's linear search because features are sorted by CN, but it's only used + // when processing an update coming from the BE. it's off the critical path of incoming + // requests. + for idx := range h.data { + if h.data[idx].Name == name { + return &h.data[idx] + } + } + return nil +} + +func (h *HistoricChangesImpl) findNewerThan(since int64) []FeatureView { + // precondition: h.data is sorted by CN + start := sort.Search(len(h.data), func(i int) bool { return h.data[i].LastUpdated > since }) + if start == len(h.data) { + return nil + } + return h.data[start:] +} + +type FeatureView struct { + Name string + Active bool + LastUpdated int64 + TrafficTypeName string + FlagSets []FlagSetView +} + +func (f *FeatureView) updateFrom(s *dtos.SplitDTO) { + f.Name = s.Name + f.Active = s.Status == "ACTIVE" + f.LastUpdated = s.ChangeNumber + f.TrafficTypeName = s.TrafficTypeName + f.updateFlagsets(s.Sets, s.ChangeNumber) +} + +func (f *FeatureView) updateFlagsets(i []string, lastUpdated int64) { + incoming := slices.Clone(i) // make a copy since we'll reorder elements + + // check if the current flagsets are still present in the updated split. + // if they're present & currently marked as inactive, update their status & CN + // if they're not present, mark them as ARCHIVED & update the CN + for idx := range f.FlagSets { + if itemIdx := slices.Index(incoming, f.FlagSets[idx].Name); itemIdx != -1 { + if !f.FlagSets[idx].Active { // Association changed from ARCHIVED to ACTIVE + f.FlagSets[idx].Active = true + f.FlagSets[idx].LastUpdated = lastUpdated + + } + + // "soft delete" the item so that it's not traversed later on + // (replaces the item with the last one and shrinks the slice by 1) + incoming[itemIdx] = incoming[len(incoming)-1] + incoming = incoming[:len(incoming)-1] + + } else { // Association changed from ARCHIVED to ACTIVE + f.FlagSets[idx].Active = false + f.FlagSets[idx].LastUpdated = lastUpdated + } + } + + // the only leftover in `incoming` should be the items that were not + // present in the feature's previously associated flagsets, so they're new & active + for idx := range incoming { + f.FlagSets = append(f.FlagSets, FlagSetView{ + Name: incoming[idx], + Active: true, + LastUpdated: lastUpdated, + }) + } + + sort.Slice(f.FlagSets, func(i, j int) bool { return f.FlagSets[i].Name < f.FlagSets[j].Name }) +} + +func (f *FeatureView) findFlagSetByName(name string) *FlagSetView { + // precondition: f.FlagSets is sorted by name + idx := sort.Search(len(f.FlagSets), func(i int) bool { return f.FlagSets[i].Name >= name }) + if idx != len(f.FlagSets) && name == f.FlagSets[idx].Name { + return &f.FlagSets[idx] + } + return nil +} + +func (f *FeatureView) clone() FeatureView { + toRet := FeatureView{ + Name: f.Name, + Active: f.Active, + LastUpdated: f.LastUpdated, + TrafficTypeName: f.TrafficTypeName, + FlagSets: make([]FlagSetView, len(f.FlagSets)), + } + copy(toRet.FlagSets, f.FlagSets) // we need 
to deep clone to avoid race conditions
+	return toRet
+
+}
+
+func (f *FeatureView) FlagSetNames() []string {
+	toRet := make([]string, len(f.FlagSets))
+	for idx := range f.FlagSets {
+		toRet[idx] = f.FlagSets[idx].Name
+	}
+	return toRet
+}
+
+func copyAndFilter(views []FeatureView, sets []string, since int64) []FeatureView {
+	// precondition: each view's FlagSets is sorted by name
+	// precondition: sets is sorted
+	toRet := make([]FeatureView, 0, len(views))
+
+	// this code computes the intersection in O(len(views) * (len(view.FlagSets) + len(sets)))
+	for idx := range views {
+		if featureShouldBeReturned(&views[idx], since, sets) {
+			toRet = append(toRet, views[idx].clone())
+		}
+	}
+	return toRet
+}
+
+func featureShouldBeReturned(view *FeatureView, since int64, sets []string) bool {
+
+	// if fetching from scratch & the feature is not active,
+	// or it hasn't been updated since `since`, it shouldn't even be considered for being returned
+	if since == -1 && !view.Active || view.LastUpdated < since {
+		return false
+	}
+
+	// all updated features should be returned if no set filter is being used
+	if len(sets) == 0 {
+		return true
+	}
+
+	// compare the sets for intersection of user-supplied sets with currently active ones.
+	// takes linear O(len(view.FlagSets) + len(sets)) time since both lists are sorted
+	viewFlagSetIndex, requestedSetIndex := 0, 0
+	for viewFlagSetIndex < len(view.FlagSets) {
+		switch strings.Compare(view.FlagSets[viewFlagSetIndex].Name, sets[requestedSetIndex]) {
+		case 0: // we got a match
+			fsinfo := view.FlagSets[viewFlagSetIndex]
+			// if an association is active, it's considered and the Feature is added to the result set.
+			// if an association is inactive and we're fetching from scratch (since=-1), it's not considered.
+			// if an association was already inactive at the time of the provided `since`, it's not considered.
+			// if an association was active on the provided `since` and now isn't, the feature IS added to the returned payload.
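+			// e.g. (illustrative numbers): an association with the requested set that was
+			// archived at CN=10 is still returned to a caller polling with since=5
+			// (5 > -1 && 5 < 10), but not to one fetching from scratch (since=-1) nor to
+			// one whose since is already past that change (e.g. since=12).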
+ if fsinfo.Active || (since > -1 && since < fsinfo.LastUpdated) { + return true + } + viewFlagSetIndex++ + incrUpTo(&requestedSetIndex, len(sets)) + case -1: + viewFlagSetIndex++ + case 1: + if incrUpTo(&requestedSetIndex, len(sets)); requestedSetIndex+1 == len(sets) { + viewFlagSetIndex++ + } + } + } + return false +} + +type FlagSetView struct { + Name string + Active bool + LastUpdated int64 +} + +func incrUpTo(toIncr *int, limit int) { + if *toIncr+1 >= limit { + return + } + *toIncr++ +} + +var _ HistoricChanges = (*HistoricChangesImpl)(nil) diff --git a/splitio/proxy/storage/optimized/historic_test.go b/splitio/proxy/storage/optimized/historic_test.go new file mode 100644 index 00000000..6c826602 --- /dev/null +++ b/splitio/proxy/storage/optimized/historic_test.go @@ -0,0 +1,344 @@ +package optimized + +import ( + "math/rand" + "sort" + "testing" + "time" + + "github.com/splitio/go-split-commons/v5/dtos" + "github.com/stretchr/testify/assert" +) + +func TestHistoricSplitStorage(t *testing.T) { + + var historic HistoricChangesImpl + historic.Update([]dtos.SplitDTO{ + {Name: "f1", Sets: []string{"s1", "s2"}, Status: "ACTIVE", ChangeNumber: 1, TrafficTypeName: "tt1"}, + }, []dtos.SplitDTO{}, 1) + assert.Equal(t, + []FeatureView{ + {Name: "f1", TrafficTypeName: "tt1", FlagSets: []FlagSetView{{"s1", true, 1}, {"s2", true, 1}}, Active: true, LastUpdated: 1}, + }, + historic.GetUpdatedSince(-1, nil)) + + // process an update with no change in flagsets / split status + // - fetching from -1 && 1 should return the same paylaod as before with only `lastUpdated` bumped to 2 + // - fetching from 2 should return empty + historic.Update([]dtos.SplitDTO{ + {Name: "f1", Sets: []string{"s1", "s2"}, Status: "ACTIVE", ChangeNumber: 2, TrafficTypeName: "tt1"}, + }, []dtos.SplitDTO{}, 1) + + // no filter + assert.Equal(t, + []FeatureView{ + {Name: "f1", TrafficTypeName: "tt1", FlagSets: []FlagSetView{{"s1", true, 1}, {"s2", true, 1}}, Active: true, LastUpdated: 2}, + }, + historic.GetUpdatedSince(-1, nil)) + assert.Equal(t, + []FeatureView{ + {Name: "f1", TrafficTypeName: "tt1", FlagSets: []FlagSetView{{"s1", true, 1}, {"s2", true, 1}}, Active: true, LastUpdated: 2}, + }, + historic.GetUpdatedSince(1, nil)) + assert.Equal(t, []FeatureView{}, historic.GetUpdatedSince(2, nil)) + + // filter by s1 + assert.Equal(t, + []FeatureView{ + {Name: "f1", TrafficTypeName: "tt1", FlagSets: []FlagSetView{{"s1", true, 1}, {"s2", true, 1}}, Active: true, LastUpdated: 2}, + }, + historic.GetUpdatedSince(-1, []string{"s1"})) + assert.Equal(t, + []FeatureView{ + {Name: "f1", TrafficTypeName: "tt1", FlagSets: []FlagSetView{{"s1", true, 1}, {"s2", true, 1}}, Active: true, LastUpdated: 2}, + }, + historic.GetUpdatedSince(1, []string{"s1"})) + assert.Equal(t, []FeatureView{}, historic.GetUpdatedSince(2, []string{"s1"})) + + // filter by s2 + assert.Equal(t, + []FeatureView{ + {Name: "f1", TrafficTypeName: "tt1", FlagSets: []FlagSetView{{"s1", true, 1}, {"s2", true, 1}}, Active: true, LastUpdated: 2}, + }, + historic.GetUpdatedSince(-1, []string{"s2"})) + assert.Equal(t, + []FeatureView{ + {Name: "f1", TrafficTypeName: "tt1", FlagSets: []FlagSetView{{"s1", true, 1}, {"s2", true, 1}}, Active: true, LastUpdated: 2}, + }, + historic.GetUpdatedSince(1, []string{"s2"})) + assert.Equal(t, []FeatureView{}, historic.GetUpdatedSince(2, []string{"s2"})) + + // ------------------- + + // process an update with one extra split + // - fetching from -1, & 1 should return the same payload + // - fetching from 2 shuold only return f2 + // - 
fetching from 3 should return empty + historic.Update([]dtos.SplitDTO{ + {Name: "f2", Sets: []string{"s2", "s3"}, Status: "ACTIVE", ChangeNumber: 3, TrafficTypeName: "tt1"}, + }, []dtos.SplitDTO{}, 1) + + // assert correct behaviours for CN == 1..3 and no flag sets filter + assert.Equal(t, + []FeatureView{ + {Name: "f1", TrafficTypeName: "tt1", FlagSets: []FlagSetView{{"s1", true, 1}, {"s2", true, 1}}, Active: true, LastUpdated: 2}, + {Name: "f2", TrafficTypeName: "tt1", FlagSets: []FlagSetView{{"s2", true, 3}, {"s3", true, 3}}, Active: true, LastUpdated: 3}, + }, + historic.GetUpdatedSince(-1, nil)) + assert.Equal(t, + []FeatureView{ + {Name: "f1", TrafficTypeName: "tt1", FlagSets: []FlagSetView{{"s1", true, 1}, {"s2", true, 1}}, Active: true, LastUpdated: 2}, + {Name: "f2", TrafficTypeName: "tt1", FlagSets: []FlagSetView{{"s2", true, 3}, {"s3", true, 3}}, Active: true, LastUpdated: 3}, + }, + historic.GetUpdatedSince(1, nil)) + assert.Equal(t, + []FeatureView{ + {Name: "f2", TrafficTypeName: "tt1", FlagSets: []FlagSetView{{"s2", true, 3}, {"s3", true, 3}}, Active: true, LastUpdated: 3}, + }, + historic.GetUpdatedSince(2, nil)) + assert.Equal(t, []FeatureView{}, historic.GetUpdatedSince(3, nil)) + + // filtering by s1: + assert.Equal(t, + []FeatureView{ + {Name: "f1", TrafficTypeName: "tt1", FlagSets: []FlagSetView{{"s1", true, 1}, {"s2", true, 1}}, Active: true, LastUpdated: 2}, + }, + historic.GetUpdatedSince(-1, []string{"s1"})) + assert.Equal(t, + []FeatureView{ + {Name: "f1", TrafficTypeName: "tt1", FlagSets: []FlagSetView{{"s1", true, 1}, {"s2", true, 1}}, Active: true, LastUpdated: 2}, + }, + historic.GetUpdatedSince(1, []string{"s1"})) + assert.Equal(t, []FeatureView{}, historic.GetUpdatedSince(2, []string{"s1"})) + assert.Equal(t, []FeatureView{}, historic.GetUpdatedSince(3, []string{"s1"})) + + // filtering by s2: + assert.Equal(t, + []FeatureView{ + {Name: "f1", TrafficTypeName: "tt1", FlagSets: []FlagSetView{{"s1", true, 1}, {"s2", true, 1}}, Active: true, LastUpdated: 2}, + {Name: "f2", TrafficTypeName: "tt1", FlagSets: []FlagSetView{{"s2", true, 3}, {"s3", true, 3}}, Active: true, LastUpdated: 3}, + }, + historic.GetUpdatedSince(-1, []string{"s2"})) + assert.Equal(t, + []FeatureView{ + {Name: "f1", TrafficTypeName: "tt1", FlagSets: []FlagSetView{{"s1", true, 1}, {"s2", true, 1}}, Active: true, LastUpdated: 2}, + {Name: "f2", TrafficTypeName: "tt1", FlagSets: []FlagSetView{{"s2", true, 3}, {"s3", true, 3}}, Active: true, LastUpdated: 3}, + }, + historic.GetUpdatedSince(1, []string{"s2"})) + assert.Equal(t, + []FeatureView{ + {Name: "f2", TrafficTypeName: "tt1", FlagSets: []FlagSetView{{"s2", true, 3}, {"s3", true, 3}}, Active: true, LastUpdated: 3}, + }, + historic.GetUpdatedSince(2, []string{"s2"})) + assert.Equal(t, []FeatureView{}, historic.GetUpdatedSince(3, []string{"s2"})) + + //filtering by s3 + assert.Equal(t, + []FeatureView{ + {Name: "f2", TrafficTypeName: "tt1", FlagSets: []FlagSetView{{"s2", true, 3}, {"s3", true, 3}}, Active: true, LastUpdated: 3}, + }, + historic.GetUpdatedSince(-1, []string{"s3"})) + assert.Equal(t, + []FeatureView{ + {Name: "f2", TrafficTypeName: "tt1", FlagSets: []FlagSetView{{"s2", true, 3}, {"s3", true, 3}}, Active: true, LastUpdated: 3}, + }, + historic.GetUpdatedSince(1, []string{"s3"})) + assert.Equal(t, + []FeatureView{ + {Name: "f2", TrafficTypeName: "tt1", FlagSets: []FlagSetView{{"s2", true, 3}, {"s3", true, 3}}, Active: true, LastUpdated: 3}, + }, + historic.GetUpdatedSince(2, []string{"s3"})) + assert.Equal(t, []FeatureView{}, 
historic.GetUpdatedSince(3, []string{"s3"})) + + // ------------------- + + // process an update that removes f1 from flagset s1 + // - fetching without a filter should remain the same + // - fetching with filter = s1 should not return f1 in CN=-1, should return it without the flagset in greater CNs + historic.Update([]dtos.SplitDTO{ + {Name: "f1", Sets: []string{"s2"}, Status: "ACTIVE", ChangeNumber: 4, TrafficTypeName: "tt1"}, + }, []dtos.SplitDTO{}, 1) + + assert.Equal(t, + []FeatureView{ + {Name: "f2", TrafficTypeName: "tt1", FlagSets: []FlagSetView{{"s2", true, 3}, {"s3", true, 3}}, Active: true, LastUpdated: 3}, + {Name: "f1", TrafficTypeName: "tt1", FlagSets: []FlagSetView{{"s1", false, 4}, {"s2", true, 1}}, Active: true, LastUpdated: 4}, + }, + historic.GetUpdatedSince(-1, nil)) + + // with filter = s1 (f2 never was associated with s1, f1 is no longer associated) + assert.Equal(t, + []FeatureView{}, + historic.GetUpdatedSince(-1, []string{"s1"})) + assert.Equal(t, + []FeatureView{ + {Name: "f1", TrafficTypeName: "tt1", FlagSets: []FlagSetView{{"s1", false, 4}, {"s2", true, 1}}, Active: true, LastUpdated: 4}, + }, + historic.GetUpdatedSince(1, []string{"s1"})) + assert.Equal(t, + []FeatureView{ + {Name: "f1", TrafficTypeName: "tt1", FlagSets: []FlagSetView{{"s1", false, 4}, {"s2", true, 1}}, Active: true, LastUpdated: 4}, + }, + historic.GetUpdatedSince(2, []string{"s1"})) + assert.Equal(t, + []FeatureView{ + {Name: "f1", TrafficTypeName: "tt1", FlagSets: []FlagSetView{{"s1", false, 4}, {"s2", true, 1}}, Active: true, LastUpdated: 4}, + }, + historic.GetUpdatedSince(3, []string{"s1"})) + assert.Equal(t, []FeatureView{}, historic.GetUpdatedSince(4, []string{"s1"})) + + // process an update that removes f2 (archives the feature) + // fetching from -1 should not return f2 + // fetching from any intermediate CN should return f2 as archived + // fetching from cn=5 should return empty + historic.Update([]dtos.SplitDTO{ + {Name: "f2", Sets: []string{"s2", "s3"}, Status: "ARCHIVED", ChangeNumber: 5, TrafficTypeName: "tt1"}, + }, []dtos.SplitDTO{}, 1) + + // without filter + assert.Equal(t, + []FeatureView{ + {Name: "f1", TrafficTypeName: "tt1", FlagSets: []FlagSetView{{"s1", false, 4}, {"s2", true, 1}}, Active: true, LastUpdated: 4}, + }, + historic.GetUpdatedSince(-1, nil)) + assert.Equal(t, + []FeatureView{ + {Name: "f1", TrafficTypeName: "tt1", FlagSets: []FlagSetView{{"s1", false, 4}, {"s2", true, 1}}, Active: true, LastUpdated: 4}, + {Name: "f2", TrafficTypeName: "tt1", FlagSets: []FlagSetView{{"s2", true, 3}, {"s3", true, 3}}, Active: false, LastUpdated: 5}, + }, + historic.GetUpdatedSince(1, nil)) + assert.Equal(t, + []FeatureView{ + {Name: "f1", TrafficTypeName: "tt1", FlagSets: []FlagSetView{{"s1", false, 4}, {"s2", true, 1}}, Active: true, LastUpdated: 4}, + {Name: "f2", TrafficTypeName: "tt1", FlagSets: []FlagSetView{{"s2", true, 3}, {"s3", true, 3}}, Active: false, LastUpdated: 5}, + }, + historic.GetUpdatedSince(2, nil)) + assert.Equal(t, + []FeatureView{ + {Name: "f1", TrafficTypeName: "tt1", FlagSets: []FlagSetView{{"s1", false, 4}, {"s2", true, 1}}, Active: true, LastUpdated: 4}, + {Name: "f2", TrafficTypeName: "tt1", FlagSets: []FlagSetView{{"s2", true, 3}, {"s3", true, 3}}, Active: false, LastUpdated: 5}, + }, + historic.GetUpdatedSince(3, nil)) + assert.Equal(t, + []FeatureView{ + {Name: "f2", TrafficTypeName: "tt1", FlagSets: []FlagSetView{{"s2", true, 3}, {"s3", true, 3}}, Active: false, LastUpdated: 5}, + }, + historic.GetUpdatedSince(4, nil)) + assert.Equal(t, 
[]FeatureView{}, historic.GetUpdatedSince(5, nil)) + +} + +// -- code below is for benchmarking random access using hashsets (map[string]struct{}) vs sorted slices + binary search + +func setupRandomData(flagsetLength int, flagsetCount int, splits int, flagSetsPerSplitMax int, userSets int) benchmarkDataSlices { + const letters = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ" + rand.Seed(time.Now().UnixNano()) + makeStr := func(n int) string { + b := make([]byte, n) + for i := range b { + b[i] = letters[rand.Intn(len(letters))] + } + return string(b) + } + + flagSets := make([]string, 0, flagsetCount) + for flagsetCount > 0 { + flagSets = append(flagSets, makeStr(flagsetLength)) + flagsetCount-- + } + + views := make([]FeatureView, 0, splits) + for len(views) < cap(views) { + fscount := rand.Intn(flagSetsPerSplitMax) + setsForSplit := make([]FlagSetView, 0, fscount) + for fscount > 0 { + setsForSplit = append(setsForSplit, FlagSetView{ + Name: flagSets[rand.Intn(len(flagSets))], + Active: rand.Intn(2) > 0, + LastUpdated: rand.Int63n(2), + }) + fscount-- + } + sort.Slice(setsForSplit, func(i, j int) bool { return setsForSplit[i].Name < setsForSplit[j].Name }) + views = append(views, FeatureView{ + Name: makeStr(20), + Active: rand.Intn(2) > 0, // rand bool + LastUpdated: rand.Int63n(2), // 1 or 2 (still an int but behaving like a bool if we filter by since=1) + TrafficTypeName: makeStr(10), + FlagSets: setsForSplit, + }) + + } + sort.Slice(views, func(i, j int) bool { return views[i].LastUpdated < views[j].LastUpdated }) + return benchmarkDataSlices{views, flagSets} +} + +type benchmarkDataSlices struct { + views []FeatureView + sets []string +} + +func (b *benchmarkDataSlices) toBenchmarkDataForMaps() benchmarkDataMaps { + setMap := make(map[string]struct{}, len(b.sets)) + for _, s := range b.sets { + setMap[s] = struct{}{} + } + + return benchmarkDataMaps{ + views: b.views, + sets: setMap, + } + +} + +type benchmarkDataMaps struct { + views []FeatureView + sets map[string]struct{} +} + +// reference implementation for benchmarking purposes only +func copyAndFilterUsingMaps(views []FeatureView, sets map[string]struct{}, since int64) []FeatureView { + toRet := make([]FeatureView, 0, len(views)) + for idx := range views { + for fsidx := range views[idx].FlagSets { + if _, ok := sets[views[idx].FlagSets[fsidx].Name]; ok { + fsinfo := views[idx].FlagSets[fsidx] + if fsinfo.Active || fsinfo.LastUpdated > since { + toRet = append(toRet, views[idx].clone()) + } + } + } + + } + return toRet +} + +func BenchmarkFlagSetProcessing(b *testing.B) { + + b.Run("sorted-slice", func(b *testing.B) { + data := make([]benchmarkDataSlices, 0, b.N) + for i := 0; i < b.N; i++ { + data = append(data, setupRandomData(20, 50, 500, 20, 10)) + } + + b.ResetTimer() // to ignore setup time & allocs + + for i := 0; i < b.N; i++ { + copyAndFilter(data[i].views, data[i].sets, 1) + } + }) + + b.Run("maps", func(b *testing.B) { + data := make([]benchmarkDataMaps, 0, b.N) + for i := 0; i < b.N; i++ { + d := setupRandomData(20, 50, 500, 20, 10) + data = append(data, d.toBenchmarkDataForMaps()) + } + + b.ResetTimer() // to ignore setup time & allocs + + for i := 0; i < b.N; i++ { + copyAndFilterUsingMaps(data[i].views, data[i].sets, 1) + } + }) +} diff --git a/splitio/proxy/storage/optimized/mocks/mocks.go b/splitio/proxy/storage/optimized/mocks/mocks.go new file mode 100644 index 00000000..24160c0f --- /dev/null +++ b/splitio/proxy/storage/optimized/mocks/mocks.go @@ -0,0 +1,23 @@ +package mocks + +import ( + 
"github.com/splitio/go-split-commons/v5/dtos" + "github.com/splitio/split-synchronizer/v5/splitio/proxy/storage/optimized" + "github.com/stretchr/testify/mock" +) + +type HistoricStorageMock struct { + mock.Mock +} + +// GetUpdatedSince implements optimized.HistoricChanges +func (h *HistoricStorageMock) GetUpdatedSince(since int64, flagSets []string) []optimized.FeatureView { + return h.Called(since, flagSets).Get(0).([]optimized.FeatureView) +} + +// Update implements optimized.HistoricChanges +func (h *HistoricStorageMock) Update(toAdd []dtos.SplitDTO, toRemove []dtos.SplitDTO, newCN int64) { + h.Called(toAdd, toRemove, newCN) +} + +var _ optimized.HistoricChanges = (*HistoricStorageMock)(nil) diff --git a/splitio/proxy/storage/splits.go b/splitio/proxy/storage/splits.go index bc4a99d3..46cfc04d 100644 --- a/splitio/proxy/storage/splits.go +++ b/splitio/proxy/storage/splits.go @@ -6,6 +6,7 @@ import ( "sync" "github.com/splitio/go-split-commons/v5/dtos" + "github.com/splitio/go-split-commons/v5/flagsets" "github.com/splitio/go-split-commons/v5/storage" "github.com/splitio/go-split-commons/v5/storage/inmemory/mutexmap" "github.com/splitio/go-toolkit/v5/datastructures/set" @@ -20,45 +21,53 @@ const ( maxRecipes = 1000 ) -// ErrSummaryNotCached is returned when a summary is not cached for a requested change number -var ErrSummaryNotCached = errors.New("summary for requested change number not cached") +// ErrSinceParamTooOld is returned when a summary is not cached for a requested change number +var ErrSinceParamTooOld = errors.New("summary for requested change number not cached") // ProxySplitStorage defines the interface of a storage that can be used for serving splitChanges payloads // for different requested `since` parameters type ProxySplitStorage interface { - ChangesSince(since int64) (*dtos.SplitChangesDTO, error) - RegisterOlderCn(payload *dtos.SplitChangesDTO) + ChangesSince(since int64, flagSets []string) (*dtos.SplitChangesDTO, error) } // ProxySplitStorageImpl implements the ProxySplitStorage interface and the SplitProducer interface type ProxySplitStorageImpl struct { - snapshot mutexmap.MMSplitStorage - recipes *optimized.SplitChangesSummaries - db *persistent.SplitChangesCollection - mtx sync.Mutex + snapshot mutexmap.MMSplitStorage + db *persistent.SplitChangesCollection + flagSets flagsets.FlagSetFilter + historic optimized.HistoricChanges + logger logging.LoggerInterface + oldestKnownCN int64 + mtx sync.Mutex } // NewProxySplitStorage instantiates a new proxy storage that wraps an in-memory snapshot of the last known, // flag configuration, a changes summaries containing recipes to update SDKs with different CNs, and a persistent storage // for snapshot purposes -func NewProxySplitStorage(db persistent.DBWrapper, logger logging.LoggerInterface, restoreBackup bool) *ProxySplitStorageImpl { +func NewProxySplitStorage(db persistent.DBWrapper, logger logging.LoggerInterface, flagSets flagsets.FlagSetFilter, restoreBackup bool) *ProxySplitStorageImpl { disk := persistent.NewSplitChangesCollection(db, logger) - snapshot := mutexmap.NewMMSplitStorage() - recipes := optimized.NewSplitChangesSummaries(maxRecipes) + snapshot := mutexmap.NewMMSplitStorage(flagSets) + historic := optimized.NewHistoricSplitChanges(1000) + + var initialCN int64 = -1 if restoreBackup { - snapshotFromDisk(snapshot, recipes, disk, logger) + initialCN = snapshotFromDisk(snapshot, historic, disk, logger) } return &ProxySplitStorageImpl{ - snapshot: *snapshot, - recipes: recipes, - db: disk, + snapshot: 
*snapshot, + db: disk, + flagSets: flagSets, + historic: historic, + logger: logger, + oldestKnownCN: initialCN, } } // ChangesSince builds a SplitChanges payload to from `since` to the latest known CN -func (p *ProxySplitStorageImpl) ChangesSince(since int64) (*dtos.SplitChangesDTO, error) { - // Special case of -1, return all - if since == -1 { +func (p *ProxySplitStorageImpl) ChangesSince(since int64, flagSets []string) (*dtos.SplitChangesDTO, error) { + + // No flagsets and fetching from -1, return the current snapshot + if since == -1 && len(flagSets) == 0 { cn, err := p.snapshot.ChangeNumber() if err != nil { return nil, fmt.Errorf("error fetching changeNumber from snapshot: %w", err) @@ -67,26 +76,36 @@ func (p *ProxySplitStorageImpl) ChangesSince(since int64) (*dtos.SplitChangesDTO return &dtos.SplitChangesDTO{Since: since, Till: cn, Splits: all}, nil } - summary, till, err := p.recipes.FetchSince(int64(since)) - if err != nil { - if errors.Is(err, optimized.ErrUnknownChangeNumber) { - return nil, ErrSummaryNotCached - } - return nil, fmt.Errorf("unexpected error when fetching changes summary: %w", err) + if p.sinceIsTooOld(since) { + return nil, ErrSinceParamTooOld } - // Regular flow - splitNames := make([]string, 0, len(summary.Updated)) - for name := range summary.Updated { - splitNames = append(splitNames, name) + views := p.historic.GetUpdatedSince(since, flagSets) + namesToFetch := make([]string, 0, len(views)) + all := make([]dtos.SplitDTO, 0, len(views)) + var till int64 = since + for idx := range views { + if t := views[idx].LastUpdated; t > till { + till = t + } + if views[idx].Active { + namesToFetch = append(namesToFetch, views[idx].Name) + } else { + all = append(all, archivedDTOForView(&views[idx])) + } } - active := p.snapshot.FetchMany(splitNames) - all := make([]dtos.SplitDTO, 0, len(summary.Removed)+len(summary.Updated)) - for _, split := range active { + for name, split := range p.snapshot.FetchMany(namesToFetch) { + if split == nil { + p.logger.Warning(fmt.Sprintf( + "possible inconsistency between historic & snapshot storages. Feature `%s` is missing in the latter", + name, + )) + continue + } all = append(all, *split) } - all = append(all, optimized.BuildArchivedSplitsFor(summary.Removed)...) 
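+	// at this point `all` contains full definitions for the still-active flags plus
+	// synthetic ARCHIVED entries built from the historic views, and `till` holds the
+	// highest change number observed among them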
+ return &dtos.SplitChangesDTO{Since: since, Till: till, Splits: all}, nil } @@ -98,32 +117,19 @@ func (p *ProxySplitStorageImpl) KillLocally(splitName string, defaultTreatment s // Update the storage atomically func (p *ProxySplitStorageImpl) Update(toAdd []dtos.SplitDTO, toRemove []dtos.SplitDTO, changeNumber int64) { + p.setStartingPoint(changeNumber) // will be executed only the first time this method is called + if len(toAdd) == 0 && len(toRemove) == 0 { return } p.mtx.Lock() p.snapshot.Update(toAdd, toRemove, changeNumber) - p.recipes.AddChanges(toAdd, toRemove, changeNumber) + p.historic.Update(toAdd, toRemove, changeNumber) p.db.Update(toAdd, toRemove, changeNumber) p.mtx.Unlock() } -// RegisterOlderCn registers payload associated to a fetch request for an old `since` for which we don't -// have a recipe -func (p *ProxySplitStorageImpl) RegisterOlderCn(payload *dtos.SplitChangesDTO) { - toAdd := make([]dtos.SplitDTO, 0) - toDel := make([]dtos.SplitDTO, 0) - for _, split := range payload.Splits { - if split.Status == "ACTIVE" { - toAdd = append(toAdd, split) - } else { - toDel = append(toDel, split) - } - } - p.recipes.AddOlderChange(toAdd, toDel, payload.Till) -} - // ChangeNumber returns the current change number func (p *ProxySplitStorageImpl) ChangeNumber() (int64, error) { return p.snapshot.ChangeNumber() @@ -166,11 +172,43 @@ func (p *ProxySplitStorageImpl) Count() int { return len(p.SplitNames()) } -func snapshotFromDisk(dst *mutexmap.MMSplitStorage, summary *optimized.SplitChangesSummaries, src *persistent.SplitChangesCollection, logger logging.LoggerInterface) { +// GetNamesByFlagSets implements storage.SplitStorage +func (*ProxySplitStorageImpl) GetNamesByFlagSets(sets []string) map[string][]string { + // NOTE: This method is NOT used by the proxy. + // we need to revisit our interfaces so that we're not obliged to do this smeely empty impls. + return nil +} + +func (p *ProxySplitStorageImpl) setStartingPoint(cn int64) { + p.mtx.Lock() + // will be executed only the first time this method is called or when + // an older change is registered + if p.oldestKnownCN == -1 || cn < p.oldestKnownCN { + p.oldestKnownCN = cn + } + p.mtx.Unlock() +} + +func (p *ProxySplitStorageImpl) sinceIsTooOld(since int64) bool { + if since == -1 { + return false + } + + p.mtx.Lock() + defer p.mtx.Unlock() + return since < p.oldestKnownCN +} + +func snapshotFromDisk( + dst *mutexmap.MMSplitStorage, + historic optimized.HistoricChanges, + src *persistent.SplitChangesCollection, + logger logging.LoggerInterface, +) int64 { all, err := src.FetchAll() if err != nil { logger.Error("error parsing feature flags from snapshot. 
No data will be available!: ", err) - return + return -1 } var filtered []dtos.SplitDTO @@ -187,7 +225,25 @@ func snapshotFromDisk(dst *mutexmap.MMSplitStorage, summary *optimized.SplitChan } dst.Update(filtered, nil, cn) - summary.AddChanges(filtered, nil, cn) + historic.Update(filtered, nil, cn) + return cn +} + +func archivedDTOForView(view *optimized.FeatureView) dtos.SplitDTO { + return dtos.SplitDTO{ + ChangeNumber: view.LastUpdated, + TrafficTypeName: view.TrafficTypeName, + Name: view.Name, + TrafficAllocation: 100, + TrafficAllocationSeed: 0, + Seed: 0, + Status: "ARCHIVED", + Killed: false, + DefaultTreatment: "off", + Algo: 1, + Conditions: make([]dtos.ConditionDTO, 0), + Sets: view.FlagSetNames(), + } } var _ ProxySplitStorage = (*ProxySplitStorageImpl)(nil) diff --git a/splitio/proxy/storage/splits_test.go b/splitio/proxy/storage/splits_test.go index 7365564c..af33e016 100644 --- a/splitio/proxy/storage/splits_test.go +++ b/splitio/proxy/storage/splits_test.go @@ -3,60 +3,183 @@ package storage import ( "testing" + "github.com/splitio/split-synchronizer/v5/splitio/proxy/storage/optimized" + "github.com/splitio/split-synchronizer/v5/splitio/proxy/storage/optimized/mocks" "github.com/splitio/split-synchronizer/v5/splitio/proxy/storage/persistent" "github.com/splitio/go-split-commons/v5/dtos" + "github.com/splitio/go-split-commons/v5/flagsets" "github.com/splitio/go-toolkit/v5/logging" + + "github.com/stretchr/testify/assert" ) func TestSplitStorage(t *testing.T) { dbw, err := persistent.NewBoltWrapper(persistent.BoltInMemoryMode, nil) - if err != nil { - t.Error("error creating bolt wrapper: ", err) - } + assert.Nil(t, err) logger := logging.NewLogger(nil) - splitC := persistent.NewSplitChangesCollection(dbw, logger) - - splitC.Update([]dtos.SplitDTO{ - {Name: "s1", ChangeNumber: 1, Status: "ACTIVE"}, - {Name: "s2", ChangeNumber: 2, Status: "ACTIVE"}, - }, nil, 1) - pss := NewProxySplitStorage(dbw, logger, true) - - sinceMinus1, currentCN, err := pss.recipes.FetchSince(-1) - if err != nil { - t.Error("unexpected error: ", err) - } - - if currentCN != 2 { - t.Error("current cn should be 2. 
Is: ", currentCN) + toAdd := []dtos.SplitDTO{ + {Name: "f1", ChangeNumber: 1, Status: "ACTIVE", TrafficTypeName: "ttt"}, + {Name: "f2", ChangeNumber: 2, Status: "ACTIVE", TrafficTypeName: "ttt"}, } - - if _, ok := sinceMinus1.Updated["s1"]; !ok { - t.Error("s1 should be added") + toAdd2 := []dtos.SplitDTO{{Name: "f3", ChangeNumber: 3, Status: "ACTIVE", TrafficTypeName: "ttt"}} + toRemove := []dtos.SplitDTO{ + archivedDTOForView(&optimized.FeatureView{Name: "f2", Active: false, LastUpdated: 4, TrafficTypeName: "ttt"}), } - if _, ok := sinceMinus1.Updated["s2"]; !ok { - t.Error("s2 should be added") - } + splitC := persistent.NewSplitChangesCollection(dbw, logger) + splitC.Update(toAdd, nil, 2) + + var historicMock mocks.HistoricStorageMock + historicMock.On("Update", toAdd2, []dtos.SplitDTO(nil), int64(3)).Once() + historicMock.On("GetUpdatedSince", int64(2), []string(nil)).Once().Return([]optimized.FeatureView{}) + + pss := NewProxySplitStorage(dbw, logger, flagsets.NewFlagSetFilter(nil), true) + + // validate initial state of the historic cache & replace it with a mock for the next validations + assert.ElementsMatch(t, + []optimized.FeatureView{ + {Name: "f1", Active: true, LastUpdated: 1, FlagSets: []optimized.FlagSetView{}, TrafficTypeName: "ttt"}, + {Name: "f2", Active: true, LastUpdated: 2, FlagSets: []optimized.FlagSetView{}, TrafficTypeName: "ttt"}, + }, pss.historic.GetUpdatedSince(-1, nil)) + pss.historic = &historicMock + // ---- + + changes, err := pss.ChangesSince(-1, nil) + assert.Nil(t, err) + assert.Equal(t, int64(-1), changes.Since) + assert.Equal(t, int64(2), changes.Till) + assert.ElementsMatch(t, changes.Splits, toAdd) + + changes, err = pss.ChangesSince(2, nil) + assert.Nil(t, err) + assert.Equal(t, int64(2), changes.Since) + assert.Equal(t, int64(2), changes.Till) + assert.Empty(t, changes.Splits) + + pss.Update(toAdd2, nil, 3) + historicMock.On("GetUpdatedSince", int64(2), []string(nil)). + Once(). + Return([]optimized.FeatureView{{Name: "f3", LastUpdated: 3, Active: true, TrafficTypeName: "ttt"}}) + + changes, err = pss.ChangesSince(-1, nil) + assert.Nil(t, err) + assert.Equal(t, int64(-1), changes.Since) + assert.Equal(t, int64(3), changes.Till) + assert.ElementsMatch(t, changes.Splits, append(append([]dtos.SplitDTO(nil), toAdd...), toAdd2...)) + + changes, err = pss.ChangesSince(2, nil) + assert.Nil(t, err) + assert.Equal(t, int64(2), changes.Since) + assert.Equal(t, int64(3), changes.Till) + assert.ElementsMatch(t, changes.Splits, toAdd2) + + // archive split2 and check it's no longer returned + historicMock.On("Update", []dtos.SplitDTO(nil), toRemove, int64(4)).Once() + pss.Update(nil, toRemove, 4) + historicMock.On("GetUpdatedSince", int64(3), []string(nil)). + Once(). 
+ Return([]optimized.FeatureView{{Name: "f2", LastUpdated: 4, Active: false, TrafficTypeName: "ttt"}}) + + changes, err = pss.ChangesSince(-1, nil) + assert.Nil(t, err) + assert.Equal(t, int64(-1), changes.Since) + assert.Equal(t, int64(4), changes.Till) + assert.ElementsMatch(t, + []dtos.SplitDTO{ + {Name: "f1", ChangeNumber: 1, Status: "ACTIVE", TrafficTypeName: "ttt"}, + {Name: "f3", ChangeNumber: 3, Status: "ACTIVE", TrafficTypeName: "ttt"}, + }, + changes.Splits) + + changes, err = pss.ChangesSince(3, nil) + assert.Nil(t, err) + assert.Equal(t, int64(3), changes.Since) + assert.Equal(t, int64(4), changes.Till) + assert.ElementsMatch(t, toRemove, changes.Splits) + + historicMock.AssertExpectations(t) +} - since2, currentCN, err := pss.recipes.FetchSince(2) +func TestSplitStorageWithFlagsets(t *testing.T) { + dbw, err := persistent.NewBoltWrapper(persistent.BoltInMemoryMode, nil) if err != nil { - t.Error("unexpected error: ", err) - } - - if currentCN != 2 { - t.Error("current cn should be 2. Is: ", currentCN) - } - - if len(since2.Updated) != 0 { - t.Error("nothing should have been added") + t.Error("error creating bolt wrapper: ", err) } - if len(since2.Removed) != 0 { - t.Error("nothing should have been removed") - } + logger := logging.NewLogger(nil) + splitC := persistent.NewSplitChangesCollection(dbw, logger) + splitC.Update(nil, []dtos.SplitDTO{{Name: "f0", ChangeNumber: 0, Status: "ARCHIVED", TrafficTypeName: "ttt"}}, 0) + + pss := NewProxySplitStorage(dbw, logger, flagsets.NewFlagSetFilter(nil), true) + + pss.Update([]dtos.SplitDTO{ + {Name: "f1", ChangeNumber: 1, Status: "ACTIVE", Sets: []string{"s1", "s2"}}, + {Name: "f2", ChangeNumber: 2, Status: "ACTIVE", Sets: []string{"s2", "s3"}}, + }, nil, 2) + + res, err := pss.ChangesSince(-1, nil) + assert.Nil(t, err) + assert.Equal(t, int64(-1), res.Since) + assert.Equal(t, int64(2), res.Till) + assert.ElementsMatch(t, []dtos.SplitDTO{ + {Name: "f1", ChangeNumber: 1, Status: "ACTIVE", Sets: []string{"s1", "s2"}}, + {Name: "f2", ChangeNumber: 2, Status: "ACTIVE", Sets: []string{"s2", "s3"}}, + }, res.Splits) + + // check for s1 + res, err = pss.ChangesSince(-1, []string{"s1"}) + assert.Nil(t, err) + assert.Equal(t, int64(-1), res.Since) + assert.Equal(t, int64(1), res.Till) + assert.ElementsMatch(t, []dtos.SplitDTO{ + {Name: "f1", ChangeNumber: 1, Status: "ACTIVE", Sets: []string{"s1", "s2"}}, + }, res.Splits) + + // check for s2 + res, err = pss.ChangesSince(-1, []string{"s2"}) + assert.Nil(t, err) + assert.Equal(t, int64(-1), res.Since) + assert.Equal(t, int64(2), res.Till) + assert.ElementsMatch(t, []dtos.SplitDTO{ + {Name: "f1", ChangeNumber: 1, Status: "ACTIVE", Sets: []string{"s1", "s2"}}, + {Name: "f2", ChangeNumber: 2, Status: "ACTIVE", Sets: []string{"s2", "s3"}}, + }, res.Splits) + + // check for s3 + res, err = pss.ChangesSince(-1, []string{"s3"}) + assert.Nil(t, err) + assert.Equal(t, int64(-1), res.Since) + assert.Equal(t, int64(2), res.Till) + assert.ElementsMatch(t, []dtos.SplitDTO{ + {Name: "f2", ChangeNumber: 2, Status: "ACTIVE", Sets: []string{"s2", "s3"}}, + }, res.Splits) + + // --------------------------- + + // remove f1 from s2 + pss.Update([]dtos.SplitDTO{ + {Name: "f1", ChangeNumber: 3, Status: "ACTIVE", Sets: []string{"s1"}}, + }, nil, 2) + + // fetching from -1 only returns f1 + res, err = pss.ChangesSince(-1, []string{"s2"}) + assert.Nil(t, err) + assert.Equal(t, int64(-1), res.Since) + assert.Equal(t, int64(2), res.Till) + assert.ElementsMatch(t, []dtos.SplitDTO{ + {Name: "f2", ChangeNumber: 2, Status: 
"ACTIVE", Sets: []string{"s2", "s3"}}, + }, res.Splits) + + // fetching from -1 only returns f1 + res, err = pss.ChangesSince(-1, []string{"s2"}) + assert.Nil(t, err) + assert.Equal(t, int64(-1), res.Since) + assert.Equal(t, int64(2), res.Till) + assert.ElementsMatch(t, []dtos.SplitDTO{ + {Name: "f2", ChangeNumber: 2, Status: "ACTIVE", Sets: []string{"s2", "s3"}}, + }, res.Splits) } diff --git a/splitio/version.go b/splitio/version.go index c18ff84c..5760d415 100644 --- a/splitio/version.go +++ b/splitio/version.go @@ -2,4 +2,4 @@ package splitio // Version is the version of this Agent -const Version = "5.4.2" +const Version = "5.5.0"