Cached Flag Sets
diff --git a/splitio/common/conf/advanced.go b/splitio/common/conf/advanced.go
index 0d678b00..36063ad7 100644
--- a/splitio/common/conf/advanced.go
+++ b/splitio/common/conf/advanced.go
@@ -8,13 +8,14 @@ import (
// InitAdvancedOptions initializes an advanced config with default values + overridden urls.
func InitAdvancedOptions(proxy bool) *conf.AdvancedConfig {
+ advanced := conf.GetDefaultAdvancedConfig()
prefix := "SPLIT_SYNC_"
if proxy {
prefix = "SPLIT_PROXY_"
+ advanced.LargeSegment.Enable = true
}
- advanced := conf.GetDefaultAdvancedConfig()
if envSdkURL := os.Getenv(prefix + "SDK_URL"); envSdkURL != "" {
advanced.SdkURL = envSdkURL
}
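
The key fix here is ordering: the defaults are now created before the proxy branch runs, so enabling Large Segments mutates the same struct that the URL overrides later touch. A minimal usage sketch, with an assumed URL value (only the env var names above are real):

    // Hedged sketch: proxy mode enables Large Segments on the defaults,
    // and SPLIT_PROXY_* env vars still override the endpoints afterwards.
    os.Setenv("SPLIT_PROXY_SDK_URL", "https://sdk.example.com")
    advanced := conf.InitAdvancedOptions(true) // proxy == true
    fmt.Println(advanced.LargeSegment.Enable)  // true
    fmt.Println(advanced.SdkURL)               // https://sdk.example.com
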
diff --git a/splitio/producer/initialization.go b/splitio/producer/initialization.go
index 445ccdad..77cb05f0 100644
--- a/splitio/producer/initialization.go
+++ b/splitio/producer/initialization.go
@@ -112,7 +112,7 @@ func Start(logger logging.LoggerInterface, cfg *conf.Main) error {
// Healthcheck Monitor
splitsConfig, segmentsConfig, storageConfig := getAppCounterConfigs(storages.SplitStorage)
- appMonitor := hcApplication.NewMonitorImp(splitsConfig, segmentsConfig, &storageConfig, logger)
+ appMonitor := hcApplication.NewMonitorImp(splitsConfig, segmentsConfig, nil, &storageConfig, logger)
servicesMonitor := hcServices.NewMonitorImp(getServicesCountersConfig(advanced), logger)
impressionsCounter := strategy.NewImpressionsCounter()
diff --git a/splitio/provisional/healthcheck/application/counter/basecounter.go b/splitio/provisional/healthcheck/application/counter/basecounter.go
index 4eeb4cea..6a700465 100644
--- a/splitio/provisional/healthcheck/application/counter/basecounter.go
+++ b/splitio/provisional/healthcheck/application/counter/basecounter.go
@@ -15,15 +15,6 @@ const (
Low
)
-const (
- // Splits counter type
- Splits = iota
- // Segments counter type
- Segments
- // Storage counter type
- Storage
-)
-
// HealthyResult description
type HealthyResult struct {
Name string
diff --git a/splitio/provisional/healthcheck/application/monitor.go b/splitio/provisional/healthcheck/application/monitor.go
index 1ceead03..f3de9a5d 100644
--- a/splitio/provisional/healthcheck/application/monitor.go
+++ b/splitio/provisional/healthcheck/application/monitor.go
@@ -5,6 +5,7 @@ import (
"sync"
"time"
+ hc "github.com/splitio/go-split-commons/v6/healthcheck/application"
"github.com/splitio/go-toolkit/v5/logging"
toolkitsync "github.com/splitio/go-toolkit/v5/sync"
"github.com/splitio/split-synchronizer/v5/splitio/provisional/healthcheck/application/counter"
@@ -21,13 +22,12 @@ type MonitorIterface interface {
// MonitorImp description
type MonitorImp struct {
- splitsCounter counter.ThresholdCounterInterface
- segmentsCounter counter.ThresholdCounterInterface
- storageCounter counter.PeriodicCounterInterface
- producerMode toolkitsync.AtomicBool
- healthySince *time.Time
- lock sync.RWMutex
- logger logging.LoggerInterface
+ counters map[int]counter.ThresholdCounterInterface
+ storageCounter counter.PeriodicCounterInterface
+ producerMode toolkitsync.AtomicBool
+ healthySince *time.Time
+ lock sync.RWMutex
+ logger logging.LoggerInterface
}
// HealthDto struct
@@ -56,7 +56,7 @@ func (m *MonitorImp) getHealthySince(healthy bool) *time.Time {
func checkIfIsHealthy(result []ItemDto) bool {
for _, r := range result {
- if r.Healthy == false && r.Severity == counter.Critical {
+ if !r.Healthy && r.Severity == counter.Critical {
return false
}
}
@@ -71,7 +71,10 @@ func (m *MonitorImp) GetHealthStatus() HealthDto {
var items []ItemDto
var results []counter.HealthyResult
- results = append(results, m.splitsCounter.IsHealthy(), m.segmentsCounter.IsHealthy())
+
+ for _, mc := range m.counters {
+ results = append(results, mc.IsHealthy())
+ }
if m.producerMode.IsSet() {
results = append(results, m.storageCounter.IsHealthy())
@@ -104,12 +107,12 @@ func (m *MonitorImp) NotifyEvent(counterType int) {
m.logger.Debug(fmt.Sprintf("Notify Event. Type: %d.", counterType))
- switch counterType {
- case counter.Splits:
- m.splitsCounter.NotifyHit()
- case counter.Segments:
- m.segmentsCounter.NotifyHit()
+ counter, ok := m.counters[counterType]
+ if !ok {
+ m.logger.Debug(fmt.Sprintf("wrong counterType: %d", counterType))
+ return
}
+ counter.NotifyHit()
}
// Reset counter value
@@ -119,12 +122,12 @@ func (m *MonitorImp) Reset(counterType int, value int) {
m.logger.Debug(fmt.Sprintf("Reset. Type: %d. Value: %d", counterType, value))
- switch counterType {
- case counter.Splits:
- m.splitsCounter.ResetThreshold(value)
- case counter.Segments:
- m.segmentsCounter.ResetThreshold(value)
+ counter, ok := m.counters[counterType]
+ if !ok {
+ m.logger.Debug(fmt.Sprintf("wrong counterType: %d", counterType))
+ return
}
+ counter.ResetThreshold(value)
}
// Start counters
@@ -132,8 +135,10 @@ func (m *MonitorImp) Start() {
m.lock.Lock()
defer m.lock.Unlock()
- m.splitsCounter.Start()
- m.segmentsCounter.Start()
+ for _, counter := range m.counters {
+ counter.Start()
+ }
+
if m.producerMode.IsSet() {
m.storageCounter.Start()
}
@@ -146,8 +151,9 @@ func (m *MonitorImp) Stop() {
m.lock.Lock()
defer m.lock.Unlock()
- m.splitsCounter.Stop()
- m.segmentsCounter.Stop()
+ for _, counter := range m.counters {
+ counter.Stop()
+ }
if m.producerMode.IsSet() {
m.storageCounter.Stop()
@@ -158,16 +164,25 @@ func (m *MonitorImp) Stop() {
func NewMonitorImp(
splitsConfig counter.ThresholdConfig,
segmentsConfig counter.ThresholdConfig,
+ largeSegmentsConfig *counter.ThresholdConfig,
storageConfig *counter.PeriodicConfig,
logger logging.LoggerInterface,
) *MonitorImp {
now := time.Now()
+ splitsCounter := counter.NewThresholdCounter(splitsConfig, logger)
+ segmentsCounter := counter.NewThresholdCounter(segmentsConfig, logger)
monitor := &MonitorImp{
- splitsCounter: counter.NewThresholdCounter(splitsConfig, logger),
- segmentsCounter: counter.NewThresholdCounter(segmentsConfig, logger),
- producerMode: *toolkitsync.NewAtomicBool(storageConfig != nil),
- logger: logger,
- healthySince: &now,
+ counters: map[int]counter.ThresholdCounterInterface{},
+ producerMode: *toolkitsync.NewAtomicBool(storageConfig != nil),
+ logger: logger,
+ healthySince: &now,
+ }
+
+ monitor.counters[hc.Splits] = splitsCounter
+ monitor.counters[hc.Segments] = segmentsCounter
+
+ if largeSegmentsConfig != nil {
+ monitor.counters[hc.LargeSegments] = counter.NewThresholdCounter(*largeSegmentsConfig, logger)
}
if monitor.producerMode.IsSet() {
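
Replacing the two named counters with a map keyed by the shared constants from go-split-commons (hc.Splits, hc.Segments, hc.LargeSegments) turns NotifyEvent and Reset into a lookup plus a debug log for unknown types. A hedged wiring sketch (nil PeriodicConfig, so the producer-mode storage counter stays off):

    splitsCfg := counter.DefaultThresholdConfig("Splits")
    segmentsCfg := counter.DefaultThresholdConfig("Segments")
    lsCfg := counter.DefaultThresholdConfig("LargeSegments")
    monitor := NewMonitorImp(splitsCfg, segmentsCfg, &lsCfg, nil, logger) // nil: no storage counter
    monitor.Start()
    monitor.NotifyEvent(hc.Splits)        // resolved through the counters map
    monitor.NotifyEvent(hc.LargeSegments) // present only because &lsCfg was passed
    monitor.Stop()
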
diff --git a/splitio/provisional/healthcheck/application/monitor_test.go b/splitio/provisional/healthcheck/application/monitor_test.go
index edf9859e..ff3ad16f 100644
--- a/splitio/provisional/healthcheck/application/monitor_test.go
+++ b/splitio/provisional/healthcheck/application/monitor_test.go
@@ -36,6 +36,12 @@ func TestMonitor(t *testing.T) {
Severity: counter.Critical,
}
+ lsCfg := counter.ThresholdConfig{
+ Name: "LargeSegments",
+ Period: 10,
+ Severity: counter.Critical,
+ }
+
storageCfg := counter.PeriodicConfig{
Name: "Storage",
Period: 10,
@@ -46,7 +52,7 @@ func TestMonitor(t *testing.T) {
},
}
- monitor := NewMonitorImp(splitsCfg, segmentsCfg, &storageCfg, logging.NewLogger(nil))
+ monitor := NewMonitorImp(splitsCfg, segmentsCfg, &lsCfg, &storageCfg, logging.NewLogger(nil))
monitor.Start()
diff --git a/splitio/proxy/caching/caching.go b/splitio/proxy/caching/caching.go
index efae93cf..66807f2f 100644
--- a/splitio/proxy/caching/caching.go
+++ b/splitio/proxy/caching/caching.go
@@ -19,6 +19,9 @@ const (
// SplitSurrogate key (we only need one, since all splitChanges should be expired when an update is processed)
SplitSurrogate = "sp"
+ // MembershipsSurrogate key (we only need one, since all memberships should be expired when an update is processed)
+ MembershipsSurrogate = "mem"
+
// AuthSurrogate key (having push disabled, it's safe to cache this and return it on all requests)
AuthSurrogate = "au"
@@ -60,7 +63,6 @@ func MakeProxyCache() *gincache.Middleware {
}
func keyFactoryFN(ctx *gin.Context) string {
-
var encodingPrefix string
if strings.Contains(ctx.Request.Header.Get("Accept-Encoding"), "gzip") {
encodingPrefix = "gzip::"
diff --git a/splitio/proxy/caching/mocks/mock.go b/splitio/proxy/caching/mocks/mock.go
new file mode 100644
index 00000000..d3ef093d
--- /dev/null
+++ b/splitio/proxy/caching/mocks/mock.go
@@ -0,0 +1,189 @@
+package mocks
+
+import (
+ "github.com/splitio/gincache"
+ "github.com/splitio/go-split-commons/v6/dtos"
+ "github.com/splitio/go-split-commons/v6/storage"
+ "github.com/splitio/go-split-commons/v6/synchronizer/worker/largesegment"
+ "github.com/splitio/go-split-commons/v6/synchronizer/worker/segment"
+ "github.com/splitio/go-split-commons/v6/synchronizer/worker/split"
+ "github.com/splitio/go-toolkit/v5/datastructures/set"
+ "github.com/stretchr/testify/mock"
+)
+
+// Borrowed mocks: These should be in go-split-commons, but we need to wait until testify is adopted there
+
+type SplitUpdaterMock struct {
+ mock.Mock
+}
+
+// LocalKill implements split.Updater
+func (s *SplitUpdaterMock) LocalKill(splitName string, defaultTreatment string, changeNumber int64) {
+ s.Called(splitName, defaultTreatment, changeNumber)
+}
+
+// SynchronizeFeatureFlags implements split.Updater
+func (s *SplitUpdaterMock) SynchronizeFeatureFlags(ffChange *dtos.SplitChangeUpdate) (*split.UpdateResult, error) {
+ args := s.Called(ffChange)
+ return args.Get(0).(*split.UpdateResult), args.Error(1)
+}
+
+// SynchronizeSplits implements split.Updater
+func (s *SplitUpdaterMock) SynchronizeSplits(till *int64) (*split.UpdateResult, error) {
+ args := s.Called(till)
+ return args.Get(0).(*split.UpdateResult), args.Error(1)
+}
+
+// ----
+
+type CacheFlusherMock struct {
+ mock.Mock
+}
+
+func (c *CacheFlusherMock) Evict(key string) { c.Called(key) }
+func (c *CacheFlusherMock) EvictAll() { c.Called() }
+func (c *CacheFlusherMock) EvictBySurrogate(surrogate string) { c.Called(surrogate) }
+
+// ---
+
+type SplitStorageMock struct {
+ mock.Mock
+}
+
+func (s *SplitStorageMock) All() []dtos.SplitDTO { panic("unimplemented") }
+func (s *SplitStorageMock) ChangeNumber() (int64, error) {
+ args := s.Called()
+ return args.Get(0).(int64), args.Error(1)
+}
+
+func (*SplitStorageMock) FetchMany(splitNames []string) map[string]*dtos.SplitDTO {
+ panic("unimplemented")
+}
+func (*SplitStorageMock) GetNamesByFlagSets(sets []string) map[string][]string {
+ panic("unimplemented")
+}
+func (*SplitStorageMock) GetAllFlagSetNames() []string {
+ panic("unimplemented")
+}
+func (*SplitStorageMock) KillLocally(splitName string, defaultTreatment string, changeNumber int64) {
+ panic("unimplemented")
+}
+func (s *SplitStorageMock) SegmentNames() *set.ThreadUnsafeSet {
+ return s.Called().Get(0).(*set.ThreadUnsafeSet)
+}
+func (s *SplitStorageMock) LargeSegmentNames() *set.ThreadUnsafeSet {
+ return s.Called().Get(0).(*set.ThreadUnsafeSet)
+}
+func (s *SplitStorageMock) SetChangeNumber(changeNumber int64) error {
+ return s.Called(changeNumber).Error(0)
+}
+func (*SplitStorageMock) Split(splitName string) *dtos.SplitDTO { panic("unimplemented") }
+func (*SplitStorageMock) SplitNames() []string { panic("unimplemented") }
+func (*SplitStorageMock) TrafficTypeExists(trafficType string) bool { panic("unimplemented") }
+func (*SplitStorageMock) Update(toAdd []dtos.SplitDTO, toRemove []dtos.SplitDTO, changeNumber int64) {
+ panic("unimplemented")
+}
+
+type SegmentUpdaterMock struct {
+ mock.Mock
+}
+
+func (s *SegmentUpdaterMock) IsSegmentCached(segmentName string) bool { panic("unimplemented") }
+func (s *SegmentUpdaterMock) SegmentNames() []interface{} { panic("unimplemented") }
+
+func (s *SegmentUpdaterMock) SynchronizeSegment(name string, till *int64) (*segment.UpdateResult, error) {
+ args := s.Called(name, till)
+ return args.Get(0).(*segment.UpdateResult), args.Error(1)
+}
+
+func (s *SegmentUpdaterMock) SynchronizeSegments() (map[string]segment.UpdateResult, error) {
+ args := s.Called()
+ return args.Get(0).(map[string]segment.UpdateResult), args.Error(1)
+}
+
+type SegmentStorageMock struct {
+ mock.Mock
+}
+
+func (*SegmentStorageMock) SetChangeNumber(segmentName string, till int64) error {
+ panic("unimplemented")
+}
+func (s *SegmentStorageMock) Update(name string, toAdd *set.ThreadUnsafeSet, toRemove *set.ThreadUnsafeSet, changeNumber int64) error {
+ return s.Called(name, toAdd, toRemove, changeNumber).Error(0)
+}
+
+// ChangeNumber implements storage.SegmentStorage
+func (s *SegmentStorageMock) ChangeNumber(segmentName string) (int64, error) {
+ args := s.Called(segmentName)
+ return args.Get(0).(int64), args.Error(1)
+}
+
+func (*SegmentStorageMock) Keys(segmentName string) *set.ThreadUnsafeSet { panic("unimplemented") }
+func (*SegmentStorageMock) SegmentContainsKey(segmentName string, key string) (bool, error) {
+ panic("unimplemented")
+}
+func (*SegmentStorageMock) SegmentKeysCount() int64 { panic("unimplemented") }
+
+// ---
+
+type LargeSegmentStorageMock struct {
+ mock.Mock
+}
+
+func (s *LargeSegmentStorageMock) SetChangeNumber(name string, till int64) {
+ s.Called(name, till)
+}
+
+func (s *LargeSegmentStorageMock) Update(name string, userKeys []string, till int64) {
+ s.Called(name, userKeys, till)
+}
+func (s *LargeSegmentStorageMock) ChangeNumber(name string) int64 {
+ args := s.Called(name)
+ return args.Get(0).(int64)
+}
+func (s *LargeSegmentStorageMock) Count() int {
+ args := s.Called()
+ return args.Get(0).(int)
+}
+func (s *LargeSegmentStorageMock) LargeSegmentsForUser(userKey string) []string {
+ return s.Called(userKey).Get(0).([]string)
+}
+func (s *LargeSegmentStorageMock) IsInLargeSegment(name string, key string) (bool, error) {
+ args := s.Called(name, key)
+ return args.Get(0).(bool), args.Error(1)
+}
+func (s *LargeSegmentStorageMock) TotalKeys(name string) int {
+ return s.Called(name).Get(0).(int)
+}
+
+// ---
+
+type LargeSegmentUpdaterMock struct {
+ mock.Mock
+}
+
+func (u *LargeSegmentUpdaterMock) SynchronizeLargeSegment(name string, till *int64) (*int64, error) {
+ args := u.Called(name, till)
+ return args.Get(0).(*int64), args.Error(1)
+}
+func (u *LargeSegmentUpdaterMock) SynchronizeLargeSegments() (map[string]*int64, error) {
+ args := u.Called()
+ return args.Get(0).(map[string]*int64), args.Error(1)
+}
+func (u *LargeSegmentUpdaterMock) IsCached(name string) bool {
+ return u.Called(name).Get(0).(bool)
+}
+func (u *LargeSegmentUpdaterMock) SynchronizeLargeSegmentUpdate(lsRFDResponseDTO *dtos.LargeSegmentRFDResponseDTO) (*int64, error) {
+ args := u.Called(lsRFDResponseDTO)
+ return args.Get(0).(*int64), args.Error(1)
+}
+
+// ---
+
+var _ gincache.CacheFlusher = (*CacheFlusherMock)(nil)
+var _ split.Updater = (*SplitUpdaterMock)(nil)
+var _ segment.Updater = (*SegmentUpdaterMock)(nil)
+var _ largesegment.Updater = (*LargeSegmentUpdaterMock)(nil)
+var _ storage.SplitStorage = (*SplitStorageMock)(nil)
+var _ storage.SegmentStorage = (*SegmentStorageMock)(nil)
+var _ storage.LargeSegmentsStorage = (*LargeSegmentStorageMock)(nil)
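
Being testify-based, these mocks follow the usual On/Return/AssertExpectations flow; the compile-time var _ assignments above pin each mock to its interface. A hedged usage sketch (names are illustrative; real usage is in workers_test.go below):

    var flusher mocks.CacheFlusherMock
    flusher.On("EvictBySurrogate", caching.MembershipsSurrogate).Once()

    var lsStorage mocks.LargeSegmentStorageMock
    lsStorage.On("ChangeNumber", "ls1").Return(int64(-1)).Once()

    // ... exercise the code under test ...

    flusher.AssertExpectations(t)
    lsStorage.AssertExpectations(t)
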
diff --git a/splitio/proxy/caching/workers.go b/splitio/proxy/caching/workers.go
index deb9be13..60f7388a 100644
--- a/splitio/proxy/caching/workers.go
+++ b/splitio/proxy/caching/workers.go
@@ -6,6 +6,7 @@ import (
"github.com/splitio/go-split-commons/v6/healthcheck/application"
"github.com/splitio/go-split-commons/v6/service"
"github.com/splitio/go-split-commons/v6/storage"
+ "github.com/splitio/go-split-commons/v6/synchronizer/worker/largesegment"
"github.com/splitio/go-split-commons/v6/synchronizer/worker/segment"
"github.com/splitio/go-split-commons/v6/synchronizer/worker/split"
"github.com/splitio/go-toolkit/v5/logging"
@@ -98,6 +99,7 @@ func (c *CacheAwareSegmentSynchronizer) SynchronizeSegment(name string, till *in
result, err := c.wrapped.SynchronizeSegment(name, till)
if current := result.NewChangeNumber; current > previous || (previous != -1 && current == -1) {
c.cacheFlusher.EvictBySurrogate(MakeSurrogateForSegmentChanges(name))
+ c.cacheFlusher.EvictBySurrogate(MembershipsSurrogate)
}
// remove individual entries for each affected key
@@ -129,6 +131,7 @@ func (c *CacheAwareSegmentSynchronizer) SynchronizeSegments() (map[string]segmen
if pcn, _ := previousCNs[segmentName]; ccn > pcn || (pcn > 0 && ccn == -1) {
// if the segment was updated or the segment was removed, evict it
c.cacheFlusher.EvictBySurrogate(MakeSurrogateForSegmentChanges(segmentName))
+ c.cacheFlusher.EvictBySurrogate(MembershipsSurrogate)
}
for idx := range result.UpdatedKeys {
@@ -151,3 +154,74 @@ func (c *CacheAwareSegmentSynchronizer) SegmentNames() []interface{} {
func (c *CacheAwareSegmentSynchronizer) IsSegmentCached(segmentName string) bool {
return c.wrapped.IsSegmentCached(segmentName)
}
+
+// CacheAwareLargeSegmentSynchronizer wraps the large segment updater and evicts cached memberships responses when large segments change
+type CacheAwareLargeSegmentSynchronizer struct {
+ wrapped largesegment.Updater
+ largeSegmentStorage storage.LargeSegmentsStorage
+ cacheFlusher gincache.CacheFlusher
+ splitStorage storage.SplitStorage
+}
+
+func NewCacheAwareLargeSegmentSync(
+ splitStorage storage.SplitStorage,
+ largeSegmentStorage storage.LargeSegmentsStorage,
+ largeSegmentFetcher service.LargeSegmentFetcher,
+ logger logging.LoggerInterface,
+ runtimeTelemetry storage.TelemetryRuntimeProducer,
+ cacheFlusher gincache.CacheFlusher,
+ appMonitor application.MonitorProducerInterface,
+) *CacheAwareLargeSegmentSynchronizer {
+ return &CacheAwareLargeSegmentSynchronizer{
+ wrapped: largesegment.NewLargeSegmentUpdater(splitStorage, largeSegmentStorage, largeSegmentFetcher, logger, runtimeTelemetry, appMonitor),
+ cacheFlusher: cacheFlusher,
+ largeSegmentStorage: largeSegmentStorage,
+ splitStorage: splitStorage,
+ }
+}
+
+func (c *CacheAwareLargeSegmentSynchronizer) SynchronizeLargeSegment(name string, till *int64) (*int64, error) {
+ previous := c.largeSegmentStorage.ChangeNumber(name)
+ newCN, err := c.wrapped.SynchronizeLargeSegment(name, till)
+
+ if newCN != nil { // guard: the wrapped sync may return a nil change number on error
+ c.evictByLargeSegmentSurrogate(previous, *newCN)
+ }
+
+ return newCN, err
+}
+
+func (c *CacheAwareLargeSegmentSynchronizer) SynchronizeLargeSegments() (map[string]*int64, error) {
+ previousLargeSegmentNames := c.splitStorage.LargeSegmentNames()
+ previousCNs := make(map[string]int64, previousLargeSegmentNames.Size())
+ for _, name := range previousLargeSegmentNames.List() {
+ if strName, ok := name.(string); ok {
+ cn := c.largeSegmentStorage.ChangeNumber(strName)
+ previousCNs[strName] = cn
+ }
+ }
+
+ results, err := c.wrapped.SynchronizeLargeSegments()
+ for name, currentCN := range results {
+ if currentCN == nil { // skip entries whose sync did not yield a change number
+ continue
+ }
+ c.evictByLargeSegmentSurrogate(previousCNs[name], *currentCN)
+ }
+
+ return results, err
+}
+
+func (c *CacheAwareLargeSegmentSynchronizer) IsCached(name string) bool {
+ return c.wrapped.IsCached(name)
+}
+
+func (c *CacheAwareLargeSegmentSynchronizer) SynchronizeLargeSegmentUpdate(lsRFDResponseDTO *dtos.LargeSegmentRFDResponseDTO) (*int64, error) {
+ previous := c.largeSegmentStorage.ChangeNumber(lsRFDResponseDTO.Name)
+ newCN, err := c.wrapped.SynchronizeLargeSegmentUpdate(lsRFDResponseDTO)
+
+ if newCN != nil { // guard: nil change number when the wrapped update fails
+ c.evictByLargeSegmentSurrogate(previous, *newCN)
+ }
+
+ return newCN, err
+}
+
+func (c *CacheAwareLargeSegmentSynchronizer) evictByLargeSegmentSurrogate(previousCN int64, currentCN int64) {
+ if currentCN > previousCN || currentCN == -1 {
+ c.cacheFlusher.EvictBySurrogate(MembershipsSurrogate)
+ }
+}
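
The predicate in evictByLargeSegmentSurrogate is the whole cache-coherence policy for large segments: evict when the change number advanced, or when it dropped to -1 (the large segment was removed). A hedged walkthrough of the same rule as a standalone closure:

    shouldEvict := func(previousCN, currentCN int64) bool {
        return currentCN > previousCN || currentCN == -1 // same predicate as evictByLargeSegmentSurrogate
    }
    fmt.Println(shouldEvict(100, 150)) // true: newer data was fetched
    fmt.Println(shouldEvict(100, 100)) // false: nothing changed
    fmt.Println(shouldEvict(100, -1))  // true: large segment was deleted
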
diff --git a/splitio/proxy/caching/workers_test.go b/splitio/proxy/caching/workers_test.go
index ad19e847..57cda98b 100644
--- a/splitio/proxy/caching/workers_test.go
+++ b/splitio/proxy/caching/workers_test.go
@@ -3,21 +3,19 @@ package caching
import (
"testing"
- "github.com/splitio/gincache"
"github.com/splitio/go-split-commons/v6/dtos"
- "github.com/splitio/go-split-commons/v6/storage"
"github.com/splitio/go-split-commons/v6/synchronizer/worker/segment"
"github.com/splitio/go-split-commons/v6/synchronizer/worker/split"
"github.com/splitio/go-toolkit/v5/datastructures/set"
+ "github.com/splitio/split-synchronizer/v5/splitio/proxy/caching/mocks"
"github.com/stretchr/testify/assert"
- "github.com/stretchr/testify/mock"
)
func TestCacheAwareSplitSyncNoChanges(t *testing.T) {
- var splitSyncMock splitUpdaterMock
+ var splitSyncMock mocks.SplitUpdaterMock
splitSyncMock.On("SynchronizeSplits", (*int64)(nil)).Return((*split.UpdateResult)(nil), error(nil))
- var cacheFlusherMock cacheFlusherMock
- var storageMock splitStorageMock
+ var cacheFlusherMock mocks.CacheFlusherMock
+ var storageMock mocks.SplitStorageMock
storageMock.On("ChangeNumber").Return(int64(-1), error(nil))
css := CacheAwareSplitSynchronizer{
@@ -36,13 +34,13 @@ func TestCacheAwareSplitSyncNoChanges(t *testing.T) {
}
func TestCacheAwareSplitSyncChanges(t *testing.T) {
- var splitSyncMock splitUpdaterMock
+ var splitSyncMock mocks.SplitUpdaterMock
splitSyncMock.On("SynchronizeSplits", (*int64)(nil)).Return((*split.UpdateResult)(nil), error(nil)).Times(2)
- var cacheFlusherMock cacheFlusherMock
+ var cacheFlusherMock mocks.CacheFlusherMock
cacheFlusherMock.On("EvictBySurrogate", SplitSurrogate).Times(3)
- var storageMock splitStorageMock
+ var storageMock mocks.SplitStorageMock
storageMock.On("ChangeNumber").Return(int64(-1), error(nil)).Once()
storageMock.On("ChangeNumber").Return(int64(1), error(nil)).Once()
@@ -75,13 +73,13 @@ func TestCacheAwareSplitSyncChangesNewMethod(t *testing.T) {
// This test is used to test the new method. Eventually commons should be cleaned in order to have a single method for split-synchronization.
// when that happens, either this or the previous test should be removed
- var splitSyncMock splitUpdaterMock
+ var splitSyncMock mocks.SplitUpdaterMock
splitSyncMock.On("SynchronizeFeatureFlags", (*dtos.SplitChangeUpdate)(nil)).Return((*split.UpdateResult)(nil), error(nil)).Times(2)
- var cacheFlusherMock cacheFlusherMock
+ var cacheFlusherMock mocks.CacheFlusherMock
cacheFlusherMock.On("EvictBySurrogate", SplitSurrogate).Times(2)
- var storageMock splitStorageMock
+ var storageMock mocks.SplitStorageMock
storageMock.On("ChangeNumber").Return(int64(-1), error(nil)).Once()
storageMock.On("ChangeNumber").Return(int64(1), error(nil)).Once()
@@ -108,14 +106,12 @@ func TestCacheAwareSplitSyncChangesNewMethod(t *testing.T) {
}
func TestCacheAwareSegmentSyncNoChanges(t *testing.T) {
- var segmentUpdater segmentUpdaterMock
+ var segmentUpdater mocks.SegmentUpdaterMock
segmentUpdater.On("SynchronizeSegment", "segment1", (*int64)(nil)).Return(&segment.UpdateResult{}, nil).Once()
- var splitStorage splitStorageMock
-
- var cacheFlusher cacheFlusherMock
-
- var segmentStorage segmentStorageMock
+ var splitStorage mocks.SplitStorageMock
+ var cacheFlusher mocks.CacheFlusherMock
+ var segmentStorage mocks.SegmentStorageMock
segmentStorage.On("ChangeNumber", "segment1").Return(int64(0), nil).Once()
css := CacheAwareSegmentSynchronizer{
@@ -136,20 +132,21 @@ func TestCacheAwareSegmentSyncNoChanges(t *testing.T) {
}
func TestCacheAwareSegmentSyncSingle(t *testing.T) {
- var segmentUpdater segmentUpdaterMock
+ var segmentUpdater mocks.SegmentUpdaterMock
segmentUpdater.On("SynchronizeSegment", "segment1", (*int64)(nil)).Return(&segment.UpdateResult{
UpdatedKeys: []string{"k1"},
NewChangeNumber: 2,
}, nil).Once()
- var splitStorage splitStorageMock
+ var splitStorage mocks.SplitStorageMock
- var cacheFlusher cacheFlusherMock
+ var cacheFlusher mocks.CacheFlusherMock
cacheFlusher.On("EvictBySurrogate", MakeSurrogateForSegmentChanges("segment1")).Times(2)
cacheFlusher.On("Evict", "/api/mySegments/k1").Times(2)
cacheFlusher.On("Evict", "gzip::/api/mySegments/k1").Times(2)
+ cacheFlusher.On("EvictBySurrogate", MembershipsSurrogate).Times(2)
- var segmentStorage segmentStorageMock
+ var segmentStorage mocks.SegmentStorageMock
segmentStorage.On("ChangeNumber", "segment1").Return(int64(0), nil).Once()
css := CacheAwareSegmentSynchronizer{
@@ -180,21 +177,22 @@ func TestCacheAwareSegmentSyncSingle(t *testing.T) {
}
func TestCacheAwareSegmentSyncAllSegments(t *testing.T) {
- var segmentUpdater segmentUpdaterMock
+ var segmentUpdater mocks.SegmentUpdaterMock
segmentUpdater.On("SynchronizeSegments").Return(map[string]segment.UpdateResult{"segment2": {
UpdatedKeys: []string{"k1"},
NewChangeNumber: 1,
}}, nil).Once()
- var splitStorage splitStorageMock
+ var splitStorage mocks.SplitStorageMock
splitStorage.On("SegmentNames").Return(set.NewSet("segment2")).Once()
- var cacheFlusher cacheFlusherMock
+ var cacheFlusher mocks.CacheFlusherMock
cacheFlusher.On("EvictBySurrogate", MakeSurrogateForSegmentChanges("segment2")).Times(1)
cacheFlusher.On("Evict", "/api/mySegments/k1").Times(3)
cacheFlusher.On("Evict", "gzip::/api/mySegments/k1").Times(3)
+ cacheFlusher.On("EvictBySurrogate", MembershipsSurrogate).Times(3)
- var segmentStorage segmentStorageMock
+ var segmentStorage mocks.SegmentStorageMock
segmentStorage.On("ChangeNumber", "segment2").Return(int64(0), nil).Once()
css := CacheAwareSegmentSynchronizer{
@@ -238,142 +236,119 @@ func TestCacheAwareSegmentSyncAllSegments(t *testing.T) {
cacheFlusher.AssertExpectations(t)
}
-// Borrowed mocks: These sohuld be in go-split-commons. but we need to wait until testify is adopted there
+// CacheAwareLargeSegmentSynchronizer
+func TestSynchronizeLargeSegment(t *testing.T) {
+ lsName := "largeSegment1"
-type splitUpdaterMock struct {
- mock.Mock
-}
+ var splitStorage mocks.SplitStorageMock
+ var cacheFlusher mocks.CacheFlusherMock
+ cacheFlusher.On("EvictBySurrogate", MembershipsSurrogate).Once()
-// LocalKill implements split.Updater
-func (s *splitUpdaterMock) LocalKill(splitName string, defaultTreatment string, changeNumber int64) {
- s.Called(splitName, defaultTreatment, changeNumber)
-}
+ var largeSegmentStorage mocks.LargeSegmentStorageMock
+ largeSegmentStorage.On("ChangeNumber", lsName).Return(int64(-1)).Once()
-// SynchronizeFeatureFlags implements split.Updater
-func (s *splitUpdaterMock) SynchronizeFeatureFlags(ffChange *dtos.SplitChangeUpdate) (*split.UpdateResult, error) {
- args := s.Called(ffChange)
- return args.Get(0).(*split.UpdateResult), args.Error(1)
-}
+ var lsUpdater mocks.LargeSegmentUpdaterMock
+ cnToReturn := int64(100)
+ lsUpdater.On("SynchronizeLargeSegment", lsName, (*int64)(nil)).Return(&cnToReturn, nil).Once()
-// SynchronizeSplits implements split.Updater
-func (s *splitUpdaterMock) SynchronizeSplits(till *int64) (*split.UpdateResult, error) {
- args := s.Called(till)
- return args.Get(0).(*split.UpdateResult), args.Error(1)
-}
+ clsSync := CacheAwareLargeSegmentSynchronizer{
+ wrapped: &lsUpdater,
+ cacheFlusher: &cacheFlusher,
+ largeSegmentStorage: &largeSegmentStorage,
+ splitStorage: &splitStorage,
+ }
-// ----
+ cn, err := clsSync.SynchronizeLargeSegment(lsName, nil)
+ if err != nil {
+ t.Error("Error should be nil. Actual: ", err)
+ }
+
+ if *cn != 100 {
+ t.Error("ChangeNumber should be 100. Actual: ", *cn)
+ }
-type cacheFlusherMock struct {
- mock.Mock
+ cacheFlusher.AssertExpectations(t)
+ largeSegmentStorage.AssertExpectations(t)
+ lsUpdater.AssertExpectations(t)
}
-func (c *cacheFlusherMock) Evict(key string) { c.Called(key) }
-func (c *cacheFlusherMock) EvictAll() { c.Called() }
-func (c *cacheFlusherMock) EvictBySurrogate(surrogate string) { c.Called(surrogate) }
+func TestSynchronizeLargeSegmentHighestPrevious(t *testing.T) {
+ lsName := "largeSegment1"
-// ---
+ var splitStorage mocks.SplitStorageMock
+ var cacheFlusher mocks.CacheFlusherMock
-type splitStorageMock struct {
- mock.Mock
-}
+ var largeSegmentStorage mocks.LargeSegmentStorageMock
+ largeSegmentStorage.On("ChangeNumber", lsName).Return(int64(200)).Once()
-func (s *splitStorageMock) All() []dtos.SplitDTO { panic("unimplemented") }
-func (s *splitStorageMock) ChangeNumber() (int64, error) {
- args := s.Called()
- return args.Get(0).(int64), args.Error(1)
-}
+ var lsUpdater mocks.LargeSegmentUpdaterMock
+ cnToReturn := int64(100)
+ lsUpdater.On("SynchronizeLargeSegment", lsName, (*int64)(nil)).Return(&cnToReturn, nil).Once()
-func (*splitStorageMock) FetchMany(splitNames []string) map[string]*dtos.SplitDTO {
- panic("unimplemented")
-}
-func (*splitStorageMock) GetNamesByFlagSets(sets []string) map[string][]string {
- panic("unimplemented")
-}
-func (*splitStorageMock) GetAllFlagSetNames() []string {
- panic("unimplemented")
-}
-func (*splitStorageMock) KillLocally(splitName string, defaultTreatment string, changeNumber int64) {
- panic("unimplemented")
-}
-func (s *splitStorageMock) SegmentNames() *set.ThreadUnsafeSet {
- return s.Called().Get(0).(*set.ThreadUnsafeSet)
-}
-func (s *splitStorageMock) SetChangeNumber(changeNumber int64) error {
- return s.Called(changeNumber).Error(0)
-}
-func (*splitStorageMock) Split(splitName string) *dtos.SplitDTO { panic("unimplemented") }
-func (*splitStorageMock) SplitNames() []string { panic("unimplemented") }
-func (*splitStorageMock) TrafficTypeExists(trafficType string) bool { panic("unimplemented") }
-func (*splitStorageMock) Update(toAdd []dtos.SplitDTO, toRemove []dtos.SplitDTO, changeNumber int64) {
- panic("unimplemented")
-}
-
-type segmentUpdaterMock struct {
- mock.Mock
-}
+ clsSync := CacheAwareLargeSegmentSynchronizer{
+ wrapped: &lsUpdater,
+ cacheFlusher: &cacheFlusher,
+ largeSegmentStorage: &largeSegmentStorage,
+ splitStorage: &splitStorage,
+ }
-func (s *segmentUpdaterMock) IsSegmentCached(segmentName string) bool { panic("unimplemented") }
-func (s *segmentUpdaterMock) SegmentNames() []interface{} { panic("unimplemented") }
+ cn, err := clsSync.SynchronizeLargeSegment(lsName, nil)
+ if err != nil {
+ t.Error("Error should be nil. Actual: ", err)
+ }
-func (s *segmentUpdaterMock) SynchronizeSegment(name string, till *int64) (*segment.UpdateResult, error) {
- args := s.Called(name, till)
- return args.Get(0).(*segment.UpdateResult), args.Error(1)
-}
+ if *cn != 100 {
+ t.Error("ChangeNumber should be 100. Actual: ", *cn)
+ }
-func (s *segmentUpdaterMock) SynchronizeSegments() (map[string]segment.UpdateResult, error) {
- args := s.Called()
- return args.Get(0).(map[string]segment.UpdateResult), args.Error(1)
+ splitStorage.AssertExpectations(t)
+ cacheFlusher.AssertExpectations(t)
+ largeSegmentStorage.AssertExpectations(t)
+ lsUpdater.AssertExpectations(t)
}
-type segmentStorageMock struct {
- mock.Mock
-}
+func TestSynchronizeLargeSegments(t *testing.T) {
+ var splitStorage mocks.SplitStorageMock
+ splitStorage.On("LargeSegmentNames").Return(set.NewSet("ls1", "ls2"))
-func (*segmentStorageMock) SetChangeNumber(segmentName string, till int64) error {
- panic("unimplemented")
-}
-func (s *segmentStorageMock) Update(name string, toAdd *set.ThreadUnsafeSet, toRemove *set.ThreadUnsafeSet, changeNumber int64) error {
- return s.Called(name, toAdd, toRemove, changeNumber).Error(0)
-}
+ var cacheFlusher mocks.CacheFlusherMock
+ cacheFlusher.On("EvictBySurrogate", MembershipsSurrogate).Times(2)
-// ChangeNumber implements storage.SegmentStorage
-func (s *segmentStorageMock) ChangeNumber(segmentName string) (int64, error) {
- args := s.Called(segmentName)
- return args.Get(0).(int64), args.Error(1)
-}
+ var cn1 int64 = 100
+ var cn2 int64 = 200
+ var largeSegmentStorage mocks.LargeSegmentStorageMock
+ largeSegmentStorage.On("ChangeNumber", "ls1").Return(cn1 - 50).Once()
+ largeSegmentStorage.On("ChangeNumber", "ls2").Return(cn2 - 50).Once()
-func (*segmentStorageMock) Keys(segmentName string) *set.ThreadUnsafeSet { panic("unimplemented") }
-func (*segmentStorageMock) SegmentContainsKey(segmentName string, key string) (bool, error) {
- panic("unimplemented")
-}
-func (*segmentStorageMock) SegmentKeysCount() int64 { panic("unimplemented") }
-
-/*
- type segmentUpdaterMock struct {
- SynchronizeSegmentCall func(name string, till *int64) (*segment.UpdateResult, error)
- SynchronizeSegmentsCall func() (map[string]segment.UpdateResult, error)
- SegmentNamesCall func() []interface{}
- IsSegmentCachedCall func(segmentName string) bool
+ var lsUpdater mocks.LargeSegmentUpdaterMock
+ result := map[string]*int64{
+ "ls1": &cn1,
+ "ls2": &cn2,
}
+ lsUpdater.On("SynchronizeLargeSegments").Return(result, nil).Once()
- func (s *segmentUpdaterMock) SynchronizeSegment(name string, till *int64) (*segment.UpdateResult, error) {
- return s.SynchronizeSegmentCall(name, till)
+ clsSync := CacheAwareLargeSegmentSynchronizer{
+ wrapped: &lsUpdater,
+ cacheFlusher: &cacheFlusher,
+ largeSegmentStorage: &largeSegmentStorage,
+ splitStorage: &splitStorage,
}
- func (s *segmentUpdaterMock) SynchronizeSegments() (map[string]segment.UpdateResult, error) {
- return s.SynchronizeSegmentsCall()
+ cn, err := clsSync.SynchronizeLargeSegments()
+ if err != nil {
+ t.Error("Error should be nil. Actual: ", err)
}
- func (s *segmentUpdaterMock) SegmentNames() []interface{} {
- return s.SegmentNamesCall()
+ if *cn["ls1"] != cn1 {
+ t.Error("ChangeNumber should be 100. Actual: ", *cn["ls1"])
}
- func (s *segmentUpdaterMock) IsSegmentCached(segmentName string) bool {
- return s.IsSegmentCachedCall(segmentName)
+ if *cn["ls2"] != cn2 {
+ t.Error("ChangeNumber should be 200. Actual: ", *cn["ls2"])
}
-*/
-var _ split.Updater = (*splitUpdaterMock)(nil)
-var _ storage.SplitStorage = (*splitStorageMock)(nil)
-var _ gincache.CacheFlusher = (*cacheFlusherMock)(nil)
-var _ segment.Updater = (*segmentUpdaterMock)(nil)
-var _ storage.SegmentStorage = (*segmentStorageMock)(nil)
+
+ splitStorage.AssertExpectations(t)
+ cacheFlusher.AssertExpectations(t)
+ largeSegmentStorage.AssertExpectations(t)
+ lsUpdater.AssertExpectations(t)
+}
diff --git a/splitio/proxy/conf/sections.go b/splitio/proxy/conf/sections.go
index 4b3df854..5ffb8b8d 100644
--- a/splitio/proxy/conf/sections.go
+++ b/splitio/proxy/conf/sections.go
@@ -2,6 +2,7 @@ package conf
import (
cconf "github.com/splitio/go-split-commons/v6/conf"
+ "github.com/splitio/go-split-commons/v6/service/api/specs"
"github.com/splitio/split-synchronizer/v5/splitio/common/conf"
)
@@ -20,7 +21,7 @@ type Main struct {
Logging conf.Logging `json:"logging" s-nested:"true"`
Healthcheck Healthcheck `json:"healthcheck" s-nested:"true"`
Observability Observability `json:"observability" s-nested:"true"`
- FlagSpecVersion string `json:"flagSpecVersion" s-cli:"flag-spec-version" s-def:"1.1" s-desc:"Spec version for flags"`
+ FlagSpecVersion string `json:"flagSpecVersion" s-cli:"flag-spec-version" s-def:"1.2" s-desc:"Spec version for flags"`
}
// BuildAdvancedConfig generates a commons-compatible advancedconfig with default + overridden parameters
@@ -32,6 +33,9 @@ func (m *Main) BuildAdvancedConfig() *cconf.AdvancedConfig {
tmp.StreamingEnabled = m.Sync.Advanced.StreamingEnabled
tmp.SplitsRefreshRate = int(m.Sync.SplitRefreshRateMs / 1000)
tmp.SegmentsRefreshRate = int(m.Sync.SegmentRefreshRateMs / 1000)
+ tmp.LargeSegment.LazyLoad = m.Sync.Advanced.LargeSegmentLazyLoad
+ tmp.LargeSegment.RefreshRate = int(m.Sync.LargeSegmentRefreshRateMs / 1000)
+ tmp.LargeSegment.Version = specs.LARGESEGMENT_V10
return tmp
}
@@ -68,9 +72,10 @@ type Persistent struct {
// Sync configuration options
type Sync struct {
- SplitRefreshRateMs int64 `json:"splitRefreshRateMs" s-cli:"split-refresh-rate-ms" s-def:"60000" s-desc:"How often to refresh feature flags"`
- SegmentRefreshRateMs int64 `json:"segmentRefreshRateMs" s-cli:"segment-refresh-rate-ms" s-def:"60000" s-desc:"How often to refresh segments"`
- Advanced AdvancedSync `json:"advanced" s-nested:"true"`
+ SplitRefreshRateMs int64 `json:"splitRefreshRateMs" s-cli:"split-refresh-rate-ms" s-def:"60000" s-desc:"How often to refresh feature flags"`
+ SegmentRefreshRateMs int64 `json:"segmentRefreshRateMs" s-cli:"segment-refresh-rate-ms" s-def:"60000" s-desc:"How often to refresh segments"`
+ LargeSegmentRefreshRateMs int64 `json:"largeSegmentRefreshRateMs" s-cli:"largesegment-refresh-rate-ms" s-def:"600000" s-desc:"How often to refresh large segments"`
+ Advanced AdvancedSync `json:"advanced" s-nested:"true"`
}
// AdvancedSync configuration options
@@ -84,6 +89,7 @@ type AdvancedSync struct {
EventsWorkers int64 `json:"eventsWorkers" s-cli:"events-workers" s-def:"10" s-desc:"#workers to forward events to Split servers"`
TelemetryWorkers int64 `json:"telemetryWorkers" s-cli:"telemetry-workers" s-def:"10" s-desc:"#workers to forward telemetry to Split servers"`
InternalMetricsRateMs int64 `json:"internalTelemetryRateMs" s-cli:"internal-metrics-rate-ms" s-def:"3600000" s-desc:"How often to send internal metrics"`
+ LargeSegmentLazyLoad bool `json:"largeSegmentLazyLoad" s-cli:"largesegment-lazy-load" s-def:"false" s-desc:"On/Off Large Segment Lazy Load"`
}
// Healthcheck configuration options
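
The new knobs follow the existing millisecond convention; BuildAdvancedConfig converts them to the seconds that commons expects and pins the large segment spec version. A hedged sketch using the defaults from the s-def tags above (a real Main carries many more fields):

    m := Main{Sync: Sync{LargeSegmentRefreshRateMs: 600000}} // default: 10 minutes
    advanced := m.BuildAdvancedConfig()
    fmt.Println(advanced.LargeSegment.RefreshRate) // 600 (seconds)
    fmt.Println(advanced.LargeSegment.Version)     // specs.LARGESEGMENT_V10
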
diff --git a/splitio/proxy/controllers/sdk.go b/splitio/proxy/controllers/sdk.go
index 3909558b..87855863 100644
--- a/splitio/proxy/controllers/sdk.go
+++ b/splitio/proxy/controllers/sdk.go
@@ -12,6 +12,7 @@ import (
"github.com/splitio/go-split-commons/v6/engine/validator"
"github.com/splitio/go-split-commons/v6/service"
"github.com/splitio/go-split-commons/v6/service/api/specs"
+ cmnStorage "github.com/splitio/go-split-commons/v6/storage"
"github.com/splitio/go-toolkit/v5/logging"
"golang.org/x/exp/slices"
@@ -28,6 +29,7 @@ type SdkServerController struct {
proxySegmentStorage storage.ProxySegmentStorage
fsmatcher flagsets.FlagSetMatcher
versionFilter specs.SplitVersionFilter
+ largeSegmentStorage cmnStorage.LargeSegmentsStorage
}
// NewSdkServerController instantiates a new sdk server controller
@@ -37,6 +39,7 @@ func NewSdkServerController(
proxySplitStorage storage.ProxySplitStorage,
proxySegmentStorage storage.ProxySegmentStorage,
fsmatcher flagsets.FlagSetMatcher,
+ largeSegmentStorage cmnStorage.LargeSegmentsStorage,
) *SdkServerController {
return &SdkServerController{
@@ -46,6 +49,7 @@ func NewSdkServerController(
proxySegmentStorage: proxySegmentStorage,
fsmatcher: fsmatcher,
versionFilter: specs.NewSplitVersionFilter(),
+ largeSegmentStorage: largeSegmentStorage,
}
}
@@ -54,6 +58,41 @@ func (c *SdkServerController) Register(router gin.IRouter) {
router.GET("/splitChanges", c.SplitChanges)
router.GET("/segmentChanges/:name", c.SegmentChanges)
router.GET("/mySegments/:key", c.MySegments)
+ router.GET("/memberships/:key", c.Memberships)
+}
+
+func (c *SdkServerController) Memberships(ctx *gin.Context) {
+ c.logger.Debug(fmt.Sprintf("Headers: %v", ctx.Request.Header))
+ key := ctx.Param("key")
+ segmentList, err := c.proxySegmentStorage.SegmentsFor(key)
+ if err != nil {
+ c.logger.Error(fmt.Sprintf("error fetching segments for user '%s': %s", key, err.Error()))
+ ctx.JSON(http.StatusInternalServerError, gin.H{})
+ return
+ }
+
+ mySegments := make([]dtos.Segment, 0, len(segmentList))
+ for _, segmentName := range segmentList {
+ mySegments = append(mySegments, dtos.Segment{Name: segmentName})
+ }
+
+ lsList := c.largeSegmentStorage.LargeSegmentsForUser(key)
+ myLargeSegments := make([]dtos.Segment, 0, len(lsList))
+ for _, name := range lsList {
+ myLargeSegments = append(myLargeSegments, dtos.Segment{Name: name})
+ }
+
+ payload := dtos.MembershipsResponseDTO{
+ MySegments: dtos.Memberships{
+ Segments: mySegments,
+ },
+ MyLargeSegments: dtos.Memberships{
+ Segments: myLargeSegments,
+ },
+ }
+
+ ctx.JSON(http.StatusOK, payload)
+ ctx.Set(caching.SurrogateContextKey, []string{caching.MembershipsSurrogate})
}
// SplitChanges Returns a diff containing changes in feature flags from a certain point in time until now.
@@ -82,10 +121,14 @@ func (c *SdkServerController) SplitChanges(ctx *gin.Context) {
return
}
- spec, _ := ctx.GetQuery("s")
- if spec != specs.FLAG_V1_1 {
- spec = specs.FLAG_V1_0
+ sParam, _ := ctx.GetQuery("s")
+ spec, err := specs.ParseAndValidate(sParam)
+ if err != nil {
+ c.logger.Error(fmt.Sprintf("error parsing spec version: %s.", err))
+ ctx.JSON(http.StatusBadRequest, gin.H{"code": 400, "message": err.Error()})
+ return
}
+
splits.Splits = c.patchUnsupportedMatchers(splits.Splits, spec)
ctx.JSON(http.StatusOK, splits)
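
For reference, the Memberships handler above yields a body like the following (hypothetical key with one segment and one large segment each; the shape matches the DTOs asserted in the tests below):

    // GET /api/memberships/user1 -> 200, cached under MembershipsSurrogate
    payload := dtos.MembershipsResponseDTO{
        MySegments:      dtos.Memberships{Segments: []dtos.Segment{{Name: "segment1"}}},
        MyLargeSegments: dtos.Memberships{Segments: []dtos.Segment{{Name: "largeSegment1"}}},
    }
    body, _ := json.Marshal(payload)
    fmt.Println(string(body))
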
diff --git a/splitio/proxy/controllers/sdk_test.go b/splitio/proxy/controllers/sdk_test.go
index 0c0c1b99..50bc3952 100644
--- a/splitio/proxy/controllers/sdk_test.go
+++ b/splitio/proxy/controllers/sdk_test.go
@@ -15,6 +15,7 @@ import (
"github.com/splitio/go-split-commons/v6/engine/grammar/matchers"
"github.com/splitio/go-split-commons/v6/service"
"github.com/splitio/go-split-commons/v6/service/api/specs"
+ cmnStorage "github.com/splitio/go-split-commons/v6/storage"
"github.com/splitio/go-toolkit/v5/logging"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/mock"
@@ -33,6 +34,7 @@ func TestSplitChangesRecentSince(t *testing.T) {
Once()
var splitFetcher splitFetcherMock
+ var largeSegmentStorageMock largeSegmentStorageMock
resp := httptest.NewRecorder()
ctx, router := gin.CreateTestContext(resp)
@@ -44,6 +46,7 @@ func TestSplitChangesRecentSince(t *testing.T) {
&splitStorage,
nil,
flagsets.NewMatcher(false, nil),
+ &largeSegmentStorageMock,
)
controller.Register(group)
@@ -83,6 +86,8 @@ func TestSplitChangesOlderSince(t *testing.T) {
Return(&dtos.SplitChangesDTO{Since: -1, Till: 1, Splits: []dtos.SplitDTO{{Name: "s1", Status: "ACTIVE"}, {Name: "s2", Status: "ACTIVE"}}}, nil).
Once()
+ var largeSegmentStorageMock largeSegmentStorageMock
+
resp := httptest.NewRecorder()
ctx, router := gin.CreateTestContext(resp)
@@ -95,6 +100,7 @@ func TestSplitChangesOlderSince(t *testing.T) {
&splitStorage,
nil,
flagsets.NewMatcher(false, nil),
+ &largeSegmentStorageMock,
)
controller.Register(group)
@@ -134,6 +140,8 @@ func TestSplitChangesOlderSinceFetchFails(t *testing.T) {
Return((*dtos.SplitChangesDTO)(nil), errors.New("something")).
Once()
+ var largeSegmentStorageMock largeSegmentStorageMock
+
resp := httptest.NewRecorder()
ctx, router := gin.CreateTestContext(resp)
@@ -146,6 +154,7 @@ func TestSplitChangesOlderSinceFetchFails(t *testing.T) {
&splitStorage,
nil,
flagsets.NewMatcher(false, nil),
+ &largeSegmentStorageMock,
)
controller.Register(group)
@@ -171,6 +180,7 @@ func TestSplitChangesWithFlagSets(t *testing.T) {
Once()
var splitFetcher splitFetcherMock
+ var largeSegmentStorageMock largeSegmentStorageMock
resp := httptest.NewRecorder()
ctx, router := gin.CreateTestContext(resp)
@@ -184,6 +194,7 @@ func TestSplitChangesWithFlagSets(t *testing.T) {
&splitStorage,
nil,
flagsets.NewMatcher(false, nil),
+ &largeSegmentStorageMock,
)
controller.Register(group)
@@ -218,6 +229,7 @@ func TestSplitChangesWithFlagSetsStrict(t *testing.T) {
Once()
var splitFetcher splitFetcherMock
+ var largeSegmentStorageMock largeSegmentStorageMock
resp := httptest.NewRecorder()
ctx, router := gin.CreateTestContext(resp)
@@ -231,6 +243,7 @@ func TestSplitChangesWithFlagSetsStrict(t *testing.T) {
&splitStorage,
nil,
flagsets.NewMatcher(true, []string{"a", "c"}),
+ &largeSegmentStorageMock,
)
controller.Register(group)
@@ -285,6 +298,7 @@ func TestSplitChangesNewMatcherOldSpec(t *testing.T) {
Once()
var splitFetcher splitFetcherMock
+ var largeSegmentStorageMock largeSegmentStorageMock
resp := httptest.NewRecorder()
ctx, router := gin.CreateTestContext(resp)
@@ -296,6 +310,7 @@ func TestSplitChangesNewMatcherOldSpec(t *testing.T) {
&splitStorage,
nil,
flagsets.NewMatcher(false, nil),
+ &largeSegmentStorageMock,
)
controller.Register(group)
@@ -353,6 +368,7 @@ func TestSplitChangesNewMatcherNewSpec(t *testing.T) {
Once()
var splitFetcher splitFetcherMock
+ var largeSegmentStorageMock largeSegmentStorageMock
resp := httptest.NewRecorder()
ctx, router := gin.CreateTestContext(resp)
@@ -364,6 +380,7 @@ func TestSplitChangesNewMatcherNewSpec(t *testing.T) {
&splitStorage,
nil,
flagsets.NewMatcher(false, nil),
+ &largeSegmentStorageMock,
)
controller.Register(group)
@@ -408,13 +425,15 @@ func TestSegmentChanges(t *testing.T) {
Return(&dtos.SegmentChangesDTO{Name: "someSegment", Added: []string{"k1", "k2"}, Removed: []string{}, Since: -1, Till: 1}, nil).
Once()
+ var largeSegmentStorageMock largeSegmentStorageMock
+
resp := httptest.NewRecorder()
ctx, router := gin.CreateTestContext(resp)
logger := logging.NewLogger(nil)
group := router.Group("/api")
- controller := NewSdkServerController(logger, &splitFetcher, &splitStorage, &segmentStorage, flagsets.NewMatcher(false, nil))
+ controller := NewSdkServerController(logger, &splitFetcher, &splitStorage, &segmentStorage, flagsets.NewMatcher(false, nil), &largeSegmentStorageMock)
controller.Register(group)
ctx.Request, _ = http.NewRequest(http.MethodGet, "/api/segmentChanges/someSegment?since=-1", nil)
@@ -450,13 +469,15 @@ func TestSegmentChangesNotFound(t *testing.T) {
Return((*dtos.SegmentChangesDTO)(nil), storage.ErrSegmentNotFound).
Once()
+ var largeSegmentStorageMock largeSegmentStorageMock
+
resp := httptest.NewRecorder()
ctx, router := gin.CreateTestContext(resp)
logger := logging.NewLogger(nil)
group := router.Group("/api")
- controller := NewSdkServerController(logger, &splitFetcher, &splitStorage, &segmentStorage, flagsets.NewMatcher(false, nil))
+ controller := NewSdkServerController(logger, &splitFetcher, &splitStorage, &segmentStorage, flagsets.NewMatcher(false, nil), &largeSegmentStorageMock)
controller.Register(group)
ctx.Request, _ = http.NewRequest(http.MethodGet, "/api/segmentChanges/someSegment?since=-1", nil)
@@ -482,13 +503,15 @@ func TestMySegments(t *testing.T) {
Return([]string{"segment1", "segment2"}, nil).
Once()
+ var largeSegmentStorageMock largeSegmentStorageMock
+
resp := httptest.NewRecorder()
ctx, router := gin.CreateTestContext(resp)
logger := logging.NewLogger(nil)
group := router.Group("/api")
- controller := NewSdkServerController(logger, &splitFetcher, &splitStorage, &segmentStorage, flagsets.NewMatcher(false, nil))
+ controller := NewSdkServerController(logger, &splitFetcher, &splitStorage, &segmentStorage, flagsets.NewMatcher(false, nil), &largeSegmentStorageMock)
controller.Register(group)
ctx.Request, _ = http.NewRequest(http.MethodGet, "/api/mySegments/someKey", nil)
@@ -523,13 +546,15 @@ func TestMySegmentsError(t *testing.T) {
Return([]string(nil), errors.New("something")).
Once()
+ var largeSegmentStorageMock largeSegmentStorageMock
+
resp := httptest.NewRecorder()
ctx, router := gin.CreateTestContext(resp)
logger := logging.NewLogger(nil)
group := router.Group("/api")
- controller := NewSdkServerController(logger, &splitFetcher, &splitStorage, &segmentStorage, flagsets.NewMatcher(false, nil))
+ controller := NewSdkServerController(logger, &splitFetcher, &splitStorage, &segmentStorage, flagsets.NewMatcher(false, nil), &largeSegmentStorageMock)
controller.Register(group)
ctx.Request, _ = http.NewRequest(http.MethodGet, "/api/mySegments/someKey", nil)
@@ -545,6 +570,93 @@ func TestMySegmentsError(t *testing.T) {
segmentStorage.AssertExpectations(t)
}
+func TestMemberships(t *testing.T) {
+ gin.SetMode(gin.TestMode)
+
+ var splitFetcher splitFetcherMock
+ var splitStorage psmocks.ProxySplitStorageMock
+ var segmentStorage psmocks.ProxySegmentStorageMock
+ segmentStorage.On("SegmentsFor", "keyTest").
+ Return([]string{"segment1", "segment2"}, nil).
+ Once()
+
+ var largeSegmentStorageMock largeSegmentStorageMock
+ largeSegmentStorageMock.On("LargeSegmentsForUser", "keyTest").
+ Return([]string{"largeSegment1", "largeSegment2"}).
+ Once()
+
+ resp := httptest.NewRecorder()
+ ctx, router := gin.CreateTestContext(resp)
+
+ logger := logging.NewLogger(nil)
+
+ group := router.Group("/api")
+ controller := NewSdkServerController(logger, &splitFetcher, &splitStorage, &segmentStorage, flagsets.NewMatcher(false, nil), &largeSegmentStorageMock)
+ controller.Register(group)
+
+ ctx.Request, _ = http.NewRequest(http.MethodGet, "/api/memberships/keyTest", nil)
+ ctx.Request.Header.Set("Authorization", "Bearer someApiKey")
+ ctx.Request.Header.Set("SplitSDKVersion", "go-1.1.1")
+ ctx.Request.Header.Set("SplitSDKMachineIp", "1.2.3.4")
+ ctx.Request.Header.Set("SplitSDKMachineName", "ip-1-2-3-4")
+ router.ServeHTTP(resp, ctx.Request)
+ assert.Equal(t, 200, resp.Code)
+
+ body, err := io.ReadAll(resp.Body)
+ assert.Nil(t, err)
+
+ var actualDTO dtos.MembershipsResponseDTO
+ err = json.Unmarshal(body, &actualDTO)
+ assert.Nil(t, err)
+
+ expectedDTO := dtos.MembershipsResponseDTO{
+ MySegments: dtos.Memberships{
+ Segments: []dtos.Segment{{Name: "segment1"}, {Name: "segment2"}},
+ },
+ MyLargeSegments: dtos.Memberships{
+ Segments: []dtos.Segment{{Name: "largeSegment1"}, {Name: "largeSegment2"}},
+ },
+ }
+ assert.Equal(t, expectedDTO, actualDTO)
+
+ splitStorage.AssertExpectations(t)
+ splitFetcher.AssertExpectations(t)
+ segmentStorage.AssertExpectations(t)
+}
+
+func TestMembershipsError(t *testing.T) {
+ gin.SetMode(gin.TestMode)
+
+ var splitFetcher splitFetcherMock
+ var splitStorage psmocks.ProxySplitStorageMock
+ var largeSegmentStorageMock largeSegmentStorageMock
+ var segmentStorage psmocks.ProxySegmentStorageMock
+ segmentStorage.On("SegmentsFor", "keyTest").
+ Return([]string{}, errors.New("error message.")).
+ Once()
+
+ resp := httptest.NewRecorder()
+ ctx, router := gin.CreateTestContext(resp)
+
+ logger := logging.NewLogger(nil)
+
+ group := router.Group("/api")
+ controller := NewSdkServerController(logger, &splitFetcher, &splitStorage, &segmentStorage, flagsets.NewMatcher(false, nil), &largeSegmentStorageMock)
+ controller.Register(group)
+
+ ctx.Request, _ = http.NewRequest(http.MethodGet, "/api/memberships/keyTest", nil)
+ ctx.Request.Header.Set("Authorization", "Bearer someApiKey")
+ ctx.Request.Header.Set("SplitSDKVersion", "go-1.1.1")
+ ctx.Request.Header.Set("SplitSDKMachineIp", "1.2.3.4")
+ ctx.Request.Header.Set("SplitSDKMachineName", "ip-1-2-3-4")
+ router.ServeHTTP(resp, ctx.Request)
+ assert.Equal(t, 500, resp.Code)
+
+ splitStorage.AssertExpectations(t)
+ splitFetcher.AssertExpectations(t)
+ segmentStorage.AssertExpectations(t)
+}
+
type splitFetcherMock struct {
mock.Mock
}
@@ -563,4 +675,34 @@ type MSC struct {
MySegments []dtos.MySegmentDTO `json:"mySegments"`
}
+// --
+type largeSegmentStorageMock struct {
+ mock.Mock
+}
+
+func (s *largeSegmentStorageMock) SetChangeNumber(name string, till int64) {
+ s.Called(name, till)
+}
+func (s *largeSegmentStorageMock) Update(name string, userKeys []string, till int64) {
+}
+func (s *largeSegmentStorageMock) ChangeNumber(name string) int64 {
+ return s.Called(name).Get(0).(int64)
+}
+func (s *largeSegmentStorageMock) Count() int {
+ return s.Called().Get(0).(int)
+}
+func (s *largeSegmentStorageMock) LargeSegmentsForUser(userKey string) []string {
+ return s.Called(userKey).Get(0).([]string)
+}
+func (s *largeSegmentStorageMock) IsInLargeSegment(name string, key string) (bool, error) {
+ args := s.Called(name, key)
+ return args.Get(0).(bool), args.Error(1)
+}
+func (s *largeSegmentStorageMock) TotalKeys(name string) int {
+ return s.Called(name).Get(0).(int)
+}
+
+// --
+
+var _ cmnStorage.LargeSegmentsStorage = (*largeSegmentStorageMock)(nil)
var _ service.SplitFetcher = (*splitFetcherMock)(nil)
diff --git a/splitio/proxy/initialization.go b/splitio/proxy/initialization.go
index 30a3ca6f..b61152b5 100644
--- a/splitio/proxy/initialization.go
+++ b/splitio/proxy/initialization.go
@@ -12,6 +12,7 @@ import (
"github.com/splitio/go-split-commons/v6/conf"
"github.com/splitio/go-split-commons/v6/flagsets"
"github.com/splitio/go-split-commons/v6/service/api"
+ inmemory "github.com/splitio/go-split-commons/v6/storage/inmemory/mutexmap"
"github.com/splitio/go-split-commons/v6/synchronizer"
"github.com/splitio/go-split-commons/v6/tasks"
"github.com/splitio/go-split-commons/v6/telemetry"
@@ -38,7 +39,6 @@ import (
// Start initialize in proxy mode
func Start(logger logging.LoggerInterface, cfg *pconf.Main) error {
-
clientKey, err := util.GetClientKey(cfg.Apikey)
if err != nil {
return common.NewInitError(fmt.Errorf("error parsing client key from provided apikey: %w", err), common.ExitInvalidApikey)
@@ -85,6 +85,7 @@ func Start(logger logging.LoggerInterface, cfg *pconf.Main) error {
// Proxy storages already implement the observable interface, so no need to wrap them
splitStorage := storage.NewProxySplitStorage(dbInstance, logger, flagsets.NewFlagSetFilter(cfg.FlagSetsFilter), cfg.Initialization.Snapshot != "")
segmentStorage := storage.NewProxySegmentStorage(dbInstance, logger, cfg.Initialization.Snapshot != "")
+ largeSegmentStorage := inmemory.NewLargeSegmentsStorage()
// Local telemetry
tbufferSize := int(cfg.Sync.Advanced.TelemetryBuffer)
@@ -97,8 +98,8 @@ func Start(logger logging.LoggerInterface, cfg *pconf.Main) error {
)
// Healthcheck Monitor
- splitsConfig, segmentsConfig := getAppCounterConfigs()
- appMonitor := hcApplication.NewMonitorImp(splitsConfig, segmentsConfig, nil, logger)
+ splitsConfig, segmentsConfig, lsConfig := getAppCounterConfigs()
+ appMonitor := hcApplication.NewMonitorImp(splitsConfig, segmentsConfig, &lsConfig, nil, logger)
servicesMonitor := hcServices.NewMonitorImp(getServicesCountersConfig(*advanced), logger)
// Creating Workers and Tasks
@@ -124,6 +125,7 @@ func Start(logger logging.LoggerInterface, cfg *pconf.Main) error {
appMonitor),
TelemetryRecorder: telemetry.NewTelemetrySynchronizer(localTelemetryStorage, telemetryRecorder, splitStorage, segmentStorage, logger,
metadata, localTelemetryStorage),
+ LargeSegmentUpdater: caching.NewCacheAwareLargeSegmentSync(splitStorage, largeSegmentStorage, splitAPI.LargeSegmentFetcher, logger, localTelemetryStorage, httpCache, appMonitor),
}
// setup periodic tasks in case streaming is disabled or we need to fall back to polling
@@ -135,6 +137,7 @@ func Start(logger logging.LoggerInterface, cfg *pconf.Main) error {
ImpressionSyncTask: impressionTask,
ImpressionsCountSyncTask: impressionCountTask,
EventSyncTask: eventsTask,
+ LargeSegmentSyncTask: tasks.NewFetchLargeSegmentsTask(workers.LargeSegmentUpdater, splitStorage, advanced.LargeSegment.RefreshRate, advanced.LargeSegment.Workers, advanced.LargeSegment.QueueSize, logger, appMonitor),
}
// Creating Synchronizer for tasks
@@ -198,6 +201,7 @@ func Start(logger logging.LoggerInterface, cfg *pconf.Main) error {
SplitStorage: splitStorage,
SegmentStorage: segmentStorage,
LocalTelemetryStorage: localTelemetryStorage,
+ LargeSegmentStorage: largeSegmentStorage,
}
// --------------------------- ADMIN DASHBOARD ------------------------------
@@ -258,6 +262,7 @@ func Start(logger logging.LoggerInterface, cfg *pconf.Main) error {
TLSConfig: tlsConfig,
FlagSets: cfg.FlagSetsFilter,
FlagSetsStrictMatching: cfg.FlagSetStrictMatching,
+ ProxyLargeSegmentStorage: largeSegmentStorage,
}
if ilcfg := cfg.Integrations.ImpressionListener; ilcfg.Endpoint != "" {
@@ -316,11 +321,12 @@ func startBGSyng(m synchronizer.Manager, mstatus chan int, haveSnapshot bool, on
}
-func getAppCounterConfigs() (hcAppCounter.ThresholdConfig, hcAppCounter.ThresholdConfig) {
+func getAppCounterConfigs() (hcAppCounter.ThresholdConfig, hcAppCounter.ThresholdConfig, hcAppCounter.ThresholdConfig) {
splitsConfig := hcAppCounter.DefaultThresholdConfig("Splits")
segmentsConfig := hcAppCounter.DefaultThresholdConfig("Segments")
+ largeSegmentsConfig := hcAppCounter.DefaultThresholdConfig("LargeSegments")
- return splitsConfig, segmentsConfig
+ return splitsConfig, segmentsConfig, largeSegmentsConfig
}
func getServicesCountersConfig(advanced conf.AdvancedConfig) []hcServicesCounter.Config {
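
Taken together, proxy startup now threads a single in-memory large segment storage through three consumers: the cache-aware updater, the periodic fetch task, and the SDK controller serving /memberships. A hedged condensed view of that wiring (all names appear in the hunks above):

    lsStorage := inmemory.NewLargeSegmentsStorage()
    updater := caching.NewCacheAwareLargeSegmentSync(splitStorage, lsStorage,
        splitAPI.LargeSegmentFetcher, logger, localTelemetryStorage, httpCache, appMonitor)
    task := tasks.NewFetchLargeSegmentsTask(updater, splitStorage, advanced.LargeSegment.RefreshRate,
        advanced.LargeSegment.Workers, advanced.LargeSegment.QueueSize, logger, appMonitor)
    _ = task // registered as LargeSegmentSyncTask; lsStorage also backs the /memberships endpoint
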
diff --git a/splitio/proxy/proxy.go b/splitio/proxy/proxy.go
index f11e2f43..cf26ec22 100644
--- a/splitio/proxy/proxy.go
+++ b/splitio/proxy/proxy.go
@@ -6,6 +6,7 @@ import (
"net/http"
"github.com/splitio/go-split-commons/v6/service"
+ cmnStorage "github.com/splitio/go-split-commons/v6/storage"
"github.com/splitio/go-toolkit/v5/logging"
"github.com/splitio/split-synchronizer/v5/splitio/common/impressionlistener"
@@ -50,6 +51,9 @@ type Options struct {
// used to resolve segmentChanges & mySegments requests
ProxySegmentStorage storage.ProxySegmentStorage
+ // ProxyLargeSegmentStorage is used to resolve the large segment portion of memberships requests
+ ProxyLargeSegmentStorage cmnStorage.LargeSegmentsStorage
+
// what to do with received impression bulk payloads
ImpressionsSink tasks.DeferredRecordingTask
@@ -160,6 +164,7 @@ func setupSdkController(options *Options) *controllers.SdkServerController {
options.ProxySplitStorage,
options.ProxySegmentStorage,
flagsets.NewMatcher(options.FlagSetsStrictMatching, options.FlagSets),
+ options.ProxyLargeSegmentStorage,
)
}
diff --git a/splitio/proxy/proxy_test.go b/splitio/proxy/proxy_test.go
index faad8614..4ec41de8 100644
--- a/splitio/proxy/proxy_test.go
+++ b/splitio/proxy/proxy_test.go
@@ -236,6 +236,41 @@ func TestSegmentChangesAndMySegmentsEndpoints(t *testing.T) {
assert.Equal(t, "application/json; charset=utf-8", headers.Get("Content-Type"))
}
+func TestMembershipEndpoint(t *testing.T) {
+ var segmentStorage pstorageMocks.ProxySegmentStorageMock
+ var lsStorage pstorageMocks.ProxyLargeSegmentStorageMock
+
+ opts := makeOpts()
+ opts.ProxySegmentStorage = &segmentStorage
+ opts.ProxyLargeSegmentStorage = &lsStorage
+ proxy := New(opts)
+ go proxy.Start()
+ time.Sleep(1 * time.Second) // Let the scheduler switch the current thread/goroutine and start the server
+
+ // Test that a request without auth fails and is not cached
+ status, _, _ := get("memberships/mauro", opts.Port, nil)
+ if status != 401 {
+ t.Error("status should be 401. Is", status)
+ }
+
+ segmentStorage.On("SegmentsFor", "mauro").Return([]string{"segment1"}, nil).Once()
+ lsStorage.On("LargeSegmentsForUser", "mauro").Return([]string{"largeSegment1", "largeSegment2"}).Once()
+
+ status, body, headers := get("memberships/mauro", opts.Port, map[string]string{"Authorization": "Bearer someApiKey"})
+ response := memberships(body)
+ expected := dtos.MembershipsResponseDTO{
+ MySegments: dtos.Memberships{
+ Segments: []dtos.Segment{{Name: "segment1"}},
+ },
+ MyLargeSegments: dtos.Memberships{
+ Segments: []dtos.Segment{{Name: "largeSegment1"}, {Name: "largeSegment2"}},
+ },
+ }
+ assert.Equal(t, 200, status)
+ assert.Equal(t, expected, response)
+ assert.Equal(t, "application/json; charset=utf-8", headers.Get("Content-Type"))
+}
+
func makeOpts() *Options {
return &Options{
Logger: logging.NewLogger(nil),
@@ -307,3 +342,12 @@ func toMySegments(body []byte) []dtos.MySegmentDTO {
}
return c["mySegments"]
}
+
+func memberships(body []byte) dtos.MembershipsResponseDTO {
+ var c dtos.MembershipsResponseDTO
+ err := json.Unmarshal(body, &c)
+ if err != nil {
+ panic(err.Error())
+ }
+ return c
+}
diff --git a/splitio/proxy/storage/mocks/mocks.go b/splitio/proxy/storage/mocks/mocks.go
index 5910c7ca..c802fd89 100644
--- a/splitio/proxy/storage/mocks/mocks.go
+++ b/splitio/proxy/storage/mocks/mocks.go
@@ -35,3 +35,34 @@ func (p *ProxySegmentStorageMock) SegmentsFor(key string) ([]string, error) {
func (p *ProxySegmentStorageMock) CountRemovedKeys(segmentName string) int {
return p.Called(segmentName).Int(0)
}
+
+type ProxyLargeSegmentStorageMock struct {
+ mock.Mock
+}
+
+func (s *ProxyLargeSegmentStorageMock) SetChangeNumber(name string, till int64) {
+ s.Called(name, till)
+}
+
+func (s *ProxyLargeSegmentStorageMock) Update(name string, userKeys []string, till int64) {
+ s.Called(name, userKeys, till)
+}
+func (s *ProxyLargeSegmentStorageMock) ChangeNumber(name string) int64 {
+ args := s.Called(name)
+ return args.Get(0).(int64)
+}
+func (s *ProxyLargeSegmentStorageMock) Count() int {
+ args := s.Called()
+ return args.Get(0).(int)
+}
+func (s *ProxyLargeSegmentStorageMock) LargeSegmentsForUser(userKey string) []string {
+ args := s.Called(userKey)
+ return args.Get(0).([]string)
+}
+func (s *ProxyLargeSegmentStorageMock) IsInLargeSegment(name string, key string) (bool, error) {
+ args := s.Called(name, key)
+ return args.Get(0).(bool), args.Error(1)
+}
+func (s *ProxyLargeSegmentStorageMock) TotalKeys(name string) int {
+ return s.Called(name).Get(0).(int)
+}
diff --git a/splitio/proxy/storage/splits.go b/splitio/proxy/storage/splits.go
index 9b3f2f8c..2d6fa6e6 100644
--- a/splitio/proxy/storage/splits.go
+++ b/splitio/proxy/storage/splits.go
@@ -156,6 +156,11 @@ func (p *ProxySplitStorageImpl) FetchMany(names []string) map[string]*dtos.Split
// SegmentNames call is forwarded to the snapshot
func (p *ProxySplitStorageImpl) SegmentNames() *set.ThreadUnsafeSet { return p.snapshot.SegmentNames() }
+// LargeSegmentNames call is forwarded to the snapshot
+func (p *ProxySplitStorageImpl) LargeSegmentNames() *set.ThreadUnsafeSet {
+ return p.snapshot.LargeSegmentNames()
+}
+
// Split call is forwarded to the snapshot
func (p *ProxySplitStorageImpl) Split(name string) *dtos.SplitDTO { return p.snapshot.Split(name) }
diff --git a/splitio/version.go b/splitio/version.go
index 2697b785..b895a090 100644
--- a/splitio/version.go
+++ b/splitio/version.go
@@ -2,4 +2,4 @@
package splitio
// Version is the version of this Agent
-const Version = "5.8.3"
+const Version = "5.9.0"