From 0988889b6a0cd365dfd71090992e56e2560b054b Mon Sep 17 00:00:00 2001
From: Casey Waldren
Date: Wed, 28 Aug 2024 16:59:42 -0700
Subject: [PATCH 01/62] refactor: add internal data system config

---
 config.go                                     | 20 +++++
 .../data_system_configuration_builder.go      | 86 +++++++++++++++++++
 subsystems/datasystem_configuration.go        | 37 ++++++++
 3 files changed, 143 insertions(+)
 create mode 100644 ldcomponents/data_system_configuration_builder.go
 create mode 100644 subsystems/datasystem_configuration.go

diff --git a/config.go b/config.go
index 5be351b1..78269b44 100644
--- a/config.go
+++ b/config.go
@@ -197,4 +197,24 @@ type Config struct {
 	// LaunchDarkly provides integration packages, and most applications will not
 	// need to implement their own hooks.
 	Hooks []ldhooks.Hook
+
+	// This field is not stable, and not subject to any backwards compatibility guarantees or semantic versioning.
+	// It is not suitable for production usage. Do not use it. You have been warned.
+	//
+	// DataSystem configures how data (e.g. flags, segments) are retrieved by the SDK.
+	//
+	// Set this field only if you want to specify non-default values for any of the data system configuration,
+	// such as defining an alternate data source or setting up a persistent store.
+	//
+	// Below, the default configuration is described with the relevant config item in parentheses:
+	// 1. The SDK will first attempt to fetch all data from LaunchDarkly's global Content Delivery Network (Initializer).
+	// 2. It will then establish a streaming connection with LaunchDarkly's realtime Flag Delivery Network (Primary
+	// Synchronizer).
+	// 3. If at any point the connection to the realtime network is interrupted for a short period of time,
+	// the connection will be automatically re-established.
+	// 4. If the connection cannot be re-established over a sustained period, the SDK will begin to make periodic
+	// requests to LaunchDarkly's global CDN (Secondary Synchronizer).
+	// 5. After a period of time, the SDK will swap back to the realtime Flag Delivery Network if it becomes
+	// available again.
+ DataSystem subsystems.ComponentConfigurer[subsystems.DataSystemConfiguration] } diff --git a/ldcomponents/data_system_configuration_builder.go b/ldcomponents/data_system_configuration_builder.go new file mode 100644 index 00000000..ede9d8c4 --- /dev/null +++ b/ldcomponents/data_system_configuration_builder.go @@ -0,0 +1,86 @@ +package ldcomponents + +import ( + "errors" + "fmt" + "github.com/launchdarkly/go-server-sdk/v7/ldcomponents" + "github.com/launchdarkly/go-server-sdk/v7/subsystems" +) + +type DataSystemConfigurationBuilder struct { + storeBuilder subsystems.ComponentConfigurer[subsystems.DataStore] + initializerBuilders []subsystems.ComponentConfigurer[subsystems.Initializer] + primarySyncBuilder subsystems.ComponentConfigurer[subsystems.Synchronizer] + secondarySyncBuilder subsystems.ComponentConfigurer[subsystems.Synchronizer] + config subsystems.DataSystemConfiguration +} + +func DataSystem() *DataSystemConfigurationBuilder { + return &DataSystemConfigurationBuilder{ + primarySyncBuilder: ldcomponents.StreamingDataSource(), + secondarySyncBuilder: ldcomponents.PollingDataSource(), + storeBuilder: nil, // in-memory only + initializerBuilders: []subsystems.ComponentConfigurer[subsystems.Initializer]{ldcomponents.PollingInitializer()}, + } +} + +func (d *DataSystemConfigurationBuilder) Store(store subsystems.ComponentConfigurer[subsystems.DataStore]) *DataSystemConfigurationBuilder { + d.storeBuilder = store + return d +} + +func (d *DataSystemConfigurationBuilder) Initializers(initializers ...subsystems.ComponentConfigurer[subsystems.Initializer]) *DataSystemConfigurationBuilder { + d.initializerBuilders = initializers + return d +} + +func (d *DataSystemConfigurationBuilder) PrimarySynchronizer(sync subsystems.ComponentConfigurer[subsystems.Synchronizer]) *DataSystemConfigurationBuilder { + d.primarySyncBuilder = sync + return d +} + +func (d *DataSystemConfigurationBuilder) SecondarySynchronizer(sync subsystems.ComponentConfigurer[subsystems.Synchronizer]) *DataSystemConfigurationBuilder { + d.secondarySyncBuilder = sync + return d +} + +func (d *DataSystemConfigurationBuilder) Build( + context subsystems.ClientContext, +) (subsystems.DataSystemConfiguration, error) { + conf := d.config + if d.secondarySyncBuilder != nil && d.primarySyncBuilder == nil { + return subsystems.DataSystemConfiguration{}, errors.New("cannot have a secondary synchronizer without a primary synchronizer") + } + if d.storeBuilder != nil { + store, err := d.storeBuilder.Build(context) + if err != nil { + return subsystems.DataSystemConfiguration{}, err + } + conf.Store = store + } + for i, initializerBuilder := range d.initializerBuilders { + if initializerBuilder == nil { + return subsystems.DataSystemConfiguration{}, fmt.Errorf("initializer %d is nil", i) + } + initializer, err := initializerBuilder.Build(context) + if err != nil { + return subsystems.DataSystemConfiguration{}, err + } + conf.Initializers = append(conf.Initializers, initializer) + } + if d.primarySyncBuilder != nil { + primarySync, err := d.primarySyncBuilder.Build(context) + if err != nil { + return subsystems.DataSystemConfiguration{}, err + } + conf.Synchronizers.Primary = primarySync + } + if d.secondarySyncBuilder != nil { + secondarySync, err := d.secondarySyncBuilder.Build(context) + if err != nil { + return subsystems.DataSystemConfiguration{}, err + } + conf.Synchronizers.Secondary = secondarySync + } + return conf, nil +} diff --git a/subsystems/datasystem_configuration.go b/subsystems/datasystem_configuration.go new file 
mode 100644 index 00000000..03f6b036 --- /dev/null +++ b/subsystems/datasystem_configuration.go @@ -0,0 +1,37 @@ +package subsystems + +type Initializer interface { + Fetch() error +} + +type Synchronizer interface { + Start() + Stop() +} + +type SynchronizersConfiguration struct { + Primary Synchronizer + Secondary Synchronizer +} + +type DataSystemConfiguration struct { + Store DataStore + // Initializers obtain data for the SDK in a one-shot manner at startup. Their job is to get the SDK + // into a state where it is serving somewhat fresh values as fast as possible. + Initializers []Initializer + Synchronizers SynchronizersConfiguration +} + +/** + +DataSystemConfiguration { + Store: ldcomponents.Empty(), || ldcomponents.PersistentStore( + Initializers: []ldcomponents.Initializer{ + ldcomponents.PollFDv2() + }, + Synchronizers: ldcomponents.SynchronizersConfiguration{ + Primary: ldcomponents.StreamingFDv2(), + Secondary: ldcomponents.PollFDv2() + } +} +*/ From 46814ddff6f0840f9eee256dc4be26a208a3cdde Mon Sep 17 00:00:00 2001 From: Casey Waldren Date: Wed, 28 Aug 2024 17:25:55 -0700 Subject: [PATCH 02/62] more docs --- config.go | 17 +++++++++++ .../data_system_configuration_builder.go | 29 ++++++++++++------- subsystems/datasystem_configuration.go | 14 --------- 3 files changed, 36 insertions(+), 24 deletions(-) diff --git a/config.go b/config.go index 78269b44..a7bb878b 100644 --- a/config.go +++ b/config.go @@ -216,5 +216,22 @@ type Config struct { // requests to LaunchDarkly's global CDN (Secondary Synchronizer) // 5. After a period of time, the SDK will swap back to the realtime Flag Delivery Network if it becomes // available again. + // + // The default streaming mode configuration is preferred for most use-cases (DataSystem().StreamingPreferred()). + // Sometimes streaming connections are blocked by firewalls or proxies. If this is the case, a polling-only mode + // can be configured: + // + // config := ld.Config{ + // DataSystem: ldcomponents.DataSystem().PollingOnly(), + // } + // + // If you'd like to load data from a local source to provide redundancy if there is a problem + // connecting to LaunchDarkly, you can add a custom initializer: + // + // config := ld.Config { + // DataSystem: ldcomponents.DataSystem().PrependInitializers(myCustomInitializer), + // } + // + // The initializer(s) will run before LaunchDarkly's default initializer. 
DataSystem subsystems.ComponentConfigurer[subsystems.DataSystemConfiguration] } diff --git a/ldcomponents/data_system_configuration_builder.go b/ldcomponents/data_system_configuration_builder.go index ede9d8c4..c970c229 100644 --- a/ldcomponents/data_system_configuration_builder.go +++ b/ldcomponents/data_system_configuration_builder.go @@ -16,12 +16,8 @@ type DataSystemConfigurationBuilder struct { } func DataSystem() *DataSystemConfigurationBuilder { - return &DataSystemConfigurationBuilder{ - primarySyncBuilder: ldcomponents.StreamingDataSource(), - secondarySyncBuilder: ldcomponents.PollingDataSource(), - storeBuilder: nil, // in-memory only - initializerBuilders: []subsystems.ComponentConfigurer[subsystems.Initializer]{ldcomponents.PollingInitializer()}, - } + d := &DataSystemConfigurationBuilder{} + return d.StreamingPreferred() } func (d *DataSystemConfigurationBuilder) Store(store subsystems.ComponentConfigurer[subsystems.DataStore]) *DataSystemConfigurationBuilder { @@ -34,16 +30,29 @@ func (d *DataSystemConfigurationBuilder) Initializers(initializers ...subsystems return d } -func (d *DataSystemConfigurationBuilder) PrimarySynchronizer(sync subsystems.ComponentConfigurer[subsystems.Synchronizer]) *DataSystemConfigurationBuilder { - d.primarySyncBuilder = sync +func (d *DataSystemConfigurationBuilder) PrependInitializers(initializers ...subsystems.ComponentConfigurer[subsystems.Initializer]) *DataSystemConfigurationBuilder { + d.initializerBuilders = append(initializers, d.initializerBuilders...) return d } -func (d *DataSystemConfigurationBuilder) SecondarySynchronizer(sync subsystems.ComponentConfigurer[subsystems.Synchronizer]) *DataSystemConfigurationBuilder { - d.secondarySyncBuilder = sync +func (d *DataSystemConfigurationBuilder) Synchronizers(primary, secondary subsystems.ComponentConfigurer[subsystems.Synchronizer]) *DataSystemConfigurationBuilder { + d.primarySyncBuilder = primary + d.secondarySyncBuilder = secondary return d } +func (d *DataSystemConfigurationBuilder) PollingOnly() *DataSystemConfigurationBuilder { + return d.Synchronizer(ldcomponents.PollingDataSource()) +} + +func (d *DataSystemConfigurationBuilder) StreamingPreferred() *DataSystemConfigurationBuilder { + return d.Initializers(ldcomponents.PollingInitializer()).Synchronizer(ldcomponents.StreamingDataSource(), ldcomponents.PollingDataSource()) +} + +func (d *DataSystemConfigurationBuilder) Synchronizer(sync subsystems.ComponentConfigurer[subsystems.Synchronizer]) *DataSystemConfigurationBuilder { + return d.Synchronizers(sync, nil) +} + func (d *DataSystemConfigurationBuilder) Build( context subsystems.ClientContext, ) (subsystems.DataSystemConfiguration, error) { diff --git a/subsystems/datasystem_configuration.go b/subsystems/datasystem_configuration.go index 03f6b036..e82aab40 100644 --- a/subsystems/datasystem_configuration.go +++ b/subsystems/datasystem_configuration.go @@ -21,17 +21,3 @@ type DataSystemConfiguration struct { Initializers []Initializer Synchronizers SynchronizersConfiguration } - -/** - -DataSystemConfiguration { - Store: ldcomponents.Empty(), || ldcomponents.PersistentStore( - Initializers: []ldcomponents.Initializer{ - ldcomponents.PollFDv2() - }, - Synchronizers: ldcomponents.SynchronizersConfiguration{ - Primary: ldcomponents.StreamingFDv2(), - Secondary: ldcomponents.PollFDv2() - } -} -*/ From 0efda7adc381698479b310064a8366fecd5235e8 Mon Sep 17 00:00:00 2001 From: Casey Waldren Date: Thu, 29 Aug 2024 18:00:41 -0700 Subject: [PATCH 03/62] refactor persistent store config --- 
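[Editor's sketch: a minimal usage example for the entry points this patch introduces. The
persistent store builder shown is only an assumed stand-in for any
subsystems.ComponentConfigurer[subsystems.DataStore] — e.g. the existing v1
ldcomponents.PersistentDataStore wrapper with the Redis integration
(github.com/launchdarkly/go-server-sdk-redis-redigo) — and is not part of this change.]

	// Hypothetical wiring of the new builders into a client Config.
	store := ldcomponents.PersistentDataStore(ldredis.DataStore())

	// Daemon mode: the SDK only reads flag data that an external process
	// keeps up to date in the persistent store.
	config := ld.Config{
		DataSystem: ldcomponents.DaemonModeV2(store),
	}

	// Persistent store mode: streaming data is mirrored into the store in
	// read-write mode while the SDK serves evaluations from it.
	config = ld.Config{
		DataSystem: ldcomponents.PersistentStoreV2(store),
	}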
.../data_system_configuration_builder.go | 40 +++++++++---------- subsystems/datasystem_configuration.go | 10 ++++- 2 files changed, 29 insertions(+), 21 deletions(-) diff --git a/ldcomponents/data_system_configuration_builder.go b/ldcomponents/data_system_configuration_builder.go index c970c229..1dbebea2 100644 --- a/ldcomponents/data_system_configuration_builder.go +++ b/ldcomponents/data_system_configuration_builder.go @@ -9,6 +9,7 @@ import ( type DataSystemConfigurationBuilder struct { storeBuilder subsystems.ComponentConfigurer[subsystems.DataStore] + storeMode subsystems.StoreMode initializerBuilders []subsystems.ComponentConfigurer[subsystems.Initializer] primarySyncBuilder subsystems.ComponentConfigurer[subsystems.Synchronizer] secondarySyncBuilder subsystems.ComponentConfigurer[subsystems.Synchronizer] @@ -16,12 +17,28 @@ type DataSystemConfigurationBuilder struct { } func DataSystem() *DataSystemConfigurationBuilder { - d := &DataSystemConfigurationBuilder{} - return d.StreamingPreferred() + return &DataSystemConfigurationBuilder{} } -func (d *DataSystemConfigurationBuilder) Store(store subsystems.ComponentConfigurer[subsystems.DataStore]) *DataSystemConfigurationBuilder { +func DaemonModeV2(store subsystems.ComponentConfigurer[subsystems.DataStore]) *DataSystemConfigurationBuilder { + return DataSystem().DataStore(store, subsystems.StoreModeRead) +} + +func PersistentStoreV2(store subsystems.ComponentConfigurer[subsystems.DataStore]) *DataSystemConfigurationBuilder { + return StreamingDataSourceV2().DataStore(store, subsystems.StoreModeReadWrite) +} + +func PollingDataSourceV2() *DataSystemConfigurationBuilder { + return DataSystem().Synchronizers(ldcomponents.PollingDataSource(), nil) +} + +func StreamingDataSourceV2() *DataSystemConfigurationBuilder { + return DataSystem().Initializers(ldcomponents.PollingInitializer()).Synchronizers(ldcomponents.StreamingDataSource(), ldcomponents.PollingDataSource()) +} + +func (d *DataSystemConfigurationBuilder) DataStore(store subsystems.ComponentConfigurer[subsystems.DataStore], storeMode subsystems.StoreMode) *DataSystemConfigurationBuilder { d.storeBuilder = store + d.storeMode = storeMode return d } @@ -30,29 +47,12 @@ func (d *DataSystemConfigurationBuilder) Initializers(initializers ...subsystems return d } -func (d *DataSystemConfigurationBuilder) PrependInitializers(initializers ...subsystems.ComponentConfigurer[subsystems.Initializer]) *DataSystemConfigurationBuilder { - d.initializerBuilders = append(initializers, d.initializerBuilders...) 
- return d -} - func (d *DataSystemConfigurationBuilder) Synchronizers(primary, secondary subsystems.ComponentConfigurer[subsystems.Synchronizer]) *DataSystemConfigurationBuilder { d.primarySyncBuilder = primary d.secondarySyncBuilder = secondary return d } -func (d *DataSystemConfigurationBuilder) PollingOnly() *DataSystemConfigurationBuilder { - return d.Synchronizer(ldcomponents.PollingDataSource()) -} - -func (d *DataSystemConfigurationBuilder) StreamingPreferred() *DataSystemConfigurationBuilder { - return d.Initializers(ldcomponents.PollingInitializer()).Synchronizer(ldcomponents.StreamingDataSource(), ldcomponents.PollingDataSource()) -} - -func (d *DataSystemConfigurationBuilder) Synchronizer(sync subsystems.ComponentConfigurer[subsystems.Synchronizer]) *DataSystemConfigurationBuilder { - return d.Synchronizers(sync, nil) -} - func (d *DataSystemConfigurationBuilder) Build( context subsystems.ClientContext, ) (subsystems.DataSystemConfiguration, error) { diff --git a/subsystems/datasystem_configuration.go b/subsystems/datasystem_configuration.go index e82aab40..db05e149 100644 --- a/subsystems/datasystem_configuration.go +++ b/subsystems/datasystem_configuration.go @@ -14,8 +14,16 @@ type SynchronizersConfiguration struct { Secondary Synchronizer } +type StoreMode int + +const ( + StoreModeRead = 0 + StoreModeReadWrite = 1 +) + type DataSystemConfiguration struct { - Store DataStore + Store DataStore + StoreMode StoreMode // Initializers obtain data for the SDK in a one-shot manner at startup. Their job is to get the SDK // into a state where it is serving somewhat fresh values as fast as possible. Initializers []Initializer From cfc7c255bcba152c0f7aeb58b4e5b3649f98f12e Mon Sep 17 00:00:00 2001 From: Casey Waldren Date: Thu, 5 Sep 2024 17:20:07 -0700 Subject: [PATCH 04/62] make package name shorter in import --- .../data_system_configuration_builder.go | 48 +++++++++---------- 1 file changed, 24 insertions(+), 24 deletions(-) diff --git a/ldcomponents/data_system_configuration_builder.go b/ldcomponents/data_system_configuration_builder.go index 1dbebea2..f1451f8f 100644 --- a/ldcomponents/data_system_configuration_builder.go +++ b/ldcomponents/data_system_configuration_builder.go @@ -4,90 +4,90 @@ import ( "errors" "fmt" "github.com/launchdarkly/go-server-sdk/v7/ldcomponents" - "github.com/launchdarkly/go-server-sdk/v7/subsystems" + ss "github.com/launchdarkly/go-server-sdk/v7/subsystems" ) type DataSystemConfigurationBuilder struct { - storeBuilder subsystems.ComponentConfigurer[subsystems.DataStore] - storeMode subsystems.StoreMode - initializerBuilders []subsystems.ComponentConfigurer[subsystems.Initializer] - primarySyncBuilder subsystems.ComponentConfigurer[subsystems.Synchronizer] - secondarySyncBuilder subsystems.ComponentConfigurer[subsystems.Synchronizer] - config subsystems.DataSystemConfiguration + storeBuilder ss.ComponentConfigurer[ss.DataStore] + storeMode ss.StoreMode + initializerBuilders []ss.ComponentConfigurer[ss.Initializer] + primarySyncBuilder ss.ComponentConfigurer[ss.Synchronizer] + secondarySyncBuilder ss.ComponentConfigurer[ss.Synchronizer] + config ss.DataSystemConfiguration } func DataSystem() *DataSystemConfigurationBuilder { return &DataSystemConfigurationBuilder{} } -func DaemonModeV2(store subsystems.ComponentConfigurer[subsystems.DataStore]) *DataSystemConfigurationBuilder { - return DataSystem().DataStore(store, subsystems.StoreModeRead) +func DaemonModeV2(store ss.ComponentConfigurer[ss.DataStore]) *DataSystemConfigurationBuilder { + return 
DataSystem().DataStore(store, ss.StoreModeRead) } -func PersistentStoreV2(store subsystems.ComponentConfigurer[subsystems.DataStore]) *DataSystemConfigurationBuilder { - return StreamingDataSourceV2().DataStore(store, subsystems.StoreModeReadWrite) +func PersistentStoreV2(store ss.ComponentConfigurer[ss.DataStore]) *DataSystemConfigurationBuilder { + return StreamingDataSourceV2().DataStore(store, ss.StoreModeReadWrite) } func PollingDataSourceV2() *DataSystemConfigurationBuilder { - return DataSystem().Synchronizers(ldcomponents.PollingDataSource(), nil) + return DataSystem().Synchronizers(ldcomponents.PollingDataSourceV2(), nil) } func StreamingDataSourceV2() *DataSystemConfigurationBuilder { - return DataSystem().Initializers(ldcomponents.PollingInitializer()).Synchronizers(ldcomponents.StreamingDataSource(), ldcomponents.PollingDataSource()) + return DataSystem().Initializers(ldcomponents.PollingInitializer()).Synchronizers(ldcomponents.StreamingDataSourceV2(), ldcomponents.PollingDataSourceV2()) } -func (d *DataSystemConfigurationBuilder) DataStore(store subsystems.ComponentConfigurer[subsystems.DataStore], storeMode subsystems.StoreMode) *DataSystemConfigurationBuilder { +func (d *DataSystemConfigurationBuilder) DataStore(store ss.ComponentConfigurer[ss.DataStore], storeMode ss.StoreMode) *DataSystemConfigurationBuilder { d.storeBuilder = store d.storeMode = storeMode return d } -func (d *DataSystemConfigurationBuilder) Initializers(initializers ...subsystems.ComponentConfigurer[subsystems.Initializer]) *DataSystemConfigurationBuilder { +func (d *DataSystemConfigurationBuilder) Initializers(initializers ...ss.ComponentConfigurer[ss.Initializer]) *DataSystemConfigurationBuilder { d.initializerBuilders = initializers return d } -func (d *DataSystemConfigurationBuilder) Synchronizers(primary, secondary subsystems.ComponentConfigurer[subsystems.Synchronizer]) *DataSystemConfigurationBuilder { +func (d *DataSystemConfigurationBuilder) Synchronizers(primary, secondary ss.ComponentConfigurer[ss.Synchronizer]) *DataSystemConfigurationBuilder { d.primarySyncBuilder = primary d.secondarySyncBuilder = secondary return d } func (d *DataSystemConfigurationBuilder) Build( - context subsystems.ClientContext, -) (subsystems.DataSystemConfiguration, error) { + context ss.ClientContext, +) (ss.DataSystemConfiguration, error) { conf := d.config if d.secondarySyncBuilder != nil && d.primarySyncBuilder == nil { - return subsystems.DataSystemConfiguration{}, errors.New("cannot have a secondary synchronizer without a primary synchronizer") + return ss.DataSystemConfiguration{}, errors.New("cannot have a secondary synchronizer without a primary synchronizer") } if d.storeBuilder != nil { store, err := d.storeBuilder.Build(context) if err != nil { - return subsystems.DataSystemConfiguration{}, err + return ss.DataSystemConfiguration{}, err } conf.Store = store } for i, initializerBuilder := range d.initializerBuilders { if initializerBuilder == nil { - return subsystems.DataSystemConfiguration{}, fmt.Errorf("initializer %d is nil", i) + return ss.DataSystemConfiguration{}, fmt.Errorf("initializer %d is nil", i) } initializer, err := initializerBuilder.Build(context) if err != nil { - return subsystems.DataSystemConfiguration{}, err + return ss.DataSystemConfiguration{}, err } conf.Initializers = append(conf.Initializers, initializer) } if d.primarySyncBuilder != nil { primarySync, err := d.primarySyncBuilder.Build(context) if err != nil { - return subsystems.DataSystemConfiguration{}, err + return 
ss.DataSystemConfiguration{}, err } conf.Synchronizers.Primary = primarySync } if d.secondarySyncBuilder != nil { secondarySync, err := d.secondarySyncBuilder.Build(context) if err != nil { - return subsystems.DataSystemConfiguration{}, err + return ss.DataSystemConfiguration{}, err } conf.Synchronizers.Secondary = secondarySync } From 17a417927c3ab5d311118e4e710360bbd247fd5e Mon Sep 17 00:00:00 2001 From: Casey Waldren Date: Thu, 5 Sep 2024 18:15:03 -0700 Subject: [PATCH 05/62] adding a V2 method to existing data sources --- .../data_system_configuration_builder.go | 16 +++++----- ldcomponents/polling_data_source_builder.go | 26 +++++++++++++--- ldcomponents/streaming_data_source_builder.go | 31 ++++++++++++++++--- subsystems/data_initializer.go | 1 + subsystems/data_source.go | 9 ++++++ subsystems/datasystem_configuration.go | 15 ++------- 6 files changed, 68 insertions(+), 30 deletions(-) create mode 100644 subsystems/data_initializer.go diff --git a/ldcomponents/data_system_configuration_builder.go b/ldcomponents/data_system_configuration_builder.go index f1451f8f..9c585a92 100644 --- a/ldcomponents/data_system_configuration_builder.go +++ b/ldcomponents/data_system_configuration_builder.go @@ -3,16 +3,16 @@ package ldcomponents import ( "errors" "fmt" - "github.com/launchdarkly/go-server-sdk/v7/ldcomponents" + ss "github.com/launchdarkly/go-server-sdk/v7/subsystems" ) type DataSystemConfigurationBuilder struct { storeBuilder ss.ComponentConfigurer[ss.DataStore] storeMode ss.StoreMode - initializerBuilders []ss.ComponentConfigurer[ss.Initializer] - primarySyncBuilder ss.ComponentConfigurer[ss.Synchronizer] - secondarySyncBuilder ss.ComponentConfigurer[ss.Synchronizer] + initializerBuilders []ss.ComponentConfigurer[ss.DataInitializer] + primarySyncBuilder ss.ComponentConfigurer[ss.DataSynchronizer] + secondarySyncBuilder ss.ComponentConfigurer[ss.DataSynchronizer] config ss.DataSystemConfiguration } @@ -29,11 +29,11 @@ func PersistentStoreV2(store ss.ComponentConfigurer[ss.DataStore]) *DataSystemCo } func PollingDataSourceV2() *DataSystemConfigurationBuilder { - return DataSystem().Synchronizers(ldcomponents.PollingDataSourceV2(), nil) + return DataSystem().Synchronizers(PollingDataSource().V2(), nil) } func StreamingDataSourceV2() *DataSystemConfigurationBuilder { - return DataSystem().Initializers(ldcomponents.PollingInitializer()).Synchronizers(ldcomponents.StreamingDataSourceV2(), ldcomponents.PollingDataSourceV2()) + return DataSystem().Initializers(PollingDataSource().V2()).Synchronizers(StreamingDataSource().V2(), PollingDataSource().V2()) } func (d *DataSystemConfigurationBuilder) DataStore(store ss.ComponentConfigurer[ss.DataStore], storeMode ss.StoreMode) *DataSystemConfigurationBuilder { @@ -42,12 +42,12 @@ func (d *DataSystemConfigurationBuilder) DataStore(store ss.ComponentConfigurer[ return d } -func (d *DataSystemConfigurationBuilder) Initializers(initializers ...ss.ComponentConfigurer[ss.Initializer]) *DataSystemConfigurationBuilder { +func (d *DataSystemConfigurationBuilder) Initializers(initializers ...ss.ComponentConfigurer[ss.DataInitializer]) *DataSystemConfigurationBuilder { d.initializerBuilders = initializers return d } -func (d *DataSystemConfigurationBuilder) Synchronizers(primary, secondary ss.ComponentConfigurer[ss.Synchronizer]) *DataSystemConfigurationBuilder { +func (d *DataSystemConfigurationBuilder) Synchronizers(primary, secondary ss.ComponentConfigurer[ss.DataSynchronizer]) *DataSystemConfigurationBuilder { d.primarySyncBuilder = primary 
	d.secondarySyncBuilder = secondary
 	return d
 }
diff --git a/ldcomponents/polling_data_source_builder.go b/ldcomponents/polling_data_source_builder.go
index 72f4cf3e..c98ef724 100644
--- a/ldcomponents/polling_data_source_builder.go
+++ b/ldcomponents/polling_data_source_builder.go
@@ -2,6 +2,7 @@ package ldcomponents

 import (
 	"errors"
+	"github.com/launchdarkly/go-server-sdk/v7/internal/datasourcev2"
 	"time"

 	"github.com/launchdarkly/go-sdk-common/v3/ldvalue"
@@ -20,8 +21,9 @@ const DefaultPollInterval = 30 * time.Second
 //
 // See [PollingDataSource] for usage.
 type PollingDataSourceBuilder struct {
-	pollInterval time.Duration
-	filterKey    ldvalue.OptionalString
+	pollInterval    time.Duration
+	filterKey       ldvalue.OptionalString
+	protocolVersion int
 }

 // PollingDataSource returns a configurable factory for using polling mode to get feature flag data.
@@ -40,7 +42,8 @@ type PollingDataSourceBuilder struct {
 //	}
 func PollingDataSource() *PollingDataSourceBuilder {
 	return &PollingDataSourceBuilder{
-		pollInterval: DefaultPollInterval,
+		pollInterval:    DefaultPollInterval,
+		protocolVersion: 1,
 	}
 }
@@ -78,6 +81,16 @@ func (b *PollingDataSourceBuilder) PayloadFilter(filterKey string) *PollingDataS
 	return b
 }

+// V2 uses the next generation polling protocol. This method is not stable, and not subject to any backwards
+// compatibility guarantees or semantic versioning.
+// It is not suitable for production usage.
+// Do not use it.
+// You have been warned.
+func (b *PollingDataSourceBuilder) V2() *PollingDataSourceBuilder {
+	b.protocolVersion = 2
+	return b
+}
+
 // Build is called internally by the SDK.
 func (b *PollingDataSourceBuilder) Build(context subsystems.ClientContext) (subsystems.DataSource, error) {
 	context.GetLogging().Loggers.Warn(
@@ -96,8 +109,11 @@ func (b *PollingDataSourceBuilder) Build(context subsystems.ClientContext) (subs
 		PollInterval: b.pollInterval,
 		FilterKey:    filterKey,
 	}
-	pp := datasource.NewPollingProcessor(context, context.GetDataSourceUpdateSink(), cfg)
-	return pp, nil
+	if b.protocolVersion == 1 {
+		return datasource.NewPollingProcessor(context, context.GetDataSourceUpdateSink(), cfg), nil
+	} else {
+		return datasourcev2.NewPollingProcessor(context, context.GetDataSourceUpdateSink(), cfg), nil
+	}
 }

 // DescribeConfiguration is used internally by the SDK to inspect the configuration.
diff --git a/ldcomponents/streaming_data_source_builder.go b/ldcomponents/streaming_data_source_builder.go
index ade67b04..ce285d79 100644
--- a/ldcomponents/streaming_data_source_builder.go
+++ b/ldcomponents/streaming_data_source_builder.go
@@ -2,6 +2,7 @@ package ldcomponents

 import (
 	"errors"
+	"github.com/launchdarkly/go-server-sdk/v7/internal/datasourcev2"
 	"time"

 	"github.com/launchdarkly/go-sdk-common/v3/ldvalue"
@@ -22,6 +23,7 @@ const DefaultInitialReconnectDelay = time.Second
 type StreamingDataSourceBuilder struct {
 	initialReconnectDelay time.Duration
 	filterKey             ldvalue.OptionalString
+	protocolVersion       int
 }

 // StreamingDataSource returns a configurable factory for using streaming mode to get feature flag data.
@@ -37,6 +39,7 @@ type StreamingDataSourceBuilder struct {
 func StreamingDataSource() *StreamingDataSourceBuilder {
 	return &StreamingDataSourceBuilder{
 		initialReconnectDelay: DefaultInitialReconnectDelay,
+		protocolVersion:       1,
 	}
 }
@@ -71,6 +74,16 @@ func (b *StreamingDataSourceBuilder) PayloadFilter(filterKey string) *StreamingD
 	return b
 }

+// V2 uses the next generation streaming protocol. This method is not stable, and not subject to any backwards
+// compatibility guarantees or semantic versioning.
+// It is not suitable for production usage.
+// Do not use it.
+// You have been warned.
+func (b *StreamingDataSourceBuilder) V2() *StreamingDataSourceBuilder {
+	b.protocolVersion = 2
+	return b
+}
+
 // Build is called internally by the SDK.
 func (b *StreamingDataSourceBuilder) Build(context subsystems.ClientContext) (subsystems.DataSource, error) {
 	filterKey, wasSet := b.filterKey.Get()
@@ -87,11 +100,19 @@ func (b *StreamingDataSourceBuilder) Build(context subsystems.ClientContext) (su
 		InitialReconnectDelay: b.initialReconnectDelay,
 		FilterKey:             filterKey,
 	}
-	return datasource.NewStreamProcessor(
-		context,
-		context.GetDataSourceUpdateSink(),
-		cfg,
-	), nil
+	if b.protocolVersion == 1 {
+		return datasource.NewStreamProcessor(
+			context,
+			context.GetDataSourceUpdateSink(),
+			cfg,
+		), nil
+	} else {
+		return datasourcev2.NewStreamProcessor(
+			context,
+			context.GetDataSourceUpdateSink(),
+			cfg,
+		), nil
+	}
 }

 // DescribeConfiguration is used internally by the SDK to inspect the configuration.
diff --git a/subsystems/data_initializer.go b/subsystems/data_initializer.go
new file mode 100644
index 00000000..738597db
--- /dev/null
+++ b/subsystems/data_initializer.go
@@ -0,0 +1 @@
+package subsystems
diff --git a/subsystems/data_source.go b/subsystems/data_source.go
index eaf6aab5..485b6645 100644
--- a/subsystems/data_source.go
+++ b/subsystems/data_source.go
@@ -18,3 +18,12 @@
 	// initialized for the first time, or determined that initialization cannot ever succeed.
 	Start(closeWhenReady chan<- struct{})
 }
+
+type DataInitializer interface {
+	Fetch() error
+}
+
+type DataSynchronizer interface {
+	Start()
+	io.Closer
+}
diff --git a/subsystems/datasystem_configuration.go b/subsystems/datasystem_configuration.go
index db05e149..f6c93e69 100644
--- a/subsystems/datasystem_configuration.go
+++ b/subsystems/datasystem_configuration.go
@@ -1,17 +1,8 @@
 package subsystems

-type Initializer interface {
-	Fetch() error
-}
-
-type Synchronizer interface {
-	Start()
-	Stop()
-}
-
 type SynchronizersConfiguration struct {
-	Primary   Synchronizer
-	Secondary Synchronizer
+	Primary   DataSource
+	Secondary DataSource
 }

 type StoreMode int
@@ -26,6 +17,6 @@ type DataSystemConfiguration struct {
 	Store     DataStore
 	StoreMode StoreMode
 	// Initializers obtain data for the SDK in a one-shot manner at startup. Their job is to get the SDK
 	// into a state where it is serving somewhat fresh values as fast as possible.
- Initializers []Initializer + Initializers []DataInitializer Synchronizers SynchronizersConfiguration } From ebae2cf19c229a920a87afd0bcd30b137dc85374 Mon Sep 17 00:00:00 2001 From: Casey Waldren Date: Thu, 5 Sep 2024 20:07:59 -0700 Subject: [PATCH 06/62] creating the dataSystem interface --- internal/datastore/data_store_eval_impl.go | 4 +- internal/datasystem/fdv1_datasystem.go | 140 ++++++++++++++++++ ldclient.go | 136 +++++++---------- .../data_system_configuration_builder.go | 20 +-- subsystems/data_store.go | 17 +-- subsystems/datasystem_configuration.go | 4 +- subsystems/ldstoreimpl/data_store_eval.go | 2 +- subsystems/read_only_store.go | 33 +++++ 8 files changed, 240 insertions(+), 116 deletions(-) create mode 100644 internal/datasystem/fdv1_datasystem.go create mode 100644 subsystems/read_only_store.go diff --git a/internal/datastore/data_store_eval_impl.go b/internal/datastore/data_store_eval_impl.go index da9ca3b8..869f84d8 100644 --- a/internal/datastore/data_store_eval_impl.go +++ b/internal/datastore/data_store_eval_impl.go @@ -9,13 +9,13 @@ import ( ) type dataStoreEvaluatorDataProviderImpl struct { - store subsystems.DataStore + store subsystems.ReadOnlyStore loggers ldlog.Loggers } // NewDataStoreEvaluatorDataProviderImpl creates the internal implementation of the adapter that connects // the Evaluator (from go-server-sdk-evaluation) with the data store. -func NewDataStoreEvaluatorDataProviderImpl(store subsystems.DataStore, loggers ldlog.Loggers) ldeval.DataProvider { +func NewDataStoreEvaluatorDataProviderImpl(store subsystems.ReadOnlyStore, loggers ldlog.Loggers) ldeval.DataProvider { return dataStoreEvaluatorDataProviderImpl{store, loggers} } diff --git a/internal/datasystem/fdv1_datasystem.go b/internal/datasystem/fdv1_datasystem.go new file mode 100644 index 00000000..a99488ab --- /dev/null +++ b/internal/datasystem/fdv1_datasystem.go @@ -0,0 +1,140 @@ +package datasystem + +import ( + "github.com/launchdarkly/go-sdk-common/v3/ldlog" + "github.com/launchdarkly/go-server-sdk/v7/interfaces" + "github.com/launchdarkly/go-server-sdk/v7/internal" + "github.com/launchdarkly/go-server-sdk/v7/internal/datasource" + "github.com/launchdarkly/go-server-sdk/v7/internal/datastore" + "github.com/launchdarkly/go-server-sdk/v7/ldcomponents" + "github.com/launchdarkly/go-server-sdk/v7/subsystems" +) + +type FDv1 struct { + dataSourceStatusBroadcaster *internal.Broadcaster[interfaces.DataSourceStatus] + dataSourceStatusProvider interfaces.DataSourceStatusProvider + dataStoreStatusBroadcaster *internal.Broadcaster[interfaces.DataStoreStatus] + dataStoreStatusProvider interfaces.DataStoreStatusProvider + flagChangeEventBroadcaster *internal.Broadcaster[interfaces.FlagChangeEvent] + dataStore subsystems.DataStore + dataSource subsystems.DataSource +} + +func NewFDv1(loggers ldlog.Loggers, dataStoreFactory subsystems.ComponentConfigurer[subsystems.DataStore], dataSourceFactory subsystems.ComponentConfigurer[subsystems.DataSource], clientContext *internal.ClientContextImpl) (*FDv1, error) { + system := &FDv1{ + dataSourceStatusBroadcaster: internal.NewBroadcaster[interfaces.DataSourceStatus](), + flagChangeEventBroadcaster: internal.NewBroadcaster[interfaces.FlagChangeEvent](), + } + + dataStoreUpdateSink := datastore.NewDataStoreUpdateSinkImpl(system.dataStoreStatusBroadcaster) + storeFactory := dataStoreFactory + if storeFactory == nil { + storeFactory = ldcomponents.InMemoryDataStore() + } + clientContextWithDataStoreUpdateSink := clientContext + 
clientContextWithDataStoreUpdateSink.DataStoreUpdateSink = dataStoreUpdateSink + store, err := storeFactory.Build(clientContextWithDataStoreUpdateSink) + if err != nil { + return nil, err + } + system.dataStore = store + + system.dataStoreStatusProvider = datastore.NewDataStoreStatusProviderImpl(store, dataStoreUpdateSink) + + dataSourceUpdateSink := datasource.NewDataSourceUpdateSinkImpl( + store, + system.dataStoreStatusProvider, + system.dataSourceStatusBroadcaster, + system.flagChangeEventBroadcaster, + clientContext.GetLogging().LogDataSourceOutageAsErrorAfter, + loggers, + ) + + dataSource, err := createDataSource(clientContext, dataSourceFactory, dataSourceUpdateSink) + if err != nil { + return nil, err + } + system.dataSource = dataSource + system.dataSourceStatusProvider = datasource.NewDataSourceStatusProviderImpl( + system.dataSourceStatusBroadcaster, + dataSourceUpdateSink, + ) + + return system, nil + +} + +func createDataSource( + context *internal.ClientContextImpl, + dataSourceBuilder subsystems.ComponentConfigurer[subsystems.DataSource], + dataSourceUpdateSink subsystems.DataSourceUpdateSink, +) (subsystems.DataSource, error) { + if context.Offline { + context.GetLogging().Loggers.Info("Starting LaunchDarkly client in offline mode") + dataSourceUpdateSink.UpdateStatus(interfaces.DataSourceStateValid, interfaces.DataSourceErrorInfo{}) + return datasource.NewNullDataSource(), nil + } + factory := dataSourceBuilder + if factory == nil { + // COVERAGE: can't cause this condition in unit tests because it would try to connect to production LD + factory = ldcomponents.StreamingDataSource() + } + contextCopy := *context + contextCopy.BasicClientContext.DataSourceUpdateSink = dataSourceUpdateSink + return factory.Build(&contextCopy) +} + +func (f *FDv1) DataSourceStatusBroadcaster() *internal.Broadcaster[interfaces.DataSourceStatus] { + return f.dataSourceStatusBroadcaster +} + +func (f *FDv1) DataSourceStatusProvider() interfaces.DataSourceStatusProvider { + return f.dataSourceStatusProvider +} + +func (f *FDv1) DataStoreStatusBroadcaster() *internal.Broadcaster[interfaces.DataStoreStatus] { + return f.dataStoreStatusBroadcaster +} + +func (f *FDv1) DataStoreStatusProvider() interfaces.DataStoreStatusProvider { + return f.dataStoreStatusProvider +} + +func (f *FDv1) FlagChangeEventBroadcaster() *internal.Broadcaster[interfaces.FlagChangeEvent] { + return f.flagChangeEventBroadcaster +} + +func (f *FDv1) Start(closeWhenReady chan struct{}) { + f.dataSource.Start(closeWhenReady) +} + +func (f *FDv1) Stop() error { + if f.dataSource != nil { + _ = f.dataSource.Close() + } + if f.dataStore != nil { + _ = f.dataStore.Close() + } + if f.dataSourceStatusBroadcaster != nil { + f.dataSourceStatusBroadcaster.Close() + } + if f.dataStoreStatusBroadcaster != nil { + f.dataStoreStatusBroadcaster.Close() + } + if f.flagChangeEventBroadcaster != nil { + f.flagChangeEventBroadcaster.Close() + } + return nil +} + +func (f *FDv1) Offline() bool { + return f.dataSource == datasource.NewNullDataSource() +} + +func (f *FDv1) Initialized() bool { + return f.dataSource.IsInitialized() +} + +func (f *FDv1) Store() subsystems.ReadOnlyStore { + return f.dataStore +} diff --git a/ldclient.go b/ldclient.go index 9dbd38ab..f63cdaf2 100644 --- a/ldclient.go +++ b/ldclient.go @@ -7,6 +7,8 @@ import ( "encoding/hex" "errors" "fmt" + "github.com/launchdarkly/go-server-sdk/v7/internal/datasystem" + "github.com/launchdarkly/go-server-sdk/v7/subsystems" "reflect" "time" @@ -23,11 +25,8 @@ import ( 
"github.com/launchdarkly/go-server-sdk/v7/internal" "github.com/launchdarkly/go-server-sdk/v7/internal/bigsegments" "github.com/launchdarkly/go-server-sdk/v7/internal/datakinds" - "github.com/launchdarkly/go-server-sdk/v7/internal/datasource" - "github.com/launchdarkly/go-server-sdk/v7/internal/datastore" "github.com/launchdarkly/go-server-sdk/v7/internal/hooks" "github.com/launchdarkly/go-server-sdk/v7/ldcomponents" - "github.com/launchdarkly/go-server-sdk/v7/subsystems" "github.com/launchdarkly/go-server-sdk/v7/subsystems/ldstoreimpl" ) @@ -67,6 +66,36 @@ const ( migrationVarExFuncName = "LDClient.MigrationVariationCtx" ) +// type dataSystem represents the internal connections between the SDK's data sources, data store, +// and various status broadcasters. This is hidden behind an interface so that the data system can be +// swapped out with a new implementation for FDv2. +type dataSystem interface { + v1Methods + // Offline indicates whether the SDK is configured to be offline, either because the offline config item was + // explicitly set, or because a NullDataSource was used. + Offline() bool + // Initialized indicates whether the SDK has data. + Initialized() bool + // Start starts the data system; the given channel will be closed when the system has reached an initial state + // (either permanently failed, e.g. due to bad auth, or succeeded, where Initialized() == true). + Start(closeWhenReady chan struct{}) + // Stop halts the data system. Should be called when the client is closed to stop any long running operations. + Stop() error + + Store() subsystems.ReadOnlyStore +} + +// type v1Methods includes the public facing +type v1Methods interface { + DataSourceStatusBroadcaster() *internal.Broadcaster[interfaces.DataSourceStatus] + DataSourceStatusProvider() interfaces.DataSourceStatusProvider + DataStoreStatusBroadcaster() *internal.Broadcaster[interfaces.DataStoreStatus] + DataStoreStatusProvider() interfaces.DataStoreStatusProvider + FlagChangeEventBroadcaster() *internal.Broadcaster[interfaces.FlagChangeEvent] +} + +var _ dataSystem = &datasystem.FDv1{} + // LDClient is the LaunchDarkly client. 
// // This object evaluates feature flags, generates analytics events, and communicates with @@ -86,14 +115,8 @@ type LDClient struct { sdkKey string loggers ldlog.Loggers eventProcessor ldevents.EventProcessor - dataSource subsystems.DataSource - store subsystems.DataStore evaluator ldeval.Evaluator - dataSourceStatusBroadcaster *internal.Broadcaster[interfaces.DataSourceStatus] - dataSourceStatusProvider interfaces.DataSourceStatusProvider - dataStoreStatusBroadcaster *internal.Broadcaster[interfaces.DataStoreStatus] - dataStoreStatusProvider interfaces.DataStoreStatusProvider - flagChangeEventBroadcaster *internal.Broadcaster[interfaces.FlagChangeEvent] + dataSystem dataSystem flagTracker interfaces.FlagTracker bigSegmentStoreStatusBroadcaster *internal.Broadcaster[interfaces.BigSegmentStoreStatus] bigSegmentStoreStatusProvider interfaces.BigSegmentStoreStatusProvider @@ -222,19 +245,12 @@ func MakeCustomClient(sdkKey string, config Config, waitFor time.Duration) (*LDC client.offline = config.Offline - client.dataStoreStatusBroadcaster = internal.NewBroadcaster[interfaces.DataStoreStatus]() - dataStoreUpdateSink := datastore.NewDataStoreUpdateSinkImpl(client.dataStoreStatusBroadcaster) - storeFactory := config.DataStore - if storeFactory == nil { - storeFactory = ldcomponents.InMemoryDataStore() - } - clientContextWithDataStoreUpdateSink := clientContext - clientContextWithDataStoreUpdateSink.DataStoreUpdateSink = dataStoreUpdateSink - store, err := storeFactory.Build(clientContextWithDataStoreUpdateSink) + fdv1, err := datasystem.NewFDv1(loggers, config.DataStore, config.DataSource, clientContext) if err != nil { return nil, err } - client.store = store + + client.dataSystem = fdv1 bigSegments := config.BigSegments if bigSegments == nil { @@ -269,7 +285,7 @@ func MakeCustomClient(sdkKey string, config Config, waitFor time.Duration) (*LDC ) } - dataProvider := ldstoreimpl.NewDataStoreEvaluatorDataProvider(store, loggers) + dataProvider := ldstoreimpl.NewDataStoreEvaluatorDataProvider(client.dataSystem.Store(), loggers) evalOptions := []ldeval.EvaluatorOption{ ldeval.EvaluatorOptionErrorLogger(client.loggers.ForLevel(ldlog.Error)), } @@ -278,19 +294,6 @@ func MakeCustomClient(sdkKey string, config Config, waitFor time.Duration) (*LDC } client.evaluator = ldeval.NewEvaluatorWithOptions(dataProvider, evalOptions...) - client.dataStoreStatusProvider = datastore.NewDataStoreStatusProviderImpl(store, dataStoreUpdateSink) - - client.dataSourceStatusBroadcaster = internal.NewBroadcaster[interfaces.DataSourceStatus]() - client.flagChangeEventBroadcaster = internal.NewBroadcaster[interfaces.FlagChangeEvent]() - dataSourceUpdateSink := datasource.NewDataSourceUpdateSinkImpl( - store, - client.dataStoreStatusProvider, - client.dataSourceStatusBroadcaster, - client.flagChangeEventBroadcaster, - clientContext.GetLogging().LogDataSourceOutageAsErrorAfter, - loggers, - ) - client.eventProcessor, err = eventProcessorFactory.Build(clientContext) if err != nil { return nil, err @@ -306,18 +309,8 @@ func MakeCustomClient(sdkKey string, config Config, waitFor time.Duration) (*LDC // frequently, it won't be causing an allocation each time. 
client.withEventsDisabled = newClientEventsDisabledDecorator(client) - dataSource, err := createDataSource(config, clientContext, dataSourceUpdateSink) - client.dataSource = dataSource - if err != nil { - return nil, err - } - client.dataSourceStatusProvider = datasource.NewDataSourceStatusProviderImpl( - client.dataSourceStatusBroadcaster, - dataSourceUpdateSink, - ) - client.flagTracker = internal.NewFlagTrackerImpl( - client.flagChangeEventBroadcaster, + fdv1.FlagChangeEventBroadcaster(), func(flagKey string, context ldcontext.Context, defaultValue ldvalue.Value) ldvalue.Value { value, _ := client.JSONVariation(flagKey, context, defaultValue) return value @@ -327,8 +320,9 @@ func MakeCustomClient(sdkKey string, config Config, waitFor time.Duration) (*LDC client.hookRunner = hooks.NewRunner(loggers, config.Hooks) clientValid = true - client.dataSource.Start(closeWhenReady) - if waitFor > 0 && client.dataSource != datasource.NewNullDataSource() { + + client.dataSystem.Start(closeWhenReady) + if waitFor > 0 && !client.dataSystem.Offline() { loggers.Infof("Waiting up to %d milliseconds for LaunchDarkly client to start...", waitFor/time.Millisecond) @@ -343,7 +337,7 @@ func MakeCustomClient(sdkKey string, config Config, waitFor time.Duration) (*LDC for { select { case <-closeWhenReady: - if !client.dataSource.IsInitialized() { + if !client.dataSystem.Initialized() { loggers.Warn("LaunchDarkly client initialization failed") return client, ErrInitializationFailed } @@ -361,26 +355,6 @@ func MakeCustomClient(sdkKey string, config Config, waitFor time.Duration) (*LDC return client, nil } -func createDataSource( - config Config, - context *internal.ClientContextImpl, - dataSourceUpdateSink subsystems.DataSourceUpdateSink, -) (subsystems.DataSource, error) { - if config.Offline { - context.GetLogging().Loggers.Info("Starting LaunchDarkly client in offline mode") - dataSourceUpdateSink.UpdateStatus(interfaces.DataSourceStateValid, interfaces.DataSourceErrorInfo{}) - return datasource.NewNullDataSource(), nil - } - factory := config.DataSource - if factory == nil { - // COVERAGE: can't cause this condition in unit tests because it would try to connect to production LD - factory = ldcomponents.StreamingDataSource() - } - contextCopy := *context - contextCopy.BasicClientContext.DataSourceUpdateSink = dataSourceUpdateSink - return factory.Build(&contextCopy) -} - // MigrationVariation returns the migration stage of the migration feature flag for the given evaluation context. // // Returns defaultStage if there is an error or if the flag doesn't exist. @@ -589,7 +563,7 @@ func (client *LDClient) SecureModeHash(context ldcontext.Context) string { // already been stored in the database by a successfully connected SDK in the past. You can use // [LDClient.GetDataSourceStatusProvider] to get information on errors, or to wait for a successful retry. func (client *LDClient) Initialized() bool { - return client.dataSource.IsInitialized() + return client.dataSystem.Initialized() } // Close shuts down the LaunchDarkly client. 
After calling this, the LaunchDarkly client @@ -604,21 +578,11 @@ func (client *LDClient) Close() error { if client.eventProcessor != nil { _ = client.eventProcessor.Close() } - if client.dataSource != nil { - _ = client.dataSource.Close() - } - if client.store != nil { - _ = client.store.Close() - } - if client.dataSourceStatusBroadcaster != nil { - client.dataSourceStatusBroadcaster.Close() - } - if client.dataStoreStatusBroadcaster != nil { - client.dataStoreStatusBroadcaster.Close() - } - if client.flagChangeEventBroadcaster != nil { - client.flagChangeEventBroadcaster.Close() + + if client.dataSystem != nil { + _ = client.dataSystem.Stop() } + if client.bigSegmentStoreStatusBroadcaster != nil { client.bigSegmentStoreStatusBroadcaster.Close() } @@ -684,7 +648,7 @@ func (client *LDClient) AllFlagsState(context ldcontext.Context, options ...flag client.loggers.Warn("Called AllFlagsState in offline mode. Returning empty state") valid = false } else if !client.Initialized() { - if client.store.IsInitialized() { + if client.dataSystem.Initialized() { client.loggers.Warn("Called AllFlagsState before client initialization; using last known values from data store") } else { client.loggers.Warn("Called AllFlagsState before client initialization. Data store not available; returning empty state") //nolint:lll @@ -1119,7 +1083,7 @@ func (client *LDClient) JSONVariationDetailCtx( // // See the DataSourceStatusProvider interface for more about this functionality. func (client *LDClient) GetDataSourceStatusProvider() interfaces.DataSourceStatusProvider { - return client.dataSourceStatusProvider + return client.dataSystem.DataSourceStatusProvider() } // GetDataStoreStatusProvider returns an interface for tracking the status of a persistent data store. @@ -1131,7 +1095,7 @@ func (client *LDClient) GetDataSourceStatusProvider() interfaces.DataSourceStatu // // See the DataStoreStatusProvider interface for more about this functionality. func (client *LDClient) GetDataStoreStatusProvider() interfaces.DataStoreStatusProvider { - return client.dataStoreStatusProvider + return client.dataSystem.DataStoreStatusProvider() } // GetFlagTracker returns an interface for tracking changes in feature flag configurations. 
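[Editor's sketch: for illustration only, what a second implementation of the new dataSystem seam
would have to provide. This stub is hypothetical; the only real implementation in this patch is
datasystem.FDv1.]

	// stubDataSystem is a hypothetical skeleton showing the contract a future
	// data system (e.g. FDv2) must satisfy to plug into LDClient.
	type stubDataSystem struct {
		store subsystems.ReadOnlyStore
	}

	func (s stubDataSystem) Offline() bool     { return false }
	func (s stubDataSystem) Initialized() bool { return true }
	func (s stubDataSystem) Start(closeWhenReady chan struct{}) {
		close(closeWhenReady) // report readiness immediately
	}
	func (s stubDataSystem) Stop() error                     { return nil }
	func (s stubDataSystem) Store() subsystems.ReadOnlyStore { return s.store }

	// v1Methods: a real implementation would return live broadcasters and providers.
	func (s stubDataSystem) DataSourceStatusBroadcaster() *internal.Broadcaster[interfaces.DataSourceStatus] {
		return internal.NewBroadcaster[interfaces.DataSourceStatus]()
	}
	func (s stubDataSystem) DataSourceStatusProvider() interfaces.DataSourceStatusProvider { return nil }
	func (s stubDataSystem) DataStoreStatusBroadcaster() *internal.Broadcaster[interfaces.DataStoreStatus] {
		return internal.NewBroadcaster[interfaces.DataStoreStatus]()
	}
	func (s stubDataSystem) DataStoreStatusProvider() interfaces.DataStoreStatusProvider { return nil }
	func (s stubDataSystem) FlagChangeEventBroadcaster() *internal.Broadcaster[interfaces.FlagChangeEvent] {
		return internal.NewBroadcaster[interfaces.FlagChangeEvent]()
	}

	// Compile-time check, mirroring the one for FDv1 in this patch:
	var _ dataSystem = stubDataSystem{}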
diff --git a/ldcomponents/data_system_configuration_builder.go b/ldcomponents/data_system_configuration_builder.go index 9c585a92..d15325fd 100644 --- a/ldcomponents/data_system_configuration_builder.go +++ b/ldcomponents/data_system_configuration_builder.go @@ -24,17 +24,17 @@ func DaemonModeV2(store ss.ComponentConfigurer[ss.DataStore]) *DataSystemConfigu return DataSystem().DataStore(store, ss.StoreModeRead) } -func PersistentStoreV2(store ss.ComponentConfigurer[ss.DataStore]) *DataSystemConfigurationBuilder { - return StreamingDataSourceV2().DataStore(store, ss.StoreModeReadWrite) -} - -func PollingDataSourceV2() *DataSystemConfigurationBuilder { - return DataSystem().Synchronizers(PollingDataSource().V2(), nil) -} +//func PersistentStoreV2(store ss.ComponentConfigurer[ss.DataStore]) *DataSystemConfigurationBuilder { +// return StreamingDataSourceV2().DataStore(store, ss.StoreModeReadWrite) +//} -func StreamingDataSourceV2() *DataSystemConfigurationBuilder { - return DataSystem().Initializers(PollingDataSource().V2()).Synchronizers(StreamingDataSource().V2(), PollingDataSource().V2()) -} +//func PollingDataSourceV2() *DataSystemConfigurationBuilder { +// return DataSystem().Synchronizers(PollingDataSource().V2(), nil) +//} +// +//func StreamingDataSourceV2() *DataSystemConfigurationBuilder { +// return DataSystem().Initializers(PollingDataSource().V2()).Synchronizers(StreamingDataSource().V2(), PollingDataSource().V2()) +//} func (d *DataSystemConfigurationBuilder) DataStore(store ss.ComponentConfigurer[ss.DataStore], storeMode ss.StoreMode) *DataSystemConfigurationBuilder { d.storeBuilder = store diff --git a/subsystems/data_store.go b/subsystems/data_store.go index 5781dff3..3cb4296f 100644 --- a/subsystems/data_store.go +++ b/subsystems/data_store.go @@ -15,6 +15,8 @@ import ( type DataStore interface { io.Closer + ReadOnlyStore + // Init overwrites the store's contents with a set of items for each collection. // // All previous data should be discarded, regardless of versioning. @@ -24,21 +26,6 @@ type DataStore interface { // data, and then delete any previously stored items that were not in the input data. Init(allData []ldstoretypes.Collection) error - // Get retrieves an item from the specified collection, if available. - // - // If the specified key does not exist in the collection, it should return an ItemDescriptor - // whose Version is -1. - // - // If the item has been deleted and the store contains a placeholder, it should return an - // ItemDescriptor whose Version is the version of the placeholder, and whose Item is nil. - Get(kind ldstoretypes.DataKind, key string) (ldstoretypes.ItemDescriptor, error) - - // GetAll retrieves all items from the specified collection. - // - // If the store contains placeholders for deleted items, it should include them in the results, - // not filter them out. - GetAll(kind ldstoretypes.DataKind) ([]ldstoretypes.KeyedItemDescriptor, error) - // Upsert updates or inserts an item in the specified collection. For updates, the object will only be // updated if the existing version is less than the new version. 
// diff --git a/subsystems/datasystem_configuration.go b/subsystems/datasystem_configuration.go index f6c93e69..c23e7fdc 100644 --- a/subsystems/datasystem_configuration.go +++ b/subsystems/datasystem_configuration.go @@ -1,8 +1,8 @@ package subsystems type SynchronizersConfiguration struct { - Primary DataSource - Secondary DataSource + Primary DataSynchronizer + Secondary DataSynchronizer } type StoreMode int diff --git a/subsystems/ldstoreimpl/data_store_eval.go b/subsystems/ldstoreimpl/data_store_eval.go index 5f37ae04..d7cef4b7 100644 --- a/subsystems/ldstoreimpl/data_store_eval.go +++ b/subsystems/ldstoreimpl/data_store_eval.go @@ -16,6 +16,6 @@ import ( // // Normal use of the SDK does not require this type. It is provided for use by other LaunchDarkly // components that use DataStore and Evaluator separately from the SDK. -func NewDataStoreEvaluatorDataProvider(store subsystems.DataStore, loggers ldlog.Loggers) ldeval.DataProvider { +func NewDataStoreEvaluatorDataProvider(store subsystems.ReadOnlyStore, loggers ldlog.Loggers) ldeval.DataProvider { return datastore.NewDataStoreEvaluatorDataProviderImpl(store, loggers) } diff --git a/subsystems/read_only_store.go b/subsystems/read_only_store.go new file mode 100644 index 00000000..0071788e --- /dev/null +++ b/subsystems/read_only_store.go @@ -0,0 +1,33 @@ +package subsystems + +import "github.com/launchdarkly/go-server-sdk/v7/subsystems/ldstoretypes" + +type ReadOnlyStore interface { + // Get retrieves an item from the specified collection, if available. + // + // If the specified key does not exist in the collection, it should return an ItemDescriptor + // whose Version is -1. + // + // If the item has been deleted and the store contains a placeholder, it should return an + // ItemDescriptor whose Version is the version of the placeholder, and whose Item is nil. + Get(kind ldstoretypes.DataKind, key string) (ldstoretypes.ItemDescriptor, error) + + // GetAll retrieves all items from the specified collection. + // + // If the store contains placeholders for deleted items, it should include them in the results, + // not filter them out. + GetAll(kind ldstoretypes.DataKind) ([]ldstoretypes.KeyedItemDescriptor, error) + + // IsStatusMonitoringEnabled returns true if this data store implementation supports status + // monitoring. + // + // This is normally only true for persistent data stores created with ldcomponents.PersistentDataStore(), + // but it could also be true for any custom DataStore implementation that makes use of the + // statusUpdater parameter provided to the DataStoreFactory. Returning true means that the store + // guarantees that if it ever enters an invalid state (that is, an operation has failed or it knows + // that operations cannot succeed at the moment), it will publish a status update, and will then + // publish another status update once it has returned to a valid state. + // + // The same value will be returned from DataStoreStatusProvider.IsStatusMonitoringEnabled(). 
+ IsStatusMonitoringEnabled() bool +} From 71e1fb7581a5559f19b14172b828a8503e826a70 Mon Sep 17 00:00:00 2001 From: Casey Waldren Date: Fri, 6 Sep 2024 12:34:54 -0700 Subject: [PATCH 07/62] tests pass --- internal/datasystem/fdv1_datasystem.go | 5 +++++ ldclient.go | 15 +++++++-------- ldclient_evaluation_benchmark_test.go | 16 ++++++++-------- ldclient_test.go | 21 ++++++++++++++++++++- subsystems/data_store.go | 10 ---------- subsystems/read_only_store.go | 19 ++++++++----------- 6 files changed, 48 insertions(+), 38 deletions(-) diff --git a/internal/datasystem/fdv1_datasystem.go b/internal/datasystem/fdv1_datasystem.go index a99488ab..4adee888 100644 --- a/internal/datasystem/fdv1_datasystem.go +++ b/internal/datasystem/fdv1_datasystem.go @@ -23,6 +23,7 @@ type FDv1 struct { func NewFDv1(loggers ldlog.Loggers, dataStoreFactory subsystems.ComponentConfigurer[subsystems.DataStore], dataSourceFactory subsystems.ComponentConfigurer[subsystems.DataSource], clientContext *internal.ClientContextImpl) (*FDv1, error) { system := &FDv1{ dataSourceStatusBroadcaster: internal.NewBroadcaster[interfaces.DataSourceStatus](), + dataStoreStatusBroadcaster: internal.NewBroadcaster[interfaces.DataStoreStatus](), flagChangeEventBroadcaster: internal.NewBroadcaster[interfaces.FlagChangeEvent](), } @@ -138,3 +139,7 @@ func (f *FDv1) Initialized() bool { func (f *FDv1) Store() subsystems.ReadOnlyStore { return f.dataStore } + +func (f *FDv1) DataSource() subsystems.DataSource { + return f.dataSource +} diff --git a/ldclient.go b/ldclient.go index f63cdaf2..6f08108d 100644 --- a/ldclient.go +++ b/ldclient.go @@ -74,8 +74,6 @@ type dataSystem interface { // Offline indicates whether the SDK is configured to be offline, either because the offline config item was // explicitly set, or because a NullDataSource was used. Offline() bool - // Initialized indicates whether the SDK has data. - Initialized() bool // Start starts the data system; the given channel will be closed when the system has reached an initial state // (either permanently failed, e.g. due to bad auth, or succeeded, where Initialized() == true). Start(closeWhenReady chan struct{}) @@ -92,6 +90,7 @@ type v1Methods interface { DataStoreStatusBroadcaster() *internal.Broadcaster[interfaces.DataStoreStatus] DataStoreStatusProvider() interfaces.DataStoreStatusProvider FlagChangeEventBroadcaster() *internal.Broadcaster[interfaces.FlagChangeEvent] + DataSource() subsystems.DataSource } var _ dataSystem = &datasystem.FDv1{} @@ -337,7 +336,7 @@ func MakeCustomClient(sdkKey string, config Config, waitFor time.Duration) (*LDC for { select { case <-closeWhenReady: - if !client.dataSystem.Initialized() { + if !client.dataSystem.DataSource().IsInitialized() { loggers.Warn("LaunchDarkly client initialization failed") return client, ErrInitializationFailed } @@ -563,7 +562,7 @@ func (client *LDClient) SecureModeHash(context ldcontext.Context) string { // already been stored in the database by a successfully connected SDK in the past. You can use // [LDClient.GetDataSourceStatusProvider] to get information on errors, or to wait for a successful retry. func (client *LDClient) Initialized() bool { - return client.dataSystem.Initialized() + return client.dataSystem.DataSource().IsInitialized() } // Close shuts down the LaunchDarkly client. After calling this, the LaunchDarkly client @@ -648,7 +647,7 @@ func (client *LDClient) AllFlagsState(context ldcontext.Context, options ...flag client.loggers.Warn("Called AllFlagsState in offline mode. 
Returning empty state") valid = false } else if !client.Initialized() { - if client.dataSystem.Initialized() { + if client.dataSystem.Store().IsInitialized() { client.loggers.Warn("Called AllFlagsState before client initialization; using last known values from data store") } else { client.loggers.Warn("Called AllFlagsState before client initialization. Data store not available; returning empty state") //nolint:lll @@ -660,7 +659,7 @@ func (client *LDClient) AllFlagsState(context ldcontext.Context, options ...flag return flagstate.AllFlags{} } - items, err := client.store.GetAll(datakinds.Features) + items, err := client.dataSystem.Store().GetAll(datakinds.Features) if err != nil { client.loggers.Warn("Unable to fetch flags from data store. Returning empty state. Error: " + err.Error()) return flagstate.AllFlags{} @@ -1244,14 +1243,14 @@ func (client *LDClient) evaluateInternal( } if !client.Initialized() { - if client.store.IsInitialized() { + if client.dataSystem.Store().IsInitialized() { client.loggers.Warn("Feature flag evaluation called before LaunchDarkly client initialization completed; using last known values from data store") //nolint:lll } else { return evalErrorResult(ldreason.EvalErrorClientNotReady, nil, ErrClientNotInitialized) } } - itemDesc, storeErr := client.store.Get(datakinds.Features, key) + itemDesc, storeErr := client.dataSystem.Store().Get(datakinds.Features, key) if storeErr != nil { client.loggers.Errorf("Encountered error fetching feature from store: %+v", storeErr) diff --git a/ldclient_evaluation_benchmark_test.go b/ldclient_evaluation_benchmark_test.go index 677ff056..a40b28d0 100644 --- a/ldclient_evaluation_benchmark_test.go +++ b/ldclient_evaluation_benchmark_test.go @@ -46,7 +46,7 @@ func newEvalBenchmarkEnv() *evalBenchmarkEnv { func (env *evalBenchmarkEnv) setUp(withEventGeneration bool, bc evalBenchmarkCase, variations []ldvalue.Value) { // Set up the client. - env.client = makeTestClientWithConfig(func(c *Config) { + env.client = makeTestClientWithConfigAndStore(func(c *Config) { if withEventGeneration { // In this mode, we use a stub EventProcessor implementation that immediately discards // every event, but the SDK will still generate the events before passing them to the stub, @@ -58,15 +58,15 @@ func (env *evalBenchmarkEnv) setUp(withEventGeneration bool, bc evalBenchmarkCas // Events is set to the specific factory returned by NoEvents(). c.Events = ldcomponents.NoEvents() } + }, func(store subsystems.DataStore) { + // Set up the feature flag store. Note that we're using a regular in-memory data store, so the + // benchmarks will include the overhead of calling Get on the store. + testFlags := makeEvalBenchmarkFlags(bc, variations) + for _, ff := range testFlags { + _, _ = store.Upsert(datakinds.Features, ff.Key, sharedtest.FlagDescriptor(*ff)) + } }) - // Set up the feature flag store. Note that we're using a regular in-memory data store, so the - // benchmarks will include the overhead of calling Get on the store. - testFlags := makeEvalBenchmarkFlags(bc, variations) - for _, ff := range testFlags { - env.client.store.Upsert(datakinds.Features, ff.Key, sharedtest.FlagDescriptor(*ff)) - } - env.evalUser = makeEvalBenchmarkUser(bc) // Target a feature key in the middle of the list in case a linear search is being used. 
diff --git a/ldclient_test.go b/ldclient_test.go index 5e596bd4..e9221f84 100644 --- a/ldclient_test.go +++ b/ldclient_test.go @@ -2,6 +2,7 @@ package ldclient import ( "errors" + "github.com/launchdarkly/go-server-sdk/v7/internal/datastore" "testing" "time" @@ -77,9 +78,16 @@ func makeTestClient() *LDClient { } func makeTestClientWithConfig(modConfig func(*Config)) *LDClient { + return makeTestClientWithConfigAndStore(modConfig, func(store subsystems.DataStore) {}) +} + +// makeTestClientWithConfigAndStore is a variant of makeTestClientWithConfig, which allows you to also pre-populate the +// client's in-memory data store with some test data. The second argument is a callback that provides you access to the +// store; then you can call Upsert/Init to inject data. +func makeTestClientWithConfigAndStore(modConfig func(*Config), populate func(store subsystems.DataStore)) *LDClient { config := Config{ Offline: false, - DataStore: ldcomponents.InMemoryDataStore(), + DataStore: populateStore(populate), DataSource: mocks.DataSourceThatIsAlwaysInitialized(), Events: mocks.SingleComponentConfigurer[ldevents.EventProcessor]{Instance: &mocks.CapturingEventProcessor{}}, Logging: ldcomponents.Logging().Loggers(sharedtest.NewTestLoggers()), @@ -90,3 +98,14 @@ func makeTestClientWithConfig(modConfig func(*Config)) *LDClient { client, _ := MakeCustomClient(testSdkKey, config, time.Duration(0)) return client } + +// populateStore (which is a function) is defined here as a type so that we can implement the ComponentConfigurer interface +// on it. That way, when the SDK configures the data store, we can hook in additional logic to populate the store +// via the callback provided in makeTestClientWithConfigAndStore. +type populateStore func(store subsystems.DataStore) + +func (populate populateStore) Build(context subsystems.ClientContext) (subsystems.DataStore, error) { + inMemory := datastore.NewInMemoryDataStore(context.GetLogging().Loggers) + populate(inMemory) + return inMemory, nil +} diff --git a/subsystems/data_store.go b/subsystems/data_store.go index 3cb4296f..20c540b7 100644 --- a/subsystems/data_store.go +++ b/subsystems/data_store.go @@ -37,16 +37,6 @@ type DataStore interface { // contains an equal or greater version. Upsert(kind ldstoretypes.DataKind, key string, item ldstoretypes.ItemDescriptor) (bool, error) - // IsInitialized returns true if the data store contains a data set, meaning that Init has been - // called at least once. - // - // In a shared data store, it should be able to detect this even if Init was called in a - // different process: that is, the test should be based on looking at what is in the data store. - // Once this has been determined to be true, it can continue to return true without having to - // check the store again; this method should be as fast as possible since it may be called during - // feature flag evaluations. - IsInitialized() bool - // IsStatusMonitoringEnabled returns true if this data store implementation supports status // monitoring. // diff --git a/subsystems/read_only_store.go b/subsystems/read_only_store.go index 0071788e..c1eefcbf 100644 --- a/subsystems/read_only_store.go +++ b/subsystems/read_only_store.go @@ -18,16 +18,13 @@ type ReadOnlyStore interface { // not filter them out. GetAll(kind ldstoretypes.DataKind) ([]ldstoretypes.KeyedItemDescriptor, error) - // IsStatusMonitoringEnabled returns true if this data store implementation supports status - // monitoring.
+ // IsInitialized returns true if the data store contains a data set, meaning that Init has been + // called at least once. // - // This is normally only true for persistent data stores created with ldcomponents.PersistentDataStore(), - // but it could also be true for any custom DataStore implementation that makes use of the - // statusUpdater parameter provided to the DataStoreFactory. Returning true means that the store - // guarantees that if it ever enters an invalid state (that is, an operation has failed or it knows - // that operations cannot succeed at the moment), it will publish a status update, and will then - // publish another status update once it has returned to a valid state. - // - // The same value will be returned from DataStoreStatusProvider.IsStatusMonitoringEnabled(). - IsStatusMonitoringEnabled() bool + // In a shared data store, it should be able to detect this even if Init was called in a + // different process: that is, the test should be based on looking at what is in the data store. + // Once this has been determined to be true, it can continue to return true without having to + // check the store again; this method should be as fast as possible since it may be called during + // feature flag evaluations. + IsInitialized() bool } From e4b2ea709583f94cc9b302e43b834c4123581234 Mon Sep 17 00:00:00 2001 From: Casey Waldren Date: Fri, 6 Sep 2024 12:43:49 -0700 Subject: [PATCH 08/62] lints --- config.go | 2 +- ldclient.go | 5 +++-- ldclient_test.go | 3 ++- .../data_system_configuration_builder.go | 17 +++++++++-------- ldcomponents/polling_data_source_builder.go | 5 +++-- ldcomponents/streaming_data_source_builder.go | 5 +++-- 6 files changed, 21 insertions(+), 16 deletions(-) diff --git a/config.go b/config.go index a7bb878b..65ed384e 100644 --- a/config.go +++ b/config.go @@ -198,7 +198,7 @@ type Config struct { // need to implement their own hooks. Hooks []ldhooks.Hook - // This field is not stable, and not subject to any backwards compatability guarantees or semantic versioning. + // This field is not stable, and not subject to any backwards compatibility guarantees or semantic versioning. // It is not suitable for production usage. Do not use it. You have been warned. // // DataSystem configures how data (e.g. flags, segments) are retrieved by the SDK. 
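For orientation, opting into the experimental data system is a single field in Config. A minimal sketch, mirroring the fdv2 end-to-end test added later in this patch series (the SDK key and timeout here are placeholder values):

    // Sketch: enable the experimental data system with its default configuration.
    // This API is unstable; the key and timeout are placeholders.
    config := Config{
        Events:     ldcomponents.NoEvents(),
        DataSystem: ldcomponents.DataSystem(),
    }
    client, err := MakeCustomClient("example-sdk-key", config, 5*time.Second)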
diff --git a/ldclient.go b/ldclient.go index 6f08108d..fe6226bf 100644 --- a/ldclient.go +++ b/ldclient.go @@ -7,11 +7,12 @@ import ( "encoding/hex" "errors" "fmt" - "github.com/launchdarkly/go-server-sdk/v7/internal/datasystem" - "github.com/launchdarkly/go-server-sdk/v7/subsystems" "reflect" "time" + "github.com/launchdarkly/go-server-sdk/v7/internal/datasystem" + "github.com/launchdarkly/go-server-sdk/v7/subsystems" + "github.com/launchdarkly/go-sdk-common/v3/ldcontext" "github.com/launchdarkly/go-sdk-common/v3/ldlog" "github.com/launchdarkly/go-sdk-common/v3/ldmigration" diff --git a/ldclient_test.go b/ldclient_test.go index e9221f84..5904c9f6 100644 --- a/ldclient_test.go +++ b/ldclient_test.go @@ -2,10 +2,11 @@ package ldclient import ( "errors" - "github.com/launchdarkly/go-server-sdk/v7/internal/datastore" "testing" "time" + "github.com/launchdarkly/go-server-sdk/v7/internal/datastore" + "github.com/launchdarkly/go-server-sdk/v7/internal/sharedtest/mocks" "github.com/launchdarkly/go-sdk-common/v3/lduser" diff --git a/ldcomponents/data_system_configuration_builder.go b/ldcomponents/data_system_configuration_builder.go index d15325fd..a342d711 100644 --- a/ldcomponents/data_system_configuration_builder.go +++ b/ldcomponents/data_system_configuration_builder.go @@ -24,17 +24,18 @@ func DaemonModeV2(store ss.ComponentConfigurer[ss.DataStore]) *DataSystemConfigu return DataSystem().DataStore(store, ss.StoreModeRead) } -//func PersistentStoreV2(store ss.ComponentConfigurer[ss.DataStore]) *DataSystemConfigurationBuilder { +// func PersistentStoreV2(store ss.ComponentConfigurer[ss.DataStore]) *DataSystemConfigurationBuilder { // return StreamingDataSourceV2().DataStore(store, ss.StoreModeReadWrite) -//} - -//func PollingDataSourceV2() *DataSystemConfigurationBuilder { +// } +// +// func PollingDataSourceV2() *DataSystemConfigurationBuilder { // return DataSystem().Synchronizers(PollingDataSource().V2(), nil) -//} +// } // -//func StreamingDataSourceV2() *DataSystemConfigurationBuilder { -// return DataSystem().Initializers(PollingDataSource().V2()).Synchronizers(StreamingDataSource().V2(), PollingDataSource().V2()) -//} +// func StreamingDataSourceV2() *DataSystemConfigurationBuilder { +// return DataSystem().Initializers(PollingDataSource().V2()).Synchronizers(StreamingDataSource().V2(), +// PollingDataSource().V2()) +// } func (d *DataSystemConfigurationBuilder) DataStore(store ss.ComponentConfigurer[ss.DataStore], storeMode ss.StoreMode) *DataSystemConfigurationBuilder { d.storeBuilder = store diff --git a/ldcomponents/polling_data_source_builder.go b/ldcomponents/polling_data_source_builder.go index c98ef724..1570eca0 100644 --- a/ldcomponents/polling_data_source_builder.go +++ b/ldcomponents/polling_data_source_builder.go @@ -2,9 +2,10 @@ package ldcomponents import ( "errors" - "github.com/launchdarkly/go-server-sdk/v7/internal/datasourcev2" "time" + "github.com/launchdarkly/go-server-sdk/v7/internal/datasourcev2" + "github.com/launchdarkly/go-sdk-common/v3/ldvalue" "github.com/launchdarkly/go-server-sdk/v7/internal/datasource" "github.com/launchdarkly/go-server-sdk/v7/internal/endpoints" @@ -82,7 +83,7 @@ func (b *PollingDataSourceBuilder) PayloadFilter(filterKey string) *PollingDataS } // V2 uses the next generation polling protocol. This method is not stable, and not subject to any backwards -// compatability guarantees or semantic versioning. +// compatibility guarantees or semantic versioning. // It is not suitable for production usage. // Do not use it. // You have been warned. 
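The commented-out constructors earlier in this patch sketch how the V2 sources are intended to compose once they stabilize. Under those same assumptions (these APIs are explicitly unstable and still in flux), a streaming-primary setup with a polling initializer and polling fallback would look roughly like:

    // Sketch of the intended composition, following the commented-out
    // StreamingDataSourceV2 above: poll once for a fast start, stream as the
    // primary synchronizer, and fall back to polling if streaming is unavailable.
    dataSystem := DataSystem().
        Initializers(PollingDataSource().V2()).
        Synchronizers(StreamingDataSource().V2(), PollingDataSource().V2())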
diff --git a/ldcomponents/streaming_data_source_builder.go b/ldcomponents/streaming_data_source_builder.go index ce285d79..166d1ccc 100644 --- a/ldcomponents/streaming_data_source_builder.go +++ b/ldcomponents/streaming_data_source_builder.go @@ -2,9 +2,10 @@ package ldcomponents import ( "errors" - "github.com/launchdarkly/go-server-sdk/v7/internal/datasourcev2" "time" + "github.com/launchdarkly/go-server-sdk/v7/internal/datasourcev2" + "github.com/launchdarkly/go-sdk-common/v3/ldvalue" "github.com/launchdarkly/go-server-sdk/v7/internal/datasource" "github.com/launchdarkly/go-server-sdk/v7/internal/endpoints" @@ -75,7 +76,7 @@ func (b *StreamingDataSourceBuilder) PayloadFilter(filterKey string) *StreamingD } // V2 uses the next generation streaming protocol. This method is not stable, and not subject to any backwards -// compatability guarantees or semantic versioning. +// compatibility guarantees or semantic versioning. // It is not suitable for production usage. // Do not use it. // You have been warned. From e706bd1d6320dcdd288a3dfb82aab4fec38faf6a Mon Sep 17 00:00:00 2001 From: Casey Waldren Date: Fri, 6 Sep 2024 16:36:30 -0700 Subject: [PATCH 09/62] add concept of DataStatus, remove need to check initialized --- internal/datasystem/data_status.go | 12 +++++++++ internal/datasystem/fdv1_datasystem.go | 14 ++++++----- ldclient.go | 35 +++++++++++--------------- subsystems/data_initializer.go | 1 - 4 files changed, 35 insertions(+), 27 deletions(-) create mode 100644 internal/datasystem/data_status.go delete mode 100644 subsystems/data_initializer.go diff --git a/internal/datasystem/data_status.go b/internal/datasystem/data_status.go new file mode 100644 index 00000000..9e4e4229 --- /dev/null +++ b/internal/datasystem/data_status.go @@ -0,0 +1,12 @@ +package datasystem + +type DataStatus string + +const ( + // Defaults means the SDK has no data and will evaluate flags using the application-provided default values. + Defaults = DataStatus("defaults") + // Cached means the SDK has data, not necessarily the latest, which will be used to evaluate flags. + Cached = DataStatus("cached") + // Refreshed means the SDK has obtained, at least once, the latest known data from LaunchDarkly. + Refreshed = DataStatus("refreshed") +) diff --git a/internal/datasystem/fdv1_datasystem.go b/internal/datasystem/fdv1_datasystem.go index 4adee888..e2887dbd 100644 --- a/internal/datasystem/fdv1_datasystem.go +++ b/internal/datasystem/fdv1_datasystem.go @@ -132,14 +132,16 @@ func (f *FDv1) Offline() bool { return f.dataSource == datasource.NewNullDataSource() } -func (f *FDv1) Initialized() bool { - return f.dataSource.IsInitialized() +func (f *FDv1) DataStatus() DataStatus { + if f.dataSource.IsInitialized() { + return Refreshed + } else if f.dataStore.IsInitialized() { + return Cached + } else { + return Defaults + } } func (f *FDv1) Store() subsystems.ReadOnlyStore { return f.dataStore } - -func (f *FDv1) DataSource() subsystems.DataSource { - return f.dataSource -} diff --git a/ldclient.go b/ldclient.go index fe6226bf..9d064527 100644 --- a/ldclient.go +++ b/ldclient.go @@ -67,11 +67,14 @@ const ( migrationVarExFuncName = "LDClient.MigrationVariationCtx" ) -// type dataSystem represents the internal connections between the SDK's data sources, data store, -// and various status broadcasters. This is hidden behind an interface so that the data system can be -// swapped out with a new implementation for FDv2. 
+// The dataSystem interface represents the requirements for the client to retrieve data necessary +// for evaluations, as well as status updates related to that data. type dataSystem interface { - v1Methods + DataSourceStatusBroadcaster() *internal.Broadcaster[interfaces.DataSourceStatus] + DataSourceStatusProvider() interfaces.DataSourceStatusProvider + DataStoreStatusBroadcaster() *internal.Broadcaster[interfaces.DataStoreStatus] + DataStoreStatusProvider() interfaces.DataStoreStatusProvider + FlagChangeEventBroadcaster() *internal.Broadcaster[interfaces.FlagChangeEvent] // Offline indicates whether the SDK is configured to be offline, either because the offline config item was // explicitly set, or because a NullDataSource was used. Offline() bool @@ -82,16 +85,8 @@ type dataSystem interface { Stop() error Store() subsystems.ReadOnlyStore -} -// type v1Methods includes the public facing -type v1Methods interface { - DataSourceStatusBroadcaster() *internal.Broadcaster[interfaces.DataSourceStatus] - DataSourceStatusProvider() interfaces.DataSourceStatusProvider - DataStoreStatusBroadcaster() *internal.Broadcaster[interfaces.DataStoreStatus] - DataStoreStatusProvider() interfaces.DataStoreStatusProvider - FlagChangeEventBroadcaster() *internal.Broadcaster[interfaces.FlagChangeEvent] - DataSource() subsystems.DataSource + DataStatus() datasystem.DataStatus } var _ dataSystem = &datasystem.FDv1{} @@ -245,12 +240,12 @@ func MakeCustomClient(sdkKey string, config Config, waitFor time.Duration) (*LDC client.offline = config.Offline - fdv1, err := datasystem.NewFDv1(loggers, config.DataStore, config.DataSource, clientContext) + fdv1DataSystem, err := datasystem.NewFDv1(loggers, config.DataStore, config.DataSource, clientContext) if err != nil { return nil, err } - client.dataSystem = fdv1 + client.dataSystem = fdv1DataSystem bigSegments := config.BigSegments if bigSegments == nil { @@ -310,7 +305,7 @@ func MakeCustomClient(sdkKey string, config Config, waitFor time.Duration) (*LDC client.withEventsDisabled = newClientEventsDisabledDecorator(client) client.flagTracker = internal.NewFlagTrackerImpl( - fdv1.FlagChangeEventBroadcaster(), + fdv1DataSystem.FlagChangeEventBroadcaster(), func(flagKey string, context ldcontext.Context, defaultValue ldvalue.Value) ldvalue.Value { value, _ := client.JSONVariation(flagKey, context, defaultValue) return value @@ -337,7 +332,7 @@ func MakeCustomClient(sdkKey string, config Config, waitFor time.Duration) (*LDC for { select { case <-closeWhenReady: - if !client.dataSystem.DataSource().IsInitialized() { + if client.dataSystem.DataStatus() != datasystem.Refreshed { loggers.Warn("LaunchDarkly client initialization failed") return client, ErrInitializationFailed } @@ -563,7 +558,7 @@ func (client *LDClient) SecureModeHash(context ldcontext.Context) string { // already been stored in the database by a successfully connected SDK in the past. You can use // [LDClient.GetDataSourceStatusProvider] to get information on errors, or to wait for a successful retry. func (client *LDClient) Initialized() bool { - return client.dataSystem.DataSource().IsInitialized() + return client.dataSystem.DataStatus() == datasystem.Refreshed } // Close shuts down the LaunchDarkly client. After calling this, the LaunchDarkly client @@ -648,7 +643,7 @@ func (client *LDClient) AllFlagsState(context ldcontext.Context, options ...flag client.loggers.Warn("Called AllFlagsState in offline mode.
Returning empty state") valid = false } else if !client.Initialized() { - if client.dataSystem.Store().IsInitialized() { + if client.dataSystem.DataStatus() == datasystem.Cached { client.loggers.Warn("Called AllFlagsState before client initialization; using last known values from data store") } else { client.loggers.Warn("Called AllFlagsState before client initialization. Data store not available; returning empty state") //nolint:lll @@ -1244,7 +1239,7 @@ func (client *LDClient) evaluateInternal( } if !client.Initialized() { - if client.dataSystem.Store().IsInitialized() { + if client.dataSystem.DataStatus() == datasystem.Cached { client.loggers.Warn("Feature flag evaluation called before LaunchDarkly client initialization completed; using last known values from data store") //nolint:lll } else { return evalErrorResult(ldreason.EvalErrorClientNotReady, nil, ErrClientNotInitialized) diff --git a/subsystems/data_initializer.go b/subsystems/data_initializer.go deleted file mode 100644 index 738597db..00000000 --- a/subsystems/data_initializer.go +++ /dev/null @@ -1 +0,0 @@ -package subsystems From 618609a28a23120d971ad00df25f1003312a8a98 Mon Sep 17 00:00:00 2001 From: Casey Waldren Date: Fri, 6 Sep 2024 16:50:09 -0700 Subject: [PATCH 10/62] create stub fdv2 data system --- internal/datasystem/fdv2_datasystem.go | 65 ++++++++++++++++++++++++++ ldclient.go | 24 +++++++--- 2 files changed, 82 insertions(+), 7 deletions(-) create mode 100644 internal/datasystem/fdv2_datasystem.go diff --git a/internal/datasystem/fdv2_datasystem.go b/internal/datasystem/fdv2_datasystem.go new file mode 100644 index 00000000..9cb0f0a8 --- /dev/null +++ b/internal/datasystem/fdv2_datasystem.go @@ -0,0 +1,65 @@ +package datasystem + +import ( + "github.com/launchdarkly/go-sdk-common/v3/ldlog" + "github.com/launchdarkly/go-server-sdk/v7/interfaces" + "github.com/launchdarkly/go-server-sdk/v7/internal" + "github.com/launchdarkly/go-server-sdk/v7/subsystems" +) + +type FDv2 struct { +} + +func (F FDv2) DataSourceStatusBroadcaster() *internal.Broadcaster[interfaces.DataSourceStatus] { + //TODO implement me + panic("implement me") +} + +func (F FDv2) DataSourceStatusProvider() interfaces.DataSourceStatusProvider { + //TODO implement me + panic("implement me") +} + +func (F FDv2) DataStoreStatusBroadcaster() *internal.Broadcaster[interfaces.DataStoreStatus] { + //TODO implement me + panic("implement me") +} + +func (F FDv2) DataStoreStatusProvider() interfaces.DataStoreStatusProvider { + //TODO implement me + panic("implement me") +} + +func (F FDv2) FlagChangeEventBroadcaster() *internal.Broadcaster[interfaces.FlagChangeEvent] { + //TODO implement me + panic("implement me") +} + +func (F FDv2) Offline() bool { + //TODO implement me + panic("implement me") +} + +func (F FDv2) Start(closeWhenReady chan struct{}) { + //TODO implement me + panic("implement me") +} + +func (F FDv2) Stop() error { + //TODO implement me + panic("implement me") +} + +func (F FDv2) Store() subsystems.ReadOnlyStore { + //TODO implement me + panic("implement me") +} + +func (F FDv2) DataStatus() DataStatus { + //TODO implement me + panic("implement me") +} + +func NewFDv2(loggers ldlog.Loggers, configurer subsystems.ComponentConfigurer[subsystems.DataSystemConfiguration]) (*FDv2, error) { + return &FDv2{}, nil +} diff --git a/ldclient.go b/ldclient.go index 9d064527..e0247bd4 100644 --- a/ldclient.go +++ b/ldclient.go @@ -89,7 +89,10 @@ type dataSystem interface { DataStatus() datasystem.DataStatus } -var _ dataSystem = &datasystem.FDv1{} +var ( + _ 
dataSystem = &datasystem.FDv1{} + _ dataSystem = &datasystem.FDv2{} +) // LDClient is the LaunchDarkly client. // @@ -240,13 +243,20 @@ func MakeCustomClient(sdkKey string, config Config, waitFor time.Duration) (*LDC client.offline = config.Offline - fdv1DataSystem, err := datasystem.NewFDv1(loggers, config.DataStore, config.DataSource, clientContext) - if err != nil { - return nil, err + if config.DataSystem == nil { + system, err := datasystem.NewFDv1(loggers, config.DataStore, config.DataSource, clientContext) + if err != nil { + return nil, err + } + client.dataSystem = system + } else { + system, err := datasystem.NewFDv2(loggers, config.DataSystem) + if err != nil { + return nil, err + } + client.dataSystem = system } - client.dataSystem = fdv1DataSystem - bigSegments := config.BigSegments if bigSegments == nil { bigSegments = ldcomponents.BigSegments(nil) @@ -305,7 +315,7 @@ func MakeCustomClient(sdkKey string, config Config, waitFor time.Duration) (*LDC client.withEventsDisabled = newClientEventsDisabledDecorator(client) client.flagTracker = internal.NewFlagTrackerImpl( - fdv1DataSystem.FlagChangeEventBroadcaster(), + client.dataSystem.FlagChangeEventBroadcaster(), func(flagKey string, context ldcontext.Context, defaultValue ldvalue.Value) ldvalue.Value { value, _ := client.JSONVariation(flagKey, context, defaultValue) return value From 3c6cac468f2cbc84cef1f1c2f7e728e5b8c4bf0c Mon Sep 17 00:00:00 2001 From: Casey Waldren Date: Fri, 6 Sep 2024 18:17:14 -0700 Subject: [PATCH 11/62] add dual-mode store --- internal/datasystem/fdv1_datasystem.go | 5 +- internal/datasystem/fdv2_datasystem.go | 207 ++++++++++++++++-- ldclient.go | 4 +- .../data_system_configuration_builder.go | 9 + subsystems/data_source.go | 17 +- subsystems/datasystem_configuration.go | 1 + 6 files changed, 213 insertions(+), 30 deletions(-) diff --git a/internal/datasystem/fdv1_datasystem.go b/internal/datasystem/fdv1_datasystem.go index e2887dbd..0ef11efa 100644 --- a/internal/datasystem/fdv1_datasystem.go +++ b/internal/datasystem/fdv1_datasystem.go @@ -1,7 +1,6 @@ package datasystem import ( - "github.com/launchdarkly/go-sdk-common/v3/ldlog" "github.com/launchdarkly/go-server-sdk/v7/interfaces" "github.com/launchdarkly/go-server-sdk/v7/internal" "github.com/launchdarkly/go-server-sdk/v7/internal/datasource" @@ -20,7 +19,7 @@ type FDv1 struct { dataSource subsystems.DataSource } -func NewFDv1(loggers ldlog.Loggers, dataStoreFactory subsystems.ComponentConfigurer[subsystems.DataStore], dataSourceFactory subsystems.ComponentConfigurer[subsystems.DataSource], clientContext *internal.ClientContextImpl) (*FDv1, error) { +func NewFDv1(dataStoreFactory subsystems.ComponentConfigurer[subsystems.DataStore], dataSourceFactory subsystems.ComponentConfigurer[subsystems.DataSource], clientContext *internal.ClientContextImpl) (*FDv1, error) { system := &FDv1{ dataSourceStatusBroadcaster: internal.NewBroadcaster[interfaces.DataSourceStatus](), dataStoreStatusBroadcaster: internal.NewBroadcaster[interfaces.DataStoreStatus](), @@ -48,7 +47,7 @@ func NewFDv1(loggers ldlog.Loggers, dataStoreFactory subsystems.ComponentConfigu system.dataSourceStatusBroadcaster, system.flagChangeEventBroadcaster, clientContext.GetLogging().LogDataSourceOutageAsErrorAfter, - loggers, + clientContext.GetLogging().Loggers, ) dataSource, err := createDataSource(clientContext, dataSourceFactory, dataSourceUpdateSink) diff --git a/internal/datasystem/fdv2_datasystem.go b/internal/datasystem/fdv2_datasystem.go index 9cb0f0a8..af1284c4 100644 --- 
a/internal/datasystem/fdv2_datasystem.go +++ b/internal/datasystem/fdv2_datasystem.go @@ -1,65 +1,228 @@ package datasystem import ( + "context" + "errors" "github.com/launchdarkly/go-sdk-common/v3/ldlog" "github.com/launchdarkly/go-server-sdk/v7/interfaces" "github.com/launchdarkly/go-server-sdk/v7/internal" + "github.com/launchdarkly/go-server-sdk/v7/internal/datastore" "github.com/launchdarkly/go-server-sdk/v7/subsystems" + "sync" ) +type store struct { + persistentStore subsystems.DataStore + persistentStoreMode subsystems.StoreMode + + memoryStore subsystems.DataStore + memory bool + refreshed bool + mu sync.RWMutex +} + +func newStore(persistent subsystems.DataStore, mode subsystems.StoreMode, loggers ldlog.Loggers) *store { + return &store{ + persistentStore: persistent, + persistentStoreMode: mode, + memoryStore: datastore.NewInMemoryDataStore(loggers), + } +} + +func (s *store) Close() error { + if s.persistentStore != nil { + return s.persistentStore.Close() + } + return nil +} + +func (s *store) GetActive() subsystems.DataStore { + s.mu.RLock() + defer s.mu.RUnlock() + if s.memory { + return s.memoryStore + } + return s.persistentStore +} + +func (s *store) Status() DataStatus { + s.mu.RLock() + defer s.mu.RUnlock() + // The logic here is: + // 1. If the memory store is active, we either got that data from an (initializer|synchronizer) that indicated + // the data is the latest known (Refreshed) or that it is potentially stale (Cached). This is set when SwapToMemory + // is called. + // 2. Otherwise, the persistent store - if any - is active. If there is none configured, the status is Defaults. + // If there is, we need to query the database availability to determine if we actually have access to the data + // or not. + if s.memory { + if s.refreshed { + return Refreshed + } + return Cached + } + if s.persistentStore != nil { + if s.persistentStore.IsInitialized() { + return Cached + } + } + return Defaults + +} + +func (s *store) GetMemory() subsystems.DataStore { + return s.memoryStore +} + +func (s *store) SwapToMemory(isRefreshed bool) { + s.mu.Lock() + defer s.mu.Unlock() + s.memory = true + s.refreshed = isRefreshed +} + type FDv2 struct { + store *store + + initializers []subsystems.DataInitializer + primarySync subsystems.DataSynchronizer + secondarySync subsystems.DataSynchronizer + + offline bool + + loggers ldlog.Loggers + + cancel context.CancelFunc + done chan struct{} + + readyOnce sync.Once } -func (F FDv2) DataSourceStatusBroadcaster() *internal.Broadcaster[interfaces.DataSourceStatus] { - //TODO implement me - panic("implement me") +func NewFDv2(cfgBuilder subsystems.ComponentConfigurer[subsystems.DataSystemConfiguration], clientContext *internal.ClientContextImpl) (*FDv2, error) { + cfg, err := cfgBuilder.Build(*clientContext) + if err != nil { + return nil, err + } + return &FDv2{ + store: newStore(cfg.Store, cfg.StoreMode, clientContext.GetLogging().Loggers), + initializers: cfg.Initializers, + primarySync: cfg.Synchronizers.Primary, + secondarySync: cfg.Synchronizers.Secondary, + offline: cfg.Offline, + loggers: clientContext.GetLogging().Loggers, + done: make(chan struct{}), + }, nil } -func (F FDv2) DataSourceStatusProvider() interfaces.DataSourceStatusProvider { - //TODO implement me - panic("implement me") +func (f *FDv2) Start(closeWhenReady chan struct{}) { + ctx, cancel := context.WithCancel(context.Background()) + f.cancel = cancel + go func() { + defer close(f.done) + payloadVersion := f.runInitializers(ctx, closeWhenReady) + f.runSynchronizers(ctx, 
closeWhenReady, payloadVersion) + }() } -func (F FDv2) DataStoreStatusBroadcaster() *internal.Broadcaster[interfaces.DataStoreStatus] { - //TODO implement me - panic("implement me") +func (f *FDv2) runInitializers(ctx context.Context, closeWhenReady chan struct{}) *int { + for _, initializer := range f.initializers { + payload, err := initializer.Fetch(ctx) + if errors.Is(err, context.Canceled) { + return nil + } + if err != nil { + continue + } + _ = f.store.GetMemory().Init(payload.Data) + f.store.SwapToMemory(payload.Fresh) + f.readyOnce.Do(func() { + close(closeWhenReady) + }) + return payload.Version + } + return nil } -func (F FDv2) DataStoreStatusProvider() interfaces.DataStoreStatusProvider { - //TODO implement me - panic("implement me") +func (f *FDv2) runSynchronizers(ctx context.Context, closeWhenReady chan struct{}, payloadVersion *int) { + // If the SDK was configured with no synchronizer, then (assuming no initializer succeeded), we should + // trigger the ready signal to let the call to MakeClient unblock immediately. + if f.primarySync == nil { + f.readyOnce.Do(func() { + close(closeWhenReady) + }) + return + } + + ready := make(chan struct{}) + f.primarySync.Start(ready, f.store.GetMemory(), payloadVersion) + + for { + select { + case <-ready: + // We may have synchronizers that don't actually validate that a payload is fresh. In this case, + // we'd need a mechanism to propagate the status to this method, just like for the initializers. + // For now, we assume that the only synchronizers are LaunchDarkly-provided and do receive fresh payloads. + f.store.SwapToMemory(true) + f.readyOnce.Do(func() { + close(closeWhenReady) + }) + case <-ctx.Done(): + return + } + } } -func (F FDv2) FlagChangeEventBroadcaster() *internal.Broadcaster[interfaces.FlagChangeEvent] { - //TODO implement me - panic("implement me") +func (f *FDv2) Stop() error { + if f.cancel != nil { + f.cancel() + <-f.done + } + _ = f.store.Close() + if f.primarySync != nil { + _ = f.primarySync.Close() + } + if f.secondarySync != nil { + _ = f.secondarySync.Close() + } + return nil +} + +func (f *FDv2) Store() subsystems.ReadOnlyStore { + return f.store.GetActive() +} + +func (f *FDv2) DataStatus() DataStatus { + if f.offline { + return Defaults + } + return f.store.Status() } -func (F FDv2) Offline() bool { +func (f *FDv2) DataSourceStatusBroadcaster() *internal.Broadcaster[interfaces.DataSourceStatus] { //TODO implement me panic("implement me") } -func (F FDv2) Start(closeWhenReady chan struct{}) { +func (f *FDv2) DataSourceStatusProvider() interfaces.DataSourceStatusProvider { //TODO implement me panic("implement me") } -func (F FDv2) Stop() error { +func (f *FDv2) DataStoreStatusBroadcaster() *internal.Broadcaster[interfaces.DataStoreStatus] { //TODO implement me panic("implement me") } -func (F FDv2) Store() subsystems.ReadOnlyStore { +func (f *FDv2) DataStoreStatusProvider() interfaces.DataStoreStatusProvider { //TODO implement me panic("implement me") } -func (F FDv2) DataStatus() DataStatus { +func (f *FDv2) FlagChangeEventBroadcaster() *internal.Broadcaster[interfaces.FlagChangeEvent] { //TODO implement me panic("implement me") } -func NewFDv2(loggers ldlog.Loggers, configurer subsystems.ComponentConfigurer[subsystems.DataSystemConfiguration]) (*FDv2, error) { - return &FDv2{}, nil +func (f *FDv2) Offline() bool { + return f.offline } diff --git a/ldclient.go b/ldclient.go index e0247bd4..350a1e71 100644 --- a/ldclient.go +++ b/ldclient.go @@ -244,13 +244,13 @@ func MakeCustomClient(sdkKey string, config 
Config, waitFor time.Duration) (*LDC client.offline = config.Offline if config.DataSystem == nil { - system, err := datasystem.NewFDv1(loggers, config.DataStore, config.DataSource, clientContext) + system, err := datasystem.NewFDv1(config.DataStore, config.DataSource, clientContext) if err != nil { return nil, err } client.dataSystem = system } else { - system, err := datasystem.NewFDv2(loggers, config.DataSystem) + system, err := datasystem.NewFDv2(config.DataSystem, clientContext) if err != nil { return nil, err } diff --git a/ldcomponents/data_system_configuration_builder.go b/ldcomponents/data_system_configuration_builder.go index a342d711..f6160da8 100644 --- a/ldcomponents/data_system_configuration_builder.go +++ b/ldcomponents/data_system_configuration_builder.go @@ -24,6 +24,10 @@ func DaemonModeV2(store ss.ComponentConfigurer[ss.DataStore]) *DataSystemConfigu return DataSystem().DataStore(store, ss.StoreModeRead) } +func Offline() *DataSystemConfigurationBuilder { + return DataSystem().Offline(true) +} + // func PersistentStoreV2(store ss.ComponentConfigurer[ss.DataStore]) *DataSystemConfigurationBuilder { // return StreamingDataSourceV2().DataStore(store, ss.StoreModeReadWrite) // } @@ -54,6 +58,11 @@ func (d *DataSystemConfigurationBuilder) Synchronizers(primary, secondary ss.Com return d } +func (d *DataSystemConfigurationBuilder) Offline(offline bool) *DataSystemConfigurationBuilder { + d.config.Offline = offline + return d +} + func (d *DataSystemConfigurationBuilder) Build( context ss.ClientContext, ) (ss.DataSystemConfiguration, error) { diff --git a/subsystems/data_source.go b/subsystems/data_source.go index 485b6645..1ac0c19b 100644 --- a/subsystems/data_source.go +++ b/subsystems/data_source.go @@ -1,6 +1,10 @@ package subsystems -import "io" +import ( + "context" + "github.com/launchdarkly/go-server-sdk/v7/subsystems/ldstoretypes" + "io" +) // DataSource describes the interface for an object that receives feature flag data. type DataSource interface { @@ -19,11 +23,18 @@ type DataSource interface { Start(closeWhenReady chan<- struct{}) } +type InitialPayload struct { + Data []ldstoretypes.Collection + Version *int + Fresh bool +} + type DataInitializer interface { - Fetch() error + Name() string + Fetch(ctx context.Context) (*InitialPayload, error) } type DataSynchronizer interface { - Start() + Start(closeWhenReady chan struct{}, dataStore DataStore, payloadVersion *int) io.Closer } diff --git a/subsystems/datasystem_configuration.go b/subsystems/datasystem_configuration.go index c23e7fdc..ae01e01d 100644 --- a/subsystems/datasystem_configuration.go +++ b/subsystems/datasystem_configuration.go @@ -19,4 +19,5 @@ type DataSystemConfiguration struct { // into a state where it is serving somewhat fresh values as fast as possible. 
Initializers []DataInitializer Synchronizers SynchronizersConfiguration + Offline bool } From 33b0ae8d853f8a0f2b93c376e7774e7a504e24f2 Mon Sep 17 00:00:00 2001 From: Casey Waldren Date: Mon, 9 Sep 2024 18:43:11 -0700 Subject: [PATCH 12/62] refactoring the store component --- .../datasourcev2/streaming_data_source.go | 48 ++-- .../data_store_status_provider_impl.go | 20 +- internal/datasystem/fdv2_datasystem.go | 205 ++++++++++-------- internal/datasystem/fdv2_store.go | 180 +++++++++++++++ ldclient_end_to_end_test.go | 59 +++-- .../data_system_configuration_builder.go | 30 ++- subsystems/data_source.go | 2 +- 7 files changed, 398 insertions(+), 146 deletions(-) create mode 100644 internal/datasystem/fdv2_store.go diff --git a/internal/datasourcev2/streaming_data_source.go b/internal/datasourcev2/streaming_data_source.go index 265c361b..59472eac 100644 --- a/internal/datasourcev2/streaming_data_source.go +++ b/internal/datasourcev2/streaming_data_source.go @@ -81,7 +81,6 @@ type StreamProcessor struct { loggers ldlog.Loggers isInitialized internal.AtomicBoolean halt chan struct{} - storeStatusCh <-chan interfaces.DataStoreStatus connectionAttemptStartTime ldtime.UnixMillisecondTime connectionAttemptLock sync.Mutex readyOnce sync.Once @@ -123,12 +122,13 @@ func (sp *StreamProcessor) IsInitialized() bool { //nolint:revive // no doc comment for standard method func (sp *StreamProcessor) Start(closeWhenReady chan<- struct{}) { sp.loggers.Info("Starting LaunchDarkly streaming connection") - if sp.dataSourceUpdates.GetDataStoreStatusProvider().IsStatusMonitoringEnabled() { - sp.storeStatusCh = sp.dataSourceUpdates.GetDataStoreStatusProvider().AddStatusListener() - } go sp.subscribe(closeWhenReady) } +func (sp *StreamProcessor) Sync(closeWhenReady chan struct{}, payloadVersion *int) { + sp.Start(closeWhenReady) +} + // TODO: Remove this nolint once we have a better implementation. // //nolint:gocyclo,godox // this function is a stepping stone. It will get better over time. @@ -161,6 +161,8 @@ func (sp *StreamProcessor) consumeStream(stream *es.Stream, closeWhenReady chan< sp.logConnectionResult(true) + // TODO(cwaldren/mkeeler): Should this actually be true by default? It means if we receive an event + // we don't understand then we go to the Valid state. processedEvent := true shouldRestart := false @@ -190,14 +192,11 @@ func (sp *StreamProcessor) consumeStream(stream *es.Stream, closeWhenReady chan< } storeUpdateFailed := func(updateDesc string) { - if sp.storeStatusCh != nil { - sp.loggers.Errorf("Failed to store %s in data store; will try again once data store is working", updateDesc) - // scenario 2a in error handling comments at top of file - } else { - sp.loggers.Errorf("Failed to store %s in data store; will restart stream until successful", updateDesc) - shouldRestart = true // scenario 2b - processedEvent = false - } + // TODO: the data source previously had the responsibility of figuring out if storing an update failed, + // and then potentially restarting the streaming connection to get a new PUT so that it could init + // the database if updates got lost. This is no longer the responsibility of the data source (now the + // data system will handle it.) Ideally, the update sink's methods should not be fallible. 
+ sp.loggers.Errorf("Failed to store %s in data store; will try again once data store is working", updateDesc) } switch event.Event() { @@ -267,17 +266,21 @@ func (sp *StreamProcessor) consumeStream(stream *es.Stream, closeWhenReady chan< switch u := update.(type) { case datasource.PatchData: if !sp.dataSourceUpdates.Upsert(u.Kind, u.Key, u.Data) { + //TODO: indicate that this can't actually fail anymore from the perspective of the data source storeUpdateFailed("streaming update of " + u.Key) } case datasource.PutData: if sp.dataSourceUpdates.Init(u.Data) { sp.setInitializedAndNotifyClient(true, closeWhenReady) } else { + //TODO: indicate that this can't actually fail anymore from the perspective of the data source + storeUpdateFailed("initial streaming data") } case datasource.DeleteData: deletedItem := ldstoretypes.ItemDescriptor{Version: u.Version, Item: nil} if !sp.dataSourceUpdates.Upsert(u.Kind, u.Key, deletedItem) { + //TODO: indicate that this can't actually fail anymore from the perspective of the data source storeUpdateFailed("streaming deletion of " + u.Key) } @@ -297,24 +300,6 @@ func (sp *StreamProcessor) consumeStream(stream *es.Stream, closeWhenReady chan< stream.Restart() } - case newStoreStatus := <-sp.storeStatusCh: - if sp.loggers.IsDebugEnabled() { - sp.loggers.Debugf("StreamProcessorV2 received store status update: %+v", newStoreStatus) - } - if newStoreStatus.Available { - // The store has just transitioned from unavailable to available (scenario 2a above) - if newStoreStatus.NeedsRefresh { - // The store is telling us that it can't guarantee that all of the latest data was cached. - // So we'll restart the stream to ensure a full refresh. - sp.loggers.Warn("Restarting stream to refresh data after data store outage") - stream.Restart() - } - // All of the updates were cached and have been written to the store, so we don't need to - // restart the stream. We just need to make sure the client knows we're initialized now - // (in case the initial "put" was not stored). - sp.setInitializedAndNotifyClient(true, closeWhenReady) - } - case <-sp.halt: stream.Close() return @@ -453,9 +438,6 @@ func (sp *StreamProcessor) logConnectionResult(success bool) { func (sp *StreamProcessor) Close() error { sp.closeOnce.Do(func() { close(sp.halt) - if sp.storeStatusCh != nil { - sp.dataSourceUpdates.GetDataStoreStatusProvider().RemoveStatusListener(sp.storeStatusCh) - } sp.dataSourceUpdates.UpdateStatus(interfaces.DataSourceStateOff, interfaces.DataSourceErrorInfo{}) }) return nil diff --git a/internal/datastore/data_store_status_provider_impl.go b/internal/datastore/data_store_status_provider_impl.go index 19af353d..e0114555 100644 --- a/internal/datastore/data_store_status_provider_impl.go +++ b/internal/datastore/data_store_status_provider_impl.go @@ -2,19 +2,33 @@ package datastore import ( "github.com/launchdarkly/go-server-sdk/v7/interfaces" - "github.com/launchdarkly/go-server-sdk/v7/subsystems" ) +type StatusMonitorable interface { + // IsStatusMonitoringEnabled returns true if this data store implementation supports status + // monitoring. + // + // This is normally only true for persistent data stores created with ldcomponents.PersistentDataStore(), + // but it could also be true for any custom DataStore implementation that makes use of the + // statusUpdater parameter provided to the DataStoreFactory. 
Returning true means that the store + // guarantees that if it ever enters an invalid state (that is, an operation has failed or it knows + // that operations cannot succeed at the moment), it will publish a status update, and will then + // publish another status update once it has returned to a valid state. + // + // The same value will be returned from DataStoreStatusProvider.IsStatusMonitoringEnabled(). + IsStatusMonitoringEnabled() bool +} + // dataStoreStatusProviderImpl is the internal implementation of DataStoreStatusProvider. It's not // exported because the rest of the SDK code only interacts with the public interface. type dataStoreStatusProviderImpl struct { - store subsystems.DataStore + store StatusMonitorable dataStoreUpdates *DataStoreUpdateSinkImpl } // NewDataStoreStatusProviderImpl creates the internal implementation of DataStoreStatusProvider. func NewDataStoreStatusProviderImpl( - store subsystems.DataStore, + store StatusMonitorable, dataStoreUpdates *DataStoreUpdateSinkImpl, ) interfaces.DataStoreStatusProvider { return &dataStoreStatusProviderImpl{ diff --git a/internal/datasystem/fdv2_datasystem.go b/internal/datasystem/fdv2_datasystem.go index af1284c4..55c2a0ef 100644 --- a/internal/datasystem/fdv2_datasystem.go +++ b/internal/datasystem/fdv2_datasystem.go @@ -11,119 +11,148 @@ import ( "sync" ) -type store struct { - persistentStore subsystems.DataStore - persistentStoreMode subsystems.StoreMode +var _ subsystems.DataSourceUpdateSink = (*store)(nil) - memoryStore subsystems.DataStore - memory bool - refreshed bool - mu sync.RWMutex -} - -func newStore(persistent subsystems.DataStore, mode subsystems.StoreMode, loggers ldlog.Loggers) *store { - return &store{ - persistentStore: persistent, - persistentStoreMode: mode, - memoryStore: datastore.NewInMemoryDataStore(loggers), - } -} - -func (s *store) Close() error { - if s.persistentStore != nil { - return s.persistentStore.Close() - } - return nil -} - -func (s *store) GetActive() subsystems.DataStore { - s.mu.RLock() - defer s.mu.RUnlock() - if s.memory { - return s.memoryStore - } - return s.persistentStore -} - -func (s *store) Status() DataStatus { - s.mu.RLock() - defer s.mu.RUnlock() - // The logic here is: - // 1. If the memory store is active, we either got that data from an (initializer|synchronizer) that indicated - // the data is the latest known (Refreshed) or that it is potentially stale (Cached). This is set when SwapToMemory - // is called. - // 2. Otherwise, the persistent store - if any - is active. If there is none configured, the status is Defaults. - // If there is, we need to query the database availability to determine if we actually have access to the data - // or not. - if s.memory { - if s.refreshed { - return Refreshed - } - return Cached - } - if s.persistentStore != nil { - if s.persistentStore.IsInitialized() { - return Cached - } - } - return Defaults - -} - -func (s *store) GetMemory() subsystems.DataStore { - return s.memoryStore -} - -func (s *store) SwapToMemory(isRefreshed bool) { - s.mu.Lock() - defer s.mu.Unlock() - s.memory = true - s.refreshed = isRefreshed +type broadcasters struct { + dataSourceStatus *internal.Broadcaster[interfaces.DataSourceStatus] + dataStoreStatus *internal.Broadcaster[interfaces.DataStoreStatus] + flagChangeEvent *internal.Broadcaster[interfaces.FlagChangeEvent] } type FDv2 struct { + // Operates the in-memory and optional persistent store that backs data queries. 
store *store - initializers []subsystems.DataInitializer - primarySync subsystems.DataSynchronizer + // List of initializers that are capable of obtaining an initial payload of data. + initializers []subsystems.DataInitializer + + // The primary synchronizer responsible for keeping data up-to-date. + primarySync subsystems.DataSynchronizer + + // The secondary synchronizer, in case the primary is unavailable. secondarySync subsystems.DataSynchronizer + // Whether the SDK should make use of persistent store/initializers/synchronizers or not. offline bool loggers ldlog.Loggers + // Cancel and wg are used to track and stop the goroutines used by the system. cancel context.CancelFunc - done chan struct{} - + wg sync.WaitGroup + + // The SDK client, via MakeClient, expects to pass a channel down into a data source which will then be + // closed when the source is considered to be ready or in a terminal state. This is what allows the initialization + // timeout logic to work correctly and return early - otherwise, users would have to wait the full init timeout + // before receiving a status update. The following are true: + // 1. Initializers may close the channel (because an initializer's job is to initialize the SDK!) + // 2. Synchronizers may close the channel (because an initializer might not be configured, or have failed) + // To ensure the channel is closed only once, we use a sync.Once wrapping the close() call. readyOnce sync.Once + + // These broadcasters are mainly to satisfy the existing SDK contract with users to provide status updates for + // the data source, data store, and flag change events. These may be different in fdv2, but we attempt to implement + // them for now. + broadcasters *broadcasters + + // We hold a reference to the dataStoreStatusProvider because it's required for the public interface of the + // SDK client. 
+ dataStoreStatusProvider interfaces.DataStoreStatusProvider +} func NewFDv2(cfgBuilder subsystems.ComponentConfigurer[subsystems.DataSystemConfiguration], clientContext *internal.ClientContextImpl) (*FDv2, error) { + + store := newStore(clientContext.GetLogging().Loggers) + + bcasters := &broadcasters{ + dataSourceStatus: internal.NewBroadcaster[interfaces.DataSourceStatus](), + dataStoreStatus: internal.NewBroadcaster[interfaces.DataStoreStatus](), + flagChangeEvent: internal.NewBroadcaster[interfaces.FlagChangeEvent](), + } + + dataStoreUpdateSink := datastore.NewDataStoreUpdateSinkImpl(bcasters.dataStoreStatus) + clientContextCopy := *clientContext + clientContextCopy.DataStoreUpdateSink = dataStoreUpdateSink + clientContextCopy.DataSourceUpdateSink = store + + cfg, err := cfgBuilder.Build(clientContextCopy) if err != nil { return nil, err } - return &FDv2{ - store: newStore(cfg.Store, cfg.StoreMode, clientContext.GetLogging().Loggers), + + fdv2 := &FDv2{ + store: store, initializers: cfg.Initializers, primarySync: cfg.Synchronizers.Primary, secondarySync: cfg.Synchronizers.Secondary, offline: cfg.Offline, loggers: clientContext.GetLogging().Loggers, - done: make(chan struct{}), - }, nil + broadcasters: bcasters, + } + + if cfg.Store != nil { + fdv2.dataStoreStatusProvider = datastore.NewDataStoreStatusProviderImpl(cfg.Store, dataStoreUpdateSink) + store.SetPersistent(cfg.Store, cfg.StoreMode, fdv2.dataStoreStatusProvider) + } else { + fdv2.dataStoreStatusProvider = datastore.NewDataStoreStatusProviderImpl(noStatusMonitoring{}, dataStoreUpdateSink) + } + + return fdv2, nil +} + +type noStatusMonitoring struct{} + +func (n noStatusMonitoring) IsStatusMonitoringEnabled() bool { + return false +} + +func (f *FDv2) runPersistentStoreOutageRecovery(ctx context.Context, statuses <-chan interfaces.DataStoreStatus) { + for { + select { + case newStoreStatus := <-statuses: + if newStoreStatus.Available { + // The store has just transitioned from unavailable to available + if newStoreStatus.NeedsRefresh { + f.loggers.Warn("Reinitializing data store from in-memory cache after data store outage") + if err := f.store.Commit(); err != nil { + f.loggers.Errorf("Failed to reinitialize data store: %v", err) + } + } + } + case <-ctx.Done(): + return + } + } } func (f *FDv2) Start(closeWhenReady chan struct{}) { ctx, cancel := context.WithCancel(context.Background()) f.cancel = cancel + f.launchTask(func() { + f.run(ctx, closeWhenReady) + }) +} + +func (f *FDv2) launchTask(task func()) { + f.wg.Add(1) go func() { - defer close(f.done) - payloadVersion := f.runInitializers(ctx, closeWhenReady) - f.runSynchronizers(ctx, closeWhenReady, payloadVersion) + defer f.wg.Done() + task() }() } +func (f *FDv2) run(ctx context.Context, closeWhenReady chan struct{}) { + payloadVersion := f.runInitializers(ctx, closeWhenReady) + + if f.store.Mirroring() { + f.launchTask(func() { + f.runPersistentStoreOutageRecovery(ctx, f.dataStoreStatusProvider.AddStatusListener()) + }) + } + + f.runSynchronizers(ctx, closeWhenReady, payloadVersion) +} + func (f *FDv2) runInitializers(ctx context.Context, closeWhenReady chan struct{}) *int { for _, initializer := range f.initializers { payload, err := initializer.Fetch(ctx) @@ -133,7 +162,7 @@ func (f *FDv2) runInitializers(ctx context.Context, closeWhenReady chan struct{} if err != nil { continue } - _ = f.store.GetMemory().Init(payload.Data) + f.store.Init(payload.Data) f.store.SwapToMemory(payload.Fresh)
f.readyOnce.Do(func() { close(closeWhenReady) @@ -154,7 +183,7 @@ func (f *FDv2) runSynchronizers(ctx context.Context, closeWhenReady chan struct{ } ready := make(chan struct{}) - f.primarySync.Start(ready, f.store.GetMemory(), payloadVersion) + f.primarySync.Sync(ready, payloadVersion) for { select { @@ -175,7 +204,7 @@ func (f *FDv2) runSynchronizers(ctx context.Context, closeWhenReady chan struct{ func (f *FDv2) Stop() error { if f.cancel != nil { f.cancel() - <-f.done + f.wg.Wait() } _ = f.store.Close() if f.primarySync != nil { @@ -199,8 +228,7 @@ func (f *FDv2) DataStatus() DataStatus { } func (f *FDv2) DataSourceStatusBroadcaster() *internal.Broadcaster[interfaces.DataSourceStatus] { - //TODO implement me - panic("implement me") + return f.broadcasters.dataSourceStatus } func (f *FDv2) DataSourceStatusProvider() interfaces.DataSourceStatusProvider { @@ -209,18 +237,15 @@ func (f *FDv2) DataSourceStatusProvider() interfaces.DataSourceStatusProvider { } func (f *FDv2) DataStoreStatusBroadcaster() *internal.Broadcaster[interfaces.DataStoreStatus] { - //TODO implement me - panic("implement me") + return f.broadcasters.dataStoreStatus } func (f *FDv2) DataStoreStatusProvider() interfaces.DataStoreStatusProvider { - //TODO implement me - panic("implement me") + return f.dataStoreStatusProvider } func (f *FDv2) FlagChangeEventBroadcaster() *internal.Broadcaster[interfaces.FlagChangeEvent] { - //TODO implement me - panic("implement me") + return f.broadcasters.flagChangeEvent } func (f *FDv2) Offline() bool { diff --git a/internal/datasystem/fdv2_store.go b/internal/datasystem/fdv2_store.go new file mode 100644 index 00000000..1d7db58a --- /dev/null +++ b/internal/datasystem/fdv2_store.go @@ -0,0 +1,180 @@ +package datasystem + +import ( + "github.com/launchdarkly/go-sdk-common/v3/ldlog" + "github.com/launchdarkly/go-server-sdk/v7/interfaces" + "github.com/launchdarkly/go-server-sdk/v7/internal/datakinds" + "github.com/launchdarkly/go-server-sdk/v7/internal/datastore" + "github.com/launchdarkly/go-server-sdk/v7/subsystems" + "github.com/launchdarkly/go-server-sdk/v7/subsystems/ldstoretypes" + "sync" +) + +type store struct { + // Represents a remote store, like Redis. This is optional; if present, it's only used + // before the in-memory store is initialized. + persistentStore subsystems.DataStore + + // The persistentStore is read-only, or read-write. In read-only mode, the store + // is *never* written to, and only read before the in-memory store is initialized. + // This is equivalent to the concept of "daemon mode". + // + // In read-write mode, data from initializers/synchronizers is written to the store + // as it is received. This is equivalent to the normal "persistent store" configuration + // that an SDK can use to collaborate with zero or more other SDKs with a (possibly shared) database. + persistentStoreMode subsystems.StoreMode + + // This exists as a quirk of the DataSourceUpdateSink interface, which store implements. The DataSourceUpdateSink + // has a method to return a DataStoreStatusProvider so that a DataSource can monitor the state of the store. This + // was originally used in fdv1 to know when the store went offline/online, so that data could be committed back + // to the store when it came back online. In fdv2 system, this is handled by the FDv2 struct itself, so the + // data source doesn't need any knowledge of it. 
We can delete this piece of infrastructure when we no longer + // need to support fdv1 (or we could refactor the fdv2 data sources to use a different set of interfaces that don't + // require this.) + persistentStoreStatusProvider interfaces.DataStoreStatusProvider + + // Represents the store that all flag/segment data queries are served from after data is received from + // initializers/synchronizers. Before the in-memory store is initialized, queries are served from the + // persistentStore (if configured). + memoryStore subsystems.DataStore + + // Whether the memoryStore is active or not. This should go from false -> true and never back. + memory bool + + // Whether the memoryStore's data should be considered authoritative, or fresh - that is, if it is known + // to be the latest data. Data from a baked-in file, for example, would not be considered refreshed. The purpose + // of this is to know if we should commit data to the persistentStore. For example, if we initialize with "stale" + // data from a local file (refreshed=false), we may not want to pollute a connected Redis database with it. + refreshed bool + + // Protects the memory and refreshed fields. + mu sync.RWMutex + + loggers ldlog.Loggers +} + +func newStore(loggers ldlog.Loggers) *store { + return &store{ + persistentStore: nil, + persistentStoreMode: subsystems.StoreModeRead, + memoryStore: datastore.NewInMemoryDataStore(loggers), + memory: true, + loggers: loggers, + } +} + +// This method exists only because of the weird way the Go SDK is configured - we need a ClientContext +// before we can call Build to actually get the persistent store. That ClientContext requires the +// DataStoreUpdateSink, which is what this store struct implements. +func (s *store) SetPersistent(persistent subsystems.DataStore, mode subsystems.StoreMode, statusProvider interfaces.DataStoreStatusProvider) { + s.persistentStore = persistent + s.persistentStoreMode = mode + s.memory = false +} + +func (s *store) Close() error { + if s.persistentStore != nil { + return s.persistentStore.Close() + } + return nil +} + +func (s *store) GetActive() subsystems.DataStore { + s.mu.RLock() + defer s.mu.RUnlock() + if s.memory { + return s.memoryStore + } + return s.persistentStore +} + +func (s *store) Status() DataStatus { + s.mu.RLock() + defer s.mu.RUnlock() + // The logic here is: + // 1. If the memory store is active, we either got that data from an (initializer|synchronizer) that indicated + // the data is the latest known (Refreshed) or that it is potentially stale (Cached). This is set when SwapToMemory + // is called. + // 2. Otherwise, the persistent store - if any - is active. If there is none configured, the status is Defaults. + // If there is, we need to query the database availability to determine if we actually have access to the data + // or not. + if s.memory { + if s.refreshed { + return Refreshed + } + return Cached + } + if s.persistentStore != nil { + if s.persistentStore.IsInitialized() { + return Cached + } + } + return Defaults + +} + +func (s *store) Mirroring() bool { + return s.persistentStore != nil && s.persistentStoreMode == subsystems.StoreModeReadWrite +} + +func (s *store) Init(allData []ldstoretypes.Collection) bool { + // TXNS-PS: Requirement 1.3.3, must apply updates to in-memory before the persistent store. + // TODO: handle errors from initializing the memory or persistent stores.
+ _ = s.memoryStore.Init(allData) + + if s.Mirroring() { + _ = s.persistentStore.Init(allData) // TODO: insert in topo-sort order + } + return true +} + +func (s *store) Upsert(kind ldstoretypes.DataKind, key string, item ldstoretypes.ItemDescriptor) bool { + var ( + memErr error + persErr error + ) + + // TXNS-PS: Requirement 1.3.3, must apply updates to in-memory before the persistent store. + _, memErr = s.memoryStore.Upsert(kind, key, item) + + if s.Mirroring() { + _, persErr = s.persistentStore.Upsert(kind, key, item) + } + return memErr == nil && persErr == nil +} + +func (s *store) UpdateStatus(newState interfaces.DataSourceState, newError interfaces.DataSourceErrorInfo) { + //TODO: In the FDv2 world, instead of having users check the state, we instead have them monitor the + // DataStatus(), because that's actually what they care about. + // For now, discard any status updates coming from the data sources. + s.loggers.Info("fdv2_store: swallowing status update (", newState, ", ", newError, ")") +} + +func (s *store) GetDataStoreStatusProvider() interfaces.DataStoreStatusProvider { + return s.persistentStoreStatusProvider +} + +func (s *store) SwapToMemory(isRefreshed bool) { + s.mu.Lock() + defer s.mu.Unlock() + s.memory = true + s.refreshed = isRefreshed +} + +func (s *store) Commit() error { + if s.Status() == Refreshed && s.Mirroring() { + flags, err := s.memoryStore.GetAll(datakinds.Features) + if err != nil { + return err + } + segments, err := s.memoryStore.GetAll(datakinds.Segments) + if err != nil { + return err + } + return s.persistentStore.Init([]ldstoretypes.Collection{ + {Kind: datakinds.Features, Items: flags}, + {Kind: datakinds.Segments, Items: segments}, + }) + } + return nil +} diff --git a/ldclient_end_to_end_test.go b/ldclient_end_to_end_test.go index 685c9276..a8e0dc18 100644 --- a/ldclient_end_to_end_test.go +++ b/ldclient_end_to_end_test.go @@ -44,26 +44,53 @@ func assertNoMoreRequests(t *testing.T, requestsCh <-chan httphelpers.HTTPReques } func TestDefaultDataSourceIsStreaming(t *testing.T) { - data := ldservices.NewServerSDKData().Flags(&alwaysTrueFlag) - streamHandler, _ := ldservices.ServerSideStreamingServiceHandler(data.ToPutEvent()) - httphelpers.WithServer(streamHandler, func(streamServer *httptest.Server) { - logCapture := ldlogtest.NewMockLog() - defer logCapture.DumpIfTestFailed(t) + t.Run("fdv1", func(t *testing.T) { + data := ldservices.NewServerSDKData().Flags(&alwaysTrueFlag) + streamHandler, _ := ldservices.ServerSideStreamingServiceHandler(data.ToPutEvent()) + httphelpers.WithServer(streamHandler, func(streamServer *httptest.Server) { + logCapture := ldlogtest.NewMockLog() + defer logCapture.DumpIfTestFailed(t) - config := Config{ - Events: ldcomponents.NoEvents(), - Logging: ldcomponents.Logging().Loggers(logCapture.Loggers), - ServiceEndpoints: interfaces.ServiceEndpoints{Streaming: streamServer.URL}, - } + config := Config{ + Events: ldcomponents.NoEvents(), + Logging: ldcomponents.Logging().Loggers(logCapture.Loggers), + ServiceEndpoints: interfaces.ServiceEndpoints{Streaming: streamServer.URL}, + } - client, err := MakeCustomClient(testSdkKey, config, time.Second*5) - require.NoError(t, err) - defer client.Close() + client, err := MakeCustomClient(testSdkKey, config, time.Second*5) + require.NoError(t, err) + defer client.Close() - assert.Equal(t, string(interfaces.DataSourceStateValid), string(client.GetDataSourceStatusProvider().GetStatus().State)) + assert.Equal(t, string(interfaces.DataSourceStateValid), 
string(client.GetDataSourceStatusProvider().GetStatus().State)) - value, _ := client.BoolVariation(alwaysTrueFlag.Key, testUser, false) - assert.True(t, value) + value, _ := client.BoolVariation(alwaysTrueFlag.Key, testUser, false) + assert.True(t, value) + }) + }) + + t.Run("fdv2", func(t *testing.T) { + data := ldservices.NewServerSDKData().Flags(&alwaysTrueFlag) + streamHandler, _ := ldservices.ServerSideStreamingServiceHandler(data.ToPutEvent()) + httphelpers.WithServer(streamHandler, func(streamServer *httptest.Server) { + logCapture := ldlogtest.NewMockLog() + defer logCapture.DumpIfTestFailed(t) + + config := Config{ + Events: ldcomponents.NoEvents(), + Logging: ldcomponents.Logging().Loggers(logCapture.Loggers), + ServiceEndpoints: interfaces.ServiceEndpoints{Streaming: streamServer.URL}, + DataSystem: ldcomponents.DataSystem(), + } + + client, err := MakeCustomClient(testSdkKey, config, time.Second*5) + require.NoError(t, err) + defer client.Close() + + assert.Equal(t, string(interfaces.DataSourceStateValid), string(client.GetDataSourceStatusProvider().GetStatus().State)) + + value, _ := client.BoolVariation(alwaysTrueFlag.Key, testUser, false) + assert.True(t, value) + }) }) } diff --git a/ldcomponents/data_system_configuration_builder.go b/ldcomponents/data_system_configuration_builder.go index f6160da8..aaec4955 100644 --- a/ldcomponents/data_system_configuration_builder.go +++ b/ldcomponents/data_system_configuration_builder.go @@ -3,6 +3,7 @@ package ldcomponents import ( "errors" "fmt" + "reflect" ss "github.com/launchdarkly/go-server-sdk/v7/subsystems" ) @@ -17,15 +18,38 @@ type DataSystemConfigurationBuilder struct { } func DataSystem() *DataSystemConfigurationBuilder { - return &DataSystemConfigurationBuilder{} + return &DataSystemConfigurationBuilder{ + primarySyncBuilder: toSynchronizer{StreamingDataSource().V2()}, + } +} + +type toSynchronizer struct { + configurer ss.ComponentConfigurer[ss.DataSource] +} + +func ToSynchronizer(configurer ss.ComponentConfigurer[ss.DataSource]) ss.ComponentConfigurer[ss.DataSynchronizer] { + return toSynchronizer{configurer} +} + +func (t toSynchronizer) Build(ctx ss.ClientContext) (ss.DataSynchronizer, error) { + datasource, err := t.configurer.Build(ctx) + if err != nil { + return nil, err + } + synchronizer, ok := datasource.(ss.DataSynchronizer) + if !ok { + panic("programmer error: " + reflect.TypeOf(datasource).Elem().Name() + " cannot be upgraded to subsystems.DataSynchronizer") + } + return synchronizer, nil + } func DaemonModeV2(store ss.ComponentConfigurer[ss.DataStore]) *DataSystemConfigurationBuilder { - return DataSystem().DataStore(store, ss.StoreModeRead) + return DataSystem().Initializers().Synchronizers(nil, nil).DataStore(store, ss.StoreModeRead) } func Offline() *DataSystemConfigurationBuilder { - return DataSystem().Offline(true) + return DataSystem().Initializers().Synchronizers(nil, nil).Offline(true) } // func PersistentStoreV2(store ss.ComponentConfigurer[ss.DataStore]) *DataSystemConfigurationBuilder { diff --git a/subsystems/data_source.go b/subsystems/data_source.go index 1ac0c19b..e7efb31e 100644 --- a/subsystems/data_source.go +++ b/subsystems/data_source.go @@ -35,6 +35,6 @@ type DataInitializer interface { } type DataSynchronizer interface { - Start(closeWhenReady chan struct{}, dataStore DataStore, payloadVersion *int) + Sync(closeWhenReady chan struct{}, payloadVersion *int) io.Closer } From d814a167b307f0dcba7b2d0ae4985f652281ff9b Mon Sep 17 00:00:00 2001 From: Casey Waldren Date: Mon, 9 Sep 2024 
18:54:12 -0700 Subject: [PATCH 13/62] comment --- internal/datasourcev2/streaming_data_source.go | 2 ++ 1 file changed, 2 insertions(+) diff --git a/internal/datasourcev2/streaming_data_source.go b/internal/datasourcev2/streaming_data_source.go index 59472eac..48318e6a 100644 --- a/internal/datasourcev2/streaming_data_source.go +++ b/internal/datasourcev2/streaming_data_source.go @@ -125,6 +125,8 @@ func (sp *StreamProcessor) Start(closeWhenReady chan<- struct{}) { go sp.subscribe(closeWhenReady) } +// Sync satisfies the new Synchronizer interface, which is similar to the old DataSource interface, but +// can take a payload version. For now, just ignore the payload version. func (sp *StreamProcessor) Sync(closeWhenReady chan struct{}, payloadVersion *int) { sp.Start(closeWhenReady) } From 8fef482c0012ad03b57db0b78b0184129d88e645 Mon Sep 17 00:00:00 2001 From: Casey Waldren Date: Mon, 9 Sep 2024 19:08:49 -0700 Subject: [PATCH 14/62] doc comments --- internal/datasystem/fdv2_datasystem.go | 59 ++++++++++++++++---------- 1 file changed, 36 insertions(+), 23 deletions(-) diff --git a/internal/datasystem/fdv2_datasystem.go b/internal/datasystem/fdv2_datasystem.go index 55c2a0ef..4cf712ab 100644 --- a/internal/datasystem/fdv2_datasystem.go +++ b/internal/datasystem/fdv2_datasystem.go @@ -91,9 +91,12 @@ func NewFDv2(cfgBuilder subsystems.ComponentConfigurer[subsystems.DataSystemConf } if cfg.Store != nil { + // If there's a persistent store, we should provide a status monitor and inform store that it's present. fdv2.dataStoreStatusProvider = datastore.NewDataStoreStatusProviderImpl(cfg.Store, dataStoreUpdateSink) store.SetPersistent(cfg.Store, cfg.StoreMode, fdv2.dataStoreStatusProvider) } else { + // If there's no persistent store, we still need to satisfy the SDK's public interface of having + // a data store status provider. So we create one that just says "I don't know what's going on". 
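+ // (Concretely, the noStatusMonitoring stub defined below answers
+ // IsStatusMonitoringEnabled() with false, so status listeners are accepted but never
+ // receive store-status updates.)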
fdv2.dataStoreStatusProvider = datastore.NewDataStoreStatusProviderImpl(noStatusMonitoring{}, dataStoreUpdateSink) } @@ -106,26 +109,10 @@ func (n noStatusMonitoring) IsStatusMonitoringEnabled() bool { return false } -func (f *FDv2) runPersistentStoreOutageRecovery(ctx context.Context, statuses <-chan interfaces.DataStoreStatus) { - for { - select { - case newStoreStatus := <-statuses: - if newStoreStatus.Available { - // The store has just transitioned from unavailable to available (scenario 2a above) - if newStoreStatus.NeedsRefresh { - f.loggers.Warn("Reinitializing data store from in-memory cache after after data store outage") - if err := f.store.Commit(); err != nil { - f.loggers.Error("Failed to reinitialize data store: %v", err) - } - } - } - case <-ctx.Done(): - return - } - } -} - func (f *FDv2) Start(closeWhenReady chan struct{}) { + if f.offline { + return + } ctx, cancel := context.WithCancel(context.Background()) f.cancel = cancel f.launchTask(func() { @@ -153,6 +140,25 @@ func (f *FDv2) run(ctx context.Context, closeWhenReady chan struct{}) { f.runSynchronizers(ctx, closeWhenReady, payloadVersion) } +func (f *FDv2) runPersistentStoreOutageRecovery(ctx context.Context, statuses <-chan interfaces.DataStoreStatus) { + for { + select { + case newStoreStatus := <-statuses: + if newStoreStatus.Available { + // The store has just transitioned from unavailable to available (scenario 2a above) + if newStoreStatus.NeedsRefresh { + f.loggers.Warn("Reinitializing data store from in-memory cache after after data store outage") + if err := f.store.Commit(); err != nil { + f.loggers.Error("Failed to reinitialize data store: %v", err) + } + } + } + case <-ctx.Done(): + return + } + } +} + func (f *FDv2) runInitializers(ctx context.Context, closeWhenReady chan struct{}) *int { for _, initializer := range f.initializers { payload, err := initializer.Fetch(ctx) @@ -182,15 +188,23 @@ func (f *FDv2) runSynchronizers(ctx context.Context, closeWhenReady chan struct{ return } + // We can't simply pass closeWhenReady to the data source, because it might have already been closed. + // Instead, create a "proxy" channel just for the data source; if that is closed, we close the real one + // using the sync.Once. ready := make(chan struct{}) f.primarySync.Sync(ready, payloadVersion) for { select { case <-ready: - // We may have synchronizers that don't actually validate that a payload is fresh. In this case, - // we'd need a mechanism to propagate the status to this method, just like for the initializers. - // For now, we assume that the only synchronizers are LaunchDarkly-provided and do receive fresh payloads. + // SwapToMemory takes a bool representing if the data is "fresh" or not. Fresh meaning we think it's from + // LaunchDarkly and represents the latest available. Here, we're assuming that any data from a synchronizer + // is fresh (since we currently control all the synchronizer implementations.) Theoretically it could be + // not fresh though, like polling some database. + + // TODO: this is an incorrect hack. What we should be doing is calling readyOnce - that's it. + // To trigger the swapping to the in-memory store, we need to be monitoring the Data Source status + // for "valid". This will currently swap even if the data source has failed. 
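+ // A sketch of that status-driven swap (illustrative only; it assumes the
+ // broadcaster exposes an AddListener channel, which is not part of this patch):
+ //
+ //	statuses := f.broadcasters.dataSourceStatus.AddListener()
+ //	for status := range statuses {
+ //		if status.State == interfaces.DataSourceStateValid {
+ //			f.store.SwapToMemory(true)
+ //			break
+ //		}
+ //	}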
f.store.SwapToMemory(true) f.readyOnce.Do(func() { close(closeWhenReady) @@ -232,7 +246,6 @@ func (f *FDv2) DataSourceStatusBroadcaster() *internal.Broadcaster[interfaces.Da } func (f *FDv2) DataSourceStatusProvider() interfaces.DataSourceStatusProvider { - //TODO implement me panic("implement me") } From 3e93a66195ae38c9f491484906348ce2ca25f3d4 Mon Sep 17 00:00:00 2001 From: Casey Waldren Date: Mon, 9 Sep 2024 19:34:52 -0700 Subject: [PATCH 15/62] copious comments --- internal/datasystem/fdv2_datasystem.go | 13 +++--- internal/datasystem/fdv2_store.go | 59 +++++++++++++++++++------- 2 files changed, 51 insertions(+), 21 deletions(-) diff --git a/internal/datasystem/fdv2_datasystem.go b/internal/datasystem/fdv2_datasystem.go index 4cf712ab..391ae092 100644 --- a/internal/datasystem/fdv2_datasystem.go +++ b/internal/datasystem/fdv2_datasystem.go @@ -145,7 +145,7 @@ func (f *FDv2) runPersistentStoreOutageRecovery(ctx context.Context, statuses <- select { case newStoreStatus := <-statuses: if newStoreStatus.Available { - // The store has just transitioned from unavailable to available (scenario 2a above) + // The store has just transitioned from unavailable to available if newStoreStatus.NeedsRefresh { f.loggers.Warn("Reinitializing data store from in-memory cache after after data store outage") if err := f.store.Commit(); err != nil { @@ -188,7 +188,7 @@ func (f *FDv2) runSynchronizers(ctx context.Context, closeWhenReady chan struct{ return } - // We can't simply pass closeWhenReady to the data source, because it might have already been closed. + // We can't pass closeWhenReady to the data source, because it might have already been closed. // Instead, create a "proxy" channel just for the data source; if that is closed, we close the real one // using the sync.Once. ready := make(chan struct{}) @@ -202,9 +202,10 @@ func (f *FDv2) runSynchronizers(ctx context.Context, closeWhenReady chan struct{ // is fresh (since we currently control all the synchronizer implementations.) Theoretically it could be // not fresh though, like polling some database. - // TODO: this is an incorrect hack. What we should be doing is calling readyOnce - that's it. - // To trigger the swapping to the in-memory store, we need to be monitoring the Data Source status - // for "valid". This will currently swap even if the data source has failed. + // TODO: this is an incorrect hack. The responsibility of this loop should be limited to + // calling readyOnce/close. + // To trigger the swapping to the in-memory store, we need to be independently monitoring the Data Source status + // for "valid" status. This hack will currently swap even if the data source has failed. f.store.SwapToMemory(true) f.readyOnce.Do(func() { close(closeWhenReady) @@ -238,7 +239,7 @@ func (f *FDv2) DataStatus() DataStatus { if f.offline { return Defaults } - return f.store.Status() + return f.store.DataStatus() } func (f *FDv2) DataSourceStatusBroadcaster() *internal.Broadcaster[interfaces.DataSourceStatus] { diff --git a/internal/datasystem/fdv2_store.go b/internal/datasystem/fdv2_store.go index 1d7db58a..b0f9170e 100644 --- a/internal/datasystem/fdv2_store.go +++ b/internal/datasystem/fdv2_store.go @@ -10,6 +10,28 @@ import ( "sync" ) +// store is a hybrid persistent/in-memory store that serves queries for data from the evaluation +// algorithm. + +// At any given moment, 1 of 2 stores is active: in-memory, or persistent. 
This doesn't preclude a caller +// from holding on to a reference to the persistent store even when we swap to the in-memory store. +// +// Once the in-memory store has data (either from initializers running, or from a synchronizer), the persistent +// store is no longer regarded as active. From that point forward, GetActive() will return the in-memory store. +// +// The idea is that persistent stores can offer a way to immediately start evaluating flags before a connection +// is made to LD (or even in a very brief moment before an initializer has run.) The persistent store has caching +// logic which can result in inconsistent/stale date being used. Therefore, once we have fresh data, we don't +// want to use the persistent store at all. +// +// A complication is that persistent stores have historically operated in multiple regimes. The first is "daemon mode", +// where the SDK is effectively using the store in read-only mode, with the store being populated by Relay or another SDK. +// The second is just plain persistent store mode, where it is both read and written to. In the FDv2 system, we explicitly +// differentiate these cases using a read/read-write mode. In all cases, the in-memory store is used once it has data available. +// This contrasts from FDv1 where even if data from LD is available, that data may fall out of memory due to the persistent +// store's caching logic ("sparse mode", when the TTL is non-infinite). + +// We have found this to almost always be undesirable for users. type store struct { // Represents a remote store, like Redis. This is optional; if present, it's only used // before the in-memory store is initialized. @@ -53,6 +75,8 @@ type store struct { loggers ldlog.Loggers } +// Creates a new store. By default the store is in-memory. To add a persistent store, call SetPersistent. Ensure this is +// called at configuration time, only once and before the store is ever accessed. func newStore(loggers ldlog.Loggers) *store { return &store{ persistentStore: nil, @@ -63,8 +87,8 @@ func newStore(loggers ldlog.Loggers) *store { } } -// This method exists only because of the weird way the Go SDK is configured - we need a ClientContext -// before we can call Build to actually get ther persistent store. That ClientContext requires the +// SetPersistent exists only because of the weird way the Go SDK is configured - we need a ClientContext +// before we can call Build to actually get the persistent store. That ClientContext requires the // DataStoreUpdateSink, which is what this store struct implements. func (s *store) SetPersistent(persistent subsystems.DataStore, mode subsystems.StoreMode, statusProvider interfaces.DataStoreStatusProvider) { s.persistentStore = persistent @@ -72,6 +96,7 @@ func (s *store) SetPersistent(persistent subsystems.DataStore, mode subsystems.S s.memory = false } +// Close closes the store. If there is a persistent store configured, it will be closed. func (s *store) Close() error { if s.persistentStore != nil { return s.persistentStore.Close() @@ -79,25 +104,22 @@ func (s *store) Close() error { return nil } +// GetActive returns the active store, either persistent or in-memory. If there is no persistent store configured, +// the in-memory store is always active. 
func (s *store) GetActive() subsystems.DataStore { s.mu.RLock() defer s.mu.RUnlock() - if s.memory { + if s.memory || s.persistentStore == nil { return s.memoryStore } return s.persistentStore } -func (s *store) Status() DataStatus { +// DataStatus returns the status of the store's data. Defaults means there is no data, Cached means there is +// data, but it's not guaranteed to be recent, and Refreshed means the data has been refreshed from the server. +func (s *store) DataStatus() DataStatus { s.mu.RLock() defer s.mu.RUnlock() - // The logic here is: - // 1. If the memory store is active, we either got that data from an (initializer|synchronizer) that indicated - // the data is the latest known (Refreshed) or that it is potentially stale (Cached). This is set when SwapToMemory - // is called. - // 2. Otherwise, the persistent store - if any - is active. If there is none configured, the status is Defaults. - // If there is, we need to query the database availability to determine if we actually have access to the data - // or not. if s.memory { if s.refreshed { return Refreshed @@ -113,10 +135,12 @@ func (s *store) Status() DataStatus { } +// Mirroring returns true if data is being mirrored to a persistent store. func (s *store) Mirroring() bool { return s.persistentStore != nil && s.persistentStoreMode == subsystems.StoreModeReadWrite } +// nolint:revive // Standard DataSourceUpdateSink method func (s *store) Init(allData []ldstoretypes.Collection) bool { // TXNS-PS: Requirement 1.3.3, must apply updates to in-memory before the persistent Store. // TODO: handle errors from initializing the memory or persistent stores. @@ -128,6 +152,7 @@ func (s *store) Init(allData []ldstoretypes.Collection) bool { } +// nolint:revive // Standard DataSourceUpdateSink method func (s *store) Upsert(kind ldstoretypes.DataKind, key string, item ldstoretypes.ItemDescriptor) bool { var ( memErr error @@ -143,13 +168,17 @@ func (s *store) Upsert(kind ldstoretypes.DataKind, key string, item ldstoretypes } +// nolint:revive // Standard DataSourceUpdateSink method func (s *store) UpdateStatus(newState interfaces.DataSourceState, newError interfaces.DataSourceErrorInfo) { -//TODO: In the FDv2 world, instead of having users check the state, we instead have them monitor the - // DataStatus(), because that's actually what they care about. - // For now, discard any status updates coming from the data sources. +//TODO: although DataSourceUpdateSink is where data is pushed to the store by the data source, it doesn't really + // make sense to have it also be the place that status updates are received. It only cares whether data has + // *ever* been received, and that is already known by the store. + // This should probably be refactored so that the data source takes a separate injected dependency for the + // status updates.
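+ // A sketch of that injected dependency (hypothetical; no such interface exists in
+ // the SDK today):
+ //
+ //	type dataSourceStatusSink interface {
+ //		UpdateStatus(newState interfaces.DataSourceState, newError interfaces.DataSourceErrorInfo)
+ //	}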
s.loggers.Info("fdv2_store: swallowing status update (", newState, ", ", newError, ")") } +// nolint:revive // Standard DataSourceUpdateSink method func (s *store) GetDataStoreStatusProvider() interfaces.DataStoreStatusProvider { return s.persistentStoreStatusProvider } @@ -162,7 +191,7 @@ func (s *store) SwapToMemory(isRefreshed bool) { } func (s *store) Commit() error { - if s.Status() == Refreshed && s.Mirroring() { + if s.DataStatus() == Refreshed && s.Mirroring() { flags, err := s.memoryStore.GetAll(datakinds.Features) if err != nil { return err From a99a329ae46443703dd962889babd59b87ef3f27 Mon Sep 17 00:00:00 2001 From: Casey Waldren Date: Tue, 10 Sep 2024 14:17:39 -0700 Subject: [PATCH 16/62] adding store unit tests --- internal/datasystem/fdv2_datasystem.go | 21 ++++---- internal/datasystem/fdv2_store.go | 56 ++++++++++---------- internal/datasystem/fdv2_store_test.go | 72 ++++++++++++++++++++++++++ ldclient.go | 2 + 4 files changed, 115 insertions(+), 36 deletions(-) create mode 100644 internal/datasystem/fdv2_store_test.go diff --git a/internal/datasystem/fdv2_datasystem.go b/internal/datasystem/fdv2_datasystem.go index 391ae092..31f3309c 100644 --- a/internal/datasystem/fdv2_datasystem.go +++ b/internal/datasystem/fdv2_datasystem.go @@ -11,7 +11,7 @@ import ( "sync" ) -var _ subsystems.DataSourceUpdateSink = (*store)(nil) +var _ subsystems.DataSourceUpdateSink = (*Store)(nil) type broadcasters struct { dataSourceStatus *internal.Broadcaster[interfaces.DataSourceStatus] @@ -21,7 +21,7 @@ type broadcasters struct { type FDv2 struct { // Operates the in-memory and optional persistent store that backs data queries. - store *store + store *Store // List of initializers that are capable of obtaining an initial payload of data. initializers []subsystems.DataInitializer @@ -62,7 +62,7 @@ type FDv2 struct { func NewFDv2(cfgBuilder subsystems.ComponentConfigurer[subsystems.DataSystemConfiguration], clientContext *internal.ClientContextImpl) (*FDv2, error) { - store := newStore(clientContext.GetLogging().Loggers) + store := NewStore(clientContext.GetLogging().Loggers) bcasters := &broadcasters{ dataSourceStatus: internal.NewBroadcaster[interfaces.DataSourceStatus](), @@ -91,12 +91,12 @@ func NewFDv2(cfgBuilder subsystems.ComponentConfigurer[subsystems.DataSystemConf } if cfg.Store != nil { - // If there's a persistent store, we should provide a status monitor and inform store that it's present. + // If there's a persistent Store, we should provide a status monitor and inform Store that it's present. fdv2.dataStoreStatusProvider = datastore.NewDataStoreStatusProviderImpl(cfg.Store, dataStoreUpdateSink) store.SetPersistent(cfg.Store, cfg.StoreMode, fdv2.dataStoreStatusProvider) } else { - // If there's no persistent store, we still need to satisfy the SDK's public interface of having - // a data store status provider. So we create one that just says "I don't know what's going on". + // If there's no persistent Store, we still need to satisfy the SDK's public interface of having + // a data Store status provider. So we create one that just says "I don't know what's going on". 
fdv2.dataStoreStatusProvider = datastore.NewDataStoreStatusProviderImpl(noStatusMonitoring{}, dataStoreUpdateSink) } @@ -145,11 +145,11 @@ func (f *FDv2) runPersistentStoreOutageRecovery(ctx context.Context, statuses <- select { case newStoreStatus := <-statuses: if newStoreStatus.Available { - // The store has just transitioned from unavailable to available + // The Store has just transitioned from unavailable to available if newStoreStatus.NeedsRefresh { - f.loggers.Warn("Reinitializing data store from in-memory cache after after data store outage") + f.loggers.Warn("Reinitializing data Store from in-memory cache after data Store outage") if err := f.store.Commit(); err != nil { - f.loggers.Error("Failed to reinitialize data store: %v", err) + f.loggers.Error("Failed to reinitialize data Store: %v", err) @@ -166,6 +166,7 @@ func (f *FDv2) runInitializers(ctx context.Context, closeWhenReady chan struct{} return nil } if err != nil { + // TODO: log that this initializer failed continue } f.store.Init(payload.Data) @@ -204,7 +205,7 @@ func (f *FDv2) runSynchronizers(ctx context.Context, closeWhenReady chan struct{ // TODO: this is an incorrect hack. The responsibility of this loop should be limited to // calling readyOnce/close. - // To trigger the swapping to the in-memory store, we need to be independently monitoring the Data Source status + // To trigger the swapping to the in-memory Store, we need to be independently monitoring the Data Source status // for "valid" status. This hack will currently swap even if the data source has failed. f.store.SwapToMemory(true) diff --git a/internal/datasystem/fdv2_store.go b/internal/datasystem/fdv2_store.go index b0f9170e..d00700f1 100644 --- a/internal/datasystem/fdv2_store.go +++ b/internal/datasystem/fdv2_store.go @@ -10,9 +10,9 @@ import ( "sync" ) -// store is a hybrid persistent/in-memory store that serves queries for data from the evaluation +// Store is a hybrid persistent/in-memory store that serves queries for data from the evaluation // algorithm. - +// // At any given moment, 1 of 2 stores is active: in-memory, or persistent. This doesn't preclude a caller // from holding on to a reference to the persistent store even when we swap to the in-memory store. @@ -30,9 +30,9 @@ import ( // differentiate these cases using a read/read-write mode. In all cases, the in-memory store is used once it has data available. // This contrasts with FDv1 where even if data from LD is available, that data may fall out of memory due to the persistent // store's caching logic ("sparse mode", when the TTL is non-infinite). - +// // We have found this to almost always be undesirable for users. -type store struct { +type Store struct { // Represents a remote store, like Redis. This is optional; if present, it's only used // before the in-memory store is initialized. persistentStore subsystems.DataStore @@ -67,6 +67,8 @@ type Store struct { // to be the latest data. Data from a baked in file for example would not be considered refreshed. The purpose // of this is to know if we should commit data to the persistentStore. For example, if we initialize with "stale" // data from a local file (refreshed=false), we may not want to pollute a connected Redis database with it. + // TODO: this could also be called "Authoritative". "It was the latest at some point... that point being when we asked + // if it was the latest". refreshed bool // Protects the memory and refreshed fields.
@@ -75,14 +77,15 @@ type store struct { loggers ldlog.Loggers } -// Creates a new store. By default the store is in-memory. To add a persistent store, call SetPersistent. Ensure this is +// NewStore creates a new store. By default the store is in-memory. To add a persistent store, call SetPersistent. Ensure this is // called at configuration time, only once and before the store is ever accessed. -func newStore(loggers ldlog.Loggers) *store { - return &store{ +func NewStore(loggers ldlog.Loggers) *Store { + return &Store{ persistentStore: nil, persistentStoreMode: subsystems.StoreModeRead, memoryStore: datastore.NewInMemoryDataStore(loggers), memory: true, + refreshed: false, loggers: loggers, } } @@ -90,14 +93,15 @@ func newStore(loggers ldlog.Loggers) *store { // SetPersistent exists only because of the weird way the Go SDK is configured - we need a ClientContext // before we can call Build to actually get the persistent store. That ClientContext requires the // DataStoreUpdateSink, which is what this store struct implements. -func (s *store) SetPersistent(persistent subsystems.DataStore, mode subsystems.StoreMode, statusProvider interfaces.DataStoreStatusProvider) { +func (s *Store) SetPersistent(persistent subsystems.DataStore, mode subsystems.StoreMode, statusProvider interfaces.DataStoreStatusProvider) { s.persistentStore = persistent s.persistentStoreMode = mode + s.persistentStoreStatusProvider = statusProvider s.memory = false } // Close closes the store. If there is a persistent store configured, it will be closed. -func (s *store) Close() error { +func (s *Store) Close() error { if s.persistentStore != nil { return s.persistentStore.Close() } @@ -106,7 +110,7 @@ func (s *store) Close() error { // GetActive returns the active store, either persistent or in-memory. If there is no persistent store configured, // the in-memory store is always active. -func (s *store) GetActive() subsystems.DataStore { +func (s *Store) GetActive() subsystems.DataStore { s.mu.RLock() defer s.mu.RUnlock() if s.memory || s.persistentStore == nil { @@ -117,32 +121,32 @@ func (s *store) GetActive() subsystems.DataStore { // DataStatus returns the status of the store's data. Defaults means there is no data, Cached means there is // data, but it's not guaranteed to be recent, and Refreshed means the data has been refreshed from the server. -func (s *store) DataStatus() DataStatus { +func (s *Store) DataStatus() DataStatus { s.mu.RLock() defer s.mu.RUnlock() if s.memory { - if s.refreshed { - return Refreshed - } - return Cached - } - if s.persistentStore != nil { - if s.persistentStore.IsInitialized() { + if s.memoryStore.IsInitialized() { + if s.refreshed { + return Refreshed + } return Cached } } + if s.persistentStore != nil && s.persistentStore.IsInitialized() { + return Cached + } return Defaults } // Mirroring returns true data is being mirrored to a persistent store. -func (s *store) Mirroring() bool { +func (s *Store) Mirroring() bool { return s.persistentStore != nil && s.persistentStoreMode == subsystems.StoreModeReadWrite } // nolint:revive // Standard DataSourceUpdateSink method -func (s *store) Init(allData []ldstoretypes.Collection) bool { - // TXNS-PS: Requirement 1.3.3, must apply updates to in-memory before the persistent store. +func (s *Store) Init(allData []ldstoretypes.Collection) bool { + // TXNS-PS: Requirement 1.3.3, must apply updates to in-memory before the persistent Store. // TODO: handle errors from initializing the memory or persistent stores. 
_ = s.memoryStore.Init(allData) @@ -153,7 +157,7 @@ func (s *store) Init(allData []ldstoretypes.Collection) bool { } // nolint:revive // Standard DataSourceUpdateSink method -func (s *store) Upsert(kind ldstoretypes.DataKind, key string, item ldstoretypes.ItemDescriptor) bool { +func (s *Store) Upsert(kind ldstoretypes.DataKind, key string, item ldstoretypes.ItemDescriptor) bool { var ( memErr error persErr error @@ -169,7 +173,7 @@ func (s *store) Upsert(kind ldstoretypes.DataKind, key string, item ldstoretypes } // nolint:revive // Standard DataSourceUpdateSink method -func (s *store) UpdateStatus(newState interfaces.DataSourceState, newError interfaces.DataSourceErrorInfo) { +func (s *Store) UpdateStatus(newState interfaces.DataSourceState, newError interfaces.DataSourceErrorInfo) { //TODO: although DataSourceUpdateSink is where data is pushed to the store by the data source, it doesn't really // make sense to have it also be the place that status updates are received. It only cares whether data has // *ever* been received, and that is already known by the store. @@ -179,18 +183,18 @@ func (s *store) UpdateStatus(newState interfaces.DataSourceState, newError inter } // nolint:revive // Standard DataSourceUpdateSink method -func (s *store) GetDataStoreStatusProvider() interfaces.DataStoreStatusProvider { +func (s *Store) GetDataStoreStatusProvider() interfaces.DataStoreStatusProvider { return s.persistentStoreStatusProvider } -func (s *store) SwapToMemory(isRefreshed bool) { +func (s *Store) SwapToMemory(isRefreshed bool) { s.mu.Lock() defer s.mu.Unlock() s.memory = true s.refreshed = isRefreshed } -func (s *store) Commit() error { +func (s *Store) Commit() error { if s.DataStatus() == Refreshed && s.Mirroring() { flags, err := s.memoryStore.GetAll(datakinds.Features) if err != nil { diff --git a/internal/datasystem/fdv2_store_test.go b/internal/datasystem/fdv2_store_test.go new file mode 100644 index 00000000..bc7156a2 --- /dev/null +++ b/internal/datasystem/fdv2_store_test.go @@ -0,0 +1,72 @@ +package datasystem + +import ( + "github.com/launchdarkly/go-sdk-common/v3/ldlogtest" + "github.com/launchdarkly/go-server-sdk/v7/subsystems/ldstoreimpl" + "github.com/launchdarkly/go-server-sdk/v7/subsystems/ldstoretypes" + "github.com/stretchr/testify/assert" + "testing" +) + +func TestStore_New(t *testing.T) { + logCapture := ldlogtest.NewMockLog() + store := NewStore(logCapture.Loggers) + assert.NoError(t, store.Close()) +} + +func TestStore_NoPersistence_NewStore_DataStatus(t *testing.T) { + logCapture := ldlogtest.NewMockLog() + store := NewStore(logCapture.Loggers) + defer store.Close() + assert.Equal(t, store.DataStatus(), Defaults) +} + +func TestStore_NoPersistence_MemoryStoreInitialized_DataStatus(t *testing.T) { + tests := []struct { + name string + refreshed bool + expected DataStatus + }{ + {"fresh data", true, Refreshed}, + {"cached data", false, Cached}, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + logCapture := ldlogtest.NewMockLog() + store := NewStore(logCapture.Loggers) + defer store.Close() + store.Init([]ldstoretypes.Collection{}) + assert.Equal(t, store.DataStatus(), Cached) + store.SwapToMemory(tt.refreshed) + assert.Equal(t, store.DataStatus(), tt.expected) + }) + } +} + +func TestStore_NoPersistence_Commit_NoCrashesCaused(t *testing.T) { + logCapture := ldlogtest.NewMockLog() + store := NewStore(logCapture.Loggers) + defer store.Close() + assert.NoError(t, store.Commit()) +} + +func TestStore_GetActive(t *testing.T) { + t.Run("memory store is 
active if no persistent store configured", func(t *testing.T) { + logCapture := ldlogtest.NewMockLog() + store := NewStore(logCapture.Loggers) + defer store.Close() + foo, err := store.GetActive().Get(ldstoreimpl.Features(), "foo") + assert.NoError(t, err) + assert.Equal(t, foo, ldstoretypes.ItemDescriptor{}.NotFound()) + + assert.True(t, store.Init([]ldstoretypes.Collection{ + {Kind: ldstoreimpl.Features(), Items: []ldstoretypes.KeyedItemDescriptor{ + {Key: "foo", Item: ldstoretypes.ItemDescriptor{Version: 1}}, + }}, + })) + + foo, err = store.GetActive().Get(ldstoreimpl.Features(), "foo") + assert.NoError(t, err) + assert.Equal(t, 1, foo.Version) + }) +} diff --git a/ldclient.go b/ldclient.go index 350a1e71..98fa1563 100644 --- a/ldclient.go +++ b/ldclient.go @@ -75,6 +75,7 @@ type dataSystem interface { DataStoreStatusBroadcaster() *internal.Broadcaster[interfaces.DataStoreStatus] DataStoreStatusProvider() interfaces.DataStoreStatusProvider FlagChangeEventBroadcaster() *internal.Broadcaster[interfaces.FlagChangeEvent] + // Offline indicates whether the SDK is configured to be offline, either because the offline config item was // explicitly set, or because a NullDataSource was used. Offline() bool @@ -290,6 +291,7 @@ func MakeCustomClient(sdkKey string, config Config, waitFor time.Duration) (*LDC ) } + // TODO: We can't actually pass Store() here because it won't swap between the active ones. dataProvider := ldstoreimpl.NewDataStoreEvaluatorDataProvider(client.dataSystem.Store(), loggers) evalOptions := []ldeval.EvaluatorOption{ ldeval.EvaluatorOptionErrorLogger(client.loggers.ForLevel(ldlog.Error)), From b4e0e4b1bfe8fcfe7a82c1c1a942e6606540524d Mon Sep 17 00:00:00 2001 From: Casey Waldren Date: Tue, 10 Sep 2024 14:32:57 -0700 Subject: [PATCH 17/62] use pointer swap to switch stores --- internal/datasystem/fdv2_datasystem.go | 5 +- internal/datasystem/fdv2_store.go | 66 ++++++++++++++------------ internal/datasystem/fdv2_store_test.go | 12 ++++- 3 files changed, 49 insertions(+), 34 deletions(-) diff --git a/internal/datasystem/fdv2_datasystem.go b/internal/datasystem/fdv2_datasystem.go index 31f3309c..36e54763 100644 --- a/internal/datasystem/fdv2_datasystem.go +++ b/internal/datasystem/fdv2_datasystem.go @@ -93,7 +93,7 @@ func NewFDv2(cfgBuilder subsystems.ComponentConfigurer[subsystems.DataSystemConf if cfg.Store != nil { // If there's a persistent Store, we should provide a status monitor and inform Store that it's present. fdv2.dataStoreStatusProvider = datastore.NewDataStoreStatusProviderImpl(cfg.Store, dataStoreUpdateSink) - store.SetPersistent(cfg.Store, cfg.StoreMode, fdv2.dataStoreStatusProvider) + store.SwapToPersistent(cfg.Store, cfg.StoreMode, fdv2.dataStoreStatusProvider) } else { // If there's no persistent Store, we still need to satisfy the SDK's public interface of having // a data Store status provider. So we create one that just says "I don't know what's going on".
@@ -233,7 +234,7 @@ func (f *FDv2) Stop() error { } func (f *FDv2) Store() subsystems.ReadOnlyStore { - return f.store.GetActive() + return f.store } func (f *FDv2) DataStatus() DataStatus { diff --git a/internal/datasystem/fdv2_store.go b/internal/datasystem/fdv2_store.go index d00700f1..7b1a1f9f 100644 --- a/internal/datasystem/fdv2_store.go +++ b/internal/datasystem/fdv2_store.go @@ -60,8 +60,7 @@ type Store struct { // persistentStore (if configured). memoryStore subsystems.DataStore - // Whether the memoryStore is active or not. This should go from false -> true and never back. - memory bool + active subsystems.DataStore // Whether the memoryStore's data should be considered authoritative, or fresh - that is, if it is known // to be the latest data. Data from a baked in file for example would not be considered refreshed. The purpose // of this is to know if we should commit data to the persistentStore. For example, if we initialize with "stale" // data from a local file (refreshed=false), we may not want to pollute a connected Redis database with it. @@ -77,27 +76,18 @@ type Store struct { loggers ldlog.Loggers } -// NewStore creates a new store. By default the store is in-memory. To add a persistent store, call SetPersistent. Ensure this is +// NewStore creates a new store. By default the store is in-memory. To add a persistent store, call SwapToPersistent. Ensure this is // called at configuration time, only once and before the store is ever accessed. func NewStore(loggers ldlog.Loggers) *Store { - return &Store{ + s := &Store{ persistentStore: nil, persistentStoreMode: subsystems.StoreModeRead, memoryStore: datastore.NewInMemoryDataStore(loggers), - memory: true, refreshed: false, loggers: loggers, } -} - -// SetPersistent exists only because of the weird way the Go SDK is configured - we need a ClientContext -// before we can call Build to actually get the persistent store. That ClientContext requires the -// DataStoreUpdateSink, which is what this store struct implements. -func (s *Store) SetPersistent(persistent subsystems.DataStore, mode subsystems.StoreMode, statusProvider interfaces.DataStoreStatusProvider) { - s.persistentStore = persistent - s.persistentStoreMode = mode - s.persistentStoreStatusProvider = statusProvider - s.memory = false + s.SwapToMemory(false) + return s } // Close closes the store. If there is a persistent store configured, it will be closed. @@ -110,13 +100,10 @@ func (s *Store) Close() error { -// GetActive returns the active store, either persistent or in-memory. If there is no persistent store configured, -// the in-memory store is always active. -func (s *Store) GetActive() subsystems.DataStore { +// getActive returns the active store, either persistent or in-memory. If there is no persistent store configured, +// the in-memory store is always active. +func (s *Store) getActive() subsystems.DataStore { s.mu.RLock() defer s.mu.RUnlock() - if s.memory || s.persistentStore == nil { - return s.memoryStore - } - return s.persistentStore + return s.active } // DataStatus returns the status of the store's data. Defaults means there is no data, Cached means there is @@ -124,19 +111,13 @@ func (s *Store) GetActive() subsystems.DataStore { func (s *Store) DataStatus() DataStatus { s.mu.RLock() defer s.mu.RUnlock() - if s.memory { - if s.memoryStore.IsInitialized() { - if s.refreshed { - return Refreshed - } - return Cached + if s.active.IsInitialized() { + if s.refreshed { + return Refreshed } - } - if s.persistentStore != nil && s.persistentStore.IsInitialized() { return Cached } return Defaults } // Mirroring returns true if data is being mirrored to a persistent store.
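The practical effect of the pointer swap is that a consumer can hold one reference to the Store wrapper for the client's lifetime and still follow swaps, because every query re-resolves the active store under the lock. A sketch of the intended call site (reusing the ldclient.go line from the previous patch):

	// The evaluator keeps this provider forever; each Get/GetAll is routed to
	// whichever store is active at that moment.
	dataProvider := ldstoreimpl.NewDataStoreEvaluatorDataProvider(client.dataSystem.Store(), loggers)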
@@ -187,11 +168,24 @@ func (s *Store) GetDataStoreStatusProvider() interfaces.DataStoreStatusProvider return s.persistentStoreStatusProvider } +// SwapToPersistent exists only because of the weird way the Go SDK is configured - we need a ClientContext +// before we can call Build to actually get the persistent store. That ClientContext requires the +// DataStoreUpdateSink, which is what this store struct implements. +func (s *Store) SwapToPersistent(persistent subsystems.DataStore, mode subsystems.StoreMode, statusProvider interfaces.DataStoreStatusProvider) { + s.mu.Lock() + defer s.mu.Unlock() + s.persistentStore = persistent + s.persistentStoreMode = mode + s.persistentStoreStatusProvider = statusProvider + s.active = s.persistentStore + s.refreshed = false +} + func (s *Store) SwapToMemory(isRefreshed bool) { s.mu.Lock() defer s.mu.Unlock() - s.memory = true s.refreshed = isRefreshed + s.active = s.memoryStore } func (s *Store) Commit() error { @@ -211,3 +205,15 @@ func (s *Store) Commit() error { } return nil } + +func (s *Store) GetAll(kind ldstoretypes.DataKind) ([]ldstoretypes.KeyedItemDescriptor, error) { + return s.getActive().GetAll(kind) +} + +func (s *Store) Get(kind ldstoretypes.DataKind, key string) (ldstoretypes.ItemDescriptor, error) { + return s.getActive().Get(kind, key) +} + +func (s *Store) IsInitialized() bool { + return s.getActive().IsInitialized() +} diff --git a/internal/datasystem/fdv2_store_test.go b/internal/datasystem/fdv2_store_test.go index bc7156a2..77752be0 100644 --- a/internal/datasystem/fdv2_store_test.go +++ b/internal/datasystem/fdv2_store_test.go @@ -21,6 +21,13 @@ func TestStore_NoPersistence_NewStore_DataStatus(t *testing.T) { assert.Equal(t, store.DataStatus(), Defaults) } +func TestStore_NoPersistence_NewStore_IsInitialized(t *testing.T) { + logCapture := ldlogtest.NewMockLog() + store := NewStore(logCapture.Loggers) + defer store.Close() + assert.False(t, store.IsInitialized()) +} + func TestStore_NoPersistence_MemoryStoreInitialized_DataStatus(t *testing.T) { tests := []struct { name string @@ -37,6 +44,7 @@ func TestStore_NoPersistence_MemoryStoreInitialized_DataStatus(t *testing.T) { defer store.Close() store.Init([]ldstoretypes.Collection{}) assert.Equal(t, store.DataStatus(), Cached) + assert.True(t, store.IsInitialized()) store.SwapToMemory(tt.refreshed) assert.Equal(t, store.DataStatus(), tt.expected) }) @@ -55,7 +63,7 @@ func TestStore_GetActive(t *testing.T) { logCapture := ldlogtest.NewMockLog() store := NewStore(logCapture.Loggers) defer store.Close() - foo, err := store.GetActive().Get(ldstoreimpl.Features(), "foo") + foo, err := store.Get(ldstoreimpl.Features(), "foo") assert.NoError(t, err) assert.Equal(t, foo, ldstoretypes.ItemDescriptor{}.NotFound()) @@ -65,7 +73,7 @@ func TestStore_GetActive(t *testing.T) { }}, })) - foo, err = store.GetActive().Get(ldstoreimpl.Features(), "foo") + foo, err = store.Get(ldstoreimpl.Features(), "foo") assert.NoError(t, err) assert.Equal(t, 1, foo.Version) }) From 98d660beacb87c6ea6154cabb83fe23cbd0af548 Mon Sep 17 00:00:00 2001 From: Casey Waldren Date: Tue, 10 Sep 2024 14:35:06 -0700 Subject: [PATCH 18/62] goimports --- internal/datasystem/fdv2_datasystem.go | 3 ++- internal/datasystem/fdv2_store.go | 3 ++- internal/datasystem/fdv2_store_test.go | 3 ++- subsystems/data_source.go | 3 ++- 4 files changed, 8 insertions(+), 4 deletions(-) diff --git a/internal/datasystem/fdv2_datasystem.go b/internal/datasystem/fdv2_datasystem.go index 36e54763..9bcecb92 100644 --- 
a/internal/datasystem/fdv2_datasystem.go +++ b/internal/datasystem/fdv2_datasystem.go @@ -3,12 +3,13 @@ package datasystem import ( "context" "errors" + "sync" + "github.com/launchdarkly/go-sdk-common/v3/ldlog" "github.com/launchdarkly/go-server-sdk/v7/interfaces" "github.com/launchdarkly/go-server-sdk/v7/internal" "github.com/launchdarkly/go-server-sdk/v7/internal/datastore" "github.com/launchdarkly/go-server-sdk/v7/subsystems" - "sync" ) var _ subsystems.DataSourceUpdateSink = (*Store)(nil) diff --git a/internal/datasystem/fdv2_store.go b/internal/datasystem/fdv2_store.go index 7b1a1f9f..3eeeb87f 100644 --- a/internal/datasystem/fdv2_store.go +++ b/internal/datasystem/fdv2_store.go @@ -1,13 +1,14 @@ package datasystem import ( + "sync" + "github.com/launchdarkly/go-sdk-common/v3/ldlog" "github.com/launchdarkly/go-server-sdk/v7/interfaces" "github.com/launchdarkly/go-server-sdk/v7/internal/datakinds" "github.com/launchdarkly/go-server-sdk/v7/internal/datastore" "github.com/launchdarkly/go-server-sdk/v7/subsystems" "github.com/launchdarkly/go-server-sdk/v7/subsystems/ldstoretypes" - "sync" ) // Store is a hybrid persistent/in-memory store that serves queries for data from the evaluation diff --git a/internal/datasystem/fdv2_store_test.go b/internal/datasystem/fdv2_store_test.go index 77752be0..f0c98422 100644 --- a/internal/datasystem/fdv2_store_test.go +++ b/internal/datasystem/fdv2_store_test.go @@ -1,11 +1,12 @@ package datasystem import ( + "testing" + "github.com/launchdarkly/go-sdk-common/v3/ldlogtest" "github.com/launchdarkly/go-server-sdk/v7/subsystems/ldstoreimpl" "github.com/launchdarkly/go-server-sdk/v7/subsystems/ldstoretypes" "github.com/stretchr/testify/assert" - "testing" ) func TestStore_New(t *testing.T) { diff --git a/subsystems/data_source.go b/subsystems/data_source.go index e7efb31e..93164537 100644 --- a/subsystems/data_source.go +++ b/subsystems/data_source.go @@ -2,8 +2,9 @@ package subsystems import ( "context" - "github.com/launchdarkly/go-server-sdk/v7/subsystems/ldstoretypes" "io" + + "github.com/launchdarkly/go-server-sdk/v7/subsystems/ldstoretypes" ) // DataSource describes the interface for an object that receives feature flag data. From 6bbf09e5bcfa47c6b4eb4b1ab8a059193609652e Mon Sep 17 00:00:00 2001 From: Casey Waldren Date: Tue, 10 Sep 2024 15:33:41 -0700 Subject: [PATCH 19/62] more store unit tests --- internal/datasystem/fdv2_datasystem.go | 2 +- internal/datasystem/fdv2_store.go | 25 ++- internal/datasystem/fdv2_store_test.go | 252 ++++++++++++++++++++++++- 3 files changed, 268 insertions(+), 11 deletions(-) diff --git a/internal/datasystem/fdv2_datasystem.go b/internal/datasystem/fdv2_datasystem.go index 9bcecb92..0be33a7d 100644 --- a/internal/datasystem/fdv2_datasystem.go +++ b/internal/datasystem/fdv2_datasystem.go @@ -133,7 +133,7 @@ func (f *FDv2) launchTask(task func()) { func (f *FDv2) run(ctx context.Context, closeWhenReady chan struct{}) { payloadVersion := f.runInitializers(ctx, closeWhenReady) - if f.store.Mirroring() { + if f.dataStoreStatusProvider.IsStatusMonitoringEnabled() { f.launchTask(func() { f.runPersistentStoreOutageRecovery(ctx, f.dataStoreStatusProvider.AddStatusListener()) }) diff --git a/internal/datasystem/fdv2_store.go b/internal/datasystem/fdv2_store.go index 3eeeb87f..68ffd9e2 100644 --- a/internal/datasystem/fdv2_store.go +++ b/internal/datasystem/fdv2_store.go @@ -71,7 +71,7 @@ type Store struct { // if it was the latest". refreshed bool - // Protects the memory and refreshed fields. 
+ // Protects the refreshed, persistentStore, persistentStoreMode, and active fields. mu sync.RWMutex loggers ldlog.Loggers @@ -93,6 +93,8 @@ func NewStore(loggers ldlog.Loggers) *Store { // Close closes the store. If there is a persistent store configured, it will be closed. func (s *Store) Close() error { + s.mu.Lock() + defer s.mu.Unlock() if s.persistentStore != nil { return s.persistentStore.Close() @@ -122,17 +124,20 @@ func (s *Store) DataStatus() DataStatus { } -// Mirroring returns true if data is being mirrored to a persistent store. -func (s *Store) Mirroring() bool { +// mirroring returns true if data is being mirrored to a persistent store. +func (s *Store) mirroring() bool { return s.persistentStore != nil && s.persistentStoreMode == subsystems.StoreModeReadWrite } // nolint:revive // Standard DataSourceUpdateSink method func (s *Store) Init(allData []ldstoretypes.Collection) bool { + s.mu.RLock() + defer s.mu.RUnlock() + // TXNS-PS: Requirement 1.3.3, must apply updates to in-memory before the persistent Store. // TODO: handle errors from initializing the memory or persistent stores. _ = s.memoryStore.Init(allData) - if s.Mirroring() { + if s.mirroring() { _ = s.persistentStore.Init(allData) // TODO: insert in topo-sort order } return true @@ -140,6 +145,9 @@ func (s *Store) Init(allData []ldstoretypes.Collection) bool { // nolint:revive // Standard DataSourceUpdateSink method func (s *Store) Upsert(kind ldstoretypes.DataKind, key string, item ldstoretypes.ItemDescriptor) bool { + s.mu.RLock() + defer s.mu.RUnlock() + var ( memErr error persErr error @@ -148,7 +156,7 @@ func (s *Store) Upsert(kind ldstoretypes.DataKind, key string, item ldstoretypes // TXNS-PS: Requirement 1.3.3, must apply updates to in-memory before the persistent store. _, memErr = s.memoryStore.Upsert(kind, key, item) - if s.Mirroring() { + if s.mirroring() { _, persErr = s.persistentStore.Upsert(kind, key, item) } return memErr == nil && persErr == nil @@ -166,6 +174,8 @@ func (s *Store) UpdateStatus(newState interfaces.DataSourceState, newError inter // nolint:revive // Standard DataSourceUpdateSink method func (s *Store) GetDataStoreStatusProvider() interfaces.DataStoreStatusProvider { + s.mu.RLock() + defer s.mu.RUnlock() return s.persistentStoreStatusProvider } @@ -179,7 +189,6 @@ func (s *Store) SwapToPersistent(persistent subsystems.DataStore, mode subsystem s.persistentStore = persistent s.persistentStoreMode = mode s.persistentStoreStatusProvider = statusProvider s.active = s.persistentStore - s.refreshed = false } func (s *Store) SwapToMemory(isRefreshed bool) { @@ -190,7 +199,11 @@ func (s *Store) SwapToMemory(isRefreshed bool) { } func (s *Store) Commit() error { - if s.DataStatus() == Refreshed && s.Mirroring() { + s.mu.RLock() + defer s.mu.RUnlock() + + // Note: DataStatus() will also take a read lock.
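+ // (A caution, by way of illustration: sync.RWMutex read locks are not reentrant-safe,
+ // so if another goroutine calls Lock between this RLock and the one DataStatus takes,
+ // the nested acquisition can block and deadlock. A hypothetical dataStatusLocked()
+ // variant that assumes the lock is already held would avoid the double acquisition.)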
+ if s.DataStatus() == Refreshed && s.mirroring() { flags, err := s.memoryStore.GetAll(datakinds.Features) if err != nil { return err diff --git a/internal/datasystem/fdv2_store_test.go b/internal/datasystem/fdv2_store_test.go index f0c98422..81e81b0b 100644 --- a/internal/datasystem/fdv2_store_test.go +++ b/internal/datasystem/fdv2_store_test.go @@ -1,7 +1,13 @@ package datasystem import ( + "errors" + "github.com/launchdarkly/go-server-sdk/v7/subsystems" + "github.com/stretchr/testify/require" + "math/rand" + "sync" "testing" + "time" "github.com/launchdarkly/go-sdk-common/v3/ldlogtest" "github.com/launchdarkly/go-server-sdk/v7/subsystems/ldstoreimpl" "github.com/launchdarkly/go-server-sdk/v7/subsystems/ldstoretypes" "github.com/stretchr/testify/assert" ) @@ -53,10 +59,112 @@ func TestStore_NoPersistence_MemoryStoreInitialized_DataStatus(t *testing.T) { } func TestStore_NoPersistence_Commit_NoCrashesCaused(t *testing.T) { - logCapture := ldlogtest.NewMockLog() - store := NewStore(logCapture.Loggers) - defer store.Close() - assert.NoError(t, store.Commit()) + +} + +func TestStore_Commit(t *testing.T) { + t.Run("no persistent store doesn't cause an error", func(t *testing.T) { + logCapture := ldlogtest.NewMockLog() + store := NewStore(logCapture.Loggers) + defer store.Close() + assert.NoError(t, store.Commit()) + }) + + t.Run("refreshed memory items are copied to persistent store in r/w mode", func(t *testing.T) { + logCapture := ldlogtest.NewMockLog() + store := NewStore(logCapture.Loggers) + defer store.Close() + + initPayload := []ldstoretypes.Collection{ + {Kind: ldstoreimpl.Features(), Items: []ldstoretypes.KeyedItemDescriptor{ + {Key: "foo", Item: ldstoretypes.ItemDescriptor{Version: 1}}, + }}, + {Kind: ldstoreimpl.Segments(), Items: []ldstoretypes.KeyedItemDescriptor{ + {Key: "bar", Item: ldstoretypes.ItemDescriptor{Version: 2}}, + }}, + } + + assert.True(t, store.Init(initPayload)) + + spy := &fakeStore{} + // This is kind of awkward, but the idea is to simulate a data store outage. Therefore, we can't have the + // persistent store already configured at the start of the test (or else the data would have been inserted + // automatically.) This way, it should be empty before the commit, and we'll assert that fact. + store.SwapToPersistent(spy, subsystems.StoreModeReadWrite, nil) + // Need to set refreshed == true, otherwise nothing will be committed. This stops stale data from polluting + // the database. + store.SwapToMemory(true) + + require.Empty(t, spy.initPayload) + + require.NoError(t, store.Commit()) + + assert.Equal(t, initPayload, spy.initPayload) + }) + + t.Run("stale memory items are not copied to persistent store in r/w mode", func(t *testing.T) { + logCapture := ldlogtest.NewMockLog() + store := NewStore(logCapture.Loggers) + defer store.Close() + + initPayload := []ldstoretypes.Collection{ + {Kind: ldstoreimpl.Features(), Items: []ldstoretypes.KeyedItemDescriptor{ + {Key: "foo", Item: ldstoretypes.ItemDescriptor{Version: 1}}, + }}, + {Kind: ldstoreimpl.Segments(), Items: []ldstoretypes.KeyedItemDescriptor{ + {Key: "bar", Item: ldstoretypes.ItemDescriptor{Version: 2}}, + }}, + } + + assert.True(t, store.Init(initPayload)) + + spy := &fakeStore{} + // This is kind of awkward, but the idea is to simulate a data store outage. Therefore, we can't have the + // persistent store already configured at the start of the test (or else the data would have been inserted + // automatically.) This way, it should be empty before the commit, and we'll assert that fact.
+ store.SwapToPersistent(spy, subsystems.StoreModeReadWrite, nil) + + // Need to set refreshed == false, which should make Commit a no-op. + store.SwapToMemory(false) + + require.Empty(t, spy.initPayload) + + require.NoError(t, store.Commit()) + + assert.Empty(t, spy.initPayload) + }) + + t.Run("refreshed memory items are not copied to persistent store in r-only mode", func(t *testing.T) { + logCapture := ldlogtest.NewMockLog() + store := NewStore(logCapture.Loggers) + defer store.Close() + + initPayload := []ldstoretypes.Collection{ + {Kind: ldstoreimpl.Features(), Items: []ldstoretypes.KeyedItemDescriptor{ + {Key: "foo", Item: ldstoretypes.ItemDescriptor{Version: 1}}, + }}, + {Kind: ldstoreimpl.Segments(), Items: []ldstoretypes.KeyedItemDescriptor{ + {Key: "bar", Item: ldstoretypes.ItemDescriptor{Version: 2}}, + }}, + } + + assert.True(t, store.Init(initPayload)) + + spy := &fakeStore{} + // This is kind of awkward, but the idea is to simulate a data store outage. Therefore, we can't have the + // persistent store already configured at the start of the test (or else the data would have been inserted + // automatically.) This way, it should be empty before the commit, and we'll assert that fact. + store.SwapToPersistent(spy, subsystems.StoreModeRead, nil) + // Need to set refreshed == true, otherwise nothing will be committed. This stops stale data from polluting + // the database. + store.SwapToMemory(true) + + require.Empty(t, spy.initPayload) + + require.NoError(t, store.Commit()) + + assert.Empty(t, spy.initPayload) + }) } func TestStore_GetActive(t *testing.T) { @@ -78,4 +186,140 @@ func TestStore_GetActive(t *testing.T) { assert.NoError(t, err) assert.Equal(t, 1, foo.Version) }) + + t.Run("persistent store is active if configured", func(t *testing.T) { + logCapture := ldlogtest.NewMockLog() + store := NewStore(logCapture.Loggers) + defer store.Close() + + assert.True(t, store.Init([]ldstoretypes.Collection{ + {Kind: ldstoreimpl.Features(), Items: []ldstoretypes.KeyedItemDescriptor{ + {Key: "foo", Item: ldstoretypes.ItemDescriptor{Version: 1}}, + }}, + })) + + store.SwapToPersistent(&fakeStore{}, subsystems.StoreModeReadWrite, nil) + + _, err := store.Get(ldstoreimpl.Features(), "foo") + assert.Equal(t, errImAPersistentStore, err) + }) + + t.Run("active store swaps from persistent to memory", func(t *testing.T) { + logCapture := ldlogtest.NewMockLog() + store := NewStore(logCapture.Loggers) + defer store.Close() + + assert.True(t, store.Init([]ldstoretypes.Collection{ + {Kind: ldstoreimpl.Features(), Items: []ldstoretypes.KeyedItemDescriptor{ + {Key: "foo", Item: ldstoretypes.ItemDescriptor{Version: 1}}, + }}, + })) + + store.SwapToPersistent(&fakeStore{}, subsystems.StoreModeReadWrite, nil) + + _, err := store.Get(ldstoreimpl.Features(), "foo") + assert.Equal(t, errImAPersistentStore, err) + + store.SwapToMemory(false) + + foo, err := store.Get(ldstoreimpl.Features(), "foo") + assert.NoError(t, err) + assert.Equal(t, 1, foo.Version) + }) +} + +func TestStore_Concurrency(t *testing.T) { + t.Run("methods using the active store", func(t *testing.T) { + logCapture := ldlogtest.NewMockLog() + store := NewStore(logCapture.Loggers) + defer store.Close() + + var wg sync.WaitGroup + wg.Add(1) + go func() { + defer wg.Done() + for i := 0; i < 100; i++ { + store.SwapToMemory(true) + time.Sleep(time.Duration(rand.Intn(10)) * time.Millisecond) + store.SwapToPersistent(&fakeStore{}, subsystems.StoreModeReadWrite, nil) + time.Sleep(time.Duration(rand.Intn(10)) * time.Millisecond) + } + }() + wg.Add(1) + go func() { + defer wg.Done() + for i := 0; i < 100; i++ { + _ = store.DataStatus() + time.Sleep(time.Duration(rand.Intn(10)) * time.Millisecond) + } + }() + wg.Add(1) + go func() { + defer wg.Done() + for i := 0; i < 100; i++ { + _, _ = store.Get(ldstoreimpl.Features(), "foo") + time.Sleep(time.Duration(rand.Intn(10)) * time.Millisecond) + } + }() + + wg.Add(1) + go func() { + defer wg.Done() + for i := 0; i < 100; i++ { + _, _ = store.GetAll(ldstoreimpl.Features()) + time.Sleep(time.Duration(rand.Intn(10)) * time.Millisecond) + } + }() + wg.Add(1) + go func() { + defer wg.Done() + for i := 0; i < 100; i++ { + _ = store.IsInitialized() + time.Sleep(time.Duration(rand.Intn(10)) * time.Millisecond) + } + }() + wg.Add(1) + go func() { + defer wg.Done() + for i := 0; i < 100; i++ { + _ = store.Init([]ldstoretypes.Collection{}) + time.Sleep(time.Duration(rand.Intn(10)) * time.Millisecond) + } + }() + + wg.Wait() + }) +} + +type fakeStore struct { + initPayload []ldstoretypes.Collection +} + +var errImAPersistentStore = errors.New("i'm a persistent store") + +func (f *fakeStore) GetAll(kind ldstoretypes.DataKind) ([]ldstoretypes.KeyedItemDescriptor, error) { + return nil, nil +} + +func (f *fakeStore) Get(kind ldstoretypes.DataKind, key string) (ldstoretypes.ItemDescriptor, error) { + return ldstoretypes.ItemDescriptor{}, errImAPersistentStore +} + +func (f *fakeStore) IsInitialized() bool { + return false +} + +func (f *fakeStore) Init(allData []ldstoretypes.Collection) error { + f.initPayload = allData + return nil +} + +func (f *fakeStore) Upsert(kind ldstoretypes.DataKind, key string, item ldstoretypes.ItemDescriptor) (bool, error) { + return false, nil +} + +func (f *fakeStore) IsStatusMonitoringEnabled() bool { + return false +} + +func (f *fakeStore) Close() error { + return nil } From 64b8074c6d44f8fed95a6bb746232da42df0a4a6 Mon Sep 17 00:00:00 2001 From: Casey Waldren Date: Tue, 10 Sep 2024 15:40:49 -0700 Subject: [PATCH 20/62] revert changes to StreamingDataSourceBuilder, and make a V2 struct instead --- .../data_system_configuration_builder.go | 2 +- ldcomponents/streaming_data_source_builder.go | 32 +----- .../streaming_data_source_builder_v2.go | 104 ++++++++++++++++++ 3 files changed, 110 insertions(+), 28 deletions(-) create mode 100644 ldcomponents/streaming_data_source_builder_v2.go diff --git a/ldcomponents/data_system_configuration_builder.go b/ldcomponents/data_system_configuration_builder.go index aaec4955..840c62dc 100644 --- a/ldcomponents/data_system_configuration_builder.go +++ b/ldcomponents/data_system_configuration_builder.go @@ -19,7 +19,7 @@ type DataSystemConfigurationBuilder struct { func DataSystem() *DataSystemConfigurationBuilder { return &DataSystemConfigurationBuilder{ - primarySyncBuilder: toSynchronizer{StreamingDataSource().V2()}, + primarySyncBuilder: toSynchronizer{StreamingDataSourceV2()}, } } diff --git a/ldcomponents/streaming_data_source_builder.go b/ldcomponents/streaming_data_source_builder.go index 166d1ccc..ade67b04 100644 --- a/ldcomponents/streaming_data_source_builder.go +++ b/ldcomponents/streaming_data_source_builder.go @@ -4,8 +4,6 @@ import ( "errors" "time" - "github.com/launchdarkly/go-server-sdk/v7/internal/datasourcev2" - "github.com/launchdarkly/go-sdk-common/v3/ldvalue" "github.com/launchdarkly/go-server-sdk/v7/internal/datasource" "github.com/launchdarkly/go-server-sdk/v7/internal/endpoints" @@ -24,7 +22,6 @@ const DefaultInitialReconnectDelay = time.Second type StreamingDataSourceBuilder struct { initialReconnectDelay time.Duration
filterKey ldvalue.OptionalString - protocolVersion int } // StreamingDataSource returns a configurable factory for using streaming mode to get feature flag data. @@ -40,7 +37,6 @@ type StreamingDataSourceBuilder struct { func StreamingDataSource() *StreamingDataSourceBuilder { return &StreamingDataSourceBuilder{ initialReconnectDelay: DefaultInitialReconnectDelay, - protocolVersion: 1, } } @@ -75,16 +71,6 @@ func (b *StreamingDataSourceBuilder) PayloadFilter(filterKey string) *StreamingD return b } -// V2 uses the next generation streaming protocol. This method is not stable, and not subject to any backwards -// compatibility guarantees or semantic versioning. -// It is not suitable for production usage. -// Do not use it. -// You have been warned. -func (b *StreamingDataSourceBuilder) V2() *StreamingDataSourceBuilder { - b.protocolVersion = 2 - return b -} - // Build is called internally by the SDK. func (b *StreamingDataSourceBuilder) Build(context subsystems.ClientContext) (subsystems.DataSource, error) { filterKey, wasSet := b.filterKey.Get() @@ -101,19 +87,11 @@ func (b *StreamingDataSourceBuilder) Build(context subsystems.ClientContext) (su InitialReconnectDelay: b.initialReconnectDelay, FilterKey: filterKey, } - if b.protocolVersion == 1 { - return datasource.NewStreamProcessor( - context, - context.GetDataSourceUpdateSink(), - cfg, - ), nil - } else { - return datasourcev2.NewStreamProcessor( - context, - context.GetDataSourceUpdateSink(), - cfg, - ), nil - } + return datasource.NewStreamProcessor( + context, + context.GetDataSourceUpdateSink(), + cfg, + ), nil } // DescribeConfiguration is used internally by the SDK to inspect the configuration. diff --git a/ldcomponents/streaming_data_source_builder_v2.go b/ldcomponents/streaming_data_source_builder_v2.go new file mode 100644 index 00000000..73d6c085 --- /dev/null +++ b/ldcomponents/streaming_data_source_builder_v2.go @@ -0,0 +1,104 @@ +package ldcomponents + +import ( + "errors" + "github.com/launchdarkly/go-sdk-common/v3/ldvalue" + "github.com/launchdarkly/go-server-sdk/v7/internal/datasource" + "github.com/launchdarkly/go-server-sdk/v7/internal/datasourcev2" + "github.com/launchdarkly/go-server-sdk/v7/internal/endpoints" + "github.com/launchdarkly/go-server-sdk/v7/subsystems" + "time" +) + +// StreamingDataSourceBuilderV2 provides methods for configuring the streaming data source in v2 mode. +// +// V2 uses the next generation streaming protocol. This method is not stable, and not subject to any backwards +// compatibility guarantees or semantic versioning. It is not suitable for production usage. +// +// Do not use it. +// You have been warned. +type StreamingDataSourceBuilderV2 struct { + initialReconnectDelay time.Duration + filterKey ldvalue.OptionalString +} + +// StreamingDataSourceV2 returns a configurable factory for using streaming mode to get feature flag data. +// +// By default, the SDK uses a streaming connection to receive feature flag data from LaunchDarkly. To use the +// default behavior, you do not need to call this method. 
However, if you want to customize the behavior of
+// the connection, call this method to obtain a builder, set its properties with the [StreamingDataSourceBuilderV2]
+// methods, and then store it in the DataSource field of [github.com/launchdarkly/go-server-sdk/v7.Config]:
+//
+//	config := ld.Config{
+//		DataSource: ldcomponents.StreamingDataSourceV2().InitialReconnectDelay(500 * time.Millisecond),
+//	}
+func StreamingDataSourceV2() *StreamingDataSourceBuilderV2 {
+	return &StreamingDataSourceBuilderV2{
+		initialReconnectDelay: DefaultInitialReconnectDelay,
+	}
+}
+
+// InitialReconnectDelay sets the initial reconnect delay for the streaming connection.
+//
+// The streaming service uses a backoff algorithm (with jitter) every time the connection needs to be
+// reestablished. The delay for the first reconnection will start near this value, and then increase
+// exponentially for any subsequent connection failures.
+//
+// The default value is [DefaultInitialReconnectDelay].
+func (b *StreamingDataSourceBuilderV2) InitialReconnectDelay(
+	initialReconnectDelay time.Duration,
+) *StreamingDataSourceBuilderV2 {
+	if initialReconnectDelay <= 0 {
+		b.initialReconnectDelay = DefaultInitialReconnectDelay
+	} else {
+		b.initialReconnectDelay = initialReconnectDelay
+	}
+	return b
+}
+
+// PayloadFilter sets the payload filter key for this streaming connection. The filter key
+// cannot be an empty string.
+//
+// By default, the SDK is able to evaluate all flags in an environment. If this is undesirable -
+// for example, the environment contains thousands of flags, but this application only needs to evaluate
+// a smaller, known subset - then a payload filter may be set up in LaunchDarkly, and the filter's key specified here.
+//
+// Evaluations for flags that aren't part of the filtered environment will return default values.
+func (b *StreamingDataSourceBuilderV2) PayloadFilter(filterKey string) *StreamingDataSourceBuilderV2 {
+	b.filterKey = ldvalue.NewOptionalString(filterKey)
+	return b
+}
+
+// Build is called internally by the SDK.
+func (b *StreamingDataSourceBuilderV2) Build(context subsystems.ClientContext) (subsystems.DataSource, error) {
+	filterKey, wasSet := b.filterKey.Get()
+	if wasSet && filterKey == "" {
+		return nil, errors.New("payload filter key cannot be an empty string")
+	}
+	configuredBaseURI := endpoints.SelectBaseURI(
+		context.GetServiceEndpoints(),
+		endpoints.StreamingService,
+		context.GetLogging().Loggers,
+	)
+	cfg := datasource.StreamConfig{
+		URI:                   configuredBaseURI,
+		InitialReconnectDelay: b.initialReconnectDelay,
+		FilterKey:             filterKey,
+	}
+	return datasourcev2.NewStreamProcessor(
+		context,
+		context.GetDataSourceUpdateSink(),
+		cfg,
+	), nil
+}
+
+// DescribeConfiguration is used internally by the SDK to inspect the configuration.
+func (b *StreamingDataSourceBuilderV2) DescribeConfiguration(context subsystems.ClientContext) ldvalue.Value {
+	return ldvalue.ObjectBuild().
+		SetBool("streamingDisabled", false).
+		SetBool("customStreamURI",
+			endpoints.IsCustom(context.GetServiceEndpoints(), endpoints.StreamingService)).
+		Set("reconnectTimeMillis", durationToMillisValue(b.initialReconnectDelay)).
+		SetBool("usingRelayDaemon", false).
+ Build() +} From 460a1fbc18ea004ba248ab1557c577e91cbbea7c Mon Sep 17 00:00:00 2001 From: Casey Waldren Date: Tue, 10 Sep 2024 15:41:51 -0700 Subject: [PATCH 21/62] remove now-unnecessary ToSynchronizer converter --- .../data_system_configuration_builder.go | 25 +------------------ .../streaming_data_source_builder_v2.go | 2 +- 2 files changed, 2 insertions(+), 25 deletions(-) diff --git a/ldcomponents/data_system_configuration_builder.go b/ldcomponents/data_system_configuration_builder.go index 840c62dc..1ab761b5 100644 --- a/ldcomponents/data_system_configuration_builder.go +++ b/ldcomponents/data_system_configuration_builder.go @@ -3,8 +3,6 @@ package ldcomponents import ( "errors" "fmt" - "reflect" - ss "github.com/launchdarkly/go-server-sdk/v7/subsystems" ) @@ -19,29 +17,8 @@ type DataSystemConfigurationBuilder struct { func DataSystem() *DataSystemConfigurationBuilder { return &DataSystemConfigurationBuilder{ - primarySyncBuilder: toSynchronizer{StreamingDataSourceV2()}, - } -} - -type toSynchronizer struct { - configurer ss.ComponentConfigurer[ss.DataSource] -} - -func ToSynchronizer(configurer ss.ComponentConfigurer[ss.DataSource]) ss.ComponentConfigurer[ss.DataSynchronizer] { - return toSynchronizer{configurer} -} - -func (t toSynchronizer) Build(ctx ss.ClientContext) (ss.DataSynchronizer, error) { - datasource, err := t.configurer.Build(ctx) - if err != nil { - return nil, err + primarySyncBuilder: StreamingDataSourceV2(), } - synchronizer, ok := datasource.(ss.DataSynchronizer) - if !ok { - panic("programmer error: " + reflect.TypeOf(datasource).Elem().Name() + " cannot be upgraded to subsystems.DataSynchronizer") - } - return synchronizer, nil - } func DaemonModeV2(store ss.ComponentConfigurer[ss.DataStore]) *DataSystemConfigurationBuilder { diff --git a/ldcomponents/streaming_data_source_builder_v2.go b/ldcomponents/streaming_data_source_builder_v2.go index 73d6c085..aaa42dce 100644 --- a/ldcomponents/streaming_data_source_builder_v2.go +++ b/ldcomponents/streaming_data_source_builder_v2.go @@ -70,7 +70,7 @@ func (b *StreamingDataSourceBuilderV2) PayloadFilter(filterKey string) *Streamin } // Build is called internally by the SDK. 
-func (b *StreamingDataSourceBuilderV2) Build(context subsystems.ClientContext) (subsystems.DataSource, error) { +func (b *StreamingDataSourceBuilderV2) Build(context subsystems.ClientContext) (subsystems.DataSynchronizer, error) { filterKey, wasSet := b.filterKey.Get() if wasSet && filterKey == "" { return nil, errors.New("payload filter key cannot be an empty string") From dac8e9ddb31d1608bb4511fec2619650f37e041a Mon Sep 17 00:00:00 2001 From: Casey Waldren Date: Tue, 10 Sep 2024 16:51:10 -0700 Subject: [PATCH 22/62] make v2 data sources implement Synchronizer --- internal/datasourcev2/polling_data_source.go | 20 +++- .../datasourcev2/streaming_data_source.go | 22 ++-- internal/datasystem/fdv2_datasystem.go | 2 +- internal/datasystem/fdv2_store_test.go | 5 +- .../data_system_configuration_builder.go | 59 ++++++--- ldcomponents/polling_data_source_builder.go | 26 +--- .../polling_data_source_builder_v2.go | 112 ++++++++++++++++++ .../streaming_data_source_builder_v2.go | 2 +- subsystems/data_source.go | 29 ++++- 9 files changed, 218 insertions(+), 59 deletions(-) create mode 100644 ldcomponents/polling_data_source_builder_v2.go diff --git a/internal/datasourcev2/polling_data_source.go b/internal/datasourcev2/polling_data_source.go index dde2c372..e5910477 100644 --- a/internal/datasourcev2/polling_data_source.go +++ b/internal/datasourcev2/polling_data_source.go @@ -1,6 +1,7 @@ package datasourcev2 import ( + "context" "sync" "time" @@ -68,8 +69,23 @@ func newPollingProcessor( return pp } -//nolint:revive // no doc comment for standard method -func (pp *PollingProcessor) Start(closeWhenReady chan<- struct{}) { +//nolint:revive // DataInitializer method. +func (pp *PollingProcessor) Name() string { + return "PollingDataSourceV2" +} + +//nolint:revive // DataInitializer method. +func (pp *PollingProcessor) Fetch(ctx context.Context) (*subsystems.InitialPayload, error) { + // TODO: ideally, the Request method would take a context so it could be interrupted. + allData, _, err := pp.requester.Request() + if err != nil { + return nil, err + } + return &subsystems.InitialPayload{Data: allData, Authoritative: true, Version: nil}, nil +} + +//nolint:revive // DataSynchronizer method. +func (pp *PollingProcessor) Sync(closeWhenReady chan<- struct{}, payloadVersion *int) { pp.loggers.Infof("Starting LaunchDarkly polling with interval: %+v", pp.pollInterval) ticker := newTickerWithInitialTick(pp.pollInterval) diff --git a/internal/datasourcev2/streaming_data_source.go b/internal/datasourcev2/streaming_data_source.go index 48318e6a..c2c99a8d 100644 --- a/internal/datasourcev2/streaming_data_source.go +++ b/internal/datasourcev2/streaming_data_source.go @@ -1,6 +1,7 @@ package datasourcev2 import ( + "context" "encoding/json" "errors" "net/http" @@ -114,23 +115,28 @@ func NewStreamProcessor( return sp } +//nolint:revive // DataInitializer method. +func (sp *StreamProcessor) Name() string { + return "StreamingDataSourceV2" +} + +func (sp *StreamProcessor) Fetch(ctx context.Context) (*subsystems.InitialPayload, error) { + // TODO: there's no point in implementing this, as it would be highly inefficient to open a streaming + // connection just to get a PUT and then close it again. 
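+	// Note: initialization-time fetches are instead served by a polling initializer (see
+	// ldcomponents.StreamingMode, which pairs PollingDataSourceV2().AsInitializer() with this
+	// synchronizer), so Fetch exists here mainly to satisfy the DataInitializer interface.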
+ return nil, errors.New("fetch capability not implemented") +} + //nolint:revive // no doc comment for standard method func (sp *StreamProcessor) IsInitialized() bool { return sp.isInitialized.Get() } -//nolint:revive // no doc comment for standard method -func (sp *StreamProcessor) Start(closeWhenReady chan<- struct{}) { +//nolint:revive // DataSynchronizer method. +func (sp *StreamProcessor) Sync(closeWhenReady chan<- struct{}, payloadVersion *int) { sp.loggers.Info("Starting LaunchDarkly streaming connection") go sp.subscribe(closeWhenReady) } -// Sync satisfies the new Synchronizer interface, which is similar to the old DataSource interface, but -// can take a payload version. For now, just ignore the payload version. -func (sp *StreamProcessor) Sync(closeWhenReady chan struct{}, payloadVersion *int) { - sp.Start(closeWhenReady) -} - // TODO: Remove this nolint once we have a better implementation. // //nolint:gocyclo,godox // this function is a stepping stone. It will get better over time. diff --git a/internal/datasystem/fdv2_datasystem.go b/internal/datasystem/fdv2_datasystem.go index 0be33a7d..5d0625a7 100644 --- a/internal/datasystem/fdv2_datasystem.go +++ b/internal/datasystem/fdv2_datasystem.go @@ -172,7 +172,7 @@ func (f *FDv2) runInitializers(ctx context.Context, closeWhenReady chan struct{} continue } f.store.Init(payload.Data) - f.store.SwapToMemory(payload.Fresh) + f.store.SwapToMemory(payload.Authoritative) f.readyOnce.Do(func() { close(closeWhenReady) }) diff --git a/internal/datasystem/fdv2_store_test.go b/internal/datasystem/fdv2_store_test.go index 81e81b0b..ceb17e86 100644 --- a/internal/datasystem/fdv2_store_test.go +++ b/internal/datasystem/fdv2_store_test.go @@ -2,13 +2,14 @@ package datasystem import ( "errors" - "github.com/launchdarkly/go-server-sdk/v7/subsystems" - "github.com/stretchr/testify/require" "math/rand" "sync" "testing" "time" + "github.com/launchdarkly/go-server-sdk/v7/subsystems" + "github.com/stretchr/testify/require" + "github.com/launchdarkly/go-sdk-common/v3/ldlogtest" "github.com/launchdarkly/go-server-sdk/v7/subsystems/ldstoreimpl" "github.com/launchdarkly/go-server-sdk/v7/subsystems/ldstoretypes" diff --git a/ldcomponents/data_system_configuration_builder.go b/ldcomponents/data_system_configuration_builder.go index 1ab761b5..70f54959 100644 --- a/ldcomponents/data_system_configuration_builder.go +++ b/ldcomponents/data_system_configuration_builder.go @@ -15,32 +15,53 @@ type DataSystemConfigurationBuilder struct { config ss.DataSystemConfiguration } +// DataSystem returns a configuration builder that is pre-configured with LaunchDarkly's recommended +// data acquisition strategy. It is equivalent to StreamingMode(). +// +// In this mode, the SDK efficiently streams flag/segment data in the background, +// allowing evaluations to operate on the latest data with no additional latency. func DataSystem() *DataSystemConfigurationBuilder { - return &DataSystemConfigurationBuilder{ - primarySyncBuilder: StreamingDataSourceV2(), - } + return StreamingMode() } -func DaemonModeV2(store ss.ComponentConfigurer[ss.DataStore]) *DataSystemConfigurationBuilder { - return DataSystem().Initializers().Synchronizers(nil, nil).DataStore(store, ss.StoreModeRead) +// UnconfiguredDataSystem returns a configuration builder with no options set. It is suitable for +// building custom use-cases. 
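+//
+// As a purely illustrative sketch (not an endorsement of any particular setup), a custom
+// configuration might pair a polling initializer with a single streaming synchronizer:
+//
+//	config := ld.Config{
+//		DataSystem: ldcomponents.UnconfiguredDataSystem().
+//			Initializers(ldcomponents.PollingDataSourceV2().AsInitializer()).
+//			Synchronizers(ldcomponents.StreamingDataSourceV2(), nil),
+//	}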
+func UnconfiguredDataSystem() *DataSystemConfigurationBuilder {
+	return &DataSystemConfigurationBuilder{}
+}

-func Offline() *DataSystemConfigurationBuilder {
-	return DataSystem().Initializers().Synchronizers(nil, nil).Offline(true)
+// StreamingMode configures the SDK to efficiently stream flag/segment data in the background,
+// allowing evaluations to operate on the latest data with no additional latency.
+func StreamingMode() *DataSystemConfigurationBuilder {
+	return UnconfiguredDataSystem().
+		Initializers(PollingDataSourceV2().AsInitializer()).Synchronizers(StreamingDataSourceV2(), PollingDataSourceV2())
+}

-// func PersistentStoreV2(store ss.ComponentConfigurer[ss.DataStore]) *DataSystemConfigurationBuilder {
-//	return StreamingDataSourceV2().DataStore(store, ss.StoreModeReadWrite)
-// }
-//
-// func PollingDataSourceV2() *DataSystemConfigurationBuilder {
-//	return DataSystem().Synchronizers(PollingDataSource().V2(), nil)
-// }
-//
-// func StreamingDataSourceV2() *DataSystemConfigurationBuilder {
-//	return DataSystem().Initializers(PollingDataSource().V2()).Synchronizers(StreamingDataSource().V2(),
-//		PollingDataSource().V2())
-// }
+// PollingMode configures the SDK to regularly poll an endpoint for flag/segment data in the background.
+// This is less efficient than streaming, but may be necessary in some network environments.
+func PollingMode() *DataSystemConfigurationBuilder {
+	return UnconfiguredDataSystem().Synchronizers(PollingDataSourceV2(), nil)
+}
+
+// DaemonMode configures the SDK to read from a persistent store integration that is populated by the Relay Proxy
+// or other SDKs. The SDK will not connect to LaunchDarkly. In this mode, the SDK never writes to the data store.
+func DaemonMode(store ss.ComponentConfigurer[ss.DataStore]) *DataSystemConfigurationBuilder {
+	return UnconfiguredDataSystem().DataStore(store, ss.StoreModeRead)
+}
+
+// PersistentStoreMode is similar to the default DataSystem configuration, with the addition of a
+// persistent store integration. Before data has arrived from the streaming connection, the SDK is able to
+// evaluate flags using data from the persistent store. Once data has arrived from the streaming connection, the SDK
+// will no longer read from the persistent store, although it will keep it up-to-date.
+func PersistentStoreMode(store ss.ComponentConfigurer[ss.DataStore]) *DataSystemConfigurationBuilder {
+	return StreamingMode().DataStore(store, ss.StoreModeReadWrite)
+}
+
+// Offline configures the SDK to evaluate flags using only the default values defined in the application code. No
+// outbound connections will be made by the SDK.
+func Offline() *DataSystemConfigurationBuilder {
+	return UnconfiguredDataSystem().Offline(true)
+}

 func (d *DataSystemConfigurationBuilder) DataStore(store ss.ComponentConfigurer[ss.DataStore],
 	storeMode ss.StoreMode) *DataSystemConfigurationBuilder {
 	d.storeBuilder = store
diff --git a/ldcomponents/polling_data_source_builder.go b/ldcomponents/polling_data_source_builder.go
index 1570eca0..1bb3f267 100644
--- a/ldcomponents/polling_data_source_builder.go
+++ b/ldcomponents/polling_data_source_builder.go
@@ -4,8 +4,6 @@ import (
 	"errors"
 	"time"
 
-	"github.com/launchdarkly/go-server-sdk/v7/internal/datasourcev2"
-
 	"github.com/launchdarkly/go-sdk-common/v3/ldvalue"
 	"github.com/launchdarkly/go-server-sdk/v7/internal/datasource"
 	"github.com/launchdarkly/go-server-sdk/v7/internal/endpoints"
@@ -22,9 +20,8 @@ const DefaultPollInterval = 30 * time.Second
 
 // PollingDataSourceBuilder provides methods for configuring the polling data source.
 //
 // See [PollingDataSource] for usage.
type PollingDataSourceBuilder struct {
-	pollInterval    time.Duration
-	filterKey       ldvalue.OptionalString
-	protocolVersion int
+	pollInterval time.Duration
+	filterKey    ldvalue.OptionalString
 }
 
 // PollingDataSource returns a configurable factory for using polling mode to get feature flag data.
@@ -43,8 +40,7 @@ type PollingDataSourceBuilder struct {
 // }
 func PollingDataSource() *PollingDataSourceBuilder {
 	return &PollingDataSourceBuilder{
-		pollInterval:    DefaultPollInterval,
-		protocolVersion: 1,
+		pollInterval: DefaultPollInterval,
 	}
 }
 
@@ -82,16 +78,6 @@ func (b *PollingDataSourceBuilder) PayloadFilter(filterKey string) *PollingDataS
 	return b
 }
 
-// V2 uses the next generation polling protocol. This method is not stable, and not subject to any backwards
-// compatibility guarantees or semantic versioning.
-// It is not suitable for production usage.
-// Do not use it.
-// You have been warned.
-func (b *PollingDataSourceBuilder) V2() *PollingDataSourceBuilder {
-	b.protocolVersion = 2
-	return b
-}
-
 // Build is called internally by the SDK.
 func (b *PollingDataSourceBuilder) Build(context subsystems.ClientContext) (subsystems.DataSource, error) {
 	context.GetLogging().Loggers.Warn(
@@ -110,11 +96,7 @@ func (b *PollingDataSourceBuilder) Build(context subsystems.ClientContext) (subs
 		PollInterval: b.pollInterval,
 		FilterKey:    filterKey,
 	}
-	if b.protocolVersion == 1 {
-		return datasource.NewPollingProcessor(context, context.GetDataSourceUpdateSink(), cfg), nil
-	} else {
-		return datasourcev2.NewPollingProcessor(context, context.GetDataSourceUpdateSink(), cfg), nil
-	}
+	return datasource.NewPollingProcessor(context, context.GetDataSourceUpdateSink(), cfg), nil
 }
 
 // DescribeConfiguration is used internally by the SDK to inspect the configuration.
diff --git a/ldcomponents/polling_data_source_builder_v2.go b/ldcomponents/polling_data_source_builder_v2.go
new file mode 100644
index 00000000..ca5cc299
--- /dev/null
+++ b/ldcomponents/polling_data_source_builder_v2.go
@@ -0,0 +1,112 @@
+package ldcomponents
+
+import (
+	"errors"
+	"github.com/launchdarkly/go-sdk-common/v3/ldvalue"
+	"github.com/launchdarkly/go-server-sdk/v7/internal/datasource"
+	"github.com/launchdarkly/go-server-sdk/v7/internal/datasourcev2"
+	"github.com/launchdarkly/go-server-sdk/v7/internal/endpoints"
+	"github.com/launchdarkly/go-server-sdk/v7/subsystems"
+	"time"
+)
+
+// PollingDataSourceBuilderV2 provides methods for configuring the polling data source.
+//
+// V2 uses the next generation polling protocol. This struct is not stable, and not subject to any backwards
+// compatibility guarantees or semantic versioning. It is not suitable for production usage.
+//
+// See [PollingDataSourceV2] for usage.
+type PollingDataSourceBuilderV2 struct {
+	pollInterval time.Duration
+	filterKey    ldvalue.OptionalString
+}
+
+// PollingDataSourceV2 returns a configurable factory for using polling mode to get feature flag data.
+//
+// Polling is not the default behavior; by default, the SDK uses a streaming connection to receive feature flag
+// data from LaunchDarkly. In polling mode, the SDK instead makes a new HTTP request to LaunchDarkly at regular
+// intervals. HTTP caching allows it to avoid redundantly downloading data if there have been no changes, but
+// polling is still less efficient than streaming and should only be used on the advice of LaunchDarkly support.
+//
+// To use polling mode, create a builder with PollingDataSourceV2(), set its properties with the methods of
+// [PollingDataSourceBuilderV2], and then use it as a synchronizer in the DataSystem field of
+// [github.com/launchdarkly/go-server-sdk/v7.Config]:
+//
+//	config := ld.Config{
+//		DataSystem: ldcomponents.UnconfiguredDataSystem().
+//			Synchronizers(ldcomponents.PollingDataSourceV2().PollInterval(45 * time.Second), nil),
+//	}
func PollingDataSourceV2() *PollingDataSourceBuilderV2 {
+	return &PollingDataSourceBuilderV2{
+		pollInterval: DefaultPollInterval,
+	}
+}
+
+// PollInterval sets the interval at which the SDK will poll for feature flag updates.
+//
+// The default and minimum value is [DefaultPollInterval]. Values less than this will be set to the default.
+func (b *PollingDataSourceBuilderV2) PollInterval(pollInterval time.Duration) *PollingDataSourceBuilderV2 {
+	if pollInterval < DefaultPollInterval {
+		b.pollInterval = DefaultPollInterval
+	} else {
+		b.pollInterval = pollInterval
+	}
+	return b
+}
+
+// Used in tests to skip parameter validation.
+//
+//nolint:unused // it is used in tests
+func (b *PollingDataSourceBuilderV2) forcePollInterval(
+	pollInterval time.Duration,
+) *PollingDataSourceBuilderV2 {
+	b.pollInterval = pollInterval
+	return b
+}
+
+// PayloadFilter sets the filter key for the polling connection.
+//
+// By default, the SDK is able to evaluate all flags in an environment. If this is undesirable -
+// for example, the environment contains thousands of flags, but this application only needs to evaluate
+// a smaller, known subset - then a filter may be set up in LaunchDarkly, and the filter's key specified here.
+//
+// Evaluations for flags that aren't part of the filtered environment will return default values.
+func (b *PollingDataSourceBuilderV2) PayloadFilter(filterKey string) *PollingDataSourceBuilderV2 {
+	b.filterKey = ldvalue.NewOptionalString(filterKey)
+	return b
+}
+
+// Build is called internally by the SDK.
+func (b *PollingDataSourceBuilderV2) Build(context subsystems.ClientContext) (subsystems.DataSynchronizer, error) {
+	context.GetLogging().Loggers.Warn(
+		"You should only disable the streaming API if instructed to do so by LaunchDarkly support")
+	filterKey, wasSet := b.filterKey.Get()
+	if wasSet && filterKey == "" {
+		return nil, errors.New("payload filter key cannot be an empty string")
+	}
+	configuredBaseURI := endpoints.SelectBaseURI(
+		context.GetServiceEndpoints(),
+		endpoints.PollingService,
+		context.GetLogging().Loggers,
+	)
+	cfg := datasource.PollingConfig{
+		BaseURI:      configuredBaseURI,
+		PollInterval: b.pollInterval,
+		FilterKey:    filterKey,
+	}
+	return datasourcev2.NewPollingProcessor(context, context.GetDataSourceUpdateSink(), cfg), nil
+}
+
+// AsInitializer adapts this builder so it can be used where a DataInitializer is required, for
+// example with DataSystemConfigurationBuilder's Initializers.
+func (b *PollingDataSourceBuilderV2) AsInitializer() subsystems.ComponentConfigurer[subsystems.DataInitializer] {
+	return subsystems.AsInitializer(b)
+}
+
+// DescribeConfiguration is used internally by the SDK to inspect the configuration.
+func (b *PollingDataSourceBuilderV2) DescribeConfiguration(context subsystems.ClientContext) ldvalue.Value {
+	return ldvalue.ObjectBuild().
+		SetBool("streamingDisabled", true).
+		SetBool("customBaseURI",
+			endpoints.IsCustom(context.GetServiceEndpoints(), endpoints.PollingService)).
+		Set("pollingIntervalMillis", durationToMillisValue(b.pollInterval)).
+		SetBool("usingRelayDaemon", false).
+ Build() +} diff --git a/ldcomponents/streaming_data_source_builder_v2.go b/ldcomponents/streaming_data_source_builder_v2.go index aaa42dce..03cd4207 100644 --- a/ldcomponents/streaming_data_source_builder_v2.go +++ b/ldcomponents/streaming_data_source_builder_v2.go @@ -12,7 +12,7 @@ import ( // StreamingDataSourceBuilderV2 provides methods for configuring the streaming data source in v2 mode. // -// V2 uses the next generation streaming protocol. This method is not stable, and not subject to any backwards +// V2 uses the next generation streaming protocol. This struct is not stable, and not subject to any backwards // compatibility guarantees or semantic versioning. It is not suitable for production usage. // // Do not use it. diff --git a/subsystems/data_source.go b/subsystems/data_source.go index 93164537..f56b3ec1 100644 --- a/subsystems/data_source.go +++ b/subsystems/data_source.go @@ -25,9 +25,9 @@ type DataSource interface { } type InitialPayload struct { - Data []ldstoretypes.Collection - Version *int - Fresh bool + Data []ldstoretypes.Collection + Version *int + Authoritative bool } type DataInitializer interface { @@ -36,6 +36,27 @@ type DataInitializer interface { } type DataSynchronizer interface { - Sync(closeWhenReady chan struct{}, payloadVersion *int) + DataInitializer + Sync(closeWhenReady chan<- struct{}, payloadVersion *int) + // IsInitialized returns true if the data source has successfully initialized at some point. + // + // Once this is true, it should remain true even if a problem occurs later. + IsInitialized() bool io.Closer } + +type toInitializer struct { + cc ComponentConfigurer[DataSynchronizer] +} + +func (t toInitializer) Build(context ClientContext) (DataInitializer, error) { + sync, err := t.cc.Build(context) + if err != nil { + return nil, err + } + return sync, nil +} + +func AsInitializer(cc ComponentConfigurer[DataSynchronizer]) ComponentConfigurer[DataInitializer] { + return toInitializer{cc: cc} +} From bfb1e25a7a28637bf54736889c6f2294951a34f7 Mon Sep 17 00:00:00 2001 From: Casey Waldren Date: Tue, 10 Sep 2024 17:01:17 -0700 Subject: [PATCH 23/62] ensure closeWhenReady is closed in offline mode --- internal/datasystem/fdv2_datasystem.go | 1 + 1 file changed, 1 insertion(+) diff --git a/internal/datasystem/fdv2_datasystem.go b/internal/datasystem/fdv2_datasystem.go index 5d0625a7..989eaba4 100644 --- a/internal/datasystem/fdv2_datasystem.go +++ b/internal/datasystem/fdv2_datasystem.go @@ -113,6 +113,7 @@ func (n noStatusMonitoring) IsStatusMonitoringEnabled() bool { func (f *FDv2) Start(closeWhenReady chan struct{}) { if f.offline { + close(closeWhenReady) return } ctx, cancel := context.WithCancel(context.Background()) From 8579fe4393297b86ee40b6d946508062f5fe067b Mon Sep 17 00:00:00 2001 From: Casey Waldren Date: Wed, 11 Sep 2024 12:50:02 -0700 Subject: [PATCH 24/62] expose some fdv2 types in datasourcev2, and use them to implement e2e unit tests --- .../datasourcev2/streaming_data_source.go | 2 +- internal/datasourcev2/types.go | 10 ++-- internal/datasystem/fdv2_datasystem.go | 60 ++++++++++++++++--- ldclient_end_to_end_test.go | 37 +++++++++++- testhelpers/ldservices/server_sdk_data.go | 39 +++++++++++- 5 files changed, 132 insertions(+), 16 deletions(-) diff --git a/internal/datasourcev2/streaming_data_source.go b/internal/datasourcev2/streaming_data_source.go index c2c99a8d..12ebe350 100644 --- a/internal/datasourcev2/streaming_data_source.go +++ b/internal/datasourcev2/streaming_data_source.go @@ -213,7 +213,7 @@ func (sp *StreamProcessor) 
consumeStream(stream *es.Stream, closeWhenReady chan<
 		case "server-intent":
 			//nolint: godox
 			// TODO: Replace all this json unmarshalling with a nicer jreader implementation.
-			var serverIntent serverIntent
+			var serverIntent ServerIntent
 			err := json.Unmarshal([]byte(event.Data()), &serverIntent)
 			if err != nil {
 				gotMalformedEvent(event, err)
diff --git a/internal/datasourcev2/types.go b/internal/datasourcev2/types.go
index a3e912e5..675537a0 100644
--- a/internal/datasourcev2/types.go
+++ b/internal/datasourcev2/types.go
@@ -35,15 +35,15 @@ func (e event) Data() string {
 
 // An es.Event interface implementation
 type changeSet struct {
-	intent *serverIntent
+	intent *ServerIntent
 	events []es.Event
 }
 
-type serverIntent struct {
-	Payloads []payload `json:"payloads"`
+type ServerIntent struct {
+	Payloads []Payload `json:"payloads"`
 }
 
-type payload struct {
+type Payload struct {
 	// The id here doesn't seem to match the state that is included in the
 	// payload transferred object.
 	// Use that as the key consistently throughout the process.
 	ID     string `json:"id"`
 	Target int    `json:"target"`
-	Code   string `json:"code"`
+	Code   string `json:"intentCode"`
 	Reason string `json:"reason"`
 }
diff --git a/internal/datasystem/fdv2_datasystem.go b/internal/datasystem/fdv2_datasystem.go
index 989eaba4..2ae59ea1 100644
--- a/internal/datasystem/fdv2_datasystem.go
+++ b/internal/datasystem/fdv2_datasystem.go
@@ -4,6 +4,7 @@ import (
 	"context"
 	"errors"
 	"sync"
+	"time"
 
 	"github.com/launchdarkly/go-sdk-common/v3/ldlog"
 	"github.com/launchdarkly/go-server-sdk/v7/interfaces"
@@ -60,6 +61,8 @@ type FDv2 struct {
 	// We hold a reference to the dataStoreStatusProvider because it's required for the public interface of the
 	// SDK client.
 	dataStoreStatusProvider interfaces.DataStoreStatusProvider
+
+	dataSourceStatusProvider *dataStatusProvider
 }
 
 func NewFDv2(cfgBuilder subsystems.ComponentConfigurer[subsystems.DataSystemConfiguration], clientContext *internal.ClientContextImpl) (*FDv2, error) {
@@ -83,15 +86,19 @@ func NewFDv2(cfgBuilder subsystems.ComponentConfigurer[subsystems.DataSystemConf
 	}
 
 	fdv2 := &FDv2{
-		store:         store,
-		initializers:  cfg.Initializers,
-		primarySync:   cfg.Synchronizers.Primary,
-		secondarySync: cfg.Synchronizers.Secondary,
-		offline:       cfg.Offline,
-		loggers:       clientContext.GetLogging().Loggers,
-		broadcasters:  bcasters,
+		store:                    store,
+		initializers:             cfg.Initializers,
+		primarySync:              cfg.Synchronizers.Primary,
+		secondarySync:            cfg.Synchronizers.Secondary,
+		offline:                  cfg.Offline,
+		loggers:                  clientContext.GetLogging().Loggers,
+		broadcasters:             bcasters,
+		dataSourceStatusProvider: &dataStatusProvider{},
 	}
 
+	// Yay circular reference.
+	fdv2.dataSourceStatusProvider.system = fdv2
+
 	if cfg.Store != nil {
 		// If there's a persistent Store, we should provide a status monitor and inform Store that it's present.
fdv2.dataStoreStatusProvider = datastore.NewDataStoreStatusProviderImpl(cfg.Store, dataStoreUpdateSink) @@ -251,7 +258,7 @@ func (f *FDv2) DataSourceStatusBroadcaster() *internal.Broadcaster[interfaces.Da } func (f *FDv2) DataSourceStatusProvider() interfaces.DataSourceStatusProvider { - panic("implement me") + return f.dataSourceStatusProvider } func (f *FDv2) DataStoreStatusBroadcaster() *internal.Broadcaster[interfaces.DataStoreStatus] { @@ -269,3 +276,40 @@ func (f *FDv2) FlagChangeEventBroadcaster() *internal.Broadcaster[interfaces.Fla func (f *FDv2) Offline() bool { return f.offline } + +type dataStatusProvider struct { + system *FDv2 +} + +func (d *dataStatusProvider) GetStatus() interfaces.DataSourceStatus { + var state interfaces.DataSourceState + if d.system.primarySync != nil { + if d.system.primarySync.IsInitialized() { + state = interfaces.DataSourceStateValid + } else { + state = interfaces.DataSourceStateInitializing + } + } else { + state = interfaces.DataSourceStateOff + } + return interfaces.DataSourceStatus{ + State: state, + StateSince: time.Now(), + LastError: interfaces.DataSourceErrorInfo{}, + } +} + +func (d *dataStatusProvider) AddStatusListener() <-chan interfaces.DataSourceStatus { + return d.system.broadcasters.dataSourceStatus.AddListener() +} + +func (d *dataStatusProvider) RemoveStatusListener(listener <-chan interfaces.DataSourceStatus) { + d.system.broadcasters.dataSourceStatus.RemoveListener(listener) +} + +func (d *dataStatusProvider) WaitFor(desiredState interfaces.DataSourceState, timeout time.Duration) bool { + //TODO implement me + panic("implement me") +} + +var _ interfaces.DataSourceStatusProvider = (*dataStatusProvider)(nil) diff --git a/ldclient_end_to_end_test.go b/ldclient_end_to_end_test.go index a8e0dc18..cb3a2aed 100644 --- a/ldclient_end_to_end_test.go +++ b/ldclient_end_to_end_test.go @@ -3,6 +3,7 @@ package ldclient import ( "crypto/x509" "encoding/json" + "github.com/launchdarkly/go-server-sdk/v7/internal/datasourcev2" "net/http" "net/http/httptest" "testing" @@ -69,8 +70,42 @@ func TestDefaultDataSourceIsStreaming(t *testing.T) { }) t.Run("fdv2", func(t *testing.T) { + + requireIntent := func(t *testing.T, code string, reason string) httphelpers.SSEEvent { + intent := datasourcev2.ServerIntent{Payloads: []datasourcev2.Payload{ + {ID: "fake-id", Target: 0, Code: code, Reason: reason}, + }} + intentData, err := json.Marshal(intent) + require.NoError(t, err) + return httphelpers.SSEEvent{ + Event: "server-intent", + Data: string(intentData), + } + } + + requireTransferred := func(t *testing.T) httphelpers.SSEEvent { + type payloadTransferred struct { + State string `json:"state"` + Version int `json:"version"` + } + transferredData, err := json.Marshal(payloadTransferred{State: "[p:17YNC7XBH88Y6RDJJ48EKPCJS7:53]", Version: 1}) + require.NoError(t, err) + return httphelpers.SSEEvent{ + Event: "payload-transferred", + Data: string(transferredData), + } + } + + intent := requireIntent(t, "xfer-full", "payload-missing") + data := ldservices.NewServerSDKData().Flags(&alwaysTrueFlag) - streamHandler, _ := ldservices.ServerSideStreamingServiceHandler(data.ToPutEvent()) + + streamHandler, streamSender := ldservices.ServerSideStreamingServiceHandler(intent) + for _, object := range data.ToPutObjects() { + streamSender.Enqueue(object) + } + streamSender.Enqueue(requireTransferred(t)) + httphelpers.WithServer(streamHandler, func(streamServer *httptest.Server) { logCapture := ldlogtest.NewMockLog() defer logCapture.DumpIfTestFailed(t) diff --git 
a/testhelpers/ldservices/server_sdk_data.go b/testhelpers/ldservices/server_sdk_data.go index 600a3710..dad1e322 100644 --- a/testhelpers/ldservices/server_sdk_data.go +++ b/testhelpers/ldservices/server_sdk_data.go @@ -3,7 +3,6 @@ package ldservices import ( "encoding/json" "fmt" - "github.com/launchdarkly/go-sdk-common/v3/ldvalue" "github.com/launchdarkly/go-test-helpers/v3/httphelpers" "github.com/launchdarkly/go-test-helpers/v3/jsonhelpers" @@ -82,3 +81,41 @@ func (s *ServerSDKData) ToPutEvent() httphelpers.SSEEvent { Data: fmt.Sprintf(`{"path": "/", "data": %s}`, s), } } + +// TODO: Refactor into dedicated FDv2 testing support package. +func (s *ServerSDKData) ToPutObjects() []httphelpers.SSEEvent { + type baseObject struct { + Version int `json:"version"` + Kind string `json:"kind"` + Key string `json:"key"` + Object json.RawMessage `json:"object"` + } + var puts []httphelpers.SSEEvent + for _, flag := range s.FlagsMap { + base := baseObject{ + Version: 1, + Kind: "flag", + Key: getKeyFromJSON(flag), + Object: jsonhelpers.ToJSON(flag), + } + data, _ := json.Marshal(base) + puts = append(puts, httphelpers.SSEEvent{ + Event: "put-object", + Data: string(data), + }) + } + for _, segment := range s.SegmentsMap { + base := baseObject{ + Version: 1, + Kind: "segment", + Key: getKeyFromJSON(segment), + Object: jsonhelpers.ToJSON(segment), + } + data, _ := json.Marshal(base) + puts = append(puts, httphelpers.SSEEvent{ + Event: "put-object", + Data: string(data), + }) + } + return puts +} From 169df91d9b473c44dc5a28e55eea30a0d7cc4837 Mon Sep 17 00:00:00 2001 From: Casey Waldren Date: Wed, 11 Sep 2024 14:44:39 -0700 Subject: [PATCH 25/62] break out data destination/status reporter from DataSourceUpdateSink implementation: --- .../datasourcev2/streaming_data_source.go | 37 +++++++++--------- internal/datasystem/fdv1_datasystem.go | 15 +++++--- internal/datasystem/fdv2_datasystem.go | 12 +++++- internal/datasystem/fdv2_store.go | 10 ----- ldclient.go | 2 +- ldclient_end_to_end_test.go | 2 + .../streaming_data_source_builder_v2.go | 3 +- subsystems/client_context.go | 38 +++++++++++++++---- subsystems/data_destination.go | 28 ++++++++++++++ subsystems/data_source_status.go | 9 +++++ 10 files changed, 112 insertions(+), 44 deletions(-) create mode 100644 subsystems/data_destination.go create mode 100644 subsystems/data_source_status.go diff --git a/internal/datasourcev2/streaming_data_source.go b/internal/datasourcev2/streaming_data_source.go index 12ebe350..67555e14 100644 --- a/internal/datasourcev2/streaming_data_source.go +++ b/internal/datasourcev2/streaming_data_source.go @@ -75,7 +75,8 @@ const ( // DataSource interface. type StreamProcessor struct { cfg datasource.StreamConfig - dataSourceUpdates subsystems.DataSourceUpdateSink + dataDestination subsystems.DataDestination + statusReporter subsystems.DataSourceStatusReporter client *http.Client headers http.Header diagnosticsManager *ldevents.DiagnosticsManager @@ -91,15 +92,17 @@ type StreamProcessor struct { // NewStreamProcessor creates the internal implementation of the streaming data source. 
func NewStreamProcessor( context subsystems.ClientContext, - dataSourceUpdates subsystems.DataSourceUpdateSink, + dataDestination subsystems.DataDestination, + statusReporter subsystems.DataSourceStatusReporter, cfg datasource.StreamConfig, ) *StreamProcessor { sp := &StreamProcessor{ - dataSourceUpdates: dataSourceUpdates, - headers: context.GetHTTP().DefaultHeaders, - loggers: context.GetLogging().Loggers, - halt: make(chan struct{}), - cfg: cfg, + dataDestination: dataDestination, + statusReporter: statusReporter, + headers: context.GetHTTP().DefaultHeaders, + loggers: context.GetLogging().Loggers, + halt: make(chan struct{}), + cfg: cfg, } if cci, ok := context.(*internal.ClientContextImpl); ok { sp.diagnosticsManager = cci.DiagnosticsManager @@ -193,7 +196,7 @@ func (sp *StreamProcessor) consumeStream(stream *es.Stream, closeWhenReady chan< Message: err.Error(), Time: time.Now(), } - sp.dataSourceUpdates.UpdateStatus(interfaces.DataSourceStateInterrupted, errorInfo) + sp.statusReporter.UpdateStatus(interfaces.DataSourceStateInterrupted, errorInfo) shouldRestart = true // scenario 1 in error handling comments at top of file processedEvent = false @@ -273,12 +276,12 @@ func (sp *StreamProcessor) consumeStream(stream *es.Stream, closeWhenReady chan< for _, update := range updates { switch u := update.(type) { case datasource.PatchData: - if !sp.dataSourceUpdates.Upsert(u.Kind, u.Key, u.Data) { + if !sp.dataDestination.Upsert(u.Kind, u.Key, u.Data) { //TODO: indicate that this can't actually fail anymore from the perspective of the data source storeUpdateFailed("streaming update of " + u.Key) } case datasource.PutData: - if sp.dataSourceUpdates.Init(u.Data) { + if sp.dataDestination.Init(u.Data) { sp.setInitializedAndNotifyClient(true, closeWhenReady) } else { //TODO: indicate that this can't actually fail anymore from the perspective of the data source @@ -287,7 +290,7 @@ func (sp *StreamProcessor) consumeStream(stream *es.Stream, closeWhenReady chan< } case datasource.DeleteData: deletedItem := ldstoretypes.ItemDescriptor{Version: u.Version, Item: nil} - if !sp.dataSourceUpdates.Upsert(u.Kind, u.Key, deletedItem) { + if !sp.dataDestination.Upsert(u.Kind, u.Key, deletedItem) { //TODO: indicate that this can't actually fail anymore from the perspective of the data source storeUpdateFailed("streaming deletion of " + u.Key) } @@ -302,7 +305,7 @@ func (sp *StreamProcessor) consumeStream(stream *es.Stream, closeWhenReady chan< } if processedEvent { - sp.dataSourceUpdates.UpdateStatus(interfaces.DataSourceStateValid, interfaces.DataSourceErrorInfo{}) + sp.statusReporter.UpdateStatus(interfaces.DataSourceStateValid, interfaces.DataSourceErrorInfo{}) } if shouldRestart { stream.Restart() @@ -322,7 +325,7 @@ func (sp *StreamProcessor) subscribe(closeWhenReady chan<- struct{}) { "Unable to create a stream request; this is not a network problem, most likely a bad base URI: %s", reqErr, ) - sp.dataSourceUpdates.UpdateStatus(interfaces.DataSourceStateOff, interfaces.DataSourceErrorInfo{ + sp.statusReporter.UpdateStatus(interfaces.DataSourceStateOff, interfaces.DataSourceErrorInfo{ Kind: interfaces.DataSourceErrorKindUnknown, Message: reqErr.Error(), Time: time.Now(), @@ -366,10 +369,10 @@ func (sp *StreamProcessor) subscribe(closeWhenReady chan<- struct{}) { ) if recoverable { sp.logConnectionStarted() - sp.dataSourceUpdates.UpdateStatus(interfaces.DataSourceStateInterrupted, errorInfo) + sp.statusReporter.UpdateStatus(interfaces.DataSourceStateInterrupted, errorInfo) return 
es.StreamErrorHandlerResult{CloseNow: false} } - sp.dataSourceUpdates.UpdateStatus(interfaces.DataSourceStateOff, errorInfo) + sp.statusReporter.UpdateStatus(interfaces.DataSourceStateOff, errorInfo) return es.StreamErrorHandlerResult{CloseNow: true} } @@ -385,7 +388,7 @@ func (sp *StreamProcessor) subscribe(closeWhenReady chan<- struct{}) { Message: err.Error(), Time: time.Now(), } - sp.dataSourceUpdates.UpdateStatus(interfaces.DataSourceStateInterrupted, errorInfo) + sp.statusReporter.UpdateStatus(interfaces.DataSourceStateInterrupted, errorInfo) sp.logConnectionStarted() return es.StreamErrorHandlerResult{CloseNow: false} } @@ -446,7 +449,7 @@ func (sp *StreamProcessor) logConnectionResult(success bool) { func (sp *StreamProcessor) Close() error { sp.closeOnce.Do(func() { close(sp.halt) - sp.dataSourceUpdates.UpdateStatus(interfaces.DataSourceStateOff, interfaces.DataSourceErrorInfo{}) + sp.statusReporter.UpdateStatus(interfaces.DataSourceStateOff, interfaces.DataSourceErrorInfo{}) }) return nil } diff --git a/internal/datasystem/fdv1_datasystem.go b/internal/datasystem/fdv1_datasystem.go index 0ef11efa..e56ef624 100644 --- a/internal/datasystem/fdv1_datasystem.go +++ b/internal/datasystem/fdv1_datasystem.go @@ -17,13 +17,15 @@ type FDv1 struct { flagChangeEventBroadcaster *internal.Broadcaster[interfaces.FlagChangeEvent] dataStore subsystems.DataStore dataSource subsystems.DataSource + offline bool } -func NewFDv1(dataStoreFactory subsystems.ComponentConfigurer[subsystems.DataStore], dataSourceFactory subsystems.ComponentConfigurer[subsystems.DataSource], clientContext *internal.ClientContextImpl) (*FDv1, error) { +func NewFDv1(offline bool, dataStoreFactory subsystems.ComponentConfigurer[subsystems.DataStore], dataSourceFactory subsystems.ComponentConfigurer[subsystems.DataSource], clientContext *internal.ClientContextImpl) (*FDv1, error) { system := &FDv1{ dataSourceStatusBroadcaster: internal.NewBroadcaster[interfaces.DataSourceStatus](), dataStoreStatusBroadcaster: internal.NewBroadcaster[interfaces.DataStoreStatus](), flagChangeEventBroadcaster: internal.NewBroadcaster[interfaces.FlagChangeEvent](), + offline: offline, } dataStoreUpdateSink := datastore.NewDataStoreUpdateSinkImpl(system.dataStoreStatusBroadcaster) @@ -128,17 +130,20 @@ func (f *FDv1) Stop() error { } func (f *FDv1) Offline() bool { - return f.dataSource == datasource.NewNullDataSource() + return f.offline || f.dataSource == datasource.NewNullDataSource() } func (f *FDv1) DataStatus() DataStatus { + if f.Offline() { + return Defaults + } if f.dataSource.IsInitialized() { return Refreshed - } else if f.dataStore.IsInitialized() { + } + if f.dataStore.IsInitialized() { return Cached - } else { - return Defaults } + return Defaults } func (f *FDv1) Store() subsystems.ReadOnlyStore { diff --git a/internal/datasystem/fdv2_datasystem.go b/internal/datasystem/fdv2_datasystem.go index 2ae59ea1..8e4f205b 100644 --- a/internal/datasystem/fdv2_datasystem.go +++ b/internal/datasystem/fdv2_datasystem.go @@ -13,7 +13,7 @@ import ( "github.com/launchdarkly/go-server-sdk/v7/subsystems" ) -var _ subsystems.DataSourceUpdateSink = (*Store)(nil) +var _ subsystems.DataDestination = (*Store)(nil) var _ subsystems.ReadOnlyStore = (*Store)(nil) type broadcasters struct { @@ -65,6 +65,13 @@ type FDv2 struct { dataSourceStatusProvider *dataStatusProvider } +type nullStatusReporter struct { +} + +func (n *nullStatusReporter) UpdateStatus(status interfaces.DataSourceState, err interfaces.DataSourceErrorInfo) { + // no-op +} + func 
NewFDv2(cfgBuilder subsystems.ComponentConfigurer[subsystems.DataSystemConfiguration], clientContext *internal.ClientContextImpl) (*FDv2, error) { store := NewStore(clientContext.GetLogging().Loggers) @@ -78,7 +85,8 @@ func NewFDv2(cfgBuilder subsystems.ComponentConfigurer[subsystems.DataSystemConf dataStoreUpdateSink := datastore.NewDataStoreUpdateSinkImpl(bcasters.dataStoreStatus) clientContextCopy := *clientContext clientContextCopy.DataStoreUpdateSink = dataStoreUpdateSink - clientContextCopy.DataSourceUpdateSink = store + clientContextCopy.DataDestination = store + clientContextCopy.DataSourceStatusReporter = &nullStatusReporter{} cfg, err := cfgBuilder.Build(clientContextCopy) if err != nil { diff --git a/internal/datasystem/fdv2_store.go b/internal/datasystem/fdv2_store.go index 68ffd9e2..226b39ca 100644 --- a/internal/datasystem/fdv2_store.go +++ b/internal/datasystem/fdv2_store.go @@ -162,16 +162,6 @@ func (s *Store) Upsert(kind ldstoretypes.DataKind, key string, item ldstoretypes return memErr == nil && persErr == nil } -// nolint:revive // Standard DataSourceUpdateSink method -func (s *Store) UpdateStatus(newState interfaces.DataSourceState, newError interfaces.DataSourceErrorInfo) { - //TODO: although DataSourceUpdateSink is where data is pushed to the store by the data source, it doesn't really - // make sense to have it also be the place that status updates are received. It only cares whether data has - // *ever* been received, and that is already known by the store. - // This should probably be refactored so that the data source takes a separate injected dependency for the - // status updates. - s.loggers.Info("fdv2_store: swallowing status update (", newState, ", ", newError, ")") -} - // nolint:revive // Standard DataSourceUpdateSink method func (s *Store) GetDataStoreStatusProvider() interfaces.DataStoreStatusProvider { s.mu.RLock() diff --git a/ldclient.go b/ldclient.go index 98fa1563..b69896b6 100644 --- a/ldclient.go +++ b/ldclient.go @@ -245,7 +245,7 @@ func MakeCustomClient(sdkKey string, config Config, waitFor time.Duration) (*LDC client.offline = config.Offline if config.DataSystem == nil { - system, err := datasystem.NewFDv1(config.DataStore, config.DataSource, clientContext) + system, err := datasystem.NewFDv1(config.Offline, config.DataStore, config.DataSource, clientContext) if err != nil { return nil, err } diff --git a/ldclient_end_to_end_test.go b/ldclient_end_to_end_test.go index cb3a2aed..f4153c33 100644 --- a/ldclient_end_to_end_test.go +++ b/ldclient_end_to_end_test.go @@ -125,6 +125,8 @@ func TestDefaultDataSourceIsStreaming(t *testing.T) { value, _ := client.BoolVariation(alwaysTrueFlag.Key, testUser, false) assert.True(t, value) + + assert.True(t, client.Initialized()) }) }) } diff --git a/ldcomponents/streaming_data_source_builder_v2.go b/ldcomponents/streaming_data_source_builder_v2.go index 03cd4207..5feb8e33 100644 --- a/ldcomponents/streaming_data_source_builder_v2.go +++ b/ldcomponents/streaming_data_source_builder_v2.go @@ -87,7 +87,8 @@ func (b *StreamingDataSourceBuilderV2) Build(context subsystems.ClientContext) ( } return datasourcev2.NewStreamProcessor( context, - context.GetDataSourceUpdateSink(), + context.GetDataDestination(), + context.GetDataSourceStatusReporter(), cfg, ), nil } diff --git a/subsystems/client_context.go b/subsystems/client_context.go index 3d70fc2d..aab660a1 100644 --- a/subsystems/client_context.go +++ b/subsystems/client_context.go @@ -45,19 +45,27 @@ type ClientContext interface { // This component is only 
available when the SDK is creating a DataStore. Otherwise the method
 	// returns nil.
 	GetDataStoreUpdateSink() DataStoreUpdateSink
+
+	// GetDataDestination returns the sink that FDv2 data sources push data into.
+	GetDataDestination() DataDestination
+
+	// GetDataSourceStatusReporter returns the component that FDv2 data sources use to report status changes.
+	GetDataSourceStatusReporter() DataSourceStatusReporter
 }
 
 // BasicClientContext is the basic implementation of the ClientContext interface, not including any
 // private fields that the SDK may use for implementation details.
 type BasicClientContext struct {
-	SDKKey               string
-	ApplicationInfo      interfaces.ApplicationInfo
-	HTTP                 HTTPConfiguration
-	Logging              LoggingConfiguration
-	Offline              bool
-	ServiceEndpoints     interfaces.ServiceEndpoints
-	DataSourceUpdateSink DataSourceUpdateSink
-	DataStoreUpdateSink  DataStoreUpdateSink
+	SDKKey                   string
+	ApplicationInfo          interfaces.ApplicationInfo
+	HTTP                     HTTPConfiguration
+	Logging                  LoggingConfiguration
+	Offline                  bool
+	ServiceEndpoints         interfaces.ServiceEndpoints
+	DataSourceUpdateSink     DataSourceUpdateSink
+	DataStoreUpdateSink      DataStoreUpdateSink
+	DataDestination          DataDestination
+	DataSourceStatusReporter DataSourceStatusReporter
 }
 
 func (b BasicClientContext) GetSDKKey() string { return b.SDKKey } //nolint:revive
@@ -90,3 +98,17 @@ func (b BasicClientContext) GetDataSourceUpdateSink() DataSourceUpdateSink { //n
 func (b BasicClientContext) GetDataStoreUpdateSink() DataStoreUpdateSink { //nolint:revive
 	return b.DataStoreUpdateSink
 }
+
+func (b BasicClientContext) GetDataDestination() DataDestination {
+	if b.DataDestination != nil {
+		return b.DataDestination
+	}
+	return b.DataSourceUpdateSink
+}
+
+func (b BasicClientContext) GetDataSourceStatusReporter() DataSourceStatusReporter {
+	if b.DataSourceStatusReporter != nil {
+		return b.DataSourceStatusReporter
+	}
+	return b.DataSourceUpdateSink
+}
diff --git a/subsystems/data_destination.go b/subsystems/data_destination.go
new file mode 100644
index 00000000..a6e94df6
--- /dev/null
+++ b/subsystems/data_destination.go
@@ -0,0 +1,28 @@
+package subsystems
+
+import (
+	"github.com/launchdarkly/go-server-sdk/v7/subsystems/ldstoretypes"
+)
+
+// DataDestination represents the sink that FDv2 data sources write received data into.
+type DataDestination interface {
+	// Init overwrites the current contents of the data store with a set of items for each collection.
+	//
+	// If the underlying data store returns an error during this operation, the SDK will log it,
+	// and set the data source state to DataSourceStateInterrupted with an error of
+	// DataSourceErrorKindStoreError. It will not return the error to the data source, but will
+	// return false to indicate that the operation failed.
+	Init(allData []ldstoretypes.Collection) bool
+
+	// Upsert updates or inserts an item in the specified collection. For updates, the object will only be
+	// updated if the existing version is less than the new version.
+	//
+	// To mark an item as deleted, pass an ItemDescriptor with a nil Item and a nonzero version
+	// number. Deletions must be versioned so that they do not overwrite a later update in case updates
+	// are received out of order.
+	//
+	// If the underlying data store returns an error during this operation, the SDK will log it,
+	// and set the data source state to DataSourceStateInterrupted with an error of
+	// DataSourceErrorKindStoreError. It will not return the error to the data source, but will
+	// return false to indicate that the operation failed.
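+	//
+	// As a hypothetical sketch (where destination is any DataDestination, and
+	// ldstoreimpl.Features() supplies the flag data kind), a versioned deletion looks like:
+	//
+	//	deleted := ldstoretypes.ItemDescriptor{Version: 42, Item: nil}
+	//	_ = destination.Upsert(ldstoreimpl.Features(), "retired-flag", deleted)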
+	Upsert(kind ldstoretypes.DataKind, key string, item ldstoretypes.ItemDescriptor) bool
+}
diff --git a/subsystems/data_source_status.go b/subsystems/data_source_status.go
new file mode 100644
index 00000000..0ddd55b8
--- /dev/null
+++ b/subsystems/data_source_status.go
@@ -0,0 +1,9 @@
+package subsystems
+
+import (
+	"github.com/launchdarkly/go-server-sdk/v7/interfaces"
+)
+
+// DataSourceStatusReporter is the component that FDv2 data sources use to report changes in their status.
+type DataSourceStatusReporter interface {
+	UpdateStatus(newState interfaces.DataSourceState, newError interfaces.DataSourceErrorInfo)
+}
From b6da0dac3e4ab29f714cfee5cc6d0e5d83ee5a57 Mon Sep 17 00:00:00 2001
From: Casey Waldren
Date: Wed, 11 Sep 2024 17:20:41 -0700
Subject: [PATCH 26/62] make separate test file for fdv2 e2e tests

---
 internal/datasystem/fdv2_datasystem.go |  4 +-
 ldclient_end_to_end_fdv2_test.go       | 80 ++++++++++++++++++++++
 ldclient_end_to_end_test.go            | 94 ++++----------------------
 3 files changed, 98 insertions(+), 80 deletions(-)
 create mode 100644 ldclient_end_to_end_fdv2_test.go

diff --git a/internal/datasystem/fdv2_datasystem.go b/internal/datasystem/fdv2_datasystem.go
index 8e4f205b..407a59f0 100644
--- a/internal/datasystem/fdv2_datasystem.go
+++ b/internal/datasystem/fdv2_datasystem.go
@@ -179,14 +179,16 @@ func (f *FDv2) runPersistentStoreOutageRecovery(ctx context.Context, statuses <-
 
 func (f *FDv2) runInitializers(ctx context.Context, closeWhenReady chan struct{}) *int {
 	for _, initializer := range f.initializers {
+		f.loggers.Infof("Attempting initialization via %s", initializer.Name())
 		payload, err := initializer.Fetch(ctx)
 		if errors.Is(err, context.Canceled) {
 			return nil
 		}
 		if err != nil {
-			// TODO: log that this initializer failed
+			f.loggers.Warnf("Initializer %s failed: %v", initializer.Name(), err)
 			continue
 		}
+		f.loggers.Infof("Initialized via %s", initializer.Name())
 		f.store.Init(payload.Data)
 		f.store.SwapToMemory(payload.Authoritative)
 		f.readyOnce.Do(func() {
 			close(closeWhenReady)
 		})
diff --git a/ldclient_end_to_end_fdv2_test.go b/ldclient_end_to_end_fdv2_test.go
new file mode 100644
index 00000000..29ee7fc7
--- /dev/null
+++ b/ldclient_end_to_end_fdv2_test.go
@@ -0,0 +1,80 @@
+package ldclient
+
+import (
+	"encoding/json"
+	"github.com/launchdarkly/go-server-sdk/v7/internal/datasourcev2"
+	"net/http/httptest"
+	"testing"
+	"time"
+
+	"github.com/launchdarkly/go-sdk-common/v3/ldlogtest"
+	"github.com/launchdarkly/go-server-sdk/v7/interfaces"
+	"github.com/launchdarkly/go-server-sdk/v7/ldcomponents"
+	"github.com/launchdarkly/go-server-sdk/v7/testhelpers/ldservices"
+
+	"github.com/launchdarkly/go-test-helpers/v3/httphelpers"
+
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+)
+
+func TestFDV2DefaultDataSourceIsStreaming(t *testing.T) {
+	requireIntent := func(t *testing.T, code string, reason string) httphelpers.SSEEvent {
+		intent := datasourcev2.ServerIntent{Payloads: []datasourcev2.Payload{
+			{ID: "fake-id", Target: 0, Code: code, Reason: reason},
+		}}
+		intentData, err := json.Marshal(intent)
+		require.NoError(t, err)
+		return httphelpers.SSEEvent{
+			Event: "server-intent",
+			Data:  string(intentData),
+		}
+	}
+
+	requireTransferred := func(t *testing.T) httphelpers.SSEEvent {
+		type payloadTransferred struct {
+			State   string `json:"state"`
+			Version int    `json:"version"`
+		}
+		transferredData, err := json.Marshal(payloadTransferred{State: "[p:17YNC7XBH88Y6RDJJ48EKPCJS7:53]", Version: 1})
+		require.NoError(t, err)
+		return httphelpers.SSEEvent{
+			Event: "payload-transferred",
+			Data:  string(transferredData),
+		}
+	}
+
+	intent := requireIntent(t, "xfer-full", "payload-missing")
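+	// ("xfer-full" with reason "payload-missing" asks for a complete payload transfer; these
+	// values mirror the fdv2 protocol events fabricated by the helpers above.)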
+ + data := ldservices.NewServerSDKData().Flags(&alwaysTrueFlag) + + streamHandler, streamSender := ldservices.ServerSideStreamingServiceHandler(intent) + for _, object := range data.ToPutObjects() { + streamSender.Enqueue(object) + } + streamSender.Enqueue(requireTransferred(t)) + + httphelpers.WithServer(streamHandler, func(streamServer *httptest.Server) { + logCapture := ldlogtest.NewMockLog() + defer logCapture.DumpIfTestFailed(t) + + config := Config{ + Events: ldcomponents.NoEvents(), + Logging: ldcomponents.Logging().Loggers(logCapture.Loggers), + ServiceEndpoints: interfaces.ServiceEndpoints{Streaming: streamServer.URL}, + DataSystem: ldcomponents.DataSystem(), + } + + client, err := MakeCustomClient(testSdkKey, config, time.Second*5) + require.NoError(t, err) + defer client.Close() + + assert.Equal(t, string(interfaces.DataSourceStateValid), string(client.GetDataSourceStatusProvider().GetStatus().State)) + + value, _ := client.BoolVariation(alwaysTrueFlag.Key, testUser, false) + assert.True(t, value) + + assert.True(t, client.Initialized()) + }) + +} diff --git a/ldclient_end_to_end_test.go b/ldclient_end_to_end_test.go index f4153c33..685c9276 100644 --- a/ldclient_end_to_end_test.go +++ b/ldclient_end_to_end_test.go @@ -3,7 +3,6 @@ package ldclient import ( "crypto/x509" "encoding/json" - "github.com/launchdarkly/go-server-sdk/v7/internal/datasourcev2" "net/http" "net/http/httptest" "testing" @@ -45,89 +44,26 @@ func assertNoMoreRequests(t *testing.T, requestsCh <-chan httphelpers.HTTPReques } func TestDefaultDataSourceIsStreaming(t *testing.T) { - t.Run("fdv1", func(t *testing.T) { - data := ldservices.NewServerSDKData().Flags(&alwaysTrueFlag) - streamHandler, _ := ldservices.ServerSideStreamingServiceHandler(data.ToPutEvent()) - httphelpers.WithServer(streamHandler, func(streamServer *httptest.Server) { - logCapture := ldlogtest.NewMockLog() - defer logCapture.DumpIfTestFailed(t) - - config := Config{ - Events: ldcomponents.NoEvents(), - Logging: ldcomponents.Logging().Loggers(logCapture.Loggers), - ServiceEndpoints: interfaces.ServiceEndpoints{Streaming: streamServer.URL}, - } - - client, err := MakeCustomClient(testSdkKey, config, time.Second*5) - require.NoError(t, err) - defer client.Close() - - assert.Equal(t, string(interfaces.DataSourceStateValid), string(client.GetDataSourceStatusProvider().GetStatus().State)) - - value, _ := client.BoolVariation(alwaysTrueFlag.Key, testUser, false) - assert.True(t, value) - }) - }) - - t.Run("fdv2", func(t *testing.T) { - - requireIntent := func(t *testing.T, code string, reason string) httphelpers.SSEEvent { - intent := datasourcev2.ServerIntent{Payloads: []datasourcev2.Payload{ - {ID: "fake-id", Target: 0, Code: code, Reason: reason}, - }} - intentData, err := json.Marshal(intent) - require.NoError(t, err) - return httphelpers.SSEEvent{ - Event: "server-intent", - Data: string(intentData), - } - } - - requireTransferred := func(t *testing.T) httphelpers.SSEEvent { - type payloadTransferred struct { - State string `json:"state"` - Version int `json:"version"` - } - transferredData, err := json.Marshal(payloadTransferred{State: "[p:17YNC7XBH88Y6RDJJ48EKPCJS7:53]", Version: 1}) - require.NoError(t, err) - return httphelpers.SSEEvent{ - Event: "payload-transferred", - Data: string(transferredData), - } - } - - intent := requireIntent(t, "xfer-full", "payload-missing") - - data := ldservices.NewServerSDKData().Flags(&alwaysTrueFlag) + data := ldservices.NewServerSDKData().Flags(&alwaysTrueFlag) + streamHandler, _ := 
ldservices.ServerSideStreamingServiceHandler(data.ToPutEvent()) + httphelpers.WithServer(streamHandler, func(streamServer *httptest.Server) { + logCapture := ldlogtest.NewMockLog() + defer logCapture.DumpIfTestFailed(t) - streamHandler, streamSender := ldservices.ServerSideStreamingServiceHandler(intent) - for _, object := range data.ToPutObjects() { - streamSender.Enqueue(object) + config := Config{ + Events: ldcomponents.NoEvents(), + Logging: ldcomponents.Logging().Loggers(logCapture.Loggers), + ServiceEndpoints: interfaces.ServiceEndpoints{Streaming: streamServer.URL}, } - streamSender.Enqueue(requireTransferred(t)) - httphelpers.WithServer(streamHandler, func(streamServer *httptest.Server) { - logCapture := ldlogtest.NewMockLog() - defer logCapture.DumpIfTestFailed(t) - - config := Config{ - Events: ldcomponents.NoEvents(), - Logging: ldcomponents.Logging().Loggers(logCapture.Loggers), - ServiceEndpoints: interfaces.ServiceEndpoints{Streaming: streamServer.URL}, - DataSystem: ldcomponents.DataSystem(), - } - - client, err := MakeCustomClient(testSdkKey, config, time.Second*5) - require.NoError(t, err) - defer client.Close() - - assert.Equal(t, string(interfaces.DataSourceStateValid), string(client.GetDataSourceStatusProvider().GetStatus().State)) + client, err := MakeCustomClient(testSdkKey, config, time.Second*5) + require.NoError(t, err) + defer client.Close() - value, _ := client.BoolVariation(alwaysTrueFlag.Key, testUser, false) - assert.True(t, value) + assert.Equal(t, string(interfaces.DataSourceStateValid), string(client.GetDataSourceStatusProvider().GetStatus().State)) - assert.True(t, client.Initialized()) - }) + value, _ := client.BoolVariation(alwaysTrueFlag.Key, testUser, false) + assert.True(t, value) }) } From 7d1586200c398f1b0039a5138c07b4c8348e07ef Mon Sep 17 00:00:00 2001 From: Casey Waldren Date: Thu, 12 Sep 2024 12:39:38 -0700 Subject: [PATCH 27/62] use top-level cfg.Offline to determine if data system should be enabled --- internal/datasourcev2/polling_data_source.go | 3 +- .../datasourcev2/streaming_data_source.go | 3 +- internal/datastatus/data_status.go | 16 +++ internal/datasystem/data_status.go | 8 +- internal/datasystem/fdv1_datasystem.go | 2 +- internal/datasystem/fdv2_datasystem.go | 75 +++++------ internal/datasystem/fdv2_store.go | 54 ++++---- internal/datasystem/fdv2_store_test.go | 118 ++++++------------ ldclient.go | 12 +- .../data_system_configuration_builder.go | 11 -- subsystems/client_context.go | 5 +- subsystems/data_destination.go | 3 +- subsystems/data_source.go | 7 +- subsystems/datasystem_configuration.go | 1 - 14 files changed, 133 insertions(+), 185 deletions(-) create mode 100644 internal/datastatus/data_status.go diff --git a/internal/datasourcev2/polling_data_source.go b/internal/datasourcev2/polling_data_source.go index e5910477..0d623c76 100644 --- a/internal/datasourcev2/polling_data_source.go +++ b/internal/datasourcev2/polling_data_source.go @@ -2,6 +2,7 @@ package datasourcev2 import ( "context" + "github.com/launchdarkly/go-server-sdk/v7/internal/datastatus" "sync" "time" @@ -81,7 +82,7 @@ func (pp *PollingProcessor) Fetch(ctx context.Context) (*subsystems.InitialPaylo if err != nil { return nil, err } - return &subsystems.InitialPayload{Data: allData, Authoritative: true, Version: nil}, nil + return &subsystems.InitialPayload{Data: allData, Status: datastatus.Authoritative, Version: nil}, nil } //nolint:revive // DataSynchronizer method. 
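For orientation: the InitialPayload above is the unit of data an initializer hands to the data system, and its Status field is what drives the Defaults/Cached/Refreshed transitions introduced later in this patch. A minimal sketch of a custom initializer returning non-authoritative data, assuming the DataInitializer interface inferred from its call sites (Name() string and Fetch(ctx) (*subsystems.InitialPayload, error)); fileInitializer and its field are hypothetical, and because datastatus is an internal package, code like this would only compile inside the SDK module:

// Sketch: a non-authoritative initializer (hypothetical; not part of this patch).
// Imports assumed: context, internal/datastatus, subsystems, subsystems/ldstoretypes.
type fileInitializer struct {
	data []ldstoretypes.Collection // parsed from a local file ahead of time
}

func (f *fileInitializer) Name() string { return "file-initializer" }

func (f *fileInitializer) Fetch(_ context.Context) (*subsystems.InitialPayload, error) {
	return &subsystems.InitialPayload{
		Data:    f.data,
		Version: nil, // no payload version is known for local data
		Status:  datastatus.Derivative, // possibly stale: the store reports Cached, not Refreshed
	}, nil
}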
diff --git a/internal/datasourcev2/streaming_data_source.go b/internal/datasourcev2/streaming_data_source.go index 67555e14..c5dbee9c 100644 --- a/internal/datasourcev2/streaming_data_source.go +++ b/internal/datasourcev2/streaming_data_source.go @@ -4,6 +4,7 @@ import ( "context" "encoding/json" "errors" + "github.com/launchdarkly/go-server-sdk/v7/internal/datastatus" "net/http" "net/url" "strings" @@ -281,7 +282,7 @@ func (sp *StreamProcessor) consumeStream(stream *es.Stream, closeWhenReady chan< storeUpdateFailed("streaming update of " + u.Key) } case datasource.PutData: - if sp.dataDestination.Init(u.Data) { + if sp.dataDestination.Init(u.Data, datastatus.Authoritative) { sp.setInitializedAndNotifyClient(true, closeWhenReady) } else { //TODO: indicate that this can't actually fail anymore from the perspective of the data source diff --git a/internal/datastatus/data_status.go b/internal/datastatus/data_status.go new file mode 100644 index 00000000..eb6275b4 --- /dev/null +++ b/internal/datastatus/data_status.go @@ -0,0 +1,16 @@ +package datastatus + +type DataStatus string + +const ( + // Unknown means there is no known status. + Unknown = DataStatus("unknown") + // Authoritative means the data is from an authoritative source. Authoritative data may be replicated + // from the SDK into any connected persistent store (in write mode), and causes the SDK to transition from + // the Defaults/Cached states to Refreshed. + Authoritative = DataStatus("authoritative") + // Derivative means the data may be stale, such as from a local file or persistent store. Derivative data + // is not replicated to any connected persistent store, and causes the SDK to transition from the Defaults + // state to Cached only. + Derivative = DataStatus("derivative") +) diff --git a/internal/datasystem/data_status.go b/internal/datasystem/data_status.go index 9e4e4229..630a10aa 100644 --- a/internal/datasystem/data_status.go +++ b/internal/datasystem/data_status.go @@ -1,12 +1,12 @@ package datasystem -type DataStatus string +type DataAvailability string const ( // Defaults means the SDK has no data and will evaluate flags using the application-provided default values. - Defaults = DataStatus("defaults") + Defaults = DataAvailability("defaults") // Cached means the SDK has data, not necessarily the latest, which will be used to evaluate flags. - Cached = DataStatus("cached") + Cached = DataAvailability("cached") // Refreshed means the SDK has obtained, at least once, the latest known data from LaunchDarkly. - Refreshed = DataStatus("refreshed") + Refreshed = DataAvailability("refreshed") ) diff --git a/internal/datasystem/fdv1_datasystem.go b/internal/datasystem/fdv1_datasystem.go index e56ef624..3e4bfd42 100644 --- a/internal/datasystem/fdv1_datasystem.go +++ b/internal/datasystem/fdv1_datasystem.go @@ -133,7 +133,7 @@ func (f *FDv1) Offline() bool { return f.offline || f.dataSource == datasource.NewNullDataSource() } -func (f *FDv1) DataStatus() DataStatus { +func (f *FDv1) DataAvailability() DataAvailability { if f.Offline() { return Defaults } diff --git a/internal/datasystem/fdv2_datasystem.go b/internal/datasystem/fdv2_datasystem.go index 407a59f0..0da1849b 100644 --- a/internal/datasystem/fdv2_datasystem.go +++ b/internal/datasystem/fdv2_datasystem.go @@ -36,7 +36,7 @@ type FDv2 struct { secondarySync subsystems.DataSynchronizer // Whether the SDK should make use of persistent store/initializers/synchronizers or not. 
- offline bool + disabled bool loggers ldlog.Loggers @@ -65,14 +65,7 @@ type FDv2 struct { dataSourceStatusProvider *dataStatusProvider } -type nullStatusReporter struct { -} - -func (n *nullStatusReporter) UpdateStatus(status interfaces.DataSourceState, err interfaces.DataSourceErrorInfo) { - // no-op -} - -func NewFDv2(cfgBuilder subsystems.ComponentConfigurer[subsystems.DataSystemConfiguration], clientContext *internal.ClientContextImpl) (*FDv2, error) { +func NewFDv2(disabled bool, cfgBuilder subsystems.ComponentConfigurer[subsystems.DataSystemConfiguration], clientContext *internal.ClientContextImpl) (*FDv2, error) { store := NewStore(clientContext.GetLogging().Loggers) @@ -82,35 +75,36 @@ func NewFDv2(cfgBuilder subsystems.ComponentConfigurer[subsystems.DataSystemConf flagChangeEvent: internal.NewBroadcaster[interfaces.FlagChangeEvent](), } + fdv2 := &FDv2{ + store: store, + loggers: clientContext.GetLogging().Loggers, + broadcasters: bcasters, + dataSourceStatusProvider: &dataStatusProvider{}, + } + + // Yay circular reference. + fdv2.dataSourceStatusProvider.system = fdv2 + dataStoreUpdateSink := datastore.NewDataStoreUpdateSinkImpl(bcasters.dataStoreStatus) clientContextCopy := *clientContext clientContextCopy.DataStoreUpdateSink = dataStoreUpdateSink clientContextCopy.DataDestination = store - clientContextCopy.DataSourceStatusReporter = &nullStatusReporter{} + clientContextCopy.DataSourceStatusReporter = fdv2 cfg, err := cfgBuilder.Build(clientContextCopy) if err != nil { return nil, err } - fdv2 := &FDv2{ - store: store, - initializers: cfg.Initializers, - primarySync: cfg.Synchronizers.Primary, - secondarySync: cfg.Synchronizers.Secondary, - offline: cfg.Offline, - loggers: clientContext.GetLogging().Loggers, - broadcasters: bcasters, - dataSourceStatusProvider: &dataStatusProvider{}, - } + fdv2.initializers = cfg.Initializers + fdv2.primarySync = cfg.Synchronizers.Primary + fdv2.secondarySync = cfg.Synchronizers.Secondary + fdv2.disabled = disabled - // Yay circular reference. - fdv2.dataSourceStatusProvider.system = fdv2 - - if cfg.Store != nil { + if cfg.Store != nil && !disabled { // If there's a persistent Store, we should provide a status monitor and inform Store that it's present. fdv2.dataStoreStatusProvider = datastore.NewDataStoreStatusProviderImpl(cfg.Store, dataStoreUpdateSink) - store.SwapToPersistent(cfg.Store, cfg.StoreMode, fdv2.dataStoreStatusProvider) + store.WithPersistence(cfg.Store, cfg.StoreMode, fdv2.dataStoreStatusProvider) } else { // If there's no persistent Store, we still need to satisfy the SDK's public interface of having // a data Store status provider. So we create one that just says "I don't know what's going on". 
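Note on the wiring above: the copied ClientContext is the seam that keeps cfgBuilder.Build generic. The store is installed as the DataDestination and the FDv2 system itself as the DataSourceStatusReporter, so any component builder can pull both from the context without knowing the concrete types. A minimal sketch of a synchronizer builder consuming them (mySyncBuilder and mySynchronizer are hypothetical, and the DataSynchronizer method set is inferred from its uses elsewhere in this patch series):

type mySyncBuilder struct{}

func (b *mySyncBuilder) Build(ctx subsystems.ClientContext) (subsystems.DataSynchronizer, error) {
	return &mySynchronizer{
		destination: ctx.GetDataDestination(),          // the FDv2 store
		reporter:    ctx.GetDataSourceStatusReporter(), // the FDv2 system itself
	}, nil
}

type mySynchronizer struct {
	destination subsystems.DataDestination
	reporter    subsystems.DataSourceStatusReporter
}

// Sync would push updates via destination.Init/Upsert and report state changes
// via reporter.UpdateStatus; these lifecycle stubs only satisfy the inferred interface.
func (s *mySynchronizer) Sync(closeWhenReady chan<- struct{}, _ *int) { close(closeWhenReady) }
func (s *mySynchronizer) IsInitialized() bool                         { return true }
func (s *mySynchronizer) Close() error                                { return nil }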
@@ -127,7 +121,8 @@ func (n noStatusMonitoring) IsStatusMonitoringEnabled() bool { } func (f *FDv2) Start(closeWhenReady chan struct{}) { - if f.offline { + if f.disabled { + f.loggers.Infof("Data system is disabled, SDK will return application-defined default values") close(closeWhenReady) return } @@ -188,9 +183,8 @@ func (f *FDv2) runInitializers(ctx context.Context, closeWhenReady chan struct{} f.loggers.Warnf("Initializer %s failed: %v", initializer, err) continue } - f.loggers.Info("Initialized via %s", initializer.Name()) - f.store.Init(payload.Data) - f.store.SwapToMemory(payload.Authoritative) + f.loggers.Infof("Initialized via %s", initializer.Name()) + f.store.Init(payload.Data, payload.Status) f.readyOnce.Do(func() { close(closeWhenReady) }) @@ -218,16 +212,6 @@ func (f *FDv2) runSynchronizers(ctx context.Context, closeWhenReady chan struct{ for { select { case <-ready: - // SwapToMemory takes a bool representing if the data is "fresh" or not. Fresh meaning we think it's from - // LaunchDarkly and represents the latest available. Here, we're assuming that any data from a synchronizer - // is fresh (since we currently control all the synchronizer implementations.) Theoretically it could be - // not fresh though, like polling some database. - - // TODO: this is an incorrect hack. The responsibility of this loop should be limited to - // calling readyOnce/close. - // To trigger the swapping to the in-memory Store, we need to be independently monitoring the Data Source status - // for "valid" status. This hack will currently swap even if the data source has failed. - f.store.SwapToMemory(true) f.readyOnce.Do(func() { close(closeWhenReady) }) @@ -256,11 +240,8 @@ func (f *FDv2) Store() subsystems.ReadOnlyStore { return f.store } -func (f *FDv2) DataStatus() DataStatus { - if f.offline { - return Defaults - } - return f.store.DataStatus() +func (f *FDv2) DataAvailability() DataAvailability { + return f.store.DataAvailability() } func (f *FDv2) DataSourceStatusBroadcaster() *internal.Broadcaster[interfaces.DataSourceStatus] { @@ -284,7 +265,11 @@ func (f *FDv2) FlagChangeEventBroadcaster() *internal.Broadcaster[interfaces.Fla } func (f *FDv2) Offline() bool { - return f.offline + return f.disabled +} + +func (f *FDv2) UpdateStatus(status interfaces.DataSourceState, err interfaces.DataSourceErrorInfo) { + } type dataStatusProvider struct { diff --git a/internal/datasystem/fdv2_store.go b/internal/datasystem/fdv2_store.go index 226b39ca..772af2d0 100644 --- a/internal/datasystem/fdv2_store.go +++ b/internal/datasystem/fdv2_store.go @@ -1,6 +1,7 @@ package datasystem import ( + "github.com/launchdarkly/go-server-sdk/v7/internal/datastatus" "sync" "github.com/launchdarkly/go-sdk-common/v3/ldlog" @@ -69,7 +70,7 @@ type Store struct { // data from a local file (refreshed=false), we may not want to pollute a connected Redis database with it. // TODO: this could also be called "Authoritative". "It was the latest at some point.. that point being when we asked // if it was the latest". - refreshed bool + availability DataAvailability // Protects the refreshed, persistentStore, persistentStoreMode, and active fields. 
 	mu sync.RWMutex
@@ -84,10 +85,10 @@ func NewStore(loggers ldlog.Loggers) *Store {
 		persistentStore:     nil,
 		persistentStoreMode: subsystems.StoreModeRead,
 		memoryStore:         datastore.NewInMemoryDataStore(loggers),
-		refreshed:           false,
+		availability:        Defaults,
 		loggers:             loggers,
 	}
-	s.SwapToMemory(false)
+	s.active = s.memoryStore
 	return s
 }

@@ -109,18 +110,12 @@ func (s *Store) getActive() subsystems.DataStore {
 	return s.active
 }

-// DataStatus returns the status of the store's data. Defaults means there is no data, Cached means there is
+// DataAvailability returns the status of the store's data. Defaults means there is no data, Cached means there is
 // data, but it's not guaranteed to be recent, and Refreshed means the data has been refreshed from the server.
-func (s *Store) DataStatus() DataStatus {
+func (s *Store) DataAvailability() DataAvailability {
 	s.mu.RLock()
 	defer s.mu.RUnlock()
-	if s.active.IsInitialized() {
-		if s.refreshed {
-			return Refreshed
-		}
-		return Cached
-	}
-	return Defaults
+	return s.availability
 }

 // Mirroring returns true if data is being mirrored to a persistent store.
@@ -129,13 +124,20 @@ func (s *Store) mirroring() bool {
 }

 // nolint:revive // Standard DataSourceUpdateSink method
-func (s *Store) Init(allData []ldstoretypes.Collection) bool {
-	s.mu.RLock()
-	defer s.mu.RUnlock()
+func (s *Store) Init(allData []ldstoretypes.Collection, dataStatus datastatus.DataStatus) bool {
+	s.mu.Lock()
+	defer s.mu.Unlock()

 	// TXNS-PS: Requirement 1.3.3, must apply updates to in-memory before the persistent Store.
 	// TODO: handle errors from initializing the memory or persistent stores.
-	_ = s.memoryStore.Init(allData)
+	if err := s.memoryStore.Init(allData); err == nil {
+		s.active = s.memoryStore
+		if dataStatus == datastatus.Authoritative {
+			s.availability = Refreshed
+		} else {
+			s.availability = Cached
+		}
+	}

 	if s.mirroring() {
 		_ = s.persistentStore.Init(allData) // TODO: insert in topo-sort order
@@ -169,31 +171,31 @@ func (s *Store) GetDataStoreStatusProvider() interfaces.DataStoreStatusProvider {
 	return s.persistentStoreStatusProvider
 }

-// SwapToPersistent exists only because of the weird way the Go SDK is configured - we need a ClientContext
+// WithPersistence exists only because of the way the SDK's configuration builders work - we need a ClientContext
 // before we can call Build to actually get the persistent store. That ClientContext requires the
 // DataStoreUpdateSink, which is what this store struct implements.
-func (s *Store) SwapToPersistent(persistent subsystems.DataStore, mode subsystems.StoreMode, statusProvider interfaces.DataStoreStatusProvider) {
+func (s *Store) WithPersistence(persistent subsystems.DataStore, mode subsystems.StoreMode, statusProvider interfaces.DataStoreStatusProvider) *Store {
 	s.mu.Lock()
 	defer s.mu.Unlock()
 	s.persistentStore = persistent
 	s.persistentStoreMode = mode
 	s.persistentStoreStatusProvider = statusProvider
 	s.active = s.persistentStore
-}

-func (s *Store) SwapToMemory(isRefreshed bool) {
-	s.mu.Lock()
-	defer s.mu.Unlock()
-	s.refreshed = isRefreshed
-	s.active = s.memoryStore
+	if s.persistentStore.IsInitialized() {
+		s.availability = Cached
+	} else {
+		s.availability = Defaults
+	}
+	return s
 }

 func (s *Store) Commit() error {
 	s.mu.RLock()
 	defer s.mu.RUnlock()

-	// Note: DataStatus() will also take a read lock.
-	if s.DataStatus() == Refreshed && s.mirroring() {
+	// Note: DataAvailability() will also take a read lock.
+ if s.availability == Refreshed && s.mirroring() { flags, err := s.memoryStore.GetAll(datakinds.Features) if err != nil { return err diff --git a/internal/datasystem/fdv2_store_test.go b/internal/datasystem/fdv2_store_test.go index ceb17e86..941414ee 100644 --- a/internal/datasystem/fdv2_store_test.go +++ b/internal/datasystem/fdv2_store_test.go @@ -2,14 +2,14 @@ package datasystem import ( "errors" + "github.com/launchdarkly/go-server-sdk/v7/internal/datastatus" + "github.com/launchdarkly/go-server-sdk/v7/subsystems" + "github.com/stretchr/testify/require" "math/rand" "sync" "testing" "time" - "github.com/launchdarkly/go-server-sdk/v7/subsystems" - "github.com/stretchr/testify/require" - "github.com/launchdarkly/go-sdk-common/v3/ldlogtest" "github.com/launchdarkly/go-server-sdk/v7/subsystems/ldstoreimpl" "github.com/launchdarkly/go-server-sdk/v7/subsystems/ldstoretypes" @@ -26,7 +26,7 @@ func TestStore_NoPersistence_NewStore_DataStatus(t *testing.T) { logCapture := ldlogtest.NewMockLog() store := NewStore(logCapture.Loggers) defer store.Close() - assert.Equal(t, store.DataStatus(), Defaults) + assert.Equal(t, store.DataAvailability(), Defaults) } func TestStore_NoPersistence_NewStore_IsInitialized(t *testing.T) { @@ -38,31 +38,25 @@ func TestStore_NoPersistence_NewStore_IsInitialized(t *testing.T) { func TestStore_NoPersistence_MemoryStoreInitialized_DataStatus(t *testing.T) { tests := []struct { - name string - refreshed bool - expected DataStatus + name string + datastatus datastatus.DataStatus + expected DataAvailability }{ - {"fresh data", true, Refreshed}, - {"cached data", false, Cached}, + {"fresh data", datastatus.Authoritative, Refreshed}, + {"stale data", datastatus.Derivative, Cached}, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { logCapture := ldlogtest.NewMockLog() store := NewStore(logCapture.Loggers) defer store.Close() - store.Init([]ldstoretypes.Collection{}) - assert.Equal(t, store.DataStatus(), Cached) + store.Init([]ldstoretypes.Collection{}, tt.datastatus) + assert.Equal(t, store.DataAvailability(), tt.expected) assert.True(t, store.IsInitialized()) - store.SwapToMemory(tt.refreshed) - assert.Equal(t, store.DataStatus(), tt.expected) }) } } -func TestStore_NoPersistence_Commit_NoCrashesCaused(t *testing.T) { - -} - func TestStore_Commit(t *testing.T) { t.Run("no persistent store doesn't cause an error", func(t *testing.T) { logCapture := ldlogtest.NewMockLog() @@ -73,7 +67,10 @@ func TestStore_Commit(t *testing.T) { t.Run("refreshed memory items are copied to persistent store in r/w mode", func(t *testing.T) { logCapture := ldlogtest.NewMockLog() - store := NewStore(logCapture.Loggers) + + spy := &fakeStore{isDown: true} + + store := NewStore(logCapture.Loggers).WithPersistence(spy, subsystems.StoreModeReadWrite, nil) defer store.Close() initPayload := []ldstoretypes.Collection{ @@ -85,19 +82,12 @@ func TestStore_Commit(t *testing.T) { }}, } - assert.True(t, store.Init(initPayload)) - - spy := &fakeStore{} - // This is kind of awkward, but the idea is to simulate a data store outage. Therefore, we can't have the - // persistent store already configured at the start of the test (or else the data would have been inserted - // automatically.) This way, it should be empty before the commit, and we'll assert that fact. - store.SwapToPersistent(spy, subsystems.StoreModeReadWrite, nil) - // Need to set refreshed == true, otherwise nothing will be commited. This stops stale data from polluting - // the database. 
- store.SwapToMemory(true) + assert.True(t, store.Init(initPayload, datastatus.Authoritative)) require.Empty(t, spy.initPayload) + spy.isDown = false + require.NoError(t, store.Commit()) assert.Equal(t, initPayload, spy.initPayload) @@ -105,7 +95,8 @@ func TestStore_Commit(t *testing.T) { t.Run("stale memory items are not copied to persistent store in r/w mode", func(t *testing.T) { logCapture := ldlogtest.NewMockLog() - store := NewStore(logCapture.Loggers) + spy := &fakeStore{} + store := NewStore(logCapture.Loggers).WithPersistence(&fakeStore{}, subsystems.StoreModeReadWrite, nil) defer store.Close() initPayload := []ldstoretypes.Collection{ @@ -117,16 +108,7 @@ func TestStore_Commit(t *testing.T) { }}, } - assert.True(t, store.Init(initPayload)) - - spy := &fakeStore{} - // This is kind of awkward, but the idea is to simulate a data store outage. Therefore, we can't have the - // persistent store already configured at the start of the test (or else the data would have been inserted - // automatically.) This way, it should be empty before the commit, and we'll assert that fact. - store.SwapToPersistent(spy, subsystems.StoreModeReadWrite, nil) - - // Need to set refreshed == false, which should make Commit a no-op. - store.SwapToMemory(false) + assert.True(t, store.Init(initPayload, datastatus.Derivative)) require.Empty(t, spy.initPayload) @@ -137,7 +119,8 @@ func TestStore_Commit(t *testing.T) { t.Run("refreshed memory items are not copied to persistent store in r-only mode", func(t *testing.T) { logCapture := ldlogtest.NewMockLog() - store := NewStore(logCapture.Loggers) + spy := &fakeStore{} + store := NewStore(logCapture.Loggers).WithPersistence(spy, subsystems.StoreModeRead, nil) defer store.Close() initPayload := []ldstoretypes.Collection{ @@ -149,16 +132,7 @@ func TestStore_Commit(t *testing.T) { }}, } - assert.True(t, store.Init(initPayload)) - - spy := &fakeStore{} - // This is kind of awkward, but the idea is to simulate a data store outage. Therefore, we can't have the - // persistent store already configured at the start of the test (or else the data would have been inserted - // automatically.) This way, it should be empty before the commit, and we'll assert that fact. - store.SwapToPersistent(spy, subsystems.StoreModeRead, nil) - // Need to set refreshed == true, otherwise nothing will be commited. This stops stale data from polluting - // the database. 
- store.SwapToMemory(true) + assert.True(t, store.Init(initPayload, datastatus.Authoritative)) require.Empty(t, spy.initPayload) @@ -181,7 +155,7 @@ func TestStore_GetActive(t *testing.T) { {Kind: ldstoreimpl.Features(), Items: []ldstoretypes.KeyedItemDescriptor{ {Key: "foo", Item: ldstoretypes.ItemDescriptor{Version: 1}}, }}, - })) + }, datastatus.Authoritative)) foo, err = store.Get(ldstoreimpl.Features(), "foo") assert.NoError(t, err) @@ -190,38 +164,26 @@ func TestStore_GetActive(t *testing.T) { t.Run("persistent store is active if configured", func(t *testing.T) { logCapture := ldlogtest.NewMockLog() - store := NewStore(logCapture.Loggers) + store := NewStore(logCapture.Loggers).WithPersistence(&fakeStore{}, subsystems.StoreModeReadWrite, nil) defer store.Close() - assert.True(t, store.Init([]ldstoretypes.Collection{ - {Kind: ldstoreimpl.Features(), Items: []ldstoretypes.KeyedItemDescriptor{ - {Key: "foo", Item: ldstoretypes.ItemDescriptor{Version: 1}}, - }}, - })) - - store.SwapToPersistent(&fakeStore{}, subsystems.StoreModeReadWrite, nil) - _, err := store.Get(ldstoreimpl.Features(), "foo") assert.Equal(t, errImAPersistentStore, err) }) t.Run("active store swaps from persistent to memory", func(t *testing.T) { logCapture := ldlogtest.NewMockLog() - store := NewStore(logCapture.Loggers) + store := NewStore(logCapture.Loggers).WithPersistence(&fakeStore{}, subsystems.StoreModeReadWrite, nil) defer store.Close() + _, err := store.Get(ldstoreimpl.Features(), "foo") + assert.Equal(t, errImAPersistentStore, err) + assert.True(t, store.Init([]ldstoretypes.Collection{ {Kind: ldstoreimpl.Features(), Items: []ldstoretypes.KeyedItemDescriptor{ {Key: "foo", Item: ldstoretypes.ItemDescriptor{Version: 1}}, }}, - })) - - store.SwapToPersistent(&fakeStore{}, subsystems.StoreModeReadWrite, nil) - - _, err := store.Get(ldstoreimpl.Features(), "foo") - assert.Equal(t, errImAPersistentStore, err) - - store.SwapToMemory(false) + }, datastatus.Authoritative)) foo, err := store.Get(ldstoreimpl.Features(), "foo") assert.NoError(t, err) @@ -240,17 +202,7 @@ func TestStore_Concurrency(t *testing.T) { wg.Add(1) defer wg.Done() for i := 0; i < 100; i++ { - store.SwapToMemory(true) - time.Sleep(time.Duration(rand.Intn(10)) * time.Millisecond) - store.SwapToPersistent(&fakeStore{}, subsystems.StoreModeReadWrite, nil) - time.Sleep(time.Duration(rand.Intn(10)) * time.Millisecond) - } - }() - go func() { - wg.Add(1) - defer wg.Done() - for i := 0; i < 100; i++ { - _ = store.DataStatus() + _ = store.DataAvailability() time.Sleep(time.Duration(rand.Intn(10)) * time.Millisecond) } }() @@ -283,7 +235,7 @@ func TestStore_Concurrency(t *testing.T) { wg.Add(1) defer wg.Done() for i := 0; i < 100; i++ { - _ = store.Init([]ldstoretypes.Collection{}) + _ = store.Init([]ldstoretypes.Collection{}, datastatus.Authoritative) time.Sleep(time.Duration(rand.Intn(10)) * time.Millisecond) } }() @@ -292,6 +244,7 @@ func TestStore_Concurrency(t *testing.T) { type fakeStore struct { initPayload []ldstoretypes.Collection + isDown bool } var errImAPersistentStore = errors.New("i'm a persistent store") @@ -309,6 +262,9 @@ func (f *fakeStore) IsInitialized() bool { } func (f *fakeStore) Init(allData []ldstoretypes.Collection) error { + if f.isDown { + return errors.New("store is down") + } f.initPayload = allData return nil } diff --git a/ldclient.go b/ldclient.go index b69896b6..ca3eea49 100644 --- a/ldclient.go +++ b/ldclient.go @@ -87,7 +87,7 @@ type dataSystem interface { Store() subsystems.ReadOnlyStore - DataStatus() 
datasystem.DataStatus + DataAvailability() datasystem.DataAvailability } var ( @@ -251,7 +251,7 @@ func MakeCustomClient(sdkKey string, config Config, waitFor time.Duration) (*LDC } client.dataSystem = system } else { - system, err := datasystem.NewFDv2(config.DataSystem, clientContext) + system, err := datasystem.NewFDv2(config.Offline, config.DataSystem, clientContext) if err != nil { return nil, err } @@ -344,7 +344,7 @@ func MakeCustomClient(sdkKey string, config Config, waitFor time.Duration) (*LDC for { select { case <-closeWhenReady: - if client.dataSystem.DataStatus() != datasystem.Refreshed { + if client.dataSystem.DataAvailability() != datasystem.Refreshed { loggers.Warn("LaunchDarkly client initialization failed") return client, ErrInitializationFailed } @@ -570,7 +570,7 @@ func (client *LDClient) SecureModeHash(context ldcontext.Context) string { // already been stored in the database by a successfully connected SDK in the past. You can use // [LDClient.GetDataSourceStatusProvider] to get information on errors, or to wait for a successful retry. func (client *LDClient) Initialized() bool { - return client.dataSystem.DataStatus() == datasystem.Refreshed + return client.dataSystem.DataAvailability() == datasystem.Refreshed } // Close shuts down the LaunchDarkly client. After calling this, the LaunchDarkly client @@ -655,7 +655,7 @@ func (client *LDClient) AllFlagsState(context ldcontext.Context, options ...flag client.loggers.Warn("Called AllFlagsState in offline mode. Returning empty state") valid = false } else if !client.Initialized() { - if client.dataSystem.DataStatus() == datasystem.Cached { + if client.dataSystem.DataAvailability() == datasystem.Cached { client.loggers.Warn("Called AllFlagsState before client initialization; using last known values from data store") } else { client.loggers.Warn("Called AllFlagsState before client initialization. Data store not available; returning empty state") //nolint:lll @@ -1251,7 +1251,7 @@ func (client *LDClient) evaluateInternal( } if !client.Initialized() { - if client.dataSystem.DataStatus() == datasystem.Cached { + if client.dataSystem.DataAvailability() == datasystem.Cached { client.loggers.Warn("Feature flag evaluation called before LaunchDarkly client initialization completed; using last known values from data store") //nolint:lll } else { return evalErrorResult(ldreason.EvalErrorClientNotReady, nil, ErrClientNotInitialized) diff --git a/ldcomponents/data_system_configuration_builder.go b/ldcomponents/data_system_configuration_builder.go index 70f54959..ab3c87b8 100644 --- a/ldcomponents/data_system_configuration_builder.go +++ b/ldcomponents/data_system_configuration_builder.go @@ -57,12 +57,6 @@ func PersistentStoreMode(store ss.ComponentConfigurer[ss.DataStore]) *DataSystem return StreamingMode().DataStore(store, ss.StoreModeReadWrite) } -// Offline configures the SDK to evaluate flags using only the default values defined in the application code. No -// outbound connections will be made by the SDK. 
-func Offline() *DataSystemConfigurationBuilder { - return UnconfiguredDataSystem().Offline(true) -} - func (d *DataSystemConfigurationBuilder) DataStore(store ss.ComponentConfigurer[ss.DataStore], storeMode ss.StoreMode) *DataSystemConfigurationBuilder { d.storeBuilder = store d.storeMode = storeMode @@ -80,11 +74,6 @@ func (d *DataSystemConfigurationBuilder) Synchronizers(primary, secondary ss.Com return d } -func (d *DataSystemConfigurationBuilder) Offline(offline bool) *DataSystemConfigurationBuilder { - d.config.Offline = offline - return d -} - func (d *DataSystemConfigurationBuilder) Build( context ss.ClientContext, ) (ss.DataSystemConfiguration, error) { diff --git a/subsystems/client_context.go b/subsystems/client_context.go index aab660a1..c3408943 100644 --- a/subsystems/client_context.go +++ b/subsystems/client_context.go @@ -100,10 +100,7 @@ func (b BasicClientContext) GetDataStoreUpdateSink() DataStoreUpdateSink { //nol } func (b BasicClientContext) GetDataDestination() DataDestination { - if b.DataDestination != nil { - return b.DataDestination - } - return b.DataSourceUpdateSink + return b.DataDestination } func (b BasicClientContext) GetDataSourceStatusReporter() DataSourceStatusReporter { diff --git a/subsystems/data_destination.go b/subsystems/data_destination.go index a6e94df6..450b43f5 100644 --- a/subsystems/data_destination.go +++ b/subsystems/data_destination.go @@ -1,6 +1,7 @@ package subsystems import ( + "github.com/launchdarkly/go-server-sdk/v7/internal/datastatus" "github.com/launchdarkly/go-server-sdk/v7/subsystems/ldstoretypes" ) @@ -11,7 +12,7 @@ type DataDestination interface { // and set the data source state to DataSourceStateInterrupted with an error of // DataSourceErrorKindStoreError. It will not return the error to the data source, but will // return false to indicate that the operation failed. - Init(allData []ldstoretypes.Collection) bool + Init(allData []ldstoretypes.Collection, status datastatus.DataStatus) bool // Upsert updates or inserts an item in the specified collection. For updates, the object will only be // updated if the existing version is less than the new version. diff --git a/subsystems/data_source.go b/subsystems/data_source.go index f56b3ec1..60df291d 100644 --- a/subsystems/data_source.go +++ b/subsystems/data_source.go @@ -2,6 +2,7 @@ package subsystems import ( "context" + "github.com/launchdarkly/go-server-sdk/v7/internal/datastatus" "io" "github.com/launchdarkly/go-server-sdk/v7/subsystems/ldstoretypes" @@ -25,9 +26,9 @@ type DataSource interface { } type InitialPayload struct { - Data []ldstoretypes.Collection - Version *int - Authoritative bool + Data []ldstoretypes.Collection + Version *int + Status datastatus.DataStatus } type DataInitializer interface { diff --git a/subsystems/datasystem_configuration.go b/subsystems/datasystem_configuration.go index ae01e01d..c23e7fdc 100644 --- a/subsystems/datasystem_configuration.go +++ b/subsystems/datasystem_configuration.go @@ -19,5 +19,4 @@ type DataSystemConfiguration struct { // into a state where it is serving somewhat fresh values as fast as possible. 
Initializers []DataInitializer Synchronizers SynchronizersConfiguration - Offline bool } From 155b24adbf1527a4489231fa6088ce8bbc00e8af Mon Sep 17 00:00:00 2001 From: Casey Waldren Date: Thu, 12 Sep 2024 14:46:24 -0700 Subject: [PATCH 28/62] add dedicated FDv2 streaming protocol builder for unit tests --- ldclient_end_to_end_fdv2_test.go | 42 ++------ testhelpers/ldservices/server_sdk_data.go | 30 ++---- .../streaming_protocol_builder.go | 96 +++++++++++++++++++ 3 files changed, 114 insertions(+), 54 deletions(-) create mode 100644 testhelpers/ldservicesv2/streaming_protocol_builder.go diff --git a/ldclient_end_to_end_fdv2_test.go b/ldclient_end_to_end_fdv2_test.go index 29ee7fc7..cdeae387 100644 --- a/ldclient_end_to_end_fdv2_test.go +++ b/ldclient_end_to_end_fdv2_test.go @@ -1,8 +1,8 @@ package ldclient import ( - "encoding/json" "github.com/launchdarkly/go-server-sdk/v7/internal/datasourcev2" + "github.com/launchdarkly/go-server-sdk/v7/testhelpers/ldservicesv2" "net/http/httptest" "testing" "time" @@ -19,40 +19,18 @@ import ( ) func TestFDV2DefaultDataSourceIsStreaming(t *testing.T) { - requireIntent := func(t *testing.T, code string, reason string) httphelpers.SSEEvent { - intent := datasourcev2.ServerIntent{Payloads: []datasourcev2.Payload{ - {ID: "fake-id", Target: 0, Code: code, Reason: reason}, - }} - intentData, err := json.Marshal(intent) - require.NoError(t, err) - return httphelpers.SSEEvent{ - Event: "server-intent", - Data: string(intentData), - } - } - - requireTransferred := func(t *testing.T) httphelpers.SSEEvent { - type payloadTransferred struct { - State string `json:"state"` - Version int `json:"version"` - } - transferredData, err := json.Marshal(payloadTransferred{State: "[p:17YNC7XBH88Y6RDJJ48EKPCJS7:53]", Version: 1}) - require.NoError(t, err) - return httphelpers.SSEEvent{ - Event: "payload-transferred", - Data: string(transferredData), - } - } + data := ldservices.NewServerSDKData().Flags(&alwaysTrueFlag) - intent := requireIntent(t, "xfer-full", "payload-missing") + protocol := ldservicesv2.NewStreamingProtocol(). + WithIntent(datasourcev2.ServerIntent{Payloads: []datasourcev2.Payload{ + {ID: "fake-id", Target: 0, Code: "xfer-full", Reason: "payload-missing"}, + }}). + WithPutObjects(data.ToBaseObjects()). + WithTransferred() - data := ldservices.NewServerSDKData().Flags(&alwaysTrueFlag) + streamHandler, streamSender := ldservices.ServerSideStreamingServiceHandler(protocol.Next()) - streamHandler, streamSender := ldservices.ServerSideStreamingServiceHandler(intent) - for _, object := range data.ToPutObjects() { - streamSender.Enqueue(object) - } - streamSender.Enqueue(requireTransferred(t)) + protocol.Enqueue(streamSender) httphelpers.WithServer(streamHandler, func(streamServer *httptest.Server) { logCapture := ldlogtest.NewMockLog() diff --git a/testhelpers/ldservices/server_sdk_data.go b/testhelpers/ldservices/server_sdk_data.go index dad1e322..fb18a193 100644 --- a/testhelpers/ldservices/server_sdk_data.go +++ b/testhelpers/ldservices/server_sdk_data.go @@ -4,6 +4,7 @@ import ( "encoding/json" "fmt" "github.com/launchdarkly/go-sdk-common/v3/ldvalue" + "github.com/launchdarkly/go-server-sdk/v7/testhelpers/ldservicesv2" "github.com/launchdarkly/go-test-helpers/v3/httphelpers" "github.com/launchdarkly/go-test-helpers/v3/jsonhelpers" ) @@ -82,40 +83,25 @@ func (s *ServerSDKData) ToPutEvent() httphelpers.SSEEvent { } } -// TODO: Refactor into dedicated FDv2 testing support package. 
-func (s *ServerSDKData) ToPutObjects() []httphelpers.SSEEvent { - type baseObject struct { - Version int `json:"version"` - Kind string `json:"kind"` - Key string `json:"key"` - Object json.RawMessage `json:"object"` - } - var puts []httphelpers.SSEEvent +func (s *ServerSDKData) ToBaseObjects() []ldservicesv2.BaseObject { + var objs []ldservicesv2.BaseObject for _, flag := range s.FlagsMap { - base := baseObject{ + base := ldservicesv2.BaseObject{ Version: 1, Kind: "flag", Key: getKeyFromJSON(flag), Object: jsonhelpers.ToJSON(flag), } - data, _ := json.Marshal(base) - puts = append(puts, httphelpers.SSEEvent{ - Event: "put-object", - Data: string(data), - }) + objs = append(objs, base) } for _, segment := range s.SegmentsMap { - base := baseObject{ + base := ldservicesv2.BaseObject{ Version: 1, Kind: "segment", Key: getKeyFromJSON(segment), Object: jsonhelpers.ToJSON(segment), } - data, _ := json.Marshal(base) - puts = append(puts, httphelpers.SSEEvent{ - Event: "put-object", - Data: string(data), - }) + objs = append(objs, base) } - return puts + return objs } diff --git a/testhelpers/ldservicesv2/streaming_protocol_builder.go b/testhelpers/ldservicesv2/streaming_protocol_builder.go new file mode 100644 index 00000000..156f2e24 --- /dev/null +++ b/testhelpers/ldservicesv2/streaming_protocol_builder.go @@ -0,0 +1,96 @@ +package ldservicesv2 + +import ( + "encoding/json" + "github.com/launchdarkly/go-server-sdk/v7/internal/datasourcev2" + "github.com/launchdarkly/go-test-helpers/v3/httphelpers" +) + +type ProtocolEvents []httphelpers.SSEEvent + +func (p ProtocolEvents) Enqueue(control httphelpers.SSEStreamControl) { + for _, msg := range p { + control.Enqueue(msg) + } +} + +type protoState string + +const ( + start = protoState("start") + intentSent = protoState("intent-sent") + transferred = protoState("transferred") +) + +type BaseObject struct { + Version int `json:"version"` + Kind string `json:"kind"` + Key string `json:"key"` + Object json.RawMessage `json:"object"` +} + +type event struct { + name string + data BaseObject +} + +type payloadTransferred struct { + State string `json:"state"` + Version int `json:"version"` +} + +type StreamingProtocol struct { + events []httphelpers.SSEEvent +} + +func NewStreamingProtocol() *StreamingProtocol { + return &StreamingProtocol{} +} + +func (f *StreamingProtocol) WithIntent(intent datasourcev2.ServerIntent) *StreamingProtocol { + return f.pushEvent("server-intent", intent) +} + +func (f *StreamingProtocol) WithPutObject(object BaseObject) *StreamingProtocol { + return f.pushEvent("put-object", object) +} + +func (f *StreamingProtocol) WithTransferred() *StreamingProtocol { + return f.pushEvent("payload-transferred", payloadTransferred{State: "[p:17YNC7XBH88Y6RDJJ48EKPCJS7:53]", Version: 1}) +} + +func (f *StreamingProtocol) WithPutObjects(objects []BaseObject) *StreamingProtocol { + for _, object := range objects { + f.WithPutObject(object) + } + return f +} + +func (f *StreamingProtocol) pushEvent(event string, data any) *StreamingProtocol { + marshalled, err := json.Marshal(data) + if err != nil { + panic(err) + } + f.events = append(f.events, httphelpers.SSEEvent{Event: event, Data: string(marshalled)}) + return f +} + +func (f *StreamingProtocol) HasNext() bool { + return len(f.events) != 0 +} + +func (f *StreamingProtocol) Next() httphelpers.SSEEvent { + if !f.HasNext() { + panic("protocol has no events") + } + event := f.events[0] + f.events = f.events[1:] + return event +} + +func (f *StreamingProtocol) Enqueue(control 
httphelpers.SSEStreamControl) { + for _, event := range f.events { + control.Enqueue(event) + } + f.events = nil +} From 543b387f38057dd120fe1c444bb132161a46c2d3 Mon Sep 17 00:00:00 2001 From: Casey Waldren Date: Fri, 13 Sep 2024 14:58:30 -0700 Subject: [PATCH 29/62] add another unit test --- internal/datasystem/fdv2_datasystem.go | 4 +- ldclient_end_to_end_fdv2_test.go | 45 ++++++++++++++- .../data_system_configuration_builder.go | 56 +++++++++++-------- 3 files changed, 78 insertions(+), 27 deletions(-) diff --git a/internal/datasystem/fdv2_datasystem.go b/internal/datasystem/fdv2_datasystem.go index 0da1849b..91d15455 100644 --- a/internal/datasystem/fdv2_datasystem.go +++ b/internal/datasystem/fdv2_datasystem.go @@ -174,13 +174,13 @@ func (f *FDv2) runPersistentStoreOutageRecovery(ctx context.Context, statuses <- func (f *FDv2) runInitializers(ctx context.Context, closeWhenReady chan struct{}) *int { for _, initializer := range f.initializers { - f.loggers.Infof("Attempting initialization via %s", initializer.Name()) + f.loggers.Infof("Attempting to initialize via %s", initializer.Name()) payload, err := initializer.Fetch(ctx) if errors.Is(err, context.Canceled) { return nil } if err != nil { - f.loggers.Warnf("Initializer %s failed: %v", initializer, err) + f.loggers.Warnf("Initializer %s failed: %v", initializer.Name(), err) continue } f.loggers.Infof("Initialized via %s", initializer.Name()) diff --git a/ldclient_end_to_end_fdv2_test.go b/ldclient_end_to_end_fdv2_test.go index cdeae387..88a7b60a 100644 --- a/ldclient_end_to_end_fdv2_test.go +++ b/ldclient_end_to_end_fdv2_test.go @@ -1,6 +1,7 @@ package ldclient import ( + "github.com/launchdarkly/go-sdk-common/v3/ldlog" "github.com/launchdarkly/go-server-sdk/v7/internal/datasourcev2" "github.com/launchdarkly/go-server-sdk/v7/testhelpers/ldservicesv2" "net/http/httptest" @@ -40,7 +41,7 @@ func TestFDV2DefaultDataSourceIsStreaming(t *testing.T) { Events: ldcomponents.NoEvents(), Logging: ldcomponents.Logging().Loggers(logCapture.Loggers), ServiceEndpoints: interfaces.ServiceEndpoints{Streaming: streamServer.URL}, - DataSystem: ldcomponents.DataSystem(), + DataSystem: ldcomponents.DataSystem().DefaultMode(), } client, err := MakeCustomClient(testSdkKey, config, time.Second*5) @@ -54,5 +55,47 @@ func TestFDV2DefaultDataSourceIsStreaming(t *testing.T) { assert.True(t, client.Initialized()) }) +} + +func TestFDV2ClientStartsInStreamingMode(t *testing.T) { + data := ldservices.NewServerSDKData().Flags(&alwaysTrueFlag) + protocol := ldservicesv2.NewStreamingProtocol(). + WithIntent(datasourcev2.ServerIntent{Payloads: []datasourcev2.Payload{ + {ID: "fake-id", Target: 0, Code: "xfer-full", Reason: "payload-missing"}, + }}). + WithPutObjects(data.ToBaseObjects()). 
+		WithTransferred()
+
+	streamHandler, streamSender := ldservices.ServerSideStreamingServiceHandler(protocol.Next())
+	protocol.Enqueue(streamSender)
+
+	handler, requestsCh := httphelpers.RecordingHandler(streamHandler)
+	httphelpers.WithServer(handler, func(streamServer *httptest.Server) {
+		logCapture := ldlogtest.NewMockLog()
+		defer logCapture.DumpIfTestFailed(t)
+
+		config := Config{
+			Events:           ldcomponents.NoEvents(),
+			Logging:          ldcomponents.Logging().Loggers(logCapture.Loggers),
+			ServiceEndpoints: interfaces.ServiceEndpoints{Streaming: streamServer.URL},
+			DataSystem:       ldcomponents.DataSystem().StreamingMode(),
+		}
+
+		client, err := MakeCustomClient(testSdkKey, config, time.Second*5)
+		require.NoError(t, err)
+		defer client.Close()
+
+		assert.Equal(t, string(interfaces.DataSourceStateValid), string(client.GetDataSourceStatusProvider().GetStatus().State))
+
+		value, _ := client.BoolVariation(alwaysTrueFlag.Key, testUser, false)
+		assert.True(t, value)
+
+		r := <-requestsCh
+		assert.Equal(t, testSdkKey, r.Request.Header.Get("Authorization"))
+		assertNoMoreRequests(t, requestsCh)
+
+		assert.Len(t, logCapture.GetOutput(ldlog.Error), 0)
+		assert.Len(t, logCapture.GetOutput(ldlog.Warn), 0)
+	})
+}
diff --git a/ldcomponents/data_system_configuration_builder.go b/ldcomponents/data_system_configuration_builder.go
index ab3c87b8..83bc500a 100644
--- a/ldcomponents/data_system_configuration_builder.go
+++ b/ldcomponents/data_system_configuration_builder.go
@@ -15,46 +15,54 @@ type DataSystemConfigurationBuilder struct {
 	config ss.DataSystemConfiguration
 }

-// DataSystem returns a configuration builder that is pre-configured with LaunchDarkly's recommended
-// data acquisition strategy. It is equivalent to StreamingMode().
-//
-// In this mode, the SDK efficiently streams flag/segment data in the background,
-// allowing evaluations to operate on the latest data with no additional latency.
-func DataSystem() *DataSystemConfigurationBuilder {
-	return StreamingMode()
-}
+type DataSystemModes struct{}

-// UnconfiguredDataSystem returns a configuration builder with no options set. It is suitable for
-// building custom use-cases.
-func UnconfiguredDataSystem() *DataSystemConfigurationBuilder {
-	return &DataSystemConfigurationBuilder{}
-}
+// DefaultMode is LaunchDarkly's recommended flag data acquisition strategy. Currently, it operates a
+// two-phase method for obtaining data: first, it requests data from LaunchDarkly's global CDN. Then, it initiates
+// a streaming connection to LaunchDarkly's Flag Delivery services to receive real-time updates. If
+// the streaming connection is interrupted for an extended period of time, the SDK will automatically fall back
+// to polling the global CDN for updates.
+func (d *DataSystemModes) DefaultMode() *DataSystemConfigurationBuilder {
+	return d.CustomMode().
+		Initializers(PollingDataSourceV2().AsInitializer()).Synchronizers(StreamingDataSourceV2(), PollingDataSourceV2())
+}

 // StreamingMode configures the SDK to efficiently stream flag/segment data in the background,
 // allowing evaluations to operate on the latest data with no additional latency.
-func StreamingMode() *DataSystemConfigurationBuilder {
-	return UnconfiguredDataSystem().
- Initializers(PollingDataSourceV2().AsInitializer()).Synchronizers(StreamingDataSourceV2(), PollingDataSourceV2()) +func (d *DataSystemModes) StreamingMode() *DataSystemConfigurationBuilder { + return d.CustomMode().Synchronizers(StreamingDataSourceV2(), nil) } // PollingMode configures the SDK to regularly poll an endpoint for flag/segment data in the background. // This is less efficient than streaming, but may be necessary in some network environments. -func PollingMode() *DataSystemConfigurationBuilder { - return UnconfiguredDataSystem().Synchronizers(PollingDataSourceV2(), nil) +func (d *DataSystemModes) PollingMode() *DataSystemConfigurationBuilder { + return d.CustomMode().Synchronizers(PollingDataSourceV2(), nil) } // DaemonMode configures the SDK to read from a persistent store integration that is populated by Relay Proxy // or other SDKs. The SDK will not connect to LaunchDarkly. In this mode, the SDK never writes to the data store. -func DaemonMode(store ss.ComponentConfigurer[ss.DataStore]) *DataSystemConfigurationBuilder { - return UnconfiguredDataSystem().DataStore(store, ss.StoreModeRead) +func (d *DataSystemModes) DaemonMode(store ss.ComponentConfigurer[ss.DataStore]) *DataSystemConfigurationBuilder { + return d.CustomMode().DataStore(store, ss.StoreModeRead) } -// PersistentStoreMode is similar to the default DataSystem configuration, with the addition of a -// persistent store integration. Before data has arrived from the streaming connection, the SDK is able to -// evaluate flags using data from the persistent store. Once data has arrived from the streaming connection, the SDK +// PersistentStoreMode is similar to DefaultMode, with the addition of a +// persistent store integration. Before data has arrived from LaunchDarkly, the SDK is able to +// evaluate flags using data from the persistent store. Once fresh data is available, the SDK // will no longer read from the persistent store, although it will keep it up-to-date. -func PersistentStoreMode(store ss.ComponentConfigurer[ss.DataStore]) *DataSystemConfigurationBuilder { - return StreamingMode().DataStore(store, ss.StoreModeReadWrite) +func (d *DataSystemModes) PersistentStoreMode(store ss.ComponentConfigurer[ss.DataStore]) *DataSystemConfigurationBuilder { + return d.DefaultMode().DataStore(store, ss.StoreModeReadWrite) +} + +// CustomMode returns a builder suitable for creating a custom data acquisition strategy. You may configure +// how the SDK uses a Persistent Store, how the SDK obtains an initial set of data, and how the SDK keeps data up-to-date. +func (d *DataSystemModes) CustomMode() *DataSystemConfigurationBuilder { + return &DataSystemConfigurationBuilder{} +} + +// DataSystem provides a high-level selection of the SDK's data acquisition strategy. Use the returned builder to select +// a mode, or to create a custom data acquisition strategy. To use LaunchDarkly's recommended mode, use DefaultMode. 
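+//
+// For example, an application would select a mode when constructing its Config
+// (mirroring the usage in the tests above):
+//
+//	config := Config{
+//		DataSystem: ldcomponents.DataSystem().StreamingMode(),
+//	}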
+func DataSystem() *DataSystemModes { + return &DataSystemModes{} } func (d *DataSystemConfigurationBuilder) DataStore(store ss.ComponentConfigurer[ss.DataStore], storeMode ss.StoreMode) *DataSystemConfigurationBuilder { From d30042da1fff2aad2b1147ad4570c4b83722d08f Mon Sep 17 00:00:00 2001 From: Casey Waldren Date: Fri, 13 Sep 2024 17:49:25 -0700 Subject: [PATCH 30/62] more fdv2 parity e2e tests --- internal/datasourcev2/polling_data_source.go | 32 ++-- internal/datasystem/fdv2_datasystem.go | 24 +-- ldclient_end_to_end_fdv2_test.go | 161 +++++++++++++++++- .../data_system_configuration_builder.go | 36 ++-- .../polling_data_source_builder_v2.go | 2 +- 5 files changed, 204 insertions(+), 51 deletions(-) diff --git a/internal/datasourcev2/polling_data_source.go b/internal/datasourcev2/polling_data_source.go index 0d623c76..8fdeeb3b 100644 --- a/internal/datasourcev2/polling_data_source.go +++ b/internal/datasourcev2/polling_data_source.go @@ -34,7 +34,8 @@ type Requester interface { // configuration. All other code outside of this package should interact with it only via the // DataSource interface. type PollingProcessor struct { - dataSourceUpdates subsystems.DataSourceUpdateSink + dataDestination subsystems.DataDestination + statusReporter subsystems.DataSourceStatusReporter requester Requester pollInterval time.Duration loggers ldlog.Loggers @@ -47,25 +48,28 @@ type PollingProcessor struct { // NewPollingProcessor creates the internal implementation of the polling data source. func NewPollingProcessor( context subsystems.ClientContext, - dataSourceUpdates subsystems.DataSourceUpdateSink, + dataDestination subsystems.DataDestination, + statusReporter subsystems.DataSourceStatusReporter, cfg datasource.PollingConfig, ) *PollingProcessor { httpRequester := newPollingRequester(context, context.GetHTTP().CreateHTTPClient(), cfg.BaseURI, cfg.FilterKey) - return newPollingProcessor(context, dataSourceUpdates, httpRequester, cfg.PollInterval) + return newPollingProcessor(context, dataDestination, statusReporter, httpRequester, cfg.PollInterval) } func newPollingProcessor( context subsystems.ClientContext, - dataSourceUpdates subsystems.DataSourceUpdateSink, + dataDestination subsystems.DataDestination, + statusReporter subsystems.DataSourceStatusReporter, requester Requester, pollInterval time.Duration, ) *PollingProcessor { pp := &PollingProcessor{ - dataSourceUpdates: dataSourceUpdates, - requester: requester, - pollInterval: pollInterval, - loggers: context.GetLogging().Loggers, - quit: make(chan struct{}), + dataDestination: dataDestination, + statusReporter: statusReporter, + requester: requester, + pollInterval: pollInterval, + loggers: context.GetLogging().Loggers, + quit: make(chan struct{}), } return pp } @@ -123,9 +127,9 @@ func (pp *PollingProcessor) Sync(closeWhenReady chan<- struct{}, payloadVersion pollingWillRetryMessage, ) if recoverable { - pp.dataSourceUpdates.UpdateStatus(interfaces.DataSourceStateInterrupted, errorInfo) + pp.statusReporter.UpdateStatus(interfaces.DataSourceStateInterrupted, errorInfo) } else { - pp.dataSourceUpdates.UpdateStatus(interfaces.DataSourceStateOff, errorInfo) + pp.statusReporter.UpdateStatus(interfaces.DataSourceStateOff, errorInfo) notifyReady() return } @@ -139,11 +143,11 @@ func (pp *PollingProcessor) Sync(closeWhenReady chan<- struct{}, payloadVersion errorInfo.Kind = interfaces.DataSourceErrorKindInvalidData } checkIfErrorIsRecoverableAndLog(pp.loggers, err.Error(), pollingErrorContext, 0, pollingWillRetryMessage) - 
pp.dataSourceUpdates.UpdateStatus(interfaces.DataSourceStateInterrupted, errorInfo) + pp.statusReporter.UpdateStatus(interfaces.DataSourceStateInterrupted, errorInfo) } continue } - pp.dataSourceUpdates.UpdateStatus(interfaces.DataSourceStateValid, interfaces.DataSourceErrorInfo{}) + pp.statusReporter.UpdateStatus(interfaces.DataSourceStateValid, interfaces.DataSourceErrorInfo{}) pp.setInitializedOnce.Do(func() { pp.isInitialized.Set(true) pp.loggers.Info("First polling request successful") @@ -163,7 +167,7 @@ func (pp *PollingProcessor) poll() error { // We initialize the store only if the request wasn't cached if !cached { - pp.dataSourceUpdates.Init(allData) + pp.dataDestination.Init(allData, datastatus.Authoritative) } return nil } diff --git a/internal/datasystem/fdv2_datasystem.go b/internal/datasystem/fdv2_datasystem.go index 91d15455..b9437d93 100644 --- a/internal/datasystem/fdv2_datasystem.go +++ b/internal/datasystem/fdv2_datasystem.go @@ -63,6 +63,8 @@ type FDv2 struct { dataStoreStatusProvider interfaces.DataStoreStatusProvider dataSourceStatusProvider *dataStatusProvider + + status interfaces.DataSourceStatus } func NewFDv2(disabled bool, cfgBuilder subsystems.ComponentConfigurer[subsystems.DataSystemConfiguration], clientContext *internal.ClientContextImpl) (*FDv2, error) { @@ -269,7 +271,11 @@ func (f *FDv2) Offline() bool { } func (f *FDv2) UpdateStatus(status interfaces.DataSourceState, err interfaces.DataSourceErrorInfo) { - + f.status = interfaces.DataSourceStatus{ + State: status, + LastError: err, + StateSince: time.Now(), + } } type dataStatusProvider struct { @@ -277,21 +283,7 @@ type dataStatusProvider struct { } func (d *dataStatusProvider) GetStatus() interfaces.DataSourceStatus { - var state interfaces.DataSourceState - if d.system.primarySync != nil { - if d.system.primarySync.IsInitialized() { - state = interfaces.DataSourceStateValid - } else { - state = interfaces.DataSourceStateInitializing - } - } else { - state = interfaces.DataSourceStateOff - } - return interfaces.DataSourceStatus{ - State: state, - StateSince: time.Now(), - LastError: interfaces.DataSourceErrorInfo{}, - } + return d.system.status } func (d *dataStatusProvider) AddStatusListener() <-chan interfaces.DataSourceStatus { diff --git a/ldclient_end_to_end_fdv2_test.go b/ldclient_end_to_end_fdv2_test.go index 88a7b60a..569acf84 100644 --- a/ldclient_end_to_end_fdv2_test.go +++ b/ldclient_end_to_end_fdv2_test.go @@ -1,9 +1,12 @@ package ldclient import ( + "crypto/x509" "github.com/launchdarkly/go-sdk-common/v3/ldlog" "github.com/launchdarkly/go-server-sdk/v7/internal/datasourcev2" + "github.com/launchdarkly/go-server-sdk/v7/internal/sharedtest" "github.com/launchdarkly/go-server-sdk/v7/testhelpers/ldservicesv2" + "net/http" "net/http/httptest" "testing" "time" @@ -41,7 +44,7 @@ func TestFDV2DefaultDataSourceIsStreaming(t *testing.T) { Events: ldcomponents.NoEvents(), Logging: ldcomponents.Logging().Loggers(logCapture.Loggers), ServiceEndpoints: interfaces.ServiceEndpoints{Streaming: streamServer.URL}, - DataSystem: ldcomponents.DataSystem().DefaultMode(), + DataSystem: ldcomponents.DataSystem().Default(), } client, err := MakeCustomClient(testSdkKey, config, time.Second*5) @@ -79,7 +82,7 @@ func TestFDV2ClientStartsInStreamingMode(t *testing.T) { Events: ldcomponents.NoEvents(), Logging: ldcomponents.Logging().Loggers(logCapture.Loggers), ServiceEndpoints: interfaces.ServiceEndpoints{Streaming: streamServer.URL}, - DataSystem: ldcomponents.DataSystem().StreamingMode(), + DataSystem: 
ldcomponents.DataSystem().Streaming(), } client, err := MakeCustomClient(testSdkKey, config, time.Second*5) @@ -99,3 +102,157 @@ func TestFDV2ClientStartsInStreamingMode(t *testing.T) { assert.Len(t, logCapture.GetOutput(ldlog.Warn), 0) }) } + +func TestFDV2ClientRetriesConnectionInStreamingModeWithNonFatalError(t *testing.T) { + data := ldservices.NewServerSDKData().Flags(&alwaysTrueFlag) + + protocol := ldservicesv2.NewStreamingProtocol(). + WithIntent(datasourcev2.ServerIntent{Payloads: []datasourcev2.Payload{ + {ID: "fake-id", Target: 0, Code: "xfer-full", Reason: "payload-missing"}, + }}). + WithPutObjects(data.ToBaseObjects()). + WithTransferred() + + streamHandler, streamSender := ldservices.ServerSideStreamingServiceHandler(protocol.Next()) + protocol.Enqueue(streamSender) + + failThenSucceedHandler := httphelpers.SequentialHandler(httphelpers.HandlerWithStatus(503), streamHandler) + handler, requestsCh := httphelpers.RecordingHandler(failThenSucceedHandler) + httphelpers.WithServer(handler, func(streamServer *httptest.Server) { + logCapture := ldlogtest.NewMockLog() + + config := Config{ + Events: ldcomponents.NoEvents(), + Logging: ldcomponents.Logging().Loggers(logCapture.Loggers), + ServiceEndpoints: interfaces.ServiceEndpoints{Streaming: streamServer.URL}, + DataSystem: ldcomponents.DataSystem().Streaming(), + } + + client, err := MakeCustomClient(testSdkKey, config, time.Second*5) + require.NoError(t, err) + defer client.Close() + + assert.Equal(t, string(interfaces.DataSourceStateValid), string(client.GetDataSourceStatusProvider().GetStatus().State)) + + value, _ := client.BoolVariation(alwaysTrueFlag.Key, testUser, false) + assert.True(t, value) + + r0 := <-requestsCh + assert.Equal(t, testSdkKey, r0.Request.Header.Get("Authorization")) + r1 := <-requestsCh + assert.Equal(t, testSdkKey, r1.Request.Header.Get("Authorization")) + assertNoMoreRequests(t, requestsCh) + + expectedWarning := "Error in stream connection (will retry): HTTP error 503" + assert.Equal(t, []string{expectedWarning}, logCapture.GetOutput(ldlog.Warn)) + assert.Len(t, logCapture.GetOutput(ldlog.Error), 0) + }) +} + +func TestFDV2ClientFailsToStartInPollingModeWith401Error(t *testing.T) { + handler, requestsCh := httphelpers.RecordingHandler(httphelpers.HandlerWithStatus(401)) + httphelpers.WithServer(handler, func(pollServer *httptest.Server) { + logCapture := ldlogtest.NewMockLog() + + config := Config{ + DataSystem: ldcomponents.DataSystem().Polling(), + Events: ldcomponents.NoEvents(), + Logging: ldcomponents.Logging().Loggers(logCapture.Loggers), + ServiceEndpoints: interfaces.ServiceEndpoints{Polling: pollServer.URL}, + } + + client, err := MakeCustomClient(testSdkKey, config, time.Second*5) + require.Error(t, err) + require.NotNil(t, client) + defer client.Close() + + assert.Equal(t, initializationFailedErrorMessage, err.Error()) + + assert.Equal(t, string(interfaces.DataSourceStateOff), string(client.GetDataSourceStatusProvider().GetStatus().State)) + + value, _ := client.BoolVariation(alwaysTrueFlag.Key, testUser, false) + assert.False(t, value) + + r := <-requestsCh + assert.Equal(t, testSdkKey, r.Request.Header.Get("Authorization")) + assertNoMoreRequests(t, requestsCh) + + expectedError := "Error on polling request (giving up permanently): HTTP error 401 (invalid SDK key)" + assert.Equal(t, []string{expectedError}, logCapture.GetOutput(ldlog.Error)) + assert.Equal(t, []string{pollingModeWarningMessage, initializationFailedErrorMessage}, logCapture.GetOutput(ldlog.Warn)) + }) +} + +func 
TestFDV2ClientUsesCustomTLSConfiguration(t *testing.T) { + data := ldservices.NewServerSDKData().Flags(&alwaysTrueFlag) + + protocol := ldservicesv2.NewStreamingProtocol(). + WithIntent(datasourcev2.ServerIntent{Payloads: []datasourcev2.Payload{ + {ID: "fake-id", Target: 0, Code: "xfer-full", Reason: "payload-missing"}, + }}). + WithPutObjects(data.ToBaseObjects()). + WithTransferred() + + streamHandler, streamSender := ldservices.ServerSideStreamingServiceHandler(protocol.Next()) + protocol.Enqueue(streamSender) + + httphelpers.WithSelfSignedServer(streamHandler, func(server *httptest.Server, certData []byte, certs *x509.CertPool) { + config := Config{ + Events: ldcomponents.NoEvents(), + HTTP: ldcomponents.HTTPConfiguration().CACert(certData), + Logging: ldcomponents.Logging().Loggers(sharedtest.NewTestLoggers()), + ServiceEndpoints: interfaces.ServiceEndpoints{Streaming: server.URL}, + DataSystem: ldcomponents.DataSystem().Streaming(), + } + + client, err := MakeCustomClient(testSdkKey, config, time.Second*5) + require.NoError(t, err) + defer client.Close() + + value, _ := client.BoolVariation(alwaysTrueFlag.Key, testUser, false) + assert.True(t, value) + }) +} + +func TestFDV2ClientStartupTimesOut(t *testing.T) { + data := ldservices.NewServerSDKData().Flags(&alwaysTrueFlag) + + protocol := ldservicesv2.NewStreamingProtocol(). + WithIntent(datasourcev2.ServerIntent{Payloads: []datasourcev2.Payload{ + {ID: "fake-id", Target: 0, Code: "xfer-full", Reason: "payload-missing"}, + }}). + WithPutObjects(data.ToBaseObjects()). + WithTransferred() + + streamHandler, streamSender := ldservices.ServerSideStreamingServiceHandler(protocol.Next()) + protocol.Enqueue(streamSender) + + slowHandler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + time.Sleep(300 * time.Millisecond) + streamHandler.ServeHTTP(w, r) + }) + + httphelpers.WithServer(slowHandler, func(streamServer *httptest.Server) { + logCapture := ldlogtest.NewMockLog() + + config := Config{ + Events: ldcomponents.NoEvents(), + Logging: ldcomponents.Logging().Loggers(logCapture.Loggers), + ServiceEndpoints: interfaces.ServiceEndpoints{Streaming: streamServer.URL}, + DataSystem: ldcomponents.DataSystem().Streaming(), + } + + client, err := MakeCustomClient(testSdkKey, config, time.Millisecond*100) + require.Error(t, err) + require.NotNil(t, client) + defer client.Close() + + assert.Equal(t, "timeout encountered waiting for LaunchDarkly client initialization", err.Error()) + + value, _ := client.BoolVariation(alwaysTrueFlag.Key, testUser, false) + assert.False(t, value) + + assert.Equal(t, []string{"Timeout encountered waiting for LaunchDarkly client initialization"}, logCapture.GetOutput(ldlog.Warn)) + assert.Len(t, logCapture.GetOutput(ldlog.Error), 0) + }) +} diff --git a/ldcomponents/data_system_configuration_builder.go b/ldcomponents/data_system_configuration_builder.go index 83bc500a..92117a11 100644 --- a/ldcomponents/data_system_configuration_builder.go +++ b/ldcomponents/data_system_configuration_builder.go @@ -17,50 +17,50 @@ type DataSystemConfigurationBuilder struct { type DataSystemModes struct{} -// DefaultMode is LaunchDarkly's recommended flag data acquisition strategy. Currently, it operates a +// Default is LaunchDarkly's recommended flag data acquisition strategy. Currently, it operates a // two-phase method for obtaining data: first, it requests data from LaunchDarkly's global CDN. Then, it initiates // a streaming connection to LaunchDarkly's Flag Delivery services to receive real-time updates. 
If // the streaming connection is interrupted for an extended period of time, the SDK will automatically fall back // to polling the global CDN for updates. -func (d *DataSystemModes) DefaultMode() *DataSystemConfigurationBuilder { - return d.CustomMode(). +func (d *DataSystemModes) Default() *DataSystemConfigurationBuilder { + return d.Custom(). Initializers(PollingDataSourceV2().AsInitializer()).Synchronizers(StreamingDataSourceV2(), PollingDataSourceV2()) } -// StreamingMode configures the SDK to efficiently streams flag/segment data in the background, +// Streaming configures the SDK to efficiently stream flag/segment data in the background, // allowing evaluations to operate on the latest data with no additional latency. -func (d *DataSystemModes) StreamingMode() *DataSystemConfigurationBuilder { - return d.CustomMode().Synchronizers(StreamingDataSourceV2(), nil) +func (d *DataSystemModes) Streaming() *DataSystemConfigurationBuilder { + return d.Custom().Synchronizers(StreamingDataSourceV2(), nil) } -// PollingMode configures the SDK to regularly poll an endpoint for flag/segment data in the background. +// Polling configures the SDK to regularly poll an endpoint for flag/segment data in the background. // This is less efficient than streaming, but may be necessary in some network environments. -func (d *DataSystemModes) PollingMode() *DataSystemConfigurationBuilder { - return d.CustomMode().Synchronizers(PollingDataSourceV2(), nil) +func (d *DataSystemModes) Polling() *DataSystemConfigurationBuilder { + return d.Custom().Synchronizers(PollingDataSourceV2(), nil) } -// DaemonMode configures the SDK to read from a persistent store integration that is populated by Relay Proxy +// Daemon configures the SDK to read from a persistent store integration that is populated by Relay Proxy // or other SDKs. The SDK will not connect to LaunchDarkly. In this mode, the SDK never writes to the data store. -func (d *DataSystemModes) DaemonMode(store ss.ComponentConfigurer[ss.DataStore]) *DataSystemConfigurationBuilder { - return d.CustomMode().DataStore(store, ss.StoreModeRead) +func (d *DataSystemModes) Daemon(store ss.ComponentConfigurer[ss.DataStore]) *DataSystemConfigurationBuilder { + return d.Custom().DataStore(store, ss.StoreModeRead) } -// PersistentStoreMode is similar to DefaultMode, with the addition of a +// PersistentStore is similar to Default, with the addition of a // persistent store integration. Before data has arrived from LaunchDarkly, the SDK is able to // evaluate flags using data from the persistent store. Once fresh data is available, the SDK // will no longer read from the persistent store, although it will keep it up-to-date. -func (d *DataSystemModes) PersistentStoreMode(store ss.ComponentConfigurer[ss.DataStore]) *DataSystemConfigurationBuilder { - return d.DefaultMode().DataStore(store, ss.StoreModeReadWrite) +func (d *DataSystemModes) PersistentStore(store ss.ComponentConfigurer[ss.DataStore]) *DataSystemConfigurationBuilder { + return d.Default().DataStore(store, ss.StoreModeReadWrite) } -// CustomMode returns a builder suitable for creating a custom data acquisition strategy. You may configure +// Custom returns a builder suitable for creating a custom data acquisition strategy. You may configure // how the SDK uses a Persistent Store, how the SDK obtains an initial set of data, and how the SDK keeps data up-to-date.
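
In application code, these modes compose into the SDK configuration in one line each. A hedged sketch of selecting a strategy — it assumes only the builder methods shown above and the Config.DataSystem field exercised by the e2e tests earlier in this series:

	import (
		ld "github.com/launchdarkly/go-server-sdk/v7"
		"github.com/launchdarkly/go-server-sdk/v7/ldcomponents"
	)

	func newDataSystemConfig() ld.Config {
		var config ld.Config
		// Recommended two-phase strategy: a one-shot poll to initialize quickly,
		// then streaming with automatic fallback to polling.
		config.DataSystem = ldcomponents.DataSystem().Default()

		// The same strategy, spelled out through the Custom() builder:
		config.DataSystem = ldcomponents.DataSystem().Custom().
			Initializers(ldcomponents.PollingDataSourceV2().AsInitializer()).
			Synchronizers(ldcomponents.StreamingDataSourceV2(), ldcomponents.PollingDataSourceV2())
		return config
	}
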
-func (d *DataSystemModes) CustomMode() *DataSystemConfigurationBuilder { +func (d *DataSystemModes) Custom() *DataSystemConfigurationBuilder { return &DataSystemConfigurationBuilder{} } // DataSystem provides a high-level selection of the SDK's data acquisition strategy. Use the returned builder to select -// a mode, or to create a custom data acquisition strategy. To use LaunchDarkly's recommended mode, use DefaultMode. +// a mode, or to create a custom data acquisition strategy. To use LaunchDarkly's recommended mode, use Default. func DataSystem() *DataSystemModes { return &DataSystemModes{} } diff --git a/ldcomponents/polling_data_source_builder_v2.go b/ldcomponents/polling_data_source_builder_v2.go index ca5cc299..49b470d3 100644 --- a/ldcomponents/polling_data_source_builder_v2.go +++ b/ldcomponents/polling_data_source_builder_v2.go @@ -93,7 +93,7 @@ func (b *PollingDataSourceBuilderV2) Build(context subsystems.ClientContext) (su PollInterval: b.pollInterval, FilterKey: filterKey, } - return datasourcev2.NewPollingProcessor(context, context.GetDataSourceUpdateSink(), cfg), nil + return datasourcev2.NewPollingProcessor(context, context.GetDataDestination(), context.GetDataSourceStatusReporter(), cfg), nil } func (b *PollingDataSourceBuilderV2) AsInitializer() subsystems.ComponentConfigurer[subsystems.DataInitializer] { From 99cffc205000ed07d5acd41eb89a94d80d85207e Mon Sep 17 00:00:00 2001 From: Casey Waldren Date: Fri, 13 Sep 2024 17:49:47 -0700 Subject: [PATCH 31/62] goimports --- internal/datasystem/fdv2_store.go | 3 ++- internal/datasystem/fdv2_store_test.go | 7 ++++--- 2 files changed, 6 insertions(+), 4 deletions(-) diff --git a/internal/datasystem/fdv2_store.go b/internal/datasystem/fdv2_store.go index 772af2d0..5b4c81d1 100644 --- a/internal/datasystem/fdv2_store.go +++ b/internal/datasystem/fdv2_store.go @@ -1,9 +1,10 @@ package datasystem import ( - "github.com/launchdarkly/go-server-sdk/v7/internal/datastatus" "sync" + "github.com/launchdarkly/go-server-sdk/v7/internal/datastatus" + "github.com/launchdarkly/go-sdk-common/v3/ldlog" "github.com/launchdarkly/go-server-sdk/v7/interfaces" "github.com/launchdarkly/go-server-sdk/v7/internal/datakinds" diff --git a/internal/datasystem/fdv2_store_test.go b/internal/datasystem/fdv2_store_test.go index 941414ee..c0894277 100644 --- a/internal/datasystem/fdv2_store_test.go +++ b/internal/datasystem/fdv2_store_test.go @@ -2,14 +2,15 @@ package datasystem import ( "errors" - "github.com/launchdarkly/go-server-sdk/v7/internal/datastatus" - "github.com/launchdarkly/go-server-sdk/v7/subsystems" - "github.com/stretchr/testify/require" "math/rand" "sync" "testing" "time" + "github.com/launchdarkly/go-server-sdk/v7/internal/datastatus" + "github.com/launchdarkly/go-server-sdk/v7/subsystems" + "github.com/stretchr/testify/require" + "github.com/launchdarkly/go-sdk-common/v3/ldlogtest" "github.com/launchdarkly/go-server-sdk/v7/subsystems/ldstoreimpl" "github.com/launchdarkly/go-server-sdk/v7/subsystems/ldstoretypes" From c47eb1b0cfd438b60d673187f0531d144c8d088f Mon Sep 17 00:00:00 2001 From: Casey Waldren Date: Tue, 17 Sep 2024 11:50:42 -0700 Subject: [PATCH 32/62] merge dataSystem refactor changes --- .../{data_status.go => data_availability.go} | 1 + internal/datasystem/fdv1_datasystem.go | 24 ++++++++++++----- internal/datasystem/package.go | 5 ++++ ldclient.go | 27 ++++++++++--------- ldclient_test.go | 4 +-- subsystems/read_only_store.go | 2 ++ 6 files changed, 42 insertions(+), 21 deletions(-) rename internal/datasystem/{data_status.go => 
data_availability.go} (87%) create mode 100644 internal/datasystem/package.go diff --git a/internal/datasystem/data_status.go b/internal/datasystem/data_availability.go similarity index 87% rename from internal/datasystem/data_status.go rename to internal/datasystem/data_availability.go index 630a10aa..5e0fecbd 100644 --- a/internal/datasystem/data_status.go +++ b/internal/datasystem/data_availability.go @@ -1,5 +1,6 @@ package datasystem +// DataAvailability represents the availability of data in the SDK. type DataAvailability string const ( diff --git a/internal/datasystem/fdv1_datasystem.go b/internal/datasystem/fdv1_datasystem.go index 3e4bfd42..abc649e2 100644 --- a/internal/datasystem/fdv1_datasystem.go +++ b/internal/datasystem/fdv1_datasystem.go @@ -9,6 +9,8 @@ import ( "github.com/launchdarkly/go-server-sdk/v7/subsystems" ) +// FDv1 implements the configuration and interactions between the SDK's data store, data source, and +// other related components. type FDv1 struct { dataSourceStatusBroadcaster *internal.Broadcaster[interfaces.DataSourceStatus] dataSourceStatusProvider interfaces.DataSourceStatusProvider @@ -20,7 +22,11 @@ type FDv1 struct { offline bool } -func NewFDv1(offline bool, dataStoreFactory subsystems.ComponentConfigurer[subsystems.DataStore], dataSourceFactory subsystems.ComponentConfigurer[subsystems.DataSource], clientContext *internal.ClientContextImpl) (*FDv1, error) { +// NewFDv1 creates a new FDv1 instance from data store and data source configurers. Offline determines if the +// client is in offline mode. If configuration is invalid, an error will be returned. +func NewFDv1(offline bool, dataStoreFactory subsystems.ComponentConfigurer[subsystems.DataStore], + dataSourceFactory subsystems.ComponentConfigurer[subsystems.DataSource], + clientContext *internal.ClientContextImpl) (*FDv1, error) { system := &FDv1{ dataSourceStatusBroadcaster: internal.NewBroadcaster[interfaces.DataSourceStatus](), dataStoreStatusBroadcaster: internal.NewBroadcaster[interfaces.DataStoreStatus](), @@ -63,7 +69,6 @@ func NewFDv1(offline bool, dataStoreFactory subsystems.ComponentConfigurer[subsy ) return system, nil - } func createDataSource( @@ -86,30 +91,37 @@ func createDataSource( return factory.Build(&contextCopy) } +//nolint:revive // Data system implementation. func (f *FDv1) DataSourceStatusBroadcaster() *internal.Broadcaster[interfaces.DataSourceStatus] { return f.dataSourceStatusBroadcaster } +//nolint:revive // Data system implementation. func (f *FDv1) DataSourceStatusProvider() interfaces.DataSourceStatusProvider { return f.dataSourceStatusProvider } +//nolint:revive // Data system implementation. func (f *FDv1) DataStoreStatusBroadcaster() *internal.Broadcaster[interfaces.DataStoreStatus] { return f.dataStoreStatusBroadcaster } +//nolint:revive // Data system implementation. func (f *FDv1) DataStoreStatusProvider() interfaces.DataStoreStatusProvider { return f.dataStoreStatusProvider } +//nolint:revive // Data system implementation. func (f *FDv1) FlagChangeEventBroadcaster() *internal.Broadcaster[interfaces.FlagChangeEvent] { return f.flagChangeEventBroadcaster } +//nolint:revive // Data system implementation. func (f *FDv1) Start(closeWhenReady chan struct{}) { f.dataSource.Start(closeWhenReady) } +//nolint:revive // Data system implementation. 
func (f *FDv1) Stop() error { if f.dataSource != nil { _ = f.dataSource.Close() @@ -129,12 +141,9 @@ func (f *FDv1) Stop() error { return nil } -func (f *FDv1) Offline() bool { - return f.offline || f.dataSource == datasource.NewNullDataSource() -} - +//nolint:revive // Data system implementation. func (f *FDv1) DataAvailability() DataAvailability { - if f.Offline() { + if f.offline { return Defaults } if f.dataSource.IsInitialized() { @@ -146,6 +155,7 @@ func (f *FDv1) DataAvailability() DataAvailability { return Defaults } +//nolint:revive // Data system implementation. func (f *FDv1) Store() subsystems.ReadOnlyStore { return f.dataStore } diff --git a/internal/datasystem/package.go b/internal/datasystem/package.go new file mode 100644 index 00000000..1410623d --- /dev/null +++ b/internal/datasystem/package.go @@ -0,0 +1,5 @@ +// Package datasystem encapsulates the interactions between the SDK's data store, data source, and other related +// components. +// Currently, there is only one data system implementation, FDv1, which represents the functionality of the SDK +// before the FDv2 protocol was introduced. +package datasystem diff --git a/ldclient.go b/ldclient.go index ca3eea49..32e0f9fe 100644 --- a/ldclient.go +++ b/ldclient.go @@ -67,8 +67,8 @@ const ( migrationVarExFuncName = "LDClient.MigrationVariationCtx" ) -// The dataSystem interface represents the requirements for the client to retrieve data necessary -// for evaluations, as well as the related status updates related to the data. +// dataSystem represents the requirements the client has for storing/retrieving/detecting changes related +// to the SDK's data model. type dataSystem interface { DataSourceStatusBroadcaster() *internal.Broadcaster[interfaces.DataSourceStatus] DataSourceStatusProvider() interfaces.DataSourceStatusProvider @@ -76,23 +76,22 @@ type dataSystem interface { DataStoreStatusProvider() interfaces.DataStoreStatusProvider FlagChangeEventBroadcaster() *internal.Broadcaster[interfaces.FlagChangeEvent] - // Offline indicates whether the SDK is configured to be offline, either because the offline config item was - // explicitly set, or because a NullDataSource was used. - Offline() bool // Start starts the data system; the given channel will be closed when the system has reached an initial state // (either permanently failed, e.g. due to bad auth, or succeeded, where Initialized() == true). Start(closeWhenReady chan struct{}) - // Stop halts the data system. Should be called when the client is closed to stop any long running operations. + + // Stop halts the data system. Should be called when the client is closed to stop any long-running operations. Stop() error + // Store returns a read-only accessor for the data model. Store() subsystems.ReadOnlyStore + // DataAvailability indicates what form of data is available. DataAvailability() datasystem.DataAvailability } var ( _ dataSystem = &datasystem.FDv1{} - _ dataSystem = &datasystem.FDv2{} ) // LDClient is the LaunchDarkly client. @@ -291,7 +290,6 @@ func MakeCustomClient(sdkKey string, config Config, waitFor time.Duration) (*LDC ) } - // TODO: We can't actually pass STore() here because it wont' swap between the active ones. 
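
Because Start closes the given channel once the data system reaches an initial state — whether it initialized successfully or failed permanently — client start-up reduces to a standard bounded wait. A simplified sketch of that pattern (not the SDK's exact code; dataSystem and waitFor stand in for the client's configured data system and the caller-supplied timeout):

	closeWhenReady := make(chan struct{})
	dataSystem.Start(closeWhenReady)
	select {
	case <-closeWhenReady:
		// An initial state was reached; Initialized() now reports whether
		// usable data actually arrived.
	case <-time.After(waitFor):
		// Timed out. The client remains usable and may still finish
		// initializing in the background.
	}
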
dataProvider := ldstoreimpl.NewDataStoreEvaluatorDataProvider(client.dataSystem.Store(), loggers) evalOptions := []ldeval.EvaluatorOption{ ldeval.EvaluatorOptionErrorLogger(client.loggers.ForLevel(ldlog.Error)), @@ -329,7 +327,7 @@ func MakeCustomClient(sdkKey string, config Config, waitFor time.Duration) (*LDC clientValid = true client.dataSystem.Start(closeWhenReady) - if waitFor > 0 && !client.dataSystem.Offline() { + if waitFor > 0 && !client.offline { loggers.Infof("Waiting up to %d milliseconds for LaunchDarkly client to start...", waitFor/time.Millisecond) @@ -564,13 +562,18 @@ func (client *LDClient) SecureModeHash(context ldcontext.Context) string { // this does not guarantee that the flags are up to date; if you need to know its status in more detail, use // [LDClient.GetDataSourceStatusProvider]. // +// Additionally, if the client was configured to be offline, this will always return true. +// // If this value is false, it means the client has not yet connected to LaunchDarkly, or has permanently // failed. See [MakeClient] for the reasons that this could happen. In this state, feature flag evaluations // will always return default values-- unless you are using a database integration and feature flags had // already been stored in the database by a successfully connected SDK in the past. You can use // [LDClient.GetDataSourceStatusProvider] to get information on errors, or to wait for a successful retry. func (client *LDClient) Initialized() bool { - return client.dataSystem.DataAvailability() == datasystem.Refreshed + if client.offline { + return true + } + return client.dataSystem.DataAvailability() != datasystem.Defaults } // Close shuts down the LaunchDarkly client. After calling this, the LaunchDarkly client @@ -654,7 +657,7 @@ func (client *LDClient) AllFlagsState(context ldcontext.Context, options ...flag if client.IsOffline() { client.loggers.Warn("Called AllFlagsState in offline mode. Returning empty state") valid = false - } else if !client.Initialized() { + } else if client.dataSystem.DataAvailability() != datasystem.Refreshed { if client.dataSystem.DataAvailability() == datasystem.Cached { client.loggers.Warn("Called AllFlagsState before client initialization; using last known values from data store") } else { @@ -1250,7 +1253,7 @@ func (client *LDClient) evaluateInternal( return ldeval.Result{Detail: detail}, flag, err } - if !client.Initialized() { + if client.dataSystem.DataAvailability() != datasystem.Refreshed { if client.dataSystem.DataAvailability() == datasystem.Cached { client.loggers.Warn("Feature flag evaluation called before LaunchDarkly client initialization completed; using last known values from data store") //nolint:lll } else { diff --git a/ldclient_test.go b/ldclient_test.go index 5904c9f6..ef82e3e7 100644 --- a/ldclient_test.go +++ b/ldclient_test.go @@ -100,8 +100,8 @@ func makeTestClientWithConfigAndStore(modConfig func(*Config), populate func(sto return client } -// populateStore (which is a function) is defined here a type so that we can implement the ComponentConfigurer interface -// on it. That way, when the SDK configures the data store, we can hook in additional logic to populate the store +// The populateStore type exists so that we can implement the ComponentConfigurer interface +// on it. When the SDK configures the data store, we can hook in additional logic to populate the store // via the callback provided in makeTestClientWithConfigAndStore.
type populateStore func(store subsystems.DataStore) diff --git a/subsystems/read_only_store.go b/subsystems/read_only_store.go index c1eefcbf..ad3b79d8 100644 --- a/subsystems/read_only_store.go +++ b/subsystems/read_only_store.go @@ -2,6 +2,8 @@ package subsystems import "github.com/launchdarkly/go-server-sdk/v7/subsystems/ldstoretypes" +// ReadOnlyStore represents a read-only data store that can be used to retrieve +// any of the SDK's supported DataKinds. type ReadOnlyStore interface { // Get retrieves an item from the specified collection, if available. // From 5f6af7426cc3e98f7d4f73b6625bd92194c60f8a Mon Sep 17 00:00:00 2001 From: Casey Waldren Date: Tue, 17 Sep 2024 16:31:53 -0700 Subject: [PATCH 33/62] chore: add fdv2 store --- internal/datasystem/store.go | 224 +++++++++++++++++++++++ internal/datasystem/store_test.go | 288 ++++++++++++++++++++++++++++++ subsystems/data_store_mode.go | 19 ++ 3 files changed, 531 insertions(+) create mode 100644 internal/datasystem/store.go create mode 100644 internal/datasystem/store_test.go create mode 100644 subsystems/data_store_mode.go diff --git a/internal/datasystem/store.go b/internal/datasystem/store.go new file mode 100644 index 00000000..356dde79 --- /dev/null +++ b/internal/datasystem/store.go @@ -0,0 +1,224 @@ +package datasystem + +import ( + "sync" + + "github.com/launchdarkly/go-sdk-common/v3/ldlog" + "github.com/launchdarkly/go-server-sdk/v7/interfaces" + "github.com/launchdarkly/go-server-sdk/v7/internal/datakinds" + "github.com/launchdarkly/go-server-sdk/v7/internal/datastore" + "github.com/launchdarkly/go-server-sdk/v7/subsystems" + "github.com/launchdarkly/go-server-sdk/v7/subsystems/ldstoretypes" +) + +// Store is a hybrid persistent/in-memory store that serves queries for data from the evaluation +// algorithm. +// +// At any given moment, 1 of 2 stores is active: in-memory, or persistent. This doesn't preclude a caller +// from holding on to a reference to the persistent store even when we swap to the in-memory store. +// +// Once the in-memory store has data (either from initializers running, or from a synchronizer), the persistent +// store is no longer regarded as active. From that point forward, GetActive() will return the in-memory store. +// +// The idea is that persistent stores can offer a way to immediately start evaluating flags before a connection +// is made to LD (or even in a very brief moment before an initializer has run.) The persistent store has caching +// logic which can result in inconsistent/stale data being used. Therefore, once we have fresh data, we don't +// want to use the persistent store at all. +// +// A complication is that persistent stores have historically operated in multiple regimes. The first is "daemon mode", +// where the SDK is effectively using the store in read-only mode, with the store being populated by Relay or another SDK. +// The second is just plain persistent store mode, where it is both read and written to. In the FDv2 system, we explicitly +// differentiate these cases using a read/read-write mode. In all cases, the in-memory store is used once it has data available. +// This contrasts with FDv1 where even if data from LD is available, that data may fall out of memory due to the persistent +// store's caching logic ("sparse mode", when the TTL is non-infinite). +// +// We have found this to almost always be undesirable for users. +type Store struct { + // Represents a remote store, like Redis.
This is optional; if present, it's only used + // before the in-memory store is initialized. + persistentStore subsystems.DataStore + + // The persistentStore is read-only, or read-write. In read-only mode, the store + // is *never* written to, and only read before the in-memory store is initialized. + // This is equivalent to the concept of "daemon mode". + // + // In read-write mode, data from initializers/synchronizers is written to the store + // as it is received. This is equivalent to the normal "persistent store" configuration + // that an SDK can use to collaborate with zero or more other SDKs with a (possibly shared) database. + persistentStoreMode subsystems.DataStoreMode + + // This exists as a quirk of the DataSourceUpdateSink interface, which store implements. The DataSourceUpdateSink + // has a method to return a DataStoreStatusProvider so that a DataSource can monitor the state of the store. This + // was originally used in fdv1 to know when the store went offline/online, so that data could be committed back + // to the store when it came back online. In fdv2 system, this is handled by the FDv2 struct itself, so the + // data source doesn't need any knowledge of it. We can delete this piece of infrastructure when we no longer + // need to support fdv1 (or we could refactor the fdv2 data sources to use a different set of interfaces that don't + // require this.) + persistentStoreStatusProvider interfaces.DataStoreStatusProvider + + // Represents the store that all flag/segment data queries are served from after data is received from + // initializers/synchronizers. Before the in-memory store is initialized, queries are served from the + // persistentStore (if configured). + memoryStore subsystems.DataStore + + active subsystems.DataStore + + // Whether the memoryStore's data should be considered authoritative, or fresh - that is, if it is known + // to be the latest data. Data from a baked in file for example would not be considered refreshed. The purpose + // of this is to know if we should commit data to the persistentStore. For example, if we initialize with "stale" + // data from a local file (refreshed=false), we may not want to pollute a connected Redis database with it. + // TODO: this could also be called "Authoritative". "It was the latest at some point.. that point being when we asked + // if it was the latest". + availability DataAvailability + + // Protects the refreshed, persistentStore, persistentStoreMode, and active fields. + mu sync.RWMutex + + loggers ldlog.Loggers +} + +// NewStore creates a new store. By default the store is in-memory. To add a persistent store, call SwapToPersistent. Ensure this is +// called at configuration time, only once and before the store is ever accessed. +func NewStore(loggers ldlog.Loggers) *Store { + s := &Store{ + persistentStore: nil, + persistentStoreMode: subsystems.DataStoreModeRead, + memoryStore: datastore.NewInMemoryDataStore(loggers), + availability: Defaults, + loggers: loggers, + } + s.active = s.memoryStore + return s +} + +// Close closes the store. If there is a persistent store configured, it will be closed. +func (s *Store) Close() error { + s.mu.Lock() + defer s.mu.Unlock() + if s.persistentStore != nil { + return s.persistentStore.Close() + } + return nil +} + +// GetActive returns the active store, either persistent or in-memory. If there is no persistent store configured, +// the in-memory store is always active. 
+func (s *Store) getActive() subsystems.DataStore { + s.mu.RLock() + defer s.mu.RUnlock() + return s.active +} + +// DataAvailability returns the status of the store's data. Defaults means there is no data, Cached means there is +// data, but it's not guaranteed to be recent, and Refreshed means the data has been refreshed from the server. +func (s *Store) DataAvailability() DataAvailability { + s.mu.RLock() + defer s.mu.RUnlock() + return s.availability +} + +// Mirroring returns true data is being mirrored to a persistent store. +func (s *Store) mirroring() bool { + return s.persistentStore != nil && s.persistentStoreMode == subsystems.DataStoreModeReadWrite +} + +// nolint:revive // Standard DataSourceUpdateSink method +func (s *Store) Init(allData []ldstoretypes.Collection, payloadVersion *int) bool { + s.mu.Lock() + defer s.mu.Unlock() + + // TXNS-PS: Requirement 1.3.3, must apply updates to in-memory before the persistent Store. + // TODO: handle errors from initializing the memory or persistent stores. + if err := s.memoryStore.Init(allData); err == nil { + s.active = s.memoryStore + if payloadVersion != nil { + s.availability = Refreshed + } else { + s.availability = Cached + } + } + + if s.mirroring() { + _ = s.persistentStore.Init(allData) // TODO: insert in topo-sort order + } + return true +} + +// nolint:revive // Standard DataSourceUpdateSink method +func (s *Store) Upsert(kind ldstoretypes.DataKind, key string, item ldstoretypes.ItemDescriptor) bool { + s.mu.RLock() + defer s.mu.RUnlock() + + var ( + memErr error + persErr error + ) + + // TXNS-PS: Requirement 1.3.3, must apply updates to in-memory before the persistent store. + _, memErr = s.memoryStore.Upsert(kind, key, item) + + if s.mirroring() { + _, persErr = s.persistentStore.Upsert(kind, key, item) + } + return memErr == nil && persErr == nil +} + +// nolint:revive // Standard DataSourceUpdateSink method +func (s *Store) GetDataStoreStatusProvider() interfaces.DataStoreStatusProvider { + s.mu.RLock() + defer s.mu.RUnlock() + return s.persistentStoreStatusProvider +} + +// WithPersistence exists only because of the way the SDK's configuration builders work - we need a ClientContext +// before we can call Build to actually get the persistent store. That ClientContext requires the +// DataStoreUpdateSink, which is what this store struct implements. +func (s *Store) WithPersistence(persistent subsystems.DataStore, mode subsystems.DataStoreMode, statusProvider interfaces.DataStoreStatusProvider) *Store { + s.mu.Lock() + defer s.mu.Unlock() + s.persistentStore = persistent + s.persistentStoreMode = mode + s.persistentStoreStatusProvider = statusProvider + s.active = s.persistentStore + + if s.persistentStore.IsInitialized() { + s.availability = Cached + } else { + s.availability = Defaults + } + return s +} + +func (s *Store) Commit() error { + s.mu.RLock() + defer s.mu.RUnlock() + + // Note: DataAvailability() will also take a read lock. 
+ if s.availability == Refreshed && s.mirroring() { + flags, err := s.memoryStore.GetAll(datakinds.Features) + if err != nil { + return err + } + segments, err := s.memoryStore.GetAll(datakinds.Segments) + if err != nil { + return err + } + return s.persistentStore.Init([]ldstoretypes.Collection{ + {Kind: datakinds.Features, Items: flags}, + {Kind: datakinds.Segments, Items: segments}, + }) + } + return nil +} + +func (s *Store) GetAll(kind ldstoretypes.DataKind) ([]ldstoretypes.KeyedItemDescriptor, error) { + return s.getActive().GetAll(kind) +} + +func (s *Store) Get(kind ldstoretypes.DataKind, key string) (ldstoretypes.ItemDescriptor, error) { + return s.getActive().Get(kind, key) +} + +func (s *Store) IsInitialized() bool { + return s.getActive().IsInitialized() +} diff --git a/internal/datasystem/store_test.go b/internal/datasystem/store_test.go new file mode 100644 index 00000000..74c54970 --- /dev/null +++ b/internal/datasystem/store_test.go @@ -0,0 +1,288 @@ +package datasystem + +import ( + "errors" + "math/rand" + "sync" + "testing" + "time" + + "github.com/launchdarkly/go-server-sdk/v7/subsystems" + "github.com/stretchr/testify/require" + + "github.com/launchdarkly/go-sdk-common/v3/ldlogtest" + "github.com/launchdarkly/go-server-sdk/v7/subsystems/ldstoreimpl" + "github.com/launchdarkly/go-server-sdk/v7/subsystems/ldstoretypes" + "github.com/stretchr/testify/assert" +) + +func TestStore_New(t *testing.T) { + logCapture := ldlogtest.NewMockLog() + store := NewStore(logCapture.Loggers) + assert.NoError(t, store.Close()) +} + +func TestStore_NoPersistence_NewStore_DataStatus(t *testing.T) { + logCapture := ldlogtest.NewMockLog() + store := NewStore(logCapture.Loggers) + defer store.Close() + assert.Equal(t, store.DataAvailability(), Defaults) +} + +func TestStore_NoPersistence_NewStore_IsInitialized(t *testing.T) { + logCapture := ldlogtest.NewMockLog() + store := NewStore(logCapture.Loggers) + defer store.Close() + assert.False(t, store.IsInitialized()) +} + +func TestStore_NoPersistence_MemoryStoreInitialized_DataStatus(t *testing.T) { + version1 := 1 + tests := []struct { + name string + payloadVersion *int + expected DataAvailability + }{ + {"fresh data", &version1, Refreshed}, + {"stale data", nil, Cached}, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + logCapture := ldlogtest.NewMockLog() + store := NewStore(logCapture.Loggers) + defer store.Close() + store.Init([]ldstoretypes.Collection{}, tt.payloadVersion) + assert.Equal(t, store.DataAvailability(), tt.expected) + assert.True(t, store.IsInitialized()) + }) + } +} + +func TestStore_Commit(t *testing.T) { + t.Run("no persistent store doesn't cause an error", func(t *testing.T) { + logCapture := ldlogtest.NewMockLog() + store := NewStore(logCapture.Loggers) + defer store.Close() + assert.NoError(t, store.Commit()) + }) + + t.Run("refreshed memory items are copied to persistent store in r/w mode", func(t *testing.T) { + logCapture := ldlogtest.NewMockLog() + + spy := &fakeStore{isDown: true} + + store := NewStore(logCapture.Loggers).WithPersistence(spy, subsystems.DataStoreModeReadWrite, nil) + defer store.Close() + + initPayload := []ldstoretypes.Collection{ + {Kind: ldstoreimpl.Features(), Items: []ldstoretypes.KeyedItemDescriptor{ + {Key: "foo", Item: ldstoretypes.ItemDescriptor{Version: 1}}, + }}, + {Kind: ldstoreimpl.Segments(), Items: []ldstoretypes.KeyedItemDescriptor{ + {Key: "bar", Item: ldstoretypes.ItemDescriptor{Version: 2}}, + }}, + } + + version := 1 + assert.True(t, 
store.Init(initPayload, &version)) + + require.Empty(t, spy.initPayload) + + spy.isDown = false + + require.NoError(t, store.Commit()) + + assert.Equal(t, initPayload, spy.initPayload) + }) + + t.Run("stale memory items are not copied to persistent store in r/w mode", func(t *testing.T) { + logCapture := ldlogtest.NewMockLog() + spy := &fakeStore{} + store := NewStore(logCapture.Loggers).WithPersistence(&fakeStore{}, subsystems.DataStoreModeReadWrite, nil) + defer store.Close() + + initPayload := []ldstoretypes.Collection{ + {Kind: ldstoreimpl.Features(), Items: []ldstoretypes.KeyedItemDescriptor{ + {Key: "foo", Item: ldstoretypes.ItemDescriptor{Version: 1}}, + }}, + {Kind: ldstoreimpl.Segments(), Items: []ldstoretypes.KeyedItemDescriptor{ + {Key: "bar", Item: ldstoretypes.ItemDescriptor{Version: 2}}, + }}, + } + + assert.True(t, store.Init(initPayload, nil)) + + require.Empty(t, spy.initPayload) + + require.NoError(t, store.Commit()) + + assert.Empty(t, spy.initPayload) + }) + + t.Run("refreshed memory items are not copied to persistent store in r-only mode", func(t *testing.T) { + logCapture := ldlogtest.NewMockLog() + spy := &fakeStore{} + store := NewStore(logCapture.Loggers).WithPersistence(spy, subsystems.DataStoreModeRead, nil) + defer store.Close() + + initPayload := []ldstoretypes.Collection{ + {Kind: ldstoreimpl.Features(), Items: []ldstoretypes.KeyedItemDescriptor{ + {Key: "foo", Item: ldstoretypes.ItemDescriptor{Version: 1}}, + }}, + {Kind: ldstoreimpl.Segments(), Items: []ldstoretypes.KeyedItemDescriptor{ + {Key: "bar", Item: ldstoretypes.ItemDescriptor{Version: 2}}, + }}, + } + + version := 1 + assert.True(t, store.Init(initPayload, &version)) + + require.Empty(t, spy.initPayload) + + require.NoError(t, store.Commit()) + + assert.Empty(t, spy.initPayload) + }) +} + +func TestStore_GetActive(t *testing.T) { + t.Run("memory store is active if no persistent store configured", func(t *testing.T) { + logCapture := ldlogtest.NewMockLog() + store := NewStore(logCapture.Loggers) + defer store.Close() + foo, err := store.Get(ldstoreimpl.Features(), "foo") + assert.NoError(t, err) + assert.Equal(t, foo, ldstoretypes.ItemDescriptor{}.NotFound()) + + version := 1 + assert.True(t, store.Init([]ldstoretypes.Collection{ + {Kind: ldstoreimpl.Features(), Items: []ldstoretypes.KeyedItemDescriptor{ + {Key: "foo", Item: ldstoretypes.ItemDescriptor{Version: 1}}, + }}, + }, &version)) + + foo, err = store.Get(ldstoreimpl.Features(), "foo") + assert.NoError(t, err) + assert.Equal(t, 1, foo.Version) + }) + + t.Run("persistent store is active if configured", func(t *testing.T) { + logCapture := ldlogtest.NewMockLog() + store := NewStore(logCapture.Loggers).WithPersistence(&fakeStore{}, subsystems.DataStoreModeReadWrite, nil) + defer store.Close() + + _, err := store.Get(ldstoreimpl.Features(), "foo") + assert.Equal(t, errImAPersistentStore, err) + }) + + t.Run("active store swaps from persistent to memory", func(t *testing.T) { + logCapture := ldlogtest.NewMockLog() + store := NewStore(logCapture.Loggers).WithPersistence(&fakeStore{}, subsystems.DataStoreModeReadWrite, nil) + defer store.Close() + + _, err := store.Get(ldstoreimpl.Features(), "foo") + assert.Equal(t, errImAPersistentStore, err) + + version := 1 + assert.True(t, store.Init([]ldstoretypes.Collection{ + {Kind: ldstoreimpl.Features(), Items: []ldstoretypes.KeyedItemDescriptor{ + {Key: "foo", Item: ldstoretypes.ItemDescriptor{Version: 1}}, + }}, + }, &version)) + + foo, err := store.Get(ldstoreimpl.Features(), "foo") + assert.NoError(t, 
err) + assert.Equal(t, 1, foo.Version) + }) +} + +func TestStore_Concurrency(t *testing.T) { + t.Run("methods using the active store", func(t *testing.T) { + logCapture := ldlogtest.NewMockLog() + store := NewStore(logCapture.Loggers) + defer store.Close() + + var wg sync.WaitGroup + go func() { + wg.Add(1) + defer wg.Done() + for i := 0; i < 100; i++ { + _ = store.DataAvailability() + time.Sleep(time.Duration(rand.Intn(10)) * time.Millisecond) + } + }() + go func() { + wg.Add(1) + defer wg.Done() + for i := 0; i < 100; i++ { + _, _ = store.Get(ldstoreimpl.Features(), "foo") + time.Sleep(time.Duration(rand.Intn(10)) * time.Millisecond) + } + }() + + go func() { + wg.Add(1) + defer wg.Done() + for i := 0; i < 100; i++ { + _, _ = store.GetAll(ldstoreimpl.Features()) + time.Sleep(time.Duration(rand.Intn(10)) * time.Millisecond) + } + }() + go func() { + wg.Add(1) + defer wg.Done() + for i := 0; i < 100; i++ { + _ = store.IsInitialized() + time.Sleep(time.Duration(rand.Intn(10)) * time.Millisecond) + } + }() + go func() { + wg.Add(1) + defer wg.Done() + for i := 0; i < 100; i++ { + version := 1 + _ = store.Init([]ldstoretypes.Collection{}, &version) + time.Sleep(time.Duration(rand.Intn(10)) * time.Millisecond) + } + }() + }) +} + +type fakeStore struct { + initPayload []ldstoretypes.Collection + isDown bool +} + +var errImAPersistentStore = errors.New("i'm a persistent store") + +func (f *fakeStore) GetAll(kind ldstoretypes.DataKind) ([]ldstoretypes.KeyedItemDescriptor, error) { + return nil, nil +} + +func (f *fakeStore) Get(kind ldstoretypes.DataKind, key string) (ldstoretypes.ItemDescriptor, error) { + return ldstoretypes.ItemDescriptor{}, errImAPersistentStore +} + +func (f *fakeStore) IsInitialized() bool { + return false +} + +func (f *fakeStore) Init(allData []ldstoretypes.Collection) error { + if f.isDown { + return errors.New("store is down") + } + f.initPayload = allData + return nil +} + +func (f *fakeStore) Upsert(kind ldstoretypes.DataKind, key string, item ldstoretypes.ItemDescriptor) (bool, error) { + return false, nil +} + +func (f *fakeStore) IsStatusMonitoringEnabled() bool { + return false +} + +func (f *fakeStore) Close() error { + return nil +} diff --git a/subsystems/data_store_mode.go b/subsystems/data_store_mode.go new file mode 100644 index 00000000..056750e9 --- /dev/null +++ b/subsystems/data_store_mode.go @@ -0,0 +1,19 @@ +package subsystems + +// DataStoreMode represents the mode of operation of a Data Store in FDV2 mode. +// +// This enum is not stable, and not subject to any backwards +// compatibility guarantees or semantic versioning. It is not suitable for production usage. +// +// Do not use it. +// You have been warned. +type DataStoreMode int + +const ( + // DataStoreModeRead indicates that the data store is read-only. Data will never be written back to the store by + // the SDK. + DataStoreModeRead = 0 + // DataStoreModeReadWrite indicates that the data store is read-write. Data from initializers/synchronizers may be + // written to the store as necessary. 
+ DataStoreModeReadWrite = 1 ) From 941fee4101ef315e9bf4cc60cfbdb46198cebafb Mon Sep 17 00:00:00 2001 From: Casey Waldren Date: Wed, 18 Sep 2024 13:03:15 -0700 Subject: [PATCH 34/62] remove old comment --- internal/datasystem/store.go | 1 - 1 file changed, 1 deletion(-) diff --git a/internal/datasystem/store.go b/internal/datasystem/store.go index 356dde79..314a3709 100644 --- a/internal/datasystem/store.go +++ b/internal/datasystem/store.go @@ -193,7 +193,6 @@ func (s *Store) Commit() error { s.mu.RLock() defer s.mu.RUnlock() - // Note: DataAvailability() will also take a read lock. if s.availability == Refreshed && s.mirroring() { flags, err := s.memoryStore.GetAll(datakinds.Features) if err != nil { From d11e039f9e66da5805c2385d44455dd7cc6efa7d Mon Sep 17 00:00:00 2001 From: Casey Waldren Date: Wed, 18 Sep 2024 15:10:05 -0700 Subject: [PATCH 35/62] introduce DataQuality enum --- internal/datasystem/store.go | 120 +++++++++++++++--------------- internal/datasystem/store_test.go | 21 ++++-- 2 files changed, 73 insertions(+), 68 deletions(-) diff --git a/internal/datasystem/store.go b/internal/datasystem/store.go index 314a3709..9b863e8c 100644 --- a/internal/datasystem/store.go +++ b/internal/datasystem/store.go @@ -34,10 +34,27 @@ import ( // // We have found this to almost always be undesirable for users. type Store struct { - // Represents a remote store, like Redis. This is optional; if present, it's only used - // before the in-memory store is initialized. - persistentStore subsystems.DataStore + // Represents the SDK's source of truth for flag evals before initialization, or permanently if there are + // no initializers/synchronizers configured. This is optional; if not defined, only the memoryStore is used. + persistentStore *persistentStore + + // Represents the SDK's source of truth for flag evaluations (once initialized). Before initialization, + // the persistentStore may be used if configured. + memoryStore subsystems.DataStore + + // Points to the active store. Swapped upon initialization. + active subsystems.DataStore + + quality DataQuality + + // Protects the availability, persistentStore, quality, and active fields. + mu sync.RWMutex + + loggers ldlog.Loggers +} + +type persistentStore struct { + impl subsystems.DataStore // The persistentStore is read-only, or read-write. In read-only mode, the store // is *never* written to, and only read before the in-memory store is initialized. // This is equivalent to the concept of "daemon mode". // // In read-write mode, data from initializers/synchronizers is written to the store // as it is received. This is equivalent to the normal "persistent store" configuration // that an SDK can use to collaborate with zero or more other SDKs with a (possibly shared) database. + mode subsystems.DataStoreMode + // This exists as a quirk of the DataSourceUpdateSink interface, which store implements. The DataSourceUpdateSink + // has a method to return a DataStoreStatusProvider so that a DataSource can monitor the state of the store. This + // was originally used in fdv1 to know when the store went offline/online, so that data could be committed back + // to the store when it came back online. In fdv2 system, this is handled by the FDv2 struct itself, so the + // data source doesn't need any knowledge of it. We can delete this piece of infrastructure when we no longer + // need to support fdv1 (or we could refactor the fdv2 data sources to use a different set of interfaces that don't + // require this.)
- persistentStoreStatusProvider interfaces.DataStoreStatusProvider - - // Represents the store that all flag/segment data queries are served from after data is received from - // initializers/synchronizers. Before the in-memory store is initialized, queries are served from the - // persistentStore (if configured). - memoryStore subsystems.DataStore - - active subsystems.DataStore - - // Whether the memoryStore's data should be considered authoritative, or fresh - that is, if it is known - // to be the latest data. Data from a baked in file for example would not be considered refreshed. The purpose - // of this is to know if we should commit data to the persistentStore. For example, if we initialize with "stale" - // data from a local file (refreshed=false), we may not want to pollute a connected Redis database with it. - // TODO: this could also be called "Authoritative". "It was the latest at some point.. that point being when we asked - // if it was the latest". - availability DataAvailability - - // Protects the refreshed, persistentStore, persistentStoreMode, and active fields. - mu sync.RWMutex - - loggers ldlog.Loggers + statusProvider interfaces.DataStoreStatusProvider } // NewStore creates a new store. By default the store is in-memory. To add a persistent store, call SwapToPersistent. Ensure this is // called at configuration time, only once and before the store is ever accessed. func NewStore(loggers ldlog.Loggers) *Store { s := &Store{ - persistentStore: nil, - persistentStoreMode: subsystems.DataStoreModeRead, - memoryStore: datastore.NewInMemoryDataStore(loggers), - availability: Defaults, - loggers: loggers, + persistentStore: nil, + memoryStore: datastore.NewInMemoryDataStore(loggers), + quality: QualityNone, + loggers: loggers, } s.active = s.memoryStore return s @@ -96,7 +91,7 @@ func (s *Store) Close() error { s.mu.Lock() defer s.mu.Unlock() if s.persistentStore != nil { - return s.persistentStore.Close() + return s.persistentStore.impl.Close() } return nil } @@ -109,17 +104,10 @@ func (s *Store) getActive() subsystems.DataStore { return s.active } -// DataAvailability returns the status of the store's data. Defaults means there is no data, Cached means there is -// data, but it's not guaranteed to be recent, and Refreshed means the data has been refreshed from the server. -func (s *Store) DataAvailability() DataAvailability { - s.mu.RLock() - defer s.mu.RUnlock() - return s.availability -} - // Mirroring returns true data is being mirrored to a persistent store. func (s *Store) mirroring() bool { - return s.persistentStore != nil && s.persistentStoreMode == subsystems.DataStoreModeReadWrite + return s.persistentStore != nil && s.persistentStore.mode == subsystems.DataStoreModeReadWrite && + s.quality == QualityTrusted } // nolint:revive // Standard DataSourceUpdateSink method @@ -131,15 +119,11 @@ func (s *Store) Init(allData []ldstoretypes.Collection, payloadVersion *int) boo // TODO: handle errors from initializing the memory or persistent stores. 
if err := s.memoryStore.Init(allData); err == nil { s.active = s.memoryStore - if payloadVersion != nil { - s.availability = Refreshed - } else { - s.availability = Cached - } + s.quality = QualityTrusted } if s.mirroring() { - _ = s.persistentStore.Init(allData) // TODO: insert in topo-sort order + _ = s.persistentStore.impl.Init(allData) // TODO: insert in topo-sort order } return true } @@ -158,7 +142,7 @@ func (s *Store) Upsert(kind ldstoretypes.DataKind, key string, item ldstoretypes _, memErr = s.memoryStore.Upsert(kind, key, item) if s.mirroring() { - _, persErr = s.persistentStore.Upsert(kind, key, item) + _, persErr = s.persistentStore.impl.Upsert(kind, key, item) } return memErr == nil && persErr == nil } @@ -167,7 +151,10 @@ func (s *Store) Upsert(kind ldstoretypes.DataKind, key string, item ldstoretypes func (s *Store) GetDataStoreStatusProvider() interfaces.DataStoreStatusProvider { s.mu.RLock() defer s.mu.RUnlock() - return s.persistentStoreStatusProvider + if s.persistentStore == nil { + return nil + } + return s.persistentStore.statusProvider } // WithPersistence exists only because of the way the SDK's configuration builders work - we need a ClientContext @@ -176,16 +163,15 @@ func (s *Store) GetDataStoreStatusProvider() interfaces.DataStoreStatusProvider func (s *Store) WithPersistence(persistent subsystems.DataStore, mode subsystems.DataStoreMode, statusProvider interfaces.DataStoreStatusProvider) *Store { s.mu.Lock() defer s.mu.Unlock() - s.persistentStore = persistent - s.persistentStoreMode = mode - s.persistentStoreStatusProvider = statusProvider - s.active = s.persistentStore - - if s.persistentStore.IsInitialized() { - s.availability = Cached - } else { - s.availability = Defaults + + s.persistentStore = &persistentStore{ + impl: persistent, + mode: mode, + statusProvider: statusProvider, } + + s.active = s.persistentStore.impl + s.quality = QualityUntrusted return s } @@ -193,7 +179,7 @@ func (s *Store) Commit() error { s.mu.RLock() defer s.mu.RUnlock() - if s.availability == Refreshed && s.mirroring() { + if s.mirroring() { flags, err := s.memoryStore.GetAll(datakinds.Features) if err != nil { return err @@ -202,7 +188,7 @@ func (s *Store) Commit() error { if err != nil { return err } - return s.persistentStore.Init([]ldstoretypes.Collection{ + return s.persistentStore.impl.Init([]ldstoretypes.Collection{ {Kind: datakinds.Features, Items: flags}, {Kind: datakinds.Segments, Items: segments}, }) @@ -221,3 +207,17 @@ func (s *Store) Get(kind ldstoretypes.DataKind, key string) (ldstoretypes.ItemDe func (s *Store) IsInitialized() bool { return s.getActive().IsInitialized() } + +type DataQuality int + +const ( + QualityNone = DataQuality(0) + QualityUntrusted = DataQuality(1) + QualityTrusted = DataQuality(2) +) + +func (s *Store) DataQuality() DataQuality { + s.mu.RLock() + defer s.mu.RUnlock() + return s.quality +} diff --git a/internal/datasystem/store_test.go b/internal/datasystem/store_test.go index 74c54970..e3ac9a8f 100644 --- a/internal/datasystem/store_test.go +++ b/internal/datasystem/store_test.go @@ -22,11 +22,11 @@ func TestStore_New(t *testing.T) { assert.NoError(t, store.Close()) } -func TestStore_NoPersistence_NewStore_DataStatus(t *testing.T) { +func TestStore_NoPersistence_NewStore_DataQuality(t *testing.T) { logCapture := ldlogtest.NewMockLog() store := NewStore(logCapture.Loggers) defer store.Close() - assert.Equal(t, store.DataAvailability(), Defaults) + assert.Equal(t, store.DataQuality(), QualityNone) } func 
TestStore_NoPersistence_NewStore_IsInitialized(t *testing.T) { @@ -36,15 +36,20 @@ func TestStore_NoPersistence_NewStore_IsInitialized(t *testing.T) { assert.False(t, store.IsInitialized()) } -func TestStore_NoPersistence_MemoryStoreInitialized_DataStatus(t *testing.T) { +func TestStore_NoPersistence_MemoryStoreInitialized_DataQualityIsTrusted(t *testing.T) { + // It doesn't matter if the data has a payload version or not: the data quality should be + // trusted if it came from an initializer or synchronizer. + // This isn't necessarily what we want going forward, the quality should vary depending on the + // initializer/synchronizer implementation. + version1 := 1 tests := []struct { name string payloadVersion *int - expected DataAvailability + quality DataQuality }{ - {"fresh data", &version1, Refreshed}, - {"stale data", nil, Cached}, + {"fresh data", &version1, QualityTrusted}, + {"stale data", nil, QualityTrusted}, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { @@ -52,7 +57,7 @@ func TestStore_NoPersistence_MemoryStoreInitialized_DataStatus(t *testing.T) { store := NewStore(logCapture.Loggers) defer store.Close() store.Init([]ldstoretypes.Collection{}, tt.payloadVersion) - assert.Equal(t, store.DataAvailability(), tt.expected) + assert.Equal(t, store.DataQuality(), tt.quality) assert.True(t, store.IsInitialized()) }) } @@ -207,7 +212,7 @@ func TestStore_Concurrency(t *testing.T) { wg.Add(1) defer wg.Done() for i := 0; i < 100; i++ { - _ = store.DataAvailability() + _ = store.DataQuality() time.Sleep(time.Duration(rand.Intn(10)) * time.Millisecond) } }() From 4d033052c0d1b9e0dc2d29015baa1bd4bc7bdd4a Mon Sep 17 00:00:00 2001 From: Casey Waldren Date: Thu, 19 Sep 2024 13:24:59 -0700 Subject: [PATCH 36/62] add SetPersist --- internal/datasystem/store.go | 44 ++++++++++++------------------- internal/datasystem/store_test.go | 27 +++++++------------ subsystems/data_destination.go | 1 + 3 files changed, 28 insertions(+), 44 deletions(-) diff --git a/internal/datasystem/store.go b/internal/datasystem/store.go index 9b863e8c..15c39b2c 100644 --- a/internal/datasystem/store.go +++ b/internal/datasystem/store.go @@ -45,7 +45,7 @@ type Store struct { // Points to the active store. Swapped upon initialization. active subsystems.DataStore - quality DataQuality + persist bool // Protects the availability, persistentStore, quality, and active fields. mu sync.RWMutex @@ -79,13 +79,19 @@ func NewStore(loggers ldlog.Loggers) *Store { s := &Store{ persistentStore: nil, memoryStore: datastore.NewInMemoryDataStore(loggers), - quality: QualityNone, + persist: false, loggers: loggers, } s.active = s.memoryStore return s } +func (s *Store) SetPersist(persist bool) { + s.mu.Lock() + defer s.mu.Unlock() + s.persist = persist +} + // Close closes the store. If there is a persistent store configured, it will be closed. func (s *Store) Close() error { s.mu.Lock() @@ -105,12 +111,11 @@ func (s *Store) getActive() subsystems.DataStore { } // Mirroring returns true data is being mirrored to a persistent store. 
-func (s *Store) mirroring() bool { - return s.persistentStore != nil && s.persistentStore.mode == subsystems.DataStoreModeReadWrite && - s.quality == QualityTrusted +func (s *Store) shouldPersist() bool { + return s.persist && s.persistentStore != nil && s.persistentStore.mode == subsystems.DataStoreModeReadWrite } -// nolint:revive // Standard DataSourceUpdateSink method +// nolint:revive // Standard DataDestination method func (s *Store) Init(allData []ldstoretypes.Collection, payloadVersion *int) bool { s.mu.Lock() defer s.mu.Unlock() @@ -119,16 +124,15 @@ func (s *Store) Init(allData []ldstoretypes.Collection, payloadVersion *int) boo // TODO: handle errors from initializing the memory or persistent stores. if err := s.memoryStore.Init(allData); err == nil { s.active = s.memoryStore - s.quality = QualityTrusted } - if s.mirroring() { + if s.shouldPersist() { _ = s.persistentStore.impl.Init(allData) // TODO: insert in topo-sort order } return true } -// nolint:revive // Standard DataSourceUpdateSink method +// nolint:revive // Standard DataDestination method func (s *Store) Upsert(kind ldstoretypes.DataKind, key string, item ldstoretypes.ItemDescriptor) bool { s.mu.RLock() defer s.mu.RUnlock() @@ -141,13 +145,14 @@ func (s *Store) Upsert(kind ldstoretypes.DataKind, key string, item ldstoretypes // TXNS-PS: Requirement 1.3.3, must apply updates to in-memory before the persistent store. _, memErr = s.memoryStore.Upsert(kind, key, item) - if s.mirroring() { + if s.shouldPersist() { _, persErr = s.persistentStore.impl.Upsert(kind, key, item) } return memErr == nil && persErr == nil } -// nolint:revive // Standard DataSourceUpdateSink method +// GetDataStoreStatusProvider returns the status provider for the persistent store, if one is configured, otherwise +// nil. 
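To make the gating logic above concrete, here is a minimal sketch of the persist gate, using simplified stand-in types rather than the SDK's real ones:

type storeMode int

const (
	modeRead      storeMode = iota // daemon mode: the store is never written
	modeReadWrite                  // normal persistent-store mode
)

// persistGate captures the three conditions checked by shouldPersist above:
// the most recent payload asked to be persisted, a persistent store is
// configured, and that store is writable.
type persistGate struct {
	persist  bool
	hasStore bool
	mode     storeMode
}

func (g persistGate) shouldPersist() bool {
	return g.persist && g.hasStore && g.mode == modeReadWrite
}
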
func (s *Store) GetDataStoreStatusProvider() interfaces.DataStoreStatusProvider { s.mu.RLock() defer s.mu.RUnlock() @@ -171,7 +176,6 @@ func (s *Store) WithPersistence(persistent subsystems.DataStore, mode subsystems } s.active = s.persistentStore.impl - s.quality = QualityUntrusted return s } @@ -179,7 +183,7 @@ func (s *Store) Commit() error { s.mu.RLock() defer s.mu.RUnlock() - if s.mirroring() { + if s.shouldPersist() { flags, err := s.memoryStore.GetAll(datakinds.Features) if err != nil { return err @@ -207,17 +211,3 @@ func (s *Store) Get(kind ldstoretypes.DataKind, key string) (ldstoretypes.ItemDe func (s *Store) IsInitialized() bool { return s.getActive().IsInitialized() } - -type DataQuality int - -const ( - QualityNone = DataQuality(0) - QualityUntrusted = DataQuality(1) - QualityTrusted = DataQuality(2) -) - -func (s *Store) DataQuality() DataQuality { - s.mu.RLock() - defer s.mu.RUnlock() - return s.quality -} diff --git a/internal/datasystem/store_test.go b/internal/datasystem/store_test.go index e3ac9a8f..4657a922 100644 --- a/internal/datasystem/store_test.go +++ b/internal/datasystem/store_test.go @@ -22,13 +22,6 @@ func TestStore_New(t *testing.T) { assert.NoError(t, store.Close()) } -func TestStore_NoPersistence_NewStore_DataQuality(t *testing.T) { - logCapture := ldlogtest.NewMockLog() - store := NewStore(logCapture.Loggers) - defer store.Close() - assert.Equal(t, store.DataQuality(), QualityNone) -} - func TestStore_NoPersistence_NewStore_IsInitialized(t *testing.T) { logCapture := ldlogtest.NewMockLog() store := NewStore(logCapture.Loggers) @@ -36,20 +29,15 @@ func TestStore_NoPersistence_NewStore_IsInitialized(t *testing.T) { assert.False(t, store.IsInitialized()) } -func TestStore_NoPersistence_MemoryStoreInitialized_DataQualityIsTrusted(t *testing.T) { - // It doesn't matter if the data has a payload version or not: the data quality should be - // trusted if it came from an initializer or synchronizer. - // This isn't necessarily what we want going forward, the quality should vary depending on the - // initializer/synchronizer implementation. 
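Since GetDataStoreStatusProvider can now return nil when no persistent store is configured, callers have to guard for it before subscribing. A hedged sketch of such a caller (the surrounding wiring is hypothetical):

// Only the nil check is the point of this sketch.
if provider := store.GetDataStoreStatusProvider(); provider != nil {
	statuses := provider.AddStatusListener()
	go func() {
		for status := range statuses {
			_ = status // react to persistent store outages or recovery
		}
	}()
}
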
+func TestStore_NoPersistence_MemoryStore_IsInitialized(t *testing.T) { version1 := 1 tests := []struct { name string payloadVersion *int - quality DataQuality }{ - {"fresh data", &version1, QualityTrusted}, - {"stale data", nil, QualityTrusted}, + {"versioned data", &version1}, + {"unversioned data", nil}, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { @@ -57,7 +45,6 @@ func TestStore_NoPersistence_MemoryStoreInitialized_DataQualityIsTrusted(t *test store := NewStore(logCapture.Loggers) defer store.Close() store.Init([]ldstoretypes.Collection{}, tt.payloadVersion) - assert.Equal(t, store.DataQuality(), tt.quality) assert.True(t, store.IsInitialized()) }) } @@ -95,6 +82,8 @@ func TestStore_Commit(t *testing.T) { spy.isDown = false + store.SetPersist(true) + require.NoError(t, store.Commit()) assert.Equal(t, initPayload, spy.initPayload) @@ -119,6 +108,8 @@ func TestStore_Commit(t *testing.T) { require.Empty(t, spy.initPayload) + store.SetPersist(true) + require.NoError(t, store.Commit()) assert.Empty(t, spy.initPayload) @@ -144,6 +135,8 @@ func TestStore_Commit(t *testing.T) { require.Empty(t, spy.initPayload) + store.SetPersist(true) + require.NoError(t, store.Commit()) assert.Empty(t, spy.initPayload) @@ -212,7 +205,7 @@ func TestStore_Concurrency(t *testing.T) { wg.Add(1) defer wg.Done() for i := 0; i < 100; i++ { - _ = store.DataQuality() + store.SetPersist(true) time.Sleep(time.Duration(rand.Intn(10)) * time.Millisecond) } }() diff --git a/subsystems/data_destination.go b/subsystems/data_destination.go index 7f7dbc0a..fe2129e4 100644 --- a/subsystems/data_destination.go +++ b/subsystems/data_destination.go @@ -11,6 +11,7 @@ import ( // Do not use it. // You have been warned. type DataDestination interface { + // Init overwrites the current contents of the data store with a set of items for each collection. // // If the underlying data store returns an error during this operation, the SDK will log it, From 02f2627f59452dd5935f9432a8b6cc28c6ca0836 Mon Sep 17 00:00:00 2001 From: Casey Waldren Date: Thu, 19 Sep 2024 15:23:36 -0700 Subject: [PATCH 37/62] plumbing the persist bool around the store implementation --- internal/datasourcev2/polling_data_source.go | 7 +- .../datasourcev2/streaming_data_source.go | 8 +- internal/datasystem/fdv2_datasystem.go | 8 +- internal/datasystem/fdv2_store.go | 226 -------------- internal/datasystem/fdv2_store_test.go | 283 ------------------ internal/datasystem/store.go | 41 ++- .../data_system_configuration_builder.go | 8 +- subsystems/data_destination.go | 4 +- subsystems/data_source.go | 3 +- subsystems/data_source_status.go | 9 - subsystems/datasystem_configuration.go | 9 +- 11 files changed, 43 insertions(+), 563 deletions(-) delete mode 100644 internal/datasystem/fdv2_store.go delete mode 100644 internal/datasystem/fdv2_store_test.go delete mode 100644 subsystems/data_source_status.go diff --git a/internal/datasourcev2/polling_data_source.go b/internal/datasourcev2/polling_data_source.go index 022cfdd5..9940ae78 100644 --- a/internal/datasourcev2/polling_data_source.go +++ b/internal/datasourcev2/polling_data_source.go @@ -1,6 +1,7 @@ package datasourcev2 import ( + "context" "sync" "time" @@ -41,6 +42,7 @@ type PollingProcessor struct { isInitialized internal.AtomicBoolean quit chan struct{} closeOnce sync.Once + persist bool } // NewPollingProcessor creates the internal implementation of the polling data source. 
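The plumbing this patch threads through both data sources reduces to the pattern below (a sketch with trimmed-down, hypothetical types; the real interfaces carry more parameters and error semantics):

// A destination now receives an explicit per-payload persist hint instead
// of inferring freshness itself.
type collection struct{ kind, key string } // stand-in for ldstoretypes.Collection

type destination interface {
	Init(data []collection, version *int, persist bool) bool
}

// A source owns a persist default (true in this patch) and forwards it with
// every payload it delivers.
type source struct {
	dest    destination
	persist bool
}

func (s *source) deliver(data []collection) {
	s.dest.Init(data, nil, s.persist)
}
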
@@ -68,6 +70,7 @@ func newPollingProcessor( pollInterval: pollInterval, loggers: context.GetLogging().Loggers, quit: make(chan struct{}), + persist: true, } return pp } @@ -84,7 +87,7 @@ func (pp *PollingProcessor) Fetch(ctx context.Context) (*subsystems.InitialPaylo if err != nil { return nil, err } - return &subsystems.InitialPayload{Data: allData, Status: datastatus.Authoritative, Version: nil}, nil + return &subsystems.InitialPayload{Data: allData, Persist: true, Version: nil}, nil } //nolint:revive // DataSynchronizer method. @@ -165,7 +168,7 @@ func (pp *PollingProcessor) poll() error { // We initialize the store only if the request wasn't cached if !cached { - pp.dataDestination.Init(allData, nil) + pp.dataDestination.Init(allData, nil, pp.persist) } return nil } diff --git a/internal/datasourcev2/streaming_data_source.go b/internal/datasourcev2/streaming_data_source.go index 4e757341..8553a274 100644 --- a/internal/datasourcev2/streaming_data_source.go +++ b/internal/datasourcev2/streaming_data_source.go @@ -87,6 +87,7 @@ type StreamProcessor struct { connectionAttemptLock sync.Mutex readyOnce sync.Once closeOnce sync.Once + persist bool } // NewStreamProcessor creates the internal implementation of the streaming data source. @@ -103,6 +104,7 @@ func NewStreamProcessor( loggers: context.GetLogging().Loggers, halt: make(chan struct{}), cfg: cfg, + persist: true, } if cci, ok := context.(*internal.ClientContextImpl); ok { sp.diagnosticsManager = cci.DiagnosticsManager @@ -276,12 +278,12 @@ func (sp *StreamProcessor) consumeStream(stream *es.Stream, closeWhenReady chan< for _, update := range updates { switch u := update.(type) { case datasource.PatchData: - if !sp.dataDestination.Upsert(u.Kind, u.Key, u.Data) { + if !sp.dataDestination.Upsert(u.Kind, u.Key, u.Data, sp.persist) { //TODO: indicate that this can't actually fail anymore from the perspective of the data source storeUpdateFailed("streaming update of " + u.Key) } case datasource.PutData: - if sp.dataDestination.Init(u.Data, nil) { + if sp.dataDestination.Init(u.Data, nil, sp.persist) { sp.setInitializedAndNotifyClient(true, closeWhenReady) } else { //TODO: indicate that this can't actually fail anymore from the perspective of the data source @@ -290,7 +292,7 @@ func (sp *StreamProcessor) consumeStream(stream *es.Stream, closeWhenReady chan< } case datasource.DeleteData: deletedItem := ldstoretypes.ItemDescriptor{Version: u.Version, Item: nil} - if !sp.dataDestination.Upsert(u.Kind, u.Key, deletedItem) { + if !sp.dataDestination.Upsert(u.Kind, u.Key, deletedItem, sp.persist) { //TODO: indicate that this can't actually fail anymore from the perspective of the data source storeUpdateFailed("streaming deletion of " + u.Key) } diff --git a/internal/datasystem/fdv2_datasystem.go b/internal/datasystem/fdv2_datasystem.go index b9437d93..e7bd2576 100644 --- a/internal/datasystem/fdv2_datasystem.go +++ b/internal/datasystem/fdv2_datasystem.go @@ -143,10 +143,14 @@ func (f *FDv2) launchTask(task func()) { }() } +func (f *FDv2) hasDataSources() bool { + return len(f.initializers) > 0 || f.primarySync != nil +} + func (f *FDv2) run(ctx context.Context, closeWhenReady chan struct{}) { payloadVersion := f.runInitializers(ctx, closeWhenReady) - if f.dataStoreStatusProvider.IsStatusMonitoringEnabled() { + if f.hasDataSources() && f.dataStoreStatusProvider.IsStatusMonitoringEnabled() { f.launchTask(func() { f.runPersistentStoreOutageRecovery(ctx, f.dataStoreStatusProvider.AddStatusListener()) }) @@ -186,7 +190,7 @@ func (f *FDv2) 
runInitializers(ctx context.Context, closeWhenReady chan struct{} continue } f.loggers.Infof("Initialized via %s", initializer.Name()) - f.store.Init(payload.Data, payload.Status) + f.store.Init(payload.Data, payload.Version, payload.Persist) f.readyOnce.Do(func() { close(closeWhenReady) }) diff --git a/internal/datasystem/fdv2_store.go b/internal/datasystem/fdv2_store.go deleted file mode 100644 index 5b4c81d1..00000000 --- a/internal/datasystem/fdv2_store.go +++ /dev/null @@ -1,226 +0,0 @@ -package datasystem - -import ( - "sync" - - "github.com/launchdarkly/go-server-sdk/v7/internal/datastatus" - - "github.com/launchdarkly/go-sdk-common/v3/ldlog" - "github.com/launchdarkly/go-server-sdk/v7/interfaces" - "github.com/launchdarkly/go-server-sdk/v7/internal/datakinds" - "github.com/launchdarkly/go-server-sdk/v7/internal/datastore" - "github.com/launchdarkly/go-server-sdk/v7/subsystems" - "github.com/launchdarkly/go-server-sdk/v7/subsystems/ldstoretypes" -) - -// Store is a hybrid persistent/in-memory store that serves queries for data from the evaluation -// algorithm. -// -// At any given moment, 1 of 2 stores is active: in-memory, or persistent. This doesn't preclude a caller -// from holding on to a reference to the persistent store even when we swap to the in-memory store. -// -// Once the in-memory store has data (either from initializers running, or from a synchronizer), the persistent -// store is no longer regarded as active. From that point forward, GetActive() will return the in-memory store. -// -// The idea is that persistent stores can offer a way to immediately start evaluating flags before a connection -// is made to LD (or even in a very brief moment before an initializer has run.) The persistent store has caching -// logic which can result in inconsistent/stale date being used. Therefore, once we have fresh data, we don't -// want to use the persistent store at all. -// -// A complication is that persistent stores have historically operated in multiple regimes. The first is "daemon mode", -// where the SDK is effectively using the store in read-only mode, with the store being populated by Relay or another SDK. -// The second is just plain persistent store mode, where it is both read and written to. In the FDv2 system, we explicitly -// differentiate these cases using a read/read-write mode. In all cases, the in-memory store is used once it has data available. -// This contrasts from FDv1 where even if data from LD is available, that data may fall out of memory due to the persistent -// store's caching logic ("sparse mode", when the TTL is non-infinite). -// -// We have found this to almost always be undesirable for users. -type Store struct { - // Represents a remote store, like Redis. This is optional; if present, it's only used - // before the in-memory store is initialized. - persistentStore subsystems.DataStore - - // The persistentStore is read-only, or read-write. In read-only mode, the store - // is *never* written to, and only read before the in-memory store is initialized. - // This is equivalent to the concept of "daemon mode". - // - // In read-write mode, data from initializers/synchronizers is written to the store - // as it is received. This is equivalent to the normal "persistent store" configuration - // that an SDK can use to collaborate with zero or more other SDKs with a (possibly shared) database. - persistentStoreMode subsystems.StoreMode - - // This exists as a quirk of the DataSourceUpdateSink interface, which store implements. 
The DataSourceUpdateSink - // has a method to return a DataStoreStatusProvider so that a DataSource can monitor the state of the store. This - // was originally used in fdv1 to know when the store went offline/online, so that data could be committed back - // to the store when it came back online. In fdv2 system, this is handled by the FDv2 struct itself, so the - // data source doesn't need any knowledge of it. We can delete this piece of infrastructure when we no longer - // need to support fdv1 (or we could refactor the fdv2 data sources to use a different set of interfaces that don't - // require this.) - persistentStoreStatusProvider interfaces.DataStoreStatusProvider - - // Represents the store that all flag/segment data queries are served from after data is received from - // initializers/synchronizers. Before the in-memory store is initialized, queries are served from the - // persistentStore (if configured). - memoryStore subsystems.DataStore - - active subsystems.DataStore - - // Whether the memoryStore's data should be considered authoritative, or fresh - that is, if it is known - // to be the latest data. Data from a baked in file for example would not be considered refreshed. The purpose - // of this is to know if we should commit data to the persistentStore. For example, if we initialize with "stale" - // data from a local file (refreshed=false), we may not want to pollute a connected Redis database with it. - // TODO: this could also be called "Authoritative". "It was the latest at some point.. that point being when we asked - // if it was the latest". - availability DataAvailability - - // Protects the refreshed, persistentStore, persistentStoreMode, and active fields. - mu sync.RWMutex - - loggers ldlog.Loggers -} - -// NewStore creates a new store. By default the store is in-memory. To add a persistent store, call SwapToPersistent. Ensure this is -// called at configuration time, only once and before the store is ever accessed. -func NewStore(loggers ldlog.Loggers) *Store { - s := &Store{ - persistentStore: nil, - persistentStoreMode: subsystems.StoreModeRead, - memoryStore: datastore.NewInMemoryDataStore(loggers), - availability: Defaults, - loggers: loggers, - } - s.active = s.memoryStore - return s -} - -// Close closes the store. If there is a persistent store configured, it will be closed. -func (s *Store) Close() error { - s.mu.Lock() - defer s.mu.Unlock() - if s.persistentStore != nil { - return s.persistentStore.Close() - } - return nil -} - -// GetActive returns the active store, either persistent or in-memory. If there is no persistent store configured, -// the in-memory store is always active. -func (s *Store) getActive() subsystems.DataStore { - s.mu.RLock() - defer s.mu.RUnlock() - return s.active -} - -// DataAvailability returns the status of the store's data. Defaults means there is no data, Cached means there is -// data, but it's not guaranteed to be recent, and Refreshed means the data has been refreshed from the server. -func (s *Store) DataAvailability() DataAvailability { - s.mu.RLock() - defer s.mu.RUnlock() - return s.availability -} - -// Mirroring returns true data is being mirrored to a persistent store. 
-func (s *Store) mirroring() bool { - return s.persistentStore != nil && s.persistentStoreMode == subsystems.StoreModeReadWrite -} - -// nolint:revive // Standard DataSourceUpdateSink method -func (s *Store) Init(allData []ldstoretypes.Collection, dataStatus datastatus.DataStatus) bool { - s.mu.Lock() - defer s.mu.Unlock() - - // TXNS-PS: Requirement 1.3.3, must apply updates to in-memory before the persistent Store. - // TODO: handle errors from initializing the memory or persistent stores. - if err := s.memoryStore.Init(allData); err == nil { - s.active = s.memoryStore - if dataStatus == datastatus.Authoritative { - s.availability = Refreshed - } else { - s.availability = Cached - } - } - - if s.mirroring() { - _ = s.persistentStore.Init(allData) // TODO: insert in topo-sort order - } - return true -} - -// nolint:revive // Standard DataSourceUpdateSink method -func (s *Store) Upsert(kind ldstoretypes.DataKind, key string, item ldstoretypes.ItemDescriptor) bool { - s.mu.RLock() - defer s.mu.RUnlock() - - var ( - memErr error - persErr error - ) - - // TXNS-PS: Requirement 1.3.3, must apply updates to in-memory before the persistent store. - _, memErr = s.memoryStore.Upsert(kind, key, item) - - if s.mirroring() { - _, persErr = s.persistentStore.Upsert(kind, key, item) - } - return memErr == nil && persErr == nil -} - -// nolint:revive // Standard DataSourceUpdateSink method -func (s *Store) GetDataStoreStatusProvider() interfaces.DataStoreStatusProvider { - s.mu.RLock() - defer s.mu.RUnlock() - return s.persistentStoreStatusProvider -} - -// WithPersistence exists only because of the way the SDK's configuration builders work - we need a ClientContext -// before we can call Build to actually get the persistent store. That ClientContext requires the -// DataStoreUpdateSink, which is what this store struct implements. -func (s *Store) WithPersistence(persistent subsystems.DataStore, mode subsystems.StoreMode, statusProvider interfaces.DataStoreStatusProvider) *Store { - s.mu.Lock() - defer s.mu.Unlock() - s.persistentStore = persistent - s.persistentStoreMode = mode - s.persistentStoreStatusProvider = statusProvider - s.active = s.persistentStore - - if s.persistentStore.IsInitialized() { - s.availability = Cached - } else { - s.availability = Defaults - } - return s -} - -func (s *Store) Commit() error { - s.mu.RLock() - defer s.mu.RUnlock() - - // Note: DataAvailability() will also take a read lock. 
- if s.availability == Refreshed && s.mirroring() { - flags, err := s.memoryStore.GetAll(datakinds.Features) - if err != nil { - return err - } - segments, err := s.memoryStore.GetAll(datakinds.Segments) - if err != nil { - return err - } - return s.persistentStore.Init([]ldstoretypes.Collection{ - {Kind: datakinds.Features, Items: flags}, - {Kind: datakinds.Segments, Items: segments}, - }) - } - return nil -} - -func (s *Store) GetAll(kind ldstoretypes.DataKind) ([]ldstoretypes.KeyedItemDescriptor, error) { - return s.getActive().GetAll(kind) -} - -func (s *Store) Get(kind ldstoretypes.DataKind, key string) (ldstoretypes.ItemDescriptor, error) { - return s.getActive().Get(kind, key) -} - -func (s *Store) IsInitialized() bool { - return s.getActive().IsInitialized() -} diff --git a/internal/datasystem/fdv2_store_test.go b/internal/datasystem/fdv2_store_test.go deleted file mode 100644 index c0894277..00000000 --- a/internal/datasystem/fdv2_store_test.go +++ /dev/null @@ -1,283 +0,0 @@ -package datasystem - -import ( - "errors" - "math/rand" - "sync" - "testing" - "time" - - "github.com/launchdarkly/go-server-sdk/v7/internal/datastatus" - "github.com/launchdarkly/go-server-sdk/v7/subsystems" - "github.com/stretchr/testify/require" - - "github.com/launchdarkly/go-sdk-common/v3/ldlogtest" - "github.com/launchdarkly/go-server-sdk/v7/subsystems/ldstoreimpl" - "github.com/launchdarkly/go-server-sdk/v7/subsystems/ldstoretypes" - "github.com/stretchr/testify/assert" -) - -func TestStore_New(t *testing.T) { - logCapture := ldlogtest.NewMockLog() - store := NewStore(logCapture.Loggers) - assert.NoError(t, store.Close()) -} - -func TestStore_NoPersistence_NewStore_DataStatus(t *testing.T) { - logCapture := ldlogtest.NewMockLog() - store := NewStore(logCapture.Loggers) - defer store.Close() - assert.Equal(t, store.DataAvailability(), Defaults) -} - -func TestStore_NoPersistence_NewStore_IsInitialized(t *testing.T) { - logCapture := ldlogtest.NewMockLog() - store := NewStore(logCapture.Loggers) - defer store.Close() - assert.False(t, store.IsInitialized()) -} - -func TestStore_NoPersistence_MemoryStoreInitialized_DataStatus(t *testing.T) { - tests := []struct { - name string - datastatus datastatus.DataStatus - expected DataAvailability - }{ - {"fresh data", datastatus.Authoritative, Refreshed}, - {"stale data", datastatus.Derivative, Cached}, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - logCapture := ldlogtest.NewMockLog() - store := NewStore(logCapture.Loggers) - defer store.Close() - store.Init([]ldstoretypes.Collection{}, tt.datastatus) - assert.Equal(t, store.DataAvailability(), tt.expected) - assert.True(t, store.IsInitialized()) - }) - } -} - -func TestStore_Commit(t *testing.T) { - t.Run("no persistent store doesn't cause an error", func(t *testing.T) { - logCapture := ldlogtest.NewMockLog() - store := NewStore(logCapture.Loggers) - defer store.Close() - assert.NoError(t, store.Commit()) - }) - - t.Run("refreshed memory items are copied to persistent store in r/w mode", func(t *testing.T) { - logCapture := ldlogtest.NewMockLog() - - spy := &fakeStore{isDown: true} - - store := NewStore(logCapture.Loggers).WithPersistence(spy, subsystems.StoreModeReadWrite, nil) - defer store.Close() - - initPayload := []ldstoretypes.Collection{ - {Kind: ldstoreimpl.Features(), Items: []ldstoretypes.KeyedItemDescriptor{ - {Key: "foo", Item: ldstoretypes.ItemDescriptor{Version: 1}}, - }}, - {Kind: ldstoreimpl.Segments(), Items: []ldstoretypes.KeyedItemDescriptor{ - {Key: "bar", Item: 
ldstoretypes.ItemDescriptor{Version: 2}}, - }}, - } - - assert.True(t, store.Init(initPayload, datastatus.Authoritative)) - - require.Empty(t, spy.initPayload) - - spy.isDown = false - - require.NoError(t, store.Commit()) - - assert.Equal(t, initPayload, spy.initPayload) - }) - - t.Run("stale memory items are not copied to persistent store in r/w mode", func(t *testing.T) { - logCapture := ldlogtest.NewMockLog() - spy := &fakeStore{} - store := NewStore(logCapture.Loggers).WithPersistence(&fakeStore{}, subsystems.StoreModeReadWrite, nil) - defer store.Close() - - initPayload := []ldstoretypes.Collection{ - {Kind: ldstoreimpl.Features(), Items: []ldstoretypes.KeyedItemDescriptor{ - {Key: "foo", Item: ldstoretypes.ItemDescriptor{Version: 1}}, - }}, - {Kind: ldstoreimpl.Segments(), Items: []ldstoretypes.KeyedItemDescriptor{ - {Key: "bar", Item: ldstoretypes.ItemDescriptor{Version: 2}}, - }}, - } - - assert.True(t, store.Init(initPayload, datastatus.Derivative)) - - require.Empty(t, spy.initPayload) - - require.NoError(t, store.Commit()) - - assert.Empty(t, spy.initPayload) - }) - - t.Run("refreshed memory items are not copied to persistent store in r-only mode", func(t *testing.T) { - logCapture := ldlogtest.NewMockLog() - spy := &fakeStore{} - store := NewStore(logCapture.Loggers).WithPersistence(spy, subsystems.StoreModeRead, nil) - defer store.Close() - - initPayload := []ldstoretypes.Collection{ - {Kind: ldstoreimpl.Features(), Items: []ldstoretypes.KeyedItemDescriptor{ - {Key: "foo", Item: ldstoretypes.ItemDescriptor{Version: 1}}, - }}, - {Kind: ldstoreimpl.Segments(), Items: []ldstoretypes.KeyedItemDescriptor{ - {Key: "bar", Item: ldstoretypes.ItemDescriptor{Version: 2}}, - }}, - } - - assert.True(t, store.Init(initPayload, datastatus.Authoritative)) - - require.Empty(t, spy.initPayload) - - require.NoError(t, store.Commit()) - - assert.Empty(t, spy.initPayload) - }) -} - -func TestStore_GetActive(t *testing.T) { - t.Run("memory store is active if no persistent store configured", func(t *testing.T) { - logCapture := ldlogtest.NewMockLog() - store := NewStore(logCapture.Loggers) - defer store.Close() - foo, err := store.Get(ldstoreimpl.Features(), "foo") - assert.NoError(t, err) - assert.Equal(t, foo, ldstoretypes.ItemDescriptor{}.NotFound()) - - assert.True(t, store.Init([]ldstoretypes.Collection{ - {Kind: ldstoreimpl.Features(), Items: []ldstoretypes.KeyedItemDescriptor{ - {Key: "foo", Item: ldstoretypes.ItemDescriptor{Version: 1}}, - }}, - }, datastatus.Authoritative)) - - foo, err = store.Get(ldstoreimpl.Features(), "foo") - assert.NoError(t, err) - assert.Equal(t, 1, foo.Version) - }) - - t.Run("persistent store is active if configured", func(t *testing.T) { - logCapture := ldlogtest.NewMockLog() - store := NewStore(logCapture.Loggers).WithPersistence(&fakeStore{}, subsystems.StoreModeReadWrite, nil) - defer store.Close() - - _, err := store.Get(ldstoreimpl.Features(), "foo") - assert.Equal(t, errImAPersistentStore, err) - }) - - t.Run("active store swaps from persistent to memory", func(t *testing.T) { - logCapture := ldlogtest.NewMockLog() - store := NewStore(logCapture.Loggers).WithPersistence(&fakeStore{}, subsystems.StoreModeReadWrite, nil) - defer store.Close() - - _, err := store.Get(ldstoreimpl.Features(), "foo") - assert.Equal(t, errImAPersistentStore, err) - - assert.True(t, store.Init([]ldstoretypes.Collection{ - {Kind: ldstoreimpl.Features(), Items: []ldstoretypes.KeyedItemDescriptor{ - {Key: "foo", Item: ldstoretypes.ItemDescriptor{Version: 1}}, - }}, - }, 
datastatus.Authoritative)) - - foo, err := store.Get(ldstoreimpl.Features(), "foo") - assert.NoError(t, err) - assert.Equal(t, 1, foo.Version) - }) -} - -func TestStore_Concurrency(t *testing.T) { - t.Run("methods using the active store", func(t *testing.T) { - logCapture := ldlogtest.NewMockLog() - store := NewStore(logCapture.Loggers) - defer store.Close() - - var wg sync.WaitGroup - go func() { - wg.Add(1) - defer wg.Done() - for i := 0; i < 100; i++ { - _ = store.DataAvailability() - time.Sleep(time.Duration(rand.Intn(10)) * time.Millisecond) - } - }() - go func() { - wg.Add(1) - defer wg.Done() - for i := 0; i < 100; i++ { - _, _ = store.Get(ldstoreimpl.Features(), "foo") - time.Sleep(time.Duration(rand.Intn(10)) * time.Millisecond) - } - }() - - go func() { - wg.Add(1) - defer wg.Done() - for i := 0; i < 100; i++ { - _, _ = store.GetAll(ldstoreimpl.Features()) - time.Sleep(time.Duration(rand.Intn(10)) * time.Millisecond) - } - }() - go func() { - wg.Add(1) - defer wg.Done() - for i := 0; i < 100; i++ { - _ = store.IsInitialized() - time.Sleep(time.Duration(rand.Intn(10)) * time.Millisecond) - } - }() - go func() { - wg.Add(1) - defer wg.Done() - for i := 0; i < 100; i++ { - _ = store.Init([]ldstoretypes.Collection{}, datastatus.Authoritative) - time.Sleep(time.Duration(rand.Intn(10)) * time.Millisecond) - } - }() - }) -} - -type fakeStore struct { - initPayload []ldstoretypes.Collection - isDown bool -} - -var errImAPersistentStore = errors.New("i'm a persistent store") - -func (f *fakeStore) GetAll(kind ldstoretypes.DataKind) ([]ldstoretypes.KeyedItemDescriptor, error) { - return nil, nil -} - -func (f *fakeStore) Get(kind ldstoretypes.DataKind, key string) (ldstoretypes.ItemDescriptor, error) { - return ldstoretypes.ItemDescriptor{}, errImAPersistentStore -} - -func (f *fakeStore) IsInitialized() bool { - return false -} - -func (f *fakeStore) Init(allData []ldstoretypes.Collection) error { - if f.isDown { - return errors.New("store is down") - } - f.initPayload = allData - return nil -} - -func (f *fakeStore) Upsert(kind ldstoretypes.DataKind, key string, item ldstoretypes.ItemDescriptor) (bool, error) { - return false, nil -} - -func (f *fakeStore) IsStatusMonitoringEnabled() bool { - return false -} - -func (f *fakeStore) Close() error { - return nil -} diff --git a/internal/datasystem/store.go b/internal/datasystem/store.go index 15c39b2c..3a32512e 100644 --- a/internal/datasystem/store.go +++ b/internal/datasystem/store.go @@ -42,11 +42,11 @@ type Store struct { // the persistentStore may be used if configured. memoryStore subsystems.DataStore + persist bool + // Points to the active store. Swapped upon initialization. active subsystems.DataStore - persist bool - // Protects the availability, persistentStore, quality, and active fields. mu sync.RWMutex @@ -79,19 +79,13 @@ func NewStore(loggers ldlog.Loggers) *Store { s := &Store{ persistentStore: nil, memoryStore: datastore.NewInMemoryDataStore(loggers), - persist: false, loggers: loggers, + persist: false, } s.active = s.memoryStore return s } -func (s *Store) SetPersist(persist bool) { - s.mu.Lock() - defer s.mu.Unlock() - s.persist = persist -} - // Close closes the store. If there is a persistent store configured, it will be closed. 
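For orientation, the memory/persistent swap that these hunks preserve can be reduced to the following sketch (simplified types; not the SDK's implementation):

type dataStore interface {
	IsInitialized() bool
}

type hybrid struct {
	memory     dataStore
	persistent dataStore // nil unless WithPersistence was called
	active     dataStore // serves all reads
}

// Until fresh data arrives, reads are served by the persistent store if one
// is configured; the first delivery of data swaps reads to memory for good.
func newHybrid(memory, persistent dataStore) *hybrid {
	h := &hybrid{memory: memory, persistent: persistent, active: memory}
	if persistent != nil {
		h.active = persistent
	}
	return h
}

func (h *hybrid) onDataReceived() {
	h.active = h.memory
}
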
func (s *Store) Close() error { s.mu.Lock() @@ -116,15 +110,17 @@ func (s *Store) shouldPersist() bool { } // nolint:revive // Standard DataDestination method -func (s *Store) Init(allData []ldstoretypes.Collection, payloadVersion *int) bool { +func (s *Store) Init(allData []ldstoretypes.Collection, payloadVersion *int, persist bool) bool { s.mu.Lock() defer s.mu.Unlock() // TXNS-PS: Requirement 1.3.3, must apply updates to in-memory before the persistent Store. // TODO: handle errors from initializing the memory or persistent stores. - if err := s.memoryStore.Init(allData); err == nil { - s.active = s.memoryStore - } + _ = s.memoryStore.Init(allData) + + s.persist = persist + + s.active = s.memoryStore if s.shouldPersist() { _ = s.persistentStore.impl.Init(allData) // TODO: insert in topo-sort order @@ -133,22 +129,23 @@ func (s *Store) Init(allData []ldstoretypes.Collection, payloadVersion *int) boo } // nolint:revive // Standard DataDestination method -func (s *Store) Upsert(kind ldstoretypes.DataKind, key string, item ldstoretypes.ItemDescriptor) bool { +func (s *Store) Upsert(kind ldstoretypes.DataKind, key string, item ldstoretypes.ItemDescriptor, persist bool) bool { s.mu.RLock() defer s.mu.RUnlock() - var ( - memErr error - persErr error - ) - // TXNS-PS: Requirement 1.3.3, must apply updates to in-memory before the persistent store. - _, memErr = s.memoryStore.Upsert(kind, key, item) + _, _ = s.memoryStore.Upsert(kind, key, item) + + s.persist = persist if s.shouldPersist() { - _, persErr = s.persistentStore.impl.Upsert(kind, key, item) + _, err := s.persistentStore.impl.Upsert(kind, key, item) + if err != nil { + return false + } } - return memErr == nil && persErr == nil + + return true } // GetDataStoreStatusProvider returns the status provider for the persistent store, if one is configured, otherwise diff --git a/ldcomponents/data_system_configuration_builder.go b/ldcomponents/data_system_configuration_builder.go index 92117a11..cbbfcd6c 100644 --- a/ldcomponents/data_system_configuration_builder.go +++ b/ldcomponents/data_system_configuration_builder.go @@ -8,7 +8,7 @@ import ( type DataSystemConfigurationBuilder struct { storeBuilder ss.ComponentConfigurer[ss.DataStore] - storeMode ss.StoreMode + storeMode ss.DataStoreMode initializerBuilders []ss.ComponentConfigurer[ss.DataInitializer] primarySyncBuilder ss.ComponentConfigurer[ss.DataSynchronizer] secondarySyncBuilder ss.ComponentConfigurer[ss.DataSynchronizer] @@ -42,7 +42,7 @@ func (d *DataSystemModes) Polling() *DataSystemConfigurationBuilder { // Daemon configures the SDK to read from a persistent store integration that is populated by Relay Proxy // or other SDKs. The SDK will not connect to LaunchDarkly. In this mode, the SDK never writes to the data store. func (d *DataSystemModes) Daemon(store ss.ComponentConfigurer[ss.DataStore]) *DataSystemConfigurationBuilder { - return d.Custom().DataStore(store, ss.StoreModeRead) + return d.Custom().DataStore(store, ss.DataStoreModeRead) } // PersistentStore is similar to Default, with the addition of a @@ -50,7 +50,7 @@ func (d *DataSystemModes) Daemon(store ss.ComponentConfigurer[ss.DataStore]) *Da // evaluate flags using data from the persistent store. Once fresh data is available, the SDK // will no longer read from the persistent store, although it will keep it up-to-date. 
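Putting the modes above together, an application might wire the data system roughly like this (a hedged sketch: the ldredis integration and the ld import alias are illustrative, and Config.DataSystem is the experimental field this series introduces):

// Hypothetical wiring; any persistent store integration could stand in for
// ldredis.DataStore().
config := ld.Config{
	DataSystem: ldcomponents.DataSystem().PersistentStore(ldredis.DataStore()),
}

// Daemon mode would instead read from a store kept fresh by Relay Proxy:
//   config.DataSystem = ldcomponents.DataSystem().Daemon(ldredis.DataStore())
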
func (d *DataSystemModes) PersistentStore(store ss.ComponentConfigurer[ss.DataStore]) *DataSystemConfigurationBuilder { - return d.Default().DataStore(store, ss.StoreModeReadWrite) + return d.Default().DataStore(store, ss.DataStoreModeReadWrite) } // Custom returns a builder suitable for creating a custom data acquisition strategy. You may configure @@ -65,7 +65,7 @@ func DataSystem() *DataSystemModes { return &DataSystemModes{} } -func (d *DataSystemConfigurationBuilder) DataStore(store ss.ComponentConfigurer[ss.DataStore], storeMode ss.StoreMode) *DataSystemConfigurationBuilder { +func (d *DataSystemConfigurationBuilder) DataStore(store ss.ComponentConfigurer[ss.DataStore], storeMode ss.DataStoreMode) *DataSystemConfigurationBuilder { d.storeBuilder = store d.storeMode = storeMode return d diff --git a/subsystems/data_destination.go b/subsystems/data_destination.go index fe2129e4..fca2726f 100644 --- a/subsystems/data_destination.go +++ b/subsystems/data_destination.go @@ -18,7 +18,7 @@ type DataDestination interface { // and set the data source state to DataSourceStateInterrupted with an error of // DataSourceErrorKindStoreError. It will not return the error to the data source, but will // return false to indicate that the operation failed. - Init(allData []ldstoretypes.Collection, payloadVersion *int) bool + Init(allData []ldstoretypes.Collection, payloadVersion *int, persist bool) bool // Upsert updates or inserts an item in the specified collection. For updates, the object will only be // updated if the existing version is less than the new version. @@ -31,5 +31,5 @@ type DataDestination interface { // and set the data source state to DataSourceStateInterrupted with an error of // DataSourceErrorKindStoreError. It will not return the error to the data source, but will // return false to indicate that the operation failed. 
- Upsert(kind ldstoretypes.DataKind, key string, item ldstoretypes.ItemDescriptor) bool + Upsert(kind ldstoretypes.DataKind, key string, item ldstoretypes.ItemDescriptor, persist bool) bool } diff --git a/subsystems/data_source.go b/subsystems/data_source.go index 60df291d..0ee8e2d0 100644 --- a/subsystems/data_source.go +++ b/subsystems/data_source.go @@ -2,7 +2,6 @@ package subsystems import ( "context" - "github.com/launchdarkly/go-server-sdk/v7/internal/datastatus" "io" "github.com/launchdarkly/go-server-sdk/v7/subsystems/ldstoretypes" @@ -27,8 +26,8 @@ type DataSource interface { type InitialPayload struct { Data []ldstoretypes.Collection + Persist bool Version *int - Status datastatus.DataStatus } type DataInitializer interface { diff --git a/subsystems/data_source_status.go b/subsystems/data_source_status.go deleted file mode 100644 index 0ddd55b8..00000000 --- a/subsystems/data_source_status.go +++ /dev/null @@ -1,9 +0,0 @@ -package subsystems - -import ( - "github.com/launchdarkly/go-server-sdk/v7/interfaces" -) - -type DataSourceStatusReporter interface { - UpdateStatus(newState interfaces.DataSourceState, newError interfaces.DataSourceErrorInfo) -} diff --git a/subsystems/datasystem_configuration.go b/subsystems/datasystem_configuration.go index c23e7fdc..b2f662ab 100644 --- a/subsystems/datasystem_configuration.go +++ b/subsystems/datasystem_configuration.go @@ -5,16 +5,9 @@ type SynchronizersConfiguration struct { Secondary DataSynchronizer } -type StoreMode int - -const ( - StoreModeRead = 0 - StoreModeReadWrite = 1 -) - type DataSystemConfiguration struct { Store DataStore - StoreMode StoreMode + StoreMode DataStoreMode // Initializers obtain data for the SDK in a one-shot manner at startup. Their job is to get the SDK // into a state where it is serving somewhat fresh values as fast as possible. Initializers []DataInitializer From 24f380e3120469a1ec0b9f0686360b36a833945c Mon Sep 17 00:00:00 2001 From: Casey Waldren Date: Thu, 19 Sep 2024 15:28:58 -0700 Subject: [PATCH 38/62] use persist concept --- internal/datasystem/store.go | 41 ++++++++++++----------------- internal/datasystem/store_test.go | 41 +++++++++++-------------------- 2 files changed, 34 insertions(+), 48 deletions(-) diff --git a/internal/datasystem/store_test.go b/internal/datasystem/store_test.go index 4657a922..afc07bba 100644 --- a/internal/datasystem/store_test.go +++ b/internal/datasystem/store_test.go @@ -35,16 +35,19 @@ func TestStore_NoPersistence_MemoryStore_IsInitialized(t *testing.T) { tests := []struct { name string payloadVersion *int + persist bool }{ - {"versioned data", &version1}, - {"unversioned data", nil}, + {"versioned data, persist", &version1, true}, + {"versioned data, do not persist", &version1, false}, + {"unversioned data, persist", nil, true}, + {"unversioned data, do not persist", nil, false}, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { logCapture := ldlogtest.NewMockLog() store := NewStore(logCapture.Loggers) defer store.Close() - store.Init([]ldstoretypes.Collection{}, tt.payloadVersion) + store.Init([]ldstoretypes.Collection{}, tt.payloadVersion, tt.persist) assert.True(t, store.IsInitialized()) }) } @@ -58,7 +61,7 @@ func TestStore_Commit(t *testing.T) { assert.NoError(t, store.Commit()) }) - t.Run("refreshed memory items are copied to persistent store in r/w mode", func(t *testing.T) { + t.Run("persist memory items are copied to persistent store in r/w mode", func(t *testing.T) { logCapture := ldlogtest.NewMockLog() spy := &fakeStore{isDown: true} @@ -76,20 +79,18 @@ func TestStore_Commit(t *testing.T) { } version := 1 - assert.True(t, store.Init(initPayload, &version)) + assert.True(t, store.Init(initPayload, &version, true)) require.Empty(t, spy.initPayload) spy.isDown = false - store.SetPersist(true) - require.NoError(t, store.Commit()) assert.Equal(t, initPayload, spy.initPayload) }) - t.Run("stale memory items are not copied to persistent store in r/w mode", func(t *testing.T) { + t.Run("non-persist memory items are not copied to
persistent store in r/w mode", func(t *testing.T) { + t.Run("non-persist memory items are not copied to persistent store in r/w mode", func(t *testing.T) { logCapture := ldlogtest.NewMockLog() spy := &fakeStore{} store := NewStore(logCapture.Loggers).WithPersistence(&fakeStore{}, subsystems.DataStoreModeReadWrite, nil) @@ -104,18 +105,16 @@ func TestStore_Commit(t *testing.T) { }}, } - assert.True(t, store.Init(initPayload, nil)) + assert.True(t, store.Init(initPayload, nil, false)) require.Empty(t, spy.initPayload) - store.SetPersist(true) - require.NoError(t, store.Commit()) assert.Empty(t, spy.initPayload) }) - t.Run("refreshed memory items are not copied to persistent store in r-only mode", func(t *testing.T) { + t.Run("persist memory items are not copied to persistent store in r-only mode", func(t *testing.T) { logCapture := ldlogtest.NewMockLog() spy := &fakeStore{} store := NewStore(logCapture.Loggers).WithPersistence(spy, subsystems.DataStoreModeRead, nil) @@ -131,12 +130,10 @@ func TestStore_Commit(t *testing.T) { } version := 1 - assert.True(t, store.Init(initPayload, &version)) + assert.True(t, store.Init(initPayload, &version, true)) require.Empty(t, spy.initPayload) - store.SetPersist(true) - require.NoError(t, store.Commit()) assert.Empty(t, spy.initPayload) @@ -157,7 +154,7 @@ func TestStore_GetActive(t *testing.T) { {Kind: ldstoreimpl.Features(), Items: []ldstoretypes.KeyedItemDescriptor{ {Key: "foo", Item: ldstoretypes.ItemDescriptor{Version: 1}}, }}, - }, &version)) + }, &version, false)) foo, err = store.Get(ldstoreimpl.Features(), "foo") assert.NoError(t, err) @@ -186,7 +183,7 @@ func TestStore_GetActive(t *testing.T) { {Kind: ldstoreimpl.Features(), Items: []ldstoretypes.KeyedItemDescriptor{ {Key: "foo", Item: ldstoretypes.ItemDescriptor{Version: 1}}, }}, - }, &version)) + }, &version, false)) foo, err := store.Get(ldstoreimpl.Features(), "foo") assert.NoError(t, err) @@ -201,14 +198,6 @@ func TestStore_Concurrency(t *testing.T) { defer store.Close() var wg sync.WaitGroup - go func() { - wg.Add(1) - defer wg.Done() - for i := 0; i < 100; i++ { - store.SetPersist(true) - time.Sleep(time.Duration(rand.Intn(10)) * time.Millisecond) - } - }() go func() { wg.Add(1) defer wg.Done() @@ -239,7 +228,7 @@ func TestStore_Concurrency(t *testing.T) { defer wg.Done() for i := 0; i < 100; i++ { version := 1 - _ = store.Init([]ldstoretypes.Collection{}, &version) + _ = store.Init([]ldstoretypes.Collection{}, &version, true) time.Sleep(time.Duration(rand.Intn(10)) * time.Millisecond) } }() From 097429eda8ce74259a00ca60a36a7ec9a919862c Mon Sep 17 00:00:00 2001 From: Casey Waldren Date: Fri, 20 Sep 2024 16:44:58 -0700 Subject: [PATCH 39/62] make store implement FDv2 protocol --- .../streaming_data_source_events.go | 91 ++---------- internal/datasourcev2/polling_data_source.go | 73 ++++++++-- internal/datasourcev2/polling_http_request.go | 113 ++++++++++++--- .../datasourcev2/streaming_data_source.go | 126 +++------------- internal/datasourcev2/types.go | 15 +- .../datastore/in_memory_data_store_impl.go | 56 +++++--- internal/datasystem/store.go | 52 ++++--- internal/datasystem/store_test.go | 112 ++++++++------- internal/fdv2proto/event_to_storable_item.go | 58 ++++++++ internal/fdv2proto/proto.go | 136 ++++++++++++++++++ .../sharedtest/mocks/mock_data_destination.go | 45 +++--- subsystems/client_context.go | 6 +- subsystems/data_destination.go | 6 + 13 files changed, 558 insertions(+), 331 deletions(-) create mode 100644 internal/fdv2proto/event_to_storable_item.go create mode 100644 
internal/fdv2proto/proto.go diff --git a/internal/datasource/streaming_data_source_events.go b/internal/datasource/streaming_data_source_events.go index 4c03ac62..918272e4 100644 --- a/internal/datasource/streaming_data_source_events.go +++ b/internal/datasource/streaming_data_source_events.go @@ -2,12 +2,11 @@ package datasource import ( "errors" + "github.com/launchdarkly/go-server-sdk/v7/internal/fdv2proto" "strings" - "github.com/launchdarkly/go-server-sdk/v7/internal/datakinds" - "github.com/launchdarkly/go-server-sdk/v7/subsystems/ldstoretypes" - "github.com/launchdarkly/go-jsonstream/v3/jreader" + "github.com/launchdarkly/go-server-sdk/v7/internal/datakinds" ) var ( @@ -16,70 +15,8 @@ var ( deleteDataRequiredProperties = []string{"path", "version"} //nolint:gochecknoglobals ) -// PutData is the logical representation of the data in the "put" event. In the JSON representation, -// the "data" property is actually a map of maps, but the schema we use internally is a list of -// lists instead. -// -// The "path" property is normally always "/"; the LD streaming service sends this property, but -// some versions of Relay do not, so we do not require it. -// -// Example JSON representation: -// -// { -// "path": "/", -// "data": { -// "flags": { -// "flag1": { "key": "flag1", "version": 1, ...etc. }, -// "flag2": { "key": "flag2", "version": 1, ...etc. }, -// }, -// "segments": { -// "segment1": { "key", "segment1", "version": 1, ...etc. } -// } -// } -// } -type PutData struct { - Path string // we don't currently do anything with this - Data []ldstoretypes.Collection -} - -// PatchData is the logical representation of the data in the "patch" event. In the JSON representation, -// there is a "path" property in the format "/flags/key" or "/segments/key", which we convert into -// Kind and Key when we parse it. The "data" property is the JSON representation of the flag or -// segment, which we deserialize into an ItemDescriptor. -// -// Example JSON representation: -// -// { -// "path": "/flags/flagkey", -// "data": { -// "key": "flagkey", -// "version": 2, ...etc. -// } -// } -type PatchData struct { - Kind ldstoretypes.DataKind - Key string - Data ldstoretypes.ItemDescriptor -} - -// DeleteData is the logical representation of the data in the "delete" event. In the JSON representation, -// there is a "path" property in the format "/flags/key" or "/segments/key", which we convert into -// Kind and Key when we parse it. 
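The "/flags/key" and "/segments/key" path convention described in these comments can be parsed with a helper along the following lines (illustrative only, not the SDK's parser; assumes the standard library strings package):

// splitPath turns "/flags/flagkey" into ("flags", "flagkey").
func splitPath(path string) (kind, key string, ok bool) {
	parts := strings.SplitN(strings.TrimPrefix(path, "/"), "/", 2)
	if len(parts) != 2 || parts[0] == "" || parts[1] == "" {
		return "", "", false
	}
	return parts[0], parts[1], true
}
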
-// -// Example JSON representation: -// -// { -// "path": "/flags/flagkey", -// "version": 3 -// } -type DeleteData struct { - Kind ldstoretypes.DataKind - Key string - Version int -} - -func parsePutData(data []byte) (PutData, error) { - var ret PutData +func parsePutData(data []byte) (fdv2proto.PutData, error) { + var ret fdv2proto.PutData r := jreader.NewReader(data) for obj := r.Object().WithRequiredProperties(putDataRequiredProperties); obj.Next(); { switch string(obj.Name()) { @@ -92,15 +29,15 @@ func parsePutData(data []byte) (PutData, error) { return ret, r.Error() } -func parsePatchData(data []byte) (PatchData, error) { - var ret PatchData +func parsePatchData(data []byte) (fdv2proto.PatchData, error) { + var ret fdv2proto.PatchData r := jreader.NewReader(data) var kind datakinds.DataKindInternal var key string - parseItem := func() (PatchData, error) { + parseItem := func() (fdv2proto.PatchData, error) { item, err := kind.DeserializeFromJSONReader(&r) if err != nil { - return PatchData{}, err + return fdv2proto.PatchData{}, err } ret.Data = item return ret, nil @@ -126,7 +63,7 @@ func parsePatchData(data []byte) (PatchData, error) { } } if err := r.Error(); err != nil { - return PatchData{}, err + return fdv2proto.PatchData{}, err } // If we got here, it means we couldn't parse the data model object yet because we saw the // "data" property first. But we definitely saw both properties (otherwise we would've got @@ -138,13 +75,13 @@ func parsePatchData(data []byte) (PatchData, error) { } } if r.Error() != nil { - return PatchData{}, r.Error() + return fdv2proto.PatchData{}, r.Error() } - return PatchData{}, errors.New("patch event had no data property") + return fdv2proto.PatchData{}, errors.New("patch event had no data property") } -func parseDeleteData(data []byte) (DeleteData, error) { - var ret DeleteData +func parseDeleteData(data []byte) (fdv2proto.DeleteData, error) { + var ret fdv2proto.DeleteData r := jreader.NewReader(data) for obj := r.Object().WithRequiredProperties(deleteDataRequiredProperties); obj.Next(); { switch string(obj.Name()) { @@ -161,7 +98,7 @@ func parseDeleteData(data []byte) (DeleteData, error) { } } if r.Error() != nil { - return DeleteData{}, r.Error() + return fdv2proto.DeleteData{}, r.Error() } return ret, nil } diff --git a/internal/datasourcev2/polling_data_source.go b/internal/datasourcev2/polling_data_source.go index 131f2f4f..ec9085d4 100644 --- a/internal/datasourcev2/polling_data_source.go +++ b/internal/datasourcev2/polling_data_source.go @@ -1,15 +1,14 @@ package datasourcev2 import ( + "github.com/launchdarkly/go-server-sdk/v7/internal/fdv2proto" "sync" "time" - "github.com/launchdarkly/go-server-sdk/v7/internal/datasource" - "github.com/launchdarkly/go-server-sdk/v7/subsystems/ldstoretypes" - "github.com/launchdarkly/go-sdk-common/v3/ldlog" "github.com/launchdarkly/go-server-sdk/v7/interfaces" "github.com/launchdarkly/go-server-sdk/v7/internal" + "github.com/launchdarkly/go-server-sdk/v7/internal/datasource" "github.com/launchdarkly/go-server-sdk/v7/subsystems" ) @@ -18,10 +17,47 @@ const ( pollingWillRetryMessage = "will retry at next scheduled poll interval" ) -// Requester allows PollingProcessor to delegate fetching data to another component. 
+type PollingResponse struct { + events []fdv2proto.Event + cached bool + intent fdv2proto.IntentCode + selector fdv2proto.Selector +} + +func (p *PollingResponse) Events() []fdv2proto.Event { + return p.events +} + +func (p *PollingResponse) Cached() bool { + return p.cached +} + +func (p *PollingResponse) Intent() fdv2proto.IntentCode { + return p.intent +} + +func (p *PollingResponse) Selector() fdv2proto.Selector { + return p.selector +} + +func NewCachedPollingResponse() *PollingResponse { + return &PollingResponse{ + cached: true, + } +} + +func NewPollingResponse(intent fdv2proto.IntentCode, events []fdv2proto.Event, selector fdv2proto.Selector) *PollingResponse { + return &PollingResponse{ + events: events, + intent: intent, + selector: selector, + } +} + +// PollingRequester allows PollingProcessor to delegate fetching data to another component. // This is useful for testing the PollingProcessor without needing to set up a test HTTP server. -type Requester interface { - Request() (data []ldstoretypes.Collection, cached bool, err error) +type PollingRequester interface { + Request() (*PollingResponse, error) BaseURI() string FilterKey() string } @@ -32,9 +68,9 @@ type Requester interface { // configuration. All other code outside of this package should interact with it only via the // DataSource interface. type PollingProcessor struct { - dataDestination subsystems.DataDestination + dataDestination subsystems.DataDestination2 statusReporter subsystems.DataSourceStatusReporter - requester Requester + requester PollingRequester pollInterval time.Duration loggers ldlog.Loggers setInitializedOnce sync.Once @@ -46,7 +82,7 @@ type PollingProcessor struct { // NewPollingProcessor creates the internal implementation of the polling data source. func NewPollingProcessor( context subsystems.ClientContext, - dataDestination subsystems.DataDestination, + dataDestination subsystems.DataDestination2, statusReporter subsystems.DataSourceStatusReporter, cfg datasource.PollingConfig, ) *PollingProcessor { @@ -56,9 +92,9 @@ func NewPollingProcessor( func newPollingProcessor( context subsystems.ClientContext, - dataDestination subsystems.DataDestination, + dataDestination subsystems.DataDestination2, statusReporter subsystems.DataSourceStatusReporter, - requester Requester, + requester PollingRequester, pollInterval time.Duration, ) *PollingProcessor { pp := &PollingProcessor{ @@ -142,16 +178,23 @@ func (pp *PollingProcessor) Start(closeWhenReady chan<- struct{}) { } func (pp *PollingProcessor) poll() error { - allData, cached, err := pp.requester.Request() + response, err := pp.requester.Request() if err != nil { return err } - // We initialize the store only if the request wasn't cached - if !cached { - pp.dataDestination.Init(allData, nil) + if response.Cached() { + return nil } + + switch response.Intent() { + case fdv2proto.IntentTransferFull: + pp.dataDestination.SetBasis(response.Events(), response.Selector(), true) + case fdv2proto.IntentTransferChanges: + pp.dataDestination.ApplyDelta(response.Events(), response.Selector(), true) + } + return nil } diff --git a/internal/datasourcev2/polling_http_request.go b/internal/datasourcev2/polling_http_request.go index 447a03de..6cb09c18 100644 --- a/internal/datasourcev2/polling_http_request.go +++ b/internal/datasourcev2/polling_http_request.go @@ -4,13 +4,15 @@ import ( "encoding/json" "errors" "fmt" + "github.com/launchdarkly/go-jsonstream/v3/jreader" + "github.com/launchdarkly/go-server-sdk/v7/internal/datakinds" + 
"github.com/launchdarkly/go-server-sdk/v7/internal/fdv2proto" "io" "net/http" "net/url" + "strings" - es "github.com/launchdarkly/eventsource" "github.com/launchdarkly/go-sdk-common/v3/ldlog" - "github.com/launchdarkly/go-server-sdk/v7/internal/datasource" "github.com/launchdarkly/go-server-sdk/v7/internal/endpoints" "github.com/launchdarkly/go-server-sdk/v7/subsystems" "github.com/launchdarkly/go-server-sdk/v7/subsystems/ldstoretypes" @@ -68,42 +70,113 @@ func (r *pollingRequester) BaseURI() string { func (r *pollingRequester) FilterKey() string { return r.filterKey } -func (r *pollingRequester) Request() ([]ldstoretypes.Collection, bool, error) { +func (r *pollingRequester) Request() (*PollingResponse, error) { + if r.loggers.IsDebugEnabled() { r.loggers.Debug("Polling LaunchDarkly for feature flag updates") } body, cached, err := r.makeRequest(endpoints.PollingRequestPath) if err != nil { - return nil, false, err + return nil, err } if cached { - return nil, true, nil + return NewCachedPollingResponse(), nil } var payload pollingPayload if err = json.Unmarshal(body, &payload); err != nil { - return nil, false, malformedJSONError{err} + return nil, malformedJSONError{err} } - esEvents := make([]es.Event, 0, len(payload.Events)) - for _, event := range payload.Events { - esEvents = append(esEvents, event) + parseItem := func(r jreader.Reader, kind datakinds.DataKindInternal) (ldstoretypes.ItemDescriptor, error) { + item, err := kind.DeserializeFromJSONReader(&r) + return item, err } - data, err := convertChangesetEventsToPutData(esEvents) - if err != nil { - return nil, false, malformedJSONError{err} - } else if len(data) != 1 { - return nil, false, malformedJSONError{errors.New("missing expected put event")} - } + updates := make([]fdv2proto.Event, 0, len(payload.Events)) - putData, ok := data[0].(datasource.PutData) - if !ok { - return nil, false, malformedJSONError{errors.New("payload is not a PutData")} - } + var intent fdv2proto.IntentCode - return putData.Data, cached, nil + for _, event := range payload.Events { + switch event.Event() { + case fdv2proto.EventServerIntent: + { + var serverIntent serverIntent + err := json.Unmarshal([]byte(event.Data()), &serverIntent) + if err != nil { + return nil, err + } else if len(serverIntent.Payloads) == 0 { + return nil, errors.New("server-intent event has no payloads") + } + + intent = serverIntent.Payloads[0].Code + if intent == "none" { + return NewCachedPollingResponse(), nil + } + } + case fdv2proto.EventPutObject: + { + r := jreader.NewReader([]byte(event.Data())) + var kind, key string + var item ldstoretypes.ItemDescriptor + var err error + var dataKind datakinds.DataKindInternal + + for obj := r.Object().WithRequiredProperties([]string{versionField, kindField, "key", "object"}); obj.Next(); { + switch string(obj.Name()) { + case versionField: + // version = r.Int() + case kindField: + kind = strings.TrimRight(r.String(), "s") + dataKind = dataKindFromKind(kind) + case "key": + key = r.String() + case "object": + item, err = parseItem(r, dataKind) + if err != nil { + return nil, err + } + } + } + updates = append(updates, fdv2proto.PutObject{Kind: dataKind, Key: key, Object: item}) + } + case fdv2proto.EventDeleteObject: + { + r := jreader.NewReader([]byte(event.Data())) + var version int + var dataKind datakinds.DataKindInternal + var kind, key string + + for obj := r.Object().WithRequiredProperties([]string{versionField, kindField, keyField}); obj.Next(); { + switch string(obj.Name()) { + case versionField: + version = r.Int() + 
case kindField: + kind = strings.TrimRight(r.String(), "s") + dataKind = dataKindFromKind(kind) + if dataKind == nil { + //nolint: godox + // TODO: We are skipping here without showing a warning. Need to address that later. + continue + } + case keyField: + key = r.String() + } + } + updates = append(updates, fdv2proto.DeleteObject{Kind: dataKind, Key: key, Version: version}) + + } + case fdv2proto.EventPayloadTransferred: + // TODO: deserialize the state and create a fdv2proto.Selector. + } + } + + if intent == "" { + return nil, errors.New("no server-intent event found in polling response") + } + + return NewPollingResponse(intent, updates, fdv2proto.NoSelector()), nil } func (r *pollingRequester) makeRequest(resource string) ([]byte, bool, error) { diff --git a/internal/datasourcev2/streaming_data_source.go b/internal/datasourcev2/streaming_data_source.go index 00b46598..e56f8ffd 100644 --- a/internal/datasourcev2/streaming_data_source.go +++ b/internal/datasourcev2/streaming_data_source.go @@ -3,6 +3,7 @@ package datasourcev2 import ( "encoding/json" "errors" + "github.com/launchdarkly/go-server-sdk/v7/internal/fdv2proto" "net/http" "net/url" "strings" @@ -74,7 +75,7 @@ const ( // DataSource interface. type StreamProcessor struct { cfg datasource.StreamConfig - dataDestination subsystems.DataDestination + dataDestination subsystems.DataDestination2 statusReporter subsystems.DataSourceStatusReporter client *http.Client headers http.Header @@ -91,7 +92,7 @@ type StreamProcessor struct { // NewStreamProcessor creates the internal implementation of the streaming data source. func NewStreamProcessor( context subsystems.ClientContext, - dataDestination subsystems.DataDestination, + dataDestination subsystems.DataDestination2, statusReporter subsystems.DataSourceStatusReporter, cfg datasource.StreamConfig, ) *StreamProcessor { @@ -188,12 +189,6 @@ func (sp *StreamProcessor) consumeStream(stream *es.Stream, closeWhenReady chan< processedEvent = false } - storeUpdateFailed := func(updateDesc string) { - sp.loggers.Errorf("Failed to store %s in data store; will restart stream until successful", updateDesc) - shouldRestart = true // scenario 2b - processedEvent = false - } - switch event.Event() { case "heart-beat": // Swallow the event and move on. @@ -251,34 +246,23 @@ func (sp *StreamProcessor) consumeStream(stream *es.Stream, closeWhenReady chan< // TODO: Do we need to restart here? 
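// Inferred shape of the server-intent event consumed by the polling request
// handler above, based on the payload struct in types.go (id/target/code/reason).
// The JSON key names and values here are assumptions for illustration; the wire
// format is not spelled out in this diff. A code of "none" short-circuits the
// poll into a cached, no-op response.
const exampleServerIntent = `{
	"payloads": [
		{"id": "payload-1", "target": 3, "code": "xfer-full", "reason": "stale"}
	]
}`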
case "payload-transferred": currentChangeSet.events = append(currentChangeSet.events, event) - updates, err := processChangeset(currentChangeSet) + updates, err := deserializeEvents(currentChangeSet.events) if err != nil { sp.loggers.Errorf("Error processing changeset: %s", err) gotMalformedEvent(nil, err) break } - for _, update := range updates { - switch u := update.(type) { - case datasource.PatchData: - if !sp.dataDestination.Upsert(u.Kind, u.Key, u.Data) { - storeUpdateFailed("streaming update of " + u.Key) - } - case datasource.PutData: - if sp.dataDestination.Init(u.Data, nil) { - sp.setInitializedAndNotifyClient(true, closeWhenReady) - } else { - storeUpdateFailed("initial streaming data") - } - case datasource.DeleteData: - deletedItem := ldstoretypes.ItemDescriptor{Version: u.Version, Item: nil} - if !sp.dataDestination.Upsert(u.Kind, u.Key, deletedItem) { - storeUpdateFailed("streaming deletion of " + u.Key) - } - - default: - sp.loggers.Infof("Unexpected update found in changeset: %s", update) + + switch currentChangeSet.intent.Payloads[0].Code { + case fdv2proto.IntentTransferFull: + { + sp.dataDestination.SetBasis(updates, fdv2proto.NoSelector(), true) + sp.setInitializedAndNotifyClient(true, closeWhenReady) } + case fdv2proto.IntentTransferChanges: + sp.dataDestination.ApplyDelta(updates, fdv2proto.NoSelector(), true) } + currentChangeSet = changeSet{events: make([]es.Event, 0)} default: sp.loggers.Infof("Unexpected event found in stream: %s", event.Event()) @@ -449,16 +433,8 @@ func (sp *StreamProcessor) GetFilterKey() string { return sp.cfg.FilterKey } -func processChangeset(changeSet changeSet) ([]any, error) { - if changeSet.intent == nil || changeSet.intent.Payloads[0].Code != "xfer-full" { - return convertChangesetEventsToPatchData(changeSet.events) - } - - return convertChangesetEventsToPutData(changeSet.events) -} - -func convertChangesetEventsToPatchData(events []es.Event) ([]any, error) { - updates := make([]interface{}, 0, len(events)) +func deserializeEvents(events []es.Event) ([]fdv2proto.Event, error) { + updates := make([]fdv2proto.Event, 0, len(events)) parseItem := func(r jreader.Reader, kind datakinds.DataKindInternal) (ldstoretypes.ItemDescriptor, error) { item, err := kind.DeserializeFromJSONReader(&r) @@ -466,8 +442,8 @@ func convertChangesetEventsToPatchData(events []es.Event) ([]any, error) { } for _, event := range events { - switch event.Event() { - case putEventName: + switch fdv2proto.EventName(event.Event()) { + case fdv2proto.EventPutObject: r := jreader.NewReader([]byte(event.Data())) // var version int var dataKind datakinds.DataKindInternal @@ -496,10 +472,8 @@ func convertChangesetEventsToPatchData(events []es.Event) ([]any, error) { } } } - - patchData := datasource.PatchData{Kind: dataKind, Key: key, Data: item} - updates = append(updates, patchData) - case deleteEventName: + updates = append(updates, fdv2proto.PutObject{Kind: dataKind, Key: key, Object: item}) + case fdv2proto.EventDeleteObject: r := jreader.NewReader([]byte(event.Data())) var version int var dataKind datakinds.DataKindInternal @@ -521,73 +495,13 @@ func convertChangesetEventsToPatchData(events []es.Event) ([]any, error) { key = r.String() } } - patchData := datasource.DeleteData{Kind: dataKind, Key: key, Version: version} - updates = append(updates, patchData) + updates = append(updates, fdv2proto.DeleteObject{Kind: dataKind, Key: key, Version: version}) } } return updates, nil } -func convertChangesetEventsToPutData(events []es.Event) ([]any, error) { - 
segmentCollection := ldstoretypes.Collection{ - Kind: datakinds.Segments, - Items: make([]ldstoretypes.KeyedItemDescriptor, 0)} - flagCollection := ldstoretypes.Collection{ - Kind: datakinds.Features, - Items: make([]ldstoretypes.KeyedItemDescriptor, 0)} - - parseItem := func(r jreader.Reader, kind datakinds.DataKindInternal) (ldstoretypes.ItemDescriptor, error) { - item, err := kind.DeserializeFromJSONReader(&r) - return item, err - } - - for _, event := range events { - switch event.Event() { - case putEventName: - r := jreader.NewReader([]byte(event.Data())) - // var version int - var kind, key string - var item ldstoretypes.ItemDescriptor - var err error - var dataKind datakinds.DataKindInternal - - for obj := r.Object().WithRequiredProperties([]string{versionField, kindField, "key", "object"}); obj.Next(); { - switch string(obj.Name()) { - case versionField: - // version = r.Int() - case kindField: - kind = strings.TrimRight(r.String(), "s") - dataKind = dataKindFromKind(kind) - case "key": - key = r.String() - case "object": - item, err = parseItem(r, dataKind) - if err != nil { - return []any{}, err - } - } - } - - //nolint: godox - // TODO: What is the actual name we should use here? - if kind == "flag" { - flagCollection.Items = append(flagCollection.Items, ldstoretypes.KeyedItemDescriptor{Key: key, Item: item}) - } else if kind == "segment" { - segmentCollection.Items = append(segmentCollection.Items, ldstoretypes.KeyedItemDescriptor{Key: key, Item: item}) - } - case deleteEventName: - // NOTE: We can skip this. We are replacing everything in the - // store so who cares if something was deleted. This shouldn't - // even occur really. - } - } - - putData := datasource.PutData{Path: "/", Data: []ldstoretypes.Collection{flagCollection, segmentCollection}} - - return []any{putData}, nil -} - func dataKindFromKind(kind string) datakinds.DataKindInternal { switch kind { case "flag": diff --git a/internal/datasourcev2/types.go b/internal/datasourcev2/types.go index a3e912e5..e5d59b24 100644 --- a/internal/datasourcev2/types.go +++ b/internal/datasourcev2/types.go @@ -2,6 +2,7 @@ package datasourcev2 import ( "encoding/json" + "github.com/launchdarkly/go-server-sdk/v7/internal/fdv2proto" es "github.com/launchdarkly/eventsource" ) @@ -11,8 +12,8 @@ type pollingPayload struct { } type event struct { - Name string `json:"name"` - EventData json.RawMessage `json:"data"` + Name fdv2proto.EventName `json:"name"` + EventData json.RawMessage `json:"data"` } // Begin es.Event interface implementation @@ -23,7 +24,7 @@ func (e event) Id() string { //nolint:stylecheck // The interface requires this } // Event returns the name of the event. -func (e event) Event() string { +func (e event) Event() fdv2proto.EventName { return e.Name } @@ -49,10 +50,10 @@ type payload struct { // It would be nice if we had the same value available in both so we could // use that as the key consistently throughout the the process. - ID string `json:"id"` - Target int `json:"target"` - Code string `json:"code"` - Reason string `json:"reason"` + ID string `json:"id"` + Target int `json:"target"` + Code fdv2proto.IntentCode `json:"code"` + Reason string `json:"reason"` } // This is the general shape of a put-object event. The delete-object is the same, with the object field being nil. 
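// Inferred example payloads for the two object events, reconstructed from the
// field-by-field reader logic above; the FDv2 wire format is not specified in
// this diff, so treat these as sketches. Note the plural kind names ("flags",
// "segments"), which the parser trims to "flag"/"segment" with strings.TrimRight.
const examplePutObject = `{
	"version": 1,
	"kind": "flags",
	"key": "my-flag",
	"object": {"key": "my-flag", "version": 1}
}`

const exampleDeleteObject = `{
	"version": 2,
	"kind": "segments",
	"key": "my-segment"
}`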
diff --git a/internal/datastore/in_memory_data_store_impl.go b/internal/datastore/in_memory_data_store_impl.go index c00fa88f..029a88e7 100644 --- a/internal/datastore/in_memory_data_store_impl.go +++ b/internal/datastore/in_memory_data_store_impl.go @@ -4,11 +4,10 @@ import ( "sync" "github.com/launchdarkly/go-sdk-common/v3/ldlog" - "github.com/launchdarkly/go-server-sdk/v7/subsystems" "github.com/launchdarkly/go-server-sdk/v7/subsystems/ldstoretypes" ) -// inMemoryDataStore is a memory based DataStore implementation, backed by a lock-striped map. +// MemoryStore is a memory based DataStore implementation, backed by a lock-striped map. // // Implementation notes: // @@ -17,24 +16,23 @@ import ( // Get and IsInitialized). To make it safe to hold a lock without deferring the unlock, we must ensure that // there is only one return point from each method, and that there is no operation that could possibly cause a // panic after the lock has been acquired. See notes on performance in CONTRIBUTING.md. -type inMemoryDataStore struct { +type MemoryStore struct { allData map[ldstoretypes.DataKind]map[string]ldstoretypes.ItemDescriptor isInitialized bool sync.RWMutex loggers ldlog.Loggers } -// NewInMemoryDataStore creates an instance of the in-memory data store. This is not part of the public API; it is -// always called through ldcomponents.inMemoryDataStore(). -func NewInMemoryDataStore(loggers ldlog.Loggers) subsystems.DataStore { - return &inMemoryDataStore{ +// NewInMemoryDataStore creates an instance of the in-memory data store. This is not part of the public API. +func NewInMemoryDataStore(loggers ldlog.Loggers) *MemoryStore { + return &MemoryStore{ allData: make(map[ldstoretypes.DataKind]map[string]ldstoretypes.ItemDescriptor), isInitialized: false, loggers: loggers, } } -func (store *inMemoryDataStore) Init(allData []ldstoretypes.Collection) error { +func (store *MemoryStore) Init(allData []ldstoretypes.Collection) error { store.Lock() store.allData = make(map[ldstoretypes.DataKind]map[string]ldstoretypes.ItemDescriptor) @@ -54,7 +52,23 @@ func (store *inMemoryDataStore) Init(allData []ldstoretypes.Collection) error { return nil } -func (store *inMemoryDataStore) Get(kind ldstoretypes.DataKind, key string) (ldstoretypes.ItemDescriptor, error) { +func (store *MemoryStore) SetBasis(allData []ldstoretypes.Collection) { + _ = store.Init(allData) +} + +func (store *MemoryStore) ApplyDelta(allData []ldstoretypes.Collection) { + store.Lock() + + for _, coll := range allData { + for _, item := range coll.Items { + store.upsert(coll.Kind, item.Key, item.Item) + } + } + + store.Unlock() +} + +func (store *MemoryStore) Get(kind ldstoretypes.DataKind, key string) (ldstoretypes.ItemDescriptor, error) { store.RLock() var coll map[string]ldstoretypes.ItemDescriptor @@ -76,7 +90,7 @@ func (store *inMemoryDataStore) Get(kind ldstoretypes.DataKind, key string) (lds return ldstoretypes.ItemDescriptor{}.NotFound(), nil } -func (store *inMemoryDataStore) GetAll(kind ldstoretypes.DataKind) ([]ldstoretypes.KeyedItemDescriptor, error) { +func (store *MemoryStore) GetAll(kind ldstoretypes.DataKind) ([]ldstoretypes.KeyedItemDescriptor, error) { store.RLock() var itemsOut []ldstoretypes.KeyedItemDescriptor @@ -94,13 +108,10 @@ func (store *inMemoryDataStore) GetAll(kind ldstoretypes.DataKind) ([]ldstoretyp return itemsOut, nil } -func (store *inMemoryDataStore) Upsert( +func (store *MemoryStore) upsert( kind ldstoretypes.DataKind, key string, - newItem ldstoretypes.ItemDescriptor, -) (bool, error) { - store.Lock() 
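// The version rule that the refactored upsert preserves (shown split across
// this hunk), restated as a standalone sketch rather than a new API: an
// incoming item wins only if the key is absent or the stored version is
// strictly older. Deletions participate as versioned tombstones (nil Item),
// so a late-arriving older update cannot resurrect a deleted item.
func shouldApply(existing ldstoretypes.ItemDescriptor, hasExisting bool, incoming ldstoretypes.ItemDescriptor) bool {
	return !hasExisting || existing.Version < incoming.Version
}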
- + newItem ldstoretypes.ItemDescriptor) bool { var coll map[string]ldstoretypes.ItemDescriptor var ok bool shouldUpdate := true @@ -120,23 +131,32 @@ func (store *inMemoryDataStore) Upsert( coll[key] = newItem updated = true } + return updated +} +func (store *MemoryStore) Upsert( + kind ldstoretypes.DataKind, + key string, + newItem ldstoretypes.ItemDescriptor, +) (bool, error) { + store.Lock() + updated := store.upsert(kind, key, newItem) store.Unlock() return updated, nil } -func (store *inMemoryDataStore) IsInitialized() bool { +func (store *MemoryStore) IsInitialized() bool { store.RLock() ret := store.isInitialized store.RUnlock() return ret } -func (store *inMemoryDataStore) IsStatusMonitoringEnabled() bool { +func (store *MemoryStore) IsStatusMonitoringEnabled() bool { return false } -func (store *inMemoryDataStore) Close() error { +func (store *MemoryStore) Close() error { return nil } diff --git a/internal/datasystem/store.go b/internal/datasystem/store.go index 3a32512e..de62e4f5 100644 --- a/internal/datasystem/store.go +++ b/internal/datasystem/store.go @@ -1,6 +1,7 @@ package datasystem import ( + "github.com/launchdarkly/go-server-sdk/v7/internal/fdv2proto" "sync" "github.com/launchdarkly/go-sdk-common/v3/ldlog" @@ -40,7 +41,7 @@ type Store struct { // Represents the SDK's source of truth for flag evaluations (once initialized). Before initialization, // the persistentStore may be used if configured. - memoryStore subsystems.DataStore + memoryStore *datastore.MemoryStore persist bool @@ -109,43 +110,62 @@ func (s *Store) shouldPersist() bool { return s.persist && s.persistentStore != nil && s.persistentStore.mode == subsystems.DataStoreModeReadWrite } -// nolint:revive // Standard DataDestination method -func (s *Store) Init(allData []ldstoretypes.Collection, payloadVersion *int, persist bool) bool { +func (s *Store) init(allData []ldstoretypes.Collection, selector fdv2proto.Selector, persist bool) error { s.mu.Lock() defer s.mu.Unlock() // TXNS-PS: Requirement 1.3.3, must apply updates to in-memory before the persistent Store. // TODO: handle errors from initializing the memory or persistent stores. - _ = s.memoryStore.Init(allData) + s.memoryStore.SetBasis(allData) s.persist = persist s.active = s.memoryStore if s.shouldPersist() { - _ = s.persistentStore.impl.Init(allData) // TODO: insert in topo-sort order + return s.persistentStore.impl.Init(allData) // TODO: insert in topo-sort order } - return true + + return nil } -// nolint:revive // Standard DataDestination method -func (s *Store) Upsert(kind ldstoretypes.DataKind, key string, item ldstoretypes.ItemDescriptor, persist bool) bool { - s.mu.RLock() - defer s.mu.RUnlock() +func (s *Store) SetBasis(events []fdv2proto.Event, selector fdv2proto.Selector, persist bool) error { + collections := fdv2proto.ToStorableItems(events) + return s.init(collections, selector, persist) +} - // TXNS-PS: Requirement 1.3.3, must apply updates to in-memory before the persistent store. - _, _ = s.memoryStore.Upsert(kind, key, item) +func (s *Store) ApplyDelta(events []fdv2proto.Event, selector fdv2proto.Selector, persist bool) error { + collections := fdv2proto.ToStorableItems(events) + + s.mu.Lock() + defer s.mu.Unlock() + + s.memoryStore.ApplyDelta(collections) s.persist = persist + // The process for applying the delta to the memory store is different than the persistent store + // because persistent stores are not yet transactional in regards to payload version. 
This means + // we still need to apply a series of upserts, so the state of the store may be inconsistent when that + // is happening. In practice, we often don't receive more than one event at a time, but this may change + // in the future. if s.shouldPersist() { - _, err := s.persistentStore.impl.Upsert(kind, key, item) - if err != nil { - return false + for _, event := range events { + var err error + switch e := event.(type) { + case fdv2proto.PutObject: + _, err = s.persistentStore.impl.Upsert(e.Kind, e.Key, ldstoretypes.ItemDescriptor{Version: e.Version, Item: e.Object}) + case fdv2proto.DeleteObject: + _, err = s.persistentStore.impl.Upsert(e.Kind, e.Key, ldstoretypes.ItemDescriptor{Version: e.Version, Item: nil}) + } + // TODO: return error? + if err != nil { + s.loggers.Errorf("Error applying %s to persistent store: %s", event.Name(), err) + } } } - return true + return nil } // GetDataStoreStatusProvider returns the status provider for the persistent store, if one is configured, otherwise diff --git a/internal/datasystem/store_test.go b/internal/datasystem/store_test.go index afc07bba..d2b7e577 100644 --- a/internal/datasystem/store_test.go +++ b/internal/datasystem/store_test.go @@ -2,6 +2,8 @@ package datasystem import ( "errors" + "github.com/launchdarkly/go-server-sdk/v7/internal/datakinds" + "github.com/launchdarkly/go-server-sdk/v7/internal/fdv2proto" "math/rand" "sync" "testing" @@ -31,23 +33,24 @@ func TestStore_NoPersistence_NewStore_IsInitialized(t *testing.T) { func TestStore_NoPersistence_MemoryStore_IsInitialized(t *testing.T) { - version1 := 1 + v1 := fdv2proto.NewSelector("", 1) + none := fdv2proto.NoSelector() tests := []struct { - name string - payloadVersion *int - persist bool + name string + selector fdv2proto.Selector + persist bool }{ - {"versioned data, persist", &version1, true}, - {"versioned data, do not persist", &version1, false}, - {"unversioned data, persist", nil, true}, - {"unversioned data, do not persist", nil, false}, + {"versioned data, persist", v1, true}, + {"versioned data, do not persist", v1, false}, + {"unversioned data, persist", none, true}, + {"unversioned data, do not persist", none, false}, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { logCapture := ldlogtest.NewMockLog() store := NewStore(logCapture.Loggers) defer store.Close() - store.Init([]ldstoretypes.Collection{}, tt.payloadVersion, tt.persist) + store.SetBasis([]fdv2proto.Event{}, tt.selector, tt.persist) assert.True(t, store.IsInitialized()) }) } @@ -61,7 +64,7 @@ func TestStore_Commit(t *testing.T) { assert.NoError(t, store.Commit()) }) - t.Run("persist memory items are copied to persistent store in r/w mode", func(t *testing.T) { + t.Run("persist-marked memory items are copied to persistent store in r/w mode", func(t *testing.T) { logCapture := ldlogtest.NewMockLog() spy := &fakeStore{isDown: true} @@ -69,17 +72,27 @@ func TestStore_Commit(t *testing.T) { store := NewStore(logCapture.Loggers).WithPersistence(spy, subsystems.DataStoreModeReadWrite, nil) defer store.Close() - initPayload := []ldstoretypes.Collection{ - {Kind: ldstoreimpl.Features(), Items: []ldstoretypes.KeyedItemDescriptor{ - {Key: "foo", Item: ldstoretypes.ItemDescriptor{Version: 1}}, - }}, - {Kind: ldstoreimpl.Segments(), Items: []ldstoretypes.KeyedItemDescriptor{ - {Key: "bar", Item: ldstoretypes.ItemDescriptor{Version: 2}}, - }}, + input := []fdv2proto.Event{ + fdv2proto.PutObject{Kind: datakinds.Features, Key: "foo", Object: ldstoretypes.ItemDescriptor{Version: 1}}, + 
fdv2proto.PutObject{Kind: datakinds.Segments, Key: "bar", Object: ldstoretypes.ItemDescriptor{Version: 2}}, } - version := 1 - assert.True(t, store.Init(initPayload, &version, true)) + output := []ldstoretypes.Collection{ + { + Kind: ldstoreimpl.Features(), + Items: []ldstoretypes.KeyedItemDescriptor{ + {Key: "foo", Item: ldstoretypes.ItemDescriptor{Version: 1}}, + }, + }, + { + Kind: ldstoreimpl.Segments(), + Items: []ldstoretypes.KeyedItemDescriptor{ + {Key: "bar", Item: ldstoretypes.ItemDescriptor{Version: 2}}, + }, + }} + + // There should be an error since writing to the store will fail. + assert.Error(t, store.SetBasis(input, fdv2proto.NoSelector(), true)) require.Empty(t, spy.initPayload) @@ -87,7 +100,7 @@ func TestStore_Commit(t *testing.T) { require.NoError(t, store.Commit()) - assert.Equal(t, initPayload, spy.initPayload) + assert.Equal(t, output, spy.initPayload) }) t.Run("non-persist memory items are not copied to persistent store in r/w mode", func(t *testing.T) { @@ -96,16 +109,12 @@ func TestStore_Commit(t *testing.T) { store := NewStore(logCapture.Loggers).WithPersistence(&fakeStore{}, subsystems.DataStoreModeReadWrite, nil) defer store.Close() - initPayload := []ldstoretypes.Collection{ - {Kind: ldstoreimpl.Features(), Items: []ldstoretypes.KeyedItemDescriptor{ - {Key: "foo", Item: ldstoretypes.ItemDescriptor{Version: 1}}, - }}, - {Kind: ldstoreimpl.Segments(), Items: []ldstoretypes.KeyedItemDescriptor{ - {Key: "bar", Item: ldstoretypes.ItemDescriptor{Version: 2}}, - }}, + input := []fdv2proto.Event{ + fdv2proto.PutObject{Kind: datakinds.Features, Key: "foo", Object: ldstoretypes.ItemDescriptor{Version: 1}}, + fdv2proto.PutObject{Kind: datakinds.Segments, Key: "bar", Object: ldstoretypes.ItemDescriptor{Version: 2}}, } - assert.True(t, store.Init(initPayload, nil, false)) + assert.NoError(t, store.SetBasis(input, fdv2proto.NoSelector(), false)) require.Empty(t, spy.initPayload) @@ -114,23 +123,18 @@ func TestStore_Commit(t *testing.T) { assert.Empty(t, spy.initPayload) }) - t.Run("persist memory items are not copied to persistent store in r-only mode", func(t *testing.T) { + t.Run("persist-marked memory items are not copied to persistent store in r-only mode", func(t *testing.T) { logCapture := ldlogtest.NewMockLog() spy := &fakeStore{} store := NewStore(logCapture.Loggers).WithPersistence(spy, subsystems.DataStoreModeRead, nil) defer store.Close() - initPayload := []ldstoretypes.Collection{ - {Kind: ldstoreimpl.Features(), Items: []ldstoretypes.KeyedItemDescriptor{ - {Key: "foo", Item: ldstoretypes.ItemDescriptor{Version: 1}}, - }}, - {Kind: ldstoreimpl.Segments(), Items: []ldstoretypes.KeyedItemDescriptor{ - {Key: "bar", Item: ldstoretypes.ItemDescriptor{Version: 2}}, - }}, + input := []fdv2proto.Event{ + fdv2proto.PutObject{Kind: datakinds.Features, Key: "foo", Object: ldstoretypes.ItemDescriptor{Version: 1}}, + fdv2proto.PutObject{Kind: datakinds.Segments, Key: "bar", Object: ldstoretypes.ItemDescriptor{Version: 2}}, } - version := 1 - assert.True(t, store.Init(initPayload, &version, true)) + assert.NoError(t, store.SetBasis(input, fdv2proto.NoSelector(), true)) require.Empty(t, spy.initPayload) @@ -149,12 +153,11 @@ func TestStore_GetActive(t *testing.T) { assert.NoError(t, err) assert.Equal(t, foo, ldstoretypes.ItemDescriptor{}.NotFound()) - version := 1 - assert.True(t, store.Init([]ldstoretypes.Collection{ - {Kind: ldstoreimpl.Features(), Items: []ldstoretypes.KeyedItemDescriptor{ - {Key: "foo", Item: ldstoretypes.ItemDescriptor{Version: 1}}, - }}, - }, &version, 
false)) + input := []fdv2proto.Event{ + fdv2proto.PutObject{Kind: datakinds.Features, Key: "foo", Object: ldstoretypes.ItemDescriptor{Version: 1}}, + } + + assert.NoError(t, store.SetBasis(input, fdv2proto.NoSelector(), false)) foo, err = store.Get(ldstoreimpl.Features(), "foo") assert.NoError(t, err) @@ -178,12 +181,10 @@ func TestStore_GetActive(t *testing.T) { _, err := store.Get(ldstoreimpl.Features(), "foo") assert.Equal(t, errImAPersistentStore, err) - version := 1 - assert.True(t, store.Init([]ldstoretypes.Collection{ - {Kind: ldstoreimpl.Features(), Items: []ldstoretypes.KeyedItemDescriptor{ - {Key: "foo", Item: ldstoretypes.ItemDescriptor{Version: 1}}, - }}, - }, &version, false)) + input := []fdv2proto.Event{ + fdv2proto.PutObject{Kind: datakinds.Features, Key: "foo", Object: ldstoretypes.ItemDescriptor{Version: 1}}, + } + assert.NoError(t, store.SetBasis(input, fdv2proto.NoSelector(), false)) foo, err := store.Get(ldstoreimpl.Features(), "foo") assert.NoError(t, err) @@ -227,8 +228,15 @@ func TestStore_Concurrency(t *testing.T) { wg.Add(1) defer wg.Done() for i := 0; i < 100; i++ { - version := 1 - _ = store.Init([]ldstoretypes.Collection{}, &version, true) + _ = store.SetBasis([]fdv2proto.Event{}, fdv2proto.NoSelector(), true) + time.Sleep(time.Duration(rand.Intn(10)) * time.Millisecond) + } + }() + go func() { + wg.Add(1) + defer wg.Done() + for i := 0; i < 100; i++ { + store.ApplyDelta([]fdv2proto.Event{}, fdv2proto.NoSelector(), true) time.Sleep(time.Duration(rand.Intn(10)) * time.Millisecond) } }() diff --git a/internal/fdv2proto/event_to_storable_item.go b/internal/fdv2proto/event_to_storable_item.go new file mode 100644 index 00000000..bbcb48a7 --- /dev/null +++ b/internal/fdv2proto/event_to_storable_item.go @@ -0,0 +1,58 @@ +package fdv2proto + +import ( + "github.com/launchdarkly/go-server-sdk/v7/internal/datakinds" + "github.com/launchdarkly/go-server-sdk/v7/subsystems/ldstoretypes" +) + +func ToStorableItems(events []Event) []ldstoretypes.Collection { + flagCollection := ldstoretypes.Collection{ + Kind: datakinds.Features, + Items: make([]ldstoretypes.KeyedItemDescriptor, 0), + } + + segmentCollection := ldstoretypes.Collection{ + Kind: datakinds.Segments, + Items: make([]ldstoretypes.KeyedItemDescriptor, 0), + } + + for _, event := range events { + switch e := event.(type) { + case PutObject: + switch e.Kind { + case datakinds.Features: + flagCollection.Items = append(flagCollection.Items, ldstoretypes.KeyedItemDescriptor{ + Key: e.Key, + Item: e.Object, + }) + case datakinds.Segments: + segmentCollection.Items = append(segmentCollection.Items, ldstoretypes.KeyedItemDescriptor{ + Key: e.Key, + Item: e.Object, + }) + } + case DeleteObject: + switch e.Kind { + case datakinds.Features: + flagCollection.Items = append(flagCollection.Items, ldstoretypes.KeyedItemDescriptor{ + Key: e.Key, + Item: ldstoretypes.ItemDescriptor{ + Version: e.Version, + Item: nil, + }, + }) + case datakinds.Segments: + segmentCollection.Items = append(segmentCollection.Items, ldstoretypes.KeyedItemDescriptor{ + Key: e.Key, + Item: ldstoretypes.ItemDescriptor{ + Version: e.Version, + Item: nil, + }, + }) + } + } + + } + + return []ldstoretypes.Collection{flagCollection, segmentCollection} +} diff --git a/internal/fdv2proto/proto.go b/internal/fdv2proto/proto.go new file mode 100644 index 00000000..a97ad803 --- /dev/null +++ b/internal/fdv2proto/proto.go @@ -0,0 +1,136 @@ +package fdv2proto + +import "github.com/launchdarkly/go-server-sdk/v7/subsystems/ldstoretypes" + +type IntentCode string + 
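// Usage sketch for ToStorableItems as defined above: a mixed event list
// collapses into exactly two collections (flags, then segments), with deletes
// becoming versioned tombstones whose Item is nil. Keys and versions here are
// hypothetical.
func exampleToStorableItems() {
	events := []Event{
		PutObject{Kind: datakinds.Features, Key: "f1", Object: ldstoretypes.ItemDescriptor{Version: 2}},
		DeleteObject{Kind: datakinds.Segments, Key: "s1", Version: 3},
	}
	collections := ToStorableItems(events)
	// collections[0]: Features containing f1 at version 2
	// collections[1]: Segments containing the s1 tombstone at version 3
	_ = collections
}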
+const ( + IntentTransferFull = IntentCode("xfer-full") + IntentTransferChanges = IntentCode("xfer-changes") +) + +type EventName string + +const ( + EventPutObject = EventName("put-object") + EventDeleteObject = EventName("delete-object") + EventServerIntent = EventName("server-intent") + EventPayloadTransferred = EventName("payload-transferred") +) + +type Selector struct { + state string + version int + set bool +} + +func NoSelector() Selector { + return Selector{set: false} +} + +func NewSelector(state string, version int) Selector { + return Selector{state: state, version: version, set: true} +} + +func (s Selector) IsSet() bool { + return s.set +} + +func (s Selector) State() string { + return s.state +} + +func (s Selector) Version() int { + return s.version +} + +func (s Selector) Get() (string, int, bool) { + return s.state, s.version, s.set +} + +type Event interface { + Name() EventName +} + +// DeleteData is the logical representation of the data in the "delete" event. In the JSON representation, +// there is a "path" property in the format "/flags/key" or "/segments/key", which we convert into +// Kind and Key when we parse it. +// +// Example JSON representation: +// +// { +// "path": "/flags/flagkey", +// "version": 3 +// } +type DeleteData struct { + Kind ldstoretypes.DataKind + Key string + Version int +} + +type DeleteObject struct { + Version int + Kind ldstoretypes.DataKind + Key string +} + +func (d DeleteObject) Name() EventName { + return EventDeleteObject +} + +// PutData is the logical representation of the data in the "put" event. In the JSON representation, +// the "data" property is actually a map of maps, but the schema we use internally is a list of +// lists instead. +// +// The "path" property is normally always "/"; the LD streaming service sends this property, but +// some versions of Relay do not, so we do not require it. +// +// Example JSON representation: +// +// { +// "path": "/", +// "data": { +// "flags": { +// "flag1": { "key": "flag1", "version": 1, ...etc. }, +// "flag2": { "key": "flag2", "version": 1, ...etc. }, +// }, +// "segments": { +// "segment1": { "key", "segment1", "version": 1, ...etc. } +// } +// } +// } +type PutData struct { + Path string // we don't currently do anything with this + Data []ldstoretypes.Collection +} + +type PutObject struct { + Version int + Kind ldstoretypes.DataKind + Key string + Object ldstoretypes.ItemDescriptor +} + +func (p PutObject) Name() EventName { + return EventPutObject +} + +// PatchData is the logical representation of the data in the "patch" event. In the JSON representation, +// there is a "path" property in the format "/flags/key" or "/segments/key", which we convert into +// Kind and Key when we parse it. The "data" property is the JSON representation of the flag or +// segment, which we deserialize into an ItemDescriptor. +// +// Example JSON representation: +// +// { +// "path": "/flags/flagkey", +// "data": { +// "key": "flagkey", +// "version": 2, ...etc. 
+// } +// } +type PatchData struct { + Kind ldstoretypes.DataKind + Key string + Data ldstoretypes.ItemDescriptor +} diff --git a/internal/sharedtest/mocks/mock_data_destination.go b/internal/sharedtest/mocks/mock_data_destination.go index 4ecc78eb..447009bc 100644 --- a/internal/sharedtest/mocks/mock_data_destination.go +++ b/internal/sharedtest/mocks/mock_data_destination.go @@ -1,14 +1,13 @@ package mocks import ( + "github.com/launchdarkly/go-server-sdk/v7/internal/fdv2proto" "sync" "testing" "time" "github.com/launchdarkly/go-server-sdk/v7/interfaces" "github.com/launchdarkly/go-server-sdk/v7/subsystems" - "github.com/launchdarkly/go-server-sdk/v7/subsystems/ldstoretypes" - th "github.com/launchdarkly/go-test-helpers/v3" "github.com/stretchr/testify/assert" @@ -41,26 +40,38 @@ func NewMockDataDestination(realStore subsystems.DataStore) *MockDataDestination } } -// Init in this test implementation, delegates to d.DataStore.CapturedUpdates. -func (d *MockDataDestination) Init(allData []ldstoretypes.Collection, _ *int) bool { - // For now, the payloadVersion is ignored. When the data sources start making use of it, it should be +// SetBasis in this test implementation, delegates to d.DataStore.CapturedUpdates. +func (d *MockDataDestination) SetBasis(events []fdv2proto.Event, selector fdv2proto.Selector, persist bool) error { + // For now, the selector is ignored. When the data sources start making use of it, it should be // stored so that assertions can be made. - for _, coll := range allData { + + collections := fdv2proto.ToStorableItems(events) + + for _, coll := range collections { AssertNotNil(coll.Kind) } - err := d.DataStore.Init(allData) - return err == nil + return d.DataStore.Init(collections) } -// Upsert in this test implementation, delegates to d.DataStore.CapturedUpdates. -func (d *MockDataDestination) Upsert( - kind ldstoretypes.DataKind, - key string, - newItem ldstoretypes.ItemDescriptor, -) bool { - AssertNotNil(kind) - _, err := d.DataStore.Upsert(kind, key, newItem) - return err == nil +func (d *MockDataDestination) ApplyDelta(events []fdv2proto.Event, selector fdv2proto.Selector, persist bool) error { + // For now, the selector is ignored. When the data sources start making use of it, it should be + // stored so that assertions can be made. + + collections := fdv2proto.ToStorableItems(events) + + for _, coll := range collections { + AssertNotNil(coll.Kind) + } + + for _, coll := range collections { + for _, item := range coll.Items { + if _, err := d.DataStore.Upsert(coll.Kind, item.Key, item.Item); err != nil { + return err + } + } + } + + return nil } // UpdateStatus in this test implementation, pushes a value onto the Statuses channel. diff --git a/subsystems/client_context.go b/subsystems/client_context.go index ec37fa66..bf1c68b2 100644 --- a/subsystems/client_context.go +++ b/subsystems/client_context.go @@ -48,7 +48,7 @@ type ClientContext interface { // GetDataDestination is a FDV2 method, do not use. Not subject to semantic versioning. // This method is a replacement for GetDataSourceUpdateSink when the SDK is in FDv2 mode. - GetDataDestination() DataDestination + GetDataDestination() DataDestination2 // GetDataSourceStatusReporter is a FDV2 method, do not use. Not subject to semantic versioning. // This method is a replacement for GetDataSourceUpdateSink when the SDK is in FDv2 mode. 
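// To make the new SetBasis/ApplyDelta contract concrete, a toy in-memory
// DataDestination2 implementation (a sketch assuming the usual imports; the
// SDK's real Store additionally handles persistence, selectors, and status
// reporting). It ignores version ordering for brevity, which a production
// implementation must not do.
type toyDestination struct {
	mu   sync.Mutex
	data map[ldstoretypes.DataKind]map[string]ldstoretypes.ItemDescriptor
}

func (t *toyDestination) SetBasis(events []fdv2proto.Event, _ fdv2proto.Selector, _ bool) error {
	t.mu.Lock()
	defer t.mu.Unlock()
	// A basis replaces everything: drop existing data, then apply the events.
	t.data = make(map[ldstoretypes.DataKind]map[string]ldstoretypes.ItemDescriptor)
	t.apply(events)
	return nil
}

func (t *toyDestination) ApplyDelta(events []fdv2proto.Event, _ fdv2proto.Selector, _ bool) error {
	t.mu.Lock()
	defer t.mu.Unlock()
	// A delta mutates the existing basis in place.
	t.apply(events)
	return nil
}

func (t *toyDestination) apply(events []fdv2proto.Event) {
	for _, event := range events {
		switch e := event.(type) {
		case fdv2proto.PutObject:
			t.set(e.Kind, e.Key, e.Object)
		case fdv2proto.DeleteObject:
			// Deletes are stored as versioned tombstones (nil Item).
			t.set(e.Kind, e.Key, ldstoretypes.ItemDescriptor{Version: e.Version, Item: nil})
		}
	}
}

func (t *toyDestination) set(kind ldstoretypes.DataKind, key string, item ldstoretypes.ItemDescriptor) {
	if t.data[kind] == nil {
		t.data[kind] = make(map[string]ldstoretypes.ItemDescriptor)
	}
	t.data[kind][key] = item
}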
@@ -66,7 +66,7 @@ type BasicClientContext struct { ServiceEndpoints interfaces.ServiceEndpoints DataSourceUpdateSink DataSourceUpdateSink DataStoreUpdateSink DataStoreUpdateSink - DataDestination DataDestination + DataDestination DataDestination2 DataSourceStatusReporter DataSourceStatusReporter } @@ -101,7 +101,7 @@ func (b BasicClientContext) GetDataStoreUpdateSink() DataStoreUpdateSink { //nol return b.DataStoreUpdateSink } -func (b BasicClientContext) GetDataDestination() DataDestination { //nolint:revive +func (b BasicClientContext) GetDataDestination() DataDestination2 { //nolint:revive return b.DataDestination } diff --git a/subsystems/data_destination.go b/subsystems/data_destination.go index fe2129e4..c93ef023 100644 --- a/subsystems/data_destination.go +++ b/subsystems/data_destination.go @@ -1,6 +1,7 @@ package subsystems import ( + "github.com/launchdarkly/go-server-sdk/v7/internal/fdv2proto" "github.com/launchdarkly/go-server-sdk/v7/subsystems/ldstoretypes" ) @@ -33,3 +34,8 @@ type DataDestination interface { // return false to indicate that the operation failed. Upsert(kind ldstoretypes.DataKind, key string, item ldstoretypes.ItemDescriptor) bool } + +type DataDestination2 interface { + SetBasis(events []fdv2proto.Event, selector fdv2proto.Selector, persist bool) error + ApplyDelta(events []fdv2proto.Event, selector fdv2proto.Selector, persist bool) error +} From c72dd6d240ea7d785c5a1233da5d963aca1b3286 Mon Sep 17 00:00:00 2001 From: Casey Waldren Date: Fri, 20 Sep 2024 16:52:38 -0700 Subject: [PATCH 40/62] remove old data destination interface --- .../streaming_data_source_events.go | 3 +- internal/datasourcev2/polling_data_source.go | 9 ++--- internal/datasourcev2/polling_http_request.go | 7 ++-- .../datasourcev2/streaming_data_source.go | 7 ++-- internal/datasourcev2/types.go | 1 + internal/datasystem/store.go | 3 +- internal/datasystem/store_test.go | 5 +-- .../sharedtest/mocks/mock_data_destination.go | 3 +- subsystems/client_context.go | 6 ++-- subsystems/data_destination.go | 35 +++++++------------ 10 files changed, 39 insertions(+), 40 deletions(-) diff --git a/internal/datasource/streaming_data_source_events.go b/internal/datasource/streaming_data_source_events.go index 918272e4..3b07e657 100644 --- a/internal/datasource/streaming_data_source_events.go +++ b/internal/datasource/streaming_data_source_events.go @@ -2,9 +2,10 @@ package datasource import ( "errors" - "github.com/launchdarkly/go-server-sdk/v7/internal/fdv2proto" "strings" + "github.com/launchdarkly/go-server-sdk/v7/internal/fdv2proto" + "github.com/launchdarkly/go-jsonstream/v3/jreader" "github.com/launchdarkly/go-server-sdk/v7/internal/datakinds" ) diff --git a/internal/datasourcev2/polling_data_source.go b/internal/datasourcev2/polling_data_source.go index ec9085d4..cdbd6c42 100644 --- a/internal/datasourcev2/polling_data_source.go +++ b/internal/datasourcev2/polling_data_source.go @@ -1,10 +1,11 @@ package datasourcev2 import ( - "github.com/launchdarkly/go-server-sdk/v7/internal/fdv2proto" "sync" "time" + "github.com/launchdarkly/go-server-sdk/v7/internal/fdv2proto" + "github.com/launchdarkly/go-sdk-common/v3/ldlog" "github.com/launchdarkly/go-server-sdk/v7/interfaces" "github.com/launchdarkly/go-server-sdk/v7/internal" @@ -68,7 +69,7 @@ type PollingRequester interface { // configuration. All other code outside of this package should interact with it only via the // DataSource interface. 
type PollingProcessor struct { - dataDestination subsystems.DataDestination2 + dataDestination subsystems.DataDestination statusReporter subsystems.DataSourceStatusReporter requester PollingRequester pollInterval time.Duration @@ -82,7 +83,7 @@ type PollingProcessor struct { // NewPollingProcessor creates the internal implementation of the polling data source. func NewPollingProcessor( context subsystems.ClientContext, - dataDestination subsystems.DataDestination2, + dataDestination subsystems.DataDestination, statusReporter subsystems.DataSourceStatusReporter, cfg datasource.PollingConfig, ) *PollingProcessor { @@ -92,7 +93,7 @@ func NewPollingProcessor( func newPollingProcessor( context subsystems.ClientContext, - dataDestination subsystems.DataDestination2, + dataDestination subsystems.DataDestination, statusReporter subsystems.DataSourceStatusReporter, requester PollingRequester, pollInterval time.Duration, diff --git a/internal/datasourcev2/polling_http_request.go b/internal/datasourcev2/polling_http_request.go index 6cb09c18..3bd21b1f 100644 --- a/internal/datasourcev2/polling_http_request.go +++ b/internal/datasourcev2/polling_http_request.go @@ -4,14 +4,15 @@ import ( "encoding/json" "errors" "fmt" - "github.com/launchdarkly/go-jsonstream/v3/jreader" - "github.com/launchdarkly/go-server-sdk/v7/internal/datakinds" - "github.com/launchdarkly/go-server-sdk/v7/internal/fdv2proto" "io" "net/http" "net/url" "strings" + "github.com/launchdarkly/go-jsonstream/v3/jreader" + "github.com/launchdarkly/go-server-sdk/v7/internal/datakinds" + "github.com/launchdarkly/go-server-sdk/v7/internal/fdv2proto" + "github.com/launchdarkly/go-sdk-common/v3/ldlog" "github.com/launchdarkly/go-server-sdk/v7/internal/endpoints" "github.com/launchdarkly/go-server-sdk/v7/subsystems" diff --git a/internal/datasourcev2/streaming_data_source.go b/internal/datasourcev2/streaming_data_source.go index e56f8ffd..8c729fb7 100644 --- a/internal/datasourcev2/streaming_data_source.go +++ b/internal/datasourcev2/streaming_data_source.go @@ -3,13 +3,14 @@ package datasourcev2 import ( "encoding/json" "errors" - "github.com/launchdarkly/go-server-sdk/v7/internal/fdv2proto" "net/http" "net/url" "strings" "sync" "time" + "github.com/launchdarkly/go-server-sdk/v7/internal/fdv2proto" + "github.com/launchdarkly/go-jsonstream/v3/jreader" "github.com/launchdarkly/go-sdk-common/v3/ldlog" "github.com/launchdarkly/go-sdk-common/v3/ldtime" @@ -75,7 +76,7 @@ const ( // DataSource interface. type StreamProcessor struct { cfg datasource.StreamConfig - dataDestination subsystems.DataDestination2 + dataDestination subsystems.DataDestination statusReporter subsystems.DataSourceStatusReporter client *http.Client headers http.Header @@ -92,7 +93,7 @@ type StreamProcessor struct { // NewStreamProcessor creates the internal implementation of the streaming data source. 
func NewStreamProcessor( context subsystems.ClientContext, - dataDestination subsystems.DataDestination2, + dataDestination subsystems.DataDestination, statusReporter subsystems.DataSourceStatusReporter, cfg datasource.StreamConfig, ) *StreamProcessor { diff --git a/internal/datasourcev2/types.go b/internal/datasourcev2/types.go index e5d59b24..dbb7b9b2 100644 --- a/internal/datasourcev2/types.go +++ b/internal/datasourcev2/types.go @@ -2,6 +2,7 @@ package datasourcev2 import ( "encoding/json" + "github.com/launchdarkly/go-server-sdk/v7/internal/fdv2proto" es "github.com/launchdarkly/eventsource" diff --git a/internal/datasystem/store.go b/internal/datasystem/store.go index de62e4f5..aaa67164 100644 --- a/internal/datasystem/store.go +++ b/internal/datasystem/store.go @@ -1,9 +1,10 @@ package datasystem import ( - "github.com/launchdarkly/go-server-sdk/v7/internal/fdv2proto" "sync" + "github.com/launchdarkly/go-server-sdk/v7/internal/fdv2proto" + "github.com/launchdarkly/go-sdk-common/v3/ldlog" "github.com/launchdarkly/go-server-sdk/v7/interfaces" "github.com/launchdarkly/go-server-sdk/v7/internal/datakinds" diff --git a/internal/datasystem/store_test.go b/internal/datasystem/store_test.go index d2b7e577..f5767d82 100644 --- a/internal/datasystem/store_test.go +++ b/internal/datasystem/store_test.go @@ -2,13 +2,14 @@ package datasystem import ( "errors" - "github.com/launchdarkly/go-server-sdk/v7/internal/datakinds" - "github.com/launchdarkly/go-server-sdk/v7/internal/fdv2proto" "math/rand" "sync" "testing" "time" + "github.com/launchdarkly/go-server-sdk/v7/internal/datakinds" + "github.com/launchdarkly/go-server-sdk/v7/internal/fdv2proto" + "github.com/launchdarkly/go-server-sdk/v7/subsystems" "github.com/stretchr/testify/require" diff --git a/internal/sharedtest/mocks/mock_data_destination.go b/internal/sharedtest/mocks/mock_data_destination.go index 447009bc..84e91309 100644 --- a/internal/sharedtest/mocks/mock_data_destination.go +++ b/internal/sharedtest/mocks/mock_data_destination.go @@ -1,11 +1,12 @@ package mocks import ( - "github.com/launchdarkly/go-server-sdk/v7/internal/fdv2proto" "sync" "testing" "time" + "github.com/launchdarkly/go-server-sdk/v7/internal/fdv2proto" + "github.com/launchdarkly/go-server-sdk/v7/interfaces" "github.com/launchdarkly/go-server-sdk/v7/subsystems" th "github.com/launchdarkly/go-test-helpers/v3" diff --git a/subsystems/client_context.go b/subsystems/client_context.go index bf1c68b2..ec37fa66 100644 --- a/subsystems/client_context.go +++ b/subsystems/client_context.go @@ -48,7 +48,7 @@ type ClientContext interface { // GetDataDestination is a FDV2 method, do not use. Not subject to semantic versioning. // This method is a replacement for GetDataSourceUpdateSink when the SDK is in FDv2 mode. - GetDataDestination() DataDestination2 + GetDataDestination() DataDestination // GetDataSourceStatusReporter is a FDV2 method, do not use. Not subject to semantic versioning. // This method is a replacement for GetDataSourceUpdateSink when the SDK is in FDv2 mode. 
@@ -66,7 +66,7 @@ type BasicClientContext struct { ServiceEndpoints interfaces.ServiceEndpoints DataSourceUpdateSink DataSourceUpdateSink DataStoreUpdateSink DataStoreUpdateSink - DataDestination DataDestination2 + DataDestination DataDestination DataSourceStatusReporter DataSourceStatusReporter } @@ -101,7 +101,7 @@ func (b BasicClientContext) GetDataStoreUpdateSink() DataStoreUpdateSink { //nol return b.DataStoreUpdateSink } -func (b BasicClientContext) GetDataDestination() DataDestination2 { //nolint:revive +func (b BasicClientContext) GetDataDestination() DataDestination { //nolint:revive return b.DataDestination } diff --git a/subsystems/data_destination.go b/subsystems/data_destination.go index c93ef023..3fbb54c2 100644 --- a/subsystems/data_destination.go +++ b/subsystems/data_destination.go @@ -2,7 +2,6 @@ package subsystems import ( "github.com/launchdarkly/go-server-sdk/v7/internal/fdv2proto" - "github.com/launchdarkly/go-server-sdk/v7/subsystems/ldstoretypes" ) // DataDestination represents a sink for data obtained from a data source. @@ -12,30 +11,22 @@ import ( // Do not use it. // You have been warned. type DataDestination interface { - - // Init overwrites the current contents of the data store with a set of items for each collection. + // SetBasis defines a new basis for the data store. This means the store must + // be emptied of any existing data before applying the events. This operation should be + // atomic with respect to any other operations that modify the store. + // + // The selector defines the version of the basis. // - // If the underlying data store returns an error during this operation, the SDK will log it, - // and set the data source state to DataSourceStateInterrupted with an error of - // DataSourceErrorKindStoreError. It will not return the error to the data source, but will - // return false to indicate that the operation failed. - Init(allData []ldstoretypes.Collection, payloadVersion *int) bool + // If persist is true, it indicates that the data should be propagated to any connected persistent + // store. + SetBasis(events []fdv2proto.Event, selector fdv2proto.Selector, persist bool) error - // Upsert updates or inserts an item in the specified collection. For updates, the object will only be - // updated if the existing version is less than the new version. + // ApplyDelta applies a set of changes to an existing basis. This operation should be atomic with + // respect to any other operations that modify the store. // - // To mark an item as deleted, pass an ItemDescriptor with a nil Item and a nonzero version - // number. Deletions must be versioned so that they do not overwrite a later update in case updates - // are received out of order. + // The selector defines the new version of the basis. // - // If the underlying data store returns an error during this operation, the SDK will log it, - // and set the data source state to DataSourceStateInterrupted with an error of - // DataSourceErrorKindStoreError. It will not return the error to the data source, but will - // return false to indicate that the operation failed. - Upsert(kind ldstoretypes.DataKind, key string, item ldstoretypes.ItemDescriptor) bool -} - -type DataDestination2 interface { - SetBasis(events []fdv2proto.Event, selector fdv2proto.Selector, persist bool) error + // If persist is true, it indicates that the changes should be propagated to any connected persistent + // store. 
ApplyDelta(events []fdv2proto.Event, selector fdv2proto.Selector, persist bool) error } From c5304510c979af6d0927b42ce65684a72e086d27 Mon Sep 17 00:00:00 2001 From: Casey Waldren Date: Fri, 20 Sep 2024 17:00:29 -0700 Subject: [PATCH 41/62] revert changes to datasource/streaming_data_source_events.go --- .../streaming_data_source_events.go | 90 ++++++++++++++++--- 1 file changed, 76 insertions(+), 14 deletions(-) diff --git a/internal/datasource/streaming_data_source_events.go b/internal/datasource/streaming_data_source_events.go index 3b07e657..20408dce 100644 --- a/internal/datasource/streaming_data_source_events.go +++ b/internal/datasource/streaming_data_source_events.go @@ -4,10 +4,10 @@ import ( "errors" "strings" - "github.com/launchdarkly/go-server-sdk/v7/internal/fdv2proto" + "github.com/launchdarkly/go-server-sdk/v7/internal/datakinds" + "github.com/launchdarkly/go-server-sdk/v7/subsystems/ldstoretypes" "github.com/launchdarkly/go-jsonstream/v3/jreader" - "github.com/launchdarkly/go-server-sdk/v7/internal/datakinds" ) var ( @@ -16,8 +16,70 @@ var ( deleteDataRequiredProperties = []string{"path", "version"} //nolint:gochecknoglobals ) -func parsePutData(data []byte) (fdv2proto.PutData, error) { - var ret fdv2proto.PutData +// putData is the logical representation of the data in the "put" event. In the JSON representation, +// the "data" property is actually a map of maps, but the schema we use internally is a list of +// lists instead. +// +// The "path" property is normally always "/"; the LD streaming service sends this property, but +// some versions of Relay do not, so we do not require it. +// +// Example JSON representation: +// +// { +// "path": "/", +// "data": { +// "flags": { +// "flag1": { "key": "flag1", "version": 1, ...etc. }, +// "flag2": { "key": "flag2", "version": 1, ...etc. }, +// }, +// "segments": { +// "segment1": { "key": "segment1", "version": 1, ...etc. } +// } +// } +// } +type putData struct { + Path string // we don't currently do anything with this + Data []ldstoretypes.Collection +} + +// patchData is the logical representation of the data in the "patch" event. In the JSON representation, +// there is a "path" property in the format "/flags/key" or "/segments/key", which we convert into +// Kind and Key when we parse it. The "data" property is the JSON representation of the flag or +// segment, which we deserialize into an ItemDescriptor. +// +// Example JSON representation: +// +// { +// "path": "/flags/flagkey", +// "data": { +// "key": "flagkey", +// "version": 2, ...etc. +// } +// } +type patchData struct { + Kind ldstoretypes.DataKind + Key string + Data ldstoretypes.ItemDescriptor +} + +// deleteData is the logical representation of the data in the "delete" event. In the JSON representation, +// there is a "path" property in the format "/flags/key" or "/segments/key", which we convert into +// Kind and Key when we parse it.
+// +// Example JSON representation: +// +// { +// "path": "/flags/flagkey", +// "version": 3 +// } +type deleteData struct { + Kind ldstoretypes.DataKind + Key string + Version int +} + +func parsePutData(data []byte) (putData, error) { + var ret putData r := jreader.NewReader(data) for obj := r.Object().WithRequiredProperties(putDataRequiredProperties); obj.Next(); { switch string(obj.Name()) { @@ -30,15 +92,15 @@ func parsePutData(data []byte) (fdv2proto.PutData, error) { return ret, r.Error() } -func parsePatchData(data []byte) (fdv2proto.PatchData, error) { - var ret fdv2proto.PatchData +func parsePatchData(data []byte) (patchData, error) { + var ret patchData r := jreader.NewReader(data) var kind datakinds.DataKindInternal var key string - parseItem := func() (fdv2proto.PatchData, error) { + parseItem := func() (patchData, error) { item, err := kind.DeserializeFromJSONReader(&r) if err != nil { - return fdv2proto.PatchData{}, err + return patchData{}, err } ret.Data = item return ret, nil @@ -64,7 +126,7 @@ func parsePatchData(data []byte) (fdv2proto.PatchData, error) { } } if err := r.Error(); err != nil { - return fdv2proto.PatchData{}, err + return patchData{}, err } // If we got here, it means we couldn't parse the data model object yet because we saw the // "data" property first. But we definitely saw both properties (otherwise we would've got @@ -76,13 +138,13 @@ func parsePatchData(data []byte) (fdv2proto.PatchData, error) { } } if r.Error() != nil { - return fdv2proto.PatchData{}, r.Error() + return patchData{}, r.Error() } - return fdv2proto.PatchData{}, errors.New("patch event had no data property") + return patchData{}, errors.New("patch event had no data property") } -func parseDeleteData(data []byte) (fdv2proto.DeleteData, error) { - var ret fdv2proto.DeleteData +func parseDeleteData(data []byte) (deleteData, error) { + var ret deleteData r := jreader.NewReader(data) for obj := r.Object().WithRequiredProperties(deleteDataRequiredProperties); obj.Next(); { switch string(obj.Name()) { @@ -99,7 +161,7 @@ func parseDeleteData(data []byte) (fdv2proto.DeleteData, error) { } } if r.Error() != nil { - return fdv2proto.DeleteData{}, r.Error() + return deleteData{}, r.Error() } return ret, nil } From 01aa57b279ebf61b7eccce3d828272e12f2fd2c0 Mon Sep 17 00:00:00 2001 From: Casey Waldren Date: Fri, 20 Sep 2024 17:04:40 -0700 Subject: [PATCH 42/62] use fdv2 types in streaming data source event handler --- .../datasourcev2/streaming_data_source.go | 16 ++--- internal/fdv2proto/proto.go | 65 +------------------ 2 files changed, 11 insertions(+), 70 deletions(-) diff --git a/internal/datasourcev2/streaming_data_source.go b/internal/datasourcev2/streaming_data_source.go index 8c729fb7..e04d5b9c 100644 --- a/internal/datasourcev2/streaming_data_source.go +++ b/internal/datasourcev2/streaming_data_source.go @@ -190,10 +190,10 @@ func (sp *StreamProcessor) consumeStream(stream *es.Stream, closeWhenReady chan< processedEvent = false } - switch event.Event() { - case "heart-beat": + switch fdv2proto.EventName(event.Event()) { + case fdv2proto.EventHeartbeat: // Swallow the event and move on. - case "server-intent": + case fdv2proto.EventServerIntent: //nolint: godox // TODO: Replace all this json unmarshalling with a nicer jreader implementation. 
var serverIntent serverIntent @@ -213,11 +213,11 @@ func (sp *StreamProcessor) consumeStream(stream *es.Stream, closeWhenReady chan< currentChangeSet = changeSet{events: make([]es.Event, 0), intent: &serverIntent} - case putEventName: + case fdv2proto.EventPutObject: currentChangeSet.events = append(currentChangeSet.events, event) - case deleteEventName: + case fdv2proto.EventDeleteObject: currentChangeSet.events = append(currentChangeSet.events, event) - case "goodbye": + case fdv2proto.EventGoodbye: var goodbye goodbye err := json.Unmarshal([]byte(event.Data()), &goodbye) if err != nil { @@ -228,7 +228,7 @@ func (sp *StreamProcessor) consumeStream(stream *es.Stream, closeWhenReady chan< if !goodbye.Silent { sp.loggers.Errorf("SSE server received error: %s (%s)", goodbye.Reason, goodbye.Catastrophe) } - case "error": + case fdv2proto.EventError: var errorData errorEvent err := json.Unmarshal([]byte(event.Data()), &errorData) if err != nil { @@ -245,7 +245,7 @@ func (sp *StreamProcessor) consumeStream(stream *es.Stream, closeWhenReady chan< currentChangeSet = changeSet{events: make([]es.Event, 0)} //nolint: godox // TODO: Do we need to restart here? - case "payload-transferred": + case fdv2proto.EventPayloadTransferred: currentChangeSet.events = append(currentChangeSet.events, event) updates, err := deserializeEvents(currentChangeSet.events) if err != nil { diff --git a/internal/fdv2proto/proto.go b/internal/fdv2proto/proto.go index a97ad803..982f2914 100644 --- a/internal/fdv2proto/proto.go +++ b/internal/fdv2proto/proto.go @@ -16,6 +16,9 @@ const ( EventDeleteObject = EventName("delete-object") EventServerIntent = EventName("server-intent") EventPayloadTransferred = EventName("payload-transferred") + EventHeartbeat = EventName("heart-beat") + EventGoodbye = EventName("goodbye") + EventError = EventName("error") ) type Selector struct { @@ -52,22 +55,6 @@ type Event interface { Name() EventName } -// DeleteData is the logical representation of the data in the "delete" event. In the JSON representation, -// there is a "path" property in the format "/flags/key" or "/segments/key", which we convert into -// Kind and Key when we parse it. -// -// Example JSON representation: -// -// { -// "path": "/flags/flagkey", -// "version": 3 -// } -type DeleteData struct { - Kind ldstoretypes.DataKind - Key string - Version int -} - type DeleteObject struct { Version int Kind ldstoretypes.DataKind @@ -78,32 +65,6 @@ func (d DeleteObject) Name() EventName { return EventDeleteObject } -// PutData is the logical representation of the data in the "put" event. In the JSON representation, -// the "data" property is actually a map of maps, but the schema we use internally is a list of -// lists instead. -// -// The "path" property is normally always "/"; the LD streaming service sends this property, but -// some versions of Relay do not, so we do not require it. -// -// Example JSON representation: -// -// { -// "path": "/", -// "data": { -// "flags": { -// "flag1": { "key": "flag1", "version": 1, ...etc. }, -// "flag2": { "key": "flag2", "version": 1, ...etc. }, -// }, -// "segments": { -// "segment1": { "key", "segment1", "version": 1, ...etc. 
} -// } -// } -// } -type PutData struct { - Path string // we don't currently do anything with this - Data []ldstoretypes.Collection -} - type PutObject struct { Version int Kind ldstoretypes.DataKind @@ -114,23 +75,3 @@ type PutObject struct { func (p PutObject) Name() EventName { return EventPutObject } - -// PatchData is the logical representation of the data in the "patch" event. In the JSON representation, -// there is a "path" property in the format "/flags/key" or "/segments/key", which we convert into -// Kind and Key when we parse it. The "data" property is the JSON representation of the flag or -// segment, which we deserialize into an ItemDescriptor. -// -// Example JSON representation: -// -// { -// "path": "/flags/flagkey", -// "data": { -// "key": "flagkey", -// "version": 2, ...etc. -// } -// } -type PatchData struct { - Kind ldstoretypes.DataKind - Key string - Data ldstoretypes.ItemDescriptor -} From aa67ce4510c2dbe66684a84315c4b59cac64fffa Mon Sep 17 00:00:00 2001 From: Casey Waldren Date: Fri, 20 Sep 2024 17:32:59 -0700 Subject: [PATCH 43/62] refactor concurrency tests --- internal/datasystem/store.go | 19 ++- internal/datasystem/store_test.go | 138 ++++++++++++++------- internal/fdv2proto/{proto.go => events.go} | 38 +----- internal/fdv2proto/selector.go | 31 +++++ 4 files changed, 140 insertions(+), 86 deletions(-) rename internal/fdv2proto/{proto.go => events.go} (67%) create mode 100644 internal/fdv2proto/selector.go diff --git a/internal/datasystem/store.go b/internal/datasystem/store.go index aaa67164..7ea8370d 100644 --- a/internal/datasystem/store.go +++ b/internal/datasystem/store.go @@ -44,12 +44,15 @@ type Store struct { // the persistentStore may be used if configured. memoryStore *datastore.MemoryStore + // True if the data in the memory store may be persisted to the persistent store. persist bool // Points to the active store. Swapped upon initialization. active subsystems.DataStore - // Protects the availability, persistentStore, quality, and active fields. + // Identifies the current data set. + selector fdv2proto.Selector + mu sync.RWMutex loggers ldlog.Loggers @@ -82,12 +85,19 @@ func NewStore(loggers ldlog.Loggers) *Store { persistentStore: nil, memoryStore: datastore.NewInMemoryDataStore(loggers), loggers: loggers, + selector: fdv2proto.NoSelector(), persist: false, } s.active = s.memoryStore return s } +func (s *Store) Selector() fdv2proto.Selector { + s.mu.RLock() + defer s.mu.RUnlock() + return s.selector +} + // Close closes the store. If there is a persistent store configured, it will be closed. func (s *Store) Close() error { s.mu.Lock() @@ -106,7 +116,6 @@ func (s *Store) getActive() subsystems.DataStore { return s.active } -// Mirroring returns true data is being mirrored to a persistent store. func (s *Store) shouldPersist() bool { return s.persist && s.persistentStore != nil && s.persistentStore.mode == subsystems.DataStoreModeReadWrite } @@ -115,16 +124,15 @@ func (s *Store) init(allData []ldstoretypes.Collection, selector fdv2proto.Selec s.mu.Lock() defer s.mu.Unlock() - // TXNS-PS: Requirement 1.3.3, must apply updates to in-memory before the persistent Store. - // TODO: handle errors from initializing the memory or persistent stores. 
s.memoryStore.SetBasis(allData) s.persist = persist + s.selector = selector s.active = s.memoryStore if s.shouldPersist() { - return s.persistentStore.impl.Init(allData) // TODO: insert in topo-sort order + return s.persistentStore.impl.Init(allData) // TODO: insert in dependency order } return nil @@ -144,6 +152,7 @@ func (s *Store) ApplyDelta(events []fdv2proto.Event, selector fdv2proto.Selector s.memoryStore.ApplyDelta(collections) s.persist = persist + s.selector = selector // The process for applying the delta to the memory store is different than the persistent store // because persistent stores are not yet transactional in regards to payload version. This means diff --git a/internal/datasystem/store_test.go b/internal/datasystem/store_test.go index f5767d82..1b892c13 100644 --- a/internal/datasystem/store_test.go +++ b/internal/datasystem/store_test.go @@ -25,7 +25,14 @@ func TestStore_New(t *testing.T) { assert.NoError(t, store.Close()) } -func TestStore_NoPersistence_NewStore_IsInitialized(t *testing.T) { +func TestStore_NoSelector(t *testing.T) { + logCapture := ldlogtest.NewMockLog() + store := NewStore(logCapture.Loggers) + defer store.Close() + assert.Equal(t, fdv2proto.NoSelector(), store.Selector()) +} + +func TestStore_NoPersistence_NewStore_IsNotInitialized(t *testing.T) { logCapture := ldlogtest.NewMockLog() store := NewStore(logCapture.Loggers) defer store.Close() @@ -33,32 +40,31 @@ func TestStore_NoPersistence_NewStore_IsInitialized(t *testing.T) { } func TestStore_NoPersistence_MemoryStore_IsInitialized(t *testing.T) { - - v1 := fdv2proto.NewSelector("", 1) + v1 := fdv2proto.NewSelector("foo", 1) none := fdv2proto.NoSelector() tests := []struct { name string selector fdv2proto.Selector persist bool }{ - {"versioned data, persist", v1, true}, - {"versioned data, do not persist", v1, false}, - {"unversioned data, persist", none, true}, - {"unversioned data, do not persist", none, false}, + {"with selector, persist", v1, true}, + {"with selector, do not persist", v1, false}, + {"no selector, persist", none, true}, + {"no selector, do not persist", none, false}, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { logCapture := ldlogtest.NewMockLog() store := NewStore(logCapture.Loggers) defer store.Close() - store.SetBasis([]fdv2proto.Event{}, tt.selector, tt.persist) + assert.NoError(t, store.SetBasis([]fdv2proto.Event{}, tt.selector, tt.persist)) assert.True(t, store.IsInitialized()) }) } } func TestStore_Commit(t *testing.T) { - t.Run("no persistent store doesn't cause an error", func(t *testing.T) { + t.Run("absence of persistent store doesn't cause error when committing", func(t *testing.T) { logCapture := ldlogtest.NewMockLog() store := NewStore(logCapture.Loggers) defer store.Close() @@ -68,11 +74,14 @@ func TestStore_Commit(t *testing.T) { t.Run("persist-marked memory items are copied to persistent store in r/w mode", func(t *testing.T) { logCapture := ldlogtest.NewMockLog() + // isDown causes the fake to reject updates (until flipped to false). spy := &fakeStore{isDown: true} store := NewStore(logCapture.Loggers).WithPersistence(spy, subsystems.DataStoreModeReadWrite, nil) defer store.Close() + // The store receives data as a list of events, but the persistent store receives them as an + // []ldstoretypes.Collection. 
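+		// For example (illustrative), the two PutObject events constructed below correspond to:
+		//
+		//	[]ldstoretypes.Collection{
+		//		{Kind: datakinds.Features, Items: [{Key: "foo", Item: {Version: 1}}]},
+		//		{Kind: datakinds.Segments, Items: [{Key: "bar", Item: {Version: 2}}]},
+		//	}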
input := []fdv2proto.Event{ fdv2proto.PutObject{Kind: datakinds.Features, Key: "foo", Object: ldstoretypes.ItemDescriptor{Version: 1}}, fdv2proto.PutObject{Kind: datakinds.Segments, Key: "bar", Object: ldstoretypes.ItemDescriptor{Version: 2}}, @@ -95,10 +104,12 @@ func TestStore_Commit(t *testing.T) { // There should be an error since writing to the store will fail. assert.Error(t, store.SetBasis(input, fdv2proto.NoSelector(), true)) + // Since writing should have failed, there should be no data in the persistent store. require.Empty(t, spy.initPayload) spy.isDown = false + // This time, the data should be stored properly. require.NoError(t, store.Commit()) assert.Equal(t, output, spy.initPayload) @@ -106,8 +117,10 @@ func TestStore_Commit(t *testing.T) { t.Run("non-persist memory items are not copied to persistent store in r/w mode", func(t *testing.T) { logCapture := ldlogtest.NewMockLog() - spy := &fakeStore{} - store := NewStore(logCapture.Loggers).WithPersistence(&fakeStore{}, subsystems.DataStoreModeReadWrite, nil) + + // The fake should accept updates. + spy := &fakeStore{isDown: false} + store := NewStore(logCapture.Loggers).WithPersistence(spy, subsystems.DataStoreModeReadWrite, nil) defer store.Close() input := []fdv2proto.Event{ @@ -117,16 +130,20 @@ func TestStore_Commit(t *testing.T) { assert.NoError(t, store.SetBasis(input, fdv2proto.NoSelector(), false)) + // Since SetBasis will immediately mirror the data if persist == true, we can check this is empty now. require.Empty(t, spy.initPayload) require.NoError(t, store.Commit()) + // Commit should be a no-op. This tests that the persist status was saved. assert.Empty(t, spy.initPayload) }) t.Run("persist-marked memory items are not copied to persistent store in r-only mode", func(t *testing.T) { logCapture := ldlogtest.NewMockLog() - spy := &fakeStore{} + + // The fake should accept updates. + spy := &fakeStore{isDown: false} store := NewStore(logCapture.Loggers).WithPersistence(spy, subsystems.DataStoreModeRead, nil) defer store.Close() @@ -135,12 +152,14 @@ func TestStore_Commit(t *testing.T) { fdv2proto.PutObject{Kind: datakinds.Segments, Key: "bar", Object: ldstoretypes.ItemDescriptor{Version: 2}}, } + // Even though persist is true, the store was marked as read-only, so it shouldn't be written to. assert.NoError(t, store.SetBasis(input, fdv2proto.NoSelector(), true)) require.Empty(t, spy.initPayload) require.NoError(t, store.Commit()) + // Same with commit. assert.Empty(t, spy.initPayload) }) } @@ -167,10 +186,13 @@ func TestStore_GetActive(t *testing.T) { t.Run("persistent store is active if configured", func(t *testing.T) { logCapture := ldlogtest.NewMockLog() + store := NewStore(logCapture.Loggers).WithPersistence(&fakeStore{}, subsystems.DataStoreModeReadWrite, nil) defer store.Close() _, err := store.Get(ldstoreimpl.Features(), "foo") + + // The fakeStore should return a specific error when Get is called. assert.Equal(t, errImAPersistentStore, err) }) @@ -179,20 +201,52 @@ func TestStore_GetActive(t *testing.T) { store := NewStore(logCapture.Loggers).WithPersistence(&fakeStore{}, subsystems.DataStoreModeReadWrite, nil) defer store.Close() + // Before there's any data, if we call Get the persistent store should be accessed. 
_, err := store.Get(ldstoreimpl.Features(), "foo")
 		assert.Equal(t, errImAPersistentStore, err)
 
 		input := []fdv2proto.Event{
 			fdv2proto.PutObject{Kind: datakinds.Features, Key: "foo", Object: ldstoretypes.ItemDescriptor{Version: 1}},
 		}
+
 		assert.NoError(t, store.SetBasis(input, fdv2proto.NoSelector(), false))
 
+		// Now that there's memory data, the persistent store should no longer be accessed.
 		foo, err := store.Get(ldstoreimpl.Features(), "foo")
 		assert.NoError(t, err)
 		assert.Equal(t, 1, foo.Version)
 	})
 }
 
+func TestStore_SelectorIsRemembered(t *testing.T) {
+	logCapture := ldlogtest.NewMockLog()
+	store := NewStore(logCapture.Loggers)
+	defer store.Close()
+
+	selector1 := fdv2proto.NewSelector("foo", 1)
+	selector2 := fdv2proto.NewSelector("bar", 2)
+	selector3 := fdv2proto.NewSelector("baz", 3)
+	selector4 := fdv2proto.NewSelector("qux", 4)
+	selector5 := fdv2proto.NewSelector("this better be the last one", 5)
+
+	assert.NoError(t, store.SetBasis([]fdv2proto.Event{}, selector1, false))
+	assert.Equal(t, selector1, store.Selector())
+
+	assert.NoError(t, store.SetBasis([]fdv2proto.Event{}, selector2, false))
+	assert.Equal(t, selector2, store.Selector())
+
+	assert.NoError(t, store.ApplyDelta([]fdv2proto.Event{}, selector3, false))
+	assert.Equal(t, selector3, store.Selector())
+
+	assert.NoError(t, store.ApplyDelta([]fdv2proto.Event{}, selector4, false))
+	assert.Equal(t, selector4, store.Selector())
+
+	assert.NoError(t, store.Commit())
+	assert.Equal(t, selector4, store.Selector())
+
+	assert.NoError(t, store.SetBasis([]fdv2proto.Event{}, selector5, false))
+	assert.Equal(t, selector5, store.Selector())
+}
+
 func TestStore_Concurrency(t *testing.T) {
 	t.Run("methods using the active store", func(t *testing.T) {
 		logCapture := ldlogtest.NewMockLog()
@@ -200,47 +254,37 @@
 		defer store.Close()
 
 		var wg sync.WaitGroup
-		go func() {
-			wg.Add(1)
-			defer wg.Done()
-			for i := 0; i < 100; i++ {
-				_, _ = store.Get(ldstoreimpl.Features(), "foo")
-				time.Sleep(time.Duration(rand.Intn(10)) * time.Millisecond)
-			}
-		}()
-		go func() {
-			wg.Add(1)
-			defer wg.Done()
-			for i := 0; i < 100; i++ {
-				_, _ = store.GetAll(ldstoreimpl.Features())
-				time.Sleep(time.Duration(rand.Intn(10)) * time.Millisecond)
-			}
-		}()
-		go func() {
-			wg.Add(1)
-			defer wg.Done()
-			for i := 0; i < 100; i++ {
-				_ = store.IsInitialized()
-				time.Sleep(time.Duration(rand.Intn(10)) * time.Millisecond)
-			}
-		}()
-		go func() {
-			wg.Add(1)
-			defer wg.Done()
-			for i := 0; i < 100; i++ {
-				_ = store.SetBasis([]fdv2proto.Event{}, fdv2proto.NoSelector(), true)
-				time.Sleep(time.Duration(rand.Intn(10)) * time.Millisecond)
-			}
-		}()
-		go func() {
-			wg.Add(1)
-			defer wg.Done()
-			for i := 0; i < 100; i++ {
-				store.ApplyDelta([]fdv2proto.Event{}, fdv2proto.NoSelector(), true)
-				time.Sleep(time.Duration(rand.Intn(10)) * time.Millisecond)
-			}
-		}()
+		// wg.Add must happen before the goroutine is launched; otherwise wg.Wait could
+		// return before any of the workers have started.
+		run := func(f func()) {
+			wg.Add(1)
+			go func() {
+				defer wg.Done()
+				for i := 0; i < 100; i++ {
+					f()
+					time.Sleep(time.Duration(rand.Intn(10)) * time.Millisecond)
+				}
+			}()
+		}
+
+		run(func() {
+			_, _ = store.Get(ldstoreimpl.Features(), "foo")
+		})
+		run(func() {
+			_, _ = store.GetAll(ldstoreimpl.Features())
+		})
+		run(func() {
+			_ = store.GetDataStoreStatusProvider()
+		})
+		run(func() {
+			_ = store.IsInitialized()
+		})
+		run(func() {
+			_ = store.SetBasis([]fdv2proto.Event{}, fdv2proto.NoSelector(), true)
+		})
+		run(func() {
+			_ = store.ApplyDelta([]fdv2proto.Event{}, fdv2proto.NoSelector(), true)
+		})
+		run(func() {
+			_ = store.Selector()
+		})
+
+		wg.Wait()
 	})
 }
diff --git a/internal/fdv2proto/proto.go b/internal/fdv2proto/events.go
similarity index 67%
rename from internal/fdv2proto/proto.go
rename to internal/fdv2proto/events.go
index 982f2914..f99cc3fb
100644
--- a/internal/fdv2proto/proto.go
+++ b/internal/fdv2proto/events.go
@@ -9,6 +9,10 @@ const (
 	IntentTransferChanges = IntentCode("xfer-changes")
 )
 
+type Event interface {
+	Name() EventName
+}
+
 type EventName string
 
 const (
@@ -21,40 +25,6 @@ const (
 	EventError = EventName("error")
 )
 
-type Selector struct {
-	state   string
-	version int
-	set     bool
-}
-
-func NoSelector() Selector {
-	return Selector{set: false}
-}
-
-func NewSelector(state string, version int) Selector {
-	return Selector{state: state, version: version, set: true}
-}
-
-func (s Selector) IsSet() bool {
-	return s.set
-}
-
-func (s Selector) State() string {
-	return s.state
-}
-
-func (s Selector) Version() int {
-	return s.version
-}
-
-func (s Selector) Get() (string, int, bool) {
-	return s.state, s.version, s.set
-}
-
-type Event interface {
-	Name() EventName
-}
-
 type DeleteObject struct {
 	Version int
 	Kind    ldstoretypes.DataKind
diff --git a/internal/fdv2proto/selector.go b/internal/fdv2proto/selector.go
new file mode 100644
index 00000000..8d462352
--- /dev/null
+++ b/internal/fdv2proto/selector.go
@@ -0,0 +1,31 @@
+package fdv2proto
+
+type Selector struct {
+	state   string
+	version int
+	set     bool
+}
+
+func NoSelector() Selector {
+	return Selector{set: false}
+}
+
+func NewSelector(state string, version int) Selector {
+	return Selector{state: state, version: version, set: true}
+}
+
+func (s Selector) IsSet() bool {
+	return s.set
+}
+
+func (s Selector) State() string {
+	return s.state
+}
+
+func (s Selector) Version() int {
+	return s.version
+}
+
+func (s Selector) Get() (string, int, bool) {
+	return s.state, s.version, s.set
+}

From 2a54265957d2093307da8b34f9eae88445ac0c0e Mon Sep 17 00:00:00 2001
From: Casey Waldren
Date: Fri, 20 Sep 2024 17:43:26 -0700
Subject: [PATCH 44/62] optimize comments on store.go

---
 internal/datasystem/store.go | 104 +++++++++++++++++++----------
 1 file changed, 58 insertions(+), 46 deletions(-)

diff --git a/internal/datasystem/store.go b/internal/datasystem/store.go
index 7ea8370d..d1a87921 100644
--- a/internal/datasystem/store.go
+++ b/internal/datasystem/store.go
@@ -20,37 +20,43 @@ import (
 // from holding on to a reference to the persistent store even when we swap to the in-memory store.
 //
 // Once the in-memory store has data (either from initializers running, or from a synchronizer), the persistent
-// store is no longer regarded as active. From that point forward, GetActive() will return the in-memory store.
+// store is no longer regarded as active. From that point forward, calls to Get will serve data from the memory
+// store.
 //
-// The idea is that persistent stores can offer a way to immediately start evaluating flags before a connection
-// is made to LD (or even in a very brief moment before an initializer has run.) The persistent store has caching
-// logic which can result in inconsistent/stale date being used. Therefore, once we have fresh data, we don't
-// want to use the persistent store at all.
+// One motivation behind using persistent stores in this way is to offer a way to immediately start evaluating
+// flags before a connection is made to LD (or even in a very brief moment before an initializer has run).
+// The persistent store has caching logic which can result in inconsistent/stale data being used. Therefore, once we
+// have fresh data, we don't want to use the persistent store at all for reads.
+//
+// One complication is that persistent stores have historically operated in multiple regimes.
The first: "daemon mode", +// where the SDK is effectively using the store in read-only mode, with the store being populated by Relay/another SDK. +// +// The second is plain persistent store mode, where it is both read and written to. In the FDv2 system, we explicitly +// differentiate these cases using a read/read-write mode. In all cases, the in-memory store is used once it has data +// available. // -// A complication is that persistent stores have historically operated in multiple regimes. The first is "daemon mode", -// where the SDK is effectively using the store in read-only mode, with the store being populated by Relay or another SDK. -// The second is just plain persistent store mode, where it is both read and written to. In the FDv2 system, we explicitly -// differentiate these cases using a read/read-write mode. In all cases, the in-memory store is used once it has data available. // This contrasts from FDv1 where even if data from LD is available, that data may fall out of memory due to the persistent // store's caching logic ("sparse mode", when the TTL is non-infinite). // // We have found this to almost always be undesirable for users. type Store struct { - // Represents the SDK's source of truth for flag evals before initialization, or permanently if there are - // no initializers/synchronizers configured. This is option; if not defined, only the memoryStore is used. + // Source of truth for flag evals (before initialization), or permanently if there are + // no initializers/synchronizers configured. Optional; if not defined, only memoryStore is used. persistentStore *persistentStore - // Represents the SDK's source of truth for flag evaluations (once initialized). Before initialization, + // Source of truth for flag evaluations (once initialized). Before initialization, // the persistentStore may be used if configured. memoryStore *datastore.MemoryStore - // True if the data in the memory store may be persisted to the persistent store. + // True if the data in the memory store may be persisted to the persistent store. This may be false + // in the case of an initializer/synchronizer that doesn't want to propagate memory to the persistent store, + // such as another database or untrusted file. Generally only LD data sources should request persisting data. persist bool // Points to the active store. Swapped upon initialization. active subsystems.DataStore - // Identifies the current data set. + // Identifies the current data. selector fdv2proto.Selector mu sync.RWMutex @@ -59,6 +65,7 @@ type Store struct { } type persistentStore struct { + // Contains the actual store implementation. impl subsystems.DataStore // The persistentStore is read-only, or read-write. In read-only mode, the store // is *never* written to, and only read before the in-memory store is initialized. @@ -78,8 +85,8 @@ type persistentStore struct { statusProvider interfaces.DataStoreStatusProvider } -// NewStore creates a new store. By default the store is in-memory. To add a persistent store, call SwapToPersistent. Ensure this is -// called at configuration time, only once and before the store is ever accessed. +// NewStore creates a new store. If a persistent store needs to be configured, call WithPersistence before any other +// method is called. func NewStore(loggers ldlog.Loggers) *Store { s := &Store{ persistentStore: nil, @@ -92,6 +99,25 @@ func NewStore(loggers ldlog.Loggers) *Store { return s } +// WithPersistence exists to accommodate the SDK's configuration builders. 
We need a ClientContext
+// before we can call Build to actually get the persistent store. That ClientContext requires the
+// DataDestination, which is what this store struct implements. Therefore, the calls to NewStore and
+// WithPersistence are separate.
+func (s *Store) WithPersistence(persistent subsystems.DataStore, mode subsystems.DataStoreMode, statusProvider interfaces.DataStoreStatusProvider) *Store {
+	s.mu.Lock()
+	defer s.mu.Unlock()
+
+	s.persistentStore = &persistentStore{
+		impl:           persistent,
+		mode:           mode,
+		statusProvider: statusProvider,
+	}
+
+	s.active = s.persistentStore.impl
+	return s
+}
+
+// Selector returns the current selector.
 func (s *Store) Selector() fdv2proto.Selector {
 	s.mu.RLock()
 	defer s.mu.RUnlock()
@@ -108,16 +134,9 @@ func (s *Store) Close() error {
 	return nil
 }
 
-// GetActive returns the active store, either persistent or in-memory. If there is no persistent store configured,
-// the in-memory store is always active.
-func (s *Store) getActive() subsystems.DataStore {
-	s.mu.RLock()
-	defer s.mu.RUnlock()
-	return s.active
-}
-
-func (s *Store) shouldPersist() bool {
-	return s.persist && s.persistentStore != nil && s.persistentStore.mode == subsystems.DataStoreModeReadWrite
+func (s *Store) SetBasis(events []fdv2proto.Event, selector fdv2proto.Selector, persist bool) error {
+	collections := fdv2proto.ToStorableItems(events)
+	return s.init(collections, selector, persist)
 }
 
 func (s *Store) init(allData []ldstoretypes.Collection, selector fdv2proto.Selector, persist bool) error {
@@ -138,9 +157,8 @@
 	return nil
 }
 
-func (s *Store) SetBasis(events []fdv2proto.Event, selector fdv2proto.Selector, persist bool) error {
-	collections := fdv2proto.ToStorableItems(events)
-	return s.init(collections, selector, persist)
+func (s *Store) shouldPersist() bool {
+	return s.persist && s.persistentStore != nil && s.persistentStore.mode == subsystems.DataStoreModeReadWrite
 }
 
 func (s *Store) ApplyDelta(events []fdv2proto.Event, selector fdv2proto.Selector, persist bool) error {
@@ -189,23 +207,8 @@ func (s *Store) GetDataStoreStatusProvider() interfaces.DataStoreStatusProvider {
 	return s.persistentStore.statusProvider
 }
 
-// WithPersistence exists only because of the way the SDK's configuration builders work - we need a ClientContext
-// before we can call Build to actually get the persistent store. That ClientContext requires the
-// DataStoreUpdateSink, which is what this store struct implements.
-func (s *Store) WithPersistence(persistent subsystems.DataStore, mode subsystems.DataStoreMode, statusProvider interfaces.DataStoreStatusProvider) *Store {
-	s.mu.Lock()
-	defer s.mu.Unlock()
-
-	s.persistentStore = &persistentStore{
-		impl:           persistent,
-		mode:           mode,
-		statusProvider: statusProvider,
-	}
-
-	s.active = s.persistentStore.impl
-	return s
-}
-
+// Commit persists the data in the memory store to the persistent store, if configured. The persistent store
+// must also be in write mode, and the last call to SetBasis or ApplyDelta must have had persist set to true.
 func (s *Store) Commit() error {
 	s.mu.RLock()
 	defer s.mu.RUnlock()
func (s *Store) GetAll(kind ldstoretypes.DataKind) ([]ldstoretypes.KeyedItemDescriptor, error) { return s.getActive().GetAll(kind) } +//nolint:revive // Implementation for ReadOnlyStore. func (s *Store) Get(kind ldstoretypes.DataKind, key string) (ldstoretypes.ItemDescriptor, error) { return s.getActive().Get(kind, key) } +//nolint:revive // Implementation for ReadOnlyStore. func (s *Store) IsInitialized() bool { return s.getActive().IsInitialized() } From 76da39e47137eb20caa9433b3076e9e5c8924026 Mon Sep 17 00:00:00 2001 From: Casey Waldren Date: Fri, 20 Sep 2024 17:52:29 -0700 Subject: [PATCH 45/62] add dump method to memory store for atomically getting segments and features --- .../datastore/in_memory_data_store_impl.go | 21 +++++++++++++++++- internal/datasystem/store.go | 22 ++++++------------- 2 files changed, 27 insertions(+), 16 deletions(-) diff --git a/internal/datastore/in_memory_data_store_impl.go b/internal/datastore/in_memory_data_store_impl.go index 029a88e7..ed1aba50 100644 --- a/internal/datastore/in_memory_data_store_impl.go +++ b/internal/datastore/in_memory_data_store_impl.go @@ -93,6 +93,14 @@ func (store *MemoryStore) Get(kind ldstoretypes.DataKind, key string) (ldstorety func (store *MemoryStore) GetAll(kind ldstoretypes.DataKind) ([]ldstoretypes.KeyedItemDescriptor, error) { store.RLock() + itemsOut := store.getAll(kind) + + store.RUnlock() + + return itemsOut, nil +} + +func (store *MemoryStore) getAll(kind ldstoretypes.DataKind) []ldstoretypes.KeyedItemDescriptor { var itemsOut []ldstoretypes.KeyedItemDescriptor if itemsMap, ok := store.allData[kind]; ok { if len(itemsMap) > 0 { @@ -102,10 +110,21 @@ func (store *MemoryStore) GetAll(kind ldstoretypes.DataKind) ([]ldstoretypes.Key } } } + return itemsOut +} + +func (store *MemoryStore) Dump() []ldstoretypes.Collection { + store.RLock() + + var allData []ldstoretypes.Collection + for kind := range store.allData { + itemsOut := store.getAll(kind) + allData = append(allData, ldstoretypes.Collection{Kind: kind, Items: itemsOut}) + } store.RUnlock() - return itemsOut, nil + return allData } func (store *MemoryStore) upsert( diff --git a/internal/datasystem/store.go b/internal/datasystem/store.go index d1a87921..07a9a8fa 100644 --- a/internal/datasystem/store.go +++ b/internal/datasystem/store.go @@ -7,13 +7,12 @@ import ( "github.com/launchdarkly/go-sdk-common/v3/ldlog" "github.com/launchdarkly/go-server-sdk/v7/interfaces" - "github.com/launchdarkly/go-server-sdk/v7/internal/datakinds" "github.com/launchdarkly/go-server-sdk/v7/internal/datastore" "github.com/launchdarkly/go-server-sdk/v7/subsystems" "github.com/launchdarkly/go-server-sdk/v7/subsystems/ldstoretypes" ) -// Store is a hybrid persistent/in-memory store that serves queries for data from the evaluation +// Store is a dual-mode persistent/in-memory store that serves queries for data from the evaluation // algorithm. // // At any given moment, 1 of 2 stores is active: in-memory, or persistent. This doesn't preclude a caller @@ -85,6 +84,10 @@ type persistentStore struct { statusProvider interfaces.DataStoreStatusProvider } +func (p *persistentStore) writable() bool { + return p != nil && p.mode == subsystems.DataStoreModeReadWrite +} + // NewStore creates a new store. If a persistent store needs to be configured, call WithPersistence before any other // method is called. 
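+//
+// A typical setup looks like this (illustrative; persistentStoreImpl and statusProvider stand in
+// for components built from the SDK configuration):
+//
+//	store := NewStore(loggers).WithPersistence(persistentStoreImpl, subsystems.DataStoreModeReadWrite, statusProvider)
+//	defer store.Close()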
func NewStore(loggers ldlog.Loggers) *Store { @@ -158,7 +161,7 @@ func (s *Store) init(allData []ldstoretypes.Collection, selector fdv2proto.Selec } func (s *Store) shouldPersist() bool { - return s.persist && s.persistentStore != nil && s.persistentStore.mode == subsystems.DataStoreModeReadWrite + return s.persist && s.persistentStore.writable() } func (s *Store) ApplyDelta(events []fdv2proto.Event, selector fdv2proto.Selector, persist bool) error { @@ -214,18 +217,7 @@ func (s *Store) Commit() error { defer s.mu.RUnlock() if s.shouldPersist() { - flags, err := s.memoryStore.GetAll(datakinds.Features) - if err != nil { - return err - } - segments, err := s.memoryStore.GetAll(datakinds.Segments) - if err != nil { - return err - } - return s.persistentStore.impl.Init([]ldstoretypes.Collection{ - {Kind: datakinds.Features, Items: flags}, - {Kind: datakinds.Segments, Items: segments}, - }) + return s.persistentStore.impl.Init(s.memoryStore.Dump()) } return nil } From f4772f61568b2932317a483deb5f016dd8f53cb8 Mon Sep 17 00:00:00 2001 From: Casey Waldren Date: Mon, 23 Sep 2024 12:06:51 -0700 Subject: [PATCH 46/62] inMemoryStore tests for SetBasis/ApplyDelta --- .../datastore/in_memory_data_store_impl.go | 13 +- .../in_memory_data_store_impl_test.go | 154 +++++++++++++++++- internal/fdv2proto/event_to_storable_item.go | 1 - 3 files changed, 163 insertions(+), 5 deletions(-) diff --git a/internal/datastore/in_memory_data_store_impl.go b/internal/datastore/in_memory_data_store_impl.go index ed1aba50..4176dd68 100644 --- a/internal/datastore/in_memory_data_store_impl.go +++ b/internal/datastore/in_memory_data_store_impl.go @@ -56,16 +56,25 @@ func (store *MemoryStore) SetBasis(allData []ldstoretypes.Collection) { _ = store.Init(allData) } -func (store *MemoryStore) ApplyDelta(allData []ldstoretypes.Collection) { +func (store *MemoryStore) ApplyDelta(allData []ldstoretypes.Collection) map[ldstoretypes.DataKind]map[string]bool { + + updatedMap := make(map[ldstoretypes.DataKind]map[string]bool) + store.Lock() for _, coll := range allData { for _, item := range coll.Items { - store.upsert(coll.Kind, item.Key, item.Item) + updated := store.upsert(coll.Kind, item.Key, item.Item) + if updatedMap[coll.Kind] == nil { + updatedMap[coll.Kind] = make(map[string]bool) + } + updatedMap[coll.Kind][item.Key] = updated } } store.Unlock() + + return updatedMap } func (store *MemoryStore) Get(kind ldstoretypes.DataKind, key string) (ldstoretypes.ItemDescriptor, error) { diff --git a/internal/datastore/in_memory_data_store_impl_test.go b/internal/datastore/in_memory_data_store_impl_test.go index c872b1ec..4d4a9dbf 100644 --- a/internal/datastore/in_memory_data_store_impl_test.go +++ b/internal/datastore/in_memory_data_store_impl_test.go @@ -10,7 +10,6 @@ import ( "github.com/launchdarkly/go-server-sdk-evaluation/v3/ldbuilders" "github.com/launchdarkly/go-server-sdk/v7/internal/datakinds" "github.com/launchdarkly/go-server-sdk/v7/internal/sharedtest" - "github.com/launchdarkly/go-server-sdk/v7/subsystems" "github.com/launchdarkly/go-server-sdk/v7/subsystems/ldstoretypes" "github.com/stretchr/testify/assert" @@ -23,6 +22,9 @@ func TestInMemoryDataStore(t *testing.T) { t.Run("GetAll", testInMemoryDataStoreGetAll) t.Run("Upsert", testInMemoryDataStoreUpsert) t.Run("Delete", testInMemoryDataStoreDelete) + t.Run("SetBasis", testInMemoryDataStoreSetBasis) + t.Run("ApplyDelta", testInMemoryDataStoreApplyDelta) + t.Run("Dump", testInMemoryDataStoreDump) t.Run("IsStatusMonitoringEnabled", func(t *testing.T) { assert.False(t, 
makeInMemoryStore().IsStatusMonitoringEnabled())
@@ -33,7 +35,7 @@ func TestInMemoryDataStore(t *testing.T) {
 	})
 }
 
-func makeInMemoryStore() subsystems.DataStore {
+func makeInMemoryStore() *MemoryStore {
 	return NewInMemoryDataStore(sharedtest.NewTestLoggers())
 }
 
@@ -47,6 +49,8 @@ func extractCollections(allData []ldstoretypes.Collection) [][]ldstoretypes.KeyedItemDescriptor {
 
 type dataItemCreator func(key string, version int, otherProperty bool) ldstoretypes.ItemDescriptor
 
+type collectionCreator func(key string, version int, otherProperty bool) (ldstoretypes.ItemDescriptor, []ldstoretypes.Collection)
+
 func forAllDataKinds(t *testing.T, test func(*testing.T, ldstoretypes.DataKind, dataItemCreator)) {
 	test(t, datakinds.Features, func(key string, version int, otherProperty bool) ldstoretypes.ItemDescriptor {
 		flag := ldbuilders.NewFlagBuilder(key).Version(version).On(otherProperty).Build()
@@ -61,6 +65,42 @@ func forAllDataKinds(t *testing.T, test func(*testing.T, ldstoretypes.DataKind, dataItemCreator)) {
 	})
 }
 
+func forAllDataKindsCollection(t *testing.T, test func(*testing.T, ldstoretypes.DataKind, collectionCreator)) {
+	test(t, datakinds.Features, func(key string, version int, otherProperty bool) (ldstoretypes.ItemDescriptor, []ldstoretypes.Collection) {
+		flag := ldbuilders.NewFlagBuilder(key).Version(version).On(otherProperty).Build()
+		descriptor := sharedtest.FlagDescriptor(flag)
+		return descriptor, []ldstoretypes.Collection{
+			{
+				Kind: datakinds.Features,
+				Items: []ldstoretypes.KeyedItemDescriptor{
+					{
+						Key:  flag.Key,
+						Item: descriptor,
+					},
+				},
+			},
+		}
+	})
+	test(t, datakinds.Segments, func(key string, version int, otherProperty bool) (ldstoretypes.ItemDescriptor, []ldstoretypes.Collection) {
+		segment := ldbuilders.NewSegmentBuilder(key).Version(version).Build()
+		if otherProperty {
+			segment.Included = []string{"arbitrary value"}
+		}
+		descriptor := sharedtest.SegmentDescriptor(segment)
+		return descriptor, []ldstoretypes.Collection{
+			{
+				Kind: datakinds.Segments,
+				Items: []ldstoretypes.KeyedItemDescriptor{
+					{
+						Key:  segment.Key,
+						Item: descriptor,
+					},
+				},
+			},
+		}
+	})
+}
+
 func testInMemoryDataStoreInit(t *testing.T) {
 	t.Run("makes store initialized", func(t *testing.T) {
 		store := makeInMemoryStore()
@@ -100,6 +140,48 @@ func testInMemoryDataStoreInit(t *testing.T) {
 	})
 }
 
+func testInMemoryDataStoreSetBasis(t *testing.T) {
+	// SetBasis is currently an alias for Init, so the tests should be the same. Once there is no longer a use-case
+	// for Init (i.e., when the fdv1 data system is removed), the Init tests can be deleted.
+ + t.Run("makes store initialized", func(t *testing.T) { + store := makeInMemoryStore() + allData := sharedtest.NewDataSetBuilder().Flags(ldbuilders.NewFlagBuilder("key").Build()).Build() + + store.SetBasis(allData) + + assert.True(t, store.IsInitialized()) + }) + + t.Run("completely replaces previous data", func(t *testing.T) { + store := makeInMemoryStore() + flag1 := ldbuilders.NewFlagBuilder("key1").Build() + segment1 := ldbuilders.NewSegmentBuilder("key1").Build() + allData1 := sharedtest.NewDataSetBuilder().Flags(flag1).Segments(segment1).Build() + + store.SetBasis(allData1) + + flags, err := store.GetAll(datakinds.Features) + require.NoError(t, err) + segments, err := store.GetAll(datakinds.Segments) + require.NoError(t, err) + sort.Slice(flags, func(i, j int) bool { return flags[i].Key < flags[j].Key }) + assert.Equal(t, extractCollections(allData1), [][]ldstoretypes.KeyedItemDescriptor{flags, segments}) + + flag2 := ldbuilders.NewFlagBuilder("key2").Build() + segment2 := ldbuilders.NewSegmentBuilder("key2").Build() + allData2 := sharedtest.NewDataSetBuilder().Flags(flag2).Segments(segment2).Build() + + store.SetBasis(allData2) + + flags, err = store.GetAll(datakinds.Features) + require.NoError(t, err) + segments, err = store.GetAll(datakinds.Segments) + require.NoError(t, err) + assert.Equal(t, extractCollections(allData2), [][]ldstoretypes.KeyedItemDescriptor{flags, segments}) + }) +} + func testInMemoryDataStoreGet(t *testing.T) { const unknownKey = "unknown-key" @@ -304,3 +386,71 @@ func testInMemoryDataStoreDelete(t *testing.T) { }) }) } + +func testInMemoryDataStoreApplyDelta(t *testing.T) { + + // These are the equivalent of the existing upsert tests. + forAllDataKindsCollection(t, func(t *testing.T, kind ldstoretypes.DataKind, makeItem collectionCreator) { + t.Run("newer version", func(t *testing.T) { + store := makeInMemoryStore() + store.SetBasis(sharedtest.NewDataSetBuilder().Build()) + + _, collection1 := makeItem("key", 10, false) + + updates := store.ApplyDelta(collection1) + assert.True(t, updates[kind]["key"]) + + item1a, collection1a := makeItem("key", 11, true) + + updates = store.ApplyDelta(collection1a) + assert.True(t, updates[kind]["key"]) + + result, err := store.Get(kind, "key") + require.NoError(t, err) + assert.Equal(t, item1a, result) + + }) + + t.Run("older version", func(t *testing.T) { + store := makeInMemoryStore() + store.SetBasis(sharedtest.NewDataSetBuilder().Build()) + + item1Version := 10 + item1, collection1 := makeItem("key", item1Version, false) + + updates := store.ApplyDelta(collection1) + assert.True(t, updates[kind]["key"]) + + _, collection1a := makeItem("key", item1Version-1, true) + + updates = store.ApplyDelta(collection1a) + assert.False(t, updates[kind]["key"]) + + result, err := store.Get(kind, "key") + require.NoError(t, err) + assert.Equal(t, item1, result) + }) + + t.Run("same version", func(t *testing.T) { + store := makeInMemoryStore() + store.SetBasis(sharedtest.NewDataSetBuilder().Build()) + + item1Version := 10 + item1, collection1 := makeItem("key", item1Version, false) + updated := store.ApplyDelta(collection1) + assert.True(t, updated[kind]["key"]) + + _, collection1a := makeItem("key", item1Version, true) + updated = store.ApplyDelta(collection1a) + assert.False(t, updated[kind]["key"]) + + result, err := store.Get(kind, "key") + require.NoError(t, err) + assert.Equal(t, item1, result) + }) + }) +} + +func testInMemoryDataStoreDump(t *testing.T) { + +} diff --git a/internal/fdv2proto/event_to_storable_item.go 
b/internal/fdv2proto/event_to_storable_item.go
index bbcb48a7..5db3e2f2 100644
--- a/internal/fdv2proto/event_to_storable_item.go
+++ b/internal/fdv2proto/event_to_storable_item.go
@@ -51,7 +51,6 @@ func ToStorableItems(events []Event) []ldstoretypes.Collection {
 			})
 		}
 	}
-
 	}
 
 	return []ldstoretypes.Collection{flagCollection, segmentCollection}

From 521c1d3693fc41383792e329a712225e5876b272 Mon Sep 17 00:00:00 2001
From: Casey Waldren
Date: Mon, 23 Sep 2024 14:27:04 -0700
Subject: [PATCH 47/62] add new memorystorev2 package

---
 .../datastore/in_memory_data_store_impl.go    |  86 +----
 .../in_memory_data_store_impl_test.go         | 154 +-------
 internal/memorystorev2/memory_store.go        | 164 +++++++++
 internal/memorystorev2/memory_store_test.go   | 330 ++++++++++++++++++
 4 files changed, 515 insertions(+), 219 deletions(-)
 create mode 100644 internal/memorystorev2/memory_store.go
 create mode 100644 internal/memorystorev2/memory_store_test.go

diff --git a/internal/datastore/in_memory_data_store_impl.go b/internal/datastore/in_memory_data_store_impl.go
index 4176dd68..c00fa88f 100644
--- a/internal/datastore/in_memory_data_store_impl.go
+++ b/internal/datastore/in_memory_data_store_impl.go
@@ -4,10 +4,11 @@ import (
 	"sync"
 
 	"github.com/launchdarkly/go-sdk-common/v3/ldlog"
+	"github.com/launchdarkly/go-server-sdk/v7/subsystems"
 	"github.com/launchdarkly/go-server-sdk/v7/subsystems/ldstoretypes"
 )
 
-// MemoryStore is a memory based DataStore implementation, backed by a lock-striped map.
+// inMemoryDataStore is a memory based DataStore implementation, backed by a lock-striped map.
 //
 // Implementation notes:
 //
@@ -16,23 +17,24 @@ import (
 // Get and IsInitialized). To make it safe to hold a lock without deferring the unlock, we must ensure that
 // there is only one return point from each method, and that there is no operation that could possibly cause a
 // panic after the lock has been acquired. See notes on performance in CONTRIBUTING.md.
-type MemoryStore struct {
+type inMemoryDataStore struct {
 	allData       map[ldstoretypes.DataKind]map[string]ldstoretypes.ItemDescriptor
 	isInitialized bool
 	sync.RWMutex
 	loggers ldlog.Loggers
 }
 
-// NewInMemoryDataStore creates an instance of the in-memory data store. This is not part of the public API.
-func NewInMemoryDataStore(loggers ldlog.Loggers) *MemoryStore {
-	return &MemoryStore{
+// NewInMemoryDataStore creates an instance of the in-memory data store. This is not part of the public API; it is
+// always called through ldcomponents.InMemoryDataStore().
+func NewInMemoryDataStore(loggers ldlog.Loggers) subsystems.DataStore { + return &inMemoryDataStore{ allData: make(map[ldstoretypes.DataKind]map[string]ldstoretypes.ItemDescriptor), isInitialized: false, loggers: loggers, } } -func (store *MemoryStore) Init(allData []ldstoretypes.Collection) error { +func (store *inMemoryDataStore) Init(allData []ldstoretypes.Collection) error { store.Lock() store.allData = make(map[ldstoretypes.DataKind]map[string]ldstoretypes.ItemDescriptor) @@ -52,32 +54,7 @@ func (store *MemoryStore) Init(allData []ldstoretypes.Collection) error { return nil } -func (store *MemoryStore) SetBasis(allData []ldstoretypes.Collection) { - _ = store.Init(allData) -} - -func (store *MemoryStore) ApplyDelta(allData []ldstoretypes.Collection) map[ldstoretypes.DataKind]map[string]bool { - - updatedMap := make(map[ldstoretypes.DataKind]map[string]bool) - - store.Lock() - - for _, coll := range allData { - for _, item := range coll.Items { - updated := store.upsert(coll.Kind, item.Key, item.Item) - if updatedMap[coll.Kind] == nil { - updatedMap[coll.Kind] = make(map[string]bool) - } - updatedMap[coll.Kind][item.Key] = updated - } - } - - store.Unlock() - - return updatedMap -} - -func (store *MemoryStore) Get(kind ldstoretypes.DataKind, key string) (ldstoretypes.ItemDescriptor, error) { +func (store *inMemoryDataStore) Get(kind ldstoretypes.DataKind, key string) (ldstoretypes.ItemDescriptor, error) { store.RLock() var coll map[string]ldstoretypes.ItemDescriptor @@ -99,17 +76,9 @@ func (store *MemoryStore) Get(kind ldstoretypes.DataKind, key string) (ldstorety return ldstoretypes.ItemDescriptor{}.NotFound(), nil } -func (store *MemoryStore) GetAll(kind ldstoretypes.DataKind) ([]ldstoretypes.KeyedItemDescriptor, error) { +func (store *inMemoryDataStore) GetAll(kind ldstoretypes.DataKind) ([]ldstoretypes.KeyedItemDescriptor, error) { store.RLock() - itemsOut := store.getAll(kind) - - store.RUnlock() - - return itemsOut, nil -} - -func (store *MemoryStore) getAll(kind ldstoretypes.DataKind) []ldstoretypes.KeyedItemDescriptor { var itemsOut []ldstoretypes.KeyedItemDescriptor if itemsMap, ok := store.allData[kind]; ok { if len(itemsMap) > 0 { @@ -119,27 +88,19 @@ func (store *MemoryStore) getAll(kind ldstoretypes.DataKind) []ldstoretypes.Keye } } } - return itemsOut -} - -func (store *MemoryStore) Dump() []ldstoretypes.Collection { - store.RLock() - - var allData []ldstoretypes.Collection - for kind := range store.allData { - itemsOut := store.getAll(kind) - allData = append(allData, ldstoretypes.Collection{Kind: kind, Items: itemsOut}) - } store.RUnlock() - return allData + return itemsOut, nil } -func (store *MemoryStore) upsert( +func (store *inMemoryDataStore) Upsert( kind ldstoretypes.DataKind, key string, - newItem ldstoretypes.ItemDescriptor) bool { + newItem ldstoretypes.ItemDescriptor, +) (bool, error) { + store.Lock() + var coll map[string]ldstoretypes.ItemDescriptor var ok bool shouldUpdate := true @@ -159,32 +120,23 @@ func (store *MemoryStore) upsert( coll[key] = newItem updated = true } - return updated -} -func (store *MemoryStore) Upsert( - kind ldstoretypes.DataKind, - key string, - newItem ldstoretypes.ItemDescriptor, -) (bool, error) { - store.Lock() - updated := store.upsert(kind, key, newItem) store.Unlock() return updated, nil } -func (store *MemoryStore) IsInitialized() bool { +func (store *inMemoryDataStore) IsInitialized() bool { store.RLock() ret := store.isInitialized store.RUnlock() return ret } -func (store *MemoryStore) IsStatusMonitoringEnabled() bool { 
+func (store *inMemoryDataStore) IsStatusMonitoringEnabled() bool { return false } -func (store *MemoryStore) Close() error { +func (store *inMemoryDataStore) Close() error { return nil } diff --git a/internal/datastore/in_memory_data_store_impl_test.go b/internal/datastore/in_memory_data_store_impl_test.go index 4d4a9dbf..c872b1ec 100644 --- a/internal/datastore/in_memory_data_store_impl_test.go +++ b/internal/datastore/in_memory_data_store_impl_test.go @@ -10,6 +10,7 @@ import ( "github.com/launchdarkly/go-server-sdk-evaluation/v3/ldbuilders" "github.com/launchdarkly/go-server-sdk/v7/internal/datakinds" "github.com/launchdarkly/go-server-sdk/v7/internal/sharedtest" + "github.com/launchdarkly/go-server-sdk/v7/subsystems" "github.com/launchdarkly/go-server-sdk/v7/subsystems/ldstoretypes" "github.com/stretchr/testify/assert" @@ -22,9 +23,6 @@ func TestInMemoryDataStore(t *testing.T) { t.Run("GetAll", testInMemoryDataStoreGetAll) t.Run("Upsert", testInMemoryDataStoreUpsert) t.Run("Delete", testInMemoryDataStoreDelete) - t.Run("SetBasis", testInMemoryDataStoreSetBasis) - t.Run("ApplyDelta", testInMemoryDataStoreApplyDelta) - t.Run("Dump", testInMemoryDataStoreDump) t.Run("IsStatusMonitoringEnabled", func(t *testing.T) { assert.False(t, makeInMemoryStore().IsStatusMonitoringEnabled()) @@ -35,7 +33,7 @@ func TestInMemoryDataStore(t *testing.T) { }) } -func makeInMemoryStore() *MemoryStore { +func makeInMemoryStore() subsystems.DataStore { return NewInMemoryDataStore(sharedtest.NewTestLoggers()) } @@ -49,8 +47,6 @@ func extractCollections(allData []ldstoretypes.Collection) [][]ldstoretypes.Keye type dataItemCreator func(key string, version int, otherProperty bool) ldstoretypes.ItemDescriptor -type collectionCreator func(key string, version int, otherProperty bool) (ldstoretypes.ItemDescriptor, []ldstoretypes.Collection) - func forAllDataKinds(t *testing.T, test func(*testing.T, ldstoretypes.DataKind, dataItemCreator)) { test(t, datakinds.Features, func(key string, version int, otherProperty bool) ldstoretypes.ItemDescriptor { flag := ldbuilders.NewFlagBuilder(key).Version(version).On(otherProperty).Build() @@ -65,42 +61,6 @@ func forAllDataKinds(t *testing.T, test func(*testing.T, ldstoretypes.DataKind, }) } -func forAllDataKindsCollection(t *testing.T, test func(*testing.T, ldstoretypes.DataKind, collectionCreator)) { - test(t, datakinds.Features, func(key string, version int, otherProperty bool) (ldstoretypes.ItemDescriptor, []ldstoretypes.Collection) { - flag := ldbuilders.NewFlagBuilder(key).Version(version).On(otherProperty).Build() - descriptor := sharedtest.FlagDescriptor(flag) - return descriptor, []ldstoretypes.Collection{ - { - Kind: datakinds.Features, - Items: []ldstoretypes.KeyedItemDescriptor{ - { - Key: flag.Key, - Item: descriptor, - }, - }, - }, - } - }) - test(t, datakinds.Segments, func(key string, version int, otherProperty bool) (ldstoretypes.ItemDescriptor, []ldstoretypes.Collection) { - segment := ldbuilders.NewSegmentBuilder(key).Version(version).Build() - if otherProperty { - segment.Included = []string{"arbitrary value"} - } - descriptor := sharedtest.SegmentDescriptor(segment) - return descriptor, []ldstoretypes.Collection{ - { - Kind: datakinds.Segments, - Items: []ldstoretypes.KeyedItemDescriptor{ - { - Key: segment.Key, - Item: descriptor, - }, - }, - }, - } - }) -} - func testInMemoryDataStoreInit(t *testing.T) { t.Run("makes store initialized", func(t *testing.T) { store := makeInMemoryStore() @@ -140,48 +100,6 @@ func testInMemoryDataStoreInit(t *testing.T) { }) 
} -func testInMemoryDataStoreSetBasis(t *testing.T) { - // SetBasis is currently an alias for Init, so the tests should be the same. Once there is no longer a use-case - // for Init (when fdv1 data system is removed, the Init tests can be deleted.) - - t.Run("makes store initialized", func(t *testing.T) { - store := makeInMemoryStore() - allData := sharedtest.NewDataSetBuilder().Flags(ldbuilders.NewFlagBuilder("key").Build()).Build() - - store.SetBasis(allData) - - assert.True(t, store.IsInitialized()) - }) - - t.Run("completely replaces previous data", func(t *testing.T) { - store := makeInMemoryStore() - flag1 := ldbuilders.NewFlagBuilder("key1").Build() - segment1 := ldbuilders.NewSegmentBuilder("key1").Build() - allData1 := sharedtest.NewDataSetBuilder().Flags(flag1).Segments(segment1).Build() - - store.SetBasis(allData1) - - flags, err := store.GetAll(datakinds.Features) - require.NoError(t, err) - segments, err := store.GetAll(datakinds.Segments) - require.NoError(t, err) - sort.Slice(flags, func(i, j int) bool { return flags[i].Key < flags[j].Key }) - assert.Equal(t, extractCollections(allData1), [][]ldstoretypes.KeyedItemDescriptor{flags, segments}) - - flag2 := ldbuilders.NewFlagBuilder("key2").Build() - segment2 := ldbuilders.NewSegmentBuilder("key2").Build() - allData2 := sharedtest.NewDataSetBuilder().Flags(flag2).Segments(segment2).Build() - - store.SetBasis(allData2) - - flags, err = store.GetAll(datakinds.Features) - require.NoError(t, err) - segments, err = store.GetAll(datakinds.Segments) - require.NoError(t, err) - assert.Equal(t, extractCollections(allData2), [][]ldstoretypes.KeyedItemDescriptor{flags, segments}) - }) -} - func testInMemoryDataStoreGet(t *testing.T) { const unknownKey = "unknown-key" @@ -386,71 +304,3 @@ func testInMemoryDataStoreDelete(t *testing.T) { }) }) } - -func testInMemoryDataStoreApplyDelta(t *testing.T) { - - // These are the equivalent of the existing upsert tests. 
-	forAllDataKindsCollection(t, func(t *testing.T, kind ldstoretypes.DataKind, makeItem collectionCreator) {
-		t.Run("newer version", func(t *testing.T) {
-			store := makeInMemoryStore()
-			store.SetBasis(sharedtest.NewDataSetBuilder().Build())
-
-			_, collection1 := makeItem("key", 10, false)
-
-			updates := store.ApplyDelta(collection1)
-			assert.True(t, updates[kind]["key"])
-
-			item1a, collection1a := makeItem("key", 11, true)
-
-			updates = store.ApplyDelta(collection1a)
-			assert.True(t, updates[kind]["key"])
-
-			result, err := store.Get(kind, "key")
-			require.NoError(t, err)
-			assert.Equal(t, item1a, result)
-
-		})
-
-		t.Run("older version", func(t *testing.T) {
-			store := makeInMemoryStore()
-			store.SetBasis(sharedtest.NewDataSetBuilder().Build())
-
-			item1Version := 10
-			item1, collection1 := makeItem("key", item1Version, false)
-
-			updates := store.ApplyDelta(collection1)
-			assert.True(t, updates[kind]["key"])
-
-			_, collection1a := makeItem("key", item1Version-1, true)
-
-			updates = store.ApplyDelta(collection1a)
-			assert.False(t, updates[kind]["key"])
-
-			result, err := store.Get(kind, "key")
-			require.NoError(t, err)
-			assert.Equal(t, item1, result)
-		})
-
-		t.Run("same version", func(t *testing.T) {
-			store := makeInMemoryStore()
-			store.SetBasis(sharedtest.NewDataSetBuilder().Build())
-
-			item1Version := 10
-			item1, collection1 := makeItem("key", item1Version, false)
-			updated := store.ApplyDelta(collection1)
-			assert.True(t, updated[kind]["key"])
-
-			_, collection1a := makeItem("key", item1Version, true)
-			updated = store.ApplyDelta(collection1a)
-			assert.False(t, updated[kind]["key"])
-
-			result, err := store.Get(kind, "key")
-			require.NoError(t, err)
-			assert.Equal(t, item1, result)
-		})
-	})
-}
-
-func testInMemoryDataStoreDump(t *testing.T) {
-
-}
diff --git a/internal/memorystorev2/memory_store.go b/internal/memorystorev2/memory_store.go
new file mode 100644
index 00000000..0e50fa37
--- /dev/null
+++ b/internal/memorystorev2/memory_store.go
@@ -0,0 +1,164 @@
+package memorystorev2
+
+import (
+	"sync"
+
+	"github.com/launchdarkly/go-sdk-common/v3/ldlog"
+	"github.com/launchdarkly/go-server-sdk/v7/subsystems/ldstoretypes"
+)
+
+// Store contains flag and segment data, protected by a lock-striped map.
+//
+// Implementation notes:
+//
+// We deliberately do not use a defer pattern to manage the lock in these methods. Using defer adds a small but
+// consistent overhead, and these store methods may be called with very high frequency (at least in the case of
+// Get and IsInitialized). To make it safe to hold a lock without deferring the unlock, we must ensure that
+// there is only one return point from each method, and that there is no operation that could possibly cause a
+// panic after the lock has been acquired. See notes on performance in CONTRIBUTING.md.
+type Store struct {
+	allData       map[ldstoretypes.DataKind]map[string]ldstoretypes.ItemDescriptor
+	isInitialized bool
+	sync.RWMutex
+	loggers ldlog.Loggers
+}
+
+// New creates an instance of the in-memory data store. This is not part of the public API.
+func New(loggers ldlog.Loggers) *Store { + return &Store{ + allData: make(map[ldstoretypes.DataKind]map[string]ldstoretypes.ItemDescriptor), + isInitialized: false, + loggers: loggers, + } +} + +func (s *Store) SetBasis(allData []ldstoretypes.Collection) { + s.Lock() + + s.allData = make(map[ldstoretypes.DataKind]map[string]ldstoretypes.ItemDescriptor) + + for _, coll := range allData { + items := make(map[string]ldstoretypes.ItemDescriptor) + for _, item := range coll.Items { + items[item.Key] = item.Item + } + s.allData[coll.Kind] = items + } + + s.isInitialized = true + + s.Unlock() +} + +func (s *Store) ApplyDelta(allData []ldstoretypes.Collection) map[ldstoretypes.DataKind]map[string]bool { + + updatedMap := make(map[ldstoretypes.DataKind]map[string]bool) + + s.Lock() + + for _, coll := range allData { + for _, item := range coll.Items { + updated := s.upsert(coll.Kind, item.Key, item.Item) + if updatedMap[coll.Kind] == nil { + updatedMap[coll.Kind] = make(map[string]bool) + } + updatedMap[coll.Kind][item.Key] = updated + } + } + + s.Unlock() + + return updatedMap +} + +func (s *Store) Get(kind ldstoretypes.DataKind, key string) (ldstoretypes.ItemDescriptor, error) { + s.RLock() + + var coll map[string]ldstoretypes.ItemDescriptor + var item ldstoretypes.ItemDescriptor + var ok bool + coll, ok = s.allData[kind] + if ok { + item, ok = coll[key] + } + + s.RUnlock() + + if ok { + return item, nil + } + if s.loggers.IsDebugEnabled() { + s.loggers.Debugf(`Key %s not found in "%s"`, key, kind) + } + return ldstoretypes.ItemDescriptor{}.NotFound(), nil +} + +func (s *Store) GetAll(kind ldstoretypes.DataKind) ([]ldstoretypes.KeyedItemDescriptor, error) { + s.RLock() + + itemsOut := s.getAll(kind) + + s.RUnlock() + + return itemsOut, nil +} + +func (s *Store) getAll(kind ldstoretypes.DataKind) []ldstoretypes.KeyedItemDescriptor { + var itemsOut []ldstoretypes.KeyedItemDescriptor + if itemsMap, ok := s.allData[kind]; ok { + if len(itemsMap) > 0 { + itemsOut = make([]ldstoretypes.KeyedItemDescriptor, 0, len(itemsMap)) + for key, item := range itemsMap { + itemsOut = append(itemsOut, ldstoretypes.KeyedItemDescriptor{Key: key, Item: item}) + } + } + } + return itemsOut +} + +func (s *Store) Dump() []ldstoretypes.Collection { + s.RLock() + + var allData []ldstoretypes.Collection + for kind := range s.allData { + itemsOut := s.getAll(kind) + allData = append(allData, ldstoretypes.Collection{Kind: kind, Items: itemsOut}) + } + + s.RUnlock() + + return allData +} + +func (s *Store) upsert( + kind ldstoretypes.DataKind, + key string, + newItem ldstoretypes.ItemDescriptor) bool { + var coll map[string]ldstoretypes.ItemDescriptor + var ok bool + shouldUpdate := true + updated := false + if coll, ok = s.allData[kind]; ok { + if item, ok := coll[key]; ok { + if item.Version >= newItem.Version { + shouldUpdate = false + } + } + } else { + s.allData[kind] = map[string]ldstoretypes.ItemDescriptor{key: newItem} + shouldUpdate = false // because we already initialized the map with the new item + updated = true + } + if shouldUpdate { + coll[key] = newItem + updated = true + } + return updated +} + +func (s *Store) IsInitialized() bool { + s.RLock() + ret := s.isInitialized + s.RUnlock() + return ret +} diff --git a/internal/memorystorev2/memory_store_test.go b/internal/memorystorev2/memory_store_test.go new file mode 100644 index 00000000..238c176c --- /dev/null +++ b/internal/memorystorev2/memory_store_test.go @@ -0,0 +1,330 @@ +package memorystorev2 + +import ( + "fmt" + 
"github.com/launchdarkly/go-server-sdk/v7/internal/datastore" + "sort" + "testing" + + "github.com/launchdarkly/go-sdk-common/v3/ldlog" + "github.com/launchdarkly/go-sdk-common/v3/ldlogtest" + "github.com/launchdarkly/go-server-sdk-evaluation/v3/ldbuilders" + "github.com/launchdarkly/go-server-sdk/v7/internal/datakinds" + "github.com/launchdarkly/go-server-sdk/v7/internal/sharedtest" + "github.com/launchdarkly/go-server-sdk/v7/subsystems/ldstoretypes" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestInMemoryDataStore(t *testing.T) { + t.Run("Get", testInMemoryDataStoreGet) + t.Run("GetAll", testInMemoryDataStoreGetAll) + t.Run("SetBasis", testInMemoryDataStoreSetBasis) + t.Run("ApplyDelta", testInMemoryDataStoreApplyDelta) + t.Run("Dump", testInMemoryDataStoreDump) +} + +func makeMemoryStore() *Store { + return New(sharedtest.NewTestLoggers()) +} + +// The dataItemCreator/forAllDataKinds helpers work for testing the FDv1-style of interacting with the memory store, +// e.g. Upsert/Init. With FDv2, the store is initialized with SetBasis and updates are applied atomically in batches +// with ApplyDelta. In order to easily inject data into the store, and then make assertions based on the result of +// calling Get, we need a slightly more involved pattern. +// The main difference is that forAllDataKindsCollection now returns the ItemDescriptor, along with a collection +// containing only that item. That way, the collection can be passed to ApplyDelta, and the ItemDescriptor can be +// used when making assertions using the result of Get. +type collectionItemCreator func(key string, version int, otherProperty bool) (ldstoretypes.ItemDescriptor, []ldstoretypes.Collection) + +type collectionItemDeleter func(key string, version int) (ldstoretypes.ItemDescriptor, []ldstoretypes.Collection) + +func makeCollection(kind ldstoretypes.DataKind, key string, item ldstoretypes.ItemDescriptor) []ldstoretypes.Collection { + return []ldstoretypes.Collection{ + { + Kind: kind, + Items: []ldstoretypes.KeyedItemDescriptor{ + { + Key: key, + Item: item, + }, + }, + }, + } +} + +func forAllDataKindsCollection(t *testing.T, test func(*testing.T, ldstoretypes.DataKind, collectionItemCreator, collectionItemDeleter)) { + test(t, datakinds.Features, func(key string, version int, otherProperty bool) (ldstoretypes.ItemDescriptor, []ldstoretypes.Collection) { + flag := ldbuilders.NewFlagBuilder(key).Version(version).On(otherProperty).Build() + descriptor := sharedtest.FlagDescriptor(flag) + + return descriptor, makeCollection(datakinds.Features, flag.Key, descriptor) + }, func(key string, version int) (ldstoretypes.ItemDescriptor, []ldstoretypes.Collection) { + descriptor := ldstoretypes.ItemDescriptor{Version: version, Item: nil} + + return descriptor, makeCollection(datakinds.Features, key, descriptor) + }) + test(t, datakinds.Segments, func(key string, version int, otherProperty bool) (ldstoretypes.ItemDescriptor, []ldstoretypes.Collection) { + segment := ldbuilders.NewSegmentBuilder(key).Version(version).Build() + if otherProperty { + segment.Included = []string{"arbitrary value"} + } + descriptor := sharedtest.SegmentDescriptor(segment) + + return descriptor, makeCollection(datakinds.Segments, segment.Key, descriptor) + }, func(key string, version int) (ldstoretypes.ItemDescriptor, []ldstoretypes.Collection) { + descriptor := ldstoretypes.ItemDescriptor{Version: version, Item: nil} + + return descriptor, makeCollection(datakinds.Segments, key, descriptor) + }) +} + +func 
testInMemoryDataStoreSetBasis(t *testing.T) { + // SetBasis is currently an alias for Init, so the tests should be the same. Once there is no longer a use-case + // for Init (when fdv1 data system is removed, the Init tests can be deleted.) + + t.Run("makes store initialized", func(t *testing.T) { + store := makeMemoryStore() + allData := sharedtest.NewDataSetBuilder().Flags(ldbuilders.NewFlagBuilder("key").Build()).Build() + + store.SetBasis(allData) + + assert.True(t, store.IsInitialized()) + }) + + t.Run("completely replaces previous data", func(t *testing.T) { + store := makeMemoryStore() + flag1 := ldbuilders.NewFlagBuilder("key1").Build() + segment1 := ldbuilders.NewSegmentBuilder("key1").Build() + allData1 := sharedtest.NewDataSetBuilder().Flags(flag1).Segments(segment1).Build() + + store.SetBasis(allData1) + + flags, err := store.GetAll(datakinds.Features) + require.NoError(t, err) + segments, err := store.GetAll(datakinds.Segments) + require.NoError(t, err) + sort.Slice(flags, func(i, j int) bool { return flags[i].Key < flags[j].Key }) + assert.Equal(t, datastore.extractCollections(allData1), [][]ldstoretypes.KeyedItemDescriptor{flags, segments}) + + flag2 := ldbuilders.NewFlagBuilder("key2").Build() + segment2 := ldbuilders.NewSegmentBuilder("key2").Build() + allData2 := sharedtest.NewDataSetBuilder().Flags(flag2).Segments(segment2).Build() + + store.SetBasis(allData2) + + flags, err = store.GetAll(datakinds.Features) + require.NoError(t, err) + segments, err = store.GetAll(datakinds.Segments) + require.NoError(t, err) + assert.Equal(t, datastore.extractCollections(allData2), [][]ldstoretypes.KeyedItemDescriptor{flags, segments}) + }) +} + +func testInMemoryDataStoreGet(t *testing.T) { + const unknownKey = "unknown-key" + + forAllDataKindsCollection(t, func(t *testing.T, kind ldstoretypes.DataKind, makeItem datastore.dataItemCreator) { + t.Run("found", func(t *testing.T) { + store := makeMemoryStore() + require.NoError(t, store.Init(sharedtest.NewDataSetBuilder().Build())) + item := makeItem("key", 1, false) + _, err := store.Upsert(kind, "key", item) + assert.NoError(t, err) + + result, err := store.Get(kind, "key") + assert.NoError(t, err) + assert.Equal(t, item, result) + }) + + t.Run("not found", func(t *testing.T) { + mockLog := ldlogtest.NewMockLog() + mockLog.Loggers.SetMinLevel(ldlog.Info) + store := datastore.NewInMemoryDataStore(mockLog.Loggers) + require.NoError(t, store.Init(sharedtest.NewDataSetBuilder().Build())) + + result, err := store.Get(kind, unknownKey) + assert.NoError(t, err) + assert.Equal(t, ldstoretypes.ItemDescriptor{}.NotFound(), result) + + assert.Len(t, mockLog.GetAllOutput(), 0) + }) + + t.Run("not found - debug logging", func(t *testing.T) { + mockLog := ldlogtest.NewMockLog() + mockLog.Loggers.SetMinLevel(ldlog.Debug) + store := datastore.NewInMemoryDataStore(mockLog.Loggers) + require.NoError(t, store.Init(sharedtest.NewDataSetBuilder().Build())) + + result, err := store.Get(kind, unknownKey) + assert.NoError(t, err) + assert.Equal(t, ldstoretypes.ItemDescriptor{}.NotFound(), result) + + assert.Len(t, mockLog.GetAllOutput(), 1) + assert.Equal(t, + ldlogtest.MockLogItem{ + Level: ldlog.Debug, + Message: fmt.Sprintf(`Key %s not found in "%s"`, unknownKey, kind.GetName()), + }, + mockLog.GetAllOutput()[0], + ) + }) + }) +} + +func testInMemoryDataStoreGetAll(t *testing.T) { + store := makeMemoryStore() + require.NoError(t, store.Init(sharedtest.NewDataSetBuilder().Build())) + + result, err := store.GetAll(datakinds.Features) + require.NoError(t, err) 
+ assert.Len(t, result, 0) + + flag1 := ldbuilders.NewFlagBuilder("flag1").Build() + flag2 := ldbuilders.NewFlagBuilder("flag2").Build() + segment1 := ldbuilders.NewSegmentBuilder("segment1").Build() + _, err = store.Upsert(datakinds.Features, flag1.Key, sharedtest.FlagDescriptor(flag1)) + require.NoError(t, err) + _, err = store.Upsert(datakinds.Features, flag2.Key, sharedtest.FlagDescriptor(flag2)) + require.NoError(t, err) + _, err = store.Upsert(datakinds.Segments, segment1.Key, sharedtest.SegmentDescriptor(segment1)) + require.NoError(t, err) + + flags, err := store.GetAll(datakinds.Features) + require.NoError(t, err) + segments, err := store.GetAll(datakinds.Segments) + require.NoError(t, err) + + sort.Slice(flags, func(i, j int) bool { return flags[i].Key < flags[j].Key }) + expected := datastore.extractCollections(sharedtest.NewDataSetBuilder().Flags(flag1, flag2).Segments(segment1).Build()) + assert.Equal(t, expected, [][]ldstoretypes.KeyedItemDescriptor{flags, segments}) + + result, err = store.GetAll(datastore.unknownDataKind{}) + require.NoError(t, err) + assert.Len(t, result, 0) +} + +func testInMemoryDataStoreApplyDelta(t *testing.T) { + + forAllDataKindsCollection(t, func(t *testing.T, kind ldstoretypes.DataKind, makeItem collectionItemCreator, deleteItem collectionItemDeleter) { + + t.Run("upserts", func(t *testing.T) { + + t.Run("newer version", func(t *testing.T) { + store := makeMemoryStore() + store.SetBasis(sharedtest.NewDataSetBuilder().Build()) + + _, collection1 := makeItem("key", 10, false) + + updates := store.ApplyDelta(collection1) + assert.True(t, updates[kind]["key"]) + + item1a, collection1a := makeItem("key", 11, true) + + updates = store.ApplyDelta(collection1a) + assert.True(t, updates[kind]["key"]) + + result, err := store.Get(kind, "key") + require.NoError(t, err) + assert.Equal(t, item1a, result) + + }) + + t.Run("older version", func(t *testing.T) { + store := makeMemoryStore() + store.SetBasis(sharedtest.NewDataSetBuilder().Build()) + + item1Version := 10 + item1, collection1 := makeItem("key", item1Version, false) + + updates := store.ApplyDelta(collection1) + assert.True(t, updates[kind]["key"]) + + _, collection1a := makeItem("key", item1Version-1, true) + + updates = store.ApplyDelta(collection1a) + assert.False(t, updates[kind]["key"]) + + result, err := store.Get(kind, "key") + require.NoError(t, err) + assert.Equal(t, item1, result) + }) + + t.Run("same version", func(t *testing.T) { + store := makeMemoryStore() + store.SetBasis(sharedtest.NewDataSetBuilder().Build()) + + item1Version := 10 + item1, collection1 := makeItem("key", item1Version, false) + updated := store.ApplyDelta(collection1) + assert.True(t, updated[kind]["key"]) + + _, collection1a := makeItem("key", item1Version, true) + updated = store.ApplyDelta(collection1a) + assert.False(t, updated[kind]["key"]) + + result, err := store.Get(kind, "key") + require.NoError(t, err) + assert.Equal(t, item1, result) + }) + }) + + t.Run("deletes", func(t *testing.T) { + t.Run("newer version", func(t *testing.T) { + store := makeMemoryStore() + require.NoError(t, store.Init(sharedtest.NewDataSetBuilder().Build())) + + item1, collection1 := makeItem("key", 10, false) + updated := store.ApplyDelta(collection1) + assert.True(t, updated[kind]["key"]) + + item1a, collection1a := deleteItem("key", item1.Version+1) + updated = store.ApplyDelta(collection1a) + assert.True(t, updated[kind]["key"]) + + result, err := store.Get(kind, "key") + require.NoError(t, err) + assert.Equal(t, item1a, result) + }) 
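+ + // A deletion is represented as a tombstone: an ItemDescriptor whose Item is nil but whose + // Version is set. For example, deleteItem("key", 11) yields a descriptor equivalent to + // ldstoretypes.ItemDescriptor{Version: 11, Item: nil}, so ApplyDelta applies the same + // version comparison to deletes as it does to upserts.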
+ + t.Run("older version", func(t *testing.T) { + store := makeMemoryStore() + require.NoError(t, store.Init(sharedtest.NewDataSetBuilder().Build())) + + item1, collection1 := makeItem("key", 10, false) + updated := store.ApplyDelta(collection1) + assert.True(t, updated[kind]["key"]) + + _, collection1a := deleteItem("key", item1.Version-1) + updated = store.ApplyDelta(collection1a) + assert.False(t, updated[kind]["key"]) + + result, err := store.Get(kind, "key") + require.NoError(t, err) + assert.Equal(t, item1, result) + }) + + t.Run("same version", func(t *testing.T) { + store := makeMemoryStore() + require.NoError(t, store.Init(sharedtest.NewDataSetBuilder().Build())) + + item1, collection1 := makeItem("key", 10, false) + updated := store.ApplyDelta(collection1) + assert.True(t, updated[kind]["key"]) + + _, collection1a := deleteItem("key", item1.Version) + updated = store.ApplyDelta(collection1a) + assert.False(t, updated[kind]["key"]) + + result, err := store.Get(kind, "key") + require.NoError(t, err) + assert.Equal(t, item1, result) + }) + }) + }) +} + +func testInMemoryDataStoreDump(t *testing.T) { + +} From 0d2f8e93e93e7d2978d1e2e8ca93041d73816be3 Mon Sep 17 00:00:00 2001 From: Casey Waldren Date: Mon, 23 Sep 2024 14:36:30 -0700 Subject: [PATCH 48/62] update tests --- internal/datasystem/store.go | 10 +-- internal/memorystorev2/memory_store_test.go | 91 +++++++++++++++------ 2 files changed, 72 insertions(+), 29 deletions(-) diff --git a/internal/datasystem/store.go b/internal/datasystem/store.go index 07a9a8fa..7b18618d 100644 --- a/internal/datasystem/store.go +++ b/internal/datasystem/store.go @@ -1,13 +1,13 @@ package datasystem import ( + "github.com/launchdarkly/go-server-sdk/v7/internal/memorystorev2" "sync" "github.com/launchdarkly/go-server-sdk/v7/internal/fdv2proto" "github.com/launchdarkly/go-sdk-common/v3/ldlog" "github.com/launchdarkly/go-server-sdk/v7/interfaces" - "github.com/launchdarkly/go-server-sdk/v7/internal/datastore" "github.com/launchdarkly/go-server-sdk/v7/subsystems" "github.com/launchdarkly/go-server-sdk/v7/subsystems/ldstoretypes" ) @@ -45,7 +45,7 @@ type Store struct { // Source of truth for flag evaluations (once initialized). Before initialization, // the persistentStore may be used if configured. - memoryStore *datastore.MemoryStore + memoryStore *memorystorev2.Store // True if the data in the memory store may be persisted to the persistent store. This may be false // in the case of an initializer/synchronizer that doesn't want to propagate memory to the persistent store, @@ -53,7 +53,7 @@ type Store struct { persist bool // Points to the active store. Swapped upon initialization. - active subsystems.DataStore + active subsystems.ReadOnlyStore // Identifies the current data. 
selector fdv2proto.Selector @@ -93,7 +93,7 @@ func (p *persistentStore) writable() bool { func NewStore(loggers ldlog.Loggers) *Store { s := &Store{ persistentStore: nil, - memoryStore: datastore.NewInMemoryDataStore(loggers), + memoryStore: memorystorev2.New(loggers), loggers: loggers, selector: fdv2proto.NoSelector(), persist: false, @@ -222,7 +222,7 @@ func (s *Store) Commit() error { return nil } -func (s *Store) getActive() subsystems.DataStore { +func (s *Store) getActive() subsystems.ReadOnlyStore { s.mu.RLock() defer s.mu.RUnlock() return s.active diff --git a/internal/memorystorev2/memory_store_test.go b/internal/memorystorev2/memory_store_test.go index 238c176c..a602f1a1 100644 --- a/internal/memorystorev2/memory_store_test.go +++ b/internal/memorystorev2/memory_store_test.go @@ -1,8 +1,8 @@ package memorystorev2 import ( + "errors" "fmt" - "github.com/launchdarkly/go-server-sdk/v7/internal/datastore" "sort" "testing" @@ -106,7 +106,7 @@ func testInMemoryDataStoreSetBasis(t *testing.T) { segments, err := store.GetAll(datakinds.Segments) require.NoError(t, err) sort.Slice(flags, func(i, j int) bool { return flags[i].Key < flags[j].Key }) - assert.Equal(t, datastore.extractCollections(allData1), [][]ldstoretypes.KeyedItemDescriptor{flags, segments}) + assert.Equal(t, extractCollections(allData1), [][]ldstoretypes.KeyedItemDescriptor{flags, segments}) flag2 := ldbuilders.NewFlagBuilder("key2").Build() segment2 := ldbuilders.NewSegmentBuilder("key2").Build() @@ -118,20 +118,20 @@ func testInMemoryDataStoreSetBasis(t *testing.T) { require.NoError(t, err) segments, err = store.GetAll(datakinds.Segments) require.NoError(t, err) - assert.Equal(t, datastore.extractCollections(allData2), [][]ldstoretypes.KeyedItemDescriptor{flags, segments}) + assert.Equal(t, extractCollections(allData2), [][]ldstoretypes.KeyedItemDescriptor{flags, segments}) }) } func testInMemoryDataStoreGet(t *testing.T) { const unknownKey = "unknown-key" - forAllDataKindsCollection(t, func(t *testing.T, kind ldstoretypes.DataKind, makeItem datastore.dataItemCreator) { + forAllDataKindsCollection(t, func(t *testing.T, kind ldstoretypes.DataKind, makeItem collectionItemCreator, _ collectionItemDeleter) { t.Run("found", func(t *testing.T) { store := makeMemoryStore() - require.NoError(t, store.Init(sharedtest.NewDataSetBuilder().Build())) - item := makeItem("key", 1, false) - _, err := store.Upsert(kind, "key", item) - assert.NoError(t, err) + store.SetBasis(sharedtest.NewDataSetBuilder().Build()) + + item, collection := makeItem("key", 1, false) + store.ApplyDelta(collection) result, err := store.Get(kind, "key") assert.NoError(t, err) @@ -141,8 +141,8 @@ func testInMemoryDataStoreGet(t *testing.T) { t.Run("not found", func(t *testing.T) { mockLog := ldlogtest.NewMockLog() mockLog.Loggers.SetMinLevel(ldlog.Info) - store := datastore.NewInMemoryDataStore(mockLog.Loggers) - require.NoError(t, store.Init(sharedtest.NewDataSetBuilder().Build())) + store := New(mockLog.Loggers) + store.SetBasis(sharedtest.NewDataSetBuilder().Build()) result, err := store.Get(kind, unknownKey) assert.NoError(t, err) @@ -154,8 +154,8 @@ func testInMemoryDataStoreGet(t *testing.T) { t.Run("not found - debug logging", func(t *testing.T) { mockLog := ldlogtest.NewMockLog() mockLog.Loggers.SetMinLevel(ldlog.Debug) - store := datastore.NewInMemoryDataStore(mockLog.Loggers) - require.NoError(t, store.Init(sharedtest.NewDataSetBuilder().Build())) + store := New(mockLog.Loggers) + store.SetBasis(sharedtest.NewDataSetBuilder().Build()) result, err := 
store.Get(kind, unknownKey) assert.NoError(t, err) @@ -175,7 +175,7 @@ func testInMemoryDataStoreGet(t *testing.T) { func testInMemoryDataStoreGetAll(t *testing.T) { store := makeMemoryStore() - require.NoError(t, store.Init(sharedtest.NewDataSetBuilder().Build())) + store.SetBasis(sharedtest.NewDataSetBuilder().Build()) result, err := store.GetAll(datakinds.Features) require.NoError(t, err) @@ -184,12 +184,33 @@ func testInMemoryDataStoreGetAll(t *testing.T) { flag1 := ldbuilders.NewFlagBuilder("flag1").Build() flag2 := ldbuilders.NewFlagBuilder("flag2").Build() segment1 := ldbuilders.NewSegmentBuilder("segment1").Build() - _, err = store.Upsert(datakinds.Features, flag1.Key, sharedtest.FlagDescriptor(flag1)) - require.NoError(t, err) - _, err = store.Upsert(datakinds.Features, flag2.Key, sharedtest.FlagDescriptor(flag2)) - require.NoError(t, err) - _, err = store.Upsert(datakinds.Segments, segment1.Key, sharedtest.SegmentDescriptor(segment1)) - require.NoError(t, err) + + collection := []ldstoretypes.Collection{ + { + Kind: datakinds.Features, + Items: []ldstoretypes.KeyedItemDescriptor{ + { + Key: flag1.Key, + Item: sharedtest.FlagDescriptor(flag1), + }, + { + Key: flag2.Key, + Item: sharedtest.FlagDescriptor(flag2), + }, + }, + }, + { + Kind: datakinds.Segments, + Items: []ldstoretypes.KeyedItemDescriptor{ + { + Key: segment1.Key, + Item: sharedtest.SegmentDescriptor(segment1), + }, + }, + }, + } + + store.ApplyDelta(collection) flags, err := store.GetAll(datakinds.Features) require.NoError(t, err) @@ -197,14 +218,36 @@ func testInMemoryDataStoreGetAll(t *testing.T) { require.NoError(t, err) sort.Slice(flags, func(i, j int) bool { return flags[i].Key < flags[j].Key }) - expected := datastore.extractCollections(sharedtest.NewDataSetBuilder().Flags(flag1, flag2).Segments(segment1).Build()) + expected := extractCollections(sharedtest.NewDataSetBuilder().Flags(flag1, flag2).Segments(segment1).Build()) assert.Equal(t, expected, [][]ldstoretypes.KeyedItemDescriptor{flags, segments}) - result, err = store.GetAll(datastore.unknownDataKind{}) + result, err = store.GetAll(unknownDataKind{}) require.NoError(t, err) assert.Len(t, result, 0) } +func extractCollections(allData []ldstoretypes.Collection) [][]ldstoretypes.KeyedItemDescriptor { + var ret [][]ldstoretypes.KeyedItemDescriptor + for _, coll := range allData { + ret = append(ret, coll.Items) + } + return ret +} + +type unknownDataKind struct{} + +func (k unknownDataKind) GetName() string { + return "unknown" +} + +func (k unknownDataKind) Serialize(item ldstoretypes.ItemDescriptor) []byte { + return nil +} + +func (k unknownDataKind) Deserialize(data []byte) (ldstoretypes.ItemDescriptor, error) { + return ldstoretypes.ItemDescriptor{}, errors.New("not implemented") +} + func testInMemoryDataStoreApplyDelta(t *testing.T) { forAllDataKindsCollection(t, func(t *testing.T, kind ldstoretypes.DataKind, makeItem collectionItemCreator, deleteItem collectionItemDeleter) { @@ -273,7 +316,7 @@ func testInMemoryDataStoreApplyDelta(t *testing.T) { t.Run("deletes", func(t *testing.T) { t.Run("newer version", func(t *testing.T) { store := makeMemoryStore() - require.NoError(t, store.Init(sharedtest.NewDataSetBuilder().Build())) + store.SetBasis(sharedtest.NewDataSetBuilder().Build()) item1, collection1 := makeItem("key", 10, false) updated := store.ApplyDelta(collection1) @@ -290,7 +333,7 @@ func testInMemoryDataStoreApplyDelta(t *testing.T) { t.Run("older version", func(t *testing.T) { store := makeMemoryStore() - require.NoError(t, 
store.Init(sharedtest.NewDataSetBuilder().Build())) + store.SetBasis(sharedtest.NewDataSetBuilder().Build()) item1, collection1 := makeItem("key", 10, false) updated := store.ApplyDelta(collection1) @@ -307,7 +350,7 @@ func testInMemoryDataStoreApplyDelta(t *testing.T) { t.Run("same version", func(t *testing.T) { store := makeMemoryStore() - require.NoError(t, store.Init(sharedtest.NewDataSetBuilder().Build())) + store.SetBasis(sharedtest.NewDataSetBuilder().Build()) item1, collection1 := makeItem("key", 10, false) updated := store.ApplyDelta(collection1) From d853f88c8a8f92472f117a1f8b78b73b9478d966 Mon Sep 17 00:00:00 2001 From: Casey Waldren Date: Mon, 23 Sep 2024 16:38:26 -0700 Subject: [PATCH 49/62] more unit tests --- internal/datasystem/store.go | 2 +- internal/memorystorev2/memory_store.go | 2 +- internal/memorystorev2/memory_store_test.go | 228 ++++++++++++++++---- 3 files changed, 192 insertions(+), 40 deletions(-) diff --git a/internal/datasystem/store.go b/internal/datasystem/store.go index 7b18618d..0a7c4f3e 100644 --- a/internal/datasystem/store.go +++ b/internal/datasystem/store.go @@ -217,7 +217,7 @@ func (s *Store) Commit() error { defer s.mu.RUnlock() if s.shouldPersist() { - return s.persistentStore.impl.Init(s.memoryStore.Dump()) + return s.persistentStore.impl.Init(s.memoryStore.GetAllKinds()) } return nil } diff --git a/internal/memorystorev2/memory_store.go b/internal/memorystorev2/memory_store.go index 0e50fa37..3151299b 100644 --- a/internal/memorystorev2/memory_store.go +++ b/internal/memorystorev2/memory_store.go @@ -116,7 +116,7 @@ func (s *Store) getAll(kind ldstoretypes.DataKind) []ldstoretypes.KeyedItemDescr return itemsOut } -func (s *Store) Dump() []ldstoretypes.Collection { +func (s *Store) GetAllKinds() []ldstoretypes.Collection { s.RLock() var allData []ldstoretypes.Collection diff --git a/internal/memorystorev2/memory_store_test.go b/internal/memorystorev2/memory_store_test.go index a602f1a1..f3fdf8cd 100644 --- a/internal/memorystorev2/memory_store_test.go +++ b/internal/memorystorev2/memory_store_test.go @@ -18,52 +18,53 @@ import ( ) func TestInMemoryDataStore(t *testing.T) { - t.Run("Get", testInMemoryDataStoreGet) - t.Run("GetAll", testInMemoryDataStoreGetAll) - t.Run("SetBasis", testInMemoryDataStoreSetBasis) - t.Run("ApplyDelta", testInMemoryDataStoreApplyDelta) - t.Run("Dump", testInMemoryDataStoreDump) + t.Run("Get", testGet) + t.Run("GetAll", testGetAll) + t.Run("GetAllKinds", testGetAllKinds) + t.Run("SetBasis", testSetBasis) + t.Run("ApplyDelta", testApplyDelta) } func makeMemoryStore() *Store { return New(sharedtest.NewTestLoggers()) } -// The dataItemCreator/forAllDataKinds helpers work for testing the FDv1-style of interacting with the memory store, -// e.g. Upsert/Init. With FDv2, the store is initialized with SetBasis and updates are applied atomically in batches -// with ApplyDelta. In order to easily inject data into the store, and then make assertions based on the result of -// calling Get, we need a slightly more involved pattern. -// The main difference is that forAllDataKindsCollection now returns the ItemDescriptor, along with a collection -// containing only that item. That way, the collection can be passed to ApplyDelta, and the ItemDescriptor can be -// used when making assertions using the result of Get. +// Used to create a segment/flag. Returns the individual item, and a collection slice +// containing only that item. 
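+// For example (key and version are illustrative), the flag-kind creator behaves like: +// +// desc, coll := makeItem("my-flag", 1, false) +// // coll == []ldstoretypes.Collection{{Kind: datakinds.Features, +// // Items: []ldstoretypes.KeyedItemDescriptor{{Key: "my-flag", Item: desc}}}}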
type collectionItemCreator func(key string, version int, otherProperty bool) (ldstoretypes.ItemDescriptor, []ldstoretypes.Collection) +// Used to delete a segment/flag. Returns the individual item, and a collection slice +// containing only that item. type collectionItemDeleter func(key string, version int) (ldstoretypes.ItemDescriptor, []ldstoretypes.Collection) -func makeCollection(kind ldstoretypes.DataKind, key string, item ldstoretypes.ItemDescriptor) []ldstoretypes.Collection { +func makeCollections(kind ldstoretypes.DataKind, key string, item ldstoretypes.ItemDescriptor) []ldstoretypes.Collection { return []ldstoretypes.Collection{ - { - Kind: kind, - Items: []ldstoretypes.KeyedItemDescriptor{ - { - Key: key, - Item: item, - }, + makeCollection(kind, key, item), + } +} + +func makeCollection(kind ldstoretypes.DataKind, key string, item ldstoretypes.ItemDescriptor) ldstoretypes.Collection { + return ldstoretypes.Collection{ + Kind: kind, + Items: []ldstoretypes.KeyedItemDescriptor{ + { + Key: key, + Item: item, }, }, } } -func forAllDataKindsCollection(t *testing.T, test func(*testing.T, ldstoretypes.DataKind, collectionItemCreator, collectionItemDeleter)) { +func forAllDataKinds(t *testing.T, test func(*testing.T, ldstoretypes.DataKind, collectionItemCreator, collectionItemDeleter)) { test(t, datakinds.Features, func(key string, version int, otherProperty bool) (ldstoretypes.ItemDescriptor, []ldstoretypes.Collection) { flag := ldbuilders.NewFlagBuilder(key).Version(version).On(otherProperty).Build() descriptor := sharedtest.FlagDescriptor(flag) - return descriptor, makeCollection(datakinds.Features, flag.Key, descriptor) + return descriptor, makeCollections(datakinds.Features, flag.Key, descriptor) }, func(key string, version int) (ldstoretypes.ItemDescriptor, []ldstoretypes.Collection) { descriptor := ldstoretypes.ItemDescriptor{Version: version, Item: nil} - return descriptor, makeCollection(datakinds.Features, key, descriptor) + return descriptor, makeCollections(datakinds.Features, key, descriptor) }) test(t, datakinds.Segments, func(key string, version int, otherProperty bool) (ldstoretypes.ItemDescriptor, []ldstoretypes.Collection) { segment := ldbuilders.NewSegmentBuilder(key).Version(version).Build() @@ -72,18 +73,15 @@ func forAllDataKindsCollection(t *testing.T, test func(*testing.T, ldstoretypes. } descriptor := sharedtest.SegmentDescriptor(segment) - return descriptor, makeCollection(datakinds.Segments, segment.Key, descriptor) + return descriptor, makeCollections(datakinds.Segments, segment.Key, descriptor) }, func(key string, version int) (ldstoretypes.ItemDescriptor, []ldstoretypes.Collection) { descriptor := ldstoretypes.ItemDescriptor{Version: version, Item: nil} - return descriptor, makeCollection(datakinds.Segments, key, descriptor) + return descriptor, makeCollections(datakinds.Segments, key, descriptor) }) } -func testInMemoryDataStoreSetBasis(t *testing.T) { - // SetBasis is currently an alias for Init, so the tests should be the same. Once there is no longer a use-case - // for Init (when fdv1 data system is removed, the Init tests can be deleted.) 
- +func testSetBasis(t *testing.T) { t.Run("makes store initialized", func(t *testing.T) { store := makeMemoryStore() allData := sharedtest.NewDataSetBuilder().Flags(ldbuilders.NewFlagBuilder("key").Build()).Build() @@ -122,10 +120,10 @@ func testInMemoryDataStoreSetBasis(t *testing.T) { }) } -func testInMemoryDataStoreGet(t *testing.T) { +func testGet(t *testing.T) { const unknownKey = "unknown-key" - forAllDataKindsCollection(t, func(t *testing.T, kind ldstoretypes.DataKind, makeItem collectionItemCreator, _ collectionItemDeleter) { + forAllDataKinds(t, func(t *testing.T, kind ldstoretypes.DataKind, makeItem collectionItemCreator, _ collectionItemDeleter) { t.Run("found", func(t *testing.T) { store := makeMemoryStore() store.SetBasis(sharedtest.NewDataSetBuilder().Build()) @@ -173,7 +171,7 @@ func testInMemoryDataStoreGet(t *testing.T) { }) } -func testInMemoryDataStoreGetAll(t *testing.T) { +func testGetAll(t *testing.T) { store := makeMemoryStore() store.SetBasis(sharedtest.NewDataSetBuilder().Build()) @@ -248,12 +246,9 @@ func (k unknownDataKind) Deserialize(data []byte) (ldstoretypes.ItemDescriptor, return ldstoretypes.ItemDescriptor{}, errors.New("not implemented") } -func testInMemoryDataStoreApplyDelta(t *testing.T) { - - forAllDataKindsCollection(t, func(t *testing.T, kind ldstoretypes.DataKind, makeItem collectionItemCreator, deleteItem collectionItemDeleter) { - +func testApplyDelta(t *testing.T) { + forAllDataKinds(t, func(t *testing.T, kind ldstoretypes.DataKind, makeItem collectionItemCreator, deleteItem collectionItemDeleter) { t.Run("upserts", func(t *testing.T) { - t.Run("newer version", func(t *testing.T) { store := makeMemoryStore() store.SetBasis(sharedtest.NewDataSetBuilder().Build()) @@ -368,6 +363,163 @@ func testInMemoryDataStoreApplyDelta(t *testing.T) { }) } -func testInMemoryDataStoreDump(t *testing.T) { +func testGetAllKinds(t *testing.T) { + t.Run("uninitialized store", func(t *testing.T) { + store := makeMemoryStore() + collections := store.GetAllKinds() + assert.Empty(t, collections) + }) + + t.Run("initialized but empty store", func(t *testing.T) { + store := makeMemoryStore() + store.SetBasis(sharedtest.NewDataSetBuilder().Build()) + + collections := store.GetAllKinds() + assert.Len(t, collections, 2) + assert.Empty(t, collections[0].Items) + assert.Empty(t, collections[1].Items) + }) + + t.Run("initialized store with data of a single kind", func(t *testing.T) { + forAllDataKinds(t, func(t *testing.T, kind ldstoretypes.DataKind, makeItem collectionItemCreator, _ collectionItemDeleter) { + store := makeMemoryStore() + store.SetBasis(sharedtest.NewDataSetBuilder().Build()) + + item1, collection1 := makeItem("key1", 1, false) + + store.ApplyDelta(collection1) + + collections := store.GetAllKinds() + + assert.Len(t, collections, 2) + + for _, coll := range collections { + if coll.Kind == kind { + assert.Len(t, coll.Items, 1) + assert.Equal(t, item1, coll.Items[0].Item) + } else { + assert.Empty(t, coll.Items) + } + } + }) + }) + + t.Run("initialized store with data of multiple kinds", func(t *testing.T) { + store := makeMemoryStore() + store.SetBasis(sharedtest.NewDataSetBuilder().Build()) + + flag1 := ldbuilders.NewFlagBuilder("flag1").Build() + segment1 := ldbuilders.NewSegmentBuilder("segment1").Build() + expectedCollection := []ldstoretypes.Collection{ + makeCollection(datakinds.Features, flag1.Key, sharedtest.FlagDescriptor(flag1)), + makeCollection(datakinds.Segments, segment1.Key, sharedtest.SegmentDescriptor(segment1)), + } + + 
store.ApplyDelta(expectedCollection) + + gotCollections := store.GetAllKinds() + + assert.ElementsMatch(t, expectedCollection, gotCollections) + }) + + t.Run("multiple deltas applied", func(t *testing.T) { + forAllDataKinds(t, func(t *testing.T, kind ldstoretypes.DataKind, makeItem collectionItemCreator, deleteItem collectionItemDeleter) { + store := makeMemoryStore() + + store.SetBasis(sharedtest.NewDataSetBuilder().Build()) + + _, collection1 := makeItem("key1", 1, false) + store.ApplyDelta(collection1) + + // The collection slice we get from GetAllKinds is going to contain the specific segment or flag + // collection we're creating here in the test, but also an empty collection for the other kind. + expected := []ldstoretypes.Collection{collection1[0]} + if kind == datakinds.Features { + expected = append(expected, ldstoretypes.Collection{Kind: datakinds.Segments, Items: nil}) + } else { + expected = append(expected, ldstoretypes.Collection{Kind: datakinds.Features, Items: nil}) + } + + assert.ElementsMatch(t, expected, store.GetAllKinds()) + + _, collection1a := makeItem("key1", 2, false) + store.ApplyDelta(collection1a) + expected[0] = collection1a[0] + assert.ElementsMatch(t, expected, store.GetAllKinds()) + + _, collection1b := deleteItem("key1", 3) + store.ApplyDelta(collection1b) + expected[0] = collection1b[0] + assert.ElementsMatch(t, expected, store.GetAllKinds()) + }) + }) + + t.Run("deltas containing multiple item kinds", func(t *testing.T) { + + store := makeMemoryStore() + + store.SetBasis(sharedtest.NewDataSetBuilder().Build()) + + // Flag1 will be deleted. + flag1 := ldbuilders.NewFlagBuilder("flag1").Build() + + // Flag2 is a control and won't be changed. + flag2 := ldbuilders.NewFlagBuilder("flag2").Build() + + // Segment1 will be upserted. + segment1 := ldbuilders.NewSegmentBuilder("segment1").Build() + + collection1 := []ldstoretypes.Collection{ + { + Kind: datakinds.Features, + Items: []ldstoretypes.KeyedItemDescriptor{ + { + Key: flag1.Key, + Item: sharedtest.FlagDescriptor(flag1), + }, + { + Key: flag2.Key, + Item: sharedtest.FlagDescriptor(flag2), + }, + }, + }, + makeCollection(datakinds.Segments, segment1.Key, sharedtest.SegmentDescriptor(segment1)), + } + + store.ApplyDelta(collection1) + + assert.ElementsMatch(t, collection1, store.GetAllKinds()) + + // Bumping the segment version is sufficient for an upsert. + // To indicate that there's no change to flag2, we simply don't pass it in the collection.
+ segment1.Version += 1 + collection2 := []ldstoretypes.Collection{ + // Delete flag1 + makeCollection(datakinds.Features, flag1.Key, ldstoretypes.ItemDescriptor{Version: flag1.Version + 1, Item: nil}), + // Upsert segment1 + makeCollection(datakinds.Segments, segment1.Key, sharedtest.SegmentDescriptor(segment1)), + } + + store.ApplyDelta(collection2) + + expected := []ldstoretypes.Collection{ + { + Kind: datakinds.Features, + Items: []ldstoretypes.KeyedItemDescriptor{ + { + Key: flag1.Key, + Item: ldstoretypes.ItemDescriptor{Version: flag1.Version + 1, Item: nil}, + }, + { + Key: flag2.Key, + Item: sharedtest.FlagDescriptor(flag2), + }, + }, + }, + makeCollection(datakinds.Segments, segment1.Key, sharedtest.SegmentDescriptor(segment1)), + } + + assert.ElementsMatch(t, expected, store.GetAllKinds()) + }) } From 3e5a64d502ec1fdda99031a5e19d3ed5cae7a58a Mon Sep 17 00:00:00 2001 From: Casey Waldren Date: Mon, 23 Sep 2024 17:08:57 -0700 Subject: [PATCH 50/62] chore: introduce memorystorev2 --- internal/memorystorev2/memory_store.go | 190 +++++ .../memory_store_benchmark_test.go | 236 ++++++++ internal/memorystorev2/memory_store_test.go | 525 ++++++++++++++++++ 3 files changed, 951 insertions(+) create mode 100644 internal/memorystorev2/memory_store.go create mode 100644 internal/memorystorev2/memory_store_benchmark_test.go create mode 100644 internal/memorystorev2/memory_store_test.go diff --git a/internal/memorystorev2/memory_store.go b/internal/memorystorev2/memory_store.go new file mode 100644 index 00000000..ad34a8ce --- /dev/null +++ b/internal/memorystorev2/memory_store.go @@ -0,0 +1,190 @@ +// Package memorystorev2 contains an implementation for a transactional memory store suitable +// for the FDv2 architecture. +package memorystorev2 + +import ( + "sync" + + "github.com/launchdarkly/go-sdk-common/v3/ldlog" + "github.com/launchdarkly/go-server-sdk/v7/subsystems/ldstoretypes" +) + +// Store provides an abstraction that makes flag and segment data available to other components. +// It accepts updates in batches - for instance, flag A was upserted while segment B was deleted - +// such that the contents of the store are consistent with a single payload version at any given time. +// +// The terminology used is "basis" and "deltas". First, the store's basis is set. This is the initial +// data, upon which subsequent deltas will be applied. Whenever the basis is set, any existing data +// is discarded. +// +// Deltas are then applied to the store. A single delta update transforms the contents of the store +// atomically. The idea is that there's never a moment when the state of the store could be inconsistent +// with regard to the authoritative LaunchDarkly SaaS. +// +// Implementation notes: +// +// We deliberately do not use a defer pattern to manage the lock in these methods. Using defer adds a small but +// consistent overhead, and these store methods may be called with very high frequency (at least in the case of +// Get and IsInitialized). To make it safe to hold a lock without deferring the unlock, we must ensure that +// there is only one return point from each method, and that there is no operation that could possibly cause a +// panic after the lock has been acquired. See notes on performance in CONTRIBUTING.md. +type Store struct { + allData map[ldstoretypes.DataKind]map[string]ldstoretypes.ItemDescriptor + isInitialized bool + sync.RWMutex + loggers ldlog.Loggers +} + +// New creates a new Store. The Store is uninitialized until SetBasis is called.
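+// +// A minimal usage sketch (loggers, basis, delta, and kind are illustrative placeholders): +// +// store := New(loggers) +// store.SetBasis(basis) // full data set; the store becomes initialized +// updated := store.ApplyDelta(delta) // atomic batch of upserts/deletes +// item, _ := store.Get(kind, "flag-key")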
+func New(loggers ldlog.Loggers) *Store { + return &Store{ + allData: make(map[ldstoretypes.DataKind]map[string]ldstoretypes.ItemDescriptor), + isInitialized: false, + loggers: loggers, + } +} + +// SetBasis sets the basis of the Store. Any existing data is discarded. +// When the basis is set, the store becomes initialized. +func (s *Store) SetBasis(allData []ldstoretypes.Collection) { + s.Lock() + + s.allData = make(map[ldstoretypes.DataKind]map[string]ldstoretypes.ItemDescriptor) + + for _, coll := range allData { + items := make(map[string]ldstoretypes.ItemDescriptor) + for _, item := range coll.Items { + items[item.Key] = item.Item + } + s.allData[coll.Kind] = items + } + + s.isInitialized = true + + s.Unlock() +} + +// ApplyDelta applies a delta update to the store. ApplyDelta should not be called until +// SetBasis has been called at least once. The return value indicates, for each DataKind +// present in the delta, whether the item in the delta was actually updated or not. +// +// An item is updated only if the version of the item in the delta is greater than the version +// in the store, or it wasn't already present. +func (s *Store) ApplyDelta(allData []ldstoretypes.Collection) map[ldstoretypes.DataKind]map[string]bool { + + updatedMap := make(map[ldstoretypes.DataKind]map[string]bool) + + s.Lock() + + for _, coll := range allData { + for _, item := range coll.Items { + updated := s.upsert(coll.Kind, item.Key, item.Item) + if updatedMap[coll.Kind] == nil { + updatedMap[coll.Kind] = make(map[string]bool) + } + updatedMap[coll.Kind][item.Key] = updated + } + } + + s.Unlock() + + return updatedMap +} + +// Get retrieves an item of the specified kind from the store. If the item is not found, then ItemDescriptor{}.NotFound() +// is returned with a nil error. +func (s *Store) Get(kind ldstoretypes.DataKind, key string) (ldstoretypes.ItemDescriptor, error) { + s.RLock() + + var coll map[string]ldstoretypes.ItemDescriptor + var item ldstoretypes.ItemDescriptor + var ok bool + coll, ok = s.allData[kind] + if ok { + item, ok = coll[key] + } + + s.RUnlock() + + if ok { + return item, nil + } + if s.loggers.IsDebugEnabled() { + s.loggers.Debugf(`Key %s not found in "%s"`, key, kind) + } + return ldstoretypes.ItemDescriptor{}.NotFound(), nil +} + +// GetAll retrieves all items of the specified kind from the store. +func (s *Store) GetAll(kind ldstoretypes.DataKind) ([]ldstoretypes.KeyedItemDescriptor, error) { + s.RLock() + + itemsOut := s.getAll(kind) + + s.RUnlock() + + return itemsOut, nil +} + +func (s *Store) getAll(kind ldstoretypes.DataKind) []ldstoretypes.KeyedItemDescriptor { + var itemsOut []ldstoretypes.KeyedItemDescriptor + if itemsMap, ok := s.allData[kind]; ok { + if len(itemsMap) > 0 { + itemsOut = make([]ldstoretypes.KeyedItemDescriptor, 0, len(itemsMap)) + for key, item := range itemsMap { + itemsOut = append(itemsOut, ldstoretypes.KeyedItemDescriptor{Key: key, Item: item}) + } + } + } + return itemsOut +} + +// GetAllKinds retrieves all items of all kinds from the store. This is different from calling +// GetAll for each kind because it provides a consistent view at a single point in time. 
+func (s *Store) GetAllKinds() []ldstoretypes.Collection { + s.RLock() + + var allData []ldstoretypes.Collection + for kind := range s.allData { + itemsOut := s.getAll(kind) + allData = append(allData, ldstoretypes.Collection{Kind: kind, Items: itemsOut}) + } + + s.RUnlock() + + return allData +} + +func (s *Store) upsert( + kind ldstoretypes.DataKind, + key string, + newItem ldstoretypes.ItemDescriptor) bool { + var coll map[string]ldstoretypes.ItemDescriptor + var ok bool + shouldUpdate := true + updated := false + if coll, ok = s.allData[kind]; ok { + if item, ok := coll[key]; ok { + if item.Version >= newItem.Version { + shouldUpdate = false + } + } + } else { + s.allData[kind] = map[string]ldstoretypes.ItemDescriptor{key: newItem} + shouldUpdate = false // because we already initialized the map with the new item + updated = true + } + if shouldUpdate { + coll[key] = newItem + updated = true + } + return updated +} + +// IsInitialized returns true if the store has been initialized with a basis. +func (s *Store) IsInitialized() bool { + s.RLock() + ret := s.isInitialized + s.RUnlock() + return ret +} diff --git a/internal/memorystorev2/memory_store_benchmark_test.go b/internal/memorystorev2/memory_store_benchmark_test.go new file mode 100644 index 00000000..ba076b5f --- /dev/null +++ b/internal/memorystorev2/memory_store_benchmark_test.go @@ -0,0 +1,236 @@ +package memorystorev2 + +import ( + "fmt" + "testing" + + "github.com/launchdarkly/go-sdk-common/v3/ldlog" + "github.com/launchdarkly/go-server-sdk-evaluation/v3/ldbuilders" + "github.com/launchdarkly/go-server-sdk-evaluation/v3/ldmodel" + "github.com/launchdarkly/go-server-sdk/v7/internal/datakinds" + "github.com/launchdarkly/go-server-sdk/v7/internal/sharedtest" + "github.com/launchdarkly/go-server-sdk/v7/subsystems/ldstoretypes" +) + +// These benchmarks cover data store operations with the in-memory store. +// +// There's no reason why the performance for flags should be different from segments, but to be truly +// implementation-neutral we'll benchmark each data kind separately anyway. 
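+// +// Each benchmark assigns its result to one of the package-level sinks declared below so that +// the compiler cannot elide the call under measurement; a typical benchmark body is, e.g.: +// +// inMemoryStoreBenchmarkResultItem, _ = env.store.Get(dataKind, env.targetFlagKey)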
+ +var ( // assign to package-level variables in benchmarks so function calls won't be optimized away + inMemoryStoreBenchmarkResultErr error + inMemoryStoreBenchmarkResultItem ldstoretypes.ItemDescriptor + inMemoryStoreBenchmarkResultItems []ldstoretypes.KeyedItemDescriptor +) + +type inMemoryStoreBenchmarkEnv struct { + store *Store + flags []*ldmodel.FeatureFlag + segments []*ldmodel.Segment + targetFlagKey string + targetSegmentKey string + targetFlagCopy *ldmodel.FeatureFlag + targetSegmentCopy *ldmodel.Segment + unknownKey string + initData []ldstoretypes.Collection +} + +func newInMemoryStoreBenchmarkEnv() *inMemoryStoreBenchmarkEnv { + return &inMemoryStoreBenchmarkEnv{ + store: New(ldlog.NewDisabledLoggers()), + } +} + +func (env *inMemoryStoreBenchmarkEnv) setUp(bc inMemoryStoreBenchmarkCase) { + env.flags = make([]*ldmodel.FeatureFlag, bc.numFlags) + for i := 0; i < bc.numFlags; i++ { + flag := ldbuilders.NewFlagBuilder(fmt.Sprintf("flag-%d", i)).Version(10).Build() + env.flags[i] = &flag + } + for _, flag := range env.flags { + env.store.Upsert(datakinds.Features, flag.Key, sharedtest.FlagDescriptor(*flag)) + } + f := env.flags[bc.numFlags/2] // arbitrarily pick a flag in the middle of the list + env.targetFlagKey = f.Key + f1 := ldbuilders.NewFlagBuilder(f.Key).Version(f.Version).Build() + env.targetFlagCopy = &f1 + + env.segments = make([]*ldmodel.Segment, bc.numSegments) + for i := 0; i < bc.numSegments; i++ { + segment := ldbuilders.NewSegmentBuilder(fmt.Sprintf("segment-%d", i)).Version(10).Build() + env.segments[i] = &segment + } + for _, segment := range env.segments { + env.store.Upsert(datakinds.Segments, segment.Key, sharedtest.SegmentDescriptor(*segment)) + } + s := env.segments[bc.numSegments/2] + env.targetSegmentKey = s.Key + s1 := ldbuilders.NewSegmentBuilder(s.Key).Version(s.Version).Build() + env.targetSegmentCopy = &s1 + + env.unknownKey = "no-match" +} + +func setupInitData(env *inMemoryStoreBenchmarkEnv) { + flags := make([]ldstoretypes.KeyedItemDescriptor, len(env.flags)) + for i, f := range env.flags { + flags[i] = ldstoretypes.KeyedItemDescriptor{Key: f.Key, Item: sharedtest.FlagDescriptor(*f)} + } + segments := make([]ldstoretypes.KeyedItemDescriptor, len(env.segments)) + for i, s := range env.segments { + segments[i] = ldstoretypes.KeyedItemDescriptor{Key: s.Key, Item: sharedtest.SegmentDescriptor(*s)} + } + env.initData = []ldstoretypes.Collection{ + {Kind: datakinds.Features, Items: flags}, + {Kind: datakinds.Segments, Items: segments}, + } +} + +func (env *inMemoryStoreBenchmarkEnv) tearDown() { +} + +type inMemoryStoreBenchmarkCase struct { + numFlags int + numSegments int + withInitData bool +} + +var inMemoryStoreBenchmarkCases = []inMemoryStoreBenchmarkCase{ + { + numFlags: 1, + numSegments: 1, + }, + { + numFlags: 100, + numSegments: 100, + }, + { + numFlags: 1000, + numSegments: 1000, + }, +} + +func benchmarkInMemoryStore( + b *testing.B, + cases []inMemoryStoreBenchmarkCase, + setupAction func(*inMemoryStoreBenchmarkEnv), + benchmarkAction func(*inMemoryStoreBenchmarkEnv, inMemoryStoreBenchmarkCase), +) { + env := newInMemoryStoreBenchmarkEnv() + for _, bc := range cases { + env.setUp(bc) + + if setupAction != nil { + setupAction(env) + } + + b.Run(fmt.Sprintf("%+v", bc), func(b *testing.B) { + for i := 0; i < b.N; i++ { + benchmarkAction(env, bc) + } + }) + env.tearDown() + } +} + +func BenchmarkInMemoryStoreInit(b *testing.B) { + benchmarkInMemoryStore(b, inMemoryStoreBenchmarkCases, setupInitData, func(env *inMemoryStoreBenchmarkEnv, bc
inMemoryStoreBenchmarkCase) { + inMemoryStoreBenchmarkResultErr = env.store.Init(env.initData) + }) +} + +func BenchmarkInMemoryStoreGetFlag(b *testing.B) { + dataKind := datakinds.Features + benchmarkInMemoryStore(b, inMemoryStoreBenchmarkCases, nil, func(env *inMemoryStoreBenchmarkEnv, bc inMemoryStoreBenchmarkCase) { + inMemoryStoreBenchmarkResultItem, _ = env.store.Get(dataKind, env.targetFlagKey) + }) +} + +func BenchmarkInMemoryStoreGetSegment(b *testing.B) { + dataKind := datakinds.Segments + benchmarkInMemoryStore(b, inMemoryStoreBenchmarkCases, nil, func(env *inMemoryStoreBenchmarkEnv, bc inMemoryStoreBenchmarkCase) { + inMemoryStoreBenchmarkResultItem, _ = env.store.Get(dataKind, env.targetSegmentKey) + }) +} + +func BenchmarkInMemoryStoreGetUnknownFlag(b *testing.B) { + dataKind := datakinds.Features + benchmarkInMemoryStore(b, inMemoryStoreBenchmarkCases, nil, func(env *inMemoryStoreBenchmarkEnv, bc inMemoryStoreBenchmarkCase) { + inMemoryStoreBenchmarkResultItem, _ = env.store.Get(dataKind, env.unknownKey) + }) +} + +func BenchmarkInMemoryStoreGetUnknownSegment(b *testing.B) { + dataKind := datakinds.Segments + benchmarkInMemoryStore(b, inMemoryStoreBenchmarkCases, nil, func(env *inMemoryStoreBenchmarkEnv, bc inMemoryStoreBenchmarkCase) { + inMemoryStoreBenchmarkResultItem, _ = env.store.Get(dataKind, env.unknownKey) + }) +} + +func BenchmarkInMemoryStoreGetAllFlags(b *testing.B) { + dataKind := datakinds.Features + benchmarkInMemoryStore(b, inMemoryStoreBenchmarkCases, nil, func(env *inMemoryStoreBenchmarkEnv, bc inMemoryStoreBenchmarkCase) { + inMemoryStoreBenchmarkResultItems, _ = env.store.GetAll(dataKind) + }) +} + +func BenchmarkInMemoryStoreGetAllSegments(b *testing.B) { + dataKind := datakinds.Segments + benchmarkInMemoryStore(b, inMemoryStoreBenchmarkCases, nil, func(env *inMemoryStoreBenchmarkEnv, bc inMemoryStoreBenchmarkCase) { + inMemoryStoreBenchmarkResultItems, _ = env.store.GetAll(dataKind) + }) +} + +func BenchmarkInMemoryStoreUpsertExistingFlagSuccess(b *testing.B) { + dataKind := datakinds.Features + benchmarkInMemoryStore(b, inMemoryStoreBenchmarkCases, nil, func(env *inMemoryStoreBenchmarkEnv, bc inMemoryStoreBenchmarkCase) { + env.targetFlagCopy.Version++ + _, inMemoryStoreBenchmarkResultErr = env.store.Upsert(dataKind, env.targetFlagKey, + sharedtest.FlagDescriptor(*env.targetFlagCopy)) + }) +} + +func BenchmarkInMemoryStoreUpsertExistingFlagFailure(b *testing.B) { + dataKind := datakinds.Features + benchmarkInMemoryStore(b, inMemoryStoreBenchmarkCases, nil, func(env *inMemoryStoreBenchmarkEnv, bc inMemoryStoreBenchmarkCase) { + env.targetFlagCopy.Version-- + _, inMemoryStoreBenchmarkResultErr = env.store.Upsert(dataKind, env.targetFlagKey, + sharedtest.FlagDescriptor(*env.targetFlagCopy)) + }) +} + +func BenchmarkInMemoryStoreUpsertNewFlag(b *testing.B) { + dataKind := datakinds.Features + benchmarkInMemoryStore(b, inMemoryStoreBenchmarkCases, nil, func(env *inMemoryStoreBenchmarkEnv, bc inMemoryStoreBenchmarkCase) { + env.targetFlagCopy.Key = env.unknownKey + _, inMemoryStoreBenchmarkResultErr = env.store.Upsert(dataKind, env.unknownKey, + sharedtest.FlagDescriptor(*env.targetFlagCopy)) + }) +} + +func BenchmarkInMemoryStoreUpsertExistingSegmentSuccess(b *testing.B) { + dataKind := datakinds.Segments + benchmarkInMemoryStore(b, inMemoryStoreBenchmarkCases, nil, func(env *inMemoryStoreBenchmarkEnv, bc inMemoryStoreBenchmarkCase) { + env.targetSegmentCopy.Version++ + _, inMemoryStoreBenchmarkResultErr = env.store.Upsert(dataKind, env.targetSegmentKey, + 
sharedtest.SegmentDescriptor(*env.targetSegmentCopy)) + }) +} + +func BenchmarkInMemoryStoreUpsertExistingSegmentFailure(b *testing.B) { + dataKind := datakinds.Segments + benchmarkInMemoryStore(b, inMemoryStoreBenchmarkCases, nil, func(env *inMemoryStoreBenchmarkEnv, bc inMemoryStoreBenchmarkCase) { + env.targetSegmentCopy.Version-- + _, inMemoryStoreBenchmarkResultErr = env.store.Upsert(dataKind, env.targetSegmentKey, + sharedtest.SegmentDescriptor(*env.targetSegmentCopy)) + }) +} + +func BenchmarkInMemoryStoreUpsertNewSegment(b *testing.B) { + dataKind := datakinds.Segments + benchmarkInMemoryStore(b, inMemoryStoreBenchmarkCases, nil, func(env *inMemoryStoreBenchmarkEnv, bc inMemoryStoreBenchmarkCase) { + env.targetSegmentCopy.Key = env.unknownKey + _, inMemoryStoreBenchmarkResultErr = env.store.Upsert(dataKind, env.unknownKey, + sharedtest.SegmentDescriptor(*env.targetSegmentCopy)) + }) +} diff --git a/internal/memorystorev2/memory_store_test.go b/internal/memorystorev2/memory_store_test.go new file mode 100644 index 00000000..f3fdf8cd --- /dev/null +++ b/internal/memorystorev2/memory_store_test.go @@ -0,0 +1,525 @@ +package memorystorev2 + +import ( + "errors" + "fmt" + "sort" + "testing" + + "github.com/launchdarkly/go-sdk-common/v3/ldlog" + "github.com/launchdarkly/go-sdk-common/v3/ldlogtest" + "github.com/launchdarkly/go-server-sdk-evaluation/v3/ldbuilders" + "github.com/launchdarkly/go-server-sdk/v7/internal/datakinds" + "github.com/launchdarkly/go-server-sdk/v7/internal/sharedtest" + "github.com/launchdarkly/go-server-sdk/v7/subsystems/ldstoretypes" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestInMemoryDataStore(t *testing.T) { + t.Run("Get", testGet) + t.Run("GetAll", testGetAll) + t.Run("GetAllKinds", testGetAllKinds) + t.Run("SetBasis", testSetBasis) + t.Run("ApplyDelta", testApplyDelta) +} + +func makeMemoryStore() *Store { + return New(sharedtest.NewTestLoggers()) +} + +// Used to create a segment/flag. Returns the individual item, and a collection slice +// containing only that item. +type collectionItemCreator func(key string, version int, otherProperty bool) (ldstoretypes.ItemDescriptor, []ldstoretypes.Collection) + +// Used to delete a segment/flag. Returns the individual item, and a collection slice +// containing only that item. 
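+// The returned descriptor is a tombstone - a nil Item carrying the deletion version. For +// example (key and version are illustrative): +// +// desc, _ := deleteItem("my-flag", 2) +// // desc == ldstoretypes.ItemDescriptor{Version: 2, Item: nil}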
+type collectionItemDeleter func(key string, version int) (ldstoretypes.ItemDescriptor, []ldstoretypes.Collection) + +func makeCollections(kind ldstoretypes.DataKind, key string, item ldstoretypes.ItemDescriptor) []ldstoretypes.Collection { + return []ldstoretypes.Collection{ + makeCollection(kind, key, item), + } +} + +func makeCollection(kind ldstoretypes.DataKind, key string, item ldstoretypes.ItemDescriptor) ldstoretypes.Collection { + return ldstoretypes.Collection{ + Kind: kind, + Items: []ldstoretypes.KeyedItemDescriptor{ + { + Key: key, + Item: item, + }, + }, + } +} + +func forAllDataKinds(t *testing.T, test func(*testing.T, ldstoretypes.DataKind, collectionItemCreator, collectionItemDeleter)) { + test(t, datakinds.Features, func(key string, version int, otherProperty bool) (ldstoretypes.ItemDescriptor, []ldstoretypes.Collection) { + flag := ldbuilders.NewFlagBuilder(key).Version(version).On(otherProperty).Build() + descriptor := sharedtest.FlagDescriptor(flag) + + return descriptor, makeCollections(datakinds.Features, flag.Key, descriptor) + }, func(key string, version int) (ldstoretypes.ItemDescriptor, []ldstoretypes.Collection) { + descriptor := ldstoretypes.ItemDescriptor{Version: version, Item: nil} + + return descriptor, makeCollections(datakinds.Features, key, descriptor) + }) + test(t, datakinds.Segments, func(key string, version int, otherProperty bool) (ldstoretypes.ItemDescriptor, []ldstoretypes.Collection) { + segment := ldbuilders.NewSegmentBuilder(key).Version(version).Build() + if otherProperty { + segment.Included = []string{"arbitrary value"} + } + descriptor := sharedtest.SegmentDescriptor(segment) + + return descriptor, makeCollections(datakinds.Segments, segment.Key, descriptor) + }, func(key string, version int) (ldstoretypes.ItemDescriptor, []ldstoretypes.Collection) { + descriptor := ldstoretypes.ItemDescriptor{Version: version, Item: nil} + + return descriptor, makeCollections(datakinds.Segments, key, descriptor) + }) +} + +func testSetBasis(t *testing.T) { + t.Run("makes store initialized", func(t *testing.T) { + store := makeMemoryStore() + allData := sharedtest.NewDataSetBuilder().Flags(ldbuilders.NewFlagBuilder("key").Build()).Build() + + store.SetBasis(allData) + + assert.True(t, store.IsInitialized()) + }) + + t.Run("completely replaces previous data", func(t *testing.T) { + store := makeMemoryStore() + flag1 := ldbuilders.NewFlagBuilder("key1").Build() + segment1 := ldbuilders.NewSegmentBuilder("key1").Build() + allData1 := sharedtest.NewDataSetBuilder().Flags(flag1).Segments(segment1).Build() + + store.SetBasis(allData1) + + flags, err := store.GetAll(datakinds.Features) + require.NoError(t, err) + segments, err := store.GetAll(datakinds.Segments) + require.NoError(t, err) + sort.Slice(flags, func(i, j int) bool { return flags[i].Key < flags[j].Key }) + assert.Equal(t, extractCollections(allData1), [][]ldstoretypes.KeyedItemDescriptor{flags, segments}) + + flag2 := ldbuilders.NewFlagBuilder("key2").Build() + segment2 := ldbuilders.NewSegmentBuilder("key2").Build() + allData2 := sharedtest.NewDataSetBuilder().Flags(flag2).Segments(segment2).Build() + + store.SetBasis(allData2) + + flags, err = store.GetAll(datakinds.Features) + require.NoError(t, err) + segments, err = store.GetAll(datakinds.Segments) + require.NoError(t, err) + assert.Equal(t, extractCollections(allData2), [][]ldstoretypes.KeyedItemDescriptor{flags, segments}) + }) +} + +func testGet(t *testing.T) { + const unknownKey = "unknown-key" + + forAllDataKinds(t, func(t *testing.T, kind 
ldstoretypes.DataKind, makeItem collectionItemCreator, _ collectionItemDeleter) { + t.Run("found", func(t *testing.T) { + store := makeMemoryStore() + store.SetBasis(sharedtest.NewDataSetBuilder().Build()) + + item, collection := makeItem("key", 1, false) + store.ApplyDelta(collection) + + result, err := store.Get(kind, "key") + assert.NoError(t, err) + assert.Equal(t, item, result) + }) + + t.Run("not found", func(t *testing.T) { + mockLog := ldlogtest.NewMockLog() + mockLog.Loggers.SetMinLevel(ldlog.Info) + store := New(mockLog.Loggers) + store.SetBasis(sharedtest.NewDataSetBuilder().Build()) + + result, err := store.Get(kind, unknownKey) + assert.NoError(t, err) + assert.Equal(t, ldstoretypes.ItemDescriptor{}.NotFound(), result) + + assert.Len(t, mockLog.GetAllOutput(), 0) + }) + + t.Run("not found - debug logging", func(t *testing.T) { + mockLog := ldlogtest.NewMockLog() + mockLog.Loggers.SetMinLevel(ldlog.Debug) + store := New(mockLog.Loggers) + store.SetBasis(sharedtest.NewDataSetBuilder().Build()) + + result, err := store.Get(kind, unknownKey) + assert.NoError(t, err) + assert.Equal(t, ldstoretypes.ItemDescriptor{}.NotFound(), result) + + assert.Len(t, mockLog.GetAllOutput(), 1) + assert.Equal(t, + ldlogtest.MockLogItem{ + Level: ldlog.Debug, + Message: fmt.Sprintf(`Key %s not found in "%s"`, unknownKey, kind.GetName()), + }, + mockLog.GetAllOutput()[0], + ) + }) + }) +} + +func testGetAll(t *testing.T) { + store := makeMemoryStore() + store.SetBasis(sharedtest.NewDataSetBuilder().Build()) + + result, err := store.GetAll(datakinds.Features) + require.NoError(t, err) + assert.Len(t, result, 0) + + flag1 := ldbuilders.NewFlagBuilder("flag1").Build() + flag2 := ldbuilders.NewFlagBuilder("flag2").Build() + segment1 := ldbuilders.NewSegmentBuilder("segment1").Build() + + collection := []ldstoretypes.Collection{ + { + Kind: datakinds.Features, + Items: []ldstoretypes.KeyedItemDescriptor{ + { + Key: flag1.Key, + Item: sharedtest.FlagDescriptor(flag1), + }, + { + Key: flag2.Key, + Item: sharedtest.FlagDescriptor(flag2), + }, + }, + }, + { + Kind: datakinds.Segments, + Items: []ldstoretypes.KeyedItemDescriptor{ + { + Key: segment1.Key, + Item: sharedtest.SegmentDescriptor(segment1), + }, + }, + }, + } + + store.ApplyDelta(collection) + + flags, err := store.GetAll(datakinds.Features) + require.NoError(t, err) + segments, err := store.GetAll(datakinds.Segments) + require.NoError(t, err) + + sort.Slice(flags, func(i, j int) bool { return flags[i].Key < flags[j].Key }) + expected := extractCollections(sharedtest.NewDataSetBuilder().Flags(flag1, flag2).Segments(segment1).Build()) + assert.Equal(t, expected, [][]ldstoretypes.KeyedItemDescriptor{flags, segments}) + + result, err = store.GetAll(unknownDataKind{}) + require.NoError(t, err) + assert.Len(t, result, 0) +} + +func extractCollections(allData []ldstoretypes.Collection) [][]ldstoretypes.KeyedItemDescriptor { + var ret [][]ldstoretypes.KeyedItemDescriptor + for _, coll := range allData { + ret = append(ret, coll.Items) + } + return ret +} + +type unknownDataKind struct{} + +func (k unknownDataKind) GetName() string { + return "unknown" +} + +func (k unknownDataKind) Serialize(item ldstoretypes.ItemDescriptor) []byte { + return nil +} + +func (k unknownDataKind) Deserialize(data []byte) (ldstoretypes.ItemDescriptor, error) { + return ldstoretypes.ItemDescriptor{}, errors.New("not implemented") +} + +func testApplyDelta(t *testing.T) { + forAllDataKinds(t, func(t *testing.T, kind ldstoretypes.DataKind, makeItem collectionItemCreator, 
deleteItem collectionItemDeleter) { + t.Run("upserts", func(t *testing.T) { + t.Run("newer version", func(t *testing.T) { + store := makeMemoryStore() + store.SetBasis(sharedtest.NewDataSetBuilder().Build()) + + _, collection1 := makeItem("key", 10, false) + + updates := store.ApplyDelta(collection1) + assert.True(t, updates[kind]["key"]) + + item1a, collection1a := makeItem("key", 11, true) + + updates = store.ApplyDelta(collection1a) + assert.True(t, updates[kind]["key"]) + + result, err := store.Get(kind, "key") + require.NoError(t, err) + assert.Equal(t, item1a, result) + + }) + + t.Run("older version", func(t *testing.T) { + store := makeMemoryStore() + store.SetBasis(sharedtest.NewDataSetBuilder().Build()) + + item1Version := 10 + item1, collection1 := makeItem("key", item1Version, false) + + updates := store.ApplyDelta(collection1) + assert.True(t, updates[kind]["key"]) + + _, collection1a := makeItem("key", item1Version-1, true) + + updates = store.ApplyDelta(collection1a) + assert.False(t, updates[kind]["key"]) + + result, err := store.Get(kind, "key") + require.NoError(t, err) + assert.Equal(t, item1, result) + }) + + t.Run("same version", func(t *testing.T) { + store := makeMemoryStore() + store.SetBasis(sharedtest.NewDataSetBuilder().Build()) + + item1Version := 10 + item1, collection1 := makeItem("key", item1Version, false) + updated := store.ApplyDelta(collection1) + assert.True(t, updated[kind]["key"]) + + _, collection1a := makeItem("key", item1Version, true) + updated = store.ApplyDelta(collection1a) + assert.False(t, updated[kind]["key"]) + + result, err := store.Get(kind, "key") + require.NoError(t, err) + assert.Equal(t, item1, result) + }) + }) + + t.Run("deletes", func(t *testing.T) { + t.Run("newer version", func(t *testing.T) { + store := makeMemoryStore() + store.SetBasis(sharedtest.NewDataSetBuilder().Build()) + + item1, collection1 := makeItem("key", 10, false) + updated := store.ApplyDelta(collection1) + assert.True(t, updated[kind]["key"]) + + item1a, collection1a := deleteItem("key", item1.Version+1) + updated = store.ApplyDelta(collection1a) + assert.True(t, updated[kind]["key"]) + + result, err := store.Get(kind, "key") + require.NoError(t, err) + assert.Equal(t, item1a, result) + }) + + t.Run("older version", func(t *testing.T) { + store := makeMemoryStore() + store.SetBasis(sharedtest.NewDataSetBuilder().Build()) + + item1, collection1 := makeItem("key", 10, false) + updated := store.ApplyDelta(collection1) + assert.True(t, updated[kind]["key"]) + + _, collection1a := deleteItem("key", item1.Version-1) + updated = store.ApplyDelta(collection1a) + assert.False(t, updated[kind]["key"]) + + result, err := store.Get(kind, "key") + require.NoError(t, err) + assert.Equal(t, item1, result) + }) + + t.Run("same version", func(t *testing.T) { + store := makeMemoryStore() + store.SetBasis(sharedtest.NewDataSetBuilder().Build()) + + item1, collection1 := makeItem("key", 10, false) + updated := store.ApplyDelta(collection1) + assert.True(t, updated[kind]["key"]) + + _, collection1a := deleteItem("key", item1.Version) + updated = store.ApplyDelta(collection1a) + assert.False(t, updated[kind]["key"]) + + result, err := store.Get(kind, "key") + require.NoError(t, err) + assert.Equal(t, item1, result) + }) + }) + }) +} + +func testGetAllKinds(t *testing.T) { + t.Run("uninitialized store", func(t *testing.T) { + store := makeMemoryStore() + collections := store.GetAllKinds() + assert.Empty(t, collections) + }) + + t.Run("initialized but empty store", func(t *testing.T) { + 
store := makeMemoryStore() + store.SetBasis(sharedtest.NewDataSetBuilder().Build()) + + collections := store.GetAllKinds() + assert.Len(t, collections, 2) + assert.Empty(t, collections[0].Items) + assert.Empty(t, collections[1].Items) + }) + + t.Run("initialized store with data of a single kind", func(t *testing.T) { + forAllDataKinds(t, func(t *testing.T, kind ldstoretypes.DataKind, makeItem collectionItemCreator, _ collectionItemDeleter) { + store := makeMemoryStore() + store.SetBasis(sharedtest.NewDataSetBuilder().Build()) + + item1, collection1 := makeItem("key1", 1, false) + + store.ApplyDelta(collection1) + + collections := store.GetAllKinds() + + assert.Len(t, collections, 2) + + for _, coll := range collections { + if coll.Kind == kind { + assert.Len(t, coll.Items, 1) + assert.Equal(t, item1, coll.Items[0].Item) + } else { + assert.Empty(t, coll.Items) + } + } + }) + }) + + t.Run("initialized store with data of multiple kinds", func(t *testing.T) { + store := makeMemoryStore() + store.SetBasis(sharedtest.NewDataSetBuilder().Build()) + + flag1 := ldbuilders.NewFlagBuilder("flag1").Build() + segment1 := ldbuilders.NewSegmentBuilder("segment1").Build() + + expectedCollection := []ldstoretypes.Collection{ + makeCollection(datakinds.Features, flag1.Key, sharedtest.FlagDescriptor(flag1)), + makeCollection(datakinds.Segments, segment1.Key, sharedtest.SegmentDescriptor(segment1)), + } + + store.ApplyDelta(expectedCollection) + + gotCollections := store.GetAllKinds() + + assert.ElementsMatch(t, expectedCollection, gotCollections) + }) + + t.Run("multiple deltas applied", func(t *testing.T) { + forAllDataKinds(t, func(t *testing.T, kind ldstoretypes.DataKind, makeItem collectionItemCreator, deleteItem collectionItemDeleter) { + store := makeMemoryStore() + + store.SetBasis(sharedtest.NewDataSetBuilder().Build()) + + _, collection1 := makeItem("key1", 1, false) + store.ApplyDelta(collection1) + + // The collection slice we get from GetAllKinds is going to contain the specific segment or flag + // collection we're creating here in the test, but also an empty collection for the other kind. + expected := []ldstoretypes.Collection{collection1[0]} + if kind == datakinds.Features { + expected = append(expected, ldstoretypes.Collection{Kind: datakinds.Segments, Items: nil}) + } else { + expected = append(expected, ldstoretypes.Collection{Kind: datakinds.Features, Items: nil}) + } + + assert.ElementsMatch(t, expected, store.GetAllKinds()) + + _, collection1a := makeItem("key1", 2, false) + store.ApplyDelta(collection1a) + expected[0] = collection1a[0] + assert.ElementsMatch(t, expected, store.GetAllKinds()) + + _, collection1b := deleteItem("key1", 3) + store.ApplyDelta(collection1b) + expected[0] = collection1b[0] + assert.ElementsMatch(t, expected, store.GetAllKinds()) + }) + }) + + t.Run("deltas containing multiple item kinds", func(t *testing.T) { + + store := makeMemoryStore() + + store.SetBasis(sharedtest.NewDataSetBuilder().Build()) + + // Flag1 will be deleted. + flag1 := ldbuilders.NewFlagBuilder("flag1").Build() + + // Flag2 is a control and won't be changed. + flag2 := ldbuilders.NewFlagBuilder("flag2").Build() + + // Segment1 will be upserted.
+ segment1 := ldbuilders.NewSegmentBuilder("segment1").Build() + + collection1 := []ldstoretypes.Collection{ + { + Kind: datakinds.Features, + Items: []ldstoretypes.KeyedItemDescriptor{ + { + Key: flag1.Key, + Item: sharedtest.FlagDescriptor(flag1), + }, + { + Key: flag2.Key, + Item: sharedtest.FlagDescriptor(flag2), + }, + }, + }, + makeCollection(datakinds.Segments, segment1.Key, sharedtest.SegmentDescriptor(segment1)), + } + + store.ApplyDelta(collection1) + + assert.ElementsMatch(t, collection1, store.GetAllKinds()) + + // Bumping the segment version is sufficient for an upsert. + // To indicate that there's no change to flag2, we simply don't pass it in the collection. + segment1.Version += 1 + collection2 := []ldstoretypes.Collection{ + // Delete flag1 + makeCollection(datakinds.Features, flag1.Key, ldstoretypes.ItemDescriptor{Version: flag1.Version + 1, Item: nil}), + // Upsert segment1 + makeCollection(datakinds.Segments, segment1.Key, sharedtest.SegmentDescriptor(segment1)), + } + + store.ApplyDelta(collection2) + + expected := []ldstoretypes.Collection{ + { + Kind: datakinds.Features, + Items: []ldstoretypes.KeyedItemDescriptor{ + { + Key: flag1.Key, + Item: ldstoretypes.ItemDescriptor{Version: flag1.Version + 1, Item: nil}, + }, + { + Key: flag2.Key, + Item: sharedtest.FlagDescriptor(flag2), + }, + }, + }, + makeCollection(datakinds.Segments, segment1.Key, sharedtest.SegmentDescriptor(segment1)), + } + + assert.ElementsMatch(t, expected, store.GetAllKinds()) + }) +} From 84b493c7e0309ac63ed5c795e556fb3605ca77c0 Mon Sep 17 00:00:00 2001 From: Casey Waldren Date: Mon, 23 Sep 2024 17:27:57 -0700 Subject: [PATCH 51/62] benchmarks --- .../memory_store_benchmark_test.go | 57 ++++++++++++------- 1 file changed, 37 insertions(+), 20 deletions(-) diff --git a/internal/memorystorev2/memory_store_benchmark_test.go b/internal/memorystorev2/memory_store_benchmark_test.go index ba076b5f..a1644bbc 100644 --- a/internal/memorystorev2/memory_store_benchmark_test.go +++ b/internal/memorystorev2/memory_store_benchmark_test.go @@ -18,9 +18,9 @@ import ( // implementation-neutral we'll benchmark each data kind separately anyway. 
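Before the benchmark changes below, one distillation of the test coverage above: every upsert and delete case reduces to a single version-gating rule. A minimal standalone sketch of that rule, with illustrative names rather than the SDK's actual implementation:

package main

import "fmt"

type item struct {
	version int
	deleted bool
}

// apply mirrors the rule the tests pin down: an incoming item (or deletion
// tombstone) wins only if its version is strictly greater than what's stored.
func apply(store map[string]item, key string, incoming item) bool {
	if existing, ok := store[key]; ok && existing.version >= incoming.version {
		return false // same or older version: ignored
	}
	store[key] = incoming // deletions are kept as versioned tombstones
	return true
}

func main() {
	store := map[string]item{}
	fmt.Println(apply(store, "key", item{version: 10}))                // true: first write
	fmt.Println(apply(store, "key", item{version: 10}))                // false: same version
	fmt.Println(apply(store, "key", item{version: 9}))                 // false: older version
	fmt.Println(apply(store, "key", item{version: 11, deleted: true})) // true: newer tombstone
}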
var ( // assign to package-level variables in benchmarks so function calls won't be optimized away - inMemoryStoreBenchmarkResultErr error inMemoryStoreBenchmarkResultItem ldstoretypes.ItemDescriptor inMemoryStoreBenchmarkResultItems []ldstoretypes.KeyedItemDescriptor + updates map[ldstoretypes.DataKind]map[string]bool ) type inMemoryStoreBenchmarkEnv struct { @@ -47,9 +47,7 @@ func (env *inMemoryStoreBenchmarkEnv) setUp(bc inMemoryStoreBenchmarkCase) { flag := ldbuilders.NewFlagBuilder(fmt.Sprintf("flag-%d", i)).Version(10).Build() env.flags[i] = &flag } - for _, flag := range env.flags { - env.store.Upsert(datakinds.Features, flag.Key, sharedtest.FlagDescriptor(*flag)) - } + f := env.flags[bc.numFlags/2] // arbitrarily pick a flag in the middle of the list env.targetFlagKey = f.Key f1 := ldbuilders.NewFlagBuilder(f.Key).Version(f.Version).Build() @@ -60,15 +58,34 @@ func (env *inMemoryStoreBenchmarkEnv) setUp(bc inMemoryStoreBenchmarkCase) { segment := ldbuilders.NewSegmentBuilder(fmt.Sprintf("segment-%d", i)).Version(10).Build() env.segments[i] = &segment } - for _, segment := range env.segments { - env.store.Upsert(datakinds.Segments, segment.Key, sharedtest.SegmentDescriptor(*segment)) - } + s := env.segments[bc.numSegments/2] env.targetSegmentKey = s.Key s1 := ldbuilders.NewSegmentBuilder(s.Key).Version(s.Version).Build() env.targetSegmentCopy = &s1 env.unknownKey = "no-match" + + basis := []ldstoretypes.Collection{ + { + Kind: datakinds.Features, + Items: make([]ldstoretypes.KeyedItemDescriptor, len(env.flags)), + }, + { + Kind: datakinds.Segments, + Items: make([]ldstoretypes.KeyedItemDescriptor, len(env.segments)), + }, + } + + for i, f := range env.flags { + basis[0].Items[i] = ldstoretypes.KeyedItemDescriptor{Key: f.Key, Item: sharedtest.FlagDescriptor(*f)} + } + + for i, s := range env.segments { + basis[1].Items[i] = ldstoretypes.KeyedItemDescriptor{Key: s.Key, Item: sharedtest.SegmentDescriptor(*s)} + } + + env.store.SetBasis(basis) } func setupInitData(env *inMemoryStoreBenchmarkEnv) { @@ -135,7 +152,7 @@ func benchmarkInMemoryStore( func BenchmarkInMemoryStoreInit(b *testing.B) { benchmarkInMemoryStore(b, inMemoryStoreBenchmarkCases, setupInitData, func(env *inMemoryStoreBenchmarkEnv, bc inMemoryStoreBenchmarkCase) { - inMemoryStoreBenchmarkResultErr = env.store.Init(env.initData) + env.store.SetBasis(env.initData) }) } @@ -185,8 +202,8 @@ func BenchmarkInMemoryStoreUpsertExistingFlagSuccess(b *testing.B) { dataKind := datakinds.Features benchmarkInMemoryStore(b, inMemoryStoreBenchmarkCases, nil, func(env *inMemoryStoreBenchmarkEnv, bc inMemoryStoreBenchmarkCase) { env.targetFlagCopy.Version++ - _, inMemoryStoreBenchmarkResultErr = env.store.Upsert(dataKind, env.targetFlagKey, - sharedtest.FlagDescriptor(*env.targetFlagCopy)) + delta := makeCollections(dataKind, env.targetFlagKey, sharedtest.FlagDescriptor(*env.targetFlagCopy)) + updates = env.store.ApplyDelta(delta) }) } @@ -194,8 +211,8 @@ func BenchmarkInMemoryStoreUpsertExistingFlagFailure(b *testing.B) { dataKind := datakinds.Features benchmarkInMemoryStore(b, inMemoryStoreBenchmarkCases, nil, func(env *inMemoryStoreBenchmarkEnv, bc inMemoryStoreBenchmarkCase) { env.targetFlagCopy.Version-- - _, inMemoryStoreBenchmarkResultErr = env.store.Upsert(dataKind, env.targetFlagKey, - sharedtest.FlagDescriptor(*env.targetFlagCopy)) + delta := makeCollections(dataKind, env.targetFlagKey, sharedtest.FlagDescriptor(*env.targetFlagCopy)) + updates = env.store.ApplyDelta(delta) }) } @@ -203,8 +220,8 @@ func 
BenchmarkInMemoryStoreUpsertNewFlag(b *testing.B) { dataKind := datakinds.Features benchmarkInMemoryStore(b, inMemoryStoreBenchmarkCases, nil, func(env *inMemoryStoreBenchmarkEnv, bc inMemoryStoreBenchmarkCase) { env.targetFlagCopy.Key = env.unknownKey - _, inMemoryStoreBenchmarkResultErr = env.store.Upsert(dataKind, env.unknownKey, - sharedtest.FlagDescriptor(*env.targetFlagCopy)) + delta := makeCollections(dataKind, env.unknownKey, sharedtest.FlagDescriptor(*env.targetFlagCopy)) + updates = env.store.ApplyDelta(delta) }) } @@ -212,8 +229,8 @@ func BenchmarkInMemoryStoreUpsertExistingSegmentSuccess(b *testing.B) { dataKind := datakinds.Segments benchmarkInMemoryStore(b, inMemoryStoreBenchmarkCases, nil, func(env *inMemoryStoreBenchmarkEnv, bc inMemoryStoreBenchmarkCase) { env.targetSegmentCopy.Version++ - _, inMemoryStoreBenchmarkResultErr = env.store.Upsert(dataKind, env.targetSegmentKey, - sharedtest.SegmentDescriptor(*env.targetSegmentCopy)) + delta := makeCollections(dataKind, env.targetSegmentKey, sharedtest.SegmentDescriptor(*env.targetSegmentCopy)) + updates = env.store.ApplyDelta(delta) }) } @@ -221,8 +238,8 @@ func BenchmarkInMemoryStoreUpsertExistingSegmentFailure(b *testing.B) { dataKind := datakinds.Segments benchmarkInMemoryStore(b, inMemoryStoreBenchmarkCases, nil, func(env *inMemoryStoreBenchmarkEnv, bc inMemoryStoreBenchmarkCase) { env.targetSegmentCopy.Version-- - _, inMemoryStoreBenchmarkResultErr = env.store.Upsert(dataKind, env.targetSegmentKey, - sharedtest.SegmentDescriptor(*env.targetSegmentCopy)) + delta := makeCollections(dataKind, env.targetSegmentKey, sharedtest.SegmentDescriptor(*env.targetSegmentCopy)) + updates = env.store.ApplyDelta(delta) }) } @@ -230,7 +247,7 @@ func BenchmarkInMemoryStoreUpsertNewSegment(b *testing.B) { dataKind := datakinds.Segments benchmarkInMemoryStore(b, inMemoryStoreBenchmarkCases, nil, func(env *inMemoryStoreBenchmarkEnv, bc inMemoryStoreBenchmarkCase) { env.targetSegmentCopy.Key = env.unknownKey - _, inMemoryStoreBenchmarkResultErr = env.store.Upsert(dataKind, env.unknownKey, - sharedtest.SegmentDescriptor(*env.targetSegmentCopy)) + delta := makeCollections(dataKind, env.unknownKey, sharedtest.SegmentDescriptor(*env.targetSegmentCopy)) + updates = env.store.ApplyDelta(delta) }) } From 6432563441b32962324c47304fa60ab06e25e71a Mon Sep 17 00:00:00 2001 From: Casey Waldren Date: Mon, 23 Sep 2024 17:29:38 -0700 Subject: [PATCH 52/62] lints --- internal/memorystorev2/memory_store.go | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/internal/memorystorev2/memory_store.go b/internal/memorystorev2/memory_store.go index ad34a8ce..b99f92d8 100644 --- a/internal/memorystorev2/memory_store.go +++ b/internal/memorystorev2/memory_store.go @@ -71,7 +71,6 @@ func (s *Store) SetBasis(allData []ldstoretypes.Collection) { // An item is updated only if the version of the item in the delta is greater than the version // in the store, or it wasn't already present. func (s *Store) ApplyDelta(allData []ldstoretypes.Collection) map[ldstoretypes.DataKind]map[string]bool { - updatedMap := make(map[ldstoretypes.DataKind]map[string]bool) s.Lock() @@ -91,8 +90,8 @@ func (s *Store) ApplyDelta(allData []ldstoretypes.Collection) map[ldstoretypes.D return updatedMap } -// Get retrieves an item of the specified kind from the store. If the item is not found, then ItemDescriptor{}.NotFound() -// is returned with a nil error. +// Get retrieves an item of the specified kind from the store. 
If the item is not found, then +// ItemDescriptor{}.NotFound() is returned with a nil error. func (s *Store) Get(kind ldstoretypes.DataKind, key string) (ldstoretypes.ItemDescriptor, error) { s.RLock() @@ -144,7 +143,7 @@ func (s *Store) getAll(kind ldstoretypes.DataKind) []ldstoretypes.KeyedItemDescr func (s *Store) GetAllKinds() []ldstoretypes.Collection { s.RLock() - var allData []ldstoretypes.Collection + allData := make([]ldstoretypes.Collection, 0, len(s.allData)) for kind := range s.allData { itemsOut := s.getAll(kind) allData = append(allData, ldstoretypes.Collection{Kind: kind, Items: itemsOut}) From 746c1a89018ce38caa043424f66a50e3fef6f1df Mon Sep 17 00:00:00 2001 From: Casey Waldren Date: Mon, 23 Sep 2024 18:03:06 -0700 Subject: [PATCH 53/62] use defer pattern for locks --- internal/memorystorev2/memory_store.go | 30 ++++++-------------------- 1 file changed, 7 insertions(+), 23 deletions(-) diff --git a/internal/memorystorev2/memory_store.go b/internal/memorystorev2/memory_store.go index b99f92d8..b55f9206 100644 --- a/internal/memorystorev2/memory_store.go +++ b/internal/memorystorev2/memory_store.go @@ -20,14 +20,6 @@ import ( // Deltas are then applied to the store. A single delta update transforms the contents of the store // atomically. The idea is that there's never a moment when the state of the store could be inconsistent // with regard to the authoritative LaunchDarkly SaaS. -// -// Implementation notes: -// -// We deliberately do not use a defer pattern to manage the lock in these methods. Using defer adds a small but -// consistent overhead, and these store methods may be called with very high frequency (at least in the case of -// Get and IsInitialized). To make it safe to hold a lock without deferring the unlock, we must ensure that -// there is only one return point from each method, and that there is no operation that could possibly cause a -// panic after the lock has been acquired. See notes on performance in CONTRIBUTING.md. type Store struct { allData map[ldstoretypes.DataKind]map[string]ldstoretypes.ItemDescriptor isInitialized bool @@ -48,6 +40,7 @@ func New(loggers ldlog.Loggers) *Store { // When the basis is set, the store becomes initialized. func (s *Store) SetBasis(allData []ldstoretypes.Collection) { s.Lock() + defer s.Unlock() s.allData = make(map[ldstoretypes.DataKind]map[string]ldstoretypes.ItemDescriptor) @@ -60,8 +53,6 @@ func (s *Store) SetBasis(allData []ldstoretypes.Collection) { } s.isInitialized = true - - s.Unlock() } // ApplyDelta applies a delta update to the store. ApplyDelta should not be called until @@ -74,6 +65,7 @@ func (s *Store) ApplyDelta(allData []ldstoretypes.Collection) map[ldstoretypes.D updatedMap := make(map[ldstoretypes.DataKind]map[string]bool) s.Lock() + defer s.Unlock() for _, coll := range allData { for _, item := range coll.Items { @@ -85,8 +77,6 @@ func (s *Store) ApplyDelta(allData []ldstoretypes.Collection) map[ldstoretypes.D } } - s.Unlock() - return updatedMap } @@ -117,12 +107,8 @@ func (s *Store) Get(kind ldstoretypes.DataKind, key string) (ldstoretypes.ItemDe // GetAll retrieves all items of the specified kind from the store. 
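For context on the trade-off this patch reverses: the deleted implementation note argued that manual unlocking avoids defer's small fixed per-call overhead on hot paths such as Get and IsInitialized. A minimal side-by-side of the two styles, illustrative rather than SDK code:

package main

import (
	"fmt"
	"sync"
)

type counter struct {
	sync.RWMutex
	n int
}

// Manual unlock: marginally cheaper, but every return path must unlock, and a
// panic between RLock and RUnlock leaves the lock held forever.
func (c *counter) getManual() int {
	c.RLock()
	v := c.n
	c.RUnlock()
	return v
}

// Deferred unlock: panic-safe and tolerant of early returns, which is why it
// is the idiomatic default unless profiling proves the overhead matters.
func (c *counter) getDeferred() int {
	c.RLock()
	defer c.RUnlock()
	return c.n
}

func main() {
	c := &counter{n: 42}
	fmt.Println(c.getManual(), c.getDeferred())
}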
func (s *Store) GetAll(kind ldstoretypes.DataKind) ([]ldstoretypes.KeyedItemDescriptor, error) { s.RLock() - - itemsOut := s.getAll(kind) - - s.RUnlock() - - return itemsOut, nil + defer s.RUnlock() + return s.getAll(kind), nil } func (s *Store) getAll(kind ldstoretypes.DataKind) []ldstoretypes.KeyedItemDescriptor { @@ -142,6 +128,7 @@ func (s *Store) getAll(kind ldstoretypes.DataKind) []ldstoretypes.KeyedItemDescr // GetAll for each kind because it provides a consistent view at a single point in time. func (s *Store) GetAllKinds() []ldstoretypes.Collection { s.RLock() + defer s.RUnlock() allData := make([]ldstoretypes.Collection, 0, len(s.allData)) for kind := range s.allData { @@ -149,8 +136,6 @@ func (s *Store) GetAllKinds() []ldstoretypes.Collection { allData = append(allData, ldstoretypes.Collection{Kind: kind, Items: itemsOut}) } - s.RUnlock() - return allData } @@ -183,7 +168,6 @@ func (s *Store) upsert( // IsInitialized returns true if the store has been initialized with a basis. func (s *Store) IsInitialized() bool { s.RLock() - ret := s.isInitialized - s.RUnlock() - return ret + defer s.RUnlock() + return s.isInitialized } From b0c8a7c78046fe765014820827381b9bf2fcd120 Mon Sep 17 00:00:00 2001 From: Casey Waldren Date: Mon, 23 Sep 2024 18:17:46 -0700 Subject: [PATCH 54/62] make element matcher consistent --- internal/memorystorev2/memory_store.go | 4 +-- internal/memorystorev2/memory_store_test.go | 30 ++++++++++++++++----- 2 files changed, 25 insertions(+), 9 deletions(-) diff --git a/internal/memorystorev2/memory_store.go b/internal/memorystorev2/memory_store.go index b55f9206..67678e8f 100644 --- a/internal/memorystorev2/memory_store.go +++ b/internal/memorystorev2/memory_store.go @@ -85,10 +85,8 @@ func (s *Store) ApplyDelta(allData []ldstoretypes.Collection) map[ldstoretypes.D func (s *Store) Get(kind ldstoretypes.DataKind, key string) (ldstoretypes.ItemDescriptor, error) { s.RLock() - var coll map[string]ldstoretypes.ItemDescriptor var item ldstoretypes.ItemDescriptor - var ok bool - coll, ok = s.allData[kind] + coll, ok := s.allData[kind] if ok { item, ok = coll[key] } diff --git a/internal/memorystorev2/memory_store_test.go b/internal/memorystorev2/memory_store_test.go index f3fdf8cd..f86669c2 100644 --- a/internal/memorystorev2/memory_store_test.go +++ b/internal/memorystorev2/memory_store_test.go @@ -420,7 +420,7 @@ func testGetAllKinds(t *testing.T) { gotCollections := store.GetAllKinds() - assert.ElementsMatch(t, expectedCollection, gotCollections) + requireCollectionsMatch(t, expectedCollection, gotCollections) }) t.Run("multiple deltas applies", func(t *testing.T) { @@ -441,17 +441,17 @@ func testGetAllKinds(t *testing.T) { expected = append(expected, ldstoretypes.Collection{Kind: datakinds.Features, Items: nil}) } - assert.ElementsMatch(t, expected, store.GetAllKinds()) + requireCollectionsMatch(t, expected, store.GetAllKinds()) _, collection1a := makeItem("key1", 2, false) store.ApplyDelta(collection1a) expected[0] = collection1a[0] - assert.ElementsMatch(t, expected, store.GetAllKinds()) + requireCollectionsMatch(t, expected, store.GetAllKinds()) _, collection1b := deleteItem("key1", 3) store.ApplyDelta(collection1b) expected[0] = collection1b[0] - assert.ElementsMatch(t, expected, store.GetAllKinds()) + requireCollectionsMatch(t, expected, store.GetAllKinds()) }) }) @@ -489,7 +489,7 @@ func testGetAllKinds(t *testing.T) { store.ApplyDelta(collection1) - assert.ElementsMatch(t, collection1, store.GetAllKinds()) + requireCollectionsMatch(t, collection1, 
store.GetAllKinds()) // Bumping the segment version is sufficient for an upsert. // To indicate that there's no change to flag2, we simply don't pass it in the collection. @@ -520,6 +520,24 @@ func testGetAllKinds(t *testing.T) { makeCollection(datakinds.Segments, segment1.Key, sharedtest.SegmentDescriptor(segment1)), } - assert.ElementsMatch(t, expected, store.GetAllKinds()) + requireCollectionsMatch(t, expected, store.GetAllKinds()) }) } + +// Make a custom Matcher that will match the result of store.GetAllKinds() with a collection that was passed in via +// ApplyDelta or SetBasis. We need this because: +// 1) The collections (segments, features) might be in random order in the top-level slice. That is, it might be +// {segments, features} or it might be {features, segments}/ +// 2) The items within each of those collections might be in random order. +// This should make use of normal assert functions where possible, and should accept a testing.T +func requireCollectionsMatch(t *testing.T, expected []ldstoretypes.Collection, actual []ldstoretypes.Collection) { + require.Equal(t, len(expected), len(actual)) + for _, expectedCollection := range expected { + for _, actualCollection := range actual { + if expectedCollection.Kind == actualCollection.Kind { + require.ElementsMatch(t, expectedCollection.Items, actualCollection.Items) + break + } + } + } +} From eaab968d0765d4d12bac37d641e2d91bff1066aa Mon Sep 17 00:00:00 2001 From: Casey Waldren Date: Mon, 23 Sep 2024 18:28:24 -0700 Subject: [PATCH 55/62] doc tweaks --- internal/memorystorev2/memory_store.go | 34 ++++++++++----------- internal/memorystorev2/memory_store_test.go | 8 ++--- 2 files changed, 19 insertions(+), 23 deletions(-) diff --git a/internal/memorystorev2/memory_store.go b/internal/memorystorev2/memory_store.go index 67678e8f..3ae13e0e 100644 --- a/internal/memorystorev2/memory_store.go +++ b/internal/memorystorev2/memory_store.go @@ -21,8 +21,8 @@ import ( // atomically. The idea is that there's never a moment when the state of the store could be inconsistent // with regard to the authoritative LaunchDarkly SaaS. type Store struct { - allData map[ldstoretypes.DataKind]map[string]ldstoretypes.ItemDescriptor - isInitialized bool + data map[ldstoretypes.DataKind]map[string]ldstoretypes.ItemDescriptor + initialized bool sync.RWMutex loggers ldlog.Loggers } @@ -30,9 +30,9 @@ type Store struct { // New creates a new Store. The Store is uninitialized until SetBasis is called. func New(loggers ldlog.Loggers) *Store { return &Store{ - allData: make(map[ldstoretypes.DataKind]map[string]ldstoretypes.ItemDescriptor), - isInitialized: false, - loggers: loggers, + data: make(map[ldstoretypes.DataKind]map[string]ldstoretypes.ItemDescriptor), + initialized: false, + loggers: loggers, } } @@ -42,17 +42,17 @@ func (s *Store) SetBasis(allData []ldstoretypes.Collection) { s.Lock() defer s.Unlock() - s.allData = make(map[ldstoretypes.DataKind]map[string]ldstoretypes.ItemDescriptor) + s.data = make(map[ldstoretypes.DataKind]map[string]ldstoretypes.ItemDescriptor) for _, coll := range allData { items := make(map[string]ldstoretypes.ItemDescriptor) for _, item := range coll.Items { items[item.Key] = item.Item } - s.allData[coll.Kind] = items + s.data[coll.Kind] = items } - s.isInitialized = true + s.initialized = true } // ApplyDelta applies a delta update to the store. 
ApplyDelta should not be called until @@ -86,7 +86,7 @@ func (s *Store) Get(kind ldstoretypes.DataKind, key string) (ldstoretypes.ItemDe s.RLock() var item ldstoretypes.ItemDescriptor - coll, ok := s.allData[kind] + coll, ok := s.data[kind] if ok { item, ok = coll[key] } @@ -111,7 +111,7 @@ func (s *Store) GetAll(kind ldstoretypes.DataKind) ([]ldstoretypes.KeyedItemDesc func (s *Store) getAll(kind ldstoretypes.DataKind) []ldstoretypes.KeyedItemDescriptor { var itemsOut []ldstoretypes.KeyedItemDescriptor - if itemsMap, ok := s.allData[kind]; ok { + if itemsMap, ok := s.data[kind]; ok { if len(itemsMap) > 0 { itemsOut = make([]ldstoretypes.KeyedItemDescriptor, 0, len(itemsMap)) for key, item := range itemsMap { @@ -123,13 +123,13 @@ func (s *Store) getAll(kind ldstoretypes.DataKind) []ldstoretypes.KeyedItemDescr } // GetAllKinds retrieves all items of all kinds from the store. This is different from calling -// GetAll for each kind because it provides a consistent view at a single point in time. +// GetAll for each kind because it provides a consistent view of the entire store at a single point in time. func (s *Store) GetAllKinds() []ldstoretypes.Collection { s.RLock() defer s.RUnlock() - allData := make([]ldstoretypes.Collection, 0, len(s.allData)) - for kind := range s.allData { + allData := make([]ldstoretypes.Collection, 0, len(s.data)) + for kind := range s.data { itemsOut := s.getAll(kind) allData = append(allData, ldstoretypes.Collection{Kind: kind, Items: itemsOut}) } @@ -145,14 +145,14 @@ func (s *Store) upsert( var ok bool shouldUpdate := true updated := false - if coll, ok = s.allData[kind]; ok { + if coll, ok = s.data[kind]; ok { if item, ok := coll[key]; ok { if item.Version >= newItem.Version { shouldUpdate = false } } } else { - s.allData[kind] = map[string]ldstoretypes.ItemDescriptor{key: newItem} + s.data[kind] = map[string]ldstoretypes.ItemDescriptor{key: newItem} shouldUpdate = false // because we already initialized the map with the new item updated = true } @@ -163,9 +163,9 @@ func (s *Store) upsert( return updated } -// IsInitialized returns true if the store has been initialized with a basis. +// IsInitialized returns true if the store has ever been initialized with a basis. func (s *Store) IsInitialized() bool { s.RLock() defer s.RUnlock() - return s.isInitialized + return s.initialized } diff --git a/internal/memorystorev2/memory_store_test.go b/internal/memorystorev2/memory_store_test.go index f86669c2..b47f0ffd 100644 --- a/internal/memorystorev2/memory_store_test.go +++ b/internal/memorystorev2/memory_store_test.go @@ -524,12 +524,8 @@ func testGetAllKinds(t *testing.T) { }) } -// Make a custom Matcher that will match the result of store.GetAllKinds() with a collection that was passed in via -// ApplyDelta or SetBasis. We need this because: -// 1) The collections (segments, features) might be in random order in the top-level slice. That is, it might be -// {segments, features} or it might be {features, segments}/ -// 2) The items within each of those collections might be in random order. -// This should make use of normal assert functions where possible, and should accept a testing.T +// This matcher is required instead of calling ElementsMatch directly on two slices of collections because +// the order of the collections, or the order within each collection, is not defined. 
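To see why a plain ElementsMatch is not enough here: it treats each Collection as one opaque element and compares elements with deep equality, which is order-sensitive for the nested Items slices. A sketch of the failing comparison, assuming this test package's existing imports:

func TestDeepEqualityIsOrderSensitive(t *testing.T) {
	a := ldstoretypes.Collection{
		Kind:  datakinds.Features,
		Items: []ldstoretypes.KeyedItemDescriptor{{Key: "f1"}, {Key: "f2"}},
	}
	b := ldstoretypes.Collection{
		Kind:  datakinds.Features,
		Items: []ldstoretypes.KeyedItemDescriptor{{Key: "f2"}, {Key: "f1"}},
	}
	// The same logical collection, but deep equality sees different slices, so
	// ElementsMatch over []Collection would report a mismatch; requireCollectionsMatch
	// instead pairs collections by Kind and compares their Items order-insensitively.
	assert.False(t, assert.ObjectsAreEqual(a, b))
}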
func requireCollectionsMatch(t *testing.T, expected []ldstoretypes.Collection, actual []ldstoretypes.Collection) { require.Equal(t, len(expected), len(actual)) for _, expectedCollection := range expected { From 4392516446f59aed368ca60b5f1238b61b612820 Mon Sep 17 00:00:00 2001 From: Casey Waldren Date: Tue, 24 Sep 2024 14:15:54 -0700 Subject: [PATCH 56/62] fix broken tests by refactoring fdv2 data model into single package --- internal/datasourcev2/polling_data_source.go | 8 +- internal/datasourcev2/polling_http_request.go | 63 ++++++------ .../datasourcev2/streaming_data_source.go | 95 +++++++++---------- internal/datasourcev2/types.go | 89 ----------------- internal/datasystem/fdv2_datasystem.go | 28 ++++-- internal/datasystem/store.go | 17 ++-- internal/datasystem/store_test.go | 17 ++-- internal/fdv2proto/event_to_storable_item.go | 8 +- internal/fdv2proto/events.go | 74 +++++++++++++-- internal/fdv2proto/payloads.go | 18 ++++ internal/fdv2proto/raw_event.go | 27 ++++++ ldclient_end_to_end_fdv2_test.go | 32 +++---- subsystems/data_source.go | 15 ++- testhelpers/ldservices/server_sdk_data.go | 24 ----- testhelpers/ldservicesv2/server_sdk_data.go | 89 +++++++++++++++++ .../streaming_protocol_builder.go | 43 ++------- 16 files changed, 351 insertions(+), 296 deletions(-) delete mode 100644 internal/datasourcev2/types.go create mode 100644 internal/fdv2proto/payloads.go create mode 100644 internal/fdv2proto/raw_event.go create mode 100644 testhelpers/ldservicesv2/server_sdk_data.go diff --git a/internal/datasourcev2/polling_data_source.go b/internal/datasourcev2/polling_data_source.go index f2429f58..66171aa7 100644 --- a/internal/datasourcev2/polling_data_source.go +++ b/internal/datasourcev2/polling_data_source.go @@ -118,17 +118,17 @@ func (pp *PollingProcessor) Name() string { } //nolint:revive // DataInitializer method. -func (pp *PollingProcessor) Fetch(ctx context.Context) (*subsystems.InitialPayload, error) { +func (pp *PollingProcessor) Fetch(ctx context.Context) (*subsystems.Basis, error) { // TODO: ideally, the Request method would take a context so it could be interrupted. - allData, _, err := pp.requester.Request() + basis, err := pp.requester.Request() if err != nil { return nil, err } - return &subsystems.InitialPayload{Data: allData, Persist: true, Version: nil}, nil + return &subsystems.Basis{Data: basis.Events(), Selector: basis.Selector(), Persist: true}, nil } //nolint:revive // DataSynchronizer method. 
-func (pp *PollingProcessor) Sync(closeWhenReady chan<- struct{}, payloadVersion *int) { +func (pp *PollingProcessor) Sync(closeWhenReady chan<- struct{}, _ fdv2proto.Selector) { pp.loggers.Infof("Starting LaunchDarkly polling with interval: %+v", pp.pollInterval) ticker := newTickerWithInitialTick(pp.pollInterval) diff --git a/internal/datasourcev2/polling_http_request.go b/internal/datasourcev2/polling_http_request.go index 3bd21b1f..e5272089 100644 --- a/internal/datasourcev2/polling_http_request.go +++ b/internal/datasourcev2/polling_http_request.go @@ -4,14 +4,11 @@ import ( "encoding/json" "errors" "fmt" + "github.com/launchdarkly/go-jsonstream/v3/jreader" + "github.com/launchdarkly/go-server-sdk/v7/internal/fdv2proto" "io" "net/http" "net/url" - "strings" - - "github.com/launchdarkly/go-jsonstream/v3/jreader" - "github.com/launchdarkly/go-server-sdk/v7/internal/datakinds" - "github.com/launchdarkly/go-server-sdk/v7/internal/fdv2proto" "github.com/launchdarkly/go-sdk-common/v3/ldlog" "github.com/launchdarkly/go-server-sdk/v7/internal/endpoints" @@ -85,13 +82,17 @@ func (r *pollingRequester) Request() (*PollingResponse, error) { return NewCachedPollingResponse(), nil } - var payload pollingPayload + var payload fdv2proto.PollingPayload if err = json.Unmarshal(body, &payload); err != nil { return nil, malformedJSONError{err} } - parseItem := func(r jreader.Reader, kind datakinds.DataKindInternal) (ldstoretypes.ItemDescriptor, error) { - item, err := kind.DeserializeFromJSONReader(&r) + parseItem := func(r jreader.Reader, kind fdv2proto.ObjectKind) (ldstoretypes.ItemDescriptor, error) { + dataKind, err := kind.ToFDV1() + if err != nil { + return ldstoretypes.ItemDescriptor{}, err + } + item, err := dataKind.DeserializeFromJSONReader(&r) return item, err } @@ -100,10 +101,10 @@ func (r *pollingRequester) Request() (*PollingResponse, error) { var intent fdv2proto.IntentCode for _, event := range payload.Events { - switch event.Event() { + switch fdv2proto.EventName(event.Event()) { case fdv2proto.EventServerIntent: { - var serverIntent serverIntent + var serverIntent fdv2proto.ServerIntent err := json.Unmarshal([]byte(event.Data()), &serverIntent) if err != nil { return nil, err @@ -119,53 +120,55 @@ func (r *pollingRequester) Request() (*PollingResponse, error) { case fdv2proto.EventPutObject: { r := jreader.NewReader([]byte(event.Data())) - var kind, key string - var item ldstoretypes.ItemDescriptor - var err error - var dataKind datakinds.DataKindInternal + + var ( + key string + kind fdv2proto.ObjectKind + item ldstoretypes.ItemDescriptor + err error + version int + ) for obj := r.Object().WithRequiredProperties([]string{versionField, kindField, "key", "object"}); obj.Next(); { switch string(obj.Name()) { case versionField: - // version = r.Int() + version = r.Int() case kindField: - kind = strings.TrimRight(r.String(), "s") - dataKind = dataKindFromKind(kind) + kind = fdv2proto.ObjectKind(r.String()) case "key": key = r.String() case "object": - item, err = parseItem(r, dataKind) + item, err = parseItem(r, kind) if err != nil { return nil, err } } } - updates = append(updates, fdv2proto.PutObject{Kind: dataKind, Key: key, Object: item}) + updates = append(updates, fdv2proto.PutObject{Kind: kind, Key: key, Object: item, Version: version}) } case fdv2proto.EventDeleteObject: { r := jreader.NewReader([]byte(event.Data())) - var version int - var dataKind datakinds.DataKindInternal - var kind, key string + + var ( + version int + kind fdv2proto.ObjectKind + key string + ) for obj := 
r.Object().WithRequiredProperties([]string{versionField, kindField, keyField}); obj.Next(); { switch string(obj.Name()) { case versionField: version = r.Int() case kindField: - kind = strings.TrimRight(r.String(), "s") - dataKind = dataKindFromKind(kind) - if dataKind == nil { - //nolint: godox - // TODO: We are skipping here without showing a warning. Need to address that later. - continue - } + kind = fdv2proto.ObjectKind(r.String()) + // TODO: An unrecognized kind should be ignored for forwards compat; the question is, + // do we throw out the DeleteObject here, or let the SDK's store handle it? case keyField: key = r.String() } } - updates = append(updates, fdv2proto.DeleteObject{Kind: dataKind, Key: key, Version: version}) + updates = append(updates, fdv2proto.DeleteObject{Kind: kind, Key: key, Version: version}) } case fdv2proto.EventPayloadTransferred: diff --git a/internal/datasourcev2/streaming_data_source.go b/internal/datasourcev2/streaming_data_source.go index 7a1fa58c..05d6dce1 100644 --- a/internal/datasourcev2/streaming_data_source.go +++ b/internal/datasourcev2/streaming_data_source.go @@ -6,7 +6,6 @@ import ( "errors" "net/http" "net/url" - "strings" "sync" "time" @@ -18,7 +17,6 @@ import ( ldevents "github.com/launchdarkly/go-sdk-events/v3" "github.com/launchdarkly/go-server-sdk/v7/interfaces" "github.com/launchdarkly/go-server-sdk/v7/internal" - "github.com/launchdarkly/go-server-sdk/v7/internal/datakinds" "github.com/launchdarkly/go-server-sdk/v7/internal/datasource" "github.com/launchdarkly/go-server-sdk/v7/internal/endpoints" "github.com/launchdarkly/go-server-sdk/v7/subsystems" @@ -33,6 +31,7 @@ const ( keyField = "key" kindField = "kind" versionField = "version" + objectField = "object" putEventName = "put-object" deleteEventName = "delete-object" @@ -47,6 +46,11 @@ const ( streamingWillRetryMessage = "will retry" ) +type changeSet struct { + intent *fdv2proto.ServerIntent + events []es.Event +} + // Implementation of the streaming data source, not including the lower-level SSE implementation which is in // the eventsource package. // @@ -66,7 +70,7 @@ const ( // 3. If we receive an unrecoverable error like HTTP 401, we close the stream and don't retry, and set the state // to OFF. Any other HTTP error or network error causes a retry with backoff, with a state of INTERRUPTED. // 4. We set the Future returned by start() to tell the client initialization logic that initialization has either -// succeeded (we got an initial payload and successfully stored it) or permanently failed (we got a 401, etc.). +// succeeded (we got an initial Payload and successfully stored it) or permanently failed (we got a 401, etc.). // Otherwise, the client initialization method may time out but we will still be retrying in the background, and // if we succeed then the client can detect that we're initialized now by calling our Initialized method. @@ -127,7 +131,7 @@ func (sp *StreamProcessor) Name() string { return "StreamingDataSourceV2" } -func (sp *StreamProcessor) Fetch(ctx context.Context) (*subsystems.InitialPayload, error) { +func (sp *StreamProcessor) Fetch(ctx context.Context) (*subsystems.Basis, error) { // TODO: there's no point in implementing this, as it would be highly inefficient to open a streaming // connection just to get a PUT and then close it again. return nil, errors.New("fetch capability not implemented") @@ -139,7 +143,7 @@ func (sp *StreamProcessor) IsInitialized() bool { } //nolint:revive // DataSynchronizer method. 
-func (sp *StreamProcessor) Sync(closeWhenReady chan<- struct{}, payloadVersion *int) { +func (sp *StreamProcessor) Sync(closeWhenReady chan<- struct{}, _ fdv2proto.Selector) { sp.loggers.Info("Starting LaunchDarkly streaming connection") go sp.subscribe(closeWhenReady) } @@ -212,7 +216,7 @@ func (sp *StreamProcessor) consumeStream(stream *es.Stream, closeWhenReady chan< case fdv2proto.EventServerIntent: //nolint: godox // TODO: Replace all this json unmarshalling with a nicer jreader implementation. - var serverIntent ServerIntent + var serverIntent fdv2proto.ServerIntent err := json.Unmarshal([]byte(event.Data()), &serverIntent) if err != nil { gotMalformedEvent(event, err) @@ -234,7 +238,7 @@ func (sp *StreamProcessor) consumeStream(stream *es.Stream, closeWhenReady chan< case fdv2proto.EventDeleteObject: currentChangeSet.events = append(currentChangeSet.events, event) case fdv2proto.EventGoodbye: - var goodbye goodbye + var goodbye fdv2proto.Goodbye err := json.Unmarshal([]byte(event.Data()), &goodbye) if err != nil { gotMalformedEvent(event, err) @@ -242,10 +246,10 @@ func (sp *StreamProcessor) consumeStream(stream *es.Stream, closeWhenReady chan< } if !goodbye.Silent { - sp.loggers.Errorf("SSE server received error: %s (%s)", goodbye.Reason, goodbye.Catastrophe) + sp.loggers.Errorf("Received error from SSE server: %s (%v)", goodbye.Reason, goodbye.Catastrophe) } case fdv2proto.EventError: - var errorData errorEvent + var errorData fdv2proto.Error err := json.Unmarshal([]byte(event.Data()), &errorData) if err != nil { //nolint: godox @@ -453,8 +457,8 @@ func (sp *StreamProcessor) GetFilterKey() string { func deserializeEvents(events []es.Event) ([]fdv2proto.Event, error) { updates := make([]fdv2proto.Event, 0, len(events)) - parseItem := func(r jreader.Reader, kind datakinds.DataKindInternal) (ldstoretypes.ItemDescriptor, error) { - item, err := kind.DeserializeFromJSONReader(&r) + parseItem := func(r jreader.Reader, kind fdv2proto.ObjectKind) (ldstoretypes.ItemDescriptor, error) { + dataKind, err := kind.ToFDV1() + if err != nil { + return ldstoretypes.ItemDescriptor{}, err + } + item, err := dataKind.DeserializeFromJSONReader(&r) return item, err } @@ -462,72 +470,59 @@ func deserializeEvents(events []es.Event) ([]fdv2proto.Event, error) { switch fdv2proto.EventName(event.Event()) { case fdv2proto.EventPutObject: r := jreader.NewReader([]byte(event.Data())) - // var version int - var dataKind datakinds.DataKindInternal - var key string - var item ldstoretypes.ItemDescriptor - var err error - for obj := r.Object().WithRequiredProperties([]string{versionField, kindField, keyField, "object"}); obj.Next(); { + var ( + kind fdv2proto.ObjectKind + key string + version int + item ldstoretypes.ItemDescriptor + err error + ) + + for obj := r.Object().WithRequiredProperties([]string{versionField, kindField, keyField, objectField}); obj.Next(); { switch string(obj.Name()) { case versionField: - // version = r.Int() + version = r.Int() case kindField: - kind := r.String() - dataKind = dataKindFromKind(kind) - if dataKind == nil { - //nolint: godox - // TODO: We are skipping here without showing a warning. Need to address that later. - continue - } + kind = fdv2proto.ObjectKind(r.String()) + // TODO: An unrecognized kind should be ignored for forwards compat; the question is, + // do we throw out the PutObject here, or let the SDK's store handle it?
case keyField: key = r.String() - case "object": - item, err = parseItem(r, dataKind) + case objectField: + item, err = parseItem(r, kind) if err != nil { return updates, err } } } - updates = append(updates, fdv2proto.PutObject{Kind: dataKind, Key: key, Object: item}) + updates = append(updates, fdv2proto.PutObject{Kind: kind, Key: key, Object: item, Version: version}) case fdv2proto.EventDeleteObject: r := jreader.NewReader([]byte(event.Data())) - var version int - var dataKind datakinds.DataKindInternal - var kind, key string + + var ( + version int + kind fdv2proto.ObjectKind + key string + ) for obj := r.Object().WithRequiredProperties([]string{versionField, kindField, keyField}); obj.Next(); { switch string(obj.Name()) { case versionField: version = r.Int() case kindField: - kind = strings.TrimRight(r.String(), "s") - dataKind = dataKindFromKind(kind) - if dataKind == nil { - //nolint: godox - // TODO: We are skipping here without showing a warning. Need to address that later. - continue - } + kind = fdv2proto.ObjectKind(r.String()) + // TODO: An unrecognized kind should be ignored for forwards compat; the question is, + // do we throw out the DeleteObject here, or let the SDK's store handle it? case keyField: key = r.String() } } - updates = append(updates, fdv2proto.DeleteObject{Kind: dataKind, Key: key, Version: version}) + updates = append(updates, fdv2proto.DeleteObject{Kind: kind, Key: key, Version: version}) } } return updates, nil } -func dataKindFromKind(kind string) datakinds.DataKindInternal { - switch kind { - case "flag": - return datakinds.Features - case "segment": - return datakinds.Segments - default: - return nil - } -} - // vim: foldmethod=marker foldlevel=0 diff --git a/internal/datasourcev2/types.go b/internal/datasourcev2/types.go deleted file mode 100644 index dbb7b9b2..00000000 --- a/internal/datasourcev2/types.go +++ /dev/null @@ -1,89 +0,0 @@ -package datasourcev2 - -import ( - "encoding/json" - - "github.com/launchdarkly/go-server-sdk/v7/internal/fdv2proto" - - es "github.com/launchdarkly/eventsource" -) - -type pollingPayload struct { - Events []event `json:"events"` -} - -type event struct { - Name fdv2proto.EventName `json:"name"` - EventData json.RawMessage `json:"data"` -} - -// Begin es.Event interface implementation - -// Id returns the id of the event. -func (e event) Id() string { //nolint:stylecheck // The interface requires this method. - return "" -} - -// Event returns the name of the event. -func (e event) Event() fdv2proto.EventName { - return e.Name -} - -// Data returns the raw data of the event. -func (e event) Data() string { - return string(e.EventData) -} - -// En es.Event interface implementation - -type changeSet struct { - intent *serverIntent - events []es.Event -} - -type serverIntent struct { - Payloads []payload `json:"payloads"` -} - -type payload struct { - // The id here doesn't seem to match the state that is included in the - // payload transferred object. - - // It would be nice if we had the same value available in both so we could - // use that as the key consistently throughout the the process. - ID string `json:"id"` - Target int `json:"target"` - Code fdv2proto.IntentCode `json:"code"` - Reason string `json:"reason"` -} - -// This is the general shape of a put-object event. The delete-object is the same, with the object field being nil. 
-// type baseObject struct { -// Version int `json:"version"` -// Kind string `json:"kind"` -// Key string `json:"key"` -// Object json.RawMessage `json:"object"` -// } - -// type payloadTransferred struct { -// State string `json:"state"` -// Version int `json:"version"` -// } - -// TODO: Todd doesn't have this in his spec. What are we going to do here? -// -//nolint:godox -type errorEvent struct { - PayloadID string `json:"payloadId"` - Reason string `json:"reason"` -} - -// type heartBeat struct{} - -type goodbye struct { - Reason string `json:"reason"` - Silent bool `json:"silent"` - Catastrophe bool `json:"catastrophe"` - //nolint:godox - // TODO: Might later include some advice or backoff information -} diff --git a/internal/datasystem/fdv2_datasystem.go b/internal/datasystem/fdv2_datasystem.go index e7bd2576..7c53afdc 100644 --- a/internal/datasystem/fdv2_datasystem.go +++ b/internal/datasystem/fdv2_datasystem.go @@ -3,6 +3,7 @@ package datasystem import ( "context" "errors" + "github.com/launchdarkly/go-server-sdk/v7/internal/fdv2proto" "sync" "time" @@ -178,28 +179,31 @@ func (f *FDv2) runPersistentStoreOutageRecovery(ctx context.Context, statuses <- } } -func (f *FDv2) runInitializers(ctx context.Context, closeWhenReady chan struct{}) *int { +func (f *FDv2) runInitializers(ctx context.Context, closeWhenReady chan struct{}) fdv2proto.Selector { for _, initializer := range f.initializers { f.loggers.Infof("Attempting to initialize via %s", initializer.Name()) - payload, err := initializer.Fetch(ctx) + basis, err := initializer.Fetch(ctx) if errors.Is(err, context.Canceled) { - return nil + return fdv2proto.NoSelector() } if err != nil { f.loggers.Warnf("Initializer %s failed: %v", initializer.Name(), err) continue } f.loggers.Infof("Initialized via %s", initializer.Name()) - f.store.Init(payload.Data, payload.Version, payload.Persist) + if err := f.store.SetBasis(basis.Data, basis.Selector, basis.Persist); err != nil { + f.loggers.Errorf("Failed to set basis: %v", err) + continue + } f.readyOnce.Do(func() { close(closeWhenReady) }) - return payload.Version + return basis.Selector } - return nil + return fdv2proto.NoSelector() } -func (f *FDv2) runSynchronizers(ctx context.Context, closeWhenReady chan struct{}, payloadVersion *int) { +func (f *FDv2) runSynchronizers(ctx context.Context, closeWhenReady chan struct{}, selector fdv2proto.Selector) { // If the SDK was configured with no synchronizer, then (assuming no initializer succeeded), we should // trigger the ready signal to let the call to MakeClient unblock immediately. if f.primarySync == nil { @@ -213,7 +217,7 @@ func (f *FDv2) runSynchronizers(ctx context.Context, closeWhenReady chan struct{ // Instead, create a "proxy" channel just for the data source; if that is closed, we close the real one // using the sync.Once. 
ready := make(chan struct{}) - f.primarySync.Sync(ready, payloadVersion) + f.primarySync.Sync(ready, selector) for { select { @@ -247,7 +251,13 @@ func (f *FDv2) Store() subsystems.ReadOnlyStore { } func (f *FDv2) DataAvailability() DataAvailability { - return f.store.DataAvailability() + if f.store.Selector().IsSet() { + return Refreshed + } + if f.store.IsInitialized() { + return Cached + } + return Defaults } func (f *FDv2) DataSourceStatusBroadcaster() *internal.Broadcaster[interfaces.DataSourceStatus] { diff --git a/internal/datasystem/store.go b/internal/datasystem/store.go index 0a7c4f3e..4a7a3a80 100644 --- a/internal/datasystem/store.go +++ b/internal/datasystem/store.go @@ -181,17 +181,12 @@ func (s *Store) ApplyDelta(events []fdv2proto.Event, selector fdv2proto.Selector // is happening. In practice, we often don't receive more than one event at a time, but this may change // in the future. if s.shouldPersist() { - for _, event := range events { - var err error - switch e := event.(type) { - case fdv2proto.PutObject: - _, err = s.persistentStore.impl.Upsert(e.Kind, e.Key, ldstoretypes.ItemDescriptor{Version: e.Version, Item: e.Object}) - case fdv2proto.DeleteObject: - _, err = s.persistentStore.impl.Upsert(e.Kind, e.Key, ldstoretypes.ItemDescriptor{Version: e.Version, Item: nil}) - } - // TODO: return error? - if err != nil { - s.loggers.Errorf("Error applying %s to persistent store: %s", event.Name(), err) + for _, coll := range collections { + for _, item := range coll.Items { + _, err := s.persistentStore.impl.Upsert(coll.Kind, item.Key, item.Item) + if err != nil { + s.loggers.Errorf("Failed to apply delta to persistent store: %s", err) + } } } } diff --git a/internal/datasystem/store_test.go b/internal/datasystem/store_test.go index 1b892c13..2edaed8f 100644 --- a/internal/datasystem/store_test.go +++ b/internal/datasystem/store_test.go @@ -7,7 +7,6 @@ import ( "testing" "time" - "github.com/launchdarkly/go-server-sdk/v7/internal/datakinds" "github.com/launchdarkly/go-server-sdk/v7/internal/fdv2proto" "github.com/launchdarkly/go-server-sdk/v7/subsystems" @@ -83,8 +82,8 @@ func TestStore_Commit(t *testing.T) { // The store receives data as a list of events, but the persistent store receives them as an // []ldstoretypes.Collection. 
input := []fdv2proto.Event{ - fdv2proto.PutObject{Kind: datakinds.Features, Key: "foo", Object: ldstoretypes.ItemDescriptor{Version: 1}}, - fdv2proto.PutObject{Kind: datakinds.Segments, Key: "bar", Object: ldstoretypes.ItemDescriptor{Version: 2}}, + fdv2proto.PutObject{Kind: fdv2proto.FlagKind, Key: "foo", Object: ldstoretypes.ItemDescriptor{Version: 1}}, + fdv2proto.PutObject{Kind: fdv2proto.SegmentKind, Key: "bar", Object: ldstoretypes.ItemDescriptor{Version: 2}}, } output := []ldstoretypes.Collection{ @@ -124,8 +123,8 @@ func TestStore_Commit(t *testing.T) { defer store.Close() input := []fdv2proto.Event{ - fdv2proto.PutObject{Kind: datakinds.Features, Key: "foo", Object: ldstoretypes.ItemDescriptor{Version: 1}}, - fdv2proto.PutObject{Kind: datakinds.Segments, Key: "bar", Object: ldstoretypes.ItemDescriptor{Version: 2}}, + fdv2proto.PutObject{Kind: fdv2proto.FlagKind, Key: "foo", Object: ldstoretypes.ItemDescriptor{Version: 1}}, + fdv2proto.PutObject{Kind: fdv2proto.SegmentKind, Key: "bar", Object: ldstoretypes.ItemDescriptor{Version: 2}}, } assert.NoError(t, store.SetBasis(input, fdv2proto.NoSelector(), false)) @@ -148,8 +147,8 @@ func TestStore_Commit(t *testing.T) { defer store.Close() input := []fdv2proto.Event{ - fdv2proto.PutObject{Kind: datakinds.Features, Key: "foo", Object: ldstoretypes.ItemDescriptor{Version: 1}}, - fdv2proto.PutObject{Kind: datakinds.Segments, Key: "bar", Object: ldstoretypes.ItemDescriptor{Version: 2}}, + fdv2proto.PutObject{Kind: fdv2proto.FlagKind, Key: "foo", Object: ldstoretypes.ItemDescriptor{Version: 1}}, + fdv2proto.PutObject{Kind: fdv2proto.SegmentKind, Key: "bar", Object: ldstoretypes.ItemDescriptor{Version: 2}}, } // Even though persist is true, the store was marked as read-only, so it shouldn't be written to. 
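These tests hand the store a list of events and expect per-kind collections on the other side; the conversion is fdv2proto.ToStorableItems, whose updated kind mapping is diffed below. A hypothetical call (usable only inside this module, since fdv2proto is an internal package):

events := []fdv2proto.Event{
	fdv2proto.PutObject{Kind: fdv2proto.FlagKind, Key: "foo", Version: 1,
		Object: ldstoretypes.ItemDescriptor{Version: 1}},
	fdv2proto.DeleteObject{Kind: fdv2proto.SegmentKind, Key: "bar", Version: 2},
}
collections := fdv2proto.ToStorableItems(events)
// collections now holds a Features collection containing "foo" and a Segments
// collection containing a "bar" tombstone (Version 2, Item nil).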
@@ -174,7 +173,7 @@ func TestStore_GetActive(t *testing.T) { assert.Equal(t, foo, ldstoretypes.ItemDescriptor{}.NotFound()) input := []fdv2proto.Event{ - fdv2proto.PutObject{Kind: datakinds.Features, Key: "foo", Object: ldstoretypes.ItemDescriptor{Version: 1}}, + fdv2proto.PutObject{Kind: fdv2proto.FlagKind, Key: "foo", Object: ldstoretypes.ItemDescriptor{Version: 1}}, } assert.NoError(t, store.SetBasis(input, fdv2proto.NoSelector(), false)) @@ -206,7 +205,7 @@ func TestStore_GetActive(t *testing.T) { assert.Equal(t, errImAPersistentStore, err) input := []fdv2proto.Event{ - fdv2proto.PutObject{Kind: datakinds.Features, Key: "foo", Object: ldstoretypes.ItemDescriptor{Version: 1}}, + fdv2proto.PutObject{Kind: fdv2proto.FlagKind, Key: "foo", Object: ldstoretypes.ItemDescriptor{Version: 1}}, } assert.NoError(t, store.SetBasis(input, fdv2proto.NoSelector(), false)) diff --git a/internal/fdv2proto/event_to_storable_item.go b/internal/fdv2proto/event_to_storable_item.go index 5db3e2f2..d1c0d952 100644 --- a/internal/fdv2proto/event_to_storable_item.go +++ b/internal/fdv2proto/event_to_storable_item.go @@ -20,12 +20,12 @@ func ToStorableItems(events []Event) []ldstoretypes.Collection { switch e := event.(type) { case PutObject: switch e.Kind { - case datakinds.Features: + case FlagKind: flagCollection.Items = append(flagCollection.Items, ldstoretypes.KeyedItemDescriptor{ Key: e.Key, Item: e.Object, }) - case datakinds.Segments: + case SegmentKind: segmentCollection.Items = append(segmentCollection.Items, ldstoretypes.KeyedItemDescriptor{ Key: e.Key, Item: e.Object, @@ -33,7 +33,7 @@ func ToStorableItems(events []Event) []ldstoretypes.Collection { } case DeleteObject: switch e.Kind { - case datakinds.Features: + case FlagKind: flagCollection.Items = append(flagCollection.Items, ldstoretypes.KeyedItemDescriptor{ Key: e.Key, Item: ldstoretypes.ItemDescriptor{ @@ -41,7 +41,7 @@ func ToStorableItems(events []Event) []ldstoretypes.Collection { Item: nil, }, }) - case datakinds.Segments: + case SegmentKind: segmentCollection.Items = append(segmentCollection.Items, ldstoretypes.KeyedItemDescriptor{ Key: e.Key, Item: ldstoretypes.ItemDescriptor{ diff --git a/internal/fdv2proto/events.go b/internal/fdv2proto/events.go index f99cc3fb..6ccd421b 100644 --- a/internal/fdv2proto/events.go +++ b/internal/fdv2proto/events.go @@ -1,6 +1,10 @@ package fdv2proto -import "github.com/launchdarkly/go-server-sdk/v7/subsystems/ldstoretypes" +import ( + "fmt" + "github.com/launchdarkly/go-server-sdk/v7/internal/datakinds" + "github.com/launchdarkly/go-server-sdk/v7/subsystems/ldstoretypes" +) type IntentCode string @@ -25,10 +29,45 @@ const ( EventError = EventName("error") ) +type ObjectKind string + +const ( + FlagKind = ObjectKind("flag") + SegmentKind = ObjectKind("segment") +) + +func (o ObjectKind) ToFDV1() (datakinds.DataKindInternal, error) { + switch o { + case FlagKind: + return datakinds.Features, nil + case SegmentKind: + return datakinds.Segments, nil + default: + return nil, fmt.Errorf("no FDv1 equivalent for object kind (%s)", string(o)) + } +} + +type ServerIntent struct { + Payloads []Payload `json:"payloads"` +} + +func (ServerIntent) Name() EventName { + return EventServerIntent +} + +type PayloadTransferred struct { + State string `json:"state"` + Version int `json:"version"` +} + +func (p PayloadTransferred) Name() EventName { + return EventPayloadTransferred +} + type DeleteObject struct { - Version int - Kind ldstoretypes.DataKind - Key string + Version int `json:"version"` + Kind ObjectKind 
`json:"kind"` + Key string `json:"key"` } func (d DeleteObject) Name() EventName { @@ -36,12 +75,31 @@ func (d DeleteObject) Name() EventName { } type PutObject struct { - Version int - Kind ldstoretypes.DataKind - Key string - Object ldstoretypes.ItemDescriptor + Version int `json:"version"` + Kind ObjectKind `json:"kind"` + Key string `json:"key"` + Object ldstoretypes.ItemDescriptor `json:"object"` } func (p PutObject) Name() EventName { return EventPutObject } + +type Error struct { + PayloadID string `json:"payloadId"` + Reason string `json:"reason"` +} + +func (e Error) Name() EventName { + return EventError +} + +type Goodbye struct { + Reason string `json:"reason"` + Silent bool `json:"silent"` + Catastrophe bool `json:"catastrophe"` +} + +func (g Goodbye) Name() EventName { + return EventGoodbye +} diff --git a/internal/fdv2proto/payloads.go b/internal/fdv2proto/payloads.go new file mode 100644 index 00000000..d09122dc --- /dev/null +++ b/internal/fdv2proto/payloads.go @@ -0,0 +1,18 @@ +package fdv2proto + +type Payload struct { + // The id here doesn't seem to match the state that is included in the + // payload-transferred event. + + // It would be nice if we had the same value available in both so we could + // use that as the key consistently throughout the process. + ID string `json:"id"` + Target int `json:"target"` + Code IntentCode `json:"code"` + Reason string `json:"reason"` +} + +type PollingPayload struct { + // Note: the first event in a PollingPayload is expected to be a server-intent. + Events []RawEvent `json:"events"` +} diff --git a/internal/fdv2proto/raw_event.go b/internal/fdv2proto/raw_event.go new file mode 100644 index 00000000..f6cf1779 --- /dev/null +++ b/internal/fdv2proto/raw_event.go @@ -0,0 +1,27 @@ +package fdv2proto + +import ( + "encoding/json" +) + +type RawEvent struct { + Name EventName `json:"name"` + EventData json.RawMessage `json:"data"` +} + +// Begin es.Event interface implementation + +// Id returns the id of the event. +func (e RawEvent) Id() string { //nolint:stylecheck // The interface requires this method. + return "" +} + +// Event returns the name of the event. +func (e RawEvent) Event() string { + return string(e.Name) +} + +// Data returns the raw data of the event. +func (e RawEvent) Data() string { + return string(e.EventData) +} diff --git a/ldclient_end_to_end_fdv2_test.go b/ldclient_end_to_end_fdv2_test.go index 569acf84..63ade444 100644 --- a/ldclient_end_to_end_fdv2_test.go +++ b/ldclient_end_to_end_fdv2_test.go @@ -3,7 +3,7 @@ package ldclient import ( "crypto/x509" "github.com/launchdarkly/go-sdk-common/v3/ldlog" - "github.com/launchdarkly/go-server-sdk/v7/internal/datasourcev2" + "github.com/launchdarkly/go-server-sdk/v7/internal/fdv2proto" "github.com/launchdarkly/go-server-sdk/v7/internal/sharedtest" "github.com/launchdarkly/go-server-sdk/v7/testhelpers/ldservicesv2" "net/http" @@ -23,13 +23,13 @@ import ( ) func TestFDV2DefaultDataSourceIsStreaming(t *testing.T) { - data := ldservices.NewServerSDKData().Flags(&alwaysTrueFlag) + data := ldservicesv2.NewServerSDKData().Flags(alwaysTrueFlag) protocol := ldservicesv2.NewStreamingProtocol(). - WithIntent(datasourcev2.ServerIntent{Payloads: []datasourcev2.Payload{ + WithIntent(fdv2proto.ServerIntent{Payloads: []fdv2proto.Payload{ {ID: "fake-id", Target: 0, Code: "xfer-full", Reason: "payload-missing"}, }}). - WithPutObjects(data.ToBaseObjects()). + WithPutObjects(data.ToPutObjects()).
WithTransferred() streamHandler, streamSender := ldservices.ServerSideStreamingServiceHandler(protocol.Next()) @@ -61,13 +61,13 @@ func TestFDV2DefaultDataSourceIsStreaming(t *testing.T) { } func TestFDV2ClientStartsInStreamingMode(t *testing.T) { - data := ldservices.NewServerSDKData().Flags(&alwaysTrueFlag) + data := ldservicesv2.NewServerSDKData().Flags(alwaysTrueFlag) protocol := ldservicesv2.NewStreamingProtocol(). - WithIntent(datasourcev2.ServerIntent{Payloads: []datasourcev2.Payload{ + WithIntent(fdv2proto.ServerIntent{Payloads: []fdv2proto.Payload{ {ID: "fake-id", Target: 0, Code: "xfer-full", Reason: "payload-missing"}, }}). - WithPutObjects(data.ToBaseObjects()). + WithPutObjects(data.ToPutObjects()). WithTransferred() streamHandler, streamSender := ldservices.ServerSideStreamingServiceHandler(protocol.Next()) @@ -104,13 +104,13 @@ func TestFDV2ClientStartsInStreamingMode(t *testing.T) { } func TestFDV2ClientRetriesConnectionInStreamingModeWithNonFatalError(t *testing.T) { - data := ldservices.NewServerSDKData().Flags(&alwaysTrueFlag) + data := ldservicesv2.NewServerSDKData().Flags(alwaysTrueFlag) protocol := ldservicesv2.NewStreamingProtocol(). - WithIntent(datasourcev2.ServerIntent{Payloads: []datasourcev2.Payload{ + WithIntent(fdv2proto.ServerIntent{Payloads: []fdv2proto.Payload{ {ID: "fake-id", Target: 0, Code: "xfer-full", Reason: "payload-missing"}, }}). - WithPutObjects(data.ToBaseObjects()). + WithPutObjects(data.ToPutObjects()). WithTransferred() streamHandler, streamSender := ldservices.ServerSideStreamingServiceHandler(protocol.Next()) @@ -184,13 +184,13 @@ func TestFDV2ClientFailsToStartInPollingModeWith401Error(t *testing.T) { } func TestFDV2ClientUsesCustomTLSConfiguration(t *testing.T) { - data := ldservices.NewServerSDKData().Flags(&alwaysTrueFlag) + data := ldservicesv2.NewServerSDKData().Flags(alwaysTrueFlag) protocol := ldservicesv2.NewStreamingProtocol(). - WithIntent(datasourcev2.ServerIntent{Payloads: []datasourcev2.Payload{ + WithIntent(fdv2proto.ServerIntent{Payloads: []fdv2proto.Payload{ {ID: "fake-id", Target: 0, Code: "xfer-full", Reason: "payload-missing"}, }}). - WithPutObjects(data.ToBaseObjects()). + WithPutObjects(data.ToPutObjects()). WithTransferred() streamHandler, streamSender := ldservices.ServerSideStreamingServiceHandler(protocol.Next()) @@ -215,13 +215,13 @@ func TestFDV2ClientUsesCustomTLSConfiguration(t *testing.T) { } func TestFDV2ClientStartupTimesOut(t *testing.T) { - data := ldservices.NewServerSDKData().Flags(&alwaysTrueFlag) + data := ldservicesv2.NewServerSDKData().Flags(alwaysTrueFlag) protocol := ldservicesv2.NewStreamingProtocol(). - WithIntent(datasourcev2.ServerIntent{Payloads: []datasourcev2.Payload{ + WithIntent(fdv2proto.ServerIntent{Payloads: []fdv2proto.Payload{ {ID: "fake-id", Target: 0, Code: "xfer-full", Reason: "payload-missing"}, }}). - WithPutObjects(data.ToBaseObjects()). + WithPutObjects(data.ToPutObjects()). WithTransferred() streamHandler, streamSender := ldservices.ServerSideStreamingServiceHandler(protocol.Next()) diff --git a/subsystems/data_source.go b/subsystems/data_source.go index 0ee8e2d0..d5bec6a7 100644 --- a/subsystems/data_source.go +++ b/subsystems/data_source.go @@ -2,9 +2,8 @@ package subsystems import ( "context" + "github.com/launchdarkly/go-server-sdk/v7/internal/fdv2proto" "io" - - "github.com/launchdarkly/go-server-sdk/v7/subsystems/ldstoretypes" ) // DataSource describes the interface for an object that receives feature flag data. 
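The Basis/DataInitializer contract diffed below is small enough to implement by hand. A hedged sketch of a custom initializer against it, where loadEvents is a hypothetical helper and not part of the SDK:

type fileInitializer struct{ path string }

func (f *fileInitializer) Name() string { return "FileInitializer" }

func (f *fileInitializer) Fetch(ctx context.Context) (*subsystems.Basis, error) {
	events, err := loadEvents(ctx, f.path) // hypothetical: decode []fdv2proto.Event from disk
	if err != nil {
		return nil, err
	}
	return &subsystems.Basis{
		Data:     events,
		Selector: fdv2proto.NoSelector(), // no server-side state to resume from
		Persist:  false,                  // don't let cached data overwrite a persistent store
	}, nil
}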
@@ -24,20 +23,20 @@ type DataSource interface { Start(closeWhenReady chan<- struct{}) } -type InitialPayload struct { - Data []ldstoretypes.Collection - Persist bool - Version *int +type Basis struct { + Data []fdv2proto.Event + Selector fdv2proto.Selector + Persist bool } type DataInitializer interface { Name() string - Fetch(ctx context.Context) (*InitialPayload, error) + Fetch(ctx context.Context) (*Basis, error) } type DataSynchronizer interface { DataInitializer - Sync(closeWhenReady chan<- struct{}, payloadVersion *int) + Sync(closeWhenReady chan<- struct{}, selector fdv2proto.Selector) // IsInitialized returns true if the data source has successfully initialized at some point. // // Once this is true, it should remain true even if a problem occurs later. diff --git a/testhelpers/ldservices/server_sdk_data.go b/testhelpers/ldservices/server_sdk_data.go index fb18a193..2384bc0f 100644 --- a/testhelpers/ldservices/server_sdk_data.go +++ b/testhelpers/ldservices/server_sdk_data.go @@ -4,7 +4,6 @@ import ( "encoding/json" "fmt" "github.com/launchdarkly/go-sdk-common/v3/ldvalue" - "github.com/launchdarkly/go-server-sdk/v7/testhelpers/ldservicesv2" "github.com/launchdarkly/go-test-helpers/v3/httphelpers" "github.com/launchdarkly/go-test-helpers/v3/jsonhelpers" ) @@ -82,26 +81,3 @@ func (s *ServerSDKData) ToPutEvent() httphelpers.SSEEvent { Data: fmt.Sprintf(`{"path": "/", "data": %s}`, s), } } - -func (s *ServerSDKData) ToBaseObjects() []ldservicesv2.BaseObject { - var objs []ldservicesv2.BaseObject - for _, flag := range s.FlagsMap { - base := ldservicesv2.BaseObject{ - Version: 1, - Kind: "flag", - Key: getKeyFromJSON(flag), - Object: jsonhelpers.ToJSON(flag), - } - objs = append(objs, base) - } - for _, segment := range s.SegmentsMap { - base := ldservicesv2.BaseObject{ - Version: 1, - Kind: "segment", - Key: getKeyFromJSON(segment), - Object: jsonhelpers.ToJSON(segment), - } - objs = append(objs, base) - } - return objs -} diff --git a/testhelpers/ldservicesv2/server_sdk_data.go b/testhelpers/ldservicesv2/server_sdk_data.go new file mode 100644 index 00000000..ba1d9b40 --- /dev/null +++ b/testhelpers/ldservicesv2/server_sdk_data.go @@ -0,0 +1,89 @@ +package ldservicesv2 + +import ( + "encoding/json" + "github.com/launchdarkly/go-server-sdk-evaluation/v3/ldmodel" + "github.com/launchdarkly/go-server-sdk/v7/internal/fdv2proto" + "github.com/launchdarkly/go-server-sdk/v7/subsystems/ldstoretypes" +) + +type fakeVersionedKind struct { + Key string `json:"key"` + Version int `json:"version"` +} + +// KeyAndVersionItem provides a simple object that has only "key" and "version" properties. +// This may be enough for some testing purposes that don't require full flag or segment data. +func KeyAndVersionItem(key string, version int) interface{} { + return fakeVersionedKind{Key: key, Version: version} +} + +// ServerSDKData is a convenience type for constructing a test server-side SDK data payload for +// PollingServiceHandler or StreamingServiceHandler. Its String() method returns a JSON object with +// the expected "flags" and "segments" properties. +// +// data := NewServerSDKData().Flags(flag1, flag2) +// handler := PollingServiceHandler(data) +type ServerSDKData struct { + FlagsMap map[string]ldmodel.FeatureFlag `json:"flags"` + SegmentsMap map[string]ldmodel.Segment `json:"segments"` +} + +// NewServerSDKData creates a ServerSDKData instance. 
+func NewServerSDKData() *ServerSDKData {
+	return &ServerSDKData{
+		make(map[string]ldmodel.FeatureFlag),
+		make(map[string]ldmodel.Segment),
+	}
+}
+
+// String returns the JSON encoding of the struct as a string.
+func (s *ServerSDKData) String() string {
+	bytes, _ := json.Marshal(*s)
+	return string(bytes)
+}
+
+// Flags adds the specified flags to the struct's "flags" map.
+//
+// Each flag is stored under its flag key, so adding a flag whose key is already present replaces the
+// previous value.
+func (s *ServerSDKData) Flags(flags ...ldmodel.FeatureFlag) *ServerSDKData {
+	for _, flag := range flags {
+		s.FlagsMap[flag.Key] = flag
+	}
+	return s
+}
+
+// Segments adds the specified segments to the struct's "segments" map.
+//
+// Each segment is stored under its segment key, so adding a segment whose key is already present replaces the
+// previous value.
+func (s *ServerSDKData) Segments(segments ...ldmodel.Segment) *ServerSDKData {
+	for _, segment := range segments {
+		s.SegmentsMap[segment.Key] = segment
+	}
+	return s
+}
+
+func (s *ServerSDKData) ToPutObjects() []fdv2proto.PutObject {
+	var objs []fdv2proto.PutObject
+	for _, flag := range s.FlagsMap {
+		base := fdv2proto.PutObject{
+			Version: 1,
+			Kind:    fdv2proto.FlagKind,
+			Key:     flag.Key,
+			Object:  ldstoretypes.ItemDescriptor{Version: flag.Version, Item: flag},
+		}
+		objs = append(objs, base)
+	}
+	for _, segment := range s.SegmentsMap {
+		base := fdv2proto.PutObject{
+			Version: 1,
+			Kind:    fdv2proto.SegmentKind,
+			Key:     segment.Key,
+			Object:  ldstoretypes.ItemDescriptor{Version: segment.Version, Item: segment},
+		}
+		objs = append(objs, base)
+	}
+	return objs
+}
diff --git a/testhelpers/ldservicesv2/streaming_protocol_builder.go b/testhelpers/ldservicesv2/streaming_protocol_builder.go
index 156f2e24..92a73cce 100644
--- a/testhelpers/ldservicesv2/streaming_protocol_builder.go
+++ b/testhelpers/ldservicesv2/streaming_protocol_builder.go
@@ -2,7 +2,7 @@ package ldservicesv2
 
 import (
 	"encoding/json"
-	"github.com/launchdarkly/go-server-sdk/v7/internal/datasourcev2"
+	"github.com/launchdarkly/go-server-sdk/v7/internal/fdv2proto"
 	"github.com/launchdarkly/go-test-helpers/v3/httphelpers"
 )
 
@@ -14,31 +14,6 @@ func (p ProtocolEvents) Enqueue(control httphelpers.SSEStreamControl) {
 	}
 }
 
-type protoState string
-
-const (
-	start       = protoState("start")
-	intentSent  = protoState("intent-sent")
-	transferred = protoState("transferred")
-)
-
-type BaseObject struct {
-	Version int             `json:"version"`
-	Kind    string          `json:"kind"`
-	Key     string          `json:"key"`
-	Object  json.RawMessage `json:"object"`
-}
-
-type event struct {
-	name string
-	data BaseObject
-}
-
-type payloadTransferred struct {
-	State   string `json:"state"`
-	Version int    `json:"version"`
-}
-
 type StreamingProtocol struct {
 	events []httphelpers.SSEEvent
 }
@@ -47,31 +22,31 @@ func NewStreamingProtocol() *StreamingProtocol {
 	return &StreamingProtocol{}
 }
 
-func (f *StreamingProtocol) WithIntent(intent datasourcev2.ServerIntent) *StreamingProtocol {
-	return f.pushEvent("server-intent", intent)
+func (f *StreamingProtocol) WithIntent(intent fdv2proto.ServerIntent) *StreamingProtocol {
+	return f.pushEvent(intent)
 }
 
-func (f *StreamingProtocol) WithPutObject(object BaseObject) *StreamingProtocol {
-	return f.pushEvent("put-object", object)
+func (f *StreamingProtocol) WithPutObject(object fdv2proto.PutObject) *StreamingProtocol {
+	
return f.pushEvent(object) } func (f *StreamingProtocol) WithTransferred() *StreamingProtocol { - return f.pushEvent("payload-transferred", payloadTransferred{State: "[p:17YNC7XBH88Y6RDJJ48EKPCJS7:53]", Version: 1}) + return f.pushEvent(fdv2proto.PayloadTransferred{State: "[p:17YNC7XBH88Y6RDJJ48EKPCJS7:53]", Version: 1}) } -func (f *StreamingProtocol) WithPutObjects(objects []BaseObject) *StreamingProtocol { +func (f *StreamingProtocol) WithPutObjects(objects []fdv2proto.PutObject) *StreamingProtocol { for _, object := range objects { f.WithPutObject(object) } return f } -func (f *StreamingProtocol) pushEvent(event string, data any) *StreamingProtocol { +func (f *StreamingProtocol) pushEvent(data fdv2proto.Event) *StreamingProtocol { marshalled, err := json.Marshal(data) if err != nil { panic(err) } - f.events = append(f.events, httphelpers.SSEEvent{Event: event, Data: string(marshalled)}) + f.events = append(f.events, httphelpers.SSEEvent{Event: string(data.Name()), Data: string(marshalled)}) return f } From 47f87481cbabb61e487f2897c3078a6a8dd215a8 Mon Sep 17 00:00:00 2001 From: Casey Waldren Date: Tue, 24 Sep 2024 14:21:43 -0700 Subject: [PATCH 57/62] remove some unused interface implementation from fdv2proto.RawEvent --- internal/datasourcev2/polling_http_request.go | 8 +++---- internal/fdv2proto/raw_event.go | 21 ++----------------- 2 files changed, 6 insertions(+), 23 deletions(-) diff --git a/internal/datasourcev2/polling_http_request.go b/internal/datasourcev2/polling_http_request.go index e5272089..e49a12ea 100644 --- a/internal/datasourcev2/polling_http_request.go +++ b/internal/datasourcev2/polling_http_request.go @@ -101,11 +101,11 @@ func (r *pollingRequester) Request() (*PollingResponse, error) { var intent fdv2proto.IntentCode for _, event := range payload.Events { - switch fdv2proto.EventName(event.Event()) { + switch event.Name { case fdv2proto.EventServerIntent: { var serverIntent fdv2proto.ServerIntent - err := json.Unmarshal([]byte(event.Data()), &serverIntent) + err := json.Unmarshal(event.Data, &serverIntent) if err != nil { return nil, err } else if len(serverIntent.Payloads) == 0 { @@ -119,7 +119,7 @@ func (r *pollingRequester) Request() (*PollingResponse, error) { } case fdv2proto.EventPutObject: { - r := jreader.NewReader([]byte(event.Data())) + r := jreader.NewReader(event.Data) var ( key string @@ -148,7 +148,7 @@ func (r *pollingRequester) Request() (*PollingResponse, error) { } case fdv2proto.EventDeleteObject: { - r := jreader.NewReader([]byte(event.Data())) + r := jreader.NewReader(event.Data) var ( version int diff --git a/internal/fdv2proto/raw_event.go b/internal/fdv2proto/raw_event.go index f6cf1779..c35fdbfa 100644 --- a/internal/fdv2proto/raw_event.go +++ b/internal/fdv2proto/raw_event.go @@ -5,23 +5,6 @@ import ( ) type RawEvent struct { - Name EventName `json:"name"` - EventData json.RawMessage `json:"data"` -} - -// Begin es.Event interface implementation - -// Id returns the id of the event. -func (e RawEvent) Id() string { //nolint:stylecheck // The interface requires this method. - return "" -} - -// Event returns the name of the event. -func (e RawEvent) Event() string { - return string(e.Name) -} - -// Data returns the raw data of the event. 
-func (e RawEvent) Data() string { - return string(e.EventData) + Name EventName `json:"name"` + Data json.RawMessage `json:"data"` } From c1ae8a41b0cf6213d8e113ce7c42b002be631ae4 Mon Sep 17 00:00:00 2001 From: Casey Waldren Date: Tue, 24 Sep 2024 16:07:09 -0700 Subject: [PATCH 58/62] refactoring selector --- internal/datasourcev2/polling_data_source.go | 8 ++-- internal/datasourcev2/polling_http_request.go | 6 +-- .../datasourcev2/streaming_data_source.go | 17 ++++++-- internal/datasystem/fdv2_datasystem.go | 4 +- internal/datasystem/store.go | 10 ++--- internal/datasystem/store_test.go | 9 ++-- internal/fdv2proto/event_to_storable_item.go | 4 +- internal/fdv2proto/events.go | 9 ++-- internal/fdv2proto/selector.go | 41 ++++++++++++++----- .../sharedtest/mocks/mock_data_destination.go | 4 +- ldclient_end_to_end_fdv2_test.go | 12 +++--- subsystems/data_destination.go | 4 +- subsystems/data_source.go | 4 +- testhelpers/ldservicesv2/server_sdk_data.go | 9 ++-- .../streaming_protocol_builder.go | 4 +- 15 files changed, 86 insertions(+), 59 deletions(-) diff --git a/internal/datasourcev2/polling_data_source.go b/internal/datasourcev2/polling_data_source.go index 66171aa7..85cbcf21 100644 --- a/internal/datasourcev2/polling_data_source.go +++ b/internal/datasourcev2/polling_data_source.go @@ -23,7 +23,7 @@ type PollingResponse struct { events []fdv2proto.Event cached bool intent fdv2proto.IntentCode - selector fdv2proto.Selector + selector *fdv2proto.Selector } func (p *PollingResponse) Events() []fdv2proto.Event { @@ -38,7 +38,7 @@ func (p *PollingResponse) Intent() fdv2proto.IntentCode { return p.intent } -func (p *PollingResponse) Selector() fdv2proto.Selector { +func (p *PollingResponse) Selector() *fdv2proto.Selector { return p.selector } @@ -48,7 +48,7 @@ func NewCachedPollingResponse() *PollingResponse { } } -func NewPollingResponse(intent fdv2proto.IntentCode, events []fdv2proto.Event, selector fdv2proto.Selector) *PollingResponse { +func NewPollingResponse(intent fdv2proto.IntentCode, events []fdv2proto.Event, selector *fdv2proto.Selector) *PollingResponse { return &PollingResponse{ events: events, intent: intent, @@ -128,7 +128,7 @@ func (pp *PollingProcessor) Fetch(ctx context.Context) (*subsystems.Basis, error } //nolint:revive // DataSynchronizer method. 
-func (pp *PollingProcessor) Sync(closeWhenReady chan<- struct{}, _ fdv2proto.Selector) { +func (pp *PollingProcessor) Sync(closeWhenReady chan<- struct{}, _ *fdv2proto.Selector) { pp.loggers.Infof("Starting LaunchDarkly polling with interval: %+v", pp.pollInterval) ticker := newTickerWithInitialTick(pp.pollInterval) diff --git a/internal/datasourcev2/polling_http_request.go b/internal/datasourcev2/polling_http_request.go index e49a12ea..68719f93 100644 --- a/internal/datasourcev2/polling_http_request.go +++ b/internal/datasourcev2/polling_http_request.go @@ -129,15 +129,15 @@ func (r *pollingRequester) Request() (*PollingResponse, error) { version int ) - for obj := r.Object().WithRequiredProperties([]string{versionField, kindField, "key", "object"}); obj.Next(); { + for obj := r.Object().WithRequiredProperties([]string{versionField, kindField, keyField, objectField}); obj.Next(); { switch string(obj.Name()) { case versionField: version = r.Int() case kindField: kind = fdv2proto.ObjectKind(r.String()) - case "key": + case keyField: key = r.String() - case "object": + case objectField: item, err = parseItem(r, kind) if err != nil { return nil, err diff --git a/internal/datasourcev2/streaming_data_source.go b/internal/datasourcev2/streaming_data_source.go index 05d6dce1..4077bf4a 100644 --- a/internal/datasourcev2/streaming_data_source.go +++ b/internal/datasourcev2/streaming_data_source.go @@ -143,7 +143,7 @@ func (sp *StreamProcessor) IsInitialized() bool { } //nolint:revive // DataSynchronizer method. -func (sp *StreamProcessor) Sync(closeWhenReady chan<- struct{}, _ fdv2proto.Selector) { +func (sp *StreamProcessor) Sync(closeWhenReady chan<- struct{}, _ *fdv2proto.Selector) { sp.loggers.Info("Starting LaunchDarkly streaming connection") go sp.subscribe(closeWhenReady) } @@ -266,6 +266,15 @@ func (sp *StreamProcessor) consumeStream(stream *es.Stream, closeWhenReady chan< //nolint: godox // TODO: Do we need to restart here? 
case fdv2proto.EventPayloadTransferred: + + selector := &fdv2proto.Selector{} + + err := json.Unmarshal([]byte(event.Data()), selector) + if err != nil { + gotMalformedEvent(event, err) + break + } + currentChangeSet.events = append(currentChangeSet.events, event) updates, err := deserializeEvents(currentChangeSet.events) if err != nil { @@ -277,11 +286,11 @@ func (sp *StreamProcessor) consumeStream(stream *es.Stream, closeWhenReady chan< switch currentChangeSet.intent.Payloads[0].Code { case fdv2proto.IntentTransferFull: { - sp.dataDestination.SetBasis(updates, fdv2proto.NoSelector(), true) + sp.dataDestination.SetBasis(updates, selector, true) sp.setInitializedAndNotifyClient(true, closeWhenReady) } case fdv2proto.IntentTransferChanges: - sp.dataDestination.ApplyDelta(updates, fdv2proto.NoSelector(), true) + sp.dataDestination.ApplyDelta(updates, selector, true) } currentChangeSet = changeSet{events: make([]es.Event, 0)} @@ -496,7 +505,7 @@ func deserializeEvents(events []es.Event) ([]fdv2proto.Event, error) { } } } - updates = append(updates, fdv2proto.PutObject{Kind: kind, Key: key, Object: item, Version: version}) + updates = append(updates, fdv2proto.PutObject{Kind: kind, Key: key, Object: item.Item, Version: version}) case fdv2proto.EventDeleteObject: r := jreader.NewReader([]byte(event.Data())) diff --git a/internal/datasystem/fdv2_datasystem.go b/internal/datasystem/fdv2_datasystem.go index 7c53afdc..338e892d 100644 --- a/internal/datasystem/fdv2_datasystem.go +++ b/internal/datasystem/fdv2_datasystem.go @@ -179,7 +179,7 @@ func (f *FDv2) runPersistentStoreOutageRecovery(ctx context.Context, statuses <- } } -func (f *FDv2) runInitializers(ctx context.Context, closeWhenReady chan struct{}) fdv2proto.Selector { +func (f *FDv2) runInitializers(ctx context.Context, closeWhenReady chan struct{}) *fdv2proto.Selector { for _, initializer := range f.initializers { f.loggers.Infof("Attempting to initialize via %s", initializer.Name()) basis, err := initializer.Fetch(ctx) @@ -203,7 +203,7 @@ func (f *FDv2) runInitializers(ctx context.Context, closeWhenReady chan struct{} return fdv2proto.NoSelector() } -func (f *FDv2) runSynchronizers(ctx context.Context, closeWhenReady chan struct{}, selector fdv2proto.Selector) { +func (f *FDv2) runSynchronizers(ctx context.Context, closeWhenReady chan struct{}, selector *fdv2proto.Selector) { // If the SDK was configured with no synchronizer, then (assuming no initializer succeeded), we should // trigger the ready signal to let the call to MakeClient unblock immediately. if f.primarySync == nil { diff --git a/internal/datasystem/store.go b/internal/datasystem/store.go index 4a7a3a80..7bd7f4e8 100644 --- a/internal/datasystem/store.go +++ b/internal/datasystem/store.go @@ -56,7 +56,7 @@ type Store struct { active subsystems.ReadOnlyStore // Identifies the current data. - selector fdv2proto.Selector + selector *fdv2proto.Selector mu sync.RWMutex @@ -121,7 +121,7 @@ func (s *Store) WithPersistence(persistent subsystems.DataStore, mode subsystems } // Selector returns the current selector. 
-func (s *Store) Selector() fdv2proto.Selector { +func (s *Store) Selector() *fdv2proto.Selector { s.mu.RLock() defer s.mu.RUnlock() return s.selector @@ -137,12 +137,12 @@ func (s *Store) Close() error { return nil } -func (s *Store) SetBasis(events []fdv2proto.Event, selector fdv2proto.Selector, persist bool) error { +func (s *Store) SetBasis(events []fdv2proto.Event, selector *fdv2proto.Selector, persist bool) error { collections := fdv2proto.ToStorableItems(events) return s.init(collections, selector, persist) } -func (s *Store) init(allData []ldstoretypes.Collection, selector fdv2proto.Selector, persist bool) error { +func (s *Store) init(allData []ldstoretypes.Collection, selector *fdv2proto.Selector, persist bool) error { s.mu.Lock() defer s.mu.Unlock() @@ -164,7 +164,7 @@ func (s *Store) shouldPersist() bool { return s.persist && s.persistentStore.writable() } -func (s *Store) ApplyDelta(events []fdv2proto.Event, selector fdv2proto.Selector, persist bool) error { +func (s *Store) ApplyDelta(events []fdv2proto.Event, selector *fdv2proto.Selector, persist bool) error { collections := fdv2proto.ToStorableItems(events) s.mu.Lock() diff --git a/internal/datasystem/store_test.go b/internal/datasystem/store_test.go index 2edaed8f..e4eb354f 100644 --- a/internal/datasystem/store_test.go +++ b/internal/datasystem/store_test.go @@ -2,6 +2,7 @@ package datasystem import ( "errors" + "github.com/launchdarkly/go-server-sdk-evaluation/v3/ldmodel" "math/rand" "sync" "testing" @@ -43,7 +44,7 @@ func TestStore_NoPersistence_MemoryStore_IsInitialized(t *testing.T) { none := fdv2proto.NoSelector() tests := []struct { name string - selector fdv2proto.Selector + selector *fdv2proto.Selector persist bool }{ {"with selector, persist", v1, true}, @@ -82,8 +83,8 @@ func TestStore_Commit(t *testing.T) { // The store receives data as a list of events, but the persistent store receives them as an // []ldstoretypes.Collection. input := []fdv2proto.Event{ - fdv2proto.PutObject{Kind: fdv2proto.FlagKind, Key: "foo", Object: ldstoretypes.ItemDescriptor{Version: 1}}, - fdv2proto.PutObject{Kind: fdv2proto.SegmentKind, Key: "bar", Object: ldstoretypes.ItemDescriptor{Version: 2}}, + fdv2proto.PutObject{Kind: fdv2proto.FlagKind, Key: "foo", Object: ldmodel.FeatureFlag{Version: 1}}, + fdv2proto.PutObject{Kind: fdv2proto.SegmentKind, Key: "bar", Object: ldmodel.Segment{Version: 2}}, } output := []ldstoretypes.Collection{ @@ -111,7 +112,7 @@ func TestStore_Commit(t *testing.T) { // This time, the data should be stored properly. 
require.NoError(t, store.Commit()) - assert.Equal(t, output, spy.initPayload) + assert.ElementsMatch(t, output, spy.initPayload) }) t.Run("non-persist memory items are not copied to persistent store in r/w mode", func(t *testing.T) { diff --git a/internal/fdv2proto/event_to_storable_item.go b/internal/fdv2proto/event_to_storable_item.go index d1c0d952..09d73378 100644 --- a/internal/fdv2proto/event_to_storable_item.go +++ b/internal/fdv2proto/event_to_storable_item.go @@ -23,12 +23,12 @@ func ToStorableItems(events []Event) []ldstoretypes.Collection { case FlagKind: flagCollection.Items = append(flagCollection.Items, ldstoretypes.KeyedItemDescriptor{ Key: e.Key, - Item: e.Object, + Item: ldstoretypes.ItemDescriptor{Version: e.Version, Item: e.Object}, }) case SegmentKind: segmentCollection.Items = append(segmentCollection.Items, ldstoretypes.KeyedItemDescriptor{ Key: e.Key, - Item: e.Object, + Item: ldstoretypes.ItemDescriptor{Version: e.Version, Item: e.Object}, }) } case DeleteObject: diff --git a/internal/fdv2proto/events.go b/internal/fdv2proto/events.go index 6ccd421b..94c164b5 100644 --- a/internal/fdv2proto/events.go +++ b/internal/fdv2proto/events.go @@ -3,7 +3,6 @@ package fdv2proto import ( "fmt" "github.com/launchdarkly/go-server-sdk/v7/internal/datakinds" - "github.com/launchdarkly/go-server-sdk/v7/subsystems/ldstoretypes" ) type IntentCode string @@ -75,10 +74,10 @@ func (d DeleteObject) Name() EventName { } type PutObject struct { - Version int `json:"version"` - Kind ObjectKind `json:"kind"` - Key string `json:"key"` - Object ldstoretypes.ItemDescriptor `json:"object"` + Version int `json:"version"` + Kind ObjectKind `json:"kind"` + Key string `json:"key"` + Object any `json:"object"` } func (p PutObject) Name() EventName { diff --git a/internal/fdv2proto/selector.go b/internal/fdv2proto/selector.go index 8d462352..d032889d 100644 --- a/internal/fdv2proto/selector.go +++ b/internal/fdv2proto/selector.go @@ -1,31 +1,50 @@ package fdv2proto +import ( + "encoding/json" + "errors" +) + type Selector struct { state string version int - set bool } -func NoSelector() Selector { - return Selector{set: false} +func NoSelector() *Selector { + return nil } -func NewSelector(state string, version int) Selector { - return Selector{state: state, version: version, set: true} +func NewSelector(state string, version int) *Selector { + return &Selector{state: state, version: version} } -func (s Selector) IsSet() bool { - return s.set +func (s *Selector) IsSet() bool { + return s != nil } -func (s Selector) State() string { +func (s *Selector) State() string { return s.state } -func (s Selector) Version() int { +func (s *Selector) Version() int { return s.version } -func (s Selector) Get() (string, int, bool) { - return s.state, s.version, s.set +func (s *Selector) UnmarshalJSON(data []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(data, &raw); err != nil { + return err + } + + if state, ok := raw["state"].(string); ok { + s.state = state + } else { + return errors.New("unmarshal selector: missing state field") + } + if version, ok := raw["version"].(float64); ok { + s.version = int(version) + } else { + return errors.New("unmarshal selector: missing version field") + } + return nil } diff --git a/internal/sharedtest/mocks/mock_data_destination.go b/internal/sharedtest/mocks/mock_data_destination.go index 84e91309..99b89b41 100644 --- a/internal/sharedtest/mocks/mock_data_destination.go +++ b/internal/sharedtest/mocks/mock_data_destination.go @@ -42,7 +42,7 @@ func 
NewMockDataDestination(realStore subsystems.DataStore) *MockDataDestination
 }
 
 // SetBasis in this test implementation, delegates to d.DataStore.CapturedUpdates.
-func (d *MockDataDestination) SetBasis(events []fdv2proto.Event, selector fdv2proto.Selector, persist bool) error {
+func (d *MockDataDestination) SetBasis(events []fdv2proto.Event, _ *fdv2proto.Selector, persist bool) error {
 	// For now, the selector is ignored. When the data sources start making use of it, it should be
 	// stored so that assertions can be made.
@@ -54,7 +54,7 @@ func (d *MockDataDestination) SetBasis(events []fdv2proto.Event, selector fdv2pr
 	return d.DataStore.Init(collections)
 }
 
-func (d *MockDataDestination) ApplyDelta(events []fdv2proto.Event, selector fdv2proto.Selector, persist bool) error {
+func (d *MockDataDestination) ApplyDelta(events []fdv2proto.Event, _ *fdv2proto.Selector, persist bool) error {
 	// For now, the selector is ignored. When the data sources start making use of it, it should be
 	// stored so that assertions can be made.
diff --git a/ldclient_end_to_end_fdv2_test.go b/ldclient_end_to_end_fdv2_test.go
index 63ade444..ac591fe8 100644
--- a/ldclient_end_to_end_fdv2_test.go
+++ b/ldclient_end_to_end_fdv2_test.go
@@ -30,7 +30,7 @@ func TestFDV2DefaultDataSourceIsStreaming(t *testing.T) {
 		{ID: "fake-id", Target: 0, Code: "xfer-full", Reason: "payload-missing"},
 	}}).
 		WithPutObjects(data.ToPutObjects()).
-		WithTransferred()
+		WithTransferred(1)
 
 	streamHandler, streamSender := ldservices.ServerSideStreamingServiceHandler(protocol.Next())
 
@@ -68,7 +68,7 @@ func TestFDV2ClientStartsInStreamingMode(t *testing.T) {
 		{ID: "fake-id", Target: 0, Code: "xfer-full", Reason: "payload-missing"},
 	}}).
 		WithPutObjects(data.ToPutObjects()).
-		WithTransferred()
+		WithTransferred(1)
 
 	streamHandler, streamSender := ldservices.ServerSideStreamingServiceHandler(protocol.Next())
 	protocol.Enqueue(streamSender)
@@ -111,7 +111,7 @@ func TestFDV2ClientRetriesConnectionInStreamingModeWithNonFatalError(t *testing.
 		{ID: "fake-id", Target: 0, Code: "xfer-full", Reason: "payload-missing"},
 	}}).
 		WithPutObjects(data.ToPutObjects()).
-		WithTransferred()
+		WithTransferred(1)
 
 	streamHandler, streamSender := ldservices.ServerSideStreamingServiceHandler(protocol.Next())
 	protocol.Enqueue(streamSender)
@@ -191,7 +191,7 @@ func TestFDV2ClientUsesCustomTLSConfiguration(t *testing.T) {
 		{ID: "fake-id", Target: 0, Code: "xfer-full", Reason: "payload-missing"},
 	}}).
 		WithPutObjects(data.ToPutObjects()).
-		WithTransferred()
+		WithTransferred(1)
 
 	streamHandler, streamSender := ldservices.ServerSideStreamingServiceHandler(protocol.Next())
 	protocol.Enqueue(streamSender)
@@ -222,7 +222,7 @@ func TestFDV2ClientStartupTimesOut(t *testing.T) {
 		{ID: "fake-id", Target: 0, Code: "xfer-full", Reason: "payload-missing"},
 	}}).
 		WithPutObjects(data.ToPutObjects()).
- WithTransferred() + WithTransferred(1) streamHandler, streamSender := ldservices.ServerSideStreamingServiceHandler(protocol.Next()) protocol.Enqueue(streamSender) diff --git a/subsystems/data_destination.go b/subsystems/data_destination.go index 3fbb54c2..9028b81a 100644 --- a/subsystems/data_destination.go +++ b/subsystems/data_destination.go @@ -19,7 +19,7 @@ type DataDestination interface { // // If persist is true, it indicates that the data should be propagated to any connected persistent // store. - SetBasis(events []fdv2proto.Event, selector fdv2proto.Selector, persist bool) error + SetBasis(events []fdv2proto.Event, selector *fdv2proto.Selector, persist bool) error // ApplyDelta applies a set of changes to an existing basis. This operation should be atomic with // respect to any other operations that modify the store. @@ -28,5 +28,5 @@ type DataDestination interface { // // If persist is true, it indicates that the changes should be propagated to any connected persistent // store. - ApplyDelta(events []fdv2proto.Event, selector fdv2proto.Selector, persist bool) error + ApplyDelta(events []fdv2proto.Event, selector *fdv2proto.Selector, persist bool) error } diff --git a/subsystems/data_source.go b/subsystems/data_source.go index d5bec6a7..47a9619a 100644 --- a/subsystems/data_source.go +++ b/subsystems/data_source.go @@ -25,7 +25,7 @@ type DataSource interface { type Basis struct { Data []fdv2proto.Event - Selector fdv2proto.Selector + Selector *fdv2proto.Selector Persist bool } @@ -36,7 +36,7 @@ type DataInitializer interface { type DataSynchronizer interface { DataInitializer - Sync(closeWhenReady chan<- struct{}, selector fdv2proto.Selector) + Sync(closeWhenReady chan<- struct{}, selector *fdv2proto.Selector) // IsInitialized returns true if the data source has successfully initialized at some point. // // Once this is true, it should remain true even if a problem occurs later. 
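For orientation, here is a minimal sketch of a custom DataInitializer satisfying the revised interface. The staticInitializer type and its fixed event set are hypothetical; because fdv2proto lives under internal/, an implementation like this could only exist inside the SDK module itself, for example alongside the built-in data sources.

```go
package datasourcev2

import (
	"context"

	"github.com/launchdarkly/go-server-sdk/v7/internal/fdv2proto"
	"github.com/launchdarkly/go-server-sdk/v7/subsystems"
)

// staticInitializer is a hypothetical DataInitializer that serves a fixed
// set of events, e.g. data parsed earlier from a local file.
type staticInitializer struct {
	events []fdv2proto.Event
}

func (s *staticInitializer) Name() string { return "StaticInitializer" }

func (s *staticInitializer) Fetch(ctx context.Context) (*subsystems.Basis, error) {
	// Honor cancellation: a synchronous Fetch should give up once the
	// caller's context has expired.
	if err := ctx.Err(); err != nil {
		return nil, err
	}
	// NoSelector indicates this basis carries no known payload version, so a
	// synchronizer started afterwards should request a full transfer.
	// Persist is false so that local data is not written through to any
	// connected persistent store.
	return &subsystems.Basis{
		Data:     s.events,
		Selector: fdv2proto.NoSelector(),
		Persist:  false,
	}, nil
}
```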
diff --git a/testhelpers/ldservicesv2/server_sdk_data.go b/testhelpers/ldservicesv2/server_sdk_data.go index ba1d9b40..7c7f00a0 100644 --- a/testhelpers/ldservicesv2/server_sdk_data.go +++ b/testhelpers/ldservicesv2/server_sdk_data.go @@ -4,7 +4,6 @@ import ( "encoding/json" "github.com/launchdarkly/go-server-sdk-evaluation/v3/ldmodel" "github.com/launchdarkly/go-server-sdk/v7/internal/fdv2proto" - "github.com/launchdarkly/go-server-sdk/v7/subsystems/ldstoretypes" ) type fakeVersionedKind struct { @@ -69,19 +68,19 @@ func (s *ServerSDKData) ToPutObjects() []fdv2proto.PutObject { var objs []fdv2proto.PutObject for _, flag := range s.FlagsMap { base := fdv2proto.PutObject{ - Version: 1, + Version: flag.Version, Kind: fdv2proto.FlagKind, Key: flag.Key, - Object: ldstoretypes.ItemDescriptor{Version: flag.Version, Item: flag}, + Object: flag, } objs = append(objs, base) } for _, segment := range s.SegmentsMap { base := fdv2proto.PutObject{ - Version: 1, + Version: segment.Version, Kind: fdv2proto.SegmentKind, Key: segment.Key, - Object: ldstoretypes.ItemDescriptor{Version: segment.Version, Item: segment}, + Object: segment, } objs = append(objs, base) } diff --git a/testhelpers/ldservicesv2/streaming_protocol_builder.go b/testhelpers/ldservicesv2/streaming_protocol_builder.go index 92a73cce..a7bd8813 100644 --- a/testhelpers/ldservicesv2/streaming_protocol_builder.go +++ b/testhelpers/ldservicesv2/streaming_protocol_builder.go @@ -30,8 +30,8 @@ func (f *StreamingProtocol) WithPutObject(object fdv2proto.PutObject) *Streaming return f.pushEvent(object) } -func (f *StreamingProtocol) WithTransferred() *StreamingProtocol { - return f.pushEvent(fdv2proto.PayloadTransferred{State: "[p:17YNC7XBH88Y6RDJJ48EKPCJS7:53]", Version: 1}) +func (f *StreamingProtocol) WithTransferred(version int) *StreamingProtocol { + return f.pushEvent(fdv2proto.PayloadTransferred{State: "[p:17YNC7XBH88Y6RDJJ48EKPCJS7:53]", Version: version}) } func (f *StreamingProtocol) WithPutObjects(objects []fdv2proto.PutObject) *StreamingProtocol { From b755ed9f51e251af58786508c15e331b0c9ab60d Mon Sep 17 00:00:00 2001 From: Casey Waldren Date: Tue, 24 Sep 2024 16:09:07 -0700 Subject: [PATCH 59/62] add better collection matcher --- internal/datasystem/store_test.go | 16 +++++++++++++++- 1 file changed, 15 insertions(+), 1 deletion(-) diff --git a/internal/datasystem/store_test.go b/internal/datasystem/store_test.go index 1b892c13..68ba22bb 100644 --- a/internal/datasystem/store_test.go +++ b/internal/datasystem/store_test.go @@ -112,7 +112,7 @@ func TestStore_Commit(t *testing.T) { // This time, the data should be stored properly. require.NoError(t, store.Commit()) - assert.Equal(t, output, spy.initPayload) + requireCollectionsMatch(t, output, spy.initPayload) }) t.Run("non-persist memory items are not copied to persistent store in r/w mode", func(t *testing.T) { @@ -326,3 +326,17 @@ func (f *fakeStore) IsStatusMonitoringEnabled() bool { func (f *fakeStore) Close() error { return nil } + +// This matcher is required instead of calling ElementsMatch directly on two slices of collections because +// the order of the collections, or the order within each collection, is not defined. 
+func requireCollectionsMatch(t *testing.T, expected []ldstoretypes.Collection, actual []ldstoretypes.Collection) { + require.Equal(t, len(expected), len(actual)) + for _, expectedCollection := range expected { + for _, actualCollection := range actual { + if expectedCollection.Kind == actualCollection.Kind { + require.ElementsMatch(t, expectedCollection.Items, actualCollection.Items) + break + } + } + } +} From 7008d6388e18e40c133da33b1918d978285c8a88 Mon Sep 17 00:00:00 2001 From: Casey Waldren Date: Tue, 8 Oct 2024 10:32:46 -0700 Subject: [PATCH 60/62] remove obsolete file --- internal/datastatus/data_status.go | 16 ---------------- 1 file changed, 16 deletions(-) delete mode 100644 internal/datastatus/data_status.go diff --git a/internal/datastatus/data_status.go b/internal/datastatus/data_status.go deleted file mode 100644 index eb6275b4..00000000 --- a/internal/datastatus/data_status.go +++ /dev/null @@ -1,16 +0,0 @@ -package datastatus - -type DataStatus string - -const ( - // Unknown means there is no known status. - Unknown = DataStatus("unknown") - // Authoritative means the data is from an authoritative source. Authoritative data may be replicated - // from the SDK into any connected persistent store (in write mode), and causes the SDK to transition from - // the Defaults/Cached states to Refreshed. - Authoritative = DataStatus("authoritative") - // Derivative means the data may be stale, such as from a local file or persistent store. Derivative data - // is not replicated to any connected persistent store, and causes the SDK to transition from the Defaults - // state to Cached only. - Derivative = DataStatus("derivative") -) From 15168a8760851f55991a01680b530455f0f44fcb Mon Sep 17 00:00:00 2001 From: Casey Waldren Date: Tue, 8 Oct 2024 10:58:13 -0700 Subject: [PATCH 61/62] fixing some lints --- internal/datasourcev2/polling_data_source.go | 7 +++--- .../datasourcev2/streaming_data_source.go | 6 ++--- internal/datasystem/fdv2_datasystem.go | 5 +++-- .../data_system_configuration_builder.go | 22 ++++++++++++------- subsystems/data_source.go | 20 +++++++++++++++-- 5 files changed, 41 insertions(+), 19 deletions(-) diff --git a/internal/datasourcev2/polling_data_source.go b/internal/datasourcev2/polling_data_source.go index c67b4310..ea0a6dcc 100644 --- a/internal/datasourcev2/polling_data_source.go +++ b/internal/datasourcev2/polling_data_source.go @@ -79,13 +79,14 @@ func (pp *PollingProcessor) Name() string { } //nolint:revive // DataInitializer method. -func (pp *PollingProcessor) Fetch(ctx context.Context) (*subsystems.Basis, error) { - // TODO: ideally, the Request method would take a context so it could be interrupted. +func (pp *PollingProcessor) Fetch(_ context.Context) (*subsystems.Basis, error) { + //nolint:godox + // TODO(SDK-752): Plumb the context into the request method. basis, err := pp.requester.Request() if err != nil { return nil, err } - return &subsystems.Basis{Data: basis.Events(), Selector: basis.Selector(), Persist: true}, nil + return &subsystems.Basis{Events: basis.Events(), Selector: basis.Selector(), Persist: true}, nil } //nolint:revive // DataSynchronizer method. 
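As a usage note for the requireCollectionsMatch helper added above, the hypothetical test below would pass even though both the collections and the items within them are reordered. It assumes it sits in the same package as the helper, with ldstoretypes and the internal datakinds package (whose Features and Segments kinds the production code in this series already uses) imported. The helper pairs collections by Kind after the initial length check, which is sufficient here because both slices are built from the same set of kinds.

```go
func TestRequireCollectionsMatch_IgnoresOrdering(t *testing.T) {
	item := func(key string, version int) ldstoretypes.KeyedItemDescriptor {
		return ldstoretypes.KeyedItemDescriptor{
			Key:  key,
			Item: ldstoretypes.ItemDescriptor{Version: version},
		}
	}
	expected := []ldstoretypes.Collection{
		{Kind: datakinds.Features, Items: []ldstoretypes.KeyedItemDescriptor{item("flag-a", 1), item("flag-b", 2)}},
		{Kind: datakinds.Segments, Items: []ldstoretypes.KeyedItemDescriptor{item("segment-a", 3)}},
	}
	// Same contents, with the collections and the items inside them shuffled.
	actual := []ldstoretypes.Collection{
		{Kind: datakinds.Segments, Items: []ldstoretypes.KeyedItemDescriptor{item("segment-a", 3)}},
		{Kind: datakinds.Features, Items: []ldstoretypes.KeyedItemDescriptor{item("flag-b", 2), item("flag-a", 1)}},
	}
	requireCollectionsMatch(t, expected, actual)
}
```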
diff --git a/internal/datasourcev2/streaming_data_source.go b/internal/datasourcev2/streaming_data_source.go index 818c626a..6d2596c0 100644 --- a/internal/datasourcev2/streaming_data_source.go +++ b/internal/datasourcev2/streaming_data_source.go @@ -127,10 +127,8 @@ func (sp *StreamProcessor) Name() string { return "StreamingDataSourceV2" } -func (sp *StreamProcessor) Fetch(ctx context.Context) (*subsystems.Basis, error) { - // TODO: there's no point in implementing this, as it would be highly inefficient to open a streaming - // connection just to get a PUT and then close it again. - return nil, errors.New("fetch capability not implemented") +func (sp *StreamProcessor) Fetch(_ context.Context) (*subsystems.Basis, error) { + return nil, errors.New("StreamProcessor does not implement Fetch capability") } //nolint:revive // no doc comment for standard method diff --git a/internal/datasystem/fdv2_datasystem.go b/internal/datasystem/fdv2_datasystem.go index f9b97b88..02092032 100644 --- a/internal/datasystem/fdv2_datasystem.go +++ b/internal/datasystem/fdv2_datasystem.go @@ -192,7 +192,7 @@ func (f *FDv2) runInitializers(ctx context.Context, closeWhenReady chan struct{} continue } f.loggers.Infof("Initialized via %s", initializer.Name()) - f.store.SetBasis(basis.Data, basis.Selector, basis.Persist) + f.store.SetBasis(basis.Events, basis.Selector, basis.Persist) f.readyOnce.Do(func() { close(closeWhenReady) }) @@ -307,7 +307,8 @@ func (d *dataStatusProvider) RemoveStatusListener(listener <-chan interfaces.Dat } func (d *dataStatusProvider) WaitFor(desiredState interfaces.DataSourceState, timeout time.Duration) bool { - //TODO implement me + //nolint:godox + // TODO: Implement dataStatusProvider for this data system. panic("implement me") } diff --git a/ldcomponents/data_system_configuration_builder.go b/ldcomponents/data_system_configuration_builder.go index 707fe41e..e45d7117 100644 --- a/ldcomponents/data_system_configuration_builder.go +++ b/ldcomponents/data_system_configuration_builder.go @@ -55,29 +55,33 @@ func (d *DataSystemModes) PersistentStore(store ss.ComponentConfigurer[ss.DataSt } // Custom returns a builder suitable for creating a custom data acquisition strategy. You may configure -// how the SDK uses a Persistent Store, how the SDK obtains an initial set of data, and how the SDK keeps data up-to-date. +// how the SDK uses a Persistent Store, how the SDK obtains an initial set of data, and how the SDK keeps data +// up-to-date. func (d *DataSystemModes) Custom() *DataSystemConfigurationBuilder { return &DataSystemConfigurationBuilder{} } -// DataSystem provides a high-level selection of the SDK's data acquisition strategy. Use the returned builder to select -// a mode, or to create a custom data acquisition strategy. To use LaunchDarkly's recommended mode, use Default. +// DataSystem provides a high-level selection of the SDK's data acquisition strategy. Use the returned builder to +// select a mode, or to create a custom data acquisition strategy. To use LaunchDarkly's recommended mode, use Default. 
func DataSystem() *DataSystemModes { return &DataSystemModes{} } -func (d *DataSystemConfigurationBuilder) DataStore(store ss.ComponentConfigurer[ss.DataStore], storeMode ss.DataStoreMode) *DataSystemConfigurationBuilder { +func (d *DataSystemConfigurationBuilder) DataStore(store ss.ComponentConfigurer[ss.DataStore], + storeMode ss.DataStoreMode) *DataSystemConfigurationBuilder { d.storeBuilder = store d.storeMode = storeMode return d } -func (d *DataSystemConfigurationBuilder) Initializers(initializers ...ss.ComponentConfigurer[ss.DataInitializer]) *DataSystemConfigurationBuilder { +func (d *DataSystemConfigurationBuilder) Initializers( + initializers ...ss.ComponentConfigurer[ss.DataInitializer]) *DataSystemConfigurationBuilder { d.initializerBuilders = initializers return d } -func (d *DataSystemConfigurationBuilder) Synchronizers(primary, secondary ss.ComponentConfigurer[ss.DataSynchronizer]) *DataSystemConfigurationBuilder { +func (d *DataSystemConfigurationBuilder) Synchronizers(primary, + secondary ss.ComponentConfigurer[ss.DataSynchronizer]) *DataSystemConfigurationBuilder { d.primarySyncBuilder = primary d.secondarySyncBuilder = secondary return d @@ -88,7 +92,8 @@ func (d *DataSystemConfigurationBuilder) Build( ) (ss.DataSystemConfiguration, error) { conf := d.config if d.secondarySyncBuilder != nil && d.primarySyncBuilder == nil { - return ss.DataSystemConfiguration{}, errors.New("cannot have a secondary synchronizer without a primary synchronizer") + return ss.DataSystemConfiguration{}, errors.New("cannot have a secondary synchronizer without " + + "a primary synchronizer") } if d.storeBuilder != nil { store, err := d.storeBuilder.Build(context) @@ -99,7 +104,8 @@ func (d *DataSystemConfigurationBuilder) Build( } for i, initializerBuilder := range d.initializerBuilders { if initializerBuilder == nil { - return ss.DataSystemConfiguration{}, fmt.Errorf("initializer %d is nil", i) + return ss.DataSystemConfiguration{}, + fmt.Errorf("initializer %d is nil", i) } initializer, err := initializerBuilder.Build(context) if err != nil { diff --git a/subsystems/data_source.go b/subsystems/data_source.go index 8c0bd755..c199bfd1 100644 --- a/subsystems/data_source.go +++ b/subsystems/data_source.go @@ -24,19 +24,32 @@ type DataSource interface { Start(closeWhenReady chan<- struct{}) } +// Basis represents the initial payload of data that a data source can provide. Initializers provide this +// via Fetch, whereas Synchronizers provide it asynchronously via the injected DataDestination. type Basis struct { - Data []fdv2proto.Event + // Events is a series of events representing actions applied to data items. + Events []fdv2proto.Event + // Selector identifies this basis. Selector *fdv2proto.Selector - Persist bool + // Persist is true if the data source requests that the data store persist the items to any connected + // Persistent Stores. + Persist bool } +// DataInitializer represents a component capable of obtaining a Basis via a synchronous call. type DataInitializer interface { + // Name returns the name of the data initializer. Name() string + // Fetch returns a Basis, or an error if the Basis could not be retrieved. If the context has expired, + // return the context's error. Fetch(ctx context.Context) (*Basis, error) } +// DataSynchronizer represents a component capable of obtaining a Basis and subsequent delta updates asynchronously. 
type DataSynchronizer interface { DataInitializer + // Sync tells the data synchronizer to begin synchronizing data, starting from an optional fdv2proto.Selector. + // The selector may be nil indicating that a full Basis should be fetched. Sync(closeWhenReady chan<- struct{}, selector *fdv2proto.Selector) // IsInitialized returns true if the data source has successfully initialized at some point. // @@ -57,6 +70,9 @@ func (t toInitializer) Build(context ClientContext) (DataInitializer, error) { return sync, nil } +// AsInitializer is a helper method that converts a DataSynchronizer to a DataInitializer. This is useful because +// DataSynchronizers are generally also DataInitializers, so it's possible to use one for that purpose if the +// situation calls for it. The primary example is using a Polling synchronizer as an initializer. func AsInitializer(cc ComponentConfigurer[DataSynchronizer]) ComponentConfigurer[DataInitializer] { return toInitializer{cc: cc} } From 67a0a7d2c64acf56b14b0a1afdce58e8127a88c5 Mon Sep 17 00:00:00 2001 From: Casey Waldren Date: Tue, 8 Oct 2024 16:18:18 -0700 Subject: [PATCH 62/62] more lints --- testhelpers/ldservicesv2/package.go | 2 ++ testhelpers/ldservicesv2/server_sdk_data.go | 7 +------ .../ldservicesv2/streaming_protocol_builder.go | 18 ++++++++++++++---- 3 files changed, 17 insertions(+), 10 deletions(-) create mode 100644 testhelpers/ldservicesv2/package.go diff --git a/testhelpers/ldservicesv2/package.go b/testhelpers/ldservicesv2/package.go new file mode 100644 index 00000000..53655b12 --- /dev/null +++ b/testhelpers/ldservicesv2/package.go @@ -0,0 +1,2 @@ +// Package ldservicesv2 provides test helpers for generating fdv2 protocol data. +package ldservicesv2 diff --git a/testhelpers/ldservicesv2/server_sdk_data.go b/testhelpers/ldservicesv2/server_sdk_data.go index 3984bfb5..9b0b4578 100644 --- a/testhelpers/ldservicesv2/server_sdk_data.go +++ b/testhelpers/ldservicesv2/server_sdk_data.go @@ -12,12 +12,6 @@ type fakeVersionedKind struct { Version int `json:"version"` } -// KeyAndVersionItem provides a simple object that has only "key" and "version" properties. -// This may be enough for some testing purposes that don't require full flag or segment data. -func KeyAndVersionItem(key string, version int) interface{} { - return fakeVersionedKind{Key: key, Version: version} -} - // ServerSDKData is a convenience type for constructing a test server-side SDK data payload for // PollingServiceHandler or StreamingServiceHandler. Its String() method returns a JSON object with // the expected "flags" and "segments" properties. @@ -65,6 +59,7 @@ func (s *ServerSDKData) Segments(segments ...ldmodel.Segment) *ServerSDKData { return s } +// ToPutObjects converts the data to a list of PutObject objects that can be fed to a mock streaming data source. func (s *ServerSDKData) ToPutObjects() []fdv2proto.PutObject { var objs []fdv2proto.PutObject for _, flag := range s.FlagsMap { diff --git a/testhelpers/ldservicesv2/streaming_protocol_builder.go b/testhelpers/ldservicesv2/streaming_protocol_builder.go index 6a41529e..1c6e1e98 100644 --- a/testhelpers/ldservicesv2/streaming_protocol_builder.go +++ b/testhelpers/ldservicesv2/streaming_protocol_builder.go @@ -7,34 +7,43 @@ import ( "github.com/launchdarkly/go-test-helpers/v3/httphelpers" ) +// ProtocolEvents represents a list of SSE-formatted events. type ProtocolEvents []httphelpers.SSEEvent +// Enqueue adds all the events to an SSEStreamController. 
func (p ProtocolEvents) Enqueue(control httphelpers.SSEStreamControl) {
 	for _, msg := range p {
 		control.Enqueue(msg)
 	}
 }
 
+// StreamingProtocol is a builder for creating a sequence of events that can be sent as an SSE stream.
 type StreamingProtocol struct {
-	events []httphelpers.SSEEvent
+	events ProtocolEvents
 }
 
+// NewStreamingProtocol creates a new StreamingProtocol instance.
 func NewStreamingProtocol() *StreamingProtocol {
 	return &StreamingProtocol{}
 }
 
+// WithIntent adds a ServerIntent event to the protocol.
 func (f *StreamingProtocol) WithIntent(intent fdv2proto.ServerIntent) *StreamingProtocol {
 	return f.pushEvent(intent)
 }
 
+// WithPutObject adds a PutObject event to the protocol.
 func (f *StreamingProtocol) WithPutObject(object fdv2proto.PutObject) *StreamingProtocol {
 	return f.pushEvent(object)
 }
 
+// WithTransferred adds a PayloadTransferred event to the protocol with a given version. The state is a placeholder
+// string.
 func (f *StreamingProtocol) WithTransferred(version int) *StreamingProtocol {
 	return f.pushEvent(fdv2proto.PayloadTransferred{State: "[p:17YNC7XBH88Y6RDJJ48EKPCJS7:53]", Version: version})
 }
 
+// WithPutObjects adds multiple PutObject events to the protocol.
 func (f *StreamingProtocol) WithPutObjects(objects []fdv2proto.PutObject) *StreamingProtocol {
 	for _, object := range objects {
 		f.WithPutObject(object)
@@ -51,10 +60,12 @@ func (f *StreamingProtocol) pushEvent(data fdv2proto.Event) *StreamingProtocol {
 	return f
 }
 
+// HasNext returns true if there are more events in the protocol.
 func (f *StreamingProtocol) HasNext() bool {
 	return len(f.events) != 0
 }
 
+// Next returns the next event in the protocol, popping the event from the protocol's internal queue.
 func (f *StreamingProtocol) Next() httphelpers.SSEEvent {
 	if !f.HasNext() {
 		panic("protocol has no events")
@@ -64,9 +75,8 @@ func (f *StreamingProtocol) Next() httphelpers.SSEEvent {
 	return event
 }
 
+// Enqueue adds all the events to an SSEStreamController.
 func (f *StreamingProtocol) Enqueue(control httphelpers.SSEStreamControl) {
-	for _, event := range f.events {
-		control.Enqueue(event)
-	}
+	f.events.Enqueue(control)
 	f.events = nil
 }
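Putting the builder together, this condensed sketch mirrors the wiring used by the FDv2 end-to-end tests earlier in this series; it assumes a test body where data is a *ldservicesv2.ServerSDKData built with NewServerSDKData().Flags(...).

```go
// Announce intent, transfer the full payload, then mark it complete (version 1).
protocol := ldservicesv2.NewStreamingProtocol().
	WithIntent(fdv2proto.ServerIntent{Payloads: []fdv2proto.Payload{
		{ID: "fake-id", Target: 0, Code: "xfer-full", Reason: "payload-missing"},
	}}).
	WithPutObjects(data.ToPutObjects()).
	WithTransferred(1)

// The first event primes the SSE handler; the remainder are enqueued for
// replay once a client connects. streamHandler can then back an
// httptest.Server whose URL serves as the SDK's streaming endpoint.
streamHandler, streamSender := ldservices.ServerSideStreamingServiceHandler(protocol.Next())
protocol.Enqueue(streamSender)
```

Note the queue semantics: Next pops the priming event and Enqueue drains whatever remains (setting the internal slice to nil), so one protocol instance describes a single stream session and cannot be replayed.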