diff --git a/CHANGELOG.md b/CHANGELOG.md index 963665fd4b7..2f63ae1bf4e 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -59,6 +59,7 @@ To learn more about active deprecations, we recommend checking [GitHub Discussio ### New +- **General**: Add support for time-bound Kubernetes ServiceAccount tokens as a source for TriggerAuthentication ([#6136](https://github.com/kedacore/keda/issues/6136)) - **General**: Enable OpenSSF Scorecard to enhance security practices across the project ([#5913](https://github.com/kedacore/keda/issues/5913)) - **General**: Introduce new NSQ scaler ([#3281](https://github.com/kedacore/keda/issues/3281)) - **General**: Operator flag to control patching of webhook resources certificates ([#6184](https://github.com/kedacore/keda/issues/6184)) diff --git a/apis/keda/v1alpha1/triggerauthentication_types.go b/apis/keda/v1alpha1/triggerauthentication_types.go index 0b0d9ffa315..826dd5249c0 100644 --- a/apis/keda/v1alpha1/triggerauthentication_types.go +++ b/apis/keda/v1alpha1/triggerauthentication_types.go @@ -95,6 +95,9 @@ type TriggerAuthenticationSpec struct { // +optional AwsSecretManager *AwsSecretManager `json:"awsSecretManager,omitempty"` + + // +optional + BoundServiceAccountToken []BoundServiceAccountToken `json:"boundServiceAccountToken,omitempty"` } // TriggerAuthenticationStatus defines the observed state of TriggerAuthentication @@ -378,6 +381,11 @@ type AwsSecretManagerSecret struct { VersionStage string `json:"versionStage,omitempty"` } +type BoundServiceAccountToken struct { + Parameter string `json:"parameter"` + ServiceAccountName string `json:"serviceAccountName"` +} + func init() { SchemeBuilder.Register(&ClusterTriggerAuthentication{}, &ClusterTriggerAuthenticationList{}) SchemeBuilder.Register(&TriggerAuthentication{}, &TriggerAuthenticationList{}) diff --git a/apis/keda/v1alpha1/zz_generated.deepcopy.go b/apis/keda/v1alpha1/zz_generated.deepcopy.go index 70b40bac583..5248166f09f 100755 --- a/apis/keda/v1alpha1/zz_generated.deepcopy.go +++ b/apis/keda/v1alpha1/zz_generated.deepcopy.go @@ -354,6 +354,21 @@ func (in *AzureKeyVaultSecret) DeepCopy() *AzureKeyVaultSecret { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BoundServiceAccountToken) DeepCopyInto(out *BoundServiceAccountToken) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BoundServiceAccountToken. +func (in *BoundServiceAccountToken) DeepCopy() *BoundServiceAccountToken { + if in == nil { + return nil + } + out := new(BoundServiceAccountToken) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ClusterTriggerAuthentication) DeepCopyInto(out *ClusterTriggerAuthentication) { *out = *in @@ -1202,6 +1217,11 @@ func (in *TriggerAuthenticationSpec) DeepCopyInto(out *TriggerAuthenticationSpec *out = new(AwsSecretManager) (*in).DeepCopyInto(*out) } + if in.BoundServiceAccountToken != nil { + in, out := &in.BoundServiceAccountToken, &out.BoundServiceAccountToken + *out = make([]BoundServiceAccountToken, len(*in)) + copy(*out, *in) + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TriggerAuthenticationSpec. 
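The new API surface is intentionally small: a list of (parameter, serviceAccountName) pairs on TriggerAuthenticationSpec. Below is a minimal sketch of the populated Go type (illustrative only, not part of the diff; "my-service-account" and "my-namespace" are placeholders, and the YAML form is what users would normally write, as in the e2e triggerAuthTemplate further down).

// Illustrative sketch: a TriggerAuthentication that sources the "token"
// parameter from a time-bound token for a namespaced ServiceAccount.
package main

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	kedav1alpha1 "github.com/kedacore/keda/v2/apis/keda/v1alpha1"
)

func exampleTriggerAuth() *kedav1alpha1.TriggerAuthentication {
	return &kedav1alpha1.TriggerAuthentication{
		ObjectMeta: metav1.ObjectMeta{Name: "keda-trigger-auth", Namespace: "my-namespace"},
		Spec: kedav1alpha1.TriggerAuthenticationSpec{
			BoundServiceAccountToken: []kedav1alpha1.BoundServiceAccountToken{
				{
					// Parameter is the authParams key handed to the scaler (e.g. "token").
					Parameter: "token",
					// ServiceAccountName must exist in the TriggerAuthentication's namespace;
					// KEDA requests the bound token on its behalf.
					ServiceAccountName: "my-service-account",
				},
			},
		},
	}
}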
diff --git a/cmd/operator/main.go b/cmd/operator/main.go index 1bdaed01954..55e9638b6f0 100644 --- a/cmd/operator/main.go +++ b/cmd/operator/main.go @@ -46,6 +46,7 @@ import ( "github.com/kedacore/keda/v2/pkg/k8s" "github.com/kedacore/keda/v2/pkg/metricscollector" "github.com/kedacore/keda/v2/pkg/metricsservice" + "github.com/kedacore/keda/v2/pkg/scalers/authentication" "github.com/kedacore/keda/v2/pkg/scaling" kedautil "github.com/kedacore/keda/v2/pkg/util" //+kubebuilder:scaffold:imports @@ -201,6 +202,12 @@ func main() { os.Exit(1) } + _, err = kedautil.GetBoundServiceAccountTokenExpiry() + if err != nil { + setupLog.Error(err, "invalid "+kedautil.BoundServiceAccountTokenExpiryEnvVar) + os.Exit(1) + } + globalHTTPTimeout := time.Duration(globalHTTPTimeoutMS) * time.Millisecond eventRecorder := mgr.GetEventRecorderFor("keda-operator") @@ -225,8 +232,13 @@ func main() { os.Exit(1) } - scaledHandler := scaling.NewScaleHandler(mgr.GetClient(), scaleClient, mgr.GetScheme(), globalHTTPTimeout, eventRecorder, secretInformer.Lister()) - eventEmitter := eventemitter.NewEventEmitter(mgr.GetClient(), eventRecorder, k8sClusterName, secretInformer.Lister()) + authClientSet := &authentication.AuthClientSet{ + CoreV1Interface: kubeClientset.CoreV1(), + SecretLister: secretInformer.Lister(), + } + + scaledHandler := scaling.NewScaleHandler(mgr.GetClient(), scaleClient, mgr.GetScheme(), globalHTTPTimeout, eventRecorder, authClientSet) + eventEmitter := eventemitter.NewEventEmitter(mgr.GetClient(), eventRecorder, k8sClusterName, authClientSet) if err = (&kedacontrollers.ScaledObjectReconciler{ Client: mgr.GetClient(), @@ -245,8 +257,7 @@ func main() { Scheme: mgr.GetScheme(), GlobalHTTPTimeout: globalHTTPTimeout, EventEmitter: eventEmitter, - SecretsLister: secretInformer.Lister(), - SecretsSynced: secretInformer.Informer().HasSynced, + AuthClientSet: authClientSet, }).SetupWithManager(mgr, controller.Options{ MaxConcurrentReconciles: scaledJobMaxReconciles, }); err != nil { diff --git a/config/crd/bases/keda.sh_clustertriggerauthentications.yaml b/config/crd/bases/keda.sh_clustertriggerauthentications.yaml index d8a74647ffc..108ca117ef0 100644 --- a/config/crd/bases/keda.sh_clustertriggerauthentications.yaml +++ b/config/crd/bases/keda.sh_clustertriggerauthentications.yaml @@ -302,6 +302,18 @@ spec: - secrets - vaultUri type: object + boundServiceAccountToken: + items: + properties: + parameter: + type: string + serviceAccountName: + type: string + required: + - parameter + - serviceAccountName + type: object + type: array configMapTargetRef: items: description: AuthConfigMapTargetRef is used to authenticate using diff --git a/config/crd/bases/keda.sh_triggerauthentications.yaml b/config/crd/bases/keda.sh_triggerauthentications.yaml index 9c38fa2ada4..dbdeed4dd55 100644 --- a/config/crd/bases/keda.sh_triggerauthentications.yaml +++ b/config/crd/bases/keda.sh_triggerauthentications.yaml @@ -301,6 +301,18 @@ spec: - secrets - vaultUri type: object + boundServiceAccountToken: + items: + properties: + parameter: + type: string + serviceAccountName: + type: string + required: + - parameter + - serviceAccountName + type: object + type: array configMapTargetRef: items: description: AuthConfigMapTargetRef is used to authenticate using diff --git a/controllers/keda/scaledjob_controller.go b/controllers/keda/scaledjob_controller.go index 386ff71ab25..6cdc1f7fa90 100755 --- a/controllers/keda/scaledjob_controller.go +++ b/controllers/keda/scaledjob_controller.go @@ -29,7 +29,6 @@ import ( 
"k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" - corev1listers "k8s.io/client-go/listers/core/v1" "k8s.io/client-go/tools/cache" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/builder" @@ -45,6 +44,7 @@ import ( "github.com/kedacore/keda/v2/pkg/eventemitter" "github.com/kedacore/keda/v2/pkg/eventreason" "github.com/kedacore/keda/v2/pkg/metricscollector" + "github.com/kedacore/keda/v2/pkg/scalers/authentication" "github.com/kedacore/keda/v2/pkg/scaling" kedastatus "github.com/kedacore/keda/v2/pkg/status" "github.com/kedacore/keda/v2/pkg/util" @@ -59,11 +59,10 @@ type ScaledJobReconciler struct { Scheme *runtime.Scheme GlobalHTTPTimeout time.Duration EventEmitter eventemitter.EventHandler + AuthClientSet *authentication.AuthClientSet scaledJobGenerations *sync.Map scaleHandler scaling.ScaleHandler - SecretsLister corev1listers.SecretLister - SecretsSynced cache.InformerSynced } type scaledJobMetricsData struct { @@ -83,7 +82,7 @@ func init() { // SetupWithManager initializes the ScaledJobReconciler instance and starts a new controller managed by the passed Manager instance. func (r *ScaledJobReconciler) SetupWithManager(mgr ctrl.Manager, options controller.Options) error { - r.scaleHandler = scaling.NewScaleHandler(mgr.GetClient(), nil, mgr.GetScheme(), r.GlobalHTTPTimeout, mgr.GetEventRecorderFor("scale-handler"), r.SecretsLister) + r.scaleHandler = scaling.NewScaleHandler(mgr.GetClient(), nil, mgr.GetScheme(), r.GlobalHTTPTimeout, mgr.GetEventRecorderFor("scale-handler"), r.AuthClientSet) r.scaledJobGenerations = &sync.Map{} return ctrl.NewControllerManagedBy(mgr). WithOptions(options). diff --git a/controllers/keda/suite_test.go b/controllers/keda/suite_test.go index 482a45b135c..fec3acd241b 100644 --- a/controllers/keda/suite_test.go +++ b/controllers/keda/suite_test.go @@ -36,6 +36,7 @@ import ( kedav1alpha1 "github.com/kedacore/keda/v2/apis/keda/v1alpha1" "github.com/kedacore/keda/v2/pkg/eventemitter" "github.com/kedacore/keda/v2/pkg/k8s" + "github.com/kedacore/keda/v2/pkg/scalers/authentication" "github.com/kedacore/keda/v2/pkg/scaling" //+kubebuilder:scaffold:imports ) @@ -91,19 +92,22 @@ var _ = BeforeSuite(func() { scaleClient, _, err := k8s.InitScaleClient(k8sManager) Expect(err).ToNot(HaveOccurred()) + authClientSet := &authentication.AuthClientSet{} + err = (&ScaledObjectReconciler{ Client: k8sManager.GetClient(), Scheme: k8sManager.GetScheme(), - ScaleHandler: scaling.NewScaleHandler(k8sManager.GetClient(), scaleClient, k8sManager.GetScheme(), time.Duration(10), k8sManager.GetEventRecorderFor("keda-operator"), nil), + ScaleHandler: scaling.NewScaleHandler(k8sManager.GetClient(), scaleClient, k8sManager.GetScheme(), time.Duration(10), k8sManager.GetEventRecorderFor("keda-operator"), authClientSet), ScaleClient: scaleClient, EventEmitter: eventemitter.NewEventEmitter(k8sManager.GetClient(), k8sManager.GetEventRecorderFor("keda-operator"), "kubernetes-default", nil), }).SetupWithManager(k8sManager, controller.Options{}) Expect(err).ToNot(HaveOccurred()) err = (&ScaledJobReconciler{ - Client: k8sManager.GetClient(), - Scheme: k8sManager.GetScheme(), - EventEmitter: eventemitter.NewEventEmitter(k8sManager.GetClient(), k8sManager.GetEventRecorderFor("keda-operator"), "kubernetes-default", nil), + Client: k8sManager.GetClient(), + Scheme: k8sManager.GetScheme(), + EventEmitter: eventemitter.NewEventEmitter(k8sManager.GetClient(), k8sManager.GetEventRecorderFor("keda-operator"), 
"kubernetes-default", nil), + AuthClientSet: authClientSet, }).SetupWithManager(k8sManager, controller.Options{}) Expect(err).ToNot(HaveOccurred()) diff --git a/pkg/eventemitter/eventemitter.go b/pkg/eventemitter/eventemitter.go index 91d57f3ca4d..4624018735f 100644 --- a/pkg/eventemitter/eventemitter.go +++ b/pkg/eventemitter/eventemitter.go @@ -36,7 +36,6 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types" - corev1listers "k8s.io/client-go/listers/core/v1" "k8s.io/client-go/tools/record" "sigs.k8s.io/controller-runtime/pkg/client" logf "sigs.k8s.io/controller-runtime/pkg/log" @@ -44,6 +43,7 @@ import ( eventingv1alpha1 "github.com/kedacore/keda/v2/apis/eventing/v1alpha1" "github.com/kedacore/keda/v2/pkg/eventemitter/eventdata" "github.com/kedacore/keda/v2/pkg/metricscollector" + "github.com/kedacore/keda/v2/pkg/scalers/authentication" "github.com/kedacore/keda/v2/pkg/scaling/resolver" kedastatus "github.com/kedacore/keda/v2/pkg/status" ) @@ -66,7 +66,7 @@ type EventEmitter struct { eventFilterCacheLock *sync.RWMutex eventLoopContexts *sync.Map cloudEventProcessingChan chan eventdata.EventData - secretsLister corev1listers.SecretLister + authClientSet *authentication.AuthClientSet } // EventHandler defines the behavior for EventEmitter clients @@ -96,7 +96,7 @@ const ( ) // NewEventEmitter creates a new EventEmitter -func NewEventEmitter(client client.Client, recorder record.EventRecorder, clusterName string, secretsLister corev1listers.SecretLister) EventHandler { +func NewEventEmitter(client client.Client, recorder record.EventRecorder, clusterName string, authClientSet *authentication.AuthClientSet) EventHandler { return &EventEmitter{ log: logf.Log.WithName("event_emitter"), client: client, @@ -108,7 +108,7 @@ func NewEventEmitter(client client.Client, recorder record.EventRecorder, cluste eventFilterCacheLock: &sync.RWMutex{}, eventLoopContexts: &sync.Map{}, cloudEventProcessingChan: make(chan eventdata.EventData, maxChannelBuffer), - secretsLister: secretsLister, + authClientSet: authClientSet, } } @@ -188,7 +188,7 @@ func (e *EventEmitter) createEventHandlers(ctx context.Context, cloudEventSource } // Resolve auth related - authParams, podIdentity, err := resolver.ResolveAuthRefAndPodIdentity(ctx, e.client, e.log, spec.AuthenticationRef, nil, cloudEventSourceI.GetNamespace(), e.secretsLister) + authParams, podIdentity, err := resolver.ResolveAuthRefAndPodIdentity(ctx, e.client, e.log, spec.AuthenticationRef, nil, cloudEventSourceI.GetNamespace(), e.authClientSet) if err != nil { e.log.Error(err, "error resolving auth params", "cloudEventSource", cloudEventSourceI) return diff --git a/pkg/mock/mock_serviceaccounts/mock_interfaces.go b/pkg/mock/mock_serviceaccounts/mock_interfaces.go new file mode 100644 index 00000000000..2f9767461b6 --- /dev/null +++ b/pkg/mock/mock_serviceaccounts/mock_interfaces.go @@ -0,0 +1,94 @@ +// Generated from these commands and then edited: +// +// mockgen -source=k8s.io/client-go/kubernetes/typed/core/v1/serviceaccount.go -imports=k8s.io/client-go/kubernetes/typed/core/v1/core_client.go +// mockgen k8s.io/client-go/kubernetes/typed/core/v1 CoreV1Interface +// +// Package mock_v1 is a generated GoMock package from various generated sources and edited to remove unnecessary code. 
+// + +package mock_v1 //nolint:revive,stylecheck + +import ( + context "context" + reflect "reflect" + + gomock "go.uber.org/mock/gomock" + v10 "k8s.io/api/authentication/v1" + v12 "k8s.io/apimachinery/pkg/apis/meta/v1" + v1 "k8s.io/client-go/kubernetes/typed/core/v1" +) + +// MockCoreV1Interface is a mock of CoreV1Interface interface. +type MockCoreV1Interface struct { + v1.CoreV1Interface + mockServiceAccount *MockServiceAccountInterface + ctrl *gomock.Controller + recorder *MockCoreV1InterfaceMockRecorder +} + +// MockCoreV1InterfaceMockRecorder is the mock recorder for MockCoreV1Interface. +type MockCoreV1InterfaceMockRecorder struct { + mock *MockCoreV1Interface +} + +// NewMockCoreV1Interface creates a new mock instance. +func NewMockCoreV1Interface(ctrl *gomock.Controller) *MockCoreV1Interface { + mock := &MockCoreV1Interface{ctrl: ctrl} + mock.mockServiceAccount = NewMockServiceAccountInterface(ctrl) + mock.recorder = &MockCoreV1InterfaceMockRecorder{mock} + return mock +} + +// GetServiceAccountInterface returns the mock for ServiceAccountInterface. +func (m *MockCoreV1Interface) GetServiceAccountInterface() *MockServiceAccountInterface { + return m.mockServiceAccount +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockCoreV1Interface) EXPECT() *MockCoreV1InterfaceMockRecorder { + return m.recorder +} + +// ServiceAccounts mocks base method. +func (m *MockCoreV1Interface) ServiceAccounts(_ string) v1.ServiceAccountInterface { + return m.mockServiceAccount +} + +// MockServiceAccountInterface is a mock of ServiceAccountInterface interface. +type MockServiceAccountInterface struct { + v1.ServiceAccountInterface + ctrl *gomock.Controller + recorder *MockServiceAccountInterfaceMockRecorder +} + +// MockServiceAccountInterfaceMockRecorder is the mock recorder for MockServiceAccountInterface. +type MockServiceAccountInterfaceMockRecorder struct { + mock *MockServiceAccountInterface +} + +// NewMockServiceAccountInterface creates a new mock instance. +func NewMockServiceAccountInterface(ctrl *gomock.Controller) *MockServiceAccountInterface { + mock := &MockServiceAccountInterface{ctrl: ctrl} + mock.recorder = &MockServiceAccountInterfaceMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockServiceAccountInterface) EXPECT() *MockServiceAccountInterfaceMockRecorder { + return m.recorder +} + +// CreateToken mocks base method. +func (m *MockServiceAccountInterface) CreateToken(ctx context.Context, serviceAccountName string, tokenRequest *v10.TokenRequest, opts v12.CreateOptions) (*v10.TokenRequest, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "CreateToken", ctx, serviceAccountName, tokenRequest, opts) + ret0, _ := ret[0].(*v10.TokenRequest) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// CreateToken indicates an expected call of CreateToken. 
+func (mr *MockServiceAccountInterfaceMockRecorder) CreateToken(ctx, serviceAccountName, tokenRequest, opts any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateToken", reflect.TypeOf((*MockServiceAccountInterface)(nil).CreateToken), ctx, serviceAccountName, tokenRequest, opts) +} diff --git a/pkg/scalers/authentication/authentication_helpers.go b/pkg/scalers/authentication/authentication_helpers.go index a8e8255122d..1b89c048590 100644 --- a/pkg/scalers/authentication/authentication_helpers.go +++ b/pkg/scalers/authentication/authentication_helpers.go @@ -13,10 +13,17 @@ import ( libs "github.com/dysnix/predictkube-libs/external/configs" "github.com/dysnix/predictkube-libs/external/http_transport" pConfig "github.com/prometheus/common/config" + corev1client "k8s.io/client-go/kubernetes/typed/core/v1" + corev1listers "k8s.io/client-go/listers/core/v1" kedautil "github.com/kedacore/keda/v2/pkg/util" ) +type AuthClientSet struct { + corev1client.CoreV1Interface + corev1listers.SecretLister +} + const ( AuthModesKey = "authModes" ) diff --git a/pkg/scaling/resolver/scale_resolvers.go b/pkg/scaling/resolver/scale_resolvers.go index 6ca40672fc6..96133d5b1ad 100644 --- a/pkg/scaling/resolver/scale_resolvers.go +++ b/pkg/scaling/resolver/scale_resolvers.go @@ -25,16 +25,20 @@ import ( "github.com/go-logr/logr" appsv1 "k8s.io/api/apps/v1" + authenticationv1 "k8s.io/api/authentication/v1" corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/types" corev1listers "k8s.io/client-go/listers/core/v1" + "k8s.io/utils/ptr" "knative.dev/pkg/apis/duck" duckv1 "knative.dev/pkg/apis/duck/v1" "sigs.k8s.io/controller-runtime/pkg/client" logf "sigs.k8s.io/controller-runtime/pkg/log" kedav1alpha1 "github.com/kedacore/keda/v2/apis/keda/v1alpha1" + "github.com/kedacore/keda/v2/pkg/scalers/authentication" "github.com/kedacore/keda/v2/pkg/util" ) @@ -48,9 +52,10 @@ const ( ) var ( - kedaNamespace, _ = util.GetClusterObjectNamespace() - restrictSecretAccess = util.GetRestrictSecretAccess() - log = logf.Log.WithName("scale_resolvers") + kedaNamespace, _ = util.GetClusterObjectNamespace() + restrictSecretAccess = util.GetRestrictSecretAccess() + boundServiceAccountTokenExpiry, _ = util.GetBoundServiceAccountTokenExpiry() + log = logf.Log.WithName("scale_resolvers") ) // isSecretAccessRestricted returns whether secret access need to be restricted in KEDA namespace @@ -178,9 +183,9 @@ func ResolveContainerEnv(ctx context.Context, client client.Client, logger logr. // ResolveAuthRefAndPodIdentity provides authentication parameters and pod identity needed authenticate scaler with the environment. 
func ResolveAuthRefAndPodIdentity(ctx context.Context, client client.Client, logger logr.Logger, triggerAuthRef *kedav1alpha1.AuthenticationRef, podTemplateSpec *corev1.PodTemplateSpec, - namespace string, secretsLister corev1listers.SecretLister) (map[string]string, kedav1alpha1.AuthPodIdentity, error) { + namespace string, authClientSet *authentication.AuthClientSet) (map[string]string, kedav1alpha1.AuthPodIdentity, error) { if podTemplateSpec != nil { - authParams, podIdentity, err := resolveAuthRef(ctx, client, logger, triggerAuthRef, &podTemplateSpec.Spec, namespace, secretsLister) + authParams, podIdentity, err := resolveAuthRef(ctx, client, logger, triggerAuthRef, &podTemplateSpec.Spec, namespace, authClientSet) if err != nil { return authParams, podIdentity, err @@ -220,14 +225,14 @@ func ResolveAuthRefAndPodIdentity(ctx context.Context, client client.Client, log return authParams, podIdentity, nil } - return resolveAuthRef(ctx, client, logger, triggerAuthRef, nil, namespace, secretsLister) + return resolveAuthRef(ctx, client, logger, triggerAuthRef, nil, namespace, authClientSet) } // resolveAuthRef provides authentication parameters needed authenticate scaler with the environment. // based on authentication method defined in TriggerAuthentication, authParams and podIdentity is returned func resolveAuthRef(ctx context.Context, client client.Client, logger logr.Logger, triggerAuthRef *kedav1alpha1.AuthenticationRef, podSpec *corev1.PodSpec, - namespace string, secretsLister corev1listers.SecretLister) (map[string]string, kedav1alpha1.AuthPodIdentity, error) { + namespace string, authClientSet *authentication.AuthClientSet) (map[string]string, kedav1alpha1.AuthPodIdentity, error) { result := make(map[string]string) podIdentity := kedav1alpha1.AuthPodIdentity{Provider: kedav1alpha1.PodIdentityProviderNone} var err error @@ -246,7 +251,7 @@ func resolveAuthRef(ctx context.Context, client client.Client, logger logr.Logge result[e.Parameter] = "" continue } - env, err := ResolveContainerEnv(ctx, client, logger, podSpec, e.ContainerName, namespace, secretsLister) + env, err := ResolveContainerEnv(ctx, client, logger, podSpec, e.ContainerName, namespace, authClientSet.SecretLister) if err != nil { result[e.Parameter] = "" } else { @@ -261,7 +266,7 @@ func resolveAuthRef(ctx context.Context, client client.Client, logger logr.Logge } if triggerAuthSpec.SecretTargetRef != nil { for _, e := range triggerAuthSpec.SecretTargetRef { - result[e.Parameter] = resolveAuthSecret(ctx, client, logger, e.Name, triggerNamespace, e.Key, secretsLister) + result[e.Parameter] = resolveAuthSecret(ctx, client, logger, e.Name, triggerNamespace, e.Key, authClientSet.SecretLister) } } if triggerAuthSpec.HashiCorpVault != nil && len(triggerAuthSpec.HashiCorpVault.Secrets) > 0 { @@ -287,7 +292,7 @@ func resolveAuthRef(ctx context.Context, client client.Client, logger logr.Logge } if triggerAuthSpec.AzureKeyVault != nil && len(triggerAuthSpec.AzureKeyVault.Secrets) > 0 { vaultHandler := NewAzureKeyVaultHandler(triggerAuthSpec.AzureKeyVault) - err := vaultHandler.Initialize(ctx, client, logger, triggerNamespace, secretsLister) + err := vaultHandler.Initialize(ctx, client, logger, triggerNamespace, authClientSet.SecretLister) if err != nil { logger.Error(err, "error authenticating to Azure Key Vault", "triggerAuthRef.Name", triggerAuthRef.Name) return result, podIdentity, err @@ -306,7 +311,7 @@ func resolveAuthRef(ctx context.Context, client client.Client, logger logr.Logge } if triggerAuthSpec.GCPSecretManager != 
nil && len(triggerAuthSpec.GCPSecretManager.Secrets) > 0 { secretManagerHandler := NewGCPSecretManagerHandler(triggerAuthSpec.GCPSecretManager) - err := secretManagerHandler.Initialize(ctx, client, logger, triggerNamespace, secretsLister) + err := secretManagerHandler.Initialize(ctx, client, logger, triggerNamespace, authClientSet.SecretLister) if err != nil { logger.Error(err, "error authenticating to GCP Secret Manager", "triggerAuthRef.Name", triggerAuthRef.Name) } else { @@ -327,7 +332,7 @@ func resolveAuthRef(ctx context.Context, client client.Client, logger logr.Logge } if triggerAuthSpec.AwsSecretManager != nil && len(triggerAuthSpec.AwsSecretManager.Secrets) > 0 { awsSecretManagerHandler := NewAwsSecretManagerHandler(triggerAuthSpec.AwsSecretManager) - err := awsSecretManagerHandler.Initialize(ctx, client, logger, triggerNamespace, secretsLister, podSpec) + err := awsSecretManagerHandler.Initialize(ctx, client, logger, triggerNamespace, authClientSet.SecretLister, podSpec) defer awsSecretManagerHandler.Stop() if err != nil { logger.Error(err, "error authenticating to Aws Secret Manager", "triggerAuthRef.Name", triggerAuthRef.Name) @@ -343,6 +348,11 @@ func resolveAuthRef(ctx context.Context, client client.Client, logger logr.Logge } } } + if triggerAuthSpec.BoundServiceAccountToken != nil { + for _, e := range triggerAuthSpec.BoundServiceAccountToken { + result[e.Parameter] = resolveBoundServiceAccountToken(ctx, client, logger, triggerNamespace, &e, authClientSet) + } + } } } @@ -597,6 +607,44 @@ func resolveAuthSecret(ctx context.Context, client client.Client, logger logr.Lo return string(result) } +func resolveBoundServiceAccountToken(ctx context.Context, client client.Client, logger logr.Logger, namespace string, bsat *kedav1alpha1.BoundServiceAccountToken, acs *authentication.AuthClientSet) string { + serviceAccountName := bsat.ServiceAccountName + if serviceAccountName == "" { + logger.Error(fmt.Errorf("error trying to get token"), "serviceAccountName is required") + return "" + } + var err error + + serviceAccount := &corev1.ServiceAccount{} + err = client.Get(ctx, types.NamespacedName{Name: serviceAccountName, Namespace: namespace}, serviceAccount) + if err != nil { + logger.Error(err, "error trying to get service account from namespace", "ServiceAccount.Namespace", namespace, "ServiceAccount.Name", serviceAccountName) + return "" + } + return generateBoundServiceAccountToken(ctx, serviceAccountName, namespace, acs) +} + +// generateBoundServiceAccountToken creates a Kubernetes token for a namespaced service account with a runtime-configurable expiration time and returns the token string. 
+func generateBoundServiceAccountToken(ctx context.Context, serviceAccountName, namespace string, acs *authentication.AuthClientSet) string { + expirationSeconds := ptr.To[int64](int64(boundServiceAccountTokenExpiry.Seconds())) + token, err := acs.CoreV1Interface.ServiceAccounts(namespace).CreateToken( + ctx, + serviceAccountName, + &authenticationv1.TokenRequest{ + Spec: authenticationv1.TokenRequestSpec{ + ExpirationSeconds: expirationSeconds, + }, + }, + metav1.CreateOptions{}, + ) + if err != nil { + log.V(1).Error(err, "error trying to create bound service account token for service account", "ServiceAccount.Name", serviceAccountName) + return "" + } + log.V(1).Info("Bound service account token created successfully", "ServiceAccount.Name", serviceAccountName) + return token.Status.Token +} + // resolveServiceAccountAnnotation retrieves the value of a specific annotation // from the annotations of a given Kubernetes ServiceAccount. func resolveServiceAccountAnnotation(ctx context.Context, client client.Client, name, namespace, annotation string, required bool) (string, error) { diff --git a/pkg/scaling/resolver/scale_resolvers_test.go b/pkg/scaling/resolver/scale_resolvers_test.go index 13f3c92c07a..4485d6554e0 100644 --- a/pkg/scaling/resolver/scale_resolvers_test.go +++ b/pkg/scaling/resolver/scale_resolvers_test.go @@ -23,6 +23,7 @@ import ( "github.com/google/go-cmp/cmp" "go.uber.org/mock/gomock" + authv1 "k8s.io/api/authentication/v1" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" @@ -33,6 +34,8 @@ import ( kedav1alpha1 "github.com/kedacore/keda/v2/apis/keda/v1alpha1" mock_v1 "github.com/kedacore/keda/v2/pkg/mock/mock_secretlister" + mock_serviceaccounts "github.com/kedacore/keda/v2/pkg/mock/mock_serviceaccounts" + "github.com/kedacore/keda/v2/pkg/scalers/authentication" ) var ( @@ -45,6 +48,8 @@ var ( cmName = "supercm" cmKey = "mycmkey" cmData = "cmDataHere" + bsatSAName = "bsatServiceAccount" + bsatData = "k8s-bsat-token" trueValue = true falseValue = false envKey = "test-env-key" @@ -448,6 +453,37 @@ func TestResolveAuthRef(t *testing.T) { expected: map[string]string{"host-secret": secretData, "host-configmap": cmData}, expectedPodIdentity: kedav1alpha1.AuthPodIdentity{Provider: kedav1alpha1.PodIdentityProviderNone}, }, + { + name: "triggerauth exists bound service account token", + existing: []runtime.Object{ + &kedav1alpha1.TriggerAuthentication{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: namespace, + Name: triggerAuthenticationName, + }, + Spec: kedav1alpha1.TriggerAuthenticationSpec{ + PodIdentity: &kedav1alpha1.AuthPodIdentity{ + Provider: kedav1alpha1.PodIdentityProviderNone, + }, + BoundServiceAccountToken: []kedav1alpha1.BoundServiceAccountToken{ + { + Parameter: "token", + ServiceAccountName: bsatSAName, + }, + }, + }, + }, + &corev1.ServiceAccount{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: namespace, + Name: bsatSAName, + }, + }, + }, + soar: &kedav1alpha1.AuthenticationRef{Name: triggerAuthenticationName}, + expected: map[string]string{"token": bsatData}, + expectedPodIdentity: kedav1alpha1.AuthPodIdentity{Provider: kedav1alpha1.PodIdentityProviderNone}, + }, { name: "clustertriggerauth exists, podidentity nil", existing: []runtime.Object{ @@ -608,8 +644,78 @@ func TestResolveAuthRef(t *testing.T) { expected: map[string]string{}, expectedPodIdentity: kedav1alpha1.AuthPodIdentity{Provider: kedav1alpha1.PodIdentityProviderGCP}, }, + { + name: "clustertriggerauth exists bound service account token", + 
existing: []runtime.Object{ + &kedav1alpha1.ClusterTriggerAuthentication{ + ObjectMeta: metav1.ObjectMeta{ + Name: triggerAuthenticationName, + }, + Spec: kedav1alpha1.TriggerAuthenticationSpec{ + PodIdentity: &kedav1alpha1.AuthPodIdentity{ + Provider: kedav1alpha1.PodIdentityProviderNone, + }, + BoundServiceAccountToken: []kedav1alpha1.BoundServiceAccountToken{ + { + Parameter: "token", + ServiceAccountName: bsatSAName, + }, + }, + }, + }, + &corev1.ServiceAccount{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: clusterNamespace, + Name: bsatSAName, + }, + }, + }, + soar: &kedav1alpha1.AuthenticationRef{Name: triggerAuthenticationName, Kind: "ClusterTriggerAuthentication"}, + expected: map[string]string{"token": bsatData}, + expectedPodIdentity: kedav1alpha1.AuthPodIdentity{Provider: kedav1alpha1.PodIdentityProviderNone}, + }, + { + name: "clustertriggerauth exists bound service account token but service account in the wrong namespace", + existing: []runtime.Object{ + &kedav1alpha1.ClusterTriggerAuthentication{ + ObjectMeta: metav1.ObjectMeta{ + Name: triggerAuthenticationName, + }, + Spec: kedav1alpha1.TriggerAuthenticationSpec{ + PodIdentity: &kedav1alpha1.AuthPodIdentity{ + Provider: kedav1alpha1.PodIdentityProviderNone, + }, + BoundServiceAccountToken: []kedav1alpha1.BoundServiceAccountToken{ + { + Parameter: "token", + ServiceAccountName: bsatSAName, + }, + }, + }, + }, + &corev1.ServiceAccount{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: namespace, + Name: bsatSAName, + }, + }, + }, + soar: &kedav1alpha1.AuthenticationRef{Name: triggerAuthenticationName, Kind: "ClusterTriggerAuthentication"}, + expected: map[string]string{"token": ""}, + expectedPodIdentity: kedav1alpha1.AuthPodIdentity{Provider: kedav1alpha1.PodIdentityProviderNone}, + }, } + ctrl := gomock.NewController(t) var secretsLister corev1listers.SecretLister + mockCoreV1Interface := mock_serviceaccounts.NewMockCoreV1Interface(ctrl) + mockServiceAccountInterface := mockCoreV1Interface.GetServiceAccountInterface() + tokenRequest := &authv1.TokenRequest{ + Status: authv1.TokenRequestStatus{ + Token: bsatData, + }, + } + mockServiceAccountInterface.EXPECT().CreateToken(gomock.Any(), gomock.Eq(bsatSAName), gomock.Any(), gomock.Any()).Return(tokenRequest, nil).AnyTimes() + for _, test := range tests { test := test t.Run(test.name, func(t *testing.T) { @@ -622,7 +728,11 @@ func TestResolveAuthRef(t *testing.T) { test.soar, test.podSpec, namespace, - secretsLister) + &authentication.AuthClientSet{ + SecretLister: secretsLister, + CoreV1Interface: mockCoreV1Interface, + }, + ) if err != nil && !test.isError { t.Errorf("Expected success because %s got error, %s", test.comment, err) diff --git a/pkg/scaling/scale_handler.go b/pkg/scaling/scale_handler.go index a8c50a057d0..8edaf4c0aa6 100644 --- a/pkg/scaling/scale_handler.go +++ b/pkg/scaling/scale_handler.go @@ -29,7 +29,6 @@ import ( corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types" - corev1listers "k8s.io/client-go/listers/core/v1" "k8s.io/client-go/scale" "k8s.io/client-go/tools/record" "k8s.io/metrics/pkg/apis/external_metrics" @@ -42,6 +41,7 @@ import ( "github.com/kedacore/keda/v2/pkg/fallback" "github.com/kedacore/keda/v2/pkg/metricscollector" "github.com/kedacore/keda/v2/pkg/scalers" + "github.com/kedacore/keda/v2/pkg/scalers/authentication" "github.com/kedacore/keda/v2/pkg/scalers/scalersconfig" "github.com/kedacore/keda/v2/pkg/scaling/cache" "github.com/kedacore/keda/v2/pkg/scaling/cache/metricscache" @@ -73,11 +73,11 @@ type 
scaleHandler struct { scalerCaches map[string]*cache.ScalersCache scalerCachesLock *sync.RWMutex scaledObjectsMetricCache metricscache.MetricsCache - secretsLister corev1listers.SecretLister + authClientSet *authentication.AuthClientSet } // NewScaleHandler creates a ScaleHandler object -func NewScaleHandler(client client.Client, scaleClient scale.ScalesGetter, reconcilerScheme *runtime.Scheme, globalHTTPTimeout time.Duration, recorder record.EventRecorder, secretsLister corev1listers.SecretLister) ScaleHandler { +func NewScaleHandler(client client.Client, scaleClient scale.ScalesGetter, reconcilerScheme *runtime.Scheme, globalHTTPTimeout time.Duration, recorder record.EventRecorder, authClientSet *authentication.AuthClientSet) ScaleHandler { return &scaleHandler{ client: client, scaleLoopContexts: &sync.Map{}, @@ -87,7 +87,7 @@ func NewScaleHandler(client client.Client, scaleClient scale.ScalesGetter, recon scalerCaches: map[string]*cache.ScalersCache{}, scalerCachesLock: &sync.RWMutex{}, scaledObjectsMetricCache: metricscache.NewMetricsCache(), - secretsLister: secretsLister, + authClientSet: authClientSet, } } diff --git a/pkg/scaling/scale_handler_test.go b/pkg/scaling/scale_handler_test.go index 1dd00da0fcc..cca23893f2a 100644 --- a/pkg/scaling/scale_handler_test.go +++ b/pkg/scaling/scale_handler_test.go @@ -43,6 +43,7 @@ import ( mock_scalers "github.com/kedacore/keda/v2/pkg/mock/mock_scaler" "github.com/kedacore/keda/v2/pkg/mock/mock_scaling/mock_executor" "github.com/kedacore/keda/v2/pkg/scalers" + "github.com/kedacore/keda/v2/pkg/scalers/authentication" "github.com/kedacore/keda/v2/pkg/scalers/scalersconfig" "github.com/kedacore/keda/v2/pkg/scaling/cache" "github.com/kedacore/keda/v2/pkg/scaling/cache/metricscache" @@ -538,6 +539,9 @@ func TestCheckScaledObjectScalersWithTriggerAuthError(t *testing.T) { scalerCaches: map[string]*cache.ScalersCache{}, scalerCachesLock: &sync.RWMutex{}, scaledObjectsMetricCache: metricscache.NewMetricsCache(), + authClientSet: &authentication.AuthClientSet{ + SecretLister: nil, + }, } isActive, isError, _, activeTriggers, _ := sh.getScaledObjectState(context.TODO(), &scaledObject) diff --git a/pkg/scaling/scalers_builder.go b/pkg/scaling/scalers_builder.go index 11d294bbec3..cad09e6c10e 100644 --- a/pkg/scaling/scalers_builder.go +++ b/pkg/scaling/scalers_builder.go @@ -48,7 +48,7 @@ func (h *scaleHandler) buildScalers(ctx context.Context, withTriggers *kedav1alp factory := func() (scalers.Scaler, *scalersconfig.ScalerConfig, error) { if podTemplateSpec != nil { - resolvedEnv, err = resolver.ResolveContainerEnv(ctx, h.client, logger, &podTemplateSpec.Spec, containerName, withTriggers.Namespace, h.secretsLister) + resolvedEnv, err = resolver.ResolveContainerEnv(ctx, h.client, logger, &podTemplateSpec.Spec, containerName, withTriggers.Namespace, h.authClientSet.SecretLister) if err != nil { return nil, nil, fmt.Errorf("error resolving secrets for ScaleTarget: %w", err) } @@ -72,7 +72,7 @@ func (h *scaleHandler) buildScalers(ctx context.Context, withTriggers *kedav1alp TriggerUniqueKey: fmt.Sprintf("%s-%s-%s-%d", withTriggers.Kind, withTriggers.Namespace, withTriggers.Name, triggerIndex), } - authParams, podIdentity, err := resolver.ResolveAuthRefAndPodIdentity(ctx, h.client, logger, trigger.AuthenticationRef, podTemplateSpec, withTriggers.Namespace, h.secretsLister) + authParams, podIdentity, err := resolver.ResolveAuthRefAndPodIdentity(ctx, h.client, logger, trigger.AuthenticationRef, podTemplateSpec, withTriggers.Namespace, h.authClientSet) switch 
podIdentity.Provider { case kedav1alpha1.PodIdentityProviderAwsEKS: // FIXME: Delete this for v3 diff --git a/pkg/util/env_resolver.go b/pkg/util/env_resolver.go index b8e9cfa8763..965c688a370 100644 --- a/pkg/util/env_resolver.go +++ b/pkg/util/env_resolver.go @@ -17,12 +17,16 @@ limitations under the License. package util import ( + "fmt" "os" "strconv" "time" + + "k8s.io/utils/ptr" ) const RestrictSecretAccessEnvVar = "KEDA_RESTRICT_SECRET_ACCESS" +const BoundServiceAccountTokenExpiryEnvVar = "KEDA_BOUND_SERVICE_ACCOUNT_TOKEN_EXPIRY" var clusterObjectNamespaceCache *string @@ -90,3 +94,18 @@ func GetPodNamespace() string { func GetRestrictSecretAccess() string { return os.Getenv(RestrictSecretAccessEnvVar) } + +// GetBoundServiceAccountTokenExpiry retrieves the value of the environment variable of KEDA_BOUND_SERVICE_ACCOUNT_TOKEN_EXPIRY +func GetBoundServiceAccountTokenExpiry() (*time.Duration, error) { + expiry, err := ResolveOsEnvDuration(BoundServiceAccountTokenExpiryEnvVar) + if err != nil { + return nil, err + } + if expiry == nil { + return ptr.To[time.Duration](time.Hour), nil // if blank, default to 1 hour + } + if *expiry < time.Hour || *expiry > 6*time.Hour { + return nil, fmt.Errorf("invalid value for %s: %s, must be between 1h and 6h", BoundServiceAccountTokenExpiryEnvVar, expiry.String()) // Must be between 1 hour and 6 hours + } + return expiry, nil +} diff --git a/tests/secret-providers/trigger_auth_bound_service_account_token/trigger_auth_bound_service_account_token_test.go b/tests/secret-providers/trigger_auth_bound_service_account_token/trigger_auth_bound_service_account_token_test.go new file mode 100644 index 00000000000..94a4dba01bb --- /dev/null +++ b/tests/secret-providers/trigger_auth_bound_service_account_token/trigger_auth_bound_service_account_token_test.go @@ -0,0 +1,357 @@ +//go:build e2e +// +build e2e + +package trigger_auth_bound_service_account_token_test + +import ( + "fmt" + "testing" + + "github.com/joho/godotenv" + "github.com/stretchr/testify/assert" + "k8s.io/client-go/kubernetes" + + . 
"github.com/kedacore/keda/v2/tests/helper" +) + +// Load environment variables from .env file +var _ = godotenv.Load("../../.env") + +const ( + testName = "trigger-auth-bound-service-account-token-test" +) + +var ( + testNamespace = fmt.Sprintf("%s-ns", testName) + deploymentName = fmt.Sprintf("%s-deployment", testName) + metricsServerDeploymentName = fmt.Sprintf("%s-metrics-server", testName) + triggerAuthName = fmt.Sprintf("%s-ta", testName) + scaledObjectName = fmt.Sprintf("%s-so", testName) + metricsServerServiceName = fmt.Sprintf("%s-service", testName) + metricsServerEndpoint = fmt.Sprintf("http://%s.%s.svc.cluster.local:8080/api/value", metricsServerServiceName, testNamespace) + serviceAccountName = fmt.Sprintf("%s-sa", testName) + serviceAccountTokenCreationRole = fmt.Sprintf("%s-sa-role", testName) + serviceAccountTokenCreationRoleBinding = fmt.Sprintf("%s-sa-role-binding", testName) + minReplicaCount = 0 + maxReplicaCount = 1 +) + +type templateData struct { + TestNamespace string + ServiceAccountName string + ServiceAccountTokenCreationRole string + ServiceAccountTokenCreationRoleBinding string + DeploymentName string + MetricsServerDeploymentName string + MetricsServerServiceName string + TriggerAuthName string + ScaledObjectName string + MetricsServerEndpoint string + MetricValue int + MinReplicaCount string + MaxReplicaCount string +} + +const ( + serviceAccountTemplate = ` +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{.ServiceAccountName}} + namespace: {{.TestNamespace}} +` + // arbitrary k8s rbac permissions that the test metrics-api container requires requesters to have + serviceAccountClusterRoleTemplate = ` +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: {{.ServiceAccountName}} +rules: +- nonResourceURLs: + - /api/value + verbs: + - get +` + serviceAccountClusterRoleBindingTemplate = ` +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: {{.ServiceAccountName}} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: {{.ServiceAccountName}} +subjects: +- kind: ServiceAccount + name: {{.ServiceAccountName}} + namespace: {{.TestNamespace}} +` + serviceAccountTokenCreationRoleTemplate = ` +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: {{.ServiceAccountTokenCreationRole}} + namespace: {{.TestNamespace}} +rules: +- apiGroups: + - "" + resources: + - serviceaccounts/token + verbs: + - create + - get +` + serviceAccountTokenCreationRoleBindingTemplate = ` +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: {{.ServiceAccountTokenCreationRoleBinding}} + namespace: {{.TestNamespace}} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: {{.ServiceAccountTokenCreationRole}} +subjects: +- kind: ServiceAccount + name: keda-operator + namespace: keda +` + tokenReviewAndSubjectAccessReviewClusterRoleTemplate = ` +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: token-review-and-subject-access-review-role +rules: +- apiGroups: + - "authentication.k8s.io" + resources: + - tokenreviews + verbs: + - create +- apiGroups: + - authorization.k8s.io + resources: + - subjectaccessreviews + verbs: + - create +` + tokenReviewAndSubjectAccessReviewClusterRoleBindingTemplate = ` +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: token-review-and-subject-access-review-role-binding +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: 
token-review-and-subject-access-review-role +subjects: +- kind: ServiceAccount + name: default + namespace: {{.TestNamespace}} +` + metricsServerDeploymentTemplate = ` +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app: {{.MetricsServerDeploymentName}} + name: {{.MetricsServerDeploymentName}} + namespace: {{.TestNamespace}} +spec: + replicas: 1 + selector: + matchLabels: + app: {{.MetricsServerDeploymentName}} + template: + metadata: + labels: + app: {{.MetricsServerDeploymentName}} + type: keda-testing + spec: + containers: + - name: k8s-protected-metrics-api + image: ghcr.io/kedacore/tests-bound-service-account-token:latest + imagePullPolicy: Always + securityContext: + allowPrivilegeEscalation: false + runAsNonRoot: true + capabilities: + drop: + - ALL + seccompProfile: + type: RuntimeDefault +` + metricsServerService = ` +apiVersion: v1 +kind: Service +metadata: + name: {{.MetricsServerServiceName}} + namespace: {{.TestNamespace}} +spec: + ports: + - name: http + port: 8080 + targetPort: 8080 + selector: + app: {{.MetricsServerDeploymentName}} +` + deploymentTemplate = ` +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app: {{.DeploymentName}} + name: {{.DeploymentName}} + namespace: {{.TestNamespace}} +spec: + replicas: 0 + selector: + matchLabels: + app: {{.DeploymentName}} + template: + metadata: + labels: + app: {{.DeploymentName}} + type: keda-testing + spec: + containers: + - name: prom-test-app + image: ghcr.io/kedacore/tests-prometheus:latest + imagePullPolicy: Always + securityContext: + allowPrivilegeEscalation: false + runAsNonRoot: true + capabilities: + drop: + - ALL + seccompProfile: + type: RuntimeDefault +` + triggerAuthTemplate = ` +apiVersion: keda.sh/v1alpha1 +kind: TriggerAuthentication +metadata: + name: {{.TriggerAuthName}} + namespace: {{.TestNamespace}} +spec: + boundServiceAccountToken: + - parameter: token + serviceAccountName: {{.ServiceAccountName}} +` + scaledObjectTemplate = ` +apiVersion: keda.sh/v1alpha1 +kind: ScaledObject +metadata: + name: {{.ScaledObjectName}} + namespace: {{.TestNamespace}} +spec: + scaleTargetRef: + name: {{.DeploymentName}} + pollingInterval: 5 + minReplicaCount: 0 + maxReplicaCount: 1 + cooldownPeriod: 10 + triggers: + - type: metrics-api + metadata: + targetValue: "10" + url: "{{.MetricsServerEndpoint}}" + valueLocation: 'value' + authMode: "bearer" + authenticationRef: + name: {{.TriggerAuthName}} +` + updateMetricTemplate = `apiVersion: batch/v1 +kind: Job +metadata: + name: update-metric-value + namespace: {{.TestNamespace}} +spec: + ttlSecondsAfterFinished: 0 + template: + spec: + containers: + - name: curl-client + image: curlimages/curl + imagePullPolicy: Always + command: ["curl", "-X", "POST", "{{.MetricsServerEndpoint}}/{{.MetricValue}}"] + restartPolicy: Never` +) + +func TestScaler(t *testing.T) { + // setup + // ctx := context.Background() + t.Log("--- setting up ---") + + // Create kubernetes resources + kc := GetKubernetesClient(t) + data, templates := getTemplateData() + + CreateKubernetesResources(t, kc, testNamespace, data, templates) + + // wait for metrics server to be ready; scale target to start at 0 replicas + assert.True(t, WaitForDeploymentReplicaReadyCount(t, kc, metricsServerDeploymentName, testNamespace, 1, 60, 1), + "replica count should be 1 after 1 minute") + assert.True(t, WaitForDeploymentReplicaReadyCount(t, kc, deploymentName, testNamespace, 0, 60, 1), + "replica count should be 0 after 1 minute") + + // test scaling + testScaleOut(t, kc, data) + testScaleIn(t, kc, 
data) + + // cleanup + DeleteKubernetesResources(t, testNamespace, data, templates) +} + +func getTemplateData() (templateData, []Template) { + return templateData{ + TestNamespace: testNamespace, + ServiceAccountName: serviceAccountName, + ServiceAccountTokenCreationRole: serviceAccountTokenCreationRole, + ServiceAccountTokenCreationRoleBinding: serviceAccountTokenCreationRoleBinding, + MetricsServerDeploymentName: metricsServerDeploymentName, + MetricsServerEndpoint: metricsServerEndpoint, + MetricsServerServiceName: metricsServerServiceName, + DeploymentName: deploymentName, + TriggerAuthName: triggerAuthName, + ScaledObjectName: scaledObjectName, + MinReplicaCount: fmt.Sprintf("%d", minReplicaCount), + MaxReplicaCount: fmt.Sprintf("%d", maxReplicaCount), + MetricValue: 1, + }, []Template{ + // required for the keda to act as the service account which has the necessary permissions + {Name: "serviceAccountTemplate", Config: serviceAccountTemplate}, + {Name: "serviceAccountClusterRoleTemplate", Config: serviceAccountClusterRoleTemplate}, + {Name: "serviceAccountClusterRoleBindingTemplate", Config: serviceAccountClusterRoleBindingTemplate}, + // required for the keda to request token creations for the service account + {Name: "serviceAccountTokenCreationRoleTemplate", Config: serviceAccountTokenCreationRoleTemplate}, + {Name: "serviceAccountTokenCreationRoleBindingTemplate", Config: serviceAccountTokenCreationRoleBindingTemplate}, + // required for the metrics-api container to delegate authenticate/authorize requests to k8s apiserver + {Name: "tokenReviewAndSubjectAccessReviewClusterRoleTemplate", Config: tokenReviewAndSubjectAccessReviewClusterRoleTemplate}, + {Name: "tokenReviewAndSubjectAccessReviewClusterRoleBindingTemplate", Config: tokenReviewAndSubjectAccessReviewClusterRoleBindingTemplate}, + {Name: "metricsServerDeploymentTemplate", Config: metricsServerDeploymentTemplate}, + {Name: "metricsServerService", Config: metricsServerService}, + // scale target and trigger auths + {Name: "deploymentTemplate", Config: deploymentTemplate}, + {Name: "triggerAuthTemplate", Config: triggerAuthTemplate}, + {Name: "scaledObjectTemplate", Config: scaledObjectTemplate}, + } +} + +func testScaleOut(t *testing.T, kc *kubernetes.Clientset, data templateData) { + t.Log("--- testing scale out ---") + data.MetricValue = 50 + KubectlReplaceWithTemplate(t, data, "updateMetricTemplate", updateMetricTemplate) + + assert.True(t, WaitForDeploymentReplicaReadyCount(t, kc, deploymentName, testNamespace, maxReplicaCount, 60, maxReplicaCount), + "replica count should be %d after 3 minutes", maxReplicaCount) +} + +func testScaleIn(t *testing.T, kc *kubernetes.Clientset, data templateData) { + t.Log("--- testing scale in ---") + data.MetricValue = 0 + KubectlReplaceWithTemplate(t, data, "updateMetricTemplate", updateMetricTemplate) + + assert.True(t, WaitForDeploymentReplicaReadyCount(t, kc, deploymentName, testNamespace, minReplicaCount, 60, 3), + "replica count should be %d after 3 minutes", minReplicaCount) +}
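For readers less familiar with the TokenRequest API that resolveBoundServiceAccountToken builds on, the call it issues boils down to the standalone client-go sketch below. The in-cluster config, the function name, and the hard-coded 3600-second expiry are illustrative assumptions; in KEDA the expiry comes from GetBoundServiceAccountTokenExpiry and the client is the operator's own clientset wired through AuthClientSet.

// Sketch only (not part of this PR): requesting a bound ServiceAccount token
// with plain client-go, mirroring generateBoundServiceAccountToken above.
package main

import (
	"context"
	"fmt"

	authenticationv1 "k8s.io/api/authentication/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"
	"k8s.io/utils/ptr"
)

func requestBoundToken(ctx context.Context, namespace, serviceAccountName string) (string, error) {
	cfg, err := rest.InClusterConfig()
	if err != nil {
		return "", fmt.Errorf("loading in-cluster config: %w", err)
	}
	clientset, err := kubernetes.NewForConfig(cfg)
	if err != nil {
		return "", fmt.Errorf("creating clientset: %w", err)
	}
	// Ask the API server for a token bound to the ServiceAccount, valid for 1h
	// (the default KEDA uses when KEDA_BOUND_SERVICE_ACCOUNT_TOKEN_EXPIRY is unset).
	tr, err := clientset.CoreV1().ServiceAccounts(namespace).CreateToken(
		ctx,
		serviceAccountName,
		&authenticationv1.TokenRequest{
			Spec: authenticationv1.TokenRequestSpec{
				ExpirationSeconds: ptr.To[int64](3600),
			},
		},
		metav1.CreateOptions{},
	)
	if err != nil {
		return "", fmt.Errorf("creating bound token for %s/%s: %w", namespace, serviceAccountName, err)
	}
	return tr.Status.Token, nil
}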
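Similarly, the expiry handling added in pkg/util/env_resolver.go accepts only durations between 1h and 6h and falls back to 1h when the variable is unset. Below is a hedged, test-style sketch of that contract; the test name and cases are illustrative, and the empty-value case assumes ResolveOsEnvDuration treats an empty string as unset.

// Sketch only: exercising the documented range of KEDA_BOUND_SERVICE_ACCOUNT_TOKEN_EXPIRY.
package util_test

import (
	"testing"
	"time"

	"github.com/kedacore/keda/v2/pkg/util"
)

func TestBoundServiceAccountTokenExpiryRange(t *testing.T) {
	cases := []struct {
		value   string
		want    time.Duration
		wantErr bool
	}{
		{"", time.Hour, false},       // unset/blank -> defaults to 1h
		{"2h", 2 * time.Hour, false}, // inside the allowed 1h-6h window
		{"30m", 0, true},             // below 1h -> rejected
		{"7h", 0, true},              // above 6h -> rejected
		{"nonsense", 0, true},        // unparsable -> rejected
	}
	for _, c := range cases {
		t.Setenv(util.BoundServiceAccountTokenExpiryEnvVar, c.value)
		got, err := util.GetBoundServiceAccountTokenExpiry()
		if (err != nil) != c.wantErr {
			t.Fatalf("value %q: unexpected error state: %v", c.value, err)
		}
		if !c.wantErr && *got != c.want {
			t.Fatalf("value %q: expected %s, got %s", c.value, c.want, *got)
		}
	}
}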