diff --git a/api/v1beta3/tortoise_types.go b/api/v1beta3/tortoise_types.go
index f2350af9..6120b57d 100644
--- a/api/v1beta3/tortoise_types.go
+++ b/api/v1beta3/tortoise_types.go
@@ -344,8 +344,9 @@ type TortoiseConditionType string
 
 const (
 	// TortoiseConditionTypeFailedToReconcile means tortoise failed to reconcile due to some reasons.
-	TortoiseConditionTypeFailedToReconcile           TortoiseConditionType = "FailedToReconcile"
-	TortoiseConditionTypeHPATargetUtilizationUpdated TortoiseConditionType = "HPATargetUtilizationUpdated"
+	TortoiseConditionTypeFailedToReconcile                   TortoiseConditionType = "FailedToReconcile"
+	TortoiseConditionTypeHPATargetUtilizationUpdated         TortoiseConditionType = "HPATargetUtilizationUpdated"
+	TortoiseConditionTypeScaledUpBasedOnPreferredMaxReplicas TortoiseConditionType = "ScaledUpBasedOnPreferredMaxReplicas"
 )
 
 type TortoiseCondition struct {
diff --git a/controllers/testdata/mutable-autoscalingpolicy-add-another-horizontal/after/tortoise.yaml b/controllers/testdata/mutable-autoscalingpolicy-add-another-horizontal/after/tortoise.yaml
index fa225e85..a90dcb9b 100644
--- a/controllers/testdata/mutable-autoscalingpolicy-add-another-horizontal/after/tortoise.yaml
+++ b/controllers/testdata/mutable-autoscalingpolicy-add-another-horizontal/after/tortoise.yaml
@@ -61,12 +61,23 @@ status:
         cpu: "4"
         memory: 4Gi
     tortoiseConditions:
+    - lastTransitionTime: "2023-01-01T00:00:00Z"
+      lastUpdateTime: "2023-01-01T00:00:00Z"
+      message: the current number of replicas is not bigger than the preferred max
+        replica number
+      reason: ScaledUpBasedOnPreferredMaxReplicas
+      status: "False"
+      type: ScaledUpBasedOnPreferredMaxReplicas
     - lastTransitionTime: "2023-01-01T00:00:00Z"
       lastUpdateTime: "2023-01-01T00:00:00Z"
       message: HPA target utilization is updated
       reason: HPATargetUtilizationUpdated
       status: "True"
       type: HPATargetUtilizationUpdated
+    - lastTransitionTime: "2023-01-01T00:00:00Z"
+      lastUpdateTime: "2023-01-01T00:00:00Z"
+      status: "False"
+      type: FailedToReconcile
   containerResourcePhases:
   - containerName: app
     resourcePhases:
diff --git a/controllers/testdata/mutable-autoscalingpolicy-no-hpa-and-add-horizontal/after/tortoise.yaml b/controllers/testdata/mutable-autoscalingpolicy-no-hpa-and-add-horizontal/after/tortoise.yaml
index b282d125..0955b0d5 100644
--- a/controllers/testdata/mutable-autoscalingpolicy-no-hpa-and-add-horizontal/after/tortoise.yaml
+++ b/controllers/testdata/mutable-autoscalingpolicy-no-hpa-and-add-horizontal/after/tortoise.yaml
@@ -61,12 +61,23 @@ status:
         cpu: "3"
         memory: 3Gi
     tortoiseConditions:
+    - lastTransitionTime: "2023-01-01T00:00:00Z"
+      lastUpdateTime: "2023-01-01T00:00:00Z"
+      message: the current number of replicas is not bigger than the preferred max
+        replica number
+      reason: ScaledUpBasedOnPreferredMaxReplicas
+      status: "False"
+      type: ScaledUpBasedOnPreferredMaxReplicas
     - lastTransitionTime: "2023-01-01T00:00:00Z"
       lastUpdateTime: "2023-01-01T00:00:00Z"
       message: HPA target utilization is updated
       reason: HPATargetUtilizationUpdated
       status: "True"
       type: HPATargetUtilizationUpdated
+    - lastTransitionTime: "2023-01-01T00:00:00Z"
+      lastUpdateTime: "2023-01-01T00:00:00Z"
+      status: "False"
+      type: FailedToReconcile
   containerResourcePhases:
   - containerName: app
     resourcePhases:
diff --git a/controllers/testdata/mutable-autoscalingpolicy-remove-horizontal-2/after/tortoise.yaml b/controllers/testdata/mutable-autoscalingpolicy-remove-horizontal-2/after/tortoise.yaml
index a4f398b5..5df2f89e 100644
--- a/controllers/testdata/mutable-autoscalingpolicy-remove-horizontal-2/after/tortoise.yaml
+++ b/controllers/testdata/mutable-autoscalingpolicy-remove-horizontal-2/after/tortoise.yaml
@@ -70,7 +70,11 @@ status:
       resource:
         cpu: "3"
         memory: 3Gi
-    tortoiseConditions: null
+    tortoiseConditions:
+    - lastTransitionTime: "2023-01-01T00:00:00Z"
+      lastUpdateTime: "2023-01-01T00:00:00Z"
+      status: "False"
+      type: FailedToReconcile
   containerResourcePhases:
   - containerName: app
     resourcePhases:
diff --git a/controllers/testdata/mutable-autoscalingpolicy-remove-horizontal/after/tortoise.yaml b/controllers/testdata/mutable-autoscalingpolicy-remove-horizontal/after/tortoise.yaml
index 423905bc..88b37e2c 100644
--- a/controllers/testdata/mutable-autoscalingpolicy-remove-horizontal/after/tortoise.yaml
+++ b/controllers/testdata/mutable-autoscalingpolicy-remove-horizontal/after/tortoise.yaml
@@ -69,7 +69,11 @@ status:
       resource:
         cpu: "3"
         memory: 3Gi
-    tortoiseConditions: null
+    tortoiseConditions:
+    - lastTransitionTime: "2023-01-01T00:00:00Z"
+      lastUpdateTime: "2023-01-01T00:00:00Z"
+      status: "False"
+      type: FailedToReconcile
   containerResourcePhases:
   - containerName: app
     resourcePhases:
diff --git a/controllers/testdata/reconcile-for-the-istio-enabled-pod-working/after/tortoise.yaml b/controllers/testdata/reconcile-for-the-istio-enabled-pod-working/after/tortoise.yaml
index 30b2c104..8ea8f7f2 100644
--- a/controllers/testdata/reconcile-for-the-istio-enabled-pod-working/after/tortoise.yaml
+++ b/controllers/testdata/reconcile-for-the-istio-enabled-pod-working/after/tortoise.yaml
@@ -61,12 +61,23 @@ status:
         cpu: "4"
         memory: 3Gi
     tortoiseConditions:
+    - lastTransitionTime: "2023-01-01T00:00:00Z"
+      lastUpdateTime: "2023-01-01T00:00:00Z"
+      message: the current number of replicas is not bigger than the preferred max
+        replica number
+      reason: ScaledUpBasedOnPreferredMaxReplicas
+      status: "False"
+      type: ScaledUpBasedOnPreferredMaxReplicas
     - lastTransitionTime: "2023-01-01T00:00:00Z"
       lastUpdateTime: "2023-01-01T00:00:00Z"
       message: HPA target utilization is updated
       reason: HPATargetUtilizationUpdated
       status: "True"
       type: HPATargetUtilizationUpdated
+    - lastTransitionTime: "2023-01-01T00:00:00Z"
+      lastUpdateTime: "2023-01-01T00:00:00Z"
+      status: "False"
+      type: FailedToReconcile
   containerResourcePhases:
   - containerName: app
     resourcePhases:
diff --git a/controllers/testdata/reconcile-for-the-multiple-containers-pod-all-off/after/tortoise.yaml b/controllers/testdata/reconcile-for-the-multiple-containers-pod-all-off/after/tortoise.yaml
index 89612dc9..d321d5b9 100644
--- a/controllers/testdata/reconcile-for-the-multiple-containers-pod-all-off/after/tortoise.yaml
+++ b/controllers/testdata/reconcile-for-the-multiple-containers-pod-all-off/after/tortoise.yaml
@@ -60,7 +60,11 @@ status:
      resource:
        cpu: "4"
        memory: 4Gi
-    tortoiseConditions: null
+    tortoiseConditions:
+    - lastTransitionTime: "2023-01-01T00:00:00Z"
+      lastUpdateTime: "2023-01-01T00:00:00Z"
+      status: "False"
+      type: FailedToReconcile
   containerResourcePhases:
   - containerName: app
     resourcePhases:
diff --git a/controllers/testdata/reconcile-for-the-multiple-containers-pod-all-vertical/after/tortoise.yaml b/controllers/testdata/reconcile-for-the-multiple-containers-pod-all-vertical/after/tortoise.yaml
index 1c97127a..7593d013 100644
--- a/controllers/testdata/reconcile-for-the-multiple-containers-pod-all-vertical/after/tortoise.yaml
+++ b/controllers/testdata/reconcile-for-the-multiple-containers-pod-all-vertical/after/tortoise.yaml
@@ -60,7 +60,11 @@ status:
      resource:
cpu: "3" memory: 3Gi - tortoiseConditions: null + tortoiseConditions: + - lastTransitionTime: "2023-01-01T00:00:00Z" + lastUpdateTime: "2023-01-01T00:00:00Z" + status: "False" + type: FailedToReconcile containerResourcePhases: - containerName: app resourcePhases: diff --git a/controllers/testdata/reconcile-for-the-multiple-containers-pod-during-emergency/after/tortoise.yaml b/controllers/testdata/reconcile-for-the-multiple-containers-pod-during-emergency/after/tortoise.yaml index f680c8bd..a603763b 100644 --- a/controllers/testdata/reconcile-for-the-multiple-containers-pod-during-emergency/after/tortoise.yaml +++ b/controllers/testdata/reconcile-for-the-multiple-containers-pod-during-emergency/after/tortoise.yaml @@ -68,6 +68,10 @@ status: reason: HPATargetUtilizationUpdated status: "True" type: HPATargetUtilizationUpdated + - lastTransitionTime: "2023-01-01T00:00:00Z" + lastUpdateTime: "2023-01-01T00:00:00Z" + status: "False" + type: FailedToReconcile containerResourcePhases: - containerName: app resourcePhases: diff --git a/controllers/testdata/reconcile-for-the-multiple-containers-pod-emergency-started/after/tortoise.yaml b/controllers/testdata/reconcile-for-the-multiple-containers-pod-emergency-started/after/tortoise.yaml index f680c8bd..a603763b 100644 --- a/controllers/testdata/reconcile-for-the-multiple-containers-pod-emergency-started/after/tortoise.yaml +++ b/controllers/testdata/reconcile-for-the-multiple-containers-pod-emergency-started/after/tortoise.yaml @@ -68,6 +68,10 @@ status: reason: HPATargetUtilizationUpdated status: "True" type: HPATargetUtilizationUpdated + - lastTransitionTime: "2023-01-01T00:00:00Z" + lastUpdateTime: "2023-01-01T00:00:00Z" + status: "False" + type: FailedToReconcile containerResourcePhases: - containerName: app resourcePhases: diff --git a/controllers/testdata/reconcile-for-the-multiple-containers-pod-one-off/after/tortoise.yaml b/controllers/testdata/reconcile-for-the-multiple-containers-pod-one-off/after/tortoise.yaml index abbd3d26..b329bd2b 100644 --- a/controllers/testdata/reconcile-for-the-multiple-containers-pod-one-off/after/tortoise.yaml +++ b/controllers/testdata/reconcile-for-the-multiple-containers-pod-one-off/after/tortoise.yaml @@ -61,12 +61,23 @@ status: cpu: "4" memory: 4Gi tortoiseConditions: + - lastTransitionTime: "2023-01-01T00:00:00Z" + lastUpdateTime: "2023-01-01T00:00:00Z" + message: the current number of replicas is not bigger than the preferred max + replica number + reason: ScaledUpBasedOnPreferredMaxReplicas + status: "False" + type: ScaledUpBasedOnPreferredMaxReplicas - lastTransitionTime: "2023-01-01T00:00:00Z" lastUpdateTime: "2023-01-01T00:00:00Z" message: HPA target utilization is updated reason: HPATargetUtilizationUpdated status: "True" type: HPATargetUtilizationUpdated + - lastTransitionTime: "2023-01-01T00:00:00Z" + lastUpdateTime: "2023-01-01T00:00:00Z" + status: "False" + type: FailedToReconcile containerResourcePhases: - containerName: app resourcePhases: diff --git a/controllers/testdata/reconcile-for-the-multiple-containers-pod-suggested-too-small/after/tortoise.yaml b/controllers/testdata/reconcile-for-the-multiple-containers-pod-suggested-too-small/after/tortoise.yaml index 0a2411e2..df752784 100644 --- a/controllers/testdata/reconcile-for-the-multiple-containers-pod-suggested-too-small/after/tortoise.yaml +++ b/controllers/testdata/reconcile-for-the-multiple-containers-pod-suggested-too-small/after/tortoise.yaml @@ -61,12 +61,23 @@ status: cpu: 100m memory: 11Mi tortoiseConditions: + - lastTransitionTime: 
"2023-01-01T00:00:00Z" + lastUpdateTime: "2023-01-01T00:00:00Z" + message: the current number of replicas is not bigger than the preferred max + replica number + reason: ScaledUpBasedOnPreferredMaxReplicas + status: "False" + type: ScaledUpBasedOnPreferredMaxReplicas - lastTransitionTime: "2023-01-01T00:00:00Z" lastUpdateTime: "2023-01-01T00:00:00Z" message: HPA target utilization is updated reason: HPATargetUtilizationUpdated status: "True" type: HPATargetUtilizationUpdated + - lastTransitionTime: "2023-01-01T00:00:00Z" + lastUpdateTime: "2023-01-01T00:00:00Z" + status: "False" + type: FailedToReconcile containerResourcePhases: - containerName: app resourcePhases: diff --git a/controllers/testdata/reconcile-for-the-multiple-containers-pod-working/after/tortoise.yaml b/controllers/testdata/reconcile-for-the-multiple-containers-pod-working/after/tortoise.yaml index 30b2c104..8ea8f7f2 100644 --- a/controllers/testdata/reconcile-for-the-multiple-containers-pod-working/after/tortoise.yaml +++ b/controllers/testdata/reconcile-for-the-multiple-containers-pod-working/after/tortoise.yaml @@ -61,12 +61,23 @@ status: cpu: "4" memory: 3Gi tortoiseConditions: + - lastTransitionTime: "2023-01-01T00:00:00Z" + lastUpdateTime: "2023-01-01T00:00:00Z" + message: the current number of replicas is not bigger than the preferred max + replica number + reason: ScaledUpBasedOnPreferredMaxReplicas + status: "False" + type: ScaledUpBasedOnPreferredMaxReplicas - lastTransitionTime: "2023-01-01T00:00:00Z" lastUpdateTime: "2023-01-01T00:00:00Z" message: HPA target utilization is updated reason: HPATargetUtilizationUpdated status: "True" type: HPATargetUtilizationUpdated + - lastTransitionTime: "2023-01-01T00:00:00Z" + lastUpdateTime: "2023-01-01T00:00:00Z" + status: "False" + type: FailedToReconcile containerResourcePhases: - containerName: app resourcePhases: diff --git a/controllers/testdata/reconcile-for-the-single-container-pod-backtonormal/after/tortoise.yaml b/controllers/testdata/reconcile-for-the-single-container-pod-backtonormal/after/tortoise.yaml index 6b581fe2..ee70041b 100644 --- a/controllers/testdata/reconcile-for-the-single-container-pod-backtonormal/after/tortoise.yaml +++ b/controllers/testdata/reconcile-for-the-single-container-pod-backtonormal/after/tortoise.yaml @@ -45,6 +45,10 @@ status: reason: HPATargetUtilizationUpdated status: "True" type: HPATargetUtilizationUpdated + - lastTransitionTime: "2023-01-01T00:00:00Z" + lastUpdateTime: "2023-01-01T00:00:00Z" + status: "False" + type: FailedToReconcile containerResourcePhases: - containerName: app resourcePhases: diff --git a/controllers/testdata/reconcile-for-the-single-container-pod-dryrun/after/tortoise.yaml b/controllers/testdata/reconcile-for-the-single-container-pod-dryrun/after/tortoise.yaml index d114d0dc..b6713b8b 100644 --- a/controllers/testdata/reconcile-for-the-single-container-pod-dryrun/after/tortoise.yaml +++ b/controllers/testdata/reconcile-for-the-single-container-pod-dryrun/after/tortoise.yaml @@ -38,7 +38,18 @@ status: resource: cpu: "4" memory: 4Gi - tortoiseConditions: null + tortoiseConditions: + - lastTransitionTime: "2023-01-01T00:00:00Z" + lastUpdateTime: "2023-01-01T00:00:00Z" + message: the current number of replicas is not bigger than the preferred max + replica number + reason: ScaledUpBasedOnPreferredMaxReplicas + status: "False" + type: ScaledUpBasedOnPreferredMaxReplicas + - lastTransitionTime: "2023-01-01T00:00:00Z" + lastUpdateTime: "2023-01-01T00:00:00Z" + status: "False" + type: FailedToReconcile 
   containerResourcePhases:
   - containerName: app
     resourcePhases:
diff --git a/controllers/testdata/reconcile-for-the-single-container-pod-during-emergency/after/tortoise.yaml b/controllers/testdata/reconcile-for-the-single-container-pod-during-emergency/after/tortoise.yaml
index aade8f96..3fd2cb78 100644
--- a/controllers/testdata/reconcile-for-the-single-container-pod-during-emergency/after/tortoise.yaml
+++ b/controllers/testdata/reconcile-for-the-single-container-pod-during-emergency/after/tortoise.yaml
@@ -45,6 +45,10 @@ status:
       reason: HPATargetUtilizationUpdated
       status: "True"
       type: HPATargetUtilizationUpdated
+    - lastTransitionTime: "2023-01-01T00:00:00Z"
+      lastUpdateTime: "2023-01-01T00:00:00Z"
+      status: "False"
+      type: FailedToReconcile
   containerResourcePhases:
   - containerName: app
     resourcePhases:
diff --git a/controllers/testdata/reconcile-for-the-single-container-pod-emergency-started/after/tortoise.yaml b/controllers/testdata/reconcile-for-the-single-container-pod-emergency-started/after/tortoise.yaml
index 89b14c23..52d0e2d6 100644
--- a/controllers/testdata/reconcile-for-the-single-container-pod-emergency-started/after/tortoise.yaml
+++ b/controllers/testdata/reconcile-for-the-single-container-pod-emergency-started/after/tortoise.yaml
@@ -45,6 +45,10 @@ status:
       reason: HPATargetUtilizationUpdated
       status: "True"
       type: HPATargetUtilizationUpdated
+    - lastTransitionTime: "2023-01-01T00:00:00Z"
+      lastUpdateTime: "2023-01-01T00:00:00Z"
+      status: "False"
+      type: FailedToReconcile
   containerResourcePhases:
   - containerName: app
     resourcePhases:
diff --git a/controllers/testdata/reconcile-for-the-single-container-pod-gathering-data-finished/after/tortoise.yaml b/controllers/testdata/reconcile-for-the-single-container-pod-gathering-data-finished/after/tortoise.yaml
index c1ec0bcc..3229e293 100644
--- a/controllers/testdata/reconcile-for-the-single-container-pod-gathering-data-finished/after/tortoise.yaml
+++ b/controllers/testdata/reconcile-for-the-single-container-pod-gathering-data-finished/after/tortoise.yaml
@@ -38,12 +38,23 @@ status:
        cpu: "4"
        memory: 3Gi
     tortoiseConditions:
+    - lastTransitionTime: "2023-01-01T00:00:00Z"
+      lastUpdateTime: "2023-01-01T00:00:00Z"
+      message: the current number of replicas is not bigger than the preferred max
+        replica number
+      reason: ScaledUpBasedOnPreferredMaxReplicas
+      status: "False"
+      type: ScaledUpBasedOnPreferredMaxReplicas
     - lastTransitionTime: "2023-01-01T00:00:00Z"
       lastUpdateTime: "2023-01-01T00:00:00Z"
       message: HPA target utilization is updated
       reason: HPATargetUtilizationUpdated
       status: "True"
       type: HPATargetUtilizationUpdated
+    - lastTransitionTime: "2023-01-01T00:00:00Z"
+      lastUpdateTime: "2023-01-01T00:00:00Z"
+      status: "False"
+      type: FailedToReconcile
   containerResourcePhases:
   - containerName: app
     resourcePhases:
diff --git a/controllers/testdata/reconcile-for-the-single-container-pod-gathering-data/after/tortoise.yaml b/controllers/testdata/reconcile-for-the-single-container-pod-gathering-data/after/tortoise.yaml
index f26e9ba0..8647dfd0 100644
--- a/controllers/testdata/reconcile-for-the-single-container-pod-gathering-data/after/tortoise.yaml
+++ b/controllers/testdata/reconcile-for-the-single-container-pod-gathering-data/after/tortoise.yaml
@@ -37,7 +37,18 @@ status:
      resource:
        cpu: "4"
        memory: 4Gi
-    tortoiseConditions: null
+    tortoiseConditions:
+    - lastTransitionTime: "2023-01-01T00:00:00Z"
+      lastUpdateTime: "2023-01-01T00:00:00Z"
+      message: the current number of replicas is not bigger than the preferred max
+        replica number
+      reason: ScaledUpBasedOnPreferredMaxReplicas
+      status: "False"
+      type: ScaledUpBasedOnPreferredMaxReplicas
+    - lastTransitionTime: "2023-01-01T00:00:00Z"
+      lastUpdateTime: "2023-01-01T00:00:00Z"
+      status: "False"
+      type: FailedToReconcile
   containerResourcePhases:
   - containerName: app
     resourcePhases:
diff --git a/controllers/testdata/reconcile-for-the-single-container-pod-hpa-changed/after/tortoise.yaml b/controllers/testdata/reconcile-for-the-single-container-pod-hpa-changed/after/tortoise.yaml
index dc809665..a7bf88f8 100644
--- a/controllers/testdata/reconcile-for-the-single-container-pod-hpa-changed/after/tortoise.yaml
+++ b/controllers/testdata/reconcile-for-the-single-container-pod-hpa-changed/after/tortoise.yaml
@@ -39,12 +39,23 @@ status:
        cpu: "3"
        memory: 4Gi
     tortoiseConditions:
+    - lastTransitionTime: "2023-01-01T00:00:00Z"
+      lastUpdateTime: "2023-01-01T00:00:00Z"
+      message: the current number of replicas is not bigger than the preferred max
+        replica number
+      reason: ScaledUpBasedOnPreferredMaxReplicas
+      status: "False"
+      type: ScaledUpBasedOnPreferredMaxReplicas
     - lastTransitionTime: "2023-01-01T00:00:00Z"
       lastUpdateTime: "2023-01-01T00:00:00Z"
       message: HPA target utilization is updated
       reason: HPATargetUtilizationUpdated
       status: "True"
       type: HPATargetUtilizationUpdated
+    - lastTransitionTime: "2023-01-01T00:00:00Z"
+      lastUpdateTime: "2023-01-01T00:00:00Z"
+      status: "False"
+      type: FailedToReconcile
   containerResourcePhases:
   - containerName: app
     resourcePhases:
diff --git a/controllers/testdata/reconcile-for-the-single-container-pod-initializing/after/tortoise.yaml b/controllers/testdata/reconcile-for-the-single-container-pod-initializing/after/tortoise.yaml
index c4ffa3c5..20c0439f 100644
--- a/controllers/testdata/reconcile-for-the-single-container-pod-initializing/after/tortoise.yaml
+++ b/controllers/testdata/reconcile-for-the-single-container-pod-initializing/after/tortoise.yaml
@@ -38,7 +38,11 @@ status:
      resource:
        cpu: "4"
        memory: 4Gi
-    tortoiseConditions: null
+    tortoiseConditions:
+    - lastTransitionTime: "2023-01-01T00:00:00Z"
+      lastUpdateTime: "2023-01-01T00:00:00Z"
+      status: "False"
+      type: FailedToReconcile
   containerResourcePhases:
   - containerName: app
     resourcePhases:
diff --git a/controllers/testdata/reconcile-for-the-single-container-pod-partly-working/after/tortoise.yaml b/controllers/testdata/reconcile-for-the-single-container-pod-partly-working/after/tortoise.yaml
index 7095d310..583afb08 100644
--- a/controllers/testdata/reconcile-for-the-single-container-pod-partly-working/after/tortoise.yaml
+++ b/controllers/testdata/reconcile-for-the-single-container-pod-partly-working/after/tortoise.yaml
@@ -38,12 +38,23 @@ status:
        cpu: "4"
        memory: 3Gi
     tortoiseConditions:
+    - lastTransitionTime: "2023-01-01T00:00:00Z"
+      lastUpdateTime: "2023-01-01T00:00:00Z"
+      message: the current number of replicas is not bigger than the preferred max
+        replica number
+      reason: ScaledUpBasedOnPreferredMaxReplicas
+      status: "False"
+      type: ScaledUpBasedOnPreferredMaxReplicas
     - lastTransitionTime: "2023-01-01T00:00:00Z"
       lastUpdateTime: "2023-01-01T00:00:00Z"
       message: HPA target utilization is updated
       reason: HPATargetUtilizationUpdated
       status: "True"
       type: HPATargetUtilizationUpdated
+    - lastTransitionTime: "2023-01-01T00:00:00Z"
+      lastUpdateTime: "2023-01-01T00:00:00Z"
+      status: "False"
+      type: FailedToReconcile
   containerResourcePhases:
   - containerName: app
     resourcePhases:
diff --git a/controllers/testdata/reconcile-for-the-single-container-pod-working/after/tortoise.yaml b/controllers/testdata/reconcile-for-the-single-container-pod-working/after/tortoise.yaml
index 413b73ef..2bc9eabe 100644
--- a/controllers/testdata/reconcile-for-the-single-container-pod-working/after/tortoise.yaml
+++ b/controllers/testdata/reconcile-for-the-single-container-pod-working/after/tortoise.yaml
@@ -38,12 +38,23 @@ status:
        cpu: "4"
        memory: 3Gi
     tortoiseConditions:
+    - lastTransitionTime: "2023-01-01T00:00:00Z"
+      lastUpdateTime: "2023-01-01T00:00:00Z"
+      message: the current number of replicas is not bigger than the preferred max
+        replica number
+      reason: ScaledUpBasedOnPreferredMaxReplicas
+      status: "False"
+      type: ScaledUpBasedOnPreferredMaxReplicas
     - lastTransitionTime: "2023-01-01T00:00:00Z"
       lastUpdateTime: "2023-01-01T00:00:00Z"
       message: HPA target utilization is updated
       reason: HPATargetUtilizationUpdated
       status: "True"
       type: HPATargetUtilizationUpdated
+    - lastTransitionTime: "2023-01-01T00:00:00Z"
+      lastUpdateTime: "2023-01-01T00:00:00Z"
+      status: "False"
+      type: FailedToReconcile
   containerResourcePhases:
   - containerName: app
     resourcePhases:
diff --git a/pkg/recommender/recommender.go b/pkg/recommender/recommender.go
index 101aaafc..9f0ffb96 100644
--- a/pkg/recommender/recommender.go
+++ b/pkg/recommender/recommender.go
@@ -20,6 +20,7 @@ import (
 	"github.com/mercari/tortoise/pkg/event"
 	"github.com/mercari/tortoise/pkg/features"
 	hpaservice "github.com/mercari/tortoise/pkg/hpa"
+	"github.com/mercari/tortoise/pkg/utils"
 )
 
 type Service struct {
@@ -96,7 +97,7 @@ func New(
 	}
 }
 
-func (s *Service) updateVPARecommendation(ctx context.Context, tortoise *v1beta3.Tortoise, hpa *v2.HorizontalPodAutoscaler, replicaNum int32) (*v1beta3.Tortoise, error) {
+func (s *Service) updateVPARecommendation(ctx context.Context, tortoise *v1beta3.Tortoise, hpa *v2.HorizontalPodAutoscaler, replicaNum int32, now time.Time) (*v1beta3.Tortoise, error) {
 	logger := log.FromContext(ctx)
 	requestMap := map[string]map[corev1.ResourceName]resource.Quantity{}
 	for _, r := range tortoise.Status.Conditions.ContainerResourceRequests {
@@ -151,7 +152,10 @@ func (s *Service) updateVPARecommendation(ctx context.Context, tortoise *v1beta3
 			if !ok {
 				return tortoise, fmt.Errorf("no %s recommendation from VPA for the container %s", k, r.ContainerName)
 			}
-			newSize, reason, err := s.calculateBestNewSize(ctx, tortoise, p, r.ContainerName, recom, k, hpa, replicaNum, req, minAllocatedResourcesMap[r.ContainerName])
+			var newSize int64
+			var reason string
+			var err error
+			newSize, reason, tortoise, err = s.calculateBestNewSize(ctx, tortoise, p, r.ContainerName, recom, k, hpa, replicaNum, req, minAllocatedResourcesMap[r.ContainerName], now)
 			if err != nil {
 				return tortoise, err
 			}
@@ -174,12 +178,38 @@
 	return tortoise, nil
 }
 
+func allowVerticalScalingBasedOnPreferredMaxReplicas(tortoise *v1beta3.Tortoise, now time.Time) bool {
+	for _, c := range tortoise.Status.Conditions.TortoiseConditions {
+		if c.Type == v1beta3.TortoiseConditionTypeScaledUpBasedOnPreferredMaxReplicas && c.Status == v1.ConditionTrue {
+			if c.LastTransitionTime.Add(30*time.Minute).After(now) && !c.LastTransitionTime.Time.Equal(now) {
+				// If the last transition time is within 30 minutes,
+				// we don't allow the vertical scaling based on the preferred max replicas.
+				return false
+			}
+		}
+	}
+
+	return true
+}
+
 // calculateBestNewSize calculates the best new resource request based on the current replica number and the recommended resource request.
 // Even if the autoscaling policy is Horizontal, this function may suggest the vertical scaling, see comments in the function.
-func (s *Service) calculateBestNewSize(ctx context.Context, tortoise *v1beta3.Tortoise, p v1beta3.AutoscalingType, containerName string, recommendedResourceRequest resource.Quantity, k corev1.ResourceName, hpa *v2.HorizontalPodAutoscaler, replicaNum int32, resourceRequest resource.Quantity, minAllocatedResources corev1.ResourceList) (int64, string, error) {
+func (s *Service) calculateBestNewSize(
+	ctx context.Context,
+	tortoise *v1beta3.Tortoise,
+	p v1beta3.AutoscalingType,
+	containerName string,
+	recommendedResourceRequest resource.Quantity,
+	k corev1.ResourceName,
+	hpa *v2.HorizontalPodAutoscaler,
+	replicaNum int32,
+	resourceRequest resource.Quantity,
+	minAllocatedResources corev1.ResourceList,
+	now time.Time,
+) (int64, string, *v1beta3.Tortoise, error) {
 	if p == v1beta3.AutoscalingTypeOff {
 		// Just keep the current resource request.
-		return resourceRequest.MilliValue(), "", nil
+		return resourceRequest.MilliValue(), "", tortoise, nil
 	}
 
 	if p == v1beta3.AutoscalingTypeVertical {
@@ -188,23 +218,31 @@ func (s *Service) calculateBestNewSize(ctx context.Context, tortoise *v1beta3.To
 		// We always follow the recommendation from VPA.
 		newSize := recommendedResourceRequest.MilliValue()
 		jastified := s.justifyNewSize(resourceRequest.MilliValue(), newSize, k, minAllocatedResources, containerName)
-		return jastified, fmt.Sprintf("change %v request (%v) (%v → %v) based on VPA suggestion", k, containerName, resourceRequest.MilliValue(), jastified), nil
+		return jastified, fmt.Sprintf("change %v request (%v) (%v → %v) based on VPA suggestion", k, containerName, resourceRequest.MilliValue(), jastified), tortoise, nil
 	}
 
 	// p == v1beta3.AutoscalingTypeHorizontal
 
 	// When the current replica num is more than or equal to the preferredMaxReplicas,
-	// make the container size bigger (just multiple by 1.1) so that the replica number will be descreased.
+	// make the container size bigger (just multiple by 1.3) so that the replica number will be descreased.
 	//
 	// Here also covers the scenario where the current replica num hits MaximumMaxReplicas.
 	if replicaNum >= s.preferredMaxReplicas &&
 		// If the current replica number is equal to the maximumMaxReplica, increasing the resource request would not change the situation that the replica number is higher than preferredMaxReplicas.
 		*hpa.Spec.MinReplicas != replicaNum &&
-		features.Contains(s.featureFlags, features.VerticalScalingBasedOnPreferredMaxReplicas) {
+		features.Contains(s.featureFlags, features.VerticalScalingBasedOnPreferredMaxReplicas) &&
+		allowVerticalScalingBasedOnPreferredMaxReplicas(tortoise, now) {
 		// We keep increasing the size until we hit the maxResourceSize.
-		newSize := int64(float64(resourceRequest.MilliValue()) * 1.1)
+		newSize := int64(float64(resourceRequest.MilliValue()) * 1.3)
 		jastifiedNewSize := s.justifyNewSize(resourceRequest.MilliValue(), newSize, k, minAllocatedResources, containerName)
-		return jastifiedNewSize, fmt.Sprintf("the current number of replicas is bigger than the preferred max replica number in this cluster (%v), so make %v request (%s) bigger (%v → %v)", s.preferredMaxReplicas, k, containerName, resourceRequest.MilliValue(), jastifiedNewSize), nil
+		tortoise = utils.ChangeTortoiseCondition(tortoise, v1beta3.TortoiseConditionTypeScaledUpBasedOnPreferredMaxReplicas, v1.ConditionTrue, "ScaledUpBasedOnPreferredMaxReplicas", "the current number of replicas is bigger than the preferred max replica number", now)
+		msg := fmt.Sprintf("the current number of replicas is bigger than the preferred max replica number in this cluster (%v), so make %v request (%s) bigger (%v → %v)", s.preferredMaxReplicas, k, containerName, resourceRequest.MilliValue(), jastifiedNewSize)
+		return jastifiedNewSize, msg, tortoise, nil
+	}
+
+	if replicaNum < s.preferredMaxReplicas {
+		// Change TortoiseConditionTypeScaledUpBasedOnPreferredMaxReplicas to False.
+		tortoise = utils.ChangeTortoiseCondition(tortoise, v1beta3.TortoiseConditionTypeScaledUpBasedOnPreferredMaxReplicas, v1.ConditionFalse, "ScaledUpBasedOnPreferredMaxReplicas", "the current number of replicas is not bigger than the preferred max replica number", now)
 	}
 
 	if replicaNum <= s.minimumMinReplicas {
@@ -221,19 +259,22 @@ func (s *Service) calculateBestNewSize(ctx context.Context, tortoise *v1beta3.To
 		}
 
 		jastified := s.justifyNewSize(resourceRequest.MilliValue(), newSize, k, minAllocatedResources, containerName)
-		return jastified, fmt.Sprintf("the current number of replicas is equal or smaller than the minimum min replica number in this cluster (%v), so make %v request (%v) smaller (%v → %v) based on VPA suggestion", s.minimumMinReplicas, k, containerName, resourceRequest.MilliValue(), jastified), nil
+		return jastified, fmt.Sprintf("the current number of replicas is equal or smaller than the minimum min replica number in this cluster (%v), so make %v request (%v) smaller (%v → %v) based on VPA suggestion", s.minimumMinReplicas, k, containerName, resourceRequest.MilliValue(), jastified), tortoise, nil
 	}
 
 	// The replica number is OK based on minimumMinReplicas and preferredMaxReplicas.
 
-	if !hasMultipleHorizontal(tortoise) {
-		// nothing else to do for a single-horizontal Tortoise.
-		return s.justifyNewSize(resourceRequest.MilliValue(), resourceRequest.MilliValue(), k, minAllocatedResources, containerName), "nothing to do", nil
+	if !hasMultipleHorizontal(tortoise) || replicaNum == *hpa.Spec.MinReplicas {
+		// Nothing else to do for a single-horizontal Tortoise.
+		// Also, if the current replica number is equal to the minReplicas,
+		// we don't change the resource request based on the current resource utilization
+		// because even if the resource utilization is low, it's due to the minReplicas.
+		return s.justifyNewSize(resourceRequest.MilliValue(), resourceRequest.MilliValue(), k, minAllocatedResources, containerName), "nothing to do", tortoise, nil
 	}
 
 	targetUtilizationValue, err := hpaservice.GetHPATargetValue(ctx, hpa, containerName, k)
 	if err != nil {
-		return 0, "", fmt.Errorf("get the target value from HPA: %w", err)
+		return 0, "", tortoise, fmt.Errorf("get the target value from HPA: %w", err)
 	}
 
 	upperUtilization := math.Ceil((float64(recommendedResourceRequest.MilliValue()) / float64(resourceRequest.MilliValue())) * 100)
@@ -247,12 +288,12 @@
 		// so that the upper usage will be the target usage.
 		newSize := int64(float64(recommendedResourceRequest.MilliValue()) * 100.0 / float64(targetUtilizationValue))
 		jastified := s.justifyNewSize(resourceRequest.MilliValue(), newSize, k, minAllocatedResources, containerName)
-		return jastified, fmt.Sprintf("the current resource usage (%v, %v%%) is too small and it's due to unbalanced container size, so make %v request (%v) smaller (%v → %v) based on VPA's recommendation and HPA target utilization %v%%", recommendedResourceRequest.MilliValue(), int(upperUtilization), k, containerName, resourceRequest.MilliValue(), jastified, targetUtilizationValue), nil
+		return jastified, fmt.Sprintf("the current resource usage (%v, %v%%) is too small and it's due to unbalanced container size, so make %v request (%v) smaller (%v → %v) based on VPA's recommendation and HPA target utilization %v%%", recommendedResourceRequest.MilliValue(), int(upperUtilization), k, containerName, resourceRequest.MilliValue(), jastified, targetUtilizationValue), tortoise, nil
 	}
 
 	// Just keep the current resource request.
 	// Only do justification.
-	return s.justifyNewSize(resourceRequest.MilliValue(), resourceRequest.MilliValue(), k, minAllocatedResources, containerName), "nothing to do", nil
+	return s.justifyNewSize(resourceRequest.MilliValue(), resourceRequest.MilliValue(), k, minAllocatedResources, containerName), "nothing to do", tortoise, nil
 }
 
 func hasMultipleHorizontal(t *v1beta3.Tortoise) bool {
@@ -338,7 +379,7 @@ func (s *Service) UpdateRecommendations(ctx context.Context, tortoise *v1beta3.T
 		return tortoise, fmt.Errorf("update HPA recommendations: %w", err)
 	}
 
-	tortoise, err = s.updateVPARecommendation(ctx, tortoise, hpa, replicaNum)
+	tortoise, err = s.updateVPARecommendation(ctx, tortoise, hpa, replicaNum, now)
 	if err != nil {
 		return tortoise, fmt.Errorf("update VPA recommendations: %w", err)
 	}
diff --git a/pkg/recommender/recommender_test.go b/pkg/recommender/recommender_test.go
index c2ca13da..2e45d285 100644
--- a/pkg/recommender/recommender_test.go
+++ b/pkg/recommender/recommender_test.go
@@ -1567,6 +1567,7 @@ func Test_updateHPAMinMaxReplicasRecommendations(t *testing.T) {
 }
 
 func TestService_UpdateVPARecommendation(t *testing.T) {
+	now := time.Now()
 	type fields struct {
 		preferredMaxReplicas int32
 		minimumMinReplicas   int32
@@ -1650,7 +1651,129 @@ func TestService_UpdateVPARecommendation(t *testing.T) {
 						},
 					},
 				},
-			).AddContainerResourceRequests(v1beta3.ContainerResourceRequests{
+			).AddTortoiseConditions(v1beta3.TortoiseCondition{
+				Type:               v1beta3.TortoiseConditionTypeScaledUpBasedOnPreferredMaxReplicas,
+				Status:             corev1.ConditionTrue,
+				Reason:             "ScaledUpBasedOnPreferredMaxReplicas",
+				Message:            "the current number of replicas is bigger than the preferred max replica number",
+				LastTransitionTime: metav1.NewTime(now),
+				LastUpdateTime:     metav1.NewTime(now),
+			}).AddContainerResourceRequests(v1beta3.ContainerResourceRequests{
+				ContainerName: "test-container",
+				Resource:      createResourceList("500m", "500Mi"),
+			}).SetRecommendations(v1beta3.Recommendations{
+				Vertical: v1beta3.VerticalRecommendations{
+					ContainerResourceRecommendation: []v1beta3.RecommendedContainerResources{
+						{
+							ContainerName:       "test-container",
+							RecommendedResource: createResourceList("650m", "650Mi"), // current * 1.1
+						},
+					},
+				},
+			}).Build(),
+			wantErr: false,
+		},
+		{
+			name: "all horizontal: replica count above preferredMaxReplicas, but we recently increase the resource: don't increase the resources",
+			fields: fields{
+				preferredMaxReplicas: 3,
+				maxCPU:               "1000m",
+				maxMemory:            "1Gi",
+				features:             []features.FeatureFlag{features.VerticalScalingBasedOnPreferredMaxReplicas},
+			},
+			args: args{
+				hpa: &v2.HorizontalPodAutoscaler{
+					Spec: v2.HorizontalPodAutoscalerSpec{
+						MinReplicas: ptr.To[int32](1),
+						Metrics: []v2.MetricSpec{
+							{
+								Type: v2.ContainerResourceMetricSourceType,
+								ContainerResource: &v2.ContainerResourceMetricSource{
+									Name: corev1.ResourceCPU,
+									Target: v2.MetricTarget{
+										AverageUtilization: ptr.To[int32](80),
+									},
+									Container: "test-container",
+								},
+							},
+							{
+								Type: v2.ContainerResourceMetricSourceType,
+								ContainerResource: &v2.ContainerResourceMetricSource{
+									Name: corev1.ResourceMemory,
+									Target: v2.MetricTarget{
+										AverageUtilization: ptr.To[int32](80),
+									},
+									Container: "test-container",
+								},
+							},
+						},
+					},
+				},
+				tortoise: utils.NewTortoiseBuilder().AddAutoscalingPolicy(v1beta3.ContainerAutoscalingPolicy{
+					ContainerName: "test-container",
+					Policy: map[corev1.ResourceName]v1beta3.AutoscalingType{
+						corev1.ResourceCPU:    v1beta3.AutoscalingTypeHorizontal,
+						corev1.ResourceMemory: v1beta3.AutoscalingTypeHorizontal,
+					},
+				}).AddTortoiseConditions(v1beta3.TortoiseCondition{
+					Type:    v1beta3.TortoiseConditionTypeScaledUpBasedOnPreferredMaxReplicas,
+					Status:  corev1.ConditionTrue,
+					Reason:  "ScaledUpBasedOnPreferredMaxReplicas",
+					Message: "the current number of replicas is bigger than the preferred max replica number",
+					// recently updated.
+					LastTransitionTime: metav1.NewTime(now.Add(-time.Minute)),
+					LastUpdateTime:     metav1.NewTime(now.Add(-time.Minute)),
+				}).AddResourcePolicy(v1beta3.ContainerResourcePolicy{
+					ContainerName:         "test-container",
+					MinAllocatedResources: createResourceList("100m", "100Mi"),
+				}).AddContainerRecommendationFromVPA(
+					v1beta3.ContainerRecommendationFromVPA{
+						ContainerName: "test-container",
+						Recommendation: map[corev1.ResourceName]v1beta3.ResourceQuantity{
+							corev1.ResourceCPU: {
+								Quantity: resource.MustParse("400m"),
+							},
+							corev1.ResourceMemory: {
+								Quantity: resource.MustParse("400Mi"),
+							},
+						},
+					},
+				).AddContainerResourceRequests(v1beta3.ContainerResourceRequests{
+					ContainerName: "test-container",
+					Resource:      createResourceList("500m", "500Mi"),
+				}).Build(),
+				replicaNum: 4,
+			},
+			want: utils.NewTortoiseBuilder().AddAutoscalingPolicy(v1beta3.ContainerAutoscalingPolicy{
+				ContainerName: "test-container",
+				Policy: map[corev1.ResourceName]v1beta3.AutoscalingType{
+					corev1.ResourceCPU:    v1beta3.AutoscalingTypeHorizontal,
+					corev1.ResourceMemory: v1beta3.AutoscalingTypeHorizontal,
+				},
+			}).AddResourcePolicy(v1beta3.ContainerResourcePolicy{
+				ContainerName:         "test-container",
+				MinAllocatedResources: createResourceList("100m", "100Mi"),
+			}).AddContainerRecommendationFromVPA(
+				v1beta3.ContainerRecommendationFromVPA{
+					ContainerName: "test-container",
+					Recommendation: map[corev1.ResourceName]v1beta3.ResourceQuantity{
+						corev1.ResourceCPU: {
+							Quantity: resource.MustParse("400m"),
+						},
+						corev1.ResourceMemory: {
+							Quantity: resource.MustParse("400Mi"),
+						},
+					},
+				},
+			).AddTortoiseConditions(v1beta3.TortoiseCondition{
+				Type:    v1beta3.TortoiseConditionTypeScaledUpBasedOnPreferredMaxReplicas,
+				Status:  corev1.ConditionTrue,
+				Reason:  "ScaledUpBasedOnPreferredMaxReplicas",
+				Message: "the current number of replicas is bigger than the preferred max replica number",
+				// recently updated.
+				LastTransitionTime: metav1.NewTime(now.Add(-time.Minute)),
+				LastUpdateTime:     metav1.NewTime(now.Add(-time.Minute)),
+			}).AddContainerResourceRequests(v1beta3.ContainerResourceRequests{
 				ContainerName: "test-container",
 				Resource:      createResourceList("500m", "500Mi"),
 			}).SetRecommendations(v1beta3.Recommendations{
@@ -1658,7 +1781,7 @@ func TestService_UpdateVPARecommendation(t *testing.T) {
 					ContainerResourceRecommendation: []v1beta3.RecommendedContainerResources{
 						{
 							ContainerName:       "test-container",
-							RecommendedResource: createResourceList("550m", "550Mi"), // current * 1.1
+							RecommendedResource: createResourceList("500m", "500Mi"), // Unchange
 						},
 					},
 				},
@@ -1926,7 +2049,14 @@ func TestService_UpdateVPARecommendation(t *testing.T) {
 						},
 					},
 				},
-			).AddContainerResourceRequests(v1beta3.ContainerResourceRequests{
+			).AddTortoiseConditions(v1beta3.TortoiseCondition{
+				Type:               v1beta3.TortoiseConditionTypeScaledUpBasedOnPreferredMaxReplicas,
+				Status:             corev1.ConditionTrue,
+				Reason:             "ScaledUpBasedOnPreferredMaxReplicas",
+				Message:            "the current number of replicas is bigger than the preferred max replica number",
+				LastTransitionTime: metav1.NewTime(now),
+				LastUpdateTime:     metav1.NewTime(now),
+			}).AddContainerResourceRequests(v1beta3.ContainerResourceRequests{
 				ContainerName: "test-container",
 				Resource:      createResourceList("500m", "500Mi"),
 			}).SetRecommendations(v1beta3.Recommendations{
@@ -1934,7 +2064,7 @@
 					ContainerResourceRecommendation: []v1beta3.RecommendedContainerResources{
 						{
 							ContainerName:       "test-container",
-							RecommendedResource: createResourceList("550m" /* current * 1.1*/, "800Mi" /* VPA recommendation*/),
+							RecommendedResource: createResourceList("650m" /* current * 1.1*/, "800Mi" /* VPA recommendation*/),
 						},
 					},
 				},
@@ -2024,7 +2154,14 @@ func TestService_UpdateVPARecommendation(t *testing.T) {
 						},
 					},
 				},
-			).AddContainerResourceRequests(v1beta3.ContainerResourceRequests{
+			).AddTortoiseConditions(v1beta3.TortoiseCondition{
+				Type:               v1beta3.TortoiseConditionTypeScaledUpBasedOnPreferredMaxReplicas,
+				Status:             corev1.ConditionFalse,
+				Reason:             "ScaledUpBasedOnPreferredMaxReplicas",
+				Message:            "the current number of replicas is not bigger than the preferred max replica number",
+				LastTransitionTime: metav1.NewTime(now),
+				LastUpdateTime:     metav1.NewTime(now),
+			}).AddContainerResourceRequests(v1beta3.ContainerResourceRequests{
 				ContainerName: "test-container",
 				Resource:      createResourceList("1500m", "1.5Gi"),
 			}).SetRecommendations(v1beta3.Recommendations{
@@ -2125,6 +2262,13 @@
 			).AddContainerResourceRequests(v1beta3.ContainerResourceRequests{
 				ContainerName: "test-container",
 				Resource:      createResourceList("15m", "1.5Mi"), //smaller than MinAllocatedResources
+			}).AddTortoiseConditions(v1beta3.TortoiseCondition{
+				Type:               v1beta3.TortoiseConditionTypeScaledUpBasedOnPreferredMaxReplicas,
+				Status:             corev1.ConditionFalse,
+				Reason:             "ScaledUpBasedOnPreferredMaxReplicas",
+				Message:            "the current number of replicas is not bigger than the preferred max replica number",
+				LastTransitionTime: metav1.NewTime(now),
+				LastUpdateTime:     metav1.NewTime(now),
 			}).SetRecommendations(v1beta3.Recommendations{
 				Vertical: v1beta3.VerticalRecommendations{
 					ContainerResourceRecommendation: []v1beta3.RecommendedContainerResources{
@@ -2203,6 +2347,13 @@ func TestService_UpdateVPARecommendation(t *testing.T) {
 			).AddContainerResourceRequests(v1beta3.ContainerResourceRequests{
 				ContainerName: "test-container",
createResourceList("130m", "130Mi"), + }).AddTortoiseConditions(v1beta3.TortoiseCondition{ + Type: v1beta3.TortoiseConditionTypeScaledUpBasedOnPreferredMaxReplicas, + Status: corev1.ConditionFalse, + Reason: "ScaledUpBasedOnPreferredMaxReplicas", + Message: "the current number of replicas is not bigger than the preferred max replica number", + LastTransitionTime: metav1.NewTime(now), + LastUpdateTime: metav1.NewTime(now), }).SetRecommendations(v1beta3.Recommendations{ Vertical: v1beta3.VerticalRecommendations{ ContainerResourceRecommendation: []v1beta3.RecommendedContainerResources{ @@ -2763,6 +2914,13 @@ func TestService_UpdateVPARecommendation(t *testing.T) { }, }, }, + }).AddTortoiseConditions(v1beta3.TortoiseCondition{ + Type: v1beta3.TortoiseConditionTypeScaledUpBasedOnPreferredMaxReplicas, + Status: corev1.ConditionFalse, + Reason: "ScaledUpBasedOnPreferredMaxReplicas", + Message: "the current number of replicas is not bigger than the preferred max replica number", + LastTransitionTime: metav1.NewTime(now), + LastUpdateTime: metav1.NewTime(now), }).Build(), wantErr: false, }, @@ -2771,7 +2929,7 @@ func TestService_UpdateVPARecommendation(t *testing.T) { t.Run(tt.name, func(t *testing.T) { s := New(0, 0, 0, 0, int(tt.fields.minimumMinReplicas), int(tt.fields.preferredMaxReplicas), "5m", "5Mi", map[string]string{"istio-proxy": "7m"}, map[string]string{"istio-proxy": "7Mi"}, tt.fields.maxCPU, tt.fields.maxMemory, 10000, tt.fields.maxAllowedScalingDownRatio, tt.fields.features, record.NewFakeRecorder(10)) - got, err := s.updateVPARecommendation(context.Background(), tt.args.tortoise, tt.args.hpa, tt.args.replicaNum) + got, err := s.updateVPARecommendation(context.Background(), tt.args.tortoise, tt.args.hpa, tt.args.replicaNum, now) if (err != nil) != tt.wantErr { t.Errorf("updateVPARecommendation() error = %v, wantErr %v", err, tt.wantErr) return diff --git a/pkg/tortoise/tortoise.go b/pkg/tortoise/tortoise.go index 64e697b4..68ef77a8 100644 --- a/pkg/tortoise/tortoise.go +++ b/pkg/tortoise/tortoise.go @@ -479,41 +479,10 @@ func (s *Service) updateLastTimeUpdateTortoise(tortoise *v1beta3.Tortoise, now t func (s *Service) RecordReconciliationFailure(t *v1beta3.Tortoise, err error, now time.Time) *v1beta3.Tortoise { if err != nil { s.recorder.Event(t, "Warning", "ReconcileError", err.Error()) - for i := range t.Status.Conditions.TortoiseConditions { - if t.Status.Conditions.TortoiseConditions[i].Type == v1beta3.TortoiseConditionTypeFailedToReconcile { - // TODO: have a clear reason and utilize it to have a better reconciliation next. - // For example, in some cases, the reconciliation may keep failing until people fix some problems manually. - t.Status.Conditions.TortoiseConditions[i].Reason = "ReconcileError" - t.Status.Conditions.TortoiseConditions[i].Message = err.Error() - t.Status.Conditions.TortoiseConditions[i].Status = corev1.ConditionTrue - t.Status.Conditions.TortoiseConditions[i].LastTransitionTime = metav1.NewTime(now) - t.Status.Conditions.TortoiseConditions[i].LastUpdateTime = metav1.NewTime(now) - return t - } - } - // add as a new condition if not found. 
- t.Status.Conditions.TortoiseConditions = append(t.Status.Conditions.TortoiseConditions, v1beta3.TortoiseCondition{ - Type: v1beta3.TortoiseConditionTypeFailedToReconcile, - Status: corev1.ConditionTrue, - Reason: "ReconcileError", - Message: err.Error(), - LastTransitionTime: metav1.NewTime(now), - LastUpdateTime: metav1.NewTime(now), - }) - return t + return utils.ChangeTortoiseCondition(t, v1beta3.TortoiseConditionTypeFailedToReconcile, corev1.ConditionTrue, "ReconcileError", err.Error(), now) } - for i := range t.Status.Conditions.TortoiseConditions { - if t.Status.Conditions.TortoiseConditions[i].Type == v1beta3.TortoiseConditionTypeFailedToReconcile { - t.Status.Conditions.TortoiseConditions[i].Reason = "" - t.Status.Conditions.TortoiseConditions[i].Message = "" - t.Status.Conditions.TortoiseConditions[i].Status = corev1.ConditionFalse - t.Status.Conditions.TortoiseConditions[i].LastTransitionTime = metav1.NewTime(now) - t.Status.Conditions.TortoiseConditions[i].LastUpdateTime = metav1.NewTime(now) - return t - } - } - return t + return utils.ChangeTortoiseCondition(t, v1beta3.TortoiseConditionTypeFailedToReconcile, corev1.ConditionFalse, "", "", now) } type resourceNameAndContainerName struct { diff --git a/pkg/utils/tortoise.go b/pkg/utils/tortoise.go index bc50a8bb..f5151e5a 100644 --- a/pkg/utils/tortoise.go +++ b/pkg/utils/tortoise.go @@ -9,6 +9,30 @@ import ( "github.com/mercari/tortoise/api/v1beta3" ) +func ChangeTortoiseCondition(t *v1beta3.Tortoise, conditionType v1beta3.TortoiseConditionType, status corev1.ConditionStatus, reason, message string, now time.Time) *v1beta3.Tortoise { + for i := range t.Status.Conditions.TortoiseConditions { + if t.Status.Conditions.TortoiseConditions[i].Type == conditionType { + t.Status.Conditions.TortoiseConditions[i].Reason = reason + t.Status.Conditions.TortoiseConditions[i].Message = message + t.Status.Conditions.TortoiseConditions[i].Status = status + t.Status.Conditions.TortoiseConditions[i].LastTransitionTime = metav1.NewTime(now) + t.Status.Conditions.TortoiseConditions[i].LastUpdateTime = metav1.NewTime(now) + return t + } + } + // add a new condition if not found. + t.Status.Conditions.TortoiseConditions = append(t.Status.Conditions.TortoiseConditions, v1beta3.TortoiseCondition{ + Type: conditionType, + Status: status, + Reason: reason, + Message: message, + LastTransitionTime: metav1.NewTime(now), + LastUpdateTime: metav1.NewTime(now), + }) + + return t +} + func ChangeTortoiseResourcePhase(tortoise *v1beta3.Tortoise, containerName string, rn corev1.ResourceName, now time.Time, phase v1beta3.ContainerResourcePhase) *v1beta3.Tortoise { found := false for i, p := range tortoise.Status.ContainerResourcePhases { diff --git a/pkg/utils/tortoise_builder.go b/pkg/utils/tortoise_builder.go index 69b3bb80..d61a9772 100644 --- a/pkg/utils/tortoise_builder.go +++ b/pkg/utils/tortoise_builder.go @@ -63,6 +63,11 @@ func (b *TortoiseBuilder) AddContainerResourceRequests(actualContainerResource v return b } +func (b *TortoiseBuilder) AddTortoiseConditions(condition v1beta3.TortoiseCondition) *TortoiseBuilder { + b.tortoise.Status.Conditions.TortoiseConditions = append(b.tortoise.Status.Conditions.TortoiseConditions, condition) + return b +} + func (b *TortoiseBuilder) SetRecommendations(recommendations v1beta3.Recommendations) *TortoiseBuilder { b.tortoise.Status.Recommendations = recommendations return b
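Not part of the patch above: a minimal, self-contained sketch (standard library only) of the 30-minute cooldown rule that the new allowVerticalScalingBasedOnPreferredMaxReplicas helper enforces before another PreferredMaxReplicas-based scale-up. The function and variable names below are illustrative stand-ins, not the repository's API.

package main

import (
	"fmt"
	"time"
)

// cooldownOver reports whether another PreferredMaxReplicas-based vertical scale-up
// is allowed. It mirrors the rule in the patch: if the ScaledUpBasedOnPreferredMaxReplicas
// condition is True and its last transition happened within the past 30 minutes
// (and not at this very reconciliation), a new scale-up is suppressed.
func cooldownOver(lastTransition, now time.Time, conditionTrue bool) bool {
	if !conditionTrue {
		return true
	}
	if lastTransition.Add(30*time.Minute).After(now) && !lastTransition.Equal(now) {
		return false
	}
	return true
}

func main() {
	now := time.Now()
	fmt.Println(cooldownOver(now.Add(-10*time.Minute), now, true)) // false: scaled up 10 minutes ago, still cooling down
	fmt.Println(cooldownOver(now.Add(-45*time.Minute), now, true)) // true: the 30-minute cooldown has passed
	fmt.Println(cooldownOver(now, now, true))                      // true: condition was set in this same reconciliation
}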