From c6f7a11a4e768d1602f484c673988d8a91043188 Mon Sep 17 00:00:00 2001
From: Richard Jones
Date: Fri, 11 Oct 2024 11:17:30 +0100
Subject: [PATCH] implement last run successful tests and related fixes

---
 doajtest/unit/test_background_task_status.py     | 41 +++++++++++++++++++
 .../bll/services/background_task_status.py       | 18 ++++----
 portality/scripts/priorities.csv                 |  2 +-
 portality/settings.py                            | 37 +++++++++--------
 4 files changed, 70 insertions(+), 28 deletions(-)

diff --git a/doajtest/unit/test_background_task_status.py b/doajtest/unit/test_background_task_status.py
index fe7299fca..2664d72b4 100644
--- a/doajtest/unit/test_background_task_status.py
+++ b/doajtest/unit/test_background_task_status.py
@@ -69,6 +69,14 @@
     }
 }
 
+bg_monitor_last_successful = {
+    'BG_MONITOR_LAST_SUCCESSFULLY_RUN_CONFIG': {
+        'journal_csv': {
+            'last_run_successful_in': 100
+        }
+    }
+}
+
 
 class TestBackgroundTaskStatus(DoajTestCase):
     @classmethod
@@ -235,3 +243,36 @@ def test_create_background_status__queued_valid_oldest(self):
         assert is_stable(status_dict['status'])
         self.assert_stable_dict(journal_csv_dict)
         assert journal_csv_dict.get('oldest') is not None
+
+    @apply_test_case_config(bg_monitor_last_successful)
+    def test_create_background_status__last_run_successful(self):
+        save_mock_bgjob(JournalCSVBackgroundTask.__action__,
+                        status=constants.BGJOB_STATUS_COMPLETE, )
+
+        status_dict = background_task_status.create_background_status()
+        print(json.dumps(status_dict, indent=4))
+
+        journal_csv_dict = status_dict['queues']['scheduled_short']['last_run_successful'].get(JournalCSVBackgroundTask.__action__, {})
+
+        assert is_stable(status_dict['status'])
+        self.assert_stable_dict(journal_csv_dict)
+        assert journal_csv_dict.get('last_run') is not None
+        assert journal_csv_dict.get('last_run_status') == constants.BGJOB_STATUS_COMPLETE
+
+    @apply_test_case_config(bg_monitor_last_successful)
+    def test_create_background_status__last_run_unsuccessful(self):
+        save_mock_bgjob(JournalCSVBackgroundTask.__action__,
+                        status=constants.BGJOB_STATUS_ERROR, )
+
+        status_dict = background_task_status.create_background_status()
+        print(json.dumps(status_dict, indent=4))
+
+        journal_csv_dict = status_dict['queues']['scheduled_short']['last_run_successful'].get(
+            JournalCSVBackgroundTask.__action__, {})
+
+        assert not is_stable(status_dict['status'])
+        self.assert_unstable_dict(journal_csv_dict)
+        assert journal_csv_dict.get('last_run') is not None
+        assert journal_csv_dict.get('last_run_status') == constants.BGJOB_STATUS_ERROR
+
+
diff --git a/portality/bll/services/background_task_status.py b/portality/bll/services/background_task_status.py
index e505a70a9..eca6cb88c 100644
--- a/portality/bll/services/background_task_status.py
+++ b/portality/bll/services/background_task_status.py
@@ -4,6 +4,7 @@
 import itertools
 from typing import Iterable
 
+import constants
 from constants import BGJOB_STATUS_COMPLETE
 from portality.constants import BGJOB_QUEUE_ID_LONG, BGJOB_QUEUE_ID_MAIN, BGJOB_QUEUE_ID_EVENTS, BGJOB_QUEUE_ID_SCHEDULED_LONG, BGJOB_QUEUE_ID_SCHEDULED_SHORT, BGJOB_STATUS_ERROR, BGJOB_STATUS_QUEUED, \
     BG_STATUS_STABLE, BG_STATUS_UNSTABLE
@@ -34,8 +35,8 @@ def all_stable(self, items: Iterable, field_name='status') -> bool:
     def all_stable_str(self, items: Iterable, field_name='status') -> str:
         return self.to_bg_status_str(self.all_stable(items, field_name))
 
-    def create_last_successfully_run_status(self, action, last_successful_run=0, **_) -> dict:
-        if last_successful_run == 0:
+    def create_last_successfully_run_status(self, action, last_run_successful_in=0, **_) -> dict:
+        if last_run_successful_in == 0:
             return dict(
                 status=BG_STATUS_STABLE,
                 last_run=None,
@@ -44,18 +45,19 @@ def create_last_successfully_run_status(self, action, last_successful_run=0, **_
         )
 
         lr_query = (BackgroundJobQueryBuilder().action(action)
-                    .since(dates.before_now(last_successful_run))
+                    .since(dates.before_now(last_run_successful_in))
+                    .status_includes([constants.BGJOB_STATUS_COMPLETE, constants.BGJOB_STATUS_ERROR])
                     .size(1)
                     .order_by('created_date', 'desc')
                     .build_query_dict())
 
         lr_results = BackgroundJob.q2obj(q=lr_query)
-        lr_job = lr_results and lr_results[0]
+        lr_job = lr_results[0] if len(lr_results) > 0 else None
 
         status = BG_STATUS_UNSTABLE
         lr = None
         last_run_status = None
-        msg = ["No background jobs run in the time period"]
+        msg = ["No background jobs completed or errored in the time period"]
 
         if lr_job is not None:
             lr = lr_job.created_date
@@ -73,8 +75,6 @@ def create_last_successfully_run_status(self, action, last_successful_run=0, **_
             err_msgs=msg
         )
 
-
-
     def create_errors_status(self, action, check_sec=3600, allowed_num_err=0, **_) -> dict:
         in_monitoring_query = SimpleBgjobQueue(action, status=BGJOB_STATUS_ERROR, since=dates.before_now(check_sec))
         num_err_in_monitoring = BackgroundJob.hit_count(query=in_monitoring_query.query())
@@ -152,11 +152,11 @@ def create_queues_status(self, queue_name) -> dict:
 
         result_dict = dict(
             status=self.to_bg_status_str(
-                not err_msgs and self.all_stable(itertools.chain(errors.values(), queued.values()))),
+                not err_msgs and self.all_stable(itertools.chain(errors.values(), queued.values(), last_run.values()))),
             last_completed_job=last_completed_date and dates.format(last_completed_date),
             errors=errors,
             queued=queued,
-            last_successfully_run=last_run,
+            last_run_successful=last_run,
             err_msgs=err_msgs,
         )
         return result_dict
diff --git a/portality/scripts/priorities.csv b/portality/scripts/priorities.csv
index f5e5cdcc5..bae43cd73 100644
--- a/portality/scripts/priorities.csv
+++ b/portality/scripts/priorities.csv
@@ -10,7 +10,7 @@ HP/PfT,"Priority: High, Workflow: Pending for Test",Review
 HP/rev,Priority: High,Review
 PfT,Workflow: Pending for Test,Review
 PfL,Workflow: Pending for Live,Review
-Inv,Workflow: Initial Investigation,"Review, In Progress, To Do"
+Inv,Workflow: Initial Investigation,"Review, In progress, To Do"
 Rev,,Review
 Near,Scale: Nearly Finished,
 Sch,Priority: Scheduled,
diff --git a/portality/settings.py b/portality/settings.py
index c8c38a1f5..bc3282d6a 100644
--- a/portality/settings.py
+++ b/portality/settings.py
@@ -1422,8 +1422,9 @@
     # The number of errors allowed in the check period before the result is flagged
     'allowed_num_err': 0,
 
-    # how long ago since a job just successfully ran (0 turns this off)
-    'last_successfully_ran': 0
+    # Was the most recent run of this job within the configured time period successful?
+    # If the most recent job in the timeframe is an error, this will trigger an "unstable" state (0 turns this off)
+    'last_run_successful_in': 0
 }
 
 # Configures the monitoring period and the allowed number of errors in that period before a queue is marked
@@ -1502,52 +1503,52 @@
 
 BG_MONITOR_LAST_SUCCESSFULLY_RUN_CONFIG = {
     'anon_export': {
-        'last_successfully_ran': 32 * _DAY
+        'last_run_successful_in': 32 * _DAY
     },
     'article_cleanup_sync': {
-        'last_successfully_ran': 33 * _DAY
+        'last_run_successful_in': 33 * _DAY
     },
     'async_workflow_notifications': {
-        'last_successfully_ran': _WEEK + _DAY
+        'last_run_successful_in': _WEEK + _DAY
     },
     'check_latest_es_backup': {
-        'last_successfully_ran': _DAY + _HOUR
+        'last_run_successful_in': _DAY + _HOUR
     },
     'datalog_journal_added_update': {
-        'last_successfully_ran': _HOUR
+        'last_run_successful_in': _HOUR
     },
     'find_discontinued_soon': {
-        'last_successfully_ran': _DAY + _HOUR
+        'last_run_successful_in': _DAY + _HOUR
     },
     'harvest': {
-        'last_successfully_ran': _DAY + _HOUR
+        'last_run_successful_in': _DAY + _HOUR
     },
     'journal_csv': {
-        'last_successfully_ran': 2 * _HOUR
+        'last_run_successful_in': 2 * _HOUR
     },
     'monitor_bgjobs': {
-        'last_successfully_ran': _WEEK + _DAY
+        'last_run_successful_in': _WEEK + _DAY
    },
     'old_data_cleanup': {
-        'last_successfully_ran': 32 * _DAY
+        'last_run_successful_in': 32 * _DAY
     },
     'prune_es_backups': {
-        'last_successfully_ran': _DAY + _HOUR
+        'last_run_successful_in': _DAY + _HOUR
     },
     'public_data_dump': {
-        'last_successfully_ran': 32 * _DAY
+        'last_run_successful_in': 32 * _DAY
     },
     'read_news': {
-        'last_successfully_ran': 2 * _HOUR
+        'last_run_successful_in': 2 * _HOUR
     },
     'reporting': {
-        'last_successfully_ran': 32 * _DAY
+        'last_run_successful_in': 32 * _DAY
     },
     'request_es_backup': {
-        'last_successfully_ran': _DAY + _HOUR
+        'last_run_successful_in': _DAY + _HOUR
     },
     'sitemap': {
-        'last_successfully_ran': _DAY + _HOUR
+        'last_run_successful_in': _DAY + _HOUR
     }
 }
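
Usage sketch (not part of the diff above): the per-action 'last_run_successful_in' key can be exercised from a test the same way the new bg_monitor_last_successful fixture does, by overriding BG_MONITOR_LAST_SUCCESSFULLY_RUN_CONFIG via apply_test_case_config. The override name, test name, and durations below are illustrative only; the window is in seconds (the same units dates.before_now() receives for check_sec elsewhere in this service), and 0 disables the check for that action.

    # hypothetical test override; the fixture shape, decorator, and action
    # names come from the patch above, the values here are made up
    bg_monitor_overrides = {
        'BG_MONITOR_LAST_SUCCESSFULLY_RUN_CONFIG': {
            'journal_csv': {
                # unstable if the newest completed/errored job in the last
                # 30 minutes is an error, or if no such job exists at all
                'last_run_successful_in': 30 * 60
            },
            'sitemap': {
                # 0 turns the last-run check off for this action
                'last_run_successful_in': 0
            }
        }
    }

    @apply_test_case_config(bg_monitor_overrides)
    def test_custom_last_run_window(self):
        ...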