Skip to content

Commit

Permalink
implement last run successful tests and related fixes
Browse files Browse the repository at this point in the history
  • Loading branch information
richard-jones committed Oct 11, 2024
1 parent 85ebff1 commit c6f7a11
Show file tree
Hide file tree
Showing 4 changed files with 70 additions and 28 deletions.
41 changes: 41 additions & 0 deletions doajtest/unit/test_background_task_status.py
Original file line number Diff line number Diff line change
Expand Up @@ -69,6 +69,14 @@
}
}

# Test-case override for the background-monitor settings: require the
# journal_csv task to have run successfully within the last 100 seconds
# (presumably seconds, matching the other monitor intervals -- TODO confirm).
bg_monitor_last_successful = {
    'BG_MONITOR_LAST_SUCCESSFULLY_RUN_CONFIG': {
        'journal_csv': {'last_run_successful_in': 100},
    },
}


class TestBackgroundTaskStatus(DoajTestCase):
@classmethod
Expand Down Expand Up @@ -235,3 +243,36 @@ def test_create_background_status__queued_valid_oldest(self):
assert is_stable(status_dict['status'])
self.assert_stable_dict(journal_csv_dict)
assert journal_csv_dict.get('oldest') is not None

    @apply_test_case_config(bg_monitor_last_successful)
    def test_create_background_status__last_run_successful(self):
        """A COMPLETE job inside the monitoring window should leave the
        'last_run_successful' entry (and the overall status) stable."""
        # Save a mock journal_csv job whose status is COMPLETE.
        save_mock_bgjob(JournalCSVBackgroundTask.__action__,
                        status=constants.BGJOB_STATUS_COMPLETE, )

        status_dict = background_task_status.create_background_status()
        print(json.dumps(status_dict, indent=4))

        # Pull out the per-action entry from the scheduled_short queue's
        # last_run_successful section (empty dict if the action is absent).
        journal_csv_dict = status_dict['queues']['scheduled_short']['last_run_successful'].get(JournalCSVBackgroundTask.__action__, {})

        assert is_stable(status_dict['status'])
        self.assert_stable_dict(journal_csv_dict)
        # A run happened in the window, so last_run is populated and COMPLETE.
        assert journal_csv_dict.get('last_run') is not None
        assert journal_csv_dict.get('last_run_status') == constants.BGJOB_STATUS_COMPLETE

    @apply_test_case_config(bg_monitor_last_successful)
    def test_create_background_status__last_run_unsuccessful(self):
        """An ERROR job inside the monitoring window should mark the
        'last_run_successful' entry (and the overall status) unstable."""
        # Save a mock journal_csv job whose status is ERROR.
        save_mock_bgjob(JournalCSVBackgroundTask.__action__,
                        status=constants.BGJOB_STATUS_ERROR, )

        status_dict = background_task_status.create_background_status()
        print(json.dumps(status_dict, indent=4))

        # Pull out the per-action entry from the scheduled_short queue's
        # last_run_successful section (empty dict if the action is absent).
        journal_csv_dict = status_dict['queues']['scheduled_short']['last_run_successful'].get(
            JournalCSVBackgroundTask.__action__, {})

        assert not is_stable(status_dict['status'])
        self.assert_unstable_dict(journal_csv_dict)
        # The errored run is still reported as the last run, with its status.
        assert journal_csv_dict.get('last_run') is not None
        assert journal_csv_dict.get('last_run_status') == constants.BGJOB_STATUS_ERROR


18 changes: 9 additions & 9 deletions portality/bll/services/background_task_status.py
Original file line number Diff line number Diff line change
Expand Up @@ -4,6 +4,7 @@
import itertools
from typing import Iterable

import constants
from constants import BGJOB_STATUS_COMPLETE
from portality.constants import BGJOB_QUEUE_ID_LONG, BGJOB_QUEUE_ID_MAIN, BGJOB_QUEUE_ID_EVENTS, BGJOB_QUEUE_ID_SCHEDULED_LONG, BGJOB_QUEUE_ID_SCHEDULED_SHORT, BGJOB_STATUS_ERROR, BGJOB_STATUS_QUEUED, \
BG_STATUS_STABLE, BG_STATUS_UNSTABLE
Expand Down Expand Up @@ -34,8 +35,8 @@ def all_stable(self, items: Iterable, field_name='status') -> bool:
def all_stable_str(self, items: Iterable, field_name='status') -> str:
return self.to_bg_status_str(self.all_stable(items, field_name))

def create_last_successfully_run_status(self, action, last_successful_run=0, **_) -> dict:
if last_successful_run == 0:
def create_last_successfully_run_status(self, action, last_run_successful_in=0, **_) -> dict:
if last_run_successful_in == 0:
return dict(
status=BG_STATUS_STABLE,
last_run=None,
Expand All @@ -44,18 +45,19 @@ def create_last_successfully_run_status(self, action, last_successful_run=0, **_
)

lr_query = (BackgroundJobQueryBuilder().action(action)
.since(dates.before_now(last_successful_run))
.since(dates.before_now(last_run_successful_in))
.status_includes([constants.BGJOB_STATUS_COMPLETE, constants.BGJOB_STATUS_ERROR])
.size(1)
.order_by('created_date', 'desc')
.build_query_dict())

lr_results = BackgroundJob.q2obj(q=lr_query)
lr_job = lr_results and lr_results[0]
lr_job = lr_results[0] if len(lr_results) > 0 else None

status = BG_STATUS_UNSTABLE
lr = None
last_run_status = None
msg = ["No background jobs run in the time period"]
msg = ["No background jobs completed or errored in the time period"]

if lr_job is not None:
lr = lr_job.created_date
Expand All @@ -73,8 +75,6 @@ def create_last_successfully_run_status(self, action, last_successful_run=0, **_
err_msgs=msg
)



def create_errors_status(self, action, check_sec=3600, allowed_num_err=0, **_) -> dict:
in_monitoring_query = SimpleBgjobQueue(action, status=BGJOB_STATUS_ERROR, since=dates.before_now(check_sec))
num_err_in_monitoring = BackgroundJob.hit_count(query=in_monitoring_query.query())
Expand Down Expand Up @@ -152,11 +152,11 @@ def create_queues_status(self, queue_name) -> dict:

result_dict = dict(
status=self.to_bg_status_str(
not err_msgs and self.all_stable(itertools.chain(errors.values(), queued.values()))),
not err_msgs and self.all_stable(itertools.chain(errors.values(), queued.values(), last_run.values()))),
last_completed_job=last_completed_date and dates.format(last_completed_date),
errors=errors,
queued=queued,
last_successfully_run=last_run,
last_run_successful=last_run,
err_msgs=err_msgs,
)
return result_dict
Expand Down
2 changes: 1 addition & 1 deletion portality/scripts/priorities.csv
Original file line number Diff line number Diff line change
Expand Up @@ -10,7 +10,7 @@ HP/PfT,"Priority: High, Workflow: Pending for Test",Review
HP/rev,Priority: High,Review
PfT,Workflow: Pending for Test,Review
PfL,Workflow: Pending for Live,Review
Inv,Workflow: Initial Investigation,"Review, In Progress, To Do"
Inv,Workflow: Initial Investigation,"Review, In progress, To Do"
Rev,,Review
Near,Scale: Nearly Finished,
Sch,Priority: Scheduled,
Expand Down
37 changes: 19 additions & 18 deletions portality/settings.py
Original file line number Diff line number Diff line change
Expand Up @@ -1422,8 +1422,9 @@
# The number of errors allowed in the check period before the result is flagged
'allowed_num_err': 0,

# how long ago since a job just successfully ran (0 turns this off)
'last_successfully_ran': 0
# Whether the most recent run of this job within the specified time period was successful.
# If the most recent job in that timeframe is an error, this will trigger an "unstable" state (0 turns this check off)
'last_run_successful_in': 0
}

# Configures the monitoring period and the allowed number of errors in that period before a queue is marked
Expand Down Expand Up @@ -1502,52 +1503,52 @@

BG_MONITOR_LAST_SUCCESSFULLY_RUN_CONFIG = {
'anon_export': {
'last_successfully_ran': 32 * _DAY
'last_run_successful_in': 32 * _DAY
},
'article_cleanup_sync': {
'last_successfully_ran': 33 * _DAY
'last_run_successful_in': 33 * _DAY
},
'async_workflow_notifications': {
'last_successfully_ran': _WEEK + _DAY
'last_run_successful_in': _WEEK + _DAY
},
'check_latest_es_backup': {
'last_successfully_ran': _DAY + _HOUR
'last_run_successful_in': _DAY + _HOUR
},
'datalog_journal_added_update': {
'last_successfully_ran': _HOUR
'last_run_successful_in': _HOUR
},
'find_discontinued_soon': {
'last_successfully_ran': _DAY + _HOUR
'last_run_successful_in': _DAY + _HOUR
},
'harvest': {
'last_successfully_ran': _DAY + _HOUR
'last_run_successful_in': _DAY + _HOUR
},
'journal_csv': {
'last_successfully_ran': 2 * _HOUR
'last_run_successful_in': 2 * _HOUR
},
'monitor_bgjobs': {
'last_successfully_ran': _WEEK + _DAY
'last_run_successful_in': _WEEK + _DAY
},
'old_data_cleanup': {
'last_successfully_ran': 32 * _DAY
'last_run_successful_in': 32 * _DAY
},
'prune_es_backups': {
'last_successfully_ran': _DAY + _HOUR
'last_run_successful_in': _DAY + _HOUR
},
'public_data_dump': {
'last_successfully_ran': 32 * _DAY
'last_run_successful_in': 32 * _DAY
},
'read_news': {
'last_successfully_ran': 2 * _HOUR
'last_run_successful_in': 2 * _HOUR
},
'reporting': {
'last_successfully_ran': 32 * _DAY
'last_run_successful_in': 32 * _DAY
},
'request_es_backup': {
'last_successfully_ran': _DAY + _HOUR
'last_run_successful_in': _DAY + _HOUR
},
'sitemap': {
'last_successfully_ran': _DAY + _HOUR
'last_run_successful_in': _DAY + _HOUR
}
}

Expand Down

0 comments on commit c6f7a11

Please sign in to comment.