Merge branch 'main' into sms-soak-test

sastels authored Nov 7, 2023
2 parents 60fa612 + e461edc commit de27377
Showing 18 changed files with 1,186 additions and 1,625 deletions.
2 changes: 2 additions & 0 deletions .devcontainer/devcontainer.json
@@ -26,6 +26,7 @@
"GitHub.copilot",
"GitHub.copilot-labs",
"googlecloudtools.cloudcode",
"kaiwood.center-editor-window",
"matangover.mypy",
"ms-azuretools.vscode-docker",
"ms-ossdata.vscode-postgresql",
@@ -35,6 +36,7 @@
"mtxr.sqltools",
"mtxr.sqltools-driver-pg",
"pmbenjamin.vscode-snyk",
"timonwong.shellcheck",
"usernamehw.errorlens",
"visualstudioexptteam.vscodeintellicode",
"wenfangdu.jump",
2 changes: 1 addition & 1 deletion .vscode/launch.json
@@ -43,7 +43,7 @@
"-l",
"DEBUG",
"-Q",
"database-tasks,-priority-database-tasks.fifo,-normal-database-tasks,-bulk-database-tasks,job-tasks,notify-internal-tasks,periodic-tasks,priority-tasks,bulk-tasks,reporting-tasks,research-mode-tasks,retry-tasks,send-sms-tasks,send-email-tasks,service-callbacks,delivery-receipts",
"database-tasks,-priority-database-tasks.fifo,-normal-database-tasks,-bulk-database-tasks,job-tasks,notify-internal-tasks,periodic-tasks,priority-tasks,normal-tasks,bulk-tasks,reporting-tasks,research-mode-tasks,retry-tasks,send-sms-tasks,send-sms-high,send-sms-medium,send-sms-low,send-throttled-sms-tasks,send-email-high,send-email-medium,send-email-low,send-email-tasks,service-callbacks,delivery-receipts",
]
},
{
6 changes: 3 additions & 3 deletions Makefile
@@ -58,9 +58,9 @@ run-celery-local: ## Run the celery workers with all the queues
run-celery-local-filtered: ## Run the celery workers with all queues but filter out common scheduled tasks
./scripts/run_celery_local.sh 2>&1 >/dev/null | grep -Ev 'beat|in-flight-to-inbox|run-scheduled-jobs|check-job-status'

.PHONY: run-celery-beat
run-celery-beat: ## Run the celery beat
./scripts/run_celery_beat.sh
.PHONY: run-celery-beat-local
run-celery-beat-local: ## Run the celery beat
./scripts/run_celery_beat_local.sh

.PHONY: run-celery-purge
run-celery-purge: ## Purge the celery queues
3 changes: 2 additions & 1 deletion app/celery/provider_tasks.py
@@ -15,6 +15,7 @@
NotificationTechnicalFailureException,
)
from app.models import NOTIFICATION_TECHNICAL_FAILURE
from app.notifications import build_retry_task_params
from app.notifications.callbacks import _check_and_queue_callback_task


@@ -127,7 +128,7 @@ def _deliver_sms(self, notification_id):
# Once the previous retry failed, log the exception and retry again,
# this time with the retry parameters for the notification's type and priority.
current_app.logger.exception("SMS notification delivery for id: {} failed".format(notification_id))
self.retry(queue=QueueNames.RETRY)
self.retry(**build_retry_task_params(notification.notification_type, notification.template.process_type))
except self.MaxRetriesExceededError:
message = (
"RETRY FAILED: Max retries reached. The task send_sms_to_provider failed for notification {}. "
14 changes: 9 additions & 5 deletions app/config.py
@@ -296,6 +296,7 @@ class Config(object):
ONE_OFF_MESSAGE_FILENAME = "Report"
MAX_VERIFY_CODE_COUNT = 10
JOBS_MAX_SCHEDULE_HOURS_AHEAD = 96
FAILED_LOGIN_LIMIT = int(os.getenv("FAILED_LOGIN_LIMIT", 10))  # cast needed: os.getenv returns a string when the variable is set

# be careful increasing this size without being sure that we won't see slowness in pysftp
MAX_LETTER_PDF_ZIP_FILESIZE = 40 * 1024 * 1024 # 40mb
@@ -573,7 +574,6 @@ class Config(object):
CSV_BULK_REDIRECT_THRESHOLD = os.getenv("CSV_BULK_REDIRECT_THRESHOLD", 200)

# Endpoint of Cloudwatch agent running as a side car in EKS listening for embedded metrics
FF_CLOUDWATCH_METRICS_ENABLED = env.bool("FF_CLOUDWATCH_METRICS_ENABLED", False)
CLOUDWATCH_AGENT_EMF_PORT = 25888
CLOUDWATCH_AGENT_ENDPOINT = os.getenv("CLOUDWATCH_AGENT_ENDPOINT", f"tcp://{STATSD_HOST}:{CLOUDWATCH_AGENT_EMF_PORT}")

@@ -582,14 +582,15 @@
BR_WARNING_PERCENTAGE = 0.05
BR_CRITICAL_PERCENTAGE = 0.1

FF_SALESFORCE_CONTACT = env.bool("FF_SALESFORCE_CONTACT", False)

# Feature flags for bounce rate
# Timestamp in epoch milliseconds to seed the bounce rate. Data is seeded for the 24-hour window up to and including this timestamp.
FF_BOUNCE_RATE_SEED_EPOCH_MS = os.getenv("FF_BOUNCE_RATE_SEED_EPOCH_MS", False)

# Feature flag to enable custom retry policies such as lowering the retry period for certain priority lanes.
FF_CELERY_CUSTOM_TASK_PARAMS = env.bool("FF_CELERY_CUSTOM_TASK_PARAMS", True)
FF_CLOUDWATCH_METRICS_ENABLED = env.bool("FF_CLOUDWATCH_METRICS_ENABLED", False)
# Feature flags for email_daily_limit
FF_EMAIL_DAILY_LIMIT = env.bool("FF_EMAIL_DAILY_LIMIT", False)
FF_SALESFORCE_CONTACT = env.bool("FF_SALESFORCE_CONTACT", False)

@classmethod
def get_sensitive_config(cls) -> list[str]:
@@ -686,9 +687,11 @@ class Test(Development):
API_HOST_NAME = "http://localhost:6011"

TEMPLATE_PREVIEW_API_HOST = "http://localhost:9999"
FF_EMAIL_DAILY_LIMIT = False
CRM_GITHUB_PERSONAL_ACCESS_TOKEN = "test-token"
CRM_ORG_LIST_URL = "https://test-url.com"
FAILED_LOGIN_LIMIT = 0

FF_EMAIL_DAILY_LIMIT = False


class Production(Config):
@@ -706,6 +709,7 @@ class Production(Config):
API_RATE_LIMIT_ENABLED = True
CHECK_PROXY_HEADER = False
CRONITOR_ENABLED = False
FF_CELERY_CUSTOM_TASK_PARAMS = False


class Staging(Production):
3 changes: 3 additions & 0 deletions app/delivery/send_to_providers.py
@@ -60,6 +60,7 @@ def send_sms_to_provider(notification):
inactive_service_failure(notification=notification)
return

# If the notification has not been sent already, its status should still be "created".
if notification.status == "created":
provider = provider_to_use(
SMS_TYPE,
@@ -215,6 +216,8 @@ def send_email_to_provider(notification: Notification):
if not service.active:
inactive_service_failure(notification=notification)
return

# If the notification has not been sent already, its status should still be "created".
if notification.status == "created":
provider = provider_to_use(EMAIL_TYPE, notification.id)

36 changes: 36 additions & 0 deletions app/notifications/__init__.py
@@ -0,0 +1,36 @@
from typing import Any, Dict

from flask import current_app

from app.config import QueueNames
from app.models import BULK, NORMAL, PRIORITY, SMS_TYPE

# Default retry periods for sending notifications.
RETRY_DEFAULT = 300
RETRY_HIGH = 25

RETRY_PERIODS = {
BULK: RETRY_DEFAULT,
NORMAL: RETRY_DEFAULT,
PRIORITY: RETRY_HIGH,
}


def build_retry_task_params(notification_type: str, notification_process_type: str) -> Dict[str, Any]:
"""
Build the retry task parameters for a notification delivery task.
If the notification is a high-priority SMS, set the retry policy to retry every 25 seconds;
otherwise fall back to the default retry policy of retrying every 5 minutes.
"""
params: Dict[str, Any] = {"queue": QueueNames.RETRY}
if current_app.config["FF_CELERY_CUSTOM_TASK_PARAMS"] is False:
return params

# Overriding the retry policy is only supported for SMS for now;
# email support coming later.
if notification_type == SMS_TYPE:
params["countdown"] = RETRY_PERIODS[notification_process_type]
else:
params["countdown"] = RETRY_DEFAULT
return params
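As a usage sketch (not part of the diff): under a Flask app context, the helper returns the kwargs that the delivery tasks spread into Celery's `self.retry()`. Here `app` stands in for the Flask application object, and the assertion assumes `FF_CELERY_CUSTOM_TASK_PARAMS` is enabled, as it is by default.

```python
from app.config import QueueNames
from app.models import PRIORITY, SMS_TYPE
from app.notifications import RETRY_HIGH, build_retry_task_params

# Requires a Flask app context, as in the Celery tasks above.
with app.app_context():
    params = build_retry_task_params(SMS_TYPE, PRIORITY)
    # A high-priority SMS retries after 25 seconds instead of the
    # default 300 (assuming FF_CELERY_CUSTOM_TASK_PARAMS is on).
    assert params == {"queue": QueueNames.RETRY, "countdown": RETRY_HIGH}

# The delivery task then spreads these kwargs into Celery's retry, e.g.:
#   self.retry(**build_retry_task_params(
#       notification.notification_type, notification.template.process_type))
```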
5 changes: 4 additions & 1 deletion app/notifications/process_notifications.py
@@ -210,7 +210,10 @@ def db_save_and_send_notification(notification: Notification):

deliver_task = choose_deliver_task(notification)
try:
deliver_task.apply_async([str(notification.id)], queue=notification.queue_name)
deliver_task.apply_async(
[str(notification.id)],
queue=notification.queue_name,
)
except Exception:
dao_delete_notifications_by_id(notification.id)
raise
7 changes: 6 additions & 1 deletion app/user/rest.py
@@ -230,7 +230,12 @@ def verify_user_password(user_id):
return jsonify({}), 204
else:
increment_failed_login_count(user_to_verify)
message = "Incorrect password"
if user_to_verify.failed_login_count >= current_app.config["FAILED_LOGIN_LIMIT"]:
message = "Failed login: Incorrect password for user_id {user_id} failed_login {failed_login_count} times".format(
user_id=user_id, failed_login_count=user_to_verify.failed_login_count
)
else:
message = "Incorrect password for user_id {user_id}".format(user_id=user_id)
errors = {"password": [message]}
raise InvalidRequest(errors, status_code=400)

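To make the branching above easier to scan, here is a sketch that distills it into a pure function; the name `failed_login_message` is hypothetical, while the message formats and the limit comparison are copied from the diff.

```python
def failed_login_message(user_id: str, failed_login_count: int, limit: int) -> str:
    # Hypothetical distillation of the verify_user_password branch above.
    if failed_login_count >= limit:
        return "Failed login: Incorrect password for user_id {user_id} failed_login {failed_login_count} times".format(
            user_id=user_id, failed_login_count=failed_login_count
        )
    return "Incorrect password for user_id {user_id}".format(user_id=user_id)

# e.g. with FAILED_LOGIN_LIMIT = 10:
assert failed_login_message("123", 3, 10) == "Incorrect password for user_id 123"
assert "failed_login 10 times" in failed_login_message("123", 10, 10)
```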
1 change: 1 addition & 0 deletions pytest.ini
@@ -15,6 +15,7 @@ env =
ASSET_DOMAIN=assets.notification.canada.ca
NOTIFY_EMAIL_DOMAIN=notification.canada.ca
AWS_EMF_ENVIRONMENT=local
D:FF_CELERY_CUSTOM_TASK_PARAMS=True
D:FF_SALESFORCE_CONTACT=True
D:FF_CLOUDWATCH_METRICS_ENABLED=True
D:REDIS_URL=redis://localhost:6380
16 changes: 16 additions & 0 deletions scripts/run_celery_beat.sh
@@ -4,4 +4,20 @@

set -e

# Check and see if this is running in K8s and if so, wait for cloudwatch agent
if [[ ! -z "${STATSD_HOST}" ]]; then

echo "Initializing... Waiting for CWAgent to become ready."
while :
do
if nc -vz $STATSD_HOST 25888; then
echo "CWAgent is Ready."
break;
else
echo "Waiting for CWAgent to become ready."
sleep 1
fi
done
fi

celery -A run_celery.notify_celery beat --loglevel=INFO
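The `nc -vz` loop above (repeated in the worker scripts below) simply blocks until a TCP connection to the CloudWatch agent's EMF port succeeds. A rough Python equivalent for illustration only; the scripts themselves use netcat:

```python
import os
import socket
import time


def wait_for_cwagent(host: str, port: int = 25888, interval: float = 1.0) -> None:
    """Block until a TCP connection to the CloudWatch agent succeeds."""
    while True:
        try:
            with socket.create_connection((host, port), timeout=2):
                print("CWAgent is Ready.")
                return
        except OSError:
            print("Waiting for CWAgent to become ready.")
            time.sleep(interval)


statsd_host = os.getenv("STATSD_HOST")
if statsd_host:  # Only wait when running in K8s, as in the shell scripts.
    wait_for_cwagent(statsd_host)
```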
7 changes: 7 additions & 0 deletions scripts/run_celery_beat_local.sh
@@ -0,0 +1,7 @@
#!/bin/sh

# runs the celery beat process. This runs the periodic tasks

set -e

celery -A run_celery.notify_celery beat --loglevel=INFO
16 changes: 16 additions & 0 deletions scripts/run_celery_core_tasks.sh
@@ -4,6 +4,22 @@

set -e

# Check and see if this is running in K8s and if so, wait for cloudwatch agent
if [[ ! -z "${STATSD_HOST}" ]]; then

echo "Initializing... Waiting for CWAgent to become ready."
while :
do
if nc -vz $STATSD_HOST 25888; then
echo "CWAgent is Ready."
break;
else
echo "Waiting for CWAgent to become ready."
sleep 1
fi
done
fi

echo "Start celery, concurrency: ${CELERY_CONCURRENCY-4}"

celery -A run_celery.notify_celery worker --pidfile="/tmp/celery.pid" --loglevel=INFO --concurrency=${CELERY_CONCURRENCY-4} -Q database-tasks,-priority-database-tasks.fifo,-normal-database-tasks,-bulk-database-tasks,job-tasks,notify-internal-tasks,periodic-tasks,priority-tasks,normal-tasks,bulk-tasks,reporting-tasks,research-mode-tasks,retry-tasks,service-callbacks,delivery-receipts
16 changes: 16 additions & 0 deletions scripts/run_celery_send_email.sh
@@ -4,6 +4,22 @@

set -e

# Check and see if this is running in K8s and if so, wait for cloudwatch agent
if [[ ! -z "${STATSD_HOST}" ]]; then

echo "Initializing... Waiting for CWAgent to become ready."
while :
do
if nc -vz $STATSD_HOST 25888; then
echo "CWAgent is Ready."
break;
else
echo "Waiting for CWAgent to become ready."
sleep 1
fi
done
fi

echo "Start celery, concurrency: ${CELERY_CONCURRENCY-4}"

# TODO: we shouldn't be using the send-email-tasks queue anymore - once we verify this we can remove it
25 changes: 25 additions & 0 deletions tests/app/notifications/test_process_notification.py
@@ -1,5 +1,6 @@
import datetime
import uuid
from typing import Any, Dict
from unittest.mock import call

import pytest
@@ -14,12 +15,18 @@
from app.config import QueueNames
from app.dao.service_sms_sender_dao import dao_update_service_sms_sender
from app.models import (
BULK,
EMAIL_TYPE,
LETTER_TYPE,
NORMAL,
PRIORITY,
SMS_TYPE,
Notification,
NotificationHistory,
ScheduledNotification,
Template,
)
from app.notifications import RETRY_PERIODS, build_retry_task_params
from app.notifications.process_notifications import (
choose_queue,
create_content_for_notification,
@@ -1085,6 +1092,24 @@ def test_db_save_and_send_notification_throws_exception_deletes_notification(
assert Notification.query.count() == 0
assert NotificationHistory.query.count() == 0

@pytest.mark.parametrize(
("notification_type, process_type, expected_retry_period"),
[
(EMAIL_TYPE, BULK, RETRY_PERIODS[BULK]),
(EMAIL_TYPE, NORMAL, RETRY_PERIODS[NORMAL]),
(EMAIL_TYPE, PRIORITY, RETRY_PERIODS[NORMAL]),
(SMS_TYPE, BULK, RETRY_PERIODS[BULK]),
(SMS_TYPE, NORMAL, RETRY_PERIODS[NORMAL]),
(SMS_TYPE, PRIORITY, RETRY_PERIODS[PRIORITY]),
],
)
def test_retry_task_parameters(self, notify_api, notification_type, process_type, expected_retry_period):
with notify_api.app_context():
params: Dict[str, Any] = build_retry_task_params(notification_type, process_type)

assert params["queue"] == QueueNames.RETRY
assert params["countdown"] == expected_retry_period

def test_db_save_and_send_notification_throws_exception_when_missing_template(self, sample_api_key, mocker):
mocker.patch("app.celery.provider_tasks.deliver_sms.apply_async")
assert Notification.query.count() == 0
31 changes: 31 additions & 0 deletions tests/app/user/test_rest.py
@@ -1544,3 +1544,34 @@ def test_update_user_blocked(admin_request, sample_user, account_change_template

assert resp["data"]["id"] == str(sample_user.id)
assert resp["data"]["blocked"]


class TestFailedLogin:
def test_update_user_password_saves_correctly(self, client, sample_service, mocker):
sample_user = sample_service.users[0]
new_password = "tQETOgIO8yzDMyCsDjLZIEVZHAvkFArYfmSI1KTsJnlnPohI2tfIa8kfng7bxCm"
data = {"_password": new_password}
auth_header = create_authorization_header()
headers = [("Content-Type", "application/json"), auth_header]
resp = client.post(
url_for("user.update_password", user_id=sample_user.id),
data=json.dumps(data),
headers=headers,
)
assert resp.status_code == 200

json_resp = json.loads(resp.get_data(as_text=True))
assert json_resp["data"]["password_changed_at"] is not None
data = {"password": new_password}
auth_header = create_authorization_header()
headers = [("Content-Type", "application/json"), auth_header]
# We force the password check to fail on login
mocker.patch("app.models.User.check_password", return_value=False)

resp = client.post(
url_for("user.verify_user_password", user_id=str(sample_user.id)),
data=json.dumps(data),
headers=headers,
)
assert resp.status_code == 400
assert "Incorrect password for user_id" in resp.json["message"]["password"][0]