diff --git a/src/sentry/incidents/logic.py b/src/sentry/incidents/logic.py
index a59fc409354300..0f538da5a1addf 100644
--- a/src/sentry/incidents/logic.py
+++ b/src/sentry/incidents/logic.py
@@ -102,6 +102,7 @@
 from sentry.utils import metrics
 from sentry.utils.audit import create_audit_entry_from_user
 from sentry.utils.snuba import is_measurement
+from sentry.workflow_engine.migration_helpers.alert_rule import dual_delete_migrated_alert_rule
 
 if TYPE_CHECKING:
     from sentry.incidents.utils.types import AlertRuleActivationConditionType
@@ -1022,9 +1023,13 @@ def delete_alert_rule(
             data=alert_rule.get_audit_log_data(),
             event=audit_log.get_event_id("ALERT_RULE_REMOVE"),
         )
-
-    subscriptions = _unpack_snuba_query(alert_rule).subscriptions.all()
-    bulk_delete_snuba_subscriptions(subscriptions)
+    subscriptions = _unpack_snuba_query(alert_rule).subscriptions.all()
+    if not features.has(
+        "organizations:workflow-engine-metric-alert-dual-write", alert_rule.organization
+    ):
+        # NOTE: when the organization is flagged into dual write, the dual delete
+        # helpers take care of deleting the subscriptions
+        bulk_delete_snuba_subscriptions(subscriptions)
 
     schedule_update_project_config(alert_rule, [sub.project for sub in subscriptions])
 
@@ -1049,7 +1054,10 @@ def delete_alert_rule(
         )
     else:
         RegionScheduledDeletion.schedule(instance=alert_rule, days=0, actor=user)
-
+    if features.has(
+        "organizations:workflow-engine-metric-alert-dual-write", alert_rule.organization
+    ):
+        dual_delete_migrated_alert_rule(alert_rule=alert_rule, user=user)
     alert_rule.update(status=AlertRuleStatus.SNAPSHOT.value)
 
     if alert_rule.id:
diff --git a/src/sentry/workflow_engine/migration_helpers/alert_rule.py b/src/sentry/workflow_engine/migration_helpers/alert_rule.py
new file mode 100644
index 00000000000000..2dc877b4ab45d0
--- /dev/null
+++ b/src/sentry/workflow_engine/migration_helpers/alert_rule.py
@@ -0,0 +1,65 @@
+# NOTE: will have to rebase and add these changes to the file created by Colleen once her changes land
+from sentry.deletions.models.scheduleddeletion import RegionScheduledDeletion
+from sentry.incidents.models.alert_rule import AlertRule
+from sentry.incidents.utils.types import DATA_SOURCE_SNUBA_QUERY_SUBSCRIPTION
+from sentry.snuba.models import QuerySubscription
+from sentry.snuba.subscriptions import bulk_delete_snuba_subscriptions
+from sentry.users.services.user import RpcUser
+from sentry.workflow_engine.models import (
+    AlertRuleDetector,
+    DataConditionGroup,
+    DataSource,
+    Detector,
+)
+
+
+def get_data_source(alert_rule: AlertRule) -> DataSource | None:
+    # TODO: if dual deleting, we should delete the subscriptions here rather than in logic.py
+    snuba_query = alert_rule.snuba_query
+    organization = alert_rule.organization
+    if not snuba_query or not organization:
+        # This shouldn't be possible, but just in case.
+        return None
+    try:
+        query_subscription = QuerySubscription.objects.get(snuba_query=snuba_query.id)
+    except QuerySubscription.DoesNotExist:
+        return None
+    try:
+        data_source = DataSource.objects.get(
+            organization=organization,
+            query_id=query_subscription.id,
+            type=DATA_SOURCE_SNUBA_QUERY_SUBSCRIPTION,
+        )
+    except DataSource.DoesNotExist:
+        return None
+    bulk_delete_snuba_subscriptions([query_subscription])
+    return data_source
+
+
+def dual_delete_migrated_alert_rule(
+    alert_rule: AlertRule,
+    user: RpcUser | None = None,
+) -> None:
+    try:
+        alert_rule_detector = AlertRuleDetector.objects.get(alert_rule=alert_rule)
+    except AlertRuleDetector.DoesNotExist:
+        # TODO: log failure
+        return
+
+    detector: Detector = alert_rule_detector.detector
+    data_condition_group: DataConditionGroup | None = detector.workflow_condition_group
+
+    data_source = get_data_source(alert_rule=alert_rule)
+    if data_source is None:
+        # TODO: log failure
+        return
+
+    # deleting the alert_rule also deletes alert_rule_workflow (in main delete logic)
+    # also deletes alert_rule_detector, detector_workflow, detector_state
+    RegionScheduledDeletion.schedule(instance=detector, days=0, actor=user)
+    # also deletes workflow_data_condition_group
+    if data_condition_group:
+        RegionScheduledDeletion.schedule(instance=data_condition_group, days=0, actor=user)
+    RegionScheduledDeletion.schedule(instance=data_source, days=0, actor=user)
+
+    return
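
Not part of the diff: a minimal, hedged sketch of how the new helper is driven from delete_alert_rule when the organizations:workflow-engine-metric-alert-dual-write flag is enabled. The wrapper name _schedule_workflow_engine_cleanup is hypothetical and used only for illustration; the flag check and the helper call mirror the changes above.

# Illustrative sketch only; not part of this change.
from sentry import features
from sentry.incidents.models.alert_rule import AlertRule
from sentry.users.services.user import RpcUser
from sentry.workflow_engine.migration_helpers.alert_rule import dual_delete_migrated_alert_rule


def _schedule_workflow_engine_cleanup(alert_rule: AlertRule, user: RpcUser | None = None) -> None:
    # Only organizations flagged into dual write have Detector/DataSource rows to clean up.
    if features.has(
        "organizations:workflow-engine-metric-alert-dual-write", alert_rule.organization
    ):
        # Schedules deletion of the Detector, its DataConditionGroup, and the DataSource;
        # the snuba subscriptions themselves are deleted inside get_data_source().
        dual_delete_migrated_alert_rule(alert_rule=alert_rule, user=user)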