account for cascades and schedule deletions
mifu67 committed Dec 18, 2024
1 parent 22162a4 commit 46c1d9a
Showing 1 changed file with 16 additions and 42 deletions.
58 changes: 16 additions & 42 deletions src/sentry/workflow_engine/migration_helpers/alert_rule.py
@@ -1,32 +1,14 @@
# NOTE: will have to rebase and add these changes to the file created by Colleen once her changes land
from sentry.deletions.models.scheduleddeletion import RegionScheduledDeletion
from sentry.incidents.models.alert_rule import AlertRule
from sentry.incidents.utils.types import DATA_SOURCE_SNUBA_QUERY_SUBSCRIPTION
from sentry.snuba.models import QuerySubscription
from sentry.users.services.user import RpcUser
from sentry.workflow_engine.models import (
    AlertRuleDetector,
    AlertRuleWorkflow,
    DataSource,
    Detector,
    DetectorState,
    DetectorWorkflow,
    Workflow,
    WorkflowDataConditionGroup,
)


def get_alert_rule_lookup_tables(
    alert_rule: AlertRule,
) -> tuple[AlertRuleDetector, AlertRuleWorkflow] | None:
    try:
        alert_rule_detector = AlertRuleDetector.objects.get(alert_rule=alert_rule)
        alert_rule_workflow = AlertRuleWorkflow.objects.get(alert_rule=alert_rule)
    except (AlertRuleDetector.DoesNotExist, AlertRuleWorkflow.DoesNotExist):
        return None
    return (alert_rule_detector, alert_rule_workflow)
from sentry.workflow_engine.models import AlertRuleDetector, DataSource, Detector


def get_data_source(alert_rule: AlertRule) -> DataSource | None:
    # TODO: if dual deleting, then we should delete the subscriptions here and not in logic.py
    snuba_query = alert_rule.snuba_query
    organization = alert_rule.organization
    if not snuba_query or not organization:
@@ -51,35 +33,27 @@ def dual_delete_migrated_alert_rule(
    alert_rule: AlertRule,
    user: RpcUser | None = None,
) -> None:
    # Step one: get the lookup tables corresponding to the alert rule
    alert_rule_lookup_tables = get_alert_rule_lookup_tables(alert_rule=alert_rule)
    if alert_rule_lookup_tables is None:
    try:
        alert_rule_detector = AlertRuleDetector.objects.get(alert_rule=alert_rule)
    except AlertRuleDetector.DoesNotExist:
        # TODO: log failure
        return
    alert_rule_detector, alert_rule_workflow = alert_rule_lookup_tables
    # Step two: get DCG, workflow, detector, detector state, data source using the lookup tables

    detector: Detector = alert_rule_detector.detector
    workflow: Workflow = alert_rule_workflow.workflow
    data_condition_group = detector.workflow_condition_group

    data_source = get_data_source(alert_rule=alert_rule)
    if data_source is None:
        # TODO: log failure
        return
    try:
        detector_state = DetectorState.objects.get(detector=detector)
        detector_workflow = DetectorWorkflow.objects.get(detector=detector, workflow=workflow)
        workflow_data_condition_group = WorkflowDataConditionGroup.objects.get(
            workflow=workflow, data_condition_group=data_condition_group
        )
    except (
        DetectorState.DoesNotExist,
        DetectorWorkflow.DoesNotExist,
        WorkflowDataConditionGroup.DoesNotExist,
    ):
        # TODO: log failure
        return

    # Step three: schedule everything for deletion
    # also deletes alert_rule_workflow
    RegionScheduledDeletion.schedule(instance=alert_rule, days=0, actor=user)
    RegionScheduledDeletion.schedule(instance=data_source, days=0, actor=user)
    # also deletes alert_rule_detector, detector_workflow, detector_state
    RegionScheduledDeletion.schedule(instance=detector, days=0, actor=user)
    # also deletes workflow_data_condition_group
    RegionScheduledDeletion.schedule(instance=data_condition_group, days=0, actor=user)

    # What is the equivalent of SNAPSHOT in the new world?
    pass
    return
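
Because the rendered diff above interleaves removed and added lines, the following is a sketch of what dual_delete_migrated_alert_rule looks like once this commit is applied, assembled from the added lines and the unchanged context shown in the hunks. Treat it as a reading aid, not an exact copy of the resulting file; get_data_source is the helper shown earlier in the diff (its full body is collapsed above and not reproduced here).

from sentry.deletions.models.scheduleddeletion import RegionScheduledDeletion
from sentry.incidents.models.alert_rule import AlertRule
from sentry.users.services.user import RpcUser
from sentry.workflow_engine.models import AlertRuleDetector, DataSource, Detector


def dual_delete_migrated_alert_rule(
    alert_rule: AlertRule,
    user: RpcUser | None = None,
) -> None:
    # Look up the AlertRuleDetector row linking the legacy alert rule to its
    # detector; bail out (TODO: log failure) if no such row exists.
    try:
        alert_rule_detector = AlertRuleDetector.objects.get(alert_rule=alert_rule)
    except AlertRuleDetector.DoesNotExist:
        # TODO: log failure
        return

    detector: Detector = alert_rule_detector.detector
    data_condition_group = detector.workflow_condition_group

    # get_data_source is defined earlier in this module (partially visible in the diff).
    data_source = get_data_source(alert_rule=alert_rule)
    if data_source is None:
        # TODO: log failure
        return

    # Schedule everything for deletion and let the cascades noted in the diff
    # comments handle the rest: deleting the alert rule also deletes
    # alert_rule_workflow; deleting the detector also deletes
    # alert_rule_detector, detector_workflow, and detector_state; deleting the
    # data condition group also deletes workflow_data_condition_group.
    RegionScheduledDeletion.schedule(instance=alert_rule, days=0, actor=user)
    RegionScheduledDeletion.schedule(instance=data_source, days=0, actor=user)
    RegionScheduledDeletion.schedule(instance=detector, days=0, actor=user)
    RegionScheduledDeletion.schedule(instance=data_condition_group, days=0, actor=user)
    return

This is the "account for cascades" part of the commit: the removed try/except block that fetched DetectorState, DetectorWorkflow, and WorkflowDataConditionGroup is no longer needed, since only the four top-level objects are scheduled explicitly and their cascades remove the related rows.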
