Skip to content

Commit

Permalink
rebase dual delete helpers
Browse files Browse the repository at this point in the history
  • Loading branch information
mifu67 committed Dec 20, 2024
1 parent 8d5d0b5 commit 951176e
Show file tree
Hide file tree
Showing 2 changed files with 67 additions and 6 deletions.
16 changes: 12 additions & 4 deletions src/sentry/incidents/logic.py
Original file line number Diff line number Diff line change
Expand Up @@ -102,6 +102,7 @@
from sentry.utils import metrics
from sentry.utils.audit import create_audit_entry_from_user
from sentry.utils.snuba import is_measurement
from sentry.workflow_engine.migration_helpers.alert_rule import dual_delete_migrated_alert_rule

if TYPE_CHECKING:
from sentry.incidents.utils.types import AlertRuleActivationConditionType
Expand Down Expand Up @@ -1022,9 +1023,13 @@ def delete_alert_rule(
data=alert_rule.get_audit_log_data(),
event=audit_log.get_event_id("ALERT_RULE_REMOVE"),
)

subscriptions = _unpack_snuba_query(alert_rule).subscriptions.all()
bulk_delete_snuba_subscriptions(subscriptions)
if not features.has(
"organizations:workflow-engine-metric-alert-dual-write", alert_rule.organization
):
# NOTE: we will delete the subscription within the dual delete helpers
# if the organization is flagged into dual write
subscriptions = _unpack_snuba_query(alert_rule).subscriptions.all()
bulk_delete_snuba_subscriptions(subscriptions)

schedule_update_project_config(alert_rule, [sub.project for sub in subscriptions])

Expand All @@ -1049,7 +1054,10 @@ def delete_alert_rule(
)
else:
RegionScheduledDeletion.schedule(instance=alert_rule, days=0, actor=user)

if features.has(
"organizations:workflow-engine-metric-alert-dual-write", alert_rule.organization
):
dual_delete_migrated_alert_rule(alert_rule=alert_rule, user=user)
alert_rule.update(status=AlertRuleStatus.SNAPSHOT.value)

if alert_rule.id:
Expand Down
57 changes: 55 additions & 2 deletions src/sentry/workflow_engine/migration_helpers/alert_rule.py
Original file line number Diff line number Diff line change
@@ -1,6 +1,9 @@
from sentry.deletions.models.scheduleddeletion import RegionScheduledDeletion
from sentry.incidents.grouptype import MetricAlertFire
from sentry.incidents.models.alert_rule import AlertRule
from sentry.incidents.utils.types import DATA_SOURCE_SNUBA_QUERY_SUBSCRIPTION
from sentry.snuba.models import QuerySubscription, SnubaQuery
from sentry.snuba.subscriptions import bulk_delete_snuba_subscriptions
from sentry.users.services.user import RpcUser
from sentry.workflow_engine.models import (
AlertRuleDetector,
Expand Down Expand Up @@ -42,12 +45,10 @@ def create_data_source(
) -> DataSource | None:
if not snuba_query:
return None

try:
query_subscription = QuerySubscription.objects.get(snuba_query=snuba_query.id)
except QuerySubscription.DoesNotExist:
return None

return DataSource.objects.create(
organization_id=organization_id,
query_id=query_subscription.id,
Expand Down Expand Up @@ -154,3 +155,55 @@ def migrate_alert_rule(
detector_workflow,
workflow_data_condition_group,
)


def get_data_source(alert_rule: AlertRule) -> DataSource | None:
    """Return the DataSource mirroring ``alert_rule``'s snuba query subscription.

    As a side effect, deletes the underlying snuba query subscription (dual
    delete responsibility — see the feature-flag branch in
    ``delete_alert_rule``).

    Returns None if any link in the chain — snuba query, query subscription,
    or data source — is missing.
    """
    # TODO: if dual deleting, then we should delete the subscriptions here and not in logic.py
    snuba_query = alert_rule.snuba_query
    organization = alert_rule.organization
    if not snuba_query or not organization:
        # This shouldn't be possible, but just in case.
        return None
    try:
        query_subscription = QuerySubscription.objects.get(snuba_query=snuba_query.id)
    except QuerySubscription.DoesNotExist:
        return None
    try:
        data_source = DataSource.objects.get(
            organization=organization,
            query_id=query_subscription.id,
            type=DATA_SOURCE_SNUBA_QUERY_SUBSCRIPTION,
        )
    except DataSource.DoesNotExist:
        return None
    # Bug fix: delete the fetched subscription INSTANCE — the original passed
    # the QuerySubscription model class, so nothing was actually deleted.
    bulk_delete_snuba_subscriptions([query_subscription])
    return data_source


def dual_delete_migrated_alert_rule(
    alert_rule: AlertRule,
    user: RpcUser | None = None,
) -> None:
    """Schedule deletion of the workflow-engine objects that mirror ``alert_rule``.

    No-ops (with a TODO to log) when the alert rule was never migrated or its
    data source cannot be resolved.
    """
    try:
        rule_detector = AlertRuleDetector.objects.get(alert_rule=alert_rule)
    except AlertRuleDetector.DoesNotExist:
        # TODO: log failure
        return

    detector: Detector = rule_detector.detector

    source = get_data_source(alert_rule=alert_rule)
    if source is None:
        # TODO: log failure
        return

    condition_group: DataConditionGroup | None = detector.workflow_condition_group

    # Deleting the detector cascades to alert_rule_detector, detector_workflow,
    # and detector_state; alert_rule_workflow is removed by the main delete
    # logic when the alert rule itself is deleted.
    RegionScheduledDeletion.schedule(instance=detector, days=0, actor=user)
    if condition_group:
        # Cascades to workflow_data_condition_group.
        RegionScheduledDeletion.schedule(instance=condition_group, days=0, actor=user)
    RegionScheduledDeletion.schedule(instance=source, days=0, actor=user)

0 comments on commit 951176e

Please sign in to comment.