diff --git a/src/sentry/api/authentication.py b/src/sentry/api/authentication.py index 42f3d8d9c4a9c0..b121d6d634f841 100644 --- a/src/sentry/api/authentication.py +++ b/src/sentry/api/authentication.py @@ -1,5 +1,6 @@ from __future__ import annotations +import logging import hashlib import random from collections.abc import Callable, Iterable @@ -22,7 +23,11 @@ from sentry import options from sentry.auth.services.auth import AuthenticatedToken from sentry.auth.system import SystemToken, is_internal_ip -from sentry.hybridcloud.models import ApiKeyReplica, ApiTokenReplica, OrgAuthTokenReplica +from sentry.hybridcloud.models import ( + ApiKeyReplica, + ApiTokenReplica, + OrgAuthTokenReplica, +) from sentry.hybridcloud.rpc.service import compare_signature from sentry.models.apiapplication import ApiApplication from sentry.models.apikey import ApiKey @@ -42,7 +47,12 @@ from sentry.users.services.user.service import user_service from sentry.utils.linksign import process_signature from sentry.utils.sdk import Scope -from sentry.utils.security.orgauthtoken_token import SENTRY_ORG_AUTH_TOKEN_PREFIX, hash_token +from sentry.utils.security.orgauthtoken_token import ( + SENTRY_ORG_AUTH_TOKEN_PREFIX, + hash_token, +) + +logger = logging.getLogger(__name__) class AuthenticationSiloLimit(SiloLimit): @@ -224,7 +234,9 @@ def authenticate_credentials( raise AuthenticationFailed("Unknown relay") try: - data = relay.public_key_object.unpack(request.body, relay_sig, max_age=60 * 5) + data = relay.public_key_object.unpack( + request.body, relay_sig, max_age=60 * 5 + ) request.relay = relay request.relay_request_data = data except UnpackError: @@ -330,7 +342,9 @@ class TokenStrLookupRequired(Exception): class UserAuthTokenAuthentication(StandardAuthentication): token_name = b"bearer" - def _find_or_update_token_by_hash(self, token_str: str) -> ApiToken | ApiTokenReplica: + def _find_or_update_token_by_hash( + self, token_str: str + ) -> ApiToken | ApiTokenReplica: """ Find token by hash or update token's hash value if only found via plaintext. @@ -377,9 +391,9 @@ def _find_or_update_token_by_hash(self, token_str: str) -> ApiToken | ApiTokenRe except (ApiToken.DoesNotExist, TokenStrLookupRequired): try: # If we can't find it by hash, use the plaintext string - api_token = ApiToken.objects.select_related("user", "application").get( - token=token_str - ) + api_token = ApiToken.objects.select_related( + "user", "application" + ).get(token=token_str) except ApiToken.DoesNotExist: # If the token does not exist by plaintext either, it is not a valid token raise AuthenticationFailed("Invalid token") @@ -392,6 +406,8 @@ def _find_or_update_token_by_hash(self, token_str: str) -> ApiToken | ApiTokenRe return api_token def accepts_auth(self, auth: list[bytes]) -> bool: + logger.error(f"AUTH {auth}") + if not super().accepts_auth(auth): return False @@ -401,13 +417,20 @@ def accepts_auth(self, auth: list[bytes]) -> bool: return True token_str = force_str(auth[1]) - return not token_str.startswith(SENTRY_ORG_AUTH_TOKEN_PREFIX) + user_auth_result = not token_str.startswith(SENTRY_ORG_AUTH_TOKEN_PREFIX) + startswith = token_str.startswith(SENTRY_ORG_AUTH_TOKEN_PREFIX) + logger.error( + f"USE USER AUTH? 
{user_auth_result} | prefix {SENTRY_ORG_AUTH_TOKEN_PREFIX} | token_str {token_str} | startswith {startswith}" + ) + + return user_auth_result def authenticate_token(self, request: Request, token_str: str) -> tuple[Any, Any]: + logger.error(f"USER AUTH REQUEST HEADERS: {request.headers}") user: AnonymousUser | User | RpcUser | None = AnonymousUser() - token: SystemToken | ApiTokenReplica | ApiToken | None = SystemToken.from_request( - request, token_str + token: SystemToken | ApiTokenReplica | ApiToken | None = ( + SystemToken.from_request(request, token_str) ) application_is_inactive = False @@ -452,6 +475,7 @@ class OrgAuthTokenAuthentication(StandardAuthentication): token_name = b"bearer" def accepts_auth(self, auth: list[bytes]) -> bool: + logger.error(f"ORG AUTH: {auth}") if not super().accepts_auth(auth) or len(auth) != 2: return False @@ -459,6 +483,8 @@ def accepts_auth(self, auth: list[bytes]) -> bool: return token_str.startswith(SENTRY_ORG_AUTH_TOKEN_PREFIX) def authenticate_token(self, request: Request, token_str: str) -> tuple[Any, Any]: + logger.error(f"REQUEST HEADERS: {request.headers}") + token_hashed = hash_token(token_str) token: OrgAuthTokenReplica | OrgAuthToken @@ -479,7 +505,11 @@ def authenticate_token(self, request: Request, token_str: str) -> tuple[Any, Any raise AuthenticationFailed("Invalid org token") return self.transform_auth( - None, token, "api_token", api_token_type=self.token_name, api_token_is_org_token=True + None, + token, + "api_token", + api_token_type=self.token_name, + api_token_is_org_token=True, ) diff --git a/src/sentry/api/endpoints/organization_releases.py b/src/sentry/api/endpoints/organization_releases.py index a905454461ef24..a3c03f482d5d5c 100644 --- a/src/sentry/api/endpoints/organization_releases.py +++ b/src/sentry/api/endpoints/organization_releases.py @@ -1,6 +1,7 @@ from __future__ import annotations import re +import logging from datetime import datetime, timedelta from django.db import IntegrityError @@ -12,7 +13,11 @@ from sentry import analytics, release_health from sentry.api.api_publish_status import ApiPublishStatus -from sentry.api.base import EnvironmentMixin, ReleaseAnalyticsMixin, region_silo_endpoint +from sentry.api.base import ( + EnvironmentMixin, + ReleaseAnalyticsMixin, + region_silo_endpoint, +) from sentry.api.bases import NoProjects from sentry.api.bases.organization import OrganizationReleasesBaseEndpoint from sentry.api.exceptions import ConflictError, InvalidRepository @@ -27,7 +32,10 @@ from sentry.api.utils import get_auth_api_token_type from sentry.exceptions import InvalidSearchQuery from sentry.models.activity import Activity -from sentry.models.orgauthtoken import is_org_auth_token_auth, update_org_auth_token_last_used +from sentry.models.orgauthtoken import ( + is_org_auth_token_auth, + update_org_auth_token_last_used, +) from sentry.models.project import Project from sentry.models.release import Release, ReleaseStatus from sentry.models.releases.exceptions import ReleaseCommitError @@ -50,6 +58,8 @@ ERR_INVALID_STATS_PERIOD = "Invalid %s. 
Valid choices are %s" +logger = logging.getLogger(__name__) + def get_stats_period_detail(key, choices): return ERR_INVALID_STATS_PERIOD % (key, ", ".join("'%s'" % x for x in choices)) @@ -61,7 +71,9 @@ def get_stats_period_detail(key, choices): def add_environment_to_queryset(queryset, filter_params): if "environment" in filter_params: return queryset.filter( - releaseprojectenvironment__environment__name__in=filter_params["environment"], + releaseprojectenvironment__environment__name__in=filter_params[ + "environment" + ], releaseprojectenvironment__project_id__in=filter_params["project_id"], ) return queryset @@ -70,7 +82,9 @@ def add_environment_to_queryset(queryset, filter_params): def add_date_filter_to_queryset(queryset, filter_params): """Once date has been coalesced over released and added, use it to filter releases""" if filter_params["start"] and filter_params["end"]: - return queryset.filter(date__gte=filter_params["start"], date__lte=filter_params["end"]) + return queryset.filter( + date__gte=filter_params["start"], date__lte=filter_params["end"] + ) return queryset @@ -108,7 +122,8 @@ def _filter_releases_by_query(queryset, organization, query, filter_params): if search_filter.key.name == SEMVER_ALIAS: queryset = queryset.filter_by_semver( - organization.id, parse_semver(search_filter.value.raw_value, search_filter.operator) + organization.id, + parse_semver(search_filter.value.raw_value, search_filter.operator), ) if search_filter.key.name == SEMVER_PACKAGE_ALIAS: @@ -144,7 +159,9 @@ class ReleaseSerializerWithProjects(ReleaseWithVersionSerializer): headCommits = ListField( child=ReleaseHeadCommitSerializerDeprecated(), required=False, allow_null=False ) - refs = ListField(child=ReleaseHeadCommitSerializer(), required=False, allow_null=False) + refs = ListField( + child=ReleaseHeadCommitSerializer(), required=False, allow_null=False + ) def debounce_update_release_health_data(organization, project_ids: list[int]): @@ -162,14 +179,18 @@ def debounce_update_release_health_data(organization, project_ids: list[int]): if not should_update: return - projects = {p.id: p for p in Project.objects.get_many_from_cache(should_update.keys())} + projects = { + p.id: p for p in Project.objects.get_many_from_cache(should_update.keys()) + } # This gives us updates for all release-projects which have seen new # health data over the last days. It will miss releases where the last # date is longer than what `get_changed_project_release_model_adoptions` # considers recent. - project_releases = release_health.backend.get_changed_project_release_model_adoptions( - should_update.keys() + project_releases = ( + release_health.backend.get_changed_project_release_model_adoptions( + should_update.keys() + ) ) # Check which we already have rows for. @@ -202,7 +223,9 @@ def debounce_update_release_health_data(organization, project_ids: list[int]): # we want to create the release the first time we observed it on the # health side. release = Release.get_or_create( - project=project, version=version, date_added=dates.get((project_id, version)) + project=project, + version=version, + date_added=dates.get((project_id, version)), ) # Make sure that the release knows about this project. 
Like we had before @@ -221,18 +244,18 @@ class OrganizationReleasesEndpoint( "GET": ApiPublishStatus.UNKNOWN, "POST": ApiPublishStatus.UNKNOWN, } - SESSION_SORTS = frozenset( - [ - "crash_free_sessions", - "crash_free_users", - "sessions", - "users", - "sessions_24h", - "users_24h", - ] - ) - - def get_projects(self, request: Request, organization, project_ids=None, project_slugs=None): + SESSION_SORTS = frozenset([ + "crash_free_sessions", + "crash_free_users", + "sessions", + "users", + "sessions_24h", + "users_24h", + ]) + + def get_projects( + self, request: Request, organization, project_ids=None, project_slugs=None + ): return super().get_projects( request, organization, @@ -259,11 +282,17 @@ def get(self, request: Request, organization) -> Response: sort = request.GET.get("sort") or "date" health_stat = request.GET.get("healthStat") or "sessions" summary_stats_period = request.GET.get("summaryStatsPeriod") or "14d" - health_stats_period = request.GET.get("healthStatsPeriod") or ("24h" if with_health else "") + health_stats_period = request.GET.get("healthStatsPeriod") or ( + "24h" if with_health else "" + ) if summary_stats_period not in STATS_PERIODS: - raise ParseError(detail=get_stats_period_detail("summaryStatsPeriod", STATS_PERIODS)) + raise ParseError( + detail=get_stats_period_detail("summaryStatsPeriod", STATS_PERIODS) + ) if health_stats_period and health_stats_period not in STATS_PERIODS: - raise ParseError(detail=get_stats_period_detail("healthStatsPeriod", STATS_PERIODS)) + raise ParseError( + detail=get_stats_period_detail("healthStatsPeriod", STATS_PERIODS) + ) if health_stat not in ("sessions", "users"): raise ParseError(detail="invalid healthStat") @@ -271,7 +300,9 @@ def get(self, request: Request, organization) -> Response: paginator_kwargs = {} try: - filter_params = self.get_filter_params(request, organization, date_filter_optional=True) + filter_params = self.get_filter_params( + request, organization, date_filter_optional=True + ) except NoProjects: return Response([]) @@ -297,7 +328,9 @@ def get(self, request: Request, organization) -> Response: queryset = add_environment_to_queryset(queryset, filter_params) if query: try: - queryset = _filter_releases_by_query(queryset, organization, query, filter_params) + queryset = _filter_releases_by_query( + queryset, organization, query, filter_params + ) except InvalidSearchQuery as e: return Response( {"detail": str(e)}, @@ -316,7 +349,9 @@ def get(self, request: Request, organization) -> Response: queryset = queryset.order_by("-date") paginator_kwargs["order_by"] = "-date" elif sort == "build": - queryset = queryset.filter(build_number__isnull=False).order_by("-build_number") + queryset = queryset.filter(build_number__isnull=False).order_by( + "-build_number" + ) paginator_kwargs["order_by"] = "-build_number" elif sort == "semver": queryset = queryset.annotate_prerelease_column() @@ -335,7 +370,9 @@ def get(self, request: Request, organization) -> Response: elif sort in self.SESSION_SORTS: if not flatten: return Response( - {"detail": "sorting by crash statistics requires flattening (flatten=1)"}, + { + "detail": "sorting by crash statistics requires flattening (flatten=1)" + }, status=400, ) @@ -347,19 +384,25 @@ def qs_load_func(queryset, total_offset, qs_offset, limit): : total_offset + limit ] ) - releases_with_session_data = release_health.backend.check_releases_have_health_data( - organization.id, - filter_params["project_id"], - release_versions, - ( - filter_params["start"] - if filter_params["start"] - else 
datetime.utcnow() - timedelta(days=90) - ), - filter_params["end"] if filter_params["end"] else datetime.utcnow(), + releases_with_session_data = ( + release_health.backend.check_releases_have_health_data( + organization.id, + filter_params["project_id"], + release_versions, + ( + filter_params["start"] + if filter_params["start"] + else datetime.utcnow() - timedelta(days=90) + ), + filter_params["end"] + if filter_params["end"] + else datetime.utcnow(), + ) ) valid_versions = [ - rv for rv in release_versions if rv not in releases_with_session_data + rv + for rv in release_versions + if rv not in releases_with_session_data ] results = list( @@ -372,7 +415,8 @@ def qs_load_func(queryset, total_offset, qs_offset, limit): paginator_cls = MergingOffsetPaginator paginator_kwargs.update( - data_load_func=lambda offset, limit: release_health.backend.get_project_releases_by_stability( + data_load_func=lambda offset, + limit: release_health.backend.get_project_releases_by_stability( project_ids=filter_params["project_id"], environments=filter_params.get("environment"), scope=sort, @@ -456,6 +500,8 @@ def post(self, request: Request, organization) -> Response: ``commit`` may contain a range in the form of ``previousCommit..commit`` :auth: required """ + print(f"REQUEST HEADERS {request.headers.get('authorization')}") + logger.error(f"REQUEST HEADERS {request.headers}") bind_organization_context(organization) serializer = ReleaseSerializerWithProjects( data=request.data, context={"organization": organization} @@ -477,7 +523,9 @@ def post(self, request: Request, organization) -> Response: projects = [] for id_or_slug in result["projects"]: if id_or_slug not in allowed_projects: - return Response({"projects": ["Invalid project ids or slugs"]}, status=400) + return Response( + {"projects": ["Invalid project ids or slugs"]}, status=400 + ) projects.append(allowed_projects[id_or_slug]) new_status = result.get("status") @@ -555,7 +603,11 @@ def post(self, request: Request, organization) -> Response: if not request.user.is_authenticated and not request.auth: scope.set_tag("failure_reason", "user_not_authenticated") return Response( - {"refs": ["You must use an authenticated API token to fetch refs"]}, + { + "refs": [ + "You must use an authenticated API token to fetch refs" + ] + }, status=400, ) fetch_commits = not commit_list @@ -585,7 +637,9 @@ def post(self, request: Request, organization) -> Response: ) if is_org_auth_token_auth(request.auth): - update_org_auth_token_last_used(request.auth, [project.id for project in projects]) + update_org_auth_token_last_used( + request.auth, [project.id for project in projects] + ) scope.set_tag("success_status", status) return Response(serialize(release, request.user), status=status) @@ -594,7 +648,9 @@ def post(self, request: Request, organization) -> Response: @region_silo_endpoint -class OrganizationReleasesStatsEndpoint(OrganizationReleasesBaseEndpoint, EnvironmentMixin): +class OrganizationReleasesStatsEndpoint( + OrganizationReleasesBaseEndpoint, EnvironmentMixin +): publish_status = { "GET": ApiPublishStatus.UNKNOWN, } @@ -610,7 +666,9 @@ def get(self, request: Request, organization) -> Response: query = request.GET.get("query") try: - filter_params = self.get_filter_params(request, organization, date_filter_optional=True) + filter_params = self.get_filter_params( + request, organization, date_filter_optional=True + ) except NoProjects: return Response([]) @@ -630,7 +688,9 @@ def get(self, request: Request, organization) -> Response: queryset = 
add_environment_to_queryset(queryset, filter_params) if query: try: - queryset = _filter_releases_by_query(queryset, organization, query, filter_params) + queryset = _filter_releases_by_query( + queryset, organization, query, filter_params + ) except InvalidSearchQuery as e: return Response( {"detail": str(e)}, @@ -642,7 +702,8 @@ def get(self, request: Request, organization) -> Response: queryset=queryset, paginator_cls=OffsetPaginator, on_results=lambda x: [ - {"version": release["version"], "date": serialize(release["date"])} for release in x + {"version": release["version"], "date": serialize(release["date"])} + for release in x ], default_per_page=1000, max_per_page=1000, diff --git a/src/sentry/conf/server.py b/src/sentry/conf/server.py index 2dbc2fddb43a77..d456c838884482 100644 --- a/src/sentry/conf/server.py +++ b/src/sentry/conf/server.py @@ -40,13 +40,11 @@ def gettext_noop(s: str) -> str: @overload -def env(key: str) -> str: - ... +def env(key: str) -> str: ... @overload -def env(key: str, default: _EnvTypes, type: Type | None = None) -> _EnvTypes: - ... +def env(key: str, default: _EnvTypes, type: Type | None = None) -> _EnvTypes: ... def env( @@ -208,15 +206,13 @@ def env( url = urlparse(os.environ["DATABASE_URL"]) # Update with environment configuration. - DATABASES["default"].update( - { - "NAME": url.path[1:], - "USER": url.username, - "PASSWORD": url.password, - "HOST": url.hostname, - "PORT": url.port, - } - ) + DATABASES["default"].update({ + "NAME": url.path[1:], + "USER": url.username, + "PASSWORD": url.password, + "HOST": url.hostname, + "PORT": url.port, + }) # This should always be UTC. @@ -566,7 +562,9 @@ def env( ) AUTH_PASSWORD_VALIDATORS: list[dict[str, Any]] = [ - {"NAME": "django.contrib.auth.password_validation.UserAttributeSimilarityValidator"}, + { + "NAME": "django.contrib.auth.password_validation.UserAttributeSimilarityValidator" + }, { "NAME": "django.contrib.auth.password_validation.MinimumLengthValidator", "OPTIONS": {"min_length": 8}, @@ -828,12 +826,24 @@ def SOCIAL_AUTH_DEFAULT_USERNAME() -> str: CELERY_QUEUES_CONTROL = [ - Queue("app_platform.control", routing_key="app_platform.control", exchange=control_exchange), + Queue( + "app_platform.control", + routing_key="app_platform.control", + exchange=control_exchange, + ), Queue("auth.control", routing_key="auth.control", exchange=control_exchange), Queue("cleanup.control", routing_key="cleanup.control", exchange=control_exchange), Queue("email.control", routing_key="email.control", exchange=control_exchange), - Queue("integrations.control", routing_key="integrations.control", exchange=control_exchange), - Queue("files.delete.control", routing_key="files.delete.control", exchange=control_exchange), + Queue( + "integrations.control", + routing_key="integrations.control", + exchange=control_exchange, + ), + Queue( + "files.delete.control", + routing_key="files.delete.control", + exchange=control_exchange, + ), Queue( "hybrid_cloud.control_repair", routing_key="hybrid_cloud.control_repair", @@ -856,7 +866,9 @@ def SOCIAL_AUTH_DEFAULT_USERNAME() -> str: # TODO(@anonrig): Remove this when all AppStore connect data is removed. 
Queue("appstoreconnect", routing_key="sentry.tasks.app_store_connect.#"), Queue("assemble", routing_key="assemble"), - Queue("backfill_seer_grouping_records", routing_key="backfill_seer_grouping_records"), + Queue( + "backfill_seer_grouping_records", routing_key="backfill_seer_grouping_records" + ), Queue("buffers.process_pending", routing_key="buffers.process_pending"), Queue("buffers.process_pending_batch", routing_key="buffers.process_pending_batch"), Queue("buffers.incr", routing_key="buffers.incr"), @@ -867,7 +879,8 @@ def SOCIAL_AUTH_DEFAULT_USERNAME() -> str: Queue("default", routing_key="default"), Queue("delayed_rules", routing_key="delayed_rules"), Queue( - "delete_seer_grouping_records_by_hash", routing_key="delete_seer_grouping_records_by_hash" + "delete_seer_grouping_records_by_hash", + routing_key="delete_seer_grouping_records_by_hash", ), Queue("digests.delivery", routing_key="digests.delivery"), Queue("digests.scheduling", routing_key="digests.scheduling"), @@ -876,11 +889,16 @@ def SOCIAL_AUTH_DEFAULT_USERNAME() -> str: Queue("events.preprocess_event", routing_key="events.preprocess_event"), Queue("events.process_event", routing_key="events.process_event"), Queue( - "events.reprocessing.preprocess_event", routing_key="events.reprocessing.preprocess_event" + "events.reprocessing.preprocess_event", + routing_key="events.reprocessing.preprocess_event", ), - Queue("events.reprocessing.process_event", routing_key="events.reprocessing.process_event"), Queue( - "events.reprocessing.symbolicate_event", routing_key="events.reprocessing.symbolicate_event" + "events.reprocessing.process_event", + routing_key="events.reprocessing.process_event", + ), + Queue( + "events.reprocessing.symbolicate_event", + routing_key="events.reprocessing.symbolicate_event", ), Queue( "events.reprocessing.symbolicate_event_low_priority", @@ -892,7 +910,8 @@ def SOCIAL_AUTH_DEFAULT_USERNAME() -> str: Queue("events.save_event_attachments", routing_key="events.save_event_attachments"), Queue("events.symbolicate_event", routing_key="events.symbolicate_event"), Queue( - "events.symbolicate_event_low_priority", routing_key="events.symbolicate_event_low_priority" + "events.symbolicate_event_low_priority", + routing_key="events.symbolicate_event_low_priority", ), Queue("events.symbolicate_js_event", routing_key="events.symbolicate_js_event"), Queue( @@ -907,9 +926,13 @@ def SOCIAL_AUTH_DEFAULT_USERNAME() -> str: Queue("files.copy", routing_key="files.copy"), Queue("files.delete", routing_key="files.delete"), Queue( - "group_owners.process_suspect_commits", routing_key="group_owners.process_suspect_commits" + "group_owners.process_suspect_commits", + routing_key="group_owners.process_suspect_commits", + ), + Queue( + "group_owners.process_commit_context", + routing_key="group_owners.process_commit_context", ), - Queue("group_owners.process_commit_context", routing_key="group_owners.process_commit_context"), Queue("integrations", routing_key="integrations"), Queue( "releasemonitor", @@ -952,14 +975,22 @@ def SOCIAL_AUTH_DEFAULT_USERNAME() -> str: Queue("auto_enable_codecov", routing_key="auto_enable_codecov"), Queue("weekly_escalating_forecast", routing_key="weekly_escalating_forecast"), Queue("relocation", routing_key="relocation"), - Queue("performance.statistical_detector", routing_key="performance.statistical_detector"), - Queue("profiling.statistical_detector", routing_key="profiling.statistical_detector"), + Queue( + "performance.statistical_detector", + routing_key="performance.statistical_detector", + ), + 
Queue( + "profiling.statistical_detector", routing_key="profiling.statistical_detector" + ), CELERY_ISSUE_STATES_QUEUE, Queue("nudge.invite_missing_org_members", routing_key="invite_missing_org_members"), Queue("auto_resolve_issues", routing_key="auto_resolve_issues"), Queue("on_demand_metrics", routing_key="on_demand_metrics"), Queue("check_new_issue_threshold_met", routing_key="check_new_issue_threshold_met"), - Queue("integrations_slack_activity_notify", routing_key="integrations_slack_activity_notify"), + Queue( + "integrations_slack_activity_notify", + routing_key="integrations_slack_activity_notify", + ), ] from celery.schedules import crontab @@ -1251,18 +1282,24 @@ def SOCIAL_AUTH_DEFAULT_USERNAME() -> str: # Assign the configuration keys celery uses based on our silo mode. if SILO_MODE == "CONTROL": - CELERYBEAT_SCHEDULE_FILENAME = os.path.join(tempfile.gettempdir(), "sentry-celerybeat-control") + CELERYBEAT_SCHEDULE_FILENAME = os.path.join( + tempfile.gettempdir(), "sentry-celerybeat-control" + ) CELERYBEAT_SCHEDULE = CELERYBEAT_SCHEDULE_CONTROL CELERY_QUEUES = CELERY_QUEUES_CONTROL elif SILO_MODE == "REGION": - CELERYBEAT_SCHEDULE_FILENAME = os.path.join(tempfile.gettempdir(), "sentry-celerybeat-region") + CELERYBEAT_SCHEDULE_FILENAME = os.path.join( + tempfile.gettempdir(), "sentry-celerybeat-region" + ) CELERYBEAT_SCHEDULE = CELERYBEAT_SCHEDULE_REGION CELERY_QUEUES = CELERY_QUEUES_REGION else: CELERYBEAT_SCHEDULE = {**CELERYBEAT_SCHEDULE_CONTROL, **CELERYBEAT_SCHEDULE_REGION} - CELERYBEAT_SCHEDULE_FILENAME = os.path.join(tempfile.gettempdir(), "sentry-celerybeat") + CELERYBEAT_SCHEDULE_FILENAME = os.path.join( + tempfile.gettempdir(), "sentry-celerybeat" + ) CELERY_QUEUES = CELERY_QUEUES_REGION + CELERY_QUEUES_CONTROL for queue in CELERY_QUEUES: @@ -1304,7 +1341,10 @@ def SOCIAL_AUTH_DEFAULT_USERNAME() -> str: } BGTASKS = { - "sentry.bgtasks.clean_dsymcache:clean_dsymcache": {"interval": 5 * 60, "roles": ["worker"]}, + "sentry.bgtasks.clean_dsymcache:clean_dsymcache": { + "interval": 5 * 60, + "roles": ["worker"], + }, "sentry.bgtasks.clean_releasefilecache:clean_releasefilecache": { "interval": 5 * 60, "roles": ["worker"], @@ -1322,14 +1362,17 @@ def SOCIAL_AUTH_DEFAULT_USERNAME() -> str: LOGGING: LoggingConfig = { "default_level": "INFO", "version": 1, - "disable_existing_loggers": True, + # "disable_existing_loggers": True, "handlers": { "null": {"class": "logging.NullHandler"}, "console": {"class": "sentry.logging.handlers.StructLogHandler"}, # This `internal` logger is separate from the `Logging` integration in the SDK. Since # we have this to record events, in `sdk.py` we set the integration's `event_level` to # None, so that it records breadcrumbs for all log calls but doesn't send any events. - "internal": {"level": "ERROR", "class": "sentry_sdk.integrations.logging.EventHandler"}, + "internal": { + "level": "ERROR", + "class": "sentry_sdk.integrations.logging.EventHandler", + }, "metrics": { "level": "WARNING", "filters": ["important_django_request"], @@ -1362,7 +1405,11 @@ def SOCIAL_AUTH_DEFAULT_USERNAME() -> str: # This only needs to go to Sentry for now. 
"sentry.similarity": {"handlers": ["internal"], "propagate": False}, "sentry.errors": {"handlers": ["console"], "propagate": False}, - "sentry_sdk.errors": {"handlers": ["console"], "level": "INFO", "propagate": False}, + "sentry_sdk.errors": { + "handlers": ["console"], + "level": "INFO", + "propagate": False, + }, "sentry.rules": {"handlers": ["console"], "propagate": False}, "sentry.profiles": {"level": "INFO"}, "multiprocessing": { @@ -1382,7 +1429,11 @@ def SOCIAL_AUTH_DEFAULT_USERNAME() -> str: "propagate": False, }, "toronado": {"level": "ERROR", "handlers": ["null"], "propagate": False}, - "urllib3.connectionpool": {"level": "ERROR", "handlers": ["console"], "propagate": False}, + "urllib3.connectionpool": { + "level": "ERROR", + "handlers": ["console"], + "propagate": False, + }, "boto3": {"level": "WARNING", "handlers": ["console"], "propagate": False}, "botocore": {"level": "WARNING", "handlers": ["console"], "propagate": False}, }, @@ -1417,12 +1468,18 @@ def custom_parameter_sort(parameter: dict) -> tuple[str, int]: if os.environ.get("OPENAPIGENERATE", False): OLD_OPENAPI_JSON_PATH = "tests/apidocs/openapi-deprecated.json" - from sentry.apidocs.build import OPENAPI_TAGS, get_old_json_components, get_old_json_paths + from sentry.apidocs.build import ( + OPENAPI_TAGS, + get_old_json_components, + get_old_json_paths, + ) SPECTACULAR_SETTINGS = { "APPEND_COMPONENTS": get_old_json_components(OLD_OPENAPI_JSON_PATH), "APPEND_PATHS": get_old_json_paths(OLD_OPENAPI_JSON_PATH), - "AUTHENTICATION_WHITELIST": ["sentry.api.authentication.UserAuthTokenAuthentication"], + "AUTHENTICATION_WHITELIST": [ + "sentry.api.authentication.UserAuthTokenAuthentication" + ], "COMPONENT_SPLIT_PATCH": False, "COMPONENT_SPLIT_REQUEST": False, "CONTACT": {"email": "partners@sentry.io"}, @@ -1432,7 +1489,10 @@ def custom_parameter_sort(parameter: dict) -> tuple[str, int]: # We override the default behavior to skip adding the choice name to the bullet point if # it's identical to the choice value by monkey patching build_choice_description_list. "ENUM_GENERATE_CHOICE_DESCRIPTION": True, - "LICENSE": {"name": "Apache 2.0", "url": "http://www.apache.org/licenses/LICENSE-2.0.html"}, + "LICENSE": { + "name": "Apache 2.0", + "url": "http://www.apache.org/licenses/LICENSE-2.0.html", + }, "PARSER_WHITELIST": ["rest_framework.parsers.JSONParser"], "POSTPROCESSING_HOOKS": ["sentry.apidocs.hooks.custom_postprocessing_hook"], "PREPROCESSING_HOOKS": ["sentry.apidocs.hooks.custom_preprocessing_hook"], @@ -1602,15 +1662,13 @@ def custom_parameter_sort(parameter: dict) -> tuple[str, int]: # This list is a bit fragile and hardcoded, but it's unlikely that # a user will be using a different backend that also mandates SMTP # credentials. 
-SENTRY_SMTP_DISABLED_BACKENDS = frozenset( - ( - "django.core.mail.backends.dummy.EmailBackend", - "django.core.mail.backends.console.EmailBackend", - "django.core.mail.backends.locmem.EmailBackend", - "django.core.mail.backends.filebased.EmailBackend", - "sentry.utils.email.PreviewBackend", - ) -) +SENTRY_SMTP_DISABLED_BACKENDS = frozenset(( + "django.core.mail.backends.dummy.EmailBackend", + "django.core.mail.backends.console.EmailBackend", + "django.core.mail.backends.locmem.EmailBackend", + "django.core.mail.backends.filebased.EmailBackend", + "sentry.utils.email.PreviewBackend", +)) SENTRY_UPLOAD_RETRY_TIME = 60 # 1 min @@ -1666,7 +1724,9 @@ def custom_parameter_sort(parameter: dict) -> tuple[str, int]: SENTRY_QUOTA_OPTIONS: dict[str, str] = {} # Cache for Relay project configs -SENTRY_RELAY_PROJECTCONFIG_CACHE = "sentry.relay.projectconfig_cache.redis.RedisProjectConfigCache" +SENTRY_RELAY_PROJECTCONFIG_CACHE = ( + "sentry.relay.projectconfig_cache.redis.RedisProjectConfigCache" +) SENTRY_RELAY_PROJECTCONFIG_CACHE_OPTIONS: dict[str, str] = {} # Which cache to use for debouncing cache updates to the projectconfig cache @@ -1704,7 +1764,9 @@ def custom_parameter_sort(parameter: dict) -> tuple[str, int]: SENTRY_INDEXSTORE_OPTIONS: dict[str, Any] = {} # Tag storage backend -SENTRY_TAGSTORE = os.environ.get("SENTRY_TAGSTORE", "sentry.tagstore.snuba.SnubaTagStorage") +SENTRY_TAGSTORE = os.environ.get( + "SENTRY_TAGSTORE", "sentry.tagstore.snuba.SnubaTagStorage" +) SENTRY_TAGSTORE_OPTIONS: dict[str, Any] = {} # Search backend @@ -1740,12 +1802,16 @@ def custom_parameter_sort(parameter: dict) -> tuple[str, int]: SENTRY_METRICS_OPTIONS: dict[str, Any] = {} SENTRY_METRICS_SAMPLE_RATE = 1.0 SENTRY_METRICS_PREFIX = "sentry." -SENTRY_METRICS_SKIP_INTERNAL_PREFIXES: list[str] = [] # Order this by most frequent prefixes. +SENTRY_METRICS_SKIP_INTERNAL_PREFIXES: list[ + str +] = [] # Order this by most frequent prefixes. 
SENTRY_METRICS_SKIP_ALL_INTERNAL = False SENTRY_METRICS_DISALLOW_BAD_TAGS = IS_DEV # Metrics product -SENTRY_METRICS_INDEXER = "sentry.sentry_metrics.indexer.postgres.postgres_v2.PostgresIndexer" +SENTRY_METRICS_INDEXER = ( + "sentry.sentry_metrics.indexer.postgres.postgres_v2.PostgresIndexer" +) SENTRY_METRICS_INDEXER_OPTIONS: dict[str, Any] = {} SENTRY_METRICS_INDEXER_CACHE_TTL = 3600 * 2 SENTRY_METRICS_INDEXER_TRANSACTIONS_SAMPLE_RATE = 0.1 @@ -1828,7 +1894,9 @@ def custom_parameter_sort(parameter: dict) -> tuple[str, int]: OPENAI_API_KEY: str | None = None # AI Suggested Fix default model -SENTRY_AI_SUGGESTED_FIX_MODEL: str = os.getenv("SENTRY_AI_SUGGESTED_FIX_MODEL", "gpt-3.5-turbo-16k") +SENTRY_AI_SUGGESTED_FIX_MODEL: str = os.getenv( + "SENTRY_AI_SUGGESTED_FIX_MODEL", "gpt-3.5-turbo-16k" +) SENTRY_API_PAGINATION_ALLOWLIST = SENTRY_API_PAGINATION_ALLOWLIST_DO_NOT_MODIFY @@ -1903,7 +1971,12 @@ def custom_parameter_sort(parameter: dict) -> tuple[str, int]: ("org:write", "Read and write access to organization details."), ("org:read", "Read access to organization details."), ), - (("org:integrations", "Read, write, and admin access to organization integrations."),), + ( + ( + "org:integrations", + "Read, write, and admin access to organization integrations.", + ), + ), ( ("member:admin", "Read, write, and admin access to organization members."), ("member:write", "Read and write access to organization members."), @@ -2141,7 +2214,9 @@ def custom_parameter_sort(parameter: dict) -> tuple[str, int]: "--output-pathinfo=true", "--config={}".format( os.path.normpath( - os.path.join(PROJECT_ROOT, os.pardir, os.pardir, "webpack.config.ts") + os.path.join( + PROJECT_ROOT, os.pardir, os.pardir, "webpack.config.ts" + ) ) ), ], @@ -2235,237 +2310,207 @@ def custom_parameter_sort(parameter: dict) -> tuple[str, int]: ARM64 = platform.processor() in {"arm", "arm64", "aarch64"} SENTRY_DEVSERVICES: dict[str, Callable[[Any, Any], dict[str, Any]]] = { - "redis": lambda settings, options: ( - { - "image": "ghcr.io/getsentry/image-mirror-library-redis:5.0-alpine", - "ports": {"6379/tcp": 6379}, - "command": [ - "redis-server", - "--appendonly", - "yes", - "--save", - "60", - "20", - "--auto-aof-rewrite-percentage", - "100", - "--auto-aof-rewrite-min-size", - "64mb", - ], - "volumes": {"redis": {"bind": "/data"}}, - } - ), - "redis-cluster": lambda settings, options: ( - { - "image": "ghcr.io/getsentry/docker-redis-cluster:7.0.10", - "ports": {f"700{idx}/tcp": f"700{idx}" for idx in range(6)}, - "volumes": {"redis-cluster": {"bind": "/redis-data"}}, - "environment": {"IP": "0.0.0.0"}, - "only_if": settings.SENTRY_DEV_USE_REDIS_CLUSTER, - } - ), - "rabbitmq": lambda settings, options: ( - { - "image": "ghcr.io/getsentry/image-mirror-library-rabbitmq:3-management", - "ports": {"5672/tcp": 5672, "15672/tcp": 15672}, - "environment": {"IP": "0.0.0.0"}, - "only_if": settings.SENTRY_DEV_USE_RABBITMQ, - } - ), - "postgres": lambda settings, options: ( - { - "image": f"ghcr.io/getsentry/image-mirror-library-postgres:{PG_VERSION}-alpine", - "ports": {"5432/tcp": 5432}, - "environment": {"POSTGRES_DB": "sentry", "POSTGRES_HOST_AUTH_METHOD": "trust"}, - "volumes": { - "postgres": {"bind": "/var/lib/postgresql/data"}, - "wal2json": {"bind": "/wal2json"}, - }, - "command": [ - "postgres", - "-c", - "wal_level=logical", - "-c", - "max_replication_slots=1", - "-c", - "max_wal_senders=1", - ], - } - ), - "kafka": lambda settings, options: ( - { - "image": "ghcr.io/getsentry/image-mirror-confluentinc-cp-kafka:7.5.0", - 
"ports": {"9092/tcp": 9092}, - # https://docs.confluent.io/platform/current/installation/docker/config-reference.html#cp-kakfa-example - "environment": { - "KAFKA_PROCESS_ROLES": "broker,controller", - "KAFKA_CONTROLLER_QUORUM_VOTERS": "1@127.0.0.1:29093", - "KAFKA_CONTROLLER_LISTENER_NAMES": "CONTROLLER", - "KAFKA_NODE_ID": "1", - "CLUSTER_ID": "MkU3OEVBNTcwNTJENDM2Qk", - "KAFKA_LISTENERS": "PLAINTEXT://0.0.0.0:29092,INTERNAL://0.0.0.0:9093,EXTERNAL://0.0.0.0:9092,CONTROLLER://0.0.0.0:29093", - "KAFKA_ADVERTISED_LISTENERS": "PLAINTEXT://127.0.0.1:29092,INTERNAL://sentry_kafka:9093,EXTERNAL://127.0.0.1:9092", - "KAFKA_LISTENER_SECURITY_PROTOCOL_MAP": "PLAINTEXT:PLAINTEXT,INTERNAL:PLAINTEXT,EXTERNAL:PLAINTEXT,CONTROLLER:PLAINTEXT", - "KAFKA_INTER_BROKER_LISTENER_NAME": "PLAINTEXT", - "KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR": "1", - "KAFKA_OFFSETS_TOPIC_NUM_PARTITIONS": "1", - "KAFKA_LOG_RETENTION_HOURS": "24", - "KAFKA_MESSAGE_MAX_BYTES": "50000000", - "KAFKA_MAX_REQUEST_SIZE": "50000000", - }, - "volumes": {"kafka": {"bind": "/var/lib/kafka/data"}}, - "only_if": "kafka" in settings.SENTRY_EVENTSTREAM - or settings.SENTRY_USE_RELAY - or settings.SENTRY_DEV_PROCESS_SUBSCRIPTIONS - or settings.SENTRY_USE_PROFILING, - } - ), - "clickhouse": lambda settings, options: ( - { - "image": ( - "ghcr.io/getsentry/image-mirror-altinity-clickhouse-server:23.3.19.33.altinitystable" - ), - "ports": {"9000/tcp": 9000, "9009/tcp": 9009, "8123/tcp": 8123}, - "ulimits": [{"name": "nofile", "soft": 262144, "hard": 262144}], - # The arm image does not properly load the MAX_MEMORY_USAGE_RATIO - # from the environment in loc_config.xml, thus, hard-coding it there - "volumes": { + "redis": lambda settings, options: ({ + "image": "ghcr.io/getsentry/image-mirror-library-redis:5.0-alpine", + "ports": {"6379/tcp": 6379}, + "command": [ + "redis-server", + "--appendonly", + "yes", + "--save", + "60", + "20", + "--auto-aof-rewrite-percentage", + "100", + "--auto-aof-rewrite-min-size", + "64mb", + ], + "volumes": {"redis": {"bind": "/data"}}, + }), + "redis-cluster": lambda settings, options: ({ + "image": "ghcr.io/getsentry/docker-redis-cluster:7.0.10", + "ports": {f"700{idx}/tcp": f"700{idx}" for idx in range(6)}, + "volumes": {"redis-cluster": {"bind": "/redis-data"}}, + "environment": {"IP": "0.0.0.0"}, + "only_if": settings.SENTRY_DEV_USE_REDIS_CLUSTER, + }), + "rabbitmq": lambda settings, options: ({ + "image": "ghcr.io/getsentry/image-mirror-library-rabbitmq:3-management", + "ports": {"5672/tcp": 5672, "15672/tcp": 15672}, + "environment": {"IP": "0.0.0.0"}, + "only_if": settings.SENTRY_DEV_USE_RABBITMQ, + }), + "postgres": lambda settings, options: ({ + "image": f"ghcr.io/getsentry/image-mirror-library-postgres:{PG_VERSION}-alpine", + "ports": {"5432/tcp": 5432}, + "environment": {"POSTGRES_DB": "sentry", "POSTGRES_HOST_AUTH_METHOD": "trust"}, + "volumes": { + "postgres": {"bind": "/var/lib/postgresql/data"}, + "wal2json": {"bind": "/wal2json"}, + }, + "command": [ + "postgres", + "-c", + "wal_level=logical", + "-c", + "max_replication_slots=1", + "-c", + "max_wal_senders=1", + ], + }), + "kafka": lambda settings, options: ({ + "image": "ghcr.io/getsentry/image-mirror-confluentinc-cp-kafka:7.5.0", + "ports": {"9092/tcp": 9092}, + # https://docs.confluent.io/platform/current/installation/docker/config-reference.html#cp-kakfa-example + "environment": { + "KAFKA_PROCESS_ROLES": "broker,controller", + "KAFKA_CONTROLLER_QUORUM_VOTERS": "1@127.0.0.1:29093", + "KAFKA_CONTROLLER_LISTENER_NAMES": "CONTROLLER", + 
"KAFKA_NODE_ID": "1", + "CLUSTER_ID": "MkU3OEVBNTcwNTJENDM2Qk", + "KAFKA_LISTENERS": "PLAINTEXT://0.0.0.0:29092,INTERNAL://0.0.0.0:9093,EXTERNAL://0.0.0.0:9092,CONTROLLER://0.0.0.0:29093", + "KAFKA_ADVERTISED_LISTENERS": "PLAINTEXT://127.0.0.1:29092,INTERNAL://sentry_kafka:9093,EXTERNAL://127.0.0.1:9092", + "KAFKA_LISTENER_SECURITY_PROTOCOL_MAP": "PLAINTEXT:PLAINTEXT,INTERNAL:PLAINTEXT,EXTERNAL:PLAINTEXT,CONTROLLER:PLAINTEXT", + "KAFKA_INTER_BROKER_LISTENER_NAME": "PLAINTEXT", + "KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR": "1", + "KAFKA_OFFSETS_TOPIC_NUM_PARTITIONS": "1", + "KAFKA_LOG_RETENTION_HOURS": "24", + "KAFKA_MESSAGE_MAX_BYTES": "50000000", + "KAFKA_MAX_REQUEST_SIZE": "50000000", + }, + "volumes": {"kafka": {"bind": "/var/lib/kafka/data"}}, + "only_if": "kafka" in settings.SENTRY_EVENTSTREAM + or settings.SENTRY_USE_RELAY + or settings.SENTRY_DEV_PROCESS_SUBSCRIPTIONS + or settings.SENTRY_USE_PROFILING, + }), + "clickhouse": lambda settings, options: ({ + "image": ( + "ghcr.io/getsentry/image-mirror-altinity-clickhouse-server:23.3.19.33.altinitystable" + ), + "ports": {"9000/tcp": 9000, "9009/tcp": 9009, "8123/tcp": 8123}, + "ulimits": [{"name": "nofile", "soft": 262144, "hard": 262144}], + # The arm image does not properly load the MAX_MEMORY_USAGE_RATIO + # from the environment in loc_config.xml, thus, hard-coding it there + "volumes": { + ( + "clickhouse_dist" + if settings.SENTRY_DISTRIBUTED_CLICKHOUSE_TABLES + else "clickhouse" + ): {"bind": "/var/lib/clickhouse"}, + os.path.join( + settings.DEVSERVICES_CONFIG_DIR, + "clickhouse", ( - "clickhouse_dist" + "dist_config.xml" if settings.SENTRY_DISTRIBUTED_CLICKHOUSE_TABLES - else "clickhouse" - ): {"bind": "/var/lib/clickhouse"}, - os.path.join( - settings.DEVSERVICES_CONFIG_DIR, - "clickhouse", - ( - "dist_config.xml" - if settings.SENTRY_DISTRIBUTED_CLICKHOUSE_TABLES - else "loc_config.xml" - ), - ): {"bind": "/etc/clickhouse-server/config.d/sentry.xml"}, - }, - } - ), - "snuba": lambda settings, options: ( - { - "image": "ghcr.io/getsentry/snuba:latest", - "ports": {"1218/tcp": 1218, "1219/tcp": 1219}, - "command": ["devserver"] - + (["--no-workers"] if "snuba" in settings.SENTRY_EVENTSTREAM else []), - "environment": { - "PYTHONUNBUFFERED": "1", - "SNUBA_SETTINGS": "docker", - "DEBUG": "1", - "CLICKHOUSE_HOST": "{containers[clickhouse][name]}", - "CLICKHOUSE_PORT": "9000", - "CLICKHOUSE_HTTP_PORT": "8123", - "DEFAULT_BROKERS": ( - "" - if "snuba" in settings.SENTRY_EVENTSTREAM - else "{containers[kafka][name]}:9093" - ), - "REDIS_HOST": "{containers[redis][name]}", - "REDIS_PORT": "6379", - "REDIS_DB": "1", - "ENABLE_SENTRY_METRICS_DEV": "1" if settings.SENTRY_USE_METRICS_DEV else "", - "ENABLE_PROFILES_CONSUMER": "1" if settings.SENTRY_USE_PROFILING else "", - "ENABLE_SPANS_CONSUMER": "1" if settings.SENTRY_USE_SPANS else "", - "ENABLE_ISSUE_OCCURRENCE_CONSUMER": ( - "1" if settings.SENTRY_USE_ISSUE_OCCURRENCE else "" - ), - "ENABLE_AUTORUN_MIGRATION_SEARCH_ISSUES": "1", - # TODO: remove setting - "ENABLE_GROUP_ATTRIBUTES_CONSUMER": ( - "1" if settings.SENTRY_USE_GROUP_ATTRIBUTES else "" + else "loc_config.xml" ), - }, - "only_if": "snuba" in settings.SENTRY_EVENTSTREAM - or "kafka" in settings.SENTRY_EVENTSTREAM, - # we don't build linux/arm64 snuba images anymore - # apple silicon users should have working emulation under colima 0.6.2 - # or docker desktop - "platform": "linux/amd64", - } - ), - "bigtable": lambda settings, options: ( - { - "image": "us.gcr.io/sentryio/cbtemulator:23c02d92c7a1747068eb1fc57dddbad23907d614", - 
"ports": {"8086/tcp": 8086}, - # NEED_BIGTABLE is set by CI so we don't have to pass - # --skip-only-if when compiling which services to run. - "only_if": os.environ.get("NEED_BIGTABLE", False) - or "bigtable" in settings.SENTRY_NODESTORE, - } - ), - "memcached": lambda settings, options: ( - { - "image": "ghcr.io/getsentry/image-mirror-library-memcached:1.5-alpine", - "ports": {"11211/tcp": 11211}, - "only_if": "memcached" in settings.CACHES.get("default", {}).get("BACKEND"), - } - ), - "symbolicator": lambda settings, options: ( - { - "image": "us-central1-docker.pkg.dev/sentryio/symbolicator/image:nightly", - "ports": {"3021/tcp": 3021}, - "volumes": {settings.SYMBOLICATOR_CONFIG_DIR: {"bind": "/etc/symbolicator"}}, - "command": ["run", "--config", "/etc/symbolicator/config.yml"], - "only_if": options.get("symbolicator.enabled"), - } - ), - "relay": lambda settings, options: ( - { - "image": "us-central1-docker.pkg.dev/sentryio/relay/relay:nightly", - "ports": {"7899/tcp": settings.SENTRY_RELAY_PORT}, - "volumes": {settings.RELAY_CONFIG_DIR: {"bind": "/etc/relay"}}, - "command": ["run", "--config", "/etc/relay"], - "only_if": bool(os.environ.get("SENTRY_USE_RELAY", settings.SENTRY_USE_RELAY)), - "with_devserver": True, - } - ), - "chartcuterie": lambda settings, options: ( - { - "image": "us-central1-docker.pkg.dev/sentryio/chartcuterie/image:latest", - "volumes": {settings.CHARTCUTERIE_CONFIG_DIR: {"bind": "/etc/chartcuterie"}}, - "environment": { - "CHARTCUTERIE_CONFIG": "/etc/chartcuterie/config.js", - "CHARTCUTERIE_CONFIG_POLLING": "true", - }, - "ports": {"9090/tcp": 7901}, - # NEED_CHARTCUTERIE is set by CI so we don't have to pass --skip-only-if when compiling which services to run. - "only_if": os.environ.get("NEED_CHARTCUTERIE", False) - or options.get("chart-rendering.enabled"), - } - ), - "vroom": lambda settings, options: ( - { - "image": "us-central1-docker.pkg.dev/sentryio/vroom/vroom:latest", - "volumes": {"profiles": {"bind": "/var/lib/sentry-profiles"}}, - "environment": { - "SENTRY_KAFKA_BROKERS_PROFILING": "{containers[kafka][name]}:9093", - "SENTRY_KAFKA_BROKERS_OCCURRENCES": "{containers[kafka][name]}:9093", - "SENTRY_SNUBA_HOST": "http://{containers[snuba][name]}:1218", - }, - "ports": {"8085/tcp": 8085}, - "only_if": settings.SENTRY_USE_PROFILING, - } - ), - "session-replay-analyzer": lambda settings, options: ( - { - "image": "ghcr.io/getsentry/session-replay-analyzer:latest", - "environment": {}, - "ports": {"3000/tcp": 3000}, - "only_if": settings.SENTRY_USE_REPLAY_ANALYZER_SERVICE, - } - ), - "spotlight-sidecar": lambda settings, options: ( - { - "image": "ghcr.io/getsentry/spotlight:latest", - "environment": {}, - "ports": {"8969/tcp": 8969}, - "only_if": settings.SENTRY_USE_SPOTLIGHT, - } - ), + ): {"bind": "/etc/clickhouse-server/config.d/sentry.xml"}, + }, + }), + "snuba": lambda settings, options: ({ + "image": "ghcr.io/getsentry/snuba:latest", + "ports": {"1218/tcp": 1218, "1219/tcp": 1219}, + "command": ["devserver"] + + (["--no-workers"] if "snuba" in settings.SENTRY_EVENTSTREAM else []), + "environment": { + "PYTHONUNBUFFERED": "1", + "SNUBA_SETTINGS": "docker", + "DEBUG": "1", + "CLICKHOUSE_HOST": "{containers[clickhouse][name]}", + "CLICKHOUSE_PORT": "9000", + "CLICKHOUSE_HTTP_PORT": "8123", + "DEFAULT_BROKERS": ( + "" + if "snuba" in settings.SENTRY_EVENTSTREAM + else "{containers[kafka][name]}:9093" + ), + "REDIS_HOST": "{containers[redis][name]}", + "REDIS_PORT": "6379", + "REDIS_DB": "1", + "ENABLE_SENTRY_METRICS_DEV": "1" if 
settings.SENTRY_USE_METRICS_DEV else "", + "ENABLE_PROFILES_CONSUMER": "1" if settings.SENTRY_USE_PROFILING else "", + "ENABLE_SPANS_CONSUMER": "1" if settings.SENTRY_USE_SPANS else "", + "ENABLE_ISSUE_OCCURRENCE_CONSUMER": ( + "1" if settings.SENTRY_USE_ISSUE_OCCURRENCE else "" + ), + "ENABLE_AUTORUN_MIGRATION_SEARCH_ISSUES": "1", + # TODO: remove setting + "ENABLE_GROUP_ATTRIBUTES_CONSUMER": ( + "1" if settings.SENTRY_USE_GROUP_ATTRIBUTES else "" + ), + }, + "only_if": "snuba" in settings.SENTRY_EVENTSTREAM + or "kafka" in settings.SENTRY_EVENTSTREAM, + # we don't build linux/arm64 snuba images anymore + # apple silicon users should have working emulation under colima 0.6.2 + # or docker desktop + "platform": "linux/amd64", + }), + "bigtable": lambda settings, options: ({ + "image": "us.gcr.io/sentryio/cbtemulator:23c02d92c7a1747068eb1fc57dddbad23907d614", + "ports": {"8086/tcp": 8086}, + # NEED_BIGTABLE is set by CI so we don't have to pass + # --skip-only-if when compiling which services to run. + "only_if": os.environ.get("NEED_BIGTABLE", False) + or "bigtable" in settings.SENTRY_NODESTORE, + }), + "memcached": lambda settings, options: ({ + "image": "ghcr.io/getsentry/image-mirror-library-memcached:1.5-alpine", + "ports": {"11211/tcp": 11211}, + "only_if": "memcached" in settings.CACHES.get("default", {}).get("BACKEND"), + }), + "symbolicator": lambda settings, options: ({ + "image": "us-central1-docker.pkg.dev/sentryio/symbolicator/image:nightly", + "ports": {"3021/tcp": 3021}, + "volumes": {settings.SYMBOLICATOR_CONFIG_DIR: {"bind": "/etc/symbolicator"}}, + "command": ["run", "--config", "/etc/symbolicator/config.yml"], + "only_if": options.get("symbolicator.enabled"), + }), + "relay": lambda settings, options: ({ + "image": "us-central1-docker.pkg.dev/sentryio/relay/relay:nightly", + "ports": {"7899/tcp": settings.SENTRY_RELAY_PORT}, + "volumes": {settings.RELAY_CONFIG_DIR: {"bind": "/etc/relay"}}, + "command": ["run", "--config", "/etc/relay"], + "only_if": bool(os.environ.get("SENTRY_USE_RELAY", settings.SENTRY_USE_RELAY)), + "with_devserver": True, + }), + "chartcuterie": lambda settings, options: ({ + "image": "us-central1-docker.pkg.dev/sentryio/chartcuterie/image:latest", + "volumes": {settings.CHARTCUTERIE_CONFIG_DIR: {"bind": "/etc/chartcuterie"}}, + "environment": { + "CHARTCUTERIE_CONFIG": "/etc/chartcuterie/config.js", + "CHARTCUTERIE_CONFIG_POLLING": "true", + }, + "ports": {"9090/tcp": 7901}, + # NEED_CHARTCUTERIE is set by CI so we don't have to pass --skip-only-if when compiling which services to run. 
+ "only_if": os.environ.get("NEED_CHARTCUTERIE", False) + or options.get("chart-rendering.enabled"), + }), + "vroom": lambda settings, options: ({ + "image": "us-central1-docker.pkg.dev/sentryio/vroom/vroom:latest", + "volumes": {"profiles": {"bind": "/var/lib/sentry-profiles"}}, + "environment": { + "SENTRY_KAFKA_BROKERS_PROFILING": "{containers[kafka][name]}:9093", + "SENTRY_KAFKA_BROKERS_OCCURRENCES": "{containers[kafka][name]}:9093", + "SENTRY_SNUBA_HOST": "http://{containers[snuba][name]}:1218", + }, + "ports": {"8085/tcp": 8085}, + "only_if": settings.SENTRY_USE_PROFILING, + }), + "session-replay-analyzer": lambda settings, options: ({ + "image": "ghcr.io/getsentry/session-replay-analyzer:latest", + "environment": {}, + "ports": {"3000/tcp": 3000}, + "only_if": settings.SENTRY_USE_REPLAY_ANALYZER_SERVICE, + }), + "spotlight-sidecar": lambda settings, options: ({ + "image": "ghcr.io/getsentry/spotlight:latest", + "environment": {}, + "ports": {"8969/tcp": 8969}, + "only_if": settings.SENTRY_USE_SPOTLIGHT, + }), } # Max file size for serialized file uploads in API @@ -2556,7 +2601,9 @@ def custom_parameter_sort(parameter: dict) -> tuple[str, int]: # # This setting takes precedence over `SENTRY_PROFILING_ENABLED` forcing the SDK # to operate under the continuous profiling model. -SENTRY_CONTINUOUS_PROFILING_ENABLED = os.environ.get("SENTRY_CONTINUOUS_PROFILING_ENABLED", False) +SENTRY_CONTINUOUS_PROFILING_ENABLED = os.environ.get( + "SENTRY_CONTINUOUS_PROFILING_ENABLED", False +) # Callable to bind additional context for the Sentry SDK # @@ -3001,7 +3048,9 @@ def custom_parameter_sort(parameter: dict) -> tuple[str, int]: SENTRY_USE_UWSGI = True # Configure service wrapper for reprocessing2 state -SENTRY_REPROCESSING_STORE = "sentry.eventstore.reprocessing.redis.RedisReprocessingStore" +SENTRY_REPROCESSING_STORE = ( + "sentry.eventstore.reprocessing.redis.RedisReprocessingStore" +) # Which cluster is used to store auxiliary data for reprocessing. Note that # this cluster is not used to store attachments etc, that still happens on # rc-processing. 
This is just for buffering up event IDs and storing a counter @@ -3139,7 +3188,9 @@ def custom_parameter_sort(parameter: dict) -> tuple[str, int]: SEER_DEFAULT_URL = "http://127.0.0.1:9091" # for local development SEER_DEFAULT_TIMEOUT = 5 -SEER_BREAKPOINT_DETECTION_URL = SEER_DEFAULT_URL # for local development, these share a URL +SEER_BREAKPOINT_DETECTION_URL = ( + SEER_DEFAULT_URL # for local development, these share a URL +) SEER_BREAKPOINT_DETECTION_TIMEOUT = 5 SEER_SEVERITY_URL = SEER_DEFAULT_URL # for local development, these share a URL @@ -3154,7 +3205,9 @@ def custom_parameter_sort(parameter: dict) -> tuple[str, int]: SEER_GROUPING_BACKFILL_URL = SEER_DEFAULT_URL SEER_ANOMALY_DETECTION_MODEL_VERSION = "v1" -SEER_ANOMALY_DETECTION_URL = SEER_DEFAULT_URL # for local development, these share a URL +SEER_ANOMALY_DETECTION_URL = ( + SEER_DEFAULT_URL # for local development, these share a URL +) SEER_ANOMALY_DETECTION_TIMEOUT = 5 SEER_ANOMALY_DETECTION_ENDPOINT_URL = ( @@ -3172,7 +3225,9 @@ def custom_parameter_sort(parameter: dict) -> tuple[str, int]: SENTRY_REPLAYS_SERVICE_URL = "http://localhost:8090" -SENTRY_ISSUE_ALERT_HISTORY = "sentry.rules.history.backends.postgres.PostgresRuleHistoryBackend" +SENTRY_ISSUE_ALERT_HISTORY = ( + "sentry.rules.history.backends.postgres.PostgresRuleHistoryBackend" +) SENTRY_ISSUE_ALERT_HISTORY_OPTIONS: dict[str, Any] = {} # This is useful for testing SSO expiry flows @@ -3369,7 +3424,9 @@ def custom_parameter_sort(parameter: dict) -> tuple[str, int]: OPTIONS_AUTOMATOR_HMAC_SECRET: str | None = None -SENTRY_METRICS_INTERFACE_BACKEND = "sentry.sentry_metrics.client.snuba.SnubaMetricsBackend" +SENTRY_METRICS_INTERFACE_BACKEND = ( + "sentry.sentry_metrics.client.snuba.SnubaMetricsBackend" +) SENTRY_METRICS_INTERFACE_BACKEND_OPTIONS: dict[str, Any] = {} # Controls whether the SDK will send the metrics upstream to the S4S transport. 
@@ -3451,13 +3508,13 @@ def custom_parameter_sort(parameter: dict) -> tuple[str, int]: SEER_PROJECT_GROUPING_RECORDS_DELETE_URL = ( f"/{SEER_SIMILARITY_MODEL_VERSION}/issues/similar-issues/grouping-record/delete" ) -SEER_HASH_GROUPING_RECORDS_DELETE_URL = ( - f"/{SEER_SIMILARITY_MODEL_VERSION}/issues/similar-issues/grouping-record/delete-by-hash" -) +SEER_HASH_GROUPING_RECORDS_DELETE_URL = f"/{SEER_SIMILARITY_MODEL_VERSION}/issues/similar-issues/grouping-record/delete-by-hash" SEER_SIMILARITY_CIRCUIT_BREAKER_KEY = "seer.similarity" SEER_ANOMALY_DETECTION_VERSION = "v1" -SEER_ANOMALY_DETECTION_STORE_DATA_URL = f"/{SEER_ANOMALY_DETECTION_VERSION}/anomaly-detection/store" +SEER_ANOMALY_DETECTION_STORE_DATA_URL = ( + f"/{SEER_ANOMALY_DETECTION_VERSION}/anomaly-detection/store" +) SIMILARITY_BACKFILL_COHORT_MAP: dict[str, list[int]] = {} @@ -3466,7 +3523,9 @@ def custom_parameter_sort(parameter: dict) -> tuple[str, int]: if ngrok_host: SENTRY_OPTIONS["system.url-prefix"] = f"https://{ngrok_host}" SENTRY_OPTIONS["system.base-hostname"] = ngrok_host - SENTRY_OPTIONS["system.region-api-url-template"] = f"https://{{region}}.{ngrok_host}" + SENTRY_OPTIONS["system.region-api-url-template"] = ( + f"https://{{region}}.{ngrok_host}" + ) SENTRY_FEATURES["system:multi-region"] = True CSRF_TRUSTED_ORIGINS = [f"https://*.{ngrok_host}", f"https://{ngrok_host}"] diff --git a/src/sentry/mediators/token_exchange/util.py b/src/sentry/mediators/token_exchange/util.py index 6bdbf8b6323f6b..20fb4a6a2c9ba8 100644 --- a/src/sentry/mediators/token_exchange/util.py +++ b/src/sentry/mediators/token_exchange/util.py @@ -6,11 +6,13 @@ AUTHORIZATION = "authorization_code" REFRESH = "refresh_token" +TOKEN_EXCHANGE = "urn:ietf:params:oauth:grant-type:token-exchange" class GrantTypes: AUTHORIZATION = AUTHORIZATION REFRESH = REFRESH + TOKEN_EXCHANGE = TOKEN_EXCHANGE def token_expiration(): diff --git a/src/sentry/middleware/auth.py b/src/sentry/middleware/auth.py index e6e9f74ab1c28c..ebc37b6a541b44 100644 --- a/src/sentry/middleware/auth.py +++ b/src/sentry/middleware/auth.py @@ -33,11 +33,17 @@ def get_user(request): # currently set on the User. By default, the value will # be None until the first action has been taken, at # which point, a nonce will always be required. - if user.session_nonce and request.session.get("_nonce", "") != user.session_nonce: + if ( + user.session_nonce + and request.session.get("_nonce", "") != user.session_nonce + ): # If the nonces don't match, this session is anonymous. logger.info( "user.auth.invalid-nonce", - extra={"ip_address": request.META["REMOTE_ADDR"], "user_id": user.id}, + extra={ + "ip_address": request.META["REMOTE_ADDR"], + "user_id": user.id, + }, ) user = AnonymousUser() else: @@ -48,6 +54,7 @@ def get_user(request): class AuthenticationMiddleware(MiddlewareMixin): def process_request(self, request: HttpRequest) -> None: + logger.error(f"GENERAL AUTH HEADERS: {request.headers}") if request.path.startswith("/api/0/internal/rpc/"): # Avoid doing RPC authentication when we're already # in an RPC request. 
diff --git a/src/sentry/web/frontend/oauth_token.py b/src/sentry/web/frontend/oauth_token.py index e0cd8445358930..ad96cdd6f637a4 100644 --- a/src/sentry/web/frontend/oauth_token.py +++ b/src/sentry/web/frontend/oauth_token.py @@ -2,6 +2,8 @@ import secrets import jwt +from jwt import PyJWKClient + import requests from django.http import HttpRequest, HttpResponse, HttpResponseBadRequest from django.utils import timezone @@ -63,78 +65,74 @@ def _get_rsa_key(self, jwks, kid): return None def _validate_id_token(self, request: HttpRequest): - grant_type = request.POST.get("grant_type") - if grant_type != GrantTypes.TOKEN_EXCHANGE: - raise NotImplementedError - - id_token = request.POST.get("subject_token") + request_json = json.loads(request.body) + id_token = request_json.get("subject_token") if not id_token: # return error matching RFC requirements https://www.rfc-editor.org/rfc/rfc6749#section-5.2 return HttpResponse( - json.dumps( - { - "error": "invalid_request", - "error_description": "missing id token", - } - ), + json.dumps({ + "error": "invalid_request", + "error_description": "missing id token", + }), status=400, ) # TODO make this configurable for other CI providers eventually github_actions_issuer = "https://token.actions.githubusercontent.com" - github_actions_jwks_url = "https://token.actions.githubusercontent.com/.well-known/jwks" + github_actions_jwks_url = ( + "https://token.actions.githubusercontent.com/.well-known/jwks" + ) # TODO validate audience expected_audience = options.get("system.url-prefix") try: - response = requests.get(github_actions_jwks_url) - response.raise_for_status() - jwks = response.json() + jwks_client = PyJWKClient(github_actions_jwks_url) + signing_key = jwks_client.get_signing_key_from_jwt(id_token) - unverified_header = jwt.get_unverified_header(id_token) - rsa_key = self._get_rsa_key(jwks, unverified_header["kid"]) - - if rsa_key: - # TODO: validate org/repository claims too! 
- return jwt.decode( + if signing_key: + decoded_jwt = jwt.decode( id_token, - rsa_key, + signing_key.key, audience=expected_audience, issuer=github_actions_issuer, algorithms=["RS256"], + # options={"verify_signature": False}, + ) + logger.error( + f"-----AUDIENCE {decoded_jwt['aud']}\n EXPECTED: {expected_audience}" ) + return decoded_jwt else: return HttpResponseBadRequest( - json.dumps( - { - "error": "invalid_request", - "error_description": "id token signed with invalid key", - } - ), + json.dumps({ + "error": "invalid_request", + "error_description": "id token signed with invalid key", + }), content_type="application/json", ) - except requests.exception.RequestException as e: + except requests.exceptions.RequestException as e: logger.exception("failed to fetch JWKS") return None except jwt.exceptions.InvalidTokenError as e: logger.exception(f"invalid id token: {e}") return None - def _get_organization_from_resource(self, request: HttpRequest) -> Organization | None: - organization_resource = request.POST.get("resource") + def _get_organization_from_resource( + self, request: HttpRequest + ) -> Organization | None: + request_json = json.loads(request.body) + organization_resource = request_json.get("resource") # TODO: use stronger validation here with regex that ensures the organization ID is all numbers if not organization_resource.startswith( - f'{options.get("system.url-prefix")}/api/0/organizations/' + f"https://trosentry.ngrok.dev/api/0/organizations/" ): return HttpResponseBadRequest( - json.dumps( - { - "error": "invalid_target", - "error_description": "resource target must be an organization", - } - ), + json.dumps({ + "error": "invalid_target", + "error_description": "resource target must be an organization", + }), content_type="application/json", ) @@ -142,9 +140,9 @@ def _get_organization_from_resource(self, request: HttpRequest) -> Organization organization_id = organization_resource.rsplit("/", 1) # return a Organization object? 
- if organization_id.isnumeric(): + if organization_id[1].isnumeric(): try: - organization = Organization.objects.get(id=organization_id[1]) + organization = Organization.objects.get(id=int(organization_id[1])) return organization except Organization.DoesNotExist: return HttpResponseBadRequest(json.dumps({"error": "invalid_target"})) @@ -168,9 +166,12 @@ def _create_org_auth_token(self, organization: Organization): ) try: - org_mapping = OrganizationMapping.objects.get(organization_id=organization.id) + org_mapping = OrganizationMapping.objects.get( + organization_id=organization.id + ) token_str = generate_token( - organization.slug, generate_region_url(region_name=org_mapping.region_name) + organization.slug, + generate_region_url(region_name=org_mapping.region_name), ) except SystemUrlPrefixMissingException: return Response( @@ -221,9 +222,11 @@ def _create_org_auth_token(self, organization: Organization): @method_decorator(never_cache) def post(self, request: HttpRequest) -> HttpResponse: - grant_type = request.POST.get("grant_type") - client_id = request.POST.get("client_id") - client_secret = request.POST.get("client_secret") + logger.error(f"-----------------REQUEST BODY------------: {request.body}") + request_json = json.loads(request.body) + grant_type = request_json.get("grant_type") + client_id = request_json.get("client_id") + client_secret = request_json.get("client_secret") metrics.incr( "oauth_token.post.start", @@ -234,29 +237,42 @@ def post(self, request: HttpRequest) -> HttpResponse: }, ) + logger.error(f"Request POST: {request.body}") + + logger.error(f"{grant_type} | {GrantTypes.TOKEN_EXCHANGE}") + if grant_type == GrantTypes.TOKEN_EXCHANGE: id_token = self._validate_id_token(request) if id_token: organization = self._get_organization_from_resource(request) - gha_oidc_config = organization.get_option("sentry:github_action_oidc") + gha_oidc_config = json.loads( + organization.get_option("sentry:github_action_oidc") + ) # check that the organization is what we expect if gha_oidc_config["organization"] != id_token["repository_owner"]: return HttpResponse(status=401) else: org_auth_token_data = self._create_org_auth_token(organization) + logger.error(f"response: {org_auth_token_data}") return HttpResponse( - json.dumps(org_auth_token_data), content_type="application/json", status=200 + json.dumps(org_auth_token_data), + content_type="application/json", + status=200, ) else: return HttpResponseBadRequest() if not client_id: - return self.error(request=request, name="missing_client_id", reason="missing client_id") + return self.error( + request=request, name="missing_client_id", reason="missing client_id" + ) if not client_secret: return self.error( - request=request, name="missing_client_secret", reason="missing client_secret" + request=request, + name="missing_client_secret", + reason="missing client_secret", ) if grant_type not in [GrantTypes.AUTHORIZATION, GrantTypes.REFRESH]: @@ -264,14 +280,18 @@ def post(self, request: HttpRequest) -> HttpResponse: try: application = ApiApplication.objects.get( - client_id=client_id, client_secret=client_secret, status=ApiApplicationStatus.active + client_id=client_id, + client_secret=client_secret, + status=ApiApplicationStatus.active, ) except ApiApplication.DoesNotExist: metrics.incr( "oauth_token.post.invalid", sample_rate=1.0, ) - logger.warning("Invalid client_id / secret pair", extra={"client_id": client_id}) + logger.warning( + "Invalid client_id / secret pair", extra={"client_id": client_id} + ) return self.error( 
request=request, name="invalid_credentials", @@ -280,9 +300,13 @@ def post(self, request: HttpRequest) -> HttpResponse: ) if grant_type == GrantTypes.AUTHORIZATION: - token_data = self.get_access_tokens(request=request, application=application) + token_data = self.get_access_tokens( + request=request, application=application + ) else: - token_data = self.get_refresh_token(request=request, application=application) + token_data = self.get_refresh_token( + request=request, application=application + ) if "error" in token_data: return self.error( request=request,
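
On the `resource` handling in `_get_organization_from_resource`: the handler treats the trailing path segment of the organization API URL as the organization id, and the `organization_id[1]` fix above is needed because `rsplit("/", 1)` returns a two-element list rather than a string. A small standalone sketch of that parsing, using the regex-based numeric validation the TODO suggests (the helper name and regex are assumptions, not what the handler currently does):

```python
import re


def parse_org_id(resource: str, url_prefix: str) -> int | None:
    """Extract a numeric organization id from a URL like
    '<url_prefix>/api/0/organizations/123'; return None when it does not match.

    The full-match regex is the stricter validation hinted at by the TODO in
    _get_organization_from_resource; it is an assumption, not current behavior.
    """
    pattern = re.escape(url_prefix.rstrip("/")) + r"/api/0/organizations/(\d+)"
    match = re.fullmatch(pattern, resource.rstrip("/"))
    return int(match.group(1)) if match else None


# rsplit("/", 1) yields a (head, tail) pair, hence organization_id[1] above:
assert "https://sentry.example.com/api/0/organizations/42".rsplit("/", 1)[1] == "42"
assert parse_org_id(
    "https://sentry.example.com/api/0/organizations/42",
    "https://sentry.example.com",
) == 42
```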