From 6d4772a4091aba9d3fa12745fac51cc17ab70f12 Mon Sep 17 00:00:00 2001 From: dogboat Date: Mon, 1 Jul 2024 10:42:30 -0400 Subject: [PATCH] Metrics performance improvements (#10446) * metrics-performance wip dashboard changes * metrics-performance wip on metrics * metrics-performance work on metrics tables * metrics-performance wip more tables * metrics-performance endpoints work * metrics-performance renaming * metrics-performance endpoints and some cleanup * metrics-performance endpoints metrics details table populates with finding info * metrics-performance endpoint calcs against endpoint_status instead of related finding * metrics-performance template var fix, calculate period ranges more in line with previous offering * metrics-performance refactoring * metrics-performance remove old code, use existing helper function rather than reimplementing * metrics-performance fix bug in age determination to handle "negative" mitigation dates * metrics-performance updates to rename some vars, use existing functions, comments * metrics-performance rename age entry vars to be more descriptive * metrics-performance changes to use existing functions * metrics-performance comments, typing, refactoring * metrics-performance refactoring and comments * metrics-performance type hinting, fix dashboard * metrics-performance move metrics methods to a util module * metrics-performance reordering imports for linter * metrics-performance refactor * metrics-performance remove perf class, restore some code that didn't need changing * metrics-performance comment cleanup * metrics-performance work on test fixes * metrics-performance test updates * metrics-performance test updates * metrics-performance attempt at handling findings age determination for mysql * metrics-performance fix import * metrics-performance loosen exception for finding age determination * metrics-performance derp querysets are lazy * metrics-performance linter fix * metrics-performance fixes for mysql * metrics-performance use counts for severities instead of sums to avoid null values, use correct reverse lookup on urls in accepted/closed/open tables * metrics-performance set appropriate links on findings tables * trigger actions --- dojo/metrics/utils.py | 598 +++++++++++++++++++++ dojo/metrics/views.py | 376 +------------ dojo/templates/dojo/dashboard-metrics.html | 56 +- dojo/templates/dojo/metrics.html | 223 ++++---- unittests/test_metrics_queries.py | 98 ++-- 5 files changed, 821 insertions(+), 530 deletions(-) create mode 100644 dojo/metrics/utils.py diff --git a/dojo/metrics/utils.py b/dojo/metrics/utils.py new file mode 100644 index 0000000000..1a9d3f07c4 --- /dev/null +++ b/dojo/metrics/utils.py @@ -0,0 +1,598 @@ + +from datetime import date, datetime, timedelta +from enum import Enum +from functools import partial +from math import ceil +from typing import Any, Callable, NamedTuple, TypeVar, Union + +from dateutil.relativedelta import relativedelta +from django.contrib import messages +from django.db import connection +from django.db.models import Case, Count, F, IntegerField, Q, Sum, Value, When +from django.db.models.expressions import RawSQL +from django.db.models.functions import Coalesce, ExtractDay, Now, TruncMonth, TruncWeek +from django.db.models.query import QuerySet +from django.http import HttpRequest +from django.utils import timezone +from django.utils.translation import gettext as _ + +from dojo.authorization.roles_permissions import Permissions +from dojo.endpoint.queries import get_authorized_endpoint_status +from 
dojo.filters import ( + MetricsEndpointFilter, + MetricsEndpointFilterWithoutObjectLookups, + MetricsFindingFilter, + MetricsFindingFilterWithoutObjectLookups, +) +from dojo.finding.helper import ACCEPTED_FINDINGS_QUERY, CLOSED_FINDINGS_QUERY, OPEN_FINDINGS_QUERY +from dojo.finding.queries import get_authorized_findings +from dojo.models import Endpoint_Status, Finding, Product_Type +from dojo.product.queries import get_authorized_products +from dojo.utils import ( + get_system_setting, + queryset_check, +) + + +def finding_queries( + prod_type: QuerySet[Product_Type], + request: HttpRequest +) -> dict[str, Any]: + # Get the initial list of findings the user is authorized to see + findings_query = get_authorized_findings( + Permissions.Finding_View, + user=request.user, + ).select_related( + 'reporter', + 'test', + 'test__engagement__product', + 'test__engagement__product__prod_type', + ).prefetch_related( + 'risk_acceptance_set', + 'test__engagement__risk_acceptance', + 'test__test_type', + ) + + filter_string_matching = get_system_setting("filter_string_matching", False) + finding_filter_class = MetricsFindingFilterWithoutObjectLookups if filter_string_matching else MetricsFindingFilter + findings = finding_filter_class(request.GET, queryset=findings_query) + form = findings.form + findings_qs = queryset_check(findings) + # Quick check to determine if the filters were too tight and filtered everything away + if not findings_qs.exists() and not findings_query.exists(): + findings = findings_query + findings_qs = findings if isinstance(findings, QuerySet) else findings.qs + messages.add_message( + request, + messages.ERROR, + _('All objects have been filtered away. Displaying all objects'), + extra_tags='alert-danger') + + start_date, end_date = get_date_range(findings_qs) + + # Filter by the date ranges supplied + findings_query = findings_query.filter(date__range=[start_date, end_date]) + # Get the list of closed and risk accepted findings + findings_closed = findings_query.filter(CLOSED_FINDINGS_QUERY) + accepted_findings = findings_query.filter(ACCEPTED_FINDINGS_QUERY) + active_findings = findings_query.filter(OPEN_FINDINGS_QUERY) + + # filter by product type if applicable + if len(prod_type) > 0: + findings_query = findings_query.filter(test__engagement__product__prod_type__in=prod_type) + findings_closed = findings_closed.filter(test__engagement__product__prod_type__in=prod_type) + accepted_findings = accepted_findings.filter(test__engagement__product__prod_type__in=prod_type) + active_findings = active_findings.filter(test__engagement__product__prod_type__in=prod_type) + + # Get the severity counts of risk accepted findings + accepted_findings_counts = severity_count(accepted_findings, 'aggregate', 'severity') + + weeks_between, months_between = period_deltas(start_date, end_date) + + query_counts_for_period = query_counts( + findings_query, + active_findings, + accepted_findings, + start_date, + MetricsType.FINDING + ) + + monthly_counts = query_counts_for_period(MetricsPeriod.MONTH, months_between) + weekly_counts = query_counts_for_period(MetricsPeriod.WEEK, weeks_between) + + top_ten = get_authorized_products(Permissions.Product_View) + top_ten = top_ten.filter(engagement__test__finding__verified=True, + engagement__test__finding__false_p=False, + engagement__test__finding__duplicate=False, + engagement__test__finding__out_of_scope=False, + engagement__test__finding__mitigated__isnull=True, + engagement__test__finding__severity__in=('Critical', 'High', 'Medium', 'Low'), + 
prod_type__in=prod_type) + + top_ten = severity_count( + top_ten, 'annotate', 'engagement__test__finding__severity' + ).order_by( + '-critical', '-high', '-medium', '-low' + )[:10] + + return { + 'all': findings_query, + 'closed': findings_closed, + 'accepted': accepted_findings, + 'accepted_count': accepted_findings_counts, + 'top_ten': top_ten, + 'monthly_counts': monthly_counts, + 'weekly_counts': weekly_counts, + 'weeks_between': weeks_between, + 'start_date': start_date, + 'end_date': end_date, + 'form': form, + } + + +def endpoint_queries( + prod_type: QuerySet[Product_Type], + request: HttpRequest +) -> dict[str, Any]: + endpoints_query = Endpoint_Status.objects.filter( + mitigated=False, + finding__severity__in=('Critical', 'High', 'Medium', 'Low', 'Info') + ).prefetch_related( + 'finding__test__engagement__product', + 'finding__test__engagement__product__prod_type', + 'finding__test__engagement__risk_acceptance', + 'finding__risk_acceptance_set', + 'finding__reporter' + ) + + endpoints_query = get_authorized_endpoint_status(Permissions.Endpoint_View, endpoints_query, request.user) + filter_string_matching = get_system_setting("filter_string_matching", False) + filter_class = MetricsEndpointFilterWithoutObjectLookups if filter_string_matching else MetricsEndpointFilter + endpoints = filter_class(request.GET, queryset=endpoints_query) + form = endpoints.form + endpoints_qs = queryset_check(endpoints) + + if not endpoints_qs.exists(): + endpoints = endpoints_query + endpoints_qs = endpoints if isinstance(endpoints, QuerySet) else endpoints.qs + messages.add_message( + request, + messages.ERROR, + _('All objects have been filtered away. Displaying all objects'), + extra_tags='alert-danger') + + start_date, end_date = get_date_range(endpoints_qs) + + if len(prod_type) > 0: + endpoints_closed = Endpoint_Status.objects.filter( + mitigated_time__range=[start_date, end_date], + finding__test__engagement__product__prod_type__in=prod_type + ).prefetch_related( + 'finding__test__engagement__product' + ) + # capture the accepted findings in period + accepted_endpoints = Endpoint_Status.objects.filter( + date__range=[start_date, end_date], + risk_accepted=True, + finding__test__engagement__product__prod_type__in=prod_type + ).prefetch_related( + 'finding__test__engagement__product' + ) + else: + endpoints_closed = Endpoint_Status.objects.filter( + mitigated_time__range=[start_date, end_date] + ).prefetch_related( + 'finding__test__engagement__product' + ) + # capture the accepted findings in period + accepted_endpoints = Endpoint_Status.objects.filter( + date__range=[start_date, end_date], + risk_accepted=True + ).prefetch_related( + 'finding__test__engagement__product' + ) + + endpoints_closed = get_authorized_endpoint_status(Permissions.Endpoint_View, endpoints_closed, request.user) + accepted_endpoints = get_authorized_endpoint_status(Permissions.Endpoint_View, accepted_endpoints, request.user) + accepted_endpoints_counts = severity_count(accepted_endpoints, 'aggregate', 'finding__severity') + + weeks_between, months_between = period_deltas(start_date, end_date) + + query_counts_for_period = query_counts( + endpoints_qs, + endpoints_qs.filter(finding__active=True), + accepted_endpoints, + start_date, + MetricsType.ENDPOINT + ) + + monthly_counts = query_counts_for_period(MetricsPeriod.MONTH, months_between) + weekly_counts = query_counts_for_period(MetricsPeriod.WEEK, weeks_between) + + top_ten = get_authorized_products(Permissions.Product_View) + top_ten = 
top_ten.filter(engagement__test__finding__status_finding__mitigated=False, + engagement__test__finding__status_finding__false_positive=False, + engagement__test__finding__status_finding__out_of_scope=False, + engagement__test__finding__status_finding__risk_accepted=False, + engagement__test__finding__severity__in=('Critical', 'High', 'Medium', 'Low'), + prod_type__in=prod_type) + + top_ten = severity_count( + top_ten, 'annotate', 'engagement__test__finding__severity' + ).order_by( + '-critical', '-high', '-medium', '-low' + )[:10] + + return { + 'all': endpoints, + 'closed': endpoints_closed, + 'accepted': accepted_endpoints, + 'accepted_count': accepted_endpoints_counts, + 'top_ten': top_ten, + 'monthly_counts': monthly_counts, + 'weekly_counts': weekly_counts, + 'weeks_between': weeks_between, + 'start_date': start_date, + 'end_date': end_date, + 'form': form, + } + + +# For type-hinting methods that take querysets we can perform metrics over +MetricsQuerySet = TypeVar('MetricsQuerySet', QuerySet[Finding], QuerySet[Endpoint_Status]) + + +class _MetricsPeriodEntry(NamedTuple): + """ + Class for holding information for a metrics period. Allows us to store a kwarg for date manipulation alongside a DB + method used to aggregate around the same timeframe. + """ + datetime_name: str + db_method: Union[TruncWeek, TruncMonth] + + +class MetricsPeriod(_MetricsPeriodEntry, Enum): + """ + Enum for the two metrics periods supported: by week and month + """ + WEEK = ('weeks', TruncWeek) + MONTH = ('months', TruncMonth) + + +class _MetricsTypeEntry(NamedTuple): + """ + Class for holding information for a metrics type. Allows us to store relative queryset lookups for severities + alongside relative lookups for closed statuses. + """ + severity_lookup: str + closed_lookup: str + + +class MetricsType(_MetricsTypeEntry, Enum): + """ + Enum for the two metrics types supported: by Findings and by Endpoints (Endpoint_Status) + """ + FINDING = ('severity', 'is_mitigated') + ENDPOINT = ('finding__severity', 'mitigated') + + +def query_counts( + open_qs: MetricsQuerySet, + active_qs: MetricsQuerySet, + accepted_qs: MetricsQuerySet, + start_date: date, + metrics_type: MetricsType +) -> Callable[[MetricsPeriod, int], dict[str, list[dict]]]: + """ + Given three QuerySets, a start date, and a MetricsType, returns a method that can be used to generate statistics for + the three QuerySets across a given period. In use, simplifies gathering monthly and weekly aggregates for QuerySets. 
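+
+    Illustrative usage (a sketch mirroring finding_queries() above; all names are from this module):
+
+        query_counts_for_period = query_counts(
+            findings_query, active_findings, accepted_findings, start_date, MetricsType.FINDING)
+        monthly_counts = query_counts_for_period(MetricsPeriod.MONTH, months_between)
+        weekly_counts = query_counts_for_period(MetricsPeriod.WEEK, weeks_between)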
+ + :param open_qs: QuerySet for open findings/endpoints + :param active_qs: QuerySet for active findings/endpoints + :param accepted_qs: QuerySet for accepted findings/endpoints + :param start_date: The start date for statistics generation + :param metrics_type: The type of metrics to generate statistics for + :return: A method that takes period information to generate statistics for the given QuerySets + """ + def _aggregates_for_period(period: MetricsPeriod, period_count: int) -> dict[str, list[dict]]: + def _aggregate_data(qs: MetricsQuerySet, include_closed: bool = False) -> list[dict]: + chart_data = partial(get_charting_data, start_date=start_date, period=period, period_count=period_count) + agg_qs = partial(aggregate_counts_by_period, period=period, metrics_type=metrics_type) + return chart_data(agg_qs(qs, include_closed=include_closed), include_closed=include_closed) + return { + 'opened_per_period': _aggregate_data(open_qs, True), + 'active_per_period': _aggregate_data(active_qs), + 'accepted_per_period': _aggregate_data(accepted_qs) + } + return _aggregates_for_period + + +def get_date_range( + qs: QuerySet +) -> tuple[datetime, datetime]: + """ + Given a queryset of objects, returns a tuple of (earliest date, latest date) from among those objects, based on the + objects' 'date' attribute. On exception, returns a tuple representing (now, now). + + :param qs: The queryset of objects + :return: A tuple of (earliest date, latest date) + """ + try: + tz = timezone.get_current_timezone() + + start_date = qs.earliest('date').date + start_date = datetime(start_date.year, start_date.month, start_date.day, tzinfo=tz) + + end_date = qs.latest('date').date + end_date = datetime(end_date.year, end_date.month, end_date.day, tzinfo=tz) + except: + start_date = end_date = timezone.now() + + return start_date, end_date + + +def severity_count( + queryset: MetricsQuerySet, + method: str, + expression: str +) -> Union[MetricsQuerySet, dict[str, int]]: + """ + Aggregates counts by severity for the given queryset. + + :param queryset: The queryset to aggregate + :param method: The method to use for aggregation, either 'annotate' or 'aggregate' depending on use case. + :param expression: The lookup expression for severity, relative to the queryset model type + :return: A queryset annotated with severity counts (for 'annotate') or a dict of aggregated severity counts (for 'aggregate') + """ + total_expression = expression + '__in' + return getattr(queryset, method)( + total=Count('id', filter=Q(**{total_expression: ('Critical', 'High', 'Medium', 'Low', 'Info')})), + critical=Count('id', filter=Q(**{expression: 'Critical'})), + high=Count('id', filter=Q(**{expression: 'High'})), + medium=Count('id', filter=Q(**{expression: 'Medium'})), + low=Count('id', filter=Q(**{expression: 'Low'})), + info=Count('id', filter=Q(**{expression: 'Info'})) + ) + + +def identify_view( + request: HttpRequest +) -> str: + """ + Identifies the requested metrics view. 
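+
+    Illustrative examples (hypothetical requests, not exhaustive):
+
+        identify_view(request)  # GET contains type=Endpoint           -> 'Endpoint'
+        identify_view(request)  # GET contains finding__severity=High  -> 'Endpoint'
+        identify_view(request)  # no endpoint hints in GET or referer  -> 'Finding'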
+ + :param request: The request object + :return: A string, either 'Endpoint' or 'Finding', that represents the requested metrics view + """ + get_data = request.GET + view = get_data.get('type', None) + if view: + return view + + finding_severity = get_data.get('finding__severity', None) + false_positive = get_data.get('false_positive', None) + + referer = request.META.get('HTTP_REFERER', None) + endpoint_in_referer = referer and referer.find('type=Endpoint') > -1 + + if finding_severity or false_positive or endpoint_in_referer: + return 'Endpoint' + + return 'Finding' + + +def js_epoch( + d: Union[date, datetime] +) -> int: + """ + Converts a date/datetime object to a JavaScript epoch time (for use in FE charts) + + :param d: The date or datetime object + :return: The js epoch time (milliseconds since the epoch) + """ + if isinstance(d, date): + d = datetime.combine(d, datetime.min.time()) + return int(d.timestamp()) * 1000 + + +def get_charting_data( + qs: MetricsQuerySet, + start_date: date, + period: MetricsPeriod, + period_count: int, + include_closed: bool +) -> list[dict]: + """ + Given a queryset of severities data for charting, adds epoch timestamp information and fills in missing data points + that the queryset aggregation didn't include (because the data didn't exist) with zero-element data, all useful for + frontend chart rendering. Returns a list of these dictionaries, sorted by date ascending. + + :param qs: The query set + :param start_date: The start date + :param period: A MetricsPeriod to generate charting data across + :param period_count: The number of periods we should have data for + :param include_closed: A boolean dictating whether 'closed' finding/status aggregates should be included + :return: A list of dictionaries representing data points for charting, sorted by date + """ + tz = timezone.get_current_timezone() + + # Calculate the start date for our data. This will depend on whether we're generating for months or weeks. + if period == MetricsPeriod.WEEK: + # For weeks, start at the first day of the specified week + start_date = datetime(start_date.year, start_date.month, start_date.day, tzinfo=tz) + start_date = start_date + timedelta(days=-start_date.weekday()) + else: + # For months, start on the first day of the month + start_date = datetime(start_date.year, start_date.month, 1, tzinfo=tz) + + # Arrange all our data by epoch date for easy lookup in the loop below. + # At the same time, add the epoch date to each entry as the charts will rely on that. + by_date = {e: {'epoch': e, **q} for q in qs if (e := js_epoch(q['grouped_date'])) is not None} + + # Iterate over our period of time, adding zero-element data entries for dates not represented + for x in range(-1, period_count): + cur_date = start_date + relativedelta(**{period.datetime_name: x}) + if (e := js_epoch(cur_date)) not in by_date: + by_date[e] = { + 'epoch': e, 'grouped_date': cur_date.date(), + 'critical': 0, 'high': 0, 'medium': 0, 'low': 0, 'info': 0, 'total': 0} + if include_closed: + by_date[e]['closed'] = 0 + + # Return, sorting by date + return sorted(by_date.values(), key=lambda m: m['grouped_date']) + + +def period_deltas(start_date, end_date): + """ + Given a start date and end date, returns a tuple of (weeks between the dates, months between the dates). 
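+
+    Worked example (illustrative): for start_date 2020-11-01 and end_date 2020-12-01, relativedelta
+    yields 0 years, 1 month, 0 days, so months_between = 1 + 1 = 2 (the current month is included)
+    and weeks_between = ceil((1 * 4.33) + (0 / 7)) = 5.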
+ + :param start_date: The start date to consider + :param end_date: The end date to consider + :return: A tuple of integers representing (number of weeks between the dates, number of months between the dates) + """ + r = relativedelta(end_date, start_date) + months_between = (r.years * 12) + r.months + # include current month + months_between += 1 + + weeks_between = int(ceil((((r.years * 12) + r.months) * 4.33) + (r.days / 7))) + if weeks_between <= 0: + weeks_between += 2 + return weeks_between, months_between + + +def aggregate_counts_by_period( + qs: MetricsQuerySet, + period: MetricsPeriod, + metrics_type: MetricsType, + include_closed: bool, +) -> QuerySet: + """ + Annotates the given queryset with severity counts, grouping by the desired period as defined by the specified + MetricsPeriod. Optionally includes a sum of closed findings/statuses as well. + + :param qs: The queryset to annotate with aggregate severity counts, of either Findings or Endpoint_Statuses + :param period: A MetricsPeriod to aggregate across + :param metrics_type: The type of metrics to generate statistics for + :param include_closed: A boolean dictating whether 'closed' finding/status aggregates should be included + :return: A queryset with aggregate severity counts grouped by period + """ + + desired_values = ('grouped_date', 'critical', 'high', 'medium', 'low', 'info', 'total',) + + severities_by_period = severity_count( + # Group by desired period + qs.annotate(grouped_date=period.db_method('date')).values('grouped_date'), + 'annotate', + metrics_type.severity_lookup + ) + if include_closed: + severities_by_period = severities_by_period.annotate( + # Include 'closed' counts + closed=Sum(Case( + When(Q(**{metrics_type.closed_lookup: True}), then=Value(1)), + output_field=IntegerField(), default=0) + ), + ) + desired_values += ('closed',) + + return severities_by_period.values(*desired_values) + + +def findings_by_product( + findings: QuerySet[Finding] +) -> QuerySet[Finding]: + """ + Groups the given Findings queryset by related product (name/ID) + + :param findings: A queryset of Findings + :return: A queryset of Findings grouped by product (name/ID) + """ + return findings.values(product_name=F('test__engagement__product__name'), + product_id=F('test__engagement__product__id')) + + +def get_in_period_details( + findings: QuerySet[Finding] +) -> tuple[QuerySet[Finding], QuerySet[Finding], dict[str, int]]: + """ + Gathers details for the given queryset, corresponding to metrics information for 'in period' Findings + + :param findings: A queryset of Findings + :return: A tuple of (a queryset of severity aggregates, a queryset of severity aggregates by product, a dict of + Findings by age) + """ + in_period_counts = severity_count(findings, 'aggregate', 'severity') + in_period_details = severity_count( + findings_by_product(findings), 'annotate', 'severity' + ).order_by('product_name') + + # Approach to age determination is db-engine dependent + if 'postgresql' in connection.settings_dict['ENGINE']: + age_detail = findings.annotate(age=ExtractDay(Coalesce('mitigated', Now()) - F('date'))) + elif 'mysql' in connection.settings_dict['ENGINE']: + # MySQL doesn't support durations natively and using an expression with subtraction yields unwanted results, + # so datediff() it is. 
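+        # For illustration, the raw SQL this builds (assuming Finding's default table name, dojo_finding):
+        #   DATEDIFF(COALESCE(dojo_finding.mitigated, CURRENT_TIMESTAMP), dojo_finding.date)
+        # i.e. the age in days, measured to mitigation time when mitigated, otherwise to "now".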
+ finding_table = Finding.objects.model._meta.db_table + age_detail = findings.annotate( + age=RawSQL(f'DATEDIFF(COALESCE({finding_table}.mitigated, CURRENT_TIMESTAMP), {finding_table}.date)', []) + ) + else: + raise ValueError + + age_detail = age_detail.aggregate( + age_under_30=Sum(Case(When(age__lte=30, then=Value(1))), default=Value(0), output_field=IntegerField()), + age_31_60=Sum(Case(When(age__range=[31, 60], then=Value(1))), default=Value(0), output_field=IntegerField()), + age_61_90=Sum(Case(When(age__range=[61, 90], then=Value(1))), default=Value(0), output_field=IntegerField()), + age_90_plus=Sum(Case(When(age__gt=90, then=Value(1))), default=Value(0), output_field=IntegerField()), + ) + + return in_period_counts, in_period_details, age_detail + + +def get_accepted_in_period_details( + findings: QuerySet[Finding] +) -> QuerySet[Finding]: + """ + Gathers details for the given queryset, corresponding to metrics information for 'accepted' Findings + + :param findings: A queryset of Findings + :return: A queryset of severity aggregates for Findings grouped by product (name/ID) + """ + return severity_count( + findings_by_product(findings), 'annotate', 'severity' + ).order_by('product_name') + + +def get_closed_in_period_details( + findings: QuerySet[Finding] +) -> tuple[QuerySet[Finding], QuerySet[Finding]]: + """ + Gathers details for the given queryset, corresponding to metrics information for 'closed' Findings + + :param findings: A queryset of Findings + :return: A tuple of (a queryset of severity aggregates, a queryset of severity aggregates for Findings grouped by + product) + """ + return ( + severity_count(findings, 'aggregate', 'severity'), + severity_count( + findings_by_product(findings), 'annotate', 'severity' + ).order_by('product_name') + ) + + +def findings_queryset( + qs: MetricsQuerySet +) -> QuerySet[Finding]: + """ + Given a MetricsQuerySet, returns a QuerySet representing all its findings. 
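+
+    Illustrative sketch: given a QuerySet[Endpoint_Status], this pivots to the related Findings
+    via the status_finding reverse relation, e.g.
+
+        findings_queryset(Endpoint_Status.objects.filter(risk_accepted=True))
+        # -> Finding.objects.filter(status_finding__in=<that queryset>)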
+ + :param qs: MetricsQuerySet (A queryset of either Findings or Endpoint_Statuses) + :return: A queryset of Findings, related to the given queryset + """ + if qs.model is Endpoint_Status: + return Finding.objects.filter(status_finding__in=qs) + else: + return qs diff --git a/dojo/metrics/views.py b/dojo/metrics/views.py index f0348f348e..6428fb453d 100644 --- a/dojo/metrics/views.py +++ b/dojo/metrics/views.py @@ -6,14 +6,12 @@ from collections import OrderedDict from datetime import date, datetime, timedelta from functools import reduce -from math import ceil from operator import itemgetter from dateutil.relativedelta import relativedelta from django.contrib import messages from django.core.exceptions import PermissionDenied from django.db.models import Case, Count, IntegerField, Q, Sum, Value, When -from django.db.models.query import QuerySet from django.http import HttpResponseRedirect from django.shortcuts import get_object_or_404, render from django.urls import reverse @@ -25,18 +23,19 @@ from dojo.authorization.authorization import user_has_permission_or_403 from dojo.authorization.roles_permissions import Permissions -from dojo.endpoint.queries import get_authorized_endpoint_status -from dojo.filters import ( - MetricsEndpointFilter, - MetricsEndpointFilterWithoutObjectLookups, - MetricsFindingFilter, - MetricsFindingFilterWithoutObjectLookups, - UserFilter, -) -from dojo.finding.helper import ACCEPTED_FINDINGS_QUERY, CLOSED_FINDINGS_QUERY -from dojo.finding.queries import get_authorized_findings +from dojo.filters import UserFilter from dojo.forms import ProductTagCountsForm, ProductTypeCountsForm, SimpleMetricsForm -from dojo.models import Dojo_User, Endpoint_Status, Engagement, Finding, Product, Product_Type, Risk_Acceptance, Test +from dojo.metrics.utils import ( + endpoint_queries, + finding_queries, + findings_queryset, + get_accepted_in_period_details, + get_closed_in_period_details, + get_in_period_details, + identify_view, + severity_count, +) +from dojo.models import Dojo_User, Engagement, Finding, Product, Product_Type, Risk_Acceptance, Test from dojo.product.queries import get_authorized_products from dojo.product_type.queries import get_authorized_product_types from dojo.utils import ( @@ -44,7 +43,6 @@ count_findings, findings_this_period, get_page_items, - get_period_counts, get_punchcard_data, get_system_setting, opened_in_period, @@ -53,6 +51,7 @@ logger = logging.getLogger(__name__) + """ Greg, Jay status: in production @@ -73,324 +72,7 @@ def critical_product_metrics(request, mtype): }) -def get_date_range(objects): - tz = timezone.get_current_timezone() - - start_date = objects.earliest('date').date - start_date = datetime(start_date.year, start_date.month, start_date.day, - tzinfo=tz) - end_date = objects.latest('date').date - end_date = datetime(end_date.year, end_date.month, end_date.day, - tzinfo=tz) - - return start_date, end_date - - -def severity_count(queryset, method, expression): - total_expression = expression + '__in' - return getattr(queryset, method)( - total=Sum( - Case(When(**{total_expression: ('Critical', 'High', 'Medium', 'Low', 'Info')}, - then=Value(1)), - output_field=IntegerField(), - default=0)), - critical=Sum( - Case(When(**{expression: 'Critical'}, - then=Value(1)), - output_field=IntegerField(), - default=0)), - high=Sum( - Case(When(**{expression: 'High'}, - then=Value(1)), - output_field=IntegerField(), - default=0)), - medium=Sum( - Case(When(**{expression: 'Medium'}, - then=Value(1)), - output_field=IntegerField(), - default=0)), 
- low=Sum( - Case(When(**{expression: 'Low'}, - then=Value(1)), - output_field=IntegerField(), - default=0)), - info=Sum( - Case(When(**{expression: 'Info'}, - then=Value(1)), - output_field=IntegerField(), - default=0)), - ) - - -def identify_view(request): - get_data = request.GET - view = get_data.get('type', None) - if view: - return view - - finding_severity = get_data.get('finding__severity', None) - false_positive = get_data.get('false_positive', None) - - referer = request.META.get('HTTP_REFERER', None) - endpoint_in_referer = referer and referer.find('type=Endpoint') > -1 - - if finding_severity or false_positive or endpoint_in_referer: - return 'Endpoint' - - return 'Finding' - - -def finding_querys(prod_type, request): - # Get the initial list of findings th use is authorized to see - findings_query = get_authorized_findings( - Permissions.Finding_View, - user=request.user, - ).select_related( - 'reporter', - 'test', - 'test__engagement__product', - 'test__engagement__product__prod_type', - ).prefetch_related( - 'risk_acceptance_set', - 'test__engagement__risk_acceptance', - 'test__test_type', - ) - filter_string_matching = get_system_setting("filter_string_matching", False) - finding_filter_class = MetricsFindingFilterWithoutObjectLookups if filter_string_matching else MetricsFindingFilter - findings = finding_filter_class(request.GET, queryset=findings_query) - form = findings.form - findings_qs = queryset_check(findings) - # Quick check to determine if the filters were too tight and filtered everything away - if not findings_qs and not findings_query: - findings = findings_query - findings_qs = findings if isinstance(findings, QuerySet) else findings.qs - messages.add_message( - request, - messages.ERROR, - _('All objects have been filtered away. 
Displaying all objects'), - extra_tags='alert-danger') - # Attempt to parser the date ranges - try: - start_date, end_date = get_date_range(findings_qs) - except: - start_date = timezone.now() - end_date = timezone.now() - # Filter by the date ranges supplied - findings_query = findings_query.filter(date__range=[start_date, end_date]) - # Get the list of closed and risk accepted findings - findings_closed = findings_query.filter(CLOSED_FINDINGS_QUERY) - accepted_findings = findings_query.filter(ACCEPTED_FINDINGS_QUERY) - # filter by product type if applicable - if len(prod_type) > 0: - findings_closed = findings_closed.filter(test__engagement__product__prod_type__in=prod_type) - accepted_findings = accepted_findings.filter(test__engagement__product__prod_type__in=prod_type) - # Get the severity counts of risk accepted findings - accepted_findings_counts = severity_count(accepted_findings, 'aggregate', 'severity') - - r = relativedelta(end_date, start_date) - months_between = (r.years * 12) + r.months - # include current month - months_between += 1 - - weeks_between = int(ceil((((r.years * 12) + r.months) * 4.33) + (r.days / 7))) - if weeks_between <= 0: - weeks_between += 2 - - monthly_counts = get_period_counts(findings_qs, findings_closed, accepted_findings, months_between, start_date, - relative_delta='months') - weekly_counts = get_period_counts(findings_qs, findings_closed, accepted_findings, weeks_between, start_date, - relative_delta='weeks') - top_ten = get_authorized_products(Permissions.Product_View) - top_ten = top_ten.filter(engagement__test__finding__verified=True, - engagement__test__finding__false_p=False, - engagement__test__finding__duplicate=False, - engagement__test__finding__out_of_scope=False, - engagement__test__finding__mitigated__isnull=True, - engagement__test__finding__severity__in=( - 'Critical', 'High', 'Medium', 'Low'), - prod_type__in=prod_type) - top_ten = severity_count(top_ten, 'annotate', 'engagement__test__finding__severity').order_by('-critical', '-high', '-medium', '-low')[:10] - - return { - 'all': findings, - 'closed': findings_closed, - 'accepted': accepted_findings, - 'accepted_count': accepted_findings_counts, - 'top_ten': top_ten, - 'monthly_counts': monthly_counts, - 'weekly_counts': weekly_counts, - 'weeks_between': weeks_between, - 'start_date': start_date, - 'end_date': end_date, - 'form': form, - } - - -def endpoint_querys(prod_type, request): - endpoints_query = Endpoint_Status.objects.filter(mitigated=False, - finding__severity__in=('Critical', 'High', 'Medium', 'Low', 'Info')).prefetch_related( - 'finding__test__engagement__product', - 'finding__test__engagement__product__prod_type', - 'finding__test__engagement__risk_acceptance', - 'finding__risk_acceptance_set', - 'finding__reporter') - - endpoints_query = get_authorized_endpoint_status(Permissions.Endpoint_View, endpoints_query, request.user) - filter_string_matching = get_system_setting("filter_string_matching", False) - filter_class = MetricsEndpointFilterWithoutObjectLookups if filter_string_matching else MetricsEndpointFilter - endpoints = filter_class(request.GET, queryset=endpoints_query) - form = endpoints.form - endpoints_qs = queryset_check(endpoints) - - if not endpoints_qs: - endpoints = endpoints_query - endpoints_qs = endpoints if isinstance(endpoints, QuerySet) else endpoints.qs - messages.add_message(request, - messages.ERROR, - _('All objects have been filtered away. 
Displaying all objects'), - extra_tags='alert-danger') - - try: - start_date, end_date = get_date_range(endpoints_qs) - except: - start_date = timezone.now() - end_date = timezone.now() - - if len(prod_type) > 0: - endpoints_closed = Endpoint_Status.objects.filter(mitigated_time__range=[start_date, end_date], - finding__test__engagement__product__prod_type__in=prod_type).prefetch_related( - 'finding__test__engagement__product') - # capture the accepted findings in period - accepted_endpoints = Endpoint_Status.objects.filter(date__range=[start_date, end_date], risk_accepted=True, - finding__test__engagement__product__prod_type__in=prod_type). \ - prefetch_related('finding__test__engagement__product') - accepted_endpoints_counts = Endpoint_Status.objects.filter(date__range=[start_date, end_date], risk_accepted=True, - finding__test__engagement__product__prod_type__in=prod_type). \ - prefetch_related('finding__test__engagement__product') - else: - endpoints_closed = Endpoint_Status.objects.filter(mitigated_time__range=[start_date, end_date]).prefetch_related( - 'finding__test__engagement__product') - accepted_endpoints = Endpoint_Status.objects.filter(date__range=[start_date, end_date], risk_accepted=True). \ - prefetch_related('finding__test__engagement__product') - accepted_endpoints_counts = Endpoint_Status.objects.filter(date__range=[start_date, end_date], risk_accepted=True). \ - prefetch_related('finding__test__engagement__product') - - endpoints_closed = get_authorized_endpoint_status(Permissions.Endpoint_View, endpoints_closed, request.user) - accepted_endpoints = get_authorized_endpoint_status(Permissions.Endpoint_View, accepted_endpoints, request.user) - accepted_endpoints_counts = get_authorized_endpoint_status(Permissions.Endpoint_View, accepted_endpoints_counts, request.user) - accepted_endpoints_counts = severity_count(accepted_endpoints_counts, 'aggregate', 'finding__severity') - - r = relativedelta(end_date, start_date) - months_between = (r.years * 12) + r.months - # include current month - months_between += 1 - - weeks_between = int(ceil((((r.years * 12) + r.months) * 4.33) + (r.days / 7))) - if weeks_between <= 0: - weeks_between += 2 - - monthly_counts = get_period_counts(endpoints_qs, endpoints_closed, accepted_endpoints, months_between, start_date, - relative_delta='months') - weekly_counts = get_period_counts(endpoints_qs, endpoints_closed, accepted_endpoints, weeks_between, start_date, - relative_delta='weeks') - - top_ten = get_authorized_products(Permissions.Product_View) - top_ten = top_ten.filter(engagement__test__finding__status_finding__mitigated=False, - engagement__test__finding__status_finding__false_positive=False, - engagement__test__finding__status_finding__out_of_scope=False, - engagement__test__finding__status_finding__risk_accepted=False, - engagement__test__finding__severity__in=( - 'Critical', 'High', 'Medium', 'Low'), - prod_type__in=prod_type) - top_ten = severity_count(top_ten, 'annotate', 'engagement__test__finding__severity').order_by('-critical', '-high', '-medium', '-low')[:10] - - return { - 'all': endpoints, - 'closed': endpoints_closed, - 'accepted': accepted_endpoints, - 'accepted_count': accepted_endpoints_counts, - 'top_ten': top_ten, - 'monthly_counts': monthly_counts, - 'weekly_counts': weekly_counts, - 'weeks_between': weeks_between, - 'start_date': start_date, - 'end_date': end_date, - 'form': form, - } - - -def get_in_period_details(findings): - in_period_counts = {"Critical": 0, "High": 0, "Medium": 0, - "Low": 0, "Info": 0, "Total": 
0} - in_period_details = {} - age_detail = [0, 0, 0, 0] - - for obj in findings: - if 0 <= obj.age <= 30: - age_detail[0] += 1 - elif 30 < obj.age <= 60: - age_detail[1] += 1 - elif 60 < obj.age <= 90: - age_detail[2] += 1 - elif obj.age > 90: - age_detail[3] += 1 - - # This condition should be true in nearly all cases, - # but there are some far edge cases - if obj.severity in in_period_counts: - in_period_counts[obj.severity] += 1 - in_period_counts['Total'] += 1 - # This condition should be true in nearly all cases, - # but there are some far edge cases - if obj.severity in in_period_details: - if obj.test.engagement.product.name not in in_period_details: - in_period_details[obj.test.engagement.product.name] = { - 'path': reverse('product_open_findings', args=(obj.test.engagement.product.id,)), - 'Critical': 0, 'High': 0, 'Medium': 0, 'Low': 0, 'Info': 0, 'Total': 0} - in_period_details[obj.test.engagement.product.name][obj.severity] += 1 - in_period_details[obj.test.engagement.product.name]['Total'] += 1 - - return in_period_counts, in_period_details, age_detail - - -def get_accepted_in_period_details(findings): - accepted_in_period_details = {} - for obj in findings: - if obj.test.engagement.product.name not in accepted_in_period_details: - accepted_in_period_details[obj.test.engagement.product.name] = { - 'path': reverse('accepted_findings') + '?test__engagement__product=' + str(obj.test.engagement.product.id), - 'Critical': 0, 'High': 0, 'Medium': 0, 'Low': 0, 'Info': 0, 'Total': 0} - accepted_in_period_details[ - obj.test.engagement.product.name - ][obj.severity] += 1 - accepted_in_period_details[obj.test.engagement.product.name]['Total'] += 1 - - return accepted_in_period_details - - -def get_closed_in_period_details(findings): - closed_in_period_counts = {"Critical": 0, "High": 0, "Medium": 0, - "Low": 0, "Info": 0, "Total": 0} - closed_in_period_details = {} - - for obj in findings: - closed_in_period_counts[obj.severity] += 1 - closed_in_period_counts['Total'] += 1 - - if obj.test.engagement.product.name not in closed_in_period_details: - closed_in_period_details[obj.test.engagement.product.name] = { - 'path': reverse('closed_findings') + '?test__engagement__product=' + str( - obj.test.engagement.product.id), - 'Critical': 0, 'High': 0, 'Medium': 0, 'Low': 0, 'Info': 0, 'Total': 0} - closed_in_period_details[ - obj.test.engagement.product.name - ][obj.severity] += 1 - closed_in_period_details[obj.test.engagement.product.name]['Total'] += 1 - - return closed_in_period_counts, closed_in_period_details - - -@cache_page(60 * 5) # cache for 5 minutes +# @cache_page(60 * 5) # cache for 5 minutes @vary_on_cookie def metrics(request, mtype): template = 'dojo/metrics.html' @@ -416,31 +98,28 @@ def metrics(request, mtype): filters = {} if view == 'Finding': page_name = _('Product Type Metrics by Findings') - filters = finding_querys(prod_type, request) + filters = finding_queries(prod_type, request) elif view == 'Endpoint': page_name = _('Product Type Metrics by Affected Endpoints') - filters = endpoint_querys(prod_type, request) + filters = endpoint_queries(prod_type, request) - in_period_counts, in_period_details, age_detail = get_in_period_details([ - obj.finding if view == 'Endpoint' else obj - for obj in queryset_check(filters['all']) - ]) + all_findings = findings_queryset(queryset_check(filters['all'])) - accepted_in_period_details = get_accepted_in_period_details([ - obj.finding if view == 'Endpoint' else obj - for obj in filters['accepted'] - ]) + in_period_counts, 
in_period_details, age_detail = get_in_period_details(all_findings) - closed_in_period_counts, closed_in_period_details = get_closed_in_period_details([ - obj.finding if view == 'Endpoint' else obj - for obj in filters['closed'] - ]) + accepted_in_period_details = get_accepted_in_period_details( + findings_queryset(filters['accepted']) + ) + + closed_in_period_counts, closed_in_period_details = get_closed_in_period_details( + findings_queryset(filters['closed']) + ) punchcard = [] ticks = [] if 'view' in request.GET and 'dashboard' == request.GET['view']: - punchcard, ticks = get_punchcard_data(queryset_check(filters['all']), filters['start_date'], filters['weeks_between'], view) + punchcard, ticks = get_punchcard_data(all_findings, filters['start_date'], filters['weeks_between'], view) page_name = _('%(team_name)s Metrics') % {'team_name': get_system_setting('team_name')} template = 'dojo/dashboard-metrics.html' @@ -450,7 +129,8 @@ def metrics(request, mtype): 'name': page_name, 'start_date': filters['start_date'], 'end_date': filters['end_date'], - 'findings': filters['all'], + 'findings': all_findings, + 'max_findings_details': 50, 'opened_per_month': filters['monthly_counts']['opened_per_period'], 'active_per_month': filters['monthly_counts']['active_per_period'], 'opened_per_week': filters['weekly_counts']['opened_per_period'], diff --git a/dojo/templates/dojo/dashboard-metrics.html b/dojo/templates/dojo/dashboard-metrics.html index 929bea53e9..b5489e6381 100644 --- a/dojo/templates/dojo/dashboard-metrics.html +++ b/dojo/templates/dojo/dashboard-metrics.html @@ -208,11 +208,11 @@

{% blocktrans with start_date=start_date.date end_date=end_date.date%}{{ nam var high = []; var medium = []; var low = []; - {% for month in opened_per_month|slice:'1:' %} - critical.push([{{ month.0 }}, {{ month.2 }}]); - high.push([{{ month.0 }}, {{ month.3 }}]); - medium.push([{{ month.0 }}, {{ month.4 }}]); - low.push([{{ month.0 }}, {{ month.5 }}]); + {% for month in opened_per_month %} + critical.push([{{ month.epoch }}, {{ month.critical }}]); + high.push([{{ month.epoch }}, {{ month.high }}]); + medium.push([{{ month.epoch }}, {{ month.medium }}]); + low.push([{{ month.epoch }}, {{ month.low }}]); {% endfor %} opened_per_month(critical, high, medium, low); @@ -220,11 +220,11 @@

{% blocktrans with start_date=start_date.date end_date=end_date.date%}{{ nam high = []; medium = []; low = []; - {% for month in accepted_per_month|slice:'1:' %} - critical.push([{{ month.0 }}, {{ month.2 }}]); - high.push([{{ month.0 }}, {{ month.3 }}]); - medium.push([{{ month.0 }}, {{ month.4 }}]); - low.push([{{ month.0 }}, {{ month.5 }}]); + {% for month in accepted_per_month %} + critical.push([{{ month.epoch }}, {{ month.critical }}]); + high.push([{{ month.epoch }}, {{ month.high }}]); + medium.push([{{ month.epoch }}, {{ month.medium }}]); + low.push([{{ month.epoch }}, {{ month.low }}]); {% endfor %} accepted_per_month(critical, high, medium, low); @@ -232,11 +232,11 @@

{% blocktrans with start_date=start_date.date end_date=end_date.date%}{{ nam high = []; medium = []; low = []; - {% for month in opened_per_week|slice:'1:' %} - critical.push([{{ month.0 }}, {{ month.2 }}]); - high.push([{{ month.0 }}, {{ month.3 }}]); - medium.push([{{ month.0 }}, {{ month.4 }}]); - low.push([{{ month.0 }}, {{ month.5 }}]); + {% for week in opened_per_week %} + critical.push([{{ week.epoch }}, {{ week.critical }}]); + high.push([{{ week.epoch }}, {{ week.high }}]); + medium.push([{{ week.epoch }}, {{ week.medium }}]); + low.push([{{ week.epoch }}, {{ week.low }}]); {% endfor %} opened_per_week(critical, high, medium, low); @@ -244,11 +244,11 @@

{% blocktrans with start_date=start_date.date end_date=end_date.date%}{{ nam high = []; medium = []; low = []; - {% for month in accepted_per_week|slice:'1:' %} - critical.push([{{ month.0 }}, {{ month.2 }}]); - high.push([{{ month.0 }}, {{ month.3 }}]); - medium.push([{{ month.0 }}, {{ month.4 }}]); - low.push([{{ month.0 }}, {{ month.5 }}]); + {% for week in accepted_per_week %} + critical.push([{{ week.epoch }}, {{ week.critical }}]); + high.push([{{ week.epoch }}, {{ week.high }}]); + medium.push([{{ week.epoch }}, {{ week.medium }}]); + low.push([{{ week.epoch }}, {{ week.low }}]); {% endfor %} accepted_per_week(critical, high, medium, low); @@ -269,10 +269,10 @@

{% blocktrans with start_date=start_date.date end_date=end_date.date%}{{ nam top_ten_products(critical, high, medium, low, ticks); {% endif %} - critical = "{{in_period_counts.Critical|default_if_none:0}}"; - high = "{{in_period_counts.High|default_if_none:0}}"; - medium = "{{in_period_counts.Medium|default_if_none:0}}"; - low = "{{in_period_counts.Low|default_if_none:0}}"; + critical = "{{in_period_counts.critical|default_if_none:0}}"; + high = "{{in_period_counts.high|default_if_none:0}}"; + medium = "{{in_period_counts.medium|default_if_none:0}}"; + low = "{{in_period_counts.low|default_if_none:0}}"; severity_pie(critical, high, medium, low); critical = "{{accepted_in_period_counts.critical|default_if_none:0}}"; @@ -281,10 +281,10 @@

{% blocktrans with start_date=start_date.date end_date=end_date.date%}{{ nam low = "{{accepted_in_period_counts.low|default_if_none:0}}"; total_accepted_pie(critical, high, medium, low); - critical = "{{closed_in_period_counts.Critical}}"; - high = "{{closed_in_period_counts.High}}"; - medium = "{{closed_in_period_counts.Medium}}"; - low = "{{closed_in_period_counts.Low}}"; + critical = "{{closed_in_period_counts.critical}}"; + high = "{{closed_in_period_counts.high}}"; + medium = "{{closed_in_period_counts.medium}}"; + low = "{{closed_in_period_counts.low}}"; total_closed_pie(critical, high, medium, low); {% if punchcard %} diff --git a/dojo/templates/dojo/metrics.html b/dojo/templates/dojo/metrics.html index f44b469a8b..f4692c8252 100644 --- a/dojo/templates/dojo/metrics.html +++ b/dojo/templates/dojo/metrics.html @@ -330,6 +330,11 @@

{% trans "Metric Counts" %}

{% endif %}
+ {% if findings.count > max_findings_details %} +
+ Note: displaying {{ max_findings_details }} of {{ findings.count }} total Findings. +
+ {% endif %} @@ -342,13 +347,13 @@

{% trans "Metric Counts" %}

- {% for finding in findings.qs %} + {% for finding in findings|slice:max_findings_details %} - - - - - - + + + + + +
{% trans "Team" %} {% trans "Reporter" %}
{{ finding.test.engagement.product.prod_type.name }} - - {{ finding.test.engagement.product.name|truncatechars_html:20 }} - + + {{ finding.test.engagement.product.name|truncatechars_html:20 }} + {{ finding.severity_display }} @@ -380,12 +385,12 @@

{% trans "Opened During Period" %}

{% trans "Total" %}
{{ in_period_counts.Critical }}{{ in_period_counts.High }}{{ in_period_counts.Medium }}{{ in_period_counts.Low }}{{ in_period_counts.Info }}{{ in_period_counts.Total }}{{ in_period_counts.critical }}{{ in_period_counts.high }}{{ in_period_counts.medium }}{{ in_period_counts.low }}{{ in_period_counts.info }}{{ in_period_counts.total }}
{% trans "Opened During Period" %} - {% for key, value in in_period_details.items %} + {% for product in in_period_details %} - - - - - - - + + + + + + + {% endfor %}
{% trans "Info" %} {% trans "Total" %}
{{ key }}{{ value.Critical }}{{ value.High }}{{ value.Medium }}{{ value.Low }}{{ value.Info }}{{ value.Total }} + + {{ product.product_name }} + + {{ product.critical }}{{ product.high }}{{ product.medium }}{{ product.low }}{{ product.info }}{{ product.total }}
@@ -446,15 +455,19 @@

{% trans "Accepted in Period" %}

{% trans "Info" %} {% trans "Total" %} - {% for key, value in accepted_in_period_details.items %} + {% for product in accepted_in_period_details %} - {{ key }} - {{ value.Critical }} - {{ value.High }} - {{ value.Medium }} - {{ value.Low }} - {{ value.Info }} - {{ value.Total }} + + + {{ product.product_name }} + + + {{ product.critical }} + {{ product.high }} + {{ product.medium }} + {{ product.low }} + {{ product.info }} + {{ product.total }} {% endfor %} @@ -476,12 +489,12 @@

{% trans "Closed in Period" %}

{% trans "Total" %} - {{ closed_in_period_counts.Critical }} - {{ closed_in_period_counts.High }} - {{ closed_in_period_counts.Medium }} - {{ closed_in_period_counts.Low }} - {{ closed_in_period_counts.Info }} - {{ closed_in_period_counts.Total }} + {{ closed_in_period_counts.critical }} + {{ closed_in_period_counts.high }} + {{ closed_in_period_counts.medium }} + {{ closed_in_period_counts.low }} + {{ closed_in_period_counts.info }} + {{ closed_in_period_counts.total }} {% trans "Closed in Period" %} - {% for key, value in closed_in_period_details.items %} + {% for product in closed_in_period_details %} - - - - - - - + + + + + + + {% endfor %}
{% trans "Total" %}
{{ key }}{{ value.Critical }}{{ value.High }}{{ value.Medium }}{{ value.Low }}{{ value.Info }}{{ value.Total }} + + {{ product.product_name }} + + {{ product.critical }}{{ product.high }}{{ product.medium }}{{ product.low }}{{ product.info }}{{ product.total }}
@@ -519,18 +536,20 @@

{% trans "Closed in Period" %}

{% trans "High" %} {% trans "Medium" %} {% trans "Low" %} + {% trans "Info" %} {% trans "Total" %} {% trans "Closed*" %} - {% for week in opened_per_week|slice:'1:' %} + {% for week in opened_per_week %} - {{ week.1|date:"m-d-Y" }} - {{ week.2 }} - {{ week.3 }} - {{ week.4 }} - {{ week.5 }} - {{ week.6 }} - {{ week.7 }} + {{ week.grouped_date|date:"m-d-Y" }} + {{ week.critical }} + {{ week.high }} + {{ week.medium }} + {{ week.low }} + {{ week.info }} + {{ week.total }} + {{ week.closed }} {% endfor %} @@ -543,18 +562,20 @@

{% trans "Closed in Period" %}

{% trans "High" %} {% trans "Medium" %} {% trans "Low" %} + {% trans "Info" %} {% trans "Total" %} {% trans "Closed*" %} - {% for month in opened_per_month|slice:'1:' %} + {% for month in opened_per_month %} - {{ month.1|date:"m-Y" }} - {{ month.2 }} - {{ month.3 }} - {{ month.4 }} - {{ month.5 }} - {{ month.6 }} - {{ month.7 }} + {{ month.grouped_date|date:"m-Y" }} + {{ month.critical }} + {{ month.high }} + {{ month.medium }} + {{ month.low }} + {{ month.info }} + {{ month.total }} + {{ month.closed }} {% endfor %} @@ -570,16 +591,18 @@

{% trans "Closed in Period" %}

{% trans "High" %} {% trans "Medium" %} {% trans "Low" %} + {% trans "Info" %} {% trans "Total" %} - {% for week in accepted_per_week|slice:'1:' %} + {% for week in accepted_per_week %} - {{ week.1|date:"m-d-Y" }} - {{ week.2 }} - {{ week.3 }} - {{ week.4 }} - {{ week.5 }} - {{ week.6 }} + {{ week.grouped_date|date:"m-d-Y" }} + {{ week.critical }} + {{ week.high }} + {{ week.medium }} + {{ week.low }} + {{ week.info }} + {{ week.total }} {% endfor %} @@ -592,16 +615,18 @@

{% trans "Closed in Period" %}

{% trans "High" %} {% trans "Medium" %} {% trans "Low" %} + {% trans "Info" %} {% trans "Total" %} - {% for month in accepted_per_month|slice:'1:' %} + {% for month in accepted_per_month %} - {{ month.1|date:"m-Y" }} - {{ month.2 }} - {{ month.3 }} - {{ month.4 }} - {{ month.5 }} - {{ month.6 }} + {{ month.grouped_date|date:"m-Y" }} + {{ month.critical }} + {{ month.high }} + {{ month.medium }} + {{ month.low }} + {{ month.info }} + {{ month.total }} {% endfor %} @@ -616,19 +641,19 @@

{% trans "Closed in Period" %}

{% trans "0 - 30 Days" %} - {{ age_detail.0 }} + {{ age_detail.age_under_30 }} {% trans "31 - 60 Days" %} - {{ age_detail.1 }} + {{ age_detail.age_31_60 }} {% trans "61 - 90 Days" %} - {{ age_detail.2 }} + {{ age_detail.age_61_90 }} {% trans "91+ Days" %} - {{ age_detail.3 }} + {{ age_detail.age_90_plus }}
@@ -681,11 +706,11 @@

{% trans "Closed in Period" %}

var high = []; var medium = []; var low = []; - {% for month in opened_per_month|slice:'1:' %} - critical.push([{{ month.0 }}, {{ month.2 }}]); - high.push([{{ month.0 }}, {{ month.3 }}]); - medium.push([{{ month.0 }}, {{ month.4 }}]); - low.push([{{ month.0 }}, {{ month.5 }}]); + {% for month in opened_per_month %} + critical.push([{{ month.epoch }}, {{ month.critical }}]); + high.push([{{ month.epoch }}, {{ month.high }}]); + medium.push([{{ month.epoch }}, {{ month.medium }}]); + low.push([{{ month.epoch }}, {{ month.low }}]); {% endfor %} {% if opened_per_month %} opened_per_month_2(critical, high, medium, low); @@ -695,11 +720,11 @@

{% trans "Closed in Period" %}

high = []; medium = []; low = []; - {% for month in active_per_month|slice:'1:' %} - critical.push([{{ month.0 }}, {{ month.2 }}]); - high.push([{{ month.0 }}, {{ month.3 }}]); - medium.push([{{ month.0 }}, {{ month.4 }}]); - low.push([{{ month.0 }}, {{ month.5 }}]); + {% for month in active_per_month %} + critical.push([{{ month.epoch }}, {{ month.critical }}]); + high.push([{{ month.epoch }}, {{ month.high }}]); + medium.push([{{ month.epoch }}, {{ month.medium }}]); + low.push([{{ month.epoch }}, {{ month.low }}]); {% endfor %} {% if active_per_month %} active_per_month(critical, high, medium, low); @@ -709,11 +734,11 @@

{% trans "Closed in Period" %}

high = []; medium = []; low = []; - {% for month in accepted_per_month|slice:'1:' %} - critical.push([{{ month.0 }}, {{ month.2 }}]); - high.push([{{ month.0 }}, {{ month.3 }}]); - medium.push([{{ month.0 }}, {{ month.4 }}]); - low.push([{{ month.0 }}, {{ month.5 }}]); + {% for month in accepted_per_month %} + critical.push([{{ month.epoch }}, {{ month.critical }}]); + high.push([{{ month.epoch }}, {{ month.high }}]); + medium.push([{{ month.epoch }}, {{ month.medium }}]); + low.push([{{ month.epoch }}, {{ month.low }}]); {% endfor %} {% if accepted_per_month %} accepted_per_month_2(critical, high, medium, low); @@ -727,17 +752,17 @@

{% trans "Closed in Period" %}

         var high1 = [];
         var medium1 = [];
         var low1 = [];
-        {% for month in opened_per_week|slice:'1:' %}
-        critical.push([{{ month.0 }}, {{ month.2 }}]);
-        high.push([{{ month.0 }}, {{ month.3 }}]);
-        medium.push([{{ month.0 }}, {{ month.4 }}]);
-        low.push([{{ month.0 }}, {{ month.5 }}]);
+        {% for week in opened_per_week %}
+        critical.push([{{ week.epoch }}, {{ week.critical }}]);
+        high.push([{{ week.epoch }}, {{ week.high }}]);
+        medium.push([{{ week.epoch }}, {{ week.medium }}]);
+        low.push([{{ week.epoch }}, {{ week.low }}]);
         {% endfor %}
-        {% for month in accepted_per_week|slice:'1:' %}
-        critical1.push([{{ month.0 }}, {{ month.2 }}]);
-        high1.push([{{ month.0 }}, {{ month.3 }}]);
-        medium1.push([{{ month.0 }}, {{ month.4 }}]);
-        low1.push([{{ month.0 }}, {{ month.5 }}]);
+        {% for week in accepted_per_week %}
+        critical1.push([{{ week.epoch }}, {{ week.critical }}]);
+        high1.push([{{ week.epoch }}, {{ week.high }}]);
+        medium1.push([{{ week.epoch }}, {{ week.medium }}]);
+        low1.push([{{ week.epoch }}, {{ week.low }}]);
         {% endfor %}
         {% if not critical_prods %}
         opened_per_week_2(critical, high, medium, low);
diff --git a/unittests/test_metrics_queries.py b/unittests/test_metrics_queries.py
index d65fc49f79..6378248e0e 100644
--- a/unittests/test_metrics_queries.py
+++ b/unittests/test_metrics_queries.py
@@ -9,7 +9,7 @@
 from django.test import RequestFactory
 from django.urls import reverse
 
-from dojo.metrics import views
+from dojo.metrics import utils
 from dojo.models import User
 
 from .dojo_test_case import DojoTestCase
@@ -34,7 +34,7 @@ def test_finding_queries_no_data(self):
         self.request.user = user3
 
         product_types = []
-        finding_queries = views.finding_querys(
+        finding_queries = utils.finding_queries(
             product_types,
             self.request
         )
@@ -50,9 +50,9 @@ def test_finding_queries(self, mock_timezone):
         mock_timezone.return_value = mock_datetime
 
         # Queries over Finding and Risk_Acceptance
-        with self.assertNumQueries(25):
+        with self.assertNumQueries(22):
             product_types = []
-            finding_queries = views.finding_querys(
+            finding_queries = utils.finding_queries(
                 product_types,
                 self.request
             )
@@ -77,7 +77,7 @@ def test_finding_queries(self, mock_timezone):
         # Assert that we get expected querysets back. This is to be used to
         # support refactoring, in attempt of lowering the query count.
         self.assertSequenceEqual(
-            finding_queries['all'].qs.values(),
+            finding_queries['all'].values(),
             []
             # [{'id': 226, 'title': 'Test Endpoint Mitigation - Finding F1 Without Endpoints', 'date': date(2022, 10, 15), 'sla_start_date': None, 'cwe': None, 'cve': None, 'cvssv3': None, 'cvssv3_score': None, 'url': None, 'severity': 'Info', 'description': 'vulnerability', 'mitigation': '', 'impact': '', 'steps_to_reproduce': '', 'severity_justification': '', 'references': '', 'test_id': 89, 'active': True, 'verified': True, 'false_p': False, 'duplicate': False, 'duplicate_finding_id': None, 'out_of_scope': False, 'risk_accepted': False, 'under_review': False, 'last_status_update': None, 'review_requested_by_id': None, 'under_defect_review': False, 'defect_review_requested_by_id': None, 'is_mitigated': False, 'thread_id': 0, 'mitigated': None, 'mitigated_by_id': None, 'reporter_id': 1, 'numerical_severity': 'S4', 'last_reviewed': None, 'last_reviewed_by_id': None, 'param': None, 'payload': None, 'hash_code': 'a6dd6bd359ff0b504a21b8a7ae5e59f1b40dd0fa1715728bd58de8f688f01b19', 'line': None, 'file_path': '', 'component_name': None, 'component_version': None, 'static_finding': False, 'dynamic_finding': True, 'created': datetime(2022, 10, 15, 23, 12, 52, 966000, tzinfo=pytz.UTC), 'scanner_confidence': None, 'sonarqube_issue_id': None, 'unique_id_from_tool': None, 'vuln_id_from_tool': None, 'sast_source_object': None, 'sast_sink_object': None, 'sast_source_line': None, 'sast_source_file_path': None, 'nb_occurences': None, 'publish_date': None, 'service': None, 'planned_remediation_date': None, 'test__engagement__product__prod_type__member': True, 'test__engagement__product__member': True, 'test__engagement__product__prod_type__authorized_group': False, 'test__engagement__product__authorized_group': False}]
         )
@@ -91,52 +91,46 @@ def test_finding_queries(self, mock_timezone):
         )
         self.assertSequenceEqual(
             list(finding_queries['accepted_count'].values()),
-            [None, None, None, None, None, None]
+            [0, 0, 0, 0, 0, 0]
         )
         self.assertSequenceEqual(
             finding_queries['top_ten'].values(),
             []
         )
-        self.assertSequenceEqual(
+        self.assertEqual(
             list(finding_queries['monthly_counts'].values()),
             [
                 [
-                    ['Timestamp', 'Date', 'S0', 'S1', 'S2', 'S3', 'Total', 'Closed'],
-                    [1604188800000, datetime(2020, 11, 1, 0, 0, tzinfo=timezone.utc), 0, 0, 0, 0, 0, 0],
-                    [1606780800000, datetime(2020, 12, 1, 0, 0, tzinfo=timezone.utc), 0, 0, 0, 0, 0, 0]
+                    {'epoch': 1604188800000, 'grouped_date': date(2020, 11, 1), 'critical': 0, 'high': 0, 'medium': 0, 'low': 0, 'info': 0, 'total': 0, 'closed': 0},
+                    {'epoch': 1606780800000, 'grouped_date': date(2020, 12, 1), 'critical': 0, 'high': 0, 'medium': 0, 'low': 0, 'info': 0, 'total': 0, 'closed': 0}
                 ],
                 [
-                    ['Timestamp', 'Date', 'S0', 'S1', 'S2', 'S3', 'Total', 'Closed'],
-                    [1604188800000, datetime(2020, 11, 1, 0, 0, tzinfo=timezone.utc), 0, 0, 0, 0, 0],
-                    [1606780800000, datetime(2020, 12, 1, 0, 0, tzinfo=timezone.utc), 0, 0, 0, 0, 0]
+                    {'epoch': 1604188800000, 'grouped_date': date(2020, 11, 1), 'critical': 0, 'high': 0, 'medium': 0, 'low': 0, 'info': 0, 'total': 0},
+                    {'epoch': 1606780800000, 'grouped_date': date(2020, 12, 1), 'critical': 0, 'high': 0, 'medium': 0, 'low': 0, 'info': 0, 'total': 0}
                 ],
                 [
-                    ['Timestamp', 'Date', 'S0', 'S1', 'S2', 'S3', 'Total', 'Closed'],
-                    [1604188800000, datetime(2020, 11, 1, 0, 0, tzinfo=timezone.utc), 0, 0, 0, 0, 0],
-                    [1606780800000, datetime(2020, 12, 1, 0, 0, tzinfo=timezone.utc), 0, 0, 0, 0, 0]
+                    {'epoch': 1604188800000, 'grouped_date': date(2020, 11, 1), 'critical': 0, 'high': 0, 'medium': 0, 'low': 0, 'info': 0, 'total': 0},
+                    {'epoch': 1606780800000, 'grouped_date': date(2020, 12, 1), 'critical': 0, 'high': 0, 'medium': 0, 'low': 0, 'info': 0, 'total': 0}
                 ]
             ]
         )
-        self.assertDictEqual(
+        self.assertEqual(
             finding_queries['weekly_counts'],
             {
                 'opened_per_period': [
-                    ['Timestamp', 'Date', 'S0', 'S1', 'S2', 'S3', 'Total', 'Closed'],
-                    [1607299200000, datetime(2020, 12, 7, 0, 0, tzinfo=timezone.utc), 0, 0, 0, 0, 0, 0],
-                    [1607904000000, datetime(2020, 12, 14, 0, 0, tzinfo=timezone.utc), 0, 0, 0, 0, 0, 0],
-                    [1608508800000, datetime(2020, 12, 21, 0, 0, tzinfo=timezone.utc), 0, 0, 0, 0, 0, 0]
+                    {'epoch': 1606694400000, 'grouped_date': date(2020, 11, 30), 'total': 0, 'critical': 0, 'high': 0, 'medium': 0, 'low': 0, 'info': 0, 'closed': 0},
+                    {'epoch': 1607299200000, 'grouped_date': date(2020, 12, 7), 'total': 0, 'critical': 0, 'high': 0, 'medium': 0, 'low': 0, 'info': 0, 'closed': 0},
+                    {'epoch': 1607904000000, 'grouped_date': date(2020, 12, 14), 'total': 0, 'critical': 0, 'high': 0, 'medium': 0, 'low': 0, 'info': 0, 'closed': 0}
                 ],
                 'accepted_per_period': [
-                    ['Timestamp', 'Date', 'S0', 'S1', 'S2', 'S3', 'Total', 'Closed'],
-                    [1607299200000, datetime(2020, 12, 7, 0, 0, tzinfo=timezone.utc), 0, 0, 0, 0, 0],
-                    [1607904000000, datetime(2020, 12, 14, 0, 0, tzinfo=timezone.utc), 0, 0, 0, 0, 0],
-                    [1608508800000, datetime(2020, 12, 21, 0, 0, tzinfo=timezone.utc), 0, 0, 0, 0, 0]
+                    {'epoch': 1606694400000, 'grouped_date': date(2020, 11, 30), 'total': 0, 'critical': 0, 'high': 0, 'medium': 0, 'low': 0, 'info': 0},
+                    {'epoch': 1607299200000, 'grouped_date': date(2020, 12, 7), 'total': 0, 'critical': 0, 'high': 0, 'medium': 0, 'low': 0, 'info': 0},
+                    {'epoch': 1607904000000, 'grouped_date': date(2020, 12, 14), 'total': 0, 'critical': 0, 'high': 0, 'medium': 0, 'low': 0, 'info': 0}
                 ],
                 'active_per_period': [
-                    ['Timestamp', 'Date', 'S0', 'S1', 'S2', 'S3', 'Total', 'Closed'],
-                    [1607299200000, datetime(2020, 12, 7, 0, 0, tzinfo=timezone.utc), 0, 0, 0, 0, 0],
-                    [1607904000000, datetime(2020, 12, 14, 0, 0, tzinfo=timezone.utc), 0, 0, 0, 0, 0],
-                    [1608508800000, datetime(2020, 12, 21, 0, 0, tzinfo=timezone.utc), 0, 0, 0, 0, 0]
+                    {'epoch': 1606694400000, 'grouped_date': date(2020, 11, 30), 'total': 0, 'critical': 0, 'high': 0, 'medium': 0, 'low': 0, 'info': 0},
+                    {'epoch': 1607299200000, 'grouped_date': date(2020, 12, 7), 'total': 0, 'critical': 0, 'high': 0, 'medium': 0, 'low': 0, 'info': 0},
+                    {'epoch': 1607904000000, 'grouped_date': date(2020, 12, 14), 'total': 0, 'critical': 0, 'high': 0, 'medium': 0, 'low': 0, 'info': 0}
                 ]
             }
         )
@@ -159,7 +153,7 @@ def test_endpoint_queries_no_data(self):
         self.request.user = user3
 
         product_types = []
-        endpoint_queries = views.endpoint_querys(
+        endpoint_queries = utils.endpoint_queries(
             product_types,
             self.request
         )
@@ -171,9 +165,9 @@ def test_endpoint_queries(self):
         # Queries over Finding and Endpoint_Status
-        with self.assertNumQueries(70):
+        with self.assertNumQueries(43):
             product_types = []
-            endpoint_queries = views.endpoint_querys(
+            endpoint_queries = utils.endpoint_queries(
                 product_types,
                 self.request
             )
@@ -224,46 +218,40 @@ def test_endpoint_queries(self):
             endpoint_queries['top_ten'].values(),
             [],
         )
-        self.assertSequenceEqual(
+        self.assertEqual(
            list(endpoint_queries['monthly_counts'].values()),
            [
                [
-                    ['Timestamp', 'Date', 'S0', 'S1', 'S2', 'S3', 'Total', 'Closed'],
-                    [1590969600000, datetime(2020, 6, 1, 0, 0, tzinfo=timezone.utc), 0, 0, 0, 0, 0, 0],
-                    [1593561600000, datetime(2020, 7, 1, 0, 0, tzinfo=timezone.utc), 0, 1, 0, 0, 1, 0],
+                    {'epoch': 1590969600000, 'grouped_date': date(2020, 6, 1), 'critical': 0, 'high': 0, 'medium': 0, 'low': 0, 'info': 0, 'total': 0, 'closed': 0},
+                    {'epoch': 1593561600000, 'grouped_date': date(2020, 7, 1), 'critical': 0, 'high': 1, 'medium': 0, 'low': 0, 'info': 5, 'total': 6, 'closed': 0}
                ],
                [
-                    ['Timestamp', 'Date', 'S0', 'S1', 'S2', 'S3', 'Total', 'Closed'],
-                    [1590969600000, datetime(2020, 6, 1, 0, 0, tzinfo=timezone.utc), 0, 0, 0, 0, 0],
-                    [1593561600000, datetime(2020, 7, 1, 0, 0, tzinfo=timezone.utc), 0, 0, 0, 0, 0]
+                    {'epoch': 1590969600000, 'grouped_date': date(2020, 6, 1), 'critical': 0, 'high': 0, 'medium': 0, 'low': 0, 'info': 0, 'total': 0},
+                    {'epoch': 1593561600000, 'grouped_date': date(2020, 7, 1), 'critical': 0, 'high': 1, 'medium': 0, 'low': 0, 'info': 4, 'total': 5}
                ],
                [
-                    ['Timestamp', 'Date', 'S0', 'S1', 'S2', 'S3', 'Total', 'Closed'],
-                    [1590969600000, datetime(2020, 6, 1, 0, 0, tzinfo=timezone.utc), 0, 0, 0, 0, 0],
-                    [1593561600000, datetime(2020, 7, 1, 0, 0, tzinfo=timezone.utc), 0, 1, 0, 0, 1],
+                    {'epoch': 1590969600000, 'grouped_date': date(2020, 6, 1), 'critical': 0, 'high': 0, 'medium': 0, 'low': 0, 'info': 0, 'total': 0},
+                    {'epoch': 1593561600000, 'grouped_date': date(2020, 7, 1), 'critical': 0, 'high': 0, 'medium': 0, 'low': 0, 'info': 1, 'total': 1}
                ]
            ],
        )
-        self.assertSequenceEqual(
+        self.assertEqual(
            list(endpoint_queries['weekly_counts'].values()),
            [
                [
-                    ['Timestamp', 'Date', 'S0', 'S1', 'S2', 'S3', 'Total', 'Closed'],
-                    [1593388800000, datetime(2020, 6, 29, 0, 0, tzinfo=timezone.utc), 0, 1, 0, 0, 1, 0],
-                    [1593993600000, datetime(2020, 7, 6, 0, 0, tzinfo=timezone.utc), 0, 0, 0, 0, 0, 0],
-                    [1594598400000, datetime(2020, 7, 13, 0, 0, tzinfo=timezone.utc), 0, 0, 0, 0, 0, 0]
+                    {'epoch': 1592784000000, 'grouped_date': date(2020, 6, 22), 'critical': 0, 'high': 0, 'medium': 0, 'low': 0, 'info': 0, 'total': 0, 'closed': 0},
+                    {'epoch': 1593388800000, 'grouped_date': date(2020, 6, 29), 'critical': 0, 'high': 1, 'medium': 0, 'low': 0, 'info': 5, 'total': 6, 'closed': 0},
+                    {'epoch': 1593993600000, 'grouped_date': date(2020, 7, 6), 'critical': 0, 'high': 0, 'medium': 0, 'low': 0, 'info': 0, 'total': 0, 'closed': 0}
                ],
                [
-                    ['Timestamp', 'Date', 'S0', 'S1', 'S2', 'S3', 'Total', 'Closed'],
-                    [1593388800000, datetime(2020, 6, 29, 0, 0, tzinfo=timezone.utc), 0, 0, 0, 0, 0],
-                    [1593993600000, datetime(2020, 7, 6, 0, 0, tzinfo=timezone.utc), 0, 0, 0, 0, 0],
-                    [1594598400000, datetime(2020, 7, 13, 0, 0, tzinfo=timezone.utc), 0, 0, 0, 0, 0]
+                    {'epoch': 1592784000000, 'grouped_date': date(2020, 6, 22), 'critical': 0, 'high': 0, 'medium': 0, 'low': 0, 'info': 0, 'total': 0},
+                    {'epoch': 1593388800000, 'grouped_date': date(2020, 6, 29), 'critical': 0, 'high': 1, 'medium': 0, 'low': 0, 'info': 4, 'total': 5},
+                    {'epoch': 1593993600000, 'grouped_date': date(2020, 7, 6), 'critical': 0, 'high': 0, 'medium': 0, 'low': 0, 'info': 0, 'total': 0}
                ],
                [
-                    ['Timestamp', 'Date', 'S0', 'S1', 'S2', 'S3', 'Total', 'Closed'],
-                    [1593388800000, datetime(2020, 6, 29, 0, 0, tzinfo=timezone.utc), 0, 1, 0, 0, 1],
-                    [1593993600000, datetime(2020, 7, 6, 0, 0, tzinfo=timezone.utc), 0, 1, 0, 0, 1],
-                    [1594598400000, datetime(2020, 7, 13, 0, 0, tzinfo=timezone.utc), 0, 1, 0, 0, 1]
+                    {'epoch': 1592784000000, 'grouped_date': date(2020, 6, 22), 'critical': 0, 'high': 0, 'medium': 0, 'low': 0, 'info': 0, 'total': 0},
+                    {'epoch': 1593388800000, 'grouped_date': date(2020, 6, 29), 'critical': 0, 'high': 0, 'medium': 0, 'low': 0, 'info': 1, 'total': 1},
+                    {'epoch': 1593993600000, 'grouped_date': date(2020, 7, 6), 'critical': 0, 'high': 0, 'medium': 0, 'low': 0, 'info': 0, 'total': 0}
                ]
            ],
        )
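
For context on the data shape consumed above: the reworked template loops (week.epoch, week.critical, ...) and the rewritten test expectations both assume each charted period is a dict keyed by severity name, carrying the bucket start both as a date ('grouped_date') and as a UTC millisecond epoch ('epoch') for the chart x-axis, plus 'total' and, for opened series, 'closed'. This replaces the old positional rows headed by ['Timestamp', 'Date', 'S0', 'S1', 'S2', 'S3', 'Total', 'Closed']. The sketch below illustrates that row shape only; the helper name period_row and its counts/closed parameters are hypothetical and do not appear in this patch.

from datetime import date, datetime, timezone
from typing import Optional

SEVERITIES = ('critical', 'high', 'medium', 'low', 'info')


def period_row(grouped_date: date, counts: dict, closed: Optional[int] = None) -> dict:
    """Build one per-period chart row in the dict shape the tests expect."""
    # The charts expect the bucket start as a UTC epoch in milliseconds.
    epoch_ms = int(datetime(grouped_date.year, grouped_date.month, grouped_date.day,
                            tzinfo=timezone.utc).timestamp() * 1000)
    row = {'epoch': epoch_ms, 'grouped_date': grouped_date}
    # Severities with no findings default to 0 rather than None, matching the
    # switch from Sum- to Count-based severity aggregation noted in the commit log.
    for severity in SEVERITIES:
        row[severity] = counts.get(severity, 0)
    row['total'] = sum(row[severity] for severity in SEVERITIES)
    if closed is not None:
        row['closed'] = closed
    return row


# An empty November 2020 bucket reproduces the first expected monthly row in
# test_finding_queries above:
assert period_row(date(2020, 11, 1), {}, closed=0) == {
    'epoch': 1604188800000, 'grouped_date': date(2020, 11, 1),
    'critical': 0, 'high': 0, 'medium': 0, 'low': 0, 'info': 0,
    'total': 0, 'closed': 0,
}

Keying rows by name rather than position is what lets the template read week.critical instead of month.2, and lets the tests compare plain dicts instead of parallel lists.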