diff --git a/.github/workflows/k8s-tests.yml b/.github/workflows/k8s-tests.yml
index dd34b88d76f..f5ec107d83f 100644
--- a/.github/workflows/k8s-tests.yml
+++ b/.github/workflows/k8s-tests.yml
@@ -29,6 +29,14 @@ env:
--set mysql.enabled=false \
--set createPostgresqlSecret=true \
"
+ HELM_PGHA_DATABASE_SETTINGS: " \
+ --set database=postgresqlha \
+ --set postgresql.enabled=false \
+ --set mysql.enabled=false \
+ --set postgresqlha.enabled=true \
+ --set createPostgresqlHaSecret=true \
+ --set createPostgresqlHaPgpoolSecret=true \
+ "
jobs:
setting_minikube_cluster:
name: Kubernetes Deployment
@@ -56,6 +64,10 @@ jobs:
brokers: redis
k8s: 'v1.23.9'
os: debian
+ - databases: pgsqlha
+ brokers: rabbit
+ k8s: 'v1.23.9'
+ os: debian
- databases: pgsql
brokers: rabbit
k8s: 'v1.23.9'
diff --git a/Dockerfile.integration-tests-debian b/Dockerfile.integration-tests-debian
index 545e4e3ef79..c7db1f1feef 100644
--- a/Dockerfile.integration-tests-debian
+++ b/Dockerfile.integration-tests-debian
@@ -1,7 +1,7 @@
# code: language=Dockerfile
-FROM openapitools/openapi-generator-cli:v7.4.0@sha256:579832bed49ea6c275ce2fb5f2d515f5b03d2b6243f3c80fa8430e4f5a770e9a as openapitools
+FROM openapitools/openapi-generator-cli:v7.3.0@sha256:74b9992692c836e42a02980db4b76bee94e17075e4487cd80f5c540dd57126b9 as openapitools
FROM python:3.11.4-slim-bullseye@sha256:40319d0a897896e746edf877783ef39685d44e90e1e6de8d964d0382df0d4952 as build
WORKDIR /app
RUN \
diff --git a/components/yarn.lock b/components/yarn.lock
index d3d65c363f5..ffe72a3aaf0 100644
--- a/components/yarn.lock
+++ b/components/yarn.lock
@@ -538,6 +538,10 @@ fast-levenshtein@~2.0.6:
resolved "https://registry.yarnpkg.com/fast-levenshtein/-/fast-levenshtein-2.0.6.tgz#3d8a5c66883a16a30ca8643e851f19baa7797917"
integrity sha512-DCXu6Ifhqcks7TZKY3Hxp3y6qphY5SJZmrWMDrKcERSOXWQdMhU9Ig/PYrzyw/ul9jOIyh0N4M0tbC5hodg8dw==
+flot-axis@markrcote/flot-axislabels#*:
+ version "0.0.0"
+ resolved "https://codeload.github.com/markrcote/flot-axislabels/tar.gz/a181e09d04d120d05e5bc2baaa8738b5b3670428"
+
flot@flot/flot#~0.8.3:
version "0.8.3"
resolved "https://codeload.github.com/flot/flot/tar.gz/453b017cc5acfd75e252b93e8635f57f4196d45d"
diff --git a/docs/content/en/integrations/parsers/file/checkmarx_one.md b/docs/content/en/integrations/parsers/file/checkmarx_one.md
deleted file mode 100644
index 1d5a07f0ca2..00000000000
--- a/docs/content/en/integrations/parsers/file/checkmarx_one.md
+++ /dev/null
@@ -1,8 +0,0 @@
----
-title: "Checkmarx One Scan"
-toc_hide: true
----
-Import JSON Checkmarx One scanner reports
-
-### Sample Scan Data
-Sample Checkmarx One scans can be found [here](https://github.com/DefectDojo/django-DefectDojo/tree/master/unittests/scans/checkmarx_one).
\ No newline at end of file
diff --git a/docs/content/en/integrations/parsers/file/crunch42.md b/docs/content/en/integrations/parsers/file/crunch42.md
deleted file mode 100644
index e8aa1b1e556..00000000000
--- a/docs/content/en/integrations/parsers/file/crunch42.md
+++ /dev/null
@@ -1,8 +0,0 @@
----
-title: "Crunch42 Scan"
-toc_hide: true
----
-Import JSON findings from Crunch42 vulnerability scan tool.
-
-### Sample Scan Data
-Sample Crunch42 Scan scans can be found [here](https://github.com/DefectDojo/django-DefectDojo/tree/master/unittests/scans/crunch42).
\ No newline at end of file
diff --git a/docs/content/en/integrations/parsers/file/npm_audit_7_plus.md b/docs/content/en/integrations/parsers/file/npm_audit_7_plus.md
deleted file mode 100644
index a4b4a090b08..00000000000
--- a/docs/content/en/integrations/parsers/file/npm_audit_7_plus.md
+++ /dev/null
@@ -1,26 +0,0 @@
----
-title: "NPM Audit Version 7+"
-toc_hide: true
----
-
-**Note: This parser only supports import from NPM Audit v7 or newer.**
-
-Node Package Manager (NPM) Audit plugin output file can be imported in
-JSON format. Only imports the \'vulnerabilities\' subtree.
-
-### File Types
-This parser expects a JSON file. Can only import NPM Audit files from NPM Audit v7 or newer. It aims to provide the same
-information as the non-JSON formatted output.
-
-Attempting to import a file from a version less than 7 of NPM Audit will raise an error message.
-
-### Command Used To Generate Output
-Either of these commands will work:
-- \`npm audit --json\`
-- \`npm audit fix --dry-run --json\`
-
-### Sample Scan Data
-Sample NPM Audit scans can be found [here](https://github.com/DefectDojo/django-DefectDojo/tree/master/unittests/scans/npm_audit_7_plus).
-
-### Link To Tool
-See NPM-Audit-Report on GitHub: https://github.com/npm/npm-audit-report/
diff --git a/docs/content/en/integrations/parsers/file/pip_audit.md b/docs/content/en/integrations/parsers/file/pip_audit.md
index 96b9b250d58..df24cdbe7a3 100644
--- a/docs/content/en/integrations/parsers/file/pip_audit.md
+++ b/docs/content/en/integrations/parsers/file/pip_audit.md
@@ -2,41 +2,7 @@
title: "pip-audit Scan"
toc_hide: true
---
-
-Import pip-audit JSON scan report.
-
-### File Types
-This parser expects a JSON file.
-
-The parser can handle legacy and current JSON format.
-
-The current format has added a `dependencies` element:
-
- {
- "dependencies": [
- {
- "name": "pyopenssl",
- "version": "23.1.0",
- "vulns": []
- },
- ...
- ]
- ...
- }
-
-The legacy format does not include the `dependencies` key:
-
- [
- {
- "name": "adal",
- "version": "1.2.2",
- "vulns": []
- },
- ...
- ]
+Import pip-audit JSON scan report
### Sample Scan Data
-Sample pip-audit Scan scans can be found [here](https://github.com/DefectDojo/django-DefectDojo/tree/master/unittests/scans/pip_audit).
-
-### Link To Tool
-[pip-audit](https://pypi.org/project/pip-audit/)
+Sample pip-audit Scan scans can be found [here](https://github.com/DefectDojo/django-DefectDojo/tree/master/unittests/scans/pip_audit).
\ No newline at end of file
diff --git a/dojo/endpoint/views.py b/dojo/endpoint/views.py
index 0f5b7676c75..c2b491eb1a1 100644
--- a/dojo/endpoint/views.py
+++ b/dojo/endpoint/views.py
@@ -33,6 +33,12 @@ def process_endpoints_view(request, host_view=False, vulnerable=False):
if vulnerable:
endpoints = Endpoint.objects.filter(
+ finding__active=True,
+ finding__verified=True,
+ finding__out_of_scope=False,
+ finding__mitigated__isnull=True,
+ finding__false_p=False,
+ finding__duplicate=False,
status_endpoint__mitigated=False,
status_endpoint__false_positive=False,
status_endpoint__out_of_scope=False,
@@ -118,12 +124,12 @@ def process_endpoint_view(request, eid, host_view=False):
endpoints = endpoint.host_endpoints()
endpoint_metadata = None
all_findings = endpoint.host_findings()
- active_findings = endpoint.host_active_findings()
+ active_verified_findings = endpoint.host_active_verified_findings()
else:
endpoints = None
endpoint_metadata = dict(endpoint.endpoint_meta.values_list('name', 'value'))
all_findings = endpoint.findings.all()
- active_findings = endpoint.active_findings()
+ active_verified_findings = endpoint.active_verified_findings()
if all_findings:
start_date = timezone.make_aware(datetime.combine(all_findings.last().date, datetime.min.time()))
@@ -142,8 +148,12 @@ def process_endpoint_view(request, eid, host_view=False):
monthly_counts = get_period_counts(all_findings, closed_findings, None, months_between, start_date,
relative_delta='months')
- paged_findings = get_page_items(request, active_findings, 25)
- vulnerable = active_findings.count() != 0
+ paged_findings = get_page_items(request, active_verified_findings, 25)
+
+ vulnerable = False
+
+ if active_verified_findings.count() != 0:
+ vulnerable = True
product_tab = Product_Tab(endpoint.product, "Host" if host_view else "Endpoint", tab="endpoints")
return render(request,
diff --git a/dojo/jira_link/helper.py b/dojo/jira_link/helper.py
index 7b20a8cc10b..4f7360fc465 100644
--- a/dojo/jira_link/helper.py
+++ b/dojo/jira_link/helper.py
@@ -1,5 +1,4 @@
import logging
-from typing import Any
from dojo.utils import add_error_message_to_response, get_system_setting, to_str_typed
import os
import io
@@ -696,13 +695,6 @@ def prepare_jira_issue_fields(
def add_jira_issue(obj, *args, **kwargs):
- def failure_to_add_message(message: str, exception: Exception, object: Any) -> bool:
- if exception:
- logger.exception(exception)
- logger.error(message)
- log_jira_alert(message, obj)
- return False
-
logger.info('trying to create a new jira issue for %d:%s', obj.id, to_str_typed(obj))
if not is_jira_enabled():
@@ -710,7 +702,9 @@ def failure_to_add_message(message: str, exception: Exception, object: Any) -> b
if not is_jira_configured_and_enabled(obj):
message = 'Object %s cannot be pushed to JIRA as there is no JIRA configuration for %s.' % (obj.id, to_str_typed(obj))
- return failure_to_add_message(message, None, obj)
+ logger.error(message)
+ log_jira_alert(message, obj)
+ return False
jira_project = get_jira_project(obj)
jira_instance = get_jira_instance(obj)
@@ -725,23 +719,19 @@ def failure_to_add_message(message: str, exception: Exception, object: Any) -> b
logger.warning("The JIRA issue will NOT be created.")
return False
logger.debug('Trying to create a new JIRA issue for %s...', to_str_typed(obj))
- # Attempt to get the jira connection
+ meta = None
try:
JIRAError.log_to_tempfile = False
jira = get_jira_connection(jira_instance)
- except Exception as e:
- message = f"The following jira instance could not be connected: {jira_instance} - {e.text}"
- return failure_to_add_message(message, e, obj)
- # Set the list of labels to set on the jira issue
- labels = get_labels(obj) + get_tags(obj)
- if labels:
- labels = list(dict.fromkeys(labels)) # de-dup
- # Determine what due date to set on the jira issue
- duedate = None
- if System_Settings.objects.get().enable_finding_sla:
- duedate = obj.sla_deadline()
- # Set the fields that will compose the jira issue
- try:
+
+ labels = get_labels(obj) + get_tags(obj)
+ if labels:
+ labels = list(dict.fromkeys(labels)) # de-dup
+
+ duedate = None
+ if System_Settings.objects.get().enable_finding_sla:
+ duedate = obj.sla_deadline()
+
issuetype_fields = get_issuetype_fields(jira, jira_project.project_key, jira_instance.default_issue_type)
fields = prepare_jira_issue_fields(
project_key=jira_project.project_key,
@@ -757,40 +747,16 @@ def failure_to_add_message(message: str, exception: Exception, object: Any) -> b
duedate=duedate,
issuetype_fields=issuetype_fields,
default_assignee=jira_project.default_assignee)
- except TemplateDoesNotExist as e:
- message = f"Failed to find a jira issue template to be used - {e}"
- return failure_to_add_message(message, e, obj)
- except Exception as e:
- message = f"Failed to fetch fields for {jira_instance.default_issue_type} under project {jira_project.project_key} - {e}"
- return failure_to_add_message(message, e, obj)
- # Create a new issue in Jira with the fields set in the last step
- try:
+
logger.debug('sending fields to JIRA: %s', fields)
new_issue = jira.create_issue(fields)
- logger.debug('saving JIRA_Issue for %s finding %s', new_issue.key, obj.id)
- j_issue = JIRA_Issue(jira_id=new_issue.id, jira_key=new_issue.key, jira_project=jira_project)
- j_issue.set_obj(obj)
- j_issue.jira_creation = timezone.now()
- j_issue.jira_change = timezone.now()
- j_issue.save()
- jira.issue(new_issue.id)
- logger.info('Created the following jira issue for %d:%s', obj.id, to_str_typed(obj))
- except Exception as e:
- message = f"Failed to create jira issue with the following payload: {fields} - {e}"
- return failure_to_add_message(message, e, obj)
- # Attempt to set a default assignee
- try:
if jira_project.default_assignee:
created_assignee = str(new_issue.get_field('assignee'))
logger.debug("new issue created with assignee %s", created_assignee)
if created_assignee != jira_project.default_assignee:
jira.assign_issue(new_issue.key, jira_project.default_assignee)
- except Exception as e:
- message = f"Failed to assign the default user: {jira_project.default_assignee} - {e}"
- # Do not return here as this should be a soft failure that should be logged
- failure_to_add_message(message, e, obj)
- # Upload dojo finding screenshots to Jira
- try:
+
+ # Upload dojo finding screenshots to Jira
findings = [obj]
if isinstance(obj, Finding_Group):
findings = obj.findings.all()
@@ -805,22 +771,7 @@ def failure_to_add_message(message: str, exception: Exception, object: Any) -> b
settings.MEDIA_ROOT + '/' + pic)
except FileNotFoundError as e:
logger.info(e)
- except Exception as e:
- message = f"Failed to attach attachments to the jira issue: {e}"
- # Do not return here as this should be a soft failure that should be logged
- failure_to_add_message(message, e, obj)
- # Add any notes that already exist in the finding to the JIRA
- try:
- for find in findings:
- if find.notes.all():
- for note in find.notes.all().reverse():
- add_comment(obj, note)
- except Exception as e:
- message = f"Failed to add notes to the jira ticket: {e}"
- # Do not return here as this should be a soft failure that should be logged
- failure_to_add_message(message, e, obj)
- # Determine whether to assign this new jira issue to a mapped epic
- try:
+
if jira_project.enable_engagement_epic_mapping:
eng = obj.test.engagement
logger.debug('Adding to EPIC Map: %s', eng.name)
@@ -829,11 +780,36 @@ def failure_to_add_message(message: str, exception: Exception, object: Any) -> b
add_issues_to_epic(jira, obj, epic_id=epic.jira_id, issue_keys=[str(new_issue.id)], ignore_epics=True)
else:
logger.info('The following EPIC does not exist: %s', eng.name)
- except Exception as e:
- message = f"Failed to assign jira issue to existing epic: {e}"
- return failure_to_add_message(message, e, obj)
- return True
+ # only link the new issue if it was successfully created, incl attachments and epic link
+ logger.debug('saving JIRA_Issue for %s finding %s', new_issue.key, obj.id)
+ j_issue = JIRA_Issue(
+ jira_id=new_issue.id, jira_key=new_issue.key, jira_project=jira_project)
+ j_issue.set_obj(obj)
+
+ j_issue.jira_creation = timezone.now()
+ j_issue.jira_change = timezone.now()
+ j_issue.save()
+ jira.issue(new_issue.id)
+
+ logger.info('Created the following jira issue for %d:%s', obj.id, to_str_typed(obj))
+
+ # Add any notes that already exist in the finding to the JIRA
+ for find in findings:
+ if find.notes.all():
+ for note in find.notes.all().reverse():
+ add_comment(obj, note)
+
+ return True
+ except TemplateDoesNotExist as e:
+ logger.exception(e)
+ log_jira_alert(str(e), obj)
+ return False
+ except JIRAError as e:
+ logger.exception(e)
+ logger.error("jira_meta for project: %s and url: %s meta: %s", jira_project.project_key, jira_project.jira_instance.url, json.dumps(meta, indent=4)) # this is None safe
+ log_jira_alert(e.text, obj)
+ return False
# we need two separate celery tasks due to the decorators we're using to map to/from ids
@@ -855,13 +831,6 @@ def update_jira_issue_for_finding_group(finding_group, *args, **kwargs):
def update_jira_issue(obj, *args, **kwargs):
- def failure_to_update_message(message: str, exception: Exception, obj: Any) -> bool:
- if exception:
- logger.exception(exception)
- logger.error(message)
- log_jira_alert(message, obj)
- return False
-
logger.debug('trying to update a linked jira issue for %d:%s', obj.id, to_str_typed(obj))
if not is_jira_enabled():
@@ -872,22 +841,21 @@ def failure_to_update_message(message: str, exception: Exception, obj: Any) -> b
if not is_jira_configured_and_enabled(obj):
message = 'Object %s cannot be pushed to JIRA as there is no JIRA configuration for %s.' % (obj.id, to_str_typed(obj))
- return failure_to_update_message(message, None, obj)
+ logger.error(message)
+ log_jira_alert(message, obj)
+ return False
j_issue = obj.jira_issue
+ meta = None
try:
JIRAError.log_to_tempfile = False
jira = get_jira_connection(jira_instance)
issue = jira.issue(j_issue.jira_id)
- except Exception as e:
- message = f"The following jira instance could not be connected: {jira_instance} - {e}"
- return failure_to_update_message(message, e, obj)
- # Set the list of labels to set on the jira issue
- labels = get_labels(obj) + get_tags(obj)
- if labels:
- labels = list(dict.fromkeys(labels)) # de-dup
- # Set the fields that will compose the jira issue
- try:
+
+ labels = get_labels(obj) + get_tags(obj)
+ if labels:
+ labels = list(dict.fromkeys(labels)) # de-dup
+
issuetype_fields = get_issuetype_fields(jira, jira_project.project_key, jira_instance.default_issue_type)
fields = prepare_jira_issue_fields(
project_key=jira_project.project_key,
@@ -900,38 +868,26 @@ def failure_to_update_message(message: str, exception: Exception, obj: Any) -> b
# Do not update the priority in jira after creation as this could have changed in jira, but should not change in dojo
# priority_name=jira_priority(obj),
issuetype_fields=issuetype_fields)
- except Exception as e:
- message = f"Failed to fetch fields for {jira_instance.default_issue_type} under project {jira_project.project_key} - {e}"
- return failure_to_update_message(message, e, obj)
- # Update the issue in jira
- try:
+
logger.debug('sending fields to JIRA: %s', fields)
+
issue.update(
summary=fields['summary'],
description=fields['description'],
# Do not update the priority in jira after creation as this could have changed in jira, but should not change in dojo
# priority=fields['priority'],
fields=fields)
- j_issue.jira_change = timezone.now()
- j_issue.save()
- except Exception as e:
- message = f"Failed to update the jira issue with the following payload: {fields} - {e}"
- return failure_to_update_message(message, e, obj)
- # Update the status in jira
- try:
+
push_status_to_jira(obj, jira_instance, jira, issue)
- except Exception as e:
- message = f"Failed to update the jira issue status - {e}"
- return failure_to_update_message(message, e, obj)
- # Upload dojo finding screenshots to Jira
- try:
+
+ # Upload dojo finding screenshots to Jira
findings = [obj]
if isinstance(obj, Finding_Group):
findings = obj.findings.all()
for find in findings:
for pic in get_file_images(find):
- # It doesn't look like the celery container has anything in the media
+ # It doesn't look like the celery cotainer has anything in the media
# folder. Has this feature ever worked?
try:
jira_attachment(
@@ -939,12 +895,7 @@ def failure_to_update_message(message: str, exception: Exception, obj: Any) -> b
settings.MEDIA_ROOT + '/' + pic)
except FileNotFoundError as e:
logger.info(e)
- except Exception as e:
- message = f"Failed to attach attachments to the jira issue: {e}"
- # Do not return here as this should be a soft failure that should be logged
- failure_to_update_message(message, e, obj)
- # Determine whether to assign this new jira issue to a mapped epic
- try:
+
if jira_project.enable_engagement_epic_mapping:
eng = find.test.engagement
logger.debug('Adding to EPIC Map: %s', eng.name)
@@ -953,11 +904,20 @@ def failure_to_update_message(message: str, exception: Exception, obj: Any) -> b
add_issues_to_epic(jira, obj, epic_id=epic.jira_id, issue_keys=[str(j_issue.jira_id)], ignore_epics=True)
else:
logger.info('The following EPIC does not exist: %s', eng.name)
- except Exception as e:
- message = f"Failed to assign jira issue to existing epic: {e}"
- return failure_to_update_message(message, e, obj)
- return True
+ j_issue.jira_change = timezone.now()
+ j_issue.save()
+
+ logger.debug('Updated the following linked jira issue for %d:%s', find.id, find.title)
+ return True
+
+ except JIRAError as e:
+ logger.exception(e)
+ logger.error("jira_meta for project: %s and url: %s meta: %s", jira_project.project_key, jira_project.jira_instance.url, json.dumps(meta, indent=4)) # this is None safe
+ if issue_from_jira_is_active(issue):
+ # Only alert if the upstream JIRA is active, we don't care about closed issues
+ log_jira_alert(e.text, obj)
+ return False
def get_jira_issue_from_jira(find):
diff --git a/dojo/locale/en/LC_MESSAGES/django.po b/dojo/locale/en/LC_MESSAGES/django.po
index 92e365e334b..ab26c8cbdb4 100644
--- a/dojo/locale/en/LC_MESSAGES/django.po
+++ b/dojo/locale/en/LC_MESSAGES/django.po
@@ -3748,7 +3748,7 @@ msgid ""
"tags, references, languages or technologies contain the search query and "
"products whose\n"
" name, tags or description contain the "
-"search query.
Advanced search operators: (Restrict results to a certain "
+"search query. Advanced search operators: (Restrict results to a certain "
"type) product:,\n"
" engagement:, finding:, endpoint:, tag:, "
"language:, technology: or vulnerability_id:.\n"
diff --git a/dojo/models.py b/dojo/models.py
index 36a7d2e5200..362ec399b69 100755
--- a/dojo/models.py
+++ b/dojo/models.py
@@ -1124,7 +1124,7 @@ def endpoint_count(self):
endpoints = getattr(self, 'active_endpoints', None)
if endpoints:
return len(self.active_endpoints)
- return 0
+ return None
def open_findings(self, start_date=None, end_date=None):
if start_date is None or end_date is None:
diff --git a/dojo/product/views.py b/dojo/product/views.py
index 6291540342e..ee7c3b35e80 100755
--- a/dojo/product/views.py
+++ b/dojo/product/views.py
@@ -112,11 +112,8 @@ def prefetch_for_product(prods):
prefetched_prods = prefetched_prods.prefetch_related('members')
prefetched_prods = prefetched_prods.prefetch_related('prod_type__members')
active_endpoint_query = Endpoint.objects.filter(
- status_endpoint__mitigated=False,
- status_endpoint__false_positive=False,
- status_endpoint__out_of_scope=False,
- status_endpoint__risk_accepted=False,
- ).distinct()
+ finding__active=True,
+ finding__mitigated__isnull=True).distinct()
prefetched_prods = prefetched_prods.prefetch_related(
Prefetch('endpoint_set', queryset=active_endpoint_query, to_attr='active_endpoints'))
prefetched_prods = prefetched_prods.prefetch_related('tags')
@@ -326,15 +323,15 @@ def finding_querys(request, prod):
end_date = timezone.now()
week = end_date - timedelta(days=7) # seven days and /newer are considered "new"
- filters['accepted'] = findings_qs.filter(finding_helper.ACCEPTED_FINDINGS_QUERY).filter(date__range=[start_date, end_date]).order_by("date")
+ filters['accepted'] = findings_qs.filter(finding_helper.ACCEPTED_FINDINGS_QUERY).filter(date__range=[start_date, end_date])
filters['verified'] = findings_qs.filter(finding_helper.VERIFIED_FINDINGS_QUERY).filter(date__range=[start_date, end_date]).order_by("date")
filters['new_verified'] = findings_qs.filter(finding_helper.VERIFIED_FINDINGS_QUERY).filter(date__range=[start_date, end_date]).order_by("date")
- filters['open'] = findings_qs.filter(finding_helper.OPEN_FINDINGS_QUERY).filter(date__range=[start_date, end_date]).order_by("date")
- filters['inactive'] = findings_qs.filter(finding_helper.INACTIVE_FINDINGS_QUERY).filter(date__range=[start_date, end_date]).order_by("date")
- filters['closed'] = findings_qs.filter(finding_helper.CLOSED_FINDINGS_QUERY).filter(date__range=[start_date, end_date]).order_by("date")
- filters['false_positive'] = findings_qs.filter(finding_helper.FALSE_POSITIVE_FINDINGS_QUERY).filter(date__range=[start_date, end_date]).order_by("date")
- filters['out_of_scope'] = findings_qs.filter(finding_helper.OUT_OF_SCOPE_FINDINGS_QUERY).filter(date__range=[start_date, end_date]).order_by("date")
- filters['all'] = findings_qs.order_by("date")
+ filters['open'] = findings_qs.filter(finding_helper.OPEN_FINDINGS_QUERY).filter(date__range=[start_date, end_date])
+ filters['inactive'] = findings_qs.filter(finding_helper.INACTIVE_FINDINGS_QUERY).filter(date__range=[start_date, end_date])
+ filters['closed'] = findings_qs.filter(finding_helper.CLOSED_FINDINGS_QUERY).filter(date__range=[start_date, end_date])
+ filters['false_positive'] = findings_qs.filter(finding_helper.FALSE_POSITIVE_FINDINGS_QUERY).filter(date__range=[start_date, end_date])
+ filters['out_of_scope'] = findings_qs.filter(finding_helper.OUT_OF_SCOPE_FINDINGS_QUERY).filter(date__range=[start_date, end_date])
+ filters['all'] = findings_qs
filters['open_vulns'] = findings_qs.filter(finding_helper.OPEN_FINDINGS_QUERY).filter(
cwe__isnull=False,
).order_by('cwe').values(
@@ -479,7 +476,6 @@ def view_product_metrics(request, pid):
add_breadcrumb(parent=prod, top_level=False, request=request)
- # An ordered dict does not make sense here.
open_close_weekly = OrderedDict()
severity_weekly = OrderedDict()
critical_weekly = OrderedDict()
@@ -487,83 +483,81 @@ def view_product_metrics(request, pid):
medium_weekly = OrderedDict()
open_objs_by_severity = get_zero_severity_level()
- closed_objs_by_severity = get_zero_severity_level()
accepted_objs_by_severity = get_zero_severity_level()
- for finding in filters.get("all", []):
- iso_cal = finding.date.isocalendar()
- date = iso_to_gregorian(iso_cal[0], iso_cal[1], 1)
-        html_date = date.strftime("<span class='small'>%m/%d<br/>%Y</span>")
- unix_timestamp = (tcalendar.timegm(date.timetuple()) * 1000)
-
- # Open findings
- if finding in filters.get("open", []):
- if unix_timestamp not in critical_weekly:
- critical_weekly[unix_timestamp] = {'count': 0, 'week': html_date}
- if unix_timestamp not in high_weekly:
- high_weekly[unix_timestamp] = {'count': 0, 'week': html_date}
- if unix_timestamp not in medium_weekly:
- medium_weekly[unix_timestamp] = {'count': 0, 'week': html_date}
-
- if unix_timestamp in open_close_weekly:
- open_close_weekly[unix_timestamp]['open'] += 1
+ for v in filters.get('open', None):
+ iso_cal = v.date.isocalendar()
+ x = iso_to_gregorian(iso_cal[0], iso_cal[1], 1)
+            y = x.strftime("<span class='small'>%m/%d<br/>%Y</span>")
+ x = (tcalendar.timegm(x.timetuple()) * 1000)
+ if x not in critical_weekly:
+ critical_weekly[x] = {'count': 0, 'week': y}
+ if x not in high_weekly:
+ high_weekly[x] = {'count': 0, 'week': y}
+ if x not in medium_weekly:
+ medium_weekly[x] = {'count': 0, 'week': y}
+
+ if x in open_close_weekly:
+ if v.mitigated:
+ open_close_weekly[x]['closed'] += 1
else:
- open_close_weekly[unix_timestamp] = {'closed': 0, 'open': 1, 'accepted': 0}
- open_close_weekly[unix_timestamp]['week'] = html_date
+ open_close_weekly[x]['open'] += 1
+ else:
+ if v.mitigated:
+ open_close_weekly[x] = {'closed': 1, 'open': 0, 'accepted': 0}
+ else:
+ open_close_weekly[x] = {'closed': 0, 'open': 1, 'accepted': 0}
+ open_close_weekly[x]['week'] = y
- if view == 'Finding':
- severity = finding.severity
- elif view == 'Endpoint':
- severity = finding.finding.severity
+ if view == 'Finding':
+ severity = v.severity
+ elif view == 'Endpoint':
+ severity = v.finding.severity
- if unix_timestamp in severity_weekly:
- if severity in severity_weekly[unix_timestamp]:
- severity_weekly[unix_timestamp][severity] += 1
- else:
- severity_weekly[unix_timestamp][severity] = 1
+ if x in severity_weekly:
+ if severity in severity_weekly[x]:
+ severity_weekly[x][severity] += 1
else:
- severity_weekly[unix_timestamp] = get_zero_severity_level()
- severity_weekly[unix_timestamp][severity] = 1
- severity_weekly[unix_timestamp]['week'] = html_date
+ severity_weekly[x][severity] = 1
+ else:
+ severity_weekly[x] = get_zero_severity_level()
+ severity_weekly[x][severity] = 1
+ severity_weekly[x]['week'] = y
- if severity == 'Critical':
- if unix_timestamp in critical_weekly:
- critical_weekly[unix_timestamp]['count'] += 1
- else:
- critical_weekly[unix_timestamp] = {'count': 1, 'week': html_date}
- elif severity == 'High':
- if unix_timestamp in high_weekly:
- high_weekly[unix_timestamp]['count'] += 1
- else:
- high_weekly[unix_timestamp] = {'count': 1, 'week': html_date}
- elif severity == 'Medium':
- if unix_timestamp in medium_weekly:
- medium_weekly[unix_timestamp]['count'] += 1
- else:
- medium_weekly[unix_timestamp] = {'count': 1, 'week': html_date}
- # Optimization: count severity level on server side
- if open_objs_by_severity.get(finding.severity) is not None:
- open_objs_by_severity[finding.severity] += 1
- # Close findings
- if finding in filters.get("closed", []):
- if unix_timestamp in open_close_weekly:
- open_close_weekly[unix_timestamp]['closed'] += 1
+ if severity == 'Critical':
+ if x in critical_weekly:
+ critical_weekly[x]['count'] += 1
else:
- open_close_weekly[unix_timestamp] = {'closed': 1, 'open': 0, 'accepted': 0}
- open_close_weekly[unix_timestamp]['week'] = html_date
- # Optimization: count severity level on server side
- if closed_objs_by_severity.get(finding.severity) is not None:
- closed_objs_by_severity[finding.severity] += 1
- # Risk Accepted findings
- if finding in filters.get("accepted", []):
- if unix_timestamp in open_close_weekly:
- open_close_weekly[unix_timestamp]['accepted'] += 1
+ critical_weekly[x] = {'count': 1, 'week': y}
+ elif severity == 'High':
+ if x in high_weekly:
+ high_weekly[x]['count'] += 1
else:
- open_close_weekly[unix_timestamp] = {'closed': 0, 'open': 0, 'accepted': 1}
- open_close_weekly[unix_timestamp]['week'] = html_date
- # Optimization: count severity level on server side
- if accepted_objs_by_severity.get(finding.severity) is not None:
- accepted_objs_by_severity[finding.severity] += 1
+ high_weekly[x] = {'count': 1, 'week': y}
+ elif severity == 'Medium':
+ if x in medium_weekly:
+ medium_weekly[x]['count'] += 1
+ else:
+ medium_weekly[x] = {'count': 1, 'week': y}
+
+ # Optimization: count severity level on server side
+ if open_objs_by_severity.get(v.severity) is not None:
+ open_objs_by_severity[v.severity] += 1
+
+ for a in filters.get('accepted', None):
+ iso_cal = a.date.isocalendar()
+ x = iso_to_gregorian(iso_cal[0], iso_cal[1], 1)
+        y = x.strftime("<span class='small'>%m/%d<br/>%Y</span>")
+ x = (tcalendar.timegm(x.timetuple()) * 1000)
+
+ if x in open_close_weekly:
+ open_close_weekly[x]['accepted'] += 1
+ else:
+ open_close_weekly[x] = {'closed': 0, 'open': 0, 'accepted': 1}
+ open_close_weekly[x]['week'] = y
+
+ if accepted_objs_by_severity.get(a.severity) is not None:
+ accepted_objs_by_severity[a.severity] += 1
test_data = {}
for t in tests:
@@ -590,7 +584,7 @@ def view_product_metrics(request, pid):
'inactive_objs': filters.get('inactive', None),
'inactive_objs_by_severity': sum_by_severity_level(filters.get('inactive')),
'closed_objs': filters.get('closed', None),
- 'closed_objs_by_severity': closed_objs_by_severity,
+ 'closed_objs_by_severity': sum_by_severity_level(filters.get('closed')),
'false_positive_objs': filters.get('false_positive', None),
'false_positive_objs_by_severity': sum_by_severity_level(filters.get('false_positive')),
'out_of_scope_objs': filters.get('out_of_scope', None),
diff --git a/dojo/settings/settings.dist.py b/dojo/settings/settings.dist.py
index 54e83542eba..c2d85ec3975 100644
--- a/dojo/settings/settings.dist.py
+++ b/dojo/settings/settings.dist.py
@@ -1187,7 +1187,6 @@ def saml2_attrib_map_format(dict):
'Nexpose Scan': ['title', 'severity', 'vulnerability_ids', 'cwe'],
# possible improvement: in the scanner put the library name into file_path, then dedup on cwe + file_path + severity
'NPM Audit Scan': ['title', 'severity', 'file_path', 'vulnerability_ids', 'cwe'],
- 'NPM Audit v7+ Scan': ['title', 'severity', 'cwe', 'vuln_id_from_tool'],
# possible improvement: in the scanner put the library name into file_path, then dedup on cwe + file_path + severity
'Yarn Audit Scan': ['title', 'severity', 'file_path', 'vulnerability_ids', 'cwe'],
# possible improvement: in the scanner put the library name into file_path, then dedup on vulnerability_ids + file_path + severity
@@ -1281,7 +1280,6 @@ def saml2_attrib_map_format(dict):
'Tenable Scan': True,
'Nexpose Scan': True,
'NPM Audit Scan': True,
- 'NPM Audit v7+ Scan': True,
'Yarn Audit Scan': True,
'Mend Scan': True,
'ZAP Scan': False,
@@ -1364,12 +1362,10 @@ def saml2_attrib_map_format(dict):
'CargoAudit Scan': DEDUPE_ALGO_HASH_CODE,
'Checkmarx Scan detailed': DEDUPE_ALGO_UNIQUE_ID_FROM_TOOL,
'Checkmarx Scan': DEDUPE_ALGO_HASH_CODE,
- 'Checkmarx One Scan': DEDUPE_ALGO_UNIQUE_ID_FROM_TOOL,
'Checkmarx OSA': DEDUPE_ALGO_UNIQUE_ID_FROM_TOOL_OR_HASH_CODE,
'Codechecker Report native': DEDUPE_ALGO_UNIQUE_ID_FROM_TOOL,
'Coverity API': DEDUPE_ALGO_UNIQUE_ID_FROM_TOOL,
'Cobalt.io API': DEDUPE_ALGO_UNIQUE_ID_FROM_TOOL,
- 'Crunch42 Scan': DEDUPE_ALGO_UNIQUE_ID_FROM_TOOL,
'Dependency Track Finding Packaging Format (FPF) Export': DEDUPE_ALGO_HASH_CODE,
'Mobsfscan Scan': DEDUPE_ALGO_HASH_CODE,
'SonarQube Scan detailed': DEDUPE_ALGO_UNIQUE_ID_FROM_TOOL,
@@ -1381,7 +1377,6 @@ def saml2_attrib_map_format(dict):
'Tenable Scan': DEDUPE_ALGO_HASH_CODE,
'Nexpose Scan': DEDUPE_ALGO_HASH_CODE,
'NPM Audit Scan': DEDUPE_ALGO_HASH_CODE,
- 'NPM Audit v7+ Scan': DEDUPE_ALGO_HASH_CODE,
'Yarn Audit Scan': DEDUPE_ALGO_HASH_CODE,
'Mend Scan': DEDUPE_ALGO_HASH_CODE,
'ZAP Scan': DEDUPE_ALGO_HASH_CODE,
diff --git a/dojo/static/dojo/js/metrics.js b/dojo/static/dojo/js/metrics.js
index 2e95555d379..392ad2ac6f8 100644
--- a/dojo/static/dojo/js/metrics.js
+++ b/dojo/static/dojo/js/metrics.js
@@ -1618,6 +1618,8 @@ function open_close_weekly(opened, closed, accepted, ticks) {
var options = {
xaxes: [{
ticks: ticks,
+ transform: function(v) { return -v; },
+ inverseTransform: function(v) { return -v; }
}],
yaxes: [{
min: 0
@@ -1659,6 +1661,8 @@ function severity_weekly(critical, high, medium, low, info, ticks) {
var options = {
xaxes: [{
ticks: ticks,
+ transform: function(v) { return -v; },
+ inverseTransform: function(v) { return -v; }
}],
yaxes: [{
min: 0
@@ -1709,6 +1713,8 @@ function severity_counts_weekly(critical, high, medium, ticks) {
var options = {
xaxes: [{
ticks: ticks,
+ transform: function(v) { return -v; },
+ inverseTransform: function(v) { return -v; }
}],
yaxes: [{
min: 0
diff --git a/dojo/templates/base.html b/dojo/templates/base.html
index 2f1cace966c..f4043d42e3c 100644
--- a/dojo/templates/base.html
+++ b/dojo/templates/base.html
@@ -765,8 +765,10 @@