From 8184c8163dfb30eafba3641fd416e1cd4ad11d50 Mon Sep 17 00:00:00 2001
From: DefectDojo release bot
Date: Mon, 7 Oct 2024 15:40:28 +0000
Subject: [PATCH 01/18] Update versions in application files

---
 components/package.json                           | 2 +-
 docs/content/en/getting_started/upgrading/2.40.md | 7 +++++++
 dojo/__init__.py                                  | 2 +-
 helm/defectdojo/Chart.yaml                        | 4 ++--
 4 files changed, 11 insertions(+), 4 deletions(-)
 create mode 100644 docs/content/en/getting_started/upgrading/2.40.md

diff --git a/components/package.json b/components/package.json
index ca4351fe41e..06cdce1889b 100644
--- a/components/package.json
+++ b/components/package.json
@@ -1,6 +1,6 @@
 {
   "name": "defectdojo",
-  "version": "2.39.0",
+  "version": "2.40.0-dev",
   "license" : "BSD-3-Clause",
   "private": true,
   "dependencies": {
diff --git a/docs/content/en/getting_started/upgrading/2.40.md b/docs/content/en/getting_started/upgrading/2.40.md
new file mode 100644
index 00000000000..3420f9b8356
--- /dev/null
+++ b/docs/content/en/getting_started/upgrading/2.40.md
@@ -0,0 +1,7 @@
+---
+title: 'Upgrading to DefectDojo Version 2.40.x'
+toc_hide: true
+weight: -20241007
+description: No special instructions.
+---
+There are no special instructions for upgrading to 2.40.x. Check the [Release Notes](https://github.com/DefectDojo/django-DefectDojo/releases/tag/2.40.0) for the contents of the release.
diff --git a/dojo/__init__.py b/dojo/__init__.py
index 6bc97e6bbb2..0dc36e95a1e 100644
--- a/dojo/__init__.py
+++ b/dojo/__init__.py
@@ -4,6 +4,6 @@
 # Django starts so that shared_task will use this app.
 from .celery import app as celery_app  # noqa: F401
 
-__version__ = "2.39.0"
+__version__ = "2.40.0-dev"
 __url__ = "https://github.com/DefectDojo/django-DefectDojo"
 __docs__ = "https://documentation.defectdojo.com"
diff --git a/helm/defectdojo/Chart.yaml b/helm/defectdojo/Chart.yaml
index b1927a7074a..4f2c96ba0fa 100644
--- a/helm/defectdojo/Chart.yaml
+++ b/helm/defectdojo/Chart.yaml
@@ -1,8 +1,8 @@
 apiVersion: v2
-appVersion: "2.39.0"
+appVersion: "2.40.0-dev"
 description: A Helm chart for Kubernetes to install DefectDojo
 name: defectdojo
-version: 1.6.153
+version: 1.6.154-dev
 icon: https://www.defectdojo.org/img/favicon.ico
 maintainers:
 - name: madchap
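Note: the release bot moves the three version strings above in lockstep — components/package.json, dojo/__init__.py, and the Helm chart's appVersion all become 2.40.0-dev together. A quick hedged sanity check against a checkout with this commit applied (a sketch, not part of the patch):

    # Confirm the package-level version updated in dojo/__init__.py above
    import dojo
    print(dojo.__version__)  # expected: 2.40.0-dev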
From 9766dc96c6dd747bf3ffd7681eac7d2774de3874 Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Mon, 7 Oct 2024 12:33:35 -0500
Subject: [PATCH 02/18] Bump boto3 from 1.35.33 to 1.35.34 (#11009)

Bumps [boto3](https://github.com/boto/boto3) from 1.35.33 to 1.35.34.
- [Release notes](https://github.com/boto/boto3/releases)
- [Commits](https://github.com/boto/boto3/compare/1.35.33...1.35.34)

---
updated-dependencies:
- dependency-name: boto3
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot]
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
---
 requirements.txt | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/requirements.txt b/requirements.txt
index 79d2844fc8f..57180e3f9d9 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -69,7 +69,7 @@ django-ratelimit==4.1.0
 argon2-cffi==23.1.0
 blackduck==1.1.3
 pycurl==7.45.3 # Required for Celery Broker AWS (SQS) support
-boto3==1.35.33 # Required for Celery Broker AWS (SQS) support
+boto3==1.35.34 # Required for Celery Broker AWS (SQS) support
 netaddr==1.3.0
 vulners==2.2.1
 fontawesomefree==6.6.0

From 07e918abc3f5824b49b9068d5252f399ed59c22e Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Mon, 7 Oct 2024 12:33:56 -0500
Subject: [PATCH 03/18] Bump redis from 5.1.0 to 5.1.1 (#11008)

Bumps [redis](https://github.com/redis/redis-py) from 5.1.0 to 5.1.1.
- [Release notes](https://github.com/redis/redis-py/releases)
- [Changelog](https://github.com/redis/redis-py/blob/master/CHANGES)
- [Commits](https://github.com/redis/redis-py/compare/v5.1.0...v5.1.1)

---
updated-dependencies:
- dependency-name: redis
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot]
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
---
 requirements.txt | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/requirements.txt b/requirements.txt
index 57180e3f9d9..c3134c757f6 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -35,7 +35,7 @@ psycopg[c]==3.2.3
 cryptography==43.0.1
 python-dateutil==2.9.0.post0
 pytz==2024.2
-redis==5.1.0
+redis==5.1.1
 requests==2.32.3
 sqlalchemy==2.0.35 # Required by Celery broker transport
 urllib3==1.26.18

From 0a74312cd204f7ed77537b2571c0e18036b5e510 Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Mon, 7 Oct 2024 12:34:14 -0500
Subject: [PATCH 04/18] Bump humanize from 4.10.0 to 4.11.0 (#11007)

Bumps [humanize](https://github.com/python-humanize/humanize) from 4.10.0 to 4.11.0.
- [Release notes](https://github.com/python-humanize/humanize/releases)
- [Commits](https://github.com/python-humanize/humanize/compare/4.10.0...4.11.0)

---
updated-dependencies:
- dependency-name: humanize
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot]
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
---
 requirements.txt | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/requirements.txt b/requirements.txt
index c3134c757f6..9615a932a50 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -24,7 +24,7 @@ django-prometheus==2.3.1
 Django==5.0.8
 djangorestframework==3.15.2
 html2text==2024.2.26
-humanize==4.10.0
+humanize==4.11.0
 jira==3.8.0
 PyGithub==1.58.2
 lxml==5.3.0

From b7d296bcb146459139f2868045a171abebc02ed9 Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Mon, 7 Oct 2024 12:34:35 -0500
Subject: [PATCH 05/18] Bump ruff from 0.6.8 to 0.6.9 (#11006)

Bumps [ruff](https://github.com/astral-sh/ruff) from 0.6.8 to 0.6.9.
- [Release notes](https://github.com/astral-sh/ruff/releases)
- [Changelog](https://github.com/astral-sh/ruff/blob/main/CHANGELOG.md)
- [Commits](https://github.com/astral-sh/ruff/compare/0.6.8...0.6.9)

---
updated-dependencies:
- dependency-name: ruff
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot]
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
---
 requirements-lint.txt | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/requirements-lint.txt b/requirements-lint.txt
index e2fd91d90a2..0e4ee0a0eae 100644
--- a/requirements-lint.txt
+++ b/requirements-lint.txt
@@ -1 +1 @@
-ruff==0.6.8
\ No newline at end of file
+ruff==0.6.9
\ No newline at end of file

From 2ebbf76159abce411ccb271d2a14943bee330fc0 Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Mon, 7 Oct 2024 12:35:04 -0500
Subject: [PATCH 06/18] Bump nginx from 1.27.0-alpine to 1.27.2-alpine (#11005)

Bumps nginx from 1.27.0-alpine to 1.27.2-alpine.

---
updated-dependencies:
- dependency-name: nginx
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot]
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
---
 Dockerfile.nginx-alpine | 2 +-
 Dockerfile.nginx-debian | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/Dockerfile.nginx-alpine b/Dockerfile.nginx-alpine
index 0528e63047b..b1bd293b09d 100644
--- a/Dockerfile.nginx-alpine
+++ b/Dockerfile.nginx-alpine
@@ -140,7 +140,7 @@ COPY manage.py ./
 COPY dojo/ ./dojo/
 RUN env DD_SECRET_KEY='.' python3 manage.py collectstatic --noinput && true
 
-FROM nginx:1.27.0-alpine@sha256:208b70eefac13ee9be00e486f79c695b15cef861c680527171a27d253d834be9
+FROM nginx:1.27.2-alpine@sha256:2140dad235c130ac861018a4e13a6bc8aea3a35f3a40e20c1b060d51a7efd250
 ARG uid=1001
 ARG appuser=defectdojo
 COPY --from=collectstatic /app/static/ /usr/share/nginx/html/static/
diff --git a/Dockerfile.nginx-debian b/Dockerfile.nginx-debian
index b07ce5407de..f818e54c7f9 100644
--- a/Dockerfile.nginx-debian
+++ b/Dockerfile.nginx-debian
@@ -73,7 +73,7 @@ COPY dojo/ ./dojo/
 RUN env DD_SECRET_KEY='.' python3 manage.py collectstatic --noinput && true
 
 
-FROM nginx:1.27.0-alpine@sha256:208b70eefac13ee9be00e486f79c695b15cef861c680527171a27d253d834be9
+FROM nginx:1.27.2-alpine@sha256:2140dad235c130ac861018a4e13a6bc8aea3a35f3a40e20c1b060d51a7efd250
 ARG uid=1001
 ARG appuser=defectdojo
 COPY --from=collectstatic /app/static/ /usr/share/nginx/html/static/

From 0017f6a4f07406a92819e884fe80f6752cb1a8a3 Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Tue, 8 Oct 2024 17:51:17 -0500
Subject: [PATCH 07/18] Bump boto3 from 1.35.34 to 1.35.35 (#11021)

Bumps [boto3](https://github.com/boto/boto3) from 1.35.34 to 1.35.35.
- [Release notes](https://github.com/boto/boto3/releases)
- [Commits](https://github.com/boto/boto3/compare/1.35.34...1.35.35)

---
updated-dependencies:
- dependency-name: boto3
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot]
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
---
 requirements.txt | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/requirements.txt b/requirements.txt
index 9615a932a50..edc8633b8e0 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -69,7 +69,7 @@ django-ratelimit==4.1.0
 argon2-cffi==23.1.0
 blackduck==1.1.3
 pycurl==7.45.3 # Required for Celery Broker AWS (SQS) support
-boto3==1.35.34 # Required for Celery Broker AWS (SQS) support
+boto3==1.35.35 # Required for Celery Broker AWS (SQS) support
 netaddr==1.3.0
 vulners==2.2.1
 fontawesomefree==6.6.0

From 1fdb05a94a3bdf8e900f770a5fa0717a7f8cb14c Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Tue, 8 Oct 2024 17:51:36 -0500
Subject: [PATCH 08/18] Bump vcrpy from 6.0.1 to 6.0.2 (#11022)

Bumps [vcrpy](https://github.com/kevin1024/vcrpy) from 6.0.1 to 6.0.2.
- [Release notes](https://github.com/kevin1024/vcrpy/releases)
- [Changelog](https://github.com/kevin1024/vcrpy/blob/master/docs/changelog.rst)
- [Commits](https://github.com/kevin1024/vcrpy/compare/v6.0.1...v6.0.2)

---
updated-dependencies:
- dependency-name: vcrpy
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot]
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
---
 requirements.txt | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/requirements.txt b/requirements.txt
index edc8633b8e0..08a3a83e4d7 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -54,7 +54,7 @@ JSON-log-formatter==1.1
 django-split-settings==1.3.2
 django-debug-toolbar==4.4.6
 django-debug-toolbar-request-history==0.1.4
-vcrpy==6.0.1
+vcrpy==6.0.2
 vcrpy-unittest==0.1.7
 django-tagulous==2.1.0
 PyJWT==2.9.0

From 60db54e1e1b706db52af4102b984c312bf364b03 Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Wed, 9 Oct 2024 16:32:27 -0500
Subject: [PATCH 09/18] Bump pdfmake from 0.2.13 to 0.2.14 in /components
 (#11028)

Bumps [pdfmake](https://github.com/bpampuch/pdfmake) from 0.2.13 to 0.2.14.
- [Release notes](https://github.com/bpampuch/pdfmake/releases)
- [Changelog](https://github.com/bpampuch/pdfmake/blob/0.2.14/CHANGELOG.md)
- [Commits](https://github.com/bpampuch/pdfmake/compare/0.2.13...0.2.14)

---
updated-dependencies:
- dependency-name: pdfmake
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot]
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
---
 components/package.json | 2 +-
 components/yarn.lock    | 8 ++++----
 2 files changed, 5 insertions(+), 5 deletions(-)

diff --git a/components/package.json b/components/package.json
index 06cdce1889b..7d7c9d1b857 100644
--- a/components/package.json
+++ b/components/package.json
@@ -35,7 +35,7 @@
     "metismenu": "~3.0.7",
     "moment": "^2.30.1",
     "morris.js": "morrisjs/morris.js",
-    "pdfmake": "^0.2.13",
+    "pdfmake": "^0.2.14",
     "startbootstrap-sb-admin-2": "1.0.7"
   },
   "engines": {
diff --git a/components/yarn.lock b/components/yarn.lock
index 7bb19365790..952d09ff22a 100644
--- a/components/yarn.lock
+++ b/components/yarn.lock
@@ -824,10 +824,10 @@ path-parse@^1.0.7:
   resolved "https://registry.yarnpkg.com/path-parse/-/path-parse-1.0.7.tgz#fbc114b60ca42b30d9daf5858e4bd68bbedb6735"
   integrity sha512-LDJzPVEEEPR+y48z93A0Ed0yXb8pAByGWo/k5YYdYgpY2/2EsOsksJrq7lOHxryrVOn1ejG6oAp8ahvOIQD8sw==
 
-pdfmake@^0.2.13:
-  version "0.2.13"
-  resolved "https://registry.yarnpkg.com/pdfmake/-/pdfmake-0.2.13.tgz#ea43fe9f0c8de1e5ec7b08486d6f4f8bbb8619e4"
-  integrity sha512-qeVE9Bzjm0oPCitH4/HYM/XCGTwoeOAOVAXPnV3s0kpPvTLkTF/bAF4jzorjkaIhXGQhzYk6Xclt0hMDYLY93w==
+pdfmake@^0.2.14:
+  version "0.2.14"
+  resolved "https://registry.yarnpkg.com/pdfmake/-/pdfmake-0.2.14.tgz#a257a393b54917218add829bff8e490be21e8077"
+  integrity sha512-x9gXFAY37/CAC/WaZB/683E4Pi0cVW/RMTTNxMpe4I2kRsKv8AE3Pz6+n7iTfn+84/GtSg99BjZkYh7oGFCKmg==
   dependencies:
     "@foliojs-fork/linebreak" "^1.1.1"
     "@foliojs-fork/pdfkit" "^0.14.0"

From db7d9a0fc592662265c8855657c2ddfa5b82db22 Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Wed, 9 Oct 2024 16:33:46 -0500
Subject: [PATCH 10/18] Bump python-gitlab from 4.12.2 to 4.13.0 (#11027)

Bumps [python-gitlab](https://github.com/python-gitlab/python-gitlab) from 4.12.2 to 4.13.0.
- [Release notes](https://github.com/python-gitlab/python-gitlab/releases)
- [Changelog](https://github.com/python-gitlab/python-gitlab/blob/main/CHANGELOG.md)
- [Commits](https://github.com/python-gitlab/python-gitlab/compare/v4.12.2...v4.13.0)

---
updated-dependencies:
- dependency-name: python-gitlab
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot]
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
---
 requirements.txt | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/requirements.txt b/requirements.txt
index 08a3a83e4d7..e4b67867cf1 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -46,7 +46,7 @@ titlecase==2.4.1
 social-auth-app-django==5.4.2
 social-auth-core==4.5.4
 gitpython==3.1.43
-python-gitlab==4.12.2
+python-gitlab==4.13.0
 cpe==1.3.1
 packageurl-python==0.15.6
 django-crum==0.7.9

From 535bb3b5696f4a325496ef3bcf5f078d26046ab5 Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Wed, 9 Oct 2024 16:34:05 -0500
Subject: [PATCH 11/18] Bump boto3 from 1.35.35 to 1.35.36 (#11026)

Bumps [boto3](https://github.com/boto/boto3) from 1.35.35 to 1.35.36.
- [Release notes](https://github.com/boto/boto3/releases)
- [Commits](https://github.com/boto/boto3/compare/1.35.35...1.35.36)

---
updated-dependencies:
- dependency-name: boto3
  dependency-type: direct:production
  update-type: version-update:semver-patch
...
Signed-off-by: dependabot[bot]
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
---
 requirements.txt | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/requirements.txt b/requirements.txt
index e4b67867cf1..3e4b52d094d 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -69,7 +69,7 @@ django-ratelimit==4.1.0
 argon2-cffi==23.1.0
 blackduck==1.1.3
 pycurl==7.45.3 # Required for Celery Broker AWS (SQS) support
-boto3==1.35.35 # Required for Celery Broker AWS (SQS) support
+boto3==1.35.36 # Required for Celery Broker AWS (SQS) support
 netaddr==1.3.0
 vulners==2.2.1
 fontawesomefree==6.6.0
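The commit that follows applies the fixes Ruff enables once `target-version` reflects the Python versions DefectDojo supports: `typing.List`/`Tuple`/`Dict`/`Optional`/`Union` annotations become builtin generics and PEP 604 `X | Y` unions, `Iterable`/`Callable` imports move from `typing` to `collections.abc`, `isinstance` checks accept union types, and guarded `startswith`/`endswith` slicing becomes `str.removeprefix`/`removesuffix`. A hedged before/after sketch of the annotation pattern, using hypothetical names rather than code from the diff (builtin generics need Python 3.9+; `|` unions and union `isinstance` need 3.10+):

    # Before: from typing import List, Optional, Tuple
    # def head(items: List[str], default: Optional[str] = None) -> Tuple[bool, str]: ...

    # After, the style applied throughout the commit below:
    def head(items: list[str], default: str | None = None) -> tuple[bool, str]:
        # isinstance() accepts X | Y unions on Python 3.10+
        if items and isinstance(items[0], str | bytes):
            return True, items[0]
        return False, default or ""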
From bd507d322c6c6e9e9fd856ab285b6d2df9c4ac02 Mon Sep 17 00:00:00 2001
From: kiblik <5609770+kiblik@users.noreply.github.com>
Date: Thu, 10 Oct 2024 00:15:53 +0200
Subject: [PATCH 12/18] Ruff: Fix issues via "target-version" (#10846)

---
 dojo/api_v2/serializers.py                      |  5 +-
 dojo/apps.py                                    |  2 +-
 dojo/engagement/views.py                        | 30 +++++------
 dojo/finding/views.py                           | 23 ++++-----
 dojo/finding_group/views.py                     |  3 +-
 dojo/forms.py                                   |  5 +-
 dojo/home/views.py                              |  3 +-
 dojo/importers/auto_create_context.py           | 30 +++++------
 dojo/importers/base_importer.py                 | 51 +++++++++----------
 dojo/importers/default_importer.py              | 21 ++++----
 dojo/importers/default_reimporter.py            | 33 ++++++------
 dojo/importers/endpoint_manager.py              | 19 ++++---
 dojo/importers/options.py                       | 13 ++---
 dojo/metrics/utils.py                           | 11 ++--
 dojo/models.py                                  |  3 +-
 dojo/remote_user.py                             |  3 +-
 dojo/reports/views.py                           | 24 +++------
 dojo/risk_acceptance/api.py                     |  4 +-
 dojo/system_settings/views.py                   |  3 +-
 dojo/test/views.py                              |  5 +-
 .../engines/appcheck.py                         |  3 +-
 .../engines/base.py                             | 22 ++++----
 .../engines/nmap.py                             |  4 +-
 dojo/tools/blackduck/importer.py                |  2 +-
 .../blackduck_binary_analysis/importer.py       |  2 +-
 dojo/tools/checkmarx_one/parser.py              | 13 +++--
 .../parser.py                                   |  3 +-
 dojo/tools/kics/parser.py                       |  3 +-
 dojo/tools/sarif/parser.py                      |  5 +-
 dojo/tools/tenable/xml_format.py                |  2 +-
 dojo/tools/veracode/json_parser.py              |  2 +-
 dojo/tools/whitehat_sentinel/parser.py          |  7 ++-
 dojo/utils.py                                   |  4 +-
 ruff.toml                                       |  3 ++
 unittests/test_bulk_risk_acceptance_api.py      | 22 ++++----
 unittests/test_dashboard.py                     |  5 +-
 unittests/test_finding_helper.py                |  6 +--
 unittests/test_flush_auditlog.py                |  8 +--
 unittests/test_import_reimport.py               |  4 +-
 unittests/test_metrics_queries.py               | 42 +++++++--------
 unittests/test_risk_acceptance.py               |  6 +--
 unittests/test_utils_deduplication_reopen.py    |  2 +-
 unittests/tools/test_arachni_parser.py          |  2 +-
 unittests/tools/test_bugcrowd_parser.py         |  4 +-
 unittests/tools/test_dependency_check_parser.py |  4 +-
 unittests/tools/test_sarif_parser.py            | 10 ++--
 unittests/tools/test_stackhawk_parser.py        |  2 +-
 47 files changed, 227 insertions(+), 256 deletions(-)

diff --git a/dojo/api_v2/serializers.py b/dojo/api_v2/serializers.py
index 471dfc019b5..eed696a2c95 100644
--- a/dojo/api_v2/serializers.py
+++ b/dojo/api_v2/serializers.py
@@ -3,7 +3,6 @@
 import os
 import re
 from datetime import datetime
-from typing import List
 
 import six
 import tagulous
@@ -1517,7 +1516,7 @@ def get_engagement(self, obj):
         )
 
     def validate(self, data):
-        def validate_findings_have_same_engagement(finding_objects: List[Finding]):
+        def validate_findings_have_same_engagement(finding_objects: list[Finding]):
             engagements = finding_objects.values_list("test__engagement__id", flat=True).distinct().count()
             if engagements > 1:
                 msg = "You are not permitted to add findings from multiple engagements"
@@ -2043,7 +2042,7 @@ def get_findings_count(self, obj) -> int:
         return obj.findings_count
 
     # TODO: maybe extend_schema_field is needed here?
-    def get_findings_list(self, obj) -> List[int]:
+    def get_findings_list(self, obj) -> list[int]:
         return obj.open_findings_list
 
 
diff --git a/dojo/apps.py b/dojo/apps.py
index fd3a06575fd..4d4d07af50e 100644
--- a/dojo/apps.py
+++ b/dojo/apps.py
@@ -98,5 +98,5 @@ def get_model_fields(default_fields, extra_fields=()):
 def get_model_default_fields(model):
     return tuple(
         field.name for field in model._meta.fields if
-        isinstance(field, (models.CharField, models.TextField))
+        isinstance(field, models.CharField | models.TextField)
     )
diff --git a/dojo/engagement/views.py b/dojo/engagement/views.py
index ea73bd80c63..54781eed409 100644
--- a/dojo/engagement/views.py
+++ b/dojo/engagement/views.py
@@ -7,7 +7,6 @@
 from functools import reduce
 from tempfile import NamedTemporaryFile
 from time import strftime
-from typing import List, Optional, Tuple
 
 from django.conf import settings
 from django.contrib import messages
@@ -427,7 +426,7 @@ def get_risks_accepted(self, eng):
     def get_filtered_tests(
         self,
         request: HttpRequest,
-        queryset: List[Test],
+        queryset: list[Test],
         engagement: Engagement,
     ):
         filter_string_matching = get_system_setting("filter_string_matching", False)
@@ -710,9 +709,9 @@ def get_development_environment(
     def get_engagement_or_product(
         self,
         user: Dojo_User,
-        engagement_id: Optional[int] = None,
-        product_id: Optional[int] = None,
-    ) -> Tuple[Engagement, Product, Product | Engagement]:
+        engagement_id: int | None = None,
+        product_id: int | None = None,
+    ) -> tuple[Engagement, Product, Product | Engagement]:
         """Using the path parameters, either fetch the product or engagement"""
         engagement = product = engagement_or_product = None
         # Get the product if supplied
@@ -769,7 +768,7 @@ def get_jira_form(
         self,
         request: HttpRequest,
         engagement_or_product: Engagement | Product,
-    ) -> Tuple[JIRAImportScanForm | None, bool]:
+    ) -> tuple[JIRAImportScanForm | None, bool]:
         """Returns a JiraImportScanForm if jira is enabled"""
         jira_form = None
         push_all_jira_issues = False
@@ -794,7 +793,7 @@ def get_product_tab(
         self,
         product: Product,
         engagement: Engagement,
-    ) -> Tuple[Product_Tab, dict]:
+    ) -> tuple[Product_Tab, dict]:
         """
         Determine how the product tab will be rendered, and what
         tab will be selected as currently active
@@ -811,9 +810,9 @@ def handle_request(
         self,
         request: HttpRequest,
-        engagement_id: Optional[int] = None,
-        product_id: Optional[int] = None,
-    ) -> Tuple[HttpRequest, dict]:
+        engagement_id: int | None = None,
+        product_id: int | None = None,
+    ) -> tuple[HttpRequest, dict]:
         """
         Process the common behaviors between request types, and then return
         the request and context dict back to be rendered
@@ -1046,8 +1045,8 @@ def failure_redirect(
     def get(
         self,
         request: HttpRequest,
-        engagement_id: Optional[int] = None,
-        product_id: Optional[int] = None,
+        engagement_id: int | None = None,
+        product_id: int | None = None,
     ) -> HttpResponse:
         """Process GET requests for the Import View"""
         # process the request and path parameters
@@ -1062,8 +1061,8 @@ def get(
     def post(
         self,
         request: HttpRequest,
-        engagement_id: Optional[int] = None,
-        product_id: Optional[int] = None,
+        engagement_id: int | None = None,
+        product_id: int | None = None,
     ) -> HttpResponse:
         """Process POST requests for the Import View"""
         # process the request and path parameters
@@ -1555,8 +1554,7 @@ def get_engagements(request):
     if not url:
         msg = "Please use the export button when exporting engagements"
         raise ValidationError(msg)
-    if url.startswith("url="):
-        url = url[4:]
+    url = url.removeprefix("url=")
 
     path_items = list(filter(None, re.split(r"/|\?", url)))
 
diff --git a/dojo/finding/views.py b/dojo/finding/views.py
index ea5578ee460..0c0d78d6cc5 100644
--- a/dojo/finding/views.py
+++ b/dojo/finding/views.py
@@ -7,7 +7,6 @@
 import mimetypes
 from collections import OrderedDict, defaultdict
 from itertools import chain
-from typing import Optional
 
 from django.conf import settings
 from django.contrib import messages
@@ -265,9 +264,9 @@ class BaseListFindings:
     def __init__(
         self,
         filter_name: str = "All",
-        product_id: Optional[int] = None,
-        engagement_id: Optional[int] = None,
-        test_id: Optional[int] = None,
+        product_id: int | None = None,
+        engagement_id: int | None = None,
+        test_id: int | None = None,
         order_by: str = "numerical_severity",
         prefetch_type: str = "all",
     ):
@@ -420,7 +419,7 @@ def add_breadcrumbs(self, request: HttpRequest, context: dict):
 
         return request, context
 
-    def get(self, request: HttpRequest, product_id: Optional[int] = None, engagement_id: Optional[int] = None):
+    def get(self, request: HttpRequest, product_id: int | None = None, engagement_id: int | None = None):
         # Store the product and engagement ids
         self.product_id = product_id
         self.engagement_id = engagement_id
@@ -446,43 +445,43 @@ def get(self, request: HttpRequest, product_id: Optional[int] = None, engagement
 
 
 class ListOpenFindings(ListFindings):
-    def get(self, request: HttpRequest, product_id: Optional[int] = None, engagement_id: Optional[int] = None):
+    def get(self, request: HttpRequest, product_id: int | None = None, engagement_id: int | None = None):
         self.filter_name = "Open"
         return super().get(request, product_id=product_id, engagement_id=engagement_id)
 
 
 class ListVerifiedFindings(ListFindings):
-    def get(self, request: HttpRequest, product_id: Optional[int] = None, engagement_id: Optional[int] = None):
+    def get(self, request: HttpRequest, product_id: int | None = None, engagement_id: int | None = None):
         self.filter_name = "Verified"
         return super().get(request, product_id=product_id, engagement_id=engagement_id)
 
 
 class ListOutOfScopeFindings(ListFindings):
-    def get(self, request: HttpRequest, product_id: Optional[int] = None, engagement_id: Optional[int] = None):
+    def get(self, request: HttpRequest, product_id: int | None = None, engagement_id: int | None = None):
         self.filter_name = "Out of Scope"
         return super().get(request, product_id=product_id, engagement_id=engagement_id)
 
 
 class ListFalsePositiveFindings(ListFindings):
-    def get(self, request: HttpRequest, product_id: Optional[int] = None, engagement_id: Optional[int] = None):
+    def get(self, request: HttpRequest, product_id: int | None = None, engagement_id: int | None = None):
         self.filter_name = "False Positive"
         return super().get(request, product_id=product_id, engagement_id=engagement_id)
 
 
 class ListInactiveFindings(ListFindings):
-    def get(self, request: HttpRequest, product_id: Optional[int] = None, engagement_id: Optional[int] = None):
+    def get(self, request: HttpRequest, product_id: int | None = None, engagement_id: int | None = None):
         self.filter_name = "Inactive"
         return super().get(request, product_id=product_id, engagement_id=engagement_id)
 
 
 class ListAcceptedFindings(ListFindings):
-    def get(self, request: HttpRequest, product_id: Optional[int] = None, engagement_id: Optional[int] = None):
+    def get(self, request: HttpRequest, product_id: int | None = None, engagement_id: int | None = None):
         self.filter_name = "Accepted"
         return super().get(request, product_id=product_id, engagement_id=engagement_id)
 
 
 class ListClosedFindings(ListFindings):
-    def get(self, request: HttpRequest, product_id: Optional[int] = None, engagement_id: Optional[int] = None):
+    def get(self, request: HttpRequest, product_id: int | None = None, engagement_id: int | None = None):
         self.filter_name = "Closed"
         self.order_by = "-mitigated"
         return super().get(request, product_id=product_id, engagement_id=engagement_id)
diff --git a/dojo/finding_group/views.py b/dojo/finding_group/views.py
index 546dae93763..814d88888ed 100644
--- a/dojo/finding_group/views.py
+++ b/dojo/finding_group/views.py
@@ -74,8 +74,7 @@ def view_finding_group(request, fgid):
         if jira_issue:
             # See if the submitted issue was a issue key or the full URL
             jira_instance = jira_helper.get_jira_project(finding_group).jira_instance
-            if jira_issue.startswith(jira_instance.url + "/browse/"):
-                jira_issue = jira_issue[len(jira_instance.url + "/browse/"):]
+            jira_issue = jira_issue.removeprefix(jira_instance.url + "/browse/")
 
             if finding_group.has_jira_issue and not jira_issue == jira_helper.get_jira_key(finding_group):
                 jira_helper.unlink_jira(request, finding_group)
diff --git a/dojo/forms.py b/dojo/forms.py
index 1dd52671c45..fcd37a467d7 100644
--- a/dojo/forms.py
+++ b/dojo/forms.py
@@ -2382,10 +2382,7 @@ def get_jira_issue_template_dir_choices():
             # template_list.append((os.path.join(base_dir, filename), filename))
 
         for dirname in dirnames:
-            if base_dir.startswith(settings.TEMPLATE_DIR_PREFIX):
-                clean_base_dir = base_dir[len(settings.TEMPLATE_DIR_PREFIX):]
-            else:
-                clean_base_dir = base_dir
+            clean_base_dir = base_dir.removeprefix(settings.TEMPLATE_DIR_PREFIX)
             template_dir_list.append((os.path.join(clean_base_dir, dirname), dirname))
 
     logger.debug("templates: %s", template_dir_list)
diff --git a/dojo/home/views.py b/dojo/home/views.py
index 2c4d16fadef..67e90bec106 100644
--- a/dojo/home/views.py
+++ b/dojo/home/views.py
@@ -1,6 +1,5 @@
 from collections import defaultdict
 from datetime import timedelta
-from typing import Dict
 
 from dateutil.relativedelta import relativedelta
 from django.db.models import Count, Q
@@ -75,7 +74,7 @@ def support(request: HttpRequest) -> HttpResponse:
     return render(request, "dojo/support.html", {})
 
 
-def get_severities_all(findings) -> Dict[str, int]:
+def get_severities_all(findings) -> dict[str, int]:
     severities_all = findings.values("severity").annotate(count=Count("severity")).order_by()
     return defaultdict(lambda: 0, {s["severity"]: s["count"] for s in severities_all})
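Several hunks above (dojo/engagement/views.py, dojo/finding_group/views.py, dojo/forms.py) collapse a guarded `startswith` check plus slice into `str.removeprefix`. The two forms are equivalent: `removeprefix` strips the prefix when present and returns the string unchanged otherwise, which is why the `if`/`else` around `clean_base_dir` in dojo/forms.py could be dropped entirely. A standard-library-only illustration (not DefectDojo code):

    url = "url=/finding/open"
    # old idiom: if url.startswith("url="): url = url[4:]
    assert url.removeprefix("url=") == "/finding/open"
    # no prefix present: the string comes back unchanged (Python 3.9+)
    assert "/finding/open".removeprefix("url=") == "/finding/open"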
diff --git a/dojo/importers/auto_create_context.py b/dojo/importers/auto_create_context.py
index 16454568624..7d54782a676 100644
--- a/dojo/importers/auto_create_context.py
+++ b/dojo/importers/auto_create_context.py
@@ -1,6 +1,6 @@
 import logging
 from datetime import datetime, timedelta
-from typing import Any, Optional
+from typing import Any
 
 from crum import get_current_user
 from django.db import transaction
@@ -113,7 +113,7 @@ def process_import_meta_data_from_dict(
     """
     def get_target_product_type_if_exists(
         self,
-        product_type_name: Optional[str] = None,
+        product_type_name: str | None = None,
         **kwargs: dict,
     ) -> Product_Type | None:
         """
@@ -128,8 +128,8 @@ def get_target_product_type_if_exists(
 
     def get_target_product_if_exists(
         self,
-        product_name: Optional[str] = None,
-        product_type_name: Optional[str] = None,
+        product_name: str | None = None,
+        product_type_name: str | None = None,
         **kwargs: dict,
     ) -> Product | None:
         """
@@ -168,7 +168,7 @@ def get_target_product_by_id_if_exists(
     def get_target_engagement_if_exists(
         self,
         engagement_id: int = 0,
-        engagement_name: Optional[str] = None,
+        engagement_name: str | None = None,
         product: Product = None,
         **kwargs: dict,
     ) -> Engagement | None:
@@ -191,8 +191,8 @@ def get_target_engagement_if_exists(
     def get_target_test_if_exists(
         self,
         test_id: int = 0,
-        test_title: Optional[str] = None,
-        scan_type: Optional[str] = None,
+        test_title: str | None = None,
+        scan_type: str | None = None,
         engagement: Engagement = None,
         **kwargs: dict,
     ) -> Test | None:
@@ -220,7 +220,7 @@
     """
     def get_or_create_product_type(
         self,
-        product_type_name: Optional[str] = None,
+        product_type_name: str | None = None,
         **kwargs: dict,
     ) -> Product_Type:
         """
@@ -243,8 +243,8 @@ def get_or_create_product_type(
 
     def get_or_create_product(
         self,
-        product_name: Optional[str] = None,
-        product_type_name: Optional[str] = None,
+        product_name: str | None = None,
+        product_type_name: str | None = None,
         *,
         auto_create_context: bool = False,
         **kwargs: dict,
@@ -278,14 +278,14 @@ def get_or_create_product(
     def get_or_create_engagement(
         self,
         engagement_id: int = 0,
-        engagement_name: Optional[str] = None,
-        product_name: Optional[str] = None,
-        product_type_name: Optional[str] = None,
+        engagement_name: str | None = None,
+        product_name: str | None = None,
+        product_type_name: str | None = None,
         *,
         auto_create_context: bool = False,
         deduplication_on_engagement: bool = False,
-        source_code_management_uri: Optional[str] = None,
-        target_end: Optional[datetime] = None,
+        source_code_management_uri: str | None = None,
+        target_end: datetime | None = None,
         **kwargs: dict,
     ) -> Engagement:
         """Fetches an engagement by name or ID if one already exists."""
diff --git a/dojo/importers/base_importer.py b/dojo/importers/base_importer.py
index ebd97fc37f6..c9a77fbb95b 100644
--- a/dojo/importers/base_importer.py
+++ b/dojo/importers/base_importer.py
@@ -1,6 +1,5 @@
 import base64
 import logging
-from typing import List, Tuple
 
 from django.conf import settings
 from django.core.exceptions import ValidationError
@@ -42,7 +41,7 @@ class Parser:
     and is purely for the sake of type hinting
     """
 
-    def get_findings(scan_type: str, test: Test) -> List[Finding]:
+    def get_findings(scan_type: str, test: Test) -> list[Finding]:
         """
         Stub function to make the hinting happier. The actual class
         is loosely obligated to have this function defined.
@@ -89,7 +88,7 @@ def process_scan(
         scan: TemporaryUploadedFile,
         *args: list,
         **kwargs: dict,
-    ) -> Tuple[Test, int, int, int, int, int, Test_Import]:
+    ) -> tuple[Test, int, int, int, int, int, Test_Import]:
         """
         A helper method that executes the entire import process in a single method.
         This includes parsing the file, processing the findings, and returning the
@@ -99,9 +98,9 @@ def process_scan(
 
     def process_findings(
         self,
-        parsed_findings: List[Finding],
+        parsed_findings: list[Finding],
         **kwargs: dict,
-    ) -> List[Finding]:
+    ) -> list[Finding]:
         """
         Make the conversion from unsaved Findings in memory to Findings that are saved in the
        database with and ID associated with them. This processor will also save any associated
@@ -111,9 +110,9 @@ def process_findings(
 
     def close_old_findings(
         self,
-        findings: List[Finding],
+        findings: list[Finding],
         **kwargs: dict,
-    ) -> List[Finding]:
+    ) -> list[Finding]:
         """
         Identify any findings that have been imported before,
         but are no longer present in later reports so that
@@ -147,7 +146,7 @@ def parse_findings_static_test_type(
         self,
         scan: TemporaryUploadedFile,
         parser: Parser,
-    ) -> List[Finding]:
+    ) -> list[Finding]:
         """
         Parse the scan report submitted with the parser class and generate some findings
         that are not saved to the database yet. This step is crucial in determining if
@@ -168,7 +167,7 @@ def parse_dynamic_test_type_tests(
         self,
         scan: TemporaryUploadedFile,
         parser: Parser,
-    ) -> List[Test]:
+    ) -> list[Test]:
         """Use the API configuration object to get the tests to be used by the parser"""
         try:
             return parser.get_tests(self.scan_type, scan)
@@ -178,8 +177,8 @@ def parse_dynamic_test_type_tests(
 
     def parse_dynamic_test_type_findings_from_tests(
         self,
-        tests: List[Test],
-    ) -> List[Finding]:
+        tests: list[Test],
+    ) -> list[Finding]:
         """
         currently we only support import one Test
         so for parser that support multiple tests (like SARIF)
@@ -194,7 +193,7 @@ def parse_findings_dynamic_test_type(
         self,
         scan: TemporaryUploadedFile,
         parser: Parser,
-    ) -> List[Finding]:
+    ) -> list[Finding]:
         """
         Use the API configuration object to get the tests to be used by the parser
         to dump findings into
@@ -208,7 +207,7 @@ def parse_findings(
         self,
         scan: TemporaryUploadedFile,
         parser: Parser,
-    ) -> List[Finding]:
+    ) -> list[Finding]:
         """
         Determine how to parse the findings based on the presence of the
         `get_tests` function on the parser object
@@ -221,9 +220,9 @@ def parse_findings(
 
     def sync_process_findings(
         self,
-        parsed_findings: List[Finding],
+        parsed_findings: list[Finding],
         **kwargs: dict,
-    ) -> Tuple[List[Finding], List[Finding], List[Finding], List[Finding]]:
+    ) -> tuple[list[Finding], list[Finding], list[Finding], list[Finding]]:
         """
         Processes findings in a synchronous manner such that all findings
         will be processed in a worker/process/thread
@@ -232,9 +231,9 @@ def sync_process_findings(
 
     def async_process_findings(
         self,
-        parsed_findings: List[Finding],
+        parsed_findings: list[Finding],
         **kwargs: dict,
-    ) -> List[Finding]:
+    ) -> list[Finding]:
         """
         Processes findings in chunks within N number of processes.
         The ASYNC_FINDING_IMPORT_CHUNK_SIZE setting will determine how many
@@ -244,9 +243,9 @@ def determine_process_method(
         self,
-        parsed_findings: List[Finding],
+        parsed_findings: list[Finding],
         **kwargs: dict,
-    ) -> List[Finding]:
+    ) -> list[Finding]:
         """
         Determines whether to process the scan iteratively, or in chunks,
         based upon the ASYNC_FINDING_IMPORT setting
@@ -318,10 +317,10 @@ def update_test_tags(self):
 
     def update_import_history(
         self,
-        new_findings: List[Finding] = [],
-        closed_findings: List[Finding] = [],
-        reactivated_findings: List[Finding] = [],
-        untouched_findings: List[Finding] = [],
+        new_findings: list[Finding] = [],
+        closed_findings: list[Finding] = [],
+        reactivated_findings: list[Finding] = [],
+        untouched_findings: list[Finding] = [],
     ) -> Test_Import:
         """Creates a record of the import or reimport operation that has occurred."""
         # Quick fail check to determine if we even wanted this
@@ -447,9 +446,9 @@ def construct_imported_message(
 
     def chunk_findings(
         self,
-        finding_list: List[Finding],
+        finding_list: list[Finding],
         chunk_size: int = settings.ASYNC_FINDING_IMPORT_CHUNK_SIZE,
-    ) -> List[List[Finding]]:
+    ) -> list[list[Finding]]:
         """
         Split a single large list into a list of lists of size `chunk_size`.
         For Example
@@ -627,7 +626,7 @@ def process_request_response_pairs(
     def process_endpoints(
         self,
         finding: Finding,
-        endpoints_to_add: List[Endpoint],
+        endpoints_to_add: list[Endpoint],
     ) -> None:
         """
         Process any endpoints to add to the finding. Endpoints could come from two places
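The `chunk_findings` and `chunk_endpoints` helpers retyped above are documented to split one large list into sub-lists of at most `chunk_size` items (settings.ASYNC_FINDING_IMPORT_CHUNK_SIZE) so findings can be dispatched to async workers. A minimal sketch of that documented behavior, not the importer's actual implementation:

    # e.g. chunk([1, 2, 3, 4, 5], 2) -> [[1, 2], [3, 4], [5]]
    def chunk(items: list, chunk_size: int) -> list[list]:
        return [items[i:i + chunk_size] for i in range(0, len(items), chunk_size)]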
diff --git a/dojo/importers/default_importer.py b/dojo/importers/default_importer.py
index 28a2de1e30d..95254ef59b8 100644
--- a/dojo/importers/default_importer.py
+++ b/dojo/importers/default_importer.py
@@ -1,5 +1,4 @@
 import logging
-from typing import List, Tuple
 
 from django.core.files.uploadedfile import TemporaryUploadedFile
 from django.core.serializers import deserialize, serialize
@@ -86,7 +85,7 @@ def process_scan(
         scan: TemporaryUploadedFile,
         *args: list,
         **kwargs: dict,
-    ) -> Tuple[Test, int, int, int, int, int, Test_Import]:
+    ) -> tuple[Test, int, int, int, int, int, Test_Import]:
         """
         The full step process of taking a scan report, and converting it to
         findings in the database. This entails the the following actions:
@@ -143,9 +142,9 @@ def process_scan(
 
     def process_findings(
         self,
-        parsed_findings: List[Finding],
+        parsed_findings: list[Finding],
         **kwargs: dict,
-    ) -> List[Finding]:
+    ) -> list[Finding]:
         """
         Saves findings in memory that were parsed from the scan report into the database.
         This process involves first saving associated objects such as endpoints, files,
@@ -233,9 +232,9 @@ def process_findings(
 
     def close_old_findings(
         self,
-        findings: List[Finding],
+        findings: list[Finding],
         **kwargs: dict,
-    ) -> List[Finding]:
+    ) -> list[Finding]:
         """
         Closes old findings based on a hash code match at either the product
         or the engagement scope. Closing an old finding entails setting the
@@ -300,7 +299,7 @@ def parse_findings(
         self,
         scan: TemporaryUploadedFile,
         parser: Parser,
-    ) -> List[Finding]:
+    ) -> list[Finding]:
         """
         Determine how to parse the findings based on the presence of the
         `get_tests` function on the parser object
@@ -318,7 +317,7 @@ def parse_findings_static_test_type(
         self,
         scan: TemporaryUploadedFile,
         parser: Parser,
-    ) -> List[Finding]:
+    ) -> list[Finding]:
         """
         Creates a test object as part of the import process as there is not one present
         at the time of import. Once the test is created, proceed with the traditional
@@ -334,7 +333,7 @@ def parse_findings_dynamic_test_type(
         self,
         scan: TemporaryUploadedFile,
         parser: Parser,
-    ) -> List[Finding]:
+    ) -> list[Finding]:
         """
         Uses the parser to fetch any tests that may have been created
         by the API based parser, aggregates all findings from each test
@@ -377,9 +376,9 @@ def parse_findings_dynamic_test_type(
 
     def async_process_findings(
         self,
-        parsed_findings: List[Finding],
+        parsed_findings: list[Finding],
         **kwargs: dict,
-    ) -> List[Finding]:
+    ) -> list[Finding]:
         """
         Processes findings in chunks within N number of processes.
         The ASYNC_FINDING_IMPORT_CHUNK_SIZE setting will determine how many
diff --git a/dojo/importers/default_reimporter.py b/dojo/importers/default_reimporter.py
index 7f1c3bd15bc..9debf4aabaa 100644
--- a/dojo/importers/default_reimporter.py
+++ b/dojo/importers/default_reimporter.py
@@ -1,5 +1,4 @@
 import logging
-from typing import List, Tuple
 
 from django.core.files.uploadedfile import TemporaryUploadedFile
 from django.core.serializers import deserialize, serialize
@@ -73,7 +72,7 @@ def process_scan(
         scan: TemporaryUploadedFile,
         *args: list,
         **kwargs: dict,
-    ) -> Tuple[Test, int, int, int, int, int, Test_Import]:
+    ) -> tuple[Test, int, int, int, int, int, Test_Import]:
         """
         The full step process of taking a scan report, and converting it to
         findings in the database. This entails the the following actions:
@@ -158,9 +157,9 @@ def determine_deduplication_algorithm(self) -> str:
 
     def process_findings(
         self,
-        parsed_findings: List[Finding],
+        parsed_findings: list[Finding],
         **kwargs: dict,
-    ) -> Tuple[List[Finding], List[Finding], List[Finding], List[Finding]]:
+    ) -> tuple[list[Finding], list[Finding], list[Finding], list[Finding]]:
         """
         Saves findings in memory that were parsed from the scan report into the database.
         This process involves first saving associated objects such as endpoints, files,
@@ -256,9 +255,9 @@ def process_findings(
 
     def close_old_findings(
         self,
-        findings: List[Finding],
+        findings: list[Finding],
         **kwargs: dict,
-    ) -> List[Finding]:
+    ) -> list[Finding]:
         """
         Updates the status of findings that were detected as "old" by the reimport
         process findings methods
@@ -289,7 +288,7 @@ def parse_findings(
         self,
         scan: TemporaryUploadedFile,
         parser: Parser,
-    ) -> List[Finding]:
+    ) -> list[Finding]:
         """
         Determine how to parse the findings based on the presence of the
         `get_tests` function on the parser object
@@ -307,7 +306,7 @@ def parse_findings_static_test_type(
         self,
         scan: TemporaryUploadedFile,
         parser: Parser,
-    ) -> List[Finding]:
+    ) -> list[Finding]:
         """
         Parses the findings from file and assigns them to the test
         that was supplied
@@ -320,7 +319,7 @@ def parse_findings_dynamic_test_type(
         self,
         scan: TemporaryUploadedFile,
         parser: Parser,
-    ) -> List[Finding]:
+    ) -> list[Finding]:
         """
         Uses the parser to fetch any tests that may have been created
         by the API based parser, aggregates all findings from each test
@@ -331,9 +330,9 @@ def parse_findings_dynamic_test_type(
 
     def async_process_findings(
         self,
-        parsed_findings: List[Finding],
+        parsed_findings: list[Finding],
         **kwargs: dict,
-    ) -> Tuple[List[Finding], List[Finding], List[Finding], List[Finding]]:
+    ) -> tuple[list[Finding], list[Finding], list[Finding], list[Finding]]:
         """
         Processes findings in chunks within N number of processes.
         The ASYNC_FINDING_IMPORT_CHUNK_SIZE setting will determine how many
@@ -388,7 +387,7 @@ def async_process_findings(
     def match_new_finding_to_existing_finding(
         self,
         unsaved_finding: Finding,
-    ) -> List[Finding]:
+    ) -> list[Finding]:
         """Matches a single new finding to N existing findings and then returns those matches"""
         # This code should match the logic used for deduplication out of the re-import feature.
         # See utils.py deduplicate_* functions
@@ -429,7 +428,7 @@ def process_matched_finding(
         self,
         unsaved_finding: Finding,
         existing_finding: Finding,
-    ) -> Tuple[Finding, bool]:
+    ) -> tuple[Finding, bool]:
         """
         Determine how to handle the an existing finding based on the status
         that is possesses at the time of reimport
@@ -453,7 +452,7 @@ def process_matched_special_status_finding(
         self,
         unsaved_finding: Finding,
         existing_finding: Finding,
-    ) -> Tuple[Finding, bool]:
+    ) -> tuple[Finding, bool]:
         """
         Determine if there is parity between statuses of the new and existing finding.
         If so, do not touch either finding, and move on to the next unsaved finding
@@ -488,7 +487,7 @@ def process_matched_mitigated_finding(
         self,
         unsaved_finding: Finding,
         existing_finding: Finding,
-    ) -> Tuple[Finding, bool]:
+    ) -> tuple[Finding, bool]:
         """
         Determine how mitigated the existing and new findings really are. We need
         to cover circumstances where mitigation timestamps are different, and
@@ -583,7 +582,7 @@ def process_matched_active_finding(
         self,
         unsaved_finding: Finding,
         existing_finding: Finding,
-    ) -> Tuple[Finding, bool]:
+    ) -> tuple[Finding, bool]:
         """
         The existing finding must be active here, so we need to compare it
         closely with the new finding coming in and determine how to proceed
@@ -734,7 +733,7 @@ def process_groups_for_all_findings(
     def process_results(
         self,
         **kwargs: dict,
-    ) -> Tuple[List[Finding], List[Finding], List[Finding], List[Finding]]:
+    ) -> tuple[list[Finding], list[Finding], list[Finding], list[Finding]]:
         """
         Determine how to to return the results based on whether the process was
         ran asynchronous or not
diff --git a/dojo/importers/endpoint_manager.py b/dojo/importers/endpoint_manager.py
index 6686584da3c..625e3cb8073 100644
--- a/dojo/importers/endpoint_manager.py
+++ b/dojo/importers/endpoint_manager.py
@@ -1,5 +1,4 @@
 import logging
-from typing import List
 
 from django.conf import settings
 from django.core.exceptions import MultipleObjectsReturned, ValidationError
@@ -25,7 +24,7 @@ class EndpointManager:
     def add_endpoints_to_unsaved_finding(
         self,
         finding: Finding,
-        endpoints: List[Endpoint],
+        endpoints: list[Endpoint],
         **kwargs: dict,
     ) -> None:
         """Creates Endpoint objects for a single finding and creates the link via the endpoint status"""
@@ -61,7 +60,7 @@ def add_endpoints_to_unsaved_finding(
     @app.task()
     def mitigate_endpoint_status(
         self,
-        endpoint_status_list: List[Endpoint_Status],
+        endpoint_status_list: list[Endpoint_Status],
         user: Dojo_User,
         **kwargs: dict,
     ) -> None:
@@ -81,7 +80,7 @@ def mitigate_endpoint_status(
     @app.task()
     def reactivate_endpoint_status(
         self,
-        endpoint_status_list: List[Endpoint_Status],
+        endpoint_status_list: list[Endpoint_Status],
         **kwargs: dict,
     ) -> None:
         """Reactivate all endpoint status objects that are supplied"""
@@ -98,9 +97,9 @@ def reactivate_endpoint_status(
 
     def chunk_endpoints(
         self,
-        endpoint_list: List[Endpoint],
+        endpoint_list: list[Endpoint],
         chunk_size: int = settings.ASYNC_FINDING_IMPORT_CHUNK_SIZE,
-    ) -> List[List[Endpoint]]:
+    ) -> list[list[Endpoint]]:
         """
         Split a single large list into a list of lists of size `chunk_size`.
         For Example
@@ -117,7 +116,7 @@ def chunk_endpoints(
     def chunk_endpoints_and_disperse(
         self,
         finding: Finding,
-        endpoints: List[Endpoint],
+        endpoints: list[Endpoint],
         **kwargs: dict,
     ) -> None:
         """
@@ -141,7 +140,7 @@ def chunk_endpoints_and_disperse(
 
     def clean_unsaved_endpoints(
         self,
-        endpoints: List[Endpoint],
+        endpoints: list[Endpoint],
     ) -> None:
         """
         Clean endpoints that are supplied. For any endpoints that fail this validation
@@ -156,7 +155,7 @@ def clean_unsaved_endpoints(
 
     def chunk_endpoints_and_reactivate(
         self,
-        endpoint_status_list: List[Endpoint_Status],
+        endpoint_status_list: list[Endpoint_Status],
         **kwargs: dict,
     ) -> None:
         """
@@ -180,7 +179,7 @@ def chunk_endpoints_and_reactivate(
 
     def chunk_endpoints_and_mitigate(
         self,
-        endpoint_status_list: List[Endpoint_Status],
+        endpoint_status_list: list[Endpoint_Status],
         user: Dojo_User,
         **kwargs: dict,
     ) -> None:
diff --git a/dojo/importers/options.py b/dojo/importers/options.py
index 2431975856e..f458f2a4f36 100644
--- a/dojo/importers/options.py
+++ b/dojo/importers/options.py
@@ -1,8 +1,9 @@
 import logging
+from collections.abc import Callable
 from datetime import datetime
 from functools import wraps
 from pprint import pformat as pp
-from typing import Any, Callable, List, Optional
+from typing import Any
 
 from django.contrib.auth.models import User
 from django.db.models import Model
@@ -57,19 +58,19 @@ def load_base_options(
         self.do_not_reactivate: bool = self.validate_do_not_reactivate(*args, **kwargs)
         self.commit_hash: str = self.validate_commit_hash(*args, **kwargs)
         self.create_finding_groups_for_all_findings: bool = self.validate_create_finding_groups_for_all_findings(*args, **kwargs)
-        self.endpoints_to_add: List[Endpoint] | None = self.validate_endpoints_to_add(*args, **kwargs)
+        self.endpoints_to_add: list[Endpoint] | None = self.validate_endpoints_to_add(*args, **kwargs)
         self.engagement: Engagement | None = self.validate_engagement(*args, **kwargs)
         self.environment: Development_Environment | None = self.validate_environment(*args, **kwargs)
         self.group_by: str = self.validate_group_by(*args, **kwargs)
         self.import_type: str = self.validate_import_type(*args, **kwargs)
         self.lead: Dojo_User | None = self.validate_lead(*args, **kwargs)
         self.minimum_severity: str = self.validate_minimum_severity(*args, **kwargs)
-        self.parsed_findings: List[Finding] | None = self.validate_parsed_findings(*args, **kwargs)
+        self.parsed_findings: list[Finding] | None = self.validate_parsed_findings(*args, **kwargs)
         self.push_to_jira: bool = self.validate_push_to_jira(*args, **kwargs)
         self.scan_date: datetime = self.validate_scan_date(*args, **kwargs)
         self.scan_type: str = self.validate_scan_type(*args, **kwargs)
         self.service: str = self.validate_service(*args, **kwargs)
-        self.tags: List[str] = self.validate_tags(*args, **kwargs)
+        self.tags: list[str] = self.validate_tags(*args, **kwargs)
         self.test: Test | None = self.validate_test(*args, **kwargs)
         self.user: Dojo_User | None = self.validate_user(*args, **kwargs)
         self.test_title: str = self.validate_test_title(*args, **kwargs)
@@ -88,7 +89,7 @@ def load_additional_options(
 
     def log_translation(
         self,
-        header_message: Optional[str] = None,
+        header_message: str | None = None,
     ):
         if header_message is not None:
             logger.debug(header_message)
@@ -181,7 +182,7 @@ def set_dict_fields(
     def validate(
         self,
         field_name: str,
-        expected_types: List[Callable] = [],
+        expected_types: list[Callable] = [],
         *,
         required: bool = False,
         default: Any = None,
diff --git a/dojo/metrics/utils.py b/dojo/metrics/utils.py
index 191c454b6e9..a6b947d2b97 100644
--- a/dojo/metrics/utils.py
+++ b/dojo/metrics/utils.py
@@ -1,9 +1,10 @@
 import operator
+from collections.abc import Callable
 from datetime import date, datetime, timedelta
 from enum import Enum
 from functools import partial
-from typing import Any, Callable, NamedTuple, Type, TypeVar, Union
+from typing import Any, NamedTuple, TypeVar
 
 from dateutil.relativedelta import relativedelta
 from django.contrib import messages
@@ -34,7 +35,7 @@
 )
 
 
-def get_metrics_finding_filter_class() -> Type[Union[MetricsFindingFilter, MetricsFindingFilterWithoutObjectLookups]]:
+def get_metrics_finding_filter_class() -> type[MetricsFindingFilter | MetricsFindingFilterWithoutObjectLookups]:
     if get_system_setting("filter_string_matching", False):
         return MetricsFindingFilterWithoutObjectLookups
     return MetricsFindingFilter
@@ -257,7 +258,7 @@ class _MetricsPeriodEntry(NamedTuple):
     """
 
     datetime_name: str
-    db_method: Union[TruncWeek, TruncMonth]
+    db_method: TruncWeek | TruncMonth
 
 
 class MetricsPeriod(_MetricsPeriodEntry, Enum):
@@ -346,7 +347,7 @@ def severity_count(
     queryset: MetricsQuerySet,
     method: str,
     expression: str,
-) -> Union[MetricsQuerySet, dict[str, int]]:
+) -> MetricsQuerySet | dict[str, int]:
     """
     Aggregates counts by severity for the given queryset.
 
@@ -393,7 +394,7 @@ def identify_view(
 
 
 def js_epoch(
-    d: Union[date, datetime],
+    d: date | datetime,
 ) -> int:
     """
     Converts a date/datetime object to a JavaScript epoch time (for use in FE charts)
diff --git a/dojo/models.py b/dojo/models.py
index b34691b7103..aec1549d49d 100644
--- a/dojo/models.py
+++ b/dojo/models.py
@@ -6,7 +6,6 @@
 import re
 import warnings
 from datetime import datetime
-from typing import Dict, Optional, Set
 from uuid import uuid4
 
 import hyperlink
@@ -3409,7 +3408,7 @@ def severity(self):
 
     @cached_property
     def components(self):
-        components: Dict[str, Set[Optional[str]]] = {}
+        components: dict[str, set[str | None]] = {}
         for finding in self.findings.all():
             if finding.component_name is not None:
                 components.setdefault(finding.component_name, set()).add(finding.component_version)
diff --git a/dojo/remote_user.py b/dojo/remote_user.py
index 764af4e548b..a60fe52c899 100644
--- a/dojo/remote_user.py
+++ b/dojo/remote_user.py
@@ -100,8 +100,7 @@ def get_security_definition(self, auto_schema):
             return {}
 
         header_name = settings.AUTH_REMOTEUSER_USERNAME_HEADER
-        if header_name.startswith("HTTP_"):
-            header_name = header_name[5:]
+        header_name = header_name.removeprefix("HTTP_")
         header_name = header_name.replace("_", "-").capitalize()
 
         return {
url.startswith("url="): - url = url[4:] + url = url.removeprefix("url=") views = ["all", "open", "inactive", "verified", "closed", "accepted", "out_of_scope", @@ -871,8 +869,7 @@ def get(self, request): for endpoint in finding.endpoints.all(): num_endpoints += 1 endpoint_value += f"{str(endpoint)}; " - if endpoint_value.endswith("; "): - endpoint_value = endpoint_value[:-2] + endpoint_value = endpoint_value.removesuffix("; ") if len(endpoint_value) > EXCEL_CHAR_LIMIT: endpoint_value = endpoint_value[:EXCEL_CHAR_LIMIT - 3] + "..." fields.append(endpoint_value) @@ -887,8 +884,7 @@ def get(self, request): vulnerability_ids_value += f"{str(vulnerability_id)}; " if finding.cve and vulnerability_ids_value.find(finding.cve) < 0: vulnerability_ids_value += finding.cve - if vulnerability_ids_value.endswith("; "): - vulnerability_ids_value = vulnerability_ids_value[:-2] + vulnerability_ids_value = vulnerability_ids_value.removesuffix("; ") fields.append(vulnerability_ids_value) # Tags tags_value = "" @@ -899,8 +895,7 @@ def get(self, request): tags_value += "..." break tags_value += f"{str(tag)}; " - if tags_value.endswith("; "): - tags_value = tags_value[:-2] + tags_value = tags_value.removesuffix("; ") fields.append(tags_value) self.fields = fields @@ -1021,8 +1016,7 @@ def get(self, request): for endpoint in finding.endpoints.all(): num_endpoints += 1 endpoint_value += f"{str(endpoint)}; \n" - if endpoint_value.endswith("; \n"): - endpoint_value = endpoint_value[:-3] + endpoint_value = endpoint_value.removesuffix("; \n") if len(endpoint_value) > EXCEL_CHAR_LIMIT: endpoint_value = endpoint_value[:EXCEL_CHAR_LIMIT - 3] + "..." worksheet.cell(row=row_num, column=col_num, value=endpoint_value) @@ -1038,16 +1032,14 @@ def get(self, request): vulnerability_ids_value += f"{str(vulnerability_id)}; \n" if finding.cve and vulnerability_ids_value.find(finding.cve) < 0: vulnerability_ids_value += finding.cve - if vulnerability_ids_value.endswith("; \n"): - vulnerability_ids_value = vulnerability_ids_value[:-3] + vulnerability_ids_value = vulnerability_ids_value.removesuffix("; \n") worksheet.cell(row=row_num, column=col_num, value=vulnerability_ids_value) col_num += 1 # tags tags_value = "" for tag in finding.tags.all(): tags_value += f"{str(tag)}; \n" - if tags_value.endswith("; \n"): - tags_value = tags_value[:-3] + tags_value = tags_value.removesuffix("; \n") worksheet.cell(row=row_num, column=col_num, value=tags_value) col_num += 1 self.col_num = col_num diff --git a/dojo/risk_acceptance/api.py b/dojo/risk_acceptance/api.py index 4fc89a32fe0..2fdaadf0afb 100644 --- a/dojo/risk_acceptance/api.py +++ b/dojo/risk_acceptance/api.py @@ -1,5 +1,5 @@ from abc import ABC, abstractmethod -from typing import List, NamedTuple +from typing import NamedTuple from django.db.models import QuerySet from django.utils import timezone @@ -81,7 +81,7 @@ def accept_risks(self, request): return Response(status=201, data=result.data) -def _accept_risks(accepted_risks: List[AcceptedRisk], base_findings: QuerySet, owner: User): +def _accept_risks(accepted_risks: list[AcceptedRisk], base_findings: QuerySet, owner: User): accepted = [] for risk in accepted_risks: vulnerability_ids = Vulnerability_Id.objects \ diff --git a/dojo/system_settings/views.py b/dojo/system_settings/views.py index 4c952d57a0f..584fa547d32 100644 --- a/dojo/system_settings/views.py +++ b/dojo/system_settings/views.py @@ -1,5 +1,4 @@ import logging -from typing import Tuple from django.conf import settings from django.contrib import messages @@ -59,7 +58,7 
@@ def validate_form( self, request: HttpRequest, context: dict, - ) -> Tuple[HttpRequest, bool]: + ) -> tuple[HttpRequest, bool]: if context["form"].is_valid(): if (context["form"].cleaned_data["default_group"] is None and context["form"].cleaned_data["default_group_role"] is not None) or \ (context["form"].cleaned_data["default_group"] is not None and context["form"].cleaned_data["default_group_role"] is None): diff --git a/dojo/test/views.py b/dojo/test/views.py index 96d3a58c1ed..76b0bcd2aa5 100644 --- a/dojo/test/views.py +++ b/dojo/test/views.py @@ -4,7 +4,6 @@ import operator from datetime import datetime from functools import reduce -from typing import Tuple from django.contrib import messages from django.contrib.admin.utils import NestedObjects @@ -826,7 +825,7 @@ def get_jira_form( self, request: HttpRequest, test: Test, - ) -> Tuple[JIRAImportScanForm | None, bool]: + ) -> tuple[JIRAImportScanForm | None, bool]: """Returns a JiraImportScanForm if jira is enabled""" jira_form = None push_all_jira_issues = False @@ -853,7 +852,7 @@ def handle_request( self, request: HttpRequest, test_id: int, - ) -> Tuple[HttpRequest, dict]: + ) -> tuple[HttpRequest, dict]: """ Process the common behaviors between request types, and then return the request and context dict back to be rendered diff --git a/dojo/tools/appcheck_web_application_scanner/engines/appcheck.py b/dojo/tools/appcheck_web_application_scanner/engines/appcheck.py index 6d10485e4d1..b34931d0f8f 100644 --- a/dojo/tools/appcheck_web_application_scanner/engines/appcheck.py +++ b/dojo/tools/appcheck_web_application_scanner/engines/appcheck.py @@ -1,5 +1,4 @@ import re -from typing import Union from dojo.models import Finding from dojo.tools.appcheck_web_application_scanner.engines.base import BaseEngineParser @@ -29,7 +28,7 @@ def extract_request_response(self, finding: Finding, value: dict[str, [str]]) -> value.pop("Messages") finding.unsaved_request, finding.unsaved_response = (d.strip() for d in rr_details[0]) - def parse_details(self, finding: Finding, value: dict[str, Union[str, dict[str, list[str]]]]) -> None: + def parse_details(self, finding: Finding, value: dict[str, str | dict[str, list[str]]]) -> None: self.extract_request_response(finding, value) # super's version adds everything else to the description field return super().parse_details(finding, value) diff --git a/dojo/tools/appcheck_web_application_scanner/engines/base.py b/dojo/tools/appcheck_web_application_scanner/engines/base.py index 782c047443a..e07433c2946 100644 --- a/dojo/tools/appcheck_web_application_scanner/engines/base.py +++ b/dojo/tools/appcheck_web_application_scanner/engines/base.py @@ -1,6 +1,6 @@ import re from itertools import starmap -from typing import Any, Optional, Tuple, Union +from typing import Any import cvss.parser import dateutil.parser @@ -193,7 +193,7 @@ def __init__(self): ##### # For parsing the initial finding datetime to a date format pleasing to Finding ##### - def get_date(self, value: str) -> Optional[str]: + def get_date(self, value: str) -> str | None: try: return str(dateutil.parser.parse(value).date()) except dateutil.parser.ParserError: @@ -229,7 +229,7 @@ def parse_status(self, finding: Finding, value: str) -> None: ##### # For parsing component data ##### - def parse_cpe(self, cpe_str: str) -> (Optional[str], Optional[str]): + def parse_cpe(self, cpe_str: str) -> (str | None, str | None): if not cpe_str: return None, None cpe_obj = CPE(cpe_str) @@ -257,12 +257,12 @@ def append_description(self, finding: Finding, addendum: 
dict[str, str]) -> None def parse_notes(self, finding: Finding, value: str) -> None: self.append_description(finding, {"Notes": value}) - def extract_details(self, value: Union[str, dict[str, Union[str, dict[str, list[str]]]]]) -> dict[str, str]: + def extract_details(self, value: str | dict[str, str | dict[str, list[str]]]) -> dict[str, str]: if isinstance(value, dict): return {k: v for k, v in value.items() if k != "_meta"} return {"Details": str(value)} - def parse_details(self, finding: Finding, value: dict[str, Union[str, dict[str, list[str]]]]) -> None: + def parse_details(self, finding: Finding, value: dict[str, str | dict[str, list[str]]]) -> None: self.append_description(finding, self.extract_details(value)) ##### @@ -271,7 +271,7 @@ def parse_details(self, finding: Finding, value: dict[str, Union[str, dict[str, def get_host(self, item: dict[str, Any]) -> str: return item.get("url") or item.get("host") or item.get("ipv4_address") or None - def parse_port(self, item: Any) -> Optional[int]: + def parse_port(self, item: Any) -> int | None: try: int_val = int(item) if 0 < int_val <= 65535: @@ -280,10 +280,10 @@ def parse_port(self, item: Any) -> Optional[int]: pass return None - def get_port(self, item: dict[str, Any]) -> Optional[int]: + def get_port(self, item: dict[str, Any]) -> int | None: return self.parse_port(item.get("port")) - def construct_endpoint(self, host: str, port: Optional[int]) -> Endpoint: + def construct_endpoint(self, host: str, port: int | None) -> Endpoint: endpoint = Endpoint.from_uri(host) if endpoint.host: if port: @@ -306,7 +306,7 @@ def set_endpoints(self, finding: Finding, item: Any) -> None: ##### # For severity (extracted from various cvss vectors) ##### - def parse_cvss_vector(self, value: str) -> Optional[str]: + def parse_cvss_vector(self, value: str) -> str | None: # CVSS4 vectors don't parse with the handy-danty parse method :( try: if (severity := cvss.CVSS4(value).severity) in Finding.SEVERITIES: @@ -347,7 +347,7 @@ def get_engine_fields(self) -> dict[str, FieldType]: **BaseEngineParser._COMMON_FIELDS_MAP, **self._ENGINE_FIELDS_MAP} - def get_finding_key(self, finding: Finding) -> Tuple: + def get_finding_key(self, finding: Finding) -> tuple: return ( finding.severity, finding.title, @@ -355,7 +355,7 @@ def get_finding_key(self, finding: Finding) -> Tuple: self.SCANNING_ENGINE, ) - def parse_finding(self, item: dict[str, Any]) -> Tuple[Finding, Tuple]: + def parse_finding(self, item: dict[str, Any]) -> tuple[Finding, tuple]: finding = Finding() for field, field_handler in self.get_engine_fields().items(): # Check first whether the field even exists on this item entry; if not, skip it diff --git a/dojo/tools/appcheck_web_application_scanner/engines/nmap.py b/dojo/tools/appcheck_web_application_scanner/engines/nmap.py index 9252bdcb53d..3fba10e455d 100644 --- a/dojo/tools/appcheck_web_application_scanner/engines/nmap.py +++ b/dojo/tools/appcheck_web_application_scanner/engines/nmap.py @@ -1,4 +1,4 @@ -from typing import Any, Union +from typing import Any from dojo.models import Endpoint from dojo.tools.appcheck_web_application_scanner.engines.base import BaseEngineParser @@ -18,7 +18,7 @@ class NmapScanningEngineParser(BaseEngineParser): def is_port_table_entry(self, entry) -> bool: return len(entry) > 0 and self.parse_port(entry[0]) - def get_ports(self, item) -> Union[list[int], list[None]]: + def get_ports(self, item) -> list[int] | list[None]: meta = item.get("meta") if not isinstance(meta, dict): meta = {} diff --git 
a/dojo/tools/blackduck/importer.py b/dojo/tools/blackduck/importer.py index 83175371705..51caa180765 100644 --- a/dojo/tools/blackduck/importer.py +++ b/dojo/tools/blackduck/importer.py @@ -4,8 +4,8 @@ import zipfile from abc import ABC, abstractmethod from collections import defaultdict +from collections.abc import Iterable from pathlib import Path -from typing import Iterable from .model import BlackduckFinding diff --git a/dojo/tools/blackduck_binary_analysis/importer.py b/dojo/tools/blackduck_binary_analysis/importer.py index 4d381aae058..0ada8cca26b 100644 --- a/dojo/tools/blackduck_binary_analysis/importer.py +++ b/dojo/tools/blackduck_binary_analysis/importer.py @@ -1,8 +1,8 @@ import csv from abc import ABC, abstractmethod from collections import defaultdict +from collections.abc import Iterable from pathlib import Path -from typing import Iterable from .model import BlackduckBinaryAnalysisFinding diff --git a/dojo/tools/checkmarx_one/parser.py b/dojo/tools/checkmarx_one/parser.py index a48023e5d6f..f8896c0b271 100644 --- a/dojo/tools/checkmarx_one/parser.py +++ b/dojo/tools/checkmarx_one/parser.py @@ -1,7 +1,6 @@ import datetime import json import re -from typing import List from dateutil import parser from django.conf import settings @@ -40,7 +39,7 @@ def parse_vulnerabilities_from_scan_list( self, test: Test, data: dict, - ) -> List[Finding]: + ) -> list[Finding]: findings = [] cwe_store = data.get("vulnerabilityDetails", []) # SAST @@ -59,7 +58,7 @@ def parse_iac_vulnerabilities( test: Test, results: list, cwe_store: list, - ) -> List[Finding]: + ) -> list[Finding]: findings = [] for technology in results: # Set the name aside for use in the title @@ -109,7 +108,7 @@ def parse_sca_vulnerabilities( test: Test, results: list, cwe_store: list, - ) -> List[Finding]: + ) -> list[Finding]: # Not implemented yet return [] @@ -118,7 +117,7 @@ def parse_sast_vulnerabilities( test: Test, results: list, cwe_store: list, - ) -> List[Finding]: + ) -> list[Finding]: def get_cwe_store_entry(cwe_store: list, cwe: int) -> dict: # Quick base case if cwe is None: @@ -197,7 +196,7 @@ def parse_vulnerabilities( self, test: Test, results: list, - ) -> List[Finding]: + ) -> list[Finding]: findings = [] for result in results: id = result.get("identifiers")[0].get("value") @@ -233,7 +232,7 @@ def parse_results( self, test: Test, results: list, - ) -> List[Finding]: + ) -> list[Finding]: findings = [] for vulnerability in results: result_type = vulnerability.get("type") diff --git a/dojo/tools/jfrog_xray_on_demand_binary_scan/parser.py b/dojo/tools/jfrog_xray_on_demand_binary_scan/parser.py index 2fa4dd3c290..7516cfe211f 100644 --- a/dojo/tools/jfrog_xray_on_demand_binary_scan/parser.py +++ b/dojo/tools/jfrog_xray_on_demand_binary_scan/parser.py @@ -136,8 +136,7 @@ def get_vuln_id_from_tool(vulnerability): def clean_title(title): - if title.startswith("Issue summary: "): - title = title[len("Issue summary: "):] + title = title.removeprefix("Issue summary: ") if "\n" in title: title = title[:title.index("\n")] return title diff --git a/dojo/tools/kics/parser.py b/dojo/tools/kics/parser.py index 5eb30227b5a..31645ab7da3 100644 --- a/dojo/tools/kics/parser.py +++ b/dojo/tools/kics/parser.py @@ -53,8 +53,7 @@ def get_findings(self, filename, test): description += f"**Issue type:** {issue_type}\n" if actual_value: description += f"**Actual value:** {actual_value}\n" - if description.endswith("\n"): - description = description[:-1] + description = description.removesuffix("\n") dupe_key = hashlib.sha256( ( 
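The removeprefix/removesuffix rewrites in the two hunks above are behavior-preserving: str.removeprefix and str.removesuffix have been available since Python 3.9, strip the affix only when it is actually present, and are no-ops otherwise. A minimal sketch of the equivalence (not part of the patch; the clean_title_old/clean_title_new names are illustrative only, mirroring the jfrog_xray_on_demand_binary_scan helper):

def clean_title_old(title: str) -> str:
    # Guard-and-slice idiom removed by the patch.
    if title.startswith("Issue summary: "):
        title = title[len("Issue summary: "):]
    return title

def clean_title_new(title: str) -> str:
    # Python 3.9+ equivalent; no-op when the prefix is absent.
    return title.removeprefix("Issue summary: ")

assert clean_title_old("Issue summary: XSS") == clean_title_new("Issue summary: XSS") == "XSS"
assert clean_title_old("No prefix") == clean_title_new("No prefix") == "No prefix"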
diff --git a/dojo/tools/sarif/parser.py b/dojo/tools/sarif/parser.py index e6c68388841..ae9736e9a0f 100644 --- a/dojo/tools/sarif/parser.py +++ b/dojo/tools/sarif/parser.py @@ -297,10 +297,7 @@ def get_description(result, rule): if len(result.get("codeFlows", [])) > 0: description += get_codeFlowsDescription(result["codeFlows"]) - if description.endswith("\n"): - description = description[:-1] - - return description + return description.removesuffix("\n") def get_references(rule): diff --git a/dojo/tools/tenable/xml_format.py b/dojo/tools/tenable/xml_format.py index 7094e82d626..438ce2220e5 100644 --- a/dojo/tools/tenable/xml_format.py +++ b/dojo/tools/tenable/xml_format.py @@ -51,7 +51,7 @@ def safely_get_element_text(self, element): return None if isinstance(element_text, str): return element_text if len(element_text) > 0 else None - if isinstance(element_text, (int, float)): + if isinstance(element_text, int | float): return element_text or None return None diff --git a/dojo/tools/veracode/json_parser.py b/dojo/tools/veracode/json_parser.py index 6584d213821..55ea07602d3 100644 --- a/dojo/tools/veracode/json_parser.py +++ b/dojo/tools/veracode/json_parser.py @@ -130,7 +130,7 @@ def create_finding_from_details(self, finding_details, scan_type, policy_violate finding.cvssv3 = CVSS3(str(uncleaned_cvss)).clean_vector(output_prefix=True) elif not uncleaned_cvss.startswith("CVSS"): finding.cvssv3 = CVSS3(f"CVSS:3.1/{str(uncleaned_cvss)}").clean_vector(output_prefix=True) - elif isinstance(uncleaned_cvss, (float, int)): + elif isinstance(uncleaned_cvss, float | int): finding.cvssv3_score = float(uncleaned_cvss) # Fill in extra info based on the scan type if scan_type == "STATIC": diff --git a/dojo/tools/whitehat_sentinel/parser.py b/dojo/tools/whitehat_sentinel/parser.py index 325bd364283..c23d002cb84 100644 --- a/dojo/tools/whitehat_sentinel/parser.py +++ b/dojo/tools/whitehat_sentinel/parser.py @@ -3,7 +3,6 @@ import logging import re from datetime import datetime -from typing import List, Union from dojo.models import Endpoint, Finding @@ -55,7 +54,7 @@ def get_findings(self, file, test): def _convert_whitehat_severity_id_to_dojo_severity( self, whitehat_severity_id: int, - ) -> Union[str, None]: + ) -> str | None: """ Converts a WhiteHat Sentinel numerical severity to a DefectDojo severity. Args: @@ -165,8 +164,8 @@ def __remove_paragraph_tags(self, html_string): return re.sub(r"<p>|</p>", "", html_string) def _convert_attack_vectors_to_endpoints( - self, attack_vectors: List[dict], - ) -> List["Endpoint"]: + self, attack_vectors: list[dict], + ) -> list["Endpoint"]: """ Takes a list of Attack Vectors dictionaries from the WhiteHat vuln API and converts them to Defect Dojo Endpoints diff --git a/dojo/utils.py b/dojo/utils.py index 470d8607725..35ccac5aea2 100644 --- a/dojo/utils.py +++ b/dojo/utils.py @@ -7,9 +7,9 @@ import os import re from calendar import monthrange +from collections.abc import Callable from datetime import date, datetime, timedelta from math import pi, sqrt -from typing import Callable, Optional import bleach import crum @@ -2586,7 +2586,7 @@ def get_open_findings_burndown(product): return past_90_days -def get_custom_method(setting_name: str) -> Optional[Callable]: +def get_custom_method(setting_name: str) -> Callable | None: """ Attempts to load and return the method specified by fully-qualified name at the given setting. diff --git a/ruff.toml b/ruff.toml index 376a6d3cc7b..9a34bf6e005 100644 --- a/ruff.toml +++ b/ruff.toml @@ -1,3 +1,6 @@ +# Always generate Python 3.11-compatible code. +target-version = "py311" + # Same as Black. line-length = 120 diff --git a/unittests/test_bulk_risk_acceptance_api.py b/unittests/test_bulk_risk_acceptance_api.py index bdc87451d04..05bbe10e7a8 100644 --- a/unittests/test_bulk_risk_acceptance_api.py +++ b/unittests/test_bulk_risk_acceptance_api.py @@ -29,25 +29,25 @@ def setUpTestData(cls): cls.product = Product.objects.create(prod_type=cls.product_type, name="Flopper", description="Test product") Product_Type_Member.objects.create(product_type=cls.product_type, user=cls.user, role=Role.objects.get(id=Roles.Owner)) cls.product_2 = Product.objects.create(prod_type=cls.product_type, name="Flopper2", description="Test product2") - cls.engagement = Engagement.objects.create(product=cls.product, target_start=datetime.datetime(2000, 1, 1, tzinfo=datetime.timezone.utc), - target_end=datetime.datetime(2000, 2, 1, tzinfo=datetime.timezone.utc)) - cls.engagement_2a = Engagement.objects.create(product=cls.product_2, target_start=datetime.datetime(2000, 1, 1, tzinfo=datetime.timezone.utc), - target_end=datetime.datetime(2000, 2, 1, tzinfo=datetime.timezone.utc)) - cls.engagement_2b = Engagement.objects.create(product=cls.product_2, target_start=datetime.datetime(2000, 1, 1, tzinfo=datetime.timezone.utc), - target_end=datetime.datetime(2000, 2, 1, tzinfo=datetime.timezone.utc)) + cls.engagement = Engagement.objects.create(product=cls.product, target_start=datetime.datetime(2000, 1, 1, tzinfo=datetime.UTC), + target_end=datetime.datetime(2000, 2, 1, tzinfo=datetime.UTC)) + cls.engagement_2a = Engagement.objects.create(product=cls.product_2, target_start=datetime.datetime(2000, 1, 1, tzinfo=datetime.UTC), + target_end=datetime.datetime(2000, 2, 1, tzinfo=datetime.UTC)) + cls.engagement_2b = Engagement.objects.create(product=cls.product_2, target_start=datetime.datetime(2000, 1, 1, tzinfo=datetime.UTC), + target_end=datetime.datetime(2000, 2, 1, tzinfo=datetime.UTC)) cls.test_type = Test_Type.objects.create(name="Risk Acceptance Mock Scan", static_tool=True) cls.test_a = Test.objects.create(engagement=cls.engagement, test_type=cls.test_type, - target_start=datetime.datetime(2000, 1, 1, tzinfo=datetime.timezone.utc), target_end=datetime.datetime(2000, 2, 1, tzinfo=datetime.timezone.utc)) + target_start=datetime.datetime(2000, 1, 1, tzinfo=datetime.UTC), target_end=datetime.datetime(2000, 2, 1, tzinfo=datetime.UTC)) cls.test_b =
Test.objects.create(engagement=cls.engagement, test_type=cls.test_type, - target_start=datetime.datetime(2000, 1, 1, tzinfo=datetime.timezone.utc), target_end=datetime.datetime(2000, 2, 1, tzinfo=datetime.timezone.utc)) + target_start=datetime.datetime(2000, 1, 1, tzinfo=datetime.UTC), target_end=datetime.datetime(2000, 2, 1, tzinfo=datetime.UTC)) cls.test_c = Test.objects.create(engagement=cls.engagement, test_type=cls.test_type, - target_start=datetime.datetime(2000, 1, 1, tzinfo=datetime.timezone.utc), target_end=datetime.datetime(2000, 2, 1, tzinfo=datetime.timezone.utc)) + target_start=datetime.datetime(2000, 1, 1, tzinfo=datetime.UTC), target_end=datetime.datetime(2000, 2, 1, tzinfo=datetime.UTC)) cls.test_d = Test.objects.create(engagement=cls.engagement_2a, test_type=cls.test_type, - target_start=datetime.datetime(2000, 1, 1, tzinfo=datetime.timezone.utc), target_end=datetime.datetime(2000, 2, 1, tzinfo=datetime.timezone.utc)) + target_start=datetime.datetime(2000, 1, 1, tzinfo=datetime.UTC), target_end=datetime.datetime(2000, 2, 1, tzinfo=datetime.UTC)) cls.test_e = Test.objects.create(engagement=cls.engagement_2b, test_type=cls.test_type, - target_start=datetime.datetime(2000, 1, 1, tzinfo=datetime.timezone.utc), target_end=datetime.datetime(2000, 2, 1, tzinfo=datetime.timezone.utc)) + target_start=datetime.datetime(2000, 1, 1, tzinfo=datetime.UTC), target_end=datetime.datetime(2000, 2, 1, tzinfo=datetime.UTC)) def create_finding(test: Test, reporter: User, cve: str) -> Finding: return Finding(test=test, title=f"Finding {cve}", cve=cve, severity="High", verified=True, diff --git a/unittests/test_dashboard.py b/unittests/test_dashboard.py index 81d9000e40a..35e3eabbde6 100644 --- a/unittests/test_dashboard.py +++ b/unittests/test_dashboard.py @@ -1,5 +1,4 @@ from datetime import datetime, timedelta -from typing import List, Tuple from unittest.mock import patch from dateutil.relativedelta import relativedelta @@ -14,7 +13,7 @@ User = get_user_model() -def create(when: datetime, product_id: int, titles_and_severities: List[Tuple[str, str]]): +def create(when: datetime, product_id: int, titles_and_severities: list[tuple[str, str]]): with patch("django.db.models.fields.timezone.now") as mock_now: mock_now.return_value = when engagement = Engagement.objects.create(product_id=product_id, target_start=when.date(), target_end=when.date()) @@ -25,7 +24,7 @@ def create(when: datetime, product_id: int, titles_and_severities: List[Tuple[st ) -def create_with_duplicates(when: datetime, product_id: int, titles_and_severities: List[Tuple[str, str]]): +def create_with_duplicates(when: datetime, product_id: int, titles_and_severities: list[tuple[str, str]]): with patch("django.db.models.fields.timezone.now") as mock_now: mock_now.return_value = when engagement = Engagement.objects.create(product_id=product_id, target_start=when.date(), target_end=when.date()) diff --git a/unittests/test_finding_helper.py b/unittests/test_finding_helper.py index 7ff00889c09..8d3432864d9 100644 --- a/unittests/test_finding_helper.py +++ b/unittests/test_finding_helper.py @@ -96,7 +96,7 @@ def test_mark_old_active_as_mitigated(self, mock_can_edit, mock_tz): def test_mark_old_active_as_mitigated_custom_edit(self, mock_can_edit, mock_tz): mock_tz.return_value = frozen_datetime - custom_mitigated = datetime.datetime.now(datetime.timezone.utc) + custom_mitigated = datetime.datetime.now(datetime.UTC) with impersonate(self.user_1): test = Test.objects.last() @@ -118,7 +118,7 @@ def 
test_mark_old_active_as_mitigated_custom_edit(self, mock_can_edit, mock_tz): def test_update_old_mitigated_with_custom_edit(self, mock_can_edit, mock_tz): mock_tz.return_value = frozen_datetime - custom_mitigated = datetime.datetime.now(datetime.timezone.utc) + custom_mitigated = datetime.datetime.now(datetime.UTC) with impersonate(self.user_1): test = Test.objects.last() @@ -140,7 +140,7 @@ def test_update_old_mitigated_with_custom_edit(self, mock_can_edit, mock_tz): def test_update_old_mitigated_with_missing_data(self, mock_can_edit, mock_tz): mock_tz.return_value = frozen_datetime - custom_mitigated = datetime.datetime.now(datetime.timezone.utc) + custom_mitigated = datetime.datetime.now(datetime.UTC) with impersonate(self.user_1): test = Test.objects.last() diff --git a/unittests/test_flush_auditlog.py b/unittests/test_flush_auditlog.py index a75473664be..1c7f5ef08df 100644 --- a/unittests/test_flush_auditlog.py +++ b/unittests/test_flush_auditlog.py @@ -1,5 +1,5 @@ import logging -from datetime import date, datetime, timezone +from datetime import UTC, date, datetime from auditlog.models import LogEntry from dateutil.relativedelta import relativedelta @@ -33,8 +33,8 @@ def test_delete_all_entries(self): @override_settings(AUDITLOG_FLUSH_RETENTION_PERIOD=1) def test_delete_entries_with_retention_period(self): - entries_before = LogEntry.objects.filter(timestamp__date__lt=datetime.now(timezone.utc)).count() - two_weeks_ago = datetime.now(timezone.utc) - relativedelta(weeks=2) + entries_before = LogEntry.objects.filter(timestamp__date__lt=datetime.now(UTC)).count() + two_weeks_ago = datetime.now(UTC) - relativedelta(weeks=2) log_entry = LogEntry.objects.log_create( instance=Finding.objects.all()[0], timestamp=two_weeks_ago, @@ -44,6 +44,6 @@ def test_delete_entries_with_retention_period(self): log_entry.timestamp = two_weeks_ago log_entry.save() flush_auditlog() - entries_after = LogEntry.objects.filter(timestamp__date__lt=datetime.now(timezone.utc)).count() + entries_after = LogEntry.objects.filter(timestamp__date__lt=datetime.now(UTC)).count() # we have three old log entries in our testdata and added a new one self.assertEqual(entries_before - 3 + 1, entries_after) diff --git a/unittests/test_import_reimport.py b/unittests/test_import_reimport.py index 03cea9b0b0d..b0f7da906b6 100644 --- a/unittests/test_import_reimport.py +++ b/unittests/test_import_reimport.py @@ -1456,8 +1456,8 @@ def test_import_reimport_vulnerability_ids(self): engagement=test.engagement, test_type=test_type, scan_type=self.anchore_grype_scan_type, - target_start=datetime.datetime.now(datetime.timezone.utc), - target_end=datetime.datetime.now(datetime.timezone.utc), + target_start=datetime.datetime.now(datetime.UTC), + target_end=datetime.datetime.now(datetime.UTC), ) reimport_test.save() diff --git a/unittests/test_metrics_queries.py b/unittests/test_metrics_queries.py index 6bd54ff9e89..68e754b6d6d 100644 --- a/unittests/test_metrics_queries.py +++ b/unittests/test_metrics_queries.py @@ -1,6 +1,6 @@ """Tests for metrics database queries""" -from datetime import date, datetime, timezone +from datetime import UTC, date, datetime from unittest.mock import patch import pytz @@ -21,23 +21,23 @@ def add(*args, **kwargs): #### # Test Findings data #### -FINDING_1 = {"id": 4, "title": "High Impact Test Finding", "date": date(2018, 1, 1), "sla_start_date": None, "sla_expiration_date": None, "cwe": None, "cve": None, "epss_score": None, "epss_percentile": None, "cvssv3": None, "cvssv3_score": None, "url": None, 
"severity": "High", "description": "test finding", "mitigation": "test mitigation", "impact": "High", "steps_to_reproduce": None, "severity_justification": None, "references": "", "test_id": 3, "active": False, "verified": False, "false_p": False, "duplicate": True, "duplicate_finding_id": 2, "out_of_scope": False, "risk_accepted": False, "under_review": False, "last_status_update": None, "review_requested_by_id": 1, "under_defect_review": False, "defect_review_requested_by_id": 1, "is_mitigated": False, "thread_id": 11, "mitigated": None, "mitigated_by_id": None, "reporter_id": 1, "numerical_severity": "S0", "last_reviewed": None, "last_reviewed_by_id": None, "param": None, "payload": None, "hash_code": "5d368a051fdec959e08315a32ef633ba5711bed6e8e75319ddee2cab4d4608c7", "line": None, "file_path": "", "component_name": None, "component_version": None, "static_finding": False, "dynamic_finding": False, "created": datetime(2017, 12, 1, 0, 0, tzinfo=timezone.utc), "scanner_confidence": None, "sonarqube_issue_id": None, "unique_id_from_tool": None, "vuln_id_from_tool": None, "sast_source_object": None, "sast_sink_object": None, "sast_source_line": None, "sast_source_file_path": None, "nb_occurences": None, "publish_date": None, "service": None, "planned_remediation_date": None, "planned_remediation_version": None, "effort_for_fixing": None, "test__engagement__product__prod_type__member": False, "test__engagement__product__member": True, "test__engagement__product__prod_type__authorized_group": False, "test__engagement__product__authorized_group": False} -FINDING_2 = {"id": 5, "title": "High Impact Test Finding", "date": date(2018, 1, 1), "sla_start_date": None, "sla_expiration_date": None, "cwe": None, "cve": None, "epss_score": None, "epss_percentile": None, "cvssv3": None, "cvssv3_score": None, "url": None, "severity": "High", "description": "test finding", "mitigation": "test mitigation", "impact": "High", "steps_to_reproduce": None, "severity_justification": None, "references": "", "test_id": 3, "active": False, "verified": False, "false_p": False, "duplicate": True, "duplicate_finding_id": 2, "out_of_scope": False, "risk_accepted": False, "under_review": False, "last_status_update": None, "review_requested_by_id": 1, "under_defect_review": False, "defect_review_requested_by_id": 1, "is_mitigated": False, "thread_id": 11, "mitigated": None, "mitigated_by_id": None, "reporter_id": 1, "numerical_severity": "S0", "last_reviewed": None, "last_reviewed_by_id": None, "param": None, "payload": None, "hash_code": "5d368a051fdec959e08315a32ef633ba5711bed6e8e75319ddee2cab4d4608c7", "line": None, "file_path": "", "component_name": None, "component_version": None, "static_finding": False, "dynamic_finding": False, "created": datetime(2017, 12, 1, 0, 0, tzinfo=timezone.utc), "scanner_confidence": None, "sonarqube_issue_id": None, "unique_id_from_tool": None, "vuln_id_from_tool": None, "sast_source_object": None, "sast_sink_object": None, "sast_source_line": None, "sast_source_file_path": None, "nb_occurences": None, "publish_date": None, "service": None, "planned_remediation_date": None, "planned_remediation_version": None, "effort_for_fixing": None, "test__engagement__product__prod_type__member": False, "test__engagement__product__member": True, "test__engagement__product__prod_type__authorized_group": False, "test__engagement__product__authorized_group": False} -FINDING_3 = {"id": 6, "title": "High Impact Test Finding", "date": date(2018, 1, 1), "sla_start_date": None, "sla_expiration_date": None, 
"cwe": None, "cve": None, "epss_score": None, "epss_percentile": None, "cvssv3": None, "cvssv3_score": None, "url": None, "severity": "High", "description": "test finding", "mitigation": "test mitigation", "impact": "High", "steps_to_reproduce": None, "severity_justification": None, "references": "", "test_id": 3, "active": False, "verified": False, "false_p": False, "duplicate": True, "duplicate_finding_id": 2, "out_of_scope": False, "risk_accepted": False, "under_review": False, "last_status_update": None, "review_requested_by_id": 1, "under_defect_review": False, "defect_review_requested_by_id": 1, "is_mitigated": False, "thread_id": 11, "mitigated": None, "mitigated_by_id": None, "reporter_id": 1, "numerical_severity": "S0", "last_reviewed": None, "last_reviewed_by_id": None, "param": None, "payload": None, "hash_code": "5d368a051fdec959e08315a32ef633ba5711bed6e8e75319ddee2cab4d4608c7", "line": None, "file_path": "", "component_name": None, "component_version": None, "static_finding": False, "dynamic_finding": False, "created": datetime(2017, 12, 1, 0, 0, tzinfo=timezone.utc), "scanner_confidence": None, "sonarqube_issue_id": None, "unique_id_from_tool": None, "vuln_id_from_tool": None, "sast_source_object": None, "sast_sink_object": None, "sast_source_line": None, "sast_source_file_path": None, "nb_occurences": None, "publish_date": None, "service": None, "planned_remediation_date": None, "planned_remediation_version": None, "effort_for_fixing": None, "test__engagement__product__prod_type__member": False, "test__engagement__product__member": True, "test__engagement__product__prod_type__authorized_group": False, "test__engagement__product__authorized_group": False} -FINDING_4 = {"id": 7, "title": "DUMMY FINDING", "date": date(2017, 12, 31), "sla_start_date": None, "sla_expiration_date": None, "cwe": 1, "cve": None, "epss_score": None, "epss_percentile": None, "cvssv3": None, "cvssv3_score": None, "url": "http://www.example.com", "severity": "High", "description": "TEST finding", "mitigation": "MITIGATION", "impact": "High", "steps_to_reproduce": None, "severity_justification": None, "references": "", "test_id": 3, "active": False, "verified": False, "false_p": False, "duplicate": False, "duplicate_finding_id": None, "out_of_scope": False, "risk_accepted": False, "under_review": False, "last_status_update": None, "review_requested_by_id": 2, "under_defect_review": False, "defect_review_requested_by_id": 2, "is_mitigated": False, "thread_id": 1, "mitigated": None, "mitigated_by_id": None, "reporter_id": 2, "numerical_severity": "S0", "last_reviewed": None, "last_reviewed_by_id": None, "param": None, "payload": None, "hash_code": "c89d25e445b088ba339908f68e15e3177b78d22f3039d1bfea51c4be251bf4e0", "line": 100, "file_path": "", "component_name": None, "component_version": None, "static_finding": False, "dynamic_finding": False, "created": datetime(2017, 12, 1, 0, 0, tzinfo=timezone.utc), "scanner_confidence": None, "sonarqube_issue_id": None, "unique_id_from_tool": None, "vuln_id_from_tool": None, "sast_source_object": None, "sast_sink_object": None, "sast_source_line": None, "sast_source_file_path": None, "nb_occurences": None, "publish_date": None, "service": None, "planned_remediation_date": None, "planned_remediation_version": None, "effort_for_fixing": None, "test__engagement__product__prod_type__member": False, "test__engagement__product__member": True, "test__engagement__product__prod_type__authorized_group": False, "test__engagement__product__authorized_group": False} -FINDING_5 = 
{"id": 24, "title": "Low Impact Test Finding", "date": date(2018, 1, 1), "sla_start_date": None, "sla_expiration_date": None, "cwe": None, "cve": None, "epss_score": None, "epss_percentile": None, "cvssv3": None, "cvssv3_score": None, "url": None, "severity": "Low", "description": "test finding", "mitigation": "test mitigation", "impact": "Low", "steps_to_reproduce": None, "severity_justification": None, "references": "", "test_id": 33, "active": False, "verified": False, "false_p": False, "duplicate": True, "duplicate_finding_id": 22, "out_of_scope": False, "risk_accepted": False, "under_review": False, "last_status_update": None, "review_requested_by_id": 1, "under_defect_review": False, "defect_review_requested_by_id": 1, "is_mitigated": False, "thread_id": 11, "mitigated": None, "mitigated_by_id": None, "reporter_id": 1, "numerical_severity": "S0", "last_reviewed": None, "last_reviewed_by_id": None, "param": None, "payload": None, "hash_code": "9aca00affd340c4da02c934e7e3106a45c6ad0911da479daae421b3b28a2c1aa", "line": 123, "file_path": "/dev/urandom", "component_name": None, "component_version": None, "static_finding": False, "dynamic_finding": False, "created": datetime(2017, 12, 1, 0, 0, tzinfo=timezone.utc), "scanner_confidence": None, "sonarqube_issue_id": None, "unique_id_from_tool": None, "vuln_id_from_tool": None, "sast_source_object": None, "sast_sink_object": None, "sast_source_line": None, "sast_source_file_path": None, "nb_occurences": None, "publish_date": None, "service": None, "planned_remediation_date": None, "planned_remediation_version": None, "effort_for_fixing": None, "test__engagement__product__prod_type__member": False, "test__engagement__product__member": True, "test__engagement__product__prod_type__authorized_group": False, "test__engagement__product__authorized_group": False} -FINDING_6 = {"id": 125, "title": "Low Impact Test Finding", "date": date(2018, 1, 1), "sla_start_date": None, "sla_expiration_date": None, "cwe": None, "cve": None, "epss_score": None, "epss_percentile": None, "cvssv3": None, "cvssv3_score": None, "url": None, "severity": "Low", "description": "test finding", "mitigation": "test mitigation", "impact": "Low", "steps_to_reproduce": None, "severity_justification": None, "references": "", "test_id": 55, "active": False, "verified": False, "false_p": False, "duplicate": True, "duplicate_finding_id": None, "out_of_scope": False, "risk_accepted": False, "under_review": False, "last_status_update": None, "review_requested_by_id": 1, "under_defect_review": False, "defect_review_requested_by_id": 1, "is_mitigated": False, "thread_id": 11, "mitigated": None, "mitigated_by_id": None, "reporter_id": 1, "numerical_severity": "S0", "last_reviewed": None, "last_reviewed_by_id": None, "param": None, "payload": None, "hash_code": "9aca00affd340c4da02c934e7e3106a45c6ad0911da479daae421b3b28a2c1aa", "line": 123, "file_path": "/dev/urandom", "component_name": None, "component_version": None, "static_finding": False, "dynamic_finding": False, "created": datetime(2017, 12, 1, 0, 0, tzinfo=timezone.utc), "scanner_confidence": None, "sonarqube_issue_id": None, "unique_id_from_tool": "12345", "vuln_id_from_tool": None, "sast_source_object": None, "sast_sink_object": None, "sast_source_line": None, "sast_source_file_path": None, "nb_occurences": None, "publish_date": None, "service": None, "planned_remediation_date": None, "planned_remediation_version": None, "effort_for_fixing": None, "test__engagement__product__prod_type__member": False, 
"test__engagement__product__member": True, "test__engagement__product__prod_type__authorized_group": False, "test__engagement__product__authorized_group": False} -FINDING_7 = {"id": 225, "title": "UID Impact Test Finding", "date": date(2018, 1, 1), "sla_start_date": None, "sla_expiration_date": None, "cwe": None, "cve": None, "epss_score": None, "epss_percentile": None, "cvssv3": None, "cvssv3_score": None, "url": None, "severity": "Low", "description": "test finding", "mitigation": "test mitigation", "impact": "Low", "steps_to_reproduce": None, "severity_justification": None, "references": "", "test_id": 77, "active": False, "verified": False, "false_p": False, "duplicate": True, "duplicate_finding_id": 224, "out_of_scope": False, "risk_accepted": False, "under_review": False, "last_status_update": None, "review_requested_by_id": 1, "under_defect_review": False, "defect_review_requested_by_id": 1, "is_mitigated": False, "thread_id": 11, "mitigated": None, "mitigated_by_id": None, "reporter_id": 1, "numerical_severity": "S0", "last_reviewed": None, "last_reviewed_by_id": None, "param": None, "payload": None, "hash_code": "6f8d0bf970c14175e597843f4679769a4775742549d90f902ff803de9244c7e1", "line": 123, "file_path": "/dev/urandom", "component_name": None, "component_version": None, "static_finding": False, "dynamic_finding": False, "created": datetime(2017, 12, 1, 0, 0, tzinfo=timezone.utc), "scanner_confidence": None, "sonarqube_issue_id": None, "unique_id_from_tool": "6789", "vuln_id_from_tool": None, "sast_source_object": None, "sast_sink_object": None, "sast_source_line": None, "sast_source_file_path": None, "nb_occurences": None, "publish_date": None, "service": None, "planned_remediation_date": None, "planned_remediation_version": None, "effort_for_fixing": None, "test__engagement__product__prod_type__member": False, "test__engagement__product__member": True, "test__engagement__product__prod_type__authorized_group": False, "test__engagement__product__authorized_group": False} -FINDING_8 = {"id": 240, "title": "High Impact Test Finding", "date": date(2018, 1, 1), "sla_start_date": None, "sla_expiration_date": None, "cwe": None, "cve": None, "epss_score": None, "epss_percentile": None, "cvssv3": None, "cvssv3_score": None, "url": None, "severity": "High", "description": "test finding", "mitigation": "test mitigation", "impact": "High", "steps_to_reproduce": None, "severity_justification": None, "references": "", "test_id": 3, "active": True, "verified": False, "false_p": False, "duplicate": True, "duplicate_finding_id": 2, "out_of_scope": False, "risk_accepted": False, "under_review": False, "last_status_update": None, "review_requested_by_id": 1, "under_defect_review": False, "defect_review_requested_by_id": 1, "is_mitigated": False, "thread_id": 11, "mitigated": None, "mitigated_by_id": None, "reporter_id": 1, "numerical_severity": "S0", "last_reviewed": None, "last_reviewed_by_id": None, "param": None, "payload": None, "hash_code": "5d368a051fdec959e08315a32ef633ba5711bed6e8e75319ddee2cab4d4608c7", "line": None, "file_path": "", "component_name": None, "component_version": None, "static_finding": False, "dynamic_finding": False, "created": datetime(2017, 12, 1, 0, 0, tzinfo=timezone.utc), "scanner_confidence": None, "sonarqube_issue_id": None, "unique_id_from_tool": None, "vuln_id_from_tool": None, "sast_source_object": None, "sast_sink_object": None, "sast_source_line": None, "sast_source_file_path": None, "nb_occurences": None, "publish_date": None, "service": None, 
"planned_remediation_date": None, "planned_remediation_version": None, "effort_for_fixing": None, "test__engagement__product__prod_type__member": False, "test__engagement__product__member": True, "test__engagement__product__prod_type__authorized_group": False, "test__engagement__product__authorized_group": False} -FINDING_9 = {"id": 241, "title": "High Impact Test Finding", "date": date(2018, 1, 1), "sla_start_date": None, "sla_expiration_date": None, "cwe": None, "cve": None, "epss_score": None, "epss_percentile": None, "cvssv3": None, "cvssv3_score": None, "url": None, "severity": "High", "description": "test finding", "mitigation": "test mitigation", "impact": "High", "steps_to_reproduce": None, "severity_justification": None, "references": "", "test_id": 3, "active": False, "verified": False, "false_p": False, "duplicate": True, "duplicate_finding_id": 2, "out_of_scope": False, "risk_accepted": True, "under_review": False, "last_status_update": None, "review_requested_by_id": 1, "under_defect_review": False, "defect_review_requested_by_id": 1, "is_mitigated": False, "thread_id": 11, "mitigated": None, "mitigated_by_id": None, "reporter_id": 1, "numerical_severity": "S0", "last_reviewed": None, "last_reviewed_by_id": None, "param": None, "payload": None, "hash_code": "5d368a051fdec959e08315a32ef633ba5711bed6e8e75319ddee2cab4d4608c7", "line": None, "file_path": "", "component_name": None, "component_version": None, "static_finding": False, "dynamic_finding": False, "created": datetime(2017, 12, 1, 0, 0, tzinfo=timezone.utc), "scanner_confidence": None, "sonarqube_issue_id": None, "unique_id_from_tool": None, "vuln_id_from_tool": None, "sast_source_object": None, "sast_sink_object": None, "sast_source_line": None, "sast_source_file_path": None, "nb_occurences": None, "publish_date": None, "service": None, "planned_remediation_date": None, "planned_remediation_version": None, "effort_for_fixing": None, "test__engagement__product__prod_type__member": False, "test__engagement__product__member": True, "test__engagement__product__prod_type__authorized_group": False, "test__engagement__product__authorized_group": False} -FINDING_10 = {"id": 242, "title": "High Impact Test Finding", "date": date(2018, 1, 1), "sla_start_date": None, "sla_expiration_date": None, "cwe": None, "cve": None, "epss_score": None, "epss_percentile": None, "cvssv3": None, "cvssv3_score": None, "url": None, "severity": "High", "description": "test finding", "mitigation": "test mitigation", "impact": "High", "steps_to_reproduce": None, "severity_justification": None, "references": "", "test_id": 3, "active": False, "verified": False, "false_p": False, "duplicate": True, "duplicate_finding_id": 2, "out_of_scope": False, "risk_accepted": True, "under_review": False, "last_status_update": None, "review_requested_by_id": 1, "under_defect_review": False, "defect_review_requested_by_id": 1, "is_mitigated": False, "thread_id": 11, "mitigated": None, "mitigated_by_id": None, "reporter_id": 1, "numerical_severity": "S0", "last_reviewed": None, "last_reviewed_by_id": None, "param": None, "payload": None, "hash_code": "5d368a051fdec959e08315a32ef633ba5711bed6e8e75319ddee2cab4d4608c7", "line": None, "file_path": "", "component_name": None, "component_version": None, "static_finding": False, "dynamic_finding": False, "created": datetime(2017, 12, 1, 0, 0, tzinfo=timezone.utc), "scanner_confidence": None, "sonarqube_issue_id": None, "unique_id_from_tool": None, "vuln_id_from_tool": None, "sast_source_object": None, "sast_sink_object": 
None, "sast_source_line": None, "sast_source_file_path": None, "nb_occurences": None, "publish_date": None, "service": None, "planned_remediation_date": None, "planned_remediation_version": None, "effort_for_fixing": None, "test__engagement__product__prod_type__member": False, "test__engagement__product__member": True, "test__engagement__product__prod_type__authorized_group": False, "test__engagement__product__authorized_group": False} -FINDING_11 = {"id": 243, "title": "DUMMY FINDING", "date": date(2017, 12, 31), "sla_start_date": None, "sla_expiration_date": None, "cwe": 1, "cve": None, "epss_score": None, "epss_percentile": None, "cvssv3": None, "cvssv3_score": None, "url": "http://www.example.com", "severity": "High", "description": "TEST finding", "mitigation": "MITIGATION", "impact": "High", "steps_to_reproduce": None, "severity_justification": None, "references": "", "test_id": 3, "active": False, "verified": False, "false_p": False, "duplicate": False, "duplicate_finding_id": None, "out_of_scope": False, "risk_accepted": True, "under_review": False, "last_status_update": None, "review_requested_by_id": 2, "under_defect_review": False, "defect_review_requested_by_id": 2, "is_mitigated": True, "thread_id": 1, "mitigated": None, "mitigated_by_id": None, "reporter_id": 2, "numerical_severity": "S0", "last_reviewed": None, "last_reviewed_by_id": None, "param": None, "payload": None, "hash_code": "c89d25e445b088ba339908f68e15e3177b78d22f3039d1bfea51c4be251bf4e0", "line": 100, "file_path": "", "component_name": None, "component_version": None, "static_finding": False, "dynamic_finding": False, "created": datetime(2017, 12, 1, 0, 0, tzinfo=timezone.utc), "scanner_confidence": None, "sonarqube_issue_id": None, "unique_id_from_tool": None, "vuln_id_from_tool": None, "sast_source_object": None, "sast_sink_object": None, "sast_source_line": None, "sast_source_file_path": None, "nb_occurences": None, "publish_date": None, "service": None, "planned_remediation_date": None, "planned_remediation_version": None, "effort_for_fixing": None, "test__engagement__product__prod_type__member": False, "test__engagement__product__member": True, "test__engagement__product__prod_type__authorized_group": False, "test__engagement__product__authorized_group": False} -FINDING_12 = {"id": 244, "title": "Low Impact Test Finding", "date": date(2017, 12, 29), "sla_start_date": None, "sla_expiration_date": None, "cwe": None, "cve": None, "epss_score": None, "epss_percentile": None, "cvssv3": None, "cvssv3_score": None, "url": None, "severity": "Low", "description": "test finding", "mitigation": "test mitigation", "impact": "Low", "steps_to_reproduce": None, "severity_justification": None, "references": "", "test_id": 33, "active": True, "verified": True, "false_p": False, "duplicate": False, "duplicate_finding_id": None, "out_of_scope": False, "risk_accepted": False, "under_review": False, "last_status_update": None, "review_requested_by_id": 1, "under_defect_review": False, "defect_review_requested_by_id": 1, "is_mitigated": False, "thread_id": 11, "mitigated": None, "mitigated_by_id": None, "reporter_id": 1, "numerical_severity": "S0", "last_reviewed": None, "last_reviewed_by_id": None, "param": None, "payload": None, "hash_code": "9aca00affd340c4da02c934e7e3106a45c6ad0911da479daae421b3b28a2c1aa", "line": 123, "file_path": "/dev/urandom", "component_name": None, "component_version": None, "static_finding": False, "dynamic_finding": False, "created": datetime(2017, 12, 1, 0, 0, tzinfo=timezone.utc), 
"scanner_confidence": None, "sonarqube_issue_id": None, "unique_id_from_tool": None, "vuln_id_from_tool": None, "sast_source_object": None, "sast_sink_object": None, "sast_source_line": None, "sast_source_file_path": None, "nb_occurences": None, "publish_date": None, "service": None, "planned_remediation_date": None, "planned_remediation_version": None, "effort_for_fixing": None, "test__engagement__product__prod_type__member": False, "test__engagement__product__member": True, "test__engagement__product__prod_type__authorized_group": False, "test__engagement__product__authorized_group": False} -FINDING_13 = {"id": 245, "title": "Low Impact Test Finding", "date": date(2017, 12, 27), "sla_start_date": None, "sla_expiration_date": None, "cwe": None, "cve": None, "epss_score": None, "epss_percentile": None, "cvssv3": None, "cvssv3_score": None, "url": None, "severity": "Low", "description": "test finding", "mitigation": "test mitigation", "impact": "Low", "steps_to_reproduce": None, "severity_justification": None, "references": "", "test_id": 33, "active": False, "verified": False, "false_p": False, "duplicate": True, "duplicate_finding_id": 22, "out_of_scope": False, "risk_accepted": False, "under_review": False, "last_status_update": None, "review_requested_by_id": 1, "under_defect_review": False, "defect_review_requested_by_id": 1, "is_mitigated": False, "thread_id": 11, "mitigated": None, "mitigated_by_id": None, "reporter_id": 1, "numerical_severity": "S0", "last_reviewed": None, "last_reviewed_by_id": None, "param": None, "payload": None, "hash_code": "9aca00affd340c4da02c934e7e3106a45c6ad0911da479daae421b3b28a2c1aa", "line": 123, "file_path": "/dev/urandom", "component_name": None, "component_version": None, "static_finding": False, "dynamic_finding": False, "created": datetime(2017, 12, 1, 0, 0, tzinfo=timezone.utc), "scanner_confidence": None, "sonarqube_issue_id": None, "unique_id_from_tool": None, "vuln_id_from_tool": None, "sast_source_object": None, "sast_sink_object": None, "sast_source_line": None, "sast_source_file_path": None, "nb_occurences": None, "publish_date": None, "service": None, "planned_remediation_date": None, "planned_remediation_version": None, "effort_for_fixing": None, "test__engagement__product__prod_type__member": False, "test__engagement__product__member": True, "test__engagement__product__prod_type__authorized_group": False, "test__engagement__product__authorized_group": False} -FINDING_14 = {"id": 246, "title": "Low Impact Test Finding", "date": date(2018, 1, 2), "sla_start_date": None, "sla_expiration_date": None, "cwe": None, "cve": None, "epss_score": None, "epss_percentile": None, "cvssv3": None, "cvssv3_score": None, "url": None, "severity": "Low", "description": "test finding", "mitigation": "test mitigation", "impact": "Low", "steps_to_reproduce": None, "severity_justification": None, "references": "", "test_id": 33, "active": False, "verified": False, "false_p": False, "duplicate": True, "duplicate_finding_id": 22, "out_of_scope": False, "risk_accepted": False, "under_review": False, "last_status_update": None, "review_requested_by_id": 1, "under_defect_review": False, "defect_review_requested_by_id": 1, "is_mitigated": False, "thread_id": 11, "mitigated": None, "mitigated_by_id": None, "reporter_id": 1, "numerical_severity": "S0", "last_reviewed": None, "last_reviewed_by_id": None, "param": None, "payload": None, "hash_code": "9aca00affd340c4da02c934e7e3106a45c6ad0911da479daae421b3b28a2c1aa", "line": 123, "file_path": "/dev/urandom", 
"component_name": None, "component_version": None, "static_finding": False, "dynamic_finding": False, "created": datetime(2017, 12, 1, 0, 0, tzinfo=timezone.utc), "scanner_confidence": None, "sonarqube_issue_id": None, "unique_id_from_tool": None, "vuln_id_from_tool": None, "sast_source_object": None, "sast_sink_object": None, "sast_source_line": None, "sast_source_file_path": None, "nb_occurences": None, "publish_date": None, "service": None, "planned_remediation_date": None, "planned_remediation_version": None, "effort_for_fixing": None, "test__engagement__product__prod_type__member": False, "test__engagement__product__member": True, "test__engagement__product__prod_type__authorized_group": False, "test__engagement__product__authorized_group": False} -FINDING_15 = {"id": 247, "title": "Low Impact Test Finding", "date": date(2018, 1, 3), "sla_start_date": None, "sla_expiration_date": None, "cwe": None, "cve": None, "epss_score": None, "epss_percentile": None, "cvssv3": None, "cvssv3_score": None, "url": None, "severity": "Low", "description": "test finding", "mitigation": "test mitigation", "impact": "Low", "steps_to_reproduce": None, "severity_justification": None, "references": "", "test_id": 55, "active": False, "verified": False, "false_p": False, "duplicate": True, "duplicate_finding_id": None, "out_of_scope": False, "risk_accepted": False, "under_review": False, "last_status_update": None, "review_requested_by_id": 1, "under_defect_review": False, "defect_review_requested_by_id": 1, "is_mitigated": False, "thread_id": 11, "mitigated": None, "mitigated_by_id": None, "reporter_id": 1, "numerical_severity": "S0", "last_reviewed": None, "last_reviewed_by_id": None, "param": None, "payload": None, "hash_code": "9aca00affd340c4da02c934e7e3106a45c6ad0911da479daae421b3b28a2c1aa", "line": 123, "file_path": "/dev/urandom", "component_name": None, "component_version": None, "static_finding": False, "dynamic_finding": False, "created": datetime(2017, 12, 1, 0, 0, tzinfo=timezone.utc), "scanner_confidence": None, "sonarqube_issue_id": None, "unique_id_from_tool": "12345", "vuln_id_from_tool": None, "sast_source_object": None, "sast_sink_object": None, "sast_source_line": None, "sast_source_file_path": None, "nb_occurences": None, "publish_date": None, "service": None, "planned_remediation_date": None, "planned_remediation_version": None, "effort_for_fixing": None, "test__engagement__product__prod_type__member": False, "test__engagement__product__member": True, "test__engagement__product__prod_type__authorized_group": False, "test__engagement__product__authorized_group": False} -FINDING_16 = {"id": 248, "title": "UID Impact Test Finding", "date": date(2017, 12, 27), "sla_start_date": None, "sla_expiration_date": None, "cwe": None, "cve": None, "epss_score": None, "epss_percentile": None, "cvssv3": None, "cvssv3_score": None, "url": None, "severity": "Low", "description": "test finding", "mitigation": "test mitigation", "impact": "Low", "steps_to_reproduce": None, "severity_justification": None, "references": "", "test_id": 77, "active": True, "verified": True, "false_p": False, "duplicate": False, "duplicate_finding_id": None, "out_of_scope": False, "risk_accepted": False, "under_review": False, "last_status_update": None, "review_requested_by_id": 1, "under_defect_review": False, "defect_review_requested_by_id": 1, "is_mitigated": True, "thread_id": 11, "mitigated": None, "mitigated_by_id": None, "reporter_id": 1, "numerical_severity": "S0", "last_reviewed": None, "last_reviewed_by_id": None, 
"param": None, "payload": None, "hash_code": "6f8d0bf970c14175e597843f4679769a4775742549d90f902ff803de9244c7e1", "line": 123, "file_path": "/dev/urandom", "component_name": None, "component_version": None, "static_finding": False, "dynamic_finding": False, "created": datetime(2017, 12, 1, 0, 0, tzinfo=timezone.utc), "scanner_confidence": None, "sonarqube_issue_id": None, "unique_id_from_tool": "6789", "vuln_id_from_tool": None, "sast_source_object": None, "sast_sink_object": None, "sast_source_line": None, "sast_source_file_path": None, "nb_occurences": None, "publish_date": None, "service": None, "planned_remediation_date": None, "planned_remediation_version": None, "effort_for_fixing": None, "test__engagement__product__prod_type__member": False, "test__engagement__product__member": True, "test__engagement__product__prod_type__authorized_group": False, "test__engagement__product__authorized_group": False} -FINDING_17 = {"id": 249, "title": "UID Impact Test Finding", "date": date(2018, 1, 4), "sla_start_date": None, "sla_expiration_date": None, "cwe": None, "cve": None, "epss_score": None, "epss_percentile": None, "cvssv3": None, "cvssv3_score": None, "url": None, "severity": "Low", "description": "test finding", "mitigation": "test mitigation", "impact": "Low", "steps_to_reproduce": None, "severity_justification": None, "references": "", "test_id": 77, "active": False, "verified": False, "false_p": False, "duplicate": True, "duplicate_finding_id": 224, "out_of_scope": False, "risk_accepted": False, "under_review": False, "last_status_update": None, "review_requested_by_id": 1, "under_defect_review": False, "defect_review_requested_by_id": 1, "is_mitigated": False, "thread_id": 11, "mitigated": None, "mitigated_by_id": None, "reporter_id": 1, "numerical_severity": "S0", "last_reviewed": None, "last_reviewed_by_id": None, "param": None, "payload": None, "hash_code": "6f8d0bf970c14175e597843f4679769a4775742549d90f902ff803de9244c7e1", "line": 123, "file_path": "/dev/urandom", "component_name": None, "component_version": None, "static_finding": False, "dynamic_finding": False, "created": datetime(2017, 12, 1, 0, 0, tzinfo=timezone.utc), "scanner_confidence": None, "sonarqube_issue_id": None, "unique_id_from_tool": "6789", "vuln_id_from_tool": None, "sast_source_object": None, "sast_sink_object": None, "sast_source_line": None, "sast_source_file_path": None, "nb_occurences": None, "publish_date": None, "service": None, "planned_remediation_date": None, "planned_remediation_version": None, "effort_for_fixing": None, "test__engagement__product__prod_type__member": False, "test__engagement__product__member": True, "test__engagement__product__prod_type__authorized_group": False, "test__engagement__product__authorized_group": False} +FINDING_1 = {"id": 4, "title": "High Impact Test Finding", "date": date(2018, 1, 1), "sla_start_date": None, "sla_expiration_date": None, "cwe": None, "cve": None, "epss_score": None, "epss_percentile": None, "cvssv3": None, "cvssv3_score": None, "url": None, "severity": "High", "description": "test finding", "mitigation": "test mitigation", "impact": "High", "steps_to_reproduce": None, "severity_justification": None, "references": "", "test_id": 3, "active": False, "verified": False, "false_p": False, "duplicate": True, "duplicate_finding_id": 2, "out_of_scope": False, "risk_accepted": False, "under_review": False, "last_status_update": None, "review_requested_by_id": 1, "under_defect_review": False, "defect_review_requested_by_id": 1, "is_mitigated": False, 
"thread_id": 11, "mitigated": None, "mitigated_by_id": None, "reporter_id": 1, "numerical_severity": "S0", "last_reviewed": None, "last_reviewed_by_id": None, "param": None, "payload": None, "hash_code": "5d368a051fdec959e08315a32ef633ba5711bed6e8e75319ddee2cab4d4608c7", "line": None, "file_path": "", "component_name": None, "component_version": None, "static_finding": False, "dynamic_finding": False, "created": datetime(2017, 12, 1, 0, 0, tzinfo=UTC), "scanner_confidence": None, "sonarqube_issue_id": None, "unique_id_from_tool": None, "vuln_id_from_tool": None, "sast_source_object": None, "sast_sink_object": None, "sast_source_line": None, "sast_source_file_path": None, "nb_occurences": None, "publish_date": None, "service": None, "planned_remediation_date": None, "planned_remediation_version": None, "effort_for_fixing": None, "test__engagement__product__prod_type__member": False, "test__engagement__product__member": True, "test__engagement__product__prod_type__authorized_group": False, "test__engagement__product__authorized_group": False} +FINDING_2 = {"id": 5, "title": "High Impact Test Finding", "date": date(2018, 1, 1), "sla_start_date": None, "sla_expiration_date": None, "cwe": None, "cve": None, "epss_score": None, "epss_percentile": None, "cvssv3": None, "cvssv3_score": None, "url": None, "severity": "High", "description": "test finding", "mitigation": "test mitigation", "impact": "High", "steps_to_reproduce": None, "severity_justification": None, "references": "", "test_id": 3, "active": False, "verified": False, "false_p": False, "duplicate": True, "duplicate_finding_id": 2, "out_of_scope": False, "risk_accepted": False, "under_review": False, "last_status_update": None, "review_requested_by_id": 1, "under_defect_review": False, "defect_review_requested_by_id": 1, "is_mitigated": False, "thread_id": 11, "mitigated": None, "mitigated_by_id": None, "reporter_id": 1, "numerical_severity": "S0", "last_reviewed": None, "last_reviewed_by_id": None, "param": None, "payload": None, "hash_code": "5d368a051fdec959e08315a32ef633ba5711bed6e8e75319ddee2cab4d4608c7", "line": None, "file_path": "", "component_name": None, "component_version": None, "static_finding": False, "dynamic_finding": False, "created": datetime(2017, 12, 1, 0, 0, tzinfo=UTC), "scanner_confidence": None, "sonarqube_issue_id": None, "unique_id_from_tool": None, "vuln_id_from_tool": None, "sast_source_object": None, "sast_sink_object": None, "sast_source_line": None, "sast_source_file_path": None, "nb_occurences": None, "publish_date": None, "service": None, "planned_remediation_date": None, "planned_remediation_version": None, "effort_for_fixing": None, "test__engagement__product__prod_type__member": False, "test__engagement__product__member": True, "test__engagement__product__prod_type__authorized_group": False, "test__engagement__product__authorized_group": False} +FINDING_3 = {"id": 6, "title": "High Impact Test Finding", "date": date(2018, 1, 1), "sla_start_date": None, "sla_expiration_date": None, "cwe": None, "cve": None, "epss_score": None, "epss_percentile": None, "cvssv3": None, "cvssv3_score": None, "url": None, "severity": "High", "description": "test finding", "mitigation": "test mitigation", "impact": "High", "steps_to_reproduce": None, "severity_justification": None, "references": "", "test_id": 3, "active": False, "verified": False, "false_p": False, "duplicate": True, "duplicate_finding_id": 2, "out_of_scope": False, "risk_accepted": False, "under_review": False, "last_status_update": None, 
"review_requested_by_id": 1, "under_defect_review": False, "defect_review_requested_by_id": 1, "is_mitigated": False, "thread_id": 11, "mitigated": None, "mitigated_by_id": None, "reporter_id": 1, "numerical_severity": "S0", "last_reviewed": None, "last_reviewed_by_id": None, "param": None, "payload": None, "hash_code": "5d368a051fdec959e08315a32ef633ba5711bed6e8e75319ddee2cab4d4608c7", "line": None, "file_path": "", "component_name": None, "component_version": None, "static_finding": False, "dynamic_finding": False, "created": datetime(2017, 12, 1, 0, 0, tzinfo=UTC), "scanner_confidence": None, "sonarqube_issue_id": None, "unique_id_from_tool": None, "vuln_id_from_tool": None, "sast_source_object": None, "sast_sink_object": None, "sast_source_line": None, "sast_source_file_path": None, "nb_occurences": None, "publish_date": None, "service": None, "planned_remediation_date": None, "planned_remediation_version": None, "effort_for_fixing": None, "test__engagement__product__prod_type__member": False, "test__engagement__product__member": True, "test__engagement__product__prod_type__authorized_group": False, "test__engagement__product__authorized_group": False} +FINDING_4 = {"id": 7, "title": "DUMMY FINDING", "date": date(2017, 12, 31), "sla_start_date": None, "sla_expiration_date": None, "cwe": 1, "cve": None, "epss_score": None, "epss_percentile": None, "cvssv3": None, "cvssv3_score": None, "url": "http://www.example.com", "severity": "High", "description": "TEST finding", "mitigation": "MITIGATION", "impact": "High", "steps_to_reproduce": None, "severity_justification": None, "references": "", "test_id": 3, "active": False, "verified": False, "false_p": False, "duplicate": False, "duplicate_finding_id": None, "out_of_scope": False, "risk_accepted": False, "under_review": False, "last_status_update": None, "review_requested_by_id": 2, "under_defect_review": False, "defect_review_requested_by_id": 2, "is_mitigated": False, "thread_id": 1, "mitigated": None, "mitigated_by_id": None, "reporter_id": 2, "numerical_severity": "S0", "last_reviewed": None, "last_reviewed_by_id": None, "param": None, "payload": None, "hash_code": "c89d25e445b088ba339908f68e15e3177b78d22f3039d1bfea51c4be251bf4e0", "line": 100, "file_path": "", "component_name": None, "component_version": None, "static_finding": False, "dynamic_finding": False, "created": datetime(2017, 12, 1, 0, 0, tzinfo=UTC), "scanner_confidence": None, "sonarqube_issue_id": None, "unique_id_from_tool": None, "vuln_id_from_tool": None, "sast_source_object": None, "sast_sink_object": None, "sast_source_line": None, "sast_source_file_path": None, "nb_occurences": None, "publish_date": None, "service": None, "planned_remediation_date": None, "planned_remediation_version": None, "effort_for_fixing": None, "test__engagement__product__prod_type__member": False, "test__engagement__product__member": True, "test__engagement__product__prod_type__authorized_group": False, "test__engagement__product__authorized_group": False} +FINDING_5 = {"id": 24, "title": "Low Impact Test Finding", "date": date(2018, 1, 1), "sla_start_date": None, "sla_expiration_date": None, "cwe": None, "cve": None, "epss_score": None, "epss_percentile": None, "cvssv3": None, "cvssv3_score": None, "url": None, "severity": "Low", "description": "test finding", "mitigation": "test mitigation", "impact": "Low", "steps_to_reproduce": None, "severity_justification": None, "references": "", "test_id": 33, "active": False, "verified": False, "false_p": False, "duplicate": True, 
"duplicate_finding_id": 22, "out_of_scope": False, "risk_accepted": False, "under_review": False, "last_status_update": None, "review_requested_by_id": 1, "under_defect_review": False, "defect_review_requested_by_id": 1, "is_mitigated": False, "thread_id": 11, "mitigated": None, "mitigated_by_id": None, "reporter_id": 1, "numerical_severity": "S0", "last_reviewed": None, "last_reviewed_by_id": None, "param": None, "payload": None, "hash_code": "9aca00affd340c4da02c934e7e3106a45c6ad0911da479daae421b3b28a2c1aa", "line": 123, "file_path": "/dev/urandom", "component_name": None, "component_version": None, "static_finding": False, "dynamic_finding": False, "created": datetime(2017, 12, 1, 0, 0, tzinfo=UTC), "scanner_confidence": None, "sonarqube_issue_id": None, "unique_id_from_tool": None, "vuln_id_from_tool": None, "sast_source_object": None, "sast_sink_object": None, "sast_source_line": None, "sast_source_file_path": None, "nb_occurences": None, "publish_date": None, "service": None, "planned_remediation_date": None, "planned_remediation_version": None, "effort_for_fixing": None, "test__engagement__product__prod_type__member": False, "test__engagement__product__member": True, "test__engagement__product__prod_type__authorized_group": False, "test__engagement__product__authorized_group": False} +FINDING_6 = {"id": 125, "title": "Low Impact Test Finding", "date": date(2018, 1, 1), "sla_start_date": None, "sla_expiration_date": None, "cwe": None, "cve": None, "epss_score": None, "epss_percentile": None, "cvssv3": None, "cvssv3_score": None, "url": None, "severity": "Low", "description": "test finding", "mitigation": "test mitigation", "impact": "Low", "steps_to_reproduce": None, "severity_justification": None, "references": "", "test_id": 55, "active": False, "verified": False, "false_p": False, "duplicate": True, "duplicate_finding_id": None, "out_of_scope": False, "risk_accepted": False, "under_review": False, "last_status_update": None, "review_requested_by_id": 1, "under_defect_review": False, "defect_review_requested_by_id": 1, "is_mitigated": False, "thread_id": 11, "mitigated": None, "mitigated_by_id": None, "reporter_id": 1, "numerical_severity": "S0", "last_reviewed": None, "last_reviewed_by_id": None, "param": None, "payload": None, "hash_code": "9aca00affd340c4da02c934e7e3106a45c6ad0911da479daae421b3b28a2c1aa", "line": 123, "file_path": "/dev/urandom", "component_name": None, "component_version": None, "static_finding": False, "dynamic_finding": False, "created": datetime(2017, 12, 1, 0, 0, tzinfo=UTC), "scanner_confidence": None, "sonarqube_issue_id": None, "unique_id_from_tool": "12345", "vuln_id_from_tool": None, "sast_source_object": None, "sast_sink_object": None, "sast_source_line": None, "sast_source_file_path": None, "nb_occurences": None, "publish_date": None, "service": None, "planned_remediation_date": None, "planned_remediation_version": None, "effort_for_fixing": None, "test__engagement__product__prod_type__member": False, "test__engagement__product__member": True, "test__engagement__product__prod_type__authorized_group": False, "test__engagement__product__authorized_group": False} +FINDING_7 = {"id": 225, "title": "UID Impact Test Finding", "date": date(2018, 1, 1), "sla_start_date": None, "sla_expiration_date": None, "cwe": None, "cve": None, "epss_score": None, "epss_percentile": None, "cvssv3": None, "cvssv3_score": None, "url": None, "severity": "Low", "description": "test finding", "mitigation": "test mitigation", "impact": "Low", "steps_to_reproduce": None, 
"severity_justification": None, "references": "", "test_id": 77, "active": False, "verified": False, "false_p": False, "duplicate": True, "duplicate_finding_id": 224, "out_of_scope": False, "risk_accepted": False, "under_review": False, "last_status_update": None, "review_requested_by_id": 1, "under_defect_review": False, "defect_review_requested_by_id": 1, "is_mitigated": False, "thread_id": 11, "mitigated": None, "mitigated_by_id": None, "reporter_id": 1, "numerical_severity": "S0", "last_reviewed": None, "last_reviewed_by_id": None, "param": None, "payload": None, "hash_code": "6f8d0bf970c14175e597843f4679769a4775742549d90f902ff803de9244c7e1", "line": 123, "file_path": "/dev/urandom", "component_name": None, "component_version": None, "static_finding": False, "dynamic_finding": False, "created": datetime(2017, 12, 1, 0, 0, tzinfo=UTC), "scanner_confidence": None, "sonarqube_issue_id": None, "unique_id_from_tool": "6789", "vuln_id_from_tool": None, "sast_source_object": None, "sast_sink_object": None, "sast_source_line": None, "sast_source_file_path": None, "nb_occurences": None, "publish_date": None, "service": None, "planned_remediation_date": None, "planned_remediation_version": None, "effort_for_fixing": None, "test__engagement__product__prod_type__member": False, "test__engagement__product__member": True, "test__engagement__product__prod_type__authorized_group": False, "test__engagement__product__authorized_group": False} +FINDING_8 = {"id": 240, "title": "High Impact Test Finding", "date": date(2018, 1, 1), "sla_start_date": None, "sla_expiration_date": None, "cwe": None, "cve": None, "epss_score": None, "epss_percentile": None, "cvssv3": None, "cvssv3_score": None, "url": None, "severity": "High", "description": "test finding", "mitigation": "test mitigation", "impact": "High", "steps_to_reproduce": None, "severity_justification": None, "references": "", "test_id": 3, "active": True, "verified": False, "false_p": False, "duplicate": True, "duplicate_finding_id": 2, "out_of_scope": False, "risk_accepted": False, "under_review": False, "last_status_update": None, "review_requested_by_id": 1, "under_defect_review": False, "defect_review_requested_by_id": 1, "is_mitigated": False, "thread_id": 11, "mitigated": None, "mitigated_by_id": None, "reporter_id": 1, "numerical_severity": "S0", "last_reviewed": None, "last_reviewed_by_id": None, "param": None, "payload": None, "hash_code": "5d368a051fdec959e08315a32ef633ba5711bed6e8e75319ddee2cab4d4608c7", "line": None, "file_path": "", "component_name": None, "component_version": None, "static_finding": False, "dynamic_finding": False, "created": datetime(2017, 12, 1, 0, 0, tzinfo=UTC), "scanner_confidence": None, "sonarqube_issue_id": None, "unique_id_from_tool": None, "vuln_id_from_tool": None, "sast_source_object": None, "sast_sink_object": None, "sast_source_line": None, "sast_source_file_path": None, "nb_occurences": None, "publish_date": None, "service": None, "planned_remediation_date": None, "planned_remediation_version": None, "effort_for_fixing": None, "test__engagement__product__prod_type__member": False, "test__engagement__product__member": True, "test__engagement__product__prod_type__authorized_group": False, "test__engagement__product__authorized_group": False} +FINDING_9 = {"id": 241, "title": "High Impact Test Finding", "date": date(2018, 1, 1), "sla_start_date": None, "sla_expiration_date": None, "cwe": None, "cve": None, "epss_score": None, "epss_percentile": None, "cvssv3": None, "cvssv3_score": None, "url": None, 
"severity": "High", "description": "test finding", "mitigation": "test mitigation", "impact": "High", "steps_to_reproduce": None, "severity_justification": None, "references": "", "test_id": 3, "active": False, "verified": False, "false_p": False, "duplicate": True, "duplicate_finding_id": 2, "out_of_scope": False, "risk_accepted": True, "under_review": False, "last_status_update": None, "review_requested_by_id": 1, "under_defect_review": False, "defect_review_requested_by_id": 1, "is_mitigated": False, "thread_id": 11, "mitigated": None, "mitigated_by_id": None, "reporter_id": 1, "numerical_severity": "S0", "last_reviewed": None, "last_reviewed_by_id": None, "param": None, "payload": None, "hash_code": "5d368a051fdec959e08315a32ef633ba5711bed6e8e75319ddee2cab4d4608c7", "line": None, "file_path": "", "component_name": None, "component_version": None, "static_finding": False, "dynamic_finding": False, "created": datetime(2017, 12, 1, 0, 0, tzinfo=UTC), "scanner_confidence": None, "sonarqube_issue_id": None, "unique_id_from_tool": None, "vuln_id_from_tool": None, "sast_source_object": None, "sast_sink_object": None, "sast_source_line": None, "sast_source_file_path": None, "nb_occurences": None, "publish_date": None, "service": None, "planned_remediation_date": None, "planned_remediation_version": None, "effort_for_fixing": None, "test__engagement__product__prod_type__member": False, "test__engagement__product__member": True, "test__engagement__product__prod_type__authorized_group": False, "test__engagement__product__authorized_group": False} +FINDING_10 = {"id": 242, "title": "High Impact Test Finding", "date": date(2018, 1, 1), "sla_start_date": None, "sla_expiration_date": None, "cwe": None, "cve": None, "epss_score": None, "epss_percentile": None, "cvssv3": None, "cvssv3_score": None, "url": None, "severity": "High", "description": "test finding", "mitigation": "test mitigation", "impact": "High", "steps_to_reproduce": None, "severity_justification": None, "references": "", "test_id": 3, "active": False, "verified": False, "false_p": False, "duplicate": True, "duplicate_finding_id": 2, "out_of_scope": False, "risk_accepted": True, "under_review": False, "last_status_update": None, "review_requested_by_id": 1, "under_defect_review": False, "defect_review_requested_by_id": 1, "is_mitigated": False, "thread_id": 11, "mitigated": None, "mitigated_by_id": None, "reporter_id": 1, "numerical_severity": "S0", "last_reviewed": None, "last_reviewed_by_id": None, "param": None, "payload": None, "hash_code": "5d368a051fdec959e08315a32ef633ba5711bed6e8e75319ddee2cab4d4608c7", "line": None, "file_path": "", "component_name": None, "component_version": None, "static_finding": False, "dynamic_finding": False, "created": datetime(2017, 12, 1, 0, 0, tzinfo=UTC), "scanner_confidence": None, "sonarqube_issue_id": None, "unique_id_from_tool": None, "vuln_id_from_tool": None, "sast_source_object": None, "sast_sink_object": None, "sast_source_line": None, "sast_source_file_path": None, "nb_occurences": None, "publish_date": None, "service": None, "planned_remediation_date": None, "planned_remediation_version": None, "effort_for_fixing": None, "test__engagement__product__prod_type__member": False, "test__engagement__product__member": True, "test__engagement__product__prod_type__authorized_group": False, "test__engagement__product__authorized_group": False} +FINDING_11 = {"id": 243, "title": "DUMMY FINDING", "date": date(2017, 12, 31), "sla_start_date": None, "sla_expiration_date": None, "cwe": 1, "cve": None, 
"epss_score": None, "epss_percentile": None, "cvssv3": None, "cvssv3_score": None, "url": "http://www.example.com", "severity": "High", "description": "TEST finding", "mitigation": "MITIGATION", "impact": "High", "steps_to_reproduce": None, "severity_justification": None, "references": "", "test_id": 3, "active": False, "verified": False, "false_p": False, "duplicate": False, "duplicate_finding_id": None, "out_of_scope": False, "risk_accepted": True, "under_review": False, "last_status_update": None, "review_requested_by_id": 2, "under_defect_review": False, "defect_review_requested_by_id": 2, "is_mitigated": True, "thread_id": 1, "mitigated": None, "mitigated_by_id": None, "reporter_id": 2, "numerical_severity": "S0", "last_reviewed": None, "last_reviewed_by_id": None, "param": None, "payload": None, "hash_code": "c89d25e445b088ba339908f68e15e3177b78d22f3039d1bfea51c4be251bf4e0", "line": 100, "file_path": "", "component_name": None, "component_version": None, "static_finding": False, "dynamic_finding": False, "created": datetime(2017, 12, 1, 0, 0, tzinfo=UTC), "scanner_confidence": None, "sonarqube_issue_id": None, "unique_id_from_tool": None, "vuln_id_from_tool": None, "sast_source_object": None, "sast_sink_object": None, "sast_source_line": None, "sast_source_file_path": None, "nb_occurences": None, "publish_date": None, "service": None, "planned_remediation_date": None, "planned_remediation_version": None, "effort_for_fixing": None, "test__engagement__product__prod_type__member": False, "test__engagement__product__member": True, "test__engagement__product__prod_type__authorized_group": False, "test__engagement__product__authorized_group": False} +FINDING_12 = {"id": 244, "title": "Low Impact Test Finding", "date": date(2017, 12, 29), "sla_start_date": None, "sla_expiration_date": None, "cwe": None, "cve": None, "epss_score": None, "epss_percentile": None, "cvssv3": None, "cvssv3_score": None, "url": None, "severity": "Low", "description": "test finding", "mitigation": "test mitigation", "impact": "Low", "steps_to_reproduce": None, "severity_justification": None, "references": "", "test_id": 33, "active": True, "verified": True, "false_p": False, "duplicate": False, "duplicate_finding_id": None, "out_of_scope": False, "risk_accepted": False, "under_review": False, "last_status_update": None, "review_requested_by_id": 1, "under_defect_review": False, "defect_review_requested_by_id": 1, "is_mitigated": False, "thread_id": 11, "mitigated": None, "mitigated_by_id": None, "reporter_id": 1, "numerical_severity": "S0", "last_reviewed": None, "last_reviewed_by_id": None, "param": None, "payload": None, "hash_code": "9aca00affd340c4da02c934e7e3106a45c6ad0911da479daae421b3b28a2c1aa", "line": 123, "file_path": "/dev/urandom", "component_name": None, "component_version": None, "static_finding": False, "dynamic_finding": False, "created": datetime(2017, 12, 1, 0, 0, tzinfo=UTC), "scanner_confidence": None, "sonarqube_issue_id": None, "unique_id_from_tool": None, "vuln_id_from_tool": None, "sast_source_object": None, "sast_sink_object": None, "sast_source_line": None, "sast_source_file_path": None, "nb_occurences": None, "publish_date": None, "service": None, "planned_remediation_date": None, "planned_remediation_version": None, "effort_for_fixing": None, "test__engagement__product__prod_type__member": False, "test__engagement__product__member": True, "test__engagement__product__prod_type__authorized_group": False, "test__engagement__product__authorized_group": False} +FINDING_13 = {"id": 245, 
"title": "Low Impact Test Finding", "date": date(2017, 12, 27), "sla_start_date": None, "sla_expiration_date": None, "cwe": None, "cve": None, "epss_score": None, "epss_percentile": None, "cvssv3": None, "cvssv3_score": None, "url": None, "severity": "Low", "description": "test finding", "mitigation": "test mitigation", "impact": "Low", "steps_to_reproduce": None, "severity_justification": None, "references": "", "test_id": 33, "active": False, "verified": False, "false_p": False, "duplicate": True, "duplicate_finding_id": 22, "out_of_scope": False, "risk_accepted": False, "under_review": False, "last_status_update": None, "review_requested_by_id": 1, "under_defect_review": False, "defect_review_requested_by_id": 1, "is_mitigated": False, "thread_id": 11, "mitigated": None, "mitigated_by_id": None, "reporter_id": 1, "numerical_severity": "S0", "last_reviewed": None, "last_reviewed_by_id": None, "param": None, "payload": None, "hash_code": "9aca00affd340c4da02c934e7e3106a45c6ad0911da479daae421b3b28a2c1aa", "line": 123, "file_path": "/dev/urandom", "component_name": None, "component_version": None, "static_finding": False, "dynamic_finding": False, "created": datetime(2017, 12, 1, 0, 0, tzinfo=UTC), "scanner_confidence": None, "sonarqube_issue_id": None, "unique_id_from_tool": None, "vuln_id_from_tool": None, "sast_source_object": None, "sast_sink_object": None, "sast_source_line": None, "sast_source_file_path": None, "nb_occurences": None, "publish_date": None, "service": None, "planned_remediation_date": None, "planned_remediation_version": None, "effort_for_fixing": None, "test__engagement__product__prod_type__member": False, "test__engagement__product__member": True, "test__engagement__product__prod_type__authorized_group": False, "test__engagement__product__authorized_group": False} +FINDING_14 = {"id": 246, "title": "Low Impact Test Finding", "date": date(2018, 1, 2), "sla_start_date": None, "sla_expiration_date": None, "cwe": None, "cve": None, "epss_score": None, "epss_percentile": None, "cvssv3": None, "cvssv3_score": None, "url": None, "severity": "Low", "description": "test finding", "mitigation": "test mitigation", "impact": "Low", "steps_to_reproduce": None, "severity_justification": None, "references": "", "test_id": 33, "active": False, "verified": False, "false_p": False, "duplicate": True, "duplicate_finding_id": 22, "out_of_scope": False, "risk_accepted": False, "under_review": False, "last_status_update": None, "review_requested_by_id": 1, "under_defect_review": False, "defect_review_requested_by_id": 1, "is_mitigated": False, "thread_id": 11, "mitigated": None, "mitigated_by_id": None, "reporter_id": 1, "numerical_severity": "S0", "last_reviewed": None, "last_reviewed_by_id": None, "param": None, "payload": None, "hash_code": "9aca00affd340c4da02c934e7e3106a45c6ad0911da479daae421b3b28a2c1aa", "line": 123, "file_path": "/dev/urandom", "component_name": None, "component_version": None, "static_finding": False, "dynamic_finding": False, "created": datetime(2017, 12, 1, 0, 0, tzinfo=UTC), "scanner_confidence": None, "sonarqube_issue_id": None, "unique_id_from_tool": None, "vuln_id_from_tool": None, "sast_source_object": None, "sast_sink_object": None, "sast_source_line": None, "sast_source_file_path": None, "nb_occurences": None, "publish_date": None, "service": None, "planned_remediation_date": None, "planned_remediation_version": None, "effort_for_fixing": None, "test__engagement__product__prod_type__member": False, "test__engagement__product__member": True, 
"test__engagement__product__prod_type__authorized_group": False, "test__engagement__product__authorized_group": False} +FINDING_15 = {"id": 247, "title": "Low Impact Test Finding", "date": date(2018, 1, 3), "sla_start_date": None, "sla_expiration_date": None, "cwe": None, "cve": None, "epss_score": None, "epss_percentile": None, "cvssv3": None, "cvssv3_score": None, "url": None, "severity": "Low", "description": "test finding", "mitigation": "test mitigation", "impact": "Low", "steps_to_reproduce": None, "severity_justification": None, "references": "", "test_id": 55, "active": False, "verified": False, "false_p": False, "duplicate": True, "duplicate_finding_id": None, "out_of_scope": False, "risk_accepted": False, "under_review": False, "last_status_update": None, "review_requested_by_id": 1, "under_defect_review": False, "defect_review_requested_by_id": 1, "is_mitigated": False, "thread_id": 11, "mitigated": None, "mitigated_by_id": None, "reporter_id": 1, "numerical_severity": "S0", "last_reviewed": None, "last_reviewed_by_id": None, "param": None, "payload": None, "hash_code": "9aca00affd340c4da02c934e7e3106a45c6ad0911da479daae421b3b28a2c1aa", "line": 123, "file_path": "/dev/urandom", "component_name": None, "component_version": None, "static_finding": False, "dynamic_finding": False, "created": datetime(2017, 12, 1, 0, 0, tzinfo=UTC), "scanner_confidence": None, "sonarqube_issue_id": None, "unique_id_from_tool": "12345", "vuln_id_from_tool": None, "sast_source_object": None, "sast_sink_object": None, "sast_source_line": None, "sast_source_file_path": None, "nb_occurences": None, "publish_date": None, "service": None, "planned_remediation_date": None, "planned_remediation_version": None, "effort_for_fixing": None, "test__engagement__product__prod_type__member": False, "test__engagement__product__member": True, "test__engagement__product__prod_type__authorized_group": False, "test__engagement__product__authorized_group": False} +FINDING_16 = {"id": 248, "title": "UID Impact Test Finding", "date": date(2017, 12, 27), "sla_start_date": None, "sla_expiration_date": None, "cwe": None, "cve": None, "epss_score": None, "epss_percentile": None, "cvssv3": None, "cvssv3_score": None, "url": None, "severity": "Low", "description": "test finding", "mitigation": "test mitigation", "impact": "Low", "steps_to_reproduce": None, "severity_justification": None, "references": "", "test_id": 77, "active": True, "verified": True, "false_p": False, "duplicate": False, "duplicate_finding_id": None, "out_of_scope": False, "risk_accepted": False, "under_review": False, "last_status_update": None, "review_requested_by_id": 1, "under_defect_review": False, "defect_review_requested_by_id": 1, "is_mitigated": True, "thread_id": 11, "mitigated": None, "mitigated_by_id": None, "reporter_id": 1, "numerical_severity": "S0", "last_reviewed": None, "last_reviewed_by_id": None, "param": None, "payload": None, "hash_code": "6f8d0bf970c14175e597843f4679769a4775742549d90f902ff803de9244c7e1", "line": 123, "file_path": "/dev/urandom", "component_name": None, "component_version": None, "static_finding": False, "dynamic_finding": False, "created": datetime(2017, 12, 1, 0, 0, tzinfo=UTC), "scanner_confidence": None, "sonarqube_issue_id": None, "unique_id_from_tool": "6789", "vuln_id_from_tool": None, "sast_source_object": None, "sast_sink_object": None, "sast_source_line": None, "sast_source_file_path": None, "nb_occurences": None, "publish_date": None, "service": None, "planned_remediation_date": None, 
"planned_remediation_version": None, "effort_for_fixing": None, "test__engagement__product__prod_type__member": False, "test__engagement__product__member": True, "test__engagement__product__prod_type__authorized_group": False, "test__engagement__product__authorized_group": False} +FINDING_17 = {"id": 249, "title": "UID Impact Test Finding", "date": date(2018, 1, 4), "sla_start_date": None, "sla_expiration_date": None, "cwe": None, "cve": None, "epss_score": None, "epss_percentile": None, "cvssv3": None, "cvssv3_score": None, "url": None, "severity": "Low", "description": "test finding", "mitigation": "test mitigation", "impact": "Low", "steps_to_reproduce": None, "severity_justification": None, "references": "", "test_id": 77, "active": False, "verified": False, "false_p": False, "duplicate": True, "duplicate_finding_id": 224, "out_of_scope": False, "risk_accepted": False, "under_review": False, "last_status_update": None, "review_requested_by_id": 1, "under_defect_review": False, "defect_review_requested_by_id": 1, "is_mitigated": False, "thread_id": 11, "mitigated": None, "mitigated_by_id": None, "reporter_id": 1, "numerical_severity": "S0", "last_reviewed": None, "last_reviewed_by_id": None, "param": None, "payload": None, "hash_code": "6f8d0bf970c14175e597843f4679769a4775742549d90f902ff803de9244c7e1", "line": 123, "file_path": "/dev/urandom", "component_name": None, "component_version": None, "static_finding": False, "dynamic_finding": False, "created": datetime(2017, 12, 1, 0, 0, tzinfo=UTC), "scanner_confidence": None, "sonarqube_issue_id": None, "unique_id_from_tool": "6789", "vuln_id_from_tool": None, "sast_source_object": None, "sast_sink_object": None, "sast_source_line": None, "sast_source_file_path": None, "nb_occurences": None, "publish_date": None, "service": None, "planned_remediation_date": None, "planned_remediation_version": None, "effort_for_fixing": None, "test__engagement__product__prod_type__member": False, "test__engagement__product__member": True, "test__engagement__product__prod_type__authorized_group": False, "test__engagement__product__authorized_group": False} ALL_FINDINGS = [FINDING_1, FINDING_2, FINDING_3, FINDING_4, FINDING_5, FINDING_6, FINDING_7, FINDING_8, FINDING_9, @@ -75,7 +75,7 @@ def test_finding_queries_no_data(self): @patch("django.utils.timezone.now") def test_finding_queries(self, mock_timezone): - mock_datetime = datetime(2020, 12, 9, tzinfo=timezone.utc) + mock_datetime = datetime(2020, 12, 9, tzinfo=UTC) mock_timezone.return_value = mock_datetime # Queries over Finding @@ -280,5 +280,5 @@ def test_endpoint_queries(self): ], ) self.assertEqual(endpoint_queries["weeks_between"], 2) - self.assertEqual(endpoint_queries["start_date"], datetime(2020, 7, 1, 0, 0, tzinfo=timezone.utc)) - self.assertEqual(endpoint_queries["end_date"], datetime(2020, 7, 1, 0, 0, tzinfo=timezone.utc)) + self.assertEqual(endpoint_queries["start_date"], datetime(2020, 7, 1, 0, 0, tzinfo=UTC)) + self.assertEqual(endpoint_queries["end_date"], datetime(2020, 7, 1, 0, 0, tzinfo=UTC)) diff --git a/unittests/test_risk_acceptance.py b/unittests/test_risk_acceptance.py index 97afb3e1f7d..9e7904f4716 100644 --- a/unittests/test_risk_acceptance.py +++ b/unittests/test_risk_acceptance.py @@ -269,9 +269,9 @@ def test_expiration_handler(self): # ra1: expire in 9 days -> warn:yes, expire:no # ra2: expire in 11 days -> warn:no, expire:no # ra3: expire 5 days ago -> warn:no, expire:yes (expiration not handled yet, so expire) - ra1.expiration_date = 
datetime.datetime.now(datetime.timezone.utc) + relativedelta(days=heads_up_days - 1) - ra2.expiration_date = datetime.datetime.now(datetime.timezone.utc) + relativedelta(days=heads_up_days + 1) - ra3.expiration_date = datetime.datetime.now(datetime.timezone.utc) - relativedelta(days=5) + ra1.expiration_date = datetime.datetime.now(datetime.UTC) + relativedelta(days=heads_up_days - 1) + ra2.expiration_date = datetime.datetime.now(datetime.UTC) + relativedelta(days=heads_up_days + 1) + ra3.expiration_date = datetime.datetime.now(datetime.UTC) - relativedelta(days=5) ra1.save() ra2.save() ra3.save() diff --git a/unittests/test_utils_deduplication_reopen.py b/unittests/test_utils_deduplication_reopen.py index 1876deefe3c..a5f8fcf54d5 100644 --- a/unittests/test_utils_deduplication_reopen.py +++ b/unittests/test_utils_deduplication_reopen.py @@ -17,7 +17,7 @@ def setUp(self): self.finding_a = Finding.objects.get(id=2) self.finding_a.pk = None self.finding_a.duplicate = False - self.finding_a.mitigated = datetime.datetime(1970, 1, 1, tzinfo=datetime.timezone.utc) + self.finding_a.mitigated = datetime.datetime(1970, 1, 1, tzinfo=datetime.UTC) self.finding_a.is_mitigated = True self.finding_a.false_p = True self.finding_a.active = False diff --git a/unittests/tools/test_arachni_parser.py b/unittests/tools/test_arachni_parser.py index 337200796ea..266d45dc05d 100644 --- a/unittests/tools/test_arachni_parser.py +++ b/unittests/tools/test_arachni_parser.py @@ -20,7 +20,7 @@ def test_parser_has_one_finding(self): self.assertEqual("Cross-Site Scripting (XSS)", finding.title) self.assertEqual(79, finding.cwe) self.assertEqual("High", finding.severity) - self.assertEqual(datetime.datetime(2017, 11, 14, 2, 57, 29, tzinfo=datetime.timezone.utc), finding.date) + self.assertEqual(datetime.datetime(2017, 11, 14, 2, 57, 29, tzinfo=datetime.UTC), finding.date) def test_parser_has_many_finding(self): with open("unittests/scans/arachni/dd.com.afr.json", encoding="utf-8") as testfile: diff --git a/unittests/tools/test_bugcrowd_parser.py b/unittests/tools/test_bugcrowd_parser.py index 5e66c9c6c7b..87a3083ffb2 100644 --- a/unittests/tools/test_bugcrowd_parser.py +++ b/unittests/tools/test_bugcrowd_parser.py @@ -1,4 +1,4 @@ -from datetime import datetime, timezone +from datetime import UTC, datetime from dojo.models import Test from dojo.tools.bugcrowd.parser import BugCrowdParser @@ -24,7 +24,7 @@ def test_parse_file_with_one_vuln_has_one_findings(self): for endpoint in finding.unsaved_endpoints: endpoint.clean() self.assertEqual(1, len(findings)) - self.assertEqual(findings[0].date, datetime(2020, 3, 1, 6, 15, 6, tzinfo=timezone.utc)) + self.assertEqual(findings[0].date, datetime(2020, 3, 1, 6, 15, 6, tzinfo=UTC)) def test_parse_file_with_multiple_vuln_has_multiple_finding(self): with open("unittests/scans/bugcrowd/BugCrowd-many.csv", encoding="utf-8") as testfile: diff --git a/unittests/tools/test_dependency_check_parser.py b/unittests/tools/test_dependency_check_parser.py index 8f39cc6f707..620e6adfc62 100644 --- a/unittests/tools/test_dependency_check_parser.py +++ b/unittests/tools/test_dependency_check_parser.py @@ -1,5 +1,5 @@ import logging -from datetime import datetime, timezone +from datetime import UTC, datetime from os import path from dateutil.tz import tzlocal, tzoffset @@ -271,7 +271,7 @@ def test_parse_java_6_5_3(self): ) self.assertEqual(items[i].severity, "Low") self.assertEqual(items[i].file_path, "log4j-api-2.12.4.jar") - self.assertEqual(items[i].date, datetime(2022, 1, 15, 14, 31, 13, 42600, 
tzinfo=timezone.utc)) + self.assertEqual(items[i].date, datetime(2022, 1, 15, 14, 31, 13, 42600, tzinfo=UTC)) def test_parse_file_pr6439(self): with open("unittests/scans/dependency_check/PR6439.xml", encoding="utf-8") as testfile: diff --git a/unittests/tools/test_sarif_parser.py b/unittests/tools/test_sarif_parser.py index e316ae9fe24..0ae50e659f2 100644 --- a/unittests/tools/test_sarif_parser.py +++ b/unittests/tools/test_sarif_parser.py @@ -64,7 +64,7 @@ def test_example2_report(self): 3. collections/list.h:L25\t-\tadd_core(ptr, offset, val) \tUninitialized variable `ptr` passed to method `add_core`.""" self.assertEqual(description, item.description) - self.assertEqual(datetime.datetime(2016, 7, 16, 14, 19, 1, tzinfo=datetime.timezone.utc), item.date) + self.assertEqual(datetime.datetime(2016, 7, 16, 14, 19, 1, tzinfo=datetime.UTC), item.date) for finding in findings: self.common_checks(finding) @@ -175,7 +175,7 @@ def test_example_report_scanlift_bash(self): item.file_path, ) self.assertIsNone(item.unsaved_vulnerability_ids) - self.assertEqual(datetime.datetime(2021, 3, 8, 15, 39, 40, tzinfo=datetime.timezone.utc), item.date) + self.assertEqual(datetime.datetime(2021, 3, 8, 15, 39, 40, tzinfo=datetime.UTC), item.date) # finding 6 with self.subTest(i=6): finding = findings[6] @@ -207,7 +207,7 @@ def test_example_report_taint_python(self): item.file_path, ) self.assertIsNone(item.unsaved_vulnerability_ids) - self.assertEqual(datetime.datetime(2021, 3, 8, 15, 46, 16, tzinfo=datetime.timezone.utc), item.date) + self.assertEqual(datetime.datetime(2021, 3, 8, 15, 46, 16, tzinfo=datetime.UTC), item.date) self.assertEqual( "scanFileHash:4bc9f13947613303|scanPrimaryLocationHash:1a8bbb28fe7380df|scanTagsHash:21de8f8d0eb8d9b2", finding.unique_id_from_tool, @@ -246,7 +246,7 @@ def test_njsscan(self): finding.file_path, ) self.assertIsNone(finding.unsaved_vulnerability_ids) - self.assertEqual(datetime.datetime(2021, 3, 23, 0, 10, 48, tzinfo=datetime.timezone.utc), finding.date) + self.assertEqual(datetime.datetime(2021, 3, 23, 0, 10, 48, tzinfo=datetime.UTC), finding.date) self.assertEqual(327, finding.cwe) # finding 1 finding = findings[1] @@ -255,7 +255,7 @@ def test_njsscan(self): finding.file_path, ) self.assertEqual(235, finding.line) - self.assertEqual(datetime.datetime(2021, 3, 23, 0, 10, 48, tzinfo=datetime.timezone.utc), finding.date) + self.assertEqual(datetime.datetime(2021, 3, 23, 0, 10, 48, tzinfo=datetime.UTC), finding.date) self.assertEqual(798, finding.cwe) for finding in findings: self.common_checks(finding) diff --git a/unittests/tools/test_stackhawk_parser.py b/unittests/tools/test_stackhawk_parser.py index da043f94104..7f63ea1d458 100644 --- a/unittests/tools/test_stackhawk_parser.py +++ b/unittests/tools/test_stackhawk_parser.py @@ -6,7 +6,7 @@ class TestStackHawkParser(DojoTestCase): - __test_datetime = datetime.datetime(2022, 2, 16, 23, 7, 19, 575000, datetime.timezone.utc) + __test_datetime = datetime.datetime(2022, 2, 16, 23, 7, 19, 575000, datetime.UTC) def test_invalid_json_format(self): with open("unittests/scans/stackhawk/invalid.json", encoding="utf-8") as testfile: From 730dd979bd56060eb48e3227aeed24429eb71837 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 9 Oct 2024 20:53:08 -0500 Subject: [PATCH 13/18] Bump django from 5.0.8 to 5.1.2 (#11025) Bumps [django](https://github.com/django/django) from 5.0.8 to 5.1.2. 
- [Commits](https://github.com/django/django/compare/5.0.8...5.1.2) --- updated-dependencies: - dependency-name: django dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index 3e4b52d094d..0f5f961016a 100644 --- a/requirements.txt +++ b/requirements.txt @@ -21,7 +21,7 @@ django-slack==5.19.0 git+https://github.com/DefectDojo/django-tagging@develop#egg=django-tagging django-watson==1.6.3 django-prometheus==2.3.1 -Django==5.0.8 +Django==5.1.2 djangorestframework==3.15.2 html2text==2024.2.26 humanize==4.11.0 From 3206efb806105f84bb5205d272bbb8b0301fd00c Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 10 Oct 2024 11:35:38 -0500 Subject: [PATCH 14/18] Bump boto3 from 1.35.36 to 1.35.37 (#11037) Bumps [boto3](https://github.com/boto/boto3) from 1.35.36 to 1.35.37. - [Release notes](https://github.com/boto/boto3/releases) - [Commits](https://github.com/boto/boto3/compare/1.35.36...1.35.37) --- updated-dependencies: - dependency-name: boto3 dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index 0f5f961016a..7a8f53dbaed 100644 --- a/requirements.txt +++ b/requirements.txt @@ -69,7 +69,7 @@ django-ratelimit==4.1.0 argon2-cffi==23.1.0 blackduck==1.1.3 pycurl==7.45.3 # Required for Celery Broker AWS (SQS) support -boto3==1.35.36 # Required for Celery Broker AWS (SQS) support +boto3==1.35.37 # Required for Celery Broker AWS (SQS) support netaddr==1.3.0 vulners==2.2.1 fontawesomefree==6.6.0 From 2ec7cb598b5d05ead81a7317efbb8747f9c04206 Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Thu, 10 Oct 2024 11:36:27 -0500 Subject: [PATCH 15/18] Update manusa/actions-setup-minikube action from v2.12.0 to v2.13.0 (.github/workflows/k8s-tests.yml) (#11036) Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com> --- .github/workflows/k8s-tests.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/k8s-tests.yml b/.github/workflows/k8s-tests.yml index d2da08eb7fd..60f8bc3c38c 100644 --- a/.github/workflows/k8s-tests.yml +++ b/.github/workflows/k8s-tests.yml @@ -35,7 +35,7 @@ jobs: uses: actions/checkout@v4 - name: Setup Minikube - uses: manusa/actions-setup-minikube@v2.12.0 + uses: manusa/actions-setup-minikube@v2.13.0 with: minikube version: 'v1.33.1' kubernetes version: ${{ matrix.k8s }} From ac6e327d7fc4dba78adfc616065c22cbea316340 Mon Sep 17 00:00:00 2001 From: testaccount90009 <122134756+testaccount90009@users.noreply.github.com> Date: Fri, 11 Oct 2024 08:07:19 -0700 Subject: [PATCH 16/18] Mend SCA imports contain locations which are similar to filePaths for SAST scans (#11001) * add impact add impact since it is unused * Update test_asff_parser.py * Update parser.py * Update parser.py * Mend SCA imports contain locations which are similar to filePaths for the SAST scans This code will use the 'locations' for SCA scan outputs to do the same thing that's done for SAST 'filePaths'. 
Since a Finding report will be either SAST or SCA, a collision is unlikely: those findings are inherently different in Mend. The file paths are already joined for the SAST implementation, so when the results are SCA instead, the same thing happens, now with the appropriate locations of the library and vulnerability. Note: this is not from the Mend Platform or the CLI Agent output, but rather the Mend SCA portal. There is a new Platform API that combines both SAST and SCA vulnerabilities, so a new parser for that would be good at some point; this parser could then be renamed 'Legacy', since 'Platform' will be the new format.

* Update parser.py

* adding unit test for mend_sca_vulns from Mend SCA portal

Mend has gone through some updates. Historically there were SAST and SCA products, each with its own portal. They are being joined into a Mend Platform that contains SAST, SCA, and other vulnerabilities. This parser originally appears to be based on Mend SAST, but it has also been used for SCA, since the vulnerabilities.json output files are similarly structured. This change updates the parser to extract the location and path from an SCA .json and provide that as the file path. SAST populates this differently than SCA, which is why the file path can be reused for both, depending on the file context found. I hope this code reflects that goal. To note: the test input was not a CLI or Unified Agent generated output file, but vulnerability data downloaded from the Mend SCA portal API and uploaded as the returned vuln.json files using this parser. A parser that correctly accepts the updated, combined vulnerability data sets from the Mend Platform may be needed in the future; that API response .json is different, so this parser does not work for the new Mend Platform .json, as experienced.
* Update test_mend_parser.py
---
 dojo/tools/mend/parser.py | 18 ++++++++
 unittests/scans/mend/mend_sca_vuln.json | 56 +++++++++++++++++++++++++
 unittests/tools/test_mend_parser.py | 8 ++++
 3 files changed, 82 insertions(+)
 create mode 100644 unittests/scans/mend/mend_sca_vuln.json

diff --git a/dojo/tools/mend/parser.py b/dojo/tools/mend/parser.py
index 75ed871a6a1..6bcc96f7501 100644
--- a/dojo/tools/mend/parser.py
+++ b/dojo/tools/mend/parser.py
@@ -102,6 +102,24 @@ def _build_common_output(node, lib_name=None):
                     "Error handling local paths for vulnerability.",
                 )

+        locations = []
+        if "locations" in node:
+            try:
+                locations_node = node.get("locations", [])
+                for location in locations_node:
+                    path = location.get("path")
+                    if path is not None:
+                        locations.append(path)
+            except Exception:
+                logger.exception(
+                    "Error handling local paths for vulnerability.",
+                )
+
+        if locations:
+            filepaths = locations
+        else:
+            filepaths = filepaths
+
         new_finding = Finding(
             title=title,
             test=test,
diff --git a/unittests/scans/mend/mend_sca_vuln.json b/unittests/scans/mend/mend_sca_vuln.json
new file mode 100644
index 00000000000..6af95cb315c
--- /dev/null
+++ b/unittests/scans/mend/mend_sca_vuln.json
@@ -0,0 +1,56 @@
+{
+    "vulnerabilities": [
+        {
+            "name": "WS-2019-0379",
+            "type": "WS",
+            "severity": "medium",
+            "score": "6.5",
+            "cvss3_severity": "MEDIUM",
+            "cvss3_score": "6.5",
+            "publishDate": "2019-05-20",
+            "lastUpdatedDate": "2020-03-05",
+            "scoreMetadataVector": "CVSS:3.1/AV:N/AC:L/PR:N/UI:N/S:U/C:L/I:L/A:N",
+            "description": "Apache commons-codec before version \\u201ccommons-codec-1.13-RC1\\u201d is vulnerable to information disclosure due to Improper Input validation.",
+            "project": "mend-test-sca-project",
+            "product": "mend-test-sca-product",
+            "cvss3Attributes": {
+                "attackVector": "NETWORK",
+                "attackComplexity": "LOW",
+                "userInteraction": "NONE",
+                "privilegesRequired": "NONE",
+                "scope": "UNCHANGED",
+                "confidentialityImpact": "LOW",
+                "integrityImpact": "LOW",
+                "availabilityImpact": "NONE"
+            },
+            "library": {
+                "keyUuid": "e4ad5291-19e0-4907-9cf1-5ce5a1746e89",
+                "filename": "commons-codec-1.6.jar",
+                "type": "JAVA_ARCHIVE",
+                "description": "",
+                "sha1": "b7f0fc8f61ecadeb3695f0b9464755eee44374d4",
+                "name": "commons-codec-1.6",
+                "artifactId": "commons-codec-1.6.jar",
+                "version": "1.6",
+                "groupId": "commons-codec-1.6",
+                "architecture": "",
+                "languageVersion": ""
+            },
+            "topFix": {
+                "vulnerability": "WS-2019-0379",
+                "type": "UPGRADE_VERSION",
+                "origin": "WHITESOURCE_EXPERT",
+                "url": "https://github.com/apache/commons-codec/commit/48b615756d1d770091ea3322eefc08011ee8b113",
+                "fixResolution": "Upgrade to version commons-codec:commons-codec:1.13",
+                "date": "2019-05-20 15:39:18",
+                "message": "Upgrade to version"
+            },
+            "locations": [
+                {
+                    "matchType": "Exact Match",
+                    "path": "D:\\MendRepo\\test-product\\test-project\\test-project-subcomponent\\path\\to\\the\\Java\\commons-codec-1.6_donotuse.jar"
+                }
+            ]
+        }
+    ]
+}
\ No newline at end of file
diff --git a/unittests/tools/test_mend_parser.py b/unittests/tools/test_mend_parser.py
index 393dd4097c1..1cd8cc11dd7 100644
--- a/unittests/tools/test_mend_parser.py
+++ b/unittests/tools/test_mend_parser.py
@@ -35,3 +35,11 @@ def test_parse_file_with_multiple_vuln_cli_output(self):
             parser = MendParser()
             findings = parser.get_findings(testfile, Test())
             self.assertEqual(20, len(findings))
+
+    def test_parse_file_with_one_sca_vuln_finding(self):
+        with open("unittests/scans/mend/mend_sca_vuln.json", encoding="utf-8") as testfile:
+            parser = MendParser()
+            findings = parser.get_findings(testfile, Test())
+            self.assertEqual(1, len(findings))
+            finding = list(findings)[0]
+            self.assertEqual("D:\\MendRepo\\test-product\\test-project\\test-project-subcomponent\\path\\to\\the\\Java\\commons-codec-1.6_donotuse.jar", finding.file_path)

From 6cd1f998c93bfc734af1b1dd34e5e32de4cdcd43 Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Fri, 11 Oct 2024 20:51:06 -0500
Subject: [PATCH 17/18] Bump boto3 from 1.35.37 to 1.35.38 (#11049)

Bumps [boto3](https://github.com/boto/boto3) from 1.35.37 to 1.35.38.
- [Release notes](https://github.com/boto/boto3/releases)
- [Commits](https://github.com/boto/boto3/compare/1.35.37...1.35.38)

---
updated-dependencies:
- dependency-name: boto3
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot]
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
---
 requirements.txt | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/requirements.txt b/requirements.txt
index 7a8f53dbaed..ebb62de12c7 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -69,7 +69,7 @@ django-ratelimit==4.1.0
 argon2-cffi==23.1.0
 blackduck==1.1.3
 pycurl==7.45.3 # Required for Celery Broker AWS (SQS) support
-boto3==1.35.37 # Required for Celery Broker AWS (SQS) support
+boto3==1.35.38 # Required for Celery Broker AWS (SQS) support
 netaddr==1.3.0
 vulners==2.2.1
 fontawesomefree==6.6.0

From 0bc4879ad15b8f7235a02f99600ba5a354249fc1 Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Fri, 11 Oct 2024 20:51:23 -0500
Subject: [PATCH 18/18] Bump vulners from 2.2.1 to 2.2.2 (#11050)

Bumps vulners from 2.2.1 to 2.2.2.

---
updated-dependencies:
- dependency-name: vulners
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot]
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
---
 requirements.txt | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/requirements.txt b/requirements.txt
index ebb62de12c7..b1e8b0be6a5 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -71,6 +71,6 @@ blackduck==1.1.3
 pycurl==7.45.3 # Required for Celery Broker AWS (SQS) support
 boto3==1.35.38 # Required for Celery Broker AWS (SQS) support
 netaddr==1.3.0
-vulners==2.2.1
+vulners==2.2.2
 fontawesomefree==6.6.0
 PyYAML==6.0.2
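
A note for readers tracing the `timezone.utc` -> `UTC` substitutions in the unittest patches above: `datetime.UTC` is the Python 3.11+ alias for `datetime.timezone.utc`, so those diffs are pure renames with no behavioral change. A minimal standalone sketch (not part of the patch series) demonstrating the equivalence:

```python
# datetime.UTC (added in Python 3.11) is the same singleton object as
# datetime.timezone.utc, so the renames in the test diffs above cannot
# change any assertion results.
import datetime

assert datetime.UTC is datetime.timezone.utc
aware = datetime.datetime(2020, 12, 9, tzinfo=datetime.UTC)
print(aware.isoformat())  # 2020-12-09T00:00:00+00:00
```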
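To make the Mend parser change above easier to follow, here is a hedged standalone sketch of the fallback it introduces. The helper name `extract_file_paths` and the `sourceFiles`/`localPath` keys for the SAST branch are illustrative assumptions (the pre-existing filepaths collection sits outside the hunk shown); only the `locations`/`path` handling mirrors the patch directly:

```python
# Sketch of the SCA 'locations' fallback added to dojo/tools/mend/parser.py.
# Assumption: SAST-style paths come from 'sourceFiles'/'localPath' entries;
# that part of the parser is not visible in the hunk above.
import json

def extract_file_paths(node: dict) -> list[str]:
    # SAST-style reports: collect local file paths (assumed key names).
    filepaths = [
        source_file["localPath"]
        for source_file in node.get("sourceFiles", [])
        if source_file.get("localPath") is not None
    ]
    # SCA-style reports: collect the 'path' of each entry under 'locations'.
    locations = [
        location["path"]
        for location in node.get("locations", [])
        if location.get("path") is not None
    ]
    # A report is either SAST or SCA, so prefer locations when present;
    # equivalent to the patch's if/else assignment to filepaths.
    return locations or filepaths

sca_vuln = json.loads(
    '{"locations": [{"matchType": "Exact Match", "path": "lib/commons-codec-1.6.jar"}]}'
)
print(extract_file_paths(sca_vuln))  # ['lib/commons-codec-1.6.jar']
```

Run against the `mend_sca_vuln.json` fixture above, the same logic yields the Windows-style path asserted in `test_parse_file_with_one_sca_vuln_finding`.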