From a309c716cc72776695052f982d63093458113cc5 Mon Sep 17 00:00:00 2001
From: kiblik <5609770+kiblik@users.noreply.github.com>
Date: Wed, 3 Jul 2024 22:16:13 +0200
Subject: [PATCH] Ruff: add and fix COM (#10086)
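
Enable the flake8-commas (COM) rule set in ruff.toml and apply the
resulting autofixes across the code base: a trailing comma is added after
the last argument or element of multi-line calls, literals, and signatures.
The changes below are otherwise mechanical and do not alter behaviour.

A representative before/after, copied from the first hunk of this patch
(docker/install_chrome_dependencies.py); only the trailing comma changes:

    # before
    result = subprocess.run(
        ["ldd", file_path], capture_output=True, text=True
    )

    # after
    result = subprocess.run(
        ["ldd", file_path], capture_output=True, text=True,
    )
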
---
docker/install_chrome_dependencies.py | 2 +-
dojo/admin.py | 2 +-
dojo/announcement/signals.py | 8 +-
dojo/announcement/views.py | 6 +-
dojo/api_v2/exception_handler.py | 2 +-
dojo/api_v2/mixins.py | 2 +-
dojo/api_v2/permissions.py | 100 +++---
dojo/api_v2/prefetch/prefetcher.py | 4 +-
dojo/api_v2/prefetch/schema.py | 6 +-
dojo/api_v2/prefetch/utils.py | 2 +-
dojo/api_v2/serializers.py | 260 +++++++--------
dojo/api_v2/views.py | 254 +++++++-------
dojo/apps.py | 8 +-
dojo/authorization/authorization.py | 24 +-
.../authorization/authorization_decorators.py | 2 +-
dojo/banner/urls.py | 2 +-
dojo/banner/views.py | 4 +-
dojo/benchmark/views.py | 38 +--
dojo/checks.py | 2 +-
dojo/components/sql_group_concat.py | 6 +-
dojo/components/views.py | 20 +-
dojo/context_processors.py | 2 +-
dojo/cred/views.py | 28 +-
dojo/endpoint/signals.py | 2 +-
dojo/endpoint/utils.py | 10 +-
dojo/endpoint/views.py | 4 +-
dojo/engagement/signals.py | 2 +-
dojo/engagement/views.py | 40 +--
dojo/filters.py | 74 ++---
dojo/finding/queries.py | 6 +-
dojo/finding/urls.py | 44 +--
dojo/finding/views.py | 312 +++++++++---------
dojo/finding_group/signals.py | 2 +-
dojo/forms.py | 37 +--
dojo/github_issue_link/views.py | 4 +-
dojo/group/urls.py | 2 +-
dojo/group/views.py | 10 +-
dojo/home/views.py | 2 +-
dojo/importers/auto_create_context.py | 2 +-
dojo/importers/base_importer.py | 8 +-
dojo/importers/default_importer.py | 10 +-
dojo/importers/default_reimporter.py | 28 +-
dojo/importers/endpoint_manager.py | 6 +-
dojo/jira_link/helper.py | 10 +-
dojo/jira_link/views.py | 6 +-
dojo/management/commands/dedupe.py | 2 +-
.../commands/initialize_permissions.py | 2 +-
.../commands/jira_status_reconciliation.py | 2 +-
dojo/management/commands/migrate_cve.py | 4 +-
dojo/metrics/utils.py | 78 ++---
dojo/metrics/views.py | 18 +-
dojo/models.py | 40 +--
dojo/notes/urls.py | 2 +-
dojo/notifications/helper.py | 4 +-
dojo/notifications/urls.py | 2 +-
dojo/notifications/views.py | 2 +-
dojo/object/views.py | 6 +-
dojo/okta.py | 8 +-
dojo/product/queries.py | 2 +-
dojo/product/signals.py | 2 +-
dojo/product/views.py | 50 +--
dojo/product_type/signals.py | 2 +-
dojo/product_type/urls.py | 2 +-
dojo/product_type/views.py | 4 +-
dojo/regulations/urls.py | 2 +-
dojo/regulations/views.py | 6 +-
dojo/reports/views.py | 12 +-
dojo/risk_acceptance/helper.py | 6 +-
dojo/settings/.settings.dist.py.sha256sum | 2 +-
.../attribute-maps/django_saml_uri.py | 2 +-
dojo/settings/attribute-maps/saml_uri.py | 2 +-
dojo/settings/settings.dist.py | 62 ++--
dojo/settings/settings.py | 2 +-
dojo/settings/unittest.py | 2 +-
dojo/sla_config/views.py | 10 +-
dojo/survey/views.py | 16 +-
dojo/system_settings/urls.py | 4 +-
dojo/system_settings/views.py | 2 +-
dojo/templatetags/display_tags.py | 12 +-
dojo/test/signals.py | 2 +-
dojo/test/urls.py | 2 +-
dojo/test/views.py | 14 +-
dojo/tool_config/views.py | 4 +-
dojo/tool_product/views.py | 8 +-
dojo/tool_type/views.py | 4 +-
dojo/tools/acunetix/parse_acunetix360_json.py | 2 +-
dojo/tools/acunetix/parse_acunetix_xml.py | 26 +-
dojo/tools/anchore_engine/parser.py | 4 +-
dojo/tools/anchore_enterprise/parser.py | 6 +-
dojo/tools/anchore_grype/parser.py | 6 +-
dojo/tools/anchorectl_policies/parser.py | 2 +-
dojo/tools/anchorectl_vulns/parser.py | 4 +-
dojo/tools/api_blackduck/api_client.py | 2 +-
dojo/tools/api_blackduck/parser.py | 6 +-
dojo/tools/api_bugcrowd/api_client.py | 16 +-
dojo/tools/api_bugcrowd/importer.py | 2 +-
dojo/tools/api_bugcrowd/parser.py | 20 +-
dojo/tools/api_cobalt/api_client.py | 6 +-
dojo/tools/api_cobalt/parser.py | 2 +-
dojo/tools/api_edgescan/importer.py | 2 +-
dojo/tools/api_edgescan/parser.py | 4 +-
dojo/tools/api_sonarqube/api_client.py | 4 +-
dojo/tools/api_sonarqube/importer.py | 20 +-
dojo/tools/api_sonarqube/updater.py | 16 +-
.../api_sonarqube/updater_from_source.py | 4 +-
dojo/tools/api_vulners/importer.py | 2 +-
dojo/tools/api_vulners/parser.py | 2 +-
dojo/tools/aqua/parser.py | 4 +-
dojo/tools/arachni/parser.py | 2 +-
dojo/tools/auditjs/parser.py | 10 +-
dojo/tools/aws_prowler/parser.py | 4 +-
dojo/tools/awssecurityhub/parser.py | 2 +-
.../parser.py | 8 +-
dojo/tools/bandit/parser.py | 4 +-
dojo/tools/blackduck/importer.py | 8 +-
dojo/tools/blackduck/parser.py | 4 +-
.../blackduck_binary_analysis/importer.py | 6 +-
.../tools/blackduck_binary_analysis/parser.py | 4 +-
dojo/tools/blackduck_component_risk/parser.py | 22 +-
dojo/tools/bugcrowd/parser.py | 10 +-
dojo/tools/burp/parser.py | 14 +-
dojo/tools/burp_api/parser.py | 14 +-
dojo/tools/burp_enterprise/parser.py | 8 +-
dojo/tools/burp_graphql/parser.py | 32 +-
dojo/tools/cargo_audit/parser.py | 6 +-
dojo/tools/checkmarx/parser.py | 46 +--
dojo/tools/checkmarx_one/parser.py | 4 +-
dojo/tools/checkmarx_osa/parser.py | 8 +-
dojo/tools/chefinspect/parser.py | 2 +-
dojo/tools/clair/clairklar_parser.py | 4 +-
dojo/tools/cloudsploit/parser.py | 2 +-
dojo/tools/cobalt/parser.py | 4 +-
dojo/tools/contrast/parser.py | 8 +-
dojo/tools/coverity_api/parser.py | 6 +-
dojo/tools/crashtest_security/parser.py | 8 +-
dojo/tools/cred_scan/parser.py | 8 +-
dojo/tools/crunch42/parser.py | 2 +-
dojo/tools/cyclonedx/helpers.py | 2 +-
dojo/tools/cyclonedx/json_parser.py | 8 +-
dojo/tools/cyclonedx/xml_parser.py | 50 +--
dojo/tools/dawnscanner/parser.py | 2 +-
dojo/tools/dependency_check/parser.py | 52 +--
dojo/tools/detect_secrets/parser.py | 2 +-
dojo/tools/dockerbench/parser.py | 4 +-
dojo/tools/dockle/parser.py | 2 +-
dojo/tools/dsop/parser.py | 10 +-
dojo/tools/eslint/parser.py | 2 +-
dojo/tools/fortify/fpr_parser.py | 2 +-
dojo/tools/fortify/xml_parser.py | 6 +-
dojo/tools/gcloud_artifact_scan/parser.py | 2 +-
dojo/tools/generic/csv_parser.py | 14 +-
dojo/tools/generic/json_parser.py | 2 +-
dojo/tools/ggshield/parser.py | 2 +-
dojo/tools/github_vulnerability/parser.py | 8 +-
dojo/tools/gitlab_api_fuzzing/parser.py | 2 +-
dojo/tools/gitlab_container_scan/parser.py | 4 +-
dojo/tools/gitlab_dast/parser.py | 8 +-
dojo/tools/gitlab_sast/parser.py | 4 +-
dojo/tools/gitleaks/parser.py | 6 +-
dojo/tools/gosec/parser.py | 4 +-
dojo/tools/govulncheck/parser.py | 16 +-
dojo/tools/h1/parser.py | 14 +-
dojo/tools/hadolint/parser.py | 2 +-
dojo/tools/horusec/parser.py | 8 +-
dojo/tools/huskyci/parser.py | 4 +-
dojo/tools/hydra/parser.py | 8 +-
dojo/tools/ibm_app/parser.py | 22 +-
dojo/tools/immuniweb/parser.py | 4 +-
dojo/tools/intsights/csv_handler.py | 14 +-
dojo/tools/intsights/json_handler.py | 10 +-
dojo/tools/intsights/parser.py | 6 +-
.../jfrog_xray_api_summary_artifact/parser.py | 4 +-
dojo/tools/jfrog_xray_unified/parser.py | 2 +-
dojo/tools/jfrogxray/parser.py | 6 +-
dojo/tools/kics/parser.py | 2 +-
dojo/tools/kiuwan/parser.py | 4 +-
dojo/tools/kubehunter/parser.py | 2 +-
dojo/tools/kubescape/parser.py | 2 +-
dojo/tools/mend/parser.py | 10 +-
dojo/tools/meterian/parser.py | 4 +-
dojo/tools/microfocus_webinspect/parser.py | 6 +-
dojo/tools/mobsf/parser.py | 32 +-
dojo/tools/mobsfscan/parser.py | 8 +-
dojo/tools/mozilla_observatory/parser.py | 2 +-
dojo/tools/netsparker/parser.py | 8 +-
dojo/tools/neuvector/parser.py | 2 +-
dojo/tools/nexpose/parser.py | 16 +-
dojo/tools/nikto/json_parser.py | 4 +-
dojo/tools/nikto/xml_parser.py | 4 +-
dojo/tools/nmap/parser.py | 12 +-
dojo/tools/noseyparker/parser.py | 2 +-
dojo/tools/nuclei/parser.py | 8 +-
dojo/tools/openscap/parser.py | 12 +-
dojo/tools/openvas/csv_parser.py | 4 +-
dojo/tools/openvas/xml_parser.py | 2 +-
dojo/tools/ort/parser.py | 20 +-
dojo/tools/ossindex_devaudit/parser.py | 2 +-
dojo/tools/outpost24/parser.py | 6 +-
dojo/tools/php_security_audit_v2/parser.py | 2 +-
.../php_symfony_security_check/parser.py | 4 +-
dojo/tools/pmd/parser.py | 6 +-
dojo/tools/popeye/parser.py | 6 +-
dojo/tools/progpilot/parser.py | 2 +-
dojo/tools/pwn_sast/parser.py | 10 +-
dojo/tools/qualys/csv_parser.py | 18 +-
dojo/tools/qualys/parser.py | 6 +-
dojo/tools/qualys_infrascan_webgui/parser.py | 6 +-
dojo/tools/qualys_webapp/parser.py | 52 +--
dojo/tools/retirejs/parser.py | 4 +-
dojo/tools/risk_recon/parser.py | 2 +-
dojo/tools/rubocop/parser.py | 2 +-
dojo/tools/rusty_hog/parser.py | 32 +-
dojo/tools/sarif/parser.py | 18 +-
dojo/tools/scantist/parser.py | 2 +-
dojo/tools/scout_suite/parser.py | 6 +-
dojo/tools/semgrep/parser.py | 12 +-
dojo/tools/skf/parser.py | 10 +-
dojo/tools/snyk/parser.py | 10 +-
dojo/tools/snyk_code/parser.py | 10 +-
dojo/tools/solar_appscreener/parser.py | 2 +-
dojo/tools/sonarqube/parser.py | 2 +-
dojo/tools/sonarqube/soprasteria_helper.py | 2 +-
dojo/tools/sonarqube/soprasteria_html.py | 8 +-
dojo/tools/sonarqube/soprasteria_json.py | 4 +-
dojo/tools/sonatype/parser.py | 2 +-
dojo/tools/spotbugs/parser.py | 6 +-
dojo/tools/ssl_labs/parser.py | 8 +-
dojo/tools/sslscan/parser.py | 2 +-
dojo/tools/sslyze/parser_json.py | 24 +-
dojo/tools/sslyze/parser_xml.py | 8 +-
dojo/tools/stackhawk/parser.py | 12 +-
dojo/tools/sysdig_reports/sysdig_data.py | 2 +-
dojo/tools/talisman/parser.py | 2 +-
dojo/tools/tenable/csv_format.py | 6 +-
dojo/tools/tenable/parser.py | 2 +-
dojo/tools/tenable/xml_format.py | 32 +-
dojo/tools/terrascan/parser.py | 2 +-
dojo/tools/testssl/parser.py | 12 +-
dojo/tools/tfsec/parser.py | 4 +-
dojo/tools/threagile/parser.py | 4 +-
dojo/tools/trivy/parser.py | 8 +-
dojo/tools/trivy_operator/parser.py | 2 +-
.../trivy_operator/vulnerability_handler.py | 2 +-
dojo/tools/trufflehog/parser.py | 6 +-
dojo/tools/trufflehog3/parser.py | 2 +-
dojo/tools/trustwave/parser.py | 6 +-
dojo/tools/trustwave_fusion_api/parser.py | 4 +-
dojo/tools/twistlock/parser.py | 8 +-
dojo/tools/vcg/parser.py | 8 +-
dojo/tools/veracode/json_parser.py | 4 +-
dojo/tools/veracode/xml_parser.py | 42 +--
dojo/tools/veracode_sca/parser.py | 12 +-
dojo/tools/wapiti/parser.py | 6 +-
dojo/tools/wfuzz/parser.py | 6 +-
dojo/tools/whispers/parser.py | 4 +-
dojo/tools/whitehat_sentinel/parser.py | 24 +-
dojo/tools/wiz/parser.py | 2 +-
dojo/tools/wpscan/parser.py | 16 +-
dojo/tools/xanitizer/parser.py | 14 +-
dojo/tools/zap/parser.py | 10 +-
dojo/urls.py | 4 +-
dojo/user/urls.py | 4 +-
dojo/user/views.py | 4 +-
dojo/utils.py | 60 ++--
dojo/views.py | 2 +-
dojo/widgets.py | 2 +-
dojo/wsgi.py | 2 +-
ruff.toml | 1 +
tests/base_test_class.py | 26 +-
tests/false_positive_history_test.py | 8 +-
tests/notifications_test.py | 2 +-
tests/zap.py | 2 +-
unittests/dojo_test_case.py | 6 +-
unittests/test_api_sonarqube_updater.py | 28 +-
unittests/test_apiv2_endpoint.py | 18 +-
unittests/test_apiv2_methods_and_endpoints.py | 2 +-
unittests/test_apiv2_notifications.py | 4 +-
unittests/test_apiv2_user.py | 20 +-
unittests/test_apply_finding_template.py | 2 +-
unittests/test_dashboard.py | 4 +-
unittests/test_deduplication_logic.py | 2 +-
unittests/test_endpoint_model.py | 38 +--
.../test_false_positive_history_logic.py | 2 +-
unittests/test_finding_helper.py | 20 +-
unittests/test_import_reimport.py | 28 +-
unittests/test_jira_webhook.py | 24 +-
unittests/test_metrics_queries.py | 60 ++--
unittests/test_migrations.py | 12 +-
unittests/test_parsers.py | 20 +-
unittests/test_remote_user.py | 20 +-
unittests/test_rest_framework.py | 104 +++---
unittests/test_risk_acceptance.py | 18 +-
unittests/test_search_parser.py | 2 +-
unittests/test_utils.py | 6 +-
.../tools/test_anchore_enterprise_parser.py | 8 +-
unittests/tools/test_api_bugcrowd_importer.py | 8 +-
unittests/tools/test_api_bugcrowd_parser.py | 30 +-
unittests/tools/test_api_edgescan_parser.py | 2 +-
.../tools/test_api_sonarqube_importer.py | 44 +--
unittests/tools/test_api_sonarqube_parser.py | 4 +-
unittests/tools/test_auditjs_parser.py | 2 +-
unittests/tools/test_aws_prowler_parser.py | 2 +-
unittests/tools/test_awssecurityhub_parser.py | 2 +-
unittests/tools/test_bandit_parser.py | 2 +-
.../test_blackduck_binary_analysis_parser.py | 4 +-
.../test_blackduck_component_risk_parser.py | 2 +-
unittests/tools/test_blackduck_parser.py | 4 +-
unittests/tools/test_checkmarx_osa_parser.py | 18 +-
unittests/tools/test_checkmarx_parser.py | 48 +--
unittests/tools/test_checkov_parser.py | 6 +-
unittests/tools/test_codechecker_parser.py | 8 +-
.../tools/test_crashtest_security_parser.py | 2 +-
.../tools/test_dependency_check_parser.py | 16 +-
.../tools/test_dependency_track_parser.py | 18 +-
unittests/tools/test_dockerbench_parser.py | 6 +-
.../test_gitlab_container_scan_parser.py | 2 +-
unittests/tools/test_gitlab_dast_parser.py | 8 +-
unittests/tools/test_govulncheck_parser.py | 2 +-
unittests/tools/test_huskyci_parser.py | 6 +-
unittests/tools/test_hydra_parser.py | 14 +-
..._jfrog_xray_api_summary_artifact_parser.py | 4 +-
unittests/tools/test_kubebench_parser.py | 8 +-
unittests/tools/test_kubehunter_parser.py | 2 +-
unittests/tools/test_mend_parser.py | 2 +-
.../test_microfocus_webinspect_parser.py | 14 +-
unittests/tools/test_noseyparker_parser.py | 2 +-
unittests/tools/test_ort_parser.py | 2 +-
.../tools/test_ossindex_devaudit_parser.py | 30 +-
.../test_php_symfony_security_check_parser.py | 6 +-
.../test_qualys_infrascan_webgui_parser.py | 8 +-
unittests/tools/test_qualys_parser.py | 42 +--
unittests/tools/test_qualys_webapp_parser.py | 6 +-
unittests/tools/test_sarif_parser.py | 20 +-
unittests/tools/test_scantist_parser.py | 2 +-
unittests/tools/test_snyk_parser.py | 8 +-
.../tools/test_solar_appscreener_parser.py | 2 +-
unittests/tools/test_sonarqube_parser.py | 46 +--
unittests/tools/test_spotbugs_parser.py | 2 +-
unittests/tools/test_sslyze_parser.py | 6 +-
unittests/tools/test_stackhawk_parser.py | 26 +-
unittests/tools/test_sysdig_reports_parser.py | 4 +-
unittests/tools/test_talisman_parser.py | 2 +-
.../tools/test_trustwave_fusion_api_parser.py | 8 +-
unittests/tools/test_twistlock_parser.py | 8 +-
unittests/tools/test_veracode_parser.py | 2 +-
unittests/tools/test_yarn_audit_parser.py | 2 +-
346 files changed, 2184 insertions(+), 2184 deletions(-)
diff --git a/docker/install_chrome_dependencies.py b/docker/install_chrome_dependencies.py
index 2bf949c86c..c85372bf5d 100644
--- a/docker/install_chrome_dependencies.py
+++ b/docker/install_chrome_dependencies.py
@@ -25,7 +25,7 @@ def ldd(file_path):
# For simplicity, I'm assuming if we get an error, the code is non-zero.
try:
result = subprocess.run(
- ["ldd", file_path], capture_output=True, text=True
+ ["ldd", file_path], capture_output=True, text=True,
)
stdout = result.stdout
code = result.returncode
diff --git a/dojo/admin.py b/dojo/admin.py
index 68353f24ab..87823ff4d0 100644
--- a/dojo/admin.py
+++ b/dojo/admin.py
@@ -49,7 +49,7 @@ class QuestionParentAdmin(PolymorphicParentModelAdmin):
base_model = Question
child_models = (
TextQuestion,
- ChoiceQuestion
+ ChoiceQuestion,
)
diff --git a/dojo/announcement/signals.py b/dojo/announcement/signals.py
index 580da64a84..9a2682eddb 100644
--- a/dojo/announcement/signals.py
+++ b/dojo/announcement/signals.py
@@ -17,11 +17,11 @@ def add_announcement_to_new_user(sender, instance, **kwargs):
)
if not cloud_announcement or settings.CREATE_CLOUD_BANNER:
user_announcements = UserAnnouncement.objects.filter(
- user=dojo_user, announcement=announcement
+ user=dojo_user, announcement=announcement,
)
if user_announcements.count() == 0:
UserAnnouncement.objects.get_or_create(
- user=dojo_user, announcement=announcement
+ user=dojo_user, announcement=announcement,
)
@@ -31,8 +31,8 @@ def announcement_post_save(sender, instance, created, **kwargs):
UserAnnouncement.objects.bulk_create(
[
UserAnnouncement(
- user=user_id, announcement=instance
+ user=user_id, announcement=instance,
)
for user_id in Dojo_User.objects.all()
- ]
+ ],
)
diff --git a/dojo/announcement/views.py b/dojo/announcement/views.py
index 5c01ffaaf4..6b0cb16bc3 100644
--- a/dojo/announcement/views.py
+++ b/dojo/announcement/views.py
@@ -28,7 +28,7 @@ def configure_announcement(request):
"message": announcement.message,
"style": announcement.style,
"dismissable": announcement.dismissable,
- }
+ },
)
remove = True
except Announcement.DoesNotExist:
@@ -64,14 +64,14 @@ def configure_announcement(request):
request=request,
)
return render(
- request, "dojo/announcement.html", {"form": form, "remove": remove}
+ request, "dojo/announcement.html", {"form": form, "remove": remove},
)
def dismiss_announcement(request):
if request.method == "POST":
deleted_count, _objects_deleted = UserAnnouncement.objects.filter(
- user=request.user, announcement=1
+ user=request.user, announcement=1,
).delete()
if deleted_count > 0:
messages.add_message(
diff --git a/dojo/api_v2/exception_handler.py b/dojo/api_v2/exception_handler.py
index b4d8143366..513c98004b 100644
--- a/dojo/api_v2/exception_handler.py
+++ b/dojo/api_v2/exception_handler.py
@@ -54,7 +54,7 @@ def custom_exception_handler(exc, context):
# message, if it is different from the detail that is already
# in the response.
if isinstance(response.data, dict) and str(
- exc
+ exc,
) != response.data.get("detail", ""):
response.data["message"] = str(exc)
else:
diff --git a/dojo/api_v2/mixins.py b/dojo/api_v2/mixins.py
index 749f7ab8b5..e32683c374 100644
--- a/dojo/api_v2/mixins.py
+++ b/dojo/api_v2/mixins.py
@@ -15,7 +15,7 @@ class DeletePreviewModelMixin:
@extend_schema(
methods=["GET"],
responses={
- status.HTTP_200_OK: serializers.DeletePreviewSerializer(many=True)
+ status.HTTP_200_OK: serializers.DeletePreviewSerializer(many=True),
},
)
@action(detail=True, methods=["get"], filter_backends=[], suffix="List")
diff --git a/dojo/api_v2/permissions.py b/dojo/api_v2/permissions.py
index 10991bb3e5..f766982683 100644
--- a/dojo/api_v2/permissions.py
+++ b/dojo/api_v2/permissions.py
@@ -62,7 +62,7 @@ def check_object_permission(
class UserHasAppAnalysisPermission(permissions.BasePermission):
def has_permission(self, request, view):
return check_post_permission(
- request, Product, "product", Permissions.Technology_Add
+ request, Product, "product", Permissions.Technology_Add,
)
def has_object_permission(self, request, view, obj):
@@ -79,22 +79,22 @@ class UserHasCredentialPermission(permissions.BasePermission):
def has_permission(self, request, view):
if request.data.get("product") is not None:
return check_post_permission(
- request, Cred_Mapping, "product", Permissions.Credential_Add
+ request, Cred_Mapping, "product", Permissions.Credential_Add,
)
if request.data.get("engagement") is not None:
return check_post_permission(
- request, Cred_Mapping, "engagement", Permissions.Credential_Add
+ request, Cred_Mapping, "engagement", Permissions.Credential_Add,
)
if request.data.get("test") is not None:
return check_post_permission(
- request, Cred_Mapping, "test", Permissions.Credential_Add
+ request, Cred_Mapping, "test", Permissions.Credential_Add,
)
if request.data.get("finding") is not None:
return check_post_permission(
- request, Cred_Mapping, "finding", Permissions.Credential_Add
+ request, Cred_Mapping, "finding", Permissions.Credential_Add,
)
return check_post_permission(
- request, Cred_Mapping, "product", Permissions.Credential_Add
+ request, Cred_Mapping, "product", Permissions.Credential_Add,
)
def has_object_permission(self, request, view, obj):
@@ -111,11 +111,11 @@ class UserHasDojoGroupPermission(permissions.BasePermission):
def has_permission(self, request, view):
if request.method == "GET":
return user_has_configuration_permission(
- request.user, "auth.view_group"
+ request.user, "auth.view_group",
)
elif request.method == "POST":
return user_has_configuration_permission(
- request.user, "auth.add_group"
+ request.user, "auth.add_group",
)
else:
return True
@@ -126,9 +126,9 @@ def has_object_permission(self, request, view, obj):
# because with the group they can see user information that might
# be considered as confidential
return user_has_configuration_permission(
- request.user, "auth.view_group"
+ request.user, "auth.view_group",
) and user_has_permission(
- request.user, obj, Permissions.Group_View
+ request.user, obj, Permissions.Group_View,
)
else:
return check_object_permission(
@@ -143,7 +143,7 @@ def has_object_permission(self, request, view, obj):
class UserHasDojoGroupMemberPermission(permissions.BasePermission):
def has_permission(self, request, view):
return check_post_permission(
- request, Dojo_Group, "group", Permissions.Group_Manage_Members
+ request, Dojo_Group, "group", Permissions.Group_Manage_Members,
)
def has_object_permission(self, request, view, obj):
@@ -166,7 +166,7 @@ def has_permission(self, request, view):
has_permission_result = (
has_permission_result
and user_has_permission(
- request.user, object, Permissions.Product_Edit
+ request.user, object, Permissions.Product_Edit,
)
)
finding_id = request.data.get("finding", None)
@@ -175,7 +175,7 @@ def has_permission(self, request, view):
has_permission_result = (
has_permission_result
and user_has_permission(
- request.user, object, Permissions.Finding_Edit
+ request.user, object, Permissions.Finding_Edit,
)
)
endpoint_id = request.data.get("endpoint", None)
@@ -184,7 +184,7 @@ def has_permission(self, request, view):
has_permission_result = (
has_permission_result
and user_has_permission(
- request.user, object, Permissions.Endpoint_Edit
+ request.user, object, Permissions.Endpoint_Edit,
)
)
return has_permission_result
@@ -235,7 +235,7 @@ def has_object_permission(self, request, view, obj):
class UserHasToolProductSettingsPermission(permissions.BasePermission):
def has_permission(self, request, view):
return check_post_permission(
- request, Product, "product", Permissions.Product_Edit
+ request, Product, "product", Permissions.Product_Edit,
)
def has_object_permission(self, request, view, obj):
@@ -251,7 +251,7 @@ def has_object_permission(self, request, view, obj):
class UserHasEndpointPermission(permissions.BasePermission):
def has_permission(self, request, view):
return check_post_permission(
- request, Product, "product", Permissions.Endpoint_Add
+ request, Product, "product", Permissions.Endpoint_Add,
)
def has_object_permission(self, request, view, obj):
@@ -267,7 +267,7 @@ def has_object_permission(self, request, view, obj):
class UserHasEndpointStatusPermission(permissions.BasePermission):
def has_permission(self, request, view):
return check_post_permission(
- request, Endpoint, "endpoint", Permissions.Endpoint_Edit
+ request, Endpoint, "endpoint", Permissions.Endpoint_Edit,
)
def has_object_permission(self, request, view, obj):
@@ -288,10 +288,10 @@ class UserHasEngagementPermission(permissions.BasePermission):
def has_permission(self, request, view):
if UserHasEngagementPermission.path_engagement_post.match(
- request.path
+ request.path,
) or UserHasEngagementPermission.path_engagement.match(request.path):
return check_post_permission(
- request, Product, "product", Permissions.Engagement_Add
+ request, Product, "product", Permissions.Engagement_Add,
)
else:
# related object only need object permission
@@ -299,7 +299,7 @@ def has_permission(self, request, view):
def has_object_permission(self, request, view, obj):
if UserHasEngagementPermission.path_engagement_post.match(
- request.path
+ request.path,
) or UserHasEngagementPermission.path_engagement.match(request.path):
return check_object_permission(
request,
@@ -327,12 +327,12 @@ class UserHasRiskAcceptancePermission(permissions.BasePermission):
def has_permission(self, request, view):
if UserHasRiskAcceptancePermission.path_risk_acceptance_post.match(
- request.path
+ request.path,
) or UserHasRiskAcceptancePermission.path_risk_acceptance.match(
- request.path
+ request.path,
):
return check_post_permission(
- request, Product, "product", Permissions.Risk_Acceptance
+ request, Product, "product", Permissions.Risk_Acceptance,
)
else:
# related object only need object permission
@@ -340,9 +340,9 @@ def has_permission(self, request, view):
def has_object_permission(self, request, view, obj):
if UserHasRiskAcceptancePermission.path_risk_acceptance_post.match(
- request.path
+ request.path,
) or UserHasRiskAcceptancePermission.path_risk_acceptance.match(
- request.path
+ request.path,
):
return check_object_permission(
request,
@@ -375,12 +375,12 @@ def has_permission(self, request, view):
UserHasFindingPermission.path_finding_post.match(request.path)
or UserHasFindingPermission.path_finding.match(request.path)
or UserHasFindingPermission.path_stub_finding_post.match(
- request.path
+ request.path,
)
or UserHasFindingPermission.path_stub_finding.match(request.path)
):
return check_post_permission(
- request, Test, "test", Permissions.Finding_Add
+ request, Test, "test", Permissions.Finding_Add,
)
else:
# related object only need object permission
@@ -391,7 +391,7 @@ def has_object_permission(self, request, view, obj):
UserHasFindingPermission.path_finding_post.match(request.path)
or UserHasFindingPermission.path_finding.match(request.path)
or UserHasFindingPermission.path_stub_finding_post.match(
- request.path
+ request.path,
)
or UserHasFindingPermission.path_stub_finding.match(request.path)
):
@@ -433,7 +433,7 @@ def has_permission(self, request, view):
if engagement := converted_dict.get("engagement"):
# existing engagement, nothing special to check
return user_has_permission(
- request.user, engagement, Permissions.Import_Scan_Result
+ request.user, engagement, Permissions.Import_Scan_Result,
)
elif engagement_id := converted_dict.get("engagement_id"):
# engagement_id doesn't exist
@@ -488,7 +488,7 @@ def has_permission(self, request, view):
if product:
# existing product, nothing special to check
return user_has_permission(
- request.user, product, Permissions.Import_Scan_Result
+ request.user, product, Permissions.Import_Scan_Result,
)
elif product_id := converted_dict.get("product_id"):
# product_id doesn't exist
@@ -521,7 +521,7 @@ def has_object_permission(self, request, view, obj):
class UserHasProductMemberPermission(permissions.BasePermission):
def has_permission(self, request, view):
return check_post_permission(
- request, Product, "product", Permissions.Product_Manage_Members
+ request, Product, "product", Permissions.Product_Manage_Members,
)
def has_object_permission(self, request, view, obj):
@@ -537,7 +537,7 @@ def has_object_permission(self, request, view, obj):
class UserHasProductGroupPermission(permissions.BasePermission):
def has_permission(self, request, view):
return check_post_permission(
- request, Product, "product", Permissions.Product_Group_Add
+ request, Product, "product", Permissions.Product_Group_Add,
)
def has_object_permission(self, request, view, obj):
@@ -554,7 +554,7 @@ class UserHasProductTypePermission(permissions.BasePermission):
def has_permission(self, request, view):
if request.method == "POST":
return user_has_global_permission(
- request.user, Permissions.Product_Type_Add
+ request.user, Permissions.Product_Type_Add,
)
else:
return True
@@ -629,7 +629,7 @@ def has_permission(self, request, view):
if test := converted_dict.get("test"):
# existing test, nothing special to check
return user_has_permission(
- request.user, test, Permissions.Import_Scan_Result
+ request.user, test, Permissions.Import_Scan_Result,
)
elif test_id := converted_dict.get("test_id"):
# test_id doesn't exist
@@ -671,10 +671,10 @@ class UserHasTestPermission(permissions.BasePermission):
def has_permission(self, request, view):
if UserHasTestPermission.path_tests_post.match(
- request.path
+ request.path,
) or UserHasTestPermission.path_tests.match(request.path):
return check_post_permission(
- request, Engagement, "engagement", Permissions.Test_Add
+ request, Engagement, "engagement", Permissions.Test_Add,
)
else:
# related object only need object permission
@@ -682,7 +682,7 @@ def has_permission(self, request, view):
def has_object_permission(self, request, view, obj):
if UserHasTestPermission.path_tests_post.match(
- request.path
+ request.path,
) or UserHasTestPermission.path_tests.match(request.path):
return check_object_permission(
request,
@@ -705,7 +705,7 @@ def has_object_permission(self, request, view, obj):
class UserHasTestImportPermission(permissions.BasePermission):
def has_permission(self, request, view):
return check_post_permission(
- request, Test, "test", Permissions.Test_Edit
+ request, Test, "test", Permissions.Test_Edit,
)
def has_object_permission(self, request, view, obj):
@@ -721,7 +721,7 @@ def has_object_permission(self, request, view, obj):
class UserHasLanguagePermission(permissions.BasePermission):
def has_permission(self, request, view):
return check_post_permission(
- request, Product, "product", Permissions.Language_Add
+ request, Product, "product", Permissions.Language_Add,
)
def has_object_permission(self, request, view, obj):
@@ -763,7 +763,7 @@ def has_permission(self, request, view):
has_permission_result = (
has_permission_result
and user_has_permission(
- request.user, object, Permissions.Engagement_Edit
+ request.user, object, Permissions.Engagement_Edit,
)
)
product_id = request.data.get("product", None)
@@ -772,7 +772,7 @@ def has_permission(self, request, view):
has_permission_result = (
has_permission_result
and user_has_permission(
- request.user, object, Permissions.Product_Edit
+ request.user, object, Permissions.Product_Edit,
)
)
return has_permission_result
@@ -818,7 +818,7 @@ def has_permission(self, request, view):
has_permission_result = (
has_permission_result
and user_has_permission(
- request.user, object, Permissions.Engagement_Edit
+ request.user, object, Permissions.Engagement_Edit,
)
)
finding_id = request.data.get("finding", None)
@@ -827,7 +827,7 @@ def has_permission(self, request, view):
has_permission_result = (
has_permission_result
and user_has_permission(
- request.user, object, Permissions.Finding_Edit
+ request.user, object, Permissions.Finding_Edit,
)
)
finding_group_id = request.data.get("finding_group", None)
@@ -836,7 +836,7 @@ def has_permission(self, request, view):
has_permission_result = (
has_permission_result
and user_has_permission(
- request.user, object, Permissions.Finding_Group_Edit
+ request.user, object, Permissions.Finding_Group_Edit,
)
)
return has_permission_result
@@ -892,7 +892,7 @@ def has_permission(self, request, view):
class UserHasEngagementPresetPermission(permissions.BasePermission):
def has_permission(self, request, view):
return check_post_permission(
- request, Product, "product", Permissions.Product_Edit
+ request, Product, "product", Permissions.Product_Edit,
)
def has_object_permission(self, request, view, obj):
@@ -990,7 +990,7 @@ def check_auto_create_permission(
if engagement:
# existing engagement, nothing special to check
return user_has_permission(
- user, engagement, Permissions.Import_Scan_Result
+ user, engagement, Permissions.Import_Scan_Result,
)
if product and product_name and engagement_name:
@@ -999,7 +999,7 @@ def check_auto_create_permission(
raise PermissionDenied(msg)
if not user_has_permission(
- user, product, Permissions.Import_Scan_Result
+ user, product, Permissions.Import_Scan_Result,
):
msg = f'No permission to import scans into product "{product_name}"'
raise PermissionDenied(msg)
@@ -1014,7 +1014,7 @@ def check_auto_create_permission(
if not product_type:
if not user_has_global_permission(
- user, Permissions.Product_Type_Add
+ user, Permissions.Product_Type_Add,
):
msg = f'No permission to create product_type "{product_type_name}"'
raise PermissionDenied(msg)
@@ -1023,7 +1023,7 @@ def check_auto_create_permission(
return True
else:
if not user_has_permission(
- user, product_type, Permissions.Product_Type_Add_Product
+ user, product_type, Permissions.Product_Type_Add_Product,
):
msg = f'No permission to create products in product_type "{product_type}"'
raise PermissionDenied(msg)
@@ -1051,7 +1051,7 @@ def has_permission(self, request, view):
class UserHasConfigurationPermissionSuperuser(
- permissions.DjangoModelPermissions
+ permissions.DjangoModelPermissions,
):
# Override map to also provide 'view' permissions
perms_map = {
diff --git a/dojo/api_v2/prefetch/prefetcher.py b/dojo/api_v2/prefetch/prefetcher.py
index 1b207c394d..79a4b0e731 100644
--- a/dojo/api_v2/prefetch/prefetcher.py
+++ b/dojo/api_v2/prefetch/prefetcher.py
@@ -26,7 +26,7 @@ def _is_model_serializer(obj):
# We process all the serializers found in the module SERIALIZER_DEFS_MODULE. We restrict the scope to avoid
# processing all the classes in the symbol table
available_serializers = inspect.getmembers(
- sys.modules[SERIALIZER_DEFS_MODULE], _is_model_serializer
+ sys.modules[SERIALIZER_DEFS_MODULE], _is_model_serializer,
)
for _, serializer in available_serializers:
@@ -86,7 +86,7 @@ def _prefetch(self, entry, fields_to_fetch):
# the serializer accordingly
many = utils._is_many_to_many_relation(field_meta)
field_data = extra_serializer(many=many).to_representation(
- field_value
+ field_value,
)
# For convenience in processing we store the field data in a list
field_data_list = (
diff --git a/dojo/api_v2/prefetch/schema.py b/dojo/api_v2/prefetch/schema.py
index 48892c4381..030a572a15 100644
--- a/dojo/api_v2/prefetch/schema.py
+++ b/dojo/api_v2/prefetch/schema.py
@@ -56,7 +56,7 @@ def prefetch_postprocessing_hook(result, generator, request, public):
prefetcher = _Prefetcher()
fields = _get_prefetchable_fields(
- serializer_classes[path]()
+ serializer_classes[path](),
)
field_names = [
@@ -87,8 +87,8 @@ def prefetch_postprocessing_hook(result, generator, request, public):
"type": "object",
"readOnly": True,
"additionalProperties": {
- "$ref": f"#/components/schemas/{fields_to_refname[name]}"
- }
+ "$ref": f"#/components/schemas/{fields_to_refname[name]}",
+ },
}
for name in field_names
}
diff --git a/dojo/api_v2/prefetch/utils.py b/dojo/api_v2/prefetch/utils.py
index 833fe9ae6e..de7ea2b383 100644
--- a/dojo/api_v2/prefetch/utils.py
+++ b/dojo/api_v2/prefetch/utils.py
@@ -39,7 +39,7 @@ def _get_prefetchable_fields(serializer):
def _is_field_prefetchable(field):
return _is_one_to_one_relation(field) or _is_many_to_many_relation(
- field
+ field,
)
meta = getattr(serializer, "Meta", None)
diff --git a/dojo/api_v2/serializers.py b/dojo/api_v2/serializers.py
index 4de5d536d0..006edc63f8 100644
--- a/dojo/api_v2/serializers.py
+++ b/dojo/api_v2/serializers.py
@@ -167,22 +167,22 @@ class ImportStatisticsSerializer(serializers.Serializer):
help_text="Finding statistics of modifications made by the reimport. Only available when TRACK_IMPORT_HISTORY hass not disabled.",
)
after = SeverityStatusStatisticsSerializer(
- help_text="Finding statistics as stored in Defect Dojo after the import"
+ help_text="Finding statistics as stored in Defect Dojo after the import",
)
@extend_schema_field(
- serializers.ListField(child=serializers.CharField())
+ serializers.ListField(child=serializers.CharField()),
) # also takes basic python types
class TagListSerializerField(serializers.ListField):
child = serializers.CharField()
default_error_messages = {
"not_a_list": _(
- 'Expected a list of items but got type "{input_type}".'
+ 'Expected a list of items but got type "{input_type}".',
),
"invalid_json": _(
"Invalid json list. A tag list submitted in string"
- " form must be valid json."
+ " form must be valid json.",
),
"not_a_str": _("All list items must be of string type."),
}
@@ -256,7 +256,7 @@ def update(self, instance, validated_data):
to_be_tagged, validated_data = self._pop_tags(validated_data)
tag_object = super().update(
- instance, validated_data
+ instance, validated_data,
)
return self._save_tags(tag_object, to_be_tagged)
@@ -301,7 +301,7 @@ def __getitem__(self, item):
def __str__(self):
if self.pretty_print:
return json.dumps(
- self, sort_keys=True, indent=4, separators=(",", ": ")
+ self, sort_keys=True, indent=4, separators=(",", ": "),
)
else:
return json.dumps(self)
@@ -311,14 +311,14 @@ class RequestResponseSerializerField(serializers.ListSerializer):
child = DictField(child=serializers.CharField())
default_error_messages = {
"not_a_list": _(
- 'Expected a list of items but got type "{input_type}".'
+ 'Expected a list of items but got type "{input_type}".',
),
"invalid_json": _(
"Invalid json list. A tag list submitted in string"
- " form must be valid json."
+ " form must be valid json.",
),
"not_a_dict": _(
- "All list items must be of dict type with keys 'request' and 'response'"
+ "All list items must be of dict type with keys 'request' and 'response'",
),
"not_a_str": _("All values in the dict must be of string type."),
}
@@ -437,7 +437,7 @@ class UserSerializer(serializers.ModelSerializer):
configuration_permissions = serializers.PrimaryKeyRelatedField(
allow_null=True,
queryset=Permission.objects.filter(
- codename__in=get_configuration_permissions_codenames()
+ codename__in=get_configuration_permissions_codenames(),
),
many=True,
required=False,
@@ -469,10 +469,10 @@ def to_representation(self, instance):
allowed_configuration_permissions = set(
self.fields[
"configuration_permissions"
- ].child_relation.queryset.values_list("id", flat=True)
+ ].child_relation.queryset.values_list("id", flat=True),
)
ret["configuration_permissions"] = list(
- all_permissions.intersection(allowed_configuration_permissions)
+ all_permissions.intersection(allowed_configuration_permissions),
)
return ret
@@ -483,7 +483,7 @@ def update(self, instance, validated_data):
"user_permissions" in validated_data
): # This field was renamed from "configuration_permissions" in the meantime
new_configuration_permissions = set(
- validated_data.pop("user_permissions")
+ validated_data.pop("user_permissions"),
)
instance = super().update(instance, validated_data)
@@ -494,14 +494,14 @@ def update(self, instance, validated_data):
allowed_configuration_permissions = set(
self.fields[
"configuration_permissions"
- ].child_relation.queryset.all()
+ ].child_relation.queryset.all(),
)
non_configuration_permissions = (
set(instance.user_permissions.all())
- allowed_configuration_permissions
)
new_permissions = non_configuration_permissions.union(
- new_configuration_permissions
+ new_configuration_permissions,
)
instance.user_permissions.set(new_permissions)
@@ -518,7 +518,7 @@ def create(self, validated_data):
"user_permissions" in validated_data
): # This field was renamed from "configuration_permissions" in the meantime
new_configuration_permissions = set(
- validated_data.pop("user_permissions")
+ validated_data.pop("user_permissions"),
)
user = Dojo_User.objects.create(**validated_data)
@@ -582,7 +582,7 @@ class DojoGroupSerializer(serializers.ModelSerializer):
configuration_permissions = serializers.PrimaryKeyRelatedField(
allow_null=True,
queryset=Permission.objects.filter(
- codename__in=get_configuration_permissions_codenames()
+ codename__in=get_configuration_permissions_codenames(),
),
many=True,
required=False,
@@ -609,10 +609,10 @@ def to_representation(self, instance):
allowed_configuration_permissions = set(
self.fields[
"configuration_permissions"
- ].child_relation.queryset.values_list("id", flat=True)
+ ].child_relation.queryset.values_list("id", flat=True),
)
ret["configuration_permissions"] = list(
- all_permissions.intersection(allowed_configuration_permissions)
+ all_permissions.intersection(allowed_configuration_permissions),
)
return ret
@@ -624,7 +624,7 @@ def create(self, validated_data):
and "permissions" in validated_data["auth_group"]
): # This field was renamed from "configuration_permissions" in the meantime
new_configuration_permissions = set(
- validated_data.pop("auth_group")["permissions"]
+ validated_data.pop("auth_group")["permissions"],
)
instance = super().create(validated_data)
@@ -643,7 +643,7 @@ def update(self, instance, validated_data):
and "permissions" in validated_data["auth_group"]
): # This field was renamed from "configuration_permissions" in the meantime
new_configuration_permissions = set(
- validated_data.pop("auth_group")["permissions"]
+ validated_data.pop("auth_group")["permissions"],
)
instance = super().update(instance, validated_data)
@@ -654,14 +654,14 @@ def update(self, instance, validated_data):
allowed_configuration_permissions = set(
self.fields[
"configuration_permissions"
- ].child_relation.queryset.all()
+ ].child_relation.queryset.all(),
)
non_configuration_permissions = (
set(instance.auth_group.permissions.all())
- allowed_configuration_permissions
)
new_permissions = non_configuration_permissions.union(
- new_configuration_permissions
+ new_configuration_permissions,
)
instance.auth_group.permissions.set(new_permissions)
@@ -692,7 +692,7 @@ def validate(self, data):
or data.get("user") != self.instance.user
):
members = Dojo_Group_Member.objects.filter(
- group=data.get("group"), user=data.get("user")
+ group=data.get("group"), user=data.get("user"),
)
if members.count() > 0:
msg = "Dojo_Group_Member already exists"
@@ -701,7 +701,7 @@ def validate(self, data):
if self.instance is not None and not data.get("role").is_owner:
owners = (
Dojo_Group_Member.objects.filter(
- group=data.get("group"), role__is_owner=True
+ group=data.get("group"), role__is_owner=True,
)
.exclude(id=self.instance.id)
.count()
@@ -862,7 +862,7 @@ def validate(self, data):
or data.get("user") != self.instance.user
):
members = Product_Member.objects.filter(
- product=data.get("product"), user=data.get("user")
+ product=data.get("product"), user=data.get("user"),
)
if members.count() > 0:
msg = "Product_Member already exists"
@@ -903,7 +903,7 @@ def validate(self, data):
or data.get("group") != self.instance.group
):
members = Product_Group.objects.filter(
- product=data.get("product"), group=data.get("group")
+ product=data.get("product"), group=data.get("group"),
)
if members.count() > 0:
msg = "Product_Group already exists"
@@ -944,7 +944,7 @@ def validate(self, data):
or data.get("user") != self.instance.user
):
members = Product_Type_Member.objects.filter(
- product_type=data.get("product_type"), user=data.get("user")
+ product_type=data.get("product_type"), user=data.get("user"),
)
if members.count() > 0:
msg = "Product_Type_Member already exists"
@@ -953,7 +953,7 @@ def validate(self, data):
if self.instance is not None and not data.get("role").is_owner:
owners = (
Product_Type_Member.objects.filter(
- product_type=data.get("product_type"), role__is_owner=True
+ product_type=data.get("product_type"), role__is_owner=True,
)
.exclude(id=self.instance.id)
.count()
@@ -997,7 +997,7 @@ def validate(self, data):
or data.get("group") != self.instance.group
):
members = Product_Type_Group.objects.filter(
- product_type=data.get("product_type"), group=data.get("group")
+ product_type=data.get("product_type"), group=data.get("group"),
)
if members.count() > 0:
msg = "Product_Type_Group already exists"
@@ -1044,14 +1044,14 @@ def build_relational_field(self, field_name, relation_info):
class EngagementToNotesSerializer(serializers.Serializer):
engagement_id = serializers.PrimaryKeyRelatedField(
- queryset=Engagement.objects.all(), many=False, allow_null=True
+ queryset=Engagement.objects.all(), many=False, allow_null=True,
)
notes = NoteSerializer(many=True)
class EngagementToFilesSerializer(serializers.Serializer):
engagement_id = serializers.PrimaryKeyRelatedField(
- queryset=Engagement.objects.all(), many=False, allow_null=True
+ queryset=Engagement.objects.all(), many=False, allow_null=True,
)
files = FileSerializer(many=True)
@@ -1066,11 +1066,11 @@ def to_representation(self, data):
"file": "{site_url}/{file_access_url}".format(
site_url=settings.SITE_URL,
file_access_url=file.get_accessible_url(
- engagement, engagement.id
+ engagement, engagement.id,
),
),
"title": file.title,
- }
+ },
)
new_data = {"engagement_id": engagement.id, "files": new_files}
return new_data
@@ -1125,7 +1125,7 @@ class Meta:
class ToolProductSettingsSerializer(serializers.ModelSerializer):
setting_url = serializers.CharField(source="url")
product = serializers.PrimaryKeyRelatedField(
- queryset=Product.objects.all(), required=True
+ queryset=Product.objects.all(), required=True,
)
class Meta:
@@ -1143,7 +1143,7 @@ def create(self, validated_data):
finding = validated_data.get("finding")
try:
status = Endpoint_Status.objects.create(
- finding=finding, endpoint=endpoint
+ finding=finding, endpoint=endpoint,
)
except IntegrityError as ie:
if "endpoint-finding relation" in str(ie):
@@ -1279,7 +1279,7 @@ def validate(self, data):
engagement = data.get("engagement", self.instance.engagement)
finding = data.get("finding", self.instance.finding)
finding_group = data.get(
- "finding_group", self.instance.finding_group
+ "finding_group", self.instance.finding_group,
)
else:
engagement = data.get("engagement", None)
@@ -1364,7 +1364,7 @@ class TestSerializer(TaggitSerializer, serializers.ModelSerializer):
tags = TagListSerializerField(required=False)
test_type_name = serializers.ReadOnlyField()
finding_groups = FindingGroupSerializer(
- source="finding_group_set", many=True, read_only=True
+ source="finding_group_set", many=True, read_only=True,
)
class Meta:
@@ -1381,7 +1381,7 @@ def build_relational_field(self, field_name, relation_info):
class TestCreateSerializer(TaggitSerializer, serializers.ModelSerializer):
engagement = serializers.PrimaryKeyRelatedField(
- queryset=Engagement.objects.all()
+ queryset=Engagement.objects.all(),
)
notes = serializers.PrimaryKeyRelatedField(
allow_null=True,
@@ -1406,14 +1406,14 @@ class Meta:
class TestToNotesSerializer(serializers.Serializer):
test_id = serializers.PrimaryKeyRelatedField(
- queryset=Test.objects.all(), many=False, allow_null=True
+ queryset=Test.objects.all(), many=False, allow_null=True,
)
notes = NoteSerializer(many=True)
class TestToFilesSerializer(serializers.Serializer):
test_id = serializers.PrimaryKeyRelatedField(
- queryset=Test.objects.all(), many=False, allow_null=True
+ queryset=Test.objects.all(), many=False, allow_null=True,
)
files = FileSerializer(many=True)
@@ -1427,7 +1427,7 @@ def to_representation(self, data):
"id": file.id,
"file": f"{settings.SITE_URL}/{file.get_accessible_url(test, test.id)}",
"title": file.title,
- }
+ },
)
new_data = {"test_id": test.id, "files": new_files}
return new_data
@@ -1442,7 +1442,7 @@ class Meta:
class TestImportSerializer(serializers.ModelSerializer):
# findings = TestImportFindingActionSerializer(source='test_import_finding_action', many=True, read_only=True)
test_import_finding_action_set = TestImportFindingActionSerializer(
- many=True, read_only=True
+ many=True, read_only=True,
)
class Meta:
@@ -1489,12 +1489,12 @@ def get_decision(self, obj):
@extend_schema_field(serializers.CharField())
def get_path(self, obj):
engagement = Engagement.objects.filter(
- risk_acceptance__id__in=[obj.id]
+ risk_acceptance__id__in=[obj.id],
).first()
path = "No proof has been supplied"
if engagement and obj.filename() is not None:
path = reverse(
- "download_risk_acceptance", args=(engagement.id, obj.id)
+ "download_risk_acceptance", args=(engagement.id, obj.id),
)
request = self.context.get("request")
if request:
@@ -1504,10 +1504,10 @@ def get_path(self, obj):
@extend_schema_field(serializers.IntegerField())
def get_engagement(self, obj):
engagement = Engagement.objects.filter(
- risk_acceptance__id__in=[obj.id]
+ risk_acceptance__id__in=[obj.id],
).first()
return EngagementSerializer(read_only=True).to_representation(
- engagement
+ engagement,
)
def validate(self, data):
@@ -1618,7 +1618,7 @@ class FindingRelatedFieldsSerializer(serializers.Serializer):
@extend_schema_field(FindingTestSerializer)
def get_test(self, obj):
return FindingTestSerializer(read_only=True).to_representation(
- obj.test
+ obj.test,
)
@extend_schema_field(JIRAIssueSerializer)
@@ -1639,7 +1639,7 @@ class FindingSerializer(TaggitSerializer, serializers.ModelSerializer):
tags = TagListSerializerField(required=False)
request_response = serializers.SerializerMethodField()
accepted_risks = RiskAcceptanceSerializer(
- many=True, read_only=True, source="risk_acceptance_set"
+ many=True, read_only=True, source="risk_acceptance_set",
)
push_to_jira = serializers.BooleanField(default=False)
age = serializers.IntegerField(read_only=True)
@@ -1651,13 +1651,13 @@ class FindingSerializer(TaggitSerializer, serializers.ModelSerializer):
jira_change = serializers.SerializerMethodField(read_only=True)
display_status = serializers.SerializerMethodField()
finding_groups = FindingGroupSerializer(
- source="finding_group_set", many=True, read_only=True
+ source="finding_group_set", many=True, read_only=True,
)
vulnerability_ids = VulnerabilityIdSerializer(
- source="vulnerability_id_set", many=True, required=False
+ source="vulnerability_id_set", many=True, required=False,
)
reporter = serializers.PrimaryKeyRelatedField(
- required=False, queryset=User.objects.all()
+ required=False, queryset=User.objects.all(),
)
class Meta:
@@ -1684,7 +1684,7 @@ def get_related_fields(self, obj):
query_params = request.query_params
if query_params.get("related_fields", "false") == "true":
return FindingRelatedFieldsSerializer(
- required=False
+ required=False,
).to_representation(obj)
else:
return None
@@ -1701,7 +1701,7 @@ def update(self, instance, validated_data):
# TODO: JIRA can we remove this is_push_all_issues, already checked in
# apiv2 viewset?
push_to_jira = validated_data.pop(
- "push_to_jira"
+ "push_to_jira",
) or jira_helper.is_push_all_issues(instance)
# Save vulnerability ids and pop them
@@ -1711,12 +1711,12 @@ def update(self, instance, validated_data):
if vulnerability_id_set:
for vulnerability_id in vulnerability_id_set:
vulnerability_ids.append(
- vulnerability_id["vulnerability_id"]
+ vulnerability_id["vulnerability_id"],
)
save_vulnerability_ids(instance, vulnerability_ids)
instance = super(TaggitSerializer, self).update(
- instance, validated_data
+ instance, validated_data,
)
# Save the reporter on the finding
if reporter_id := validated_data.get("reporter"):
@@ -1741,7 +1741,7 @@ def validate(self, data):
is_duplicate = data.get("duplicate", self.instance.duplicate)
is_false_p = data.get("false_p", self.instance.false_p)
is_risk_accepted = data.get(
- "risk_accepted", self.instance.risk_accepted
+ "risk_accepted", self.instance.risk_accepted,
)
else:
is_active = data.get("active", True)
@@ -1794,28 +1794,28 @@ def get_request_response(self, obj):
response = burp.get_response()
burp_list.append({"request": request, "response": response})
serialized_burps = BurpRawRequestResponseSerializer(
- {"req_resp": burp_list}
+ {"req_resp": burp_list},
)
return serialized_burps.data
class FindingCreateSerializer(TaggitSerializer, serializers.ModelSerializer):
notes = serializers.PrimaryKeyRelatedField(
- read_only=True, allow_null=True, required=False, many=True
+ read_only=True, allow_null=True, required=False, many=True,
)
test = serializers.PrimaryKeyRelatedField(queryset=Test.objects.all())
thread_id = serializers.IntegerField(default=0)
found_by = serializers.PrimaryKeyRelatedField(
- queryset=Test_Type.objects.all(), many=True
+ queryset=Test_Type.objects.all(), many=True,
)
url = serializers.CharField(allow_null=True, default=None)
tags = TagListSerializerField(required=False)
push_to_jira = serializers.BooleanField(default=False)
vulnerability_ids = VulnerabilityIdSerializer(
- source="vulnerability_id_set", many=True, required=False
+ source="vulnerability_id_set", many=True, required=False,
)
reporter = serializers.PrimaryKeyRelatedField(
- required=False, queryset=User.objects.all()
+ required=False, queryset=User.objects.all(),
)
class Meta:
@@ -1857,7 +1857,7 @@ def create(self, validated_data):
# TODO: JIRA can we remove this is_push_all_issues, already checked in
# apiv2 viewset?
push_to_jira = push_to_jira or jira_helper.is_push_all_issues(
- new_finding
+ new_finding,
)
# If we need to push to JIRA, an extra save call is needed.
@@ -1877,7 +1877,7 @@ def validate(self, data):
data["reporter"] = request.user
if (data.get("active") or data.get("verified")) and data.get(
- "duplicate"
+ "duplicate",
):
msg = "Duplicate findings cannot be verified or active"
raise serializers.ValidationError(msg)
@@ -1918,7 +1918,7 @@ class Meta:
class FindingTemplateSerializer(TaggitSerializer, serializers.ModelSerializer):
tags = TagListSerializerField(required=False)
vulnerability_ids = VulnerabilityIdTemplateSerializer(
- source="vulnerability_id_template_set", many=True, required=False
+ source="vulnerability_id_template_set", many=True, required=False,
)
class Meta:
@@ -1929,13 +1929,13 @@ def create(self, validated_data):
# Save vulnerability ids and pop them
if "vulnerability_id_template_set" in validated_data:
vulnerability_id_set = validated_data.pop(
- "vulnerability_id_template_set"
+ "vulnerability_id_template_set",
)
else:
vulnerability_id_set = None
new_finding_template = super(TaggitSerializer, self).create(
- validated_data
+ validated_data,
)
if vulnerability_id_set:
@@ -1944,7 +1944,7 @@ def create(self, validated_data):
vulnerability_ids.append(vulnerability_id["vulnerability_id"])
validated_data["cve"] = vulnerability_ids[0]
save_vulnerability_ids_template(
- new_finding_template, vulnerability_ids
+ new_finding_template, vulnerability_ids,
)
new_finding_template.save()
@@ -1954,13 +1954,13 @@ def update(self, instance, validated_data):
# Save vulnerability ids and pop them
if "vulnerability_id_template_set" in validated_data:
vulnerability_id_set = validated_data.pop(
- "vulnerability_id_template_set"
+ "vulnerability_id_template_set",
)
vulnerability_ids = []
if vulnerability_id_set:
for vulnerability_id in vulnerability_id_set:
vulnerability_ids.append(
- vulnerability_id["vulnerability_id"]
+ vulnerability_id["vulnerability_id"],
)
save_vulnerability_ids_template(instance, vulnerability_ids)
@@ -2020,7 +2020,7 @@ class Meta:
exclude = (
"tid",
"updated",
- "async_updating"
+ "async_updating",
)
def validate(self, data):
@@ -2053,10 +2053,10 @@ class ImportScanSerializer(serializers.Serializer):
help_text="Minimum severity level to be imported",
)
active = serializers.BooleanField(
- help_text="Override the active setting from the tool."
+ help_text="Override the active setting from the tool.",
)
verified = serializers.BooleanField(
- help_text="Override the verified setting from the tool."
+ help_text="Override the verified setting from the tool.",
)
scan_type = serializers.ChoiceField(choices=get_choices_sorted())
# TODO why do we allow only existing endpoints?
@@ -2080,16 +2080,16 @@ class ImportScanSerializer(serializers.Serializer):
help_text="Resource link to source code",
)
engagement = serializers.PrimaryKeyRelatedField(
- queryset=Engagement.objects.all(), required=False
+ queryset=Engagement.objects.all(), required=False,
)
test_title = serializers.CharField(required=False)
auto_create_context = serializers.BooleanField(required=False)
deduplication_on_engagement = serializers.BooleanField(required=False)
lead = serializers.PrimaryKeyRelatedField(
- allow_null=True, default=None, queryset=User.objects.all()
+ allow_null=True, default=None, queryset=User.objects.all(),
)
tags = TagListSerializerField(
- required=False, allow_empty=True, help_text="Add tags that help describe this scan."
+ required=False, allow_empty=True, help_text="Add tags that help describe this scan.",
)
close_old_findings = serializers.BooleanField(
required=False,
@@ -2106,16 +2106,16 @@ class ImportScanSerializer(serializers.Serializer):
push_to_jira = serializers.BooleanField(default=False)
environment = serializers.CharField(required=False)
version = serializers.CharField(
- required=False, help_text="Version that was scanned."
+ required=False, help_text="Version that was scanned.",
)
build_id = serializers.CharField(
- required=False, help_text="ID of the build that was scanned."
+ required=False, help_text="ID of the build that was scanned.",
)
branch_tag = serializers.CharField(
- required=False, help_text="Branch or Tag that was scanned."
+ required=False, help_text="Branch or Tag that was scanned.",
)
commit_hash = serializers.CharField(
- required=False, help_text="Commit that was scanned."
+ required=False, help_text="Commit that was scanned.",
)
api_scan_configuration = serializers.PrimaryKeyRelatedField(
allow_null=True,
@@ -2142,7 +2142,7 @@ class ImportScanSerializer(serializers.Serializer):
# need to use the _id suffix as without the serializer framework gets
# confused
test = serializers.IntegerField(
- read_only=True
+ read_only=True,
) # left for backwards compatibility
test_id = serializers.IntegerField(read_only=True)
engagement_id = serializers.IntegerField(read_only=True)
@@ -2170,7 +2170,7 @@ def set_context(
# update some vars
context["scan"] = data.pop("file", None)
context["environment"] = Development_Environment.objects.get(
- name=data.get("environment", "Development")
+ name=data.get("environment", "Development"),
)
# Set the active/verified status based upon the overrides
if "active" in self.initial_data:
@@ -2197,7 +2197,7 @@ def set_context(
# the API would fail (but unit tests for api upload would pass...)
context["scan_date"] = (
timezone.make_aware(
- datetime.combine(context.get("scan_date"), datetime.min.time())
+ datetime.combine(context.get("scan_date"), datetime.min.time()),
)
if context.get("scan_date")
else None
@@ -2241,7 +2241,7 @@ def get_importer(
def process_scan(
self,
data: dict,
- context: dict
+ context: dict,
) -> None:
"""
Process the scan with all of the supplied data fully massaged
@@ -2252,7 +2252,7 @@ def process_scan(
try:
importer = self.get_importer(**context)
context["test"], _, _, _, _, _, _ = importer.process_scan(
- context.pop("scan", None)
+ context.pop("scan", None),
)
# Update the response body with some new data
if test := context.get("test"):
@@ -2318,20 +2318,20 @@ class ReImportScanSerializer(TaggitSerializer, serializers.Serializer):
help_text="Minimum severity level to be imported",
)
active = serializers.BooleanField(
- help_text="Override the active setting from the tool."
+ help_text="Override the active setting from the tool.",
)
verified = serializers.BooleanField(
- help_text="Override the verified setting from the tool."
+ help_text="Override the verified setting from the tool.",
)
help_do_not_reactivate = "Select if the import should ignore active findings from the report, useful for triage-less scanners. Will keep existing findings closed, without reactivating them. For more information check the docs."
do_not_reactivate = serializers.BooleanField(
- default=False, required=False, help_text=help_do_not_reactivate
+ default=False, required=False, help_text=help_do_not_reactivate,
)
scan_type = serializers.ChoiceField(
- choices=get_choices_sorted(), required=True
+ choices=get_choices_sorted(), required=True,
)
endpoint_to_add = serializers.PrimaryKeyRelatedField(
- queryset=Endpoint.objects.all(), default=None, required=False
+ queryset=Endpoint.objects.all(), default=None, required=False,
)
file = serializers.FileField(allow_empty_file=True, required=False)
product_type_name = serializers.CharField(required=False)
@@ -2347,7 +2347,7 @@ class ReImportScanSerializer(TaggitSerializer, serializers.Serializer):
help_text="Resource link to source code",
)
test = serializers.PrimaryKeyRelatedField(
- required=False, queryset=Test.objects.all()
+ required=False, queryset=Test.objects.all(),
)
test_title = serializers.CharField(required=False)
auto_create_context = serializers.BooleanField(required=False)
@@ -2374,13 +2374,13 @@ class ReImportScanSerializer(TaggitSerializer, serializers.Serializer):
help_text="Version that will be set on existing Test object. Leave empty to leave existing value in place.",
)
build_id = serializers.CharField(
- required=False, help_text="ID of the build that was scanned."
+ required=False, help_text="ID of the build that was scanned.",
)
branch_tag = serializers.CharField(
- required=False, help_text="Branch or Tag that was scanned."
+ required=False, help_text="Branch or Tag that was scanned.",
)
commit_hash = serializers.CharField(
- required=False, help_text="Commit that was scanned."
+ required=False, help_text="Commit that was scanned.",
)
api_scan_configuration = serializers.PrimaryKeyRelatedField(
allow_null=True,
@@ -2395,7 +2395,7 @@ class ReImportScanSerializer(TaggitSerializer, serializers.Serializer):
)
environment = serializers.CharField(required=False)
lead = serializers.PrimaryKeyRelatedField(
- allow_null=True, default=None, queryset=User.objects.all()
+ allow_null=True, default=None, queryset=User.objects.all(),
)
tags = TagListSerializerField(
required=False,
@@ -2419,7 +2419,7 @@ class ReImportScanSerializer(TaggitSerializer, serializers.Serializer):
# confused
test_id = serializers.IntegerField(read_only=True)
engagement_id = serializers.IntegerField(
- read_only=True
+ read_only=True,
) # need to use the _id suffix as without the serializer framework gets confused
product_id = serializers.IntegerField(read_only=True)
product_type_id = serializers.IntegerField(read_only=True)
@@ -2427,7 +2427,7 @@ class ReImportScanSerializer(TaggitSerializer, serializers.Serializer):
statistics = ImportStatisticsSerializer(read_only=True, required=False)
apply_tags_to_findings = serializers.BooleanField(
help_text="If set to True, the tags will be applied to the findings",
- required=False
+ required=False,
)
apply_tags_to_endpoints = serializers.BooleanField(
help_text="If set to True, the tags will be applied to the endpoints",
@@ -2446,7 +2446,7 @@ def set_context(
# update some vars
context["scan"] = data.get("file", None)
context["environment"] = Development_Environment.objects.get(
- name=data.get("environment", "Development")
+ name=data.get("environment", "Development"),
)
# Set the active/verified status based upon the overrides
if "active" in self.initial_data:
@@ -2473,7 +2473,7 @@ def set_context(
# the API would fail (but unit tests for api upload would pass...)
context["scan_date"] = (
timezone.make_aware(
- datetime.combine(context.get("scan_date"), datetime.min.time())
+ datetime.combine(context.get("scan_date"), datetime.min.time()),
)
if context.get("scan_date")
else None
@@ -2540,9 +2540,9 @@ def process_scan(
if test := context.get("test"):
statistics_before = test.statistics
context["test"], _, _, _, _, _, test_import = self.get_reimporter(
- **context
+ **context,
).process_scan(
- context.pop("scan", None)
+ context.pop("scan", None),
)
if test_import:
statistics_delta = test_import.statistics
@@ -2551,9 +2551,9 @@ def process_scan(
logger.debug("reimport for non-existing test, using import to create new test")
context["engagement"] = auto_create_manager.get_or_create_engagement(**context)
context["test"], _, _, _, _, _, _ = self.get_importer(
- **context
+ **context,
).process_scan(
- context.pop("scan", None)
+ context.pop("scan", None),
)
else:
msg = "A test could not be found!"
@@ -2626,7 +2626,7 @@ class EndpointMetaImporterSerializer(serializers.Serializer):
create_dojo_meta = serializers.BooleanField(default=False, required=False)
product_name = serializers.CharField(required=False)
product = serializers.PrimaryKeyRelatedField(
- queryset=Product.objects.all(), required=False
+ queryset=Product.objects.all(), required=False,
)
# extra fields populated in response
# need to use the _id suffix as without the serializer framework gets
@@ -2689,7 +2689,7 @@ class Meta:
class ImportLanguagesSerializer(serializers.Serializer):
product = serializers.PrimaryKeyRelatedField(
- queryset=Product.objects.all(), required=True
+ queryset=Product.objects.all(), required=True,
)
file = serializers.FileField(required=True)
@@ -2721,7 +2721,7 @@ def save(self):
) = Language_Type.objects.get_or_create(language=name)
except Language_Type.MultipleObjectsReturned:
language_type = Language_Type.objects.filter(
- language=name
+ language=name,
).first()
language = Languages()
@@ -2754,14 +2754,14 @@ class Meta:
class FindingToNotesSerializer(serializers.Serializer):
finding_id = serializers.PrimaryKeyRelatedField(
- queryset=Finding.objects.all(), many=False, allow_null=True
+ queryset=Finding.objects.all(), many=False, allow_null=True,
)
notes = NoteSerializer(many=True)
class FindingToFilesSerializer(serializers.Serializer):
finding_id = serializers.PrimaryKeyRelatedField(
- queryset=Finding.objects.all(), many=False, allow_null=True
+ queryset=Finding.objects.all(), many=False, allow_null=True,
)
files = FileSerializer(many=True)
@@ -2776,11 +2776,11 @@ def to_representation(self, data):
"file": "{site_url}/{file_access_url}".format(
site_url=settings.SITE_URL,
file_access_url=file.get_accessible_url(
- finding, finding.id
+ finding, finding.id,
),
),
"title": file.title,
- }
+ },
)
new_data = {"finding_id": finding.id, "files": new_files}
return new_data
@@ -2820,7 +2820,7 @@ class ExecutiveSummarySerializer(serializers.Serializer):
test_target_end = serializers.DateTimeField()
test_environment_name = serializers.CharField(max_length=200)
test_strategy_ref = serializers.URLField(
- max_length=200, min_length=None, allow_blank=True
+ max_length=200, min_length=None, allow_blank=True,
)
total_findings = serializers.IntegerField()
@@ -2842,7 +2842,7 @@ class ReportGenerateSerializer(serializers.Serializer):
user_id = serializers.IntegerField()
host = serializers.CharField(max_length=200)
finding_notes = FindingToNotesSerializer(
- many=True, allow_null=True, required=False
+ many=True, allow_null=True, required=False,
)
@@ -2892,55 +2892,55 @@ class NotificationsSerializer(serializers.ModelSerializer):
allow_null=True,
)
product_type_added = MultipleChoiceField(
- choices=NOTIFICATION_CHOICES, default=DEFAULT_NOTIFICATION
+ choices=NOTIFICATION_CHOICES, default=DEFAULT_NOTIFICATION,
)
product_added = MultipleChoiceField(
- choices=NOTIFICATION_CHOICES, default=DEFAULT_NOTIFICATION
+ choices=NOTIFICATION_CHOICES, default=DEFAULT_NOTIFICATION,
)
engagement_added = MultipleChoiceField(
- choices=NOTIFICATION_CHOICES, default=DEFAULT_NOTIFICATION
+ choices=NOTIFICATION_CHOICES, default=DEFAULT_NOTIFICATION,
)
test_added = MultipleChoiceField(
- choices=NOTIFICATION_CHOICES, default=DEFAULT_NOTIFICATION
+ choices=NOTIFICATION_CHOICES, default=DEFAULT_NOTIFICATION,
)
scan_added = MultipleChoiceField(
- choices=NOTIFICATION_CHOICES, default=DEFAULT_NOTIFICATION
+ choices=NOTIFICATION_CHOICES, default=DEFAULT_NOTIFICATION,
)
jira_update = MultipleChoiceField(
- choices=NOTIFICATION_CHOICES, default=DEFAULT_NOTIFICATION
+ choices=NOTIFICATION_CHOICES, default=DEFAULT_NOTIFICATION,
)
upcoming_engagement = MultipleChoiceField(
- choices=NOTIFICATION_CHOICES, default=DEFAULT_NOTIFICATION
+ choices=NOTIFICATION_CHOICES, default=DEFAULT_NOTIFICATION,
)
stale_engagement = MultipleChoiceField(
- choices=NOTIFICATION_CHOICES, default=DEFAULT_NOTIFICATION
+ choices=NOTIFICATION_CHOICES, default=DEFAULT_NOTIFICATION,
)
auto_close_engagement = MultipleChoiceField(
- choices=NOTIFICATION_CHOICES, default=DEFAULT_NOTIFICATION
+ choices=NOTIFICATION_CHOICES, default=DEFAULT_NOTIFICATION,
)
close_engagement = MultipleChoiceField(
- choices=NOTIFICATION_CHOICES, default=DEFAULT_NOTIFICATION
+ choices=NOTIFICATION_CHOICES, default=DEFAULT_NOTIFICATION,
)
user_mentioned = MultipleChoiceField(
- choices=NOTIFICATION_CHOICES, default=DEFAULT_NOTIFICATION
+ choices=NOTIFICATION_CHOICES, default=DEFAULT_NOTIFICATION,
)
code_review = MultipleChoiceField(
- choices=NOTIFICATION_CHOICES, default=DEFAULT_NOTIFICATION
+ choices=NOTIFICATION_CHOICES, default=DEFAULT_NOTIFICATION,
)
review_requested = MultipleChoiceField(
- choices=NOTIFICATION_CHOICES, default=DEFAULT_NOTIFICATION
+ choices=NOTIFICATION_CHOICES, default=DEFAULT_NOTIFICATION,
)
other = MultipleChoiceField(
- choices=NOTIFICATION_CHOICES, default=DEFAULT_NOTIFICATION
+ choices=NOTIFICATION_CHOICES, default=DEFAULT_NOTIFICATION,
)
sla_breach = MultipleChoiceField(
- choices=NOTIFICATION_CHOICES, default=DEFAULT_NOTIFICATION
+ choices=NOTIFICATION_CHOICES, default=DEFAULT_NOTIFICATION,
)
sla_breach_combined = MultipleChoiceField(
- choices=NOTIFICATION_CHOICES, default=DEFAULT_NOTIFICATION
+ choices=NOTIFICATION_CHOICES, default=DEFAULT_NOTIFICATION,
)
risk_acceptance_expiration = MultipleChoiceField(
- choices=NOTIFICATION_CHOICES, default=DEFAULT_NOTIFICATION
+ choices=NOTIFICATION_CHOICES, default=DEFAULT_NOTIFICATION,
)
template = serializers.BooleanField(default=False)
@@ -2967,7 +2967,7 @@ def validate(self, data):
or product != self.instance.product
):
notifications = Notifications.objects.filter(
- user=user, product=product, template=False
+ user=user, product=product, template=False,
).count()
if notifications > 0:
msg = "Notification for user and product already exists"
diff --git a/dojo/api_v2/views.py b/dojo/api_v2/views.py
index d0fe775b07..c0a6f14229 100644
--- a/dojo/api_v2/views.py
+++ b/dojo/api_v2/views.py
@@ -288,7 +288,7 @@ def get_queryset(self):
return get_authorized_group_members(Permissions.Group_View).distinct()
@extend_schema(
- exclude=True
+ exclude=True,
)
def partial_update(self, request, pk=None):
# Object authorization won't work if not all data is provided
@@ -329,7 +329,7 @@ def get_queryset(self):
responses={status.HTTP_200_OK: serializers.ReportGenerateSerializer},
)
@action(
- detail=True, methods=["post"], permission_classes=[IsAuthenticated]
+ detail=True, methods=["post"], permission_classes=[IsAuthenticated],
)
def generate_report(self, request, pk=None):
endpoint = self.get_object()
@@ -337,7 +337,7 @@ def generate_report(self, request, pk=None):
options = {}
# prepare post data
report_options = serializers.ReportGenerateOptionSerializer(
- data=request.data
+ data=request.data,
)
if report_options.is_valid():
options["include_finding_notes"] = report_options.validated_data[
@@ -354,7 +354,7 @@ def generate_report(self, request, pk=None):
] = report_options.validated_data["include_table_of_contents"]
else:
return Response(
- report_options.errors, status=status.HTTP_400_BAD_REQUEST
+ report_options.errors, status=status.HTTP_400_BAD_REQUEST,
)
data = report_generate(request, endpoint, options)
@@ -386,7 +386,7 @@ class EndpointStatusViewSet(
def get_queryset(self):
return get_authorized_endpoint_status(
- Permissions.Endpoint_View
+ Permissions.Endpoint_View,
).distinct()
@@ -426,7 +426,7 @@ def get_queryset(self):
)
@extend_schema(
- request=OpenApiTypes.NONE, responses={status.HTTP_200_OK: ""}
+ request=OpenApiTypes.NONE, responses={status.HTTP_200_OK: ""},
)
@action(detail=True, methods=["post"])
def close(self, request, pk=None):
@@ -435,7 +435,7 @@ def close(self, request, pk=None):
return HttpResponse()
@extend_schema(
- request=OpenApiTypes.NONE, responses={status.HTTP_200_OK: ""}
+ request=OpenApiTypes.NONE, responses={status.HTTP_200_OK: ""},
)
@action(detail=True, methods=["post"])
def reopen(self, request, pk=None):
@@ -448,7 +448,7 @@ def reopen(self, request, pk=None):
responses={status.HTTP_200_OK: serializers.ReportGenerateSerializer},
)
@action(
- detail=True, methods=["post"], permission_classes=[IsAuthenticated]
+ detail=True, methods=["post"], permission_classes=[IsAuthenticated],
)
def generate_report(self, request, pk=None):
engagement = self.get_object()
@@ -456,7 +456,7 @@ def generate_report(self, request, pk=None):
options = {}
# prepare post data
report_options = serializers.ReportGenerateOptionSerializer(
- data=request.data
+ data=request.data,
)
if report_options.is_valid():
options["include_finding_notes"] = report_options.validated_data[
@@ -473,7 +473,7 @@ def generate_report(self, request, pk=None):
] = report_options.validated_data["include_table_of_contents"]
else:
return Response(
- report_options.errors, status=status.HTTP_400_BAD_REQUEST
+ report_options.errors, status=status.HTTP_400_BAD_REQUEST,
)
data = report_generate(request, engagement, options)
@@ -483,7 +483,7 @@ def generate_report(self, request, pk=None):
@extend_schema(
methods=["GET"],
responses={
- status.HTTP_200_OK: serializers.EngagementToNotesSerializer
+ status.HTTP_200_OK: serializers.EngagementToNotesSerializer,
},
)
@extend_schema(
@@ -496,7 +496,7 @@ def notes(self, request, pk=None):
engagement = self.get_object()
if request.method == "POST":
new_note = serializers.AddNewNoteOptionSerializer(
- data=request.data
+ data=request.data,
)
if new_note.is_valid():
entry = new_note.validated_data["entry"]
@@ -504,7 +504,7 @@ def notes(self, request, pk=None):
note_type = new_note.validated_data.get("note_type", None)
else:
return Response(
- new_note.errors, status=status.HTTP_400_BAD_REQUEST
+ new_note.errors, status=status.HTTP_400_BAD_REQUEST,
)
author = request.user
@@ -518,22 +518,22 @@ def notes(self, request, pk=None):
engagement.notes.add(note)
serialized_note = serializers.NoteSerializer(
- {"author": author, "entry": entry, "private": private}
+ {"author": author, "entry": entry, "private": private},
)
return Response(
- serialized_note.data, status=status.HTTP_201_CREATED
+ serialized_note.data, status=status.HTTP_201_CREATED,
)
notes = engagement.notes.all()
serialized_notes = serializers.EngagementToNotesSerializer(
- {"engagement_id": engagement, "notes": notes}
+ {"engagement_id": engagement, "notes": notes},
)
return Response(serialized_notes.data, status=status.HTTP_200_OK)
@extend_schema(
methods=["GET"],
responses={
- status.HTTP_200_OK: serializers.EngagementToFilesSerializer
+ status.HTTP_200_OK: serializers.EngagementToFilesSerializer,
},
)
@extend_schema(
@@ -542,7 +542,7 @@ def notes(self, request, pk=None):
responses={status.HTTP_201_CREATED: serializers.FileSerializer},
)
@action(
- detail=True, methods=["get", "post"], parser_classes=(MultiPartParser,)
+ detail=True, methods=["get", "post"], parser_classes=(MultiPartParser,),
)
def files(self, request, pk=None):
engagement = self.get_object()
@@ -553,7 +553,7 @@ def files(self, request, pk=None):
file = new_file.validated_data["file"]
else:
return Response(
- new_file.errors, status=status.HTTP_400_BAD_REQUEST
+ new_file.errors, status=status.HTTP_400_BAD_REQUEST,
)
file = FileUpload(title=title, file=file)
@@ -562,12 +562,12 @@ def files(self, request, pk=None):
serialized_file = serializers.FileSerializer(file)
return Response(
- serialized_file.data, status=status.HTTP_201_CREATED
+ serialized_file.data, status=status.HTTP_201_CREATED,
)
files = engagement.files.all()
serialized_files = serializers.EngagementToFilesSerializer(
- {"engagement_id": engagement, "files": files}
+ {"engagement_id": engagement, "files": files},
)
return Response(serialized_files.data, status=status.HTTP_200_OK)
@@ -575,7 +575,7 @@ def files(self, request, pk=None):
methods=["POST"],
request=serializers.EngagementCheckListSerializer,
responses={
- status.HTTP_201_CREATED: serializers.EngagementCheckListSerializer
+ status.HTTP_201_CREATED: serializers.EngagementCheckListSerializer,
},
)
@action(detail=True, methods=["get", "post"])
@@ -588,25 +588,25 @@ def complete_checklist(self, request, pk=None):
if check_lists.count() > 0:
return Response(
{
- "message": "A completed checklist for this engagement already exists."
+ "message": "A completed checklist for this engagement already exists.",
},
status=status.HTTP_400_BAD_REQUEST,
)
check_list = serializers.EngagementCheckListSerializer(
- data=request.data
+ data=request.data,
)
if not check_list.is_valid():
return Response(
- check_list.errors, status=status.HTTP_400_BAD_REQUEST
+ check_list.errors, status=status.HTTP_400_BAD_REQUEST,
)
check_list = Check_List(**check_list.data)
check_list.engagement = engagement
check_list.save()
serialized_check_list = serializers.EngagementCheckListSerializer(
- check_list
+ check_list,
)
return Response(
- serialized_check_list.data, status=status.HTTP_201_CREATED
+ serialized_check_list.data, status=status.HTTP_201_CREATED,
)
prefetch_params = request.GET.get("prefetch", "").split(",")
prefetcher = _Prefetcher()
@@ -658,7 +658,7 @@ def download_file(self, request, file_id, pk=None):
class RiskAcceptanceViewSet(
- PrefetchDojoModelViewSet
+ PrefetchDojoModelViewSet,
):
serializer_class = serializers.RiskAcceptanceSerializer
queryset = Risk_Acceptance.objects.none()
@@ -682,7 +682,7 @@ def get_queryset(self):
return (
get_authorized_risk_acceptances(Permissions.Risk_Acceptance)
.prefetch_related(
- "notes", "engagement_set", "owner", "accepted_findings"
+ "notes", "engagement_set", "owner", "accepted_findings",
)
.distinct()
)
@@ -852,7 +852,7 @@ def perform_update(self, serializer):
def get_queryset(self):
findings = get_authorized_findings(
- Permissions.Finding_View
+ Permissions.Finding_View,
).prefetch_related(
"endpoints",
"reviewers",
@@ -893,7 +893,7 @@ def close(self, request, pk=None):
if request.method == "POST":
finding_close = serializers.FindingCloseSerializer(
- data=request.data
+ data=request.data,
)
if finding_close.is_valid():
finding.is_mitigated = finding_close.validated_data[
@@ -909,13 +909,13 @@ def close(self, request, pk=None):
finding.mitigated_by = request.user
finding.active = False
finding.false_p = finding_close.validated_data.get(
- "false_p", False
+ "false_p", False,
)
finding.duplicate = finding_close.validated_data.get(
- "duplicate", False
+ "duplicate", False,
)
finding.out_of_scope = finding_close.validated_data.get(
- "out_of_scope", False
+ "out_of_scope", False,
)
endpoints_status = finding.status_finding.all()
@@ -934,7 +934,7 @@ def close(self, request, pk=None):
finding.save()
else:
return Response(
- finding_close.errors, status=status.HTTP_400_BAD_REQUEST
+ finding_close.errors, status=status.HTTP_400_BAD_REQUEST,
)
serialized_finding = serializers.FindingCloseSerializer(finding)
return Response(serialized_finding.data)
@@ -961,7 +961,7 @@ def tags(self, request, pk=None):
]
for tag in tagulous.utils.parse_tags(
- new_tags.validated_data["tags"]
+ new_tags.validated_data["tags"],
):
if tag not in all_tags:
all_tags.append(tag)
@@ -970,7 +970,7 @@ def tags(self, request, pk=None):
finding.save()
else:
return Response(
- new_tags.errors, status=status.HTTP_400_BAD_REQUEST
+ new_tags.errors, status=status.HTTP_400_BAD_REQUEST,
)
tags = finding.tags
serialized_tags = serializers.TagSerializer({"tags": tags})
@@ -979,14 +979,14 @@ def tags(self, request, pk=None):
@extend_schema(
methods=["GET"],
responses={
- status.HTTP_200_OK: serializers.BurpRawRequestResponseSerializer
+ status.HTTP_200_OK: serializers.BurpRawRequestResponseSerializer,
},
)
@extend_schema(
methods=["POST"],
request=serializers.BurpRawRequestResponseSerializer,
responses={
- status.HTTP_201_CREATED: serializers.BurpRawRequestResponseSerializer
+ status.HTTP_201_CREATED: serializers.BurpRawRequestResponseSerializer,
},
)
@action(detail=True, methods=["get", "post"])
@@ -995,24 +995,24 @@ def request_response(self, request, pk=None):
if request.method == "POST":
burps = serializers.BurpRawRequestResponseSerializer(
- data=request.data, many=isinstance(request.data, list)
+ data=request.data, many=isinstance(request.data, list),
)
if burps.is_valid():
for pair in burps.validated_data["req_resp"]:
burp_rr = BurpRawRequestResponse(
finding=finding,
burpRequestBase64=base64.b64encode(
- pair["request"].encode("utf-8")
+ pair["request"].encode("utf-8"),
),
burpResponseBase64=base64.b64encode(
- pair["response"].encode("utf-8")
+ pair["response"].encode("utf-8"),
),
)
burp_rr.clean()
burp_rr.save()
else:
return Response(
- burps.errors, status=status.HTTP_400_BAD_REQUEST
+ burps.errors, status=status.HTTP_400_BAD_REQUEST,
)
# Not necessarily Burp scan specific - these are just any request/response pairs
burp_req_resp = BurpRawRequestResponse.objects.filter(finding=finding)
@@ -1026,7 +1026,7 @@ def request_response(self, request, pk=None):
response = burp.get_response()
burp_list.append({"request": request, "response": response})
serialized_burps = serializers.BurpRawRequestResponseSerializer(
- {"req_resp": burp_list}
+ {"req_resp": burp_list},
)
return Response(serialized_burps.data)
@@ -1044,7 +1044,7 @@ def notes(self, request, pk=None):
finding = self.get_object()
if request.method == "POST":
new_note = serializers.AddNewNoteOptionSerializer(
- data=request.data
+ data=request.data,
)
if new_note.is_valid():
entry = new_note.validated_data["entry"]
@@ -1052,7 +1052,7 @@ def notes(self, request, pk=None):
note_type = new_note.validated_data.get("note_type", None)
else:
return Response(
- new_note.errors, status=status.HTTP_400_BAD_REQUEST
+ new_note.errors, status=status.HTTP_400_BAD_REQUEST,
)
author = request.user
@@ -1071,15 +1071,15 @@ def notes(self, request, pk=None):
jira_helper.add_comment(finding.finding_group, note)
serialized_note = serializers.NoteSerializer(
- {"author": author, "entry": entry, "private": private}
+ {"author": author, "entry": entry, "private": private},
)
return Response(
- serialized_note.data, status=status.HTTP_201_CREATED
+ serialized_note.data, status=status.HTTP_201_CREATED,
)
notes = finding.notes.all()
serialized_notes = serializers.FindingToNotesSerializer(
- {"finding_id": finding, "notes": notes}
+ {"finding_id": finding, "notes": notes},
)
return Response(serialized_notes.data, status=status.HTTP_200_OK)
@@ -1093,7 +1093,7 @@ def notes(self, request, pk=None):
responses={status.HTTP_201_CREATED: serializers.FileSerializer},
)
@action(
- detail=True, methods=["get", "post"], parser_classes=(MultiPartParser,)
+ detail=True, methods=["get", "post"], parser_classes=(MultiPartParser,),
)
def files(self, request, pk=None):
finding = self.get_object()
@@ -1104,7 +1104,7 @@ def files(self, request, pk=None):
file = new_file.validated_data["file"]
else:
return Response(
- new_file.errors, status=status.HTTP_400_BAD_REQUEST
+ new_file.errors, status=status.HTTP_400_BAD_REQUEST,
)
file = FileUpload(title=title, file=file)
@@ -1113,12 +1113,12 @@ def files(self, request, pk=None):
serialized_file = serializers.FileSerializer(file)
return Response(
- serialized_file.data, status=status.HTTP_201_CREATED
+ serialized_file.data, status=status.HTTP_201_CREATED,
)
files = finding.files.all()
serialized_files = serializers.FindingToFilesSerializer(
- {"finding_id": finding, "files": files}
+ {"finding_id": finding, "files": files},
)
return Response(serialized_files.data, status=status.HTTP_200_OK)
@@ -1217,7 +1217,7 @@ def remove_tags(self, request, pk=None):
# serializer turns it into a string, but we need a list
del_tags = tagulous.utils.parse_tags(
- delete_tags.validated_data["tags"]
+ delete_tags.validated_data["tags"],
)
if len(del_tags) < 1:
return Response(
@@ -1228,7 +1228,7 @@ def remove_tags(self, request, pk=None):
if tag not in all_tags:
return Response(
{
- "error": f"'{tag}' is not a valid tag in list"
+ "error": f"'{tag}' is not a valid tag in list",
},
status=status.HTTP_400_BAD_REQUEST,
)
@@ -1242,13 +1242,13 @@ def remove_tags(self, request, pk=None):
)
else:
return Response(
- delete_tags.errors, status=status.HTTP_400_BAD_REQUEST
+ delete_tags.errors, status=status.HTTP_400_BAD_REQUEST,
)
@extend_schema(
responses={
- status.HTTP_200_OK: serializers.FindingSerializer(many=True)
- }
+ status.HTTP_200_OK: serializers.FindingSerializer(many=True),
+ },
)
@action(
detail=True,
@@ -1261,7 +1261,7 @@ def get_duplicate_cluster(self, request, pk):
finding = self.get_object()
result = duplicate_cluster(request, finding)
serializer = serializers.FindingSerializer(
- instance=result, many=True, context={"request": request}
+ instance=result, many=True, context={"request": request},
)
return Response(serializer.data, status=status.HTTP_200_OK)
@@ -1272,7 +1272,7 @@ def get_duplicate_cluster(self, request, pk):
@action(detail=True, methods=["post"], url_path=r"duplicate/reset")
def reset_finding_duplicate_status(self, request, pk):
checked_duplicate_id = reset_finding_duplicate_status_internal(
- request.user, pk
+ request.user, pk,
)
if checked_duplicate_id is None:
return Response(status=status.HTTP_400_BAD_REQUEST)
@@ -1282,13 +1282,13 @@ def reset_finding_duplicate_status(self, request, pk):
request=OpenApiTypes.NONE,
parameters=[
OpenApiParameter(
- "new_fid", OpenApiTypes.INT, OpenApiParameter.PATH
- )
+ "new_fid", OpenApiTypes.INT, OpenApiParameter.PATH,
+ ),
],
responses={status.HTTP_204_NO_CONTENT: ""},
)
@action(
- detail=True, methods=["post"], url_path=r"original/(?P<new_fid>\d+)"
+ detail=True, methods=["post"], url_path=r"original/(?P<new_fid>\d+)",
)
def set_finding_as_original(self, request, pk, new_fid):
success = set_finding_as_original_internal(request.user, pk, new_fid)
@@ -1301,14 +1301,14 @@ def set_finding_as_original(self, request, pk, new_fid):
responses={status.HTTP_200_OK: serializers.ReportGenerateSerializer},
)
@action(
- detail=False, methods=["post"], permission_classes=[IsAuthenticated]
+ detail=False, methods=["post"], permission_classes=[IsAuthenticated],
)
def generate_report(self, request):
findings = self.get_queryset()
options = {}
# prepare post data
report_options = serializers.ReportGenerateOptionSerializer(
- data=request.data
+ data=request.data,
)
if report_options.is_valid():
options["include_finding_notes"] = report_options.validated_data[
@@ -1325,7 +1325,7 @@ def generate_report(self, request):
] = report_options.validated_data["include_table_of_contents"]
else:
return Response(
- report_options.errors, status=status.HTTP_400_BAD_REQUEST
+ report_options.errors, status=status.HTTP_400_BAD_REQUEST,
)
data = report_generate(request, findings, options)
@@ -1335,7 +1335,7 @@ def generate_report(self, request):
def _get_metadata(self, request, finding):
metadata = DojoMeta.objects.filter(finding=finding)
serializer = serializers.FindingMetaSerializer(
- instance=metadata, many=True
+ instance=metadata, many=True,
)
return Response(serializer.data, status=status.HTTP_200_OK)
@@ -1343,7 +1343,7 @@ def _edit_metadata(self, request, finding):
metadata_name = request.query_params.get("name", None)
if metadata_name is None:
return Response(
- "Metadata name is required", status=status.HTTP_400_BAD_REQUEST
+ "Metadata name is required", status=status.HTTP_400_BAD_REQUEST,
)
try:
@@ -1383,7 +1383,7 @@ def _add_metadata(self, request, finding):
return Response(data=metadata_data.data, status=status.HTTP_200_OK)
else:
return Response(
- metadata_data.errors, status=status.HTTP_400_BAD_REQUEST
+ metadata_data.errors, status=status.HTTP_400_BAD_REQUEST,
)
def _remove_metadata(self, request, finding):
@@ -1395,7 +1395,7 @@ def _remove_metadata(self, request, finding):
)
metadata = get_object_or_404(
- DojoMeta.objects, finding=finding, name=name
+ DojoMeta.objects, finding=finding, name=name,
)
metadata.delete()
@@ -1406,7 +1406,7 @@ def _remove_metadata(self, request, finding):
responses={
status.HTTP_200_OK: serializers.FindingMetaSerializer(many=True),
status.HTTP_404_NOT_FOUND: OpenApiResponse(
- description="Returned if finding does not exist"
+ description="Returned if finding does not exist",
),
},
)
@@ -1420,17 +1420,17 @@ def _remove_metadata(self, request, finding):
required=True,
description="name of the metadata to retrieve. If name is empty, return all the \
metadata associated with the finding",
- )
+ ),
],
responses={
status.HTTP_200_OK: OpenApiResponse(
- description="Returned if the metadata was correctly deleted"
+ description="Returned if the metadata was correctly deleted",
),
status.HTTP_404_NOT_FOUND: OpenApiResponse(
- description="Returned if finding does not exist"
+ description="Returned if finding does not exist",
),
status.HTTP_400_BAD_REQUEST: OpenApiResponse(
- description="Returned if there was a problem with the metadata information"
+ description="Returned if there was a problem with the metadata information",
),
},
)
@@ -1440,10 +1440,10 @@ def _remove_metadata(self, request, finding):
responses={
status.HTTP_200_OK: serializers.FindingMetaSerializer,
status.HTTP_404_NOT_FOUND: OpenApiResponse(
- description="Returned if finding does not exist"
+ description="Returned if finding does not exist",
),
status.HTTP_400_BAD_REQUEST: OpenApiResponse(
- description="Returned if there was a problem with the metadata information"
+ description="Returned if there was a problem with the metadata information",
),
},
)
@@ -1453,10 +1453,10 @@ def _remove_metadata(self, request, finding):
responses={
status.HTTP_200_OK: serializers.FindingMetaSerializer,
status.HTTP_404_NOT_FOUND: OpenApiResponse(
- description="Returned if finding does not exist"
+ description="Returned if finding does not exist",
),
status.HTTP_400_BAD_REQUEST: OpenApiResponse(
- description="Returned if there was a problem with the metadata information"
+ description="Returned if there was a problem with the metadata information",
),
},
)
@@ -1481,7 +1481,7 @@ def metadata(self, request, pk=None):
return self._remove_metadata(request, finding)
return Response(
- {"error", "unsupported method"}, status=status.HTTP_400_BAD_REQUEST
+ {"error", "unsupported method"}, status=status.HTTP_400_BAD_REQUEST,
)
@@ -1599,7 +1599,7 @@ class ProductAPIScanConfigurationViewSet(
def get_queryset(self):
return get_authorized_product_api_scan_configurations(
- Permissions.Product_API_Scan_Configuration_View
+ Permissions.Product_API_Scan_Configuration_View,
)
@@ -1722,7 +1722,7 @@ def destroy(self, request, *args, **kwargs):
responses={status.HTTP_200_OK: serializers.ReportGenerateSerializer},
)
@action(
- detail=True, methods=["post"], permission_classes=[IsAuthenticated]
+ detail=True, methods=["post"], permission_classes=[IsAuthenticated],
)
def generate_report(self, request, pk=None):
product = self.get_object()
@@ -1730,7 +1730,7 @@ def generate_report(self, request, pk=None):
options = {}
# prepare post data
report_options = serializers.ReportGenerateOptionSerializer(
- data=request.data
+ data=request.data,
)
if report_options.is_valid():
options["include_finding_notes"] = report_options.validated_data[
@@ -1747,7 +1747,7 @@ def generate_report(self, request, pk=None):
] = report_options.validated_data["include_table_of_contents"]
else:
return Response(
- report_options.errors, status=status.HTTP_400_BAD_REQUEST
+ report_options.errors, status=status.HTTP_400_BAD_REQUEST,
)
data = report_generate(request, product, options)
@@ -1794,11 +1794,11 @@ class ProductMemberViewSet(
def get_queryset(self):
return get_authorized_product_members(
- Permissions.Product_View
+ Permissions.Product_View,
).distinct()
@extend_schema(
- exclude=True
+ exclude=True,
)
def partial_update(self, request, pk=None):
# Object authorization won't work if not all data is provided
@@ -1845,11 +1845,11 @@ class ProductGroupViewSet(
def get_queryset(self):
return get_authorized_product_groups(
- Permissions.Product_Group_View
+ Permissions.Product_Group_View,
).distinct()
@extend_schema(
- exclude=True
+ exclude=True,
)
def partial_update(self, request, pk=None):
# Object authorization won't work if not all data is provided
@@ -1903,7 +1903,7 @@ class ProductTypeViewSet(
def get_queryset(self):
return get_authorized_product_types(
- Permissions.Product_Type_View
+ Permissions.Product_Type_View,
).distinct()
# Overwrite perform_create of CreateModelMixin to add current user as owner
@@ -1932,7 +1932,7 @@ def destroy(self, request, *args, **kwargs):
responses={status.HTTP_200_OK: serializers.ReportGenerateSerializer},
)
@action(
- detail=True, methods=["post"], permission_classes=[IsAuthenticated]
+ detail=True, methods=["post"], permission_classes=[IsAuthenticated],
)
def generate_report(self, request, pk=None):
product_type = self.get_object()
@@ -1940,7 +1940,7 @@ def generate_report(self, request, pk=None):
options = {}
# prepare post data
report_options = serializers.ReportGenerateOptionSerializer(
- data=request.data
+ data=request.data,
)
if report_options.is_valid():
options["include_finding_notes"] = report_options.validated_data[
@@ -1957,7 +1957,7 @@ def generate_report(self, request, pk=None):
] = report_options.validated_data["include_table_of_contents"]
else:
return Response(
- report_options.errors, status=status.HTTP_400_BAD_REQUEST
+ report_options.errors, status=status.HTTP_400_BAD_REQUEST,
)
data = report_generate(request, product_type, options)
@@ -2004,14 +2004,14 @@ class ProductTypeMemberViewSet(
def get_queryset(self):
return get_authorized_product_type_members(
- Permissions.Product_Type_View
+ Permissions.Product_Type_View,
).distinct()
def destroy(self, request, *args, **kwargs):
instance = self.get_object()
if instance.role.is_owner:
owners = Product_Type_Member.objects.filter(
- product_type=instance.product_type, role__is_owner=True
+ product_type=instance.product_type, role__is_owner=True,
).count()
if owners <= 1:
return Response(
@@ -2022,7 +2022,7 @@ def destroy(self, request, *args, **kwargs):
return Response(status=status.HTTP_204_NO_CONTENT)
@extend_schema(
- exclude=True
+ exclude=True,
)
def partial_update(self, request, pk=None):
# Object authorization won't work if not all data is provided
@@ -2069,11 +2069,11 @@ class ProductTypeGroupViewSet(
def get_queryset(self):
return get_authorized_product_type_groups(
- Permissions.Product_Type_Group_View
+ Permissions.Product_Type_Group_View,
).distinct()
@extend_schema(
- exclude=True
+ exclude=True,
)
def partial_update(self, request, pk=None):
# Object authorization won't work if not all data is provided
@@ -2096,7 +2096,7 @@ class StubFindingsViewSet(
def get_queryset(self):
return get_authorized_stub_findings(
- Permissions.Finding_View
+ Permissions.Finding_View,
).distinct()
def get_serializer_class(self):
@@ -2160,7 +2160,7 @@ def get_serializer_class(self):
responses={status.HTTP_200_OK: serializers.ReportGenerateSerializer},
)
@action(
- detail=True, methods=["post"], permission_classes=[IsAuthenticated]
+ detail=True, methods=["post"], permission_classes=[IsAuthenticated],
)
def generate_report(self, request, pk=None):
test = self.get_object()
@@ -2168,7 +2168,7 @@ def generate_report(self, request, pk=None):
options = {}
# prepare post data
report_options = serializers.ReportGenerateOptionSerializer(
- data=request.data
+ data=request.data,
)
if report_options.is_valid():
options["include_finding_notes"] = report_options.validated_data[
@@ -2185,7 +2185,7 @@ def generate_report(self, request, pk=None):
] = report_options.validated_data["include_table_of_contents"]
else:
return Response(
- report_options.errors, status=status.HTTP_400_BAD_REQUEST
+ report_options.errors, status=status.HTTP_400_BAD_REQUEST,
)
data = report_generate(request, test, options)
@@ -2206,7 +2206,7 @@ def notes(self, request, pk=None):
test = self.get_object()
if request.method == "POST":
new_note = serializers.AddNewNoteOptionSerializer(
- data=request.data
+ data=request.data,
)
if new_note.is_valid():
entry = new_note.validated_data["entry"]
@@ -2214,7 +2214,7 @@ def notes(self, request, pk=None):
note_type = new_note.validated_data.get("note_type", None)
else:
return Response(
- new_note.errors, status=status.HTTP_400_BAD_REQUEST
+ new_note.errors, status=status.HTTP_400_BAD_REQUEST,
)
author = request.user
@@ -2228,15 +2228,15 @@ def notes(self, request, pk=None):
test.notes.add(note)
serialized_note = serializers.NoteSerializer(
- {"author": author, "entry": entry, "private": private}
+ {"author": author, "entry": entry, "private": private},
)
return Response(
- serialized_note.data, status=status.HTTP_201_CREATED
+ serialized_note.data, status=status.HTTP_201_CREATED,
)
notes = test.notes.all()
serialized_notes = serializers.TestToNotesSerializer(
- {"test_id": test, "notes": notes}
+ {"test_id": test, "notes": notes},
)
return Response(serialized_notes.data, status=status.HTTP_200_OK)
@@ -2250,7 +2250,7 @@ def notes(self, request, pk=None):
responses={status.HTTP_201_CREATED: serializers.FileSerializer},
)
@action(
- detail=True, methods=["get", "post"], parser_classes=(MultiPartParser,)
+ detail=True, methods=["get", "post"], parser_classes=(MultiPartParser,),
)
def files(self, request, pk=None):
test = self.get_object()
@@ -2261,7 +2261,7 @@ def files(self, request, pk=None):
file = new_file.validated_data["file"]
else:
return Response(
- new_file.errors, status=status.HTTP_400_BAD_REQUEST
+ new_file.errors, status=status.HTTP_400_BAD_REQUEST,
)
file = FileUpload(title=title, file=file)
@@ -2270,12 +2270,12 @@ def files(self, request, pk=None):
serialized_file = serializers.FileSerializer(file)
return Response(
- serialized_file.data, status=status.HTTP_201_CREATED
+ serialized_file.data, status=status.HTTP_201_CREATED,
)
files = test.files.all()
serialized_files = serializers.TestToFilesSerializer(
- {"test_id": test, "files": files}
+ {"test_id": test, "files": files},
)
return Response(serialized_files.data, status=status.HTTP_200_OK)
@@ -2382,7 +2382,7 @@ class TestImportViewSet(
def get_queryset(self):
return get_authorized_test_imports(
- Permissions.Test_View
+ Permissions.Test_View,
).prefetch_related(
"test_import_finding_action_set",
"findings_affected",
@@ -2549,7 +2549,7 @@ class UserProfileView(GenericAPIView):
serializer_class = serializers.UserProfileSerializer
@action(
- detail=True, methods=["get"], filter_backends=[], pagination_class=None
+ detail=True, methods=["get"], filter_backends=[], pagination_class=None,
)
def get(self, request, format=None):
user = get_current_user()
@@ -2639,7 +2639,7 @@ def get_queryset(self):
# Authorization: authenticated users, DjangoModelPermissions
class EndpointMetaImporterView(
- mixins.CreateModelMixin, viewsets.GenericViewSet
+ mixins.CreateModelMixin, viewsets.GenericViewSet,
):
"""
Imports a CSV file into a product to propagate arbitrary meta and tags on endpoints.
@@ -2880,14 +2880,14 @@ def report_generate(request, obj, options):
prod_type=product_type,
queryset=prefetch_related_findings_for_report(
Finding.objects.filter(
- test__engagement__product__prod_type=product_type
- )
+ test__engagement__product__prod_type=product_type,
+ ),
),
)
if len(findings.qs) > 0:
start_date = timezone.make_aware(
- datetime.combine(findings.qs.last().date, datetime.min.time())
+ datetime.combine(findings.qs.last().date, datetime.min.time()),
)
else:
start_date = timezone.now()
@@ -2908,11 +2908,11 @@ def report_generate(request, obj, options):
request.GET,
product=product,
queryset=prefetch_related_findings_for_report(
- Finding.objects.filter(test__engagement__product=product)
+ Finding.objects.filter(test__engagement__product=product),
),
)
ids = get_endpoint_ids(
- Endpoint.objects.filter(product=product).distinct()
+ Endpoint.objects.filter(product=product).distinct(),
)
endpoints = Endpoint.objects.filter(id__in=ids)
@@ -2922,14 +2922,14 @@ def report_generate(request, obj, options):
request.GET,
engagement=engagement,
queryset=prefetch_related_findings_for_report(
- Finding.objects.filter(test__engagement=engagement)
+ Finding.objects.filter(test__engagement=engagement),
),
)
report_name = "Engagement Report: " + str(engagement)
ids = set(finding.id for finding in findings.qs) # noqa: C401
ids = get_endpoint_ids(
- Endpoint.objects.filter(product=engagement.product).distinct()
+ Endpoint.objects.filter(product=engagement.product).distinct(),
)
endpoints = Endpoint.objects.filter(id__in=ids)
@@ -2939,7 +2939,7 @@ def report_generate(request, obj, options):
request.GET,
engagement=test.engagement,
queryset=prefetch_related_findings_for_report(
- Finding.objects.filter(test=test)
+ Finding.objects.filter(test=test),
),
)
report_name = "Test Report: " + str(test)
@@ -2949,12 +2949,12 @@ def report_generate(request, obj, options):
host = endpoint.host
report_name = "Endpoint Report: " + host
endpoints = Endpoint.objects.filter(
- host=host, product=endpoint.product
+ host=host, product=endpoint.product,
).distinct()
findings = report_finding_filter_class(
request.GET,
queryset=prefetch_related_findings_for_report(
- Finding.objects.filter(endpoints__in=endpoints)
+ Finding.objects.filter(endpoints__in=endpoints),
),
)
@@ -3130,7 +3130,7 @@ def report_generate(request, obj, options):
# Authorization: superuser
class SystemSettingsViewSet(
- mixins.ListModelMixin, mixins.UpdateModelMixin, viewsets.GenericViewSet
+ mixins.ListModelMixin, mixins.UpdateModelMixin, viewsets.GenericViewSet,
):
"""Basic control over System Settings. Use 'id' 1 for PUT, PATCHÂ operations"""
@@ -3221,7 +3221,7 @@ class ConfigurationPermissionViewSet(
):
serializer_class = serializers.ConfigurationPermissionSerializer
queryset = Permission.objects.filter(
- codename__in=get_configuration_permissions_codenames()
+ codename__in=get_configuration_permissions_codenames(),
)
filter_backends = (DjangoFilterBackend,)
filterset_fields = ["id", "name", "codename"]
@@ -3276,7 +3276,7 @@ class QuestionnaireGeneralSurveyViewSet(
class QuestionnaireEngagementSurveyViewSet(
- viewsets.ReadOnlyModelViewSet
+ viewsets.ReadOnlyModelViewSet,
):
serializer_class = serializers.QuestionnaireEngagementSurveySerializer
queryset = Engagement_Survey.objects.all()
@@ -3303,7 +3303,7 @@ class QuestionnaireAnsweredSurveyViewSet(
# Authorization: configuration
class AnnouncementViewSet(
- DojoModelViewSet
+ DojoModelViewSet,
):
serializer_class = serializers.AnnouncementSerializer
queryset = Announcement.objects.all()
diff --git a/dojo/apps.py b/dojo/apps.py
index e12ea7459b..9b3f786408 100644
--- a/dojo/apps.py
+++ b/dojo/apps.py
@@ -25,11 +25,11 @@ def ready(self):
# charfields/textfields are the fields that watson indexes by default (but we have to repeat here if we add extra fields)
# and watson likes to have tuples instead of lists
- watson.register(self.get_model('Product'), fields=get_model_fields_with_extra(self.get_model('Product'), ('id', 'prod_type__name', )), store=('prod_type__name', ))
+ watson.register(self.get_model('Product'), fields=get_model_fields_with_extra(self.get_model('Product'), ('id', 'prod_type__name')), store=('prod_type__name', ))
- watson.register(self.get_model('Test'), fields=get_model_fields_with_extra(self.get_model('Test'), ('id', 'engagement__product__name', )), store=('engagement__product__name', )) # test_type__name?
+ watson.register(self.get_model('Test'), fields=get_model_fields_with_extra(self.get_model('Test'), ('id', 'engagement__product__name')), store=('engagement__product__name', )) # test_type__name?
- watson.register(self.get_model('Finding'), fields=get_model_fields_with_extra(self.get_model('Finding'), ('id', 'url', 'unique_id_from_tool', 'test__engagement__product__name', 'jira_issue__jira_key', )),
+ watson.register(self.get_model('Finding'), fields=get_model_fields_with_extra(self.get_model('Finding'), ('id', 'url', 'unique_id_from_tool', 'test__engagement__product__name', 'jira_issue__jira_key')),
store=('status', 'jira_issue__jira_key', 'test__engagement__product__name', 'severity', 'severity_display', 'latest_note'))
# some thoughts on Finding fields that are not indexed yet:
@@ -60,7 +60,7 @@ def ready(self):
watson.register(self.get_model('Finding_Template'))
watson.register(self.get_model('Endpoint'), store=('product__name', )) # add product name also?
- watson.register(self.get_model('Engagement'), fields=get_model_fields_with_extra(self.get_model('Engagement'), ('id', 'product__name', )), store=('product__name', ))
+ watson.register(self.get_model('Engagement'), fields=get_model_fields_with_extra(self.get_model('Engagement'), ('id', 'product__name')), store=('product__name', ))
watson.register(self.get_model('App_Analysis'))
watson.register(self.get_model('Vulnerability_Id'), store=('finding__test__engagement__product__name', ))
diff --git a/dojo/authorization/authorization.py b/dojo/authorization/authorization.py
index 8538101cf5..a542d7c6e0 100644
--- a/dojo/authorization/authorization.py
+++ b/dojo/authorization/authorization.py
@@ -57,7 +57,7 @@ def user_has_permission(user, obj, permission):
# permissions
member = get_product_type_member(user, obj)
if member is not None and role_has_permission(
- member.role.id, permission
+ member.role.id, permission,
):
return True
# Check if the user is in a group with a role for the product type with
@@ -78,7 +78,7 @@ def user_has_permission(user, obj, permission):
# permissions
member = get_product_member(user, obj)
if member is not None and role_has_permission(
- member.role.id, permission
+ member.role.id, permission,
):
return True
# Check if the user is in a group with a role for the product with the
@@ -101,14 +101,14 @@ def user_has_permission(user, obj, permission):
isinstance(obj, Finding) or isinstance(obj, Stub_Finding)
) and permission in Permissions.get_finding_permissions():
return user_has_permission(
- user, obj.test.engagement.product, permission
+ user, obj.test.engagement.product, permission,
)
elif (
isinstance(obj, Finding_Group)
and permission in Permissions.get_finding_group_permissions()
):
return user_has_permission(
- user, obj.test.engagement.product, permission
+ user, obj.test.engagement.product, permission,
)
elif (
isinstance(obj, Endpoint)
@@ -138,7 +138,7 @@ def user_has_permission(user, obj, permission):
if permission == Permissions.Product_Type_Member_Delete:
# Every member is allowed to remove himself
return obj.user == user or user_has_permission(
- user, obj.product_type, permission
+ user, obj.product_type, permission,
)
else:
return user_has_permission(user, obj.product_type, permission)
@@ -149,7 +149,7 @@ def user_has_permission(user, obj, permission):
if permission == Permissions.Product_Member_Delete:
# Every member is allowed to remove himself
return obj.user == user or user_has_permission(
- user, obj.product, permission
+ user, obj.product, permission,
)
else:
return user_has_permission(user, obj.product, permission)
@@ -171,7 +171,7 @@ def user_has_permission(user, obj, permission):
# permissions
group_member = get_group_member(user, obj)
return group_member is not None and role_has_permission(
- group_member.role.id, permission
+ group_member.role.id, permission,
)
elif (
isinstance(obj, Dojo_Group_Member)
@@ -180,7 +180,7 @@ def user_has_permission(user, obj, permission):
if permission == Permissions.Group_Member_Delete:
# Every user is allowed to remove himself
return obj.user == user or user_has_permission(
- user, obj.group, permission
+ user, obj.group, permission,
)
else:
return user_has_permission(user, obj.group, permission)
@@ -192,15 +192,15 @@ def user_has_permission(user, obj, permission):
return user_has_permission(user, obj.product, permission)
if obj.engagement:
return user_has_permission(
- user, obj.engagement.product, permission
+ user, obj.engagement.product, permission,
)
if obj.test:
return user_has_permission(
- user, obj.test.engagement.product, permission
+ user, obj.test.engagement.product, permission,
)
if obj.finding:
return user_has_permission(
- user, obj.finding.test.engagement.product, permission
+ user, obj.finding.test.engagement.product, permission,
)
else:
msg = f"No authorization implemented for class {type(obj).__name__} and permission {permission}"
@@ -233,7 +233,7 @@ def user_has_global_permission(user, permission):
hasattr(group, "global_role")
and group.global_role.role is not None
and role_has_global_permission(
- group.global_role.role.id, permission
+ group.global_role.role.id, permission,
)
):
return True
diff --git a/dojo/authorization/authorization_decorators.py b/dojo/authorization/authorization_decorators.py
index 3fa1926ec8..3063d0821d 100644
--- a/dojo/authorization/authorization_decorators.py
+++ b/dojo/authorization/authorization_decorators.py
@@ -15,7 +15,7 @@ def user_is_authorized(model, permission, arg, lookup="pk", func=None):
if func is None:
return functools.partial(
- user_is_authorized, model, permission, arg, lookup
+ user_is_authorized, model, permission, arg, lookup,
)
@functools.wraps(func)
diff --git a/dojo/banner/urls.py b/dojo/banner/urls.py
index 309f735263..c0b75f1ff7 100644
--- a/dojo/banner/urls.py
+++ b/dojo/banner/urls.py
@@ -4,6 +4,6 @@
urlpatterns = [
re_path(
- r"^configure_banner$", views.configure_banner, name="configure_banner"
+ r"^configure_banner$", views.configure_banner, name="configure_banner",
),
]
diff --git a/dojo/banner/views.py b/dojo/banner/views.py
index 5d81a1cd6e..dcdccc77cc 100644
--- a/dojo/banner/views.py
+++ b/dojo/banner/views.py
@@ -37,11 +37,11 @@ def configure_banner(request):
initial={
"banner_enable": banner_config.banner_enable,
"banner_message": banner_config.banner_message,
- }
+ },
)
add_breadcrumb(
- title="Banner Configuration", top_level=True, request=request
+ title="Banner Configuration", top_level=True, request=request,
)
return render(
request,
diff --git a/dojo/benchmark/views.py b/dojo/benchmark/views.py
index 2169fd34d0..ffdbea82e6 100644
--- a/dojo/benchmark/views.py
+++ b/dojo/benchmark/views.py
@@ -78,15 +78,15 @@ def update_benchmark(request, pid, _type):
"date": n.date.ctime(),
}
for n in notes
- ]
- }
+ ],
+ },
)
bench.save()
return JsonResponse({field: value})
return redirect_to_return_url_or_else(
- request, reverse("view_product_benchmark", args=(pid, _type))
+ request, reverse("view_product_benchmark", args=(pid, _type)),
)
@@ -110,7 +110,7 @@ def update_benchmark_summary(request, pid, _type, summary):
return JsonResponse(data)
return redirect_to_return_url_or_else(
- request, reverse("view_product_benchmark", args=(pid, _type))
+ request, reverse("view_product_benchmark", args=(pid, _type)),
)
@@ -179,7 +179,7 @@ def score_asvs(product, benchmark_type):
)
asvs_level_3_benchmark, asvs_level_3_score = return_score(asvs_level_3)
benchmark_product_summary = Benchmark_Product_Summary.objects.get(
- product=product, benchmark_type=benchmark_type
+ product=product, benchmark_type=benchmark_type,
)
benchmark_product_summary.asvs_level_1_benchmark = asvs_level_1_benchmark
@@ -197,34 +197,34 @@ def benchmark_view(request, pid, type, cat=None):
product = get_object_or_404(Product, id=pid)
benchmark_type = get_object_or_404(Benchmark_Type, id=type)
benchmark_category = Benchmark_Category.objects.filter(
- type=type, enabled=True
+ type=type, enabled=True,
).order_by("name")
# Add requirements to the product
new_benchmarks = Benchmark_Requirement.objects.filter(
- category__type=type, category__type__enabled=True, enabled=True
+ category__type=type, category__type__enabled=True, enabled=True,
).exclude(
id__in=Benchmark_Product.objects.filter(product=product).values_list(
- "control_id", flat=True
- )
+ "control_id", flat=True,
+ ),
)
add_benchmark(new_benchmarks, product)
# Create the benchmark summary category
try:
benchmark_product_summary = Benchmark_Product_Summary.objects.get(
- product=product, benchmark_type=benchmark_type
+ product=product, benchmark_type=benchmark_type,
)
except Exception:
benchmark_product_summary = Benchmark_Product_Summary(
- product=product, benchmark_type=benchmark_type
+ product=product, benchmark_type=benchmark_type,
)
benchmark_product_summary.save()
if cat:
benchmarks = (
Benchmark_Product.objects.select_related(
- "control", "control__category"
+ "control", "control__category",
)
.filter(
product=product.id,
@@ -239,7 +239,7 @@ def benchmark_view(request, pid, type, cat=None):
else:
benchmarks = (
Benchmark_Product.objects.select_related(
- "control", "control__category"
+ "control", "control__category",
)
.filter(
product=product.id,
@@ -252,7 +252,7 @@ def benchmark_view(request, pid, type, cat=None):
)
benchmark_summary_form = Benchmark_Product_SummaryForm(
- instance=benchmark_product_summary
+ instance=benchmark_product_summary,
)
noted_benchmarks = (
@@ -268,7 +268,7 @@ def benchmark_view(request, pid, type, cat=None):
key=lambda x: [int(_) for _ in x.control.objective_number.split(".")],
)
benchmark_category = sorted(
- benchmark_category, key=lambda x: int(x.name[:3].strip("V: "))
+ benchmark_category, key=lambda x: int(x.name[:3].strip("V: ")),
)
product_tab = Product_Tab(product, title=_("Benchmarks"), tab="benchmarks")
@@ -295,7 +295,7 @@ def benchmark_view(request, pid, type, cat=None):
def delete(request, pid, type):
product = get_object_or_404(Product, id=pid)
benchmark_product_summary = Benchmark_Product_Summary.objects.filter(
- product=product, benchmark_type=type
+ product=product, benchmark_type=type,
).first()
form = DeleteBenchmarkForm(instance=benchmark_product_summary)
@@ -305,11 +305,11 @@ def delete(request, pid, type):
and str(benchmark_product_summary.id) == request.POST["id"]
):
form = DeleteBenchmarkForm(
- request.POST, instance=benchmark_product_summary
+ request.POST, instance=benchmark_product_summary,
)
if form.is_valid():
benchmark_product = Benchmark_Product.objects.filter(
- product=product, control__category__type=type
+ product=product, control__category__type=type,
)
benchmark_product.delete()
benchmark_product_summary.delete()
@@ -322,7 +322,7 @@ def delete(request, pid, type):
return HttpResponseRedirect(reverse("product"))
product_tab = Product_Tab(
- product, title=_("Delete Benchmarks"), tab="benchmarks"
+ product, title=_("Delete Benchmarks"), tab="benchmarks",
)
return render(
request,
diff --git a/dojo/checks.py b/dojo/checks.py
index 1299f4d765..39762b76fc 100644
--- a/dojo/checks.py
+++ b/dojo/checks.py
@@ -13,6 +13,6 @@ def check_configuration_deduplication(app_configs, **kwargs):
hint=f'Check configuration ["HASHCODE_FIELDS_PER_SCANNER"]["{scanner}"] value',
obj=settings.HASHCODE_FIELDS_PER_SCANNER[scanner],
id="dojo.E001",
- )
+ ),
)
return errors
diff --git a/dojo/components/sql_group_concat.py b/dojo/components/sql_group_concat.py
index b7abd6c9ef..3b08bb4cc5 100644
--- a/dojo/components/sql_group_concat.py
+++ b/dojo/components/sql_group_concat.py
@@ -6,7 +6,7 @@ class Sql_GroupConcat(Aggregate):
allow_distinct = True
def __init__(
- self, expression, separator, distinct=False, ordering=None, **extra
+ self, expression, separator, distinct=False, ordering=None, **extra,
):
self.separator = separator
super().__init__(
@@ -15,7 +15,7 @@ def __init__(
ordering=f" ORDER BY {ordering}" if ordering is not None else "",
separator=f' SEPARATOR "{separator}"',
output_field=CharField(),
- **extra
+ **extra,
)
def as_mysql(self, compiler, connection):
@@ -31,5 +31,5 @@ def as_sql(self, compiler, connection, **extra):
compiler,
connection,
template="%(function)s(%(distinct)s%(expressions)s%(ordering)s)",
- **extra
+ **extra,
)
diff --git a/dojo/components/views.py b/dojo/components/views.py
index d4a7490fbb..45cf09727f 100644
--- a/dojo/components/views.py
+++ b/dojo/components/views.py
@@ -25,32 +25,32 @@ def components(request):
.order_by("component_name")
.annotate(
component_version=StringAgg(
- "component_version", delimiter=separator, distinct=True, default=Value('')
- )
+ "component_version", delimiter=separator, distinct=True, default=Value(''),
+ ),
)
)
else:
component_query = component_query.values("component_name").order_by(
- "component_name"
+ "component_name",
)
component_query = component_query.annotate(
component_version=Sql_GroupConcat(
- "component_version", separator=separator, distinct=True
- )
+ "component_version", separator=separator, distinct=True,
+ ),
)
# Append counts
component_query = component_query.annotate(total=Count("id")).order_by(
- "component_name"
+ "component_name",
)
component_query = component_query.annotate(
- active=Count("id", filter=Q(active=True))
+ active=Count("id", filter=Q(active=True)),
)
component_query = component_query.annotate(
- duplicate=(Count("id", filter=Q(duplicate=True)))
+ duplicate=(Count("id", filter=Q(duplicate=True))),
)
component_query = component_query.order_by(
- "-total"
+ "-total",
) # Default sort by total descending
filter_string_matching = get_system_setting("filter_string_matching", False)
@@ -60,7 +60,7 @@ def components(request):
# Filter out None values for auto-complete
component_words = component_query.exclude(
- component_name__isnull=True
+ component_name__isnull=True,
).values_list("component_name", flat=True)
return render(
diff --git a/dojo/context_processors.py b/dojo/context_processors.py
index c0bbb25046..12168d9ea6 100644
--- a/dojo/context_processors.py
+++ b/dojo/context_processors.py
@@ -49,7 +49,7 @@ def bind_announcement(request):
try:
if request.user.is_authenticated:
user_announcement = UserAnnouncement.objects.select_related(
- "announcement"
+ "announcement",
).get(user=request.user)
return {"announcement": user_announcement.announcement}
return {}
diff --git a/dojo/cred/views.py b/dojo/cred/views.py
index 26d5d62f87..53136b4994 100644
--- a/dojo/cred/views.py
+++ b/dojo/cred/views.py
@@ -30,7 +30,7 @@ def new_cred(request):
messages.SUCCESS,
'Credential Successfully Created.',
extra_tags='alert-success')
- return HttpResponseRedirect(reverse('cred', ))
+ return HttpResponseRedirect(reverse('cred'))
else:
tform = CredUserForm()
add_breadcrumb(
@@ -64,7 +64,7 @@ def edit_cred(request, ttid):
messages.SUCCESS,
'Credential Successfully Updated.',
extra_tags='alert-success')
- return HttpResponseRedirect(reverse('cred', ))
+ return HttpResponseRedirect(reverse('cred'))
else:
tool_config.password = prepare_for_view(tool_config.password)
@@ -112,7 +112,7 @@ def view_cred_details(request, ttid):
'cred': cred,
'form': form,
'notes': notes,
- 'cred_products': cred_products
+ 'cred_products': cred_products,
})
@@ -177,7 +177,7 @@ def view_cred_product(request, pid, ttid):
'cred_type': cred_type,
'edit_link': edit_link,
'delete_link': delete_link,
- 'view_link': view_link
+ 'view_link': view_link,
})
@@ -226,7 +226,7 @@ def view_cred_product_engagement(request, eid, ttid):
'cred_type': cred_type,
'edit_link': edit_link,
'delete_link': delete_link,
- 'cred_product': cred_product
+ 'cred_product': cred_product,
})
@@ -277,7 +277,7 @@ def view_cred_engagement_test(request, tid, ttid):
'cred_type': cred_type,
'edit_link': edit_link,
'delete_link': delete_link,
- 'cred_product': cred_product
+ 'cred_product': cred_product,
})
@@ -328,7 +328,7 @@ def view_cred_finding(request, fid, ttid):
'cred_type': cred_type,
'edit_link': edit_link,
'delete_link': delete_link,
- 'cred_product': cred_product
+ 'cred_product': cred_product,
})
@@ -356,7 +356,7 @@ def edit_cred_product(request, pid, ttid):
return render(request, 'dojo/edit_cred_all.html', {
'tform': tform,
'product_tab': product_tab,
- 'cred_type': "Product"
+ 'cred_type': "Product",
})
@@ -390,7 +390,7 @@ def edit_cred_product_engagement(request, eid, ttid):
return render(request, 'dojo/edit_cred_all.html', {
'tform': tform,
- 'cred_type': "Engagement"
+ 'cred_type': "Engagement",
})
@@ -425,7 +425,7 @@ def new_cred_product(request, pid):
return render(request, 'dojo/new_cred_product.html', {
'tform': tform,
'pid': pid,
- 'product_tab': product_tab
+ 'product_tab': product_tab,
})
@@ -476,7 +476,7 @@ def new_cred_product_engagement(request, eid):
request, 'dojo/new_cred_mapping.html', {
'tform': tform,
'eid': eid,
- 'formlink': reverse('new_cred_product_engagement', args=(eid, ))
+ 'formlink': reverse('new_cred_product_engagement', args=(eid, )),
})
@@ -526,7 +526,7 @@ def new_cred_engagement_test(request, tid):
request, 'dojo/new_cred_mapping.html', {
'tform': tform,
'eid': tid,
- 'formlink': reverse('new_cred_engagement_test', args=(tid, ))
+ 'formlink': reverse('new_cred_engagement_test', args=(tid, )),
})
@@ -577,7 +577,7 @@ def new_cred_finding(request, fid):
request, 'dojo/new_cred_mapping.html', {
'tform': tform,
'eid': fid,
- 'formlink': reverse('new_cred_finding', args=(fid, ))
+ 'formlink': reverse('new_cred_finding', args=(fid, )),
})
@@ -663,7 +663,7 @@ def delete_cred_controller(request, destination_url, id, ttid):
product_tab = Product_Tab(product, title="Delete Credential Mapping", tab="settings")
return render(request, 'dojo/delete_cred_all.html', {
'tform': tform,
- 'product_tab': product_tab
+ 'product_tab': product_tab,
})
diff --git a/dojo/endpoint/signals.py b/dojo/endpoint/signals.py
index 23ae7c4d07..4c18d03d91 100644
--- a/dojo/endpoint/signals.py
+++ b/dojo/endpoint/signals.py
@@ -17,7 +17,7 @@ def endpoint_post_delete(sender, instance, using, origin, **kwargs):
le = LogEntry.objects.get(
action=LogEntry.Action.DELETE,
content_type=ContentType.objects.get(app_label='dojo', model='endpoint'),
- object_id=instance.id
+ object_id=instance.id,
)
description = _('The endpoint "%(name)s" was deleted by %(user)s') % {
'name': str(instance), 'user': le.actor}
diff --git a/dojo/endpoint/utils.py b/dojo/endpoint/utils.py
index e40de5c5e1..9b7733c553 100644
--- a/dojo/endpoint/utils.py
+++ b/dojo/endpoint/utils.py
@@ -82,7 +82,7 @@ def endpoint_get_or_create(**kwargs):
else:
logger.warning(
f"Endpoints in your database are broken. "
- f"Please access {reverse('endpoint_migrate')} and migrate them to new format or remove them."
+ f"Please access {reverse('endpoint_migrate')} and migrate them to new format or remove them.",
)
# Get the oldest endpoint first, and return that instead
# a datetime is not captured on the endpoint model, so ID
@@ -217,7 +217,7 @@ def err_log(message, html_log, endpoint_html_log, endpoint):
path=endpoint.path,
query=endpoint.query,
fragment=endpoint.fragment,
- product_id=product.pk if product else None
+ product_id=product.pk if product else None,
).order_by('id')
if ep.count() > 1:
@@ -280,12 +280,12 @@ def validate_endpoints_to_add(endpoints_to_add):
endpoint_ins.port,
endpoint_ins.path,
endpoint_ins.query,
- endpoint_ins.fragment
+ endpoint_ins.fragment,
])
except ValidationError as ves:
for ve in ves:
errors.append(
- ValidationError(f"Invalid endpoint {endpoint}: {ve}")
+ ValidationError(f"Invalid endpoint {endpoint}: {ve}"),
)
return endpoint_list, errors
@@ -301,7 +301,7 @@ def save_endpoints_to_add(endpoint_list, product):
path=e[4],
query=e[5],
fragment=e[6],
- product=product
+ product=product,
)
processed_endpoints.append(endpoint)
return processed_endpoints
diff --git a/dojo/endpoint/views.py b/dojo/endpoint/views.py
index 46a2098006..b6f838d793 100644
--- a/dojo/endpoint/views.py
+++ b/dojo/endpoint/views.py
@@ -400,7 +400,7 @@ def endpoint_bulk_update_all(request, pid=None):
mitigated=True,
mitigated_by=request.user,
mitigated_time=timezone.now(),
- last_modified=timezone.now()
+ last_modified=timezone.now(),
)
if updated_endpoint_count > 0:
@@ -472,7 +472,7 @@ def migrate_endpoints_view(request):
request, 'dojo/migrate_endpoints.html', {
'product_tab': None,
"name": view_name,
- "html_log": html_log
+ "html_log": html_log,
})
diff --git a/dojo/engagement/signals.py b/dojo/engagement/signals.py
index f8863ee862..7a8e3352ba 100644
--- a/dojo/engagement/signals.py
+++ b/dojo/engagement/signals.py
@@ -42,7 +42,7 @@ def engagement_post_delete(sender, instance, using, origin, **kwargs):
le = LogEntry.objects.get(
action=LogEntry.Action.DELETE,
content_type=ContentType.objects.get(app_label='dojo', model='engagement'),
- object_id=instance.id
+ object_id=instance.id,
)
description = _('The engagement "%(name)s" was deleted by %(user)s') % {
'name': instance.name, 'user': le.actor}
diff --git a/dojo/engagement/views.py b/dojo/engagement/views.py
index 2fdc7e34a4..f28a0863fb 100644
--- a/dojo/engagement/views.py
+++ b/dojo/engagement/views.py
@@ -137,7 +137,7 @@ def engagement_calendar(request):
'caltype': 'engagements',
'leads': request.GET.getlist('lead', ''),
'engagements': engagements,
- 'users': get_authorized_users(Permissions.Engagement_View)
+ 'users': get_authorized_users(Permissions.Engagement_View),
})
@@ -158,7 +158,7 @@ def get_filtered_engagements(request, view):
if System_Settings.objects.get().enable_jira:
engagements = engagements.prefetch_related(
'jira_project__jira_instance',
- 'product__jira_project_set__jira_instance'
+ 'product__jira_project_set__jira_instance',
)
filter_string_matching = get_system_setting("filter_string_matching", False)
@@ -174,11 +174,11 @@ def get_test_counts(engagements):
engagement_test_counts = {
test['engagement']: test['test_count']
for test in Test.objects.filter(
- engagement__in=engagements
+ engagement__in=engagements,
).values(
- 'engagement'
+ 'engagement',
).annotate(
- test_count=Count('engagement')
+ test_count=Count('engagement'),
)
}
return engagement_test_counts
@@ -221,7 +221,7 @@ def engagements_all(request):
products_filter_class = ProductEngagementsFilterWithoutObjectLookups if filter_string_matching else ProductEngagementsFilter
engagement_query = Engagement.objects.annotate(test_count=Count('test__id'))
filter_qs = products_with_engagements.prefetch_related(
- Prefetch('engagement_set', queryset=products_filter_class(request.GET, engagement_query).qs)
+ Prefetch('engagement_set', queryset=products_filter_class(request.GET, engagement_query).qs),
)
filter_qs = filter_qs.prefetch_related(
@@ -233,12 +233,12 @@ def engagements_all(request):
if System_Settings.objects.get().enable_jira:
filter_qs = filter_qs.prefetch_related(
'engagement_set__jira_project__jira_instance',
- 'jira_project_set__jira_instance'
+ 'jira_project_set__jira_instance',
)
filter_class = EngagementFilterWithoutObjectLookups if filter_string_matching else EngagementFilter
filtered = filter_class(
request.GET,
- queryset=filter_qs
+ queryset=filter_qs,
)
prods = get_page_items(request, filtered.qs, 25)
@@ -500,7 +500,7 @@ def get(self, request, eid, *args, **kwargs):
'creds': creds,
'cred_eng': cred_eng,
'network': network,
- 'preset_test_type': preset_test_type
+ 'preset_test_type': preset_test_type,
})
def post(self, request, eid, *args, **kwargs):
@@ -589,7 +589,7 @@ def post(self, request, eid, *args, **kwargs):
'creds': creds,
'cred_eng': cred_eng,
'network': network,
- 'preset_test_type': preset_test_type
+ 'preset_test_type': preset_test_type,
})
@@ -686,7 +686,7 @@ def add_tests(request, eid):
'form': form,
'cred_form': cred_form,
'eid': eid,
- 'eng': eng
+ 'eng': eng,
})
@@ -767,9 +767,9 @@ def get_credential_form(
return CredMappingForm(
initial={
"cred_user_queryset": Cred_Mapping.objects.filter(
- engagement=engagement
+ engagement=engagement,
).order_by('cred_id'),
- }
+ },
)
def get_jira_form(
@@ -790,12 +790,12 @@ def get_jira_form(
jira_form = JIRAImportScanForm(
request.POST,
push_all=push_all_jira_issues,
- prefix='jiraform'
+ prefix='jiraform',
)
else:
jira_form = JIRAImportScanForm(
push_all=push_all_jira_issues,
- prefix='jiraform'
+ prefix='jiraform',
)
return jira_form, push_all_jira_issues
@@ -926,7 +926,7 @@ def import_findings(
try:
importer_client = DefaultImporter(**context)
context["test"], _, finding_count, closed_finding_count, _, _, _ = importer_client.process_scan(
- context.pop("scan", None)
+ context.pop("scan", None),
)
# Add a message to the view for the user to see the results
add_success_message_to_response(importer_client.construct_imported_message(
@@ -1023,7 +1023,7 @@ def process_credentials_form(
# Select the credential mapping object from the selected list and only allow if the credential is associated with the product
cred_user = Cred_Mapping.objects.filter(
pk=cred_user.id,
- engagement=context.get("engagement")
+ engagement=context.get("engagement"),
).first()
# Create the new credential mapping object
new_cred_mapping = form.save(commit=False)
@@ -1258,7 +1258,7 @@ def add_risk_acceptance(request, eid, fid=None):
return render(request, 'dojo/add_risk_acceptance.html', {
'eng': eng,
'product_tab': product_tab,
- 'form': form
+ 'form': form,
})
@@ -1551,7 +1551,7 @@ def engagement_ics(request, eid):
f"Set aside for engagement {eng.name}, on product {eng.product.name}. "
f"Additional detail can be found at {request.build_absolute_uri(reverse('view_engagement', args=(eng.id, )))}"
),
- uid
+ uid,
)
output = cal.serialize()
response = HttpResponse(content=output)
@@ -1687,7 +1687,7 @@ def excel_export(request):
response = HttpResponse(
content=stream,
- content_type='application/vnd.openxmlformats-officedocument.spreadsheetml.sheet'
+ content_type='application/vnd.openxmlformats-officedocument.spreadsheetml.sheet',
)
response['Content-Disposition'] = 'attachment; filename=engagements.xlsx'
return response
diff --git a/dojo/filters.py b/dojo/filters.py
index 76c65b92a6..6d124d67f0 100644
--- a/dojo/filters.py
+++ b/dojo/filters.py
@@ -96,7 +96,7 @@
local_tz = pytz.timezone(get_system_setting('time_zone'))
-BOOLEAN_CHOICES = (('false', 'No'), ('true', 'Yes'),)
+BOOLEAN_CHOICES = (('false', 'No'), ('true', 'Yes'))
EARLIEST_FINDING = None
@@ -183,7 +183,7 @@ def filter(self, qs, value):
earliest_finding = get_earliest_finding(qs)
if earliest_finding is not None:
start_date = local_tz.localize(datetime.combine(
- earliest_finding.date, datetime.min.time())
+ earliest_finding.date, datetime.min.time()),
)
self.start_date = _truncate(start_date - timedelta(days=1))
self.end_date = _truncate(now() + timedelta(days=1))
@@ -213,7 +213,7 @@ def sla_violated(self, qs, name):
risk_accepted=False,
is_mitigated=False,
mitigated=None,
- ) & Q(sla_expiration_date__lt=timezone.now().date())
+ ) & Q(sla_expiration_date__lt=timezone.now().date()),
)
options = {
@@ -341,7 +341,7 @@ def get_finding_filterset_fields(metrics=False, similar=False, filter_string_mat
if similar:
fields.extend([
'id',
- 'hash_code'
+ 'hash_code',
])
fields.extend(['title', 'component_name', 'component_version'])
@@ -609,7 +609,7 @@ class DateRangeFilter(ChoiceFilter):
1: (_('Today'), lambda qs, name: qs.filter(**{
f'{name}__year': now().year,
f'{name}__month': now().month,
- f'{name}__day': now().day
+ f'{name}__day': now().day,
})),
2: (_('Past 7 days'), lambda qs, name: qs.filter(**{
f'{name}__gte': _truncate(now() - timedelta(days=7)),
@@ -625,7 +625,7 @@ class DateRangeFilter(ChoiceFilter):
})),
5: (_('Current month'), lambda qs, name: qs.filter(**{
f'{name}__year': now().year,
- f'{name}__month': now().month
+ f'{name}__month': now().month,
})),
6: (_('Current year'), lambda qs, name: qs.filter(**{
f'{name}__year': now().year,
@@ -655,7 +655,7 @@ class DateRangeOmniFilter(ChoiceFilter):
1: (_('Today'), lambda qs, name: qs.filter(**{
f'{name}__year': now().year,
f'{name}__month': now().month,
- f'{name}__day': now().day
+ f'{name}__day': now().day,
})),
2: (_('Next 7 days'), lambda qs, name: qs.filter(**{
f'{name}__gte': _truncate(now() + timedelta(days=1)),
@@ -683,7 +683,7 @@ class DateRangeOmniFilter(ChoiceFilter):
})),
8: (_('Current month'), lambda qs, name: qs.filter(**{
f'{name}__year': now().year,
- f'{name}__month': now().month
+ f'{name}__month': now().month,
})),
9: (_('Past year'), lambda qs, name: qs.filter(**{
f'{name}__gte': _truncate(now() - timedelta(days=365)),
@@ -715,10 +715,10 @@ class ReportBooleanFilter(ChoiceFilter):
options = {
None: (_('Either'), lambda qs, name: qs.all()),
1: (_('Yes'), lambda qs, name: qs.filter(**{
- f'{name}': True
+ f'{name}': True,
})),
2: (_('No'), lambda qs, name: qs.filter(**{
- f'{name}': False
+ f'{name}': False,
})),
}
@@ -775,7 +775,7 @@ def any(self, qs, name):
earliest_finding = get_earliest_finding(qs)
if earliest_finding is not None:
start_date = local_tz.localize(datetime.combine(
- earliest_finding.date, datetime.min.time())
+ earliest_finding.date, datetime.min.time()),
)
self.start_date = _truncate(start_date - timedelta(days=1))
self.end_date = _truncate(now() + timedelta(days=1))
@@ -787,7 +787,7 @@ def current_month(self, qs, name):
self.end_date = now()
return qs.filter(**{
f'{name}__year': self.start_date.year,
- f'{name}__month': self.start_date.month
+ f'{name}__month': self.start_date.month,
})
def current_year(self, qs, name):
@@ -843,7 +843,7 @@ def filter(self, qs, value):
earliest_finding = get_earliest_finding(qs)
if earliest_finding is not None:
start_date = local_tz.localize(datetime.combine(
- earliest_finding.date, datetime.min.time())
+ earliest_finding.date, datetime.min.time()),
)
self.start_date = _truncate(start_date - timedelta(days=1))
self.end_date = _truncate(now() + timedelta(days=1))
@@ -872,7 +872,7 @@ class ProductComponentFilter(DojoFilter):
'active': 'Active',
'duplicate': 'Duplicate',
'total': 'Total',
- }
+ },
)
@@ -945,7 +945,7 @@ class EngagementDirectFilterHelper(FilterSet):
"product__name": "Product Name",
"product__prod_type__name": "Product Type",
"lead__first_name": "Lead",
- }
+ },
)
@@ -1026,7 +1026,7 @@ class EngagementFilterHelper(FilterSet):
field_labels={
"name": "Product Name",
"prod_type__name": "Product Type",
- }
+ },
)
@@ -1132,7 +1132,7 @@ class ProductEngagementFilterHelper(FilterSet):
),
field_labels={
'name': 'Engagement Name',
- }
+ },
)
class Meta:
@@ -1203,7 +1203,7 @@ class ApiEngagementFilter(DojoFilter):
),
field_labels={
'name': 'Engagement Name',
- }
+ },
)
@@ -1240,7 +1240,7 @@ class ProductFilterHelper(FilterSet):
('origin', 'origin'),
('external_audience', 'external_audience'),
('internet_accessible', 'internet_accessible'),
- ('findings_count', 'findings_count')
+ ('findings_count', 'findings_count'),
),
field_labels={
'name': 'Product Name',
@@ -1253,7 +1253,7 @@ class ProductFilterHelper(FilterSet):
'external_audience': 'External Audience ',
'internet_accessible': 'Internet Accessible ',
'findings_count': 'Findings Count ',
- }
+ },
)
@@ -1283,7 +1283,7 @@ class Meta:
fields = [
"name", "name_exact", "prod_type", "business_criticality",
"platform", "lifecycle", "origin", "external_audience",
- "internet_accessible", "tags"
+ "internet_accessible", "tags",
]
@@ -1377,8 +1377,8 @@ class ApiProductFilter(DojoFilter):
('prod_type', 'prod_type'),
('prod_type__name', 'prod_type__name'),
('updated', 'updated'),
- ('user_records', 'user_records')
- )
+ ('user_records', 'user_records'),
+ ),
)
@@ -1528,7 +1528,7 @@ def filter_percentage(self, queryset, name, value):
max_val = value + decimal.Decimal(f"1E{exponent}")
lookup_kwargs = {
f"{name}__gte": value,
- f"{name}__lt": max_val, }
+ f"{name}__lt": max_val}
return queryset.filter(**lookup_kwargs)
@@ -1652,7 +1652,7 @@ class FindingFilterHelper(FilterSet):
'test__engagement__product__name': 'Product Name',
'epss_score': 'EPSS Score',
'epss_percentile': 'EPSS Percentile',
- }
+ },
)
def __init__(self, *args, **kwargs):
@@ -1752,7 +1752,7 @@ class Meta:
'numerical_severity', 'line', 'duplicate_finding',
'hash_code', 'reviewers', 'created', 'files',
'sla_start_date', 'sla_expiration_date', 'cvssv3',
- 'severity_justification', 'steps_to_reproduce',]
+ 'severity_justification', 'steps_to_reproduce']
def __init__(self, *args, **kwargs):
self.user = None
@@ -1810,7 +1810,7 @@ class Meta:
'numerical_severity', 'line', 'duplicate_finding',
'hash_code', 'reviewers', 'created', 'files',
'sla_start_date', 'sla_expiration_date', 'cvssv3',
- 'severity_justification', 'steps_to_reproduce',]
+ 'severity_justification', 'steps_to_reproduce']
def __init__(self, *args, **kwargs):
self.user = None
@@ -1832,7 +1832,7 @@ def set_related_object_fields(self, *args: list, **kwargs: dict):
del self.form.fields['test__engagement__product__prod_type']
# TODO add authorized check to be sure
self.form.fields['test__engagement'].queryset = Engagement.objects.filter(
- product_id=self.pid
+ product_id=self.pid,
).all()
self.form.fields['test'].queryset = get_authorized_tests(Permissions.Test_View, product=self.pid).prefetch_related('test_type')
else:
@@ -1991,7 +1991,7 @@ class TemplateFindingFilter(DojoFilter):
),
field_labels={
'numerical_severity': 'Severity',
- }
+ },
)
class Meta:
@@ -2204,7 +2204,7 @@ def __init__(self, *args, **kwargs):
if self.pid:
del self.form.fields["finding__test__engagement__product__prod_type"]
self.form.fields["finding__test__engagement"].queryset = Engagement.objects.filter(
- product_id=self.pid
+ product_id=self.pid,
).all()
else:
self.form.fields["finding__test__engagement"].queryset = get_authorized_engagements(Permissions.Engagement_View).order_by("name")
@@ -2669,7 +2669,7 @@ class EngagementTestFilterHelper(FilterSet):
),
field_labels={
'name': 'Test Name',
- }
+ },
)
@@ -2804,7 +2804,7 @@ class ApiTestFilter(DojoFilter):
),
field_labels={
'name': 'Test Name',
- }
+ },
)
class Meta:
@@ -3141,7 +3141,7 @@ class UserFilter(DojoFilter):
'username': 'User Name',
'is_active': 'Active',
'is_superuser': 'Superuser',
- }
+ },
)
class Meta:
@@ -3177,7 +3177,7 @@ class TestImportFilter(DojoFilter):
('build_id', 'build_id'),
('commit_hash', 'commit_hash'),
- )
+ ),
)
class Meta:
@@ -3191,7 +3191,7 @@ class TestImportFindingActionFilter(DojoFilter):
# tuple-mapping retains order
fields=(
('action', 'action'),
- )
+ ),
)
class Meta:
@@ -3219,8 +3219,8 @@ class Meta:
'filter_class': CharFilter,
'extra': lambda f: {
'lookup_expr': 'icontains',
- }
- }
+ },
+ },
}
diff --git a/dojo/finding/queries.py b/dojo/finding/queries.py
index e10cfca3dd..cfc7e9ace9 100644
--- a/dojo/finding/queries.py
+++ b/dojo/finding/queries.py
@@ -36,7 +36,7 @@ def get_authorized_groups(permission, user=None):
authorized_product_type_roles,
authorized_product_roles,
authorized_product_type_groups,
- authorized_product_groups
+ authorized_product_groups,
)
@@ -60,7 +60,7 @@ def get_authorized_findings(permission, queryset=None, user=None):
authorized_product_type_roles,
authorized_product_roles,
authorized_product_type_groups,
- authorized_product_groups
+ authorized_product_groups,
) = get_authorized_groups(permission, user=user)
findings = findings.annotate(
@@ -93,7 +93,7 @@ def get_authorized_stub_findings(permission):
authorized_product_type_roles,
authorized_product_roles,
authorized_product_type_groups,
- authorized_product_groups
+ authorized_product_groups,
) = get_authorized_groups(permission, user=user)
findings = Stub_Finding.objects.annotate(
diff --git a/dojo/finding/urls.py b/dojo/finding/urls.py
index 27549aeca1..5e20fd1b6f 100644
--- a/dojo/finding/urls.py
+++ b/dojo/finding/urls.py
@@ -7,113 +7,113 @@
re_path(
r'^finding/(?P<finding_id>\d+)$',
views.ViewFinding.as_view(),
- name='view_finding'
+ name='view_finding',
),
re_path(
r'^finding/(?P<finding_id>\d+)/edit$',
views.EditFinding.as_view(),
- name='edit_finding'
+ name='edit_finding',
),
re_path(
r'^finding/(?P<finding_id>\d+)/delete$',
views.DeleteFinding.as_view(),
- name='delete_finding'
+ name='delete_finding',
),
# Listing operations
re_path(
r'^finding$',
views.ListFindings.as_view(),
- name='all_findings'
+ name='all_findings',
),
re_path(
r'^finding/open$',
views.ListOpenFindings.as_view(),
- name='open_findings'
+ name='open_findings',
),
re_path(
r'^finding/verified$',
views.ListVerifiedFindings.as_view(),
- name='verified_findings'
+ name='verified_findings',
),
re_path(
r'^finding/closed$',
views.ListClosedFindings.as_view(),
- name='closed_findings'
+ name='closed_findings',
),
re_path(
r'^finding/accepted$',
views.ListAcceptedFindings.as_view(),
- name='accepted_findings'
+ name='accepted_findings',
),
re_path(
r'^product/(?P<product_id>\d+)/finding/open$',
views.ListOpenFindings.as_view(),
- name='product_open_findings'
+ name='product_open_findings',
),
re_path(
r'^product/(?P<product_id>\d+)/findings$',
views.ListOpenFindings.as_view(),
- name='view_product_findings_old'
+ name='view_product_findings_old',
),
re_path(
r'^product/(?P<product_id>\d+)/finding/verified$',
views.ListVerifiedFindings.as_view(),
- name='product_verified_findings'
+ name='product_verified_findings',
),
re_path(
r'^product/(?P<product_id>\d+)/finding/out_of_scope$',
views.ListOutOfScopeFindings.as_view(),
- name='product_out_of_scope_findings'
+ name='product_out_of_scope_findings',
),
re_path(
r'^product/(?P<product_id>\d+)/finding/inactive$',
views.ListInactiveFindings.as_view(),
- name='product_inactive_findings'
+ name='product_inactive_findings',
),
re_path(
r'^product/(?P<product_id>\d+)/finding/all$',
views.ListFindings.as_view(),
- name='product_all_findings'
+ name='product_all_findings',
),
re_path(
r'^product/(?P<product_id>\d+)/finding/closed$',
views.ListClosedFindings.as_view(),
- name='product_closed_findings'
+ name='product_closed_findings',
),
re_path(
r'^product/(?P<product_id>\d+)/finding/false_positive$',
views.ListFalsePositiveFindings.as_view(),
- name='product_false_positive_findings'
+ name='product_false_positive_findings',
),
re_path(
r'^product/(?P<product_id>\d+)/finding/accepted$',
views.ListAcceptedFindings.as_view(),
- name='product_accepted_findings'
+ name='product_accepted_findings',
),
re_path(
r'^engagement/(?P<engagement_id>\d+)/finding/open$',
views.ListOpenFindings.as_view(),
- name='engagement_open_findings'
+ name='engagement_open_findings',
),
re_path(
r'^engagement/(?P<engagement_id>\d+)/finding/closed$',
views.ListClosedFindings.as_view(),
- name='engagement_closed_findings'
+ name='engagement_closed_findings',
),
re_path(
r'^engagement/(?P<engagement_id>\d+)/finding/verified$',
views.ListVerifiedFindings.as_view(),
- name='engagement_verified_findings'
+ name='engagement_verified_findings',
),
re_path(
r'^engagement/(?P<engagement_id>\d+)/finding/accepted$',
views.ListAcceptedFindings.as_view(),
- name='engagement_accepted_findings'
+ name='engagement_accepted_findings',
),
re_path(
r'^engagement/(?P<engagement_id>\d+)/finding/all$',
views.ListFindings.as_view(),
- name='engagement_all_findings'
+ name='engagement_all_findings',
),
# findings
re_path(r'^finding/bulk$', views.finding_bulk_update_all,
diff --git a/dojo/finding/views.py b/dojo/finding/views.py
index c84154804a..8373022d72 100644
--- a/dojo/finding/views.py
+++ b/dojo/finding/views.py
@@ -133,34 +133,34 @@
def prefetch_for_findings(findings, prefetch_type="all", exclude_untouched=True):
prefetched_findings = findings
if isinstance(
- findings, QuerySet
+ findings, QuerySet,
): # old code can arrive here with prods being a list because the query was already executed
prefetched_findings = prefetched_findings.prefetch_related("reporter")
prefetched_findings = prefetched_findings.prefetch_related(
- "jira_issue__jira_project__jira_instance"
+ "jira_issue__jira_project__jira_instance",
)
prefetched_findings = prefetched_findings.prefetch_related("test__test_type")
prefetched_findings = prefetched_findings.prefetch_related(
- "test__engagement__jira_project__jira_instance"
+ "test__engagement__jira_project__jira_instance",
)
prefetched_findings = prefetched_findings.prefetch_related(
- "test__engagement__product__jira_project_set__jira_instance"
+ "test__engagement__product__jira_project_set__jira_instance",
)
prefetched_findings = prefetched_findings.prefetch_related("found_by")
# for open/active findings the following 4 prefetches are not needed
if prefetch_type != "open":
prefetched_findings = prefetched_findings.prefetch_related(
- "risk_acceptance_set"
+ "risk_acceptance_set",
)
prefetched_findings = prefetched_findings.prefetch_related(
- "risk_acceptance_set__accepted_findings"
+ "risk_acceptance_set__accepted_findings",
)
prefetched_findings = prefetched_findings.prefetch_related(
- "original_finding"
+ "original_finding",
)
prefetched_findings = prefetched_findings.prefetch_related(
- "duplicate_finding"
+ "duplicate_finding",
)
if exclude_untouched:
@@ -169,13 +169,13 @@ def prefetch_for_findings(findings, prefetch_type="all", exclude_untouched=True)
Prefetch(
"test_import_finding_action_set",
queryset=Test_Import_Finding_Action.objects.exclude(
- action=IMPORT_UNTOUCHED_FINDING
+ action=IMPORT_UNTOUCHED_FINDING,
),
- )
+ ),
)
else:
prefetched_findings = prefetched_findings.prefetch_related(
- "test_import_finding_action_set"
+ "test_import_finding_action_set",
)
"""
we could try to prefetch only the latest note with SubQuery and OuterRef,
@@ -187,23 +187,23 @@ def prefetch_for_findings(findings, prefetch_type="all", exclude_untouched=True)
prefetched_findings = prefetched_findings.prefetch_related("status_finding")
prefetched_findings = prefetched_findings.annotate(
active_endpoint_count=Count(
- "status_finding__id", filter=Q(status_finding__mitigated=False)
- )
+ "status_finding__id", filter=Q(status_finding__mitigated=False),
+ ),
)
prefetched_findings = prefetched_findings.annotate(
mitigated_endpoint_count=Count(
- "status_finding__id", filter=Q(status_finding__mitigated=True)
- )
+ "status_finding__id", filter=Q(status_finding__mitigated=True),
+ ),
)
prefetched_findings = prefetched_findings.prefetch_related("finding_group_set")
prefetched_findings = prefetched_findings.prefetch_related(
- "test__engagement__product__members"
+ "test__engagement__product__members",
)
prefetched_findings = prefetched_findings.prefetch_related(
- "test__engagement__product__prod_type__members"
+ "test__engagement__product__prod_type__members",
)
prefetched_findings = prefetched_findings.prefetch_related(
- "vulnerability_id_set"
+ "vulnerability_id_set",
)
else:
logger.debug("unable to prefetch because query was already executed")
@@ -214,25 +214,25 @@ def prefetch_for_findings(findings, prefetch_type="all", exclude_untouched=True)
def prefetch_for_similar_findings(findings):
prefetched_findings = findings
if isinstance(
- findings, QuerySet
+ findings, QuerySet,
): # old code can arrive here with prods being a list because the query was already executed
prefetched_findings = prefetched_findings.prefetch_related("reporter")
prefetched_findings = prefetched_findings.prefetch_related(
- "jira_issue__jira_project__jira_instance"
+ "jira_issue__jira_project__jira_instance",
)
prefetched_findings = prefetched_findings.prefetch_related("test__test_type")
prefetched_findings = prefetched_findings.prefetch_related(
- "test__engagement__jira_project__jira_instance"
+ "test__engagement__jira_project__jira_instance",
)
prefetched_findings = prefetched_findings.prefetch_related(
- "test__engagement__product__jira_project_set__jira_instance"
+ "test__engagement__product__jira_project_set__jira_instance",
)
prefetched_findings = prefetched_findings.prefetch_related("found_by")
prefetched_findings = prefetched_findings.prefetch_related(
- "risk_acceptance_set"
+ "risk_acceptance_set",
)
prefetched_findings = prefetched_findings.prefetch_related(
- "risk_acceptance_set__accepted_findings"
+ "risk_acceptance_set__accepted_findings",
)
prefetched_findings = prefetched_findings.prefetch_related("original_finding")
prefetched_findings = prefetched_findings.prefetch_related("duplicate_finding")
@@ -241,9 +241,9 @@ def prefetch_for_similar_findings(findings):
Prefetch(
"test_import_finding_action_set",
queryset=Test_Import_Finding_Action.objects.exclude(
- action=IMPORT_UNTOUCHED_FINDING
+ action=IMPORT_UNTOUCHED_FINDING,
),
- )
+ ),
)
"""
we could try to prefetch only the latest note with SubQuery and OuterRef,
@@ -252,7 +252,7 @@ def prefetch_for_similar_findings(findings):
prefetched_findings = prefetched_findings.prefetch_related("notes")
prefetched_findings = prefetched_findings.prefetch_related("tags")
prefetched_findings = prefetched_findings.prefetch_related(
- "vulnerability_id_set"
+ "vulnerability_id_set",
)
else:
logger.debug("unable to prefetch because query was already executed")
@@ -414,7 +414,7 @@ def add_breadcrumbs(self, request: HttpRequest, context: dict):
[
("Endpoints", reverse("vulnerable_endpoints")),
(endpoint, reverse("view_endpoint", args=(endpoint.id,))),
- ]
+ ],
)
# Show the "All findings" breadcrumb if nothing is coming from the product or engagement
elif not self.get_engagement_id() and not self.get_product_id():
@@ -556,7 +556,7 @@ def get_cwe_template(self, finding: Finding):
cwe_template = Finding_Template.objects.filter(cwe=finding.cwe).first()
return {
- "cwe_template": cwe_template
+ "cwe_template": cwe_template,
}
def get_request_response(self, finding: Finding):
@@ -609,12 +609,12 @@ def get_similar_findings(self, request: HttpRequest, finding: Finding):
}
# add related actions for non-similar and non-duplicate cluster members
finding.related_actions = calculate_possible_related_actions_for_similar_finding(
- request, finding, finding
+ request, finding, finding,
)
if finding.duplicate_finding:
finding.duplicate_finding.related_actions = (
calculate_possible_related_actions_for_similar_finding(
- request, finding, finding.duplicate_finding
+ request, finding, finding.duplicate_finding,
)
)
filter_string_matching = get_system_setting("filter_string_matching", False)
@@ -633,12 +633,12 @@ def get_similar_findings(self, request: HttpRequest, finding: Finding):
prefix="similar",
)
similar_findings.object_list = prefetch_for_similar_findings(
- similar_findings.object_list
+ similar_findings.object_list,
)
for similar_finding in similar_findings:
similar_finding.related_actions = (
calculate_possible_related_actions_for_similar_finding(
- request, finding, similar_finding
+ request, finding, similar_finding,
)
)
@@ -677,7 +677,7 @@ def get_typed_note_form(self, request: HttpRequest, context: dict):
args = [request.POST] if request.method == "POST" else []
# Set the initial form args
kwargs = {
- "available_note_types": context.get("available_note_types")
+ "available_note_types": context.get("available_note_types"),
}
return TypedNoteForm(*args, **kwargs)
@@ -698,7 +698,7 @@ def process_form(self, request: HttpRequest, finding: Finding, context: dict):
new_note.save()
# Add an entry to the note history
history = NoteHistory(
- data=new_note.entry, time=new_note.date, current_editor=new_note.author
+ data=new_note.entry, time=new_note.date, current_editor=new_note.author,
)
history.save()
new_note.history.add(history)
@@ -714,13 +714,13 @@ def process_form(self, request: HttpRequest, finding: Finding, context: dict):
jira_helper.add_comment(finding.finding_group, new_note)
# Send the notification of the note being added
url = request.build_absolute_uri(
- reverse("view_finding", args=(finding.id,))
+ reverse("view_finding", args=(finding.id,)),
)
title = f"Finding: {finding.title}"
process_tag_notifications(request, new_note, url, title)
# Add a message to the request
messages.add_message(
- request, messages.SUCCESS, "Note saved.", extra_tags="alert-success"
+ request, messages.SUCCESS, "Note saved.", extra_tags="alert-success",
)
return request, True
@@ -743,8 +743,8 @@ def get_initial_context(self, request: HttpRequest, finding: Finding, user: Dojo
"note_type_activation": note_type_activation,
"available_note_types": available_note_types,
"product_tab": Product_Tab(
- finding.test.engagement.product, title="View Finding", tab="findings"
- )
+ finding.test.engagement.product, title="View Finding", tab="findings",
+ ),
}
# Set the form using the context, and then update the context
form = self.get_form(request, context)
@@ -855,7 +855,7 @@ def get_github_form(self, request: HttpRequest, finding: Finding):
# Set the initial form args
kwargs = {
"enabled": finding.has_github_issue(),
- "prefix": "githubform"
+ "prefix": "githubform",
}
return GITHUBFindingForm(*args, **kwargs)
@@ -871,8 +871,8 @@ def get_initial_context(self, request: HttpRequest, finding: Finding):
"gform": self.get_github_form(request, finding),
"return_url": get_return_url(request),
"product_tab": Product_Tab(
- finding.test.engagement.product, title="Edit Finding", tab="findings"
- )
+ finding.test.engagement.product, title="Edit Finding", tab="findings",
+ ),
}
def validate_status_change(self, request: HttpRequest, finding: Finding, context: dict):
@@ -945,7 +945,7 @@ def process_false_positive_history(self, finding: Finding):
logger.debug('FALSE_POSITIVE_HISTORY: Reactivating existing findings based on: %s', finding)
existing_fp_findings = match_finding_to_existing_findings(
- finding, product=finding.test.engagement.product
+ finding, product=finding.test.engagement.product,
).filter(false_p=True)
for fp in existing_fp_findings:
@@ -964,10 +964,10 @@ def process_burp_request_response(self, finding: Finding, context: dict):
except BurpRawRequestResponse.MultipleObjectsReturned:
burp_rr = BurpRawRequestResponse.objects.filter(finding=finding).first()
burp_rr.burpRequestBase64 = base64.b64encode(
- context["form"].cleaned_data["request"].encode()
+ context["form"].cleaned_data["request"].encode(),
)
burp_rr.burpResponseBase64 = base64.b64encode(
- context["form"].cleaned_data["response"].encode()
+ context["form"].cleaned_data["response"].encode(),
)
burp_rr.clean()
burp_rr.save()
@@ -1067,7 +1067,7 @@ def process_jira_form(self, request: HttpRequest, finding: Finding, context: dic
# Determine if a message should be added
if jira_message:
messages.add_message(
- request, messages.SUCCESS, jira_message, extra_tags="alert-success"
+ request, messages.SUCCESS, jira_message, extra_tags="alert-success",
)
return request, True, push_to_jira
@@ -1240,7 +1240,7 @@ def close_finding(request, fid):
finding.notes.add(new_note)
messages.add_message(
- request, messages.SUCCESS, "Note Saved.", extra_tags="alert-success"
+ request, messages.SUCCESS, "Note Saved.", extra_tags="alert-success",
)
if len(missing_note_types) <= 1:
@@ -1307,15 +1307,15 @@ def close_finding(request, fid):
url=reverse("view_finding", args=(finding.id,)),
)
return HttpResponseRedirect(
- reverse("view_test", args=(finding.test.id,))
+ reverse("view_test", args=(finding.test.id,)),
)
else:
return HttpResponseRedirect(
- reverse("close_finding", args=(finding.id,))
+ reverse("close_finding", args=(finding.id,)),
)
product_tab = Product_Tab(
- finding.test.engagement.product, title="Close", tab="findings"
+ finding.test.engagement.product, title="Close", tab="findings",
)
return render(
@@ -1394,7 +1394,7 @@ def defect_finding_review(request, fid):
jira_helper.push_to_jira(finding.finding_group)
messages.add_message(
- request, messages.SUCCESS, "Defect Reviewed", extra_tags="alert-success"
+ request, messages.SUCCESS, "Defect Reviewed", extra_tags="alert-success",
)
return HttpResponseRedirect(reverse("view_test", args=(finding.test.id,)))
@@ -1402,7 +1402,7 @@ def defect_finding_review(request, fid):
form = DefectFindingForm()
product_tab = Product_Tab(
- finding.test.engagement.product, title="Jira Status Review", tab="findings"
+ finding.test.engagement.product, title="Jira Status Review", tab="findings",
)
return render(
@@ -1460,7 +1460,7 @@ def reopen_finding(request, fid):
reopen_external_issue(finding, "re-opened by defectdojo", "github")
messages.add_message(
- request, messages.SUCCESS, "Finding Reopened.", extra_tags="alert-success"
+ request, messages.SUCCESS, "Finding Reopened.", extra_tags="alert-success",
)
# Note: this notification has not be moved to "@receiver(pre_save, sender=Finding)" method as many other notifications
@@ -1508,7 +1508,7 @@ def copy_finding(request, fid):
finding = get_object_or_404(Finding, id=fid)
product = finding.test.engagement.product
tests = get_authorized_tests(Permissions.Test_Edit).filter(
- engagement=finding.test.engagement
+ engagement=finding.test.engagement,
)
form = CopyFindingForm(tests=tests)
@@ -1531,13 +1531,13 @@ def copy_finding(request, fid):
description=f'The finding "{finding.title}" was copied by {request.user} to {test.title}',
product=product,
url=request.build_absolute_uri(
- reverse("copy_finding", args=(finding_copy.id,))
+ reverse("copy_finding", args=(finding_copy.id,)),
),
recipients=[finding.test.engagement.lead],
icon="exclamation-triangle",
)
return redirect_to_return_url_or_else(
- request, reverse("view_test", args=(test.id,))
+ request, reverse("view_test", args=(test.id,)),
)
else:
messages.add_message(
@@ -1571,7 +1571,7 @@ def remediation_date(request, fid):
if form.is_valid():
finding.planned_remediation_date = request.POST.get(
- "planned_remediation_date", ""
+ "planned_remediation_date", "",
)
finding.save()
messages.add_message(
@@ -1605,7 +1605,7 @@ def touch_finding(request, fid):
finding.last_reviewed_by = request.user
finding.save()
return redirect_to_return_url_or_else(
- request, reverse("view_finding", args=(finding.id,))
+ request, reverse("view_finding", args=(finding.id,)),
)
@@ -1619,11 +1619,11 @@ def simple_risk_accept(request, fid):
ra_helper.simple_risk_accept(finding)
messages.add_message(
- request, messages.WARNING, "Finding risk accepted.", extra_tags="alert-success"
+ request, messages.WARNING, "Finding risk accepted.", extra_tags="alert-success",
)
return redirect_to_return_url_or_else(
- request, reverse("view_finding", args=(finding.id,))
+ request, reverse("view_finding", args=(finding.id,)),
)
@@ -1640,7 +1640,7 @@ def risk_unaccept(request, fid):
)
return redirect_to_return_url_or_else(
- request, reverse("view_finding", args=(finding.id,))
+ request, reverse("view_finding", args=(finding.id,)),
)
@@ -1723,7 +1723,7 @@ def request_finding_review(request, fid):
return HttpResponseRedirect(reverse("view_finding", args=(finding.id,)))
product_tab = Product_Tab(
- finding.test.engagement.product, title="Review Finding", tab="findings"
+ finding.test.engagement.product, title="Review Finding", tab="findings",
)
return render(
@@ -1799,7 +1799,7 @@ def clear_finding_review(request, fid):
form = ClearFindingReviewForm(instance=finding)
product_tab = Product_Tab(
- finding.test.engagement.product, title="Clear Finding Review", tab="findings"
+ finding.test.engagement.product, title="Clear Finding Review", tab="findings",
)
return render(
@@ -1838,14 +1838,14 @@ def mktemplate(request, fid):
for vulnerability_id in finding.vulnerability_ids:
Vulnerability_Id_Template(
- finding_template=template, vulnerability_id=vulnerability_id
+ finding_template=template, vulnerability_id=vulnerability_id,
).save()
messages.add_message(
request,
messages.SUCCESS,
mark_safe(
- 'Finding template added successfully. You may edit it <a href="{}">here</a>.'.format(reverse("edit_template", args=(template.id,)))
+ 'Finding template added successfully. You may edit it <a href="{}">here</a>.'.format(reverse("edit_template", args=(template.id,))),
),
extra_tags="alert-success",
)
@@ -1858,7 +1858,7 @@ def find_template_to_apply(request, fid):
test = get_object_or_404(Test, id=finding.test.id)
templates_by_cve = (
Finding_Template.objects.annotate(
- cve_len=Length("cve"), order=models.Value(1, models.IntegerField())
+ cve_len=Length("cve"), order=models.Value(1, models.IntegerField()),
)
.filter(cve=finding.cve, cve_len__gt=0)
.order_by("-last_used")
@@ -1868,7 +1868,7 @@ def find_template_to_apply(request, fid):
Finding_Template.objects.all()
.order_by("-last_used")
.annotate(
- cve_len=Length("cve"), order=models.Value(2, models.IntegerField())
+ cve_len=Length("cve"), order=models.Value(2, models.IntegerField()),
)
)
templates = templates_by_last_used
@@ -1878,11 +1878,11 @@ def find_template_to_apply(request, fid):
.exclude(cve=finding.cve)
.order_by("-last_used")
.annotate(
- cve_len=Length("cve"), order=models.Value(2, models.IntegerField())
+ cve_len=Length("cve"), order=models.Value(2, models.IntegerField()),
)
)
templates = templates_by_last_used.union(templates_by_cve).order_by(
- "order", "-last_used"
+ "order", "-last_used",
)
templates = TemplateFindingFilter(request.GET, queryset=templates)
@@ -1891,7 +1891,7 @@ def find_template_to_apply(request, fid):
# just query all templates as this weird ordering above otherwise breaks Django ORM
title_words = get_words_for_field(Finding_Template, "title")
product_tab = Product_Tab(
- test.engagement.product, title="Apply Template to Finding", tab="findings"
+ test.engagement.product, title="Apply Template to Finding", tab="findings",
)
return render(
request,
@@ -1961,7 +1961,7 @@ def apply_template_to_finding(request, fid, tid):
finding.cve = None
finding_helper.save_vulnerability_ids(
- finding, form.cleaned_data["vulnerability_ids"].split()
+ finding, form.cleaned_data["vulnerability_ids"].split(),
)
finding.save()
@@ -2070,7 +2070,7 @@ def promote_to_finding(request, fid):
jform = None
use_jira = jira_helper.get_jira_project(finding) is not None
product_tab = Product_Tab(
- finding.test.engagement.product, title="Promote Finding", tab="findings"
+ finding.test.engagement.product, title="Promote Finding", tab="findings",
)
if request.method == "POST":
@@ -2087,17 +2087,17 @@ def promote_to_finding(request, fid):
if form.is_valid() and (jform is None or jform.is_valid()):
if jform:
logger.debug(
- "jform.jira_issue: %s", jform.cleaned_data.get("jira_issue")
+ "jform.jira_issue: %s", jform.cleaned_data.get("jira_issue"),
)
logger.debug(
- JFORM_PUSH_TO_JIRA_MESSAGE, jform.cleaned_data.get("push_to_jira")
+ JFORM_PUSH_TO_JIRA_MESSAGE, jform.cleaned_data.get("push_to_jira"),
)
new_finding = form.save(commit=False)
new_finding.test = test
new_finding.reporter = request.user
new_finding.numerical_severity = Finding.get_numerical_severity(
- new_finding.severity
+ new_finding.severity,
)
new_finding.active = True
@@ -2116,7 +2116,7 @@ def promote_to_finding(request, fid):
# Push to Jira?
logger.debug("jira form valid")
push_to_jira = push_all_jira_issues or jform.cleaned_data.get(
- "push_to_jira"
+ "push_to_jira",
)
# if the jira issue key was changed, update database
@@ -2138,7 +2138,7 @@ def promote_to_finding(request, fid):
elif new_jira_issue_key != new_finding.jira_issue.jira_key:
jira_helper.finding_unlink_jira(request, new_finding)
jira_helper.finding_link_jira(
- request, new_finding, new_jira_issue_key
+ request, new_finding, new_jira_issue_key,
)
else:
logger.debug("finding has no jira issue yet")
@@ -2146,11 +2146,11 @@ def promote_to_finding(request, fid):
logger.debug(
"finding has no jira issue yet, but jira issue specified in request. trying to link.")
jira_helper.finding_link_jira(
- request, new_finding, new_jira_issue_key
+ request, new_finding, new_jira_issue_key,
)
finding_helper.save_vulnerability_ids(
- new_finding, form.cleaned_data["vulnerability_ids"].split()
+ new_finding, form.cleaned_data["vulnerability_ids"].split(),
)
new_finding.save(push_to_jira=push_to_jira)
@@ -2161,7 +2161,7 @@ def promote_to_finding(request, fid):
request.POST,
prefix="githubform",
enabled=GITHUB_PKey.objects.get(
- product=test.engagement.product
+ product=test.engagement.product,
).push_all_issues,
)
if gform.is_valid():
@@ -2178,7 +2178,7 @@ def promote_to_finding(request, fid):
else:
form_error = True
add_error_message_to_response(
- "The form has errors, please correct them below."
+ "The form has errors, please correct them below.",
)
add_field_errors_to_response(jform)
add_field_errors_to_response(form)
@@ -2261,7 +2261,7 @@ def apply_cwe_mitigation(apply_to_findings, template, update=True):
)
else:
finding_templates = Finding_Template.objects.filter(
- cwe=template.cwe, template_match=True, template_match_title=True
+ cwe=template.cwe, template_match=True, template_match_title=True,
)
finding_ids = None
@@ -2282,11 +2282,11 @@ def apply_cwe_mitigation(apply_to_findings, template, update=True):
# If result_list is None the filter exclude won't work
if result_list:
count = Finding.objects.filter(
- active=True, verified=True, cwe=template.cwe
+ active=True, verified=True, cwe=template.cwe,
).exclude(id__in=result_list)
else:
count = Finding.objects.filter(
- active=True, verified=True, cwe=template.cwe
+ active=True, verified=True, cwe=template.cwe,
)
if update:
@@ -2302,7 +2302,7 @@ def apply_cwe_mitigation(apply_to_findings, template, update=True):
f"CWE remediation text applied to finding for CWE: {template.cwe} using template: {template.title}."
)
new_note.author, _created = User.objects.get_or_create(
- username="System"
+ username="System",
)
new_note.save()
finding.notes.add(new_note)
@@ -2321,15 +2321,15 @@ def add_template(request):
apply_message = ""
template = form.save(commit=False)
template.numerical_severity = Finding.get_numerical_severity(
- template.severity
+ template.severity,
)
template.save()
finding_helper.save_vulnerability_ids_template(
- template, form.cleaned_data["vulnerability_ids"].split()
+ template, form.cleaned_data["vulnerability_ids"].split(),
)
form.save_m2m()
count = apply_cwe_mitigation(
- form.cleaned_data["apply_to_findings"], template
+ form.cleaned_data["apply_to_findings"], template,
)
if count > 0:
apply_message = (
@@ -2352,7 +2352,7 @@ def add_template(request):
)
add_breadcrumb(title="Add Template", top_level=False, request=request)
return render(
- request, "dojo/add_template.html", {"form": form, "name": "Add Template"}
+ request, "dojo/add_template.html", {"form": form, "name": "Add Template"},
)
@@ -2369,16 +2369,16 @@ def edit_template(request, tid):
if form.is_valid():
template = form.save(commit=False)
template.numerical_severity = Finding.get_numerical_severity(
- template.severity
+ template.severity,
)
finding_helper.save_vulnerability_ids_template(
- template, form.cleaned_data["vulnerability_ids"].split()
+ template, form.cleaned_data["vulnerability_ids"].split(),
)
template.save()
form.save_m2m()
count = apply_cwe_mitigation(
- form.cleaned_data["apply_to_findings"], template
+ form.cleaned_data["apply_to_findings"], template,
)
if count > 0:
apply_message = (
@@ -2516,10 +2516,10 @@ def merge_finding_product(request, pid):
request.GET.get("merge_findings") or request.method == "POST"
) and finding_to_update:
finding = Finding.objects.get(
- id=finding_to_update[0], test__engagement__product=product
+ id=finding_to_update[0], test__engagement__product=product,
)
findings = Finding.objects.filter(
- id__in=finding_to_update, test__engagement__product=product
+ id__in=finding_to_update, test__engagement__product=product,
)
form = MergeFindings(
finding=finding,
@@ -2540,7 +2540,7 @@ def merge_finding_product(request, pid):
if finding_to_merge_into not in findings_to_merge:
for finding in findings_to_merge.exclude(
- pk=finding_to_merge_into.pk
+ pk=finding_to_merge_into.pk,
):
notes_entry = f"{notes_entry}\n- {finding.title} ({finding.id}),"
if finding.static_finding:
@@ -2565,7 +2565,7 @@ def merge_finding_product(request, pid):
# if checked merge the endpoints
if form.cleaned_data["add_endpoints"]:
finding_to_merge_into.endpoints.add(
- *finding.endpoints.all()
+ *finding.endpoints.all(),
)
# if checked merge the tags
@@ -2576,7 +2576,7 @@ def merge_finding_product(request, pid):
# if checked re-assign the burp requests to the merged finding
if form.cleaned_data["dynamic_raw"]:
BurpRawRequestResponse.objects.filter(
- finding=finding
+ finding=finding,
).update(finding=finding_to_merge_into)
# Add merge finding information to the note if set to inactive
@@ -2584,7 +2584,7 @@ def merge_finding_product(request, pid):
single_finding_notes_entry = ("Finding has been set to inactive "
f"and merged with the finding: {finding_to_merge_into.title}.")
note = Notes(
- entry=single_finding_notes_entry, author=request.user
+ entry=single_finding_notes_entry, author=request.user,
)
note.save()
finding.notes.add(note)
@@ -2646,7 +2646,7 @@ def merge_finding_product(request, pid):
extra_tags="alert-success",
)
return HttpResponseRedirect(
- reverse("edit_finding", args=(finding_to_merge_into.id,))
+ reverse("edit_finding", args=(finding_to_merge_into.id,)),
)
else:
messages.add_message(
@@ -2664,14 +2664,14 @@ def merge_finding_product(request, pid):
)
product_tab = Product_Tab(
- finding.test.engagement.product, title="Merge Findings", tab="findings"
+ finding.test.engagement.product, title="Merge Findings", tab="findings",
)
custom_breadcrumb = {
"Open Findings": reverse(
- "product_open_findings", args=(finding.test.engagement.product.id,)
+ "product_open_findings", args=(finding.test.engagement.product.id,),
)
+ "?test__engagement__product="
- + str(finding.test.engagement.product.id)
+ + str(finding.test.engagement.product.id),
}
return render(
@@ -2709,11 +2709,11 @@ def finding_bulk_update_all(request, pid=None):
if pid is not None:
product = get_object_or_404(Product, id=pid)
user_has_permission_or_403(
- request.user, product, Permissions.Finding_Delete
+ request.user, product, Permissions.Finding_Delete,
)
finds = get_authorized_findings(
- Permissions.Finding_Delete, finds
+ Permissions.Finding_Delete, finds,
).distinct()
skipped_find_count = total_find_count - finds.count()
@@ -2724,7 +2724,7 @@ def finding_bulk_update_all(request, pid=None):
if skipped_find_count > 0:
add_error_message_to_response(
- f"Skipped deletion of {skipped_find_count} findings because you are not authorized."
+ f"Skipped deletion of {skipped_find_count} findings because you are not authorized.",
)
if deleted_find_count > 0:
@@ -2739,12 +2739,12 @@ def finding_bulk_update_all(request, pid=None):
if pid is not None:
product = get_object_or_404(Product, id=pid)
user_has_permission_or_403(
- request.user, product, Permissions.Finding_Edit
+ request.user, product, Permissions.Finding_Edit,
)
# make sure users are not editing stuff they are not authorized for
finds = get_authorized_findings(
- Permissions.Finding_Edit, finds
+ Permissions.Finding_Edit, finds,
).distinct()
skipped_find_count = total_find_count - finds.count()
@@ -2752,7 +2752,7 @@ def finding_bulk_update_all(request, pid=None):
if skipped_find_count > 0:
add_error_message_to_response(
- f"Skipped update of {skipped_find_count} findings because you are not authorized."
+ f"Skipped update of {skipped_find_count} findings because you are not authorized.",
)
finds = prefetch_for_findings(finds)
@@ -2763,7 +2763,7 @@ def finding_bulk_update_all(request, pid=None):
if form.cleaned_data["severity"]:
find.severity = form.cleaned_data["severity"]
find.numerical_severity = Finding.get_numerical_severity(
- form.cleaned_data["severity"]
+ form.cleaned_data["severity"],
)
find.last_reviewed = now
find.last_reviewed_by = request.user
@@ -2793,7 +2793,7 @@ def finding_bulk_update_all(request, pid=None):
logger.debug('FALSE_POSITIVE_HISTORY: Reactivating existing findings based on: %s', find)
existing_fp_findings = match_finding_to_existing_findings(
- find, product=find.test.engagement.product
+ find, product=find.test.engagement.product,
).filter(false_p=True)
for fp in existing_fp_findings:
@@ -2859,20 +2859,20 @@ def finding_bulk_update_all(request, pid=None):
finding_group_name = form.cleaned_data["finding_group_create_name"]
logger.debug("finding_group_create_name: %s", finding_group_name)
finding_group, added, skipped = finding_helper.create_finding_group(
- finds, finding_group_name
+ finds, finding_group_name,
)
if added:
add_success_message_to_response(
- f"Created finding group with {added} findings"
+ f"Created finding group with {added} findings",
)
return_url = reverse(
- "view_finding_group", args=(finding_group.id,)
+ "view_finding_group", args=(finding_group.id,),
)
if skipped:
add_success_message_to_response(
- f"Skipped {skipped} findings in group creation, findings already part of another group"
+ f"Skipped {skipped} findings in group creation, findings already part of another group",
)
# refresh findings from db
@@ -2883,21 +2883,21 @@ def finding_bulk_update_all(request, pid=None):
fgid = form.cleaned_data["add_to_finding_group_id"]
finding_group = Finding_Group.objects.get(id=fgid)
finding_group, added, skipped = finding_helper.add_to_finding_group(
- finding_group, finds
+ finding_group, finds,
)
if added:
add_success_message_to_response(
- f"Added {added} findings to finding group {finding_group.name}"
+ f"Added {added} findings to finding group {finding_group.name}",
)
return_url = reverse(
- "view_finding_group", args=(finding_group.id,)
+ "view_finding_group", args=(finding_group.id,),
)
if skipped:
add_success_message_to_response(
f"Skipped {skipped} findings when adding to finding group {finding_group.name}, "
- "findings already part of another group"
+ "findings already part of another group",
)
# refresh findings from db
@@ -2919,14 +2919,14 @@ def finding_bulk_update_all(request, pid=None):
[
finding_group.name
for finding_group in finding_groups
- ]
+ ],
),
- )
+ ),
)
if skipped:
add_success_message_to_response(
- f"Skipped {skipped} findings when removing from any finding group, findings not part of any group"
+ f"Skipped {skipped} findings when removing from any finding group, findings not part of any group",
)
# refresh findings from db
@@ -2950,13 +2950,13 @@ def finding_bulk_update_all(request, pid=None):
if grouped:
add_success_message_to_response(
"Grouped %d findings into %d (%d newly created) finding groups"
- % (grouped, len(finding_groups), groups_created)
+ % (grouped, len(finding_groups), groups_created),
)
if skipped:
add_success_message_to_response(
f"Skipped {skipped} findings when grouping by {finding_group_by_option} as these findings "
- "were already in an existing group"
+ "were already in an existing group",
)
# refresh findings from db
@@ -2982,7 +2982,7 @@ def finding_bulk_update_all(request, pid=None):
)
note.save()
history = NoteHistory(
- data=note.entry, time=note.date, current_editor=note.author
+ data=note.entry, time=note.date, current_editor=note.author,
)
history.save()
note.history.add(history)
@@ -3022,7 +3022,7 @@ def finding_bulk_update_all(request, pid=None):
jira_helper.log_jira_alert(error_message, group)
else:
logger.debug(
- "pushing to jira from finding.finding_bulk_update_all()"
+ "pushing to jira from finding.finding_bulk_update_all()",
)
jira_helper.push_to_jira(group)
success_count += 1
@@ -3030,12 +3030,12 @@ def finding_bulk_update_all(request, pid=None):
for error_message, error_count in error_counts.items():
add_error_message_to_response(
"%i finding groups could not be pushed to JIRA: %s"
- % (error_count, error_message)
+ % (error_count, error_message),
)
if success_count > 0:
add_success_message_to_response(
- "%i finding groups pushed to JIRA successfully" % success_count
+ "%i finding groups pushed to JIRA successfully" % success_count,
)
groups_pushed_to_jira = True
@@ -3080,7 +3080,7 @@ def finding_bulk_update_all(request, pid=None):
jira_helper.log_jira_alert(error_message, finding)
else:
logger.debug(
- "pushing to jira from finding.finding_bulk_update_all()"
+ "pushing to jira from finding.finding_bulk_update_all()",
)
jira_helper.push_to_jira(finding)
success_count += 1
@@ -3088,12 +3088,12 @@ def finding_bulk_update_all(request, pid=None):
for error_message, error_count in error_counts.items():
add_error_message_to_response(
"%i findings could not be pushed to JIRA: %s"
- % (error_count, error_message)
+ % (error_count, error_message),
)
if success_count > 0:
add_success_message_to_response(
- "%i findings pushed to JIRA successfully" % success_count
+ "%i findings pushed to JIRA successfully" % success_count,
)
if updated_find_count > 0:
@@ -3119,10 +3119,10 @@ def finding_bulk_update_all(request, pid=None):
def find_available_notetypes(notes):
single_note_types = Note_Type.objects.filter(
- is_single=True, is_active=True
+ is_single=True, is_active=True,
).values_list("id", flat=True)
multiple_note_types = Note_Type.objects.filter(
- is_single=False, is_active=True
+ is_single=False, is_active=True,
).values_list("id", flat=True)
available_note_types = []
for note_type_id in multiple_note_types:
@@ -3140,7 +3140,7 @@ def find_available_notetypes(notes):
def get_missing_mandatory_notetypes(finding):
notes = finding.notes.all()
mandatory_note_types = Note_Type.objects.filter(
- is_mandatory=True, is_active=True
+ is_mandatory=True, is_active=True,
).values_list("id", flat=True)
notes_to_be_added = []
for note_type_id in mandatory_note_types:
@@ -3171,7 +3171,7 @@ def mark_finding_duplicate(request, original_id, duplicate_id):
extra_tags="alert-danger",
)
return redirect_to_return_url_or_else(
- request, reverse("view_finding", args=(duplicate.id,))
+ request, reverse("view_finding", args=(duplicate.id,)),
)
duplicate.duplicate = True
@@ -3196,7 +3196,7 @@ def mark_finding_duplicate(request, original_id, duplicate_id):
original.save(dedupe_option=False)
return redirect_to_return_url_or_else(
- request, reverse("view_finding", args=(duplicate.id,))
+ request, reverse("view_finding", args=(duplicate.id,)),
)
@@ -3223,7 +3223,7 @@ def reset_finding_duplicate_status_internal(user, duplicate_id):
@require_POST
def reset_finding_duplicate_status(request, duplicate_id):
checked_duplicate_id = reset_finding_duplicate_status_internal(
- request.user, duplicate_id
+ request.user, duplicate_id,
)
if checked_duplicate_id is None:
messages.add_message(
@@ -3233,11 +3233,11 @@ def reset_finding_duplicate_status(request, duplicate_id):
extra_tags="alert-danger",
)
return redirect_to_return_url_or_else(
- request, reverse("view_finding", args=(duplicate_id,))
+ request, reverse("view_finding", args=(duplicate_id,)),
)
return redirect_to_return_url_or_else(
- request, reverse("view_finding", args=(checked_duplicate_id,))
+ request, reverse("view_finding", args=(checked_duplicate_id,)),
)
@@ -3274,7 +3274,7 @@ def set_finding_as_original_internal(user, finding_id, new_original_id):
cluster_member.save(dedupe_option=False)
logger.debug(
- "setting new original for old root %i to %i", finding.id, new_original.id
+ "setting new original for old root %i to %i", finding.id, new_original.id,
)
finding.duplicate = True
finding.duplicate_finding = new_original
@@ -3302,7 +3302,7 @@ def set_finding_as_original_internal(user, finding_id, new_original_id):
@require_POST
def set_finding_as_original(request, finding_id, new_original_id):
success = set_finding_as_original_internal(
- request.user, finding_id, new_original_id
+ request.user, finding_id, new_original_id,
)
if not success:
messages.add_message(
@@ -3314,7 +3314,7 @@ def set_finding_as_original(request, finding_id, new_original_id):
)
return redirect_to_return_url_or_else(
- request, reverse("view_finding", args=(finding_id,))
+ request, reverse("view_finding", args=(finding_id,)),
)
@@ -3323,7 +3323,7 @@ def set_finding_as_original(request, finding_id, new_original_id):
def unlink_jira(request, fid):
finding = get_object_or_404(Finding, id=fid)
logger.info(
- "trying to unlink a linked jira issue from %d:%s", finding.id, finding.title
+ "trying to unlink a linked jira issue from %d:%s", finding.id, finding.title,
)
if finding.has_jira_issue:
try:
@@ -3349,7 +3349,7 @@ def unlink_jira(request, fid):
return HttpResponse(status=500)
else:
messages.add_message(
- request, messages.ERROR, "Link to JIRA not found", extra_tags="alert-danger"
+ request, messages.ERROR, "Link to JIRA not found", extra_tags="alert-danger",
)
return HttpResponse(status=400)
@@ -3390,7 +3390,7 @@ def push_to_jira(request, fid):
logger.exception(e)
logger.error("Error pushing to JIRA: ", exc_info=True)
messages.add_message(
- request, messages.ERROR, "Error pushing to JIRA", extra_tags="alert-danger"
+ request, messages.ERROR, "Error pushing to JIRA", extra_tags="alert-danger",
)
return HttpResponse(status=500)
@@ -3405,7 +3405,7 @@ def duplicate_cluster(request, finding):
for duplicate_member in duplicate_cluster:
duplicate_member.related_actions = (
calculate_possible_related_actions_for_similar_finding(
- request, finding, duplicate_member
+ request, finding, duplicate_member,
)
)
@@ -3418,7 +3418,7 @@ def duplicate_cluster(request, finding):
# these actions are always calculated in the context of the finding the user is viewing
# because this determines which actions are possible
def calculate_possible_related_actions_for_similar_finding(
- request, finding, similar_finding
+ request, finding, similar_finding,
):
actions = []
if similar_finding.test.engagement != finding.test.engagement and (
@@ -3430,7 +3430,7 @@ def calculate_possible_related_actions_for_similar_finding(
"action": "None",
"reason": ("This finding is in a different engagement and deduplication_inside_engagment "
"is enabled here or in that finding"),
- }
+ },
)
elif finding.duplicate_finding == similar_finding:
actions.append(
@@ -3438,7 +3438,7 @@ def calculate_possible_related_actions_for_similar_finding(
"action": "None",
"reason": ("This finding is the root of the cluster, use an action on another row, "
"or the finding on top of the page to change the root of the cluser"),
- }
+ },
)
elif similar_finding.original_finding.all():
actions.append(
@@ -3446,7 +3446,7 @@ def calculate_possible_related_actions_for_similar_finding(
"action": "None",
"reason": ("This finding is similar, but is already an original in a different cluster. "
"Remove it from that cluster before you connect it to this cluster."),
- }
+ },
)
else:
if similar_finding.duplicate_finding:
@@ -3457,7 +3457,7 @@ def calculate_possible_related_actions_for_similar_finding(
"reason": ("This will remove the finding from the cluster, "
"effectively marking it no longer as duplicate. "
"Will not trigger deduplication logic after saving."),
- }
+ },
)
if (
@@ -3471,7 +3471,7 @@ def calculate_possible_related_actions_for_similar_finding(
"reason": ("Sets this finding as the Original for the whole cluster. "
"The existing Original will be downgraded to become a member of the cluster and, "
"together with the other members, will be marked as duplicate of the new Original."),
- }
+ },
)
else:
# duplicate inside different cluster
@@ -3480,7 +3480,7 @@ def calculate_possible_related_actions_for_similar_finding(
"action": "mark_finding_duplicate",
"reason": ("Will mark this finding as duplicate of the root finding in this cluster, "
"effectively adding it to the cluster and removing it from the other cluster."),
- }
+ },
)
else:
# similar is not a duplicate yet
@@ -3489,7 +3489,7 @@ def calculate_possible_related_actions_for_similar_finding(
{
"action": "mark_finding_duplicate",
"reason": "Will mark this finding as duplicate of the root finding in this cluster",
- }
+ },
)
actions.append(
{
@@ -3497,7 +3497,7 @@ def calculate_possible_related_actions_for_similar_finding(
"reason": ("Sets this finding as the Original for the whole cluster. "
"The existing Original will be downgraded to become a member of the cluster and, "
"together with the other members, will be marked as duplicate of the new Original."),
- }
+ },
)
else:
# similar_finding is not an original/root of a cluster as per earlier if clause
@@ -3505,14 +3505,14 @@ def calculate_possible_related_actions_for_similar_finding(
{
"action": "mark_finding_duplicate",
"reason": "Will mark this finding as duplicate of the finding on this page.",
- }
+ },
)
actions.append(
{
"action": "set_finding_as_original",
"reason": ("Sets this finding as the Original marking the finding "
"on this page as duplicate of this original."),
- }
+ },
)
return actions
diff --git a/dojo/finding_group/signals.py b/dojo/finding_group/signals.py
index 1dc0e339e9..e73927f13a 100644
--- a/dojo/finding_group/signals.py
+++ b/dojo/finding_group/signals.py
@@ -17,7 +17,7 @@ def finding_group_post_delete(sender, instance, using, origin, **kwargs):
le = LogEntry.objects.get(
action=LogEntry.Action.DELETE,
content_type=ContentType.objects.get(app_label='dojo', model='finding_group'),
- object_id=instance.id
+ object_id=instance.id,
)
description = _('The finding group "%(name)s" was deleted by %(user)s') % {
'name': instance.name, 'user': le.actor}
diff --git a/dojo/forms.py b/dojo/forms.py
index 734d97586a..91c16eb3d5 100644
--- a/dojo/forms.py
+++ b/dojo/forms.py
@@ -169,8 +169,7 @@ def render(self, name, value, attrs=None, renderer=None):
if isinstance(value, str):
match = RE_DATE.match(value)
if match:
- year_val,
- month_val,
+ year_val, month_val = match[1], match[2]
output = []
@@ -540,13 +539,13 @@ class ImportScanForm(forms.Form):
help_text="If set to True, the tags will be applied to the findings",
label="Apply Tags to Findings",
required=False,
- initial=False
+ initial=False,
)
apply_tags_to_endpoints = forms.BooleanField(
help_text="If set to True, the tags will be applied to the endpoints",
label="Apply Tags to Endpoints",
required=False,
- initial=False
+ initial=False,
)
if is_finding_groups_enabled():
@@ -652,13 +651,13 @@ class ReImportScanForm(forms.Form):
help_text="If set to True, the tags will be applied to the findings",
label="Apply Tags to Findings",
required=False,
- initial=False
+ initial=False,
)
apply_tags_to_endpoints = forms.BooleanField(
help_text="If set to True, the tags will be applied to the endpoints",
label="Apply Tags to Endpoints",
required=False,
- initial=False
+ initial=False,
)
if is_finding_groups_enabled():
@@ -724,17 +723,17 @@ class ImportEndpointMetaForm(forms.Form):
label="Create nonexisting Endpoint",
initial=True,
required=False,
- help_text="Create endpoints that do not already exist",)
+ help_text="Create endpoints that do not already exist")
create_tags = forms.BooleanField(
label="Add Tags",
initial=True,
required=False,
- help_text="Add meta from file as tags in the format key:value",)
+ help_text="Add meta from file as tags in the format key:value")
create_dojo_meta = forms.BooleanField(
label="Add Meta",
initial=False,
required=False,
- help_text="Add data from file as Metadata. Metadata is used for displaying custom fields",)
+ help_text="Add data from file as Metadata. Metadata is used for displaying custom fields")
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
@@ -885,7 +884,7 @@ class AddFindingsRiskAcceptanceForm(forms.ModelForm):
queryset=Finding.objects.none(),
required=True,
label="",
- widget=TableCheckboxWidget(attrs={'size': 25})
+ widget=TableCheckboxWidget(attrs={'size': 25}),
)
class Meta:
@@ -926,7 +925,7 @@ class Meta:
'configuration_management', 'config_issues', 'authentication', 'auth_issues',
'authorization_and_access_control', 'author_issues',
'data_input_sanitization_validation', 'data_issues',
- 'sensitive_data', 'sensitive_issues', 'other', 'other_issues', ]
+ 'sensitive_data', 'sensitive_issues', 'other', 'other_issues']
class EngForm(forms.ModelForm):
@@ -1607,7 +1606,7 @@ def clean(self):
path=path,
query=query,
fragment=fragment,
- product=self.product
+ product=self.product,
)
if endpoint.count() > 1 or (endpoint.count() == 1 and endpoint.first().pk != self.endpoint_instance.pk):
msg = 'It appears as though an endpoint with this data already exists for this product.'
@@ -1651,7 +1650,7 @@ def save(self):
path=e[4],
query=e[5],
fragment=e[6],
- product=self.product
+ product=self.product,
)
processed_endpoints.append(endpoint)
return processed_endpoints
@@ -3120,12 +3119,12 @@ class LoginBanner(forms.Form):
label="Enable login banner",
initial=False,
required=False,
- help_text='Tick this box to enable a text banner on the login page'
+ help_text='Tick this box to enable a text banner on the login page',
)
banner_message = forms.CharField(
required=False,
- label="Message to display on the login page"
+ label="Message to display on the login page",
)
def clean(self):
@@ -3196,7 +3195,7 @@ def __init__(self, *args, **kwargs):
initial_answer = TextAnswer.objects.filter(
answered_survey=self.answered_survey,
- question=self.question
+ question=self.question,
)
if initial_answer.exists():
@@ -3271,14 +3270,14 @@ def __init__(self, *args, **kwargs):
required=not self.question.optional,
choices=choices,
initial=initial_choices,
- widget=widget
+ widget=widget,
)
self.fields['answer'] = field
# Render choice buttons inline
self.helper.layout = Layout(
- inline_type('answer')
+ inline_type('answer'),
)
def clean_answer(self):
@@ -3318,7 +3317,7 @@ def save(self):
# create a ChoiceAnswer
choice_answer = ChoiceAnswer.objects.create(
answered_survey=self.answered_survey,
- question=self.question
+ question=self.question,
)
# re save out the choices
diff --git a/dojo/github_issue_link/views.py b/dojo/github_issue_link/views.py
index aa4e9269cb..f575cf2d4d 100644
--- a/dojo/github_issue_link/views.py
+++ b/dojo/github_issue_link/views.py
@@ -44,14 +44,14 @@ def new_github(request):
messages.SUCCESS,
'GitHub Configuration Successfully Created.',
extra_tags='alert-success')
- return HttpResponseRedirect(reverse('github', ))
+ return HttpResponseRedirect(reverse('github'))
except Exception as info:
logger.error(info)
messages.add_message(request,
messages.ERROR,
'Unable to authenticate on GitHub.',
extra_tags='alert-danger')
- return HttpResponseRedirect(reverse('github', ))
+ return HttpResponseRedirect(reverse('github'))
else:
gform = GITHUBForm()
add_breadcrumb(title="New GitHub Configuration", top_level=False, request=request)
diff --git a/dojo/group/urls.py b/dojo/group/urls.py
index 5348f97c1d..ddf7f03bd9 100644
--- a/dojo/group/urls.py
+++ b/dojo/group/urls.py
@@ -13,5 +13,5 @@
re_path(r'^group/(?P<gid>\d+)/add_group_member$', views.add_group_member, name='add_group_member'),
re_path(r'group/member/(?P<mid>\d+)/edit_group_member$', views.edit_group_member, name='edit_group_member'),
re_path(r'group/member/(?P<mid>\d+)/delete_group_member$', views.delete_group_member, name='delete_group_member'),
- re_path(r'^group/(?P<gid>\d+)/edit_permissions$', views.edit_permissions, name='edit_group_permissions')
+ re_path(r'^group/(?P<gid>\d+)/edit_permissions$', views.edit_permissions, name='edit_group_permissions'),
]
diff --git a/dojo/group/views.py b/dojo/group/views.py
index 46d2dd3196..1aea50fe35 100644
--- a/dojo/group/views.py
+++ b/dojo/group/views.py
@@ -244,7 +244,7 @@ def get_initial_context(self, request: HttpRequest, group: Dojo_Group):
return {
"form": self.get_group_form(request, group),
"to_delete": group,
- "rels": collector.nested()
+ "rels": collector.nested(),
}
def process_forms(self, request: HttpRequest, group: Dojo_Group, context: dict):
@@ -418,7 +418,7 @@ def add_group_member(request, gid):
add_breadcrumb(title="Add Group Member", top_level=False, request=request)
return render(request, 'dojo/new_group_member.html', {
'group': group,
- 'form': groupform
+ 'form': groupform,
})
@@ -460,7 +460,7 @@ def edit_group_member(request, mid):
add_breadcrumb(title="Edit a Group Member", top_level=False, request=request)
return render(request, 'dojo/edit_group_member.html', {
'memberid': mid,
- 'form': memberform
+ 'form': memberform,
})
@@ -501,7 +501,7 @@ def delete_group_member(request, mid):
add_breadcrumb("Delete a group member", top_level=False, request=request)
return render(request, 'dojo/delete_group_member.html', {
'memberid': mid,
- 'form': memberform
+ 'form': memberform,
})
@@ -531,7 +531,7 @@ def add_product_group(request, gid):
add_breadcrumb(title="Add Product Group", top_level=False, request=request)
return render(request, 'dojo/new_product_group_group.html', {
'group': group,
- 'form': group_form
+ 'form': group_form,
})
diff --git a/dojo/home/views.py b/dojo/home/views.py
index 26039f4a28..b79a5bf843 100644
--- a/dojo/home/views.py
+++ b/dojo/home/views.py
@@ -47,7 +47,7 @@ def dashboard(request: HttpRequest) -> HttpResponse:
punchcard, ticks = get_punchcard_data(findings, today - relativedelta(weeks=26), 26)
if user_has_configuration_permission(request.user, 'dojo.view_engagement_survey'):
- unassigned_surveys = Answered_Survey.objects.filter(assignee_id__isnull=True, completed__gt=0, ) \
+ unassigned_surveys = Answered_Survey.objects.filter(assignee_id__isnull=True, completed__gt=0) \
.filter(Q(engagement__isnull=True) | Q(engagement__in=engagements))
else:
unassigned_surveys = None
diff --git a/dojo/importers/auto_create_context.py b/dojo/importers/auto_create_context.py
index 6122196d48..a0c24bffa8 100644
--- a/dojo/importers/auto_create_context.py
+++ b/dojo/importers/auto_create_context.py
@@ -296,7 +296,7 @@ def get_or_create_engagement(
engagement = self.get_target_engagement_if_exists(
engagement_id=engagement_id,
engagement_name=engagement_name,
- product=product
+ product=product,
)
# If we have an engagement, we can just return it
if engagement:
diff --git a/dojo/importers/base_importer.py b/dojo/importers/base_importer.py
index 449a9074b8..a2f4bb6794 100644
--- a/dojo/importers/base_importer.py
+++ b/dojo/importers/base_importer.py
@@ -299,7 +299,7 @@ def update_timestamps(self):
# target end date on the engagement
if self.test.engagement.engagement_type == 'CI/CD':
self.test.engagement.target_end = max_safe(
- [self.scan_date.date(), self.test.engagement.target_end]
+ [self.scan_date.date(), self.test.engagement.target_end],
)
# Set the target end date on the test in a similar fashion
max_test_start_date = max_safe([self.scan_date, self.test.target_end])
@@ -338,7 +338,7 @@ def update_import_history(
f"new: {len(new_findings)} "
f"closed: {len(closed_findings)} "
f"reactivated: {len(reactivated_findings)} "
- f"untouched: {len(untouched_findings)} "
+ f"untouched: {len(untouched_findings)} ",
)
# Create a dictionary to stuff into the test import object
import_settings = {}
@@ -597,7 +597,7 @@ def process_finding_groups(
def process_request_response_pairs(
self,
- finding: Finding
+ finding: Finding,
) -> None:
"""
Search the unsaved finding for the following attributes to determine
@@ -648,7 +648,7 @@ def process_endpoints(
def process_vulnerability_ids(
self,
- finding: Finding
+ finding: Finding,
) -> Finding:
"""
Parse the `unsaved_vulnerability_ids` field from findings after they are parsed
diff --git a/dojo/importers/default_importer.py b/dojo/importers/default_importer.py
index 4591fe3633..78bb761feb 100644
--- a/dojo/importers/default_importer.py
+++ b/dojo/importers/default_importer.py
@@ -216,7 +216,7 @@ def process_findings(
findings,
self.group_by,
create_finding_groups_for_all_findings=self.create_finding_groups_for_all_findings,
- **kwargs
+ **kwargs,
)
if self.push_to_jira:
if findings[0].finding_group is not None:
@@ -226,7 +226,7 @@ def process_findings(
sync = kwargs.get('sync', True)
if not sync:
- return [serialize('json', [finding, ]) for finding in new_findings]
+ return [serialize('json', [finding]) for finding in new_findings]
return new_findings
def close_old_findings(
@@ -259,12 +259,12 @@ def close_old_findings(
# Get the initial filtered list of old findings to be closed without
# considering the scope of the product or engagement
old_findings = Finding.objects.exclude(
- test=self.test
+ test=self.test,
).exclude(
- hash_code__in=new_hash_codes
+ hash_code__in=new_hash_codes,
).filter(
test__test_type=self.test.test_type,
- active=True
+ active=True,
)
# Accommodate for product scope or engagement scope
if self.close_old_findings_product_scope:
diff --git a/dojo/importers/default_reimporter.py b/dojo/importers/default_reimporter.py
index 0c930d9df7..ad0260f714 100644
--- a/dojo/importers/default_reimporter.py
+++ b/dojo/importers/default_reimporter.py
@@ -210,12 +210,12 @@ def process_findings(
if finding.dynamic_finding:
logger.debug(
"Re-import found an existing dynamic finding for this new "
- "finding. Checking the status of endpoints"
+ "finding. Checking the status of endpoints",
)
self.endpoint_manager.update_endpoint_status(
existing_finding,
unsaved_finding,
- self.user
+ self.user,
)
else:
finding = self.process_finding_that_was_not_matched(unsaved_finding)
@@ -372,18 +372,18 @@ def match_new_finding_to_existing_finding(
if self.deduplication_algorithm == 'hash_code':
return Finding.objects.filter(
test=self.test,
- hash_code=unsaved_finding.hash_code
+ hash_code=unsaved_finding.hash_code,
).exclude(hash_code=None).order_by('id')
elif self.deduplication_algorithm == 'unique_id_from_tool':
return Finding.objects.filter(
test=self.test,
- unique_id_from_tool=unsaved_finding.unique_id_from_tool
+ unique_id_from_tool=unsaved_finding.unique_id_from_tool,
).exclude(unique_id_from_tool=None).order_by('id')
elif self.deduplication_algorithm == 'unique_id_from_tool_or_hash_code':
query = Finding.objects.filter(
Q(test=self.test),
(Q(hash_code__isnull=False) & Q(hash_code=unsaved_finding.hash_code))
- | (Q(unique_id_from_tool__isnull=False) & Q(unique_id_from_tool=unsaved_finding.unique_id_from_tool))
+ | (Q(unique_id_from_tool__isnull=False) & Q(unique_id_from_tool=unsaved_finding.unique_id_from_tool)),
).order_by('id')
deduplicationLogger.debug(query.query)
return query
@@ -440,7 +440,7 @@ def process_matched_special_status_finding(
f"Skipping existing finding (it is marked as false positive: {existing_finding.false_p} "
f"and/or out of scope: {existing_finding.out_of_scope} or is a risk accepted: "
f"{existing_finding.risk_accepted}) - {existing_finding.id}: {existing_finding.title} "
- f"({existing_finding.component_name} - {existing_finding.component_version})"
+ f"({existing_finding.component_name} - {existing_finding.component_version})",
)
# If all statuses are the same between findings, we can safely move on to the next
# finding in the report. Return True here to force a continue in the loop
@@ -499,7 +499,7 @@ def process_matched_mitigated_finding(
logger.debug(
"Skipping reactivating by user's choice do_not_reactivate: "
f" - {existing_finding.id}: {existing_finding.title} "
- f"({existing_finding.component_name} - {existing_finding.component_version})"
+ f"({existing_finding.component_name} - {existing_finding.component_version})",
)
# Search for an existing note that this finding has been skipped for reactivation
# before this current time
@@ -522,7 +522,7 @@ def process_matched_mitigated_finding(
else:
logger.debug(
f"Reactivating: - {existing_finding.id}: {existing_finding.title} "
- f"({existing_finding.component_name} - {existing_finding.component_version})"
+ f"({existing_finding.component_name} - {existing_finding.component_version})",
)
existing_finding.mitigated = None
existing_finding.is_mitigated = False
@@ -543,7 +543,7 @@ def process_matched_mitigated_finding(
endpoint_statuses = existing_finding.status_finding.exclude(
Q(false_positive=True)
| Q(out_of_scope=True)
- | Q(risk_accepted=True)
+ | Q(risk_accepted=True),
)
self.endpoint_manager.chunk_endpoints_and_reactivate(endpoint_statuses)
existing_finding.notes.add(note)
@@ -566,7 +566,7 @@ def process_matched_active_finding(
# existing findings may be from before we had component_name/version fields
logger.debug(
f"Updating existing finding: {existing_finding.id}: {existing_finding.title} "
- f"({existing_finding.component_name} - {existing_finding.component_version})"
+ f"({existing_finding.component_name} - {existing_finding.component_version})",
)
# First check that the existing finding is definitely not mitigated
if not (existing_finding.mitigated and existing_finding.is_mitigated):
@@ -577,7 +577,7 @@ def process_matched_active_finding(
# as they could be force closed by the scanner but a DD user forces it open ?
logger.debug(
f"Closing: {existing_finding.id}: {existing_finding.title} "
- f"({existing_finding.component_name} - {existing_finding.component_version})"
+ f"({existing_finding.component_name} - {existing_finding.component_version})",
)
existing_finding.mitigated = unsaved_finding.mitigated
existing_finding.is_mitigated = True
@@ -589,7 +589,7 @@ def process_matched_active_finding(
logger.debug('Reimported mitigated item matches a finding that is currently open, closing.')
logger.debug(
f"Closing: {existing_finding.id}: {existing_finding.title} "
- f"({existing_finding.component_name} - {existing_finding.component_version})"
+ f"({existing_finding.component_name} - {existing_finding.component_version})",
)
existing_finding.risk_accepted = unsaved_finding.risk_accepted
existing_finding.false_p = unsaved_finding.false_p
@@ -639,7 +639,7 @@ def process_finding_that_was_not_matched(
logger.debug(
"Reimport created new finding as no existing finding match: "
f"{finding.id}: {finding.title} "
- f"({finding.component_name} - {finding.component_version})"
+ f"({finding.component_name} - {finding.component_version})",
)
# Manage the finding grouping selection
self.process_finding_groups(
@@ -690,7 +690,7 @@ def process_groups_for_all_findings(
findings,
self.group_by,
create_finding_groups_for_all_findings=self.create_finding_groups_for_all_findings,
- **kwargs
+ **kwargs,
)
if self.push_to_jira:
if findings[0].finding_group is not None:
diff --git a/dojo/importers/endpoint_manager.py b/dojo/importers/endpoint_manager.py
index e7b21043e1..2e885168aa 100644
--- a/dojo/importers/endpoint_manager.py
+++ b/dojo/importers/endpoint_manager.py
@@ -147,7 +147,7 @@ def chunk_endpoints_and_disperse(
def clean_unsaved_endpoints(
self,
- endpoints: List[Endpoint]
+ endpoints: List[Endpoint],
) -> None:
"""
Clean endpoints that are supplied. For any endpoints that fail this validation
@@ -232,13 +232,13 @@ def update_endpoint_status(
endpoint_status_to_mitigate = list(
filter(
lambda existing_finding_endpoint_status: existing_finding_endpoint_status.endpoint not in new_finding_endpoints_list,
- existing_finding_endpoint_status_list)
+ existing_finding_endpoint_status_list),
)
# Re-activate any endpoints in the old finding that are in the new finding
endpoint_status_to_reactivate = list(
filter(
lambda existing_finding_endpoint_status: existing_finding_endpoint_status.endpoint in new_finding_endpoints_list,
- existing_finding_endpoint_status_list)
+ existing_finding_endpoint_status_list),
)
self.chunk_endpoints_and_reactivate(endpoint_status_to_reactivate)
self.chunk_endpoints_and_mitigate(endpoint_status_to_mitigate, user)
diff --git a/dojo/jira_link/helper.py b/dojo/jira_link/helper.py
index 32329431d7..f2b869e55a 100644
--- a/dojo/jira_link/helper.py
+++ b/dojo/jira_link/helper.py
@@ -50,12 +50,12 @@
'Mitigated',
'False Positive',
'Out of Scope',
- 'Duplicate'
+ 'Duplicate',
]
OPEN_STATUS = [
'Active',
- 'Verified'
+ 'Verified',
]
@@ -1303,7 +1303,7 @@ def add_epic(engagement, **kwargs):
epic_name = engagement.name
issue_dict = {
'project': {
- 'key': jira_project.project_key
+ 'key': jira_project.project_key,
},
'summary': epic_name,
'description': epic_name,
@@ -1393,7 +1393,7 @@ def add_simple_jira_comment(jira_instance, jira_issue, comment):
jira = get_jira_connection(jira_instance)
jira.add_comment(
- jira_issue.jira_id, comment
+ jira_issue.jira_id, comment,
)
return True
except Exception as e:
@@ -1620,7 +1620,7 @@ def process_resolution_from_jira(finding, resolution_id, resolution_name, assign
finding.false_p = False
ra = Risk_Acceptance.objects.create(
accepted_by=assignee_name,
- owner=finding.reporter
+ owner=finding.reporter,
)
finding.test.engagement.risk_acceptance.add(ra)
ra_helper.add_findings_to_risk_acceptance(ra, [finding])
diff --git a/dojo/jira_link/views.py b/dojo/jira_link/views.py
index e0c43884c4..80065f78ad 100644
--- a/dojo/jira_link/views.py
+++ b/dojo/jira_link/views.py
@@ -385,7 +385,7 @@ def post(self, request):
description=f"JIRA \"{jform.cleaned_data.get('configuration_name')}\" was added by {request.user}",
url=request.build_absolute_uri(reverse('jira')))
- return HttpResponseRedirect(reverse('jira', ))
+ return HttpResponseRedirect(reverse('jira'))
return render(request, self.get_template(), {'jform': jform})
@@ -430,7 +430,7 @@ def post(self, request):
description=f"JIRA \"{jform.cleaned_data.get('configuration_name')}\" was added by {request.user}",
url=request.build_absolute_uri(reverse('jira')))
- return HttpResponseRedirect(reverse('jira', ))
+ return HttpResponseRedirect(reverse('jira'))
else:
logger.error('jform.errors: %s', jform.errors)
return render(request, self.get_template(), {'jform': jform})
@@ -485,7 +485,7 @@ def post(self, request, jid=None):
description=f"JIRA \"{jform.cleaned_data.get('configuration_name')}\" was edited by {request.user}",
url=request.build_absolute_uri(reverse('jira')))
- return HttpResponseRedirect(reverse('jira', ))
+ return HttpResponseRedirect(reverse('jira'))
return render(request, self.get_template(), {'jform': jform})
diff --git a/dojo/management/commands/dedupe.py b/dojo/management/commands/dedupe.py
index 1e77c82c9a..995d258f15 100644
--- a/dojo/management/commands/dedupe.py
+++ b/dojo/management/commands/dedupe.py
@@ -38,7 +38,7 @@ def add_arguments(self, parser):
'--parser',
dest='parser',
action='append',
- help="""List of parsers for which hash_code needs recomputing (defaults to all parsers)"""
+ help="""List of parsers for which hash_code needs recomputing (defaults to all parsers)""",
)
parser.add_argument('--hash_code_only', action='store_true', help='Only compute hash codes')
diff --git a/dojo/management/commands/initialize_permissions.py b/dojo/management/commands/initialize_permissions.py
index a5a204d686..9e14ecdb89 100644
--- a/dojo/management/commands/initialize_permissions.py
+++ b/dojo/management/commands/initialize_permissions.py
@@ -22,7 +22,7 @@ def handle(self, *args, **options):
Permission.objects.create(
name='Can change Google Sheet',
content_type=content_type_system_settings,
- codename='change_google_sheet'
+ codename='change_google_sheet',
)
logger.info('Non-standard permissions have been created')
diff --git a/dojo/management/commands/jira_status_reconciliation.py b/dojo/management/commands/jira_status_reconciliation.py
index 918b91a28f..db1337fda6 100644
--- a/dojo/management/commands/jira_status_reconciliation.py
+++ b/dojo/management/commands/jira_status_reconciliation.py
@@ -75,7 +75,7 @@ def jira_status_reconciliation(*args, **kwargs):
# convert from str to datetime
issue_from_jira.fields.updated = parse_datetime(issue_from_jira.fields.updated)
- find.jira_issue.jira_change, issue_from_jira.fields.updated, find.last_status_update, issue_from_jira.fields.updated, find.last_reviewed, issue_from_jira.fields.updated,
+ find.jira_issue.jira_change, issue_from_jira.fields.updated, find.last_status_update, issue_from_jira.fields.updated, find.last_reviewed, issue_from_jira.fields.updated
flag1, flag2, flag3 = None, None, None
diff --git a/dojo/management/commands/migrate_cve.py b/dojo/management/commands/migrate_cve.py
index 739f78f7d0..74a07325f2 100644
--- a/dojo/management/commands/migrate_cve.py
+++ b/dojo/management/commands/migrate_cve.py
@@ -15,13 +15,13 @@
def create_vulnerability_id(finding):
Vulnerability_Id.objects.get_or_create(
- finding=finding, vulnerability_id=finding.cve
+ finding=finding, vulnerability_id=finding.cve,
)
def create_vulnerability_id_template(finding_template):
Vulnerability_Id_Template.objects.get_or_create(
- finding_template=finding_template, vulnerability_id=finding_template.cve
+ finding_template=finding_template, vulnerability_id=finding_template.cve,
)
diff --git a/dojo/metrics/utils.py b/dojo/metrics/utils.py
index 1a9d3f07c4..6de04ee72b 100644
--- a/dojo/metrics/utils.py
+++ b/dojo/metrics/utils.py
@@ -36,7 +36,7 @@
def finding_queries(
prod_type: QuerySet[Product_Type],
- request: HttpRequest
+ request: HttpRequest,
) -> dict[str, Any]:
# Get the initial list of findings the user is authorized to see
findings_query = get_authorized_findings(
@@ -94,7 +94,7 @@ def finding_queries(
active_findings,
accepted_findings,
start_date,
- MetricsType.FINDING
+ MetricsType.FINDING,
)
monthly_counts = query_counts_for_period(MetricsPeriod.MONTH, months_between)
@@ -110,9 +110,9 @@ def finding_queries(
prod_type__in=prod_type)
top_ten = severity_count(
- top_ten, 'annotate', 'engagement__test__finding__severity'
+ top_ten, 'annotate', 'engagement__test__finding__severity',
).order_by(
- '-critical', '-high', '-medium', '-low'
+ '-critical', '-high', '-medium', '-low',
)[:10]
return {
@@ -132,17 +132,17 @@ def finding_queries(
def endpoint_queries(
prod_type: QuerySet[Product_Type],
- request: HttpRequest
+ request: HttpRequest,
) -> dict[str, Any]:
endpoints_query = Endpoint_Status.objects.filter(
mitigated=False,
- finding__severity__in=('Critical', 'High', 'Medium', 'Low', 'Info')
+ finding__severity__in=('Critical', 'High', 'Medium', 'Low', 'Info'),
).prefetch_related(
'finding__test__engagement__product',
'finding__test__engagement__product__prod_type',
'finding__test__engagement__risk_acceptance',
'finding__risk_acceptance_set',
- 'finding__reporter'
+ 'finding__reporter',
)
endpoints_query = get_authorized_endpoint_status(Permissions.Endpoint_View, endpoints_query, request.user)
@@ -166,30 +166,30 @@ def endpoint_queries(
if len(prod_type) > 0:
endpoints_closed = Endpoint_Status.objects.filter(
mitigated_time__range=[start_date, end_date],
- finding__test__engagement__product__prod_type__in=prod_type
+ finding__test__engagement__product__prod_type__in=prod_type,
).prefetch_related(
- 'finding__test__engagement__product'
+ 'finding__test__engagement__product',
)
# capture the accepted findings in period
accepted_endpoints = Endpoint_Status.objects.filter(
date__range=[start_date, end_date],
risk_accepted=True,
- finding__test__engagement__product__prod_type__in=prod_type
+ finding__test__engagement__product__prod_type__in=prod_type,
).prefetch_related(
- 'finding__test__engagement__product'
+ 'finding__test__engagement__product',
)
else:
endpoints_closed = Endpoint_Status.objects.filter(
- mitigated_time__range=[start_date, end_date]
+ mitigated_time__range=[start_date, end_date],
).prefetch_related(
- 'finding__test__engagement__product'
+ 'finding__test__engagement__product',
)
# capture the accepted findings in period
accepted_endpoints = Endpoint_Status.objects.filter(
date__range=[start_date, end_date],
- risk_accepted=True
+ risk_accepted=True,
).prefetch_related(
- 'finding__test__engagement__product'
+ 'finding__test__engagement__product',
)
endpoints_closed = get_authorized_endpoint_status(Permissions.Endpoint_View, endpoints_closed, request.user)
@@ -203,7 +203,7 @@ def endpoint_queries(
endpoints_qs.filter(finding__active=True),
accepted_endpoints,
start_date,
- MetricsType.ENDPOINT
+ MetricsType.ENDPOINT,
)
monthly_counts = query_counts_for_period(MetricsPeriod.MONTH, months_between)
@@ -218,9 +218,9 @@ def endpoint_queries(
prod_type__in=prod_type)
top_ten = severity_count(
- top_ten, 'annotate', 'engagement__test__finding__severity'
+ top_ten, 'annotate', 'engagement__test__finding__severity',
).order_by(
- '-critical', '-high', '-medium', '-low'
+ '-critical', '-high', '-medium', '-low',
)[:10]
return {
@@ -281,7 +281,7 @@ def query_counts(
active_qs: MetricsQuerySet,
accepted_qs: MetricsQuerySet,
start_date: date,
- metrics_type: MetricsType
+ metrics_type: MetricsType,
) -> Callable[[MetricsPeriod, int], dict[str, list[dict]]]:
"""
Given three QuerySets, a start date, and a MetricsType, returns a method that can be used to generate statistics for
@@ -302,13 +302,13 @@ def _aggregate_data(qs: MetricsQuerySet, include_closed: bool = False) -> list[d
return {
'opened_per_period': _aggregate_data(open_qs, True),
'active_per_period': _aggregate_data(active_qs),
- 'accepted_per_period': _aggregate_data(accepted_qs)
+ 'accepted_per_period': _aggregate_data(accepted_qs),
}
return _aggregates_for_period
def get_date_range(
- qs: QuerySet
+ qs: QuerySet,
) -> tuple[datetime, datetime]:
"""
Given a queryset of objects, returns a tuple of (earliest date, latest date) from among those objects, based on the
@@ -334,7 +334,7 @@ def get_date_range(
def severity_count(
queryset: MetricsQuerySet,
method: str,
- expression: str
+ expression: str,
) -> Union[MetricsQuerySet, dict[str, int]]:
"""
Aggregates counts by severity for the given queryset.
@@ -351,12 +351,12 @@ def severity_count(
high=Count('id', filter=Q(**{expression: 'High'})),
medium=Count('id', filter=Q(**{expression: 'Medium'})),
low=Count('id', filter=Q(**{expression: 'Low'})),
- info=Count('id', filter=Q(**{expression: 'Info'}))
+ info=Count('id', filter=Q(**{expression: 'Info'})),
)
def identify_view(
- request: HttpRequest
+ request: HttpRequest,
) -> str:
"""
Identifies the requested metrics view.
@@ -382,7 +382,7 @@ def identify_view(
def js_epoch(
- d: Union[date, datetime]
+ d: Union[date, datetime],
) -> int:
"""
Converts a date/datetime object to a JavaScript epoch time (for use in FE charts)
@@ -400,7 +400,7 @@ def get_charting_data(
start_date: date,
period: MetricsPeriod,
period_count: int,
- include_closed: bool
+ include_closed: bool,
) -> list[dict]:
"""
Given a queryset of severities data for charting, adds epoch timestamp information and fills in missing data points
@@ -479,20 +479,20 @@ def aggregate_counts_by_period(
:return: A queryset with aggregate severity counts grouped by period
"""
- desired_values = ('grouped_date', 'critical', 'high', 'medium', 'low', 'info', 'total',)
+ desired_values = ('grouped_date', 'critical', 'high', 'medium', 'low', 'info', 'total')
severities_by_period = severity_count(
# Group by desired period
qs.annotate(grouped_date=period.db_method('date')).values('grouped_date'),
'annotate',
- metrics_type.severity_lookup
+ metrics_type.severity_lookup,
)
if include_closed:
severities_by_period = severities_by_period.annotate(
# Include 'closed' counts
closed=Sum(Case(
When(Q(**{metrics_type.closed_lookup: True}), then=Value(1)),
- output_field=IntegerField(), default=0)
+ output_field=IntegerField(), default=0),
),
)
desired_values += ('closed',)
@@ -501,7 +501,7 @@ def aggregate_counts_by_period(
def findings_by_product(
- findings: QuerySet[Finding]
+ findings: QuerySet[Finding],
) -> QuerySet[Finding]:
"""
Groups the given Findings queryset around related product (name/ID)
@@ -514,7 +514,7 @@ def findings_by_product(
def get_in_period_details(
- findings: QuerySet[Finding]
+ findings: QuerySet[Finding],
) -> tuple[QuerySet[Finding], QuerySet[Finding], dict[str, int]]:
"""
Gathers details for the given queryset, corresponding to metrics information for 'in period' Findings
@@ -525,7 +525,7 @@ def get_in_period_details(
"""
in_period_counts = severity_count(findings, 'aggregate', 'severity')
in_period_details = severity_count(
- findings_by_product(findings), 'annotate', 'severity'
+ findings_by_product(findings), 'annotate', 'severity',
).order_by('product_name')
# Approach to age determination is db-engine dependent
@@ -536,7 +536,7 @@ def get_in_period_details(
# so datediff() it is.
finding_table = Finding.objects.model._meta.db_table
age_detail = findings.annotate(
- age=RawSQL(f'DATEDIFF(COALESCE({finding_table}.mitigated, CURRENT_TIMESTAMP), {finding_table}.date)', [])
+ age=RawSQL(f'DATEDIFF(COALESCE({finding_table}.mitigated, CURRENT_TIMESTAMP), {finding_table}.date)', []),
)
else:
raise ValueError
@@ -552,7 +552,7 @@ def get_in_period_details(
def get_accepted_in_period_details(
- findings: QuerySet[Finding]
+ findings: QuerySet[Finding],
) -> QuerySet[Finding]:
"""
Gathers details for the given queryset, corresponding to metrics information for 'accepted' Findings
@@ -561,12 +561,12 @@ def get_accepted_in_period_details(
:return: A queryset of severity aggregates for Findings grouped by product (name/ID)
"""
return severity_count(
- findings_by_product(findings), 'annotate', 'severity'
+ findings_by_product(findings), 'annotate', 'severity',
).order_by('product_name')
def get_closed_in_period_details(
- findings: QuerySet[Finding]
+ findings: QuerySet[Finding],
) -> tuple[QuerySet[Finding], QuerySet[Finding]]:
"""
Gathers details for the given queryset, corresponding to metrics information for 'closed' Findings
@@ -578,13 +578,13 @@ def get_closed_in_period_details(
return (
severity_count(findings, 'aggregate', 'severity'),
severity_count(
- findings_by_product(findings), 'annotate', 'severity'
- ).order_by('product_name')
+ findings_by_product(findings), 'annotate', 'severity',
+ ).order_by('product_name'),
)
def findings_queryset(
- qs: MetricsQuerySet
+ qs: MetricsQuerySet,
) -> QuerySet[Finding]:
"""
Given a MetricsQuerySet, returns a QuerySet representing all its findings.
diff --git a/dojo/metrics/views.py b/dojo/metrics/views.py
index 718b21cd01..a15d9979fc 100644
--- a/dojo/metrics/views.py
+++ b/dojo/metrics/views.py
@@ -68,7 +68,7 @@ def critical_product_metrics(request, mtype):
return render(request, template, {
'name': page_name,
'critical_prods': critical_products,
- 'url_prefix': get_system_setting('url_prefix')
+ 'url_prefix': get_system_setting('url_prefix'),
})
@@ -108,11 +108,11 @@ def metrics(request, mtype):
in_period_counts, in_period_details, age_detail = get_in_period_details(all_findings)
accepted_in_period_details = get_accepted_in_period_details(
- findings_queryset(filters['accepted'])
+ findings_queryset(filters['accepted']),
)
closed_in_period_counts, closed_in_period_details = get_closed_in_period_details(
- findings_queryset(filters['closed'])
+ findings_queryset(filters['closed']),
)
punchcard = []
@@ -387,7 +387,7 @@ def product_type_counts(request):
'overall_in_pt': aip,
'all_current_in_pt': all_current_in_pt,
'top_ten': top_ten,
- 'pt': pt}
+ 'pt': pt},
)
@@ -545,7 +545,7 @@ def product_tag_counts(request):
'overall_in_pt': aip,
'all_current_in_pt': all_current_in_pt,
'top_ten': top_ten,
- 'pt': pt}
+ 'pt': pt},
)
@@ -722,22 +722,22 @@ def view_engineer(request, eid):
z_count += findings.filter(
test=test,
mitigated__isnull=True,
- severity='Critical'
+ severity='Critical',
).count()
o_count += findings.filter(
test=test,
mitigated__isnull=True,
- severity='High'
+ severity='High',
).count()
t_count += findings.filter(
test=test,
mitigated__isnull=True,
- severity='Medium'
+ severity='Medium',
).count()
h_count += findings.filter(
test=test,
mitigated__isnull=True,
- severity='Low'
+ severity='Low',
).count()
prod = Product.objects.get(id=product)
all_findings_link = "{}".format(
diff --git a/dojo/models.py b/dojo/models.py
index 364f714b4a..e29c0641db 100644
--- a/dojo/models.py
+++ b/dojo/models.py
@@ -360,16 +360,16 @@ class System_Settings(models.Model):
"has been previously marked as a false positive on the same product. "
"ATTENTION: Although the deduplication algorithm is used to determine "
"if a finding should be marked as a false positive, this feature will "
- "not work if deduplication is enabled since it doesn't make sense to use both."
- )
+ "not work if deduplication is enabled since it doesn't make sense to use both.",
+ ),
)
retroactive_false_positive_history = models.BooleanField(
default=False, help_text=_(
"(EXPERIMENTAL) FP History will also retroactively mark/unmark all "
"existing equal findings in the same product as a false positives. "
- "Only works if the False Positive History feature is also enabled."
- )
+ "Only works if the False Positive History feature is also enabled.",
+ ),
)
url_prefix = models.CharField(max_length=300, default='', blank=True, help_text=_("URL prefix if DefectDojo is installed in its own virtual subdirectory."))
@@ -470,7 +470,7 @@ class System_Settings(models.Model):
default=False,
blank=False,
verbose_name=_('Allow Anonymous Survey Responses'),
- help_text=_("Enable anyone with a link to the survey to answer a survey")
+ help_text=_("Enable anyone with a link to the survey to answer a survey"),
)
credentials = models.TextField(max_length=3000, blank=True)
disclaimer = models.TextField(max_length=3000, default='', blank=True,
@@ -580,7 +580,7 @@ class System_Settings(models.Model):
verbose_name=_("Filter String Matching Optimization"),
help_text=_(
"When turned on, all filter operations in the UI will require string matches rather than ID. "
- "This is a performance enhancement to avoid fetching objects unnecessarily."
+ "This is a performance enhancement to avoid fetching objects unnecessarily.",
))
from dojo.middleware import System_Settings_Manager
@@ -1590,7 +1590,7 @@ class Meta:
models.Index(fields=['endpoint', 'mitigated']),
]
constraints = [
- models.UniqueConstraint(fields=['finding', 'endpoint'], name='endpoint-finding relation')
+ models.UniqueConstraint(fields=['finding', 'endpoint'], name='endpoint-finding relation'),
]
def __str__(self):
@@ -1672,7 +1672,7 @@ def __str__(self):
)
for qe in self.query.split("&")
) if self.query else (), # inspired by https://github.com/python-hyper/hyperlink/blob/b8c9152cd826bbe8e6cc125648f3738235019705/src/hyperlink/_url.py#L1427
- fragment=self.fragment or ''
+ fragment=self.fragment or '',
)
# Return a normalized version of the URL to avoid differences where there shouldn't be any difference.
# Example: https://google.com and https://google.com:443
@@ -1828,7 +1828,7 @@ def vulnerable(self):
mitigated=False,
false_positive=False,
out_of_scope=False,
- risk_accepted=False
+ risk_accepted=False,
).count() > 0
@property
@@ -1844,7 +1844,7 @@ def active_findings(self):
duplicate=False,
status_finding__false_positive=False,
status_finding__out_of_scope=False,
- status_finding__risk_accepted=False
+ status_finding__risk_accepted=False,
).order_by('numerical_severity')
return findings
@@ -1858,7 +1858,7 @@ def active_verified_findings(self):
duplicate=False,
status_finding__false_positive=False,
status_finding__out_of_scope=False,
- status_finding__risk_accepted=False
+ status_finding__risk_accepted=False,
).order_by('numerical_severity')
return findings
@@ -1913,7 +1913,7 @@ def host_active_findings(self):
status_finding__false_positive=False,
status_finding__out_of_scope=False,
status_finding__risk_accepted=False,
- endpoints__in=self.host_endpoints()
+ endpoints__in=self.host_endpoints(),
).order_by('numerical_severity')
return findings
@@ -1928,7 +1928,7 @@ def host_active_verified_findings(self):
status_finding__false_positive=False,
status_finding__out_of_scope=False,
status_finding__risk_accepted=False,
- endpoints__in=self.host_endpoints()
+ endpoints__in=self.host_endpoints(),
).order_by('numerical_severity')
return findings
@@ -2030,7 +2030,7 @@ class Test(models.Model):
target_start = models.DateTimeField()
target_end = models.DateTimeField()
estimated_time = models.TimeField(null=True, blank=True, editable=False)
- actual_time = models.TimeField(null=True, blank=True, editable=False, )
+ actual_time = models.TimeField(null=True, blank=True, editable=False)
percent_complete = models.IntegerField(null=True, blank=True,
editable=True)
notes = models.ManyToManyField(Notes, blank=True,
@@ -2852,7 +2852,7 @@ def get_endpoints(self):
# sort endpoints strings
endpoint_str = ''.join(
sorted(
- endpoint_str_list
+ endpoint_str_list,
))
return endpoint_str
@@ -3674,7 +3674,7 @@ def get_breadcrumbs(self):
bc = self.engagement_set.first().get_breadcrumbs()
bc += [{'title': str(self),
'url': reverse('view_risk_acceptance', args=(
- self.engagement_set.first().product.id, self.id,))}]
+ self.engagement_set.first().product.id, self.id))}]
return bc
@property
@@ -3736,7 +3736,7 @@ def save(self, *args, **kwargs):
('info', 'Info'),
('success', 'Success'),
('warning', 'Warning'),
- ('danger', 'Danger')
+ ('danger', 'Danger'),
)
@@ -3820,7 +3820,7 @@ class JIRA_Instance(models.Model):
('Epic', 'Epic'),
('Spike', 'Spike'),
('Bug', 'Bug'),
- ('Security', 'Security')
+ ('Security', 'Security'),
)
default_issue_type = models.CharField(max_length=255,
choices=default_issue_type_choices,
@@ -4044,7 +4044,7 @@ class Notifications(models.Model):
class Meta:
constraints = [
- models.UniqueConstraint(fields=['user', 'product'], name="notifications_user_product")
+ models.UniqueConstraint(fields=['user', 'product'], name="notifications_user_product"),
]
indexes = [
models.Index(fields=['user', 'product']),
@@ -4467,7 +4467,7 @@ class Engagement_Survey(models.Model):
class Meta:
verbose_name = _("Engagement Survey")
verbose_name_plural = "Engagement Surveys"
- ordering = ('-active', 'name',)
+ ordering = ('-active', 'name')
def __str__(self):
return self.name
diff --git a/dojo/notes/urls.py b/dojo/notes/urls.py
index 0f5ce2b0b1..ee8861ce2b 100644
--- a/dojo/notes/urls.py
+++ b/dojo/notes/urls.py
@@ -5,5 +5,5 @@
urlpatterns = [
re_path(r'^notes/(?P<id>\d+)/delete/(?P<page>[\w-]+)/(?P<objid>\d+)$', views.delete_note, name='delete_note'),
re_path(r'^notes/(?P<id>\d+)/edit/(?P<page>[\w-]+)/(?P<objid>\d+)$', views.edit_note, name='edit_note'),
- re_path(r'^notes/(?P<id>\d+)/history/(?P<page>[\w-]+)/(?P<objid>\d+)$', views.note_history, name='note_history')
+ re_path(r'^notes/(?P<id>\d+)/history/(?P<page>[\w-]+)/(?P<objid>\d+)$', views.note_history, name='note_history'),
]
diff --git a/dojo/notifications/helper.py b/dojo/notifications/helper.py
index 0afb0d6b36..b09bf1bea6 100644
--- a/dojo/notifications/helper.py
+++ b/dojo/notifications/helper.py
@@ -92,7 +92,7 @@ def create_notification(event=None, **kwargs):
users = Dojo_User.objects.filter(is_active=True).prefetch_related(Prefetch(
"notifications_set",
queryset=Notifications.objects.filter(Q(product_id=product) | Q(product__isnull=True)),
- to_attr="applicable_notifications"
+ to_attr="applicable_notifications",
)).annotate(applicable_notifications_count=Count('notifications__id', filter=Q(notifications__product_id=product) | Q(notifications__product__isnull=True)))\
.filter(Q(applicable_notifications_count__gt=0) | Q(is_superuser=True))
@@ -201,7 +201,7 @@ def _post_slack_message(channel):
'token': get_system_setting('slack_token'),
'channel': channel,
'username': get_system_setting('slack_username'),
- 'text': create_notification_message(event, user, 'slack', *args, **kwargs)
+ 'text': create_notification_message(event, user, 'slack', *args, **kwargs),
})
if 'error' in res.text:
diff --git a/dojo/notifications/urls.py b/dojo/notifications/urls.py
index 8ac8cf2171..b7171e3779 100644
--- a/dojo/notifications/urls.py
+++ b/dojo/notifications/urls.py
@@ -6,5 +6,5 @@
re_path(r'^notifications$', views.PersonalNotificationsView.as_view(), name='notifications'),
re_path(r'^notifications/system$', views.SystemNotificationsView.as_view(), name='system_notifications'),
re_path(r'^notifications/personal$', views.PersonalNotificationsView.as_view(), name='personal_notifications'),
- re_path(r'^notifications/template$', views.TemplateNotificationsView.as_view(), name='template_notifications')
+ re_path(r'^notifications/template$', views.TemplateNotificationsView.as_view(), name='template_notifications'),
]
diff --git a/dojo/notifications/views.py b/dojo/notifications/views.py
index f20e45224f..2c102f59ef 100644
--- a/dojo/notifications/views.py
+++ b/dojo/notifications/views.py
@@ -45,7 +45,7 @@ def get_initial_context(self, request: HttpRequest, notifications: Notifications
'form': self.get_form(request, notifications),
'scope': scope,
'enabled_notifications': self.get_enabled_notifications(),
- 'admin': request.user.is_superuser
+ 'admin': request.user.is_superuser,
}
def set_breadcrumbs(self, request: HttpRequest):
diff --git a/dojo/object/views.py b/dojo/object/views.py
index 86d45e067f..cdaa60b05a 100644
--- a/dojo/object/views.py
+++ b/dojo/object/views.py
@@ -51,7 +51,7 @@ def view_objects(request, pid):
{
'object_queryset': object_queryset,
'product_tab': product_tab,
- 'product': product
+ 'product': product,
})
@@ -81,7 +81,7 @@ def edit_object(request, pid, ttid):
'dojo/edit_object.html',
{
'tform': tform,
- 'product_tab': product_tab
+ 'product_tab': product_tab,
})
@@ -109,5 +109,5 @@ def delete_object(request, pid, ttid):
'dojo/delete_object.html',
{
'tform': tform,
- 'product_tab': product_tab
+ 'product_tab': product_tab,
})
diff --git a/dojo/okta.py b/dojo/okta.py
index 68934e1d5e..cad1bc081c 100644
--- a/dojo/okta.py
+++ b/dojo/okta.py
@@ -37,12 +37,12 @@ class OktaOAuth2(OktaMixin, BaseOAuth2):
SCOPE_SEPARATOR = ' '
DEFAULT_SCOPE = [
- 'openid', 'profile'
+ 'openid', 'profile',
]
EXTRA_DATA = [
('refresh_token', 'refresh_token', True),
('expires_in', 'expires'),
- ('token_type', 'token_type', True)
+ ('token_type', 'token_type', True),
]
def get_user_details(self, response):
@@ -58,7 +58,7 @@ def user_data(self, access_token, *args, **kwargs):
self._url('v1/userinfo'),
headers={
'Authorization': f'Bearer {access_token}',
- }
+ },
)
@@ -94,7 +94,7 @@ def validate_and_return_id_token(self, id_token, access_token):
k,
audience=client_id,
issuer=self.id_token_issuer(),
- access_token=access_token
+ access_token=access_token,
)
self.validate_claims(claims)
diff --git a/dojo/product/queries.py b/dojo/product/queries.py
index 96f1b626cb..90307238e3 100644
--- a/dojo/product/queries.py
+++ b/dojo/product/queries.py
@@ -244,7 +244,7 @@ def get_authorized_dojo_meta(permission):
finding__test__engagement__product__prod_type__member=Exists(finding_authorized_product_type_roles),
finding__test__engagement__product__member=Exists(finding_authorized_product_roles),
finding__test__engagement__product__prod_type__authorized_group=Exists(finding_authorized_product_type_groups),
- finding__test__engagement__product__authorized_group=Exists(finding_authorized_product_groups)
+ finding__test__engagement__product__authorized_group=Exists(finding_authorized_product_groups),
).order_by('name')
dojo_meta = dojo_meta.filter(
Q(product__prod_type__member=True)
diff --git a/dojo/product/signals.py b/dojo/product/signals.py
index 4ae3053b5f..02f93cd582 100644
--- a/dojo/product/signals.py
+++ b/dojo/product/signals.py
@@ -25,7 +25,7 @@ def product_post_delete(sender, instance, **kwargs):
le = LogEntry.objects.get(
action=LogEntry.Action.DELETE,
content_type=ContentType.objects.get(app_label='dojo', model='product'),
- object_id=instance.id
+ object_id=instance.id,
)
description = _('The product "%(name)s" was deleted by %(user)s') % {
'name': instance.name, 'user': le.actor}
diff --git a/dojo/product/views.py b/dojo/product/views.py
index c3afce1524..95a133bc13 100644
--- a/dojo/product/views.py
+++ b/dojo/product/views.py
@@ -134,7 +134,7 @@ def product(request):
# see https://code.djangoproject.com/ticket/23771 and https://code.djangoproject.com/ticket/25375
name_words = prods.values_list('name', flat=True)
prods = prods.annotate(
- findings_count=Count('engagement__test__finding', filter=Q(engagement__test__finding__active=True))
+ findings_count=Count('engagement__test__finding', filter=Q(engagement__test__finding__active=True)),
)
filter_string_matching = get_system_setting("filter_string_matching", False)
filter_class = ProductFilterWithoutObjectLookups if filter_string_matching else ProductFilter
@@ -241,7 +241,7 @@ def view_product(request, pid):
'waiting': {'count': total_wait, 'percent': waiting_percent},
'fail': {'count': total_fail, 'percent': fail_percent},
'pass': total_pass + total_fail,
- 'total': total
+ 'total': total,
})
system_settings = System_Settings.objects.get()
@@ -336,7 +336,7 @@ def view_product_components(request, pid):
'filter': comp_filter,
'product_tab': product_tab,
'result': result,
- 'component_words': sorted(set(component_words))
+ 'component_words': sorted(set(component_words)),
})
@@ -410,18 +410,18 @@ def finding_querys(request, prod):
filters['open_vulns'] = findings_qs.filter(finding_helper.OPEN_FINDINGS_QUERY).filter(
cwe__isnull=False,
).order_by('cwe').values(
- 'cwe'
+ 'cwe',
).annotate(
- count=Count('cwe')
+ count=Count('cwe'),
)
filters['all_vulns'] = findings_qs.filter(
duplicate=False,
cwe__isnull=False,
).order_by('cwe').values(
- 'cwe'
+ 'cwe',
).annotate(
- count=Count('cwe')
+ count=Count('cwe'),
)
filters['start_date'] = start_date
@@ -496,21 +496,21 @@ def endpoint_querys(request, prod):
mitigated=True,
finding__cwe__isnull=False,
).order_by('finding__cwe').values(
- 'finding__cwe'
+ 'finding__cwe',
).annotate(
- count=Count('finding__cwe')
+ count=Count('finding__cwe'),
).annotate(
- cwe=F('finding__cwe')
+ cwe=F('finding__cwe'),
)
filters['all_vulns'] = endpoints_qs.filter(
finding__cwe__isnull=False,
).order_by('finding__cwe').values(
- 'finding__cwe'
+ 'finding__cwe',
).annotate(
- count=Count('finding__cwe')
+ count=Count('finding__cwe'),
).annotate(
- cwe=F('finding__cwe')
+ cwe=F('finding__cwe'),
)
filters['start_date'] = start_date
@@ -743,7 +743,7 @@ def async_burndown_metrics(request, pid):
'low': open_findings_burndown.get('Low', []),
'info': open_findings_burndown.get('Info', []),
'max': open_findings_burndown.get('y_max', 0),
- 'min': open_findings_burndown.get('y_min', 0)
+ 'min': open_findings_burndown.get('y_min', 0),
})
@@ -800,15 +800,15 @@ def view_engagements(request, pid):
def prefetch_for_view_engagements(engagements, recent_test_day_count):
engagements = engagements.select_related(
- 'lead'
+ 'lead',
).prefetch_related(
Prefetch('test_set', queryset=Test.objects.filter(
id__in=Subquery(
Test.objects.filter(
engagement_id=OuterRef('engagement_id'),
- updated__gte=timezone.now() - timedelta(days=recent_test_day_count)
- ).values_list('id', flat=True)
- ))
+ updated__gte=timezone.now() - timedelta(days=recent_test_day_count),
+ ).values_list('id', flat=True),
+ )),
),
'test_set__test_type',
).annotate(
@@ -1002,7 +1002,7 @@ def edit_product(request, pid):
'product_tab': product_tab,
'jform': jform,
'gform': gform,
- 'product': product
+ 'product': product,
})
@@ -1358,7 +1358,7 @@ def get_github_form(self, request: HttpRequest, test: Test):
# Set the initial form args
kwargs = {
"enabled": jira_helper.is_push_all_issues(test),
- "prefix": "githubform"
+ "prefix": "githubform",
}
return GITHUBFindingForm(*args, **kwargs)
@@ -1373,11 +1373,11 @@ def validate_status_change(self, request: HttpRequest, context: dict):
if closing_disabled != 0:
error_inactive = ValidationError(
_('Can not set a finding as inactive without adding all mandatory notes'),
- code='inactive_without_mandatory_notes'
+ code='inactive_without_mandatory_notes',
)
error_false_p = ValidationError(
_('Can not set a finding as false positive without adding all mandatory notes'),
- code='false_p_without_mandatory_notes'
+ code='false_p_without_mandatory_notes',
)
if context["form"]['active'].value() is False:
context["form"].add_error('active', error_inactive)
@@ -1447,7 +1447,7 @@ def process_jira_form(self, request: HttpRequest, finding: Finding, context: dic
# Determine if a message should be added
if jira_message:
messages.add_message(
- request, messages.SUCCESS, jira_message, extra_tags="alert-success"
+ request, messages.SUCCESS, jira_message, extra_tags="alert-success",
)
return request, True, push_to_jira
@@ -1811,7 +1811,7 @@ def view_api_scan_configurations(request, pid):
{
'product_api_scan_configurations': product_api_scan_configurations,
'product_tab': product_tab,
- 'pid': pid
+ 'pid': pid,
})
@@ -1885,7 +1885,7 @@ def delete_api_scan_configuration(request, pid, pascid):
'dojo/delete_product_api_scan_configuration.html',
{
'form': form,
- 'product_tab': product_tab
+ 'product_tab': product_tab,
})
diff --git a/dojo/product_type/signals.py b/dojo/product_type/signals.py
index 65a06c1284..15f06b03e6 100644
--- a/dojo/product_type/signals.py
+++ b/dojo/product_type/signals.py
@@ -25,7 +25,7 @@ def product_type_post_delete(sender, instance, **kwargs):
le = LogEntry.objects.get(
action=LogEntry.Action.DELETE,
content_type=ContentType.objects.get(app_label='dojo', model='product_type'),
- object_id=instance.id
+ object_id=instance.id,
)
description = _('The product type "%(name)s" was deleted by %(user)s') % {
'name': instance.name, 'user': le.actor}
diff --git a/dojo/product_type/urls.py b/dojo/product_type/urls.py
index 41f9b840c7..98c6b1cf81 100644
--- a/dojo/product_type/urls.py
+++ b/dojo/product_type/urls.py
@@ -28,5 +28,5 @@
re_path(r'^product/type/group/(?P<groupid>\d+)/edit$', views.edit_product_type_group,
name='edit_product_type_group'),
re_path(r'^product/type/group/(?P<groupid>\d+)/delete$', views.delete_product_type_group,
- name='delete_product_type_group')
+ name='delete_product_type_group'),
]
diff --git a/dojo/product_type/views.py b/dojo/product_type/views.py
index efa46f73a8..08c91823c0 100644
--- a/dojo/product_type/views.py
+++ b/dojo/product_type/views.py
@@ -372,7 +372,7 @@ def edit_product_type_group(request, groupid):
return render(request, 'dojo/edit_product_type_group.html', {
'name': page_name,
'groupid': groupid,
- 'form': groupform
+ 'form': groupform,
})
@@ -401,5 +401,5 @@ def delete_product_type_group(request, groupid):
return render(request, 'dojo/delete_product_type_group.html', {
'name': page_name,
'groupid': groupid,
- 'form': groupform
+ 'form': groupform,
})
diff --git a/dojo/regulations/urls.py b/dojo/regulations/urls.py
index a16d3c9cca..e977103192 100644
--- a/dojo/regulations/urls.py
+++ b/dojo/regulations/urls.py
@@ -6,4 +6,4 @@
re_path(r'^regulations/add', views.new_regulation, name='new_regulation'),
re_path(r'^regulations/(?P<ttid>\d+)/edit$', views.edit_regulations,
name='edit_regulations'),
- re_path(r'^regulations$', views.regulations, name='regulations'), ]
+ re_path(r'^regulations$', views.regulations, name='regulations')]
diff --git a/dojo/regulations/views.py b/dojo/regulations/views.py
index 0bcd19bf7b..16fb582e0d 100644
--- a/dojo/regulations/views.py
+++ b/dojo/regulations/views.py
@@ -26,7 +26,7 @@ def new_regulation(request):
messages.SUCCESS,
'Regulation Successfully Created.',
extra_tags='alert-success')
- return HttpResponseRedirect(reverse('regulations', ))
+ return HttpResponseRedirect(reverse('regulations'))
else:
tform = RegulationForm()
add_breadcrumb(title="New regulation", top_level=False, request=request)
@@ -44,7 +44,7 @@ def edit_regulations(request, ttid):
messages.SUCCESS,
'Regulation Deleted.',
extra_tags='alert-success')
- return HttpResponseRedirect(reverse('regulations', ))
+ return HttpResponseRedirect(reverse('regulations'))
elif request.method == 'POST':
tform = RegulationForm(request.POST, instance=regulation)
if tform.is_valid():
@@ -53,7 +53,7 @@ def edit_regulations(request, ttid):
messages.SUCCESS,
'Regulation Successfully Updated.',
extra_tags='alert-success')
- return HttpResponseRedirect(reverse('regulations', ))
+ return HttpResponseRedirect(reverse('regulations'))
else:
tform = RegulationForm(instance=regulation)
add_breadcrumb(title="Edit Regulation", top_level=False, request=request)
diff --git a/dojo/reports/views.py b/dojo/reports/views.py
index b815c81eca..f67b2f40c5 100644
--- a/dojo/reports/views.py
+++ b/dojo/reports/views.py
@@ -112,7 +112,7 @@ def get_template(self):
def get_context(self, request: HttpRequest) -> dict:
return {
"available_widgets": self.get_available_widgets(request),
- "in_use_widgets": self.get_in_use_widgets(request), }
+ "in_use_widgets": self.get_in_use_widgets(request)}
class CustomReport(View):
@@ -153,7 +153,7 @@ def get_form(self, request):
def get_template(self):
if self.report_format == 'AsciiDoc':
- return 'dojo/custom_asciidoc_report.html',
+ return 'dojo/custom_asciidoc_report.html'
elif self.report_format == 'HTML':
return 'dojo/custom_html_report.html'
else:
@@ -165,7 +165,7 @@ def get_context(self):
"host": self.host,
"finding_notes": self.finding_notes,
"finding_images": self.finding_images,
- "user_id": self.request.user.id, }
+ "user_id": self.request.user.id}
def report_findings(request):
@@ -710,14 +710,14 @@ def prefetch_related_findings_for_report(findings):
'notes',
'files',
'reporter',
- 'mitigated_by'
+ 'mitigated_by',
)
def prefetch_related_endpoints_for_report(endpoints):
return endpoints.prefetch_related(
'product',
- 'tags'
+ 'tags',
)
@@ -1147,7 +1147,7 @@ def get(self, request):
response = HttpResponse(
content=stream,
- content_type='application/vnd.openxmlformats-officedocument.spreadsheetml.sheet'
+ content_type='application/vnd.openxmlformats-officedocument.spreadsheetml.sheet',
)
response['Content-Disposition'] = 'attachment; filename=findings.xlsx'
return response
diff --git a/dojo/risk_acceptance/helper.py b/dojo/risk_acceptance/helper.py
index 9ceedfaab4..098bf52aaf 100644
--- a/dojo/risk_acceptance/helper.py
+++ b/dojo/risk_acceptance/helper.py
@@ -51,7 +51,7 @@ def expire_now(risk_acceptance):
create_notification(event='risk_acceptance_expiration', title=title, risk_acceptance=risk_acceptance, accepted_findings=accepted_findings,
reactivated_findings=reactivated_findings, engagement=risk_acceptance.engagement,
product=risk_acceptance.engagement.product,
- url=reverse('view_risk_acceptance', args=(risk_acceptance.engagement.id, risk_acceptance.id, )))
+ url=reverse('view_risk_acceptance', args=(risk_acceptance.engagement.id, risk_acceptance.id)))
def reinstate(risk_acceptance, old_expiration_date):
@@ -169,7 +169,7 @@ def expiration_handler(*args, **kwargs):
create_notification(event='risk_acceptance_expiration', title=notification_title, risk_acceptance=risk_acceptance,
accepted_findings=risk_acceptance.accepted_findings.all(), engagement=risk_acceptance.engagement,
product=risk_acceptance.engagement.product,
- url=reverse('view_risk_acceptance', args=(risk_acceptance.engagement.id, risk_acceptance.id, )))
+ url=reverse('view_risk_acceptance', args=(risk_acceptance.engagement.id, risk_acceptance.id)))
post_jira_comments(risk_acceptance, expiration_warning_message_creator, heads_up_days)
@@ -266,7 +266,7 @@ def prefetch_for_expiration(risk_acceptances):
return risk_acceptances.prefetch_related('accepted_findings', 'accepted_findings__jira_issue',
'engagement_set',
'engagement__jira_project',
- 'engagement__jira_project__jira_instance'
+ 'engagement__jira_project__jira_instance',
)
diff --git a/dojo/settings/.settings.dist.py.sha256sum b/dojo/settings/.settings.dist.py.sha256sum
index 4885a81930..890d05663e 100644
--- a/dojo/settings/.settings.dist.py.sha256sum
+++ b/dojo/settings/.settings.dist.py.sha256sum
@@ -1 +1 @@
-7b3bb14160f3ffce537d75895ee18cb0a561232fa964bae88b4861f7d289b176
+cce215fa477d611d45cae69a29185e943eb209526fec2b38659666e5e9513fe3
diff --git a/dojo/settings/attribute-maps/django_saml_uri.py b/dojo/settings/attribute-maps/django_saml_uri.py
index b6f3f3a67c..83fd538420 100644
--- a/dojo/settings/attribute-maps/django_saml_uri.py
+++ b/dojo/settings/attribute-maps/django_saml_uri.py
@@ -15,5 +15,5 @@
'last_name': X500ATTR_OID + '4',
'email': PKCS_9 + '1',
'uid': UCL_DIR_PILOT + '1',
- }
+ },
}
diff --git a/dojo/settings/attribute-maps/saml_uri.py b/dojo/settings/attribute-maps/saml_uri.py
index 4922c50f89..c2e7694f89 100644
--- a/dojo/settings/attribute-maps/saml_uri.py
+++ b/dojo/settings/attribute-maps/saml_uri.py
@@ -239,5 +239,5 @@
'schacUserStatus': SCHAC + '19',
'schacProjectMembership': SCHAC + '20',
'schacProjectSpecificRole': SCHAC + '21',
- }
+ },
}
diff --git a/dojo/settings/settings.dist.py b/dojo/settings/settings.dist.py
index e207309417..0c62f004bc 100644
--- a/dojo/settings/settings.dist.py
+++ b/dojo/settings/settings.dist.py
@@ -173,7 +173,7 @@
'Email': 'email',
'UserName': 'username',
'Firstname': 'first_name',
- 'Lastname': 'last_name'
+ 'Lastname': 'last_name',
}),
DD_SAML2_ALLOW_UNKNOWN_ATTRIBUTE=(bool, False),
# Authentication via HTTP Proxy which put username to HTTP Header REMOTE_USER
@@ -383,7 +383,7 @@ def generate_url(scheme, double_slashes, user, password, host, port, path, param
# Parse database connection url strings like psql://user:pass@127.0.0.1:8458/db
if os.getenv('DD_DATABASE_URL') is not None:
DATABASES = {
- 'default': env.db('DD_DATABASE_URL')
+ 'default': env.db('DD_DATABASE_URL'),
}
else:
DATABASES = {
@@ -397,7 +397,7 @@ def generate_url(scheme, double_slashes, user, password, host, port, path, param
'PASSWORD': env('DD_DATABASE_PASSWORD'),
'HOST': env('DD_DATABASE_HOST'),
'PORT': env('DD_DATABASE_PORT'),
- }
+ },
}
# Track migrations through source control rather than making migrations locally
@@ -637,23 +637,23 @@ def generate_url(scheme, double_slashes, user, password, host, port, path, param
'NAME': 'dojo.user.validators.DojoCommonPasswordValidator',
},
{
- 'NAME': 'dojo.user.validators.MinLengthValidator'
+ 'NAME': 'dojo.user.validators.MinLengthValidator',
},
{
- 'NAME': 'dojo.user.validators.MaxLengthValidator'
+ 'NAME': 'dojo.user.validators.MaxLengthValidator',
},
{
- 'NAME': 'dojo.user.validators.NumberValidator'
+ 'NAME': 'dojo.user.validators.NumberValidator',
},
{
- 'NAME': 'dojo.user.validators.UppercaseValidator'
+ 'NAME': 'dojo.user.validators.UppercaseValidator',
},
{
- 'NAME': 'dojo.user.validators.LowercaseValidator'
+ 'NAME': 'dojo.user.validators.LowercaseValidator',
},
{
- 'NAME': 'dojo.user.validators.SymbolValidator'
- }
+ 'NAME': 'dojo.user.validators.SymbolValidator',
+ },
]
# https://django-ratelimit.readthedocs.io/en/stable/index.html
@@ -764,7 +764,7 @@ def generate_url(scheme, double_slashes, user, password, host, port, path, param
),
'DEFAULT_PAGINATION_CLASS': 'rest_framework.pagination.LimitOffsetPagination',
'PAGE_SIZE': 25,
- 'EXCEPTION_HANDLER': 'dojo.api_v2.exception_handler.custom_exception_handler'
+ 'EXCEPTION_HANDLER': 'dojo.api_v2.exception_handler.custom_exception_handler',
}
if API_TOKENS_ENABLED:
@@ -783,8 +783,8 @@ def generate_url(scheme, double_slashes, user, password, host, port, path, param
# show file selection dialogue, see https://github.com/tfranzel/drf-spectacular/issues/455
"COMPONENT_SPLIT_REQUEST": True,
"SWAGGER_UI_SETTINGS": {
- "docExpansion": "none"
- }
+ "docExpansion": "none",
+ },
}
if not env('DD_DEFAULT_SWAGGER_UI'):
@@ -1092,7 +1092,7 @@ def saml2_attrib_map_format(dict):
env('DD_CELERY_BROKER_HOST'),
env('DD_CELERY_BROKER_PORT'),
env('DD_CELERY_BROKER_PATH'),
- env('DD_CELERY_BROKER_PARAMS')
+ env('DD_CELERY_BROKER_PARAMS'),
)
CELERY_TASK_IGNORE_RESULT = env('DD_CELERY_TASK_IGNORE_RESULT')
CELERY_RESULT_BACKEND = env('DD_CELERY_RESULT_BACKEND')
@@ -1113,7 +1113,7 @@ def saml2_attrib_map_format(dict):
'add-alerts': {
'task': 'dojo.tasks.add_alerts',
'schedule': timedelta(hours=1),
- 'args': [timedelta(hours=1)]
+ 'args': [timedelta(hours=1)],
},
'cleanup-alerts': {
'task': 'dojo.tasks.cleanup_alerts',
@@ -1122,7 +1122,7 @@ def saml2_attrib_map_format(dict):
'dedupe-delete': {
'task': 'dojo.tasks.async_dupe_delete',
'schedule': timedelta(minutes=1),
- 'args': [timedelta(minutes=1)]
+ 'args': [timedelta(minutes=1)],
},
'flush_auditlog': {
'task': 'dojo.tasks.flush_auditlog',
@@ -1163,9 +1163,9 @@ def saml2_attrib_map_format(dict):
if env('DD_DJANGO_METRICS_ENABLED'):
DJANGO_METRICS_ENABLED = env('DD_DJANGO_METRICS_ENABLED')
INSTALLED_APPS = INSTALLED_APPS + ('django_prometheus',)
- MIDDLEWARE = ['django_prometheus.middleware.PrometheusBeforeMiddleware', ] + \
+ MIDDLEWARE = ['django_prometheus.middleware.PrometheusBeforeMiddleware'] + \
MIDDLEWARE + \
- ['django_prometheus.middleware.PrometheusAfterMiddleware', ]
+ ['django_prometheus.middleware.PrometheusAfterMiddleware']
database_engine = DATABASES.get('default').get('ENGINE')
DATABASES['default']['ENGINE'] = database_engine.replace('django.', 'django_prometheus.', 1)
# CELERY_RESULT_BACKEND.replace('django.core','django_prometheus.', 1)
@@ -1269,7 +1269,7 @@ def saml2_attrib_map_format(dict):
'Bearer CLI': ['title', 'severity'],
'Nancy Scan': ['title', 'vuln_id_from_tool'],
'Wiz Scan': ['title', 'description', 'severity'],
- 'Kubescape JSON Importer': ['title', 'component_name']
+ 'Kubescape JSON Importer': ['title', 'component_name'],
}
# Override the hardcoded settings here via the env var
@@ -1331,7 +1331,7 @@ def saml2_attrib_map_format(dict):
'Codechecker Report native': True,
'Wazuh': True,
'Nuclei Scan': True,
- 'Threagile risks report': True
+ 'Threagile risks report': True,
}
# List of fields that are known to be usable in hash_code computation)
@@ -1488,7 +1488,7 @@ def saml2_attrib_map_format(dict):
'Bearer CLI': DEDUPE_ALGO_HASH_CODE,
'Wiz Scan': DEDUPE_ALGO_HASH_CODE,
'Deepfence Threatmapper Report': DEDUPE_ALGO_HASH_CODE,
- 'Kubescape JSON Importer': DEDUPE_ALGO_HASH_CODE
+ 'Kubescape JSON Importer': DEDUPE_ALGO_HASH_CODE,
}
# Override the hardcoded settings here via the env var
@@ -1518,15 +1518,15 @@ def saml2_attrib_map_format(dict):
('Epic', 'Epic'),
('Spike', 'Spike'),
('Bug', 'Bug'),
- ('Security', 'Security')
+ ('Security', 'Security'),
)
if env('DD_JIRA_EXTRA_ISSUE_TYPES') != '':
if env('DD_JIRA_EXTRA_ISSUE_TYPES').count(',') > 0:
for extra_type in env('DD_JIRA_EXTRA_ISSUE_TYPES').split(','):
- JIRA_ISSUE_TYPE_CHOICES_CONFIG += (extra_type, extra_type),
+ JIRA_ISSUE_TYPE_CHOICES_CONFIG += (extra_type, extra_type)
else:
- JIRA_ISSUE_TYPE_CHOICES_CONFIG += (env('DD_JIRA_EXTRA_ISSUE_TYPES'), env('DD_JIRA_EXTRA_ISSUE_TYPES')),
+ JIRA_ISSUE_TYPE_CHOICES_CONFIG += (env('DD_JIRA_EXTRA_ISSUE_TYPES'), env('DD_JIRA_EXTRA_ISSUE_TYPES'))
JIRA_SSL_VERIFY = env('DD_JIRA_SSL_VERIFY')
@@ -1550,7 +1550,7 @@ def saml2_attrib_map_format(dict):
'datefmt': '%d/%b/%Y %H:%M:%S',
},
'simple': {
- 'format': '%(levelname)s %(funcName)s %(lineno)d %(message)s'
+ 'format': '%(levelname)s %(funcName)s %(lineno)d %(message)s',
},
'json': {
'()': 'json_log_formatter.JSONFormatter',
@@ -1558,25 +1558,25 @@ def saml2_attrib_map_format(dict):
},
'filters': {
'require_debug_false': {
- '()': 'django.utils.log.RequireDebugFalse'
+ '()': 'django.utils.log.RequireDebugFalse',
},
'require_debug_true': {
- '()': 'django.utils.log.RequireDebugTrue'
+ '()': 'django.utils.log.RequireDebugTrue',
},
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
- 'class': 'django.utils.log.AdminEmailHandler'
+ 'class': 'django.utils.log.AdminEmailHandler',
},
'console': {
'class': 'logging.StreamHandler',
- 'formatter': 'verbose'
+ 'formatter': 'verbose',
},
'json_console': {
'class': 'logging.StreamHandler',
- 'formatter': 'json'
+ 'formatter': 'json',
},
},
'loggers': {
@@ -1624,7 +1624,7 @@ def saml2_attrib_map_format(dict):
'level': str(LOG_LEVEL),
'propagate': False,
},
- }
+ },
}
# override filter to ensure sensitive variables are also hidden when DEBUG = True
diff --git a/dojo/settings/settings.py b/dojo/settings/settings.py
index 20f13285a7..2d378c742f 100644
--- a/dojo/settings/settings.py
+++ b/dojo/settings/settings.py
@@ -9,7 +9,7 @@
include(
'settings.dist.py',
- optional('local_settings.py')
+ optional('local_settings.py'),
)
if not (DEBUG or ('collectstatic' in sys.argv)):
diff --git a/dojo/settings/unittest.py b/dojo/settings/unittest.py
index c8831991e3..7132d3b928 100644
--- a/dojo/settings/unittest.py
+++ b/dojo/settings/unittest.py
@@ -10,5 +10,5 @@
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': 'unittest.sqlite',
- }
+ },
}
diff --git a/dojo/sla_config/views.py b/dojo/sla_config/views.py
index da0c6b6a28..28aefd0c3b 100644
--- a/dojo/sla_config/views.py
+++ b/dojo/sla_config/views.py
@@ -24,7 +24,7 @@ def new_sla_config(request):
messages.SUCCESS,
'SLA configuration Successfully Created.',
extra_tags='alert-success')
- return HttpResponseRedirect(reverse('sla_config', ))
+ return HttpResponseRedirect(reverse('sla_config'))
else:
tform = SLAConfigForm()
add_breadcrumb(
@@ -55,13 +55,13 @@ def edit_sla_config(request, slaid):
messages.SUCCESS,
'SLA Configuration Deleted.',
extra_tags='alert-success')
- return HttpResponseRedirect(reverse('sla_config', ))
+ return HttpResponseRedirect(reverse('sla_config'))
else:
messages.add_message(request,
messages.ERROR,
'The Default SLA Configuration cannot be deleted.',
extra_tags='alert-danger')
- return HttpResponseRedirect(reverse('sla_config', ))
+ return HttpResponseRedirect(reverse('sla_config'))
elif request.method == 'POST':
form = SLAConfigForm(request.POST, instance=sla_config)
@@ -71,7 +71,7 @@ def edit_sla_config(request, slaid):
messages.SUCCESS,
'SLA configuration successfully updated. All SLA expiration dates for findings within this SLA configuration will be recalculated asynchronously.',
extra_tags='alert-success')
- return HttpResponseRedirect(reverse('sla_config', ))
+ return HttpResponseRedirect(reverse('sla_config'))
else:
form = SLAConfigForm(instance=sla_config)
@@ -100,5 +100,5 @@ def sla_config(request):
return render(request,
'dojo/sla_config.html',
{'confs': confs,
- 'settings': settings
+ 'settings': settings,
})
diff --git a/dojo/survey/views.py b/dojo/survey/views.py
index 091d68492e..5e036c6856 100644
--- a/dojo/survey/views.py
+++ b/dojo/survey/views.py
@@ -90,7 +90,7 @@ def delete_engagement_survey(request, eid, sid):
'survey': survey,
'form': form,
'engagement': engagement,
- 'questions': questions
+ 'questions': questions,
})
@@ -196,7 +196,7 @@ def view_questionnaire(request, eid, sid):
'user': request.user,
'engagement': engagement,
'questions': questions,
- 'name': survey.survey.name + " Questionnaire Responses"
+ 'name': survey.survey.name + " Questionnaire Responses",
})
@@ -254,7 +254,7 @@ def add_questionnaire(request, eid):
'surveys': surveys,
'user': user,
'form': form,
- 'engagement': engagement
+ 'engagement': engagement,
})
@@ -519,7 +519,7 @@ def create_question(request):
'name': 'Add Question',
'form': form,
'textForm': textQuestionForm,
- 'choiceForm': choiceQuestionFrom
+ 'choiceForm': choiceQuestionFrom,
})
@@ -582,7 +582,7 @@ def edit_question(request, qid):
return render(request, 'defectDojo-engagement-survey/edit_question.html', {
'name': 'Edit Question',
'question': question,
- 'form': form
+ 'form': form,
})
@@ -608,7 +608,7 @@ def add_choices(request):
add_breadcrumb(title="Add Choice", top_level=False, request=request)
return render(request, 'defectDojo-engagement-survey/add_choices.html', {
'name': 'Add Choice',
- 'form': form
+ 'form': form,
})
@@ -646,7 +646,7 @@ def add_empty_questionnaire(request):
'surveys': surveys,
'user': user,
'form': form,
- 'engagement': engagement
+ 'engagement': engagement,
})
@@ -664,7 +664,7 @@ def view_empty_survey(request, esid):
'user': request.user,
'engagement': engagement,
'questions': questions,
- 'name': survey.survey.name + " Questionnaire Responses"
+ 'name': survey.survey.name + " Questionnaire Responses",
})
diff --git a/dojo/system_settings/urls.py b/dojo/system_settings/urls.py
index 2e8d284e26..da5788bc81 100644
--- a/dojo/system_settings/urls.py
+++ b/dojo/system_settings/urls.py
@@ -6,6 +6,6 @@
re_path(
r'^system_settings$',
views.SystemSettingsView.as_view(),
- name='system_settings'
- )
+ name='system_settings',
+ ),
]
diff --git a/dojo/system_settings/views.py b/dojo/system_settings/views.py
index 365f06ca63..991fe46ca2 100644
--- a/dojo/system_settings/views.py
+++ b/dojo/system_settings/views.py
@@ -34,7 +34,7 @@ def get_context(
# Set the initial context
context = {
"system_settings_obj": system_settings_obj,
- "form": self.get_form(request, system_settings_obj)
+ "form": self.get_form(request, system_settings_obj),
}
# Check the status of celery
self.get_celery_status(context)
diff --git a/dojo/templatetags/display_tags.py b/dojo/templatetags/display_tags.py
index 514cc685df..42b82dd085 100644
--- a/dojo/templatetags/display_tags.py
+++ b/dojo/templatetags/display_tags.py
@@ -53,25 +53,25 @@
}
markdown_styles = [
- "background-color"
+ "background-color",
]
finding_related_action_classes_dict = {
'reset_finding_duplicate_status': 'fa-solid fa-eraser',
'set_finding_as_original': 'fa-brands fa-superpowers',
- 'mark_finding_duplicate': 'fa-solid fa-copy'
+ 'mark_finding_duplicate': 'fa-solid fa-copy',
}
finding_related_action_title_dict = {
'reset_finding_duplicate_status': 'Reset duplicate status',
'set_finding_as_original': 'Set as original',
- 'mark_finding_duplicate': 'Mark as duplicate'
+ 'mark_finding_duplicate': 'Mark as duplicate',
}
supported_file_formats = [
'apng', 'avif', 'gif', 'jpg',
'jpeg', 'jfif', 'pjpeg', 'pjp',
- 'png', 'svg', 'webp', 'pdf'
+ 'png', 'svg', 'webp', 'pdf',
]
@@ -237,7 +237,7 @@ def asvs_level(benchmark_score):
return _("Checklist is %(level)s full (pass: %(total_viewed)s, total: %(total)s)") % {
'level': level,
'total_viewed': total_viewed,
- 'total': total
+ 'total': total,
}
@@ -734,7 +734,7 @@ def finding_display_status(finding):
if 'Risk Accepted' in display_status:
ra = finding.risk_acceptance
if ra:
- url = reverse('view_risk_acceptance', args=(finding.test.engagement.id, ra.id, ))
+ url = reverse('view_risk_acceptance', args=(finding.test.engagement.id, ra.id))
info = ra.name_and_expiration_info
link = 'Risk Accepted'
display_status = display_status.replace('Risk Accepted', link)
diff --git a/dojo/test/signals.py b/dojo/test/signals.py
index 47d4fdffb8..84b3de5571 100644
--- a/dojo/test/signals.py
+++ b/dojo/test/signals.py
@@ -19,7 +19,7 @@ def test_post_delete(sender, instance, using, origin, **kwargs):
le = LogEntry.objects.get(
action=LogEntry.Action.DELETE,
content_type=ContentType.objects.get(app_label='dojo', model='test'),
- object_id=instance.id
+ object_id=instance.id,
)
description = _('The test "%(name)s" was deleted by %(user)s') % {
'name': str(instance), 'user': le.actor}
diff --git a/dojo/test/urls.py b/dojo/test/urls.py
index c77aca7690..63a96711c5 100644
--- a/dojo/test/urls.py
+++ b/dojo/test/urls.py
@@ -8,7 +8,7 @@
re_path(
r'^test/(?P<tid>\d+)$',
views.ViewTest.as_view(),
- name='view_test'
+ name='view_test',
),
re_path(r'^test/(?P<tid>\d+)/ics$', views.test_ics,
name='test_ics'),
diff --git a/dojo/test/views.py b/dojo/test/views.py
index d15d518863..202247ad33 100644
--- a/dojo/test/views.py
+++ b/dojo/test/views.py
@@ -165,7 +165,7 @@ def get_typed_note_form(self, request: HttpRequest, context: dict):
args = [request.POST] if request.method == "POST" else []
# Set the initial form args
kwargs = {
- "available_note_types": context.get("available_note_types")
+ "available_note_types": context.get("available_note_types"),
}
return TypedNoteForm(*args, **kwargs)
@@ -437,9 +437,9 @@ def test_ics(request, tid):
_(f"Test: {test.test_type.name} ({test.engagement.product.name}"),
_(
f"Set aside for test {test.test_type.name}, on product {test.engagement.product.name}. "
- f"Additional detail can be found at {request.build_absolute_uri(reverse('view_test', args=(test.id,)))}"
+ f"Additional detail can be found at {request.build_absolute_uri(reverse('view_test', args=(test.id,)))}",
),
- uid
+ uid,
)
output = cal.serialize()
response = HttpResponse(content=output)
@@ -579,7 +579,7 @@ def process_jira_form(self, request: HttpRequest, finding: Finding, context: dic
# Determine if a message should be added
if jira_message:
messages.add_message(
- request, messages.SUCCESS, jira_message, extra_tags="alert-success"
+ request, messages.SUCCESS, jira_message, extra_tags="alert-success",
)
return request, True, push_to_jira
@@ -845,12 +845,12 @@ def get_jira_form(
jira_form = JIRAImportScanForm(
request.POST,
push_all=push_all_jira_issues,
- prefix='jiraform'
+ prefix='jiraform',
)
else:
jira_form = JIRAImportScanForm(
push_all=push_all_jira_issues,
- prefix='jiraform'
+ prefix='jiraform',
)
return jira_form, push_all_jira_issues
@@ -997,7 +997,7 @@ def reimport_findings(
untouched_finding_count,
_,
) = importer_client.process_scan(
- context.pop("scan", None)
+ context.pop("scan", None),
)
# Add a message to the view for the user to see the results
add_success_message_to_response(importer_client.construct_imported_message(
diff --git a/dojo/tool_config/views.py b/dojo/tool_config/views.py
index de8976e063..4744a260c6 100644
--- a/dojo/tool_config/views.py
+++ b/dojo/tool_config/views.py
@@ -34,7 +34,7 @@ def new_tool_config(request):
messages.SUCCESS,
'Tool Configuration successfully updated.',
extra_tags='alert-success')
- return HttpResponseRedirect(reverse('tool_config', ))
+ return HttpResponseRedirect(reverse('tool_config'))
except Exception as e:
logger.exception(e)
messages.add_message(request,
@@ -72,7 +72,7 @@ def edit_tool_config(request, ttid):
messages.SUCCESS,
'Tool Configuration successfully updated.',
extra_tags='alert-success')
- return HttpResponseRedirect(reverse('tool_config', ))
+ return HttpResponseRedirect(reverse('tool_config'))
except Exception as e:
logger.info(e)
messages.add_message(request,
diff --git a/dojo/tool_product/views.py b/dojo/tool_product/views.py
index 1564cb0ad5..ff24442d5f 100644
--- a/dojo/tool_product/views.py
+++ b/dojo/tool_product/views.py
@@ -41,7 +41,7 @@ def new_tool_product(request, pid):
return render(request, 'dojo/new_tool_product.html', {
'tform': tform,
'product_tab': product_tab,
- 'pid': pid
+ 'pid': pid,
})
@@ -53,7 +53,7 @@ def all_tool_product(request, pid):
return render(request, 'dojo/view_tool_product_all.html', {
'prod': prod,
'tools': tools,
- 'product_tab': product_tab
+ 'product_tab': product_tab,
})
@@ -81,7 +81,7 @@ def edit_tool_product(request, pid, ttid):
product_tab = Product_Tab(product, title=_("Edit Product Tool Configuration"), tab="settings")
return render(request, 'dojo/edit_tool_product.html', {
'tform': tform,
- 'product_tab': product_tab
+ 'product_tab': product_tab,
})
@@ -109,5 +109,5 @@ def delete_tool_product(request, pid, ttid):
return render(request, 'dojo/delete_tool_product.html', {
'tform': tform,
- 'product_tab': product_tab
+ 'product_tab': product_tab,
})
diff --git a/dojo/tool_type/views.py b/dojo/tool_type/views.py
index 975f174246..75683718c4 100644
--- a/dojo/tool_type/views.py
+++ b/dojo/tool_type/views.py
@@ -25,7 +25,7 @@ def new_tool_type(request):
messages.SUCCESS,
_('Tool Type Configuration Successfully Created.'),
extra_tags='alert-success')
- return HttpResponseRedirect(reverse('tool_type', ))
+ return HttpResponseRedirect(reverse('tool_type'))
else:
tform = ToolTypeForm()
if 'name' in request.GET:
@@ -46,7 +46,7 @@ def edit_tool_type(request, ttid):
messages.SUCCESS,
_('Tool Type successfully updated.'),
extra_tags='alert-success')
- return HttpResponseRedirect(reverse('tool_type', ))
+ return HttpResponseRedirect(reverse('tool_type'))
else:
tform = ToolTypeForm(instance=tool_type)
diff --git a/dojo/tools/acunetix/parse_acunetix360_json.py b/dojo/tools/acunetix/parse_acunetix360_json.py
index 4398870542..fcff232a55 100644
--- a/dojo/tools/acunetix/parse_acunetix360_json.py
+++ b/dojo/tools/acunetix/parse_acunetix360_json.py
@@ -78,7 +78,7 @@ def get_findings(self, filename, test):
and (item["Classification"]["Cvss"]["Vector"] is not None)
):
cvss_objects = cvss_parser.parse_cvss_from_text(
- item["Classification"]["Cvss"]["Vector"]
+ item["Classification"]["Cvss"]["Vector"],
)
if len(cvss_objects) > 0:
finding.cvssv3 = cvss_objects[0].clean_vector()
diff --git a/dojo/tools/acunetix/parse_acunetix_xml.py b/dojo/tools/acunetix/parse_acunetix_xml.py
index ae6ca8d5ee..22171bf24b 100644
--- a/dojo/tools/acunetix/parse_acunetix_xml.py
+++ b/dojo/tools/acunetix/parse_acunetix_xml.py
@@ -24,7 +24,7 @@ def get_findings(self, filename, test):
# get report date
if scan.findtext("StartTime") and "" != scan.findtext("StartTime"):
report_date = dateutil.parser.parse(
- scan.findtext("StartTime")
+ scan.findtext("StartTime"),
).date()
for item in scan.findall("ReportItems/ReportItem"):
finding = Finding(
@@ -32,10 +32,10 @@ def get_findings(self, filename, test):
title=item.findtext("Name"),
severity=self.get_severity(item.findtext("Severity")),
description=html2text.html2text(
- item.findtext("Description")
+ item.findtext("Description"),
).strip(),
false_p=self.get_false_positive(
- item.findtext("IsFalsePositive")
+ item.findtext("IsFalsePositive"),
),
static_finding=True,
dynamic_finding=False,
@@ -44,14 +44,14 @@ def get_findings(self, filename, test):
if item.findtext("Impact") and "" != item.findtext("Impact"):
finding.impact = item.findtext("Impact")
if item.findtext("Recommendation") and "" != item.findtext(
- "Recommendation"
+ "Recommendation",
):
finding.mitigation = item.findtext("Recommendation")
if report_date:
finding.date = report_date
if item.findtext("CWEList/CWE"):
finding.cwe = self.get_cwe_number(
- item.findtext("CWEList/CWE")
+ item.findtext("CWEList/CWE"),
)
references = []
for reference in item.findall("References/Reference"):
@@ -62,7 +62,7 @@ def get_findings(self, filename, test):
finding.references = "\n".join(references)
if item.findtext("CVSS3/Descriptor"):
cvss_objects = cvss_parser.parse_cvss_from_text(
- item.findtext("CVSS3/Descriptor")
+ item.findtext("CVSS3/Descriptor"),
)
if len(cvss_objects) > 0:
finding.cvssv3 = cvss_objects[0].clean_vector()
@@ -72,7 +72,7 @@ def get_findings(self, filename, test):
and len(item.findtext("Details").strip()) > 0
):
finding.description += "\n\n**Details:**\n{}".format(
- html2text.html2text(item.findtext("Details"))
+ html2text.html2text(item.findtext("Details")),
)
if (
item.findtext("TechnicalDetails")
@@ -80,7 +80,7 @@ def get_findings(self, filename, test):
):
finding.description += (
"\n\n**TechnicalDetails:**\n\n{}".format(
- item.findtext("TechnicalDetails")
+ item.findtext("TechnicalDetails"),
)
)
# add requests
@@ -94,7 +94,7 @@ def get_findings(self, filename, test):
)
for request in item.findall("TechnicalDetails/Request"):
finding.unsaved_req_resp.append(
- {"req": (request.text or ""), "resp": ""}
+ {"req": (request.text or ""), "resp": ""},
)
# manage the endpoint
url = hyperlink.parse(start_url)
@@ -112,8 +112,8 @@ def get_findings(self, filename, test):
finding.title,
str(finding.impact),
str(finding.mitigation),
- ]
- ).encode("utf-8")
+ ],
+ ).encode("utf-8"),
).hexdigest()
if dupe_key in dupes:
find = dupes[dupe_key]
@@ -124,14 +124,14 @@ def get_findings(self, filename, test):
):
find.description += (
"\n-----\n\n**Details:**\n{}".format(
- html2text.html2text(item.findtext("Details"))
+ html2text.html2text(item.findtext("Details")),
)
)
find.unsaved_endpoints.extend(finding.unsaved_endpoints)
find.unsaved_req_resp.extend(finding.unsaved_req_resp)
find.nb_occurences += finding.nb_occurences
logger.debug(
- f"Duplicate finding : {finding.title}"
+ f"Duplicate finding : {finding.title}",
)
else:
dupes[dupe_key] = finding
diff --git a/dojo/tools/anchore_engine/parser.py b/dojo/tools/anchore_engine/parser.py
index aeb2aab875..0e3ed39fdd 100644
--- a/dojo/tools/anchore_engine/parser.py
+++ b/dojo/tools/anchore_engine/parser.py
@@ -101,7 +101,7 @@ def get_findings(self, filename, test):
dupe_key = "|".join(
[
item.get(
- "image_digest", item.get("imageDigest", "None")
+ "image_digest", item.get("imageDigest", "None"),
), # depending on version image_digest/imageDigest
item["feed"],
item["feed_group"],
@@ -109,7 +109,7 @@ def get_findings(self, filename, test):
item["package_version"],
item["package_path"],
item["vuln"],
- ]
+ ],
)
if dupe_key in dupes:
diff --git a/dojo/tools/anchore_enterprise/parser.py b/dojo/tools/anchore_enterprise/parser.py
index 03e7cc1ee8..e58ee166d3 100644
--- a/dojo/tools/anchore_enterprise/parser.py
+++ b/dojo/tools/anchore_enterprise/parser.py
@@ -43,7 +43,7 @@ def get_findings(self, filename, test):
repo, tag = row[1].split(":", 2)
description = row[5]
severity = map_gate_action_to_severity(
- row[6]
+ row[6],
)
policyid = row[8]
policyname = policy_name(
@@ -79,7 +79,7 @@ def get_findings(self, filename, test):
)
if vulnerability_id:
find.unsaved_vulnerability_ids = [
- vulnerability_id
+ vulnerability_id,
]
items.append(find)
except (KeyError, IndexError) as err:
@@ -89,7 +89,7 @@ def get_findings(self, filename, test):
# import empty policies without error (e.g. policies or images
# objects are not a dictionary)
logger.warning(
- "Exception at %s", "parsing anchore policy", exc_info=err
+ "Exception at %s", "parsing anchore policy", exc_info=err,
)
return items
diff --git a/dojo/tools/anchore_grype/parser.py b/dojo/tools/anchore_grype/parser.py
index 395955b1eb..c457f63e65 100644
--- a/dojo/tools/anchore_grype/parser.py
+++ b/dojo/tools/anchore_grype/parser.py
@@ -53,7 +53,7 @@ def get_findings(self, file, test):
rel_description = related_vulnerability.get("description")
rel_cvss = related_vulnerability.get("cvss")
vulnerability_ids = self.get_vulnerability_ids(
- vuln_id, related_vulnerabilities
+ vuln_id, related_vulnerabilities,
)
matches = item["matchDetails"]
@@ -96,7 +96,7 @@ def get_findings(self, file, test):
f"\n**Matcher:** {matches[0]['matcher']}"
)
finding_tags = [
- matches[0]["matcher"].replace("-matcher", "")
+ matches[0]["matcher"].replace("-matcher", ""),
]
else:
finding_description += "\n**Matchers:**"
@@ -198,7 +198,7 @@ def get_cvss(self, cvss):
vector = cvss_item["vector"]
cvss_objects = cvss_parser.parse_cvss_from_text(vector)
if len(cvss_objects) > 0 and isinstance(
- cvss_objects[0], CVSS3
+ cvss_objects[0], CVSS3,
):
return vector
return None
diff --git a/dojo/tools/anchorectl_policies/parser.py b/dojo/tools/anchorectl_policies/parser.py
index 1e31b08e68..30dd42e32b 100644
--- a/dojo/tools/anchorectl_policies/parser.py
+++ b/dojo/tools/anchorectl_policies/parser.py
@@ -72,7 +72,7 @@ def get_findings(self, filename, test):
# import empty policies without error (e.g. policies or images
# objects are not a dictionary)
logger.warning(
- "Exception at %s", "parsing anchore policy", exc_info=err
+ "Exception at %s", "parsing anchore policy", exc_info=err,
)
return items
diff --git a/dojo/tools/anchorectl_vulns/parser.py b/dojo/tools/anchorectl_vulns/parser.py
index 70371a955b..13632e84b8 100644
--- a/dojo/tools/anchorectl_vulns/parser.py
+++ b/dojo/tools/anchorectl_vulns/parser.py
@@ -92,7 +92,7 @@ def get_findings(self, filename, test):
dupe_key = "|".join(
[
item.get(
- "imageDigest", "None"
+ "imageDigest", "None",
), # depending on version image_digest/imageDigest
item["feed"],
item["feedGroup"],
@@ -100,7 +100,7 @@ def get_findings(self, filename, test):
item["packageVersion"],
item["packagePath"],
item["vuln"],
- ]
+ ],
)
if dupe_key in dupes:
diff --git a/dojo/tools/api_blackduck/api_client.py b/dojo/tools/api_blackduck/api_client.py
index 6d5342d580..98c0aeb533 100644
--- a/dojo/tools/api_blackduck/api_client.py
+++ b/dojo/tools/api_blackduck/api_client.py
@@ -47,5 +47,5 @@ def get_vulnerable_bom_components(self, version):
def get_vulnerabilities(self, component):
return self.client.get_json(
- f'/api/vulnerabilities/{component["vulnerabilityWithRemediation"]["vulnerabilityName"]}'
+ f'/api/vulnerabilities/{component["vulnerabilityWithRemediation"]["vulnerabilityName"]}',
)
diff --git a/dojo/tools/api_blackduck/parser.py b/dojo/tools/api_blackduck/parser.py
index 0be6680787..ccd228c89c 100644
--- a/dojo/tools/api_blackduck/parser.py
+++ b/dojo/tools/api_blackduck/parser.py
@@ -52,7 +52,7 @@ def get_findings(self, file, test):
test=test,
title=f"{vulnerability_id} in {component_name}:{component_version}",
description=entry["vulnerabilityWithRemediation"].get(
- "description"
+ "description",
),
severity=entry["vulnerabilityWithRemediation"][
"severity"
@@ -62,13 +62,13 @@ def get_findings(self, file, test):
static_finding=True,
dynamic_finding=False,
unique_id_from_tool=entry["vulnerabilityWithRemediation"].get(
- "vulnerabilityName"
+ "vulnerabilityName",
),
)
# get CWE
if entry["vulnerabilityWithRemediation"].get("cweId"):
cwe_raw = entry["vulnerabilityWithRemediation"]["cweId"].split(
- "-"
+ "-",
)
if len(cwe_raw) == 2 and cwe_raw[1].isdigit():
finding.cwe = int(cwe_raw[1])
diff --git a/dojo/tools/api_bugcrowd/api_client.py b/dojo/tools/api_bugcrowd/api_client.py
index 6bed971e31..bf76608380 100644
--- a/dojo/tools/api_bugcrowd/api_client.py
+++ b/dojo/tools/api_bugcrowd/api_client.py
@@ -20,7 +20,7 @@ def __init__(self, tool_config):
if tool_config.authentication_type == "API":
self.api_token = tool_config.api_key
self.session.headers.update(
- {"Authorization": f"Token {self.api_token}"}
+ {"Authorization": f"Token {self.api_token}"},
)
self.session.headers.update(self.default_headers)
else:
@@ -67,7 +67,7 @@ def get_findings(self, program, target):
# Otherwise, keep updating next link
next = "{}{}".format(
- self.bugcrowd_api_url, data["links"]["next"]
+ self.bugcrowd_api_url, data["links"]["next"],
)
else:
next = "over"
@@ -75,13 +75,13 @@ def get_findings(self, program, target):
def test_connection(self):
# Request programs
response_programs = self.session.get(
- url=f"{self.bugcrowd_api_url}/programs"
+ url=f"{self.bugcrowd_api_url}/programs",
)
response_programs.raise_for_status()
# Request submissions to validate the org token
response_subs = self.session.get(
- url=f"{self.bugcrowd_api_url}/submissions"
+ url=f"{self.bugcrowd_api_url}/submissions",
)
response_subs.raise_for_status()
if response_programs.ok and response_subs.ok:
@@ -91,20 +91,20 @@ def test_connection(self):
progs = list(filter(lambda prog: prog["type"] == "program", data))
program_names = ", ".join(
- [p["attributes"]["code"] for p in progs]
+ [p["attributes"]["code"] for p in progs],
)
# Request targets to validate the org token
response_targets = self.session.get(
- url=f"{self.bugcrowd_api_url}/targets"
+ url=f"{self.bugcrowd_api_url}/targets",
)
response_targets.raise_for_status()
if response_targets.ok:
data_targets = response_targets.json().get("data")
targets = list(
- filter(lambda prog: prog["type"] == "target", data_targets)
+ filter(lambda prog: prog["type"] == "target", data_targets),
)
target_names = ", ".join(
- [p["attributes"]["name"] for p in targets]
+ [p["attributes"]["name"] for p in targets],
)
return (
f'With {total_subs} submissions, you have access to the "{program_names}" '
diff --git a/dojo/tools/api_bugcrowd/importer.py b/dojo/tools/api_bugcrowd/importer.py
index 677174cac0..4fb1219cb3 100644
--- a/dojo/tools/api_bugcrowd/importer.py
+++ b/dojo/tools/api_bugcrowd/importer.py
@@ -17,7 +17,7 @@ class BugcrowdApiImporter:
def get_findings(self, test):
client, config = self.prepare_client(test)
logger.debug(
- f"Fetching submissions program {str(config.service_key_1)} and target {str(config.service_key_2)}"
+ f"Fetching submissions program {str(config.service_key_1)} and target {str(config.service_key_2)}",
)
submissions_paged = client.get_findings(
diff --git a/dojo/tools/api_bugcrowd/parser.py b/dojo/tools/api_bugcrowd/parser.py
index 6ad71f295c..df9dbbf131 100644
--- a/dojo/tools/api_bugcrowd/parser.py
+++ b/dojo/tools/api_bugcrowd/parser.py
@@ -62,11 +62,11 @@ def get_findings(self, file, test):
if test.api_scan_configuration:
config = test.api_scan_configuration
links = "https://tracker.bugcrowd.com/{}{}".format(
- str(config.service_key_1), entry["links"]["self"]
+ str(config.service_key_1), entry["links"]["self"],
)
if api_scan_config is not None:
links = "https://tracker.bugcrowd.com/{}{}".format(
- str(api_scan_config.service_key_1), entry["links"]["self"]
+ str(api_scan_config.service_key_1), entry["links"]["self"],
)
else:
links = None
@@ -94,12 +94,12 @@ def get_findings(self, file, test):
"://" in entry["attributes"]["bug_url"]
): # is the host full uri?
bug_endpoint = Endpoint.from_uri(
- entry["attributes"]["bug_url"].strip()
+ entry["attributes"]["bug_url"].strip(),
)
# can raise exception if the host is not valid URL
else:
bug_endpoint = Endpoint.from_uri(
- "//" + entry["attributes"]["bug_url"].strip()
+ "//" + entry["attributes"]["bug_url"].strip(),
)
# can raise exception if there is no way to parse the
# host
@@ -108,8 +108,8 @@ def get_findings(self, file, test):
): # We don't want to fail the whole import just for 1 error in the bug_url
logger.error(
"Error parsing bugcrowd bug_url : {}".format(
- entry["attributes"]["bug_url"].strip()
- )
+ entry["attributes"]["bug_url"].strip(),
+ ),
)
bug_url = entry["attributes"]["bug_url"]
@@ -122,7 +122,7 @@ def get_findings(self, file, test):
f"- Bug Url: [{bug_url}]({bug_url})",
"",
f"Bugcrowd link: [{links}]({links})",
- ]
+ ],
)
mitigation = entry["attributes"]["remediation_advice"]
steps_to_reproduce = entry["attributes"]["description"]
@@ -160,11 +160,11 @@ def get_findings(self, file, test):
finding.unsaved_endpoints = [bug_endpoint]
except Exception as e:
logger.error(
- f"{str(bug_endpoint)} bug url from bugcrowd failed to parse to endpoint, error= {e}"
+ f"{str(bug_endpoint)} bug url from bugcrowd failed to parse to endpoint, error= {e}",
)
except ValidationError:
logger.error(
- f"Broken Bugcrowd endpoint {bug_endpoint.host} was skipped."
+ f"Broken Bugcrowd endpoint {bug_endpoint.host} was skipped.",
)
findings.append(finding)
@@ -202,7 +202,7 @@ def include_finding(self, entry):
else:
msg = (
"{} not in allowed bugcrowd submission states".format(
- entry["attributes"]["state"]
+ entry["attributes"]["state"],
)
)
raise ValueError(msg)
diff --git a/dojo/tools/api_cobalt/api_client.py b/dojo/tools/api_cobalt/api_client.py
index 36f37d734d..c18234ae73 100644
--- a/dojo/tools/api_cobalt/api_client.py
+++ b/dojo/tools/api_cobalt/api_client.py
@@ -44,7 +44,7 @@ def get_assets(self):
else:
msg = (
"Unable to get assets due to {} - {}".format(
- response.status_code, response.content.decode("utf-8")
+ response.status_code, response.content.decode("utf-8"),
)
)
raise Exception(msg)
@@ -65,7 +65,7 @@ def get_findings(self, asset_id):
else:
msg = (
"Unable to get asset findings due to {} - {}".format(
- response.status_code, response.content.decode("utf-8")
+ response.status_code, response.content.decode("utf-8"),
)
)
raise Exception(msg)
@@ -86,7 +86,7 @@ def test_connection(self):
if response_orgs.ok and response_assets.ok:
data = response_orgs.json().get("data")
orgs = filter(
- lambda org: org["resource"]["token"] == self.org_token, data
+ lambda org: org["resource"]["token"] == self.org_token, data,
)
org = list(orgs)[0]
org_name = org["resource"]["name"]
diff --git a/dojo/tools/api_cobalt/parser.py b/dojo/tools/api_cobalt/parser.py
index df0425d92b..fa82acabf5 100644
--- a/dojo/tools/api_cobalt/parser.py
+++ b/dojo/tools/api_cobalt/parser.py
@@ -67,7 +67,7 @@ def get_findings(self, file, test):
"",
"Cobalt.io link:",
links["ui"]["url"],
- ]
+ ],
)
mitigation = resource["suggested_fix"]
steps_to_reproduce = resource["proof_of_concept"]
diff --git a/dojo/tools/api_edgescan/importer.py b/dojo/tools/api_edgescan/importer.py
index e740051afa..e4e9bf0c98 100644
--- a/dojo/tools/api_edgescan/importer.py
+++ b/dojo/tools/api_edgescan/importer.py
@@ -27,7 +27,7 @@ def prepare_client(self, test):
raise ValidationError(msg)
else:
configs = Product_API_Scan_Configuration.objects.filter(
- product=product
+ product=product,
)
if configs.count() == 1:
config = configs.first()
diff --git a/dojo/tools/api_edgescan/parser.py b/dojo/tools/api_edgescan/parser.py
index b9becbfc5d..66b00f9246 100644
--- a/dojo/tools/api_edgescan/parser.py
+++ b/dojo/tools/api_edgescan/parser.py
@@ -60,7 +60,7 @@ def make_finding(self, test, vulnerability):
if vulnerability["cvss_version"] == 3:
if vulnerability["cvss_vector"]:
cvss_objects = cvss_parser.parse_cvss_from_text(
- vulnerability["cvss_vector"]
+ vulnerability["cvss_vector"],
)
if len(cvss_objects) > 0:
finding.cvssv3 = cvss_objects[0].clean_vector()
@@ -76,7 +76,7 @@ def make_finding(self, test, vulnerability):
finding.unsaved_endpoints = [
Endpoint.from_uri(vulnerability["location"])
if "://" in vulnerability["location"]
- else Endpoint.from_uri("//" + vulnerability["location"])
+ else Endpoint.from_uri("//" + vulnerability["location"]),
]
return finding
diff --git a/dojo/tools/api_sonarqube/api_client.py b/dojo/tools/api_sonarqube/api_client.py
index 09a983d744..e42150f641 100644
--- a/dojo/tools/api_sonarqube/api_client.py
+++ b/dojo/tools/api_sonarqube/api_client.py
@@ -281,7 +281,7 @@ def get_rule(self, rule_id, organization=None):
rule = self.rules_cache.get(rule_id)
if not rule:
request_filter = {
- "key": rule_id
+ "key": rule_id,
}
if organization:
request_filter["organization"] = organization
@@ -424,7 +424,7 @@ def test_connection(self):
def test_product_connection(self, api_scan_configuration):
organization = api_scan_configuration.service_key_2 or None
project = self.get_project(
- api_scan_configuration.service_key_1, organization=organization
+ api_scan_configuration.service_key_1, organization=organization,
)
project_name = project.get("name")
message_prefix = "You have access to project"
diff --git a/dojo/tools/api_sonarqube/importer.py b/dojo/tools/api_sonarqube/importer.py
index 79794e3a56..a8bd748108 100644
--- a/dojo/tools/api_sonarqube/importer.py
+++ b/dojo/tools/api_sonarqube/importer.py
@@ -128,7 +128,7 @@ def import_issues(self, test):
branch=test.branch_tag,
)
logging.info(
- f'Found {len(issues)} issues for component {component["key"]}'
+ f'Found {len(issues)} issues for component {component["key"]}',
)
sonarUrl = client.sonar_api_url[:-3] # [:-3] removes the /api part of the sonarqube/cloud URL
@@ -158,7 +158,7 @@ def import_issues(self, test):
# custom (user defined) SQ rules may not have 'htmlDesc'
if "htmlDesc" in rule:
description = self.clean_rule_description_html(
- rule["htmlDesc"]
+ rule["htmlDesc"],
)
cwe = self.clean_cwe(rule["htmlDesc"])
references = sonarqube_permalink + self.get_references(rule["htmlDesc"])
@@ -178,7 +178,7 @@ def import_issues(self, test):
# Only assign the SonarQube_issue to the first finding related
# to the issue
if Finding.objects.filter(
- sonarqube_issue=sonarqube_issue
+ sonarqube_issue=sonarqube_issue,
).exists():
sonarqube_issue = None
@@ -247,7 +247,7 @@ def import_hotspots(self, test):
branch=test.branch_tag,
)
logging.info(
- f'Found {len(hotspots)} hotspots for project {component["key"]}'
+ f'Found {len(hotspots)} hotspots for project {component["key"]}',
)
sonarUrl = client.sonar_api_url[:-3] # [:-3] removes the /api part of the sonarqube/cloud URL
@@ -269,19 +269,19 @@ def import_hotspots(self, test):
else:
severity = "Info"
title = textwrap.shorten(
- text=hotspot.get("message", ""), width=500
+ text=hotspot.get("message", ""), width=500,
)
component_key = hotspot.get("component")
line = hotspot.get("line")
rule_id = hotspot.get("key", "")
rule = client.get_hotspot_rule(rule_id)
scanner_confidence = self.convert_scanner_confidence(
- hotspot.get("vulnerabilityProbability", "")
+ hotspot.get("vulnerabilityProbability", ""),
)
description = self.clean_rule_description_html(
rule.get(
- "vulnerabilityDescription", "No description provided."
- )
+ "vulnerabilityDescription", "No description provided.",
+ ),
)
cwe = self.clean_cwe(rule.get("fixRecommendations", ""))
try:
@@ -289,7 +289,7 @@ def import_hotspots(self, test):
except KeyError:
sonarqube_permalink = "No permalink \n"
references = sonarqube_permalink + self.get_references(
- rule.get("riskDescription", "")
+ rule.get("riskDescription", ""),
) + self.get_references(rule.get("fixRecommendations", ""))
sonarqube_issue, _ = Sonarqube_Issue.objects.update_or_create(
@@ -300,7 +300,7 @@ def import_hotspots(self, test):
# Only assign the SonarQube_issue to the first finding related
# to the issue
if Finding.objects.filter(
- sonarqube_issue=sonarqube_issue
+ sonarqube_issue=sonarqube_issue,
).exists():
sonarqube_issue = None
diff --git a/dojo/tools/api_sonarqube/updater.py b/dojo/tools/api_sonarqube/updater.py
index 67c724660d..980079f894 100644
--- a/dojo/tools/api_sonarqube/updater.py
+++ b/dojo/tools/api_sonarqube/updater.py
@@ -68,7 +68,7 @@ def get_sonarqube_status_for(finding):
return target_status
def get_sonarqube_required_transitions_for(
- self, current_status, target_status
+ self, current_status, target_status,
):
# If current and target is the same... do nothing
if current_status == target_status:
@@ -107,7 +107,7 @@ def get_sonarqube_required_transitions_for(
for t_from in transition.get("from"):
possible_transition = (
self.get_sonarqube_required_transitions_for(
- current_status, t_from
+ current_status, t_from,
)
)
if possible_transition:
@@ -120,7 +120,7 @@ def update_sonarqube_finding(self, finding):
return
logger.debug(
- f"Checking if finding '{finding}' needs to be updated in SonarQube"
+ f"Checking if finding '{finding}' needs to be updated in SonarQube",
)
client, _ = SonarQubeApiImporter.prepare_client(finding.test)
@@ -135,21 +135,21 @@ def update_sonarqube_finding(self, finding):
): # Issue could have disappeared in SQ because a previous scan has resolved the issue as fixed
if issue.get("resolution"):
current_status = "{} / {}".format(
- issue.get("status"), issue.get("resolution")
+ issue.get("status"), issue.get("resolution"),
)
else:
current_status = issue.get("status")
logger.debug(
- f"--> SQ Current status: {current_status}. Current target status: {target_status}"
+ f"--> SQ Current status: {current_status}. Current target status: {target_status}",
)
transitions = self.get_sonarqube_required_transitions_for(
- current_status, target_status
+ current_status, target_status,
)
if transitions:
logger.info(
- f"Updating finding '{finding}' in SonarQube"
+ f"Updating finding '{finding}' in SonarQube",
)
for transition in transitions:
@@ -162,7 +162,7 @@ def update_sonarqube_finding(self, finding):
# to sonarqube we changed Accepted into Risk Accepted, but we change it back to be sure we don't
# break the integration
finding_status=finding.status().replace(
- "Risk Accepted", "Accepted"
+ "Risk Accepted", "Accepted",
)
if finding.status()
else finding.status(),
diff --git a/dojo/tools/api_sonarqube/updater_from_source.py b/dojo/tools/api_sonarqube/updater_from_source.py
index 1c97f8fe40..93afa04c4a 100644
--- a/dojo/tools/api_sonarqube/updater_from_source.py
+++ b/dojo/tools/api_sonarqube/updater_from_source.py
@@ -41,7 +41,7 @@ def update(self, finding):
current_status = issue.get("resolution") or issue.get("status")
current_finding_status = self.get_sonarqube_status_for(finding)
logger.debug(
- f"--> SQ Current status: {current_status}. Finding status: {current_finding_status}"
+ f"--> SQ Current status: {current_status}. Finding status: {current_finding_status}",
)
if (
@@ -49,7 +49,7 @@ def update(self, finding):
and current_finding_status != current_status
):
logger.info(
- f"Original SonarQube issue '{sonarqube_issue}' has changed. Updating DefectDojo finding '{finding}'..."
+ f"Original SonarQube issue '{sonarqube_issue}' has changed. Updating DefectDojo finding '{finding}'...",
)
self.update_finding_status(finding, current_status)
diff --git a/dojo/tools/api_vulners/importer.py b/dojo/tools/api_vulners/importer.py
index 89950ae97d..8ebbbe83f6 100644
--- a/dojo/tools/api_vulners/importer.py
+++ b/dojo/tools/api_vulners/importer.py
@@ -37,7 +37,7 @@ def prepare_client(self, test):
raise ValidationError(msg)
else:
configs = Product_API_Scan_Configuration.objects.filter(
- product=product, tool_configuration__tool_type__name="Vulners"
+ product=product, tool_configuration__tool_type__name="Vulners",
)
if configs.count() == 1:
config = configs.first()
diff --git a/dojo/tools/api_vulners/parser.py b/dojo/tools/api_vulners/parser.py
index a6203ec559..5d6382caaf 100644
--- a/dojo/tools/api_vulners/parser.py
+++ b/dojo/tools/api_vulners/parser.py
@@ -91,7 +91,7 @@ def get_findings(self, file, test):
finding.cvssv3 = CVSS3(
vuln.get("cvss3", {})
.get("cvssV3", {})
- .get("vectorString", "")
+ .get("vectorString", ""),
).clean_vector()
# References
diff --git a/dojo/tools/aqua/parser.py b/dojo/tools/aqua/parser.py
index d6ea61edc9..ab8c6e1bdb 100644
--- a/dojo/tools/aqua/parser.py
+++ b/dojo/tools/aqua/parser.py
@@ -83,7 +83,7 @@ def get_item(resource, vuln, test):
f"NVD score v3 ({score}) used for classification.\n"
)
severity_justification += "\nNVD v3 vectors: {}".format(
- vuln.get("nvd_vectors_v3")
+ vuln.get("nvd_vectors_v3"),
)
# Add the CVSS3 to Finding
cvssv3 = vuln.get("nvd_vectors_v3")
@@ -93,7 +93,7 @@ def get_item(resource, vuln, test):
f"NVD score v2 ({score}) used for classification.\n"
)
severity_justification += "\nNVD v2 vectors: {}".format(
- vuln.get("nvd_vectors")
+ vuln.get("nvd_vectors"),
)
severity = severity_of(score)
severity_justification += f"\n{used_for_classification}"
diff --git a/dojo/tools/arachni/parser.py b/dojo/tools/arachni/parser.py
index 7ca6528de0..7b28d7e9f0 100644
--- a/dojo/tools/arachni/parser.py
+++ b/dojo/tools/arachni/parser.py
@@ -32,7 +32,7 @@ def get_items(self, tree, test):
report_date = None
if "finish_datetime" in tree:
report_date = datetime.strptime(
- tree.get("finish_datetime"), "%Y-%m-%d %H:%M:%S %z"
+ tree.get("finish_datetime"), "%Y-%m-%d %H:%M:%S %z",
)
for node in tree["issues"]:
item = self.get_item(node, report_date)
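The Arachni hunk above parses the report timestamp with a fixed format string. As a small, self-contained illustration (the value below is invented), %z accepts the numeric UTC offset that format expects:

    from datetime import datetime

    raw = "2024-07-03 22:16:13 +0200"  # shaped like Arachni's finish_datetime field
    report_date = datetime.strptime(raw, "%Y-%m-%d %H:%M:%S %z")
    print(report_date.isoformat())     # 2024-07-03T22:16:13+02:00
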
diff --git a/dojo/tools/auditjs/parser.py b/dojo/tools/auditjs/parser.py
index 678e11e8e6..8135fe1fc5 100644
--- a/dojo/tools/auditjs/parser.py
+++ b/dojo/tools/auditjs/parser.py
@@ -55,7 +55,7 @@ def get_findings(self, filename, test):
)
component_name, component_version = pacakge_full_name.split(
- "@"
+ "@",
)
# Check if there are any vulnerabilities
@@ -94,16 +94,16 @@ def get_findings(self, filename, test):
cvss_score = vulnerability["cvssScore"]
if "cvssVector" in vulnerability:
cvss_vectors = cvss.parser.parse_cvss_from_text(
- vulnerability["cvssVector"]
+ vulnerability["cvssVector"],
)
if len(cvss_vectors) > 0 and isinstance(
- cvss_vectors[0], CVSS3
+ cvss_vectors[0], CVSS3,
):
# Only set finding vector if it's version 3
cvss_vector = cvss_vectors[0].clean_vector()
severity = cvss_vectors[0].severities()[0]
elif len(cvss_vectors) > 0 and isinstance(
- cvss_vectors[0], CVSS2
+ cvss_vectors[0], CVSS2,
):
# Otherwise add it to description
description = (
@@ -148,7 +148,7 @@ def get_findings(self, filename, test):
if finding.description:
find.description += "\n" + finding.description
find.unsaved_endpoints.extend(
- finding.unsaved_endpoints
+ finding.unsaved_endpoints,
)
dupes[dupe_key] = find
else:
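The auditjs hunks above rely on the cvss package: parse_cvss_from_text() returns a list of CVSS objects, and only a CVSS3 result is promoted to the finding's vector while v2 data stays in the description. A hedged, standalone sketch of that flow (the vector string is an example value, not from a report):

    import cvss.parser
    from cvss import CVSS2, CVSS3

    vectors = cvss.parser.parse_cvss_from_text(
        "CVSS:3.1/AV:N/AC:L/PR:N/UI:N/S:U/C:H/I:H/A:H",
    )
    if vectors and isinstance(vectors[0], CVSS3):
        cvss_vector = vectors[0].clean_vector()   # normalized vector stored on the finding
        severity = vectors[0].severities()[0]     # base severity, e.g. "Critical"
    elif vectors and isinstance(vectors[0], CVSS2):
        cvss_vector = None                        # v2 data would only be appended to the description
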
diff --git a/dojo/tools/aws_prowler/parser.py b/dojo/tools/aws_prowler/parser.py
index 4a1ed7af91..8a084ff6f3 100644
--- a/dojo/tools/aws_prowler/parser.py
+++ b/dojo/tools/aws_prowler/parser.py
@@ -104,7 +104,7 @@ def process_csv(self, file, test):
# improving key to get duplicates
dupe_key = hashlib.sha256(
- (sev + "|" + region + "|" + result_extended).encode("utf-8")
+ (sev + "|" + region + "|" + result_extended).encode("utf-8"),
).hexdigest()
if dupe_key in dupes:
find = dupes[dupe_key]
@@ -189,7 +189,7 @@ def process_json(self, file, test):
# improving key to get duplicates
dupe_key = hashlib.sha256(
- (sev + "|" + region + "|" + result_extended).encode("utf-8")
+ (sev + "|" + region + "|" + result_extended).encode("utf-8"),
).hexdigest()
if dupe_key in dupes:
find = dupes[dupe_key]
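Several parsers in this patch, the AWS Prowler one above included, deduplicate findings by hashing the fields that make two rows "the same". A minimal sketch of that pattern with invented values (the dict value is a stand-in for the Finding the parser actually builds):

    import hashlib

    def dedupe_key(sev: str, region: str, result_extended: str) -> str:
        # stable key: identical severity/region/result text always hashes the same way
        return hashlib.sha256(
            (sev + "|" + region + "|" + result_extended).encode("utf-8"),
        ).hexdigest()

    dupes = {}
    key = dedupe_key("High", "eu-west-1", "S3 bucket allows public read access")
    if key not in dupes:
        dupes[key] = {"severity": "High"}  # stand-in for the Finding object
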
diff --git a/dojo/tools/awssecurityhub/parser.py b/dojo/tools/awssecurityhub/parser.py
index b761bdd214..2bc71c2e91 100644
--- a/dojo/tools/awssecurityhub/parser.py
+++ b/dojo/tools/awssecurityhub/parser.py
@@ -31,7 +31,7 @@ def get_tests(self, scan_type, scan):
aws_acc.append(finding.get("AwsAccountId"))
report_date = data.get("createdAt")
test = ParserTest(
- name=self.ID, type=self.ID, version=""
+ name=self.ID, type=self.ID, version="",
)
test.description = "**AWS Accounts:** " + ', '.join(set(aws_acc)) + "\n"
test.description += "**Finding Origins:** " + ', '.join(set(prod)) + "\n"
diff --git a/dojo/tools/azure_security_center_recommendations/parser.py b/dojo/tools/azure_security_center_recommendations/parser.py
index e4f02cf3b8..7fbfac83c9 100644
--- a/dojo/tools/azure_security_center_recommendations/parser.py
+++ b/dojo/tools/azure_security_center_recommendations/parser.py
@@ -47,7 +47,7 @@ def process_csv(self, file, test):
recommendation_id = row.get("recommendationId")
recommendation_name = row.get("recommendationName")
recommendation_display_name = row.get(
- "recommendationDisplayName"
+ "recommendationDisplayName",
)
azure_description = row.get("description")
remediation_steps = row.get("remediationSteps")
@@ -57,7 +57,7 @@ def process_csv(self, file, test):
status_change_date = row.get("statusChangeDate")
controls = row.get("controls")
azure_portal_recommendation_link = row.get(
- "azurePortalRecommendationLink"
+ "azurePortalRecommendationLink",
)
native_cloud_account_id = row.get("nativeCloudAccountId")
@@ -107,13 +107,13 @@ def process_csv(self, file, test):
references=azure_portal_recommendation_link,
mitigation=remediation_steps,
date=datetime.strptime(
- status_change_date[0:10], "%Y-%m-%d"
+ status_change_date[0:10], "%Y-%m-%d",
).date(),
vuln_id_from_tool=recommendation_name,
unique_id_from_tool=recommendation_id,
static_finding=True,
dynamic_finding=False,
- )
+ ),
)
return findings
diff --git a/dojo/tools/bandit/parser.py b/dojo/tools/bandit/parser.py
index b209648847..1ad385114a 100644
--- a/dojo/tools/bandit/parser.py
+++ b/dojo/tools/bandit/parser.py
@@ -34,7 +34,7 @@ def get_findings(self, filename, test):
"```",
str(item.get("code")).replace("```", "\\`\\`\\`"),
"```",
- ]
+ ],
)
finding = Finding(
@@ -48,7 +48,7 @@ def get_findings(self, filename, test):
static_finding=True,
dynamic_finding=False,
vuln_id_from_tool=":".join(
- [item["test_name"], item["test_id"]]
+ [item["test_name"], item["test_id"]],
),
nb_occurences=1,
)
diff --git a/dojo/tools/blackduck/importer.py b/dojo/tools/blackduck/importer.py
index 7c1e098a37..0afd9d4771 100644
--- a/dojo/tools/blackduck/importer.py
+++ b/dojo/tools/blackduck/importer.py
@@ -38,7 +38,7 @@ def _process_csvfile(self, report):
project_ids = set(security_issues.keys())
return self._process_project_findings(
- project_ids, security_issues, None
+ project_ids, security_issues, None,
)
def _process_zipfile(self, report):
@@ -63,11 +63,11 @@ def _process_zipfile(self, report):
project_ids = set(files.keys()) & set(security_issues.keys())
return self._process_project_findings(
- project_ids, security_issues, files
+ project_ids, security_issues, files,
)
def _process_project_findings(
- self, project_ids, security_issues, files=None
+ self, project_ids, security_issues, files=None,
):
"""
Process findings per projects and return a BlackduckFinding object per the model
@@ -97,7 +97,7 @@ def _process_project_findings(
for issue in security_issues[project_id]:
security_issue_dict = dict(issue)
cve = self.get_cve(
- security_issue_dict.get("Vulnerability id")
+ security_issue_dict.get("Vulnerability id"),
).upper()
location = ", ".join(locations)
diff --git a/dojo/tools/blackduck/parser.py b/dojo/tools/blackduck/parser.py
index 4b21c28619..a79e9db967 100644
--- a/dojo/tools/blackduck/parser.py
+++ b/dojo/tools/blackduck/parser.py
@@ -29,7 +29,7 @@ def normalize_findings(self, filename):
importer = BlackduckImporter()
findings = sorted(
- importer.parse_findings(filename), key=lambda f: f.vuln_id
+ importer.parse_findings(filename), key=lambda f: f.vuln_id,
)
return findings
@@ -46,7 +46,7 @@ def ingest_findings(self, normalized_findings, test):
references = self.format_reference(i)
dupe_key = hashlib.md5(
- f"{title} | {i.vuln_source}".encode()
+ f"{title} | {i.vuln_source}".encode(),
).hexdigest()
if dupe_key in dupes:
diff --git a/dojo/tools/blackduck_binary_analysis/importer.py b/dojo/tools/blackduck_binary_analysis/importer.py
index 2c7528ae2d..5b54321129 100644
--- a/dojo/tools/blackduck_binary_analysis/importer.py
+++ b/dojo/tools/blackduck_binary_analysis/importer.py
@@ -32,11 +32,11 @@ def _process_csvfile(self, report, orig_report_name):
sha1_hash_keys = set(vulnerabilities.keys())
return self._process_vuln_results(
- sha1_hash_keys, report, orig_report_name, vulnerabilities
+ sha1_hash_keys, report, orig_report_name, vulnerabilities,
)
def _process_vuln_results(
- self, sha1_hash_keys, report, orig_report_name, vulnerabilities
+ self, sha1_hash_keys, report, orig_report_name, vulnerabilities,
):
"""
Process findings for each project.
@@ -72,7 +72,7 @@ def _process_vuln_results(
vuln_dict.get("Vulnerability URL"),
vuln_dict.get("Missing exploit mitigations"),
vuln_dict.get("BDSA"),
- vuln_dict.get("Version override type")
+ vuln_dict.get("Version override type"),
)
def __partition_by_key(self, csv_file):
diff --git a/dojo/tools/blackduck_binary_analysis/parser.py b/dojo/tools/blackduck_binary_analysis/parser.py
index 2f0523223e..b69fad5d91 100644
--- a/dojo/tools/blackduck_binary_analysis/parser.py
+++ b/dojo/tools/blackduck_binary_analysis/parser.py
@@ -47,7 +47,7 @@ def ingest_findings(self, sorted_findings, test):
if str(i.cvss_vector_v3) != "":
cvss_vectors = "{}{}".format(
"CVSS:3.1/",
- i.cvss_vector_v3
+ i.cvss_vector_v3,
)
cvss_obj = CVSS3(cvss_vectors)
elif str(i.cvss_vector_v2) != "":
@@ -68,7 +68,7 @@ def ingest_findings(self, sorted_findings, test):
references = self.format_references(i)
unique_finding_key = hashlib.sha256(
- f"{file_path + object_sha1 + title}".encode()
+ f"{file_path + object_sha1 + title}".encode(),
).hexdigest()
if unique_finding_key in findings:
diff --git a/dojo/tools/blackduck_component_risk/parser.py b/dojo/tools/blackduck_component_risk/parser.py
index 7e683364f4..274ff74e02 100644
--- a/dojo/tools/blackduck_component_risk/parser.py
+++ b/dojo/tools/blackduck_component_risk/parser.py
@@ -149,23 +149,23 @@ def license_description(self, component, source):
:return:
"""
desc = "**License Name:** {} \n".format(
- component.get("License names")
+ component.get("License names"),
)
desc += "**License Families:** {} \n".format(
- component.get("License families")
+ component.get("License families"),
)
desc += "**License Usage:** {} \n".format(component.get("Usage"))
desc += "**License Origin name:** {} \n".format(
- component.get("Origin name")
+ component.get("Origin name"),
)
desc += "**License Origin id:** {} \n".format(
- component.get("Origin id")
+ component.get("Origin id"),
)
desc += "**Match type:** {}\n".format(component.get("Match type"))
try:
desc += "**Path:** {}\n".format(source.get("Path"))
desc += "**Archive context:** {}\n".format(
- source.get("Archive context")
+ source.get("Archive context"),
)
desc += "**Scan:** {}\n".format(source.get("Scan"))
except KeyError:
@@ -207,7 +207,7 @@ def security_title(self, vulns):
:return:
"""
title = "Security Risk: {}:{}".format(
- vulns[0]["Component name"], vulns[0]["Component version name"]
+ vulns[0]["Component name"], vulns[0]["Component version name"],
)
return title
@@ -225,12 +225,12 @@ def security_description(self, vulns):
for vuln in vulns:
desc += "###{} \n".format(vuln["Vulnerability id"])
desc += "**Base Score:** {} \n**Exploitability:** {} \n**Impact:** {}\n".format(
- vuln["Base score"], vuln["Exploitability"], vuln["Impact"]
+ vuln["Base score"], vuln["Exploitability"], vuln["Impact"],
)
# Not all have a URL
if vuln["URL"] != "":
desc += "**URL:** [{}]({})\n".format(
- vuln["Vulnerability id"], vuln["URL"]
+ vuln["Vulnerability id"], vuln["URL"],
)
desc += "**Description:** {}\n".format(vuln["Description"])
return desc
@@ -290,7 +290,7 @@ def security_mitigation(self, vulns):
:return:
"""
mit = "Update component {}:{} to a secure version".format(
- vulns[0]["Component name"], vulns[0]["Component version name"]
+ vulns[0]["Component name"], vulns[0]["Component version name"],
)
return mit
@@ -318,7 +318,7 @@ def security_references(self, vulns):
for vuln in vulns:
if vuln["URL"] != "":
references += "{}: [{}]({})\n".format(
- vuln["Vulnerability id"], vuln["URL"], vuln["URL"]
+ vuln["Vulnerability id"], vuln["URL"], vuln["URL"],
)
return references
@@ -334,7 +334,7 @@ def security_filepath(self, vulns):
"""
if vulns[0]["Component origin id"] == "":
component_key = "{}/{}".format(
- vulns[0]["Component name"], vulns[0]["Component version name"]
+ vulns[0]["Component name"], vulns[0]["Component version name"],
)
else:
component_key = vulns[0]["Component origin id"]
diff --git a/dojo/tools/bugcrowd/parser.py b/dojo/tools/bugcrowd/parser.py
index d3672255bf..a643499976 100644
--- a/dojo/tools/bugcrowd/parser.py
+++ b/dojo/tools/bugcrowd/parser.py
@@ -25,7 +25,7 @@ def get_findings(self, filename, test):
if isinstance(content, bytes):
content = content.decode("utf-8")
reader = csv.DictReader(
- io.StringIO(content), delimiter=",", quotechar='"'
+ io.StringIO(content), delimiter=",", quotechar='"',
)
csvarray = []
@@ -38,7 +38,7 @@ def get_findings(self, filename, test):
url = row.get("bug_url", None)
pre_description = self.split_description(
- row.get("description", None)
+ row.get("description", None),
)
Description = (
pre_description.get("description", "")
@@ -120,7 +120,7 @@ def get_findings(self, filename, test):
+ row.get("vrt_lineage", "")
)
finding.steps_to_reproduce = pre_description.get(
- "steps_to_reproduce", None
+ "steps_to_reproduce", None,
)
finding.references = References
finding.severity = self.convert_severity(row.get("priority", 0))
@@ -139,7 +139,7 @@ def get_findings(self, filename, test):
finding.description = ""
key = hashlib.md5(
- (finding.title + "|" + finding.description).encode("utf-8")
+ (finding.title + "|" + finding.description).encode("utf-8"),
).hexdigest()
if key not in dupes:
@@ -173,7 +173,7 @@ def description_parse(self, ret):
ret[
"steps_to_reproduce"
] = "### Steps To Reproduce\n" + ret.get(
- "imsteps_to_reproducepact", ""
+ "imsteps_to_reproducepact", "",
)
steps = skip = 1
poc = impact = 0
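The Bugcrowd parser above, like the Cobalt, CredScan, and generic CSV parsers later in this patch, decodes the uploaded bytes and hands them to csv.DictReader. A self-contained sketch, with an inlined two-column report standing in for the uploaded file:

    import csv
    import io

    content = b'name,priority\n"Reflected XSS on /search","P3"\n'  # stand-in for file.read()
    if isinstance(content, bytes):
        content = content.decode("utf-8")
    reader = csv.DictReader(
        io.StringIO(content), delimiter=",", quotechar='"',
    )
    for row in reader:
        print(row["name"], row["priority"])
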
diff --git a/dojo/tools/burp/parser.py b/dojo/tools/burp/parser.py
index f260e598b3..3fcc728a19 100644
--- a/dojo/tools/burp/parser.py
+++ b/dojo/tools/burp/parser.py
@@ -74,7 +74,7 @@ def get_attrib_from_subnode(xml_node, subnode_xpath_expr, attrib_name):
if ETREE_VERSION[0] <= 1 and ETREE_VERSION[1] < 3:
match_obj = re.search(
- r"([^\@]+?)\[\@([^=]*?)=\'([^\']*?)\'", subnode_xpath_expr
+ r"([^\@]+?)\[\@([^=]*?)=\'([^\']*?)\'", subnode_xpath_expr,
)
if match_obj is not None:
node_to_find = match_obj.group(1)
@@ -111,7 +111,7 @@ def get_clean_base64(value):
return ""
try:
return base64.b64decode(value).decode(
- "utf-8", "replace"
+ "utf-8", "replace",
) # wouldn't this be cleaner than below?
except UnicodeDecodeError:
# decoding of UTF-8 fail when you have a binary payload in the HTTP response
@@ -120,7 +120,7 @@ def get_clean_base64(value):
[
base64.b64decode(value).split(b"\r\n\r\n")[0].decode(),
"",
- ]
+ ],
)
@@ -152,7 +152,7 @@ def get_item(item_node, test):
request = get_clean_base64(request_response.findall("request")[0].text)
if request_response.findall("response"):
response = get_clean_base64(
- request_response.findall("response")[0].text
+ request_response.findall("response")[0].text,
)
else:
response = ""
@@ -186,10 +186,10 @@ def get_item(item_node, test):
for request_response in event.findall("./requestresponse"):
request = get_clean_base64(
- request_response.findall("request")[0].text
+ request_response.findall("request")[0].text,
)
response = get_clean_base64(
- request_response.findall("response")[0].text
+ request_response.findall("response")[0].text,
)
unsaved_req_resp.append({"req": request, "resp": response})
if collab_details[0] == "HTTP":
@@ -275,7 +275,7 @@ def get_item(item_node, test):
if len(cwes) > 1:
# FIXME support more than one CWE
logger.debug(
- f"more than one CWE for a finding {cwes}. NOT supported by parser API"
+ f"more than one CWE for a finding {cwes}. NOT supported by parser API",
)
if len(cwes) > 0:
finding.cwe = cwes[0]
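The get_clean_base64 helper touched above decodes Burp's base64-encoded request/response bodies, replacing undecodable bytes rather than failing. A reduced sketch of that behaviour (the sample payload is invented):

    import base64

    def clean_base64(value):
        if value is None:
            return ""
        # errors="replace" keeps binary response bodies from aborting the import
        return base64.b64decode(value).decode("utf-8", "replace")

    sample = base64.b64encode(b"GET /login HTTP/1.1\r\nHost: example.com\r\n\r\n").decode()
    print(clean_base64(sample))
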
diff --git a/dojo/tools/burp_api/parser.py b/dojo/tools/burp_api/parser.py
index 75e4e87507..1ec4c6b62d 100644
--- a/dojo/tools/burp_api/parser.py
+++ b/dojo/tools/burp_api/parser.py
@@ -65,10 +65,10 @@ def get_findings(self, file, test):
static_finding=False, # by definition
dynamic_finding=True, # by definition
unique_id_from_tool=str(
- issue.get("serial_number", "")
+ issue.get("serial_number", ""),
), # the serial number is a good candidate for this attribute
vuln_id_from_tool=str(
- issue.get("type_index", "")
+ issue.get("type_index", ""),
), # the type index is a good candidate for this attribute
)
# manage confidence
@@ -78,8 +78,8 @@ def get_findings(self, file, test):
if "origin" in issue and "path" in issue:
finding.unsaved_endpoints = [
Endpoint.from_uri(
- issue.get("origin") + issue.get("path")
- )
+ issue.get("origin") + issue.get("path"),
+ ),
]
finding.unsaved_req_resp = []
for evidence in issue.get("evidence", []):
@@ -89,13 +89,13 @@ def get_findings(self, file, test):
]:
continue
request = self.get_clean_base64(
- evidence.get("request_response").get("request")
+ evidence.get("request_response").get("request"),
)
response = self.get_clean_base64(
- evidence.get("request_response").get("response")
+ evidence.get("request_response").get("response"),
)
finding.unsaved_req_resp.append(
- {"req": request, "resp": response}
+ {"req": request, "resp": response},
)
items.append(finding)
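Both Burp API hunks above build endpoints by concatenating the issue's origin and path and passing the result to Endpoint.from_uri. A sketch under the assumption of a configured DefectDojo environment (Endpoint comes from dojo.models; the issue dict below is invented):

    from dojo.models import Endpoint

    issue = {"origin": "https://example.com", "path": "/login"}
    endpoint = Endpoint.from_uri(
        issue.get("origin") + issue.get("path"),
    )
    # yields an Endpoint for https://example.com/login
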
diff --git a/dojo/tools/burp_enterprise/parser.py b/dojo/tools/burp_enterprise/parser.py
index 1984cc65d7..b652dda32c 100644
--- a/dojo/tools/burp_enterprise/parser.py
+++ b/dojo/tools/burp_enterprise/parser.py
@@ -72,10 +72,10 @@ def get_content(self, container):
def pre_allocate_items(self, tree):
items = []
endpoint_text = tree.xpath(
- "/html/body/div/div[contains(@class, 'section')]/h1"
+ "/html/body/div/div[contains(@class, 'section')]/h1",
)
severities = tree.xpath(
- "/html/body/div/div[contains(@class, 'section')]/table[contains(@class, 'issue-table')]/tbody"
+ "/html/body/div/div[contains(@class, 'section')]/table[contains(@class, 'issue-table')]/tbody",
)
endpoint_text = [
endpoint
@@ -116,7 +116,7 @@ def get_items(self, tree, test):
# Check that there is at least one vulnerability (the vulnerabilities
# table is absent when no vuln are found)
vulns = tree.xpath(
- "/html/body/div/div[contains(@class, 'section details')]/div[contains(@class, 'issue-container')]"
+ "/html/body/div/div[contains(@class, 'section details')]/div[contains(@class, 'issue-container')]",
)
if len(vulns) == 0:
return []
@@ -237,7 +237,7 @@ def create_findings(self, items, test):
unsaved_req_resp = []
for index in range(len(requests)):
unsaved_req_resp.append(
- {"req": requests[index], "resp": responses[index]}
+ {"req": requests[index], "resp": responses[index]},
)
find.unsaved_req_resp = unsaved_req_resp
diff --git a/dojo/tools/burp_graphql/parser.py b/dojo/tools/burp_graphql/parser.py
index f6d032bc28..c026694122 100644
--- a/dojo/tools/burp_graphql/parser.py
+++ b/dojo/tools/burp_graphql/parser.py
@@ -63,7 +63,7 @@ def parse_findings(self, scan_data):
for issue in scan_data:
if not issue.get("issue_type") or not issue["issue_type"].get(
- "name"
+ "name",
):
msg = "Issue does not have a name"
raise ValueError(msg)
@@ -89,11 +89,11 @@ def combine_findings(self, finding, issue):
if issue.get("evidence"):
finding["Evidence"] = finding["Evidence"] + self.parse_evidence(
- issue.get("evidence")
+ issue.get("evidence"),
)
finding["Endpoints"].append(
- Endpoint.from_uri(issue["origin"] + issue["path"])
+ Endpoint.from_uri(issue["origin"] + issue["path"]),
)
def create_finding(self, issue):
@@ -107,18 +107,18 @@ def create_finding(self, issue):
if issue.get("description_html"):
finding["Description"] += "**Issue Detail**\n"
finding["Description"] += html2text.html2text(
- issue.get("description_html")
+ issue.get("description_html"),
)
if issue["issue_type"].get("description_html"):
finding["Impact"] += "**Issue Background**\n"
finding["Impact"] += html2text.html2text(
- issue["issue_type"].get("description_html")
+ issue["issue_type"].get("description_html"),
)
elif issue["issue_type"].get("description_html"):
finding["Description"] += "**Issue Background**\n"
finding["Description"] += html2text.html2text(
- issue["issue_type"].get("description_html")
+ issue["issue_type"].get("description_html"),
)
if issue.get("remediation_html"):
@@ -128,12 +128,12 @@ def create_finding(self, issue):
if issue["issue_type"].get("remediation_html"):
finding["Mitigation"] += "**Remediation Background**\n"
finding["Mitigation"] += html2text.html2text(
- issue["issue_type"].get("remediation_html")
+ issue["issue_type"].get("remediation_html"),
)
elif issue["issue_type"].get("remediation_html"):
finding["Impact"] += "**Remediation Background**\n"
finding["Impact"] += html2text.html2text(
- issue["issue_type"].get("remediation_html")
+ issue["issue_type"].get("remediation_html"),
)
if issue.get("severity"):
@@ -142,7 +142,7 @@ def create_finding(self, issue):
finding["Severity"] = "Info"
finding["Endpoints"] = [
- Endpoint.from_uri(issue["origin"] + issue["path"])
+ Endpoint.from_uri(issue["origin"] + issue["path"]),
]
if issue.get("evidence"):
@@ -153,16 +153,16 @@ def create_finding(self, issue):
if issue["issue_type"].get("references_html"):
finding["References"] += "**References**\n"
finding["References"] += html2text.html2text(
- issue["issue_type"].get("references_html")
+ issue["issue_type"].get("references_html"),
)
if issue["issue_type"].get("vulnerability_classifications_html"):
finding["References"] += "**CWE Information**\n"
finding["References"] += html2text.html2text(
- issue["issue_type"].get("vulnerability_classifications_html")
+ issue["issue_type"].get("vulnerability_classifications_html"),
)
finding["CWE"] = self.get_cwe(
- issue["issue_type"].get("vulnerability_classifications_html")
+ issue["issue_type"].get("vulnerability_classifications_html"),
)
else:
finding["CWE"] = 0
@@ -182,11 +182,11 @@ def parse_evidence(self, evidence):
for data in request_dict.get("request_segments"):
if data.get("data_html"):
request += html2text.html2text(
- data.get("data_html")
+ data.get("data_html"),
).strip()
elif data.get("highlight_html"):
request += html2text.html2text(
- data.get("highlight_html")
+ data.get("highlight_html"),
).strip()
if (
@@ -201,11 +201,11 @@ def parse_evidence(self, evidence):
for data in response_dict.get("response_segments"):
if data.get("data_html"):
response += html2text.html2text(
- data.get("data_html")
+ data.get("data_html"),
).strip()
elif data.get("highlight_html"):
response += html2text.html2text(
- data.get("highlight_html")
+ data.get("highlight_html"),
).strip()
i += 2
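The Burp GraphQL parser above funnels every HTML fragment through html2text before storing it on the finding. A one-call illustration with an invented snippet:

    import html2text

    snippet = "<p>The application reflects <b>user input</b> without encoding.</p>"
    print(html2text.html2text(snippet).strip())
    # -> The application reflects **user input** without encoding.
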
diff --git a/dojo/tools/cargo_audit/parser.py b/dojo/tools/cargo_audit/parser.py
index e992e93651..1447bf5908 100644
--- a/dojo/tools/cargo_audit/parser.py
+++ b/dojo/tools/cargo_audit/parser.py
@@ -52,7 +52,7 @@ def get_findings(self, filename, test):
)
references = f"{advisory.get('url')}\n" + "\n".join(
- advisory["references"]
+ advisory["references"],
)
date = advisory.get("date")
@@ -73,8 +73,8 @@ def get_findings(self, filename, test):
mitigation = "No information about patched version"
dupe_key = hashlib.sha256(
(vuln_id + date + package_name + package_version).encode(
- "utf-8"
- )
+ "utf-8",
+ ),
).hexdigest()
if dupe_key in dupes:
diff --git a/dojo/tools/checkmarx/parser.py b/dojo/tools/checkmarx/parser.py
index 1cfdfdac25..02e242c0d1 100644
--- a/dojo/tools/checkmarx/parser.py
+++ b/dojo/tools/checkmarx/parser.py
@@ -82,17 +82,17 @@ def _get_findings_xml(self, filename, test):
if result.get("Status") is not None:
findingdetail = "{}**Status:** {}\n".format(
- findingdetail, result.get("Status")
+ findingdetail, result.get("Status"),
)
deeplink = "[{}]({})".format(
- result.get("DeepLink"), result.get("DeepLink")
+ result.get("DeepLink"), result.get("DeepLink"),
)
findingdetail = f"{findingdetail}**Finding Link:** {deeplink}\n"
if self.mode == "detailed":
self._process_result_detailed(
- test, dupes, findingdetail, query, result, find_date
+ test, dupes, findingdetail, query, result, find_date,
)
else:
self._process_result_file_name_aggregated(
@@ -111,11 +111,11 @@ def _get_findings_xml(self, filename, test):
for key in list(dupes):
vuln_ids_from_tool[key].sort
dupes[key].vuln_id_from_tool = ",".join(
- vuln_ids_from_tool[key]
+ vuln_ids_from_tool[key],
)[:500]
for lang in language_list:
add_language(
- test.engagement.product, lang, files=language_list[lang]
+ test.engagement.product, lang, files=language_list[lang],
)
return list(dupes.values())
@@ -137,7 +137,7 @@ def _process_result_file_name_aggregated(
_name, cwe, _categories, queryId = self.getQueryElements(query)
titleStart = query.get("name").replace("_", " ")
description, lastPathnode = self.get_description_file_name_aggregated(
- query, result
+ query, result,
)
sinkFilename = lastPathnode.find("FileName").text
if sinkFilename:
@@ -218,14 +218,14 @@ def get_description_file_name_aggregated(self, query, result):
# At this point we have iterated over all path nodes (function calls)
# and pathnode is at the sink of the vulnerability
sinkFilename, sinkLineNumber, sinkObject = self.get_pathnode_elements(
- pathnode
+ pathnode,
)
description = f"Source file: {sourceFilename} (line {sourceLineNumber})\nSource object: {sourceObject}"
description = f"{description}\nSink file: {sinkFilename} (line {sinkLineNumber})\nSink object: {sinkObject}"
return description, pathnode
def _process_result_detailed(
- self, test, dupes, findingdetail, query, result, find_date
+ self, test, dupes, findingdetail, query, result, find_date,
):
"""Process one result = one pathId for scanner "Checkmarx Scan detailed"
Create the finding and add it into the dupes list
@@ -240,7 +240,7 @@ def _process_result_detailed(
logger.warning(
"Checkmarx scan: more than one path found: "
+ str(len(paths))
- + ". Only the last one will be used"
+ + ". Only the last one will be used",
)
for path in paths:
@@ -257,7 +257,7 @@ def _process_result_detailed(
# Loop over function calls / assignments in the data flow graph
for pathnode in path.findall("PathNode"):
findingdetail = self.get_description_detailed(
- pathnode, findingdetail
+ pathnode, findingdetail,
)
nodeId = pathnode.find("NodeId").text
if nodeId == "1":
@@ -313,17 +313,17 @@ def get_pathnode_elements(self, pathnode):
def get_description_detailed(self, pathnode, findingdetail):
if pathnode.find("Line").text is not None:
findingdetail = "{}**Line Number:** {}\n".format(
- findingdetail, pathnode.find("Line").text
+ findingdetail, pathnode.find("Line").text,
)
if pathnode.find("Column").text is not None:
findingdetail = "{}**Column:** {}\n".format(
- findingdetail, pathnode.find("Column").text
+ findingdetail, pathnode.find("Column").text,
)
if pathnode.find("Name").text is not None:
findingdetail = "{}**Source Object:** {}\n".format(
- findingdetail, pathnode.find("Name").text
+ findingdetail, pathnode.find("Name").text,
)
for codefragment in pathnode.findall("Snippet/Line"):
@@ -392,7 +392,7 @@ def _get_findings_json(self, file, test):
description=descriptiondetails,
title=title,
date=self._parse_date(
- vulnerability.get("firstFoundDate")
+ vulnerability.get("firstFoundDate"),
),
severity=vulnerability.get("severity").title(),
active=(
@@ -412,7 +412,7 @@ def _get_findings_json(self, file, test):
)
else:
finding.unique_id_from_tool = str(
- vulnerability.get("similarityId")
+ vulnerability.get("similarityId"),
)
# get the last node and set some values
if vulnerability.get("nodes"):
@@ -431,7 +431,7 @@ def _get_findings_json(self, file, test):
title=f"{component_name}:{component_version} | {cve}",
description=vulnerability.get("description"),
date=self._parse_date(
- vulnerability.get("firstFoundDate")
+ vulnerability.get("firstFoundDate"),
),
severity=vulnerability.get("severity").title(),
active=(
@@ -449,15 +449,15 @@ def _get_findings_json(self, file, test):
)
if vulnerability.get("cveId"):
finding.unsaved_vulnerability_ids = [
- vulnerability.get("cveId")
+ vulnerability.get("cveId"),
]
if vulnerability.get("id"):
finding.unique_id_from_tool = vulnerability.get(
- "id"
+ "id",
)
else:
finding.unique_id_from_tool = str(
- vulnerability.get("similarityId")
+ vulnerability.get("similarityId"),
)
finding.unsaved_tags = [result_type]
findings.append(finding)
@@ -469,7 +469,7 @@ def _get_findings_json(self, file, test):
title=f'{name} | {vulnerability.get("issueType")}',
description=vulnerability.get("description"),
date=self._parse_date(
- vulnerability.get("firstFoundDate")
+ vulnerability.get("firstFoundDate"),
),
severity=vulnerability.get("severity").title(),
active=(
@@ -482,18 +482,18 @@ def _get_findings_json(self, file, test):
file_path=vulnerability.get("fileName"),
line=vulnerability.get("line", 0),
severity_justification=vulnerability.get(
- "actualValue"
+ "actualValue",
),
test=test,
static_finding=True,
)
if vulnerability.get("id"):
finding.unique_id_from_tool = vulnerability.get(
- "id"
+ "id",
)
else:
finding.unique_id_from_tool = str(
- vulnerability.get("similarityId")
+ vulnerability.get("similarityId"),
)
finding.unsaved_tags = [result_type, name]
findings.append(finding)
diff --git a/dojo/tools/checkmarx_one/parser.py b/dojo/tools/checkmarx_one/parser.py
index 381f705f82..aa6e1b4a11 100644
--- a/dojo/tools/checkmarx_one/parser.py
+++ b/dojo/tools/checkmarx_one/parser.py
@@ -135,7 +135,7 @@ def get_node_snippet(nodes: list) -> str:
f"**File Name**: {node.get('fileName')}\n"
f"**Method**: {node.get('method')}\n"
f"**Line**: {node.get('line')}\n"
- f"**Code Snippet**: {node.get('code')}\n"
+ f"**Code Snippet**: {node.get('code')}\n",
)
return "\n---\n".join(formatted_nodes)
@@ -148,7 +148,7 @@ def get_node_snippet(nodes: list) -> str:
# instance of the vulnerability
base_finding_details = {
"title": result.get(
- "queryPath", result.get("queryName", "SAST Finding")
+ "queryPath", result.get("queryName", "SAST Finding"),
).replace("_", " "),
"description": (
f"{result.get('description')}\n\n"
diff --git a/dojo/tools/checkmarx_osa/parser.py b/dojo/tools/checkmarx_osa/parser.py
index f61f5de656..43cb255698 100644
--- a/dojo/tools/checkmarx_osa/parser.py
+++ b/dojo/tools/checkmarx_osa/parser.py
@@ -52,7 +52,7 @@ def get_findings(self, filehandle, test):
vulnerability_id = item.get("cveName", "NC")
finding_item = Finding(
title="{} {} | {}".format(
- library["name"], library["version"], vulnerability_id
+ library["name"], library["version"], vulnerability_id,
),
severity=item["severity"]["name"],
description=item.get("description", "NC"),
@@ -69,14 +69,14 @@ def get_findings(self, filehandle, test):
cwe=1035,
cvssv3_score=item.get("score", None),
publish_date=datetime.strptime(
- item["publishDate"], "%Y-%m-%dT%H:%M:%S"
+ item["publishDate"], "%Y-%m-%dT%H:%M:%S",
)
if "publishDate" in item
else None,
static_finding=True,
dynamic_finding=False,
scanner_confidence=self.checkmarx_confidence_to_defectdojo_confidence(
- library["confidenceLevel"]
+ library["confidenceLevel"],
)
if "confidenceLevel" in library
else None,
@@ -115,7 +115,7 @@ def get_vunlerabilities(self, tree):
# 100% = Certain
# 70% = Firm
def checkmarx_confidence_to_defectdojo_confidence(
- self, checkmarx_confidence
+ self, checkmarx_confidence,
):
return round((100 - checkmarx_confidence) / 10) + 1
diff --git a/dojo/tools/chefinspect/parser.py b/dojo/tools/chefinspect/parser.py
index 1dd413df62..22b7cdfd0a 100644
--- a/dojo/tools/chefinspect/parser.py
+++ b/dojo/tools/chefinspect/parser.py
@@ -54,6 +54,6 @@ def get_findings(self, file, test):
description=description,
severity=self.convert_score(json_object.get("impact")),
active=True,
- )
+ ),
)
return result
diff --git a/dojo/tools/clair/clairklar_parser.py b/dojo/tools/clair/clairklar_parser.py
index c42ba78b32..efef6483d5 100644
--- a/dojo/tools/clair/clairklar_parser.py
+++ b/dojo/tools/clair/clairklar_parser.py
@@ -19,7 +19,7 @@ def get_items_klar(self, tree, test):
]
for clair_severity in clair_severities:
items.extend(
- self.set_items_for_severity(tree, test, clair_severity)
+ self.set_items_for_severity(tree, test, clair_severity),
)
return items
@@ -60,7 +60,7 @@ def get_item_clairklar(self, item_node, test):
)
if "FeatureVersion" in item_node:
description += " Vulnerable Versions: " + str(
- item_node["FeatureVersion"]
+ item_node["FeatureVersion"],
)
mitigation = ""
diff --git a/dojo/tools/cloudsploit/parser.py b/dojo/tools/cloudsploit/parser.py
index 22e8de1a2b..7ad446bcf7 100644
--- a/dojo/tools/cloudsploit/parser.py
+++ b/dojo/tools/cloudsploit/parser.py
@@ -56,7 +56,7 @@ def get_findings(self, file, test):
# internal de-duplication
dupe_key = hashlib.sha256(
- str(description + title).encode("utf-8")
+ str(description + title).encode("utf-8"),
).hexdigest()
if dupe_key in dupes:
diff --git a/dojo/tools/cobalt/parser.py b/dojo/tools/cobalt/parser.py
index 2e4a74f41f..4ac5c43b73 100644
--- a/dojo/tools/cobalt/parser.py
+++ b/dojo/tools/cobalt/parser.py
@@ -25,7 +25,7 @@ def get_findings(self, filename, test):
if isinstance(content, bytes):
content = content.decode("utf-8")
reader = csv.DictReader(
- io.StringIO(content), delimiter=",", quotechar='"'
+ io.StringIO(content), delimiter=",", quotechar='"',
)
dupes = {}
for row in reader:
@@ -76,7 +76,7 @@ def get_findings(self, filename, test):
finding.description = ""
key = hashlib.md5(
- (finding.title + "|" + finding.description).encode("utf-8")
+ (finding.title + "|" + finding.description).encode("utf-8"),
).hexdigest()
if key not in dupes:
diff --git a/dojo/tools/contrast/parser.py b/dojo/tools/contrast/parser.py
index 73e3b94c7b..fb31316e5f 100644
--- a/dojo/tools/contrast/parser.py
+++ b/dojo/tools/contrast/parser.py
@@ -41,7 +41,7 @@ def get_findings(self, filename, test):
if severity == "Note":
severity = "Info"
date_raw = datetime.datetime.utcfromtimestamp(
- int(row.get("First Seen")) / 1000
+ int(row.get("First Seen")) / 1000,
)
finding = Finding(
title=title.split(" from")[0],
@@ -76,11 +76,11 @@ def get_findings(self, filename, test):
+ "\n"
+ row.get("Request Body"),
"resp": "",
- }
+ },
)
dupe_key = hashlib.sha256(
- f"{finding.vuln_id_from_tool}".encode()
+ f"{finding.vuln_id_from_tool}".encode(),
).digest()
if dupe_key in dupes:
@@ -90,7 +90,7 @@ def get_findings(self, filename, test):
+ finding.description
)
dupes[dupe_key].unsaved_endpoints.extend(
- finding.unsaved_endpoints
+ finding.unsaved_endpoints,
)
dupes[dupe_key].nb_occurences += finding.nb_occurences
dupes[
diff --git a/dojo/tools/coverity_api/parser.py b/dojo/tools/coverity_api/parser.py
index c3b15f573f..194939de98 100644
--- a/dojo/tools/coverity_api/parser.py
+++ b/dojo/tools/coverity_api/parser.py
@@ -35,14 +35,14 @@ def get_findings(self, file, test):
f"**Type:** `{issue.get('displayType')}`",
f"**Status:** `{issue.get('status')}`",
f"**Classification:** `{issue.get('classification')}`",
- ]
+ ],
)
finding = Finding()
finding.test = test
finding.title = issue["displayType"]
finding.severity = self.convert_displayImpact(
- issue.get("displayImpact")
+ issue.get("displayImpact"),
)
finding.description = description_formated
finding.static_finding = True
@@ -51,7 +51,7 @@ def get_findings(self, file, test):
if "firstDetected" in issue:
finding.date = datetime.strptime(
- issue["firstDetected"], "%m/%d/%y"
+ issue["firstDetected"], "%m/%d/%y",
).date()
if "cwe" in issue and isinstance(issue["cwe"], int):
diff --git a/dojo/tools/crashtest_security/parser.py b/dojo/tools/crashtest_security/parser.py
index 8770013b79..2c118d8466 100644
--- a/dojo/tools/crashtest_security/parser.py
+++ b/dojo/tools/crashtest_security/parser.py
@@ -30,7 +30,7 @@ def get_findings(self, file, test):
crashtest_scan = crashtest_scan["data"]
descriptions = self.create_descriptions_dict(
- crashtest_scan["descriptions"]
+ crashtest_scan["descriptions"],
)
# Iterate scanner which contain the items
@@ -39,14 +39,14 @@ def get_findings(self, file, test):
# Iterate all findings of the scanner
for finding in scanner:
items.append(
- self.generate_finding(finding, test, descriptions)
+ self.generate_finding(finding, test, descriptions),
)
# Iterate all connected CVE findings if any
if "cve_findings" in finding:
for cve_finding in finding["cve_findings"]:
items.append(
- self.generate_cve_finding(cve_finding, test)
+ self.generate_cve_finding(cve_finding, test),
)
return items
@@ -103,7 +103,7 @@ def generate_cve_finding(self, cve_finding, test):
"""
severity = self.get_severity(cve_finding["cvss"])
references = "https://nvd.nist.gov/vuln/detail/{}".format(
- cve_finding["cve_id"]
+ cve_finding["cve_id"],
)
finding = Finding(
title=cve_finding["cve_id"],
diff --git a/dojo/tools/cred_scan/parser.py b/dojo/tools/cred_scan/parser.py
index 6b67305caa..e796284346 100644
--- a/dojo/tools/cred_scan/parser.py
+++ b/dojo/tools/cred_scan/parser.py
@@ -29,7 +29,7 @@ def get_findings(self, filename, test):
if isinstance(content, bytes):
content = content.decode("utf-8-sig")
reader = csv.DictReader(
- io.StringIO(content), delimiter=",", quotechar='"'
+ io.StringIO(content), delimiter=",", quotechar='"',
)
dupes = {}
@@ -41,11 +41,11 @@ def get_findings(self, filename, test):
description += "\n Is Supressed: " + str(row["IsSuppressed"])
if "SuppressJustification" in row:
description += "\n Supress Justifcation: " + str(
- row["SuppressJustification"]
+ row["SuppressJustification"],
)
if "MatchingScore" in row:
description += "\n Matching Score: " + str(
- row["MatchingScore"]
+ row["MatchingScore"],
)
finding = Finding(
@@ -59,7 +59,7 @@ def get_findings(self, filename, test):
# Update the finding date if it specified
if "TimeofDiscovery" in row:
finding.date = parser.parse(
- row["TimeofDiscovery"].replace("Z", "")
+ row["TimeofDiscovery"].replace("Z", ""),
)
# internal de-duplication
diff --git a/dojo/tools/crunch42/parser.py b/dojo/tools/crunch42/parser.py
index ff09dddcd6..10917ca84b 100644
--- a/dojo/tools/crunch42/parser.py
+++ b/dojo/tools/crunch42/parser.py
@@ -49,7 +49,7 @@ def get_items(self, tree, test):
for key, node in results.items():
for issue in node["issues"]:
item = self.get_item(
- issue, key, test
+ issue, key, test,
)
items[iterator] = item
iterator += 1
diff --git a/dojo/tools/cyclonedx/helpers.py b/dojo/tools/cyclonedx/helpers.py
index fb658dfdc1..8e2bd29d24 100644
--- a/dojo/tools/cyclonedx/helpers.py
+++ b/dojo/tools/cyclonedx/helpers.py
@@ -15,7 +15,7 @@ def _get_cvssv3(self, raw_vector):
return CVSS3(raw_vector)
except BaseException:
LOGGER.exception(
- f"error while parsing vector CVSS v3 {raw_vector}"
+ f"error while parsing vector CVSS v3 {raw_vector}",
)
return None
diff --git a/dojo/tools/cyclonedx/json_parser.py b/dojo/tools/cyclonedx/json_parser.py
index a57b6debaf..772e948f86 100644
--- a/dojo/tools/cyclonedx/json_parser.py
+++ b/dojo/tools/cyclonedx/json_parser.py
@@ -17,7 +17,7 @@ def _get_findings_json(self, file, test):
report_date = None
if data.get("metadata") and data.get("metadata").get("timestamp"):
report_date = dateutil.parser.parse(
- data.get("metadata").get("timestamp")
+ data.get("metadata").get("timestamp"),
)
# for each component we keep data
components = {}
@@ -55,7 +55,7 @@ def _get_findings_json(self, file, test):
for affect in vulnerability.get("affects", []):
reference = affect["ref"] # required by the specification
component_name, component_version = Cyclonedxhelper()._get_component(
- components, reference
+ components, reference,
)
if not description:
description = "Description was not provided."
@@ -105,7 +105,7 @@ def _get_findings_json(self, file, test):
if cwes and len(cwes) > 1:
# FIXME support more than one CWE
LOGGER.debug(
- f"more than one CWE for a finding {cwes}. NOT supported by parser API"
+ f"more than one CWE for a finding {cwes}. NOT supported by parser API",
)
if cwes and len(cwes) > 0:
finding.cwe = cwes[0]
@@ -138,7 +138,7 @@ def _flatten_components(self, components, flatted_components):
for component in components:
if "components" in component:
self._flatten_components(
- component.get("components", []), flatted_components
+ component.get("components", []), flatted_components,
)
# according to specification 1.4, 'bom-ref' is mandatory but some
# tools don't provide it
diff --git a/dojo/tools/cyclonedx/xml_parser.py b/dojo/tools/cyclonedx/xml_parser.py
index 91ba3ab0a9..dc0cfe4ff3 100644
--- a/dojo/tools/cyclonedx/xml_parser.py
+++ b/dojo/tools/cyclonedx/xml_parser.py
@@ -20,21 +20,21 @@ def _get_findings_xml(self, file, test):
raise ValueError(msg)
ns = {
"b": namespace.replace("{", "").replace(
- "}", ""
+ "}", "",
), # we accept whatever the version
"v": "http://cyclonedx.org/schema/ext/vulnerability/1.0",
}
# get report date
report_date = None
report_date_raw = root.findtext(
- "b:metadata/b:timestamp", namespaces=ns
+ "b:metadata/b:timestamp", namespaces=ns,
)
if report_date_raw:
report_date = dateutil.parser.parse(report_date_raw)
bom_refs = {}
findings = []
for component in root.findall(
- "b:components/b:component", namespaces=ns
+ "b:components/b:component", namespaces=ns,
):
component_name = component.findtext(f"{namespace}name")
component_version = component.findtext(f"{namespace}version")
@@ -46,7 +46,7 @@ def _get_findings_xml(self, file, test):
}
# for each vulnerabilities add a finding
for vulnerability in component.findall(
- "v:vulnerabilities/v:vulnerability", namespaces=ns
+ "v:vulnerabilities/v:vulnerability", namespaces=ns,
):
finding_vuln = self.manage_vulnerability_legacy(
vulnerability,
@@ -59,20 +59,20 @@ def _get_findings_xml(self, file, test):
findings.append(finding_vuln)
# manage adhoc vulnerabilities
for vulnerability in root.findall(
- "v:vulnerabilities/v:vulnerability", namespaces=ns
+ "v:vulnerabilities/v:vulnerability", namespaces=ns,
):
finding_vuln = self.manage_vulnerability_legacy(
- vulnerability, ns, bom_refs, report_date
+ vulnerability, ns, bom_refs, report_date,
)
findings.append(finding_vuln)
# manage adhoc vulnerabilities (compatible with 1.4 of the spec)
for vulnerability in root.findall(
- "b:vulnerabilities/b:vulnerability", namespaces=ns
+ "b:vulnerabilities/b:vulnerability", namespaces=ns,
):
findings.extend(
self._manage_vulnerability_xml(
- vulnerability, ns, bom_refs, report_date
- )
+ vulnerability, ns, bom_refs, report_date,
+ ),
)
return findings
@@ -94,7 +94,7 @@ def manage_vulnerability_legacy(
vuln_id = vulnerability.findtext("v:id", namespaces=ns)
severity = vulnerability.findtext(
- "v:ratings/v:rating/v:severity", namespaces=ns
+ "v:ratings/v:rating/v:severity", namespaces=ns,
)
description = vulnerability.findtext("v:description", namespaces=ns)
# by the schema, only id and ref are mandatory, even the severity is
@@ -105,7 +105,7 @@ def manage_vulnerability_legacy(
f"**Ref:** {ref}",
f"**Id:** {vuln_id}",
f"**Severity:** {str(severity)}",
- ]
+ ],
)
if component_name is None:
bom = bom_refs[ref]
@@ -115,7 +115,7 @@ def manage_vulnerability_legacy(
severity = Cyclonedxhelper().fix_severity(severity)
references = ""
for adv in vulnerability.findall(
- "v:advisories/v:advisory", namespaces=ns
+ "v:advisories/v:advisory", namespaces=ns,
):
references += f"{adv.text}\n"
finding = Finding(
@@ -132,14 +132,14 @@ def manage_vulnerability_legacy(
finding.date = report_date
mitigation = ""
for recommend in vulnerability.findall(
- "v:recommendations/v:recommendation", namespaces=ns
+ "v:recommendations/v:recommendation", namespaces=ns,
):
mitigation += f"{recommend.text}\n"
if mitigation != "":
finding.mitigation = mitigation
# manage CVSS
for rating in vulnerability.findall(
- "v:ratings/v:rating", namespaces=ns
+ "v:ratings/v:rating", namespaces=ns,
):
if "CVSSv3" == rating.findtext("v:method", namespaces=ns):
raw_vector = rating.findtext("v:vector", namespaces=ns)
@@ -156,7 +156,7 @@ def manage_vulnerability_legacy(
if len(cwes) > 1:
# FIXME support more than one CWE
LOGGER.debug(
- f"more than one CWE for a finding {cwes}. NOT supported by parser API"
+ f"more than one CWE for a finding {cwes}. NOT supported by parser API",
)
if len(cwes) > 0:
finding.cwe = cwes[0]
@@ -171,7 +171,7 @@ def manage_vulnerability_legacy(
def get_cwes(self, node, prefix, namespaces):
cwes = []
for cwe in node.findall(
- prefix + ":cwes/" + prefix + ":cwe", namespaces
+ prefix + ":cwes/" + prefix + ":cwe", namespaces,
):
if cwe.text.isdigit():
cwes.append(int(cwe.text))
@@ -195,12 +195,12 @@ def _manage_vulnerability_xml(
else:
description = f"\n{detail}"
severity = vulnerability.findtext(
- "b:ratings/b:rating/b:severity", namespaces=ns
+ "b:ratings/b:rating/b:severity", namespaces=ns,
)
severity = Cyclonedxhelper().fix_severity(severity)
references = ""
for advisory in vulnerability.findall(
- "b:advisories/b:advisory", namespaces=ns
+ "b:advisories/b:advisory", namespaces=ns,
):
title = advisory.findtext("b:title", namespaces=ns)
if title:
@@ -215,7 +215,7 @@ def _manage_vulnerability_xml(
vulnerability_ids.append(vuln_id)
# check references to see if we have other vulnerability ids
for reference in vulnerability.findall(
- "b:references/b:reference", namespaces=ns
+ "b:references/b:reference", namespaces=ns,
):
vulnerability_id = reference.findtext("b:id", namespaces=ns)
if vulnerability_id:
@@ -223,18 +223,18 @@ def _manage_vulnerability_xml(
# for all component affected
findings = []
for target in vulnerability.findall(
- "b:affects/b:target", namespaces=ns
+ "b:affects/b:target", namespaces=ns,
):
ref = target.find("b:ref", namespaces=ns)
component_name, component_version = Cyclonedxhelper()._get_component(
- bom_refs, ref.text
+ bom_refs, ref.text,
)
finding = Finding(
title=f"{component_name}:{component_version} | {vuln_id}",
description=description,
severity=severity,
mitigation=vulnerability.findtext(
- "b:recommendation", namespaces=ns
+ "b:recommendation", namespaces=ns,
),
references=references,
component_name=component_name,
@@ -250,7 +250,7 @@ def _manage_vulnerability_xml(
finding.date = report_date
# manage CVSS
for rating in vulnerability.findall(
- "b:ratings/b:rating", namespaces=ns
+ "b:ratings/b:rating", namespaces=ns,
):
method = rating.findtext("b:method", namespaces=ns)
if "CVSSv3" == method or "CVSSv31" == method:
@@ -270,7 +270,7 @@ def _manage_vulnerability_xml(
if len(cwes) > 1:
# FIXME support more than one CWE
LOGGER.debug(
- f"more than one CWE for a finding {cwes}. NOT supported by parser API"
+ f"more than one CWE for a finding {cwes}. NOT supported by parser API",
)
if len(cwes) > 0:
finding.cwe = cwes[0]
@@ -291,7 +291,7 @@ def _manage_vulnerability_xml(
finding.active = False
if not finding.active:
detail = analysis[0].findtext(
- "b:detail", namespaces=ns
+ "b:detail", namespaces=ns,
)
if detail:
finding.mitigation = (
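The CycloneDX XML hunks above thread a namespaces dict through every findall/findtext call so the short "b:"/"v:" prefixes resolve to the full schema URIs. A minimal standalone sketch of that mechanism (the document below is a toy BOM, not a real report):

    import xml.etree.ElementTree as ET

    xml = (
        '<bom xmlns="http://cyclonedx.org/schema/bom/1.4">'
        "<metadata><timestamp>2024-07-03T22:16:13Z</timestamp></metadata>"
        "</bom>"
    )
    root = ET.fromstring(xml)
    ns = {"b": "http://cyclonedx.org/schema/bom/1.4"}
    print(root.findtext("b:metadata/b:timestamp", namespaces=ns))  # 2024-07-03T22:16:13Z
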
diff --git a/dojo/tools/dawnscanner/parser.py b/dojo/tools/dawnscanner/parser.py
index 9fb2085a1f..c2b9ab930a 100644
--- a/dojo/tools/dawnscanner/parser.py
+++ b/dojo/tools/dawnscanner/parser.py
@@ -45,7 +45,7 @@ def get_findings(self, filename, test):
if self.CVE_REGEX.match(item["name"]):
finding.unsaved_vulnerability_ids = [
- self.CVE_REGEX.findall(item["name"])[0]
+ self.CVE_REGEX.findall(item["name"])[0],
]
items.append(finding)
diff --git a/dojo/tools/dependency_check/parser.py b/dojo/tools/dependency_check/parser.py
index 870aba11fb..11b9bb10e7 100644
--- a/dojo/tools/dependency_check/parser.py
+++ b/dojo/tools/dependency_check/parser.py
@@ -29,22 +29,22 @@ def add_finding(self, finding, dupes):
str(finding.title),
str(finding.cwe),
str(finding.file_path).lower(),
- ]
+ ],
)
key = hashlib.sha256(key_str.encode("utf-8")).hexdigest()
if key not in dupes:
dupes[key] = finding
def get_filename_and_path_from_dependency(
- self, dependency, related_dependency, namespace
+ self, dependency, related_dependency, namespace,
):
if not related_dependency:
return dependency.findtext(
- f"{namespace}fileName"
+ f"{namespace}fileName",
), dependency.findtext(f"{namespace}filePath")
if related_dependency.findtext(f"{namespace}fileName"):
return related_dependency.findtext(
- f"{namespace}fileName"
+ f"{namespace}fileName",
), related_dependency.findtext(f"{namespace}filePath")
else:
# without filename, it would be just a duplicate finding so we have to skip it. filename
@@ -54,7 +54,7 @@ def get_filename_and_path_from_dependency(
return None, None
def get_component_name_and_version_from_dependency(
- self, dependency, related_dependency, namespace
+ self, dependency, related_dependency, namespace,
):
identifiers_node = dependency.find(namespace + "identifiers")
if identifiers_node:
@@ -94,7 +94,7 @@ def get_component_name_and_version_from_dependency(
# return component_name, component_version
cpe_node = identifiers_node.find(
- ".//" + namespace + 'identifier[@type="cpe"]'
+ ".//" + namespace + 'identifier[@type="cpe"]',
)
if cpe_node:
id = cpe_node.findtext(f"{namespace}name")
@@ -116,11 +116,11 @@ def get_component_name_and_version_from_dependency(
return component_name, component_version
maven_node = identifiers_node.find(
- ".//" + namespace + 'identifier[@type="maven"]'
+ ".//" + namespace + 'identifier[@type="maven"]',
)
if maven_node:
maven_parts = maven_node.findtext(f"{namespace}name").split(
- ":"
+ ":",
)
# logger.debug('maven_parts:' + str(maven_parts))
if len(maven_parts) == 3:
@@ -131,7 +131,7 @@ def get_component_name_and_version_from_dependency(
# TODO what happens when there multiple evidencecollectednodes with
# product or version as type?
evidence_collected_node = dependency.find(
- namespace + "evidenceCollected"
+ namespace + "evidenceCollected",
)
if evidence_collected_node:
#
@@ -149,16 +149,16 @@ def get_component_name_and_version_from_dependency(
# since 6.0.0 howoever it seems like there's always a packageurl above so not sure if we need the effort to
# implement more logic here
product_node = evidence_collected_node.find(
- ".//" + namespace + 'evidence[@type="product"]'
+ ".//" + namespace + 'evidence[@type="product"]',
)
if product_node:
component_name = product_node.findtext(f"{namespace}value")
version_node = evidence_collected_node.find(
- ".//" + namespace + 'evidence[@type="version"]'
+ ".//" + namespace + 'evidence[@type="version"]',
)
if version_node:
component_version = version_node.findtext(
- f"{namespace}value"
+ f"{namespace}value",
)
return component_name, component_version
@@ -166,13 +166,13 @@ def get_component_name_and_version_from_dependency(
return None, None
def get_finding_from_vulnerability(
- self, dependency, related_dependency, vulnerability, test, namespace
+ self, dependency, related_dependency, vulnerability, test, namespace,
):
(
dependency_filename,
dependency_filepath,
) = self.get_filename_and_path_from_dependency(
- dependency, related_dependency, namespace
+ dependency, related_dependency, namespace,
)
# logger.debug('dependency_filename: %s', dependency_filename)
@@ -185,7 +185,7 @@ def get_finding_from_vulnerability(
name = vulnerability.findtext(f"{namespace}name")
if vulnerability.find(f"{namespace}cwes"):
cwe_field = vulnerability.find(f"{namespace}cwes").findtext(
- f"{namespace}cwe"
+ f"{namespace}cwe",
)
else:
cwe_field = vulnerability.findtext(f"{namespace}cwe")
@@ -217,13 +217,13 @@ def get_finding_from_vulnerability(
component_name,
component_version,
) = self.get_component_name_and_version_from_dependency(
- dependency, related_dependency, namespace
+ dependency, related_dependency, namespace,
)
stripped_name = name
# startswith CVE-XXX-YYY
stripped_name = re.sub(
- r"^CVE-\d{4}-\d{4,7}", "", stripped_name
+ r"^CVE-\d{4}-\d{4,7}", "", stripped_name,
).strip()
# startswith CWE-XXX:
stripped_name = re.sub(r"^CWE-\d+\:", "", stripped_name).strip()
@@ -232,7 +232,7 @@ def get_finding_from_vulnerability(
if component_name is None:
logger.warning(
- f"component_name was None for File: {dependency_filename}, using dependency file name instead."
+ f"component_name was None for File: {dependency_filename}, using dependency file name instead.",
)
component_name = dependency_filename
@@ -261,7 +261,7 @@ def get_finding_from_vulnerability(
if severity:
if severity.strip().lower() not in self.SEVERITY_MAPPING:
logger.warning(
- f"Warning: Unknow severity value detected '{severity}'. Bypass to 'Medium' value"
+ f"Warning: Unknow severity value detected '{severity}'. Bypass to 'Medium' value",
)
severity = "Medium"
else:
@@ -275,7 +275,7 @@ def get_finding_from_vulnerability(
if references_node is not None:
reference_detail = ""
for reference_node in references_node.findall(
- namespace + "reference"
+ namespace + "reference",
):
ref_source = reference_node.findtext(f"{namespace}source")
ref_url = reference_node.findtext(f"{namespace}url")
@@ -365,17 +365,17 @@ def get_findings(self, filename, test):
projectInfo_node = scan.find(f"{namespace}projectInfo")
if projectInfo_node.findtext(f"{namespace}reportDate"):
scan_date = dateutil.parser.parse(
- projectInfo_node.findtext(f"{namespace}reportDate")
+ projectInfo_node.findtext(f"{namespace}reportDate"),
)
if dependencies:
for dependency in dependencies.findall(namespace + "dependency"):
vulnerabilities = dependency.find(
- namespace + "vulnerabilities"
+ namespace + "vulnerabilities",
)
if vulnerabilities is not None:
for vulnerability in vulnerabilities.findall(
- namespace + "vulnerability"
+ namespace + "vulnerability",
):
if vulnerability:
finding = self.get_finding_from_vulnerability(
@@ -390,13 +390,13 @@ def get_findings(self, filename, test):
self.add_finding(finding, dupes)
relatedDependencies = dependency.find(
- namespace + "relatedDependencies"
+ namespace + "relatedDependencies",
)
if relatedDependencies:
for (
relatedDependency
) in relatedDependencies.findall(
- namespace + "relatedDependency"
+ namespace + "relatedDependency",
):
finding = (
self.get_finding_from_vulnerability(
@@ -413,7 +413,7 @@ def get_findings(self, filename, test):
self.add_finding(finding, dupes)
for suppressedVulnerability in vulnerabilities.findall(
- namespace + "suppressedVulnerability"
+ namespace + "suppressedVulnerability",
):
if suppressedVulnerability:
finding = self.get_finding_from_vulnerability(
diff --git a/dojo/tools/detect_secrets/parser.py b/dojo/tools/detect_secrets/parser.py
index 7f139b8230..b3ff15af67 100644
--- a/dojo/tools/detect_secrets/parser.py
+++ b/dojo/tools/detect_secrets/parser.py
@@ -38,7 +38,7 @@ def get_findings(self, filename, test):
description += "**Type:** " + type + "\n"
dupe_key = hashlib.sha256(
- (type + file + str(line) + hashed_secret).encode("utf-8")
+ (type + file + str(line) + hashed_secret).encode("utf-8"),
).hexdigest()
if dupe_key in dupes:
diff --git a/dojo/tools/dockerbench/parser.py b/dojo/tools/dockerbench/parser.py
index 4c08b850e0..120da8eb6f 100644
--- a/dojo/tools/dockerbench/parser.py
+++ b/dojo/tools/dockerbench/parser.py
@@ -34,7 +34,7 @@ def get_tests(tree, test):
for node in tree["tests"]:
items_from_results = get_results(
- node, test, test_start, test_end, description
+ node, test, test_start, test_end, description,
)
items_from_tests += items_from_results
@@ -108,7 +108,7 @@ def get_item(vuln, test, test_start, test_end, description):
if vuln.get("remediation-impact"):
mitigation += "\n"
mitigation += "mitigation impact: {}\n".format(
- vuln["remediation-impact"]
+ vuln["remediation-impact"],
)
finding = Finding(
diff --git a/dojo/tools/dockle/parser.py b/dojo/tools/dockle/parser.py
index b650694078..6bb70769dd 100644
--- a/dojo/tools/dockle/parser.py
+++ b/dojo/tools/dockle/parser.py
@@ -41,7 +41,7 @@ def get_findings(self, filename, test):
description = sorted(item.get("alerts", []))
description = "\n".join(description)
dupe_key = hashlib.sha256(
- (code + title).encode("utf-8")
+ (code + title).encode("utf-8"),
).hexdigest()
if dupe_key in dupes:
diff --git a/dojo/tools/dsop/parser.py b/dojo/tools/dsop/parser.py
index 0e4834f367..0d01e5b8d8 100644
--- a/dojo/tools/dsop/parser.py
+++ b/dojo/tools/dsop/parser.py
@@ -21,11 +21,11 @@ def get_findings(self, file, test):
self.__parse_disa(test, items, book["OpenSCAP - DISA Compliance"])
self.__parse_oval(test, items, book["OpenSCAP - OVAL Results"])
self.__parse_twistlock(
- test, items, book["Twistlock Vulnerability Results"]
+ test, items, book["Twistlock Vulnerability Results"],
)
self.__parse_anchore(test, items, book["Anchore CVE Results"])
self.__parse_anchore_compliance(
- test, items, book["Anchore Compliance Results"]
+ test, items, book["Anchore Compliance Results"],
)
return items
@@ -68,7 +68,7 @@ def __parse_disa(self, test, items, sheet):
if row[headers["identifiers"]]:
finding.unsaved_vulnerability_ids = [
- row[headers["identifiers"]]
+ row[headers["identifiers"]],
]
finding.unsaved_tags = tags
@@ -140,7 +140,7 @@ def __parse_twistlock(self, test, items, sheet):
component_name = row[headers["packageName"]]
component_version = row[headers["packageVersion"]]
title = "{}: {} - {}".format(
- row[headers["cve"]], component_name, component_version
+ row[headers["cve"]], component_name, component_version,
)
if row[headers["severity"]] == "important":
severity = "High"
@@ -235,7 +235,7 @@ def __parse_anchore_compliance(self, test, items, sheet):
row[headers["check_output"]],
)
title = "{}: {}".format(
- row[headers["policy_id"]], row[headers["trigger_id"]]
+ row[headers["policy_id"]], row[headers["trigger_id"]],
)
tags = "anchore_compliance"
diff --git a/dojo/tools/eslint/parser.py b/dojo/tools/eslint/parser.py
index 2b698e7b17..9e282cca41 100644
--- a/dojo/tools/eslint/parser.py
+++ b/dojo/tools/eslint/parser.py
@@ -48,7 +48,7 @@ def get_findings(self, filename, test):
findingdetail += "Line number: " + str(message["line"]) + "\n"
sev = self._convert_eslint_severity_to_dojo_severity(
- message["severity"]
+ message["severity"],
)
find = Finding(
diff --git a/dojo/tools/fortify/fpr_parser.py b/dojo/tools/fortify/fpr_parser.py
index ca4d268668..b2fb90474e 100644
--- a/dojo/tools/fortify/fpr_parser.py
+++ b/dojo/tools/fortify/fpr_parser.py
@@ -63,7 +63,7 @@ def parse_fpr(self, filename, test):
unique_id_from_tool=ClassID,
file_path=SourceLocationpath,
line=SourceLocationline,
- )
+ ),
)
return items
diff --git a/dojo/tools/fortify/xml_parser.py b/dojo/tools/fortify/xml_parser.py
index 92469da88c..5a09e8e1e7 100644
--- a/dojo/tools/fortify/xml_parser.py
+++ b/dojo/tools/fortify/xml_parser.py
@@ -75,7 +75,7 @@ def parse_xml(self, filename, test):
dupes = set()
for issue_key, issue in issue_map.items():
title = self.format_title(
- issue["Category"], issue["FileName"], issue["LineStart"]
+ issue["Category"], issue["FileName"], issue["LineStart"],
)
if title not in dupes:
items.append(
@@ -89,7 +89,7 @@ def parse_xml(self, filename, test):
description=self.format_description(issue, cat_meta),
mitigation=self.format_mitigation(issue, cat_meta),
unique_id_from_tool=issue_key,
- )
+ ),
)
dupes.add(title)
return items
@@ -117,7 +117,7 @@ def format_description(self, issue, meta_info) -> str:
"##Source:\nThis snippet provides more context on the execution path that "
"leads to this finding. \n"
"####Snippet:\n**File: {}: {}**\n```\n{}\n```\n".format(
- source["FileName"], source["LineStart"], source["Snippet"]
+ source["FileName"], source["LineStart"], source["Snippet"],
)
)
if explanation:
diff --git a/dojo/tools/gcloud_artifact_scan/parser.py b/dojo/tools/gcloud_artifact_scan/parser.py
index d531f9b6f6..e53da28967 100644
--- a/dojo/tools/gcloud_artifact_scan/parser.py
+++ b/dojo/tools/gcloud_artifact_scan/parser.py
@@ -51,7 +51,7 @@ def get_findings(self, json_output, test):
component_version=vuln["vulnerability"]["packageIssue"][0]["affectedVersion"]["fullName"],
static_finding=True,
dynamic_finding=False,
- cvssv3_score=vuln["vulnerability"]["cvssScore"]
+ cvssv3_score=vuln["vulnerability"]["cvssScore"],
)
findings.append(finding)
return findings
diff --git a/dojo/tools/generic/csv_parser.py b/dojo/tools/generic/csv_parser.py
index 2bf500da1b..b41b3789f2 100644
--- a/dojo/tools/generic/csv_parser.py
+++ b/dojo/tools/generic/csv_parser.py
@@ -16,7 +16,7 @@ def _get_findings_csv(self, filename):
if isinstance(content, bytes):
content = content.decode("utf-8")
reader = csv.DictReader(
- io.StringIO(content), delimiter=",", quotechar='"'
+ io.StringIO(content), delimiter=",", quotechar='"',
)
dupes = {}
@@ -27,7 +27,7 @@ def _get_findings_csv(self, filename):
date=parse(row["Date"]).date(),
severity=self.get_severity(row["Severity"]),
duplicate=self._convert_bool(
- row.get("Duplicate", "FALSE")
+ row.get("Duplicate", "FALSE"),
), # bool False by default
nb_occurences=1,
)
@@ -56,11 +56,11 @@ def _get_findings_csv(self, filename):
if "Vulnerability Id" in row and row["Vulnerability Id"]:
if finding.unsaved_vulnerability_ids:
finding.unsaved_vulnerability_ids.append(
- row["Vulnerability Id"]
+ row["Vulnerability Id"],
)
else:
finding.unsaved_vulnerability_ids = [
- row["Vulnerability Id"]
+ row["Vulnerability Id"],
]
# manage CWE
if "CweId" in row:
@@ -76,19 +76,19 @@ def _get_findings_csv(self, filename):
finding.unsaved_endpoints = [
Endpoint.from_uri(row["Url"])
if "://" in row["Url"]
- else Endpoint.from_uri("//" + row["Url"])
+ else Endpoint.from_uri("//" + row["Url"]),
]
# manage internal de-duplication
key = hashlib.sha256(
- f"{finding.severity}|{finding.title}|{finding.description}".encode()
+ f"{finding.severity}|{finding.title}|{finding.description}".encode(),
).hexdigest()
if key in dupes:
find = dupes[key]
find.unsaved_endpoints.extend(finding.unsaved_endpoints)
if find.unsaved_vulnerability_ids:
find.unsaved_vulnerability_ids.extend(
- finding.unsaved_vulnerability_ids
+ finding.unsaved_vulnerability_ids,
)
else:
find.unsaved_vulnerability_ids = (
diff --git a/dojo/tools/generic/json_parser.py b/dojo/tools/generic/json_parser.py
index ecf605e835..296209f3d2 100644
--- a/dojo/tools/generic/json_parser.py
+++ b/dojo/tools/generic/json_parser.py
@@ -109,7 +109,7 @@ def _get_test_json(self, data):
if unsaved_vulnerability_ids:
if finding.unsaved_vulnerability_ids:
finding.unsaved_vulnerability_ids.append(
- unsaved_vulnerability_ids
+ unsaved_vulnerability_ids,
)
else:
finding.unsaved_vulnerability_ids = (
diff --git a/dojo/tools/ggshield/parser.py b/dojo/tools/ggshield/parser.py
index 383c334390..54a5bd23e7 100644
--- a/dojo/tools/ggshield/parser.py
+++ b/dojo/tools/ggshield/parser.py
@@ -110,7 +110,7 @@ def get_items(self, item, findings, dupes, test):
+ findings["match"]
+ str(findings["line_start"])
+ str(findings["line_end"])
- ).encode("utf-8")
+ ).encode("utf-8"),
).hexdigest()
if key not in dupes:
diff --git a/dojo/tools/github_vulnerability/parser.py b/dojo/tools/github_vulnerability/parser.py
index 3739fc6f20..b03dbc01e5 100644
--- a/dojo/tools/github_vulnerability/parser.py
+++ b/dojo/tools/github_vulnerability/parser.py
@@ -27,7 +27,7 @@ def get_findings(self, filename, test):
if "repository" in data["data"]:
if "nameWithOwner" in data["data"]["repository"]:
repository_url = "https://github.com/{}".format(
- data["data"]["repository"]["nameWithOwner"]
+ data["data"]["repository"]["nameWithOwner"],
)
if "url" in data["data"]["repository"]:
repository_url = data["data"]["repository"]["url"]
@@ -50,7 +50,7 @@ def get_findings(self, filename, test):
test=test,
description=description,
severity=self._convert_security(
- alert["securityVulnerability"].get("severity", "MODERATE")
+ alert["securityVulnerability"].get("severity", "MODERATE"),
),
static_finding=True,
dynamic_finding=False,
@@ -85,7 +85,7 @@ def get_findings(self, filename, test):
]:
if identifier.get("value"):
unsaved_vulnerability_ids.append(
- identifier.get("value")
+ identifier.get("value"),
)
if unsaved_vulnerability_ids:
finding.unsaved_vulnerability_ids = (
@@ -110,7 +110,7 @@ def get_findings(self, filename, test):
]["cvss"]["vectorString"]
if cvss_vector_string is not None:
cvss_objects = cvss_parser.parse_cvss_from_text(
- cvss_vector_string
+ cvss_vector_string,
)
if len(cvss_objects) > 0:
finding.cvssv3 = cvss_objects[0].clean_vector()
diff --git a/dojo/tools/gitlab_api_fuzzing/parser.py b/dojo/tools/gitlab_api_fuzzing/parser.py
index a0992b0e51..f7fae3718e 100644
--- a/dojo/tools/gitlab_api_fuzzing/parser.py
+++ b/dojo/tools/gitlab_api_fuzzing/parser.py
@@ -42,7 +42,7 @@ def get_findings(self, file, test):
static_finding=False,
dynamic_finding=True,
unique_id_from_tool=vulnerability["id"],
- )
+ ),
)
return findings
diff --git a/dojo/tools/gitlab_container_scan/parser.py b/dojo/tools/gitlab_container_scan/parser.py
index 4aa245c399..7dd65305e8 100644
--- a/dojo/tools/gitlab_container_scan/parser.py
+++ b/dojo/tools/gitlab_container_scan/parser.py
@@ -119,13 +119,13 @@ def get_findings(self, file, test):
dependency_name = self._get_dependency_name(dependency)
if dependency_name:
finding.component_name = textwrap.shorten(
- dependency_name, width=190, placeholder="..."
+ dependency_name, width=190, placeholder="...",
)
dependency_version = self._get_dependency_version(dependency)
if dependency_version:
finding.component_version = textwrap.shorten(
- dependency_version, width=90, placeholder="..."
+ dependency_version, width=90, placeholder="...",
)
if "solution" in vulnerability:
diff --git a/dojo/tools/gitlab_dast/parser.py b/dojo/tools/gitlab_dast/parser.py
index 83a7829af6..7728dd00ef 100644
--- a/dojo/tools/gitlab_dast/parser.py
+++ b/dojo/tools/gitlab_dast/parser.py
@@ -35,12 +35,12 @@ def get_items(self, tree, test):
item = self.get_item(node, test, scanner)
item_key = hashlib.sha256(
- f"{item.severity}|{item.title}|{item.description}".encode()
+ f"{item.severity}|{item.title}|{item.description}".encode(),
).hexdigest()
if item_key in items:
items[item_key].unsaved_endpoints.extend(
- item.unsaved_endpoints
+ item.unsaved_endpoints,
)
items[item_key].nb_occurences += 1
else:
@@ -64,7 +64,7 @@ def get_confidence_numeric(self, confidence):
def get_item(self, vuln, test, scanner):
# scanner_confidence
scanner_confidence = self.get_confidence_numeric(
- vuln.get("confidence", "Could not be determined")
+ vuln.get("confidence", "Could not be determined"),
)
# description
@@ -88,7 +88,7 @@ def get_item(self, vuln, test, scanner):
# date
if "discovered_at" in vuln:
finding.date = datetime.strptime(
- vuln["discovered_at"], "%Y-%m-%dT%H:%M:%S.%f"
+ vuln["discovered_at"], "%Y-%m-%dT%H:%M:%S.%f",
)
# id
diff --git a/dojo/tools/gitlab_sast/parser.py b/dojo/tools/gitlab_sast/parser.py
index b00a04a5e6..68dcfc9fd6 100644
--- a/dojo/tools/gitlab_sast/parser.py
+++ b/dojo/tools/gitlab_sast/parser.py
@@ -37,7 +37,7 @@ def get_tests(self, scan_type, handle):
test = ParserTest(
name=scanner_name,
type=scanner_type,
- version=scanner_version
+ version=scanner_version,
)
test.findings = self.get_items(tree)
return [test]
@@ -67,7 +67,7 @@ def get_confidence_numeric(self, argument):
'High': 3, # Firm
'Medium': 4, # Firm
'Low': 6, # Tentative
- 'Experimental': 7 # Tentative
+ 'Experimental': 7, # Tentative
}
return switcher.get(argument, None)
diff --git a/dojo/tools/gitleaks/parser.py b/dojo/tools/gitleaks/parser.py
index 40ec9b9a81..83c4b3beb3 100644
--- a/dojo/tools/gitleaks/parser.py
+++ b/dojo/tools/gitleaks/parser.py
@@ -98,7 +98,7 @@ def get_finding_legacy(self, issue, test, dupes):
finding.unsaved_tags = issue.get("tags", "").split(", ")
dupe_key = hashlib.sha256(
- (issue["offender"] + file_path + str(line)).encode("utf-8")
+ (issue["offender"] + file_path + str(line)).encode("utf-8"),
).hexdigest()
if dupe_key not in dupes:
@@ -152,7 +152,7 @@ def get_finding_current(self, issue, test, dupes):
severity = "High"
dupe_key = hashlib.md5(
- (title + secret + str(line)).encode("utf-8")
+ (title + secret + str(line)).encode("utf-8"),
).hexdigest()
if dupe_key in dupes:
@@ -173,7 +173,7 @@ def get_finding_current(self, issue, test, dupes):
line=line,
dynamic_finding=False,
static_finding=True,
- nb_occurences=1
+ nb_occurences=1,
)
if tags:
finding.unsaved_tags = tags
diff --git a/dojo/tools/gosec/parser.py b/dojo/tools/gosec/parser.py
index 69056d9281..cbcf3b4507 100644
--- a/dojo/tools/gosec/parser.py
+++ b/dojo/tools/gosec/parser.py
@@ -43,7 +43,7 @@ def get_findings(self, filename, test):
# Best attempt at ongoing documentation provided by gosec, based on
# rule id
references = "https://securego.io/docs/rules/{}.html".format(
- item["rule_id"]
+ item["rule_id"],
).lower()
if scanner_confidence:
@@ -80,7 +80,7 @@ def get_findings(self, filename, test):
file_path=filename,
line=line,
scanner_confidence=scanner_confidence,
- static_finding=True
+ static_finding=True,
)
dupes[dupe_key] = find
diff --git a/dojo/tools/govulncheck/parser.py b/dojo/tools/govulncheck/parser.py
index f348a33a06..6e76330e68 100644
--- a/dojo/tools/govulncheck/parser.py
+++ b/dojo/tools/govulncheck/parser.py
@@ -81,7 +81,7 @@ def get_findings(self, scan_file, test):
# Parsing for old govulncheck output format
list_vulns = data["Vulns"]
for cve, elems in groupby(
- list_vulns, key=lambda vuln: vuln["OSV"]["aliases"][0]
+ list_vulns, key=lambda vuln: vuln["OSV"]["aliases"][0],
):
first_elem = list(islice(elems, 1))
d = {
@@ -92,7 +92,7 @@ def get_findings(self, scan_file, test):
"package"
]["name"],
"component_version": self.get_version(
- data, first_elem[0]["RequireSink"]
+ data, first_elem[0]["RequireSink"],
),
}
d["references"] = first_elem[0]["OSV"]["references"][0][
@@ -105,19 +105,19 @@ def get_findings(self, scan_file, test):
vuln_methods = set(
first_elem[0]["OSV"]["affected"][0][
"ecosystem_specific"
- ]["imports"][0]["symbols"]
+ ]["imports"][0]["symbols"],
)
impact = set(
- self.get_location(data, first_elem[0]["CallSink"])
+ self.get_location(data, first_elem[0]["CallSink"]),
)
for elem in elems:
impact.update(
- self.get_location(data, elem["CallSink"])
+ self.get_location(data, elem["CallSink"]),
)
vuln_methods.update(
elem["OSV"]["affected"][0]["ecosystem_specific"][
"imports"
- ][0]["symbols"]
+ ][0]["symbols"],
)
d["impact"] = "; ".join(impact) if impact else None
d[
@@ -151,7 +151,7 @@ def get_findings(self, scan_file, test):
range_info = "\n ".join(formatted_ranges)
vuln_functions = ", ".join(
- set(osv_data["affected"][0]["ecosystem_specific"]["imports"][0].get("symbols", []))
+ set(osv_data["affected"][0]["ecosystem_specific"]["imports"][0].get("symbols", [])),
)
description = (
@@ -195,7 +195,7 @@ def get_findings(self, scan_file, test):
"references": references,
"file_path": path,
"url": db_specific_url,
- "unique_id_from_tool": id
+ "unique_id_from_tool": id,
}
findings.append(Finding(**d))
diff --git a/dojo/tools/h1/parser.py b/dojo/tools/h1/parser.py
index 9708bedfc0..457e01c06f 100644
--- a/dojo/tools/h1/parser.py
+++ b/dojo/tools/h1/parser.py
@@ -38,7 +38,7 @@ def get_findings(self, file, test):
# Get all relevant data
date = content["attributes"]["created_at"]
date = datetime.strftime(
- datetime.strptime(date, "%Y-%m-%dT%H:%M:%S.%fZ"), "%Y-%m-%d"
+ datetime.strptime(date, "%Y-%m-%dT%H:%M:%S.%fZ"), "%Y-%m-%d",
)
# Build the title of the Dojo finding
title = "#" + content["id"] + " " + content["attributes"]["title"]
@@ -68,7 +68,7 @@ def get_findings(self, file, test):
severity = "Info"
# Build the references of the Dojo finding
ref_link = "https://hackerone.com/reports/{}".format(
- content.get("id")
+ content.get("id"),
)
references += f"[{ref_link}]({ref_link})"
@@ -83,13 +83,13 @@ def get_findings(self, file, test):
cwe = int(
content["relationships"]["weakness"]["data"]["attributes"][
"external_id"
- ][4:]
+ ][4:],
)
except Exception:
cwe = 0
dupe_key = hashlib.md5(
- str(references + title).encode("utf-8")
+ str(references + title).encode("utf-8"),
).hexdigest()
if dupe_key in dupes:
finding = dupes[dupe_key]
@@ -112,7 +112,7 @@ def get_findings(self, file, test):
impact="No impact provided",
references=references,
cwe=cwe,
- dynamic_finding=False
+ dynamic_finding=False,
)
finding.unsaved_endpoints = []
dupes[dupe_key] = finding
@@ -121,7 +121,7 @@ def get_findings(self, file, test):
def build_description(self, content):
date = content["attributes"]["created_at"]
date = datetime.strftime(
- datetime.strptime(date, "%Y-%m-%dT%H:%M:%S.%fZ"), "%Y-%m-%d"
+ datetime.strptime(date, "%Y-%m-%dT%H:%M:%S.%fZ"), "%Y-%m-%d",
)
reporter = content["relationships"]["reporter"]["data"]["attributes"][
"username"
@@ -151,7 +151,7 @@ def build_description(self, content):
# Build rest of description meat
description += "##Report: \n{}\n".format(
- content["attributes"]["vulnerability_information"]
+ content["attributes"]["vulnerability_information"],
)
# Try to grab weakness if it's there
diff --git a/dojo/tools/hadolint/parser.py b/dojo/tools/hadolint/parser.py
index 4624dcbf99..d781e83b8a 100644
--- a/dojo/tools/hadolint/parser.py
+++ b/dojo/tools/hadolint/parser.py
@@ -55,7 +55,7 @@ def get_item(vulnerability, test):
file_path=vulnerability["file"],
line=vulnerability["line"],
description="Vulnerability ID: {}\nDetails: {}\n".format(
- vulnerability["code"], vulnerability["message"]
+ vulnerability["code"], vulnerability["message"],
),
static_finding=True,
dynamic_finding=False,
diff --git a/dojo/tools/horusec/parser.py b/dojo/tools/horusec/parser.py
index 8e9571820f..7dce06ad64 100644
--- a/dojo/tools/horusec/parser.py
+++ b/dojo/tools/horusec/parser.py
@@ -29,7 +29,7 @@ def get_description_for_scan_types(self, scan_type):
def get_findings(self, filename, test):
data = json.load(filename)
report_date = datetime.strptime(
- data.get("createdAt")[0:10], "%Y-%m-%d"
+ data.get("createdAt")[0:10], "%Y-%m-%d",
)
return [
self._get_finding(node, report_date)
@@ -40,7 +40,7 @@ def get_tests(self, scan_type, scan):
data = json.load(scan)
report_date = parse(data.get("createdAt"))
test = ParserTest(
- name=self.ID, type=self.ID, version=data.get("version").lstrip("v")
+ name=self.ID, type=self.ID, version=data.get("version").lstrip("v"),
) # remove the v in vX.Y.Z
test.description = "\n".join(
[
@@ -49,7 +49,7 @@ def get_tests(self, scan_type, scan):
"```",
data.get("errors").replace("```", "``````"),
"```",
- ]
+ ],
)
test.findings = [
self._get_finding(node, report_date)
@@ -65,7 +65,7 @@ def _get_finding(self, data, date):
f"```{data['vulnerabilities']['language']}",
data["vulnerabilities"]["code"].replace("```", "``````").replace("\x00", ""),
"```",
- ]
+ ],
)
finding = Finding(
title=data["vulnerabilities"]["details"].split("\n")[0],
diff --git a/dojo/tools/huskyci/parser.py b/dojo/tools/huskyci/parser.py
index 472ffbbf6f..028f4e1845 100644
--- a/dojo/tools/huskyci/parser.py
+++ b/dojo/tools/huskyci/parser.py
@@ -53,7 +53,7 @@ def get_items(self, tree, test):
if vuln["severity"] not in ("High", "Medium", "Low"):
continue
unique_key = hashlib.md5(
- str(vuln).encode("utf-8")
+ str(vuln).encode("utf-8"),
).hexdigest()
item = get_item(vuln, test)
items[unique_key] = item
@@ -86,7 +86,7 @@ def get_item(item_node, test):
line=item_node.get("line"),
static_finding=True,
dynamic_finding=False,
- impact="No impact provided"
+ impact="No impact provided",
)
return finding
diff --git a/dojo/tools/hydra/parser.py b/dojo/tools/hydra/parser.py
index 677130bd47..c42e8637f2 100644
--- a/dojo/tools/hydra/parser.py
+++ b/dojo/tools/hydra/parser.py
@@ -44,7 +44,7 @@ def get_findings(self, json_output, test):
return findings
def __extract_findings(
- self, raw_findings, metadata: HydraScanMetadata, test
+ self, raw_findings, metadata: HydraScanMetadata, test,
):
findings = []
@@ -54,13 +54,13 @@ def __extract_findings(
findings.append(finding)
except ValueError:
logger.warning(
- "Error when digesting a finding from hydra! Please revise supplied report, vital information was missing (e.g. host)!"
+ "Error when digesting a finding from hydra! Please revise supplied report, vital information was missing (e.g. host)!",
)
return findings
def __extract_finding(
- self, raw_finding, metadata: HydraScanMetadata, test
+ self, raw_finding, metadata: HydraScanMetadata, test,
) -> Finding:
host = raw_finding.get("host")
port = raw_finding.get("port")
@@ -92,7 +92,7 @@ def __extract_finding(
+ password,
static_finding=False,
dynamic_finding=True,
- service=metadata.service_type
+ service=metadata.service_type,
)
finding.unsaved_endpoints = [Endpoint(host=host, port=port)]
diff --git a/dojo/tools/ibm_app/parser.py b/dojo/tools/ibm_app/parser.py
index 72700fd72c..908b632926 100644
--- a/dojo/tools/ibm_app/parser.py
+++ b/dojo/tools/ibm_app/parser.py
@@ -53,21 +53,21 @@ def get_findings(self, file, test):
if severity == "Informational":
severity = "Info"
issue_description = self.fetch_advisory_group(
- root, issue_data["advisory"]
+ root, issue_data["advisory"],
)
for fix_recommendation_group in root.iter(
- "fix-recommendation-group"
+ "fix-recommendation-group",
):
for recommendation in fix_recommendation_group.iter(
- "item"
+ "item",
):
if (
recommendation.attrib["id"]
== issue_data["fix-recommendation"]
):
data = recommendation.find(
- "general/fixRecommendation"
+ "general/fixRecommendation",
)
for data_text in data.iter("text"):
recommendation_data += (
@@ -82,8 +82,8 @@ def get_findings(self, file, test):
# endpoints
dupe_key = hashlib.md5(
str(issue_description + name + severity).encode(
- "utf-8"
- )
+ "utf-8",
+ ),
).hexdigest()
# check if finding is a duplicate
if dupe_key in dupes:
@@ -100,11 +100,11 @@ def get_findings(self, file, test):
severity=severity,
mitigation=recommendation_data,
references=ref_link,
- dynamic_finding=True
+ dynamic_finding=True,
)
if vulnerability_id:
finding.unsaved_vulnerability_ids = [
- vulnerability_id
+ vulnerability_id,
]
finding.unsaved_endpoints = []
dupes[dupe_key] = finding
@@ -115,7 +115,7 @@ def get_findings(self, file, test):
# urls
if url:
finding.unsaved_endpoints.append(
- Endpoint.from_uri(url)
+ Endpoint.from_uri(url),
)
return list(dupes.values())
@@ -129,7 +129,7 @@ def fetch_issue_types(self, root):
"name": item.find("name").text,
"advisory": item.find("advisory/ref").text,
"fix-recommendation": item.find(
- "fix-recommendation/ref"
+ "fix-recommendation/ref",
).text,
}
@@ -155,7 +155,7 @@ def fetch_advisory_group(self, root, advisory):
for item in advisory_group.iter("item"):
if item.attrib["id"] == advisory:
return item.find(
- "advisory/testTechnicalDescription/text"
+ "advisory/testTechnicalDescription/text",
).text
return "N/A"
diff --git a/dojo/tools/immuniweb/parser.py b/dojo/tools/immuniweb/parser.py
index 5076259f7f..53242fcd2f 100644
--- a/dojo/tools/immuniweb/parser.py
+++ b/dojo/tools/immuniweb/parser.py
@@ -58,7 +58,7 @@ def get_findings(self, file, test):
url = vulnerability.find("URL").text
dupe_key = hashlib.md5(
- str(description + title + severity).encode("utf-8")
+ str(description + title + severity).encode("utf-8"),
).hexdigest()
# check if finding is a duplicate
@@ -78,7 +78,7 @@ def get_findings(self, file, test):
mitigation=mitigation,
impact=impact,
references=reference,
- dynamic_finding=True
+ dynamic_finding=True,
)
if vulnerability_id:
finding.unsaved_vulnerability_ids = [vulnerability_id]
diff --git a/dojo/tools/intsights/csv_handler.py b/dojo/tools/intsights/csv_handler.py
index 828cfaf802..c9493d7db2 100644
--- a/dojo/tools/intsights/csv_handler.py
+++ b/dojo/tools/intsights/csv_handler.py
@@ -33,7 +33,7 @@ def _parse_csv(self, csv_file) -> [dict]:
"Closed Reason",
"Additional Info",
"Rating",
- "Alert Link"
+ "Alert Link",
]
# These keys require a value. If one or more of the values is null or empty, the entire Alert is ignored.
@@ -47,12 +47,12 @@ def _parse_csv(self, csv_file) -> [dict]:
if isinstance(content, bytes):
content = content.decode("utf-8")
csv_reader = csv.DictReader(
- io.StringIO(content), delimiter=",", quotechar='"'
+ io.StringIO(content), delimiter=",", quotechar='"',
)
# Don't bother parsing if the keys don't match exactly what's expected
if collections.Counter(default_keys) == collections.Counter(
- csv_reader.fieldnames
+ csv_reader.fieldnames,
):
default_valud = "None provided"
for alert in csv_reader:
@@ -64,13 +64,13 @@ def _parse_csv(self, csv_file) -> [dict]:
"Type",
)
alert["source_date"] = alert.pop(
- "Source Date (UTC)", default_valud
+ "Source Date (UTC)", default_valud,
)
alert["report_date"] = alert.pop(
- "Report Date (UTC)", default_valud
+ "Report Date (UTC)", default_valud,
)
alert["network_type"] = alert.pop(
- "Network Type", default_valud
+ "Network Type", default_valud,
)
alert["source_url"] = alert.pop("Source URL", default_valud)
alert["assets"] = alert.pop("Assets", default_valud)
@@ -89,7 +89,7 @@ def _parse_csv(self, csv_file) -> [dict]:
alerts.append(alert)
else:
self._LOGGER.error(
- "The CSV file has one or more missing or unexpected header values"
+ "The CSV file has one or more missing or unexpected header values",
)
return alerts
diff --git a/dojo/tools/intsights/json_handler.py b/dojo/tools/intsights/json_handler.py
index ec315ac101..db45e50364 100644
--- a/dojo/tools/intsights/json_handler.py
+++ b/dojo/tools/intsights/json_handler.py
@@ -21,19 +21,19 @@ def _parse_json(self, json_file) -> [dict]:
alert["severity"] = original_alert["Details"]["Severity"]
alert["type"] = original_alert["Details"]["Type"]
alert["source_date"] = original_alert["Details"]["Source"].get(
- "Date", "None provided"
+ "Date", "None provided",
)
alert["report_date"] = original_alert.get(
- "FoundDate", "None provided"
+ "FoundDate", "None provided",
)
alert["network_type"] = original_alert["Details"]["Source"].get(
- "NetworkType"
+ "NetworkType",
)
alert["source_url"] = original_alert["Details"]["Source"].get(
- "URL"
+ "URL",
)
alert["assets"] = ",".join(
- [item.get("Value") for item in original_alert["Assets"]]
+ [item.get("Value") for item in original_alert["Assets"]],
)
alert["tags"] = original_alert["Details"].get("Tags")
alert["status"] = (
diff --git a/dojo/tools/intsights/parser.py b/dojo/tools/intsights/parser.py
index cd6a61a57a..e49c61b852 100644
--- a/dojo/tools/intsights/parser.py
+++ b/dojo/tools/intsights/parser.py
@@ -38,8 +38,8 @@ def _build_finding_description(self, alert: dict) -> str:
f'**Source Date**: ` {alert.get("source_date", "None provided")} `',
f'**Source Network Type**: `{alert.get("network_type", "None provided")} `',
f'**Assets Affected**: `{alert.get("assets", "None provided")} `',
- f'**Alert Link**: {alert.get("alert_link", "None provided")}'
- ]
+ f'**Alert Link**: {alert.get("alert_link", "None provided")}',
+ ],
)
return description
@@ -66,7 +66,7 @@ def get_findings(self, file, test):
references=alert["alert_link"],
static_finding=False,
dynamic_finding=True,
- unique_id_from_tool=alert["alert_id"]
+ unique_id_from_tool=alert["alert_id"],
)
duplicates[dupe_key] = alert
if dupe_key not in duplicates:
diff --git a/dojo/tools/jfrog_xray_api_summary_artifact/parser.py b/dojo/tools/jfrog_xray_api_summary_artifact/parser.py
index 7453669b47..5261b802f2 100644
--- a/dojo/tools/jfrog_xray_api_summary_artifact/parser.py
+++ b/dojo/tools/jfrog_xray_api_summary_artifact/parser.py
@@ -98,7 +98,7 @@ def get_item(
artifact_sha256
+ impact_path.name
+ impact_path.version
- + vulnerability["issue_id"]
+ + vulnerability["issue_id"],
)
vuln_id_from_tool = vulnerability["issue_id"]
elif cve:
@@ -108,7 +108,7 @@ def get_item(
artifact_sha256
+ impact_path.name
+ impact_path.version
- + vulnerability["summary"]
+ + vulnerability["summary"],
)
vuln_id_from_tool = ""
result.update(unique_id.encode())
diff --git a/dojo/tools/jfrog_xray_unified/parser.py b/dojo/tools/jfrog_xray_unified/parser.py
index e8b36d1b34..12efe1afff 100644
--- a/dojo/tools/jfrog_xray_unified/parser.py
+++ b/dojo/tools/jfrog_xray_unified/parser.py
@@ -106,7 +106,7 @@ def get_item(vulnerability, test):
references = "\n".join(vulnerability["references"])
scan_time = datetime.strptime(
- vulnerability["artifact_scan_time"], "%Y-%m-%dT%H:%M:%S%z"
+ vulnerability["artifact_scan_time"], "%Y-%m-%dT%H:%M:%S%z",
)
# component has several parts separated by colons. Last part is the
diff --git a/dojo/tools/jfrogxray/parser.py b/dojo/tools/jfrogxray/parser.py
index 36ffa900cf..a1351dc077 100644
--- a/dojo/tools/jfrogxray/parser.py
+++ b/dojo/tools/jfrogxray/parser.py
@@ -32,7 +32,7 @@ def get_items(self, tree, test):
title_cve = "No CVE"
more_details = node.get("component_versions").get(
- "more_details"
+ "more_details",
)
if "cves" in more_details:
if "cve" in more_details.get("cves")[0]:
@@ -97,13 +97,13 @@ def get_item(vulnerability, test):
if "fixed_versions" in vulnerability["component_versions"]:
mitigation = "**Versions containing a fix:**\n"
mitigation = mitigation + "\n".join(
- vulnerability["component_versions"]["fixed_versions"]
+ vulnerability["component_versions"]["fixed_versions"],
)
if "vulnerable_versions" in vulnerability["component_versions"]:
extra_desc = "\n**Versions that are vulnerable:**\n"
extra_desc += "\n".join(
- vulnerability["component_versions"]["vulnerable_versions"]
+ vulnerability["component_versions"]["vulnerable_versions"],
)
provider = (
diff --git a/dojo/tools/kics/parser.py b/dojo/tools/kics/parser.py
index f0b2c1defc..2708047399 100644
--- a/dojo/tools/kics/parser.py
+++ b/dojo/tools/kics/parser.py
@@ -65,7 +65,7 @@ def get_findings(self, filename, test):
+ file_name
+ expected_value
+ str(line_number)
- ).encode("utf-8")
+ ).encode("utf-8"),
).hexdigest()
if dupe_key in dupes:
diff --git a/dojo/tools/kiuwan/parser.py b/dojo/tools/kiuwan/parser.py
index 4eeb8146af..5d91e5a315 100644
--- a/dojo/tools/kiuwan/parser.py
+++ b/dojo/tools/kiuwan/parser.py
@@ -40,7 +40,7 @@ def get_findings(self, filename, test):
if isinstance(content, bytes):
content = content.decode("utf-8")
reader = csv.DictReader(
- io.StringIO(content), delimiter=",", quotechar='"'
+ io.StringIO(content), delimiter=",", quotechar='"',
)
csvarray = []
@@ -124,7 +124,7 @@ def get_findings(self, filename, test):
+ finding.description
+ "|"
+ str(finding.cwe)
- ).encode("utf-8")
+ ).encode("utf-8"),
).hexdigest()
if key not in dupes:
diff --git a/dojo/tools/kubehunter/parser.py b/dojo/tools/kubehunter/parser.py
index 54e2bfa842..ef9abf25c8 100644
--- a/dojo/tools/kubehunter/parser.py
+++ b/dojo/tools/kubehunter/parser.py
@@ -74,7 +74,7 @@ def get_findings(self, file, test):
duplicate=False,
out_of_scope=False,
vuln_id_from_tool=vulnerability_id,
- steps_to_reproduce=steps_to_reproduce
+ steps_to_reproduce=steps_to_reproduce,
)
# internal de-duplication
diff --git a/dojo/tools/kubescape/parser.py b/dojo/tools/kubescape/parser.py
index be9cd6d741..d26031a6a7 100644
--- a/dojo/tools/kubescape/parser.py
+++ b/dojo/tools/kubescape/parser.py
@@ -116,7 +116,7 @@ def get_findings(self, filename, test):
severity=severity,
component_name=resourceid,
static_finding=True,
- dynamic_finding=False
+ dynamic_finding=False,
)
findings.append(find)
return findings
diff --git a/dojo/tools/mend/parser.py b/dojo/tools/mend/parser.py
index 5fc6464526..60ad893109 100644
--- a/dojo/tools/mend/parser.py
+++ b/dojo/tools/mend/parser.py
@@ -76,7 +76,7 @@ def _build_common_output(node, lib_name=None):
cvss3_score = node.get("cvss3_score", None)
cvss3_vector = node.get("scoreMetadataVector", None)
severity_justification = "CVSS v3 score: {} ({})".format(
- cvss3_score if cvss3_score is not None else "N/A", cvss3_vector if cvss3_vector is not None else "N/A"
+ cvss3_score if cvss3_score is not None else "N/A", cvss3_vector if cvss3_vector is not None else "N/A",
)
cwe = 1035 # default OWASP a9 until the report actually has them
@@ -99,7 +99,7 @@ def _build_common_output(node, lib_name=None):
filepaths.append(sfile.get("localPath"))
except Exception:
logger.exception(
- "Error handling local paths for vulnerability."
+ "Error handling local paths for vulnerability.",
)
new_finding = Finding(
@@ -115,7 +115,7 @@ def _build_common_output(node, lib_name=None):
severity_justification=severity_justification,
dynamic_finding=True,
cvssv3=cvss3_vector,
- cvssv3_score=float(cvss3_score) if cvss3_score is not None else None
+ cvssv3_score=float(cvss3_score) if cvss3_score is not None else None,
)
if cve:
new_finding.unsaved_vulnerability_ids = [cve]
@@ -136,7 +136,7 @@ def _build_common_output(node, lib_name=None):
):
for vuln in lib_node.get("vulnerabilities"):
findings.append(
- _build_common_output(vuln, lib_node.get("name"))
+ _build_common_output(vuln, lib_node.get("name")),
)
elif "vulnerabilities" in content:
@@ -152,7 +152,7 @@ def create_finding_key(f: Finding) -> str:
"""
return hashlib.md5(
f.description.encode("utf-8")
- + f.title.encode("utf-8")
+ + f.title.encode("utf-8"),
).hexdigest()
dupes = {}
diff --git a/dojo/tools/meterian/parser.py b/dojo/tools/meterian/parser.py
index ab9fa93392..cb5f0193be 100644
--- a/dojo/tools/meterian/parser.py
+++ b/dojo/tools/meterian/parser.py
@@ -20,11 +20,11 @@ def get_findings(self, report, test):
report_json = json.load(report)
security_reports = self.get_security_reports(report_json)
scan_date = str(
- datetime.fromisoformat(report_json["timestamp"]).date()
+ datetime.fromisoformat(report_json["timestamp"]).date(),
)
for single_security_report in security_reports:
findings += self.do_get_findings(
- single_security_report, scan_date, test
+ single_security_report, scan_date, test,
)
return findings
diff --git a/dojo/tools/microfocus_webinspect/parser.py b/dojo/tools/microfocus_webinspect/parser.py
index 9764a2e8db..6b1669ffaf 100644
--- a/dojo/tools/microfocus_webinspect/parser.py
+++ b/dojo/tools/microfocus_webinspect/parser.py
@@ -36,7 +36,7 @@ def get_findings(self, file, test):
mitigation = None
reference = None
severity = MicrofocusWebinspectParser.convert_severity(
- issue.find("Severity").text
+ issue.find("Severity").text,
)
for content in issue.findall("ReportSection"):
name = content.find("Name").text
@@ -49,7 +49,7 @@ def get_findings(self, file, test):
if "Reference" in name:
if name and content.find("SectionText").text:
reference = html2text.html2text(
- content.find("SectionText").text
+ content.find("SectionText").text,
)
cwe = 0
description = ""
@@ -81,7 +81,7 @@ def get_findings(self, file, test):
# make dupe hash key
dupe_key = hashlib.sha256(
- f"{finding.description}|{finding.title}|{finding.severity}".encode()
+ f"{finding.description}|{finding.title}|{finding.severity}".encode(),
).hexdigest()
# check if dupes are present.
if dupe_key in dupes:
diff --git a/dojo/tools/mobsf/parser.py b/dojo/tools/mobsf/parser.py
index 2cbdca7920..6567c69b82 100644
--- a/dojo/tools/mobsf/parser.py
+++ b/dojo/tools/mobsf/parser.py
@@ -95,7 +95,7 @@ def get_findings(self, filename, test):
"title": details.get("name", ""),
"severity": self.getSeverityForPermission(details.get("status")),
"description": "**Permission Type:** " + details.get("name", "") + " (" + details.get("status", "") + ")\n\n**Description:** " + details.get("description", "") + "\n\n**Reason:** " + details.get("reason", ""),
- "file_path": None
+ "file_path": None,
}
mobsf_findings.append(mobsf_item)
else:
@@ -105,7 +105,7 @@ def get_findings(self, filename, test):
"title": permission,
"severity": self.getSeverityForPermission(details.get("status", "")),
"description": "**Permission Type:** " + permission + "\n\n**Description:** " + details.get("description", ""),
- "file_path": None
+ "file_path": None,
}
mobsf_findings.append(mobsf_item)
@@ -121,7 +121,7 @@ def get_findings(self, filename, test):
"title": "Insecure Connections",
"severity": "Low",
"description": insecure_urls,
- "file_path": None
+ "file_path": None,
}
mobsf_findings.append(mobsf_item)
@@ -136,7 +136,7 @@ def get_findings(self, filename, test):
"title": details[2],
"severity": details[0].title(),
"description": details[1] + "\n\n**Certificate Info:** " + certificate_info,
- "file_path": None
+ "file_path": None,
}
mobsf_findings.append(mobsf_item)
elif len(details) == 2:
@@ -145,7 +145,7 @@ def get_findings(self, filename, test):
"title": details[1],
"severity": details[0].title(),
"description": details[1] + "\n\n**Certificate Info:** " + certificate_info,
- "file_path": None
+ "file_path": None,
}
mobsf_findings.append(mobsf_item)
else:
@@ -161,7 +161,7 @@ def get_findings(self, filename, test):
"title": details["title"],
"severity": details["severity"].title(),
"description": details["description"] + "\n\n " + details["name"],
- "file_path": None
+ "file_path": None,
}
mobsf_findings.append(mobsf_item)
else:
@@ -171,7 +171,7 @@ def get_findings(self, filename, test):
"title": details["title"],
"severity": details["stat"].title(),
"description": details["desc"] + "\n\n " + details["name"],
- "file_path": None
+ "file_path": None,
}
mobsf_findings.append(mobsf_item)
@@ -186,7 +186,7 @@ def get_findings(self, filename, test):
"title": details,
"severity": metadata["metadata"]["severity"].title(),
"description": metadata["metadata"]["description"],
- "file_path": None
+ "file_path": None,
}
mobsf_findings.append(mobsf_item)
else:
@@ -198,7 +198,7 @@ def get_findings(self, filename, test):
"title": details,
"severity": metadata["metadata"]["severity"].title(),
"description": metadata["metadata"]["description"],
- "file_path": None
+ "file_path": None,
}
mobsf_findings.append(mobsf_item)
@@ -213,7 +213,7 @@ def get_findings(self, filename, test):
"title": details[binary_analysis_type]["description"].split(".")[0],
"severity": details[binary_analysis_type]["severity"].title(),
"description": details[binary_analysis_type]["description"],
- "file_path": details["name"]
+ "file_path": details["name"],
}
mobsf_findings.append(mobsf_item)
elif data["binary_analysis"].get("findings"):
@@ -232,7 +232,7 @@ def get_findings(self, filename, test):
"title": details["detailed_desc"],
"severity": details["severity"].title(),
"description": details["detailed_desc"],
- "file_path": None
+ "file_path": None,
}
mobsf_findings.append(mobsf_item)
else:
@@ -250,7 +250,7 @@ def get_findings(self, filename, test):
"title": details["detailed_desc"],
"severity": details["severity"].title(),
"description": details["detailed_desc"],
- "file_path": None
+ "file_path": None,
}
mobsf_findings.append(mobsf_item)
@@ -282,7 +282,7 @@ def get_findings(self, filename, test):
"title": details["metadata"]["description"],
"severity": details["metadata"]["severity"].title(),
"description": "**API:** " + api + "\n\n**Description:** " + details["metadata"]["description"],
- "file_path": None
+ "file_path": None,
}
mobsf_findings.append(mobsf_item)
@@ -294,7 +294,7 @@ def get_findings(self, filename, test):
"title": details["title"],
"severity": details["stat"],
"description": details["desc"],
- "file_path": None
+ "file_path": None,
}
mobsf_findings.append(mobsf_item)
@@ -316,7 +316,7 @@ def get_findings(self, filename, test):
"title": title,
"severity": finding["level"],
"description": description,
- "file_path": file_path
+ "file_path": file_path,
}
mobsf_findings.append(mobsf_item)
@@ -327,7 +327,7 @@ def get_findings(self, filename, test):
"title": finding["name"],
"severity": finding["severity"],
"description": finding["description"] + "\n" + "**apk_exploit_dict:** " + str(finding["apk_exploit_dict"]) + "\n" + "**line_number:** " + str(finding["line_number"]),
- "file_path": finding["file_object"]
+ "file_path": finding["file_object"],
}
mobsf_findings.append(mobsf_item)
for mobsf_finding in mobsf_findings:
diff --git a/dojo/tools/mobsfscan/parser.py b/dojo/tools/mobsfscan/parser.py
index 67c30ffb1c..ae7eecc122 100644
--- a/dojo/tools/mobsfscan/parser.py
+++ b/dojo/tools/mobsfscan/parser.py
@@ -35,8 +35,8 @@ def get_findings(self, filename, test):
metadata = item.get("metadata")
cwe = int(
re.match(r"(cwe|CWE)-([0-9]+)", metadata.get("cwe")).group(
- 2
- )
+ 2,
+ ),
)
masvs = metadata.get("masvs")
owasp_mobile = metadata.get("owasp-mobile")
@@ -45,7 +45,7 @@ def get_findings(self, filename, test):
f"**Description:** `{metadata.get('description')}`",
f"**OWASP MASVS:** `{masvs}`",
f"**OWASP Mobile:** `{owasp_mobile}`",
- ]
+ ],
)
references = metadata.get("reference")
if metadata.get("severity") in self.SEVERITY:
@@ -70,7 +70,7 @@ def get_findings(self, filename, test):
finding.line = line
dupe_key = hashlib.sha256(
- (key + str(cwe) + masvs + owasp_mobile).encode("utf-8")
+ (key + str(cwe) + masvs + owasp_mobile).encode("utf-8"),
).hexdigest()
if dupe_key in dupes:
diff --git a/dojo/tools/mozilla_observatory/parser.py b/dojo/tools/mozilla_observatory/parser.py
index 1d88b3cf11..783e0ada6f 100644
--- a/dojo/tools/mozilla_observatory/parser.py
+++ b/dojo/tools/mozilla_observatory/parser.py
@@ -41,7 +41,7 @@ def get_findings(self, file, test):
+ "`",
"**Result** : `" + node["result"] + "`"
"**expectation** : " + str(node.get("expectation")) + "`",
- ]
+ ],
)
finding = Finding(
diff --git a/dojo/tools/netsparker/parser.py b/dojo/tools/netsparker/parser.py
index e0cbce557c..35a0892054 100644
--- a/dojo/tools/netsparker/parser.py
+++ b/dojo/tools/netsparker/parser.py
@@ -26,11 +26,11 @@ def get_findings(self, filename, test):
dupes = {}
if "UTC" in data["Generated"]:
scan_date = datetime.datetime.strptime(
- data["Generated"].split(" ")[0], "%d/%m/%Y"
+ data["Generated"].split(" ")[0], "%d/%m/%Y",
).date()
else:
scan_date = datetime.datetime.strptime(
- data["Generated"], "%d/%m/%Y %H:%M %p"
+ data["Generated"], "%d/%m/%Y %H:%M %p",
).date()
for item in data["Vulnerabilities"]:
@@ -79,13 +79,13 @@ def get_findings(self, filename, test):
if item["Classification"] is not None:
if item["Classification"].get("Cvss") is not None and item["Classification"].get("Cvss").get("Vector") is not None:
cvss_objects = cvss_parser.parse_cvss_from_text(
- item["Classification"]["Cvss"]["Vector"]
+ item["Classification"]["Cvss"]["Vector"],
)
if len(cvss_objects) > 0:
finding.cvssv3 = cvss_objects[0].clean_vector()
elif item["Classification"].get("Cvss31") is not None and item["Classification"].get("Cvss31").get("Vector") is not None:
cvss_objects = cvss_parser.parse_cvss_from_text(
- item["Classification"]["Cvss31"]["Vector"]
+ item["Classification"]["Cvss31"]["Vector"],
)
if len(cvss_objects) > 0:
finding.cvssv3 = cvss_objects[0].clean_vector()
diff --git a/dojo/tools/neuvector/parser.py b/dojo/tools/neuvector/parser.py
index f2f20ebd53..7cf278ce7e 100644
--- a/dojo/tools/neuvector/parser.py
+++ b/dojo/tools/neuvector/parser.py
@@ -43,7 +43,7 @@ def get_items(self, tree, test):
unique_key = node.get("name") + str(
package_name
+ str(node.get("package_version"))
- + str(node.get("severity"))
+ + str(node.get("severity")),
)
items[unique_key] = item
return list(items.values())
diff --git a/dojo/tools/nexpose/parser.py b/dojo/tools/nexpose/parser.py
index 67908e03d6..c695b819f2 100644
--- a/dojo/tools/nexpose/parser.py
+++ b/dojo/tools/nexpose/parser.py
@@ -191,13 +191,13 @@ def get_vuln_definitions(self, tree):
url_index += 1
else:
vuln["refs"][ref.get("source")] = str(
- ref.text
+ ref.text,
).strip()
elif item.tag == "solution":
for htmlType in list(item):
vuln["resolution"] += self.parse_html_type(
- htmlType
+ htmlType,
)
# there is currently no method to register tags in vulns
@@ -224,7 +224,7 @@ def get_items(self, tree, vulns, test):
"name": "Host Up",
"desc": "Host is up because it replied on ICMP request or some TCP/UDP port is up",
"severity": "Info",
- }
+ },
)
for names in node.findall("names"):
@@ -242,11 +242,11 @@ def get_items(self, tree, vulns, test):
for service in services.findall("service"):
svc["name"] = service.get("name", "").lower()
svc["vulns"] = self.parse_tests_type(
- service, vulns
+ service, vulns,
)
for configs in service.findall(
- "configurations"
+ "configurations",
):
for config in configs.findall("config"):
if "banner" in config.get("name"):
@@ -269,11 +269,11 @@ def get_items(self, tree, vulns, test):
"[^A-Za-z0-9]+",
"-",
service.get("name").lower(),
- ).rstrip("-")
+ ).rstrip("-"),
]
if service.get("name") != ""
else [],
- }
+ },
)
host["services"].append(svc)
@@ -308,7 +308,7 @@ def get_items(self, tree, vulns, test):
else service["protocol"],
fragment=service["protocol"].lower()
if service["name"] == "dns"
- else None
+ else None,
# A little dirty hack but in case of DNS it is
# important to know if vulnerability is on TCP or UDP
)
diff --git a/dojo/tools/nikto/json_parser.py b/dojo/tools/nikto/json_parser.py
index bde6ef3e62..a51deafce6 100644
--- a/dojo/tools/nikto/json_parser.py
+++ b/dojo/tools/nikto/json_parser.py
@@ -27,12 +27,12 @@ def process_json(self, file, test):
description=description,
vuln_id_from_tool=vulnerability.get("id"),
nb_occurences=1,
- references=vulnerability.get("references")
+ references=vulnerability.get("references"),
)
# manage if we have an ID from OSVDB
if "OSVDB" in vulnerability and "0" != vulnerability.get("OSVDB"):
finding.unique_id_from_tool = "OSVDB-" + vulnerability.get(
- "OSVDB"
+ "OSVDB",
)
finding.description += "\n*This finding is marked as medium as there is a link to OSVDB*"
finding.severity = "Medium"
diff --git a/dojo/tools/nikto/xml_parser.py b/dojo/tools/nikto/xml_parser.py
index ab5dffe906..bb831b7c3c 100644
--- a/dojo/tools/nikto/xml_parser.py
+++ b/dojo/tools/nikto/xml_parser.py
@@ -33,7 +33,7 @@ def process_scandetail(self, scan, test, dupes):
description = item.findtext("description")
# Cut the title down to the first sentence
sentences = re.split(
- r"(? 0:
titleText = sentences[0][:900]
@@ -45,7 +45,7 @@ def process_scandetail(self, scan, test, dupes):
f"**Host:** `{item.findtext('iplink')}`",
f"**Description:** `{item.findtext('description')}`",
f"**HTTP Method:** `{item.attrib.get('method')}`",
- ]
+ ],
)
# Manage severity the same way with JSON
severity = "Info" # Nikto doesn't assign severity, default to Info
diff --git a/dojo/tools/nmap/parser.py b/dojo/tools/nmap/parser.py
index f0eb012895..35ea7c3464 100644
--- a/dojo/tools/nmap/parser.py
+++ b/dojo/tools/nmap/parser.py
@@ -27,7 +27,7 @@ def get_findings(self, file, test):
report_date = None
try:
report_date = datetime.datetime.fromtimestamp(
- int(root.attrib["start"])
+ int(root.attrib["start"]),
)
except ValueError:
pass
@@ -57,7 +57,7 @@ def get_findings(self, file, test):
)
if "accuracy" in os_match.attrib:
host_info += "**Accuracy:** {}%\n".format(
- os_match.attrib["accuracy"]
+ os_match.attrib["accuracy"],
)
host_info += "\n\n"
@@ -65,7 +65,7 @@ def get_findings(self, file, test):
for port_element in host.findall("ports/port"):
protocol = port_element.attrib["protocol"]
endpoint = Endpoint(
- host=fqdn if fqdn else ip, protocol=protocol
+ host=fqdn if fqdn else ip, protocol=protocol,
)
if (
"portid" in port_element.attrib
@@ -104,10 +104,10 @@ def get_findings(self, file, test):
# manage some script like
# https://github.com/vulnersCom/nmap-vulners
for script_element in port_element.findall(
- 'script[@id="vulners"]'
+ 'script[@id="vulners"]',
):
self.manage_vulner_script(
- test, dupes, script_element, endpoint, report_date
+ test, dupes, script_element, endpoint, report_date,
)
severity = "Info"
@@ -153,7 +153,7 @@ def convert_cvss_score(self, raw_value):
return "Critical"
def manage_vulner_script(
- self, test, dupes, script_element, endpoint, report_date=None
+ self, test, dupes, script_element, endpoint, report_date=None,
):
for component_element in script_element.findall("table"):
component_cpe = CPE(component_element.attrib["key"])
diff --git a/dojo/tools/noseyparker/parser.py b/dojo/tools/noseyparker/parser.py
index 965b9e28f5..787d696f46 100644
--- a/dojo/tools/noseyparker/parser.py
+++ b/dojo/tools/noseyparker/parser.py
@@ -91,7 +91,7 @@ def get_findings(self, file, test):
line=line_num,
static_finding=True,
nb_occurences=1,
- dynamic_finding=False
+ dynamic_finding=False,
)
dupes[key] = finding
diff --git a/dojo/tools/nuclei/parser.py b/dojo/tools/nuclei/parser.py
index 4e97c4f6b7..4ce0d22b74 100644
--- a/dojo/tools/nuclei/parser.py
+++ b/dojo/tools/nuclei/parser.py
@@ -78,7 +78,7 @@ def get_findings(self, filename, test):
finding.description = info.get("description")
if item.get("extracted-results"):
finding.description += "\n**Results:**\n" + "\n".join(
- item.get("extracted-results")
+ item.get("extracted-results"),
)
if info.get("tags"):
finding.unsaved_tags = info.get("tags")
@@ -108,7 +108,7 @@ def get_findings(self, filename, test):
and classification["cvss-metrics"]
):
cvss_objects = cvss_parser.parse_cvss_from_text(
- classification["cvss-metrics"]
+ classification["cvss-metrics"],
)
if len(cvss_objects) > 0:
finding.cvssv3 = cvss_objects[0].clean_vector()
@@ -151,8 +151,8 @@ def get_findings(self, filename, test):
dupe_key = hashlib.sha256(
(template_id + item_type + matcher + endpoint.host).encode(
- "utf-8"
- )
+ "utf-8",
+ ),
).hexdigest()
if dupe_key in dupes:
diff --git a/dojo/tools/openscap/parser.py b/dojo/tools/openscap/parser.py
index a96a1cdcca..186243526b 100644
--- a/dojo/tools/openscap/parser.py
+++ b/dojo/tools/openscap/parser.py
@@ -36,7 +36,7 @@ def get_findings(self, file, test):
rules = {}
for rule in root.findall(f".//{namespace}Rule"):
rules[rule.attrib["id"]] = {
- "title": rule.findtext(f"./{namespace}title")
+ "title": rule.findtext(f"./{namespace}title"),
}
# go to test result
test_result = tree.find(f"./{namespace}TestResult")
@@ -51,7 +51,7 @@ def get_findings(self, file, test):
# run both rule, and rule-result in parallel so that we can get title
# for failed test from rule.
for rule_result in test_result.findall(
- f"./{namespace}rule-result"
+ f"./{namespace}rule-result",
):
result = rule_result.findtext(f"./{namespace}result")
# find only failed report.
@@ -63,11 +63,11 @@ def get_findings(self, file, test):
[
"**IdRef:** `" + rule_result.attrib["idref"] + "`",
"**Title:** `" + title + "`",
- ]
+ ],
)
vulnerability_ids = []
for vulnerability_id in rule_result.findall(
- f"./{namespace}ident[@system='http://cve.mitre.org']"
+ f"./{namespace}ident[@system='http://cve.mitre.org']",
):
vulnerability_ids.append(vulnerability_id.text)
# get severity.
@@ -82,7 +82,7 @@ def get_findings(self, file, test):
references = ""
# get references.
for check_content in rule_result.findall(
- f"./{namespace}check/{namespace}check-content-ref"
+ f"./{namespace}check/{namespace}check-content-ref",
):
references += (
"**name:** : " + check_content.attrib["name"] + "\n"
@@ -115,7 +115,7 @@ def get_findings(self, file, test):
finding.unsaved_endpoints.append(endpoint)
dupe_key = hashlib.sha256(
- references.encode("utf-8")
+ references.encode("utf-8"),
).hexdigest()
if dupe_key in dupes:
find = dupes[dupe_key]
diff --git a/dojo/tools/openvas/csv_parser.py b/dojo/tools/openvas/csv_parser.py
index ff9e8bf888..1a5cc9a056 100644
--- a/dojo/tools/openvas/csv_parser.py
+++ b/dojo/tools/openvas/csv_parser.py
@@ -289,7 +289,7 @@ def get_findings(self, filename, test):
column_number = 0
for column in row:
chain.process_column(
- column_names[column_number], column, finding
+ column_names[column_number], column, finding,
)
column_number += 1
if finding is not None and row_number > 0:
@@ -306,7 +306,7 @@ def get_findings(self, filename, test):
+ finding.title
+ "|"
+ finding.description
- ).encode("utf-8")
+ ).encode("utf-8"),
).hexdigest()
if key not in dupes:
dupes[key] = finding
diff --git a/dojo/tools/openvas/xml_parser.py b/dojo/tools/openvas/xml_parser.py
index 3746d5c27b..bd9d365e0d 100644
--- a/dojo/tools/openvas/xml_parser.py
+++ b/dojo/tools/openvas/xml_parser.py
@@ -41,7 +41,7 @@ def get_findings(self, filename, test):
description="\n".join(description),
severity=severity,
dynamic_finding=True,
- static_finding=False
+ static_finding=False,
)
findings.append(finding)
return findings
diff --git a/dojo/tools/ort/parser.py b/dojo/tools/ort/parser.py
index b2c33b0c45..7be74278cb 100644
--- a/dojo/tools/ort/parser.py
+++ b/dojo/tools/ort/parser.py
@@ -47,16 +47,16 @@ def get_items(self, evaluatedModel, test):
rule_violations = evaluatedModel["rule_violations"]
licenses = evaluatedModel["licenses"]
rule_violations_unresolved = get_unresolved_rule_violations(
- rule_violations
+ rule_violations,
)
rule_violations_models = get_rule_violation_models(
- rule_violations_unresolved, packages, licenses, dependency_trees
+ rule_violations_unresolved, packages, licenses, dependency_trees,
)
for model in rule_violations_models:
item = get_item(model, test)
unique_key = hashlib.md5(
- (item.title + item.references).encode()
+ (item.title + item.references).encode(),
).hexdigest()
items[unique_key] = item
@@ -109,23 +109,23 @@ def get_name_id_for_package(packages, package__id):
def get_rule_violation_models(
- rule_violations_unresolved, packages, licenses, dependency_trees
+ rule_violations_unresolved, packages, licenses, dependency_trees,
):
models = []
for violation in rule_violations_unresolved:
models.append(
get_rule_violation_model(
- violation, packages, licenses, dependency_trees
- )
+ violation, packages, licenses, dependency_trees,
+ ),
)
return models
def get_rule_violation_model(
- rule_violation_unresolved, packages, licenses, dependency_trees
+ rule_violation_unresolved, packages, licenses, dependency_trees,
):
project_ids = get_project_ids_for_package(
- dependency_trees, rule_violation_unresolved["pkg"]
+ dependency_trees, rule_violation_unresolved["pkg"],
)
project_names = []
for id in project_ids:
@@ -140,7 +140,7 @@ def get_rule_violation_model(
license_id = find_license_id(licenses, license_tmp)
return RuleViolationModel(
- package, license_id, project_names, rule_violation_unresolved
+ package, license_id, project_names, rule_violation_unresolved,
)
@@ -193,7 +193,7 @@ def get_item(model, test):
# rule_violation: dict
RuleViolationModel = namedtuple(
- "RuleViolationModel", ["pkg", "license_id", "projects", "rule_violation"]
+ "RuleViolationModel", ["pkg", "license_id", "projects", "rule_violation"],
)
diff --git a/dojo/tools/ossindex_devaudit/parser.py b/dojo/tools/ossindex_devaudit/parser.py
index e9abb97770..ed89887e29 100644
--- a/dojo/tools/ossindex_devaudit/parser.py
+++ b/dojo/tools/ossindex_devaudit/parser.py
@@ -60,7 +60,7 @@ def get_items(self, tree, test):
def get_item(
- dependency_name, dependency_version, dependency_source, vulnerability, test
+ dependency_name, dependency_version, dependency_source, vulnerability, test,
):
cwe_data = vulnerability.get("cwe", "CWE-1035")
if cwe_data is None or cwe_data.startswith("CWE") is False:
diff --git a/dojo/tools/outpost24/parser.py b/dojo/tools/outpost24/parser.py
index 6d42ee855e..011e38b2d1 100644
--- a/dojo/tools/outpost24/parser.py
+++ b/dojo/tools/outpost24/parser.py
@@ -26,13 +26,13 @@ def get_findings(self, file, test):
# date = detail.findtext('date') # can be used for Finding.date?
vulnerability_id = detail.findtext("./cve/id")
url = detail.findtext(
- "./referencelist/reference/[type='solution']/../url"
+ "./referencelist/reference/[type='solution']/../url",
)
description = detail.findtext("description")
mitigation = detail.findtext("solution")
impact = detail.findtext("information")
cvss_score = detail.findtext("cvss_v3_score") or detail.findtext(
- "cvss_score"
+ "cvss_score",
)
if not cvss_score:
cvss_score = 0
@@ -80,7 +80,7 @@ def get_findings(self, file, test):
logger.debug("General port given. Assigning 0 as default.")
port = 0
finding.unsaved_endpoints.append(
- Endpoint(protocol=protocol, host=host, port=port)
+ Endpoint(protocol=protocol, host=host, port=port),
)
items.append(finding)
return items
diff --git a/dojo/tools/php_security_audit_v2/parser.py b/dojo/tools/php_security_audit_v2/parser.py
index 91b7e4c6c3..e677e25254 100644
--- a/dojo/tools/php_security_audit_v2/parser.py
+++ b/dojo/tools/php_security_audit_v2/parser.py
@@ -36,7 +36,7 @@ def get_findings(self, filename, test):
findingdetail += "Details: " + issue["message"] + "\n"
sev = PhpSecurityAuditV2Parser.get_severity_word(
- issue["severity"]
+ issue["severity"],
)
dupe_key = (
diff --git a/dojo/tools/php_symfony_security_check/parser.py b/dojo/tools/php_symfony_security_check/parser.py
index a124a4d419..f22bf45f4e 100644
--- a/dojo/tools/php_symfony_security_check/parser.py
+++ b/dojo/tools/php_symfony_security_check/parser.py
@@ -43,10 +43,10 @@ def get_items(self, tree, test):
for advisory in advisories:
item = get_item(
- dependency_name, dependency_version, advisory, test
+ dependency_name, dependency_version, advisory, test,
)
unique_key = str(dependency_name) + str(
- dependency_data["version"] + str(advisory["cve"])
+ dependency_data["version"] + str(advisory["cve"]),
)
items[unique_key] = item
diff --git a/dojo/tools/pmd/parser.py b/dojo/tools/pmd/parser.py
index 484d289b03..1047a92a95 100644
--- a/dojo/tools/pmd/parser.py
+++ b/dojo/tools/pmd/parser.py
@@ -22,7 +22,7 @@ def get_findings(self, filename, test):
if isinstance(content, bytes):
content = content.decode("utf-8")
reader = list(
- csv.DictReader(io.StringIO(content), delimiter=",", quotechar='"')
+ csv.DictReader(io.StringIO(content), delimiter=",", quotechar='"'),
)
for row in reader:
@@ -43,7 +43,7 @@ def get_findings(self, filename, test):
finding.severity = priority
description = "Description: {}\n".format(
- row["Description"].strip()
+ row["Description"].strip(),
)
description += "Rule set: {}\n".format(row["Rule set"].strip())
description += "Problem: {}\n".format(row["Problem"].strip())
@@ -55,7 +55,7 @@ def get_findings(self, filename, test):
finding.mitigation = "No mitigation provided"
key = hashlib.sha256(
- f"{finding.title}|{finding.description}|{finding.file_path}|{finding.line}".encode()
+ f"{finding.title}|{finding.description}|{finding.file_path}|{finding.line}".encode(),
).hexdigest()
if key not in dupes:
diff --git a/dojo/tools/popeye/parser.py b/dojo/tools/popeye/parser.py
index 65ac0d8580..e3806c6f8d 100644
--- a/dojo/tools/popeye/parser.py
+++ b/dojo/tools/popeye/parser.py
@@ -37,7 +37,7 @@ def get_findings(self, file, test):
+ issue["message"]
)
severity = self.get_defect_dojo_severity(
- issue["level"]
+ issue["level"],
)
description = (
"**Sanitizer** : "
@@ -56,7 +56,7 @@ def get_findings(self, file, test):
+ issue["message"]
)
vuln_id_from_tool = re.search(
- r"\[(POP-\d+)\].+", issue["message"]
+ r"\[(POP-\d+)\].+", issue["message"],
).group(1)
finding = Finding(
title=title,
@@ -69,7 +69,7 @@ def get_findings(self, file, test):
)
# internal de-duplication
dupe_key = hashlib.sha256(
- str(description + title).encode("utf-8")
+ str(description + title).encode("utf-8"),
).hexdigest()
if dupe_key not in dupes:
dupes[dupe_key] = finding
diff --git a/dojo/tools/progpilot/parser.py b/dojo/tools/progpilot/parser.py
index 9947976e6e..6badb4c044 100644
--- a/dojo/tools/progpilot/parser.py
+++ b/dojo/tools/progpilot/parser.py
@@ -64,7 +64,7 @@ def get_findings(self, filename, test):
severity="Medium",
dynamic_finding=False,
static_finding=True,
- unique_id_from_tool=vuln_id
+ unique_id_from_tool=vuln_id,
)
if sink_line is not None:
find.line = sink_line
diff --git a/dojo/tools/pwn_sast/parser.py b/dojo/tools/pwn_sast/parser.py
index 0a4ba9652e..d66afa3512 100644
--- a/dojo/tools/pwn_sast/parser.py
+++ b/dojo/tools/pwn_sast/parser.py
@@ -59,7 +59,7 @@ def get_findings(self, filename, test):
"Install pwn_sast Driver via: https://github.com/0dayinc/pwn#installation",
"Execute the pwn_sast Driver via:",
f"```pwn_sast --dir-path . --uri-source-root {git_repo_root_uri} -s```",
- ]
+ ],
)
for line in line_no_and_contents:
@@ -76,7 +76,7 @@ def get_findings(self, filename, test):
f"Committed By: {author}",
"Line Contents:",
f"```{contents}```",
- ]
+ ],
)
impact = "\n".join(
@@ -84,17 +84,17 @@ def get_findings(self, filename, test):
f"Security Control Impacted: {section}",
f"NIST 800-53 Security Control Details: {nist_800_53_uri}",
f"CWE Details: {cwe_uri}",
- ]
+ ],
)
mitigation = "\n".join(
[
f"NIST 800-53 Security Control Details / Mitigation Strategy: {nist_800_53_uri}",
- ]
+ ],
)
unique_finding_key = hashlib.sha256(
- (offending_uri + contents).encode("utf-8")
+ (offending_uri + contents).encode("utf-8"),
).hexdigest()
if unique_finding_key in findings:
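The pwn_sast hunks above are typical of the flake8-commas (COM812, missing-trailing-comma) fixes in this patch: the list elements already ended with commas, but the multi-line list literal passed as the sole argument did not, so the closing bracket now gains one as well. A minimal runnable sketch of that before/after shape, using made-up strings rather than the parser's real text:

# Before the fix: the elements have commas, but the list argument itself does not.
steps = "\n".join(
    [
        "Install the driver",
        "Run the scan",
    ]
)

# After the fix: the closing bracket of the multi-line argument is followed by a
# comma too, so adding or wrapping arguments later is a one-line diff.
steps = "\n".join(
    [
        "Install the driver",
        "Run the scan",
    ],
)
print(steps)
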
diff --git a/dojo/tools/qualys/csv_parser.py b/dojo/tools/qualys/csv_parser.py
index 20f5314305..98a8ec00af 100644
--- a/dojo/tools/qualys/csv_parser.py
+++ b/dojo/tools/qualys/csv_parser.py
@@ -23,7 +23,7 @@ def parse_csv(csv_file) -> [Finding]:
if isinstance(content, bytes):
content = content.decode("utf-8")
csv_reader = csv.DictReader(
- io.StringIO(content), delimiter=",", quotechar='"'
+ io.StringIO(content), delimiter=",", quotechar='"',
)
report_findings = get_report_findings(csv_reader)
@@ -78,17 +78,17 @@ def _extract_cvss_vectors(cvss_base, cvss_temporal):
if cvss_temporal:
try:
cvss_temporal_vector = re.search(
- vector_pattern, cvss_temporal
+ vector_pattern, cvss_temporal,
).group(1)
cvss_vector += "/"
cvss_vector += cvss_temporal_vector
except IndexError:
_logger.error(
- f"CVSS3 Temporal Vector not found in {cvss_base}"
+ f"CVSS3 Temporal Vector not found in {cvss_base}",
)
except AttributeError:
_logger.error(
- f"CVSS3 Temporal Vector not found in {cvss_base}"
+ f"CVSS3 Temporal Vector not found in {cvss_base}",
)
return cvss_vector
@@ -159,11 +159,11 @@ def build_findings_from_dict(report_findings: [dict]) -> [Finding]:
if "CVSS3 Base" in report_finding:
cvssv3 = _extract_cvss_vectors(
- report_finding["CVSS3 Base"], report_finding["CVSS3 Temporal"]
+ report_finding["CVSS3 Base"], report_finding["CVSS3 Temporal"],
)
elif "CVSS3.1 Base" in report_finding:
cvssv3 = _extract_cvss_vectors(
- report_finding["CVSS3.1 Base"], report_finding["CVSS3.1 Temporal"]
+ report_finding["CVSS3.1 Base"], report_finding["CVSS3.1 Temporal"],
)
# Get the date based on the first_seen setting
try:
@@ -189,13 +189,13 @@ def build_findings_from_dict(report_findings: [dict]) -> [Finding]:
impact=report_finding["Impact"],
date=date,
vuln_id_from_tool=report_finding["QID"],
- cvssv3=cvssv3
+ cvssv3=cvssv3,
)
# Qualys reports regression findings as active, but with a Date Last
# Fixed.
if report_finding["Date Last Fixed"]:
finding.mitigated = datetime.strptime(
- report_finding["Date Last Fixed"], "%m/%d/%Y %H:%M:%S"
+ report_finding["Date Last Fixed"], "%m/%d/%Y %H:%M:%S",
)
finding.is_mitigated = True
else:
@@ -229,7 +229,7 @@ def build_findings_from_dict(report_findings: [dict]) -> [Finding]:
severity=report_finding["SEVERITY"],
impact=report_finding["IMPACT"],
date=date,
- vuln_id_from_tool=report_finding["QID"]
+ vuln_id_from_tool=report_finding["QID"],
)
# Make sure we have something to append to
if isinstance(finding.unsaved_vulnerability_ids, list):
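Note in the csv_parser hunks that the rule only fires where a call is already split over several lines; calls that fit on one line keep their existing form. A small runnable sketch of where the comma is and is not added (the date string is a placeholder, not taken from a report):

from datetime import datetime

# Single-line call: no trailing comma is required or added.
mitigated = datetime.strptime("01/02/2024 10:11:12", "%m/%d/%Y %H:%M:%S")

# Multi-line call with the closing parenthesis on its own line: the final
# argument receives the trailing comma, exactly as in the hunks above.
mitigated = datetime.strptime(
    "01/02/2024 10:11:12", "%m/%d/%Y %H:%M:%S",
)
print(mitigated.isoformat())
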
diff --git a/dojo/tools/qualys/parser.py b/dojo/tools/qualys/parser.py
index 2af9a528f1..ade88d2d32 100644
--- a/dojo/tools/qualys/parser.py
+++ b/dojo/tools/qualys/parser.py
@@ -111,7 +111,7 @@ def split_cvss(value, _temp):
# remove ")" at the end
if _temp.get("CVSS_vector") is None:
_temp["CVSS_vector"] = CVSS3(
- "CVSS:3.0/" + split[1][:-1]
+ "CVSS:3.0/" + split[1][:-1],
).clean_vector()
else:
if _temp.get("CVSS_value") is None:
@@ -174,7 +174,7 @@ def parse_finding(host, tree):
last_fixed = vuln_details.findtext("LAST_FIXED")
if last_fixed is not None:
_temp["mitigation_date"] = datetime.datetime.strptime(
- last_fixed, "%Y-%m-%dT%H:%M:%SZ"
+ last_fixed, "%Y-%m-%dT%H:%M:%SZ",
)
else:
_temp["mitigation_date"] = None
@@ -217,7 +217,7 @@ def parse_finding(host, tree):
htmltext("First Found: " + _first_found),
htmltext("Last Found: " + _last_found),
htmltext("Times Found: " + _times_found),
- ]
+ ],
)
# Impact description
_temp["IMPACT"] = htmltext(vuln_item.findtext("IMPACT"))
diff --git a/dojo/tools/qualys_infrascan_webgui/parser.py b/dojo/tools/qualys_infrascan_webgui/parser.py
index 1ac6909eea..f252e7d541 100644
--- a/dojo/tools/qualys_infrascan_webgui/parser.py
+++ b/dojo/tools/qualys_infrascan_webgui/parser.py
@@ -59,7 +59,7 @@ def issue_r(raw_row, vuln, scan_date):
_description = str(vuln_details.findtext("DIAGNOSIS"))
# Solution Strips Heading Workaround(s)
_temp["solution"] = htmltext(
- str(vuln_details.findtext("SOLUTION"))
+ str(vuln_details.findtext("SOLUTION")),
)
# Vuln_description
@@ -70,11 +70,11 @@ def issue_r(raw_row, vuln, scan_date):
htmltext("**QID:** " + str(_gid)),
htmltext("**Port:** " + str(_port)),
htmltext("**Result Evidence:** " + _result),
- ]
+ ],
)
# Impact description
_temp["IMPACT"] = htmltext(
- str(vuln_details.findtext("CONSEQUENCE"))
+ str(vuln_details.findtext("CONSEQUENCE")),
)
# CVE and LINKS
diff --git a/dojo/tools/qualys_webapp/parser.py b/dojo/tools/qualys_webapp/parser.py
index 4c8c595cf1..47be5bb948 100644
--- a/dojo/tools/qualys_webapp/parser.py
+++ b/dojo/tools/qualys_webapp/parser.py
@@ -90,13 +90,13 @@ def attach_unique_extras(
protocol=protocol,
query=truncate_str(query, 1000),
fragment=truncate_str(fragment, 500),
- )
+ ),
)
for i in range(len(requests)):
if requests[i] != "" or responses[i] != "":
finding.unsaved_req_resp.append(
- {"req": requests[i], "resp": responses[i]}
+ {"req": requests[i], "resp": responses[i]},
)
if active_text is not None:
@@ -133,7 +133,7 @@ def attach_extras(endpoints, requests, responses, finding, date, qid, test):
for i in range(len(requests)):
if requests[i] != "" or responses[i] != "":
finding.unsaved_req_resp.append(
- {"req": requests[i], "resp": responses[i]}
+ {"req": requests[i], "resp": responses[i]},
)
return finding
@@ -186,7 +186,7 @@ def get_request_response(payloads):
def get_unique_vulnerabilities(
- vulnerabilities, test, is_info=False, is_app_report=False
+ vulnerabilities, test, is_info=False, is_app_report=False,
):
findings = {}
# Iterate through all vulnerabilites to pull necessary info
@@ -216,11 +216,11 @@ def get_unique_vulnerabilities(
if raw_finding_date is not None:
if raw_finding_date.endswith("GMT"):
finding_date = datetime.strptime(
- raw_finding_date, "%d %b %Y %I:%M%p GMT"
+ raw_finding_date, "%d %b %Y %I:%M%p GMT",
)
else:
finding_date = datetime.strptime(
- raw_finding_date, "%d %b %Y %I:%M%p GMT%z"
+ raw_finding_date, "%d %b %Y %I:%M%p GMT%z",
)
else:
finding_date = None
@@ -253,7 +253,7 @@ def get_unique_vulnerabilities(
# Traverse and retreive any information in the VULNERABILITY_LIST
# section of the report. This includes all endpoints and request/response pairs
def get_vulnerabilities(
- vulnerabilities, test, is_info=False, is_app_report=False
+ vulnerabilities, test, is_info=False, is_app_report=False,
):
findings = {}
# Iterate through all vulnerabilites to pull necessary info
@@ -283,18 +283,18 @@ def get_vulnerabilities(
if raw_finding_date is not None:
if raw_finding_date.endswith("GMT"):
finding_date = datetime.strptime(
- raw_finding_date, "%d %b %Y %I:%M%p GMT"
+ raw_finding_date, "%d %b %Y %I:%M%p GMT",
)
else:
finding_date = datetime.strptime(
- raw_finding_date, "%d %b %Y %I:%M%p GMT%z"
+ raw_finding_date, "%d %b %Y %I:%M%p GMT%z",
)
else:
finding_date = None
finding = findings.get(qid, None)
findings[qid] = attach_extras(
- urls, req_resps[0], req_resps[1], finding, finding_date, qid, test
+ urls, req_resps[0], req_resps[1], finding, finding_date, qid, test,
)
return findings
@@ -351,22 +351,22 @@ def get_unique_items(
findings = {}
for unique_id, finding in get_unique_vulnerabilities(
- vulnerabilities, test, False, is_app_report
+ vulnerabilities, test, False, is_app_report,
).items():
qid = int(finding.vuln_id_from_tool)
if qid in g_qid_list:
index = g_qid_list.index(qid)
findings[unique_id] = get_glossary_item(
- glossary[index], finding, enable_weakness=enable_weakness
+ glossary[index], finding, enable_weakness=enable_weakness,
)
for unique_id, finding in get_unique_vulnerabilities(
- info_gathered, test, True, is_app_report
+ info_gathered, test, True, is_app_report,
).items():
qid = int(finding.vuln_id_from_tool)
if qid in g_qid_list:
index = g_qid_list.index(qid)
finding = get_glossary_item(
- glossary[index], finding, True, enable_weakness=enable_weakness
+ glossary[index], finding, True, enable_weakness=enable_weakness,
)
if qid in ig_qid_list:
index = ig_qid_list.index(qid)
@@ -390,20 +390,20 @@ def get_items(
findings = {}
for qid, finding in get_vulnerabilities(
- vulnerabilities, test, False, is_app_report
+ vulnerabilities, test, False, is_app_report,
).items():
if qid in g_qid_list:
index = g_qid_list.index(qid)
findings[qid] = get_glossary_item(
- glossary[index], finding, enable_weakness=enable_weakness
+ glossary[index], finding, enable_weakness=enable_weakness,
)
for qid, finding in get_vulnerabilities(
- info_gathered, test, True, is_app_report
+ info_gathered, test, True, is_app_report,
).items():
if qid in g_qid_list:
index = g_qid_list.index(qid)
finding = get_glossary_item(
- glossary[index], finding, True, enable_weakness=enable_weakness
+ glossary[index], finding, True, enable_weakness=enable_weakness,
)
if qid in ig_qid_list:
index = ig_qid_list.index(qid)
@@ -423,17 +423,17 @@ def qualys_webapp_parser(qualys_xml_file, test, unique, enable_weakness=False):
if is_app_report:
vulnerabilities = tree.findall(
- "./RESULTS/WEB_APPLICATION/VULNERABILITY_LIST/VULNERABILITY"
+ "./RESULTS/WEB_APPLICATION/VULNERABILITY_LIST/VULNERABILITY",
)
info_gathered = tree.findall(
- "./RESULTS/WEB_APPLICATION/INFORMATION_GATHERED_LIST/INFORMATION_GATHERED"
+ "./RESULTS/WEB_APPLICATION/INFORMATION_GATHERED_LIST/INFORMATION_GATHERED",
)
else:
vulnerabilities = tree.findall(
- "./RESULTS/VULNERABILITY_LIST/VULNERABILITY"
+ "./RESULTS/VULNERABILITY_LIST/VULNERABILITY",
)
info_gathered = tree.findall(
- "./RESULTS/INFORMATION_GATHERED_LIST/INFORMATION_GATHERED"
+ "./RESULTS/INFORMATION_GATHERED_LIST/INFORMATION_GATHERED",
)
glossary = tree.findall("./GLOSSARY/QID_LIST/QID")
@@ -446,7 +446,7 @@ def qualys_webapp_parser(qualys_xml_file, test, unique, enable_weakness=False):
is_app_report,
test,
enable_weakness,
- ).values()
+ ).values(),
)
else:
items = list(
@@ -457,7 +457,7 @@ def qualys_webapp_parser(qualys_xml_file, test, unique, enable_weakness=False):
is_app_report,
test,
enable_weakness,
- ).values()
+ ).values(),
)
return items
@@ -474,8 +474,8 @@ def get_description_for_scan_types(self, scan_type):
return "Qualys WebScan output files can be imported in XML format."
def get_findings(
- self, file, test, enable_weakness=QUALYS_WAS_WEAKNESS_IS_VULN
+ self, file, test, enable_weakness=QUALYS_WAS_WEAKNESS_IS_VULN,
):
return qualys_webapp_parser(
- file, test, QUALYS_WAS_UNIQUE_ID, enable_weakness
+ file, test, QUALYS_WAS_UNIQUE_ID, enable_weakness,
)
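The same rule applies to function signatures in qualys_webapp/parser.py: when a def spreads its parameters across lines, the last parameter also picks up a trailing comma. A runnable sketch mirroring the signature shown above (the body is invented for illustration, not the parser's logic):

def get_unique_vulnerabilities(
    vulnerabilities, test, is_info=False, is_app_report=False,
):
    # The trailing comma after the last parameter keeps future signature
    # changes down to a single modified line.
    return len(vulnerabilities), test, is_info, is_app_report

print(get_unique_vulnerabilities([], "test-1"))
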
diff --git a/dojo/tools/retirejs/parser.py b/dojo/tools/retirejs/parser.py
index aaf038f898..4cb162e8a4 100644
--- a/dojo/tools/retirejs/parser.py
+++ b/dojo/tools/retirejs/parser.py
@@ -35,7 +35,7 @@ def get_items(self, tree, test):
+ ")"
)
item.description += "\n\n Raw Result: " + str(
- json.dumps(vulnerability, indent=4, sort_keys=True)
+ json.dumps(vulnerability, indent=4, sort_keys=True),
)
item.references = item.references
@@ -47,7 +47,7 @@ def get_items(self, tree, test):
unique_key = hashlib.md5(
(
item.title + item.references + encrypted_file
- ).encode()
+ ).encode(),
).hexdigest()
items[unique_key] = item
return list(items.values())
diff --git a/dojo/tools/risk_recon/parser.py b/dojo/tools/risk_recon/parser.py
index 7d14b6ebce..30c08e5161 100644
--- a/dojo/tools/risk_recon/parser.py
+++ b/dojo/tools/risk_recon/parser.py
@@ -104,7 +104,7 @@ def _get_findings_internal(self, findings, test):
finding.unsaved_tags = tags
dupe_key = item.get(
- "finding_id", title + "|" + tags + "|" + findingdetail
+ "finding_id", title + "|" + tags + "|" + findingdetail,
)
if dupe_key in dupes:
diff --git a/dojo/tools/rubocop/parser.py b/dojo/tools/rubocop/parser.py
index f0454a7652..4d6459dd1c 100644
--- a/dojo/tools/rubocop/parser.py
+++ b/dojo/tools/rubocop/parser.py
@@ -49,7 +49,7 @@ def get_findings(self, scan_file, test):
f"**Message**: {offense.get('message')}",
f"**Is correctable?**: `{offense.get('correctable')}`",
f"**Location**: `{'-'.join(offense['location'])}`",
- ]
+ ],
)
finding = Finding(
test=test,
diff --git a/dojo/tools/rusty_hog/parser.py b/dojo/tools/rusty_hog/parser.py
index 4ffd6c9ade..fa2a4f6ebc 100644
--- a/dojo/tools/rusty_hog/parser.py
+++ b/dojo/tools/rusty_hog/parser.py
@@ -25,7 +25,7 @@ def parse_json(self, json_output):
def get_items(self, json_output, scanner, test):
items = {}
findings = self.__getitem(
- vulnerabilities=self.parse_json(json_output), scanner=scanner
+ vulnerabilities=self.parse_json(json_output), scanner=scanner,
)
for finding in findings:
unique_key = f"Finding {finding}"
@@ -67,7 +67,7 @@ def get_tests(self, scan_type, handle):
else:
test.description = parsername
test.findings = self.__getitem(
- vulnerabilities=tree, scanner=parsername
+ vulnerabilities=tree, scanner=parsername,
)
tests.append(test)
return tests
@@ -85,15 +85,15 @@ def __getitem(self, vulnerabilities, scanner):
description = f"**This string was found:** {found_secret_string}"
if vulnerability.get("commit") is not None:
description += "\n**Commit message:** {}".format(
- vulnerability.get("commit")
+ vulnerability.get("commit"),
)
if vulnerability.get("commitHash") is not None:
description += "\n**Commit hash:** {}".format(
- vulnerability.get("commitHash")
+ vulnerability.get("commitHash"),
)
if vulnerability.get("parent_commit_hash") is not None:
description += "\n**Parent commit hash:** {}".format(
- vulnerability.get("parent_commit_hash")
+ vulnerability.get("parent_commit_hash"),
)
if (
vulnerability.get("old_file_id") is not None
@@ -121,15 +121,15 @@ def __getitem(self, vulnerabilities, scanner):
description = f"**This string was found:** {found_secret_string}"
if vulnerability.get("path") is not None:
description += "\n**Path of Issue:** {}".format(
- vulnerability.get("path")
+ vulnerability.get("path"),
)
if vulnerability.get("linenum") is not None:
description += "\n**Linenum of Issue:** {}".format(
- vulnerability.get("linenum")
+ vulnerability.get("linenum"),
)
if vulnerability.get("diff") is not None:
description += "\n**Diff:** {}".format(
- vulnerability.get("diff")
+ vulnerability.get("diff"),
)
elif scanner == "Gottingen Hog":
"""Gottingen Hog"""
@@ -137,31 +137,31 @@ def __getitem(self, vulnerabilities, scanner):
description = f"**This string was found:** {found_secret_string}"
if vulnerability.get("issue_id") is not None:
description += "\n**JIRA Issue ID:** {}".format(
- vulnerability.get("issue_id")
+ vulnerability.get("issue_id"),
)
if vulnerability.get("location") is not None:
description += "\n**JIRA location:** {}".format(
- vulnerability.get("location")
+ vulnerability.get("location"),
)
if vulnerability.get("url") is not None:
description += "\n**JIRA url:** [{}]({})".format(
- vulnerability.get("url"), vulnerability.get("url")
+ vulnerability.get("url"), vulnerability.get("url"),
)
elif scanner == "Essex Hog":
found_secret_string = vulnerability.get("stringsFound")
description = f"**This string was found:** {found_secret_string}"
if vulnerability.get("page_id") is not None:
description += "\n**Confluence URL:** [{}]({})".format(
- vulnerability.get("url"), vulnerability.get("url")
+ vulnerability.get("url"), vulnerability.get("url"),
)
description += "\n**Confluence Page ID:** {}".format(
- vulnerability.get("page_id")
+ vulnerability.get("page_id"),
)
"""General - for all Rusty Hogs"""
file_path = vulnerability.get("path")
if vulnerability.get("date") is not None:
description += "\n**Date:** {}".format(
- vulnerability.get("date")
+ vulnerability.get("date"),
)
"""Finding Title"""
if scanner == "Choctaw Hog":
@@ -172,7 +172,7 @@ def __getitem(self, vulnerabilities, scanner):
)
elif scanner == "Duroc Hog":
title = "{} found in path {}".format(
- vulnerability.get("reason"), vulnerability.get("path")
+ vulnerability.get("reason"), vulnerability.get("path"),
)
elif scanner == "Gottingen Hog":
title = "{} found in Jira ID {} ({})".format(
@@ -182,7 +182,7 @@ def __getitem(self, vulnerabilities, scanner):
)
elif scanner == "Essex Hog":
title = "{} found in Confluence Page ID {}".format(
- vulnerability.get("reason"), vulnerability.get("page_id")
+ vulnerability.get("reason"), vulnerability.get("page_id"),
)
# create the finding object
finding = Finding(
diff --git a/dojo/tools/sarif/parser.py b/dojo/tools/sarif/parser.py
index 2fe52197b1..f311d03463 100644
--- a/dojo/tools/sarif/parser.py
+++ b/dojo/tools/sarif/parser.py
@@ -164,16 +164,16 @@ def get_title(result, rule):
title = None
if "message" in result:
title = get_message_from_multiformatMessageString(
- result["message"], rule
+ result["message"], rule,
)
if title is None and rule is not None:
if "shortDescription" in rule:
title = get_message_from_multiformatMessageString(
- rule["shortDescription"], rule
+ rule["shortDescription"], rule,
)
elif "fullDescription" in rule:
title = get_message_from_multiformatMessageString(
- rule["fullDescription"], rule
+ rule["fullDescription"], rule,
)
elif "name" in rule:
title = rule["name"]
@@ -267,7 +267,7 @@ def get_description(result, rule):
message = ""
if "message" in result:
message = get_message_from_multiformatMessageString(
- result["message"], rule
+ result["message"], rule,
)
description += f"**Result message:** {message}\n"
if get_snippet(result) is not None:
@@ -278,13 +278,13 @@ def get_description(result, rule):
shortDescription = ""
if "shortDescription" in rule:
shortDescription = get_message_from_multiformatMessageString(
- rule["shortDescription"], rule
+ rule["shortDescription"], rule,
)
if shortDescription != message:
description += f"**{_('Rule short description')}:** {shortDescription}\n"
if "fullDescription" in rule:
fullDescription = get_message_from_multiformatMessageString(
- rule["fullDescription"], rule
+ rule["fullDescription"], rule,
)
if (
fullDescription != message
@@ -308,7 +308,7 @@ def get_references(rule):
reference = rule["helpUri"]
elif "help" in rule:
helpText = get_message_from_multiformatMessageString(
- rule["help"], rule
+ rule["help"], rule,
)
if helpText.startswith("http"):
reference = helpText
@@ -435,7 +435,7 @@ def get_item(result, rules, artifacts, run_date):
# manage fixes provided in the report
if "fixes" in result:
finding.mitigation = "\n".join(
- [fix.get("description", {}).get("text") for fix in result["fixes"]]
+ [fix.get("description", {}).get("text") for fix in result["fixes"]],
)
if run_date:
@@ -460,7 +460,7 @@ def get_item(result, rules, artifacts, run_date):
hashes = get_fingerprints_hashes(result["partialFingerprints"])
sorted_hashes = sorted(hashes.keys())
finding.unique_id_from_tool = "|".join(
- [f'{key}:{hashes[key]["value"]}' for key in sorted_hashes]
+ [f'{key}:{hashes[key]["value"]}' for key in sorted_hashes],
)
return finding
diff --git a/dojo/tools/scantist/parser.py b/dojo/tools/scantist/parser.py
index b2b3b5f302..c67de9a51c 100644
--- a/dojo/tools/scantist/parser.py
+++ b/dojo/tools/scantist/parser.py
@@ -84,7 +84,7 @@ def get_findings(vuln, test):
if item:
hash_key = hashlib.md5(
node.get("Public ID").encode("utf-8")
- + node.get("Library").encode("utf-8")
+ + node.get("Library").encode("utf-8"),
).hexdigest()
items[hash_key] = get_findings(node, test)
diff --git a/dojo/tools/scout_suite/parser.py b/dojo/tools/scout_suite/parser.py
index e6344fa67a..45dd1dbdf0 100644
--- a/dojo/tools/scout_suite/parser.py
+++ b/dojo/tools/scout_suite/parser.py
@@ -63,7 +63,7 @@ def get_tests(self, scan_type, handle):
str(items["max_level"]),
str(items["resources_count"]),
str(items["rules_count"]),
- ]
+ ],
)
tests = []
@@ -92,7 +92,7 @@ def __get_items(self, data):
last_run_date = None
if "time" in data.get("last_run", {}):
last_run_date = datetime.strptime(
- data["last_run"]["time"][0:10], "%Y-%m-%d"
+ data["last_run"]["time"][0:10], "%Y-%m-%d",
).date()
# Configured Services
@@ -138,7 +138,7 @@ def __get_items(self, data):
dynamic_finding=False,
static_finding=True,
vuln_id_from_tool=":".join(
- [data["provider_code"], finding_name]
+ [data["provider_code"], finding_name],
),
)
if finding.get("references"):
diff --git a/dojo/tools/semgrep/parser.py b/dojo/tools/semgrep/parser.py
index 97e711bf23..aa4f730750 100644
--- a/dojo/tools/semgrep/parser.py
+++ b/dojo/tools/semgrep/parser.py
@@ -45,20 +45,20 @@ def get_findings(self, filename, test):
item["extra"]["metadata"]
.get("cwe")[0]
.partition(":")[0]
- .partition("-")[2]
+ .partition("-")[2],
)
else:
finding.cwe = int(
item["extra"]["metadata"]
.get("cwe")
.partition(":")[0]
- .partition("-")[2]
+ .partition("-")[2],
)
# manage references from metadata
if "references" in item["extra"]["metadata"]:
finding.references = "\n".join(
- item["extra"]["metadata"]["references"]
+ item["extra"]["metadata"]["references"],
)
# manage mitigation from metadata
@@ -71,7 +71,7 @@ def get_findings(self, filename, test):
"\n```\n",
json.dumps(item["extra"]["fix_regex"]),
"\n```\n",
- ]
+ ],
)
dupe_key = finding.title + finding.file_path + str(finding.line)
@@ -109,14 +109,14 @@ def get_findings(self, filename, test):
item["advisory"]["references"]
.get("cweIds")[0]
.partition(":")[0]
- .partition("-")[2]
+ .partition("-")[2],
)
else:
finding.cwe = int(
item["advisory"]["references"]
.get("cweIds")
.partition(":")[0]
- .partition("-")[2]
+ .partition("-")[2],
)
dupe_key = finding.title + finding.file_path + str(finding.line)
diff --git a/dojo/tools/skf/parser.py b/dojo/tools/skf/parser.py
index 74ec86bba4..887716c509 100644
--- a/dojo/tools/skf/parser.py
+++ b/dojo/tools/skf/parser.py
@@ -32,7 +32,7 @@ def __init__(self):
def map_column_value(self, finding, column_value):
finding.date = datetime.strptime(
- column_value, "%Y-%m-%d %H:%M:%S"
+ column_value, "%Y-%m-%d %H:%M:%S",
).date()
@@ -101,7 +101,7 @@ def get_findings(self, filename, test):
row_number = 0
reader = csv.reader(
- io.StringIO(content), delimiter=",", quotechar='"', escapechar="\\"
+ io.StringIO(content), delimiter=",", quotechar='"', escapechar="\\",
)
dupes = {}
for row in reader:
@@ -116,7 +116,7 @@ def get_findings(self, filename, test):
column_number = 0
for column in row:
chain.process_column(
- column_names[column_number], column, finding
+ column_names[column_number], column, finding,
)
column_number += 1
@@ -127,8 +127,8 @@ def get_findings(self, filename, test):
+ "|"
+ finding.title
+ "|"
- + finding.description
- ).encode("utf-8")
+ + finding.description,
+ ).encode("utf-8"),
).hexdigest()
if key not in dupes:
diff --git a/dojo/tools/snyk/parser.py b/dojo/tools/snyk/parser.py
index 055d278d29..4d1a0e8943 100644
--- a/dojo/tools/snyk/parser.py
+++ b/dojo/tools/snyk/parser.py
@@ -51,7 +51,7 @@ def get_items(self, tree, test):
vulnerabilityTree = tree["vulnerabilities"]
for node in vulnerabilityTree:
item = self.get_item(
- node, test, target_file=target_file, upgrades=upgrades
+ node, test, target_file=target_file, upgrades=upgrades,
)
items[iterator] = item
iterator += 1
@@ -59,7 +59,7 @@ def get_items(self, tree, test):
results = tree["runs"][0]["results"]
for node in results:
item = self.get_code_item(
- node, test
+ node, test,
)
items[iterator] = item
iterator += 1
@@ -70,7 +70,7 @@ def get_item(self, vulnerability, test, target_file=None, upgrades=None):
# or an array for multiple versions depending on the language.
if isinstance(vulnerability["semver"]["vulnerable"], list):
vulnerable_versions = ", ".join(
- vulnerability["semver"]["vulnerable"]
+ vulnerability["semver"]["vulnerable"],
)
else:
vulnerable_versions = vulnerability["semver"]["vulnerable"]
@@ -172,7 +172,7 @@ def get_item(self, vulnerability, test, target_file=None, upgrades=None):
references = ""
if "id" in vulnerability:
references = "**SNYK ID**: https://app.snyk.io/vuln/{}\n\n".format(
- vulnerability["id"]
+ vulnerability["id"],
)
if cwe_references:
@@ -211,7 +211,7 @@ def get_item(self, vulnerability, test, target_file=None, upgrades=None):
for lib in tertiary_upgrade_list
):
finding.unsaved_tags.append(
- f"upgrade_to:{upgraded_pack}"
+ f"upgrade_to:{upgraded_pack}",
)
finding.mitigation += f"\nUpgrade from {current_pack_version} to {upgraded_pack} to fix this issue, as well as updating the following:\n - "
finding.mitigation += "\n - ".join(tertiary_upgrade_list)
diff --git a/dojo/tools/snyk_code/parser.py b/dojo/tools/snyk_code/parser.py
index cd7d74a4a4..a35b37251c 100644
--- a/dojo/tools/snyk_code/parser.py
+++ b/dojo/tools/snyk_code/parser.py
@@ -51,7 +51,7 @@ def get_items(self, tree, test):
vulnerabilityTree = tree["vulnerabilities"]
for node in vulnerabilityTree:
item = self.get_item(
- node, test, target_file=target_file, upgrades=upgrades
+ node, test, target_file=target_file, upgrades=upgrades,
)
items[iterator] = item
iterator += 1
@@ -59,7 +59,7 @@ def get_items(self, tree, test):
results = tree["runs"][0]["results"]
for node in results:
item = self.get_code_item(
- node, test
+ node, test,
)
items[iterator] = item
iterator += 1
@@ -70,7 +70,7 @@ def get_item(self, vulnerability, test, target_file=None, upgrades=None):
# or an array for multiple versions depending on the language.
if isinstance(vulnerability["semver"]["vulnerable"], list):
vulnerable_versions = ", ".join(
- vulnerability["semver"]["vulnerable"]
+ vulnerability["semver"]["vulnerable"],
)
else:
vulnerable_versions = vulnerability["semver"]["vulnerable"]
@@ -168,7 +168,7 @@ def get_item(self, vulnerability, test, target_file=None, upgrades=None):
references = ""
if "id" in vulnerability:
references = "**SNYK ID**: https://app.snyk.io/vuln/{}\n\n".format(
- vulnerability["id"]
+ vulnerability["id"],
)
if cwe_references:
@@ -207,7 +207,7 @@ def get_item(self, vulnerability, test, target_file=None, upgrades=None):
for lib in tertiary_upgrade_list
):
finding.unsaved_tags.append(
- f"upgrade_to:{upgraded_pack}"
+ f"upgrade_to:{upgraded_pack}",
)
finding.mitigation += f"\nUpgrade from {current_pack_version} to {upgraded_pack} to fix this issue, as well as updating the following:\n - "
finding.mitigation += "\n - ".join(tertiary_upgrade_list)
diff --git a/dojo/tools/solar_appscreener/parser.py b/dojo/tools/solar_appscreener/parser.py
index b5655a4292..fc6110ebcd 100644
--- a/dojo/tools/solar_appscreener/parser.py
+++ b/dojo/tools/solar_appscreener/parser.py
@@ -26,7 +26,7 @@ def get_findings(self, filename, test):
if isinstance(content, bytes):
content = content.decode("utf-8")
reader = csv.DictReader(
- io.StringIO(content), delimiter=",", quotechar='"'
+ io.StringIO(content), delimiter=",", quotechar='"',
)
csvarray = []
diff --git a/dojo/tools/sonarqube/parser.py b/dojo/tools/sonarqube/parser.py
index 1f268001d8..9d92c6e3e9 100644
--- a/dojo/tools/sonarqube/parser.py
+++ b/dojo/tools/sonarqube/parser.py
@@ -53,6 +53,6 @@ def get_findings(self, file, test):
raise ValueError(
"Internal error: Invalid mode "
+ self.mode
- + ". Expected: one of None, 'detailed'"
+ + ". Expected: one of None, 'detailed'",
)
return SonarQubeSoprasteriaHTML().get_items(tree, test, self.mode)
diff --git a/dojo/tools/sonarqube/soprasteria_helper.py b/dojo/tools/sonarqube/soprasteria_helper.py
index 99d2c3125f..47ddc3ddf7 100644
--- a/dojo/tools/sonarqube/soprasteria_helper.py
+++ b/dojo/tools/sonarqube/soprasteria_helper.py
@@ -27,7 +27,7 @@ def convert_sonar_severity(self, sonar_severity):
def get_description(self, vuln_details):
rule_description = etree.tostring(
- vuln_details, pretty_print=True
+ vuln_details, pretty_print=True,
).decode("utf-8", errors="replace")
rule_description = rule_description.split("See", 1)[0]
rule_description = (str(rule_description)).replace("", "**")
diff --git a/dojo/tools/sonarqube/soprasteria_html.py b/dojo/tools/sonarqube/soprasteria_html.py
index 8865ac618e..c4fb4e688c 100644
--- a/dojo/tools/sonarqube/soprasteria_html.py
+++ b/dojo/tools/sonarqube/soprasteria_html.py
@@ -10,7 +10,7 @@ def get_items(self, tree, test, mode):
# Check that there is at least one vulnerability (the vulnerabilities
# table is absent when no vuln are found)
detailTbody = tree.xpath(
- "/html/body/div[contains(@class,'detail')]/table/tbody"
+ "/html/body/div[contains(@class,'detail')]/table/tbody",
)
dupes = {}
if len(detailTbody) == 2:
@@ -32,7 +32,7 @@ def get_items(self, tree, test, mode):
rule_key = list(vuln_properties[0].iter("a"))[0].text
vuln_rule_name = rule_key and rule_key.strip()
vuln_severity = SonarQubeSoprasteriaHelper().convert_sonar_severity(
- vuln_properties[1].text and vuln_properties[1].text.strip()
+ vuln_properties[1].text and vuln_properties[1].text.strip(),
)
vuln_file_path = vuln_properties[2].text and vuln_properties[2].text.strip()
vuln_line = vuln_properties[3].text and vuln_properties[3].text.strip()
@@ -42,13 +42,13 @@ def get_items(self, tree, test, mode):
if vuln_title is None or vuln_mitigation is None:
raise ValueError(
"Parser ValueError: can't find a title or a mitigation for vulnerability of name "
- + vuln_rule_name
+ + vuln_rule_name,
)
try:
vuln_details = rulesDic[vuln_rule_name]
vuln_description = SonarQubeSoprasteriaHelper().get_description(vuln_details)
vuln_references = SonarQubeSoprasteriaHelper().get_references(
- vuln_rule_name, vuln_details
+ vuln_rule_name, vuln_details,
)
vuln_cwe = SonarQubeSoprasteriaHelper().get_cwe(vuln_references)
except KeyError:
diff --git a/dojo/tools/sonarqube/soprasteria_json.py b/dojo/tools/sonarqube/soprasteria_json.py
index 5feb49343c..aabc637740 100644
--- a/dojo/tools/sonarqube/soprasteria_json.py
+++ b/dojo/tools/sonarqube/soprasteria_json.py
@@ -24,7 +24,7 @@ def get_json_items(self, json_content, test, mode):
if title is None or mitigation is None:
raise ValueError(
"Parser ValueError: can't find a title or a mitigation for vulnerability of name "
- + rule_id
+ + rule_id,
)
try:
@@ -34,7 +34,7 @@ def get_json_items(self, json_content, test, mode):
issue_description = SonarQubeSoprasteriaHelper().get_description(html_desc_as_e_tree)
logger.debug(issue_description)
issue_references = SonarQubeSoprasteriaHelper().get_references(
- rule_id, html_desc_as_e_tree
+ rule_id, html_desc_as_e_tree,
)
issue_cwe = SonarQubeSoprasteriaHelper().get_cwe(issue_references)
except KeyError:
diff --git a/dojo/tools/sonatype/parser.py b/dojo/tools/sonatype/parser.py
index ef2f0df367..e1b7bac167 100644
--- a/dojo/tools/sonatype/parser.py
+++ b/dojo/tools/sonatype/parser.py
@@ -57,7 +57,7 @@ def get_finding(security_issue, component, test):
mitigation=status,
references=reference,
impact=threat_category,
- static_finding=True
+ static_finding=True,
)
if "cwe" in security_issue:
finding.cwe = security_issue["cwe"]
diff --git a/dojo/tools/spotbugs/parser.py b/dojo/tools/spotbugs/parser.py
index 367fd54d49..65ecac2153 100644
--- a/dojo/tools/spotbugs/parser.py
+++ b/dojo/tools/spotbugs/parser.py
@@ -36,8 +36,8 @@ def get_findings(self, filename, test):
# Parse ... html content
html_text = html_parser.handle(
ET.tostring(pattern.find("Details"), method="text").decode(
- "utf-8"
- )
+ "utf-8",
+ ),
)
# Parse mitigation from html
@@ -109,7 +109,7 @@ def get_findings(self, filename, test):
finding.file_path = source_extract.get("sourcepath")
finding.sast_source_object = source_extract.get("classname")
finding.sast_source_file_path = source_extract.get(
- "sourcepath"
+ "sourcepath",
)
if (
"start" in source_extract.attrib
diff --git a/dojo/tools/ssl_labs/parser.py b/dojo/tools/ssl_labs/parser.py
index f70992674a..6a1ff7a7d9 100644
--- a/dojo/tools/ssl_labs/parser.py
+++ b/dojo/tools/ssl_labs/parser.py
@@ -113,7 +113,7 @@ def get_findings(self, filename, test):
for item in endpoints["details"]["suites"]:
for suites in item["list"]:
suite_info = suite_info + self.suite_data(
- suites
+ suites,
)
except Exception:
suite_info = "Not provided." + "\n\n"
@@ -336,16 +336,16 @@ def get_findings(self, filename, test):
find.unsaved_endpoints = []
find.unsaved_endpoints.append(
- Endpoint(host=hostName, port=port, protocol=protocol)
+ Endpoint(host=hostName, port=port, protocol=protocol),
)
if ipAddress:
find.unsaved_endpoints.append(
- Endpoint(host=ipAddress, port=port, protocol=protocol)
+ Endpoint(host=ipAddress, port=port, protocol=protocol),
)
if endpoints["details"]["httpTransactions"]:
for url in endpoints["details"]["httpTransactions"]:
find.unsaved_endpoints.append(
- Endpoint.from_uri(url["requestUrl"])
+ Endpoint.from_uri(url["requestUrl"]),
)
return list(dupes.values())
diff --git a/dojo/tools/sslscan/parser.py b/dojo/tools/sslscan/parser.py
index 421e197442..621ded3daf 100644
--- a/dojo/tools/sslscan/parser.py
+++ b/dojo/tools/sslscan/parser.py
@@ -67,7 +67,7 @@ def get_findings(self, file, test):
if title and description is not None:
dupe_key = hashlib.sha256(
- str(description + title).encode("utf-8")
+ str(description + title).encode("utf-8"),
).hexdigest()
if dupe_key in dupes:
finding = dupes[dupe_key]
diff --git a/dojo/tools/sslyze/parser_json.py b/dojo/tools/sslyze/parser_json.py
index 48dc625c04..e364de8faf 100644
--- a/dojo/tools/sslyze/parser_json.py
+++ b/dojo/tools/sslyze/parser_json.py
@@ -256,7 +256,7 @@ def get_heartbleed(node, test, endpoint):
)
vulnerability_id = "CVE-2014-0160"
return get_finding(
- title, description, vulnerability_id, None, test, endpoint
+ title, description, vulnerability_id, None, test, endpoint,
)
elif "result" in heartbleed:
hb_result = heartbleed["result"]
@@ -296,7 +296,7 @@ def get_ccs(node, test, endpoint):
)
vulnerability_id = "CVE-2014-0224"
return get_finding(
- title, description, vulnerability_id, None, test, endpoint
+ title, description, vulnerability_id, None, test, endpoint,
)
elif "result" in ccs_injection:
@@ -354,7 +354,7 @@ def get_renegotiation(node, test, endpoint):
)
if vulnerable:
return get_finding(
- title, description, None, None, test, endpoint
+ title, description, None, None, test, endpoint,
)
elif "result" in renegotiation:
@@ -370,7 +370,7 @@ def get_renegotiation(node, test, endpoint):
+ " has problems with session renegotiation:"
)
return get_finding(
- title, description, None, None, test, endpoint
+ title, description, None, None, test, endpoint,
)
if "supports_secure_renegotiation" in reneg_result:
reneg_secure = reneg_result["supports_secure_renegotiation"]
@@ -381,7 +381,7 @@ def get_renegotiation(node, test, endpoint):
+ " has problems with session renegotiation:"
)
return get_finding(
- title, description, None, None, test, endpoint
+ title, description, None, None, test, endpoint,
)
return None
return None
@@ -401,7 +401,7 @@ def get_weak_protocol(cipher, text, node, test, endpoint):
get_url(endpoint) + " accepts " + text + " connections"
)
return get_finding(
- title, description, None, REFERENCES, test, endpoint
+ title, description, None, REFERENCES, test, endpoint,
)
elif "result" in weak_node:
weak_node_result = weak_node["result"]
@@ -414,7 +414,7 @@ def get_weak_protocol(cipher, text, node, test, endpoint):
get_url(endpoint) + " accepts " + text + " connections"
)
return get_finding(
- title, description, None, REFERENCES, test, endpoint
+ title, description, None, REFERENCES, test, endpoint,
)
return None
return None
@@ -446,7 +446,7 @@ def get_strong_protocol(cipher, text, suites, node, test, endpoint):
description += "\n - " + cs_node["name"]
if unrecommended_cipher_found:
return get_finding(
- title, description, None, REFERENCES, test, endpoint
+ title, description, None, REFERENCES, test, endpoint,
)
elif "result" in strong_node:
@@ -473,7 +473,7 @@ def get_strong_protocol(cipher, text, suites, node, test, endpoint):
description += "\n - " + cs_node["name"]
if unrecommended_cipher_found:
return get_finding(
- title, description, None, REFERENCES, test, endpoint
+ title, description, None, REFERENCES, test, endpoint,
)
return None
return None
@@ -523,7 +523,7 @@ def get_certificate_information(node, test, endpoint):
description += ", version " + version
if vulnerable:
return get_finding(
- title, description, None, None, test, endpoint
+ title, description, None, None, test, endpoint,
)
elif "result" in ci_node:
@@ -565,14 +565,14 @@ def get_certificate_information(node, test, endpoint):
description += ", version " + version
if vulnerable:
return get_finding(
- title, description, None, None, test, endpoint
+ title, description, None, None, test, endpoint,
)
return None
return None
def get_finding(
- title, description, vulnerability_id, references, test, endpoint
+ title, description, vulnerability_id, references, test, endpoint,
):
title += " (" + get_url(endpoint) + ")"
severity = "Medium"
diff --git a/dojo/tools/sslyze/parser_xml.py b/dojo/tools/sslyze/parser_xml.py
index 07c2adcaad..bddda3ac49 100644
--- a/dojo/tools/sslyze/parser_xml.py
+++ b/dojo/tools/sslyze/parser_xml.py
@@ -120,7 +120,7 @@ def get_findings(self, file, test):
if cipher.attrib["name"] in WEAK_CIPHER_LIST:
if cipher.attrib["name"] not in weak_cipher[element.tag]:
weak_cipher[element.tag].append(
- cipher.attrib["name"]
+ cipher.attrib["name"],
)
if len(weak_cipher[element.tag]) > 0:
title = (
@@ -135,7 +135,7 @@ def get_findings(self, file, test):
)
if title and description is not None:
dupe_key = hashlib.md5(
- str(description + title).encode("utf-8")
+ str(description + title).encode("utf-8"),
).hexdigest()
if dupe_key in dupes:
finding = dupes[dupe_key]
@@ -158,7 +158,7 @@ def get_findings(self, file, test):
if host is not None:
finding.unsaved_endpoints.append(
Endpoint(
- host=host, port=port, protocol=protocol
- )
+ host=host, port=port, protocol=protocol,
+ ),
)
return dupes.values()
diff --git a/dojo/tools/stackhawk/parser.py b/dojo/tools/stackhawk/parser.py
index 5aa85dfa49..99d708cdc8 100644
--- a/dojo/tools/stackhawk/parser.py
+++ b/dojo/tools/stackhawk/parser.py
@@ -38,7 +38,7 @@ def get_findings(self, json_output, test):
return findings
def __extract_findings(
- self, completed_scan, metadata: StackHawkScanMetadata, test
+ self, completed_scan, metadata: StackHawkScanMetadata, test,
):
findings = {}
@@ -49,19 +49,19 @@ def __extract_findings(
key = raw_finding["pluginId"]
if key not in findings:
finding = self.__extract_finding(
- raw_finding, metadata, test
+ raw_finding, metadata, test,
)
findings[key] = finding
# Update the test description these scan results are linked to.
test.description = "View scan details here: " + self.__hyperlink(
- completed_scan["scan"]["scanURL"]
+ completed_scan["scan"]["scanURL"],
)
return list(findings.values())
def __extract_finding(
- self, raw_finding, metadata: StackHawkScanMetadata, test
+ self, raw_finding, metadata: StackHawkScanMetadata, test,
) -> Finding:
steps_to_reproduce = "Use a specific message link and click 'Validate' to see the cURL!\n\n"
@@ -83,10 +83,10 @@ def __extract_finding(
endpoints.append(endpoint)
are_all_endpoints_risk_accepted = self.__are_all_endpoints_in_status(
- paths, "RISK_ACCEPTED"
+ paths, "RISK_ACCEPTED",
)
are_all_endpoints_false_positive = self.__are_all_endpoints_in_status(
- paths, "FALSE_POSITIVE"
+ paths, "FALSE_POSITIVE",
)
finding = Finding(
diff --git a/dojo/tools/sysdig_reports/sysdig_data.py b/dojo/tools/sysdig_reports/sysdig_data.py
index 24f3019fbf..930c07c411 100644
--- a/dojo/tools/sysdig_reports/sysdig_data.py
+++ b/dojo/tools/sysdig_reports/sysdig_data.py
@@ -9,7 +9,7 @@ def _map_severity(self, severity):
"HIGH": "High",
"MEDIUM": "Medium",
"LOW": "Low",
- "NEGLIGIBLE": "Informational"
+ "NEGLIGIBLE": "Informational",
}
return severity_mapping.get(severity, "Informational")
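Dict literals get the same treatment as calls and lists, as the sysdig_data.py hunk shows: the final key/value pair of a multi-line dict now ends with a comma. Beyond smaller diffs, this is the "magic trailing comma" that formatters such as Black and ruff format read as a signal to keep the structure expanded one entry per line. A short runnable sketch of the corrected shape:

severity_mapping = {
    "HIGH": "High",
    "MEDIUM": "Medium",
    "LOW": "Low",
    # Trailing comma after the last pair: new severities append cleanly, and
    # auto-formatters keep the dict one entry per line.
    "NEGLIGIBLE": "Informational",
}
print(severity_mapping.get("NEGLIGIBLE", "Informational"))
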
diff --git a/dojo/tools/talisman/parser.py b/dojo/tools/talisman/parser.py
index 20d2874c40..f3d0413887 100644
--- a/dojo/tools/talisman/parser.py
+++ b/dojo/tools/talisman/parser.py
@@ -76,7 +76,7 @@ def get_findings(self, filename, test):
+ file_path
+ description
+ severity
- ).encode("utf-8")
+ ).encode("utf-8"),
).hexdigest()
if key not in dupes:
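One nuance visible in the talisman hunk (and again in terrascan, twistlock and vcg further down): the comma is added after ).encode("utf-8"), i.e. after the argument to the hash constructor, but never after the last operand inside the plain grouping parentheses around the string concatenation. That is deliberate: a trailing comma inside bare parentheses would turn the expression into a tuple. A runnable sketch of the corrected shape, with placeholder values rather than real finding fields:

import hashlib

title = "Example finding"
file_path = "src/app.py"
severity = "High"

key = hashlib.sha256(
    (
        title
        + file_path
        + severity
        # no comma here: these are grouping parentheses, not a call
    ).encode("utf-8"),  # comma here: this is the argument to sha256()
).hexdigest()
print(key)
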
diff --git a/dojo/tools/tenable/csv_format.py b/dojo/tools/tenable/csv_format.py
index c88287cf6e..e4a3cd9cd8 100644
--- a/dojo/tools/tenable/csv_format.py
+++ b/dojo/tools/tenable/csv_format.py
@@ -51,7 +51,7 @@ def _format_cve(self, val):
if val is None or val == "":
return None
cve_match = re.findall(
- r"CVE-[0-9]+-[0-9]+", val.upper(), re.IGNORECASE
+ r"CVE-[0-9]+-[0-9]+", val.upper(), re.IGNORECASE,
)
if cve_match:
return cve_match
@@ -130,7 +130,7 @@ def get_findings(self, filename: str, test: Test):
cvss_vector = row.get("CVSS V3 Vector", "")
if cvss_vector != "":
find.cvssv3 = CVSS3(
- "CVSS:3.0/" + str(cvss_vector)
+ "CVSS:3.0/" + str(cvss_vector),
).clean_vector(output_prefix=True)
# Add CVSS score if present
@@ -143,7 +143,7 @@ def get_findings(self, filename: str, test: Test):
# FIXME support more than one CPE in Nessus CSV parser
if len(detected_cpe) > 1:
LOGGER.debug(
- "more than one CPE for a finding. NOT supported by Nessus CSV parser"
+ "more than one CPE for a finding. NOT supported by Nessus CSV parser",
)
cpe_decoded = CPE(detected_cpe[0])
find.component_name = (
diff --git a/dojo/tools/tenable/parser.py b/dojo/tools/tenable/parser.py
index b24b072a68..2c8e00c468 100644
--- a/dojo/tools/tenable/parser.py
+++ b/dojo/tools/tenable/parser.py
@@ -16,7 +16,7 @@ def get_description_for_scan_types(self, scan_type):
def get_findings(self, filename, test):
if filename.name.lower().endswith(
- ".xml"
+ ".xml",
) or filename.name.lower().endswith(".nessus"):
return TenableXMLParser().get_findings(filename, test)
elif filename.name.lower().endswith(".csv"):
diff --git a/dojo/tools/tenable/xml_format.py b/dojo/tools/tenable/xml_format.py
index d0c231b67d..11f842d2f7 100644
--- a/dojo/tools/tenable/xml_format.py
+++ b/dojo/tools/tenable/xml_format.py
@@ -74,7 +74,7 @@ def get_findings(self, filename: str, test: Test) -> list:
ip = host.attrib.get("name")
fqdn = None
fqdn_element_text = self.safely_get_element_text(
- host.find('.//HostProperties/tag[@name="host-fqdn"]')
+ host.find('.//HostProperties/tag[@name="host-fqdn"]'),
)
if fqdn_element_text is not None:
fqdn = fqdn_element_text
@@ -104,12 +104,12 @@ def get_findings(self, filename: str, test: Test) -> list:
description = ""
plugin_output = None
synopsis_element_text = self.safely_get_element_text(
- item.find("synopsis")
+ item.find("synopsis"),
)
if synopsis_element_text is not None:
description = f"{synopsis_element_text}\n\n"
plugin_output_element_text = self.safely_get_element_text(
- item.find("plugin_output")
+ item.find("plugin_output"),
)
if plugin_output_element_text is not None:
plugin_output = f"Plugin Output: {ip}{str(f':{port}' if port is not None else '')}"
@@ -123,27 +123,27 @@ def get_findings(self, filename: str, test: Test) -> list:
# Build up the impact
impact = ""
description_element_text = self.safely_get_element_text(
- item.find("description")
+ item.find("description"),
)
if description_element_text is not None:
impact = description_element_text + "\n\n"
cvss_element_text = self.safely_get_element_text(
- item.find("cvss")
+ item.find("cvss"),
)
if cvss_element_text is not None:
impact += f"CVSS Score: {cvss_element_text}\n"
cvssv3_element_text = self.safely_get_element_text(
- item.find("cvssv3")
+ item.find("cvssv3"),
)
if cvssv3_element_text is not None:
impact += f"CVSSv3 Score: {cvssv3_element_text}\n"
cvss_vector_element_text = self.safely_get_element_text(
- item.find("cvss_vector")
+ item.find("cvss_vector"),
)
if cvss_vector_element_text is not None:
impact += f"CVSS Vector: {cvss_vector_element_text}\n"
cvssv3_vector_element_text = self.safely_get_element_text(
- item.find("cvss3_vector")
+ item.find("cvss3_vector"),
)
if cvssv3_vector_element_text is not None:
impact += (
@@ -151,14 +151,14 @@ def get_findings(self, filename: str, test: Test) -> list:
)
cvss_base_score_element_text = (
self.safely_get_element_text(
- item.find("cvss_base_score")
+ item.find("cvss_base_score"),
)
)
if cvss_base_score_element_text is not None:
impact += f"CVSS Base Score: {cvss_base_score_element_text}\n"
cvss_temporal_score_element_text = (
self.safely_get_element_text(
- item.find("cvss_temporal_score")
+ item.find("cvss_temporal_score"),
)
)
if cvss_temporal_score_element_text is not None:
@@ -167,7 +167,7 @@ def get_findings(self, filename: str, test: Test) -> list:
# Set the mitigation
mitigation = "N/A"
mitigation_element_text = self.safely_get_element_text(
- item.find("solution")
+ item.find("solution"),
)
if mitigation_element_text is not None:
mitigation = mitigation_element_text
@@ -187,21 +187,21 @@ def get_findings(self, filename: str, test: Test) -> list:
vulnerability_id = None
cve_element_text = self.safely_get_element_text(
- item.find("cve")
+ item.find("cve"),
)
if cve_element_text is not None:
vulnerability_id = cve_element_text
cwe = None
cwe_element_text = self.safely_get_element_text(
- item.find("cwe")
+ item.find("cwe"),
)
if cwe_element_text is not None:
cwe = cwe_element_text
cvssv3 = None
cvssv3_element_text = self.safely_get_element_text(
- item.find("cvss3_vector")
+ item.find("cvss3_vector"),
)
if cvssv3_element_text is not None:
if "CVSS:3.0/" not in cvssv3_element_text:
@@ -209,12 +209,12 @@ def get_findings(self, filename: str, test: Test) -> list:
f"CVSS:3.0/{cvssv3_element_text}"
)
cvssv3 = CVSS3(cvssv3_element_text).clean_vector(
- output_prefix=True
+ output_prefix=True,
)
cvssv3_score = None
cvssv3_score_element_text = self.safely_get_element_text(
- item.find("cvssv3")
+ item.find("cvssv3"),
)
if cvssv3_score_element_text is not None:
cvssv3_score = cvssv3_score_element_text
diff --git a/dojo/tools/terrascan/parser.py b/dojo/tools/terrascan/parser.py
index ebc761f93b..c8b07f4e14 100644
--- a/dojo/tools/terrascan/parser.py
+++ b/dojo/tools/terrascan/parser.py
@@ -55,7 +55,7 @@ def get_findings(self, filename, test):
+ resource_type
+ file
+ str(line)
- ).encode("utf-8")
+ ).encode("utf-8"),
).hexdigest()
if dupe_key in dupes:
diff --git a/dojo/tools/testssl/parser.py b/dojo/tools/testssl/parser.py
index 01369ea439..8eb41184df 100644
--- a/dojo/tools/testssl/parser.py
+++ b/dojo/tools/testssl/parser.py
@@ -20,7 +20,7 @@ def get_findings(self, filename, test):
if isinstance(content, bytes):
content = content.decode("utf-8")
reader = csv.DictReader(
- io.StringIO(content), delimiter=",", quotechar='"'
+ io.StringIO(content), delimiter=",", quotechar='"',
)
dupes = {}
@@ -68,7 +68,7 @@ def get_findings(self, filename, test):
finding.cwe = int(row["cwe"].split("-")[1].strip())
# manage endpoint
finding.unsaved_endpoints = [
- Endpoint(host=row["fqdn/ip"].split("/")[0])
+ Endpoint(host=row["fqdn/ip"].split("/")[0]),
]
if row.get("port") and row["port"].isdigit():
finding.unsaved_endpoints[0].port = int(row["port"])
@@ -80,16 +80,16 @@ def get_findings(self, filename, test):
finding.description,
finding.title,
str(vulnerability),
- ]
- ).encode("utf-8")
+ ],
+ ).encode("utf-8"),
).hexdigest()
if dupe_key in dupes:
dupes[dupe_key].unsaved_endpoints.extend(
- finding.unsaved_endpoints
+ finding.unsaved_endpoints,
)
if dupes[dupe_key].unsaved_vulnerability_ids:
dupes[dupe_key].unsaved_vulnerability_ids.extend(
- finding.unsaved_vulnerability_ids
+ finding.unsaved_vulnerability_ids,
)
else:
dupes[
diff --git a/dojo/tools/tfsec/parser.py b/dojo/tools/tfsec/parser.py
index 8e145a92d9..d0bc390f3a 100644
--- a/dojo/tools/tfsec/parser.py
+++ b/dojo/tools/tfsec/parser.py
@@ -47,7 +47,7 @@ def get_findings(self, filename, test):
start_line = item.get("location").get("start_line")
end_line = item.get("location").get("end_line")
description = "\n".join(
- ["Rule ID: " + rule_id, item.get("description")]
+ ["Rule ID: " + rule_id, item.get("description")],
)
impact = item.get("impact")
resolution = item.get("resolution")
@@ -67,7 +67,7 @@ def get_findings(self, filename, test):
+ file
+ str(start_line)
+ str(end_line)
- ).encode("utf-8")
+ ).encode("utf-8"),
).hexdigest()
if dupe_key in dupes:
diff --git a/dojo/tools/threagile/parser.py b/dojo/tools/threagile/parser.py
index 418fabcf31..796d260e6d 100644
--- a/dojo/tools/threagile/parser.py
+++ b/dojo/tools/threagile/parser.py
@@ -44,7 +44,7 @@
"untrusted-deserialization": 502,
"wrong-communication-link": 1008,
"wrong-trust-boudnary-content": 1008,
- "xml-external-entity": 611
+ "xml-external-entity": 611,
}
@@ -92,7 +92,7 @@ def get_items(self, tree, test):
impact=item.get("exploitation_impact"),
severity=severity,
test=test,
- unique_id_from_tool=item.get("synthetic_id")
+ unique_id_from_tool=item.get("synthetic_id"),
)
self.determine_mitigated(finding, item)
self.determine_accepted(finding, item)
diff --git a/dojo/tools/trivy/parser.py b/dojo/tools/trivy/parser.py
index 400f71c36d..e50ce0963b 100644
--- a/dojo/tools/trivy/parser.py
+++ b/dojo/tools/trivy/parser.py
@@ -108,7 +108,7 @@ def get_findings(self, scan_file, test):
if len(service_name) >= 3:
service_name = service_name[:-3]
findings += self.get_result_items(
- test, service.get("Results", []), service_name
+ test, service.get("Results", []), service_name,
)
misconfigurations = data.get("Misconfigurations", [])
for service in misconfigurations:
@@ -125,7 +125,7 @@ def get_findings(self, scan_file, test):
if len(service_name) >= 3:
service_name = service_name[:-3]
findings += self.get_result_items(
- test, service.get("Results", []), service_name
+ test, service.get("Results", []), service_name,
)
resources = data.get("Resources", [])
for resource in resources:
@@ -141,7 +141,7 @@ def get_findings(self, scan_file, test):
if len(resource_name) >= 3:
resource_name = resource_name[:-3]
findings += self.get_result_items(
- test, resource.get("Results", []), resource_name
+ test, resource.get("Results", []), resource_name,
)
return findings
else:
@@ -259,7 +259,7 @@ def get_result_items(self, test, results, service_name=None, artifact_name=""):
target=target_target,
type=misc_type,
description=misc_description,
- message=misc_message
+ message=misc_message,
)
severity = TRIVY_SEVERITIES[misc_severity]
references = None
diff --git a/dojo/tools/trivy_operator/parser.py b/dojo/tools/trivy_operator/parser.py
index 7bd3a3d1da..3e83cfccf0 100644
--- a/dojo/tools/trivy_operator/parser.py
+++ b/dojo/tools/trivy_operator/parser.py
@@ -43,7 +43,7 @@ def get_findings(self, scan_file, test):
findings = []
if report is not None:
resource_namespace = labels.get(
- "trivy-operator.resource.namespace", ""
+ "trivy-operator.resource.namespace", "",
)
resource_kind = labels.get("trivy-operator.resource.kind", "")
resource_name = labels.get("trivy-operator.resource.name", "")
diff --git a/dojo/tools/trivy_operator/vulnerability_handler.py b/dojo/tools/trivy_operator/vulnerability_handler.py
index bdd282648e..13be3e55a4 100644
--- a/dojo/tools/trivy_operator/vulnerability_handler.py
+++ b/dojo/tools/trivy_operator/vulnerability_handler.py
@@ -55,7 +55,7 @@ def handle_vulns(self, service, vulnerabilities, test):
file_path = None
description = DESCRIPTION_TEMPLATE.format(
- title=vulnerability.get("title"), fixed_version=mitigation
+ title=vulnerability.get("title"), fixed_version=mitigation,
)
title = f"{vuln_id} {package_name} {package_version}"
diff --git a/dojo/tools/trufflehog/parser.py b/dojo/tools/trufflehog/parser.py
index 7c6dc905f0..9dd8234d09 100644
--- a/dojo/tools/trufflehog/parser.py
+++ b/dojo/tools/trufflehog/parser.py
@@ -168,7 +168,7 @@ def get_findings_v3(self, data, test):
severity = "Medium"
dupe_key = hashlib.md5(
- (file + detector_name + str(line_number) + commit + (raw + rawV2)).encode("utf-8")
+ (file + detector_name + str(line_number) + commit + (raw + rawV2)).encode("utf-8"),
).hexdigest()
if dupe_key in dupes:
@@ -193,7 +193,7 @@ def get_findings_v3(self, data, test):
url="N/A",
dynamic_finding=False,
static_finding=True,
- nb_occurences=1
+ nb_occurences=1,
)
dupes[dupe_key] = finding
@@ -207,7 +207,7 @@ def walk_dict(self, obj, tab_count=1):
for key, value in obj.items():
if isinstance(value, dict):
return_string += self.walk_dict(
- value, tab_count=(tab_count + 1)
+ value, tab_count=(tab_count + 1),
)
continue
else:
diff --git a/dojo/tools/trufflehog3/parser.py b/dojo/tools/trufflehog3/parser.py
index 11cbe68072..c4879bc4cc 100644
--- a/dojo/tools/trufflehog3/parser.py
+++ b/dojo/tools/trufflehog3/parser.py
@@ -142,7 +142,7 @@ def get_finding_current(self, json_data, test, dupes):
description = description[:-1]
dupe_key = hashlib.md5(
- (title + secret + severity + str(line)).encode("utf-8")
+ (title + secret + severity + str(line)).encode("utf-8"),
).hexdigest()
if dupe_key in dupes:
diff --git a/dojo/tools/trustwave/parser.py b/dojo/tools/trustwave/parser.py
index 229d658802..4e0d1562cc 100644
--- a/dojo/tools/trustwave/parser.py
+++ b/dojo/tools/trustwave/parser.py
@@ -20,7 +20,7 @@ def get_findings(self, filename, test):
if isinstance(content, bytes):
content = content.decode("utf-8")
reader = csv.DictReader(
- io.StringIO(content), delimiter=",", quotechar='"'
+ io.StringIO(content), delimiter=",", quotechar='"',
)
severity_mapping = {
@@ -44,7 +44,7 @@ def get_findings(self, filename, test):
if row.get("Port") is not None and not "" == row.get("Port"):
finding.unsaved_endpoints[0].port = int(row["Port"])
if row.get("Protocol") is not None and not "" == row.get(
- "Protocol"
+ "Protocol",
):
finding.unsaved_endpoints[0].protocol = row["Protocol"]
finding.title = row["Vulnerability Name"]
@@ -60,7 +60,7 @@ def get_findings(self, filename, test):
finding.unsaved_vulnerability_ids = [row.get("CVE")]
dupes_key = hashlib.sha256(
- f"{finding.severity}|{finding.title}|{finding.description}".encode()
+ f"{finding.severity}|{finding.title}|{finding.description}".encode(),
).hexdigest()
if dupes_key in dupes:
diff --git a/dojo/tools/trustwave_fusion_api/parser.py b/dojo/tools/trustwave_fusion_api/parser.py
index 6b6bf2a27a..1e3f14b92e 100644
--- a/dojo/tools/trustwave_fusion_api/parser.py
+++ b/dojo/tools/trustwave_fusion_api/parser.py
@@ -32,12 +32,12 @@ def get_findings(self, file, test):
item = get_item(node, test)
item_key = hashlib.sha256(
- f"{item.severity}|{item.title}|{item.description}".encode()
+ f"{item.severity}|{item.title}|{item.description}".encode(),
).hexdigest()
if item_key in items:
items[item_key].unsaved_endpoints.extend(
- item.unsaved_endpoints
+ item.unsaved_endpoints,
)
items[item_key].nb_occurences += 1
else:
diff --git a/dojo/tools/twistlock/parser.py b/dojo/tools/twistlock/parser.py
index d561555042..53a7f21fd1 100644
--- a/dojo/tools/twistlock/parser.py
+++ b/dojo/tools/twistlock/parser.py
@@ -49,7 +49,7 @@ def parse_issue(self, row, test):
+ "
",
mitigation=data_fix_status,
component_name=textwrap.shorten(
- data_package_name, width=200, placeholder="..."
+ data_package_name, width=200, placeholder="...",
),
component_version=data_package_version,
false_p=False,
@@ -73,7 +73,7 @@ def parse(self, filename, test):
if isinstance(content, bytes):
content = content.decode("utf-8")
reader = csv.DictReader(
- io.StringIO(content), delimiter=",", quotechar='"'
+ io.StringIO(content), delimiter=",", quotechar='"',
)
for row in reader:
finding = self.parse_issue(row, test)
@@ -85,7 +85,7 @@ def parse(self, filename, test):
+ finding.title
+ "|"
+ finding.description
- ).encode("utf-8")
+ ).encode("utf-8"),
).hexdigest()
if key not in dupes:
dupes[key] = finding
@@ -122,7 +122,7 @@ def get_items(self, tree, test):
unique_key = node["id"] + str(
node["packageName"]
+ str(node["packageVersion"])
- + str(node["severity"])
+ + str(node["severity"]),
)
items[unique_key] = item
return list(items.values())
diff --git a/dojo/tools/vcg/parser.py b/dojo/tools/vcg/parser.py
index 9c2bc3a540..0d29448a2c 100644
--- a/dojo/tools/vcg/parser.py
+++ b/dojo/tools/vcg/parser.py
@@ -81,7 +81,7 @@ def parse_issue(self, issue, test):
data.priority = 6
else:
data.priority = int(
- float(self.get_field_from_xml(issue, "Priority"))
+ float(self.get_field_from_xml(issue, "Priority")),
)
data.title = (
@@ -119,7 +119,7 @@ def parse(self, content, test):
+ finding.title
+ "|"
+ finding.description
- ).encode("utf-8")
+ ).encode("utf-8"),
).hexdigest()
if key not in dupes:
@@ -159,7 +159,7 @@ def parse_issue(self, row, test):
data.priority = 6
else:
data.priority = int(
- float(self.get_field_from_row(row, priority_column))
+ float(self.get_field_from_row(row, priority_column)),
)
data.severity = self.get_field_from_row(row, severity_column)
@@ -187,7 +187,7 @@ def parse(self, content, test):
+ finding.title
+ "|"
+ finding.description
- ).encode("utf-8")
+ ).encode("utf-8"),
).hexdigest()
if key not in dupes:
diff --git a/dojo/tools/veracode/json_parser.py b/dojo/tools/veracode/json_parser.py
index 9e6818effc..fe707b964c 100644
--- a/dojo/tools/veracode/json_parser.py
+++ b/dojo/tools/veracode/json_parser.py
@@ -197,7 +197,7 @@ def add_dynamic_details(self, finding, finding_details, backup_title=None) -> Fi
if url := finding_details.get("url"):
# Create the Endpoint object from the url
finding.unsaved_endpoints.append(
- Endpoint.from_uri(url)
+ Endpoint.from_uri(url),
)
else:
# build it from the other attributes
@@ -210,7 +210,7 @@ def add_dynamic_details(self, finding, finding_details, backup_title=None) -> Fi
host=host,
port=port,
path=path,
- )
+ ),
)
# Add the plugin if available
if plugin := finding_details.get("plugin"):
diff --git a/dojo/tools/veracode/xml_parser.py b/dojo/tools/veracode/xml_parser.py
index ce08e14f45..ecf620aa6c 100644
--- a/dojo/tools/veracode/xml_parser.py
+++ b/dojo/tools/veracode/xml_parser.py
@@ -30,7 +30,7 @@ def get_findings(self, filename, test):
app_id = root.attrib["app_id"]
report_date = datetime.strptime(
- root.attrib["last_update_time"], "%Y-%m-%d %H:%M:%S %Z"
+ root.attrib["last_update_time"], "%Y-%m-%d %H:%M:%S %Z",
)
dupes = {}
@@ -39,13 +39,13 @@ def get_findings(self, filename, test):
# This assumes `<category>` only exists within the `<severity>`
# nodes.
for category_node in root.findall(
- "x:severity/x:category", namespaces=XML_NAMESPACE
+ "x:severity/x:category", namespaces=XML_NAMESPACE,
):
# Mitigation text.
mitigation_text = ""
mitigation_text += (
category_node.find(
- "x:recommendations/x:para", namespaces=XML_NAMESPACE
+ "x:recommendations/x:para", namespaces=XML_NAMESPACE,
).get("text")
+ "\n\n"
)
@@ -54,11 +54,11 @@ def get_findings(self, filename, test):
[" * " + x.get("text") + "\n" for x in category_node.findall(
"x:recommendations/x:para/x:bulletitem",
namespaces=XML_NAMESPACE,
- )]
+ )],
)
for flaw_node in category_node.findall(
- "x:cwe/x:staticflaws/x:flaw", namespaces=XML_NAMESPACE
+ "x:cwe/x:staticflaws/x:flaw", namespaces=XML_NAMESPACE,
):
dupe_key = flaw_node.attrib["issueid"]
@@ -66,17 +66,17 @@ def get_findings(self, filename, test):
if dupe_key not in dupes:
# Add to list.
dupes[dupe_key] = self.__xml_static_flaw_to_finding(
- app_id, flaw_node, mitigation_text, test
+ app_id, flaw_node, mitigation_text, test,
)
for flaw_node in category_node.findall(
- "x:cwe/x:dynamicflaws/x:flaw", namespaces=XML_NAMESPACE
+ "x:cwe/x:dynamicflaws/x:flaw", namespaces=XML_NAMESPACE,
):
dupe_key = flaw_node.attrib["issueid"]
if dupe_key not in dupes:
dupes[dupe_key] = self.__xml_dynamic_flaw_to_finding(
- app_id, flaw_node, mitigation_text, test
+ app_id, flaw_node, mitigation_text, test,
)
# Get SCA findings
@@ -98,7 +98,7 @@ def get_findings(self, filename, test):
_version = component.attrib["version"]
for vulnerability in component.findall(
- "x:vulnerabilities/x:vulnerability", namespaces=XML_NAMESPACE
+ "x:vulnerabilities/x:vulnerability", namespaces=XML_NAMESPACE,
):
# We don't have a Id for SCA findings so just generate a random
# one
@@ -121,7 +121,7 @@ def __xml_flaw_to_unique_id(cls, app_id, xml_node):
@classmethod
def __xml_flaw_to_severity(cls, xml_node):
return cls.vc_severity_mapping.get(
- int(xml_node.attrib["severity"]), "Info"
+ int(xml_node.attrib["severity"]), "Info",
)
@classmethod
@@ -133,7 +133,7 @@ def __xml_flaw_to_finding(cls, app_id, xml_node, mitigation_text, test):
finding.static_finding = True
finding.dynamic_finding = False
finding.unique_id_from_tool = cls.__xml_flaw_to_unique_id(
- app_id, xml_node
+ app_id, xml_node,
)
# Report values
@@ -189,11 +189,11 @@ def __xml_flaw_to_finding(cls, app_id, xml_node, mitigation_text, test):
# This happens if any mitigation (including 'Potential false positive')
# was accepted in VC.
for mitigation in xml_node.findall(
- "x:mitigations/x:mitigation", namespaces=XML_NAMESPACE
+ "x:mitigations/x:mitigation", namespaces=XML_NAMESPACE,
):
_is_mitigated = True
_mitigated_date = datetime.strptime(
- mitigation.attrib["date"], "%Y-%m-%d %H:%M:%S %Z"
+ mitigation.attrib["date"], "%Y-%m-%d %H:%M:%S %Z",
)
finding.is_mitigated = _is_mitigated
finding.mitigated = _mitigated_date
@@ -217,10 +217,10 @@ def __xml_flaw_to_finding(cls, app_id, xml_node, mitigation_text, test):
@classmethod
def __xml_static_flaw_to_finding(
- cls, app_id, xml_node, mitigation_text, test
+ cls, app_id, xml_node, mitigation_text, test,
):
finding = cls.__xml_flaw_to_finding(
- app_id, xml_node, mitigation_text, test
+ app_id, xml_node, mitigation_text, test,
)
finding.static_finding = True
finding.dynamic_finding = False
@@ -253,10 +253,10 @@ def __xml_static_flaw_to_finding(
@classmethod
def __xml_dynamic_flaw_to_finding(
- cls, app_id, xml_node, mitigation_text, test
+ cls, app_id, xml_node, mitigation_text, test,
):
finding = cls.__xml_flaw_to_finding(
- app_id, xml_node, mitigation_text, test
+ app_id, xml_node, mitigation_text, test,
)
finding.static_finding = False
finding.dynamic_finding = True
@@ -279,7 +279,7 @@ def _get_cwe(val):
@classmethod
def __xml_sca_flaw_to_finding(
- cls, test, report_date, vendor, library, version, xml_node
+ cls, test, report_date, vendor, library, version, xml_node,
):
# Defaults
finding = Finding()
@@ -311,7 +311,7 @@ def __xml_sca_flaw_to_finding(
xml_node.attrib.get("first_found_date"),
xml_node.attrib["cvss_score"],
cls.vc_severity_mapping.get(
- int(xml_node.attrib["severity"]), "Info"
+ int(xml_node.attrib["severity"]), "Info",
),
xml_node.attrib["cve_summary"],
)
@@ -329,11 +329,11 @@ def __xml_sca_flaw_to_finding(
# This happens if any mitigation (including 'Potential false positive')
# was accepted in VC.
for mitigation in xml_node.findall(
- "x:mitigations/x:mitigation", namespaces=XML_NAMESPACE
+ "x:mitigations/x:mitigation", namespaces=XML_NAMESPACE,
):
_is_mitigated = True
_mitigated_date = datetime.strptime(
- mitigation.attrib["date"], "%Y-%m-%d %H:%M:%S %Z"
+ mitigation.attrib["date"], "%Y-%m-%d %H:%M:%S %Z",
)
finding.is_mitigated = _is_mitigated
finding.mitigated = _mitigated_date
diff --git a/dojo/tools/veracode_sca/parser.py b/dojo/tools/veracode_sca/parser.py
index a37a08cf7e..15de639330 100644
--- a/dojo/tools/veracode_sca/parser.py
+++ b/dojo/tools/veracode_sca/parser.py
@@ -81,7 +81,7 @@ def _get_findings_json(self, file, test):
"Project name: {}\n"
"Title: \n>{}"
"\n\n-----\n\n".format(
- issue.get("project_name"), vulnerability.get("title")
+ issue.get("project_name"), vulnerability.get("title"),
)
)
@@ -119,7 +119,7 @@ def _get_findings_json(self, file, test):
finding.cwe = int(cwe)
finding.references = "\n\n" + issue.get("_links").get("html").get(
- "href"
+ "href",
)
status = issue.get("issue_status")
if (
@@ -144,7 +144,7 @@ def get_findings_csv(self, file, test):
if isinstance(content, bytes):
content = content.decode("utf-8")
reader = csv.DictReader(
- io.StringIO(content), delimiter=",", quotechar='"'
+ io.StringIO(content), delimiter=",", quotechar='"',
)
csvarray = []
@@ -162,7 +162,7 @@ def get_findings_csv(self, file, test):
issueId = list(row.values())[0]
library = row.get("Library", None)
if row.get("Package manager") == "MAVEN" and row.get(
- "Coordinate 2"
+ "Coordinate 2",
):
library = row.get("Coordinate 2")
version = row.get("Version in use", None)
@@ -178,11 +178,11 @@ def get_findings_csv(self, file, test):
try:
if settings.USE_FIRST_SEEN:
date = datetime.strptime(
- row.get("Issue opened: Scan date"), "%d %b %Y %H:%M%p %Z"
+ row.get("Issue opened: Scan date"), "%d %b %Y %H:%M%p %Z",
)
else:
date = datetime.strptime(
- row.get("Issue opened: Scan date"), "%d %b %Y %H:%M%p %Z"
+ row.get("Issue opened: Scan date"), "%d %b %Y %H:%M%p %Z",
)
except Exception:
date = None
diff --git a/dojo/tools/wapiti/parser.py b/dojo/tools/wapiti/parser.py
index 4245e72f1a..deb6309d5a 100644
--- a/dojo/tools/wapiti/parser.py
+++ b/dojo/tools/wapiti/parser.py
@@ -56,7 +56,7 @@ def get_findings(self, file, test):
if reference_title.startswith("CWE"):
cwe = self.get_cwe(reference_title)
references.append(
- f"* [{reference_title}]({reference.findtext('url')})"
+ f"* [{reference_title}]({reference.findtext('url')})",
)
references = "\n".join(references)
@@ -84,12 +84,12 @@ def get_findings(self, file, test):
finding.unsaved_endpoints = [Endpoint.from_uri(url)]
finding.unsaved_req_resp = [
- {"req": entry.findtext("http_request"), "resp": ""}
+ {"req": entry.findtext("http_request"), "resp": ""},
]
# make dupe hash key
dupe_key = hashlib.sha256(
- str(description + title + severity).encode("utf-8")
+ str(description + title + severity).encode("utf-8"),
).hexdigest()
# check if dupes are present.
if dupe_key in dupes:
diff --git a/dojo/tools/wfuzz/parser.py b/dojo/tools/wfuzz/parser.py
index 2ac1dfbb27..41d4ebeee6 100644
--- a/dojo/tools/wfuzz/parser.py
+++ b/dojo/tools/wfuzz/parser.py
@@ -43,7 +43,7 @@ def get_findings(self, filename, test):
severity = self.severity_mapper(input=return_code)
description = f"The URL {url.to_text()} must not be exposed\n Please review your configuration\n"
dupe_key = hashlib.sha256(
- (url.to_text() + str(return_code)).encode("utf-8")
+ (url.to_text() + str(return_code)).encode("utf-8"),
).hexdigest()
if dupe_key in dupes:
@@ -67,10 +67,10 @@ def get_findings(self, filename, test):
host=url.host,
protocol=url.scheme,
port=url.port,
- )
+ ),
]
finding.unsaved_req_resp = [
- {"req": item["payload"], "resp": str(return_code)}
+ {"req": item["payload"], "resp": str(return_code)},
]
dupes[dupe_key] = finding
return list(dupes.values())
diff --git a/dojo/tools/whispers/parser.py b/dojo/tools/whispers/parser.py
index 5c819df6ac..5fa1401459 100644
--- a/dojo/tools/whispers/parser.py
+++ b/dojo/tools/whispers/parser.py
@@ -62,7 +62,7 @@ def get_findings(self, file, test):
references="https://cwe.mitre.org/data/definitions/798.html",
cwe=798,
severity=self.SEVERITY_MAP.get(
- vuln.get("severity"), "Info"
+ vuln.get("severity"), "Info",
),
file_path=vuln.get("file"),
line=int(vuln.get("line")),
@@ -70,7 +70,7 @@ def get_findings(self, file, test):
static_finding=True,
dynamic_finding=False,
test=test,
- )
+ ),
)
return findings
diff --git a/dojo/tools/whitehat_sentinel/parser.py b/dojo/tools/whitehat_sentinel/parser.py
index 77428939ec..c478786b59 100644
--- a/dojo/tools/whitehat_sentinel/parser.py
+++ b/dojo/tools/whitehat_sentinel/parser.py
@@ -43,7 +43,7 @@ def get_findings(self, file, test):
# Convert a WhiteHat Vuln with Attack Vectors to a list of DefectDojo
# findings
dojo_findings = self._convert_whitehat_sentinel_vulns_to_dojo_finding(
- findings_collection["collection"], test
+ findings_collection["collection"], test,
)
# # Loop through each vuln from WhiteHat
@@ -54,7 +54,7 @@ def get_findings(self, file, test):
return dojo_findings
def _convert_whitehat_severity_id_to_dojo_severity(
- self, whitehat_severity_id: int
+ self, whitehat_severity_id: int,
) -> Union[str, None]:
"""
Converts a WhiteHat Sentinel numerical severity to a DefectDojo severity.
@@ -109,12 +109,12 @@ def _parse_description(self, whitehat_sentinel_description: dict):
description = description_chunks[0]
description_ref["description"] = self.__remove_paragraph_tags(
- description
+ description,
)
if len(description_chunks) > 1:
description_ref["reference_link"] = self.__get_href_url(
- description_chunks[1]
+ description_chunks[1],
)
return description_ref
@@ -167,7 +167,7 @@ def __remove_paragraph_tags(self, html_string):
return re.sub(r"<p>|</p>", "", html_string)
def _convert_attack_vectors_to_endpoints(
- self, attack_vectors: List[dict]
+ self, attack_vectors: List[dict],
) -> List["Endpoint"]:
"""
Takes a list of Attack Vectors dictionaries from the WhiteHat vuln API and converts them to Defect Dojo
@@ -182,13 +182,13 @@ def _convert_attack_vectors_to_endpoints(
# This should be in the Endpoint class should it not?
for attack_vector in attack_vectors:
endpoints_list.append(
- Endpoint.from_uri(attack_vector["request"]["url"])
+ Endpoint.from_uri(attack_vector["request"]["url"]),
)
return endpoints_list
def _convert_whitehat_sentinel_vulns_to_dojo_finding(
- self, whitehat_sentinel_vulns: [dict], test: str
+ self, whitehat_sentinel_vulns: [dict], test: str,
):
"""
Converts a WhiteHat Sentinel vuln to a DefectDojo finding
@@ -206,10 +206,10 @@ def _convert_whitehat_sentinel_vulns_to_dojo_finding(
if mitigated_ts is not None:
mitigated_ts = datetime.strptime(mitigated_ts, "%Y-%m-%dT%H:%M:%SZ")
cwe = self._parse_cwe_from_tags(
- whitehat_vuln["attack_vectors"][0].get("scanner_tags", [])
+ whitehat_vuln["attack_vectors"][0].get("scanner_tags", []),
)
description_ref = self._parse_description(
- whitehat_vuln["description"]
+ whitehat_vuln["description"],
)
description = description_ref["description"]
references = (
@@ -225,7 +225,7 @@ def _convert_whitehat_sentinel_vulns_to_dojo_finding(
else whitehat_vuln.get("risk")
)
severity = self._convert_whitehat_severity_id_to_dojo_severity(
- risk_id
+ risk_id,
)
false_positive = whitehat_vuln.get("status") == "invalid"
@@ -233,7 +233,7 @@ def _convert_whitehat_sentinel_vulns_to_dojo_finding(
is_mitigated = not active
dupe_key = hashlib.md5(
- whitehat_vuln["id"].encode("utf-8")
+ whitehat_vuln["id"].encode("utf-8"),
).hexdigest()
if dupe_key in dupes:
@@ -266,7 +266,7 @@ def _convert_whitehat_sentinel_vulns_to_dojo_finding(
# Get Endpoints from Attack Vectors
endpoints = self._convert_attack_vectors_to_endpoints(
- whitehat_vuln["attack_vectors"]
+ whitehat_vuln["attack_vectors"],
)
finding.unsaved_endpoints = endpoints
diff --git a/dojo/tools/wiz/parser.py b/dojo/tools/wiz/parser.py
index 722a7d2dec..a68ecae2bb 100644
--- a/dojo/tools/wiz/parser.py
+++ b/dojo/tools/wiz/parser.py
@@ -86,6 +86,6 @@ def get_findings(self, filename, test):
dynamic_finding=True,
mitigation=row.get("Remediation Recommendation"),
test=test,
- )
+ ),
)
return findings
diff --git a/dojo/tools/wpscan/parser.py b/dojo/tools/wpscan/parser.py
index 30f523265c..70081dc064 100644
--- a/dojo/tools/wpscan/parser.py
+++ b/dojo/tools/wpscan/parser.py
@@ -46,7 +46,7 @@ def get_vulnerabilities(
dynamic_finding=True,
static_finding=False,
scanner_confidence=self._get_scanner_confidence(
- detection_confidence
+ detection_confidence,
),
unique_id_from_tool=vul["references"]["wpvulndb"][0],
nb_occurences=1,
@@ -68,12 +68,12 @@ def get_vulnerabilities(
finding.unsaved_vulnerability_ids = []
for vulnerability_id in vul["references"]["cve"]:
finding.unsaved_vulnerability_ids.append(
- f"CVE-{vulnerability_id}"
+ f"CVE-{vulnerability_id}",
)
# internal de-duplication
dupe_key = hashlib.sha256(
- str(finding.unique_id_from_tool).encode("utf-8")
+ str(finding.unique_id_from_tool).encode("utf-8"),
).hexdigest()
if dupe_key in dupes:
find = dupes[dupe_key]
@@ -121,13 +121,13 @@ def get_findings(self, file, test):
# manage interesting interesting_findings
for interesting_finding in tree.get("interesting_findings", []):
references = self.generate_references(
- interesting_finding["references"]
+ interesting_finding["references"],
)
description = "\n".join(
[
"**Type:** `" + interesting_finding.get("type") + "`\n",
"**Url:** `" + interesting_finding["url"] + "`\n",
- ]
+ ],
)
if interesting_finding["interesting_entries"]:
description += (
@@ -143,7 +143,7 @@ def get_findings(self, file, test):
dynamic_finding=True,
static_finding=False,
scanner_confidence=self._get_scanner_confidence(
- interesting_finding.get("confidence")
+ interesting_finding.get("confidence"),
),
)
# manage endpoint
@@ -159,8 +159,8 @@ def get_findings(self, file, test):
str(
"interesting_findings"
+ finding.title
- + interesting_finding["url"]
- ).encode("utf-8")
+ + interesting_finding["url"],
+ ).encode("utf-8"),
).hexdigest()
if dupe_key in dupes:
find = dupes[dupe_key]
diff --git a/dojo/tools/xanitizer/parser.py b/dojo/tools/xanitizer/parser.py
index 0486967517..13a898b9f6 100644
--- a/dojo/tools/xanitizer/parser.py
+++ b/dojo/tools/xanitizer/parser.py
@@ -100,7 +100,7 @@ def generate_title(self, finding, line):
def generate_description(self, finding):
description = "**Description:**\n{}".format(
- finding.find("description").text
+ finding.find("description").text,
)
if finding.find("startNode") is not None:
@@ -108,11 +108,11 @@ def generate_description(self, finding):
endnode = finding.find("endNode")
description = f"{description}\n-----\n"
description = "{}\n**Starting at:** {} - **Line** {}".format(
- description, startnode.get("classFQN"), startnode.get("lineNo")
+ description, startnode.get("classFQN"), startnode.get("lineNo"),
)
description = self.add_code(startnode, False, description)
description = "{}\n\n**Ending at:** {} - **Line** {}".format(
- description, endnode.get("classFQN"), endnode.get("lineNo")
+ description, endnode.get("classFQN"), endnode.get("lineNo"),
)
description = self.add_code(endnode, True, description)
elif finding.find("node") is not None:
@@ -146,11 +146,11 @@ def add_code(self, node, showline, description):
for code in codelines:
if code.text:
description = "{}\n{}: {}".format(
- description, code.get("lineNo"), code.text
+ description, code.get("lineNo"), code.text,
)
else:
description = "{}\n{}: ".format(
- description, code.get("lineNo")
+ description, code.get("lineNo"),
)
return description
@@ -158,11 +158,11 @@ def add_code(self, node, showline, description):
def generate_file_path(self, finding):
if finding.find("endNode") is not None and finding.find("endNode").get(
- "relativePath"
+ "relativePath",
):
return finding.find("endNode").get("relativePath")
elif finding.find("node") is not None and finding.find("node").get(
- "relativePath"
+ "relativePath",
):
return finding.find("node").get("relativePath")
diff --git a/dojo/tools/zap/parser.py b/dojo/tools/zap/parser.py
index f8e983f152..c56ec6169f 100644
--- a/dojo/tools/zap/parser.py
+++ b/dojo/tools/zap/parser.py
@@ -35,10 +35,10 @@ def get_findings(self, file, test):
title=item.findtext("alert"),
description=html2text(item.findtext("desc")),
severity=self.MAPPING_SEVERITY.get(
- item.findtext("riskcode")
+ item.findtext("riskcode"),
),
scanner_confidence=self.MAPPING_CONFIDENCE.get(
- item.findtext("riskcode")
+ item.findtext("riskcode"),
),
mitigation=html2text(item.findtext("solution")),
references=html2text(item.findtext("reference")),
@@ -62,10 +62,10 @@ def get_findings(self, file, test):
if instance.findtext("requestheader") is not None:
# Assemble the request from header and body
request = instance.findtext(
- "requestheader"
+ "requestheader",
) + instance.findtext("requestbody")
response = instance.findtext(
- "responseheader"
+ "responseheader",
) + instance.findtext("responsebody")
else:
# The report is in the regular XML format, without requests and responses.
@@ -81,7 +81,7 @@ def get_findings(self, file, test):
endpoint.fragment = None
finding.unsaved_endpoints.append(endpoint)
finding.unsaved_req_resp.append(
- {"req": request, "resp": response}
+ {"req": request, "resp": response},
)
items.append(finding)
return items
diff --git a/dojo/urls.py b/dojo/urls.py
index b9d9493c66..dd438c8f72 100644
--- a/dojo/urls.py
+++ b/dojo/urls.py
@@ -222,7 +222,7 @@
f"^{get_system_setting('url_prefix')}api/v2/api-token-auth/",
tokenviews.obtain_auth_token,
name='api-token-auth',
- )
+ ),
]
urlpatterns = []
@@ -243,7 +243,7 @@
re_path(r'^robots.txt', lambda x: HttpResponse("User-Agent: *\nDisallow: /", content_type="text/plain"), name="robots_file"),
re_path(r'^manage_files/(?P<oid>\d+)/(?P<obj_type>\w+)$', views.manage_files, name='manage_files'),
re_path(r'^access_file/(?P<fid>\d+)/(?P<oid>\d+)/(?P<obj_type>\w+)$', views.access_file, name='access_file'),
- re_path(r'^{}/(?P<path>.*)$'.format(settings.MEDIA_URL.strip('/')), views.protected_serve, {'document_root': settings.MEDIA_ROOT})
+ re_path(r'^{}/(?P<path>.*)$'.format(settings.MEDIA_URL.strip('/')), views.protected_serve, {'document_root': settings.MEDIA_ROOT}),
]
urlpatterns += api_v2_urls
diff --git a/dojo/user/urls.py b/dojo/user/urls.py
index adf3dd80cb..8dbf0b0686 100644
--- a/dojo/user/urls.py
+++ b/dojo/user/urls.py
@@ -25,7 +25,7 @@
re_path(r'^user/(?P<uid>\d+)/add_product_type_member$', views.add_product_type_member, name='add_product_type_member_user'),
re_path(r'^user/(?P<uid>\d+)/add_product_member$', views.add_product_member, name='add_product_member_user'),
re_path(r'^user/(?P<uid>\d+)/add_group_member$', views.add_group_member, name='add_group_member_user'),
- re_path(r'^user/(?P<uid>\d+)/edit_permissions$', views.edit_permissions, name='edit_user_permissions')
+ re_path(r'^user/(?P<uid>\d+)/edit_permissions$', views.edit_permissions, name='edit_user_permissions'),
]
if settings.FORGOT_PASSWORD:
urlpatterns.extend([
@@ -50,7 +50,7 @@
), name="forgot_username_done"),
re_path(r'^forgot_username/$', views.DojoForgotUsernameView.as_view(
template_name='login/forgot_username.html',
- success_url=reverse_lazy("forgot_username_done")
+ success_url=reverse_lazy("forgot_username_done"),
), name="forgot_username"),
])
diff --git a/dojo/user/views.py b/dojo/user/views.py
index ea60c93fc1..25d4692ea9 100644
--- a/dojo/user/views.py
+++ b/dojo/user/views.py
@@ -126,7 +126,7 @@ def login_view(request):
settings.AUTH0_OAUTH2_ENABLED,
settings.KEYCLOAK_OAUTH2_ENABLED,
settings.GITHUB_ENTERPRISE_OAUTH2_ENABLED,
- settings.SAML2_ENABLED
+ settings.SAML2_ENABLED,
]) == 1 and 'force_login_form' not in request.GET:
if settings.GOOGLE_OAUTH_ENABLED:
social_auth = 'google-oauth2'
@@ -587,7 +587,7 @@ def add_group_member(request, uid):
add_breadcrumb(title=_("Add Group Member"), top_level=False, request=request)
return render(request, 'dojo/new_group_member_user.html', {
'user': user,
- 'form': memberform
+ 'form': memberform,
})
diff --git a/dojo/utils.py b/dojo/utils.py
index d66c538529..2bf51f60f1 100644
--- a/dojo/utils.py
+++ b/dojo/utils.py
@@ -89,14 +89,14 @@ def do_false_positive_history(finding, *args, **kwargs):
existing_findings = match_finding_to_existing_findings(finding, product=finding.test.engagement.product)
deduplicationLogger.debug(
"FALSE_POSITIVE_HISTORY: Found %i existing findings in the same product",
- len(existing_findings)
+ len(existing_findings),
)
existing_fp_findings = existing_findings.filter(false_p=True)
deduplicationLogger.debug(
"FALSE_POSITIVE_HISTORY: Found %i existing findings in the same product "
+ "that were previously marked as false positive",
- len(existing_fp_findings)
+ len(existing_fp_findings),
)
if existing_fp_findings:
@@ -119,7 +119,7 @@ def do_false_positive_history(finding, *args, **kwargs):
for find in to_mark_as_fp:
deduplicationLogger.debug(
"FALSE_POSITIVE_HISTORY: Marking Finding %i:%s from %s as false positive",
- find.id, find.title, find.test.engagement
+ find.id, find.title, find.test.engagement,
)
try:
find.false_p = True
@@ -164,14 +164,14 @@ def match_finding_to_existing_findings(finding, product=None, engagement=None, t
deduplicationLogger.debug(
'Matching finding %i:%s to existing findings in %s %s using %s as deduplication algorithm.',
- finding.id, finding.title, custom_filter_type, list(custom_filter.values())[0], deduplication_algorithm
+ finding.id, finding.title, custom_filter_type, list(custom_filter.values())[0], deduplication_algorithm,
)
if deduplication_algorithm == 'hash_code':
return (
Finding.objects.filter(
**custom_filter,
- hash_code=finding.hash_code
+ hash_code=finding.hash_code,
).exclude(hash_code=None)
.exclude(id=finding.id)
.order_by('id')
@@ -181,7 +181,7 @@ def match_finding_to_existing_findings(finding, product=None, engagement=None, t
return (
Finding.objects.filter(
**custom_filter,
- unique_id_from_tool=finding.unique_id_from_tool
+ unique_id_from_tool=finding.unique_id_from_tool,
).exclude(unique_id_from_tool=None)
.exclude(id=finding.id)
.order_by('id')
@@ -193,7 +193,7 @@ def match_finding_to_existing_findings(finding, product=None, engagement=None, t
(
(Q(hash_code__isnull=False) & Q(hash_code=finding.hash_code))
| (Q(unique_id_from_tool__isnull=False) & Q(unique_id_from_tool=finding.unique_id_from_tool))
- )
+ ),
).exclude(id=finding.id).order_by('id')
deduplicationLogger.debug(query.query)
return query
@@ -209,7 +209,7 @@ def match_finding_to_existing_findings(finding, product=None, engagement=None, t
**custom_filter,
title=finding.title,
severity=finding.severity,
- numerical_severity=Finding.get_numerical_severity(finding.severity)
+ numerical_severity=Finding.get_numerical_severity(finding.severity),
).order_by('id')
)
@@ -625,7 +625,7 @@ def findings_this_period(findings, period_type, stuff, o_stuff, a_stuff):
'one': 0,
'two': 0,
'three': 0,
- 'total': 0
+ 'total': 0,
}
a_count = {
'closed': 0,
@@ -633,7 +633,7 @@ def findings_this_period(findings, period_type, stuff, o_stuff, a_stuff):
'one': 0,
'two': 0,
'three': 0,
- 'total': 0
+ 'total': 0,
}
for f in findings:
if f.mitigated is not None and end_of_period >= f.mitigated >= start_of_period:
@@ -710,7 +710,7 @@ def add_breadcrumb(parent=None,
crumbs = [
{
'title': _('Home'),
- 'url': reverse('home')
+ 'url': reverse('home'),
},
]
if parent is not None and getattr(parent, "get_breadcrumbs", None):
@@ -718,7 +718,7 @@ def add_breadcrumb(parent=None,
else:
crumbs += [{
'title': title,
- 'url': request.get_full_path() if url is None else url
+ 'url': request.get_full_path() if url is None else url,
}]
else:
resolver = get_resolver(None).resolve
@@ -727,12 +727,12 @@ def add_breadcrumb(parent=None,
if title is not None:
obj_crumbs += [{
'title': title,
- 'url': request.get_full_path() if url is None else url
+ 'url': request.get_full_path() if url is None else url,
}]
else:
obj_crumbs = [{
'title': title,
- 'url': request.get_full_path() if url is None else url
+ 'url': request.get_full_path() if url is None else url,
}]
for crumb in crumbs:
@@ -930,13 +930,13 @@ def get_period_counts_legacy(findings,
new_date.year,
new_date.month,
monthrange(new_date.year, new_date.month)[1],
- tzinfo=timezone.get_current_timezone())
+ tzinfo=timezone.get_current_timezone()),
])
else:
risks_a = None
crit_count, high_count, med_count, low_count, _ = [
- 0, 0, 0, 0, 0
+ 0, 0, 0, 0, 0,
]
for finding in findings:
if new_date <= datetime.combine(finding.date, datetime.min.time(
@@ -956,7 +956,7 @@ def get_period_counts_legacy(findings,
crit_count, high_count, med_count, low_count, total,
closed_in_range_count])
crit_count, high_count, med_count, low_count, _ = [
- 0, 0, 0, 0, 0
+ 0, 0, 0, 0, 0,
]
if risks_a is not None:
for finding in risks_a:
@@ -976,7 +976,7 @@ def get_period_counts_legacy(findings,
return {
'opened_per_period': opened_in_period,
- 'accepted_per_period': accepted_in_period
+ 'accepted_per_period': accepted_in_period,
}
@@ -1023,7 +1023,7 @@ def get_period_counts(findings,
if accepted_findings:
date_range = [
datetime(new_date.year, new_date.month, new_date.day, tzinfo=tz),
- datetime(end_date.year, end_date.month, end_date.day, tzinfo=tz)
+ datetime(end_date.year, end_date.month, end_date.day, tzinfo=tz),
]
try:
risks_a = accepted_findings.filter(risk_acceptance__created__date__range=date_range)
@@ -1033,13 +1033,13 @@ def get_period_counts(findings,
risks_a = None
f_crit_count, f_high_count, f_med_count, f_low_count, _ = [
- 0, 0, 0, 0, 0
+ 0, 0, 0, 0, 0,
]
ra_crit_count, ra_high_count, ra_med_count, ra_low_count, _ = [
- 0, 0, 0, 0, 0
+ 0, 0, 0, 0, 0,
]
active_crit_count, active_high_count, active_med_count, active_low_count, _ = [
- 0, 0, 0, 0, 0
+ 0, 0, 0, 0, 0,
]
for finding in findings:
@@ -1113,7 +1113,7 @@ def get_period_counts(findings,
return {
'opened_per_period': opened_in_period,
'accepted_per_period': accepted_in_period,
- 'active_per_period': active_in_period
+ 'active_per_period': active_in_period,
}
@@ -1191,7 +1191,7 @@ def opened_in_period(start_date, end_date, **kwargs):
out_of_scope=False,
mitigated__isnull=True,
**kwargs,
- severity__in=('Critical', 'High', 'Medium', 'Low')).count()
+ severity__in=('Critical', 'High', 'Medium', 'Low')).count(),
}
for o in opened_in_period:
@@ -2295,7 +2295,7 @@ def __init__(self, *args, **kwargs):
'Engagement': [
(Finding, 'test__engagement'),
(Test, 'engagement')],
- 'Test': [(Finding, 'test')]
+ 'Test': [(Finding, 'test')],
}
@dojo_async_task
@@ -2359,7 +2359,7 @@ def log_user_login(sender, request, user, **kwargs):
logger.info('login user: {user} via ip: {ip}'.format(
user=user.username,
- ip=request.META.get('REMOTE_ADDR')
+ ip=request.META.get('REMOTE_ADDR'),
))
@@ -2368,7 +2368,7 @@ def log_user_logout(sender, request, user, **kwargs):
logger.info('logout user: {user} via ip: {ip}'.format(
user=user.username,
- ip=request.META.get('REMOTE_ADDR')
+ ip=request.META.get('REMOTE_ADDR'),
))
@@ -2378,11 +2378,11 @@ def log_user_login_failed(sender, credentials, request, **kwargs):
if 'username' in credentials:
logger.warning('login failed for: {credentials} via ip: {ip}'.format(
credentials=credentials['username'],
- ip=request.META['REMOTE_ADDR']
+ ip=request.META['REMOTE_ADDR'],
))
else:
logger.error('login failed because of missing username via ip: {ip}'.format(
- ip=request.META['REMOTE_ADDR']
+ ip=request.META['REMOTE_ADDR'],
))
@@ -2514,7 +2514,7 @@ def get_open_findings_burndown(product):
'High': [],
'Medium': [],
'Low': [],
- 'Info': []
+ 'Info': [],
}
# count the number of open findings for the 90-day window
diff --git a/dojo/views.py b/dojo/views.py
index 09a0dcad73..cd22e6ac2d 100644
--- a/dojo/views.py
+++ b/dojo/views.py
@@ -118,7 +118,7 @@ def action_history(request, cid, oid):
"obj": obj,
"test": test,
"object_value": object_value,
- "finding": finding
+ "finding": finding,
})
diff --git a/dojo/widgets.py b/dojo/widgets.py
index 0d0b245e41..83d3267c31 100644
--- a/dojo/widgets.py
+++ b/dojo/widgets.py
@@ -27,6 +27,6 @@ def render(self, name, value, attrs=None, renderer=None):
'paginator': paginator,
'page_number': page_number,
'page': page,
- 'page_param': 'apage'
+ 'page_param': 'apage',
}
return render_to_string(self.template_name, context)
diff --git a/dojo/wsgi.py b/dojo/wsgi.py
index 1f79043d49..0e8b2c7f8c 100644
--- a/dojo/wsgi.py
+++ b/dojo/wsgi.py
@@ -45,7 +45,7 @@ def is_debugger_listening(port):
# Required, otherwise debugpy will try to use the uwsgi binary as the python interpreter - https://github.com/microsoft/debugpy/issues/262
debugpy.configure({
"python": "python",
- "subProcess": True
+ "subProcess": True,
})
debugpy.listen(("0.0.0.0", debugpy_port)) # noqa: T100
if os.environ.get("DD_DEBUG_WAIT_FOR_CLIENT") == "True":
diff --git a/ruff.toml b/ruff.toml
index ed814e15f9..30a62e2c0c 100644
--- a/ruff.toml
+++ b/ruff.toml
@@ -41,6 +41,7 @@ select = [
"ASYNC",
"TRIO",
"S2", "S5", "S7",
+ "COM",
"C4",
"T10",
"DJ003", "DJ012", "DJ013",
diff --git a/tests/base_test_class.py b/tests/base_test_class.py
index 8f27bed85b..e676e91916 100644
--- a/tests/base_test_class.py
+++ b/tests/base_test_class.py
@@ -70,7 +70,7 @@ def setUpClass(cls):
dd_driver_options.add_argument("--no-sandbox")
dd_driver_options.add_argument("--disable-dev-shm-usage")
dd_driver_options.add_argument(
- "--disable-gpu"
+ "--disable-gpu",
) # on windows sometimes chrome can't start with certain gpu driver versions, even in headless mode
# start maximized or at least with sufficient with because datatables will hide certain controls when the screen is too narrow
@@ -89,7 +89,7 @@ def setUpClass(cls):
# change path of chromedriver according to which directory you have chromedriver.
print(
- "starting chromedriver with options: ", vars(dd_driver_options), desired
+ "starting chromedriver with options: ", vars(dd_driver_options), desired,
)
# TODO - this filter needs to be removed
@@ -124,14 +124,14 @@ def login_page(self):
driver.find_element(By.ID, "id_username").send_keys(os.environ["DD_ADMIN_USER"])
driver.find_element(By.ID, "id_password").clear()
driver.find_element(By.ID, "id_password").send_keys(
- os.environ["DD_ADMIN_PASSWORD"]
+ os.environ["DD_ADMIN_PASSWORD"],
)
driver.find_element(By.CSS_SELECTOR, "button.btn.btn-success").click()
self.assertFalse(
self.is_element_by_css_selector_present(
- ".alert-danger", "Please enter a correct username and password"
- )
+ ".alert-danger", "Please enter a correct username and password",
+ ),
)
return driver
@@ -146,8 +146,8 @@ def login_standard_page(self):
self.assertFalse(
self.is_element_by_css_selector_present(
- ".alert-danger", "Please enter a correct username and password"
- )
+ ".alert-danger", "Please enter a correct username and password",
+ ),
)
return driver
@@ -244,7 +244,7 @@ def wait_for_datatable_if_content(self, no_content_id, wrapper_id):
if no_content is None:
# wait for product_wrapper div as datatables javascript modifies the DOM on page load.
WebDriverWait(self.driver, 30).until(
- EC.presence_of_element_located((By.ID, wrapper_id))
+ EC.presence_of_element_located((By.ID, wrapper_id)),
)
def is_element_by_css_selector_present(self, selector, text=None):
@@ -353,7 +353,7 @@ def set_block_execution(self, block_execution=True):
# check if it's enabled after reload
self.assertTrue(
driver.find_element(By.ID, "id_block_execution").is_selected()
- == block_execution
+ == block_execution,
)
return driver
@@ -428,19 +428,19 @@ def assertNoConsoleErrors(self):
print(entry)
print(
- "There was a SEVERE javascript error in the console, please check all steps fromt the current test to see where it happens"
+ "There was a SEVERE javascript error in the console, please check all steps fromt the current test to see where it happens",
)
print(
"Currently there is no reliable way to find out at which url the error happened, but it could be: ."
- + self.driver.current_url
+ + self.driver.current_url,
)
if self.accept_javascript_errors:
print(
- "WARNING: skipping SEVERE javascript error because accept_javascript_errors is True!"
+ "WARNING: skipping SEVERE javascript error because accept_javascript_errors is True!",
)
elif re.search(accepted_javascript_messages, entry["message"]):
print(
- "WARNING: skipping javascript errors related to known issues images, see https://github.com/DefectDojo/django-DefectDojo/blob/master/tests/base_test_class.py#L324"
+ "WARNING: skipping javascript errors related to known issues images, see https://github.com/DefectDojo/django-DefectDojo/blob/master/tests/base_test_class.py#L324",
)
else:
self.assertNotEqual(entry["level"], "SEVERE")
diff --git a/tests/false_positive_history_test.py b/tests/false_positive_history_test.py
index 5d4c4c91f4..d330ffb194 100644
--- a/tests/false_positive_history_test.py
+++ b/tests/false_positive_history_test.py
@@ -102,13 +102,13 @@ def test_retroactive_edit_finding(self):
product_name='QA Test',
engagement_name='FP History Eng 1',
test_name='FP History Test',
- finding_name='Fake Vulnerability for Edit Test'
+ finding_name='Fake Vulnerability for Edit Test',
)
finding_2 = self.create_finding(
product_name='QA Test',
engagement_name='FP History Eng 2',
test_name='FP History Test',
- finding_name='Fake Vulnerability for Edit Test'
+ finding_name='Fake Vulnerability for Edit Test',
)
# Assert that both findings are active
self.assert_is_active(finding_1)
@@ -130,13 +130,13 @@ def test_retroactive_bulk_edit_finding(self):
product_name='QA Test',
engagement_name='FP History Eng 1',
test_name='FP History Test',
- finding_name='Fake Vulnerability for Bulk Edit Test'
+ finding_name='Fake Vulnerability for Bulk Edit Test',
)
finding_2 = self.create_finding(
product_name='QA Test',
engagement_name='FP History Eng 2',
test_name='FP History Test',
- finding_name='Fake Vulnerability for Bulk Edit Test'
+ finding_name='Fake Vulnerability for Bulk Edit Test',
)
# Assert that both findings are active
self.assert_is_active(finding_1)
diff --git a/tests/notifications_test.py b/tests/notifications_test.py
index d6f0b46382..2a5c832ab2 100644
--- a/tests/notifications_test.py
+++ b/tests/notifications_test.py
@@ -136,7 +136,7 @@ def test_user_mail_notifications_change(self):
originally_selected = {
'product_added': driver.find_element(By.XPATH,
"//input[@name='product_added' and @value='mail']").is_selected(),
- 'scan_added': driver.find_element(By.XPATH, "//input[@name='scan_added' and @value='mail']").is_selected()
+ 'scan_added': driver.find_element(By.XPATH, "//input[@name='scan_added' and @value='mail']").is_selected(),
}
driver.find_element(By.XPATH, "//input[@name='product_added' and @value='mail']").click()
diff --git a/tests/zap.py b/tests/zap.py
index 3516779342..db0f77bf3d 100755
--- a/tests/zap.py
+++ b/tests/zap.py
@@ -76,7 +76,7 @@ class Main:
for alert in zap.core.alerts():
sort_by_url[alert['url']].append({
'risk': alert['risk'],
- 'alert': alert['alert']
+ 'alert': alert['alert'],
})
summary = PrettyTable(["Risk", "Count"])
diff --git a/unittests/dojo_test_case.py b/unittests/dojo_test_case.py
index 1a4468c6f0..2c8cd2abfe 100644
--- a/unittests/dojo_test_case.py
+++ b/unittests/dojo_test_case.py
@@ -176,7 +176,7 @@ def get_new_product_with_jira_project_data(self):
'jira-project-form-push_notes': 'on',
'jira-project-form-product_jira_sla_notification': 'on',
'jira-project-form-custom_fields': 'null',
- 'sla_configuration': 1
+ 'sla_configuration': 1,
}
@@ -207,7 +207,7 @@ def get_product_with_jira_project_data(self, product):
'jira-project-form-push_notes': 'on',
'jira-project-form-product_jira_sla_notification': 'on',
'jira-project-form-custom_fields': 'null',
- 'sla_configuration': 1
+ 'sla_configuration': 1,
}
@@ -223,7 +223,7 @@ def get_product_with_jira_project_data2(self, product):
'jira-project-form-push_notes': 'on',
'jira-project-form-product_jira_sla_notification': 'on',
'jira-project-form-custom_fields': 'null',
- 'sla_configuration': 1
+ 'sla_configuration': 1,
}
diff --git a/unittests/test_api_sonarqube_updater.py b/unittests/test_api_sonarqube_updater.py
index 56d341093f..42f3f65731 100644
--- a/unittests/test_api_sonarqube_updater.py
+++ b/unittests/test_api_sonarqube_updater.py
@@ -15,83 +15,83 @@ def setUp(self):
def test_transitions_for_sonarqube_from_open_1(self):
self.assertEqual(
self.updater.get_sonarqube_required_transitions_for('OPEN', 'CONFIRMED'),
- ['confirm']
+ ['confirm'],
)
def test_transitions_for_sonarqube_from_open_2(self):
self.assertEqual(
self.updater.get_sonarqube_required_transitions_for('OPEN', 'RESOLVED / FIXED'),
- ['resolve']
+ ['resolve'],
)
def test_transitions_for_sonarqube_from_reopened_1(self):
self.assertEqual(
self.updater.get_sonarqube_required_transitions_for('REOPENED', 'RESOLVED / FIXED'),
- ['resolve']
+ ['resolve'],
)
def test_transitions_for_sonarqube_from_reopened_2(self):
self.assertEqual(
self.updater.get_sonarqube_required_transitions_for('REOPENED', 'CONFIRMED'),
- ['confirm']
+ ['confirm'],
)
def test_transitions_for_sonarqube_from_resolved_1(self):
self.assertEqual(
self.updater.get_sonarqube_required_transitions_for('RESOLVED / FIXED', 'CONFIRMED'),
- ['reopen', 'confirm']
+ ['reopen', 'confirm'],
)
def test_transitions_for_sonarqube_from_resolved_2(self):
self.assertEqual(
self.updater.get_sonarqube_required_transitions_for('RESOLVED / FIXED', 'RESOLVED / FALSE-POSITIVE'),
- ['reopen', 'falsepositive']
+ ['reopen', 'falsepositive'],
)
def test_transitions_for_sonarqube_from_resolved_3(self):
self.assertEqual(
self.updater.get_sonarqube_required_transitions_for('RESOLVED / FIXED', 'RESOLVED / WONTFIX'),
- ['reopen', 'wontfix']
+ ['reopen', 'wontfix'],
)
def test_transitions_for_sonarqube_fake_target_origin(self):
self.assertEqual(
self.updater.get_sonarqube_required_transitions_for('FAKE_STATUS', 'RESOLVED / FIXED'),
- None
+ None,
)
def test_transitions_for_sonarqube_fake_target_status(self):
self.assertEqual(
self.updater.get_sonarqube_required_transitions_for('RESOLVED / FIXED', 'FAKE_STATUS'),
- None
+ None,
)
def test_transitions_for_sonarqube_from_confirmed_1(self):
self.assertEqual(
self.updater.get_sonarqube_required_transitions_for('CONFIRMED', 'REOPENED'),
- ['unconfirm']
+ ['unconfirm'],
)
def test_transitions_for_sonarqube_from_confirmed_2(self):
self.assertEqual(
self.updater.get_sonarqube_required_transitions_for('CONFIRMED', 'RESOLVED / FIXED'),
- ['resolve']
+ ['resolve'],
)
def test_transitions_for_open_reopen_status_1(self):
self.assertEqual(
self.updater.get_sonarqube_required_transitions_for('OPEN', 'REOPENED'),
- None
+ None,
)
def test_transitions_for_open_reopen_status_2(self):
self.assertEqual(
self.updater.get_sonarqube_required_transitions_for('REOPENED', 'OPEN'),
- None
+ None,
)
def test_transitions_for_open_reopen_status_3(self):
self.assertEqual(
self.updater.get_sonarqube_required_transitions_for('REOPENED', 'REOPENED'),
- None
+ None,
)
diff --git a/unittests/test_apiv2_endpoint.py b/unittests/test_apiv2_endpoint.py
index e197fb6eec..b0900f9fe3 100644
--- a/unittests/test_apiv2_endpoint.py
+++ b/unittests/test_apiv2_endpoint.py
@@ -16,13 +16,13 @@ def setUp(self):
def test_endpoint_missing_host_product(self):
r = self.client.post(reverse('endpoint-list'), {
- "host": "FOO.BAR"
+ "host": "FOO.BAR",
}, format='json')
self.assertEqual(r.status_code, 400, r.content[:1000])
self.assertIn("Attribute 'product' is required", r.content.decode("utf-8"))
r = self.client.post(reverse('endpoint-list'), {
- "product": 1
+ "product": 1,
}, format='json')
self.assertEqual(r.status_code, 400, r.content[:1000])
self.assertIn("Host must not be empty", r.content.decode("utf-8"))
@@ -30,13 +30,13 @@ def test_endpoint_missing_host_product(self):
def test_endpoint_add_existing(self):
r = self.client.post(reverse('endpoint-list'), {
"product": 1,
- "host": "FOO.BAR"
+ "host": "FOO.BAR",
}, format='json')
self.assertEqual(r.status_code, 201, r.content[:1000])
r = self.client.post(reverse('endpoint-list'), {
"product": 1,
- "host": "FOO.BAR"
+ "host": "FOO.BAR",
}, format='json')
self.assertEqual(r.status_code, 400, r.content[:1000])
self.assertIn('It appears as though an endpoint with this data already '
@@ -44,7 +44,7 @@ def test_endpoint_add_existing(self):
r = self.client.post(reverse('endpoint-list'), {
"product": 1,
- "host": "foo.bar"
+ "host": "foo.bar",
}, format='json')
self.assertEqual(r.status_code, 400, r.content[:1000])
self.assertIn('It appears as though an endpoint with this data already '
@@ -53,13 +53,13 @@ def test_endpoint_add_existing(self):
def test_endpoint_change_product(self):
r = self.client.post(reverse('endpoint-list'), {
"product": 1,
- "host": "product1"
+ "host": "product1",
}, format='json')
eid = r.json()['id']
self.assertEqual(r.status_code, 201, r.content[:1000])
r = self.client.patch(reverse('endpoint-detail', args=(eid,)), {
- "product": 2
+ "product": 2,
}, format='json')
self.assertEqual(r.status_code, 400, r.content[:1000])
self.assertIn("Change of product is not possible", r.content.decode("utf-8"))
@@ -67,13 +67,13 @@ def test_endpoint_change_product(self):
def test_endpoint_remove_host(self):
payload = {
"product": 1,
- "host": "host1"
+ "host": "host1",
}
r = self.client.post(reverse('endpoint-list'), payload, format='json')
eid = r.json()['id']
self.assertEqual(r.status_code, 201, r.content[:1000])
r = self.client.patch(reverse('endpoint-detail', args=(eid,)), {
- "host": None
+ "host": None,
}, format='json')
self.assertEqual(r.status_code, 400, r.content[:1000])
self.assertIn("Host must not be empty", r.content.decode("utf-8"))
diff --git a/unittests/test_apiv2_methods_and_endpoints.py b/unittests/test_apiv2_methods_and_endpoints.py
index a3508f9880..6169f28f75 100644
--- a/unittests/test_apiv2_methods_and_endpoints.py
+++ b/unittests/test_apiv2_methods_and_endpoints.py
@@ -51,7 +51,7 @@ def test_is_defined(self):
'questionnaire_answers', 'questionnaire_answered_questionnaires',
'questionnaire_engagement_questionnaires', 'questionnaire_general_questionnaires',
'dojo_group_members', 'product_members', 'product_groups', 'product_type_groups',
- 'product_type_members'
+ 'product_type_members',
]
for reg, _, _ in sorted(self.registry):
if reg in exempt_list:
diff --git a/unittests/test_apiv2_notifications.py b/unittests/test_apiv2_notifications.py
index 06aa7413c6..f45d7433b9 100644
--- a/unittests/test_apiv2_notifications.py
+++ b/unittests/test_apiv2_notifications.py
@@ -16,7 +16,7 @@ def setUp(self):
r = self.create(
template=True,
- scan_added=['alert', 'slack']
+ scan_added=['alert', 'slack'],
)
self.assertEqual(r.status_code, 201)
@@ -27,7 +27,7 @@ def create_test_user(self):
password = 'testTEST1234!@#$'
r = self.client.post(reverse('user-list'), {
"username": "api-user-notification",
- "password": password
+ "password": password,
}, format='json')
return r.json()["id"]
diff --git a/unittests/test_apiv2_user.py b/unittests/test_apiv2_user.py
index 8bdac8b813..e93fb39fa1 100644
--- a/unittests/test_apiv2_user.py
+++ b/unittests/test_apiv2_user.py
@@ -28,7 +28,7 @@ def test_user_list(self):
def test_user_add(self):
# simple user without password
r = self.client.post(reverse('user-list'), {
- "username": "api-user-1"
+ "username": "api-user-1",
}, format='json')
self.assertEqual(r.status_code, 201, r.content[:1000])
@@ -36,21 +36,21 @@ def test_user_add(self):
password = 'testTEST1234!@#$'
r = self.client.post(reverse('user-list'), {
"username": "api-user-2",
- "password": password
+ "password": password,
}, format='json')
self.assertEqual(r.status_code, 201, r.content[:1000])
# test password by fetching API key
r = self.client.post(reverse('api-token-auth'), {
"username": "api-user-2",
- "password": password
+ "password": password,
}, format='json')
self.assertEqual(r.status_code, 200, r.content[:1000])
# user with weak password
r = self.client.post(reverse('user-list'), {
"username": "api-user-3",
- "password": "weakPassword"
+ "password": "weakPassword",
}, format='json')
self.assertEqual(r.status_code, 400, r.content[:1000])
self.assertIn('Password must contain at least 1 digit, 0-9.', r.content.decode("utf-8"))
@@ -58,31 +58,31 @@ def test_user_add(self):
def test_user_change_password(self):
# some user
r = self.client.post(reverse('user-list'), {
- "username": "api-user-4"
+ "username": "api-user-4",
}, format='json')
self.assertEqual(r.status_code, 201, r.content[:1000])
user_id = r.json()['id']
r = self.client.put("{}{}/".format(reverse('user-list'), user_id), {
"username": "api-user-4",
- "first_name": "first"
- }, format='json',)
+ "first_name": "first",
+ }, format='json')
self.assertEqual(r.status_code, 200, r.content[:1000])
r = self.client.patch("{}{}/".format(reverse('user-list'), user_id), {
- "last_name": "last"
+ "last_name": "last",
}, format='json')
self.assertEqual(r.status_code, 200, r.content[:1000])
r = self.client.put("{}{}/".format(reverse('user-list'), user_id), {
"username": "api-user-4",
- "password": "testTEST1234!@#$"
+ "password": "testTEST1234!@#$",
}, format='json')
self.assertEqual(r.status_code, 400, r.content[:1000])
self.assertIn("Update of password though API is not allowed", r.content.decode("utf-8"))
r = self.client.patch("{}{}/".format(reverse('user-list'), user_id), {
- "password": "testTEST1234!@#$"
+ "password": "testTEST1234!@#$",
}, format='json')
self.assertEqual(r.status_code, 400, r.content[:1000])
self.assertIn("Update of password though API is not allowed", r.content.decode("utf-8"))
diff --git a/unittests/test_apply_finding_template.py b/unittests/test_apply_finding_template.py
index 3042098b41..5e58bdde62 100644
--- a/unittests/test_apply_finding_template.py
+++ b/unittests/test_apply_finding_template.py
@@ -179,7 +179,7 @@ def test_unauthorized_apply_template_to_finding_fails(self):
'severity': 'High',
'description': 'Finding for Testing Apply Template Functionality',
'mitigation': 'template mitigation',
- 'impact': 'template impact'}
+ 'impact': 'template impact'},
)
self.assertEqual(302, result.status_code)
self.assertIn('login', result.url)
diff --git a/unittests/test_dashboard.py b/unittests/test_dashboard.py
index 8d853c46a9..a5f73a14e8 100644
--- a/unittests/test_dashboard.py
+++ b/unittests/test_dashboard.py
@@ -21,7 +21,7 @@ def create(when: datetime, product_id: int, titles_and_severities: List[Tuple[st
test = Test.objects.create(engagement=engagement, test_type_id=120, target_start=when, target_end=when)
Finding.objects.bulk_create(
(Finding(title=title, test=test, severity=severity, verified=False)
- for title, severity in titles_and_severities)
+ for title, severity in titles_and_severities),
)
@@ -36,7 +36,7 @@ def create_with_duplicates(when: datetime, product_id: int, titles_and_severitie
Finding.objects.bulk_create(
(Finding(title=title, test=test, severity=severity, verified=False,
duplicate=(title in originals_map), duplicate_finding=originals_map.get(title))
- for title, severity in titles_and_severities)
+ for title, severity in titles_and_severities),
)
diff --git a/unittests/test_deduplication_logic.py b/unittests/test_deduplication_logic.py
index 6369d26d3a..46a99090b4 100644
--- a/unittests/test_deduplication_logic.py
+++ b/unittests/test_deduplication_logic.py
@@ -1166,7 +1166,7 @@ def log_findings(self, findings):
+ ': dup: ' + f'{str(finding.duplicate):5.5}' + ': dup_id: '
+ (f'{str(finding.duplicate_finding.id):4.4}' if finding.duplicate_finding else 'None') + ': hash_code: ' + str(finding.hash_code)
+ ': eps: ' + str(finding.endpoints.count()) + ": notes: " + str([n.id for n in finding.notes.all()])
- + ': uid: ' + f'{str(finding.unique_id_from_tool):5.5}' + (' fp' if finding.false_p else '')
+ + ': uid: ' + f'{str(finding.unique_id_from_tool):5.5}' + (' fp' if finding.false_p else ''),
)
logger.debug('\t\tendpoints')
diff --git a/unittests/test_endpoint_model.py b/unittests/test_endpoint_model.py
index 72b0e1843e..69694680df 100644
--- a/unittests/test_endpoint_model.py
+++ b/unittests/test_endpoint_model.py
@@ -112,47 +112,47 @@ def test_url_normalize(self):
def test_get_or_create(self):
_endpoint1, created1 = endpoint_get_or_create(
protocol='http',
- host='bar.foo'
+ host='bar.foo',
)
self.assertTrue(created1)
_endpoint2, created2 = endpoint_get_or_create(
protocol='http',
- host='bar.foo'
+ host='bar.foo',
)
self.assertFalse(created2)
_endpoint3, created3 = endpoint_get_or_create(
protocol='http',
host='bar.foo',
- port=80
+ port=80,
)
self.assertFalse(created3)
_endpoint4, created4 = endpoint_get_or_create(
protocol='http',
host='bar.foo',
- port=8080
+ port=8080,
)
self.assertTrue(created4)
_endpoint5, created5 = endpoint_get_or_create(
protocol='https',
host='bar.foo',
- port=443
+ port=443,
)
self.assertTrue(created5)
_endpoint6, created6 = endpoint_get_or_create(
protocol='https',
- host='bar.foo'
+ host='bar.foo',
)
self.assertFalse(created6)
_endpoint7, created7 = endpoint_get_or_create(
protocol='https',
host='bar.foo',
- port=8443
+ port=8443,
)
self.assertTrue(created7)
@@ -171,7 +171,7 @@ def test_equality_with_one_product_one_without(self):
p = Product.objects.get_or_create(
name="test product",
description="",
- prod_type=Product_Type.objects.get_or_create(name="test pt")[0]
+ prod_type=Product_Type.objects.get_or_create(name="test pt")[0],
)[0]
e1 = Endpoint(host="localhost")
e2 = Endpoint(host="localhost", product=p)
@@ -184,12 +184,12 @@ def test_equality_with_products(self):
p1 = Product.objects.get_or_create(
name="test product 1",
description="",
- prod_type=Product_Type.objects.get_or_create(name="test pt")[0]
+ prod_type=Product_Type.objects.get_or_create(name="test pt")[0],
)[0]
p2 = Product.objects.get_or_create(
name="test product 2",
description="",
- prod_type=Product_Type.objects.get_or_create(name="test pt")[0]
+ prod_type=Product_Type.objects.get_or_create(name="test pt")[0],
)[0]
# Define the endpoints
e1 = Endpoint(host="localhost", product=p1)
@@ -213,13 +213,13 @@ def test_endpoint_status_broken(self):
self.engagement = Engagement.objects.create(
product=self.product,
target_start=datetime.datetime(2020, 1, 1, tzinfo=timezone.utc),
- target_end=datetime.datetime(2022, 1, 1, tzinfo=timezone.utc)
+ target_end=datetime.datetime(2022, 1, 1, tzinfo=timezone.utc),
)
self.test = Test.objects.create(
engagement=self.engagement,
target_start=datetime.datetime(2020, 1, 1, tzinfo=timezone.utc),
target_end=datetime.datetime(2022, 1, 1, tzinfo=timezone.utc),
- test_type_id=1
+ test_type_id=1,
)
from django.contrib.auth import get_user_model
user = get_user_model().objects.create().pk
@@ -233,36 +233,36 @@ def test_endpoint_status_broken(self):
last_modified=datetime.datetime(2021, 4, 1, tzinfo=timezone.utc),
mitigated=False,
finding_id=self.finding,
- endpoint_id=self.endpoint
+ endpoint_id=self.endpoint,
).pk,
'removed_endpoint': Endpoint_Status.objects.create(
date=datetime.datetime(2021, 2, 1, tzinfo=timezone.utc),
last_modified=datetime.datetime(2021, 5, 1, tzinfo=timezone.utc),
mitigated=True,
finding_id=self.another_finding,
- endpoint_id=None
+ endpoint_id=None,
).pk,
'removed_finding': Endpoint_Status.objects.create(
date=datetime.datetime(2021, 2, 1, tzinfo=timezone.utc),
last_modified=datetime.datetime(2021, 5, 1, tzinfo=timezone.utc),
mitigated=True,
finding_id=None,
- endpoint_id=self.another_endpoint
+ endpoint_id=self.another_endpoint,
).pk,
}
Finding.objects.get(id=self.finding).endpoint_status.add(
- Endpoint_Status.objects.get(id=self.endpoint_status['standard'])
+ Endpoint_Status.objects.get(id=self.endpoint_status['standard']),
)
Finding.objects.get(id=self.another_finding).endpoint_status.add(
- Endpoint_Status.objects.get(id=self.endpoint_status['removed_endpoint'])
+ Endpoint_Status.objects.get(id=self.endpoint_status['removed_endpoint']),
)
Endpoint.objects.get(id=self.endpoint).endpoint_status.add(
- Endpoint_Status.objects.get(id=self.endpoint_status['standard'])
+ Endpoint_Status.objects.get(id=self.endpoint_status['standard']),
)
Endpoint.objects.get(id=self.another_endpoint).endpoint_status.add(
- Endpoint_Status.objects.get(id=self.endpoint_status['removed_finding'])
+ Endpoint_Status.objects.get(id=self.endpoint_status['removed_finding']),
)
remove_broken_endpoint_statuses(apps)
diff --git a/unittests/test_false_positive_history_logic.py b/unittests/test_false_positive_history_logic.py
index 29cb18f947..ac949bf5f7 100644
--- a/unittests/test_false_positive_history_logic.py
+++ b/unittests/test_false_positive_history_logic.py
@@ -1683,7 +1683,7 @@ def log_findings(self, findings):
+ ': dup: ' + f'{str(finding.duplicate):5.5}' + ': dup_id: '
+ (f'{str(finding.duplicate_finding.id):4.4}' if finding.duplicate_finding else 'None') + ': hash_code: ' + str(finding.hash_code)
+ ': eps: ' + str(finding.endpoints.count()) + ": notes: " + str([n.id for n in finding.notes.all()])
- + ': uid: ' + f'{str(finding.unique_id_from_tool):5.5}' + (' fp' if finding.false_p else '')
+ + ': uid: ' + f'{str(finding.unique_id_from_tool):5.5}' + (' fp' if finding.false_p else ''),
)
logger.debug('\t\tendpoints')
diff --git a/unittests/test_finding_helper.py b/unittests/test_finding_helper.py
index a3491a423c..1ef97136b5 100644
--- a/unittests/test_finding_helper.py
+++ b/unittests/test_finding_helper.py
@@ -40,7 +40,7 @@ def test_new_finding(self, mock_tz):
self.assertEqual(
self.get_status_fields(finding),
- (True, False, False, False, False, None, None, frozen_datetime)
+ (True, False, False, False, False, None, None, frozen_datetime),
)
@mock.patch('dojo.finding.helper.timezone.now')
@@ -58,7 +58,7 @@ def test_no_status_change(self, mock_tz):
self.assertEqual(
self.get_status_fields(finding),
- status_fields
+ status_fields,
)
@mock.patch('dojo.finding.helper.timezone.now')
@@ -70,7 +70,7 @@ def test_mark_fresh_as_mitigated(self, mock_dt):
finding.save()
self.assertEqual(
self.get_status_fields(finding),
- (False, False, False, False, True, frozen_datetime, self.user_1, frozen_datetime)
+ (False, False, False, False, True, frozen_datetime, self.user_1, frozen_datetime),
)
@mock.patch('dojo.finding.helper.timezone.now')
@@ -88,7 +88,7 @@ def test_mark_old_active_as_mitigated(self, mock_can_edit, mock_tz):
self.assertEqual(
self.get_status_fields(finding),
- (False, False, False, False, True, frozen_datetime, self.user_1, frozen_datetime)
+ (False, False, False, False, True, frozen_datetime, self.user_1, frozen_datetime),
)
@mock.patch('dojo.finding.helper.timezone.now')
@@ -110,7 +110,7 @@ def test_mark_old_active_as_mitigated_custom_edit(self, mock_can_edit, mock_tz):
self.assertEqual(
self.get_status_fields(finding),
- (False, False, False, False, True, custom_mitigated, self.user_2, frozen_datetime)
+ (False, False, False, False, True, custom_mitigated, self.user_2, frozen_datetime),
)
@mock.patch('dojo.finding.helper.timezone.now')
@@ -132,7 +132,7 @@ def test_update_old_mitigated_with_custom_edit(self, mock_can_edit, mock_tz):
self.assertEqual(
self.get_status_fields(finding),
- (False, False, False, False, True, custom_mitigated, self.user_2, frozen_datetime)
+ (False, False, False, False, True, custom_mitigated, self.user_2, frozen_datetime),
)
@mock.patch('dojo.finding.helper.timezone.now')
@@ -155,7 +155,7 @@ def test_update_old_mitigated_with_missing_data(self, mock_can_edit, mock_tz):
self.assertEqual(
self.get_status_fields(finding),
- (False, False, False, False, True, frozen_datetime, self.user_1, frozen_datetime)
+ (False, False, False, False, True, frozen_datetime, self.user_1, frozen_datetime),
)
@mock.patch('dojo.finding.helper.timezone.now')
@@ -174,7 +174,7 @@ def test_set_old_mitigated_as_active(self, mock_can_edit, mock_tz):
self.assertEqual(
self.get_status_fields(finding),
- (True, False, False, False, False, None, None, frozen_datetime)
+ (True, False, False, False, False, None, None, frozen_datetime),
)
@mock.patch('dojo.finding.helper.timezone.now')
@@ -192,7 +192,7 @@ def test_set_active_as_false_p(self, mock_can_edit, mock_tz):
self.assertEqual(
self.get_status_fields(finding),
# TODO marking as false positive resets verified to False, possible bug / undesired behaviour?
- (False, False, True, False, True, frozen_datetime, self.user_1, frozen_datetime)
+ (False, False, True, False, True, frozen_datetime, self.user_1, frozen_datetime),
)
@mock.patch('dojo.finding.helper.timezone.now')
@@ -210,7 +210,7 @@ def test_set_active_as_out_of_scope(self, mock_can_edit, mock_tz):
self.assertEqual(
self.get_status_fields(finding),
# TODO marking as false positive resets verified to False, possible bug / undesired behaviour?
- (False, False, False, True, True, frozen_datetime, self.user_1, frozen_datetime)
+ (False, False, False, True, True, frozen_datetime, self.user_1, frozen_datetime),
)
diff --git a/unittests/test_import_reimport.py b/unittests/test_import_reimport.py
index dfbd9c21ca..45c8ed63fa 100644
--- a/unittests/test_import_reimport.py
+++ b/unittests/test_import_reimport.py
@@ -1491,8 +1491,8 @@ def test_import_0_reimport_1_active_verified_reimport_0_active_verified_statisti
'medium': {'active': 1, 'verified': 0, 'duplicate': 0, 'false_p': 0, 'out_of_scope': 0, 'is_mitigated': 0, 'risk_accepted': 0, 'total': 1},
'high': {'active': 0, 'verified': 0, 'duplicate': 0, 'false_p': 0, 'out_of_scope': 0, 'is_mitigated': 0, 'risk_accepted': 0, 'total': 0},
'critical': {'active': 0, 'verified': 0, 'duplicate': 0, 'false_p': 0, 'out_of_scope': 0, 'is_mitigated': 0, 'risk_accepted': 0, 'total': 0},
- 'total': {'active': 4, 'verified': 0, 'duplicate': 0, 'false_p': 0, 'out_of_scope': 0, 'is_mitigated': 0, 'risk_accepted': 0, 'total': 4}
- }
+ 'total': {'active': 4, 'verified': 0, 'duplicate': 0, 'false_p': 0, 'out_of_scope': 0, 'is_mitigated': 0, 'risk_accepted': 0, 'total': 4},
+ },
})
test_id = import0['test']
@@ -1541,8 +1541,8 @@ def test_import_0_reimport_1_active_verified_reimport_0_active_verified_statisti
'info': {'active': 0, 'duplicate': 0, 'false_p': 0, 'is_mitigated': 0, 'out_of_scope': 0, 'risk_accepted': 0, 'total': 0, 'verified': 0},
'low': {'active': 0, 'duplicate': 0, 'false_p': 0, 'is_mitigated': 0, 'out_of_scope': 0, 'risk_accepted': 0, 'total': 0, 'verified': 0},
'medium': {'active': 0, 'duplicate': 0, 'false_p': 0, 'is_mitigated': 0, 'out_of_scope': 0, 'risk_accepted': 0, 'total': 0, 'verified': 0},
- 'total': {'active': 0, 'duplicate': 0, 'false_p': 0, 'is_mitigated': 0, 'out_of_scope': 0, 'risk_accepted': 0, 'total': 0, 'verified': 0}}
- }
+ 'total': {'active': 0, 'duplicate': 0, 'false_p': 0, 'is_mitigated': 0, 'out_of_scope': 0, 'risk_accepted': 0, 'total': 0, 'verified': 0}},
+ },
})
with assertTestImportModelsCreated(self, reimports=1, affected_findings=2, closed=1, reactivated=1, untouched=3):
@@ -1591,8 +1591,8 @@ def test_import_0_reimport_1_active_verified_reimport_0_active_verified_statisti
'info': {'active': 0, 'duplicate': 0, 'false_p': 0, 'is_mitigated': 0, 'out_of_scope': 0, 'risk_accepted': 0, 'total': 0, 'verified': 0},
'low': {'active': 1, 'duplicate': 0, 'false_p': 0, 'is_mitigated': 0, 'out_of_scope': 0, 'risk_accepted': 0, 'total': 1, 'verified': 0},
'medium': {'active': 0, 'duplicate': 0, 'false_p': 0, 'is_mitigated': 0, 'out_of_scope': 0, 'risk_accepted': 0, 'total': 0, 'verified': 0},
- 'total': {'active': 1, 'duplicate': 0, 'false_p': 0, 'is_mitigated': 0, 'out_of_scope': 0, 'risk_accepted': 0, 'total': 1, 'verified': 0}}
- }
+ 'total': {'active': 1, 'duplicate': 0, 'false_p': 0, 'is_mitigated': 0, 'out_of_scope': 0, 'risk_accepted': 0, 'total': 1, 'verified': 0}},
+ },
})
# without import history, there are no delta statistics
@@ -1609,8 +1609,8 @@ def test_import_0_reimport_1_active_verified_reimport_0_active_verified_statisti
'medium': {'active': 1, 'verified': 0, 'duplicate': 0, 'false_p': 0, 'out_of_scope': 0, 'is_mitigated': 0, 'risk_accepted': 0, 'total': 1},
'high': {'active': 0, 'verified': 0, 'duplicate': 0, 'false_p': 0, 'out_of_scope': 0, 'is_mitigated': 0, 'risk_accepted': 0, 'total': 0},
'critical': {'active': 0, 'verified': 0, 'duplicate': 0, 'false_p': 0, 'out_of_scope': 0, 'is_mitigated': 0, 'risk_accepted': 0, 'total': 0},
- 'total': {'active': 4, 'verified': 0, 'duplicate': 0, 'false_p': 0, 'out_of_scope': 0, 'is_mitigated': 0, 'risk_accepted': 0, 'total': 4}
- }
+ 'total': {'active': 4, 'verified': 0, 'duplicate': 0, 'false_p': 0, 'out_of_scope': 0, 'is_mitigated': 0, 'risk_accepted': 0, 'total': 4},
+ },
})
test_id = import0['test']
@@ -1624,7 +1624,7 @@ def test_import_0_reimport_1_active_verified_reimport_0_active_verified_statisti
'medium': {'active': 1, 'verified': 0, 'duplicate': 0, 'false_p': 0, 'out_of_scope': 0, 'is_mitigated': 0, 'risk_accepted': 0, 'total': 1},
'high': {'active': 0, 'verified': 0, 'duplicate': 0, 'false_p': 0, 'out_of_scope': 0, 'is_mitigated': 0, 'risk_accepted': 0, 'total': 0},
'critical': {'active': 0, 'verified': 0, 'duplicate': 0, 'false_p': 0, 'out_of_scope': 0, 'is_mitigated': 0, 'risk_accepted': 0, 'total': 0},
- 'total': {'active': 4, 'verified': 0, 'duplicate': 0, 'false_p': 0, 'out_of_scope': 0, 'is_mitigated': 0, 'risk_accepted': 0, 'total': 4}
+ 'total': {'active': 4, 'verified': 0, 'duplicate': 0, 'false_p': 0, 'out_of_scope': 0, 'is_mitigated': 0, 'risk_accepted': 0, 'total': 4},
},
'after': {
'info': {'active': 0, 'verified': 0, 'duplicate': 0, 'false_p': 0, 'out_of_scope': 0, 'is_mitigated': 0, 'risk_accepted': 0, 'total': 0},
@@ -1632,8 +1632,8 @@ def test_import_0_reimport_1_active_verified_reimport_0_active_verified_statisti
'medium': {'active': 1, 'verified': 0, 'duplicate': 0, 'false_p': 0, 'out_of_scope': 0, 'is_mitigated': 0, 'risk_accepted': 0, 'total': 1},
'high': {'active': 0, 'verified': 0, 'duplicate': 0, 'false_p': 0, 'out_of_scope': 0, 'is_mitigated': 0, 'risk_accepted': 0, 'total': 0},
'critical': {'active': 0, 'verified': 0, 'duplicate': 0, 'false_p': 0, 'out_of_scope': 0, 'is_mitigated': 0, 'risk_accepted': 0, 'total': 0},
- 'total': {'active': 4, 'verified': 0, 'duplicate': 0, 'false_p': 0, 'out_of_scope': 0, 'is_mitigated': 1, 'risk_accepted': 0, 'total': 5}
- }
+ 'total': {'active': 4, 'verified': 0, 'duplicate': 0, 'false_p': 0, 'out_of_scope': 0, 'is_mitigated': 1, 'risk_accepted': 0, 'total': 5},
+ },
})
with assertTestImportModelsCreated(self, reimports=0, affected_findings=0, closed=0, reactivated=0, untouched=0):
@@ -1646,7 +1646,7 @@ def test_import_0_reimport_1_active_verified_reimport_0_active_verified_statisti
'medium': {'active': 1, 'verified': 0, 'duplicate': 0, 'false_p': 0, 'out_of_scope': 0, 'is_mitigated': 0, 'risk_accepted': 0, 'total': 1},
'high': {'active': 0, 'verified': 0, 'duplicate': 0, 'false_p': 0, 'out_of_scope': 0, 'is_mitigated': 0, 'risk_accepted': 0, 'total': 0},
'critical': {'active': 0, 'verified': 0, 'duplicate': 0, 'false_p': 0, 'out_of_scope': 0, 'is_mitigated': 0, 'risk_accepted': 0, 'total': 0},
- 'total': {'active': 4, 'verified': 0, 'duplicate': 0, 'false_p': 0, 'out_of_scope': 0, 'is_mitigated': 1, 'risk_accepted': 0, 'total': 5}
+ 'total': {'active': 4, 'verified': 0, 'duplicate': 0, 'false_p': 0, 'out_of_scope': 0, 'is_mitigated': 1, 'risk_accepted': 0, 'total': 5},
},
'after': {
'info': {'active': 0, 'verified': 0, 'duplicate': 0, 'false_p': 0, 'out_of_scope': 0, 'is_mitigated': 0, 'risk_accepted': 0, 'total': 0},
@@ -1654,8 +1654,8 @@ def test_import_0_reimport_1_active_verified_reimport_0_active_verified_statisti
'medium': {'active': 1, 'verified': 0, 'duplicate': 0, 'false_p': 0, 'out_of_scope': 0, 'is_mitigated': 0, 'risk_accepted': 0, 'total': 1},
'high': {'active': 0, 'verified': 0, 'duplicate': 0, 'false_p': 0, 'out_of_scope': 0, 'is_mitigated': 0, 'risk_accepted': 0, 'total': 0},
'critical': {'active': 0, 'verified': 0, 'duplicate': 0, 'false_p': 0, 'out_of_scope': 0, 'is_mitigated': 0, 'risk_accepted': 0, 'total': 0},
- 'total': {'active': 4, 'verified': 0, 'duplicate': 0, 'false_p': 0, 'out_of_scope': 0, 'is_mitigated': 1, 'risk_accepted': 0, 'total': 5}
- }
+ 'total': {'active': 4, 'verified': 0, 'duplicate': 0, 'false_p': 0, 'out_of_scope': 0, 'is_mitigated': 1, 'risk_accepted': 0, 'total': 5},
+ },
})
# Reimport tests to test Scan_Date logic (usecase not supported on UI)
diff --git a/unittests/test_jira_webhook.py b/unittests/test_jira_webhook.py
index 5d7eccd2f5..d88161e46f 100644
--- a/unittests/test_jira_webhook.py
+++ b/unittests/test_jira_webhook.py
@@ -29,11 +29,11 @@ class JIRAWebhookTest(DojoTestCase):
"48x48": "http://www.testjira.com/secure/useravatar?ownerId=valentijn&avatarId=11101",
"24x24": "http://www.testjira.com/secure/useravatar?size=small&ownerId=valentijn&avatarId=11101",
"16x16": "http://www.testjira.com/secure/useravatar?size=x small&ownerId=valentijn&avatarId=11101",
- "32x32": "http://www.testjira.com/secure/useravatar?size=medium&ownerId=valentijn&avatarId=11101"
+ "32x32": "http://www.testjira.com/secure/useravatar?size=medium&ownerId=valentijn&avatarId=11101",
},
"displayName": "Valentijn Scholten",
"active": "true",
- "timeZone": "Europe/Amsterdam"
+ "timeZone": "Europe/Amsterdam",
},
"body": "test2",
"updateAuthor": {
@@ -43,15 +43,15 @@ class JIRAWebhookTest(DojoTestCase):
"48x48": "http://www.testjira.com/secure/useravatar?ownerId=valentijn&avatarId=11101",
"24x24": "http://www.testjira.com/secure/useravatar?size=small&ownerId=valentijn&avatarId=11101",
"16x16": "http://www.testjira.com/secure/useravatar?size=xsmall&ownerId=valentijn&avatarId=11101",
- "32x32": "http://www.testjira.com/secure/useravatar?size=medium&ownerId=valentijn&avatarId=11101"
+ "32x32": "http://www.testjira.com/secure/useravatar?size=medium&ownerId=valentijn&avatarId=11101",
},
"displayName": "Valentijn Scholten",
"active": "true",
- "timeZone": "Europe/Amsterdam"
+ "timeZone": "Europe/Amsterdam",
},
"created": "2020-11-11T18:55:21.425+0100",
- "updated": "2020-11-11T18:55:21.425+0100"
- }
+ "updated": "2020-11-11T18:55:21.425+0100",
+ },
}
jira_issue_comment_template_json_with_email = {
@@ -67,11 +67,11 @@ class JIRAWebhookTest(DojoTestCase):
"48x48": "http://www.testjira.com/secure/useravatar?ownerId=valentijn&avatarId=11101",
"24x24": "http://www.testjira.com/secure/useravatar?size=small&ownerId=valentijn&avatarId=11101",
"16x16": "http://www.testjira.com/secure/useravatar?size=x small&ownerId=valentijn&avatarId=11101",
- "32x32": "http://www.testjira.com/secure/useravatar?size=medium&ownerId=valentijn&avatarId=11101"
+ "32x32": "http://www.testjira.com/secure/useravatar?size=medium&ownerId=valentijn&avatarId=11101",
},
"displayName": "Valentijn Scholten",
"active": "true",
- "timeZone": "Europe/Amsterdam"
+ "timeZone": "Europe/Amsterdam",
},
"body": "test2",
"updateAuthor": {
@@ -81,15 +81,15 @@ class JIRAWebhookTest(DojoTestCase):
"48x48": "http://www.testjira.com/secure/useravatar?ownerId=valentijn&avatarId=11101",
"24x24": "http://www.testjira.com/secure/useravatar?size=small&ownerId=valentijn&avatarId=11101",
"16x16": "http://www.testjira.com/secure/useravatar?size=xsmall&ownerId=valentijn&avatarId=11101",
- "32x32": "http://www.testjira.com/secure/useravatar?size=medium&ownerId=valentijn&avatarId=11101"
+ "32x32": "http://www.testjira.com/secure/useravatar?size=medium&ownerId=valentijn&avatarId=11101",
},
"displayName": "Valentijn Scholten",
"active": "true",
- "timeZone": "Europe/Amsterdam"
+ "timeZone": "Europe/Amsterdam",
},
"created": "2020-11-11T18:55:21.425+0100",
- "updated": "2020-11-11T18:55:21.425+0100"
- }
+ "updated": "2020-11-11T18:55:21.425+0100",
+ },
}
jira_issue_update_template_string = """
diff --git a/unittests/test_metrics_queries.py b/unittests/test_metrics_queries.py
index 6378248e0e..c8fdc30007 100644
--- a/unittests/test_metrics_queries.py
+++ b/unittests/test_metrics_queries.py
@@ -36,7 +36,7 @@ def test_finding_queries_no_data(self):
product_types = []
finding_queries = utils.finding_queries(
product_types,
- self.request
+ self.request,
)
self.assertSequenceEqual(
@@ -54,7 +54,7 @@ def test_finding_queries(self, mock_timezone):
product_types = []
finding_queries = utils.finding_queries(
product_types,
- self.request
+ self.request,
)
self.assertSequenceEqual(
@@ -71,48 +71,48 @@ def test_finding_queries(self, mock_timezone):
'start_date',
'end_date',
'form',
- ]
+ ],
)
# Assert that we get expected querysets back. This is to be used to
# support refactoring, in attempt of lowering the query count.
self.assertSequenceEqual(
finding_queries['all'].values(),
- []
+ [],
# [{'id': 226, 'title': 'Test Endpoint Mitigation - Finding F1 Without Endpoints', 'date': date(2022, 10, 15), 'sla_start_date': None, 'cwe': None, 'cve': None, 'cvssv3': None, 'cvssv3_score': None, 'url': None, 'severity': 'Info', 'description': 'vulnerability', 'mitigation': '', 'impact': '', 'steps_to_reproduce': '', 'severity_justification': '', 'references': '', 'test_id': 89, 'active': True, 'verified': True, 'false_p': False, 'duplicate': False, 'duplicate_finding_id': None, 'out_of_scope': False, 'risk_accepted': False, 'under_review': False, 'last_status_update': None, 'review_requested_by_id': None, 'under_defect_review': False, 'defect_review_requested_by_id': None, 'is_mitigated': False, 'thread_id': 0, 'mitigated': None, 'mitigated_by_id': None, 'reporter_id': 1, 'numerical_severity': 'S4', 'last_reviewed': None, 'last_reviewed_by_id': None, 'param': None, 'payload': None, 'hash_code': 'a6dd6bd359ff0b504a21b8a7ae5e59f1b40dd0fa1715728bd58de8f688f01b19', 'line': None, 'file_path': '', 'component_name': None, 'component_version': None, 'static_finding': False, 'dynamic_finding': True, 'created': datetime(2022, 10, 15, 23, 12, 52, 966000, tzinfo=pytz.UTC), 'scanner_confidence': None, 'sonarqube_issue_id': None, 'unique_id_from_tool': None, 'vuln_id_from_tool': None, 'sast_source_object': None, 'sast_sink_object': None, 'sast_source_line': None, 'sast_source_file_path': None, 'nb_occurences': None, 'publish_date': None, 'service': None, 'planned_remediation_date': None, 'test__engagement__product__prod_type__member': True, 'test__engagement__product__member': True, 'test__engagement__product__prod_type__authorized_group': False, 'test__engagement__product__authorized_group': False}]
)
self.assertSequenceEqual(
finding_queries['closed'].values(),
- []
+ [],
)
self.assertSequenceEqual(
finding_queries['accepted'].values(),
- []
+ [],
)
self.assertSequenceEqual(
list(finding_queries['accepted_count'].values()),
- [0, 0, 0, 0, 0, 0]
+ [0, 0, 0, 0, 0, 0],
)
self.assertSequenceEqual(
finding_queries['top_ten'].values(),
- []
+ [],
)
self.assertEqual(
list(finding_queries['monthly_counts'].values()),
[
[
{'epoch': 1604188800000, 'grouped_date': date(2020, 11, 1), 'critical': 0, 'high': 0, 'medium': 0, 'low': 0, 'info': 0, 'total': 0, 'closed': 0},
- {'epoch': 1606780800000, 'grouped_date': date(2020, 12, 1), 'critical': 0, 'high': 0, 'medium': 0, 'low': 0, 'info': 0, 'total': 0, 'closed': 0}
+ {'epoch': 1606780800000, 'grouped_date': date(2020, 12, 1), 'critical': 0, 'high': 0, 'medium': 0, 'low': 0, 'info': 0, 'total': 0, 'closed': 0},
],
[
{'epoch': 1604188800000, 'grouped_date': date(2020, 11, 1), 'critical': 0, 'high': 0, 'medium': 0, 'low': 0, 'info': 0, 'total': 0},
- {'epoch': 1606780800000, 'grouped_date': date(2020, 12, 1), 'critical': 0, 'high': 0, 'medium': 0, 'low': 0, 'info': 0, 'total': 0}
+ {'epoch': 1606780800000, 'grouped_date': date(2020, 12, 1), 'critical': 0, 'high': 0, 'medium': 0, 'low': 0, 'info': 0, 'total': 0},
],
[
{'epoch': 1604188800000, 'grouped_date': date(2020, 11, 1), 'critical': 0, 'high': 0, 'medium': 0, 'low': 0, 'info': 0, 'total': 0},
- {'epoch': 1606780800000, 'grouped_date': date(2020, 12, 1), 'critical': 0, 'high': 0, 'medium': 0, 'low': 0, 'info': 0, 'total': 0}
- ]
- ]
+ {'epoch': 1606780800000, 'grouped_date': date(2020, 12, 1), 'critical': 0, 'high': 0, 'medium': 0, 'low': 0, 'info': 0, 'total': 0},
+ ],
+ ],
)
self.assertEqual(
finding_queries['weekly_counts'],
@@ -120,19 +120,19 @@ def test_finding_queries(self, mock_timezone):
'opened_per_period': [
{'epoch': 1606694400000, 'grouped_date': date(2020, 11, 30), 'total': 0, 'critical': 0, 'high': 0, 'medium': 0, 'low': 0, 'info': 0, 'closed': 0},
{'epoch': 1607299200000, 'grouped_date': date(2020, 12, 7), 'total': 0, 'critical': 0, 'high': 0, 'medium': 0, 'low': 0, 'info': 0, 'closed': 0},
- {'epoch': 1607904000000, 'grouped_date': date(2020, 12, 14), 'total': 0, 'critical': 0, 'high': 0, 'medium': 0, 'low': 0, 'info': 0, 'closed': 0}
+ {'epoch': 1607904000000, 'grouped_date': date(2020, 12, 14), 'total': 0, 'critical': 0, 'high': 0, 'medium': 0, 'low': 0, 'info': 0, 'closed': 0},
],
'accepted_per_period': [
{'epoch': 1606694400000, 'grouped_date': date(2020, 11, 30), 'total': 0, 'critical': 0, 'high': 0, 'medium': 0, 'low': 0, 'info': 0},
{'epoch': 1607299200000, 'grouped_date': date(2020, 12, 7), 'total': 0, 'critical': 0, 'high': 0, 'medium': 0, 'low': 0, 'info': 0},
- {'epoch': 1607904000000, 'grouped_date': date(2020, 12, 14), 'total': 0, 'critical': 0, 'high': 0, 'medium': 0, 'low': 0, 'info': 0}
+ {'epoch': 1607904000000, 'grouped_date': date(2020, 12, 14), 'total': 0, 'critical': 0, 'high': 0, 'medium': 0, 'low': 0, 'info': 0},
],
'active_per_period': [
{'epoch': 1606694400000, 'grouped_date': date(2020, 11, 30), 'total': 0, 'critical': 0, 'high': 0, 'medium': 0, 'low': 0, 'info': 0},
{'epoch': 1607299200000, 'grouped_date': date(2020, 12, 7), 'total': 0, 'critical': 0, 'high': 0, 'medium': 0, 'low': 0, 'info': 0},
- {'epoch': 1607904000000, 'grouped_date': date(2020, 12, 14), 'total': 0, 'critical': 0, 'high': 0, 'medium': 0, 'low': 0, 'info': 0}
- ]
- }
+ {'epoch': 1607904000000, 'grouped_date': date(2020, 12, 14), 'total': 0, 'critical': 0, 'high': 0, 'medium': 0, 'low': 0, 'info': 0},
+ ],
+ },
)
self.assertEqual(finding_queries['weeks_between'], 2)
self.assertIsInstance(finding_queries['start_date'], datetime)
@@ -155,7 +155,7 @@ def test_endpoint_queries_no_data(self):
product_types = []
endpoint_queries = utils.endpoint_queries(
product_types,
- self.request
+ self.request,
)
self.assertSequenceEqual(
@@ -169,7 +169,7 @@ def test_endpoint_queries(self):
product_types = []
endpoint_queries = utils.endpoint_queries(
product_types,
- self.request
+ self.request,
)
self.assertSequenceEqual(
@@ -186,7 +186,7 @@ def test_endpoint_queries(self):
'start_date',
'end_date',
'form',
- ]
+ ],
)
# Assert that we get expected querysets back. This is to be used to
@@ -199,7 +199,7 @@ def test_endpoint_queries(self):
{'id': 4, 'date': date(2020, 7, 1), 'last_modified': datetime(2020, 7, 1, 17, 45, 39, 791907, tzinfo=pytz.UTC), 'mitigated': False, 'mitigated_time': None, 'mitigated_by_id': None, 'false_positive': False, 'out_of_scope': True, 'risk_accepted': False, 'endpoint_id': 5, 'finding_id': 229, 'endpoint__product__prod_type__member': True, 'endpoint__product__member': True, 'endpoint__product__prod_type__authorized_group': False, 'endpoint__product__authorized_group': False},
{'id': 5, 'date': date(2020, 7, 1), 'last_modified': datetime(2020, 7, 1, 17, 45, 39, 791907, tzinfo=pytz.UTC), 'mitigated': False, 'mitigated_time': None, 'mitigated_by_id': None, 'false_positive': False, 'out_of_scope': False, 'risk_accepted': True, 'endpoint_id': 5, 'finding_id': 230, 'endpoint__product__prod_type__member': True, 'endpoint__product__member': True, 'endpoint__product__prod_type__authorized_group': False, 'endpoint__product__authorized_group': False},
{'id': 7, 'date': date(2020, 7, 1), 'last_modified': datetime(2020, 7, 1, 17, 45, 39, 791907, tzinfo=pytz.UTC), 'mitigated': False, 'mitigated_time': None, 'mitigated_by_id': None, 'false_positive': False, 'out_of_scope': False, 'risk_accepted': False, 'endpoint_id': 7, 'finding_id': 227, 'endpoint__product__prod_type__member': True, 'endpoint__product__member': True, 'endpoint__product__prod_type__authorized_group': False, 'endpoint__product__authorized_group': False},
- {'id': 8, 'date': date(2020, 7, 1), 'last_modified': datetime(2020, 7, 1, 17, 45, 39, 791907, tzinfo=pytz.UTC), 'mitigated': False, 'mitigated_time': None, 'mitigated_by_id': None, 'false_positive': False, 'out_of_scope': False, 'risk_accepted': False, 'endpoint_id': 8, 'finding_id': 231, 'endpoint__product__prod_type__member': True, 'endpoint__product__member': True, 'endpoint__product__prod_type__authorized_group': False, 'endpoint__product__authorized_group': False}
+ {'id': 8, 'date': date(2020, 7, 1), 'last_modified': datetime(2020, 7, 1, 17, 45, 39, 791907, tzinfo=pytz.UTC), 'mitigated': False, 'mitigated_time': None, 'mitigated_by_id': None, 'false_positive': False, 'out_of_scope': False, 'risk_accepted': False, 'endpoint_id': 8, 'finding_id': 231, 'endpoint__product__prod_type__member': True, 'endpoint__product__member': True, 'endpoint__product__prod_type__authorized_group': False, 'endpoint__product__authorized_group': False},
],
)
self.assertSequenceEqual(
@@ -223,16 +223,16 @@ def test_endpoint_queries(self):
[
[
{'epoch': 1590969600000, 'grouped_date': date(2020, 6, 1), 'critical': 0, 'high': 0, 'medium': 0, 'low': 0, 'info': 0, 'total': 0, 'closed': 0},
- {'epoch': 1593561600000, 'grouped_date': date(2020, 7, 1), 'critical': 0, 'high': 1, 'medium': 0, 'low': 0, 'info': 5, 'total': 6, 'closed': 0}
+ {'epoch': 1593561600000, 'grouped_date': date(2020, 7, 1), 'critical': 0, 'high': 1, 'medium': 0, 'low': 0, 'info': 5, 'total': 6, 'closed': 0},
],
[
{'epoch': 1590969600000, 'grouped_date': date(2020, 6, 1), 'critical': 0, 'high': 0, 'medium': 0, 'low': 0, 'info': 0, 'total': 0},
- {'epoch': 1593561600000, 'grouped_date': date(2020, 7, 1), 'critical': 0, 'high': 1, 'medium': 0, 'low': 0, 'info': 4, 'total': 5}
+ {'epoch': 1593561600000, 'grouped_date': date(2020, 7, 1), 'critical': 0, 'high': 1, 'medium': 0, 'low': 0, 'info': 4, 'total': 5},
],
[
{'epoch': 1590969600000, 'grouped_date': date(2020, 6, 1), 'critical': 0, 'high': 0, 'medium': 0, 'low': 0, 'info': 0, 'total': 0},
- {'epoch': 1593561600000, 'grouped_date': date(2020, 7, 1), 'critical': 0, 'high': 0, 'medium': 0, 'low': 0, 'info': 1, 'total': 1}
- ]
+ {'epoch': 1593561600000, 'grouped_date': date(2020, 7, 1), 'critical': 0, 'high': 0, 'medium': 0, 'low': 0, 'info': 1, 'total': 1},
+ ],
],
)
self.assertEqual(
@@ -241,18 +241,18 @@ def test_endpoint_queries(self):
[
{'epoch': 1592784000000, 'grouped_date': date(2020, 6, 22), 'critical': 0, 'high': 0, 'medium': 0, 'low': 0, 'info': 0, 'total': 0, 'closed': 0},
{'epoch': 1593388800000, 'grouped_date': date(2020, 6, 29), 'critical': 0, 'high': 1, 'medium': 0, 'low': 0, 'info': 5, 'total': 6, 'closed': 0},
- {'epoch': 1593993600000, 'grouped_date': date(2020, 7, 6), 'critical': 0, 'high': 0, 'medium': 0, 'low': 0, 'info': 0, 'total': 0, 'closed': 0}
+ {'epoch': 1593993600000, 'grouped_date': date(2020, 7, 6), 'critical': 0, 'high': 0, 'medium': 0, 'low': 0, 'info': 0, 'total': 0, 'closed': 0},
],
[
{'epoch': 1592784000000, 'grouped_date': date(2020, 6, 22), 'critical': 0, 'high': 0, 'medium': 0, 'low': 0, 'info': 0, 'total': 0},
{'epoch': 1593388800000, 'grouped_date': date(2020, 6, 29), 'critical': 0, 'high': 1, 'medium': 0, 'low': 0, 'info': 4, 'total': 5},
- {'epoch': 1593993600000, 'grouped_date': date(2020, 7, 6), 'critical': 0, 'high': 0, 'medium': 0, 'low': 0, 'info': 0, 'total': 0}
+ {'epoch': 1593993600000, 'grouped_date': date(2020, 7, 6), 'critical': 0, 'high': 0, 'medium': 0, 'low': 0, 'info': 0, 'total': 0},
],
[
{'epoch': 1592784000000, 'grouped_date': date(2020, 6, 22), 'critical': 0, 'high': 0, 'medium': 0, 'low': 0, 'info': 0, 'total': 0},
{'epoch': 1593388800000, 'grouped_date': date(2020, 6, 29), 'critical': 0, 'high': 0, 'medium': 0, 'low': 0, 'info': 1, 'total': 1},
- {'epoch': 1593993600000, 'grouped_date': date(2020, 7, 6), 'critical': 0, 'high': 0, 'medium': 0, 'low': 0, 'info': 0, 'total': 0}
- ]
+ {'epoch': 1593993600000, 'grouped_date': date(2020, 7, 6), 'critical': 0, 'high': 0, 'medium': 0, 'low': 0, 'info': 0, 'total': 0},
+ ],
],
)
self.assertEqual(endpoint_queries['weeks_between'], 2)
diff --git a/unittests/test_migrations.py b/unittests/test_migrations.py
index 6800f3346e..16b7525c47 100644
--- a/unittests/test_migrations.py
+++ b/unittests/test_migrations.py
@@ -24,13 +24,13 @@ def prepare(self):
self.engagement = Engagement.objects.create(
product_id=self.product.pk,
target_start=datetime.datetime(2020, 1, 1, tzinfo=timezone.utc),
- target_end=datetime.datetime(2022, 1, 1, tzinfo=timezone.utc)
+ target_end=datetime.datetime(2022, 1, 1, tzinfo=timezone.utc),
)
self.test = Test.objects.create(
engagement_id=self.engagement.pk,
target_start=datetime.datetime(2020, 1, 1, tzinfo=timezone.utc),
target_end=datetime.datetime(2022, 1, 1, tzinfo=timezone.utc),
- test_type_id=1
+ test_type_id=1,
)
from django.contrib.auth import get_user_model
user = get_user_model().objects.create().pk
@@ -39,16 +39,16 @@ def prepare(self):
self.endpoint = Endpoint.objects.create(host='foo.bar', product_id=self.product.pk).pk
self.endpoint_status = Endpoint_Status.objects.create(
finding_id=self.finding,
- endpoint_id=self.endpoint
+ endpoint_id=self.endpoint,
).pk
Endpoint.objects.get(id=self.endpoint).endpoint_status.add(
- Endpoint_Status.objects.get(id=self.endpoint_status)
+ Endpoint_Status.objects.get(id=self.endpoint_status),
)
Finding.objects.get(id=self.finding).endpoint_status.add(
- Endpoint_Status.objects.get(id=self.endpoint_status)
+ Endpoint_Status.objects.get(id=self.endpoint_status),
)
Finding.objects.get(id=self.finding).endpoints.add(
- Endpoint.objects.get(id=self.endpoint).pk
+ Endpoint.objects.get(id=self.endpoint).pk,
)
self.presudotest_before_migration()
diff --git a/unittests/test_parsers.py b/unittests/test_parsers.py
index 43ee4e2419..53af54d17a 100644
--- a/unittests/test_parsers.py
+++ b/unittests/test_parsers.py
@@ -27,23 +27,23 @@ def test_file_existence(self):
doc_file = os.path.join(basedir, 'docs', 'content', 'en', 'integrations', 'parsers', category, f"{doc_name}.md")
self.assertTrue(
os.path.isfile(doc_file),
- f"Documentation file '{doc_file}' is missing or using different name"
+ f"Documentation file '{doc_file}' is missing or using different name",
)
with open(doc_file) as file:
content = file.read()
self.assertTrue(re.search("title:", content),
- f"Documentation file '{doc_file}' does not contain a title"
+ f"Documentation file '{doc_file}' does not contain a title",
)
self.assertTrue(re.search("toc_hide: true", content),
- f"Documentation file '{doc_file}' does not contain toc_hide: true"
+ f"Documentation file '{doc_file}' does not contain toc_hide: true",
)
if category == "file":
self.assertTrue(re.search("### Sample Scan Data", content),
- f"Documentation file '{doc_file}' does not contain ### Sample Scan Data"
+ f"Documentation file '{doc_file}' does not contain ### Sample Scan Data",
)
self.assertTrue(re.search("https://github.com/DefectDojo/django-DefectDojo/tree/master/unittests/scans", content),
- f"Documentation file '{doc_file}' does not contain https://github.com/DefectDojo/django-DefectDojo/tree/master/unittests/scans"
+ f"Documentation file '{doc_file}' does not contain https://github.com/DefectDojo/django-DefectDojo/tree/master/unittests/scans",
)
if parser_dir.name not in [
@@ -53,7 +53,7 @@ def test_file_existence(self):
parser_test_file = os.path.join(basedir, 'unittests', 'tools', f"test_{parser_dir.name}_parser.py")
self.assertTrue(
os.path.isfile(parser_test_file),
- f"Unittest of parser '{parser_test_file}' is missing or using different name"
+ f"Unittest of parser '{parser_test_file}' is missing or using different name",
)
if parser_dir.name not in [
@@ -63,7 +63,7 @@ def test_file_existence(self):
scan_dir = os.path.join(basedir, 'unittests', 'scans', parser_dir.name)
self.assertTrue(
os.path.isdir(scan_dir),
- f"Test files for unittest of parser '{scan_dir}' are missing or using different name"
+ f"Test files for unittest of parser '{scan_dir}' are missing or using different name",
)
if category == 'api':
@@ -75,7 +75,7 @@ def test_file_existence(self):
importer_test_file = os.path.join(basedir, 'unittests', 'tools', f"test_{parser_dir.name}_importer.py")
self.assertTrue(
os.path.isfile(importer_test_file),
- f"Unittest of importer '{importer_test_file}' is missing or using different name"
+ f"Unittest of importer '{importer_test_file}' is missing or using different name",
)
for file in os.scandir(os.path.join(basedir, 'dojo', 'tools', parser_dir.name)):
if file.is_file() and file.name != '__pycache__' and file.name != "__init__.py":
@@ -100,11 +100,11 @@ def test_file_existence(self):
def test_parser_existence(self):
for docs in os.scandir(os.path.join(basedir, 'docs', 'content', 'en', 'integrations', 'parsers', 'file')):
if docs.name not in [
- '_index.md', 'codeql.md', 'edgescan.md'
+ '_index.md', 'codeql.md', 'edgescan.md',
]:
with self.subTest(parser=docs.name.split('.md')[0], category='parser'):
parser = os.path.join(basedir, 'dojo', 'tools', f"{docs.name.split('.md')[0]}", "parser.py")
self.assertTrue(
os.path.isfile(parser),
- f"Parser '{parser}' is missing or using different name"
+ f"Parser '{parser}' is missing or using different name",
)
diff --git a/unittests/test_remote_user.py b/unittests/test_remote_user.py
index 28d9a139bd..02dd871169 100644
--- a/unittests/test_remote_user.py
+++ b/unittests/test_remote_user.py
@@ -34,8 +34,8 @@ def test_disabled(self):
def test_basic(self):
resp = self.client1.get('/profile',
headers={
- "Remote-User": self.user.username
- }
+ "Remote-User": self.user.username,
+ },
)
self.assertEqual(resp.status_code, 200)
@@ -53,7 +53,7 @@ def test_update_user(self):
"Remote-Firstname": "new_first",
"Remote-Lastname": "new_last",
"Remote-Email": "new@mail.com",
- }
+ },
)
self.assertEqual(resp.status_code, 200)
updated_user = User.objects.get(pk=self.user.pk)
@@ -72,7 +72,7 @@ def test_update_groups_cleanup(self):
headers={
"Remote-User": self.user.username,
"Remote-Groups": self.group1.name,
- }
+ },
)
self.assertEqual(resp.status_code, 200)
dgms = Dojo_Group_Member.objects.filter(user=self.user)
@@ -83,7 +83,7 @@ def test_update_groups_cleanup(self):
headers={
"Remote-User": self.user.username,
"Remote-Groups": self.group2.name,
- }
+ },
)
self.assertEqual(resp.status_code, 200)
dgms = Dojo_Group_Member.objects.all().filter(user=self.user)
@@ -101,7 +101,7 @@ def test_update_multiple_groups_cleanup(self):
headers={
"Remote-User": self.user.username,
"Remote-Groups": f"{self.group1.name},{self.group2.name}",
- }
+ },
)
self.assertEqual(resp.status_code, 200)
dgms = Dojo_Group_Member.objects.filter(user=self.user)
@@ -118,7 +118,7 @@ def test_update_groups_no_cleanup(self):
headers={
"Remote-User": self.user.username,
"Remote-Groups": self.group1.name,
- }
+ },
)
self.assertEqual(resp.status_code, 200)
@@ -126,7 +126,7 @@ def test_update_groups_no_cleanup(self):
headers={
"Remote-User": self.user.username,
"Remote-Groups": self.group2.name,
- }
+ },
)
self.assertEqual(resp.status_code, 200)
dgms = Dojo_Group_Member.objects.filter(user=self.user)
@@ -142,7 +142,7 @@ def test_trusted_proxy(self):
REMOTE_ADDR='192.168.0.42',
headers={
"Remote-User": self.user.username,
- }
+ },
)
self.assertEqual(resp.status_code, 200)
@@ -157,7 +157,7 @@ def test_untrusted_proxy(self):
REMOTE_ADDR='192.168.1.42',
headers={
"Remote-User": self.user.username,
- }
+ },
)
self.assertEqual(resp.status_code, 302)
self.assertIn('Requested came from untrusted proxy', cm.output[0])
diff --git a/unittests/test_rest_framework.py b/unittests/test_rest_framework.py
index ce1ad77da1..242c95d223 100644
--- a/unittests/test_rest_framework.py
+++ b/unittests/test_rest_framework.py
@@ -406,7 +406,7 @@ def test_detail_prefetch(self):
current_objects = self.client.get(self.url, format='json').data
relative_url = self.url + '{}/'.format(current_objects['results'][0]['id'])
response = self.client.get(relative_url, data={
- "prefetch": ','.join(prefetchable_fields)
+ "prefetch": ','.join(prefetchable_fields),
})
self.assertEqual(200, response.status_code)
@@ -496,7 +496,7 @@ def test_list_prefetch(self):
prefetchable_fields = [x[0] for x in _get_prefetchable_fields(self.viewset.serializer_class)]
response = self.client.get(self.url, data={
- "prefetch": ','.join(prefetchable_fields)
+ "prefetch": ','.join(prefetchable_fields),
})
self.assertEqual(200, response.status_code)
@@ -830,7 +830,7 @@ def __init__(self, *args, **kwargs):
'icon': '',
'website': '',
'website_found': '',
- 'created': '2018-08-16T16:58:23.908Z'
+ 'created': '2018-08-16T16:58:23.908Z',
}
self.update_fields = {'version': '9.0'}
self.test_type = TestType.OBJECT_PERMISSIONS
@@ -902,7 +902,7 @@ def test_update_patch_unsuccessful(self):
unsucessful_payload = {
'endpoint': object2['endpoint'],
- 'finding': object2['finding']
+ 'finding': object2['finding'],
}
relative_url = self.url + '{}/'.format(object1['id'])
@@ -923,7 +923,7 @@ def test_update_put_unsuccessful(self):
unsucessful_payload = {
'endpoint': object2['endpoint'],
- 'finding': object2['finding']
+ 'finding': object2['finding'],
}
relative_url = self.url + '{}/'.format(object1['id'])
@@ -948,7 +948,7 @@ def __init__(self, *args, **kwargs):
'query': 'test=true',
'fragment': 'test-1',
'product': 1,
- "tags": ["mytag", "yourtag"]
+ "tags": ["mytag", "yourtag"],
}
self.update_fields = {'protocol': 'ftp', 'tags': ['one_new_tag']}
self.test_type = TestType.OBJECT_PERMISSIONS
@@ -979,7 +979,7 @@ def __init__(self, *args, **kwargs):
"reason": "",
"test_strategy": "",
"product": "1",
- "tags": ["mytag"]
+ "tags": ["mytag"],
}
self.update_fields = {'version': 'latest'}
self.test_type = TestType.OBJECT_PERMISSIONS
@@ -1017,9 +1017,9 @@ def __init__(self, *args, **kwargs):
"updated": "2023-09-15T17:17:39.462854Z",
"owner": 1,
"accepted_findings": [
- 226
+ 226,
],
- "notes": []
+ "notes": [],
}
self.update_fields = {'name': 'newName'}
self.test_type = TestType.OBJECT_PERMISSIONS
@@ -1054,9 +1054,9 @@ def test_update_forbidden_engagement(self):
"updated": "2023-09-15T17:17:39.462854Z",
"owner": 1,
"accepted_findings": [
- 4
+ 4,
],
- "notes": []
+ "notes": [],
}
current_objects = self.client.get(self.url, format='json').data
relative_url = self.url + '{}/'.format(current_objects['results'][0]['id'])
@@ -1076,7 +1076,7 @@ def setUp(self):
def test_request_response_post(self):
length = BurpRawRequestResponse.objects.count()
payload = {
- "req_resp": [{"request": "POST", "response": "200"}]
+ "req_resp": [{"request": "POST", "response": "200"}],
}
response = self.client.post('/api/v2/findings/7/request_response/', dumps(payload), content_type='application/json')
self.assertEqual(200, response.status_code, response.content[:1000])
@@ -1102,7 +1102,7 @@ def setUp(self):
self.url_levels = {
'findings/7': 0,
'tests/3': 0,
- 'engagements/1': 0
+ 'engagements/1': 0,
}
def test_request_response_post_and_download(self):
@@ -1112,7 +1112,7 @@ def test_request_response_post_and_download(self):
with open(f'{str(self.path)}/scans/acunetix/one_finding.xml') as testfile:
payload = {
"title": level,
- "file": testfile
+ "file": testfile,
}
response = self.client.post(f'/api/v2/{level}/files/', payload)
self.assertEqual(201, response.status_code, response.data)
@@ -1355,7 +1355,7 @@ def __init__(self, *args, **kwargs):
"high_mapping_severity": "LOW",
"critical_mapping_severity": "LOW",
"finding_text": "",
- "global_jira_sla_notification": False
+ "global_jira_sla_notification": False,
}
self.update_fields = {'epic_name_id': 1}
self.test_type = TestType.CONFIGURATION_PERMISSIONS
@@ -1424,7 +1424,7 @@ def __init__(self, *args, **kwargs):
self.payload = {
"key": "AREwS5n5TxsFUNm31CxP",
"status": "OPEN",
- "type": "VULNERABILITY"
+ "type": "VULNERABILITY",
}
self.update_fields = {'key': 'AREwS5n5TxsFUNm31CxP'}
self.test_type = TestType.STANDARD
@@ -1444,7 +1444,7 @@ def __init__(self, *args, **kwargs):
"sonarqube_issue": 1,
"finding_status": "Active, Verified",
"sonarqube_status": "OPEN",
- "transitions": "confirm"
+ "transitions": "confirm",
}
self.update_fields = {'sonarqube_status': 'CLOSED'}
self.test_type = TestType.STANDARD
@@ -1462,7 +1462,7 @@ def __init__(self, *args, **kwargs):
self.payload = {
"product": 2,
"service_key_1": "dojo_sonar_key",
- "tool_configuration": 3
+ "tool_configuration": 3,
}
self.update_fields = {'tool_configuration': 2}
self.test_type = TestType.OBJECT_PERMISSIONS
@@ -1489,7 +1489,7 @@ def __init__(self, *args, **kwargs):
"prod_type": 1,
"name": "Test Product",
"description": "test product",
- "tags": ["mytag", "yourtag"]
+ "tags": ["mytag", "yourtag"],
}
self.update_fields = {'prod_type': 2}
self.test_type = TestType.OBJECT_PERMISSIONS
@@ -1629,7 +1629,7 @@ def __init__(self, *args, **kwargs):
self.viewset = ToolTypesViewSet
self.payload = {
"name": "Tool Type",
- "description": "test tool type"
+ "description": "test tool type",
}
self.update_fields = {'description': 'changed description'}
self.test_type = TestType.CONFIGURATION_PERMISSIONS
@@ -1650,7 +1650,7 @@ def __init__(self, *args, **kwargs):
"description": "not that much",
"is_single": False,
"is_active": True,
- "is_mandatory": False
+ "is_mandatory": False,
}
self.update_fields = {'description': 'changed description'}
self.test_type = TestType.CONFIGURATION_PERMISSIONS
@@ -1670,7 +1670,7 @@ def __init__(self, *args, **kwargs):
"id": 1,
"entry": "updated_entry",
"author": '{"username": "admin"}',
- "editor": '{"username": "user1"}'
+ "editor": '{"username": "user1"}',
}
self.update_fields = {'entry': 'changed entry'}
self.test_type = TestType.STANDARD
@@ -1691,7 +1691,7 @@ def __init__(self, *args, **kwargs):
"last_name": "user",
"email": "example@email.com",
"is_active": True,
- "configuration_permissions": [217, 218]
+ "configuration_permissions": [217, 218],
}
self.update_fields = {"first_name": "test changed", "configuration_permissions": [219, 220]}
self.test_type = TestType.CONFIGURATION_PERMISSIONS
@@ -1841,7 +1841,7 @@ def test_create_not_authorized_product_name_engagement_name_auto_create_engageme
"lead": 2,
"tags": ["ci/cd", "api"],
"version": "1.0.0",
- "auto_create_context": True
+ "auto_create_context": True,
}
response = self.client.post(self.url, payload)
@@ -1873,7 +1873,7 @@ def test_create_not_authorized_product_name_engagement_name_auto_create_product(
"lead": 2,
"tags": ["ci/cd", "api"],
"version": "1.0.0",
- "auto_create_context": True
+ "auto_create_context": True,
}
response = self.client.post(self.url, payload)
@@ -1905,7 +1905,7 @@ def test_create_not_authorized_product_name_engagement_name_auto_create_product_
"lead": 2,
"tags": ["ci/cd", "api"],
"version": "1.0.0",
- "auto_create_context": True
+ "auto_create_context": True,
}
response = self.client.post(self.url, payload)
@@ -1938,7 +1938,7 @@ def test_create_authorized_product_name_engagement_name_auto_create_engagement(s
"lead": 2,
"tags": ["ci/cd", "api"],
"version": "1.0.0",
- "auto_create_context": True
+ "auto_create_context": True,
}
response = self.client.post(self.url, payload)
@@ -1949,7 +1949,7 @@ def test_create_authorized_product_name_engagement_name_auto_create_engagement(s
Permissions.Engagement_Add),
call(User.objects.get(username='admin'),
Product.objects.get(id=1),
- Permissions.Import_Scan_Result)
+ Permissions.Import_Scan_Result),
])
importer_mock.assert_called_once()
reimporter_mock.assert_not_called()
@@ -1974,7 +1974,7 @@ def test_create_authorized_product_name_engagement_name_auto_create_product(self
"lead": 2,
"tags": ["ci/cd", "api"],
"version": "1.0.0",
- "auto_create_context": True
+ "auto_create_context": True,
}
response = self.client.post(self.url, payload)
@@ -2006,7 +2006,7 @@ def test_create_authorized_product_name_engagement_name_auto_create_product_type
"lead": 2,
"tags": ["ci/cd", "api"],
"version": "1.0.0",
- "auto_create_context": True
+ "auto_create_context": True,
}
response = self.client.post(self.url, payload)
@@ -2136,7 +2136,7 @@ def test_create_authorized_product_name_engagement_name_auto_create_engagement(s
"lead": 2,
"tags": ["ci/cd", "api"],
"version": "1.0.0",
- "auto_create_context": True
+ "auto_create_context": True,
}
response = self.client.post(self.url, payload)
@@ -2147,7 +2147,7 @@ def test_create_authorized_product_name_engagement_name_auto_create_engagement(s
Permissions.Engagement_Add),
call(User.objects.get(username='admin'),
Product.objects.get(id=1),
- Permissions.Import_Scan_Result)
+ Permissions.Import_Scan_Result),
])
importer_mock.assert_called_once()
reimporter_mock.assert_not_called()
@@ -2173,7 +2173,7 @@ def test_create_authorized_product_name_engagement_name_auto_create_product(self
"lead": 2,
"tags": ["ci/cd", "api"],
"version": "1.0.0",
- "auto_create_context": True
+ "auto_create_context": True,
}
response = self.client.post(self.url, payload)
@@ -2205,7 +2205,7 @@ def test_create_authorized_product_name_engagement_name_auto_create_product_type
"lead": 2,
"tags": ["ci/cd", "api"],
"version": "1.0.0",
- "auto_create_context": True
+ "auto_create_context": True,
}
response = self.client.post(self.url, payload)
@@ -2231,7 +2231,7 @@ def test_create_not_authorized_test_id(self, mock, importer_mock, reimporter_moc
"scan_type": 'ZAP Scan',
"file": testfile,
"test": 3,
- "version": "1.0.1"
+ "version": "1.0.1",
}
response = self.client.post(self.url, payload)
self.assertEqual(403, response.status_code, response.content[:1000])
@@ -2263,7 +2263,7 @@ def test_create_not_authorized_product_name_engagement_name_auto_create_engageme
"lead": 2,
"tags": ["ci/cd", "api"],
"version": "1.0.0",
- "auto_create_context": True
+ "auto_create_context": True,
}
response = self.client.post(self.url, payload)
@@ -2295,7 +2295,7 @@ def test_create_not_authorized_product_name_engagement_name_auto_create_product(
"lead": 2,
"tags": ["ci/cd", "api"],
"version": "1.0.0",
- "auto_create_context": True
+ "auto_create_context": True,
}
response = self.client.post(self.url, payload)
@@ -2327,7 +2327,7 @@ def test_create_not_authorized_product_name_engagement_name_auto_create_product_
"lead": 2,
"tags": ["ci/cd", "api"],
"version": "1.0.0",
- "auto_create_context": True
+ "auto_create_context": True,
}
response = self.client.post(self.url, payload)
@@ -2407,7 +2407,7 @@ def __init__(self, *args, **kwargs):
"name": "Test Product Type",
"description": "Test",
"key_product": True,
- "critical_product": False
+ "critical_product": False,
}
self.update_fields = {'description': "changed"}
self.test_type = TestType.OBJECT_PERMISSIONS
@@ -2514,7 +2514,7 @@ def __init__(self, *args, **kwargs):
self.payload = {
"group": 1,
"user": 3,
- "role": 4
+ "role": 4,
}
self.update_fields = {'role': 3}
self.test_type = TestType.OBJECT_PERMISSIONS
@@ -2548,7 +2548,7 @@ def __init__(self, *args, **kwargs):
self.viewset = GlobalRoleViewSet
self.payload = {
"user": 2,
- "role": 2
+ "role": 2,
}
self.update_fields = {'role': 3}
self.test_type = TestType.STANDARD
@@ -2567,7 +2567,7 @@ def __init__(self, *args, **kwargs):
self.payload = {
"product_type": 1,
"user": 3,
- "role": 2
+ "role": 2,
}
self.update_fields = {'role': 3}
self.test_type = TestType.OBJECT_PERMISSIONS
@@ -2590,7 +2590,7 @@ def __init__(self, *args, **kwargs):
self.payload = {
"product": 3,
"user": 2,
- "role": 2
+ "role": 2,
}
self.update_fields = {'role': 3}
self.test_type = TestType.OBJECT_PERMISSIONS
@@ -2613,7 +2613,7 @@ def __init__(self, *args, **kwargs):
self.payload = {
"product_type": 1,
"group": 2,
- "role": 2
+ "role": 2,
}
self.update_fields = {'role': 3}
self.test_type = TestType.OBJECT_PERMISSIONS
@@ -2636,7 +2636,7 @@ def __init__(self, *args, **kwargs):
self.payload = {
"product": 1,
"group": 2,
- "role": 2
+ "role": 2,
}
self.update_fields = {'role': 3}
self.test_type = TestType.OBJECT_PERMISSIONS
@@ -2659,7 +2659,7 @@ def __init__(self, *args, **kwargs):
self.payload = {
'language': 'Test',
'color': 'red',
- 'created': '2018-08-16T16:58:23.908Z'
+ 'created': '2018-08-16T16:58:23.908Z',
}
self.update_fields = {'color': 'blue'}
self.test_type = TestType.CONFIGURATION_PERMISSIONS
@@ -2683,7 +2683,7 @@ def __init__(self, *args, **kwargs):
'blank': 3,
'comment': 4,
'code': 5,
- 'created': '2018-08-16T16:58:23.908Z'
+ 'created': '2018-08-16T16:58:23.908Z',
}
self.update_fields = {'code': 10}
self.test_type = TestType.OBJECT_PERMISSIONS
@@ -2705,7 +2705,7 @@ def __init__(self, *args, **kwargs):
self.viewset = ImportLanguagesView
self.payload = {
'product': 1,
- 'file': open("unittests/files/defectdojo_cloc.json")
+ 'file': open("unittests/files/defectdojo_cloc.json"),
}
self.test_type = TestType.OBJECT_PERMISSIONS
self.permission_check_class = Languages
@@ -2748,7 +2748,7 @@ def __init__(self, *args, **kwargs):
self.payload = {
'product': 1,
'user': 3,
- 'product_type_added': ["alert", "msteams"]
+ 'product_type_added': ["alert", "msteams"],
}
self.update_fields = {'product_added': ["alert", "msteams"]}
self.test_type = TestType.STANDARD
@@ -2794,7 +2794,7 @@ def __init__(self, *args, **kwargs):
self.viewname = 'development_environment'
self.viewset = DevelopmentEnvironmentViewSet
self.payload = {
- 'name': 'Test_1'
+ 'name': 'Test_1',
}
self.update_fields = {'name': 'Test_2'}
self.test_type = TestType.CONFIGURATION_PERMISSIONS
@@ -2817,7 +2817,7 @@ def __init__(self, *args, **kwargs):
self.viewname = 'test_type'
self.viewset = TestTypesViewSet
self.payload = {
- 'name': 'Test_1'
+ 'name': 'Test_1',
}
self.update_fields = {'name': 'Test_2'}
self.test_type = TestType.CONFIGURATION_PERMISSIONS
@@ -2848,7 +2848,7 @@ def __init__(self, *args, **kwargs):
self.payload = {
'cred_id': 1,
'product': 1,
- 'url': 'https://google.com'
+ 'url': 'https://google.com',
}
self.update_fields = {'url': 'https://bing.com'}
self.test_type = TestType.OBJECT_PERMISSIONS
diff --git a/unittests/test_risk_acceptance.py b/unittests/test_risk_acceptance.py
index 43a0bd578a..6a7961affa 100644
--- a/unittests/test_risk_acceptance.py
+++ b/unittests/test_risk_acceptance.py
@@ -35,7 +35,7 @@ class RiskAcceptanceTestUI(DojoTestCase):
# 'path: (binary)
'owner': 1,
'expiration_date': '2021-07-15',
- 'reactivate_expired': True
+ 'reactivate_expired': True,
}
data_remove_finding_from_ra = {
@@ -53,7 +53,7 @@ def setUp(self):
def add_risk_acceptance(self, eid, data_risk_accceptance, fid=None):
if fid:
- args = (eid, fid, )
+ args = (eid, fid)
else:
args = (eid, )
@@ -103,7 +103,7 @@ def test_add_findings_to_risk_acceptance_findings_accepted(self):
data_add_findings_to_ra = {
'add_findings': 'Add Selected Findings',
- 'accepted_findings': [4, 5]
+ 'accepted_findings': [4, 5],
}
response = self.client.post(reverse('view_risk_acceptance', args=(1, ra.id)),
@@ -133,7 +133,7 @@ def test_remove_risk_acceptance_findings_active(self):
data = {'id': ra.id}
- self.client.post(reverse('delete_risk_acceptance', args=(1, ra.id, )), data)
+ self.client.post(reverse('delete_risk_acceptance', args=(1, ra.id)), data)
self.assert_all_active_not_risk_accepted(findings)
self.assert_all_active_not_risk_accepted(Finding.objects.filter(test__engagement=1))
@@ -148,7 +148,7 @@ def test_expire_risk_acceptance_findings_active(self):
data = {'id': ra.id}
- self.client.post(reverse('expire_risk_acceptance', args=(1, ra.id, )), data)
+ self.client.post(reverse('expire_risk_acceptance', args=(1, ra.id)), data)
ra.refresh_from_db()
self.assert_all_active_not_risk_accepted(findings)
@@ -170,7 +170,7 @@ def test_expire_risk_acceptance_findings_not_active(self):
data = {'id': ra.id}
- self.client.post(reverse('expire_risk_acceptance', args=(1, ra.id, )), data)
+ self.client.post(reverse('expire_risk_acceptance', args=(1, ra.id)), data)
ra.refresh_from_db()
# no reactivation on expiry
@@ -193,7 +193,7 @@ def test_expire_risk_acceptance_sla_not_reset(self):
data = {'id': ra.id}
- self.client.post(reverse('expire_risk_acceptance', args=(1, ra.id, )), data)
+ self.client.post(reverse('expire_risk_acceptance', args=(1, ra.id)), data)
ra.refresh_from_db()
@@ -209,7 +209,7 @@ def test_expire_risk_acceptance_sla_reset(self):
data = {'id': ra.id}
- self.client.post(reverse('expire_risk_acceptance', args=(1, ra.id, )), data)
+ self.client.post(reverse('expire_risk_acceptance', args=(1, ra.id)), data)
ra.refresh_from_db()
@@ -224,7 +224,7 @@ def test_reinstate_risk_acceptance_findings_accepted(self):
data = {'id': ra.id}
- self.client.post(reverse('reinstate_risk_acceptance', args=(1, ra.id, )), data)
+ self.client.post(reverse('reinstate_risk_acceptance', args=(1, ra.id)), data)
ra.refresh_from_db()
expiration_delta_days = get_system_setting('risk_acceptance_form_default_days', 90)
diff --git a/unittests/test_search_parser.py b/unittests/test_search_parser.py
index 06115c7e07..9e4d221fc5 100644
--- a/unittests/test_search_parser.py
+++ b/unittests/test_search_parser.py
@@ -58,7 +58,7 @@ def test_parse_query(self):
self.assertEqual(keywords[1], "space inside")
operators, keywords = parse_search_query(
- "tags:anchore cve:CVE-2020-1234 jquery tags:beer"
+ "tags:anchore cve:CVE-2020-1234 jquery tags:beer",
)
self.assertEqual(len(operators), 2)
self.assertEqual(len(operators["tags"]), 2)
diff --git a/unittests/test_utils.py b/unittests/test_utils.py
index 70c8a9c8c0..3bf031ba10 100644
--- a/unittests/test_utils.py
+++ b/unittests/test_utils.py
@@ -198,8 +198,8 @@ def __exit__(self, exc_type, exc_value, exc_traceback):
self.test_case.assertEqual(
created_count, self.num,
"%i %s objects created, %i expected. query: %s, first 100 objects: %s" % (
- created_count, self.queryset.model, self.num, self.queryset.query, self.queryset.all().order_by('-id')[:100]
- )
+ created_count, self.queryset.model, self.num, self.queryset.query, self.queryset.all().order_by('-id')[:100],
+ ),
)
@@ -222,7 +222,7 @@ def assertTestImportModelsCreated(test_case, imports=0, reimports=0, affected_fi
tifa_created_count,
tifa_closed_count,
tifa_reactivated_count,
- tifa_untouched_count
+ tifa_untouched_count,
)
diff --git a/unittests/tools/test_anchore_enterprise_parser.py b/unittests/tools/test_anchore_enterprise_parser.py
index 1d286d8a84..b7badd1571 100644
--- a/unittests/tools/test_anchore_enterprise_parser.py
+++ b/unittests/tools/test_anchore_enterprise_parser.py
@@ -45,22 +45,22 @@ def test_anchore_policy_check_extract_vulnerability_id(self):
def test_anchore_policy_check_parser_search_filepath(self):
file_path = search_filepath(
- "MEDIUM Vulnerability found in non-os package type (python) - /usr/lib64/python2.7/lib-dynload/Python (CVE-2014-4616 - https://nvd.nist.gov/vuln/detail/CVE-2014-4616)"
+ "MEDIUM Vulnerability found in non-os package type (python) - /usr/lib64/python2.7/lib-dynload/Python (CVE-2014-4616 - https://nvd.nist.gov/vuln/detail/CVE-2014-4616)",
)
self.assertEqual("/usr/lib64/python2.7/lib-dynload/Python", file_path)
file_path = search_filepath(
- "HIGH Vulnerability found in non-os package type (java) - /root/.m2/repository/org/apache/struts/struts-core/1.3.8/struts-core-1.3.8.jar (CVE-2015-0899 - https://nvd.nist.gov/vuln/detail/CVE-2015-0899)"
+ "HIGH Vulnerability found in non-os package type (java) - /root/.m2/repository/org/apache/struts/struts-core/1.3.8/struts-core-1.3.8.jar (CVE-2015-0899 - https://nvd.nist.gov/vuln/detail/CVE-2015-0899)",
)
self.assertEqual(
"/root/.m2/repository/org/apache/struts/struts-core/1.3.8/struts-core-1.3.8.jar",
file_path,
)
file_path = search_filepath(
- "test /usr/local/bin/ag package type (java) - /root/.m2/repository/org/apache/struts/struts-core/1.3.8/struts-core-1.3.8.jar (CVE-2015-0899 - https://nvd.nist.gov/vuln/detail/CVE-2015-0899)"
+ "test /usr/local/bin/ag package type (java) - /root/.m2/repository/org/apache/struts/struts-core/1.3.8/struts-core-1.3.8.jar (CVE-2015-0899 - https://nvd.nist.gov/vuln/detail/CVE-2015-0899)",
)
self.assertEqual("/usr/local/bin/ag", file_path)
file_path = search_filepath(
- "HIGH Vulnerability found in os package type (rpm) - kernel-headers (RHSA-2017:0372 - https://access.redhat.com/errata/RHSA-2017:0372)"
+ "HIGH Vulnerability found in os package type (rpm) - kernel-headers (RHSA-2017:0372 - https://access.redhat.com/errata/RHSA-2017:0372)",
)
self.assertEqual("", file_path)
file_path = search_filepath("test")
diff --git a/unittests/tools/test_api_bugcrowd_importer.py b/unittests/tools/test_api_bugcrowd_importer.py
index e8fb4f784e..9e8ca88ac7 100644
--- a/unittests/tools/test_api_bugcrowd_importer.py
+++ b/unittests/tools/test_api_bugcrowd_importer.py
@@ -91,7 +91,7 @@ def test_prepare_client_no_configuration(self, mock_foo):
mock_foo.count.return_value = 0
with self.assertRaisesRegex(
- ValidationError, r'There are no API Scan Configurations for this Product\. Please add at least one API Scan Configuration for bugcrowd to this Product\. Product: "Product" \(None\)'
+ ValidationError, r'There are no API Scan Configurations for this Product\. Please add at least one API Scan Configuration for bugcrowd to this Product\. Product: "Product" \(None\)',
):
bugrcrowd_api_importer = BugcrowdApiImporter()
bugrcrowd_api_importer.prepare_client(self.test)
@@ -106,11 +106,11 @@ def test_prepare_client_one_product_configuration(self, mock_foo):
bugrcrowd_api_importer = BugcrowdApiImporter()
bugcrowd_api, api_scan_configuration = bugrcrowd_api_importer.prepare_client(
- self.test
+ self.test,
)
mock_foo.filter.assert_called_with(
- product=self.product, tool_configuration__tool_type__name="Bugcrowd API"
+ product=self.product, tool_configuration__tool_type__name="Bugcrowd API",
)
self.assertEqual(api_scan_configuration, self.api_scan_configuration)
self.assertEqual(bugcrowd_api.api_token, "API_KEY")
@@ -118,7 +118,7 @@ def test_prepare_client_one_product_configuration(self, mock_foo):
def test_prepare_client_one_test_configuration(self):
bugrcrowd_api_importer = BugcrowdApiImporter()
bugcrowd_api, api_scan_configuration = bugrcrowd_api_importer.prepare_client(
- self.test_2
+ self.test_2,
)
self.assertEqual(api_scan_configuration, self.api_scan_configuration_2)
diff --git a/unittests/tools/test_api_bugcrowd_parser.py b/unittests/tools/test_api_bugcrowd_parser.py
index 2569fb16cc..4433ea61ee 100644
--- a/unittests/tools/test_api_bugcrowd_parser.py
+++ b/unittests/tools/test_api_bugcrowd_parser.py
@@ -33,7 +33,7 @@ def test_parse_file_with_one_vuln_has_one_findings(self):
finding = findings[0]
self.assertEqual(finding.title, "JWT Alg none")
self.assertEqual(
- datetime.datetime.date(finding.date), datetime.date(2002, 4, 1)
+ datetime.datetime.date(finding.date), datetime.date(2002, 4, 1),
)
self.assertEqual(str(finding.unsaved_endpoints[0]), "https://example.com")
self.assertEqual(finding.severity, "Info")
@@ -41,11 +41,11 @@ def test_parse_file_with_one_vuln_has_one_findings(self):
self.assertEqual(finding.mitigation, "Properly do JWT")
self.assertEqual(finding.active, True)
self.assertEqual(
- finding.unique_id_from_tool, "a4201d47-62e1-4287-9ff6-30807ae9d36a"
+ finding.unique_id_from_tool, "a4201d47-62e1-4287-9ff6-30807ae9d36a",
)
self.assertIn(
"/submissions/a4201d47-62e1-4287-9ff6-30807ae9d36a",
- finding.references
+ finding.references,
)
for endpoint in finding.unsaved_endpoints:
endpoint.clean()
@@ -64,23 +64,23 @@ def test_parse_file_with_multiple_vuln_has_multiple_finding(self):
self.assertEqual(finding_3.title, "you did something wrong (returned)")
self.assertEqual(
- datetime.datetime.date(finding_1.date), datetime.date(2000, 1, 1)
+ datetime.datetime.date(finding_1.date), datetime.date(2000, 1, 1),
)
self.assertEqual(
- datetime.datetime.date(finding_2.date), datetime.date(2000, 1, 2)
+ datetime.datetime.date(finding_2.date), datetime.date(2000, 1, 2),
)
self.assertEqual(
- datetime.datetime.date(finding_3.date), datetime.date(2000, 1, 3)
+ datetime.datetime.date(finding_3.date), datetime.date(2000, 1, 3),
)
self.assertEqual(
- str(finding_1.unsaved_endpoints[0]), "https://example.com/1"
+ str(finding_1.unsaved_endpoints[0]), "https://example.com/1",
)
self.assertEqual(
- str(finding_2.unsaved_endpoints[0]), "https://example.com/2"
+ str(finding_2.unsaved_endpoints[0]), "https://example.com/2",
)
self.assertEqual(
- str(finding_3.unsaved_endpoints[0]), "https://example.com/3"
+ str(finding_3.unsaved_endpoints[0]), "https://example.com/3",
)
for endpoint in finding_1.unsaved_endpoints:
endpoint.clean()
@@ -106,18 +106,18 @@ def test_parse_file_with_multiple_vuln_has_multiple_finding(self):
self.assertEqual(finding_3.risk_accepted, False)
self.assertEqual(
- finding_1.unique_id_from_tool, "3b0e6b2a-c21e-493e-bd19-de40f525016e"
+ finding_1.unique_id_from_tool, "3b0e6b2a-c21e-493e-bd19-de40f525016e",
)
self.assertEqual(
- finding_2.unique_id_from_tool, "b2f1066a-6188-4479-bab8-39cc5434f06f"
+ finding_2.unique_id_from_tool, "b2f1066a-6188-4479-bab8-39cc5434f06f",
)
self.assertEqual(
- finding_3.unique_id_from_tool, "335a7ba5-57ba-485a-b40e-2f9aa4e19786"
+ finding_3.unique_id_from_tool, "335a7ba5-57ba-485a-b40e-2f9aa4e19786",
)
def test_parse_file_with_not_reproducible_finding(self):
with open(
- "unittests/scans/api_bugcrowd/bugcrowd_not_reproducible.json"
+ "unittests/scans/api_bugcrowd/bugcrowd_not_reproducible.json",
) as testfile:
# description = """
@@ -134,7 +134,7 @@ def test_parse_file_with_not_reproducible_finding(self):
finding = findings[0]
self.assertEqual(finding.title, "JWT Alg none")
self.assertEqual(
- datetime.datetime.date(finding.date), datetime.date(2002, 4, 1)
+ datetime.datetime.date(finding.date), datetime.date(2002, 4, 1),
)
self.assertEqual(str(finding.unsaved_endpoints[0]), "https://example.com")
self.assertEqual(finding.severity, "Info")
@@ -143,7 +143,7 @@ def test_parse_file_with_not_reproducible_finding(self):
self.assertEqual(finding.active, False)
self.assertEqual(finding.false_p, True)
self.assertEqual(
- finding.unique_id_from_tool, "a4201d47-62e1-4287-9ff6-30807ae9d36a"
+ finding.unique_id_from_tool, "a4201d47-62e1-4287-9ff6-30807ae9d36a",
)
for endpoint in finding.unsaved_endpoints:
endpoint.clean()
diff --git a/unittests/tools/test_api_edgescan_parser.py b/unittests/tools/test_api_edgescan_parser.py
index 94d45fabf6..93399d952e 100644
--- a/unittests/tools/test_api_edgescan_parser.py
+++ b/unittests/tools/test_api_edgescan_parser.py
@@ -20,7 +20,7 @@ def get_description_for_scan_types(self):
parser = ApiEdgescanParser()
self.assertEqual(
parser.get_description_for_scan_types(scan_type),
- "Edgescan findings can be imported by API or JSON file."
+ "Edgescan findings can be imported by API or JSON file.",
)
def test_requires_file(self):
diff --git a/unittests/tools/test_api_sonarqube_importer.py b/unittests/tools/test_api_sonarqube_importer.py
index 0b30008c1c..f2a49cc20d 100644
--- a/unittests/tools/test_api_sonarqube_importer.py
+++ b/unittests/tools/test_api_sonarqube_importer.py
@@ -70,7 +70,7 @@ class TestSonarqubeImporterNoSQToolConfig(DojoTestCase):
# Testing case no 1. https://github.com/DefectDojo/django-DefectDojo/pull/4676
fixtures = [
'unit_sonarqube_toolType.json',
- 'unit_sonarqube_product.json'
+ 'unit_sonarqube_product.json',
]
def setUp(self):
@@ -88,7 +88,7 @@ class TestSonarqubeImporterOneSQToolConfig(DojoTestCase):
fixtures = [
'unit_sonarqube_toolType.json',
'unit_sonarqube_toolConfig1.json',
- 'unit_sonarqube_product.json'
+ 'unit_sonarqube_product.json',
]
def setUp(self):
@@ -107,7 +107,7 @@ class TestSonarqubeImporterMultipleSQToolConfig(DojoTestCase):
'unit_sonarqube_toolType.json',
'unit_sonarqube_toolConfig1.json',
'unit_sonarqube_toolConfig2.json',
- 'unit_sonarqube_product.json'
+ 'unit_sonarqube_product.json',
]
def setUp(self):
@@ -127,7 +127,7 @@ class TestSonarqubeImporterOneSQConfigNoKey(DojoTestCase):
'unit_sonarqube_toolConfig1.json',
'unit_sonarqube_toolConfig2.json',
'unit_sonarqube_product.json',
- 'unit_sonarqube_sqcNoKey.json'
+ 'unit_sonarqube_sqcNoKey.json',
]
def setUp(self):
@@ -153,7 +153,7 @@ class TestSonarqubeImporterOneSQConfigWithKey(DojoTestCase):
'unit_sonarqube_toolConfig1.json',
'unit_sonarqube_toolConfig2.json',
'unit_sonarqube_product.json',
- 'unit_sonarqube_sqcWithKey.json'
+ 'unit_sonarqube_sqcWithKey.json',
]
def setUp(self):
@@ -180,7 +180,7 @@ class TestSonarqubeImporterMultipleSQConfigs(DojoTestCase):
'unit_sonarqube_toolConfig2.json',
'unit_sonarqube_product.json',
'unit_sonarqube_sqcNoKey.json',
- 'unit_sonarqube_sqcWithKey.json'
+ 'unit_sonarqube_sqcWithKey.json',
]
def setUp(self):
@@ -201,7 +201,7 @@ class TestSonarqubeImporterSelectedSQConfigsNoKey(DojoTestCase):
'unit_sonarqube_toolConfig2.json',
'unit_sonarqube_product.json',
'unit_sonarqube_sqcNoKey.json',
- 'unit_sonarqube_sqcWithKey.json'
+ 'unit_sonarqube_sqcWithKey.json',
]
def setUp(self):
@@ -209,7 +209,7 @@ def setUp(self):
engagement = Engagement(product=product)
self.test = Test(
engagement=engagement,
- api_scan_configuration=Product_API_Scan_Configuration.objects.all().first()
+ api_scan_configuration=Product_API_Scan_Configuration.objects.all().first(),
)
@mock.patch('dojo.tools.api_sonarqube.api_client.SonarQubeAPI.find_project', dummy_product)
@@ -231,7 +231,7 @@ class TestSonarqubeImporterSelectedSQConfigsWithKey(DojoTestCase):
'unit_sonarqube_toolConfig2.json',
'unit_sonarqube_product.json',
'unit_sonarqube_sqcNoKey.json',
- 'unit_sonarqube_sqcWithKey.json'
+ 'unit_sonarqube_sqcWithKey.json',
]
def setUp(self):
@@ -239,13 +239,13 @@ def setUp(self):
engagement = Engagement(product=product)
self.test = Test(
engagement=engagement,
- api_scan_configuration=Product_API_Scan_Configuration.objects.all().last()
+ api_scan_configuration=Product_API_Scan_Configuration.objects.all().last(),
)
other_product = Product(name='other product')
other_engagement = Engagement(product=other_product)
self.other_test = Test(
engagement=other_engagement,
- api_scan_configuration=Product_API_Scan_Configuration.objects.all().last()
+ api_scan_configuration=Product_API_Scan_Configuration.objects.all().last(),
)
@mock.patch('dojo.tools.api_sonarqube.api_client.SonarQubeAPI.get_project', dummy_product)
@@ -272,7 +272,7 @@ class TestSonarqubeImporterExternalRule(DojoTestCase):
'unit_sonarqube_toolConfig2.json',
'unit_sonarqube_product.json',
'unit_sonarqube_sqcNoKey.json',
- 'unit_sonarqube_sqcWithKey.json'
+ 'unit_sonarqube_sqcWithKey.json',
]
def setUp(self):
@@ -280,7 +280,7 @@ def setUp(self):
engagement = Engagement(product=product)
self.test = Test(
engagement=engagement,
- api_scan_configuration=Product_API_Scan_Configuration.objects.all().last()
+ api_scan_configuration=Product_API_Scan_Configuration.objects.all().last(),
)
@mock.patch('dojo.tools.api_sonarqube.api_client.SonarQubeAPI.get_project', dummy_product)
@@ -308,7 +308,7 @@ class TestSonarqubeImporterTwoIssuesNoHotspots(DojoTestCase):
'unit_sonarqube_toolType.json',
'unit_sonarqube_toolConfig1.json',
'unit_sonarqube_sqcWithKey.json',
- 'unit_sonarqube_product.json'
+ 'unit_sonarqube_product.json',
]
def setUp(self):
@@ -333,7 +333,7 @@ class TestSonarqubeImporterNoIssuesOneHotspot(DojoTestCase):
'unit_sonarqube_toolType.json',
'unit_sonarqube_toolConfig1.json',
'unit_sonarqube_sqcWithKey.json',
- 'unit_sonarqube_product.json'
+ 'unit_sonarqube_product.json',
]
def setUp(self):
@@ -358,7 +358,7 @@ class TestSonarqubeImporterNoIssuesTwoHotspots(DojoTestCase):
'unit_sonarqube_toolType.json',
'unit_sonarqube_toolConfig1.json',
'unit_sonarqube_sqcWithKey.json',
- 'unit_sonarqube_product.json'
+ 'unit_sonarqube_product.json',
]
def setUp(self):
@@ -383,7 +383,7 @@ class TestSonarqubeImporterTwoIssuesTwoHotspots(DojoTestCase):
'unit_sonarqube_toolType.json',
'unit_sonarqube_toolConfig1.json',
'unit_sonarqube_sqcWithKey.json',
- 'unit_sonarqube_product.json'
+ 'unit_sonarqube_product.json',
]
def setUp(self):
@@ -408,7 +408,7 @@ class TestSonarqubeImporterValidateHotspotData(DojoTestCase):
'unit_sonarqube_toolType.json',
'unit_sonarqube_toolConfig1.json',
'unit_sonarqube_sqcWithKey.json',
- 'unit_sonarqube_product.json'
+ 'unit_sonarqube_product.json',
]
def setUp(self):
@@ -437,7 +437,7 @@ def test_parser(self):
'\n\n'
'There is a risk if you answered yes to any of those questions.'
'\n\n',
- findings[0].description
+ findings[0].description,
)
self.assertEqual(str(findings[0].severity), 'High')
self.assertMultiLineEqual(
@@ -459,7 +459,7 @@ def test_parser(self):
'\n'
'[Hard Coded Password](http://h3xstream.github.io/find-sec-bugs/bugs.htm#HARD_CODE_PASSWORD)'
'\n',
- findings[0].references
+ findings[0].references,
)
self.assertEqual(str(findings[0].file_path), 'internal.dummy.project:spec/support/user_fixture.rb')
self.assertEqual(findings[0].line, 9)
@@ -479,7 +479,7 @@ class TestSonarqubeImporterHotspotRule_WO_Risk_Description(DojoTestCase):
'unit_sonarqube_toolType.json',
'unit_sonarqube_toolConfig1.json',
'unit_sonarqube_sqcWithKey.json',
- 'unit_sonarqube_product.json'
+ 'unit_sonarqube_product.json',
]
def setUp(self):
@@ -508,7 +508,7 @@ def test_parser(self):
'\n\n'
'There is a risk if you answered yes to any of those questions.'
'\n\n',
- findings[0].description
+ findings[0].description,
)
self.assertEqual(str(findings[0].severity), 'High')
self.assertEqual(findings[0].references, '[Hotspot permalink](http://localhosecurity_hotspots?id=internal.dummy.project&hotspots=AXgm6Z-ophPPY0C1qhRq) \n')
diff --git a/unittests/tools/test_api_sonarqube_parser.py b/unittests/tools/test_api_sonarqube_parser.py
index ffb33b76ee..d9963d1fb7 100644
--- a/unittests/tools/test_api_sonarqube_parser.py
+++ b/unittests/tools/test_api_sonarqube_parser.py
@@ -50,10 +50,10 @@ def setUp(self):
# build Sonarqube conf (the parser need it)
tool_type, _ = Tool_Type.objects.get_or_create(name="SonarQube")
tool_conf, _ = Tool_Configuration.objects.get_or_create(
- name="SQ1_unittests", authentication_type="API", tool_type=tool_type, url='http://dummy.url.foo.bar/api'
+ name="SQ1_unittests", authentication_type="API", tool_type=tool_type, url='http://dummy.url.foo.bar/api',
)
pasc, _ = Product_API_Scan_Configuration.objects.get_or_create(
- product=product, tool_configuration=tool_conf, service_key_1='ABCD'
+ product=product, tool_configuration=tool_conf, service_key_1='ABCD',
)
self.test = Test(engagement=engagement, api_scan_configuration=pasc)
diff --git a/unittests/tools/test_auditjs_parser.py b/unittests/tools/test_auditjs_parser.py
index 789efc73d0..7e128183a8 100644
--- a/unittests/tools/test_auditjs_parser.py
+++ b/unittests/tools/test_auditjs_parser.py
@@ -64,7 +64,7 @@ def test_auditjs_parser_empty_with_error(self):
parser.get_findings(testfile, Test())
self.assertTrue(
- "Invalid JSON format. Are you sure you used --json option ?" in str(context.exception)
+ "Invalid JSON format. Are you sure you used --json option ?" in str(context.exception),
)
def test_auditjs_parser_with_package_name_has_namespace(self):
diff --git a/unittests/tools/test_aws_prowler_parser.py b/unittests/tools/test_aws_prowler_parser.py
index 2c33d706bd..db567d00f0 100644
--- a/unittests/tools/test_aws_prowler_parser.py
+++ b/unittests/tools/test_aws_prowler_parser.py
@@ -22,7 +22,7 @@ def test_aws_prowler_parser_with_critical_vuln_has_one_findings(self):
open("unittests/scans/aws_prowler/one_vuln.csv"))
self.assertEqual(1, len(findings))
self.assertEqual(
- "Root user in the account wasn't accessed in the last 1 days", findings[0].title
+ "Root user in the account wasn't accessed in the last 1 days", findings[0].title,
)
def test_aws_prowler_parser_with_many_vuln_has_many_findings(self):
diff --git a/unittests/tools/test_awssecurityhub_parser.py b/unittests/tools/test_awssecurityhub_parser.py
index 7993b065a5..f287f8937b 100644
--- a/unittests/tools/test_awssecurityhub_parser.py
+++ b/unittests/tools/test_awssecurityhub_parser.py
@@ -53,7 +53,7 @@ def test_unique_id(self):
findings = parser.get_findings(test_file, Test())
self.assertEqual(
"arn:aws:securityhub:us-east-1:012345678912:subscription/aws-foundational-security-best-practices/v/1.0.0/IAM.5/finding/de861909-2d26-4e45-bd86-19d2ab6ceef1",
- findings[0].unique_id_from_tool
+ findings[0].unique_id_from_tool,
)
def test_inspector_ec2(self):
diff --git a/unittests/tools/test_bandit_parser.py b/unittests/tools/test_bandit_parser.py
index dc4a3f34c8..a27d629e63 100644
--- a/unittests/tools/test_bandit_parser.py
+++ b/unittests/tools/test_bandit_parser.py
@@ -77,7 +77,7 @@ def test_bandit_parser_has_many_findings_recent2(self):
with self.subTest(i=50):
item = findings[50]
self.assertEqual(
- "Use of mark_safe() may expose cross-site scripting vulnerabilities and should be reviewed.", item.title
+ "Use of mark_safe() may expose cross-site scripting vulnerabilities and should be reviewed.", item.title,
)
self.assertEqual(datetime.datetime(2021, 10, 3, 12, 53, 18, tzinfo=tzlocal()), item.date)
self.assertEqual("Medium", item.severity)
diff --git a/unittests/tools/test_blackduck_binary_analysis_parser.py b/unittests/tools/test_blackduck_binary_analysis_parser.py
index 29c4130294..92d92c111b 100644
--- a/unittests/tools/test_blackduck_binary_analysis_parser.py
+++ b/unittests/tools/test_blackduck_binary_analysis_parser.py
@@ -21,7 +21,7 @@ def test_parse_one_vuln(self):
self.assertIsNotNone(finding.title)
self.assertEqual(
"instrument.dll: zlib 1.2.13 Vulnerable to CVE-2023-45853",
- finding.title
+ finding.title,
)
self.assertIsNotNone(finding.description)
@@ -37,7 +37,7 @@ def test_parse_one_vuln(self):
self.assertIsNotNone(finding.file_path)
self.assertEqual(
"JRE.msi:JRE.msi-30276-90876123.cab:instrument.dll",
- finding.file_path
+ finding.file_path,
)
self.assertIsNotNone(finding.vuln_id_from_tool)
diff --git a/unittests/tools/test_blackduck_component_risk_parser.py b/unittests/tools/test_blackduck_component_risk_parser.py
index 2a520c33aa..ccb613ce9e 100644
--- a/unittests/tools/test_blackduck_component_risk_parser.py
+++ b/unittests/tools/test_blackduck_component_risk_parser.py
@@ -9,7 +9,7 @@ class TestBlackduckComponentRiskParser(DojoTestCase):
def test_blackduck_enhanced_zip_upload(self):
testfile = Path(
get_unit_tests_path() + "/scans/blackduck_component_risk/"
- "blackduck_hub_component_risk.zip"
+ "blackduck_hub_component_risk.zip",
)
parser = BlackduckComponentRiskParser()
findings = parser.get_findings(testfile, Test())
diff --git a/unittests/tools/test_blackduck_parser.py b/unittests/tools/test_blackduck_parser.py
index 96c0f2eac3..d2d16c6942 100644
--- a/unittests/tools/test_blackduck_parser.py
+++ b/unittests/tools/test_blackduck_parser.py
@@ -44,7 +44,7 @@ def test_blackduck_csv_parser_new_format_has_many_findings(self):
def test_blackduck_enhanced_has_many_findings(self):
testfile = Path(
- get_unit_tests_path() + "/scans/blackduck/blackduck_enhanced_py3_unittest.zip"
+ get_unit_tests_path() + "/scans/blackduck/blackduck_enhanced_py3_unittest.zip",
)
parser = BlackduckParser()
findings = parser.get_findings(testfile, Test())
@@ -52,7 +52,7 @@ def test_blackduck_enhanced_has_many_findings(self):
def test_blackduck_enhanced_zip_upload(self):
testfile = Path(
- get_unit_tests_path() + "/scans/blackduck/blackduck_enhanced_py3_unittest_v2.zip"
+ get_unit_tests_path() + "/scans/blackduck/blackduck_enhanced_py3_unittest_v2.zip",
)
parser = BlackduckParser()
findings = parser.get_findings(testfile, Test())
diff --git a/unittests/tools/test_checkmarx_osa_parser.py b/unittests/tools/test_checkmarx_osa_parser.py
index bfe1590c77..ba348b64e5 100644
--- a/unittests/tools/test_checkmarx_osa_parser.py
+++ b/unittests/tools/test_checkmarx_osa_parser.py
@@ -28,7 +28,7 @@ def test_checkmarx_osa_parse_file_with_no_vulnerabilities_has_no_findings(
self,
):
my_file_handle, _product, _engagement, test = self.init(
- get_unit_tests_path() + "/scans/checkmarx_osa/no_finding.json"
+ get_unit_tests_path() + "/scans/checkmarx_osa/no_finding.json",
)
parser = CheckmarxOsaParser()
findings = parser.get_findings(my_file_handle, test)
@@ -42,7 +42,7 @@ def test_checkmarx_osa_parse_file_with_single_vulnerability_has_single_finding(
self,
):
my_file_handle, _product, _engagement, test = self.init(
- get_unit_tests_path() + "/scans/checkmarx_osa/single_finding.json"
+ get_unit_tests_path() + "/scans/checkmarx_osa/single_finding.json",
)
parser = CheckmarxOsaParser()
findings = parser.get_findings(my_file_handle, test)
@@ -94,7 +94,7 @@ def test_checkmarx_osa_parse_file_with_false_positive_is_false_positive(
self,
):
my_file_handle, _product, _engagement, test = self.init(
- get_unit_tests_path() + "/scans/checkmarx_osa/single_finding_false_positive.json"
+ get_unit_tests_path() + "/scans/checkmarx_osa/single_finding_false_positive.json",
)
parser = CheckmarxOsaParser()
findings = parser.get_findings(my_file_handle, test)
@@ -115,7 +115,7 @@ def test_checkmarx_osa_parse_file_with_confirmed_is_verified(
self,
):
my_file_handle, _product, _engagement, test = self.init(
- get_unit_tests_path() + "/scans/checkmarx_osa/single_finding_confirmed.json"
+ get_unit_tests_path() + "/scans/checkmarx_osa/single_finding_confirmed.json",
)
parser = CheckmarxOsaParser()
findings = parser.get_findings(my_file_handle, test)
@@ -136,7 +136,7 @@ def test_checkmarx_osa_parse_file_with_multiple_findings(
self,
):
my_file_handle, _product, _engagement, test = self.init(
- get_unit_tests_path() + "/scans/checkmarx_osa/multiple_findings.json"
+ get_unit_tests_path() + "/scans/checkmarx_osa/multiple_findings.json",
)
parser = CheckmarxOsaParser()
findings = parser.get_findings(my_file_handle, test)
@@ -150,7 +150,7 @@ def test_checkmarx_osa_parse_file_with_no_score(
self,
):
my_file_handle, _product, _engagement, test = self.init(
- get_unit_tests_path() + "/scans/checkmarx_osa/single_finding_no_score.json"
+ get_unit_tests_path() + "/scans/checkmarx_osa/single_finding_no_score.json",
)
parser = CheckmarxOsaParser()
findings = parser.get_findings(my_file_handle, test)
@@ -166,7 +166,7 @@ def test_checkmarx_osa_parse_file_with_no_url(
self,
):
my_file_handle, _product, _engagement, test = self.init(
- get_unit_tests_path() + "/scans/checkmarx_osa/single_finding_no_url.json"
+ get_unit_tests_path() + "/scans/checkmarx_osa/single_finding_no_url.json",
)
parser = CheckmarxOsaParser()
findings = parser.get_findings(my_file_handle, test)
@@ -183,12 +183,12 @@ def test_checkmarx_osa_parse_file_with_no_libraryId_raises_ValueError(
):
with self.assertRaises(ValueError) as context:
my_file_handle, _product, _engagement, test = self.init(
- get_unit_tests_path() + "/scans/checkmarx_osa/single_finding_no_libraryId.json"
+ get_unit_tests_path() + "/scans/checkmarx_osa/single_finding_no_libraryId.json",
)
with my_file_handle:
parser = CheckmarxOsaParser()
parser.get_findings(my_file_handle, test)
self.assertEqual(
- "Invalid format: missing mandatory field libraryId", str(context.exception)
+ "Invalid format: missing mandatory field libraryId", str(context.exception),
)
diff --git a/unittests/tools/test_checkmarx_parser.py b/unittests/tools/test_checkmarx_parser.py
index 88e5cc965b..6bfbbc1304 100644
--- a/unittests/tools/test_checkmarx_parser.py
+++ b/unittests/tools/test_checkmarx_parser.py
@@ -29,7 +29,7 @@ def teardown(self, my_file_handle):
@patch('dojo.tools.checkmarx.parser.add_language')
def test_file_name_aggregated_parse_file_with_no_vulnerabilities_has_no_findings(self, mock):
my_file_handle, _product, _engagement, test = self.init(
- get_unit_tests_path() + "/scans/checkmarx/no_finding.xml"
+ get_unit_tests_path() + "/scans/checkmarx/no_finding.xml",
)
parser = CheckmarxParser()
findings = parser.get_findings(my_file_handle, test)
@@ -40,7 +40,7 @@ def test_file_name_aggregated_parse_file_with_no_vulnerabilities_has_no_findings
def test_detailed_parse_file_with_no_vulnerabilities_has_no_findings(self, mock):
"""Checkmarx detailed scanner, with all vulnerabilities from checkmarx"""
my_file_handle, _product, _engagement, test = self.init(
- get_unit_tests_path() + "/scans/checkmarx/no_finding.xml"
+ get_unit_tests_path() + "/scans/checkmarx/no_finding.xml",
)
parser = CheckmarxParser()
parser.set_mode('detailed')
@@ -51,7 +51,7 @@ def test_detailed_parse_file_with_no_vulnerabilities_has_no_findings(self, mock)
@patch('dojo.tools.checkmarx.parser.add_language')
def test_file_name_aggregated_parse_file_with_single_vulnerability_has_single_finding(self, mock):
my_file_handle, product, _engagement, test = self.init(
- get_unit_tests_path() + "/scans/checkmarx/single_finding.xml"
+ get_unit_tests_path() + "/scans/checkmarx/single_finding.xml",
)
parser = CheckmarxParser()
findings = parser.get_findings(my_file_handle, test)
@@ -79,7 +79,7 @@ def test_file_name_aggregated_parse_file_with_single_vulnerability_has_single_fi
@patch('dojo.tools.checkmarx.parser.add_language')
def test_detailed_parse_file_with_single_vulnerability_has_single_finding(self, mock):
my_file_handle, product, _engagement, test = self.init(
- get_unit_tests_path() + "/scans/checkmarx/single_finding.xml"
+ get_unit_tests_path() + "/scans/checkmarx/single_finding.xml",
)
parser = CheckmarxParser()
parser.set_mode('detailed')
@@ -213,7 +213,7 @@ def check_parse_file_with_single_vulnerability_has_single_finding(self, findings
@patch('dojo.tools.checkmarx.parser.add_language')
def test_file_name_aggregated_parse_file_with_false_positive_is_false_positive(self, mock):
my_file_handle, product, _engagement, test = self.init(
- get_unit_tests_path() + "/scans/checkmarx/single_finding_false_positive.xml"
+ get_unit_tests_path() + "/scans/checkmarx/single_finding_false_positive.xml",
)
parser = CheckmarxParser()
findings = parser.get_findings(my_file_handle, test)
@@ -225,7 +225,7 @@ def test_file_name_aggregated_parse_file_with_false_positive_is_false_positive(s
@patch('dojo.tools.checkmarx.parser.add_language')
def test_detailed_parse_file_with_false_positive_is_false_positive(self, mock):
my_file_handle, product, _engagement, test = self.init(
- get_unit_tests_path() + "/scans/checkmarx/single_finding_false_positive.xml"
+ get_unit_tests_path() + "/scans/checkmarx/single_finding_false_positive.xml",
)
parser = CheckmarxParser()
parser.set_mode('detailed')
@@ -254,7 +254,7 @@ def check_parse_file_with_false_positive_is_false_positive(self, findings):
@patch('dojo.tools.checkmarx.parser.add_language')
def test_file_name_aggregated_parse_file_with_two_aggregated_findings_one_is_false_p(self, mock):
my_file_handle, product, _engagement, test = self.init(
- get_unit_tests_path() + "/scans/checkmarx/two_aggregated_findings_one_is_false_positive.xml"
+ get_unit_tests_path() + "/scans/checkmarx/two_aggregated_findings_one_is_false_positive.xml",
)
parser = CheckmarxParser()
findings = parser.get_findings(my_file_handle, test)
@@ -280,7 +280,7 @@ def test_file_name_aggregated_parse_file_with_two_aggregated_findings_one_is_fal
@patch('dojo.tools.checkmarx.parser.add_language')
def test_file_name_aggregated_parse_file_with_multiple_vulnerabilities_has_multiple_findings(self, mock):
my_file_handle, product, _engagement, test = self.init(
- get_unit_tests_path() + "/scans/checkmarx/multiple_findings.xml"
+ get_unit_tests_path() + "/scans/checkmarx/multiple_findings.xml",
)
parser = CheckmarxParser()
findings = parser.get_findings(my_file_handle, test)
@@ -299,7 +299,7 @@ def test_file_name_aggregated_parse_file_with_multiple_vulnerabilities_has_multi
@patch('dojo.tools.checkmarx.parser.add_language')
def test_detailed_parse_file_with_multiple_vulnerabilities_has_multiple_findings(self, mock):
my_file_handle, product, _engagement, test = self.init(
- get_unit_tests_path() + "/scans/checkmarx/multiple_findings.xml"
+ get_unit_tests_path() + "/scans/checkmarx/multiple_findings.xml",
)
parser = CheckmarxParser()
parser.set_mode('detailed')
@@ -322,7 +322,7 @@ def test_detailed_parse_file_with_multiple_vulnerabilities_has_multiple_findings
@patch('dojo.tools.checkmarx.parser.add_language')
def test_file_name_aggregated_parse_file_with_different_sourceFilename_same_sinkFilename_is_aggregated(self, mock):
my_file_handle, product, _engagement, test = self.init(
- get_unit_tests_path() + "/scans/checkmarx/multiple_findings_different_sourceFilename_same_sinkFilename.xml"
+ get_unit_tests_path() + "/scans/checkmarx/multiple_findings_different_sourceFilename_same_sinkFilename.xml",
)
parser = CheckmarxParser()
findings = parser.get_findings(my_file_handle, test)
@@ -336,7 +336,7 @@ def test_file_name_aggregated_parse_file_with_different_sourceFilename_same_sink
@patch('dojo.tools.checkmarx.parser.add_language')
def test_detailed_parse_file_with_different_sourceFilename_same_sinkFilename_is_not_aggregated(self, mock):
my_file_handle, product, _engagement, test = self.init(
- get_unit_tests_path() + "/scans/checkmarx/multiple_findings_different_sourceFilename_same_sinkFilename.xml"
+ get_unit_tests_path() + "/scans/checkmarx/multiple_findings_different_sourceFilename_same_sinkFilename.xml",
)
parser = CheckmarxParser()
parser.set_mode('detailed')
@@ -353,7 +353,7 @@ def test_detailed_parse_file_with_different_sourceFilename_same_sinkFilename_is_
@patch('dojo.tools.checkmarx.parser.add_language')
def test_file_name_aggregated_parse_file_with_same_sourceFilename_different_sinkFilename_is_not_aggregated(self, mock):
my_file_handle, product, _engagement, test = self.init(
- get_unit_tests_path() + "/scans/checkmarx/multiple_findings_same_sourceFilename_different_sinkFilename.xml"
+ get_unit_tests_path() + "/scans/checkmarx/multiple_findings_same_sourceFilename_different_sinkFilename.xml",
)
parser = CheckmarxParser()
findings = parser.get_findings(my_file_handle, test)
@@ -365,7 +365,7 @@ def test_file_name_aggregated_parse_file_with_same_sourceFilename_different_sink
@patch('dojo.tools.checkmarx.parser.add_language')
def test_detailed_parse_file_with_same_sourceFilename_different_sinkFilename_is_not_aggregated(self, mock):
my_file_handle, product, _engagement, test = self.init(
- get_unit_tests_path() + "/scans/checkmarx/multiple_findings_same_sourceFilename_different_sinkFilename.xml"
+ get_unit_tests_path() + "/scans/checkmarx/multiple_findings_same_sourceFilename_different_sinkFilename.xml",
)
parser = CheckmarxParser()
parser.set_mode('detailed')
@@ -380,7 +380,7 @@ def test_detailed_parse_file_with_same_sourceFilename_different_sinkFilename_is_
@patch('dojo.tools.checkmarx.parser.add_language')
def test_file_name_aggregated_parse_file_with_utf8_replacement_char(self, mock):
my_file_handle, product, _engagement, test = self.init(
- get_unit_tests_path() + "/scans/checkmarx/utf8_replacement_char.xml"
+ get_unit_tests_path() + "/scans/checkmarx/utf8_replacement_char.xml",
)
parser = CheckmarxParser()
findings = parser.get_findings(my_file_handle, test)
@@ -408,7 +408,7 @@ def test_file_name_aggregated_parse_file_with_utf8_replacement_char(self, mock):
@patch('dojo.tools.checkmarx.parser.add_language')
def test_detailed_parse_file_with_utf8_replacement_char(self, mock):
my_file_handle, product, _engagement, test = self.init(
- get_unit_tests_path() + "/scans/checkmarx/utf8_replacement_char.xml"
+ get_unit_tests_path() + "/scans/checkmarx/utf8_replacement_char.xml",
)
parser = CheckmarxParser()
parser.set_mode('detailed')
@@ -526,7 +526,7 @@ def check_parse_file_with_utf8_replacement_char(self, findings):
@patch('dojo.tools.checkmarx.parser.add_language')
def test_file_name_aggregated_parse_file_with_utf8_various_non_ascii_char(self, mock):
my_file_handle, product, _engagement, test = self.init(
- get_unit_tests_path() + "/scans/checkmarx/utf8_various_non_ascii_char.xml"
+ get_unit_tests_path() + "/scans/checkmarx/utf8_various_non_ascii_char.xml",
)
parser = CheckmarxParser()
findings = parser.get_findings(my_file_handle, test)
@@ -554,7 +554,7 @@ def test_file_name_aggregated_parse_file_with_utf8_various_non_ascii_char(self,
@patch('dojo.tools.checkmarx.parser.add_language')
def test_detailed_parse_file_with_utf8_various_non_ascii_char(self, mock):
my_file_handle, product, _engagement, test = self.init(
- get_unit_tests_path() + "/scans/checkmarx/utf8_various_non_ascii_char.xml"
+ get_unit_tests_path() + "/scans/checkmarx/utf8_various_non_ascii_char.xml",
)
parser = CheckmarxParser()
parser.set_mode('detailed')
@@ -672,7 +672,7 @@ def check_parse_file_with_utf8_various_non_ascii_char(self, findings):
@patch('dojo.tools.checkmarx.parser.add_language')
def test_file_with_multiple_findings_is_aggregated_with_query_id(self, mock):
my_file_handle, product, _engagement, test = self.init(
- get_unit_tests_path() + "/scans/checkmarx/multiple_findings_same_query_id.xml"
+ get_unit_tests_path() + "/scans/checkmarx/multiple_findings_same_query_id.xml",
)
parser = CheckmarxParser()
findings = parser.get_findings(my_file_handle, test)
@@ -692,7 +692,7 @@ def test_file_with_multiple_findings_is_aggregated_with_query_id(self, mock):
@patch('dojo.tools.checkmarx.parser.add_language')
def test_file_with_empty_filename(self, mock):
my_file_handle, product, _engagement, test = self.init(
- get_unit_tests_path() + "/scans/checkmarx/single_no_filename.xml"
+ get_unit_tests_path() + "/scans/checkmarx/single_no_filename.xml",
)
parser = CheckmarxParser()
findings = parser.get_findings(my_file_handle, test)
@@ -712,7 +712,7 @@ def test_file_with_empty_filename(self, mock):
@patch('dojo.tools.checkmarx.parser.add_language')
def test_file_with_many_aggregated_findings(self, mock):
my_file_handle, _product, _engagement, test = self.init(
- get_unit_tests_path() + "/scans/checkmarx/many_aggregated_findings.xml"
+ get_unit_tests_path() + "/scans/checkmarx/many_aggregated_findings.xml",
)
parser = CheckmarxParser()
findings = parser.get_findings(my_file_handle, test)
@@ -729,7 +729,7 @@ def test_file_with_many_aggregated_findings(self, mock):
@patch('dojo.tools.checkmarx.parser.add_language')
def test_file_with_many_findings_json(self, mock):
my_file_handle, _product, _engagement, _test = self.init(
- get_unit_tests_path() + "/scans/checkmarx/multiple_findings.json"
+ get_unit_tests_path() + "/scans/checkmarx/multiple_findings.json",
)
parser = CheckmarxParser()
findings = parser.get_findings(my_file_handle, Test())
@@ -763,7 +763,7 @@ def test_file_with_many_findings_json(self, mock):
@patch('dojo.tools.checkmarx.parser.add_language')
def test_file_issue6956(self, mock):
my_file_handle, _product, _engagement, _test = self.init(
- get_unit_tests_path() + "/scans/checkmarx/sample_report.json"
+ get_unit_tests_path() + "/scans/checkmarx/sample_report.json",
)
parser = CheckmarxParser()
findings = parser.get_findings(my_file_handle, Test())
@@ -826,7 +826,7 @@ def test_file_issue6956(self, mock):
@patch('dojo.tools.checkmarx.parser.add_language')
def test_finding_date_should_be_date_xml(self, mock):
my_file_handle, _product, _engagement, test = self.init(
- get_unit_tests_path() + "/scans/checkmarx/single_finding.xml"
+ get_unit_tests_path() + "/scans/checkmarx/single_finding.xml",
)
parser = CheckmarxParser()
parser.set_mode('detailed')
@@ -837,7 +837,7 @@ def test_finding_date_should_be_date_xml(self, mock):
@patch('dojo.tools.checkmarx.parser.add_language')
def test_finding_date_should_be_date_json(self, mock):
my_file_handle, _product, _engagement, test = self.init(
- get_unit_tests_path() + "/scans/checkmarx/multiple_findings.json"
+ get_unit_tests_path() + "/scans/checkmarx/multiple_findings.json",
)
parser = CheckmarxParser()
parser.set_mode('detailed')
diff --git a/unittests/tools/test_checkov_parser.py b/unittests/tools/test_checkov_parser.py
index b626fc952e..49bc3d1a5f 100644
--- a/unittests/tools/test_checkov_parser.py
+++ b/unittests/tools/test_checkov_parser.py
@@ -53,7 +53,7 @@ def test_parse_file_with_multiple_check_type_has_multiple_check_type(self):
'Check Type: terraform\n'
'Check Id: CKV_AWS_161\n'
'Ensure RDS database has IAM authentication enabled\n',
- first_terraform_finding.description
+ first_terraform_finding.description,
)
self.assertEqual('/aws/db-app.tf', first_terraform_finding.file_path)
self.assertEqual(1, first_terraform_finding.line)
@@ -68,7 +68,7 @@ def test_parse_file_with_multiple_check_type_has_multiple_check_type(self):
'Check Type: dockerfile\n'
'Check Id: CKV_DOCKER_3\n'
'Ensure that a user for the container has been created\n',
- first_dockerfile_finding.description
+ first_dockerfile_finding.description,
)
self.assertEqual('/aws/resources/Dockerfile', first_dockerfile_finding.file_path)
self.assertEqual(0, first_dockerfile_finding.line)
@@ -76,7 +76,7 @@ def test_parse_file_with_multiple_check_type_has_multiple_check_type(self):
self.assertEqual('', first_dockerfile_finding.mitigation)
self.assertEqual(
'https://docs.bridgecrew.io/docs/ensure-that-a-user-for-the-container-has-been-created',
- first_dockerfile_finding.references
+ first_dockerfile_finding.references,
)
def test_parse_file_with_specified_severity(self):
diff --git a/unittests/tools/test_codechecker_parser.py b/unittests/tools/test_codechecker_parser.py
index 8c6d9e6cc6..756ba4c780 100644
--- a/unittests/tools/test_codechecker_parser.py
+++ b/unittests/tools/test_codechecker_parser.py
@@ -7,7 +7,7 @@ class TestCodeCheckerParser(DojoTestCase):
def test_parse_file_with_no_vuln_has_no_findings(self):
with open(
- get_unit_tests_path() + "/scans/codechecker/cc-report-0-vuln.json"
+ get_unit_tests_path() + "/scans/codechecker/cc-report-0-vuln.json",
) as testfile:
parser = CodeCheckerParser()
findings = parser.get_findings(testfile, Test())
@@ -15,7 +15,7 @@ def test_parse_file_with_no_vuln_has_no_findings(self):
def test_parse_file_with_one_vuln_has_one_finding(self):
with open(
- get_unit_tests_path() + "/scans/codechecker/cc-report-1-vuln.json"
+ get_unit_tests_path() + "/scans/codechecker/cc-report-1-vuln.json",
) as testfile:
parser = CodeCheckerParser()
findings = parser.get_findings(testfile, Test())
@@ -33,7 +33,7 @@ def test_parse_file_with_one_vuln_has_one_finding(self):
def test_parse_file_with_multiple_vuln_has_multiple_findings(self):
with open(
- get_unit_tests_path() + "/scans/codechecker/cc-report-many-vuln.json"
+ get_unit_tests_path() + "/scans/codechecker/cc-report-many-vuln.json",
) as testfile:
parser = CodeCheckerParser()
findings = parser.get_findings(testfile, Test())
@@ -60,7 +60,7 @@ def test_parse_file_with_multiple_vuln_has_multiple_findings(self):
def test_parse_file_with_various_review_statuses(self):
with open(
- get_unit_tests_path() + "/scans/codechecker/cc-report-review-status.json"
+ get_unit_tests_path() + "/scans/codechecker/cc-report-review-status.json",
) as testfile:
parser = CodeCheckerParser()
findings = parser.get_findings(testfile, Test())
diff --git a/unittests/tools/test_crashtest_security_parser.py b/unittests/tools/test_crashtest_security_parser.py
index 2eaa5211cd..a87248ca3d 100644
--- a/unittests/tools/test_crashtest_security_parser.py
+++ b/unittests/tools/test_crashtest_security_parser.py
@@ -25,7 +25,7 @@ def test_crashtest_security_json_parser_full_file_has_many_findings(self):
def test_crashtest_security_json_parser_extracted_data_file_has_many_findings(self):
testfile = open(
- get_unit_tests_path() + "/scans/crashtest_security/data_extracted.json"
+ get_unit_tests_path() + "/scans/crashtest_security/data_extracted.json",
)
parser = CrashtestSecurityParser()
findings = parser.get_findings(testfile, Test())
diff --git a/unittests/tools/test_dependency_check_parser.py b/unittests/tools/test_dependency_check_parser.py
index 0bf78406f4..f72db048a4 100644
--- a/unittests/tools/test_dependency_check_parser.py
+++ b/unittests/tools/test_dependency_check_parser.py
@@ -77,7 +77,7 @@ def test_parse_file_with_multiple_vulnerabilities_has_multiple_findings(self):
"Update org.dom4j:dom4j:2.1.1.redhat-00001 to at least the version recommended in the description",
)
self.assertEqual(
- items[0].date, datetime(2016, 11, 5, 14, 52, 15, 748000, tzinfo=tzoffset(None, -14400))
+ items[0].date, datetime(2016, 11, 5, 14, 52, 15, 748000, tzinfo=tzoffset(None, -14400)),
) # 2016-11-05T14:52:15.748-0400
self.assertEqual(1, len(items[0].unsaved_vulnerability_ids))
self.assertEqual('CVE-0000-0001', items[0].unsaved_vulnerability_ids[0])
@@ -128,7 +128,7 @@ def test_parse_file_with_multiple_vulnerabilities_has_multiple_findings(self):
with self.subTest(i=3):
# identifier -> package url javascript, no vulnerabilitids, 3 vulnerabilities, relateddependencies without filename (pre v6.0.0)
self.assertEqual(
- items[3].title, "yargs-parser:5.0.0 | 1500"
+ items[3].title, "yargs-parser:5.0.0 | 1500",
)
self.assertEqual(items[3].component_name, "yargs-parser")
self.assertEqual(items[3].component_version, "5.0.0")
@@ -137,7 +137,7 @@ def test_parse_file_with_multiple_vulnerabilities_has_multiple_findings(self):
self.assertEqual(items[3].severity, "Low")
self.assertEqual(items[3].file_path, "yargs-parser:5.0.0")
self.assertEqual(
- items[3].mitigation, "Update yargs-parser:5.0.0 to at least the version recommended in the description"
+ items[3].mitigation, "Update yargs-parser:5.0.0 to at least the version recommended in the description",
)
self.assertIn(
"**Source:** NPM",
@@ -163,7 +163,7 @@ def test_parse_file_with_multiple_vulnerabilities_has_multiple_findings(self):
self.assertEqual(items[4].severity, "High")
self.assertEqual(items[4].file_path, "yargs-parser:5.0.0")
self.assertEqual(
- items[4].mitigation, "Update yargs-parser:5.0.0 to at least the version recommended in the description"
+ items[4].mitigation, "Update yargs-parser:5.0.0 to at least the version recommended in the description",
)
self.assertEqual(1, len(items[4].unsaved_vulnerability_ids))
self.assertEqual('CVE-2020-7608', items[4].unsaved_vulnerability_ids[0])
@@ -187,7 +187,7 @@ def test_parse_file_with_multiple_vulnerabilities_has_multiple_findings(self):
self.assertEqual(items[5].severity, "High")
self.assertEqual(items[5].file_path, "yargs-parser:5.0.0")
self.assertEqual(
- items[5].mitigation, "Update yargs-parser:5.0.0 to at least the version recommended in the description"
+ items[5].mitigation, "Update yargs-parser:5.0.0 to at least the version recommended in the description",
)
self.assertIsNone(items[5].unsaved_vulnerability_ids)
@@ -212,7 +212,7 @@ def test_parse_file_with_multiple_vulnerabilities_has_multiple_findings(self):
self.assertEqual(items[7].component_version, "2.1.1")
self.assertEqual(items[7].severity, "High")
self.assertEqual(
- items[7].mitigation, "Update dom4j:2.1.1 to at least the version recommended in the description"
+ items[7].mitigation, "Update dom4j:2.1.1 to at least the version recommended in the description",
)
with self.subTest(i=8):
@@ -225,7 +225,7 @@ def test_parse_file_with_multiple_vulnerabilities_has_multiple_findings(self):
self.assertEqual(items[8].component_version, "3.1.1")
self.assertEqual(items[8].severity, "High")
self.assertEqual(
- items[8].mitigation, "Update jquery:3.1.1 to at least the version recommended in the description"
+ items[8].mitigation, "Update jquery:3.1.1 to at least the version recommended in the description",
)
with self.subTest(i=9):
@@ -299,7 +299,7 @@ def test_parse_file_pr6439(self):
items[0].mitigation,
)
self.assertEqual(
- items[0].date, datetime(2022, 12, 14, 1, 35, 43, 684166, tzinfo=tzlocal())
+ items[0].date, datetime(2022, 12, 14, 1, 35, 43, 684166, tzinfo=tzlocal()),
) # 2016-11-05T14:52:15.748-0400
self.assertEqual(1, len(items[0].unsaved_vulnerability_ids))
self.assertEqual('CVE-2015-3208', items[0].unsaved_vulnerability_ids[0])
diff --git a/unittests/tools/test_dependency_track_parser.py b/unittests/tools/test_dependency_track_parser.py
index 41fb2591fa..40fd0a8177 100644
--- a/unittests/tools/test_dependency_track_parser.py
+++ b/unittests/tools/test_dependency_track_parser.py
@@ -5,9 +5,9 @@
class TestDependencyTrackParser(DojoTestCase):
- def test_dependency_track_parser_with_empty_list_for_findings_key_has_no_findings(self,):
+ def test_dependency_track_parser_with_empty_list_for_findings_key_has_no_findings(self):
with open(
- get_unit_tests_path() + "/scans/dependency_track/no_findings_because_findings_key_is_empty_list.json"
+ get_unit_tests_path() + "/scans/dependency_track/no_findings_because_findings_key_is_empty_list.json",
) as testfile:
parser = DependencyTrackParser()
findings = parser.get_findings(testfile, Test())
@@ -15,7 +15,7 @@ def test_dependency_track_parser_with_empty_list_for_findings_key_has_no_finding
def test_dependency_track_parser_with_missing_findings_key_has_no_findings(self):
with open(
- get_unit_tests_path() + "/scans/dependency_track/no_findings_because_findings_key_is_missing.json"
+ get_unit_tests_path() + "/scans/dependency_track/no_findings_because_findings_key_is_missing.json",
) as testfile:
parser = DependencyTrackParser()
findings = parser.get_findings(testfile, Test())
@@ -23,7 +23,7 @@ def test_dependency_track_parser_with_missing_findings_key_has_no_findings(self)
def test_dependency_track_parser_with_null_findings_key_has_no_findings(self):
with open(
- get_unit_tests_path() + "/scans/dependency_track/no_findings_because_findings_key_is_null.json"
+ get_unit_tests_path() + "/scans/dependency_track/no_findings_because_findings_key_is_null.json",
) as testfile:
parser = DependencyTrackParser()
findings = parser.get_findings(testfile, Test())
@@ -31,7 +31,7 @@ def test_dependency_track_parser_with_null_findings_key_has_no_findings(self):
def test_dependency_track_parser_has_many_findings(self):
with open(
- get_unit_tests_path() + "/scans/dependency_track/many_findings.json"
+ get_unit_tests_path() + "/scans/dependency_track/many_findings.json",
) as testfile:
parser = DependencyTrackParser()
findings = parser.get_findings(testfile, Test())
@@ -49,7 +49,7 @@ def test_dependency_track_parser_has_many_findings(self):
def test_dependency_track_parser_has_one_finding(self):
with open(
- get_unit_tests_path() + "/scans/dependency_track/one_finding.json"
+ get_unit_tests_path() + "/scans/dependency_track/one_finding.json",
) as testfile:
parser = DependencyTrackParser()
findings = parser.get_findings(testfile, Test())
@@ -57,7 +57,7 @@ def test_dependency_track_parser_has_one_finding(self):
def test_dependency_track_parser_v3_8_0(self):
with open(
- get_unit_tests_path() + "/scans/dependency_track/dependency_track_3.8.0_2021-01-18.json"
+ get_unit_tests_path() + "/scans/dependency_track/dependency_track_3.8.0_2021-01-18.json",
) as testfile:
parser = DependencyTrackParser()
findings = parser.get_findings(testfile, Test())
@@ -67,7 +67,7 @@ def test_dependency_track_parser_v3_8_0(self):
def test_dependency_track_parser_findings_with_alias(self):
with open(
- get_unit_tests_path() + "/scans/dependency_track/many_findings_with_alias.json"
+ get_unit_tests_path() + "/scans/dependency_track/many_findings_with_alias.json",
) as testfile:
parser = DependencyTrackParser()
findings = parser.get_findings(testfile, Test())
@@ -79,7 +79,7 @@ def test_dependency_track_parser_findings_with_alias(self):
def test_dependency_track_parser_findings_with_empty_alias(self):
with open(
- get_unit_tests_path() + "/scans/dependency_track/many_findings_with_empty_alias.json"
+ get_unit_tests_path() + "/scans/dependency_track/many_findings_with_empty_alias.json",
) as testfile:
parser = DependencyTrackParser()
findings = parser.get_findings(testfile, Test())
diff --git a/unittests/tools/test_dockerbench_parser.py b/unittests/tools/test_dockerbench_parser.py
index 8a2ec6137d..b3d5f603f0 100644
--- a/unittests/tools/test_dockerbench_parser.py
+++ b/unittests/tools/test_dockerbench_parser.py
@@ -7,7 +7,7 @@ class TestDockerBenchParser(DojoTestCase):
def test_parse_file_with_no_vuln_has_no_findings(self):
with open(
- get_unit_tests_path() + "/scans/dockerbench/docker-bench-report-zero-vulns.json"
+ get_unit_tests_path() + "/scans/dockerbench/docker-bench-report-zero-vulns.json",
) as testfile:
parser = DockerBenchParser()
findings = parser.get_findings(testfile, Test())
@@ -15,7 +15,7 @@ def test_parse_file_with_no_vuln_has_no_findings(self):
def test_parse_file_with_one_vuln_has_one_finding(self):
with open(
- get_unit_tests_path() + "/scans/dockerbench/docker-bench-report-single-vuln.json"
+ get_unit_tests_path() + "/scans/dockerbench/docker-bench-report-single-vuln.json",
) as testfile:
parser = DockerBenchParser()
findings = parser.get_findings(testfile, Test())
@@ -29,7 +29,7 @@ def test_parse_file_with_one_vuln_has_one_finding(self):
def test_parse_file_with_multiple_vuln_has_multiple_findings(self):
with open(
- get_unit_tests_path() + "/scans/dockerbench/docker-bench-report-many-vulns.json"
+ get_unit_tests_path() + "/scans/dockerbench/docker-bench-report-many-vulns.json",
) as testfile:
parser = DockerBenchParser()
findings = parser.get_findings(testfile, Test())
diff --git a/unittests/tools/test_gitlab_container_scan_parser.py b/unittests/tools/test_gitlab_container_scan_parser.py
index e4da366286..ab3e05a2b6 100644
--- a/unittests/tools/test_gitlab_container_scan_parser.py
+++ b/unittests/tools/test_gitlab_container_scan_parser.py
@@ -93,7 +93,7 @@ def test_gitlab_container_scan_parser_with_fless_data_v14(self):
finding = findings[50]
self.assertIsNone(finding.date)
self.assertEqual(
- "openssl: Infinite loop in BN_mod_sqrt() reachable when parsing certificates", finding.title
+ "openssl: Infinite loop in BN_mod_sqrt() reachable when parsing certificates", finding.title,
)
self.assertEqual("libretls", finding.component_name)
self.assertEqual("3.3.4-r2", finding.component_version)
diff --git a/unittests/tools/test_gitlab_dast_parser.py b/unittests/tools/test_gitlab_dast_parser.py
index 4e6cc5d41c..a2d5c2f762 100644
--- a/unittests/tools/test_gitlab_dast_parser.py
+++ b/unittests/tools/test_gitlab_dast_parser.py
@@ -22,7 +22,7 @@ def test_parse_file_with_one_vuln_has_one_finding_v14(self):
endpoint.clean()
self.assertEqual(
- "5ec00bbc-2e53-44cb-83e9-3d35365277e3", finding.unique_id_from_tool
+ "5ec00bbc-2e53-44cb-83e9-3d35365277e3", finding.unique_id_from_tool,
)
self.assertEqual(3, finding.scanner_confidence)
# vulnerability does not have a name: fallback to using id as a title
@@ -50,7 +50,7 @@ def test_parse_file_with_one_vuln_has_one_finding_v15(self):
endpoint.clean()
self.assertEqual(
- "5ec00bbc-2e53-44cb-83e9-3d35365277e3", finding.unique_id_from_tool
+ "5ec00bbc-2e53-44cb-83e9-3d35365277e3", finding.unique_id_from_tool,
)
self.assertEqual(None, finding.scanner_confidence)
# vulnerability does not have a name: fallback to using id as a title
@@ -90,7 +90,7 @@ def test_parse_file_with_multiple_vuln_has_multiple_findings_v14(self):
date = finding.date.strftime("%Y-%m-%dT%H:%M:%S.%f")
self.assertEqual("2021-04-23T15:46:40.644000", date)
self.assertEqual(
- "87e98ddf-7d75-444a-be6d-45400151a0fe", finding.unique_id_from_tool
+ "87e98ddf-7d75-444a-be6d-45400151a0fe", finding.unique_id_from_tool,
)
# vulnerability does not have a name: fallback to using id as a title
self.assertEqual(finding.unique_id_from_tool, finding.title)
@@ -128,7 +128,7 @@ def test_parse_file_with_multiple_vuln_has_multiple_findings_v15(self):
date = finding.date.strftime("%Y-%m-%dT%H:%M:%S.%f")
self.assertEqual("2021-04-23T15:46:40.644000", date)
self.assertEqual(
- "87e98ddf-7d75-444a-be6d-45400151a0fe", finding.unique_id_from_tool
+ "87e98ddf-7d75-444a-be6d-45400151a0fe", finding.unique_id_from_tool,
)
# vulnerability does not have a name: fallback to using id as a title
self.assertEqual(finding.unique_id_from_tool, finding.title)
diff --git a/unittests/tools/test_govulncheck_parser.py b/unittests/tools/test_govulncheck_parser.py
index f90a699fb1..78f706e47a 100644
--- a/unittests/tools/test_govulncheck_parser.py
+++ b/unittests/tools/test_govulncheck_parser.py
@@ -11,7 +11,7 @@ def test_parse_empty(self):
parser = GovulncheckParser()
parser.get_findings(testfile, Test())
self.assertIn(
- "Invalid JSON format", str(exp.exception)
+ "Invalid JSON format", str(exp.exception),
)
def test_parse_no_findings(self):
diff --git a/unittests/tools/test_huskyci_parser.py b/unittests/tools/test_huskyci_parser.py
index d0b76d7313..22199ed5bb 100644
--- a/unittests/tools/test_huskyci_parser.py
+++ b/unittests/tools/test_huskyci_parser.py
@@ -13,7 +13,7 @@ def test_parse_file_no_finding(self):
def test_parse_file_has_one_finding_one_tool(self):
with open(
- get_unit_tests_path() + "/scans/huskyci/huskyci_report_one_finding_one_tool.json"
+ get_unit_tests_path() + "/scans/huskyci/huskyci_report_one_finding_one_tool.json",
) as testfile:
parser = HuskyCIParser()
findings = parser.get_findings(testfile, Test())
@@ -21,7 +21,7 @@ def test_parse_file_has_one_finding_one_tool(self):
def test_parse_file_has_many_finding_one_tool(self):
with open(
- get_unit_tests_path() + "/scans/huskyci/huskyci_report_many_finding_one_tool.json"
+ get_unit_tests_path() + "/scans/huskyci/huskyci_report_many_finding_one_tool.json",
) as testfile:
parser = HuskyCIParser()
findings = parser.get_findings(testfile, Test())
@@ -29,7 +29,7 @@ def test_parse_file_has_many_finding_one_tool(self):
def test_parse_file_has_many_finding_two_tools(self):
with open(
- get_unit_tests_path() + "/scans/huskyci/huskyci_report_many_finding_two_tools.json"
+ get_unit_tests_path() + "/scans/huskyci/huskyci_report_many_finding_two_tools.json",
) as testfile:
parser = HuskyCIParser()
findings = parser.get_findings(testfile, Test())
diff --git a/unittests/tools/test_hydra_parser.py b/unittests/tools/test_hydra_parser.py
index 22beeccebe..93077abb16 100644
--- a/unittests/tools/test_hydra_parser.py
+++ b/unittests/tools/test_hydra_parser.py
@@ -41,7 +41,7 @@ def test_hydra_parser_with_one_finding_has_one_finding(self):
"127.0.0.1",
"9999",
"bill@example.com",
- "bill"
+ "bill",
)
def test_hydra_parser_with_one_finding_and_missing_date_has_one_finding(self):
@@ -59,7 +59,7 @@ def test_hydra_parser_with_one_finding_and_missing_date_has_one_finding(self):
"127.0.0.1",
"9999",
"bill@example.com",
- "bill"
+ "bill",
)
def test_hydra_parser_with_two_findings_with_one_incomplete_has_one_finding(self):
@@ -77,7 +77,7 @@ def test_hydra_parser_with_two_findings_with_one_incomplete_has_one_finding(self
"127.0.0.1",
"9999",
"bill@example.com",
- "bill"
+ "bill",
)
def test_hydra_parser_with_many_findings_has_many_findings(self):
@@ -93,7 +93,7 @@ def test_hydra_parser_with_many_findings_has_many_findings(self):
"127.0.0.1",
"9999",
"bill@example.com",
- "bill"
+ "bill",
)
self.__assertFindingEquals(
findings[1],
@@ -101,7 +101,7 @@ def test_hydra_parser_with_many_findings_has_many_findings(self):
"192.168.0.1",
"1234",
"joe@example.com",
- "joe"
+ "joe",
)
self.__assertFindingEquals(
findings[2],
@@ -109,7 +109,7 @@ def test_hydra_parser_with_many_findings_has_many_findings(self):
"something.bad.com",
"4321",
"jimmy@bad.com",
- "somesimplepassword"
+ "somesimplepassword",
)
def __assertFindingEquals(
@@ -119,7 +119,7 @@ def __assertFindingEquals(
finding_url,
finding_port,
finding_username,
- finding_password
+ finding_password,
):
self.assertEqual("Weak username / password combination found for " + finding_url, actual_finding.title)
self.assertEqual(date, actual_finding.date)
diff --git a/unittests/tools/test_jfrog_xray_api_summary_artifact_parser.py b/unittests/tools/test_jfrog_xray_api_summary_artifact_parser.py
index 615cad2595..330b35431c 100644
--- a/unittests/tools/test_jfrog_xray_api_summary_artifact_parser.py
+++ b/unittests/tools/test_jfrog_xray_api_summary_artifact_parser.py
@@ -53,7 +53,7 @@ def test_parse_file_with_one_vuln(self):
def test_parse_file_with_many_vulns(self):
testfile = open(
- "unittests/scans/jfrog_xray_api_summary_artifact/many_vulns.json"
+ "unittests/scans/jfrog_xray_api_summary_artifact/many_vulns.json",
)
parser = JFrogXrayApiSummaryArtifactParser()
findings = parser.get_findings(testfile, Test())
@@ -65,7 +65,7 @@ def test_parse_file_with_many_vulns(self):
def test_parse_file_with_malformed_cvssv3_score(self):
testfile = open(
- "unittests/scans/jfrog_xray_api_summary_artifact/malformed_cvssv3.json"
+ "unittests/scans/jfrog_xray_api_summary_artifact/malformed_cvssv3.json",
)
parser = JFrogXrayApiSummaryArtifactParser()
findings = parser.get_findings(testfile, Test())
diff --git a/unittests/tools/test_kubebench_parser.py b/unittests/tools/test_kubebench_parser.py
index 601db2707d..2e732fef40 100644
--- a/unittests/tools/test_kubebench_parser.py
+++ b/unittests/tools/test_kubebench_parser.py
@@ -7,7 +7,7 @@ class TestKubeBenchParser(DojoTestCase):
def test_parse_file_with_no_vuln_has_no_findings(self):
with open(
- get_unit_tests_path() + "/scans/kubebench/kube-bench-report-zero-vuln.json"
+ get_unit_tests_path() + "/scans/kubebench/kube-bench-report-zero-vuln.json",
) as testfile:
parser = KubeBenchParser()
findings = parser.get_findings(testfile, Test())
@@ -15,7 +15,7 @@ def test_parse_file_with_no_vuln_has_no_findings(self):
def test_parse_file_with_one_vuln_has_one_finding(self):
with open(
- get_unit_tests_path() + "/scans/kubebench/kube-bench-report-one-vuln.json"
+ get_unit_tests_path() + "/scans/kubebench/kube-bench-report-one-vuln.json",
) as testfile:
parser = KubeBenchParser()
findings = parser.get_findings(testfile, Test())
@@ -23,7 +23,7 @@ def test_parse_file_with_one_vuln_has_one_finding(self):
def test_parse_file_with_multiple_vuln_has_multiple_findings(self):
with open(
- get_unit_tests_path() + "/scans/kubebench/kube-bench-report-many-vuln.json"
+ get_unit_tests_path() + "/scans/kubebench/kube-bench-report-many-vuln.json",
) as testfile:
parser = KubeBenchParser()
findings = parser.get_findings(testfile, Test())
@@ -33,7 +33,7 @@ def test_parse_file_with_controls_tag(self):
# The testfile has been derived from https://github.com/kubernetes-sigs/wg-policy-prototypes/blob/master/policy-report/kube-bench-adapter/samples/kube-bench-output.json
with open(
- get_unit_tests_path() + "/scans/kubebench/kube-bench-controls.json"
+ get_unit_tests_path() + "/scans/kubebench/kube-bench-controls.json",
) as testfile:
parser = KubeBenchParser()
findings = parser.get_findings(testfile, Test())
diff --git a/unittests/tools/test_kubehunter_parser.py b/unittests/tools/test_kubehunter_parser.py
index 6912df0380..df5cf17334 100644
--- a/unittests/tools/test_kubehunter_parser.py
+++ b/unittests/tools/test_kubehunter_parser.py
@@ -41,7 +41,7 @@ def test_kubehunter_parser_empty_with_error(self):
parser.get_findings(testfile, Test())
self.assertEqual(
- "Expecting value: line 1 column 1 (char 0)", str(context.exception)
+ "Expecting value: line 1 column 1 (char 0)", str(context.exception),
)
def test_kubehunter_parser_dupe(self):
diff --git a/unittests/tools/test_mend_parser.py b/unittests/tools/test_mend_parser.py
index 8deaf6f2be..3a48c5c49e 100644
--- a/unittests/tools/test_mend_parser.py
+++ b/unittests/tools/test_mend_parser.py
@@ -30,7 +30,7 @@ def test_parse_file_with_multiple_vuln_has_multiple_finding(self):
def test_parse_file_with_multiple_vuln_cli_output(self):
with open(
- get_unit_tests_path() + "/scans/mend/cli_generated_many_vulns.json"
+ get_unit_tests_path() + "/scans/mend/cli_generated_many_vulns.json",
) as testfile:
parser = MendParser()
findings = parser.get_findings(testfile, Test())
diff --git a/unittests/tools/test_microfocus_webinspect_parser.py b/unittests/tools/test_microfocus_webinspect_parser.py
index 0d2dd131c1..76869be044 100644
--- a/unittests/tools/test_microfocus_webinspect_parser.py
+++ b/unittests/tools/test_microfocus_webinspect_parser.py
@@ -10,7 +10,7 @@ def test_parse_file_with_no_vuln_has_no_findings(self):
test.engagement = Engagement()
test.engagement.product = Product()
with open(
- get_unit_tests_path() + "/scans/microfocus_webinspect/Webinspect_no_vuln.xml"
+ get_unit_tests_path() + "/scans/microfocus_webinspect/Webinspect_no_vuln.xml",
) as testfile:
parser = MicrofocusWebinspectParser()
findings = parser.get_findings(testfile, test)
@@ -21,7 +21,7 @@ def test_parse_file_with_one_vuln_has_one_findings(self):
test.engagement = Engagement()
test.engagement.product = Product()
with open(
- get_unit_tests_path() + "/scans/microfocus_webinspect/Webinspect_one_vuln.xml"
+ get_unit_tests_path() + "/scans/microfocus_webinspect/Webinspect_one_vuln.xml",
) as testfile:
parser = MicrofocusWebinspectParser()
findings = parser.get_findings(testfile, test)
@@ -42,7 +42,7 @@ def test_parse_file_with_multiple_vuln_has_multiple_finding(self):
test.engagement = Engagement()
test.engagement.product = Product()
with open(
- get_unit_tests_path() + "/scans/microfocus_webinspect/Webinspect_many_vuln.xml"
+ get_unit_tests_path() + "/scans/microfocus_webinspect/Webinspect_many_vuln.xml",
)as testfile:
parser = MicrofocusWebinspectParser()
findings = parser.get_findings(testfile, test)
@@ -54,7 +54,7 @@ def test_parse_file_with_multiple_vuln_has_multiple_finding(self):
self.assertEqual(525, item.cwe)
self.assertIsNotNone(item.references)
self.assertEqual(
- "1cfe38ee-89f7-4110-ad7c-8fca476b2f04", item.unique_id_from_tool
+ "1cfe38ee-89f7-4110-ad7c-8fca476b2f04", item.unique_id_from_tool,
)
self.assertEqual(1, len(item.unsaved_endpoints))
endpoint = item.unsaved_endpoints[0]
@@ -65,11 +65,11 @@ def test_parse_file_with_multiple_vuln_has_multiple_finding(self):
def test_convert_severity(self):
with self.subTest("convert info", val="0"):
self.assertEqual(
- "Info", MicrofocusWebinspectParser.convert_severity("0")
+ "Info", MicrofocusWebinspectParser.convert_severity("0"),
)
with self.subTest("convert medium", val="2"):
self.assertEqual(
- "Medium", MicrofocusWebinspectParser.convert_severity("2")
+ "Medium", MicrofocusWebinspectParser.convert_severity("2"),
)
def test_parse_file_version_18_20(self):
@@ -121,7 +121,7 @@ def test_parse_file_issue7690(self):
test.engagement = Engagement()
test.engagement.product = Product()
with open(
- get_unit_tests_path() + "/scans/microfocus_webinspect/issue_7690.xml"
+ get_unit_tests_path() + "/scans/microfocus_webinspect/issue_7690.xml",
) as testfile:
parser = MicrofocusWebinspectParser()
findings = parser.get_findings(testfile, test)
diff --git a/unittests/tools/test_noseyparker_parser.py b/unittests/tools/test_noseyparker_parser.py
index 4e98bbc04f..e55087eb3e 100644
--- a/unittests/tools/test_noseyparker_parser.py
+++ b/unittests/tools/test_noseyparker_parser.py
@@ -40,6 +40,6 @@ def test_noseyparker_parser_error(self):
testfile.close()
self.assertEqual(0, len(findings))
self.assertTrue(
- "Invalid Nosey Parker data, make sure to use Nosey Parker v0.16.0" in str(context.exception)
+ "Invalid Nosey Parker data, make sure to use Nosey Parker v0.16.0" in str(context.exception),
)
self.assertTrue("ECONNREFUSED" in str(context.exception))
diff --git a/unittests/tools/test_ort_parser.py b/unittests/tools/test_ort_parser.py
index f523d35626..0d5c618cb6 100644
--- a/unittests/tools/test_ort_parser.py
+++ b/unittests/tools/test_ort_parser.py
@@ -11,7 +11,7 @@ def test_parse_without_file_has_no_finding(self):
def test_parse_file_has_many_finding_one_tool(self):
testfile = open(
- get_unit_tests_path() + "/scans/ort/evaluated-model-reporter-test-output.json"
+ get_unit_tests_path() + "/scans/ort/evaluated-model-reporter-test-output.json",
)
parser = OrtParser()
findings = parser.get_findings(testfile, Test())
diff --git a/unittests/tools/test_ossindex_devaudit_parser.py b/unittests/tools/test_ossindex_devaudit_parser.py
index e617654a20..8f30f96466 100644
--- a/unittests/tools/test_ossindex_devaudit_parser.py
+++ b/unittests/tools/test_ossindex_devaudit_parser.py
@@ -7,7 +7,7 @@ class TestOssIndexDevauditParser(DojoTestCase):
def test_ossindex_devaudit_parser_with_no_vulns_has_no_findings(self):
testfile = open(
- get_unit_tests_path() + "/scans/ossindex_devaudit/ossindex_devaudit_no_vuln.json"
+ get_unit_tests_path() + "/scans/ossindex_devaudit/ossindex_devaudit_no_vuln.json",
)
parser = OssIndexDevauditParser()
findings = parser.get_findings(testfile, Test())
@@ -16,7 +16,7 @@ def test_ossindex_devaudit_parser_with_no_vulns_has_no_findings(self):
def test_ossindex_devaudit_parser_with_one_critical_vuln_has_one_finding(self):
testfile = open(
- get_unit_tests_path() + "/scans/ossindex_devaudit/ossindex_devaudit_one_vuln.json"
+ get_unit_tests_path() + "/scans/ossindex_devaudit/ossindex_devaudit_one_vuln.json",
)
parser = OssIndexDevauditParser()
findings = parser.get_findings(testfile, Test())
@@ -25,7 +25,7 @@ def test_ossindex_devaudit_parser_with_one_critical_vuln_has_one_finding(self):
def test_ossindex_devaudit_parser_with_multiple_vulns_has_multiple_finding(self):
testfile = open(
- get_unit_tests_path() + "/scans/ossindex_devaudit/ossindex_devaudit_multiple_vulns.json"
+ get_unit_tests_path() + "/scans/ossindex_devaudit/ossindex_devaudit_multiple_vulns.json",
)
parser = OssIndexDevauditParser()
findings = parser.get_findings(testfile, Test())
@@ -34,7 +34,7 @@ def test_ossindex_devaudit_parser_with_multiple_vulns_has_multiple_finding(self)
def test_ossindex_devaudit_parser_with_no_cve_returns_info_severity(self):
testfile = open(
- get_unit_tests_path() + "/scans/ossindex_devaudit/ossindex_devaudit_vuln_no_cvssscore.json"
+ get_unit_tests_path() + "/scans/ossindex_devaudit/ossindex_devaudit_vuln_no_cvssscore.json",
)
parser = OssIndexDevauditParser()
findings = parser.get_findings(testfile, Test())
@@ -43,7 +43,7 @@ def test_ossindex_devaudit_parser_with_no_cve_returns_info_severity(self):
def test_ossindex_devaudit_parser_with_reference_shows_reference(self):
testfile = open(
- get_unit_tests_path() + "/scans/ossindex_devaudit/ossindex_devaudit_one_vuln.json"
+ get_unit_tests_path() + "/scans/ossindex_devaudit/ossindex_devaudit_one_vuln.json",
)
parser = OssIndexDevauditParser()
findings = parser.get_findings(testfile, Test())
@@ -55,7 +55,7 @@ def test_ossindex_devaudit_parser_with_reference_shows_reference(self):
def test_ossindex_devaudit_parser_with_empty_reference_shows_empty_reference(self):
testfile = open(
- get_unit_tests_path() + "/scans/ossindex_devaudit/ossindex_devaudit_empty_reference.json"
+ get_unit_tests_path() + "/scans/ossindex_devaudit/ossindex_devaudit_empty_reference.json",
)
parser = OssIndexDevauditParser()
findings = parser.get_findings(testfile, Test())
@@ -66,7 +66,7 @@ def test_ossindex_devaudit_parser_with_empty_reference_shows_empty_reference(sel
def test_ossindex_devaudit_parser_with_missing_reference_shows_empty(self):
testfile = open(
- get_unit_tests_path() + "/scans/ossindex_devaudit/ossindex_devaudit_missing_reference.json"
+ get_unit_tests_path() + "/scans/ossindex_devaudit/ossindex_devaudit_missing_reference.json",
)
parser = OssIndexDevauditParser()
findings = parser.get_findings(testfile, Test())
@@ -77,7 +77,7 @@ def test_ossindex_devaudit_parser_with_missing_reference_shows_empty(self):
def test_ossindex_devaudit_parser_with_missing_cwe_shows_1035(self):
testfile = open(
- get_unit_tests_path() + "/scans/ossindex_devaudit/ossindex_devaudit_missing_cwe.json"
+ get_unit_tests_path() + "/scans/ossindex_devaudit/ossindex_devaudit_missing_cwe.json",
)
parser = OssIndexDevauditParser()
findings = parser.get_findings(testfile, Test())
@@ -88,7 +88,7 @@ def test_ossindex_devaudit_parser_with_missing_cwe_shows_1035(self):
def test_ossindex_devaudit_parser_with_null_cwe_shows_1035(self):
testfile = open(
- get_unit_tests_path() + "/scans/ossindex_devaudit/ossindex_devaudit_null_cwe.json"
+ get_unit_tests_path() + "/scans/ossindex_devaudit/ossindex_devaudit_null_cwe.json",
)
parser = OssIndexDevauditParser()
findings = parser.get_findings(testfile, Test())
@@ -99,7 +99,7 @@ def test_ossindex_devaudit_parser_with_null_cwe_shows_1035(self):
def test_ossindex_devaudit_parser_with_empty_cwe_shows_1035(self):
testfile = open(
- get_unit_tests_path() + "/scans/ossindex_devaudit/ossindex_devaudit_empty_cwe.json"
+ get_unit_tests_path() + "/scans/ossindex_devaudit/ossindex_devaudit_empty_cwe.json",
)
parser = OssIndexDevauditParser()
findings = parser.get_findings(testfile, Test())
@@ -110,7 +110,7 @@ def test_ossindex_devaudit_parser_with_empty_cwe_shows_1035(self):
def test_ossindex_devaudit_parser_get_severity_shows_info(self):
testfile = open(
- get_unit_tests_path() + "/scans/ossindex_devaudit/ossindex_devaudit_severity_info.json"
+ get_unit_tests_path() + "/scans/ossindex_devaudit/ossindex_devaudit_severity_info.json",
)
parser = OssIndexDevauditParser()
findings = parser.get_findings(testfile, Test())
@@ -121,7 +121,7 @@ def test_ossindex_devaudit_parser_get_severity_shows_info(self):
def test_ossindex_devaudit_parser_get_severity_shows_critical(self):
testfile = open(
- get_unit_tests_path() + "/scans/ossindex_devaudit/ossindex_devaudit_severity_critical.json"
+ get_unit_tests_path() + "/scans/ossindex_devaudit/ossindex_devaudit_severity_critical.json",
)
parser = OssIndexDevauditParser()
findings = parser.get_findings(testfile, Test())
@@ -132,7 +132,7 @@ def test_ossindex_devaudit_parser_get_severity_shows_critical(self):
def test_ossindex_devaudit_parser_get_severity_shows_high(self):
testfile = open(
- get_unit_tests_path() + "/scans/ossindex_devaudit/ossindex_devaudit_severity_high.json"
+ get_unit_tests_path() + "/scans/ossindex_devaudit/ossindex_devaudit_severity_high.json",
)
parser = OssIndexDevauditParser()
findings = parser.get_findings(testfile, Test())
@@ -143,7 +143,7 @@ def test_ossindex_devaudit_parser_get_severity_shows_high(self):
def test_ossindex_devaudit_parser_get_severity_shows_medium(self):
testfile = open(
- get_unit_tests_path() + "/scans/ossindex_devaudit/ossindex_devaudit_severity_medium.json"
+ get_unit_tests_path() + "/scans/ossindex_devaudit/ossindex_devaudit_severity_medium.json",
)
parser = OssIndexDevauditParser()
findings = parser.get_findings(testfile, Test())
@@ -154,7 +154,7 @@ def test_ossindex_devaudit_parser_get_severity_shows_medium(self):
def test_ossindex_devaudit_parser_get_severity_shows_low(self):
testfile = open(
- get_unit_tests_path() + "/scans/ossindex_devaudit/ossindex_devaudit_severity_low.json"
+ get_unit_tests_path() + "/scans/ossindex_devaudit/ossindex_devaudit_severity_low.json",
)
parser = OssIndexDevauditParser()
findings = parser.get_findings(testfile, Test())
diff --git a/unittests/tools/test_php_symfony_security_check_parser.py b/unittests/tools/test_php_symfony_security_check_parser.py
index 6566c02ebe..5e8c4bd51d 100644
--- a/unittests/tools/test_php_symfony_security_check_parser.py
+++ b/unittests/tools/test_php_symfony_security_check_parser.py
@@ -7,7 +7,7 @@ class TestPhpSymfonySecurityCheckerParser(DojoTestCase):
def test_php_symfony_security_check_parser_with_no_vuln_has_no_findings(self):
testfile = open(
- get_unit_tests_path() + "/scans/php_symfony_security_check/php_symfony_no_vuln.json"
+ get_unit_tests_path() + "/scans/php_symfony_security_check/php_symfony_no_vuln.json",
)
parser = PhpSymfonySecurityCheckParser()
findings = parser.get_findings(testfile, Test())
@@ -19,7 +19,7 @@ def test_php_symfony_security_check_parser_with_one_criticle_vuln_has_one_findin
self,
):
testfile = open(
- get_unit_tests_path() + "/scans/php_symfony_security_check/php_symfony_one_vuln.json"
+ get_unit_tests_path() + "/scans/php_symfony_security_check/php_symfony_one_vuln.json",
)
parser = PhpSymfonySecurityCheckParser()
findings = parser.get_findings(testfile, Test())
@@ -28,7 +28,7 @@ def test_php_symfony_security_check_parser_with_one_criticle_vuln_has_one_findin
def test_php_symfony_security_check_parser_with_many_vuln_has_many_findings(self):
testfile = open(
- get_unit_tests_path() + "/scans/php_symfony_security_check/php_symfony_many_vuln.json"
+ get_unit_tests_path() + "/scans/php_symfony_security_check/php_symfony_many_vuln.json",
)
parser = PhpSymfonySecurityCheckParser()
findings = parser.get_findings(testfile, Test())
diff --git a/unittests/tools/test_qualys_infrascan_webgui_parser.py b/unittests/tools/test_qualys_infrascan_webgui_parser.py
index 78e57188a6..941aee124c 100644
--- a/unittests/tools/test_qualys_infrascan_webgui_parser.py
+++ b/unittests/tools/test_qualys_infrascan_webgui_parser.py
@@ -11,7 +11,7 @@ class TestQualysInfrascanWebguiParser(DojoTestCase):
def test_parse_file_with_no_vuln_has_no_findings(self):
with open(
- get_unit_tests_path() + "/scans/qualys_infrascan_webgui/qualys_infrascan_webgui_0.xml"
+ get_unit_tests_path() + "/scans/qualys_infrascan_webgui/qualys_infrascan_webgui_0.xml",
) as testfile:
parser = QualysInfrascanWebguiParser()
findings = parser.get_findings(testfile, Test())
@@ -21,7 +21,7 @@ def test_parse_file_with_no_vuln_has_no_findings(self):
# + also verify data with one test
def test_parse_file_with_one_vuln_has_one_findings(self):
with open(
- get_unit_tests_path() + "/scans/qualys_infrascan_webgui/qualys_infrascan_webgui_1.xml"
+ get_unit_tests_path() + "/scans/qualys_infrascan_webgui/qualys_infrascan_webgui_1.xml",
) as testfile:
parser = QualysInfrascanWebguiParser()
findings = parser.get_findings(testfile, Test())
@@ -38,7 +38,7 @@ def test_parse_file_with_one_vuln_has_one_findings(self):
# Sample with Multiple Test
def test_parse_file_with_multiple_vuln_has_multiple_findings(self):
with open(
- get_unit_tests_path() + "/scans/qualys_infrascan_webgui/qualys_infrascan_webgui_multiple.xml"
+ get_unit_tests_path() + "/scans/qualys_infrascan_webgui/qualys_infrascan_webgui_multiple.xml",
) as testfile:
parser = QualysInfrascanWebguiParser()
findings = parser.get_findings(testfile, Test())
@@ -61,7 +61,7 @@ def test_parse_file_with_multiple_vuln_has_multiple_findings(self):
# Sample with Multiple Test
def test_parse_file_with_finding_no_dns(self):
with open(
- get_unit_tests_path() + "/scans/qualys_infrascan_webgui/qualys_infrascan_webgui_3.xml"
+ get_unit_tests_path() + "/scans/qualys_infrascan_webgui/qualys_infrascan_webgui_3.xml",
) as testfile:
parser = QualysInfrascanWebguiParser()
findings = parser.get_findings(testfile, Test())
diff --git a/unittests/tools/test_qualys_parser.py b/unittests/tools/test_qualys_parser.py
index be96a3334a..3580196116 100644
--- a/unittests/tools/test_qualys_parser.py
+++ b/unittests/tools/test_qualys_parser.py
@@ -18,7 +18,7 @@ def test_parse_file_with_no_vuln_has_no_findings(self):
def parse_file_with_no_vuln_has_no_findings(self):
with open(
- get_unit_tests_path() + "/scans/qualys/empty.xml"
+ get_unit_tests_path() + "/scans/qualys/empty.xml",
) as testfile:
parser = QualysParser()
findings = parser.get_findings(testfile, Test())
@@ -35,7 +35,7 @@ def test_parse_file_with_multiple_vuln_has_multiple_findings(self):
def parse_file_with_multiple_vuln_has_multiple_findings(self):
with open(
- get_unit_tests_path() + "/scans/qualys/Qualys_Sample_Report.xml"
+ get_unit_tests_path() + "/scans/qualys/Qualys_Sample_Report.xml",
) as testfile:
parser = QualysParser()
findings = parser.get_findings(testfile, Test())
@@ -46,13 +46,13 @@ def parse_file_with_multiple_vuln_has_multiple_findings(self):
finding = findings[0]
self.assertEqual(
- finding.title, "QID-6 | DNS Host Name"
+ finding.title, "QID-6 | DNS Host Name",
)
self.assertEqual(
- finding.severity, "Informational"
+ finding.severity, "Informational",
)
self.assertEqual(
- finding.unsaved_endpoints[0].host, "demo13.s02.sjc01.qualys.com"
+ finding.unsaved_endpoints[0].host, "demo13.s02.sjc01.qualys.com",
)
for finding in findings:
if finding.unsaved_endpoints[0].host == "demo14.s02.sjc01.qualys.com" and finding.title == "QID-370876 | AMD Processors Multiple Security Vulnerabilities (RYZENFALL/MASTERKEY/CHIMERA-FW/FALLOUT)":
@@ -61,15 +61,15 @@ def parse_file_with_multiple_vuln_has_multiple_findings(self):
finding_cvssv3_vector = finding
self.assertEqual(
# CVSS_FINAL is defined without a cvssv3 vector
- finding_cvssv3_score.cvssv3, None
+ finding_cvssv3_score.cvssv3, None,
)
self.assertEqual(
- finding_cvssv3_score.severity, "High"
+ finding_cvssv3_score.severity, "High",
)
self.assertEqual(finding_cvssv3_vector.cvssv3,
"CVSS:3.0/AV:N/AC:H/PR:N/UI:N/S:C/C:H/I:H/A:H")
self.assertEqual(
- finding_cvssv3_vector.severity, "High"
+ finding_cvssv3_vector.severity, "High",
)
return finding
@@ -82,7 +82,7 @@ def test_parse_file_with_no_vuln_has_no_findings_csv(self):
def parse_file_with_no_vuln_has_no_findings_csv(self):
with open(
- get_unit_tests_path() + "/scans/qualys/empty.csv"
+ get_unit_tests_path() + "/scans/qualys/empty.csv",
) as testfile:
parser = QualysParser()
findings = parser.get_findings(testfile, Test())
@@ -99,7 +99,7 @@ def test_parse_file_with_multiple_vuln_has_multiple_findings_csv(self):
def parse_file_with_multiple_vuln_has_multiple_findings_csv(self):
with open(
- get_unit_tests_path() + "/scans/qualys/Qualys_Sample_Report.csv"
+ get_unit_tests_path() + "/scans/qualys/Qualys_Sample_Report.csv",
) as testfile:
parser = QualysParser()
findings = parser.get_findings(testfile, Test())
@@ -113,30 +113,30 @@ def parse_file_with_multiple_vuln_has_multiple_findings_csv(self):
finding.title,
"QID-105971 | EOL/Obsolete Software: Microsoft ASP.NET 1.0 Detected")
self.assertEqual(
- finding.severity, "Critical"
+ finding.severity, "Critical",
)
self.assertEqual(
- finding.unsaved_endpoints[0].host, "ip-10-98-57-180.eu-west-1.compute.internal"
+ finding.unsaved_endpoints[0].host, "ip-10-98-57-180.eu-west-1.compute.internal",
)
for finding in findings:
if finding.unsaved_endpoints[0].host == "ip-10-98-57-180.eu-west-1.compute.internal" and finding.title == "QID-105971 | EOL/Obsolete Software: Microsoft ASP.NET 1.0 Detected":
self.assertEqual(
- finding.severity, "Critical"
+ finding.severity, "Critical",
)
self.assertEqual(
finding.cvssv3,
"CVSS:3.0/AV:N/AC:H/PR:N/UI:N/S:U/C:H/I:H/A:H/E:U/RL:U/RC:C")
self.assertEqual(
- finding.severity, "Critical"
+ finding.severity, "Critical",
)
return findings[0]
def test_parse_file_monthly_pci_issue6932(self):
with open(
- get_unit_tests_path() + "/scans/qualys/monthly_pci_issue6932.csv"
+ get_unit_tests_path() + "/scans/qualys/monthly_pci_issue6932.csv",
) as testfile:
parser = QualysParser()
findings = parser.get_findings(testfile, Test())
@@ -144,7 +144,7 @@ def test_parse_file_monthly_pci_issue6932(self):
def test_parse_file_with_cvss_values_and_scores(self):
with open(
- get_unit_tests_path() + "/scans/qualys/Qualys_Sample_Report.xml"
+ get_unit_tests_path() + "/scans/qualys/Qualys_Sample_Report.xml",
) as testfile:
parser = QualysParser()
findings = parser.get_findings(testfile, Test())
@@ -158,24 +158,24 @@ def test_parse_file_with_cvss_values_and_scores(self):
# The CVSS Vector is not used from the Knowledgebase
self.assertEqual(
# CVSS_FINAL is defined without a cvssv3 vector
- finding_cvssv3_score.cvssv3, None
+ finding_cvssv3_score.cvssv3, None,
)
# Nevertheless the CVSSv3 Score should be set
self.assertEqual(
- finding_cvssv3_score.cvssv3_score, 8.2
+ finding_cvssv3_score.cvssv3_score, 8.2,
)
# If no cvss information is present in detection and not in knowledgebase values should be empty
self.assertEqual(
- finding_no_cvssv3.cvssv3, None
+ finding_no_cvssv3.cvssv3, None,
)
self.assertEqual(
- finding_no_cvssv3.cvssv3_score, None
+ finding_no_cvssv3.cvssv3_score, None,
)
# No CVSS Values available in detection and it uses the knowledgebase then
self.assertEqual(finding_no_cvssv3_at_detection.cvssv3,
"CVSS:3.0/AV:N/AC:H/PR:N/UI:N/S:C/C:H/I:H/A:H")
self.assertEqual(
- finding_no_cvssv3_at_detection.cvssv3_score, 9.0
+ finding_no_cvssv3_at_detection.cvssv3_score, 9.0,
)
def test_get_severity_legacy(self):
diff --git a/unittests/tools/test_qualys_webapp_parser.py b/unittests/tools/test_qualys_webapp_parser.py
index 2df655e36b..078e8f7dd0 100644
--- a/unittests/tools/test_qualys_webapp_parser.py
+++ b/unittests/tools/test_qualys_webapp_parser.py
@@ -31,7 +31,7 @@ def test_qualys_webapp_parser_with_one_criticle_vuln_has_one_findings(self):
def test_qualys_webapp_parser_with_many_vuln_has_many_findings(self):
testfile = open(
- get_unit_tests_path() + "/scans/qualys_webapp/qualys_webapp_many_vuln.xml"
+ get_unit_tests_path() + "/scans/qualys_webapp/qualys_webapp_many_vuln.xml",
)
parser = QualysWebAppParser()
findings = parser.get_findings(testfile, Test())
@@ -45,7 +45,7 @@ def test_qualys_webapp_parser_with_many_vuln_has_many_findings(self):
def test_qualys_webapp_parser_info_is_vuln(self):
testfile = open(
- get_unit_tests_path() + "/scans/qualys_webapp/qualys_webapp_many_vuln.xml"
+ get_unit_tests_path() + "/scans/qualys_webapp/qualys_webapp_many_vuln.xml",
)
parser = QualysWebAppParser()
findings = parser.get_findings(testfile, Test(), True)
@@ -59,7 +59,7 @@ def test_qualys_webapp_parser_info_is_vuln(self):
def test_discussion_10239(self):
testfile = open(
- get_unit_tests_path() + "/scans/qualys_webapp/discussion_10239.xml"
+ get_unit_tests_path() + "/scans/qualys_webapp/discussion_10239.xml",
)
parser = QualysWebAppParser()
findings = parser.get_findings(testfile, Test(), True)
diff --git a/unittests/tools/test_sarif_parser.py b/unittests/tools/test_sarif_parser.py
index eb3dd05332..a819846169 100644
--- a/unittests/tools/test_sarif_parser.py
+++ b/unittests/tools/test_sarif_parser.py
@@ -18,8 +18,8 @@ def common_checks(self, finding):
def test_example_report(self):
with open(
path.join(
- get_unit_tests_path() + "/scans/sarif/DefectDojo_django-DefectDojo__2020-12-11_13 42 10__export.sarif"
- )
+ get_unit_tests_path() + "/scans/sarif/DefectDojo_django-DefectDojo__2020-12-11_13 42 10__export.sarif",
+ ),
)as testfile:
parser = SarifParser()
findings = parser.get_findings(testfile, Test())
@@ -109,7 +109,7 @@ def test_example_k4_report_mitigation(self):
with self.subTest(i=0):
finding = findings[0]
self.assertEqual(
- 'Variable "ptr" was used without being initialized. It was declared [here](0).', finding.title
+ 'Variable "ptr" was used without being initialized. It was declared [here](0).', finding.title,
)
self.assertEqual("C2001", finding.vuln_id_from_tool)
self.assertEqual("collections/list.h", finding.file_path)
@@ -276,7 +276,7 @@ def test_dockle(self):
**Rule short description:** Do not store credential in ENVIRONMENT vars/files"""
self.assertEqual(description, finding.description)
self.assertEqual(
- "https://github.com/goodwithtech/dockle/blob/master/CHECKPOINT.md#CIS-DI-0010", finding.references
+ "https://github.com/goodwithtech/dockle/blob/master/CHECKPOINT.md#CIS-DI-0010", finding.references,
)
with self.subTest(i=1):
finding = findings[1]
@@ -286,7 +286,7 @@ def test_dockle(self):
**Rule short description:** Enable Content trust for Docker"""
self.assertEqual(description, finding.description)
self.assertEqual(
- "https://github.com/goodwithtech/dockle/blob/master/CHECKPOINT.md#CIS-DI-0005", finding.references
+ "https://github.com/goodwithtech/dockle/blob/master/CHECKPOINT.md#CIS-DI-0005", finding.references,
)
with self.subTest(i=2):
finding = findings[2]
@@ -296,7 +296,7 @@ def test_dockle(self):
**Rule short description:** Add HEALTHCHECK instruction to the container image"""
self.assertEqual(description, finding.description)
self.assertEqual(
- "https://github.com/goodwithtech/dockle/blob/master/CHECKPOINT.md#CIS-DI-0006", finding.references
+ "https://github.com/goodwithtech/dockle/blob/master/CHECKPOINT.md#CIS-DI-0006", finding.references,
)
with self.subTest(i=3):
finding = findings[3]
@@ -306,7 +306,7 @@ def test_dockle(self):
**Rule short description:** Confirm safety of setuid/setgid files"""
self.assertEqual(description, finding.description)
self.assertEqual(
- "https://github.com/goodwithtech/dockle/blob/master/CHECKPOINT.md#CIS-DI-0008", finding.references
+ "https://github.com/goodwithtech/dockle/blob/master/CHECKPOINT.md#CIS-DI-0008", finding.references,
)
def test_mobsfscan(self):
@@ -394,7 +394,7 @@ def test_flawfinder(self):
self.assertEqual(327, finding.cwe)
self.assertEqual("FF1048", finding.vuln_id_from_tool)
self.assertEqual(
- "e6c1ad2b1d96ffc4035ed8df070600566ad240b8ded025dac30620f3fd4aa9fd", finding.unique_id_from_tool
+ "e6c1ad2b1d96ffc4035ed8df070600566ad240b8ded025dac30620f3fd4aa9fd", finding.unique_id_from_tool,
)
self.assertEqual("https://cwe.mitre.org/data/definitions/327.html", finding.references)
with self.subTest(i=20):
@@ -417,7 +417,7 @@ def test_flawfinder(self):
self.assertEqual(120, finding.cwe)
self.assertEqual("FF1004", finding.vuln_id_from_tool)
self.assertEqual(
- "327fc54b75ab37bbbb31a1b71431aaefa8137ff755acc103685ad5adf88f5dda", finding.unique_id_from_tool
+ "327fc54b75ab37bbbb31a1b71431aaefa8137ff755acc103685ad5adf88f5dda", finding.unique_id_from_tool,
)
self.assertEqual("https://cwe.mitre.org/data/definitions/120.html", finding.references)
with self.subTest(i=52):
@@ -439,7 +439,7 @@ def test_flawfinder(self):
self.assertEqual(482, finding.line)
self.assertEqual("FF1021", finding.vuln_id_from_tool)
self.assertEqual(
- "ad8408027235170e870e7662751a01386beb2d2ed8beb75dd4ba8e4a70e91d65", finding.unique_id_from_tool
+ "ad8408027235170e870e7662751a01386beb2d2ed8beb75dd4ba8e4a70e91d65", finding.unique_id_from_tool,
)
self.assertEqual("https://cwe.mitre.org/data/definitions/120.html", finding.references)
diff --git a/unittests/tools/test_scantist_parser.py b/unittests/tools/test_scantist_parser.py
index 7b8e0b0d4c..a51223869a 100644
--- a/unittests/tools/test_scantist_parser.py
+++ b/unittests/tools/test_scantist_parser.py
@@ -26,7 +26,7 @@ def test_parse_file_with_one_vuln_has_one_finding(self):
"attack against hashes associated with the maximum exponent.",
)
self.assertEqual(
- findings.severity, "Medium"
+ findings.severity, "Medium",
) # Negligible is translated to Informational
def test_parse_file_with_multiple_vuln_has_multiple_findings(self):
diff --git a/unittests/tools/test_snyk_parser.py b/unittests/tools/test_snyk_parser.py
index 59fde5a85f..ba6f27cb3c 100644
--- a/unittests/tools/test_snyk_parser.py
+++ b/unittests/tools/test_snyk_parser.py
@@ -63,7 +63,7 @@ def test_snykParser_finding_has_fields(self):
finding.severity_justification,
)
self.assertEqual(
- "SNYK-JAVA-ORGAPACHESANTUARIO-460281", finding.vuln_id_from_tool
+ "SNYK-JAVA-ORGAPACHESANTUARIO-460281", finding.vuln_id_from_tool,
)
self.assertEqual(1, len(finding.unsaved_vulnerability_ids))
self.assertEqual("CVE-2019-12400", finding.unsaved_vulnerability_ids[0])
@@ -85,7 +85,7 @@ def test_snykParser_finding_has_fields(self):
finding.references,
)
self.assertEqual(
- "com.test:myframework > org.apache.santuario:xmlsec", finding.file_path
+ "com.test:myframework > org.apache.santuario:xmlsec", finding.file_path,
)
def test_snykParser_file_path_with_ampersand_is_preserved(self):
@@ -97,7 +97,7 @@ def test_snykParser_file_path_with_ampersand_is_preserved(self):
finding = findings[0]
self.assertEqual(
"myproject > @angular/localize > @babel/core > lodash",
- finding.file_path
+ finding.file_path,
)
def test_snykParser_allprojects_issue4277(self):
@@ -146,7 +146,7 @@ def test_snykParser_cvssscore_none(self):
finding = findings[0]
self.assertEqual("Low", finding.severity)
self.assertEqual(
- "SNYK-SLES153-PERMISSIONS-2648113", finding.vuln_id_from_tool
+ "SNYK-SLES153-PERMISSIONS-2648113", finding.vuln_id_from_tool,
)
def test_snykParser_target_file(self):
diff --git a/unittests/tools/test_solar_appscreener_parser.py b/unittests/tools/test_solar_appscreener_parser.py
index 3e2284ee80..0fb8cf4ee4 100644
--- a/unittests/tools/test_solar_appscreener_parser.py
+++ b/unittests/tools/test_solar_appscreener_parser.py
@@ -59,6 +59,6 @@ def test_solar_appscreener_parser_with_many_vuln_has_many_findings(self):
self.assertEqual("Trust boundary violation", finding.title)
self.assertEqual("Medium", finding.severity)
self.assertEqual("index.php", finding.sast_source_file_path)
- self.assertEqual(51, finding.sast_source_line),
+ self.assertEqual(51, finding.sast_source_line)
self.assertEqual("index.php", finding.file_path)
self.assertEqual(51, finding.line)
diff --git a/unittests/tools/test_sonarqube_parser.py b/unittests/tools/test_sonarqube_parser.py
index cf72d020e5..57e110f59c 100644
--- a/unittests/tools/test_sonarqube_parser.py
+++ b/unittests/tools/test_sonarqube_parser.py
@@ -21,7 +21,7 @@ def test_file_name_aggregated_parse_file_with_no_vulnerabilities_has_no_findings
self,
):
my_file_handle, _product, _engagement, test = self.init(
- get_unit_tests_path() + "/scans/sonarqube/sonar-no-finding.html"
+ get_unit_tests_path() + "/scans/sonarqube/sonar-no-finding.html",
)
parser = SonarQubeParser()
findings = parser.get_findings(my_file_handle, test)
@@ -31,7 +31,7 @@ def test_file_name_aggregated_parse_file_with_no_vulnerabilities_has_no_findings
# SonarQube Scan detailed - no finding
def test_detailed_parse_file_with_no_vulnerabilities_has_no_findings(self):
my_file_handle, _product, _engagement, test = self.init(
- get_unit_tests_path() + "/scans/sonarqube/sonar-no-finding.html"
+ get_unit_tests_path() + "/scans/sonarqube/sonar-no-finding.html",
)
parser = SonarQubeParser()
parser.set_mode('detailed')
@@ -44,7 +44,7 @@ def test_file_name_aggregated_parse_file_with_single_vulnerability_has_single_fi
self,
):
my_file_handle, _product, _engagement, test = self.init(
- get_unit_tests_path() + "/scans/sonarqube/sonar-single-finding.html"
+ get_unit_tests_path() + "/scans/sonarqube/sonar-single-finding.html",
)
parser = SonarQubeParser()
findings = parser.get_findings(my_file_handle, test)
@@ -93,7 +93,7 @@ def test_file_name_aggregated_parse_file_with_single_vulnerability_has_single_fi
def test_detailed_parse_file_with_single_vulnerability_has_single_finding(self):
my_file_handle, _product, _engagement, test = self.init(
- get_unit_tests_path() + "/scans/sonarqube/sonar-single-finding.html"
+ get_unit_tests_path() + "/scans/sonarqube/sonar-single-finding.html",
)
parser = SonarQubeParser()
parser.set_mode('detailed')
@@ -141,7 +141,7 @@ def test_detailed_parse_file_with_multiple_vulnerabilities_has_multiple_findings
self,
):
my_file_handle, _product, _engagement, test = self.init(
- get_unit_tests_path() + "/scans/sonarqube/sonar-6-findings.html"
+ get_unit_tests_path() + "/scans/sonarqube/sonar-6-findings.html",
)
parser = SonarQubeParser()
parser.set_mode('detailed')
@@ -154,7 +154,7 @@ def test_file_name_aggregated_parse_file_with_multiple_vulnerabilities_has_multi
self,
):
my_file_handle, _product, _engagement, test = self.init(
- get_unit_tests_path() + "/scans/sonarqube/sonar-6-findings.html"
+ get_unit_tests_path() + "/scans/sonarqube/sonar-6-findings.html",
)
parser = SonarQubeParser()
parser.set_mode('detailed')
@@ -167,7 +167,7 @@ def test_file_name_aggregated_parse_file_with_multiple_vulnerabilities_has_multi
def test_detailed_parse_file_with_table_in_table(self):
"""Test parsing when the vulnerability details include a table, with tr and td that should be ignored when looking for list of rules"""
my_file_handle, _product, _engagement, test = self.init(
- get_unit_tests_path() + "/scans/sonarqube/sonar-table-in-table.html"
+ get_unit_tests_path() + "/scans/sonarqube/sonar-table-in-table.html",
)
parser = SonarQubeParser()
parser.set_mode('detailed')
@@ -232,7 +232,7 @@ def test_detailed_parse_file_with_table_in_table(self):
)
self.assertEqual(str, type(item.file_path))
self.assertEqual(
- "java/org/apache/catalina/util/URLEncoder.java", item.file_path
+ "java/org/apache/catalina/util/URLEncoder.java", item.file_path,
)
self.assertEqual(str, type(item.line))
self.assertEqual("190", item.line)
@@ -247,7 +247,7 @@ def test_detailed_parse_file_with_table_in_table(self):
def test_detailed_parse_file_with_rule_undefined(self):
"""the vulnerability's rule is not in the list of rules"""
my_file_handle, _product, _engagement, test = self.init(
- get_unit_tests_path() + "/scans/sonarqube/sonar-rule-undefined.html"
+ get_unit_tests_path() + "/scans/sonarqube/sonar-rule-undefined.html",
)
parser = SonarQubeParser()
parser.set_mode('detailed')
@@ -278,7 +278,7 @@ def test_detailed_parse_file_with_rule_undefined(self):
self.assertEqual("", item.references)
self.assertEqual(str, type(item.file_path))
self.assertEqual(
- "java/org/apache/catalina/util/URLEncoder.java", item.file_path
+ "java/org/apache/catalina/util/URLEncoder.java", item.file_path,
)
self.assertEqual(str, type(item.line))
self.assertEqual("190", item.line)
@@ -293,7 +293,7 @@ def test_detailed_parse_file_with_rule_undefined(self):
# SonarQube Scan - report with aggregations to be made
def test_file_name_aggregated_parse_file_with_vuln_on_same_filename(self):
my_file_handle, _product, _engagement, test = self.init(
- get_unit_tests_path() + "/scans/sonarqube/sonar-4-findings-3-to-aggregate.html"
+ get_unit_tests_path() + "/scans/sonarqube/sonar-4-findings-3-to-aggregate.html",
)
parser = SonarQubeParser()
findings = parser.get_findings(my_file_handle, test)
@@ -356,7 +356,7 @@ def test_file_name_aggregated_parse_file_with_vuln_on_same_filename(self):
# SonarQube Scan detailed - report with aggregations to be made
def test_detailed_parse_file_with_vuln_on_same_filename(self):
my_file_handle, _product, _engagement, test = self.init(
- get_unit_tests_path() + "/scans/sonarqube/sonar-4-findings-3-to-aggregate.html"
+ get_unit_tests_path() + "/scans/sonarqube/sonar-4-findings-3-to-aggregate.html",
)
parser = SonarQubeParser()
parser.set_mode('detailed')
@@ -384,7 +384,7 @@ def test_detailed_parse_file_table_has_whitespace(self):
Data table will have some whitespaces, parser should strip it before compare or use these properties.
"""
my_file_handle, _product, _engagement, test = self.init(
- get_unit_tests_path() + "/scans/sonarqube/sonar-table-in-table-with-whitespace.html"
+ get_unit_tests_path() + "/scans/sonarqube/sonar-table-in-table-with-whitespace.html",
)
parser = SonarQubeParser()
parser.set_mode('detailed')
@@ -449,7 +449,7 @@ def test_detailed_parse_file_table_has_whitespace(self):
)
self.assertEqual(str, type(item.file_path))
self.assertEqual(
- "java/org/apache/catalina/util/URLEncoder.java", item.file_path
+ "java/org/apache/catalina/util/URLEncoder.java", item.file_path,
)
self.assertEqual(str, type(item.line))
self.assertEqual("190", item.line)
@@ -463,7 +463,7 @@ def test_detailed_parse_file_table_has_whitespace(self):
def test_detailed_parse_json_file_with_no_vulnerabilities_has_no_findings(self):
my_file_handle, _product, _engagement, test = self.init(
- get_unit_tests_path() + "/scans/sonarqube/sonar-no-finding.json"
+ get_unit_tests_path() + "/scans/sonarqube/sonar-no-finding.json",
)
parser = SonarQubeParser()
parser.set_mode('detailed')
@@ -473,7 +473,7 @@ def test_detailed_parse_json_file_with_no_vulnerabilities_has_no_findings(self):
def test_detailed_parse_json_file_with_single_vulnerability_has_single_finding(self):
my_file_handle, _product, _engagement, test = self.init(
- get_unit_tests_path() + "/scans/sonarqube/sonar-single-finding.json"
+ get_unit_tests_path() + "/scans/sonarqube/sonar-single-finding.json",
)
parser = SonarQubeParser()
parser.set_mode('detailed')
@@ -552,7 +552,7 @@ def test_detailed_parse_json_file_with_single_vulnerability_has_single_finding(s
def test_detailed_parse_json_file_with_multiple_vulnerabilities_has_multiple_findings(self):
my_file_handle, _product, _engagement, test = self.init(
- get_unit_tests_path() + "/scans/sonarqube/sonar-6-findings.json"
+ get_unit_tests_path() + "/scans/sonarqube/sonar-6-findings.json",
)
parser = SonarQubeParser()
parser.set_mode('detailed')
@@ -564,7 +564,7 @@ def test_detailed_parse_json_file_with_multiple_vulnerabilities_has_multiple_fin
def test_parse_json_file_from_api_with_multiple_findings_json(self):
my_file_handle, _product, _engagement, test = self.init(
- get_unit_tests_path() + "/scans/sonarqube/findings_over_api.json"
+ get_unit_tests_path() + "/scans/sonarqube/findings_over_api.json",
)
parser = SonarQubeParser()
findings = parser.get_findings(my_file_handle, test)
@@ -596,7 +596,7 @@ def test_parse_json_file_from_api_with_multiple_findings_json(self):
def test_parse_json_file_from_api_with_multiple_findings_hotspots_json(self):
my_file_handle, _product, _engagement, test = self.init(
- get_unit_tests_path() + "/scans/sonarqube/findings_over_api_hotspots.json"
+ get_unit_tests_path() + "/scans/sonarqube/findings_over_api_hotspots.json",
)
parser = SonarQubeParser()
findings = parser.get_findings(my_file_handle, test)
@@ -615,7 +615,7 @@ def test_parse_json_file_from_api_with_multiple_findings_hotspots_json(self):
def test_parse_json_file_from_api_with_empty_json(self):
my_file_handle, _product, _engagement, test = self.init(
- get_unit_tests_path() + "/scans/sonarqube/findings_over_api_empty.json"
+ get_unit_tests_path() + "/scans/sonarqube/findings_over_api_empty.json",
)
parser = SonarQubeParser()
findings = parser.get_findings(my_file_handle, test)
@@ -624,7 +624,7 @@ def test_parse_json_file_from_api_with_empty_json(self):
def test_parse_json_file_from_api_with_emppty_zip(self):
my_file_handle, _product, _engagement, test = self.init(
- get_unit_tests_path() + "/scans/sonarqube/empty_zip.zip"
+ get_unit_tests_path() + "/scans/sonarqube/empty_zip.zip",
)
parser = SonarQubeParser()
findings = parser.get_findings(my_file_handle, test)
@@ -633,7 +633,7 @@ def test_parse_json_file_from_api_with_emppty_zip(self):
def test_parse_json_file_from_api_with_multiple_findings_zip(self):
my_file_handle, _product, _engagement, test = self.init(
- get_unit_tests_path() + "/scans/sonarqube/findings_over_api.zip"
+ get_unit_tests_path() + "/scans/sonarqube/findings_over_api.zip",
)
parser = SonarQubeParser()
findings = parser.get_findings(my_file_handle, test)
@@ -652,7 +652,7 @@ def test_parse_json_file_from_api_with_multiple_findings_zip(self):
def test_parse_json_file_issue_10150(self):
my_file_handle, _product, _engagement, test = self.init(
- get_unit_tests_path() + "/scans/sonarqube/issue_10150.json"
+ get_unit_tests_path() + "/scans/sonarqube/issue_10150.json",
)
parser = SonarQubeParser()
findings = parser.get_findings(my_file_handle, test)
diff --git a/unittests/tools/test_spotbugs_parser.py b/unittests/tools/test_spotbugs_parser.py
index 879c971312..2587bc71b2 100644
--- a/unittests/tools/test_spotbugs_parser.py
+++ b/unittests/tools/test_spotbugs_parser.py
@@ -75,7 +75,7 @@ def test_description(self):
test_finding = findings[0]
# Test if line 13 is correct
self.assertEqual(
- "At IdentityFunctionCommandInjection.kt:[lines 20-170]", test_finding.description.splitlines()[12]
+ "At IdentityFunctionCommandInjection.kt:[lines 20-170]", test_finding.description.splitlines()[12],
)
def test_mitigation(self):
diff --git a/unittests/tools/test_sslyze_parser.py b/unittests/tools/test_sslyze_parser.py
index 03194a31d0..24c930b6f2 100644
--- a/unittests/tools/test_sslyze_parser.py
+++ b/unittests/tools/test_sslyze_parser.py
@@ -95,7 +95,7 @@ def test_parse_json_file_with_one_target_has_one_vuln_new(self):
self.assertEqual('Medium', finding.severity)
self.assertEqual(
'TLS recommendations of German BSI: https://www.bsi.bund.de/SharedDocs/Downloads/EN/BSI/Publications/TechGuidelines/TG02102/BSI-TR-02102-2.pdf?__blob=publicationFile&v=10',
- finding.references
+ finding.references,
)
self.assertEqual(1, len(finding.unsaved_endpoints))
@@ -134,7 +134,7 @@ def test_parse_json_file_with_two_target_has_many_vuln_new(self):
self.assertEqual('Medium', finding.severity)
self.assertEqual(
'TLS recommendations of German BSI: https://www.bsi.bund.de/SharedDocs/Downloads/EN/BSI/Publications/TechGuidelines/TG02102/BSI-TR-02102-2.pdf?__blob=publicationFile&v=10',
- finding.references
+ finding.references,
)
self.assertEqual(1, len(finding.unsaved_endpoints))
@@ -148,7 +148,7 @@ def test_parse_json_file_with_two_target_has_many_vuln_new(self):
self.assertEqual('Medium', finding.severity)
self.assertEqual(
'TLS recommendations of German BSI: https://www.bsi.bund.de/SharedDocs/Downloads/EN/BSI/Publications/TechGuidelines/TG02102/BSI-TR-02102-2.pdf?__blob=publicationFile&v=10',
- finding.references
+ finding.references,
)
self.assertEqual(1, len(finding.unsaved_endpoints))
diff --git a/unittests/tools/test_stackhawk_parser.py b/unittests/tools/test_stackhawk_parser.py
index 64151495da..94cf2de470 100644
--- a/unittests/tools/test_stackhawk_parser.py
+++ b/unittests/tools/test_stackhawk_parser.py
@@ -46,7 +46,7 @@ def test_stackhawk_parser_with_one_high_vuln_has_one_findings(self):
"20012",
"10",
False,
- False
+ False,
)
def test_stackhawk_parser_with_many_vuln_has_many_findings_and_removes_duplicates(self):
@@ -67,7 +67,7 @@ def test_stackhawk_parser_with_many_vuln_has_many_findings_and_removes_duplicate
"90027",
"10",
False,
- False
+ False,
)
self.__assertFindingEquals(
@@ -81,7 +81,7 @@ def test_stackhawk_parser_with_many_vuln_has_many_findings_and_removes_duplicate
"40025",
"10",
False,
- False
+ False,
)
self.__assertFindingEquals(
@@ -95,7 +95,7 @@ def test_stackhawk_parser_with_many_vuln_has_many_findings_and_removes_duplicate
"20012",
"10",
False,
- False
+ False,
)
self.__assertFindingEquals(
@@ -109,7 +109,7 @@ def test_stackhawk_parser_with_many_vuln_has_many_findings_and_removes_duplicate
"40012",
"1",
False,
- False
+ False,
)
self.__assertFindingEquals(
@@ -123,7 +123,7 @@ def test_stackhawk_parser_with_many_vuln_has_many_findings_and_removes_duplicate
"10038",
"12",
False,
- False
+ False,
)
self.__assertFindingEquals(
@@ -137,7 +137,7 @@ def test_stackhawk_parser_with_many_vuln_has_many_findings_and_removes_duplicate
"10063",
"12",
False,
- False
+ False,
)
def test_that_a_scan_import_updates_the_test_description(self):
@@ -149,7 +149,7 @@ def test_that_a_scan_import_updates_the_test_description(self):
test.description,
'View scan details here: '
+ '[https://app.stackhawk.com/scans/e2ff5651-7eef-47e9-b743-0c2f7d861e27]'
- + '(https://app.stackhawk.com/scans/e2ff5651-7eef-47e9-b743-0c2f7d861e27)'
+ + '(https://app.stackhawk.com/scans/e2ff5651-7eef-47e9-b743-0c2f7d861e27)',
)
def test_that_a_scan_with_all_false_positive_endpoints_on_a_finding_marks_as_false_positive(self):
@@ -169,7 +169,7 @@ def test_that_a_scan_with_all_false_positive_endpoints_on_a_finding_marks_as_fal
"90027",
"3",
True,
- False
+ False,
)
def test_that_a_scan_with_all_risk_accepted_endpoints_on_a_finding_marks_as_risk_accepted(self):
@@ -189,7 +189,7 @@ def test_that_a_scan_with_all_risk_accepted_endpoints_on_a_finding_marks_as_risk
"90027",
"3",
False,
- True
+ True,
)
def test_that_a_scan_with_endpoints_in_differing_statuses_does_not_mark_as_risk_accepted_or_false_positive(self):
@@ -209,7 +209,7 @@ def test_that_a_scan_with_endpoints_in_differing_statuses_does_not_mark_as_risk_
"90027",
"3",
False,
- False
+ False,
)
def __assertFindingEquals(
@@ -224,7 +224,7 @@ def __assertFindingEquals(
finding_id,
count,
false_positive,
- risk_accepted
+ risk_accepted,
):
self.assertEqual(title, actual_finding.title)
self.assertEqual(date, actual_finding.date)
@@ -235,7 +235,7 @@ def __assertFindingEquals(
actual_finding.description)
self.assertRegex(
actual_finding.steps_to_reproduce,
- "Use a specific message link and click 'Validate' to see the cURL!.*"
+ "Use a specific message link and click 'Validate' to see the cURL!.*",
)
self.assertFalse(actual_finding.static_finding)
self.assertTrue(actual_finding.dynamic_finding)
diff --git a/unittests/tools/test_sysdig_reports_parser.py b/unittests/tools/test_sysdig_reports_parser.py
index 00979f66e8..2e38af87e0 100644
--- a/unittests/tools/test_sysdig_reports_parser.py
+++ b/unittests/tools/test_sysdig_reports_parser.py
@@ -42,7 +42,7 @@ def test_sysdig_parser_missing_cve_field_id_from_csv_file(self):
for endpoint in finding.unsaved_endpoints:
endpoint.clean()
self.assertEqual(
- "Number of fields in row (22) does not match number of headers (21)", str(context.exception)
+ "Number of fields in row (22) does not match number of headers (21)", str(context.exception),
)
def test_sysdig_parser_missing_cve_field_not_starting_with_cve(self):
@@ -54,7 +54,7 @@ def test_sysdig_parser_missing_cve_field_not_starting_with_cve(self):
for endpoint in finding.unsaved_endpoints:
endpoint.clean()
self.assertEqual(
- "Number of fields in row (22) does not match number of headers (21)", str(context.exception)
+ "Number of fields in row (22) does not match number of headers (21)", str(context.exception),
)
def test_sysdig_parser_json_with_many_findings(self):
diff --git a/unittests/tools/test_talisman_parser.py b/unittests/tools/test_talisman_parser.py
index 0f05b83d71..9862f6088a 100644
--- a/unittests/tools/test_talisman_parser.py
+++ b/unittests/tools/test_talisman_parser.py
@@ -27,7 +27,7 @@ def test_parse_many_finding(self):
self.assertEqual(3, len(findings))
finding = findings[0]
self.assertEqual(
- "talisman_report/talisman_reports/data/report.json", finding.file_path
+ "talisman_report/talisman_reports/data/report.json", finding.file_path,
)
self.assertEqual(
"Secret pattern found in talisman_report/talisman_reports/data/report.json file",
diff --git a/unittests/tools/test_trustwave_fusion_api_parser.py b/unittests/tools/test_trustwave_fusion_api_parser.py
index 7773af5cb2..f09a31a0d0 100644
--- a/unittests/tools/test_trustwave_fusion_api_parser.py
+++ b/unittests/tools/test_trustwave_fusion_api_parser.py
@@ -6,7 +6,7 @@
class TestTrustwaveFusionAPIParser(DojoTestCase):
def test_parse_file_with_no_vuln_has_no_findings(self):
with open(
- get_unit_tests_path() + "/scans/trustwave_fusion_api/trustwave_fusion_api_zero_vul.json"
+ get_unit_tests_path() + "/scans/trustwave_fusion_api/trustwave_fusion_api_zero_vul.json",
) as testfile:
parser = TrustwaveFusionAPIParser()
findings = parser.get_findings(testfile, Test())
@@ -26,7 +26,7 @@ def test_vuln_with_valid_cve(self):
self.assertEqual(1, len(finding.unsaved_vulnerability_ids))
self.assertEqual("CVE-2017-7529", finding.unsaved_vulnerability_ids[0])
self.assertEqual(
- "Vulnerability/Missing Patch", finding.description
+ "Vulnerability/Missing Patch", finding.description,
)
# second example
@@ -42,7 +42,7 @@ def test_vuln_with_valid_cve(self):
def test_parse_file_with_multiple_vuln_has_multiple_findings(self):
with open(
- get_unit_tests_path() + "/scans/trustwave_fusion_api/trustwave_fusion_api_many_vul.json"
+ get_unit_tests_path() + "/scans/trustwave_fusion_api/trustwave_fusion_api_many_vul.json",
) as testfile:
parser = TrustwaveFusionAPIParser()
findings = parser.get_findings(testfile, Test())
@@ -58,7 +58,7 @@ def test_parse_file_with_multiple_vuln_has_multiple_findings(self):
self.assertEqual("0123456:id", finding.unique_id_from_tool)
self.assertEqual("Website Detected", finding.title)
self.assertEqual(
- "Information/Service Discovery", finding.description
+ "Information/Service Discovery", finding.description,
)
self.assertIsNone(finding.unsaved_vulnerability_ids)
date = finding.date.strftime("%Y-%m-%dT%H:%M:%S.%f%z")
diff --git a/unittests/tools/test_twistlock_parser.py b/unittests/tools/test_twistlock_parser.py
index 8d8121305b..ce91e7cd0d 100644
--- a/unittests/tools/test_twistlock_parser.py
+++ b/unittests/tools/test_twistlock_parser.py
@@ -47,7 +47,7 @@ def test_parse_file_which_contain_packages_info(self):
def test_parse_file_prisma_twistlock_images_no_vuln(self):
testfile = open(
- path.join(path.dirname(__file__), "../scans/twistlock/scan_report_prisma_twistlock_images_no_vuln.csv")
+ path.join(path.dirname(__file__), "../scans/twistlock/scan_report_prisma_twistlock_images_no_vuln.csv"),
)
parser = TwistlockParser()
findings = parser.get_findings(testfile, Test())
@@ -56,7 +56,7 @@ def test_parse_file_prisma_twistlock_images_no_vuln(self):
def test_parse_file_prisma_twistlock_images_four_vulns(self):
testfile = open(
- path.join(path.dirname(__file__), "../scans/twistlock/scan_report_prisma_twistlock_images_four_vulns.csv")
+ path.join(path.dirname(__file__), "../scans/twistlock/scan_report_prisma_twistlock_images_four_vulns.csv"),
)
parser = TwistlockParser()
findings = parser.get_findings(testfile, Test())
@@ -68,8 +68,8 @@ def test_parse_file_prisma_twistlock_images_four_vulns(self):
def test_parse_file_prisma_twistlock_images_long_package_name(self):
testfile = open(
path.join(
- path.dirname(__file__), "../scans/twistlock/scan_report_prisma_twistlock_images_long_package_name.csv"
- )
+ path.dirname(__file__), "../scans/twistlock/scan_report_prisma_twistlock_images_long_package_name.csv",
+ ),
)
parser = TwistlockParser()
findings = parser.get_findings(testfile, Test())
diff --git a/unittests/tools/test_veracode_parser.py b/unittests/tools/test_veracode_parser.py
index 9a00b0d646..3daed41862 100644
--- a/unittests/tools/test_veracode_parser.py
+++ b/unittests/tools/test_veracode_parser.py
@@ -390,7 +390,7 @@ def json_dynamic_findings_test(self, file_name):
host="application.insecure-company-alliance.com",
port=443,
path="api/*_*//new_user_sign_up",
- query="param=wild-things"
+ query="param=wild-things",
))
@override_settings(USE_FIRST_SEEN=True)
diff --git a/unittests/tools/test_yarn_audit_parser.py b/unittests/tools/test_yarn_audit_parser.py
index 1206f88b43..65206725ca 100644
--- a/unittests/tools/test_yarn_audit_parser.py
+++ b/unittests/tools/test_yarn_audit_parser.py
@@ -68,7 +68,7 @@ def test_yarn_audit_parser_empty_with_error(self):
parser = YarnAuditParser()
parser.get_findings(testfile, self.get_test())
self.assertTrue(
- "yarn audit report contains errors:" in str(context.exception)
+ "yarn audit report contains errors:" in str(context.exception),
)
self.assertTrue("ECONNREFUSED" in str(context.exception))